hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f719804da78f16f6af3489ac457e49300a75a6b2 | 1,916 | py | Python | do_flask_mail.py | penglee87/lpython | 3a53322ccdebf83d6b358386518cf81712433c9e | [
"bzip2-1.0.6"
] | null | null | null | do_flask_mail.py | penglee87/lpython | 3a53322ccdebf83d6b358386518cf81712433c9e | [
"bzip2-1.0.6"
] | null | null | null | do_flask_mail.py | penglee87/lpython | 3a53322ccdebf83d6b358386518cf81712433c9e | [
"bzip2-1.0.6"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask
from flask_mail import Mail
from flask_mail import Message
import os
#测试成功,部分参数作用不明
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.163.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = 'penglee87@163.com'
app.config['MAIL_PASSWORD'] = '******'
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[Flasky]' #邮件主题
#app.config['FLASKY_MAIL_SENDER'] = 'penglee87@163.com'
#app.config['FLASKY_ADMIN'] = 'penglee87@163.com'
mail = Mail(app)
"""
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[Flasky]'
app.config['FLASKY_MAIL_SENDER'] = 'Flasky Admin <flasky@example.com>'
app.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN')
"""
@app.route("/")
def index():
#Message(主题,发件人,收件人)
msg = Message("Hello",
sender="penglee87@163.com",
recipients=["lipeng@163.com"])
msg.body = "testing"
msg.html = "<b>testing</b>"
mail.send(msg)
return '<h1>Hello World!</h1>'
if __name__ == '__main__':
app.run(debug=True)
"""
msg = Message("Hello",
sender="penglee87@163.com",
recipients=["lipeng@163.com"])
msg.body = "testing"
msg.html = "<b>testing</b>"
mail.send(msg)
if __name__ == '__main__':
mail.send(msg)
pip install --no-deps lamson chardet flask-mail
set MAIL_USERNAME=penglee87@163.com
set MAIL_PASSWORD=******
set FLASKY_ADMIN=penglee87@163.com
>>> from flask.ext.mail import Message
>>> from hello import mail
>>> msg = Message('test subject', sender='penglee1206@gmail.com',recipients=['380517767@qq.com'])
>>> msg.body = 'text body'
>>> msg.html = '<b>HTML</b> body'
>>> with app.app_context():
... mail.send(msg)
"""
| 26.246575 | 97 | 0.647182 |
from flask import Flask
from flask_mail import Mail
from flask_mail import Message
import os
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.163.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = 'penglee87@163.com'
app.config['MAIL_PASSWORD'] = '******'
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[Flasky]'
mail = Mail(app)
@app.route("/")
def index():
msg = Message("Hello",
sender="penglee87@163.com",
recipients=["lipeng@163.com"])
msg.body = "testing"
msg.html = "<b>testing</b>"
mail.send(msg)
return '<h1>Hello World!</h1>'
if __name__ == '__main__':
app.run(debug=True)
| true | true |
f71982541576d139123ce5e181dca42523d11d05 | 459 | py | Python | blog/search_indexes.py | GITliyanfeng/blog-django | a804702026a2d58664ec83a993116e17b89e9e8e | [
"MIT"
] | 2 | 2019-03-14T12:35:36.000Z | 2019-03-14T12:35:38.000Z | blog/search_indexes.py | GITliyanfeng/blog-django | a804702026a2d58664ec83a993116e17b89e9e8e | [
"MIT"
] | null | null | null | blog/search_indexes.py | GITliyanfeng/blog-django | a804702026a2d58664ec83a993116e17b89e9e8e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2019/3/19 0019 16:25
# @Author : __Yanfeng
# @Site :
# @File : search_indexes.py
# @Software: PyCharm
from haystack import indexes
from .models import Post
class PostIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return Post
def index_queryset(self, using=None):
return self.get_model().latest_posts()
| 24.157895 | 62 | 0.67756 |
from haystack import indexes
from .models import Post
class PostIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return Post
def index_queryset(self, using=None):
return self.get_model().latest_posts()
| true | true |
f7198316dcf1fee5ef6b1b5530246a472718064a | 109 | py | Python | rest_framework_security/deny_repeat_password/__init__.py | RubenEu/django-rest-framework-security | 638cf271c51a5bafd434a6b6a9c25a7c4849b485 | [
"MIT"
] | 7 | 2020-09-01T09:55:25.000Z | 2021-11-04T06:59:04.000Z | rest_framework_security/deny_repeat_password/__init__.py | RubenEu/django-rest-framework-security | 638cf271c51a5bafd434a6b6a9c25a7c4849b485 | [
"MIT"
] | 32 | 2020-10-28T17:09:18.000Z | 2022-03-12T00:55:09.000Z | rest_framework_security/deny_repeat_password/__init__.py | RubenEu/django-rest-framework-security | 638cf271c51a5bafd434a6b6a9c25a7c4849b485 | [
"MIT"
] | 2 | 2020-12-18T01:26:53.000Z | 2021-11-04T06:59:07.000Z | default_app_config = (
"rest_framework_security.deny_repeat_password.apps.DenyRepeatPasswordAppConfig"
)
| 27.25 | 83 | 0.844037 | default_app_config = (
"rest_framework_security.deny_repeat_password.apps.DenyRepeatPasswordAppConfig"
)
| true | true |
f71983d5a0a270119c6b7c7701a902ea4892f18a | 20,123 | py | Python | obstools/scripts/atacr_clean_spectra.py | paudetseis/OBStools | c6c02d8864c25a14f22d1fae17ff5ad911b9ff00 | [
"MIT"
] | 1 | 2019-12-05T04:32:38.000Z | 2019-12-05T04:32:38.000Z | obstools/scripts/atacr_clean_spectra.py | paudetseis/OBStools | c6c02d8864c25a14f22d1fae17ff5ad911b9ff00 | [
"MIT"
] | 2 | 2019-12-04T02:06:45.000Z | 2019-12-06T22:20:19.000Z | obstools/scripts/atacr_clean_spectra.py | paudetseis/OBStools | c6c02d8864c25a14f22d1fae17ff5ad911b9ff00 | [
"MIT"
] | 1 | 2020-02-25T16:51:35.000Z | 2020-02-25T16:51:35.000Z | #!/usr/bin/env python
# Copyright 2019 Pascal Audet & Helen Janiszewski
#
# This file is part of OBStools.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Import modules and functions
import numpy as np
import pickle
import stdb
from obstools.atacr import StaNoise, Power, Cross, Rotation
from obstools.atacr import utils, plotting
from pathlib import Path
from argparse import ArgumentParser
from os.path import exists as exist
from obspy import UTCDateTime
from numpy import nan
def get_cleanspec_arguments(argv=None):
"""
Get Options from :class:`~optparse.OptionParser` objects.
Calling options for the script `obs_clean_spectra.py` that accompany this
package.
"""
parser = ArgumentParser(
usage="%(prog)s [options] <indb>",
description="Script used "
"to extract daily spectra calculated from " +
"`obs_daily_spectra.py` and flag days for outlier " +
"PSDs and calculate spectral averages of the " +
"corresponding Fourier transforms over the entire " +
"time period specified. The stations are processed " +
"one by one and the data are stored to disk.")
parser.add_argument(
"indb",
help="Station Database to process from.",
type=str)
# General Settings
parser.add_argument(
"--keys",
action="store",
type=str,
dest="stkeys",
default="",
help="Specify a comma separated list of station " +
"keys for which to perform the analysis. These must " +
"be contained within the station database. Partial " +
"keys will be used to match against those in the " +
"dictionary. For instance, providing IU will match " +
"with all stations in the IU network. " +
"[Default processes all stations in the database]")
parser.add_argument(
"-O", "--overwrite",
action="store_true",
dest="ovr",
default=False,
help="Force the overwriting of pre-existing data. " +
"[Default False]")
# Event Selection Criteria
DaysGroup = parser.add_argument_group(
title="Time Search Settings",
description="Time settings associated with " +
"searching for day-long seismograms")
DaysGroup.add_argument(
"--start",
action="store",
type=str,
dest="startT",
default="",
help="Specify a UTCDateTime compatible string " +
"representing the start day for the data search. " +
"This will override any station start times. " +
"[Default start date of each station in database]")
DaysGroup.add_argument(
"--end",
action="store",
type=str,
dest="endT",
default="",
help="Specify a UTCDateTime compatible string " +
"representing the start time for the data search. " +
"This will override any station end times. " +
"[Default end date of each station in database]")
# Constants Settings
ConstGroup = parser.add_argument_group(
title='Parameter Settings',
description="Miscellaneous default values " +
"and settings")
ConstGroup.add_argument(
"--freq-band",
action="store",
type=str,
dest="pd",
default=None,
help="Specify comma-separated frequency limits " +
"(float, in Hz) over which to calculate spectral " +
"features used in flagging the days/windows. " +
"[Default 0.004,2.0]")
ConstGroup.add_argument(
"--tolerance",
action="store",
type=float,
dest="tol",
default=1.5,
help="Specify parameter for tolerance threshold. " +
"If spectrum > std*tol, window is flagged as bad. " +
"[Default 1.5]")
ConstGroup.add_argument(
"--alpha",
action="store",
type=float,
dest="alpha",
default=0.05,
help="Confidence level for f-test, for iterative " +
"flagging of windows. [Default 0.05, or 95 percent confidence]")
# Constants Settings
FigureGroup = parser.add_argument_group(
title='Figure Settings',
description="Flags for plotting figures")
FigureGroup.add_argument(
"--figQC",
action="store_true",
dest="fig_QC",
default=False,
help="Plot Quality-Control figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--debug",
action="store_true",
dest="debug",
default=False,
help="Plot intermediate steps for debugging. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figAverage",
action="store_true",
dest="fig_average",
default=False,
help="Plot daily average figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figCoh",
action="store_true",
dest="fig_coh_ph",
default=False,
help="Plot Coherence and Phase figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figCross",
action="store_true",
dest="fig_av_cross",
default=False,
help="Plot cross-spectra figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--save-fig",
action="store_true",
dest="saveplot",
default=False,
help="Set this option if you wish to save the figure(s). [Default " +
"does not save figure]")
FigureGroup.add_argument(
"--format",
action="store",
type=str,
dest="form",
default="png",
help="Specify format of figure. Can be any one of the valid" +
"matplotlib formats: 'png', 'jpg', 'eps', 'pdf'. [Default 'png']")
args = parser.parse_args(argv)
# Check inputs
if not exist(args.indb):
parser.error("Input file " + args.indb + " does not exist")
# create station key list
if len(args.stkeys) > 0:
args.stkeys = args.stkeys.split(',')
# construct start time
if len(args.startT) > 0:
try:
args.startT = UTCDateTime(args.startT)
except Exception:
parser.error(
"Error: Cannot construct UTCDateTime from start time: " +
args.startT)
else:
args.startT = None
# construct end time
if len(args.endT) > 0:
try:
args.endT = UTCDateTime(args.endT)
except Exception:
parser.error(
"Error: Cannot construct UTCDateTime from end time: " +
args.endT)
else:
args.endT = None
if args.pd is None:
args.pd = [0.004, 2.0]
else:
args.pd = [float(val) for val in args.pd.split(',')]
args.pd = sorted(args.pd)
if (len(args.pd)) != 2:
raise(Exception(
"Error: --freq-band should contain 2 " +
"comma-separated floats"))
return args
def main(args=None):
if args is None:
# Run Input Parser
args = get_cleanspec_arguments()
# Load Database
# stdb>0.1.3
try:
db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)
# stdb=0.1.3
except Exception:
db = stdb.io.load_db(fname=args.indb)
# Construct station key loop
allkeys = db.keys()
sorted(allkeys)
# Extract key subset
if len(args.stkeys) > 0:
stkeys = []
for skey in args.stkeys:
stkeys.extend([s for s in allkeys if skey in s])
else:
stkeys = db.keys()
sorted(stkeys)
# Loop over station keys
for stkey in list(stkeys):
# Extract station information from dictionary
sta = db[stkey]
# Path where spectra are located
specpath = Path('SPECTRA') / stkey
if not specpath.is_dir():
raise(Exception(
"Path to " + str(specpath) +
" doesn`t exist - aborting"))
# Path where average spectra will be saved
avstpath = Path('AVG_STA') / stkey
if not avstpath.is_dir():
print("Path to "+str(avstpath)+" doesn`t exist - creating it")
avstpath.mkdir(parents=True)
# Path where plots will be saved
if args.saveplot:
plotpath = avstpath / 'PLOTS'
if not plotpath.is_dir():
plotpath.mkdir(parents=True)
else:
plotpath = False
# Get catalogue search start time
if args.startT is None:
tstart = sta.startdate
else:
tstart = args.startT
# Get catalogue search end time
if args.endT is None:
tend = sta.enddate
else:
tend = args.endT
if tstart > sta.enddate or tend < sta.startdate:
continue
# Temporary print locations
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
# Update Display
print("\n|===============================================|")
print("|===============================================|")
print("| {0:>8s} |".format(
sta.station))
print("|===============================================|")
print("|===============================================|")
print("| Station: {0:>2s}.{1:5s} |".format(
sta.network, sta.station))
print("| Channel: {0:2s}; Locations: {1:15s} |".format(
sta.channel, ",".join(tlocs)))
print("| Lon: {0:7.2f}; Lat: {1:6.2f} |".format(
sta.longitude, sta.latitude))
print("| Start time: {0:19s} |".format(
sta.startdate.strftime("%Y-%m-%d %H:%M:%S")))
print("| End time: {0:19s} |".format(
sta.enddate.strftime("%Y-%m-%d %H:%M:%S")))
print("|-----------------------------------------------|")
# Filename for output average spectra
dstart = str(tstart.year).zfill(4)+'.'+str(tstart.julday).zfill(3)+'-'
dend = str(tend.year).zfill(4)+'.'+str(tend.julday).zfill(3)+'.'
fileavst = avstpath / (dstart+dend+'avg_sta.pkl')
if fileavst.exists():
if not args.ovr:
print("* -> file "+str(fileavst)+" exists - continuing")
continue
# Containers for power and cross spectra
coh_all = []
ph_all = []
coh_12_all = []
coh_1Z_all = []
coh_1P_all = []
coh_2Z_all = []
coh_2P_all = []
coh_ZP_all = []
ph_12_all = []
ph_1Z_all = []
ph_1P_all = []
ph_2Z_all = []
ph_2P_all = []
ph_ZP_all = []
ad_12_all = []
ad_1Z_all = []
ad_1P_all = []
ad_2Z_all = []
ad_2P_all = []
ad_ZP_all = []
nwins = []
t1 = tstart
# Initialize StaNoise object
stanoise = StaNoise()
# Loop through each day withing time range
while t1 < tend:
year = str(t1.year).zfill(4)
jday = str(t1.julday).zfill(3)
tstamp = year+'.'+jday+'.'
filespec = specpath / (tstamp + 'spectra.pkl')
# Load file if it exists
if filespec.exists():
print("\n"+"*"*60)
print('* Calculating noise spectra for key ' +
stkey+' and day '+year+'.'+jday)
print("* -> file "+str(filespec)+" found - loading")
file = open(filespec, 'rb')
daynoise = pickle.load(file)
file.close()
stanoise += daynoise
else:
t1 += 3600.*24.
continue
coh_all.append(daynoise.rotation.coh)
ph_all.append(daynoise.rotation.ph)
# Coherence
coh_12_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c12,
daynoise.power.c11,
daynoise.power.c22), 50))
coh_1Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1Z,
daynoise.power.c11,
daynoise.power.cZZ), 50))
coh_1P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1P,
daynoise.power.c11,
daynoise.power.cPP), 50))
coh_2Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2Z,
daynoise.power.c22,
daynoise.power.cZZ), 50))
coh_2P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2P,
daynoise.power.c22,
daynoise.power.cPP), 50))
coh_ZP_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.cZP,
daynoise.power.cZZ,
daynoise.power.cPP), 50))
# Phase
try:
ph_12_all.append(
180./np.pi*utils.phase(daynoise.cross.c12))
except Exception:
ph_12_all.append(None)
try:
ph_1Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c1Z))
except Exception:
ph_1Z_all.append(None)
try:
ph_1P_all.append(
180./np.pi*utils.phase(daynoise.cross.c1P))
except Exception:
ph_1P_all.append(None)
try:
ph_2Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c2Z))
except Exception:
ph_2Z_all.append(None)
try:
ph_2P_all.append(
180./np.pi*utils.phase(daynoise.cross.c2P))
except Exception:
ph_2P_all.append(None)
try:
ph_ZP_all.append(
180./np.pi*utils.phase(daynoise.cross.cZP))
except Exception:
ph_ZP_all.append(None)
# Admittance
ad_12_all.append(utils.smooth(utils.admittance(
daynoise.cross.c12, daynoise.power.c11), 50))
ad_1Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1Z, daynoise.power.c11), 50))
ad_1P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1P, daynoise.power.c11), 50))
ad_2Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2Z, daynoise.power.c22), 50))
ad_2P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2P, daynoise.power.c22), 50))
ad_ZP_all.append(utils.smooth(utils.admittance(
daynoise.cross.cZP, daynoise.power.cZZ), 50))
t1 += 3600.*24.
# Convert to numpy arrays
coh_all = np.array(coh_all)
ph_all = np.array(ph_all)
coh_12_all = np.array(coh_12_all)
coh_1Z_all = np.array(coh_1Z_all)
coh_1P_all = np.array(coh_1P_all)
coh_2Z_all = np.array(coh_2Z_all)
coh_2P_all = np.array(coh_2P_all)
coh_ZP_all = np.array(coh_ZP_all)
ph_12_all = np.array(ph_12_all)
ph_1Z_all = np.array(ph_1Z_all)
ph_1P_all = np.array(ph_1P_all)
ph_2Z_all = np.array(ph_2Z_all)
ph_2P_all = np.array(ph_2P_all)
ph_ZP_all = np.array(ph_ZP_all)
ad_12_all = np.array(ad_12_all)
ad_1Z_all = np.array(ad_1Z_all)
ad_1P_all = np.array(ad_1P_all)
ad_2Z_all = np.array(ad_2Z_all)
ad_2P_all = np.array(ad_2P_all)
ad_ZP_all = np.array(ad_ZP_all)
# Store transfer functions as objects for plotting
coh = Cross(coh_12_all, coh_1Z_all, coh_1P_all,
coh_2Z_all, coh_2P_all, coh_ZP_all)
ph = Cross(ph_12_all, ph_1Z_all, ph_1P_all,
ph_2Z_all, ph_2P_all, ph_ZP_all)
ad = Cross(ad_12_all, ad_1Z_all, ad_1P_all,
ad_2Z_all, ad_2P_all, ad_ZP_all)
# Quality control to identify outliers
stanoise.QC_sta_spectra(pd=args.pd, tol=args.tol, alpha=args.alpha,
fig_QC=args.fig_QC, debug=args.debug,
save=plotpath, form=args.form)
# Average spectra for good days
stanoise.average_sta_spectra(
fig_average=args.fig_average,
save=plotpath, form=args.form)
if args.fig_av_cross:
fname = stkey + '.' + 'av_coherence'
plot = plotting.fig_av_cross(
stanoise.f, coh, stanoise.gooddays,
'Coherence', stanoise.ncomp, key=stkey, lw=0.5)
# if plotpath.is_dir():
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
fname = stkey + '.' + 'av_admittance'
plot = plotting.fig_av_cross(
stanoise.f, ad, stanoise.gooddays,
'Admittance', stanoise.ncomp, key=stkey, lw=0.5)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
fname = stkey + '.' + 'av_phase'
plot = plotting.fig_av_cross(
stanoise.f, ph, stanoise.gooddays,
'Phase', stanoise.ncomp, key=stkey, marker=',', lw=0)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
if args.fig_coh_ph and stanoise.direc is not None:
fname = stkey + '.' + 'coh_ph'
plot = plotting.fig_coh_ph(coh_all, ph_all, stanoise.direc)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
# Save to file
stanoise.save(fileavst)
if __name__ == "__main__":
# Run main program
main()
| 34.28109 | 79 | 0.533718 |
import numpy as np
import pickle
import stdb
from obstools.atacr import StaNoise, Power, Cross, Rotation
from obstools.atacr import utils, plotting
from pathlib import Path
from argparse import ArgumentParser
from os.path import exists as exist
from obspy import UTCDateTime
from numpy import nan
def get_cleanspec_arguments(argv=None):
parser = ArgumentParser(
usage="%(prog)s [options] <indb>",
description="Script used "
"to extract daily spectra calculated from " +
"`obs_daily_spectra.py` and flag days for outlier " +
"PSDs and calculate spectral averages of the " +
"corresponding Fourier transforms over the entire " +
"time period specified. The stations are processed " +
"one by one and the data are stored to disk.")
parser.add_argument(
"indb",
help="Station Database to process from.",
type=str)
parser.add_argument(
"--keys",
action="store",
type=str,
dest="stkeys",
default="",
help="Specify a comma separated list of station " +
"keys for which to perform the analysis. These must " +
"be contained within the station database. Partial " +
"keys will be used to match against those in the " +
"dictionary. For instance, providing IU will match " +
"with all stations in the IU network. " +
"[Default processes all stations in the database]")
parser.add_argument(
"-O", "--overwrite",
action="store_true",
dest="ovr",
default=False,
help="Force the overwriting of pre-existing data. " +
"[Default False]")
DaysGroup = parser.add_argument_group(
title="Time Search Settings",
description="Time settings associated with " +
"searching for day-long seismograms")
DaysGroup.add_argument(
"--start",
action="store",
type=str,
dest="startT",
default="",
help="Specify a UTCDateTime compatible string " +
"representing the start day for the data search. " +
"This will override any station start times. " +
"[Default start date of each station in database]")
DaysGroup.add_argument(
"--end",
action="store",
type=str,
dest="endT",
default="",
help="Specify a UTCDateTime compatible string " +
"representing the start time for the data search. " +
"This will override any station end times. " +
"[Default end date of each station in database]")
ConstGroup = parser.add_argument_group(
title='Parameter Settings',
description="Miscellaneous default values " +
"and settings")
ConstGroup.add_argument(
"--freq-band",
action="store",
type=str,
dest="pd",
default=None,
help="Specify comma-separated frequency limits " +
"(float, in Hz) over which to calculate spectral " +
"features used in flagging the days/windows. " +
"[Default 0.004,2.0]")
ConstGroup.add_argument(
"--tolerance",
action="store",
type=float,
dest="tol",
default=1.5,
help="Specify parameter for tolerance threshold. " +
"If spectrum > std*tol, window is flagged as bad. " +
"[Default 1.5]")
ConstGroup.add_argument(
"--alpha",
action="store",
type=float,
dest="alpha",
default=0.05,
help="Confidence level for f-test, for iterative " +
"flagging of windows. [Default 0.05, or 95 percent confidence]")
FigureGroup = parser.add_argument_group(
title='Figure Settings',
description="Flags for plotting figures")
FigureGroup.add_argument(
"--figQC",
action="store_true",
dest="fig_QC",
default=False,
help="Plot Quality-Control figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--debug",
action="store_true",
dest="debug",
default=False,
help="Plot intermediate steps for debugging. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figAverage",
action="store_true",
dest="fig_average",
default=False,
help="Plot daily average figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figCoh",
action="store_true",
dest="fig_coh_ph",
default=False,
help="Plot Coherence and Phase figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--figCross",
action="store_true",
dest="fig_av_cross",
default=False,
help="Plot cross-spectra figure. " +
"[Default does not plot figure]")
FigureGroup.add_argument(
"--save-fig",
action="store_true",
dest="saveplot",
default=False,
help="Set this option if you wish to save the figure(s). [Default " +
"does not save figure]")
FigureGroup.add_argument(
"--format",
action="store",
type=str,
dest="form",
default="png",
help="Specify format of figure. Can be any one of the valid" +
"matplotlib formats: 'png', 'jpg', 'eps', 'pdf'. [Default 'png']")
args = parser.parse_args(argv)
if not exist(args.indb):
parser.error("Input file " + args.indb + " does not exist")
if len(args.stkeys) > 0:
args.stkeys = args.stkeys.split(',')
if len(args.startT) > 0:
try:
args.startT = UTCDateTime(args.startT)
except Exception:
parser.error(
"Error: Cannot construct UTCDateTime from start time: " +
args.startT)
else:
args.startT = None
if len(args.endT) > 0:
try:
args.endT = UTCDateTime(args.endT)
except Exception:
parser.error(
"Error: Cannot construct UTCDateTime from end time: " +
args.endT)
else:
args.endT = None
if args.pd is None:
args.pd = [0.004, 2.0]
else:
args.pd = [float(val) for val in args.pd.split(',')]
args.pd = sorted(args.pd)
if (len(args.pd)) != 2:
raise(Exception(
"Error: --freq-band should contain 2 " +
"comma-separated floats"))
return args
def main(args=None):
if args is None:
args = get_cleanspec_arguments()
try:
db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)
except Exception:
db = stdb.io.load_db(fname=args.indb)
allkeys = db.keys()
sorted(allkeys)
if len(args.stkeys) > 0:
stkeys = []
for skey in args.stkeys:
stkeys.extend([s for s in allkeys if skey in s])
else:
stkeys = db.keys()
sorted(stkeys)
for stkey in list(stkeys):
sta = db[stkey]
specpath = Path('SPECTRA') / stkey
if not specpath.is_dir():
raise(Exception(
"Path to " + str(specpath) +
" doesn`t exist - aborting"))
avstpath = Path('AVG_STA') / stkey
if not avstpath.is_dir():
print("Path to "+str(avstpath)+" doesn`t exist - creating it")
avstpath.mkdir(parents=True)
if args.saveplot:
plotpath = avstpath / 'PLOTS'
if not plotpath.is_dir():
plotpath.mkdir(parents=True)
else:
plotpath = False
if args.startT is None:
tstart = sta.startdate
else:
tstart = args.startT
if args.endT is None:
tend = sta.enddate
else:
tend = args.endT
if tstart > sta.enddate or tend < sta.startdate:
continue
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
print("\n|===============================================|")
print("|===============================================|")
print("| {0:>8s} |".format(
sta.station))
print("|===============================================|")
print("|===============================================|")
print("| Station: {0:>2s}.{1:5s} |".format(
sta.network, sta.station))
print("| Channel: {0:2s}; Locations: {1:15s} |".format(
sta.channel, ",".join(tlocs)))
print("| Lon: {0:7.2f}; Lat: {1:6.2f} |".format(
sta.longitude, sta.latitude))
print("| Start time: {0:19s} |".format(
sta.startdate.strftime("%Y-%m-%d %H:%M:%S")))
print("| End time: {0:19s} |".format(
sta.enddate.strftime("%Y-%m-%d %H:%M:%S")))
print("|-----------------------------------------------|")
dstart = str(tstart.year).zfill(4)+'.'+str(tstart.julday).zfill(3)+'-'
dend = str(tend.year).zfill(4)+'.'+str(tend.julday).zfill(3)+'.'
fileavst = avstpath / (dstart+dend+'avg_sta.pkl')
if fileavst.exists():
if not args.ovr:
print("* -> file "+str(fileavst)+" exists - continuing")
continue
coh_all = []
ph_all = []
coh_12_all = []
coh_1Z_all = []
coh_1P_all = []
coh_2Z_all = []
coh_2P_all = []
coh_ZP_all = []
ph_12_all = []
ph_1Z_all = []
ph_1P_all = []
ph_2Z_all = []
ph_2P_all = []
ph_ZP_all = []
ad_12_all = []
ad_1Z_all = []
ad_1P_all = []
ad_2Z_all = []
ad_2P_all = []
ad_ZP_all = []
nwins = []
t1 = tstart
stanoise = StaNoise()
while t1 < tend:
year = str(t1.year).zfill(4)
jday = str(t1.julday).zfill(3)
tstamp = year+'.'+jday+'.'
filespec = specpath / (tstamp + 'spectra.pkl')
if filespec.exists():
print("\n"+"*"*60)
print('* Calculating noise spectra for key ' +
stkey+' and day '+year+'.'+jday)
print("* -> file "+str(filespec)+" found - loading")
file = open(filespec, 'rb')
daynoise = pickle.load(file)
file.close()
stanoise += daynoise
else:
t1 += 3600.*24.
continue
coh_all.append(daynoise.rotation.coh)
ph_all.append(daynoise.rotation.ph)
coh_12_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c12,
daynoise.power.c11,
daynoise.power.c22), 50))
coh_1Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1Z,
daynoise.power.c11,
daynoise.power.cZZ), 50))
coh_1P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c1P,
daynoise.power.c11,
daynoise.power.cPP), 50))
coh_2Z_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2Z,
daynoise.power.c22,
daynoise.power.cZZ), 50))
coh_2P_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.c2P,
daynoise.power.c22,
daynoise.power.cPP), 50))
coh_ZP_all.append(
utils.smooth(
utils.coherence(
daynoise.cross.cZP,
daynoise.power.cZZ,
daynoise.power.cPP), 50))
try:
ph_12_all.append(
180./np.pi*utils.phase(daynoise.cross.c12))
except Exception:
ph_12_all.append(None)
try:
ph_1Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c1Z))
except Exception:
ph_1Z_all.append(None)
try:
ph_1P_all.append(
180./np.pi*utils.phase(daynoise.cross.c1P))
except Exception:
ph_1P_all.append(None)
try:
ph_2Z_all.append(
180./np.pi*utils.phase(daynoise.cross.c2Z))
except Exception:
ph_2Z_all.append(None)
try:
ph_2P_all.append(
180./np.pi*utils.phase(daynoise.cross.c2P))
except Exception:
ph_2P_all.append(None)
try:
ph_ZP_all.append(
180./np.pi*utils.phase(daynoise.cross.cZP))
except Exception:
ph_ZP_all.append(None)
ad_12_all.append(utils.smooth(utils.admittance(
daynoise.cross.c12, daynoise.power.c11), 50))
ad_1Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1Z, daynoise.power.c11), 50))
ad_1P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c1P, daynoise.power.c11), 50))
ad_2Z_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2Z, daynoise.power.c22), 50))
ad_2P_all.append(utils.smooth(utils.admittance(
daynoise.cross.c2P, daynoise.power.c22), 50))
ad_ZP_all.append(utils.smooth(utils.admittance(
daynoise.cross.cZP, daynoise.power.cZZ), 50))
t1 += 3600.*24.
coh_all = np.array(coh_all)
ph_all = np.array(ph_all)
coh_12_all = np.array(coh_12_all)
coh_1Z_all = np.array(coh_1Z_all)
coh_1P_all = np.array(coh_1P_all)
coh_2Z_all = np.array(coh_2Z_all)
coh_2P_all = np.array(coh_2P_all)
coh_ZP_all = np.array(coh_ZP_all)
ph_12_all = np.array(ph_12_all)
ph_1Z_all = np.array(ph_1Z_all)
ph_1P_all = np.array(ph_1P_all)
ph_2Z_all = np.array(ph_2Z_all)
ph_2P_all = np.array(ph_2P_all)
ph_ZP_all = np.array(ph_ZP_all)
ad_12_all = np.array(ad_12_all)
ad_1Z_all = np.array(ad_1Z_all)
ad_1P_all = np.array(ad_1P_all)
ad_2Z_all = np.array(ad_2Z_all)
ad_2P_all = np.array(ad_2P_all)
ad_ZP_all = np.array(ad_ZP_all)
coh = Cross(coh_12_all, coh_1Z_all, coh_1P_all,
coh_2Z_all, coh_2P_all, coh_ZP_all)
ph = Cross(ph_12_all, ph_1Z_all, ph_1P_all,
ph_2Z_all, ph_2P_all, ph_ZP_all)
ad = Cross(ad_12_all, ad_1Z_all, ad_1P_all,
ad_2Z_all, ad_2P_all, ad_ZP_all)
stanoise.QC_sta_spectra(pd=args.pd, tol=args.tol, alpha=args.alpha,
fig_QC=args.fig_QC, debug=args.debug,
save=plotpath, form=args.form)
stanoise.average_sta_spectra(
fig_average=args.fig_average,
save=plotpath, form=args.form)
if args.fig_av_cross:
fname = stkey + '.' + 'av_coherence'
plot = plotting.fig_av_cross(
stanoise.f, coh, stanoise.gooddays,
'Coherence', stanoise.ncomp, key=stkey, lw=0.5)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
fname = stkey + '.' + 'av_admittance'
plot = plotting.fig_av_cross(
stanoise.f, ad, stanoise.gooddays,
'Admittance', stanoise.ncomp, key=stkey, lw=0.5)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
fname = stkey + '.' + 'av_phase'
plot = plotting.fig_av_cross(
stanoise.f, ph, stanoise.gooddays,
'Phase', stanoise.ncomp, key=stkey, marker=',', lw=0)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
if args.fig_coh_ph and stanoise.direc is not None:
fname = stkey + '.' + 'coh_ph'
plot = plotting.fig_coh_ph(coh_all, ph_all, stanoise.direc)
if plotpath:
plot.savefig(
str(plotpath / (fname + '.' + args.form)),
dpi=300, bbox_inches='tight', format=args.form)
else:
plot.show()
stanoise.save(fileavst)
if __name__ == "__main__":
main()
| true | true |
f7198466f423c197e1cd92a6791f6a97eeca93b9 | 2,362 | py | Python | tests/demos/test_demos.py | Nicolinho/RLBench | 3014e872f518d5439e73e057e2251dee1f9df481 | [
"BSD-3-Clause"
] | 619 | 2019-09-26T23:15:57.000Z | 2022-03-15T23:46:48.000Z | tests/demos/test_demos.py | Nicolinho/RLBench | 3014e872f518d5439e73e057e2251dee1f9df481 | [
"BSD-3-Clause"
] | 147 | 2019-09-27T02:22:45.000Z | 2022-03-30T08:37:43.000Z | tests/demos/test_demos.py | Nicolinho/RLBench | 3014e872f518d5439e73e057e2251dee1f9df481 | [
"BSD-3-Clause"
] | 142 | 2019-09-27T03:43:12.000Z | 2022-03-13T19:00:18.000Z | import unittest
import rlbench.backend.task as task
import os
from rlbench.backend.utils import task_file_to_task_class
from pyrep import PyRep
from pyrep.robots.arms.panda import Panda
from pyrep.robots.end_effectors.panda_gripper import PandaGripper
from rlbench.backend.const import TTT_FILE
from tools.task_validator import task_smoke
from rlbench.observation_config import ObservationConfig
from rlbench.backend.scene import Scene
from rlbench.backend.robot import Robot
# All task definition modules shipped with RLBench (every .py file in the
# tasks directory except the package initializer).
TASKS = [t for t in os.listdir(task.TASKS_PATH)
         if t != '__init__.py' and t.endswith('.py')]
# Absolute directory of this test file; used to locate the simulator scene.
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
# Task does work, but fails demos often. These should eventually be improved.
FLAKY_TASKS = ['put_all_groceries_in_cupboard']
class TestTasks(unittest.TestCase):
    """Tests all of the tasks via the task_validator tool.
    Given that unit tests shouldn't take forever to run, we only limit
    each validation run to 1 variation. In practice, a newly created task
    should be validated for all variations. Despite this, the test still takes
    a while to run.
    """
    def test_run_task_validator(self):
        # One sub-test per task module so a single failing task does not
        # abort validation of the remaining tasks.
        for task_file in TASKS:
            test_name = task_file.split('.py')[0]
            with self.subTest(task=test_name):
                if test_name in FLAKY_TASKS:
                    self.skipTest('Flaky task.')
                # Fresh simulator instance per task to avoid state leaking
                # between validations.
                sim = PyRep()
                ttt_file = os.path.join(
                    DIR_PATH, '..', '..', 'rlbench', TTT_FILE)
                sim.launch(ttt_file, headless=True)
                sim.step_ui()
                sim.set_simulation_timestep(50.0)
                sim.step_ui()
                sim.start()
                robot = Robot(Panda(), PandaGripper())
                obs = ObservationConfig()
                # Disable all observations: this test only validates task
                # mechanics, not sensor output.
                obs.set_all(False)
                scene = Scene(sim, robot, obs)
                # NOTE(review): sim.start() is also called a few lines above;
                # the second call looks redundant — confirm it is intentional.
                sim.start()
                task_class = task_file_to_task_class(task_file)
                active_task = task_class(sim, robot)
                try:
                    task_smoke(active_task, scene, variation=-1,
                               max_variations=2, success=0.25)
                except Exception as e:
                    # Always release the simulator before re-raising so later
                    # sub-tests can launch their own instance.
                    sim.stop()
                    sim.shutdown()
                    raise e
                sim.stop()
                sim.shutdown()
| 38.096774 | 78 | 0.610076 | import unittest
import rlbench.backend.task as task
import os
from rlbench.backend.utils import task_file_to_task_class
from pyrep import PyRep
from pyrep.robots.arms.panda import Panda
from pyrep.robots.end_effectors.panda_gripper import PandaGripper
from rlbench.backend.const import TTT_FILE
from tools.task_validator import task_smoke
from rlbench.observation_config import ObservationConfig
from rlbench.backend.scene import Scene
from rlbench.backend.robot import Robot
# All task definition modules shipped with RLBench (every .py file in the
# tasks directory except the package initializer).
TASKS = [t for t in os.listdir(task.TASKS_PATH)
         if t != '__init__.py' and t.endswith('.py')]
# Absolute directory of this test file; used to locate the simulator scene.
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
# Tasks that work but fail demos too often to validate reliably.
FLAKY_TASKS = ['put_all_groceries_in_cupboard']
class TestTasks(unittest.TestCase):
    """Smoke-tests every RLBench task through the task_validator tool.

    Each task is validated for a limited number of variations to keep the
    suite runtime manageable.
    """
    def test_run_task_validator(self):
        # One sub-test per task module so a single failing task does not
        # abort validation of the remaining tasks.
        for task_file in TASKS:
            test_name = task_file.split('.py')[0]
            with self.subTest(task=test_name):
                if test_name in FLAKY_TASKS:
                    self.skipTest('Flaky task.')
                # Fresh simulator instance per task.
                sim = PyRep()
                ttt_file = os.path.join(
                    DIR_PATH, '..', '..', 'rlbench', TTT_FILE)
                sim.launch(ttt_file, headless=True)
                sim.step_ui()
                sim.set_simulation_timestep(50.0)
                sim.step_ui()
                sim.start()
                robot = Robot(Panda(), PandaGripper())
                obs = ObservationConfig()
                # Observations are not needed for validation.
                obs.set_all(False)
                scene = Scene(sim, robot, obs)
                # NOTE(review): sim.start() was already called above; the
                # second call looks redundant — confirm it is intentional.
                sim.start()
                task_class = task_file_to_task_class(task_file)
                active_task = task_class(sim, robot)
                try:
                    task_smoke(active_task, scene, variation=-1,
                               max_variations=2, success=0.25)
                except Exception as e:
                    # Always release the simulator before re-raising.
                    sim.stop()
                    sim.shutdown()
                    raise e
                sim.stop()
                sim.shutdown()
f71986b928e02b3c1c5322f3668bc41a49a8abc1 | 7,013 | py | Python | GHC2018/process.py | purrcat259/n-n-hashcode | 98a1c443e6112903bc29a858bc18476a6635d460 | [
"MIT"
] | null | null | null | GHC2018/process.py | purrcat259/n-n-hashcode | 98a1c443e6112903bc29a858bc18476a6635d460 | [
"MIT"
] | null | null | null | GHC2018/process.py | purrcat259/n-n-hashcode | 98a1c443e6112903bc29a858bc18476a6635d460 | [
"MIT"
] | null | null | null | from GHC2018.input import Input
from GHC2018.models.Car import Car
from GHC2018.models.Route import Route
from GHC2018.models.ride import calculate_distance
from tqdm import tqdm
class Process:
    """Greedy ride-scheduling simulation (Google Hash Code 2018 style).

    Drives a step-by-step simulation: at every time step completed rides are
    closed out, idle cars are assigned a new ride, and all assigned cars move
    one step toward their destination. Results are written to a ``.out``
    file next to the input file.
    """
    def __init__(self, input_data, debug=True):
        # Parsed problem instance (vehicle count, rides, sim step count, ...).
        self.input_data = input_data
        # When True, per-step progress messages are printed.
        self.debug = debug
        # Current simulation time step; advanced by run().
        self.current_time = 0
        # self.get_routes()
    def initialise_cars(self):
        """Create one Car per vehicle, all starting at the origin (0, 0)."""
        cars = []
        for i in range(0, self.input_data.vehicle_count):
            car = Car(i, 0, 0)
            cars.append(car)
        self.cars = cars
    def debug_print(self, message):
        """Print *message* only when debug mode is enabled."""
        if self.debug:
            print(message)
    def run(self):
        """Run the full simulation and write the output file at the end."""
        self.initialise_cars()
        self.rides = self.input_data.rides
        sim_range = range(0, self.input_data.sim_steps)
        if not self.debug:
            sim_range = tqdm(sim_range)
        for i in sim_range:
            self.debug_print('--- STEP {}/{} ---'.format(i, self.input_data.sim_steps))
            self.current_time = i
            # if cars are at their destination, end the ride
            self.end_rides()
            # schedule any cars that are not assigned a ride
            self.schedule_rides()
            # move any cars
            self.move_cars()
        self.debug_print('SIMULATION ENDED')
        print('{} rides completed. {} rides left unfinished.'.format(
            len(self.get_completed_rides()),
            len(self.rides) - len(self.get_completed_rides()))
        )
        self.output_file()
    def output_file(self):
        """Write the submission file: one line per car listing its ride IDs."""
        output_file_path = self.input_data.file_path.replace('.in', '.out')
        # Group completed ride IDs by the car that served them.
        car_rides = {}
        for ride in self.get_completed_rides():
            if ride.assigned_car in car_rides.keys():
                car_rides[ride.assigned_car].append(ride.ride_id)
            else:
                car_rides[ride.assigned_car] = [ride.ride_id]
        with open(output_file_path, 'w') as output_file:
            for car, rides in car_rides.items():
                # Line format: "<ride count> <id> <id> ..."
                output_string = str(len(rides))
                for ride_id in rides:
                    output_string += ' {}'.format(ride_id)
                output_file.write(output_string + '\n')
    def end_rides(self):
        """Complete the ride (and possibly route) of every arrived car."""
        self.debug_print('Checking if cars have arrived')
        completed_cars = [
            car for car in self.get_assigned_cars() if car.is_at_destination()
        ]
        self.debug_print('{} cars completed their ride this turn'.format(len(completed_cars)))
        for car in completed_cars:
            self.debug_print('Car {} has completed their ride'.format(car.car_id))
            car.complete_ride()
            if car.assigned_route_completed():
                car.complete_route()
        self.debug_print('{}/{} rides completed'.format(
            len(self.get_completed_rides()),
            len(self.rides)
        ))
    def get_completed_rides(self):
        """Return all rides that have been completed so far."""
        return [ride for ride in self.input_data.rides if ride.completed]
    def schedule_rides(self):
        """Assign a single-ride route to every currently idle car."""
        unassigned_cars = self.get_unassigned_cars()
        self.debug_print('Scheduling {} cars'.format(len(unassigned_cars)))
        unassigned_rides = self.get_unassigned_rides()
        if len(unassigned_rides) == 0:
            return
        for car in unassigned_cars:
            # next_ride = unassigned_rides.pop(0)
            # Re-fetch each iteration: the previous assignment may have
            # claimed a ride.
            unassigned_rides = self.get_unassigned_rides()
            next_ride = self.get_next_ride(car, unassigned_rides, self.current_time)
            rides_for_route = [next_ride]
            route = Route(rides_for_route)
            self.debug_print('Assigned route with ride IDs {} to car: {}'.format(
                route.get_route_ride_ids(),
                car.car_id
            ))
            car.assign_route(route)
    def get_closest_ride_to_car(self, car, rides):
        """Remove and return the ride whose start point is nearest to *car*."""
        closest_ride = rides[0]
        closest_distance = calculate_distance(car.row, closest_ride.row_start, car.col, closest_ride.col_start)
        for i in range(1, len(rides)):
            ride = rides[i]
            next_closest_distance = calculate_distance(car.row, ride.row_start, car.col, ride.col_start)
            if next_closest_distance < closest_distance:
                closest_ride = ride
                closest_distance = next_closest_distance
        return rides.pop(rides.index(closest_ride))
    def move_cars(self):
        """Advance every assigned car one step toward its destination."""
        for car in self.get_assigned_cars():
            self.debug_print('Moving car with ID: {}'.format(car.car_id))
            car.move_towards_destination()
    def get_assigned_cars(self):
        # Cars currently working on a route.
        return [car for car in self.cars if car.assigned_route is not None]
    def get_unassigned_cars(self):
        # Idle cars available for scheduling.
        return [car for car in self.cars if car.assigned_route is None]
    def get_unassigned_rides(self):
        # Rides that no car has claimed yet.
        return [ride for ride in self.input_data.rides if ride.assigned_car is None]
    def set_next_routes(self, route, routes):
        """Record every route that could feasibly follow *route* in time.

        NOTE(review): not called from run(); appears to be experimental
        route-chaining support.
        """
        for t_route in routes:
            if not t_route is route:
                wait = t_route.ordered_rides[0].earliest_start -route.ordered_rides[-1].latest_finish
                if wait >= 0:
                    route.next_routes.append({'route':t_route, 'wait_time': wait})
    def add_to_route(self, ride, next_ride, routes):
        """Attach (*ride*, *next_ride*) to an existing route, or start a new one.

        NOTE(review): not called from run(); also, insert(-1, ...) places the
        new ride *before* the current last entry — confirm that ordering is
        intended before using this helper.
        """
        for route in routes:
            start_ride = route.ordered_rides[0]
            end_ride = route.ordered_rides[-1]
            if start_ride is next_ride:
                route.ordered_rides.insert(0, ride)
                return routes
            elif end_ride is ride:
                route.ordered_rides.insert(-1, next_ride)
                return routes
        routes.append(Route([ride, next_ride]))
        return routes
    def get_next_ride(self, car, rides, actual_start_time):
        """Pick the feasible ride with the smallest waiting time for *car*.

        A ride is feasible when the car can reach its start and finish it
        before the ride's latest-finish deadline. Returns None when no ride
        is feasible; returns early on a zero-wait ride.
        """
        # unassigned_rides = deepcopy(self.get_unassigned_rides())
        best_ride = None
        waiting = 0
        for unassigned_ride in rides:
            distance_to_next_ride = calculate_distance(car.row, unassigned_ride.row_start, car.col, unassigned_ride.col_start)
            time_to_new_start = actual_start_time + distance_to_next_ride
            if time_to_new_start + unassigned_ride.distance <= unassigned_ride.latest_finish:
                temp_waiting = max(unassigned_ride.earliest_start - (time_to_new_start + unassigned_ride.distance), 0)
                if best_ride is None or waiting > temp_waiting:
                    waiting = temp_waiting
                    best_ride = unassigned_ride
                    if waiting == 0:
                        return best_ride
        return best_ride
if __name__ == '__main__':
    # Run the simulation over every provided problem instance in turn.
    input_files = (
        'a_example.in',
        'b_should_be_easy.in',
        'c_no_hurry.in',
        'd_metropolis.in',
        'e_high_bonus.in',
    )
    for input_file in input_files:
        print('Running: {}\n'.format(input_file))
        parser = Input(input_file)
        parser.read_file()
        simulation = Process(input_data=parser, debug=False)
        simulation.run()
| 39.178771 | 126 | 0.610866 | from GHC2018.input import Input
from GHC2018.models.Car import Car
from GHC2018.models.Route import Route
from GHC2018.models.ride import calculate_distance
from tqdm import tqdm
class Process:
    """Greedy ride-scheduling simulation (Google Hash Code 2018 style).

    Drives a step-by-step simulation: at every time step completed rides are
    closed out, idle cars are assigned a new ride, and all assigned cars move
    one step toward their destination. Results are written to a ``.out``
    file next to the input file.
    """
    def __init__(self, input_data, debug=True):
        # Parsed problem instance (vehicle count, rides, sim step count, ...).
        self.input_data = input_data
        # When True, per-step progress messages are printed.
        self.debug = debug
        # Current simulation time step; advanced by run().
        self.current_time = 0
    def initialise_cars(self):
        """Create one Car per vehicle, all starting at the origin (0, 0)."""
        cars = []
        for i in range(0, self.input_data.vehicle_count):
            car = Car(i, 0, 0)
            cars.append(car)
        self.cars = cars
    def debug_print(self, message):
        """Print *message* only when debug mode is enabled."""
        if self.debug:
            print(message)
    def run(self):
        """Run the full simulation and write the output file at the end."""
        self.initialise_cars()
        self.rides = self.input_data.rides
        sim_range = range(0, self.input_data.sim_steps)
        if not self.debug:
            sim_range = tqdm(sim_range)
        for i in sim_range:
            self.debug_print('--- STEP {}/{} ---'.format(i, self.input_data.sim_steps))
            self.current_time = i
            # End rides for cars that reached their destination, then
            # schedule idle cars, then advance all assigned cars.
            self.end_rides()
            self.schedule_rides()
            self.move_cars()
        self.debug_print('SIMULATION ENDED')
        print('{} rides completed. {} rides left unfinished.'.format(
            len(self.get_completed_rides()),
            len(self.rides) - len(self.get_completed_rides()))
        )
        self.output_file()
    def output_file(self):
        """Write the submission file: one line per car listing its ride IDs."""
        output_file_path = self.input_data.file_path.replace('.in', '.out')
        # Group completed ride IDs by the car that served them.
        car_rides = {}
        for ride in self.get_completed_rides():
            if ride.assigned_car in car_rides.keys():
                car_rides[ride.assigned_car].append(ride.ride_id)
            else:
                car_rides[ride.assigned_car] = [ride.ride_id]
        with open(output_file_path, 'w') as output_file:
            for car, rides in car_rides.items():
                # Line format: "<ride count> <id> <id> ..."
                output_string = str(len(rides))
                for ride_id in rides:
                    output_string += ' {}'.format(ride_id)
                output_file.write(output_string + '\n')
    def end_rides(self):
        """Complete the ride (and possibly route) of every arrived car."""
        self.debug_print('Checking if cars have arrived')
        completed_cars = [
            car for car in self.get_assigned_cars() if car.is_at_destination()
        ]
        self.debug_print('{} cars completed their ride this turn'.format(len(completed_cars)))
        for car in completed_cars:
            self.debug_print('Car {} has completed their ride'.format(car.car_id))
            car.complete_ride()
            if car.assigned_route_completed():
                car.complete_route()
        self.debug_print('{}/{} rides completed'.format(
            len(self.get_completed_rides()),
            len(self.rides)
        ))
    def get_completed_rides(self):
        """Return all rides that have been completed so far."""
        return [ride for ride in self.input_data.rides if ride.completed]
    def schedule_rides(self):
        """Assign a single-ride route to every currently idle car."""
        unassigned_cars = self.get_unassigned_cars()
        self.debug_print('Scheduling {} cars'.format(len(unassigned_cars)))
        unassigned_rides = self.get_unassigned_rides()
        if len(unassigned_rides) == 0:
            return
        for car in unassigned_cars:
            # Re-fetch each iteration: the previous assignment may have
            # claimed a ride.
            unassigned_rides = self.get_unassigned_rides()
            next_ride = self.get_next_ride(car, unassigned_rides, self.current_time)
            rides_for_route = [next_ride]
            route = Route(rides_for_route)
            self.debug_print('Assigned route with ride IDs {} to car: {}'.format(
                route.get_route_ride_ids(),
                car.car_id
            ))
            car.assign_route(route)
    def get_closest_ride_to_car(self, car, rides):
        """Remove and return the ride whose start point is nearest to *car*."""
        closest_ride = rides[0]
        closest_distance = calculate_distance(car.row, closest_ride.row_start, car.col, closest_ride.col_start)
        for i in range(1, len(rides)):
            ride = rides[i]
            next_closest_distance = calculate_distance(car.row, ride.row_start, car.col, ride.col_start)
            if next_closest_distance < closest_distance:
                closest_ride = ride
                closest_distance = next_closest_distance
        return rides.pop(rides.index(closest_ride))
    def move_cars(self):
        """Advance every assigned car one step toward its destination."""
        for car in self.get_assigned_cars():
            self.debug_print('Moving car with ID: {}'.format(car.car_id))
            car.move_towards_destination()
    def get_assigned_cars(self):
        # Cars currently working on a route.
        return [car for car in self.cars if car.assigned_route is not None]
    def get_unassigned_cars(self):
        # Idle cars available for scheduling.
        return [car for car in self.cars if car.assigned_route is None]
    def get_unassigned_rides(self):
        # Rides that no car has claimed yet.
        return [ride for ride in self.input_data.rides if ride.assigned_car is None]
    def set_next_routes(self, route, routes):
        """Record every route that could feasibly follow *route* in time.

        NOTE(review): not called from run(); appears to be experimental
        route-chaining support.
        """
        for t_route in routes:
            if not t_route is route:
                wait = t_route.ordered_rides[0].earliest_start -route.ordered_rides[-1].latest_finish
                if wait >= 0:
                    route.next_routes.append({'route':t_route, 'wait_time': wait})
    def add_to_route(self, ride, next_ride, routes):
        """Attach (*ride*, *next_ride*) to an existing route, or start a new one.

        NOTE(review): not called from run(); also, insert(-1, ...) places the
        new ride *before* the current last entry — confirm that ordering is
        intended before using this helper.
        """
        for route in routes:
            start_ride = route.ordered_rides[0]
            end_ride = route.ordered_rides[-1]
            if start_ride is next_ride:
                route.ordered_rides.insert(0, ride)
                return routes
            elif end_ride is ride:
                route.ordered_rides.insert(-1, next_ride)
                return routes
        routes.append(Route([ride, next_ride]))
        return routes
    def get_next_ride(self, car, rides, actual_start_time):
        """Pick the feasible ride with the smallest waiting time for *car*.

        A ride is feasible when the car can reach its start and finish it
        before the ride's latest-finish deadline. Returns None when no ride
        is feasible; returns early on a zero-wait ride.
        """
        best_ride = None
        waiting = 0
        for unassigned_ride in rides:
            distance_to_next_ride = calculate_distance(car.row, unassigned_ride.row_start, car.col, unassigned_ride.col_start)
            time_to_new_start = actual_start_time + distance_to_next_ride
            if time_to_new_start + unassigned_ride.distance <= unassigned_ride.latest_finish:
                temp_waiting = max(unassigned_ride.earliest_start - (time_to_new_start + unassigned_ride.distance), 0)
                if best_ride is None or waiting > temp_waiting:
                    waiting = temp_waiting
                    best_ride = unassigned_ride
                    if waiting == 0:
                        return best_ride
        return best_ride
if __name__ == '__main__':
    # Process each Hash Code input instance one after another.
    problem_instances = (
        'a_example.in',
        'b_should_be_easy.in',
        'c_no_hurry.in',
        'd_metropolis.in',
        'e_high_bonus.in',
    )
    for instance in problem_instances:
        print('Running: {}\n'.format(instance))
        reader = Input(instance)
        reader.read_file()
        process = Process(input_data=reader, debug=False)
        process.run()
| true | true |
f719878d7cf2f176cf391bedf04e4b2cfa47cc02 | 1,701 | py | Python | app/core/migrations/0001_initial.py | SirEric-A/recipe-app-api | 05a767fcb87f2ca47918698930d10f6e21654576 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | SirEric-A/recipe-app-api | 05a767fcb87f2ca47918698930d10f6e21654576 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | SirEric-A/recipe-app-api | 05a767fcb87f2ca47918698930d10f6e21654576 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-18 21:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; avoid hand-editing field definitions, as the
    # migration history must match what later migrations were generated from.

    # First migration of the app: creates the custom User model.
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.029412 | 266 | 0.637272 |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; avoid hand-editing field definitions, as the
    # migration history must match what later migrations were generated from.

    # First migration of the app: creates the custom User model.
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| true | true |
f71987f0e511820af63a6cf60ad703869664ef65 | 4,832 | py | Python | .ycm_extra_conf.py | bigt1234/objectpool | dab515f71c12f8df22686053043f7e2c4c929354 | [
"Zlib"
] | 66 | 2016-11-07T01:00:46.000Z | 2022-03-13T01:25:54.000Z | .ycm_extra_conf.py | bigt1234/objectpool | dab515f71c12f8df22686053043f7e2c4c929354 | [
"Zlib"
] | 1 | 2020-11-26T12:08:53.000Z | 2021-09-24T01:06:49.000Z | .ycm_extra_conf.py | bigt1234/objectpool | dab515f71c12f8df22686053043f7e2c4c929354 | [
"Zlib"
] | 19 | 2016-07-18T07:58:11.000Z | 2022-03-13T01:24:07.000Z | #!/usr/bin/env python
#
# Copyright (C) 2014 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# Fallback compile flags used when no compilation database is configured.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-I', 'src',
'-I', 'thirdparty/nonius',
'-I', 'thirdparty/Catch',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Source extensions probed when looking up flags for a header file.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute directory that contains this configuration script."""
  script_path = os.path.abspath( __file__ )
  return os.path.dirname( script_path )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Rewrite relative path arguments in *flags* as paths under *working_directory*.

  Handles both the split form ("-I", "path") and the fused form ("-Ipath")
  of the known path-taking flags. With no working directory the flags are
  returned unchanged (as a fresh list); empty flag strings are dropped.
  """
  if not working_directory:
    return list( flags )
  absolute_flags = []
  expect_path_next = False
  PATH_FLAGS = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  for original_flag in flags:
    rewritten = original_flag
    if expect_path_next:
      expect_path_next = False
      if not original_flag.startswith( '/' ):
        rewritten = os.path.join( working_directory, original_flag )
    for prefix in PATH_FLAGS:
      if original_flag == prefix:
        # Bare path flag: the actual path arrives as the next argument.
        expect_path_next = True
        break
      if original_flag.startswith( prefix ):
        # Fused flag such as "-Isrc": rebase the embedded path.
        relative_part = original_flag[ len( prefix ): ]
        rewritten = prefix + os.path.join( working_directory, relative_part )
        break
    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags
def IsHeaderFile( filename ):
  """Return True when *filename* carries a recognised C/C++ header extension."""
  _, ext = os.path.splitext( filename )
  return ext in ( '.h', '.hxx', '.hpp', '.hh', '.h++' )
def GetCompilationInfoForFile( filename ):
  """Look up compilation info for *filename* in the compilation database.

  CMake's compile_commands.json carries no entries for headers, so for a
  header we probe sibling source files (same basename with each source
  extension) and return the first database hit that actually has compiler
  flags; None when no sibling matches.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  stem = os.path.splitext( filename )[ 0 ]
  for source_ext in SOURCE_EXTENSIONS:
    candidate = stem + source_ext
    if not os.path.exists( candidate ):
      continue
    info = database.GetCompilationInfoForFile( candidate )
    if info.compiler_flags_:
      return info
  return None
# This is the entry point; this function is called by ycmd to produce flags for
# a file.
def FlagsForFile( filename, **kwargs ):
  """ycmd entry point: produce the compile flags for *filename*."""
  if not database:
    # No compilation database: fall back to the static flag list, resolved
    # relative to this script's directory.
    fallback = MakeRelativePathsInFlagsAbsolute( flags, DirectoryOfThisScript() )
    return { 'flags': fallback, 'do_cache': True }
  # Note: compilation_info.compiler_flags_ is a "list-like" StringVec,
  # not a real python list.
  info = GetCompilationInfoForFile( filename )
  if not info:
    return None
  final_flags = MakeRelativePathsInFlagsAbsolute(
      info.compiler_flags_,
      info.compiler_working_dir_ )
  return { 'flags': final_flags, 'do_cache': True }
| 32.648649 | 79 | 0.708609 |
import os
import ycm_core
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# Fallback compile flags used when no compilation database is configured.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers, so for C++ headers ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-I', 'src',
'-I', 'thirdparty/nonius',
'-I', 'thirdparty/Catch',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Source extensions probed when looking up flags for a header file.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute path of the directory holding this script."""
  return os.path.dirname( os.path.realpath( os.path.abspath( __file__ ) ) ) if False else os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Resolve relative path arguments in compiler *flags* against *working_directory*.

  Without a working directory the input is simply copied. Supports both
  split ("-I", "path") and fused ("-Ipath") forms of the known path-taking
  flags; empty flag strings are dropped from the result.
  """
  if not working_directory:
    return list( flags )
  result = []
  pending_path = False
  for current in flags:
    updated = current
    if pending_path:
      pending_path = False
      if not current.startswith( '/' ):
        updated = os.path.join( working_directory, current )
    for marker in ( '-isystem', '-I', '-iquote', '--sysroot=' ):
      if current == marker:
        # The path follows as a separate argument.
        pending_path = True
        break
      if current.startswith( marker ):
        # The path is fused onto the flag itself.
        tail = current[ len( marker ): ]
        updated = marker + os.path.join( working_directory, tail )
        break
    if updated:
      result.append( updated )
  return result
def IsHeaderFile( filename ):
  """True if *filename* ends with a known C/C++ header extension."""
  HEADER_EXTENSIONS = ( '.h', '.hxx', '.hpp', '.hh', '.h++' )
  return os.path.splitext( filename )[ 1 ] in HEADER_EXTENSIONS
def GetCompilationInfoForFile( filename ):
  """Return compilation info for *filename* from the database, or None.

  Headers are resolved through a sibling source file, since the database
  has no entries for headers.
  """
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    return None
  return database.GetCompilationInfoForFile( filename )
# This is the entry point; this function is called by ycmd to produce flags for
# a file.
def FlagsForFile( filename, **kwargs ):
  """ycmd entry point: return the compile flags (and cache policy) for *filename*.

  Uses the compilation database when available, otherwise falls back to the
  static ``flags`` list resolved relative to this script's directory.
  Returns None when the database has no usable entry for the file.
  """
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
| true | true |
f71988e1a677b3eb305af40560a0785370f713df | 14,327 | py | Python | oqupy/backends/tempo_backend.py | tempoCollaboration/OQuPy | a389a161991a59259e5df47d8e0f405fcac75fe5 | [
"Apache-2.0"
] | 13 | 2022-02-15T12:33:17.000Z | 2022-03-31T10:01:57.000Z | oqupy/backends/tempo_backend.py | tempoCollaboration/OQuPy | a389a161991a59259e5df47d8e0f405fcac75fe5 | [
"Apache-2.0"
] | 11 | 2022-02-16T07:35:46.000Z | 2022-03-24T18:22:12.000Z | oqupy/backends/tempo_backend.py | tempoCollaboration/OQuPy | a389a161991a59259e5df47d8e0f405fcac75fe5 | [
"Apache-2.0"
] | 2 | 2022-02-17T01:23:55.000Z | 2022-02-17T08:51:57.000Z | # Copyright 2020 The TEMPO Collaboration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for tempo and mean-field tempo backend.
"""
from typing import Callable, Dict, Optional, Tuple
from copy import copy
from numpy import ndarray, moveaxis, dot
from oqupy import operators
from oqupy.config import TEMPO_BACKEND_CONFIG
from oqupy.backends import node_array as na
from oqupy.util import create_delta
class BaseTempoBackend:
    """
    Backend class for TEMPO.
    Parameters
    ----------
    initial_state: ndarray
        The initial density matrix (as a vector).
    influence: callable(int) -> ndarray
        Callable that takes an integer `step` and returns the influence super
        operator of that `step`.
    unitary_transform: ndarray
        Unitary that transforms the coupling operator into a diagonal form.
    sum_north: ndarray
        The summing vector for the north legs.
    sum_west: ndarray
        The summing vector for the west legs.
    dkmax: int
        Number of influences to include. If ``dkmax == None`` then all
        influences are included.
    epsrel: float
        Maximal relative SVD truncation error.
    """
    def __init__(
        self,
        initial_state: ndarray,
        influence: Callable[[int], ndarray],
        unitary_transform: ndarray,
        sum_north: ndarray,
        sum_west: ndarray,
        dkmax: int,
        epsrel: float,
        config: Optional[Dict] = None):
        """Create a TempoBackend object. """
        self._initial_state = initial_state
        self._influence = influence
        self._unitary_transform = unitary_transform
        self._sum_north = sum_north
        self._sum_west = sum_west
        self._dkmax = dkmax
        self._epsrel = epsrel
        # Step counter and current state; populated once the computation
        # is initialized.
        self._step = None
        self._state = None
        self._config = TEMPO_BACKEND_CONFIG if config is None else config
        # Tensor-network objects built lazily by _initialize_mps_mpo().
        self._mps = None
        self._mpo = None
        self._super_u = None
        self._super_u_dagg = None
        self._sum_north_na = None
    @property
    def step(self) -> int:
        """The current step in the TEMPO computation. """
        return self._step
    def _initialize_mps_mpo(self) :
        """Build the initial MPS and influence MPO.

        The MPS starts as the (flattened) initial density matrix; the MPO is
        assembled from ``dkmax + 1`` pre-computed influence tensors (a single
        one when ``dkmax is None``), with the diagonalizing unitary applied
        to the k=0 influence.
        """
        self._initial_state = copy(self._initial_state).reshape(-1)
        # Left/right superoperators for the diagonalizing unitary.
        self._super_u = operators.left_right_super(
                                self._unitary_transform,
                                self._unitary_transform.conjugate().T)
        self._super_u_dagg = operators.left_right_super(
                                self._unitary_transform.conjugate().T,
                                self._unitary_transform)
        self._sum_north_na = na.NodeArray([self._sum_north],
                                          left=False,
                                          right=False,
                                          name="Sum north")
        influences = []
        if self._dkmax is None:
            dkmax_pre_compute = 1
        else:
            dkmax_pre_compute = self._dkmax + 1
        for i in range(dkmax_pre_compute):
            infl = self._influence(i)
            infl_four_legs = create_delta(infl, [1, 0, 0, 1])
            if i == 0:
                # Sandwich the k=0 influence between the transformation
                # superoperators.
                tmp = dot(moveaxis(infl_four_legs, 1, -1),
                          self._super_u_dagg)
                tmp = moveaxis(tmp, -1, 1)
                tmp = dot(tmp, self._super_u.T)
                infl_four_legs = tmp
            influences.append(infl_four_legs)
        self._mps = na.NodeArray([self._initial_state],
                                 left=False,
                                 right=False,
                                 name="Thee MPS")
        self._mpo = na.NodeArray(list(reversed(influences)),
                                 left=True,
                                 right=True,
                                 name="Thee Time Evolving MPO")
    def _compute_system_step(self, current_step, prop_1, prop_2) -> ndarray:
        """
        Takes a step in the TEMPO tensor network computation.
        For example, for at step 4, we start with:
            A ... self._mps
            B ... self._mpo
            w ... self._sum_west
            n ... self._sum_north_array
            p1 ... prop_1
            p2 ... prop_2
              n  n  n  n
              |  |  |  |
              |  |  |  |     |
        w~~ ~~B~~B~~B~~B~~ ~~p2
              |  |  |  |
                       p1
              |  |  |  |
              A~~A~~A~~A
        return:
            step = 4
            state = contraction of A,B,w,n,p1
        effects:
            self._mpo will grow to the left with the next influence functional
            self._mps will be contraction of A,B,w,p1,p2
        Returns
        -------
        step: int
            The current step count.
        state: ndarray
            Density matrix at the current step.
        """
        prop_1_na = na.NodeArray([prop_1.T],
                                 left=False,
                                 right=False,
                                 name="first half-step")
        prop_2_na = na.NodeArray([prop_2.T],
                                 left=True,
                                 right=False,
                                 name="second half-step")
        # Select / grow the MPO depending on the memory-cutoff regime.
        if self._dkmax is None:
            mpo = self._mpo.copy()
            infl = self._influence(len(mpo))
            infl_four_legs = create_delta(infl, [1, 0, 0, 1])
            infl_na = na.NodeArray([infl_four_legs],
                                   left=True,
                                   right=True)
            self._mpo = na.join(infl_na,
                                self._mpo,
                                name="The Time Evolving MPO",
                                copy=False)
        elif current_step <= self._dkmax:
            _, mpo = na.split(self._mpo,
                              int(0 - current_step),
                              copy=True)
        else: # current_step > self._dkmax
            mpo = self._mpo.copy()
            infl = self._influence(self._dkmax-current_step)
            if infl is not None:
                infl_four_legs = create_delta(infl, [1, 0, 0, 1])
                infl_na = na.NodeArray([infl_four_legs],
                                       left=True,
                                       right=True)
                _, mpo = na.split(self._mpo,
                                  index=1,
                                  copy=True)
                mpo = na.join(infl_na,
                              mpo,
                              name="Thee Time Evolving MPO",
                              copy=False)
        mpo.name = "temporary MPO"
        mpo.apply_vector(self._sum_west, left=True)
        # Apply the first half-step propagator, then contract the MPO into
        # the MPS with SVD truncation at relative tolerance epsrel.
        self._mps.zip_up(prop_1_na,
                         axes=[(0,0)],
                         left_index=-1,
                         right_index=-1,
                         direction="left",
                         max_singular_values=None,
                         max_truncation_err=self._epsrel,
                         relative=True,
                         copy=False)
        if len(self._mps) != len(mpo):
            self._mps.contract(self._sum_north_na,
                               axes=[(0,0)],
                               left_index=0,
                               right_index=0,
                               direction="right",
                               copy=True)
        self._mps.zip_up(mpo,
                         axes=[(0, 0)],
                         left_index=0,
                         right_index=-1,
                         direction="right",
                         max_singular_values=None,
                         max_truncation_err=self._epsrel,
                         relative=True,
                         copy=False)
        self._mps.svd_sweep(from_index=-1,
                            to_index=0,
                            max_singular_values=None,
                            max_truncation_err=self._epsrel,
                            relative=True)
        self._mps = na.join(self._mps,
                            prop_2_na,
                            copy=False,
                            name=f"The MPS ({current_step})")
        # Contract a copy of the MPS with the north summing vectors to
        # obtain the current (vectorized) density matrix.
        tmp_mps = self._mps.copy()
        for _ in range(len(tmp_mps)-1):
            tmp_mps.contract(self._sum_north_na,
                             axes=[(0,0)],
                             left_index=0,
                             right_index=0,
                             direction="right",
                             copy=True)
        assert len(tmp_mps) == 1
        assert not tmp_mps.left
        assert not tmp_mps.right
        assert tmp_mps.rank == 1
        state = tmp_mps.nodes[0].get_tensor()
        return state
class TempoBackend(BaseTempoBackend):
"""
ToDo
"""
def __init__(
self,
initial_state: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
propagators: Callable[[int], Tuple[ndarray, ndarray]],
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Optional[Dict] = None):
"""Create a TempoBackend object. """
super().__init__(
initial_state,
influence,
unitary_transform,
sum_north,
sum_west,
dkmax,
epsrel,
config)
self._propagators = propagators
def initialize(self)-> Tuple[int, ndarray]:
"""
ToDo
"""
self._step = 0
self._initialize_mps_mpo()
self._state = self._initial_state
return self._step, copy(self._state)
def compute_step(self) -> Tuple[int, ndarray]:
"""
ToDo
"""
self._step += 1
prop_1, prop_2 = self._propagators(self._step-1)
self._state = self._compute_system_step(self._step, prop_1, prop_2)
return self._step, copy(self._state)
class TempoWithFieldBackend(BaseTempoBackend):
"""
backend for tensor network tempo with coherent field evolution.
Note the only difference from TensorNetworkTempoBackend in the
signature is the addition of the initial_field and compute_field
parameters, and the change of the propagator signature.
Parameters
----------
initial_state: ndarray
The initial density matrix (as a vector).
initial_field: complex
The initial field value.
influence: callable(int) -> ndarray
Callable that takes an integer `step` and returns the influence super
operator of that `step`.
unitary_transform: ndarray
Unitary that transforms the coupling operator into a diagonal form.
propagators: callable(int, ndarray, complex) -> ndarray, ndarray
Callable that takes an integer `step`, an ndarray `state` and a complex
`field` and returns the first and second half of the system propagator
of that `step`.
compute_field: callable(int, ndarray, complex, ndarray) -> complex
Callable that takes an integer `step`, a complex `field` (the current
value of the field) and two ndarrays for (respectively) the current and
next density matrix as vectors, and returns the next field value.
sum_north: ndarray
The summing vector for the north legs.
sum_west: ndarray
The summing vector for the west legs.
dkmax: int
Number of influences to include. If ``dkmax == -1`` then all influences
are included.
epsrel: float
Maximal relative SVD truncation error.
"""
def __init__(
self,
initial_state: ndarray,
initial_field: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
propagators: Callable[[int, ndarray, complex],
Tuple[ndarray, ndarray]],
compute_field: Callable[[float, ndarray, complex], complex],
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Dict):
# Field specific variables
self._initial_field = initial_field
self._compute_field = compute_field
self._field = initial_field
self._propagators = propagators
"""Create a TempoWithFieldBackend object. """
super().__init__(initial_state,
influence,
unitary_transform,
sum_north,
sum_west,
dkmax,
epsrel,
config)
def initialize(self) -> Tuple[int, ndarray, complex]:
"""See BaseBackend.initialize() for main docstring."""
self._step = 0
self._initialize_mps_mpo()
self._state = self._initial_state
self._field = self._initial_field
return self._step, copy(self._state), self._field
def compute_step(self) -> Tuple[int, ndarray, complex]:
"""
ToDo
"""
current_step = self._step
next_step = current_step + 1
current_state = copy(self._state)
current_field = self._field
prop_1, prop_2 = self._propagators(current_step, current_state,
current_field)
next_state = self._compute_system_step(next_step, prop_1, prop_2)
next_field = self._compute_field(current_step, current_state,
current_field, next_state)
self._state = next_state
self._field = next_field
self._step = next_step
return self._step, copy(self._state), self._field
| 35.8175 | 79 | 0.520346 |
from typing import Callable, Dict, Optional, Tuple
from copy import copy
from numpy import ndarray, moveaxis, dot
from oqupy import operators
from oqupy.config import TEMPO_BACKEND_CONFIG
from oqupy.backends import node_array as na
from oqupy.util import create_delta
class BaseTempoBackend:
def __init__(
self,
initial_state: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Optional[Dict] = None):
self._initial_state = initial_state
self._influence = influence
self._unitary_transform = unitary_transform
self._sum_north = sum_north
self._sum_west = sum_west
self._dkmax = dkmax
self._epsrel = epsrel
self._step = None
self._state = None
self._config = TEMPO_BACKEND_CONFIG if config is None else config
self._mps = None
self._mpo = None
self._super_u = None
self._super_u_dagg = None
self._sum_north_na = None
@property
def step(self) -> int:
return self._step
def _initialize_mps_mpo(self) :
self._initial_state = copy(self._initial_state).reshape(-1)
self._super_u = operators.left_right_super(
self._unitary_transform,
self._unitary_transform.conjugate().T)
self._super_u_dagg = operators.left_right_super(
self._unitary_transform.conjugate().T,
self._unitary_transform)
self._sum_north_na = na.NodeArray([self._sum_north],
left=False,
right=False,
name="Sum north")
influences = []
if self._dkmax is None:
dkmax_pre_compute = 1
else:
dkmax_pre_compute = self._dkmax + 1
for i in range(dkmax_pre_compute):
infl = self._influence(i)
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
if i == 0:
tmp = dot(moveaxis(infl_four_legs, 1, -1),
self._super_u_dagg)
tmp = moveaxis(tmp, -1, 1)
tmp = dot(tmp, self._super_u.T)
infl_four_legs = tmp
influences.append(infl_four_legs)
self._mps = na.NodeArray([self._initial_state],
left=False,
right=False,
name="Thee MPS")
self._mpo = na.NodeArray(list(reversed(influences)),
left=True,
right=True,
name="Thee Time Evolving MPO")
def _compute_system_step(self, current_step, prop_1, prop_2) -> ndarray:
prop_1_na = na.NodeArray([prop_1.T],
left=False,
right=False,
name="first half-step")
prop_2_na = na.NodeArray([prop_2.T],
left=True,
right=False,
name="second half-step")
if self._dkmax is None:
mpo = self._mpo.copy()
infl = self._influence(len(mpo))
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
infl_na = na.NodeArray([infl_four_legs],
left=True,
right=True)
self._mpo = na.join(infl_na,
self._mpo,
name="The Time Evolving MPO",
copy=False)
elif current_step <= self._dkmax:
_, mpo = na.split(self._mpo,
int(0 - current_step),
copy=True)
else:
mpo = self._mpo.copy()
infl = self._influence(self._dkmax-current_step)
if infl is not None:
infl_four_legs = create_delta(infl, [1, 0, 0, 1])
infl_na = na.NodeArray([infl_four_legs],
left=True,
right=True)
_, mpo = na.split(self._mpo,
index=1,
copy=True)
mpo = na.join(infl_na,
mpo,
name="Thee Time Evolving MPO",
copy=False)
mpo.name = "temporary MPO"
mpo.apply_vector(self._sum_west, left=True)
self._mps.zip_up(prop_1_na,
axes=[(0,0)],
left_index=-1,
right_index=-1,
direction="left",
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True,
copy=False)
if len(self._mps) != len(mpo):
self._mps.contract(self._sum_north_na,
axes=[(0,0)],
left_index=0,
right_index=0,
direction="right",
copy=True)
self._mps.zip_up(mpo,
axes=[(0, 0)],
left_index=0,
right_index=-1,
direction="right",
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True,
copy=False)
self._mps.svd_sweep(from_index=-1,
to_index=0,
max_singular_values=None,
max_truncation_err=self._epsrel,
relative=True)
self._mps = na.join(self._mps,
prop_2_na,
copy=False,
name=f"The MPS ({current_step})")
tmp_mps = self._mps.copy()
for _ in range(len(tmp_mps)-1):
tmp_mps.contract(self._sum_north_na,
axes=[(0,0)],
left_index=0,
right_index=0,
direction="right",
copy=True)
assert len(tmp_mps) == 1
assert not tmp_mps.left
assert not tmp_mps.right
assert tmp_mps.rank == 1
state = tmp_mps.nodes[0].get_tensor()
return state
class TempoBackend(BaseTempoBackend):
def __init__(
self,
initial_state: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
propagators: Callable[[int], Tuple[ndarray, ndarray]],
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Optional[Dict] = None):
super().__init__(
initial_state,
influence,
unitary_transform,
sum_north,
sum_west,
dkmax,
epsrel,
config)
self._propagators = propagators
def initialize(self)-> Tuple[int, ndarray]:
self._step = 0
self._initialize_mps_mpo()
self._state = self._initial_state
return self._step, copy(self._state)
def compute_step(self) -> Tuple[int, ndarray]:
self._step += 1
prop_1, prop_2 = self._propagators(self._step-1)
self._state = self._compute_system_step(self._step, prop_1, prop_2)
return self._step, copy(self._state)
class TempoWithFieldBackend(BaseTempoBackend):
def __init__(
self,
initial_state: ndarray,
initial_field: ndarray,
influence: Callable[[int], ndarray],
unitary_transform: ndarray,
propagators: Callable[[int, ndarray, complex],
Tuple[ndarray, ndarray]],
compute_field: Callable[[float, ndarray, complex], complex],
sum_north: ndarray,
sum_west: ndarray,
dkmax: int,
epsrel: float,
config: Dict):
self._initial_field = initial_field
self._compute_field = compute_field
self._field = initial_field
self._propagators = propagators
super().__init__(initial_state,
influence,
unitary_transform,
sum_north,
sum_west,
dkmax,
epsrel,
config)
def initialize(self) -> Tuple[int, ndarray, complex]:
self._step = 0
self._initialize_mps_mpo()
self._state = self._initial_state
self._field = self._initial_field
return self._step, copy(self._state), self._field
def compute_step(self) -> Tuple[int, ndarray, complex]:
current_step = self._step
next_step = current_step + 1
current_state = copy(self._state)
current_field = self._field
prop_1, prop_2 = self._propagators(current_step, current_state,
current_field)
next_state = self._compute_system_step(next_step, prop_1, prop_2)
next_field = self._compute_field(current_step, current_state,
current_field, next_state)
self._state = next_state
self._field = next_field
self._step = next_step
return self._step, copy(self._state), self._field
| true | true |
f71988f8e6cbe49da433af143788c3ecc8e82b65 | 446 | py | Python | setup.py | Moomoo-pls/NLP_Game_of_Life | afe6bb6ccd4a83b6ffeccc8ac257872251bd39bb | [
"MIT"
] | null | null | null | setup.py | Moomoo-pls/NLP_Game_of_Life | afe6bb6ccd4a83b6ffeccc8ac257872251bd39bb | [
"MIT"
] | null | null | null | setup.py | Moomoo-pls/NLP_Game_of_Life | afe6bb6ccd4a83b6ffeccc8ac257872251bd39bb | [
"MIT"
] | null | null | null | import setuptools
setuptools.setup(
name="Moo_NLP_Game_of_Life",
version="1.0.0",
author="Stephen Moo-Young",
author_email="mooyoung12@gmail.com",
description="Game of Life for the take home coding challenge",
url="https://github.com/Moomoo-pls/NLP_Game_of_Life",
packages=setuptools.find_packages(),
entry_points={
'console_scripts':[
'game-of-life=Game_of_Life.main:main',
]
},
) | 27.875 | 66 | 0.663677 | import setuptools
setuptools.setup(
name="Moo_NLP_Game_of_Life",
version="1.0.0",
author="Stephen Moo-Young",
author_email="mooyoung12@gmail.com",
description="Game of Life for the take home coding challenge",
url="https://github.com/Moomoo-pls/NLP_Game_of_Life",
packages=setuptools.find_packages(),
entry_points={
'console_scripts':[
'game-of-life=Game_of_Life.main:main',
]
},
) | true | true |
f719891884a715f4ed60d4d29e0a80d1b2c17515 | 8,422 | py | Python | 2_data_collection/CIFAR_10/vgg16_CIFAR10.py | j-chan-hkust/deep_testing_of_advanced_learning_systems | ec535e2b4dc489d407b664a138d3f5262b71d21e | [
"MIT"
] | null | null | null | 2_data_collection/CIFAR_10/vgg16_CIFAR10.py | j-chan-hkust/deep_testing_of_advanced_learning_systems | ec535e2b4dc489d407b664a138d3f5262b71d21e | [
"MIT"
] | null | null | null | 2_data_collection/CIFAR_10/vgg16_CIFAR10.py | j-chan-hkust/deep_testing_of_advanced_learning_systems | ec535e2b4dc489d407b664a138d3f5262b71d21e | [
"MIT"
] | null | null | null | from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
import numpy as np
from keras.layers.core import Lambda
from keras import backend as K
from keras import regularizers
class cifar10vgg:
def __init__(self,train=True):
self.num_classes = 10
self.weight_decay = 0.0005
self.x_shape = [32,32,3]
self.model = self.build_model()
if train:
self.model = self.train(self.model)
else:
self.model.load_weights('cifar10vgg.h5')
def build_model(self):
# Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.
model = Sequential()
weight_decay = self.weight_decay
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=self.x_shape,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(self.num_classes))
model.add(Activation('softmax'))
return model
def normalize(self,X_train,X_test):
#this function normalize inputs for zero mean and unit variance
# it is used when training a model.
# Input: training set and test set
# Output: normalized training set and test set according to the trianing set statistics.
mean = np.mean(X_train,axis=(0,1,2,3))
std = np.std(X_train, axis=(0, 1, 2, 3))
X_train = (X_train-mean)/(std+1e-7)
X_test = (X_test-mean)/(std+1e-7)
return X_train, X_test
def normalize_production(self,x):
#this function is used to normalize instances in production according to saved training set statistics
# Input: X - a training set
# Output X - a normalized training set according to normalization constants.
#these values produced during first training and are general for the standard cifar10 training set normalization
mean = 120.707
std = 64.15
return (x-mean)/(std+1e-7)
def predict(self,x,normalize=True,batch_size=50):
if normalize:
x = self.normalize_production(x)
return self.model.predict(x,batch_size)
def train(self,model):
model.load_weights("cifar10vgg.h5")
#training parameters
batch_size = 128
maxepoches = 250
learning_rate = 0.01
lr_decay = 1e-6
lr_drop = 20
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = self.normalize(x_train, x_test)
y_train = keras.utils.to_categorical(y_train, self.num_classes)
y_test = keras.utils.to_categorical(y_test, self.num_classes)
def lr_scheduler(epoch):
return learning_rate * (0.5 ** (epoch // lr_drop))
reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)
#data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
#optimization details
sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
# training process in a for loop with learning rate drop every 25 epoches.
historytemp = model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=maxepoches,
validation_data=(x_test, y_test),callbacks=[reduce_lr],verbose=2)
model.save_weights('cifar10vgg.h5')
return model
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model = cifar10vgg()
predicted_x = model.predict(x_test)
residuals = np.argmax(predicted_x,1)!=np.argmax(y_test,1)
loss = sum(residuals)/len(residuals)
print("the validation 0/1 loss is: ",loss)
| 39.172093 | 120 | 0.65412 | from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
import numpy as np
from keras.layers.core import Lambda
from keras import backend as K
from keras import regularizers
class cifar10vgg:
def __init__(self,train=True):
self.num_classes = 10
self.weight_decay = 0.0005
self.x_shape = [32,32,3]
self.model = self.build_model()
if train:
self.model = self.train(self.model)
else:
self.model.load_weights('cifar10vgg.h5')
def build_model(self):
model = Sequential()
weight_decay = self.weight_decay
model.add(Conv2D(64, (3, 3), padding='same',
input_shape=self.x_shape,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(self.num_classes))
model.add(Activation('softmax'))
return model
def normalize(self,X_train,X_test):
mean = np.mean(X_train,axis=(0,1,2,3))
std = np.std(X_train, axis=(0, 1, 2, 3))
X_train = (X_train-mean)/(std+1e-7)
X_test = (X_test-mean)/(std+1e-7)
return X_train, X_test
def normalize_production(self,x):
mean = 120.707
std = 64.15
return (x-mean)/(std+1e-7)
def predict(self,x,normalize=True,batch_size=50):
if normalize:
x = self.normalize_production(x)
return self.model.predict(x,batch_size)
def train(self,model):
model.load_weights("cifar10vgg.h5")
batch_size = 128
maxepoches = 250
learning_rate = 0.01
lr_decay = 1e-6
lr_drop = 20
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = self.normalize(x_train, x_test)
y_train = keras.utils.to_categorical(y_train, self.num_classes)
y_test = keras.utils.to_categorical(y_test, self.num_classes)
def lr_scheduler(epoch):
return learning_rate * (0.5 ** (epoch // lr_drop))
reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=False)
datagen.fit(x_train)
sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
historytemp = model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
steps_per_epoch=x_train.shape[0] // batch_size,
epochs=maxepoches,
validation_data=(x_test, y_test),callbacks=[reduce_lr],verbose=2)
model.save_weights('cifar10vgg.h5')
return model
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model = cifar10vgg()
predicted_x = model.predict(x_test)
residuals = np.argmax(predicted_x,1)!=np.argmax(y_test,1)
loss = sum(residuals)/len(residuals)
print("the validation 0/1 loss is: ",loss)
| true | true |
f719892d08f0cb15a072c2fb5acf64d76d3bd3a3 | 31,288 | py | Python | scraps/forcefield_v2.py | kul-group/MAZE-sim | 0f85e74bf93f9242a73bcfaa20a593ae966f38fa | [
"MIT"
] | 13 | 2021-03-10T18:40:32.000Z | 2022-03-21T20:40:57.000Z | scraps/forcefield_v2.py | kul-group/MAZE-sim | 0f85e74bf93f9242a73bcfaa20a593ae966f38fa | [
"MIT"
] | 27 | 2021-01-28T23:18:44.000Z | 2021-05-06T19:33:09.000Z | scraps/forcefield_v2.py | kul-group/MAZE-sim | 0f85e74bf93f9242a73bcfaa20a593ae966f38fa | [
"MIT"
] | 4 | 2021-03-19T20:46:15.000Z | 2022-03-21T20:40:59.000Z | from maze.extra_framework_maker import ExtraFrameworkMaker, ExtraFrameworkAnalyzer
from maze.io_zeolite import read_vasp
from maze.zeolite import PerfectZeolite, Zeolite
from ase.neighborlist import natural_cutoffs, NeighborList
import os
from pathlib import Path
from ase.io import write, read, gromacs, proteindatabank
from ase.visualize import view
import copy
import shutil
from glob import glob
from ase.constraints import FixAtoms
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
from ase.geometry.analysis import Analysis
import numpy as np
from itertools import permutations
from lxml import etree
from contextlib import closing
from collections import OrderedDict
from scipy.optimize import least_squares, minimize
import matplotlib.pyplot as plt
from statistics import mode
import pickle
import time
from ase.data import atomic_masses, atomic_numbers
def get_EF_atom_indices(atoms):
"""
for index tracking, to ensure we are comparing the DFT and FF forces on the same EF atoms after before and after
scooping out the smaller cluster.
alse used for recentering the cluster based on the EF-O atom
"""
TM_list = ['Pt', 'Cu', 'Co', 'Pd', 'Fe', 'Cr', 'Rh', 'Ru']
index_EF_TM = [a.index for a in atoms if a.symbol in TM_list]
index_Al = [a.index for a in atoms if a.symbol == 'Al']
nl = NeighborList(natural_cutoffs(atoms), bothways=True, self_interaction=False)
nl.update(atoms)
Al_neigh_list = np.concatenate((nl.get_neighbors(index_Al[0])[0], nl.get_neighbors(index_Al[1])[0]))
Al_neigh_list = [x for x in Al_neigh_list if atoms[x].symbol == 'O']
TM_neigh_list = np.concatenate((nl.get_neighbors(index_EF_TM[0])[0], nl.get_neighbors(index_EF_TM[1])[0]))
centering_o = [[x for x in TM_neigh_list if list(TM_neigh_list).count(x) > 1 and x not in Al_neigh_list][0]]
return index_EF_TM + centering_o
def get_capped_cluster(atoms, folder_path, file_name, save_traj, EF_O_index):
""" #TODO: check whether capping is necessary
Inconsistent capping (remove all caps for now, does not need this cluster to be physical)
Possible fix: change mult in neighbor list
Extract smaller cluster containing the extra-framework atoms and cap all the O. Then the capped cluster is moved
to the center of the cell to avoid boundary issue.
Save cluster in both .traj file and .pdb format.
:param atoms:
:param folder_path:
:param file_name:
:param save_traj: if True, save clusters into .traj as well, for later comparison and trouble shooting
:param EF_O_index: if not none, will use this value, else, will find the index using Extraframework code
:return: 1. EF-cluster including 13 atoms, index of the EF atoms in original zeolite, index of the EF atoms in
the current cluster (the later two output index lists share the ordering)
"""
EFMaker = ExtraFrameworkAnalyzer(atoms)
cluster = atoms[[index for index in EFMaker.get_extraframework_cluster(EF_O_index)]]
cluster_EF_index = get_EF_atom_indices(cluster)
centering_pos = cluster.get_positions()[cluster_EF_index[-1]]
recentered_cluster = EFMaker.recentering_atoms(cluster, centering_pos)[0]
# FIXME: recentering doesn't work well for very small unit cells. eg. SOD
# cluster = Zeolite(cluster).cap_atoms()
proteindatabank.write_proteindatabank(folder_path + '/%s.pdb' % file_name, recentered_cluster)
if save_traj is True:
write(folder_path + '/%s.traj' % file_name, recentered_cluster)
return cluster, EFMaker.get_extraframework_cluster(EF_O_index), cluster_EF_index
def label_pdb(folder_path, file_name, del_unlabeled_pdb):
"""
Relabeling the Atom name in proteindatabank file. (required step for openMM)
The same atom type connecting to different neighboring types are treated differently due to differences in their
chemical environments, and is therefore named separately.
:param folder_path:
:param file_name:
:param del_unlabeled_pdb:
"""
filein = open(folder_path + '/%s.pdb' % file_name, 'r')
fileout = open(folder_path + '/%s_labeled.pdb' % file_name, 'w')
name_list = []
for line in filein.readlines():
if line.startswith('ATOM') or line.startswith('HETATM'):
name = line[12:16].strip()
name_list.append(name)
name = name + str(name_list.count(name))
name = name.rjust(4)
line = line.replace(line[12:16], name, 1)
# only replacing the first occurrence of line[12:16], atomic symbols are maintained
fileout.writelines(line)
filein.close()
fileout.close()
if del_unlabeled_pdb is True:
os.remove(folder_path + '/%s.pdb' % file_name)
def get_bonds(cluster, mult=1, excluded_index=None, excluded_pair=None):
"""
Using ase.geometry.analysis.Analysis to get all bonds, then remove the repeated ones.
Function also allows removing certain bonding pair defined by user (excluded_pair).
Or removing pairs including certain atomic indices (excluded_index).
:param cluster:
:param mult:
:param excluded_index: list of integers
:param excluded_pair: list of lists
:return: full bonding list, shortened list.
If both excluded_index and excluded_pair are None, bonding list == shortened list
"""
if excluded_index is None:
excluded_index = []
if excluded_pair is None:
excluded_pair = []
nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)
nl.update(cluster)
bond_list, shortened_list = [], []
for count, indices in enumerate(Analysis(cluster, nl=nl).all_bonds[0]):
for index in indices:
if [count, index] not in bond_list and [index, count] not in bond_list:
bond_list.append([count, index])
for bond in bond_list:
if all(single_index not in bond for single_index in excluded_index) and \
all(tuple(bond) not in list(permutations(pair)) for pair in excluded_pair):
shortened_list.append(bond)
return bond_list, shortened_list
def get_angles(cluster, mult=1, excluded_index=None, excluded_pair=None):
"""
#TODO: consider combining get_bonds and get_angles function
ase.geometry.analysis.Analysis.unique_angles function does not work, return all angles.
three-body interactions.
:param excluded_pair: excluding all [particle1, particle2, particle3] lists involving the excluded pair
"""
if excluded_index is None:
excluded_index = []
if excluded_pair is None:
excluded_pair = []
nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)
nl.update(cluster)
angle_list, shortened_list = [], []
for count, indices in enumerate(Analysis(cluster, nl=nl).all_angles[0]):
for index in indices:
if all(list(val) not in angle_list for val in list(permutations([count, index[0], index[1]]))):
angle_list.append([count, index[0], index[1]])
for angle in angle_list:
if all(single_index not in angle for single_index in excluded_index) and \
all(list(value) not in excluded_pair for value in list(permutations(angle, 2))):
shortened_list.append(angle)
return angle_list, shortened_list
def write_xml(atoms, bonds, save_as):
    """Generate, on the fly, an openMM force-field XML file whose atom and bond
    names match the labeled pdb file.

    :param atoms: openMM topology atoms (each carries a unique ``.name`` like 'Cu1', 'O9')
    :param bonds: iterable of atom pairs previously registered on the topology
    :param save_as: output path of the XML file
    """
    root = etree.Element('ForceField')
    xml_section = etree.SubElement(root, "AtomTypes")
    for atom in atoms:
        # strip the numeric label suffix to recover the chemical element symbol
        element_type = ''.join(filter(lambda x: not x.isdigit(), atom.name))
        if element_type == 'Cu' or atom.name == 'O9':
            atomic_mass = atomic_masses[atomic_numbers[element_type]]
        else:
            # zero mass: such particles stay fixed in the simulation, so only the Cu atoms
            # and 'O9' move — NOTE(review): 'O9' is presumably the extra-framework O label
            # produced by label_pdb; confirm this label is stable across clusters
            atomic_mass = 0.0
        properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}
        etree.SubElement(xml_section, 'Type', **properties)
    xml_section = etree.SubElement(root, 'Residues')
    xml_residue = etree.SubElement(xml_section, 'Residue', name='MOL')
    for atom in atoms:
        etree.SubElement(xml_residue, 'Atom', name=atom.name, type=atom.name)
    for bond in bonds:
        etree.SubElement(xml_residue, 'Bond', atomName1=bond[0].name, atomName2=bond[1].name)
    tree = etree.ElementTree(root)
    xml = etree.tostring(tree, pretty_print=True).decode('utf-8')
    with closing(open(save_as, 'w')) as f:
        f.write(xml)
def check_atom_types(cluster, index):
    """ assign atom types, same element connected to different neighbors are assigned into different classes.
    For example, extra-framework O (in Cu-O-Cu) is in a different class from framework O (Si-O-Si). Each class
    assignment is unique (each atom belongs to one class and one class only).
    O_EF: extra-framework O
    O-Cu: framework O, connecting to one T-site(Al) and Cu
    O-H: framework O, connecting to one T-site(Al) and H (capping)
    :param cluster: capped cluster (ase.Atoms-like) containing the atom to classify
    :param index: index of the atom whose type string is requested
    :return: one of 'Al', 'Cu', 'H', 'O-EF', 'O-Cu', 'O-H', or 'None' if unclassified
    """
    # NOTE(review): all class lists are rebuilt on every call; callers that classify many
    # atoms of the same cluster (e.g. get_property_types) pay this cost per atom
    nl = NeighborList(natural_cutoffs(cluster), bothways=True, self_interaction=False)
    nl.update(cluster)
    class_Al = [atom.index for atom in cluster if atom.symbol == 'Al']
    class_Cu = [atom.index for atom in cluster if atom.symbol == 'Cu']
    class_H = [atom.index for atom in cluster if atom.symbol == 'H']
    # last entry of the EF cluster indices is the extra-framework (bridging) O
    class_O_EF = [get_EF_atom_indices(cluster)[-1]]
    # framework O with no capping-H neighbor
    class_O_Cu = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF and
                  all(val not in class_H for val in nl.get_neighbors(atom.index)[0])]
    # remaining framework O (bonded to a capping H)
    class_O_H = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF + class_O_Cu]
    if index in class_Al:
        return 'Al'
    if index in class_Cu:
        return 'Cu'
    if index in class_H:
        return 'H'
    if index in class_O_EF:
        return 'O-EF'
    if index in class_O_Cu:
        return 'O-Cu'
    if index in class_O_H:
        return 'O-H'
    else:
        return 'None'
def get_property_types(cluster, property_list):
    """Assign every bond pair or angle triplet to a type based on its atom types.

    For example, O(extra-framework)-Cu is a different bond type from O(framework)-Cu.
    :param cluster: cluster whose atoms are classified via check_atom_types
    :param property_list: bond (pairs) or angle (triplets) index lists of the cluster
    :return type_dict: {0: [AtomClass1, AtomClass2], 1: [...], ...} — every unique type,
        keyed by consecutive integers; orderings that are permutations of each other
        count as the same type
    :return whole_type_list: the type assignment of every entry of property_list, so
        len(whole_type_list) == len(property_list)
    """
    type_dict = {}
    seen_types = []
    whole_type_list = []
    for entry in property_list:
        entry_types = [check_atom_types(cluster, atom_index) for atom_index in entry]
        whole_type_list.append(entry_types)
        # a type is new only if no permutation of it has been recorded yet
        already_known = any(list(perm) in seen_types for perm in permutations(entry_types))
        if not already_known:
            seen_types.append(entry_types)
            type_dict[len(type_dict)] = entry_types
    return type_dict, whole_type_list
def _get_index_dict(type_dict, whole_type_list, index_list):
    """Group bond-pair or angle indices by their unique type.

    All pairs/angles collected under one key share a single set of force-field
    parameters. Matching is permutation-insensitive.
    :param type_dict: {int: [type str, ...]} unique types
    :param whole_type_list: per-entry type assignment for every item of index_list
    :param index_list: bond or angle index lists of the cluster
    :return index_dict: {same keys as type_dict: [matching index lists, ...]}
    """
    index_dict = {}
    for type_key, type_value in type_dict.items():
        matches = [
            index_list[pos]
            for pos, candidate in enumerate(whole_type_list)
            if any(list(perm) == type_value for perm in permutations(candidate))
        ]
        index_dict[type_key] = matches
    return index_dict
def get_type_index_pair(type_dict, whole_type_list, index_list):
    """Merge type definitions and their member indices into a single dictionary
    keyed by the type tuple (tuples are hashable, lists are not).

    :param type_dict: {int: [type str, ...]} unique types
    :param whole_type_list: per-entry type assignment for every item of index_list
    :param index_list: bond or angle index lists of the cluster
    :return: {(type str, ...): [index list, ...], ...}
    """
    indices_by_key = _get_index_dict(type_dict, whole_type_list, index_list)
    return {tuple(type_value): indices_by_key[type_key] for type_key, type_value in type_dict.items()}
def pretty_print(my_dict):
    """Print each ``key --> value`` pair of *my_dict* on its own line, for easier
    visualization of bond (or angle) types and their member indices."""
    for dict_key in my_dict:
        print(dict_key, '-->', my_dict[dict_key])
def shorten_index_list_by_types(type_index_dict, exclude_atom_type=None, exclude_property_type=None,
                                include_property_type=None, case=0):
    """Filter the type->indices mapping, excluding certain atom or property types,
    or keeping only an explicit include list.

    The filtering mode is derived from which optional arguments are supplied;
    `include_property_type` takes precedence over the exclusion arguments. With no
    optional arguments (and the default `case`), nothing matches and an empty list
    is returned. Type comparisons are permutation-insensitive.
    :param type_index_dict: {(type str, ...): [index list, ...], ...}
    :return: flat list of the surviving bond/angle index lists
    """
    has_atom_exclusion = exclude_atom_type is not None
    has_property_exclusion = exclude_property_type is not None
    if has_atom_exclusion and not has_property_exclusion:
        case = 1
    elif has_property_exclusion and not has_atom_exclusion:
        case = 2
    elif has_property_exclusion and has_atom_exclusion:
        case = 3
    if include_property_type is not None:
        case = 4

    def _no_excluded_atom(type_list):
        # True when the type contains none of the excluded atom types
        return all(atom_type not in type_list for atom_type in exclude_atom_type)

    def _no_excluded_property(type_list):
        # True when no ordering of the type appears in the excluded property list
        return all(list(perm) not in exclude_property_type for perm in permutations(type_list))

    def _explicitly_included(type_list):
        # True when some ordering of the type appears in the include list
        return any(list(perm) in include_property_type for perm in permutations(type_list))

    shortened_list = []
    for type_list, index_list in type_index_dict.items():
        if case == 1:
            keep = _no_excluded_atom(type_list)
        elif case == 2:
            keep = _no_excluded_property(type_list)
        elif case == 3:
            keep = _no_excluded_atom(type_list) and _no_excluded_property(type_list)
        elif case == 4:
            keep = _explicitly_included(type_list)
        else:
            keep = False
        if keep:
            shortened_list.extend(index_list)
    return shortened_list
def set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list):
    """ Feed pdb topology file and xml force field file into openMM, generate a system for the MD simulation/force
    calculation.
    :param folder_path: directory containing 'cluster_<tag>_labeled.pdb'; the xml is written there too
    :param cluster_tag_number: tag identifying which labeled pdb file to load
    :param shortened_bond_list: bond index pairs to register on the topology
    :return pdb: openMM PDBFile with the bonds added
    :return system: openMM System built from the on-the-fly force field
    """
    pdb = PDBFile(folder_path + '/cluster_%s_labeled.pdb' % cluster_tag_number)
    atoms = list(pdb.topology.atoms())
    # register every custom bond on the topology before generating the force field
    for index in shortened_bond_list:
        pdb.topology.addBond(atoms[index[0]], atoms[index[1]])
    bonds = list(pdb.topology.bonds())
    # the xml is regenerated each time so atom/bond names always match this pdb
    write_xml(atoms, bonds, folder_path + '/forcefield.xml')
    FF = ForceField(folder_path + '/forcefield.xml')
    system = FF.createSystem(pdb.topology)
    return pdb, system
def custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,
                               angle_type_index_dict=None, angle_param_dict=None):
    """ #todo: add argument allowing this custom function to be fed in as an input (more flexible used-designed ff)
    Add a Morse bond force and (optionally) a harmonic angle force to an openMM system.
    :param bond_list: bond index pairs to be included into the force field
    :param angle_list: angle index triplets; when None, no angle force is added
        (fix: the original iterated the None default and raised TypeError)
    :param bond_type_index_dict: {(type): [index], ...}
    :param angle_type_index_dict: same layout for angles
    :param bond_param_dict: {(type): [param], ...} Note: parameters use standard openMM
        units, kJ, nm, ...
    :param angle_param_dict: same layout for angles
    :return system: openMM system with the custom forces added onto it
    """
    bond_force = CustomBondForce("D*(1-exp(-alpha*(r-r0)))^2")  # Morse bond
    bond_force.addPerBondParameter("D")
    bond_force.addPerBondParameter("alpha")
    bond_force.addPerBondParameter("r0")
    bond_force.setUsesPeriodicBoundaryConditions(periodic=True)
    for bond in bond_list:
        for my_type, my_index in bond_type_index_dict.items():
            if any(list(val) in my_index for val in permutations(bond)):
                # parameters may be stored under either ordering of the type key;
                # explicit fallback replaces the original bare try/except
                # note: consider updating the info_dict to make it order insensitive
                params = bond_param_dict.get(my_type)
                if params is None:
                    params = bond_param_dict.get(tuple(reversed(my_type)))
                bond_force.addBond(int(bond[0]), int(bond[1]), params)
    system.addForce(bond_force)
    if angle_list is not None:
        angle_force = HarmonicAngleForce()  # Harmonic angle
        angle_force.setUsesPeriodicBoundaryConditions(periodic=True)  # adding periodic conditions
        for angle in angle_list:
            for my_type, my_index in angle_type_index_dict.items():
                if any(list(val) in my_index for val in permutations(angle)):
                    # recover the parameter key regardless of the stored ordering
                    type_tag = [tuple(val) for val in angle_param_dict.keys() if val in list(permutations(my_type))]
                    angle_force.addAngle(int(angle[0]), int(angle[1]), int(angle[2]),
                                         *angle_param_dict.get(type_tag[0]))
        system.addForce(angle_force)
    # assert(system.usesPeriodicBoundaryConditions() == True)
    return system
def get_openMM_forces(pdb, system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,
                      angle_type_index_dict=None, angle_param_dict=None):
    """ forces for a single configuration
    An integrator is required to construct a Simulation even though only the forces on
    the initial structure are needed, so an arbitrary Langevin integrator is used.
    :param pdb: openMM PDBFile providing topology and positions
    :param system: openMM System to which the custom forces are added
    :param bond_list / angle_list: index lists fed to custom_openMM_force_object
    :param bond_type_index_dict / angle_type_index_dict: {(type): [index], ...}
    :param bond_param_dict / angle_param_dict: {(type): [param], ...}
    :return: per-atom force array in units of eV/A
    """
    system = custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list,
                                        angle_type_index_dict, angle_param_dict)
    integrator = LangevinMiddleIntegrator(3 * kelvin, 1 / picosecond, 0.4 * picoseconds)  # randomly picked
    simulation = Simulation(pdb.topology, system, integrator)
    simulation.context.setPositions(pdb.positions)
    state = simulation.context.getState(getForces=True)
    forces = np.array(state.getForces(asNumpy=True)) * 1.0364e-2 * 0.1  # convert forces from kJ/nm mol to eV/A
    return forces
# NOTE: section below deals with multiple input structures for force field training
def get_EF_O_index(traj):
    """
    get the mode of EF_O, and use that to extract the EF cluster for the force field training
    all EF atoms should have the same indices regardless of there is binds on the zeolite, as long as the zeolite
    framework is the same - (all EF atoms, aka. Cu-O-Cu insertion follows the same procedures)
    :param traj: traj of configurations containing all atoms, including both the zeolite backbone and EF atoms
    :return: most common extra-framework O index over the analyzable configurations
    """
    EF_O_index_list = []
    for atoms in traj:
        try:
            EFAnalyzer = ExtraFrameworkAnalyzer(atoms)
            # last entry of the EF cluster indices is the extra-framework O
            EF_O_index_list.append(EFAnalyzer.get_extraframework_cluster()[-1])
        except:
            # configurations the analyzer cannot handle are skipped silently
            # NOTE(review): bare except hides all errors — narrow it if possible
            ...
    return mode(tuple(EF_O_index_list))
def prep_topologies(folder_path, sample_zeolite, traj_name=None, save_traj=False, del_unlabeled_pdb=False,
                    show_all=False):
    """Extract and write a labeled pdb topology file for every configuration of a trajectory.

    :param folder_path: directory containing '<traj_name>.traj' (or '<sample_zeolite>.traj')
    :param sample_zeolite: zeolite framework tag, used for file naming and logging
    :param traj_name: optional trajectory name; falls back to sample_zeolite when None
    :param save_traj: also save each capped cluster as a .traj file
    :param del_unlabeled_pdb: remove the intermediate unlabeled pdb after labeling
    :param show_all: open an ase viewer with all extracted clusters at the end
    :return: (EF_atoms_index, cluster_EF_index) from the last successfully processed
        configuration (empty lists if every configuration failed)
    """
    if traj_name is not None:
        traj = read(folder_path + '/%s.traj' % traj_name, ':')
        output_dir = os.path.join(folder_path, traj_name)
    else:
        traj = read(folder_path + '/%s.traj' % sample_zeolite, ':')
        output_dir = os.path.join(folder_path, sample_zeolite)
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    # the EF O index is fixed once, as the mode over the first 100 frames
    cluster_traj, EF_O_index, EF_atoms_index, cluster_EF_index = [], get_EF_O_index(traj[0:100]), [], []
    for count, atoms in enumerate(traj):
        try:
            cluster, EF_atoms_index, cluster_EF_index = get_capped_cluster(atoms, output_dir, 'cluster_' + str(count),
                                                                           save_traj, [EF_O_index])
            label_pdb(output_dir, 'cluster_%s' % str(count), del_unlabeled_pdb)
            cluster_traj.append(cluster)
            print(sample_zeolite, count)
        except:
            # NOTE(review): bare except — failed frames are only logged, never re-raised
            print(sample_zeolite, count, 'failed!')
    if show_all is True:
        view(cluster_traj)
    return EF_atoms_index, cluster_EF_index
def reformat_inputs(bond_param_dict, angle_param_dict):
    """ reformat input dicts into lists
    :param bond_param_dict: {(type str, ...): [param, ...], ...} for bonds
    :param angle_param_dict: {(type str, ...): [param, ...], ...} for angles
    :return bond_type: List[List[str]] eg. ['Cu', 'O']
    :return angle_type: List[List[str]] eg. ['Cu', 'O', 'Cu']
    :return param_list: List[float], all parameters flattened into a single list (bonds
    first, then angles), since scipy.optimize.minimize only accepts a 1D array as the
    initial guess
    """
    bond_type, angle_type, param_list = [], [], []
    for types, params in bond_param_dict.items():
        bond_type.append(list(types))
        param_list.extend(params)  # fix: dropped pointless numpy round-trip of the values
    for types, params in angle_param_dict.items():
        angle_type.append(list(types))
        param_list.extend(params)
    return bond_type, angle_type, param_list
def get_required_objects_for_ff(folder_path, cluster_tag_number, included_bond_type, included_angle_type,
                                bond_type_index_dict, angle_type_index_dict):
    """Build the per-configuration objects that stay fixed during the optimization.

    To reduce computational cost, the pdb, system and shortened index lists are
    created once per configuration and reused for every optimizer evaluation.
    :return: (pdb, system, shortened_bond_list, shortened_angle_list)
    """
    bond_subset = shorten_index_list_by_types(bond_type_index_dict, include_property_type=included_bond_type)
    angle_subset = shorten_index_list_by_types(angle_type_index_dict, include_property_type=included_angle_type)
    pdb, system = set_up_openMM_system(folder_path, cluster_tag_number, bond_subset)
    return pdb, system, bond_subset, angle_subset
def get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
                  angle_type_index_dict, EF_index):
    """ openMM forces for multiple configurations based on the same set of parameters
    :param param: flat 1D parameter vector; bond parameters come first (grouped per bond
        type, in ini_bond_param_dict key order), followed by the angle parameters
    :param info_dict: {config_tag: [pdb, system, shortened_bond_list, shortened_angle_list], ...}
    :param ini_bond_param_dict: used only for its key order and per-type parameter counts
        when unflattening `param`; same for ini_angle_param_dict
    :param EF_index: indices of the extra-framework atoms whose forces are returned
    :return: list with one entry per configuration of per-atom force vectors (eV/A)
    """
    # unflatten `param` back into {type: [params]} dicts, bonds first, then angles
    bond_param_dict, angle_param_dict, number_of_bond_param = {}, {}, 0
    for count, (types, indices) in enumerate(ini_bond_param_dict.items()):
        bond_param_dict[types] = list(param[count * len(indices):(count + 1) * len(indices)])
        number_of_bond_param += len(indices)
    for count, (types, indices) in enumerate(ini_angle_param_dict.items()):
        angle_param_dict[types] = list(
            param[count * len(indices) + number_of_bond_param:(count + 1) * len(indices) + number_of_bond_param])
    predicted_f = []
    # deep copy keeps the cached openMM systems in info_dict unmodified between calls
    my_dict = copy.deepcopy(info_dict)
    for config_tag, info_list in my_dict.items():
        ff_forces = get_openMM_forces(info_list[0], info_list[1], info_list[2], bond_type_index_dict, bond_param_dict,
                                      info_list[3], angle_type_index_dict, angle_param_dict)[EF_index]
        predicted_f.append([force_list for force_list in ff_forces])
    return predicted_f
def get_DFT_forces_single(atoms, atom_index):
    """Return the reference DFT force vector acting on a single atom.

    :param atoms: configuration whose calculator already holds DFT results
    :param atom_index: index of the atom of interest
    :return: force vector (3 components) from ``atoms.calc.results['forces']``
    """
    # reads the cached results directly — equivalent to atoms.get_forces()[atom_index]
    # without triggering a recalculation; fix: removed unused magnitude computation
    return atoms.calc.results['forces'][atom_index]
def get_residue(param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
                bond_type_index_dict, angle_type_index_dict, EF_index):
    """
    optimize force field parameters by minimizing this loss function (MSE), weighted by DFT electronic energies
    k (Boltzmann's constant) = 8.617e-5 eV/K
    T = 298 K
    :param param: flat 1D parameter vector (see get_FF_forces for the layout)
    :param DFT_f: reference DFT forces, reshaped alongside the predictions to (-1, 3)
    :param weights: one weight per flattened force component
    :return: scalar weighted mean-squared force error (despite the name, not a
        residual vector — suitable for scipy.optimize.minimize)
    """
    predicted_f = get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
                                angle_type_index_dict, EF_index)
    # flatten both force sets to one residual per component
    residue = np.reshape(np.array(np.reshape(predicted_f, [-1, 3])) - np.array(np.reshape(DFT_f, [-1, 3])), -1)
    weighted_residue = residue * weights  # weights length must equal the flattened component count
    print(np.mean(weighted_residue ** 2))  # progress logging for each optimizer evaluation
    return np.mean(weighted_residue ** 2)
def get_fitting_parameters(initial_param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
                           bond_type_index_dict, angle_type_index_dict, EF_index):
    # todo: more flexible bond reformating and feeding
    """Fit the force-field parameters by minimizing get_residue with Powell's method.

    NOTE(review): the bounds are hard-coded for a 15-parameter layout — three Morse
    bond types x (D, alpha unbounded; r0 >= 0) followed by three angle types x
    (0 <= theta0 <= pi; k unbounded) — and must be kept in sync with the
    ini_*_param_dict layout used by the caller (see func()).
    :param initial_param: flat initial-guess vector (see reformat_inputs)
    :return: scipy OptimizeResult; res.x holds the fitted parameters
    """
    bounds = ((-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf),
              (0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (0, np.pi),
              (-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf))
    res = minimize(get_residue, initial_param, method='Powell', bounds=bounds, options={'ftol': 0.01, 'maxiter': 1000},
                   args=(info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
                         bond_type_index_dict, angle_type_index_dict, EF_index))
    print(res.success)
    return res
def make_parity_plot(ff_forces, dft_forces, atom_name):
    """ plot FF forces vs. DFT forces on a parity (y = x) plot
    :param ff_forces: force-field predicted force components
    :param dft_forces: reference DFT force components
    :param atom_name: label used in the plot title
    """
    # fix: removed stray plt.figure() that opened an extra, empty figure window
    fig, ax = plt.subplots()
    plt.plot(dft_forces, ff_forces, 'o')
    plt.xlabel('DFT_force', fontsize=18)
    plt.ylabel('FF_force', fontsize=18)
    # square the axes around the common data range and draw the y = x reference line
    lims = [np.min([ax.get_xlim(), ax.get_ylim()]), np.max([ax.get_xlim(), ax.get_ylim()])]
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
    ax.set_aspect('equal')
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    plt.title('Force fitting on %s' % atom_name, fontsize=18)
    plt.show()
def func():
    """End-to-end force-field training driver for the Cu-O-Cu extra-framework cluster
    in the SOD zeolite: build and cache openMM systems for a subsample of the MD
    trajectory, fit Morse-bond/harmonic-angle parameters against Boltzmann-weighted
    DFT forces, then save and plot the resulting forces.
    NOTE(review): paths are machine-specific and pickles are read/written in place —
    this is a one-off driver script, not a reusable API.
    """
    tic = time.perf_counter()
    zeolite = 'SOD'
    folder_path, sample_zeolite, traj_name = '/Users/jiaweiguo/Box/openMM_FF', zeolite, zeolite + '_md'
    # prep_topologies(folder_path, sample_zeolite, traj_name, del_unlabeled_pdb=True)
    """
    ini_bond_param_dict = {('O-Cu', 'Cu'): [1.2, 4, 0.3], ('O-EF', 'Cu'): [1.2, 4, 0.2], ('Al', 'Cu'): [1.2, 4, 0.4]}
    ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.3, 10], ('O-Cu', 'Cu', 'O-EF'): [2.3, 10],
                            ('Al', 'Cu', 'O-EF'): [2.3, 10]}
    """
    # initial guesses per type: Morse bond (D, alpha, r0) and harmonic angle (theta0, k)
    ini_bond_param_dict = {('O-Cu', 'Cu'): [60.097, 2.267, 0.228], ('O-EF', 'Cu'): [4405.247, 4.163, 0.177],
                           ('Al', 'Cu'): [-2.656, 4.608, 0.413]}
    ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.458, 16.552], ('O-Cu', 'Cu', 'O-EF'): [3.266, 4.136],
                            ('Al', 'Cu', 'O-EF'): [1.925, 1.673]}
    included_bond_type, included_angle_type, ini_param = reformat_inputs(ini_bond_param_dict, ini_angle_param_dict)
    # set up type_index_dict using a single set of data #fixme: randomly pick several initial clusters to built dict
    cluster = read(os.path.join(folder_path, traj_name) + '/cluster_0_labeled.pdb', '0')
    bond_index_list, shortened_bond_index_list = get_bonds(cluster, mult=2)
    bond_type_dict, whole_bond_type_list = get_property_types(cluster, bond_index_list)
    angle_index_list, shortened_angle_index_list = get_angles(cluster, mult=2)
    angle_type_dict, whole_angle_type_list = get_property_types(cluster, angle_index_list)
    bond_type_index_dict = get_type_index_pair(bond_type_dict, whole_bond_type_list, bond_index_list)
    angle_type_index_dict = get_type_index_pair(angle_type_dict, whole_angle_type_list, angle_index_list)
    # build and cache pdb/system objects for every numb_skip-th configuration
    numb_skip = 2000
    info_dict, output_path = {}, os.path.join(folder_path, traj_name)
    files = [files for files in os.listdir(os.path.join(folder_path, traj_name)) if '.pdb' in files]
    for cluster_tag_number in np.arange(0, len(files), numb_skip):
        cluster_tag_number = int(cluster_tag_number)
        pdb, system, shortened_bond_list, shortened_angle_list = \
            get_required_objects_for_ff(output_path, cluster_tag_number, included_bond_type, included_angle_type,
                                        bond_type_index_dict, angle_type_index_dict)
        info_dict[cluster_tag_number] = [pdb, system, shortened_bond_list, shortened_angle_list]
        print(cluster_tag_number)
    with open(output_path + '/info_dict_%s.pickle' % numb_skip, 'wb') as f:
        pickle.dump(info_dict, f)
    with open(folder_path + '/EF_index_dict.pickle', 'rb') as f:
        EF_index_dict = pickle.load(f)
    # reference DFT forces on the last three (extra-framework) atoms of each sampled frame
    traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)
    DFT_f = []
    for atoms in traj:
        DFT_f.append([get_DFT_forces_single(atoms, atom_index=val) for val in EF_index_dict.get(zeolite)[-3:]])
    print(np.array(DFT_f).shape)
    ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']
    DFT_E = []
    for atoms in traj:
        DFT_E.append(atoms.calc.results['energy'])
    with open(os.path.join(folder_path, traj_name) + '/info_dict_%s.pickle' % numb_skip, 'rb') as f:
        info_dict = pickle.load(f)
    with open(folder_path + '/cluster_EF_index_dict.pickle', 'rb') as f:
        cluster_EF_index_dict = pickle.load(f)
    my_dict = copy.deepcopy(info_dict)  # important, need to keep openMM "systems" fixed
    # one Boltzmann weight per configuration, repeated 9x (3 EF atoms x 3 force components)
    weights = []
    for value in np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298)):
        weights.extend([value, value, value, value, value, value, value, value, value])
    res = get_fitting_parameters(ini_param, my_dict, DFT_f, np.array(weights), ini_bond_param_dict, ini_angle_param_dict,
                                 bond_type_index_dict, angle_type_index_dict, cluster_EF_index_dict.get(zeolite))
    print([np.around(float(val), decimals=3) for val in res.x])
    FF_f = get_FF_forces(res.x, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
                         angle_type_index_dict, cluster_EF_index_dict.get(zeolite))
    make_parity_plot(np.array(np.reshape(FF_f, [-1, 3])), np.array(np.reshape(DFT_f, [-1, 3])), 'Cu-O-Cu')
    force_dict = {'FF': np.array(np.reshape(FF_f, [-1, 3])), 'DFT': np.array(np.reshape(DFT_f, [-1, 3]))}
    with open(output_path + '/forces_%s.pickle' % numb_skip, 'wb') as f:
        pickle.dump(force_dict, f)
    toc = time.perf_counter()
    print(f"Program terminated in {toc - tic:0.4f} seconds")
if __name__ == '__main__':
    # func()
    # The string below is a kept-for-reference snippet that plots the Boltzmann
    # weighting factor used in the loss function; the commented-out func() call
    # above runs the full fitting workflow.
    """ weighting factor for the loss function
    zeolite = 'SOD'
    folder_path, traj_name, numb_skip = '/Users/jiaweiguo/Box/openMM_FF', zeolite + '_md', 2000
    traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)
    ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']
    DFT_E = []
    for atoms in traj:
        DFT_E.append(atoms.calc.results['energy'])
    weight = np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298))
    plt.plot(DFT_E, weight, 'o')
    plt.xlabel('DFT electronic energies (eV)', fontsize=16)
    plt.ylabel('Boltzmann weighting', fontsize=16)
    plt.show()
    """
| 46.215657 | 121 | 0.685407 | from maze.extra_framework_maker import ExtraFrameworkMaker, ExtraFrameworkAnalyzer
from maze.io_zeolite import read_vasp
from maze.zeolite import PerfectZeolite, Zeolite
from ase.neighborlist import natural_cutoffs, NeighborList
import os
from pathlib import Path
from ase.io import write, read, gromacs, proteindatabank
from ase.visualize import view
import copy
import shutil
from glob import glob
from ase.constraints import FixAtoms
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
from ase.geometry.analysis import Analysis
import numpy as np
from itertools import permutations
from lxml import etree
from contextlib import closing
from collections import OrderedDict
from scipy.optimize import least_squares, minimize
import matplotlib.pyplot as plt
from statistics import mode
import pickle
import time
from ase.data import atomic_masses, atomic_numbers
def get_EF_atom_indices(atoms):
TM_list = ['Pt', 'Cu', 'Co', 'Pd', 'Fe', 'Cr', 'Rh', 'Ru']
index_EF_TM = [a.index for a in atoms if a.symbol in TM_list]
index_Al = [a.index for a in atoms if a.symbol == 'Al']
nl = NeighborList(natural_cutoffs(atoms), bothways=True, self_interaction=False)
nl.update(atoms)
Al_neigh_list = np.concatenate((nl.get_neighbors(index_Al[0])[0], nl.get_neighbors(index_Al[1])[0]))
Al_neigh_list = [x for x in Al_neigh_list if atoms[x].symbol == 'O']
TM_neigh_list = np.concatenate((nl.get_neighbors(index_EF_TM[0])[0], nl.get_neighbors(index_EF_TM[1])[0]))
centering_o = [[x for x in TM_neigh_list if list(TM_neigh_list).count(x) > 1 and x not in Al_neigh_list][0]]
return index_EF_TM + centering_o
def get_capped_cluster(atoms, folder_path, file_name, save_traj, EF_O_index):
EFMaker = ExtraFrameworkAnalyzer(atoms)
cluster = atoms[[index for index in EFMaker.get_extraframework_cluster(EF_O_index)]]
cluster_EF_index = get_EF_atom_indices(cluster)
centering_pos = cluster.get_positions()[cluster_EF_index[-1]]
recentered_cluster = EFMaker.recentering_atoms(cluster, centering_pos)[0]
# cluster = Zeolite(cluster).cap_atoms()
proteindatabank.write_proteindatabank(folder_path + '/%s.pdb' % file_name, recentered_cluster)
if save_traj is True:
write(folder_path + '/%s.traj' % file_name, recentered_cluster)
return cluster, EFMaker.get_extraframework_cluster(EF_O_index), cluster_EF_index
def label_pdb(folder_path, file_name, del_unlabeled_pdb):
filein = open(folder_path + '/%s.pdb' % file_name, 'r')
fileout = open(folder_path + '/%s_labeled.pdb' % file_name, 'w')
name_list = []
for line in filein.readlines():
if line.startswith('ATOM') or line.startswith('HETATM'):
name = line[12:16].strip()
name_list.append(name)
name = name + str(name_list.count(name))
name = name.rjust(4)
line = line.replace(line[12:16], name, 1)
# only replacing the first occurrence of line[12:16], atomic symbols are maintained
fileout.writelines(line)
filein.close()
fileout.close()
if del_unlabeled_pdb is True:
os.remove(folder_path + '/%s.pdb' % file_name)
def get_bonds(cluster, mult=1, excluded_index=None, excluded_pair=None):
if excluded_index is None:
excluded_index = []
if excluded_pair is None:
excluded_pair = []
nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)
nl.update(cluster)
bond_list, shortened_list = [], []
for count, indices in enumerate(Analysis(cluster, nl=nl).all_bonds[0]):
for index in indices:
if [count, index] not in bond_list and [index, count] not in bond_list:
bond_list.append([count, index])
for bond in bond_list:
if all(single_index not in bond for single_index in excluded_index) and \
all(tuple(bond) not in list(permutations(pair)) for pair in excluded_pair):
shortened_list.append(bond)
return bond_list, shortened_list
def get_angles(cluster, mult=1, excluded_index=None, excluded_pair=None):
if excluded_index is None:
excluded_index = []
if excluded_pair is None:
excluded_pair = []
nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)
nl.update(cluster)
angle_list, shortened_list = [], []
for count, indices in enumerate(Analysis(cluster, nl=nl).all_angles[0]):
for index in indices:
if all(list(val) not in angle_list for val in list(permutations([count, index[0], index[1]]))):
angle_list.append([count, index[0], index[1]])
for angle in angle_list:
if all(single_index not in angle for single_index in excluded_index) and \
all(list(value) not in excluded_pair for value in list(permutations(angle, 2))):
shortened_list.append(angle)
return angle_list, shortened_list
def write_xml(atoms, bonds, save_as):
# on-the-fly generation of force field xml file, matching atoms and bonds with pdb file
root = etree.Element('ForceField')
xml_section = etree.SubElement(root, "AtomTypes")
for atom in atoms:
element_type = ''.join(filter(lambda x: not x.isdigit(), atom.name))
# properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}
if element_type == 'Cu' or atom.name == 'O9':
atomic_mass = atomic_masses[atomic_numbers[element_type]]
else:
atomic_mass = 0.0
properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}
etree.SubElement(xml_section, 'Type', **properties)
xml_section = etree.SubElement(root, 'Residues')
xml_residue = etree.SubElement(xml_section, 'Residue', name='MOL')
for atom in atoms:
etree.SubElement(xml_residue, 'Atom', name=atom.name, type=atom.name)
for bond in bonds:
etree.SubElement(xml_residue, 'Bond', atomName1=bond[0].name, atomName2=bond[1].name)
tree = etree.ElementTree(root)
xml = etree.tostring(tree, pretty_print=True).decode('utf-8')
with closing(open(save_as, 'w')) as f:
f.write(xml)
def check_atom_types(cluster, index):
nl = NeighborList(natural_cutoffs(cluster), bothways=True, self_interaction=False)
nl.update(cluster)
class_Al = [atom.index for atom in cluster if atom.symbol == 'Al']
class_Cu = [atom.index for atom in cluster if atom.symbol == 'Cu']
class_H = [atom.index for atom in cluster if atom.symbol == 'H']
class_O_EF = [get_EF_atom_indices(cluster)[-1]]
class_O_Cu = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF and
all(val not in class_H for val in nl.get_neighbors(atom.index)[0])]
class_O_H = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF + class_O_Cu]
if index in class_Al:
return 'Al'
if index in class_Cu:
return 'Cu'
if index in class_H:
return 'H'
if index in class_O_EF:
return 'O-EF'
if index in class_O_Cu:
return 'O-Cu'
if index in class_O_H:
return 'O-H'
else:
return 'None'
def get_property_types(cluster, property_list):
type_dict, repeated_list, whole_type_list, count = {}, [], [], 0
for items in property_list:
my_list = []
for val in items:
my_list.append(check_atom_types(cluster, val))
whole_type_list.append(my_list)
if all(list(pair) not in repeated_list for pair in list(permutations(my_list))):
repeated_list.append(my_list)
type_dict[count] = my_list
count += 1
return type_dict, whole_type_list
def _get_index_dict(type_dict, whole_type_list, index_list):
index_dict = {}
for key, value in type_dict.items():
temp_list = []
for count, items in enumerate(whole_type_list):
if any(list(pair) == value for pair in list(permutations(items))):
temp_list.append(index_list[count])
index_dict[key] = temp_list
return index_dict
def get_type_index_pair(type_dict, whole_type_list, index_list):
bond_index_dict = _get_index_dict(type_dict, whole_type_list, index_list)
type_index_dict = {}
for key, value in type_dict.items():
type_index_dict[tuple(value)] = bond_index_dict[key]
return type_index_dict
def pretty_print(my_dict):
for key, value in my_dict.items():
print(key, '-->', value)
def shorten_index_list_by_types(type_index_dict, exclude_atom_type=None, exclude_property_type=None,
include_property_type=None, case=0):
if exclude_atom_type is not None and exclude_property_type is None:
case = 1
if exclude_property_type is not None and exclude_atom_type is None:
case = 2
if exclude_property_type is not None and exclude_atom_type is not None:
case = 3
if include_property_type is not None:
case = 4
shortened_list = []
for type_list, index_list in type_index_dict.items():
if case == 1 and all(single_type not in type_list for single_type in exclude_atom_type):
shortened_list.extend(index_list)
elif case == 2 and all(list(value) not in exclude_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
elif case == 3 and all(single_type not in type_list for single_type in exclude_atom_type) and \
all(list(value) not in exclude_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
elif case == 4 and any(list(value) in include_property_type for value in list(permutations(type_list))):
shortened_list.extend(index_list)
return shortened_list
def set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list):
pdb = PDBFile(folder_path + '/cluster_%s_labeled.pdb' % cluster_tag_number)
atoms = list(pdb.topology.atoms())
for index in shortened_bond_list:
pdb.topology.addBond(atoms[index[0]], atoms[index[1]])
bonds = list(pdb.topology.bonds())
write_xml(atoms, bonds, folder_path + '/forcefield.xml')
FF = ForceField(folder_path + '/forcefield.xml')
system = FF.createSystem(pdb.topology)
return pdb, system
def custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,
angle_type_index_dict=None, angle_param_dict=None):
force = CustomBondForce("D*(1-exp(-alpha*(r-r0)))^2") # Morse bond
force.addPerBondParameter("D")
force.addPerBondParameter("alpha")
force.addPerBondParameter("r0")
force.setUsesPeriodicBoundaryConditions(periodic=True)
for bond in bond_list:
for my_type, my_index in bond_type_index_dict.items():
if any(list(val) in my_index for val in list(permutations(bond))):
try:
force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))
except:
my_type = tuple(reversed(my_type))
force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))
# note: consider updating the info_dict to make it order insensitive
system.addForce(force)
force = HarmonicAngleForce() # Harmonic angle
force.setUsesPeriodicBoundaryConditions(periodic=True) # adding periodic conditions
for angle in angle_list:
for my_type, my_index in angle_type_index_dict.items():
if any(list(val) in my_index for val in list(permutations(angle))):
type_tag = [tuple(val) for val in list(angle_param_dict.keys()) if val in list(permutations(my_type))]
force.addAngle(int(angle[0]), int(angle[1]), int(angle[2]), *angle_param_dict.get(type_tag[0]))
system.addForce(force)
# assert(system.usesPeriodicBoundaryConditions() == True)
return system
def get_openMM_forces(pdb, system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,
                      angle_type_index_dict=None, angle_param_dict=None):
    """Evaluate the custom force field on a single frame and return per-atom forces.

    Attaches the Morse-bond/harmonic-angle forces to *system*, builds a
    throwaway simulation context at the PDB positions, and converts the
    resulting forces from kJ/(nm mol) to eV/A.
    """
    loaded_system = custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list,
                                               angle_type_index_dict, angle_param_dict)
    # integrator settings are irrelevant here (no dynamics are run) -- randomly picked
    dummy_integrator = LangevinMiddleIntegrator(3 * kelvin, 1 / picosecond, 0.4 * picoseconds)
    sim = Simulation(pdb.topology, loaded_system, dummy_integrator)
    sim.context.setPositions(pdb.positions)
    state = sim.context.getState(getForces=True)
    # convert forces from kJ/nm mol to eV/A
    return np.array(state.getForces(asNumpy=True)) * 1.0364e-2 * 0.1
# NOTE: section below deals with multiple input structures for force field training
def get_EF_O_index(traj):
    """Return the most common extra-framework O index across the frames of *traj*.

    Frames where the extra-framework analysis fails are skipped (best-effort),
    but the except clause is narrowed to ``Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed by a bare ``except:``.
    """
    EF_O_index_list = []
    for atoms in traj:
        try:
            EFAnalyzer = ExtraFrameworkAnalyzer(atoms)
            EF_O_index_list.append(EFAnalyzer.get_extraframework_cluster()[-1])
        except Exception:
            continue  # best-effort: skip frames the analyzer cannot handle
    return mode(tuple(EF_O_index_list))
def prep_topologies(folder_path, sample_zeolite, traj_name=None, save_traj=False, del_unlabeled_pdb=False,
                    show_all=False):
    """Extract capped clusters from a trajectory and write labeled PDB topologies.

    Reads ``<traj_name>.traj`` (falling back to ``<sample_zeolite>.traj``),
    determines the consensus extra-framework O index from the first 100 frames,
    then caps and labels a cluster for every frame, writing the PDBs into a
    per-trajectory output directory.

    Returns ``(EF_atoms_index, cluster_EF_index)`` from the last successfully
    processed frame (empty lists if every frame failed).
    """
    # both branches only differed in which tag named the .traj file and folder
    tag = traj_name if traj_name is not None else sample_zeolite
    traj = read(folder_path + '/%s.traj' % tag, ':')
    output_dir = os.path.join(folder_path, tag)
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    cluster_traj, EF_atoms_index, cluster_EF_index = [], [], []
    EF_O_index = get_EF_O_index(traj[0:100])
    for count, atoms in enumerate(traj):
        try:
            cluster, EF_atoms_index, cluster_EF_index = get_capped_cluster(atoms, output_dir, 'cluster_' + str(count),
                                                                           save_traj, [EF_O_index])
            label_pdb(output_dir, 'cluster_%s' % str(count), del_unlabeled_pdb)
            cluster_traj.append(cluster)
            print(sample_zeolite, count)
        except Exception:  # narrowed from bare except; frames may legitimately fail
            print(sample_zeolite, count, 'failed!')
    if show_all:
        view(cluster_traj)
    return EF_atoms_index, cluster_EF_index
def reformat_inputs(bond_param_dict, angle_param_dict):
    """Flatten parameter dictionaries into optimizer-friendly lists.

    Returns ``(bond_type, angle_type, param_list)`` where the type lists hold
    the dict keys as lists (in insertion order) and ``param_list`` is the
    concatenation of all bond parameters followed by all angle parameters.
    """
    bond_type = [list(key) for key in bond_param_dict]
    angle_type = [list(key) for key in angle_param_dict]
    param_list = []
    for values in bond_param_dict.values():
        param_list.extend(np.array(values))
    for values in angle_param_dict.values():
        param_list.extend(np.array(values))
    return bond_type, angle_type, param_list
def get_required_objects_for_ff(folder_path, cluster_tag_number, included_bond_type, included_angle_type,
                                bond_type_index_dict, angle_type_index_dict):
    """Prepare everything needed to evaluate the force field on one cluster.

    Filters the bond/angle index lists down to the requested types, then builds
    the OpenMM pdb/system pair for the tagged cluster.

    Returns ``(pdb, system, shortened_bond_list, shortened_angle_list)``.
    """
    bonds = shorten_index_list_by_types(bond_type_index_dict, include_property_type=included_bond_type)
    angles = shorten_index_list_by_types(angle_type_index_dict, include_property_type=included_angle_type)
    pdb, system = set_up_openMM_system(folder_path, cluster_tag_number, bonds)
    return pdb, system, bonds, angles
def get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
                  angle_type_index_dict, EF_index):
    """Unpack a flat optimizer vector *param* into bond/angle parameter dicts
    and evaluate the force field on every stored configuration.

    ``info_dict`` maps a configuration tag to ``[pdb, system, bond_list,
    angle_list]`` as produced by ``get_required_objects_for_ff``.  Returns a
    list (one entry per configuration) of the predicted forces on the atoms
    selected by ``EF_index``.
    """
    bond_param_dict, angle_param_dict, number_of_bond_param = {}, {}, 0
    # NOTE(review): the count*len(indices) slicing assumes every bond type has
    # the same number of parameters (3 for the Morse dicts used here) -- confirm
    # before feeding dicts with mixed parameter counts.
    for count, (types, indices) in enumerate(ini_bond_param_dict.items()):
        bond_param_dict[types] = list(param[count * len(indices):(count + 1) * len(indices)])
        number_of_bond_param += len(indices)
    # angle parameters live after all bond parameters in the flat vector
    for count, (types, indices) in enumerate(ini_angle_param_dict.items()):
        angle_param_dict[types] = list(
            param[count * len(indices) + number_of_bond_param:(count + 1) * len(indices) + number_of_bond_param])
    predicted_f = []
    # deep-copy so the cached openMM systems in info_dict are never mutated
    # by the forces added inside get_openMM_forces
    my_dict = copy.deepcopy(info_dict)
    for config_tag, info_list in my_dict.items():
        ff_forces = get_openMM_forces(info_list[0], info_list[1], info_list[2], bond_type_index_dict, bond_param_dict,
                                      info_list[3], angle_type_index_dict, angle_param_dict)[EF_index]
        predicted_f.append([force_list for force_list in ff_forces])
    return predicted_f
def get_DFT_forces_single(atoms, atom_index):
    """Return the DFT force vector stored on *atoms* for one atom.

    Reads the cached calculator results directly (equivalent to
    ``atoms.get_forces()[atom_index]`` without triggering a recalculation).
    The previous version also computed the force magnitude and discarded it.
    """
    return atoms.calc.results['forces'][atom_index]
def get_residue(param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
                bond_type_index_dict, angle_type_index_dict, EF_index):
    """Objective function for the force-field fit.

    Evaluates the force field with the trial vector *param*, forms the
    per-component difference against the DFT forces, applies the Boltzmann
    *weights* (one weight per force component), and returns the mean squared
    weighted residual.  The loss is printed as optimizer progress output.
    """
    predicted_f = get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
                                angle_type_index_dict, EF_index)
    residue = np.reshape(np.array(np.reshape(predicted_f, [-1, 3])) - np.array(np.reshape(DFT_f, [-1, 3])), -1)
    weighted_residue = residue * weights
    # compute the loss once instead of once for the print and again for the return
    loss = np.mean(weighted_residue ** 2)
    print(loss)
    return loss
def get_fitting_parameters(initial_param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
                           bond_type_index_dict, angle_type_index_dict, EF_index):
    """Fit the force-field parameters by Powell minimization of ``get_residue``.

    Bounds constrain the Morse ``r0`` values to be non-negative and the angle
    equilibrium values to ``[0, pi]``; everything else is unconstrained.
    Prints ``res.success`` and returns the full scipy OptimizeResult.
    """
    # todo: more flexible bond reformating and feeding
    # np.inf (lowercase) -- the np.Inf alias was removed in NumPy 2.0
    bounds = ((-np.inf, np.inf), (-np.inf, np.inf), (0, np.inf), (-np.inf, np.inf), (-np.inf, np.inf),
              (0, np.inf), (-np.inf, np.inf), (-np.inf, np.inf), (0, np.inf), (0, np.pi),
              (-np.inf, np.inf), (0, np.pi), (-np.inf, np.inf), (0, np.pi), (-np.inf, np.inf))
    res = minimize(get_residue, initial_param, method='Powell', bounds=bounds, options={'ftol': 0.01, 'maxiter': 1000},
                   args=(info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,
                         bond_type_index_dict, angle_type_index_dict, EF_index))
    print(res.success)
    return res
def make_parity_plot(ff_forces, dft_forces, atom_name):
    """Show a parity (y = x) scatter plot of force-field vs DFT forces.

    Both inputs are flattened arrays of force components; *atom_name* only
    labels the plot title.
    """
    # plt.subplots() already creates a figure; the old extra plt.figure()
    # call opened an orphan empty window
    fig, ax = plt.subplots()
    plt.plot(dft_forces, ff_forces, 'o')
    plt.xlabel('DFT_force', fontsize=18)
    plt.ylabel('FF_force', fontsize=18)
    # square the axes around the common data range so y = x is the diagonal
    lims = [np.min([ax.get_xlim(), ax.get_ylim()]), np.max([ax.get_xlim(), ax.get_ylim()])]
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
    ax.set_aspect('equal')
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    plt.title('Force fitting on %s' % atom_name, fontsize=18)
    plt.show()
def func():
    """End-to-end force-field fitting workflow for one zeolite trajectory.

    Steps: build the type/index dictionaries from an initial labeled cluster,
    prepare OpenMM systems for a subsample of frames, collect the DFT forces
    and Boltzmann weights, run the Powell fit, and save/plot the results.
    Paths are hard-coded to the author's machine.
    """
    tic = time.perf_counter()
    zeolite = 'SOD'
    folder_path, sample_zeolite, traj_name = '/Users/jiaweiguo/Box/openMM_FF', zeolite, zeolite + '_md'
    # prep_topologies(folder_path, sample_zeolite, traj_name, del_unlabeled_pdb=True)
    # initial guesses: [D, alpha, r0] per bond type, [k, theta0-ish] per angle type
    ini_bond_param_dict = {('O-Cu', 'Cu'): [60.097, 2.267, 0.228], ('O-EF', 'Cu'): [4405.247, 4.163, 0.177],
                           ('Al', 'Cu'): [-2.656, 4.608, 0.413]}
    ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.458, 16.552], ('O-Cu', 'Cu', 'O-EF'): [3.266, 4.136],
                            ('Al', 'Cu', 'O-EF'): [1.925, 1.673]}
    included_bond_type, included_angle_type, ini_param = reformat_inputs(ini_bond_param_dict, ini_angle_param_dict)
    # set up type_index_dict using a single set of data #fixme: randomly pick several initial clusters to built dict
    cluster = read(os.path.join(folder_path, traj_name) + '/cluster_0_labeled.pdb', '0')
    bond_index_list, shortened_bond_index_list = get_bonds(cluster, mult=2)
    bond_type_dict, whole_bond_type_list = get_property_types(cluster, bond_index_list)
    angle_index_list, shortened_angle_index_list = get_angles(cluster, mult=2)
    angle_type_dict, whole_angle_type_list = get_property_types(cluster, angle_index_list)
    bond_type_index_dict = get_type_index_pair(bond_type_dict, whole_bond_type_list, bond_index_list)
    angle_type_index_dict = get_type_index_pair(angle_type_dict, whole_angle_type_list, angle_index_list)
    # only every numb_skip-th cluster is used for the fit
    numb_skip = 2000
    info_dict, output_path = {}, os.path.join(folder_path, traj_name)
    files = [files for files in os.listdir(os.path.join(folder_path, traj_name)) if '.pdb' in files]
    for cluster_tag_number in np.arange(0, len(files), numb_skip):
        cluster_tag_number = int(cluster_tag_number)
        pdb, system, shortened_bond_list, shortened_angle_list = \
            get_required_objects_for_ff(output_path, cluster_tag_number, included_bond_type, included_angle_type,
                                        bond_type_index_dict, angle_type_index_dict)
        info_dict[cluster_tag_number] = [pdb, system, shortened_bond_list, shortened_angle_list]
        print(cluster_tag_number)
    with open(output_path + '/info_dict_%s.pickle' % numb_skip, 'wb') as f:
        pickle.dump(info_dict, f)
    with open(folder_path + '/EF_index_dict.pickle', 'rb') as f:
        EF_index_dict = pickle.load(f)
    # DFT reference forces for the last three (extra-framework) atoms of each frame
    traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)
    DFT_f = []
    for atoms in traj:
        DFT_f.append([get_DFT_forces_single(atoms, atom_index=val) for val in EF_index_dict.get(zeolite)[-3:]])
    print(np.array(DFT_f).shape)
    ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']
    DFT_E = []
    for atoms in traj:
        DFT_E.append(atoms.calc.results['energy'])
    with open(os.path.join(folder_path, traj_name) + '/info_dict_%s.pickle' % numb_skip, 'rb') as f:
        info_dict = pickle.load(f)
    with open(folder_path + '/cluster_EF_index_dict.pickle', 'rb') as f:
        cluster_EF_index_dict = pickle.load(f)
    my_dict = copy.deepcopy(info_dict)  # important, need to keep openMM "systems" fixed
    # Boltzmann weight per frame (kT at 298 K), replicated 9x: one per force
    # component of the 3 extra-framework atoms
    weights = []
    for value in np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298)):
        weights.extend([value, value, value, value, value, value, value, value, value])
    res = get_fitting_parameters(ini_param, my_dict, DFT_f, np.array(weights), ini_bond_param_dict, ini_angle_param_dict,
                                 bond_type_index_dict, angle_type_index_dict, cluster_EF_index_dict.get(zeolite))
    print([np.around(float(val), decimals=3) for val in res.x])
    FF_f = get_FF_forces(res.x, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,
                         angle_type_index_dict, cluster_EF_index_dict.get(zeolite))
    make_parity_plot(np.array(np.reshape(FF_f, [-1, 3])), np.array(np.reshape(DFT_f, [-1, 3])), 'Cu-O-Cu')
    force_dict = {'FF': np.array(np.reshape(FF_f, [-1, 3])), 'DFT': np.array(np.reshape(DFT_f, [-1, 3]))}
    with open(output_path + '/forces_%s.pickle' % numb_skip, 'wb') as f:
        pickle.dump(force_dict, f)
    toc = time.perf_counter()
    print(f"Program terminated in {toc - tic:0.4f} seconds")
if __name__ == '__main__':
    # Entry point intentionally disabled; uncomment to run the full workflow.
    # func()
    pass  # a guard body holding only a comment is a SyntaxError, so keep a statement
| true | true |
f71989a26c51d5d0de8be179c705597a99ff7aea | 17,373 | py | Python | python/ccxt/async_support/bitbay.py | Richard-L-Johnson/ccxt1 | 903aa1288694f9192b15d22b945508661bdc8807 | [
"MIT"
] | 13 | 2019-01-26T14:41:37.000Z | 2022-03-26T03:33:12.000Z | python/ccxt/async_support/bitbay.py | Richard-L-Johnson/ccxt1 | 903aa1288694f9192b15d22b945508661bdc8807 | [
"MIT"
] | 17 | 2018-10-02T04:43:13.000Z | 2018-11-01T17:07:37.000Z | python/ccxt/async_support/bitbay.py | Richard-L-Johnson/ccxt1 | 903aa1288694f9192b15d22b945508661bdc8807 | [
"MIT"
] | 12 | 2018-12-24T02:19:02.000Z | 2022-03-26T05:04:25.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import InvalidNonce
class bitbay (Exchange):
    """ccxt async adapter for the BitBay exchange (Malta/EU).

    Implements the public market-data endpoints and the private trading API
    documented at https://bitbay.net/public-api.

    Fixes relative to the generated original:
    - ``create_order`` now awaits the private POST (it previously returned an
      un-awaited coroutine to the caller);
    - ``handle_errors`` converts the integer API error code to a string before
      looking it up in the string-keyed ``exceptions`` map, so specific
      exception classes are actually raised;
    - ``fetch_ticker`` no longer crashes when ``volume`` or ``vwap`` is absent.
    """

    def describe(self):
        """Return the static exchange description (endpoints, markets, fees, error map)."""
        return self.deep_extend(super(bitbay, self).describe(), {
            'id': 'bitbay',
            'name': 'BitBay',
            'countries': ['MT', 'EU'],  # Malta
            'rateLimit': 1000,
            'has': {
                'CORS': True,
                'withdraw': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
                'www': 'https://bitbay.net',
                'api': {
                    'public': 'https://bitbay.net/API/Public',
                    'private': 'https://bitbay.net/API/Trading/tradingApi.php',
                },
                'doc': [
                    'https://bitbay.net/public-api',
                    'https://bitbay.net/account/tab-api',
                    'https://github.com/BitBayNet/API',
                ],
                'fees': 'https://bitbay.net/en/fees',
            },
            'api': {
                'public': {
                    'get': [
                        '{id}/all',
                        '{id}/market',
                        '{id}/orderbook',
                        '{id}/ticker',
                        '{id}/trades',
                    ],
                },
                'private': {
                    'post': [
                        'info',
                        'trade',
                        'cancel',
                        'orderbook',
                        'orders',
                        'transfer',
                        'withdraw',
                        'history',
                        'transactions',
                    ],
                },
            },
            'markets': {
                'BTC/USD': {'id': 'BTCUSD', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'baseId': 'BTC', 'quoteId': 'USD'},
                'BTC/EUR': {'id': 'BTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'baseId': 'BTC', 'quoteId': 'EUR'},
                'BTC/PLN': {'id': 'BTCPLN', 'symbol': 'BTC/PLN', 'base': 'BTC', 'quote': 'PLN', 'baseId': 'BTC', 'quoteId': 'PLN'},
                'LTC/USD': {'id': 'LTCUSD', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'baseId': 'LTC', 'quoteId': 'USD'},
                'LTC/EUR': {'id': 'LTCEUR', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'baseId': 'LTC', 'quoteId': 'EUR'},
                'LTC/PLN': {'id': 'LTCPLN', 'symbol': 'LTC/PLN', 'base': 'LTC', 'quote': 'PLN', 'baseId': 'LTC', 'quoteId': 'PLN'},
                'LTC/BTC': {'id': 'LTCBTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'baseId': 'LTC', 'quoteId': 'BTC'},
                'ETH/USD': {'id': 'ETHUSD', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'baseId': 'ETH', 'quoteId': 'USD'},
                'ETH/EUR': {'id': 'ETHEUR', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'baseId': 'ETH', 'quoteId': 'EUR'},
                'ETH/PLN': {'id': 'ETHPLN', 'symbol': 'ETH/PLN', 'base': 'ETH', 'quote': 'PLN', 'baseId': 'ETH', 'quoteId': 'PLN'},
                'ETH/BTC': {'id': 'ETHBTC', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'baseId': 'ETH', 'quoteId': 'BTC'},
                'LSK/USD': {'id': 'LSKUSD', 'symbol': 'LSK/USD', 'base': 'LSK', 'quote': 'USD', 'baseId': 'LSK', 'quoteId': 'USD'},
                'LSK/EUR': {'id': 'LSKEUR', 'symbol': 'LSK/EUR', 'base': 'LSK', 'quote': 'EUR', 'baseId': 'LSK', 'quoteId': 'EUR'},
                'LSK/PLN': {'id': 'LSKPLN', 'symbol': 'LSK/PLN', 'base': 'LSK', 'quote': 'PLN', 'baseId': 'LSK', 'quoteId': 'PLN'},
                'LSK/BTC': {'id': 'LSKBTC', 'symbol': 'LSK/BTC', 'base': 'LSK', 'quote': 'BTC', 'baseId': 'LSK', 'quoteId': 'BTC'},
                'BCH/USD': {'id': 'BCCUSD', 'symbol': 'BCH/USD', 'base': 'BCH', 'quote': 'USD', 'baseId': 'BCC', 'quoteId': 'USD'},
                'BCH/EUR': {'id': 'BCCEUR', 'symbol': 'BCH/EUR', 'base': 'BCH', 'quote': 'EUR', 'baseId': 'BCC', 'quoteId': 'EUR'},
                'BCH/PLN': {'id': 'BCCPLN', 'symbol': 'BCH/PLN', 'base': 'BCH', 'quote': 'PLN', 'baseId': 'BCC', 'quoteId': 'PLN'},
                'BCH/BTC': {'id': 'BCCBTC', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC', 'baseId': 'BCC', 'quoteId': 'BTC'},
                'BTG/USD': {'id': 'BTGUSD', 'symbol': 'BTG/USD', 'base': 'BTG', 'quote': 'USD', 'baseId': 'BTG', 'quoteId': 'USD'},
                'BTG/EUR': {'id': 'BTGEUR', 'symbol': 'BTG/EUR', 'base': 'BTG', 'quote': 'EUR', 'baseId': 'BTG', 'quoteId': 'EUR'},
                'BTG/PLN': {'id': 'BTGPLN', 'symbol': 'BTG/PLN', 'base': 'BTG', 'quote': 'PLN', 'baseId': 'BTG', 'quoteId': 'PLN'},
                'BTG/BTC': {'id': 'BTGBTC', 'symbol': 'BTG/BTC', 'base': 'BTG', 'quote': 'BTC', 'baseId': 'BTG', 'quoteId': 'BTC'},
                'DASH/USD': {'id': 'DASHUSD', 'symbol': 'DASH/USD', 'base': 'DASH', 'quote': 'USD', 'baseId': 'DASH', 'quoteId': 'USD'},
                'DASH/EUR': {'id': 'DASHEUR', 'symbol': 'DASH/EUR', 'base': 'DASH', 'quote': 'EUR', 'baseId': 'DASH', 'quoteId': 'EUR'},
                'DASH/PLN': {'id': 'DASHPLN', 'symbol': 'DASH/PLN', 'base': 'DASH', 'quote': 'PLN', 'baseId': 'DASH', 'quoteId': 'PLN'},
                'DASH/BTC': {'id': 'DASHBTC', 'symbol': 'DASH/BTC', 'base': 'DASH', 'quote': 'BTC', 'baseId': 'DASH', 'quoteId': 'BTC'},
                'GAME/USD': {'id': 'GAMEUSD', 'symbol': 'GAME/USD', 'base': 'GAME', 'quote': 'USD', 'baseId': 'GAME', 'quoteId': 'USD'},
                'GAME/EUR': {'id': 'GAMEEUR', 'symbol': 'GAME/EUR', 'base': 'GAME', 'quote': 'EUR', 'baseId': 'GAME', 'quoteId': 'EUR'},
                'GAME/PLN': {'id': 'GAMEPLN', 'symbol': 'GAME/PLN', 'base': 'GAME', 'quote': 'PLN', 'baseId': 'GAME', 'quoteId': 'PLN'},
                'GAME/BTC': {'id': 'GAMEBTC', 'symbol': 'GAME/BTC', 'base': 'GAME', 'quote': 'BTC', 'baseId': 'GAME', 'quoteId': 'BTC'},
                'XRP/USD': {'id': 'XRPUSD', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'baseId': 'XRP', 'quoteId': 'USD'},
                'XRP/EUR': {'id': 'XRPEUR', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'baseId': 'XRP', 'quoteId': 'EUR'},
                'XRP/PLN': {'id': 'XRPPLN', 'symbol': 'XRP/PLN', 'base': 'XRP', 'quote': 'PLN', 'baseId': 'XRP', 'quoteId': 'PLN'},
                'XRP/BTC': {'id': 'XRPBTC', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'baseId': 'XRP', 'quoteId': 'BTC'},
                # 'XIN/USD': {'id': 'XINUSD', 'symbol': 'XIN/USD', 'base': 'XIN', 'quote': 'USD', 'baseId': 'XIN', 'quoteId': 'USD'},
                # 'XIN/EUR': {'id': 'XINEUR', 'symbol': 'XIN/EUR', 'base': 'XIN', 'quote': 'EUR', 'baseId': 'XIN', 'quoteId': 'EUR'},
                # 'XIN/PLN': {'id': 'XINPLN', 'symbol': 'XIN/PLN', 'base': 'XIN', 'quote': 'PLN', 'baseId': 'XIN', 'quoteId': 'PLN'},
                'XIN/BTC': {'id': 'XINBTC', 'symbol': 'XIN/BTC', 'base': 'XIN', 'quote': 'BTC', 'baseId': 'XIN', 'quoteId': 'BTC'},
            },
            'fees': {
                'trading': {
                    'maker': 0.3 / 100,
                    'taker': 0.0043,
                },
                'funding': {
                    'withdraw': {
                        'BTC': 0.0009,
                        'LTC': 0.005,
                        'ETH': 0.00126,
                        'LSK': 0.2,
                        'BCH': 0.0006,
                        'GAME': 0.005,
                        'DASH': 0.001,
                        'BTG': 0.0008,
                        'PLN': 4,
                        'EUR': 1.5,
                    },
                },
            },
            'exceptions': {
                '400': ExchangeError,  # At least one parameter wasn't set
                '401': InvalidOrder,  # Invalid order type
                '402': InvalidOrder,  # No orders with specified currencies
                '403': InvalidOrder,  # Invalid payment currency name
                '404': InvalidOrder,  # Error. Wrong transaction type
                '405': InvalidOrder,  # Order with self id doesn't exist
                '406': InsufficientFunds,  # No enough money or crypto
                # code 407 not specified are not specified in their docs
                '408': InvalidOrder,  # Invalid currency name
                '501': AuthenticationError,  # Invalid public key
                '502': AuthenticationError,  # Invalid sign
                '503': InvalidNonce,  # Invalid moment parameter. Request time doesn't match current server time
                '504': ExchangeError,  # Invalid method
                '505': AuthenticationError,  # Key has no permission for self action
                '506': AuthenticationError,  # Account locked. Please contact with customer service
                # codes 507 and 508 are not specified in their docs
                '509': ExchangeError,  # The BIC/SWIFT is required for self currency
                '510': ExchangeError,  # Invalid market name
            },
        })

    async def fetch_balance(self, params={}):
        """Fetch free/used/total balances for every currency known to this exchange."""
        response = await self.privatePostInfo()
        if 'balances' in response:
            balance = response['balances']
            result = {'info': balance}
            codes = list(self.currencies.keys())
            for i in range(0, len(codes)):
                code = codes[i]
                currency = self.currencies[code]
                id = currency['id']
                account = self.account()
                if id in balance:
                    account['free'] = float(balance[id]['available'])
                    account['used'] = float(balance[id]['locked'])
                    account['total'] = self.sum(account['free'], account['used'])
                result[code] = account
            return self.parse_balance(result)
        raise ExchangeError(self.id + ' empty balance response ' + self.json(response))

    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the public order book for *symbol*."""
        orderbook = await self.publicGetIdOrderbook(self.extend({
            'id': self.market_id(symbol),
        }, params))
        return self.parse_order_book(orderbook)

    async def fetch_ticker(self, symbol, params={}):
        """Fetch and normalize the public ticker for *symbol*."""
        ticker = await self.publicGetIdTicker(self.extend({
            'id': self.market_id(symbol),
        }, params))
        timestamp = self.milliseconds()
        baseVolume = self.safe_float(ticker, 'volume')
        vwap = self.safe_float(ticker, 'vwap')
        # either field may be missing from the response; avoid None * None
        quoteVolume = None
        if baseVolume is not None and vwap is not None:
            quoteVolume = baseVolume * vwap
        last = self.safe_float(ticker, 'last')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'max'),
            'low': self.safe_float(ticker, 'min'),
            'bid': self.safe_float(ticker, 'bid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'ask'),
            'askVolume': None,
            'vwap': vwap,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': self.safe_float(ticker, 'average'),
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }

    def parse_trade(self, trade, market):
        """Convert one raw public trade into the unified ccxt trade structure."""
        timestamp = trade['date'] * 1000  # API reports seconds
        return {
            'id': trade['tid'],
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': None,
            'side': trade['type'],
            'price': trade['price'],
            'amount': trade['amount'],
        }

    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol*."""
        market = self.market(symbol)
        response = await self.publicGetIdTrades(self.extend({
            'id': market['id'],
        }, params))
        return self.parse_trades(response, market, since, limit)

    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit order; market orders are not supported by this API."""
        if type != 'limit':
            raise ExchangeError(self.id + ' allows limit orders only')
        market = self.market(symbol)
        # await is required here: without it the caller would receive an
        # un-awaited coroutine instead of the API response (cf. cancel_order)
        return await self.privatePostTrade(self.extend({
            'type': side,
            'currency': market['baseId'],
            'amount': amount,
            'payment_currency': market['quoteId'],
            'rate': price,
        }, params))

    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by exchange-assigned id."""
        return await self.privatePostCancel({'id': id})

    def is_fiat(self, currency):
        """Return True when *currency* is one of the supported fiat codes."""
        fiatCurrencies = {
            'USD': True,
            'EUR': True,
            'PLN': True,
        }
        if currency in fiatCurrencies:
            return True
        return False

    async def withdraw(self, code, amount, address, tag=None, params={}):
        """Withdraw *amount* of *code*; fiat uses /withdraw, crypto uses /transfer."""
        self.check_address(address)
        await self.load_markets()
        method = None
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
            'quantity': amount,
        }
        if self.is_fiat(code):
            method = 'privatePostWithdraw'
            # request['account'] = params['account']  # they demand an account number
            # request['express'] = params['express']  # whatever it means, they don't explain
            # request['bic'] = ''
        else:
            method = 'privatePostTransfer'
            if tag is not None:
                # destination tags (e.g. XRP) are appended as a query parameter
                address += '?dt=' + str(tag)
            request['address'] = address
        response = await getattr(self, method)(self.extend(request, params))
        return {
            'info': response,
            'id': None,
        }

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the url/method/body/headers for a request; private calls are HMAC-SHA512 signed."""
        url = self.urls['api'][api]
        if api == 'public':
            query = self.omit(params, self.extract_params(path))
            url += '/' + self.implode_params(path, params) + '.json'
            url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            body = self.urlencode(self.extend({
                'method': path,
                'moment': self.nonce(),
            }, params))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
                'API-Key': self.apiKey,
                'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def handle_errors(self, httpCode, reason, url, method, headers, body):
        """Map BitBay error codes onto ccxt exception classes."""
        if not isinstance(body, basestring):
            return  # fallback to default error handler
        if len(body) < 2:
            return
        if (body[0] == '{') or (body[0] == '['):
            response = json.loads(body)
            if 'code' in response:
                #
                # bitbay returns the integer 'success': 1 key from their private API
                # or an integer 'code' value from 0 to 510 and an error message
                #
                # {'success': 1, ...}
                # {'code': 502, 'message': 'Invalid sign'}
                # {'code': 0, 'message': 'offer funds not exceeding minimums'}
                #
                code = response['code']  # always an integer
                feedback = self.id + ' ' + self.json(response)
                exceptions = self.exceptions
                # the exceptions map is keyed by *string* codes while the API
                # returns an integer -- convert so the lookup can actually match
                key = str(code)
                if key in exceptions:
                    raise exceptions[key](feedback)
                else:
                    raise ExchangeError(feedback)
| 50.650146 | 136 | 0.476141 |
rt.base.exchange import Exchange
try:
basestring
except NameError:
basestring = str
import hashlib
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import InvalidNonce
class bitbay (Exchange):
def describe(self):
return self.deep_extend(super(bitbay, self).describe(), {
'id': 'bitbay',
'name': 'BitBay',
'countries': ['MT', 'EU'],
'rateLimit': 1000,
'has': {
'CORS': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
'www': 'https://bitbay.net',
'api': {
'public': 'https://bitbay.net/API/Public',
'private': 'https://bitbay.net/API/Trading/tradingApi.php',
},
'doc': [
'https://bitbay.net/public-api',
'https://bitbay.net/account/tab-api',
'https://github.com/BitBayNet/API',
],
'fees': 'https://bitbay.net/en/fees',
},
'api': {
'public': {
'get': [
'{id}/all',
'{id}/market',
'{id}/orderbook',
'{id}/ticker',
'{id}/trades',
],
},
'private': {
'post': [
'info',
'trade',
'cancel',
'orderbook',
'orders',
'transfer',
'withdraw',
'history',
'transactions',
],
},
},
'markets': {
'BTC/USD': {'id': 'BTCUSD', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'baseId': 'BTC', 'quoteId': 'USD'},
'BTC/EUR': {'id': 'BTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'baseId': 'BTC', 'quoteId': 'EUR'},
'BTC/PLN': {'id': 'BTCPLN', 'symbol': 'BTC/PLN', 'base': 'BTC', 'quote': 'PLN', 'baseId': 'BTC', 'quoteId': 'PLN'},
'LTC/USD': {'id': 'LTCUSD', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'baseId': 'LTC', 'quoteId': 'USD'},
'LTC/EUR': {'id': 'LTCEUR', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'baseId': 'LTC', 'quoteId': 'EUR'},
'LTC/PLN': {'id': 'LTCPLN', 'symbol': 'LTC/PLN', 'base': 'LTC', 'quote': 'PLN', 'baseId': 'LTC', 'quoteId': 'PLN'},
'LTC/BTC': {'id': 'LTCBTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'baseId': 'LTC', 'quoteId': 'BTC'},
'ETH/USD': {'id': 'ETHUSD', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'baseId': 'ETH', 'quoteId': 'USD'},
'ETH/EUR': {'id': 'ETHEUR', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'baseId': 'ETH', 'quoteId': 'EUR'},
'ETH/PLN': {'id': 'ETHPLN', 'symbol': 'ETH/PLN', 'base': 'ETH', 'quote': 'PLN', 'baseId': 'ETH', 'quoteId': 'PLN'},
'ETH/BTC': {'id': 'ETHBTC', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'baseId': 'ETH', 'quoteId': 'BTC'},
'LSK/USD': {'id': 'LSKUSD', 'symbol': 'LSK/USD', 'base': 'LSK', 'quote': 'USD', 'baseId': 'LSK', 'quoteId': 'USD'},
'LSK/EUR': {'id': 'LSKEUR', 'symbol': 'LSK/EUR', 'base': 'LSK', 'quote': 'EUR', 'baseId': 'LSK', 'quoteId': 'EUR'},
'LSK/PLN': {'id': 'LSKPLN', 'symbol': 'LSK/PLN', 'base': 'LSK', 'quote': 'PLN', 'baseId': 'LSK', 'quoteId': 'PLN'},
'LSK/BTC': {'id': 'LSKBTC', 'symbol': 'LSK/BTC', 'base': 'LSK', 'quote': 'BTC', 'baseId': 'LSK', 'quoteId': 'BTC'},
'BCH/USD': {'id': 'BCCUSD', 'symbol': 'BCH/USD', 'base': 'BCH', 'quote': 'USD', 'baseId': 'BCC', 'quoteId': 'USD'},
'BCH/EUR': {'id': 'BCCEUR', 'symbol': 'BCH/EUR', 'base': 'BCH', 'quote': 'EUR', 'baseId': 'BCC', 'quoteId': 'EUR'},
'BCH/PLN': {'id': 'BCCPLN', 'symbol': 'BCH/PLN', 'base': 'BCH', 'quote': 'PLN', 'baseId': 'BCC', 'quoteId': 'PLN'},
'BCH/BTC': {'id': 'BCCBTC', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC', 'baseId': 'BCC', 'quoteId': 'BTC'},
'BTG/USD': {'id': 'BTGUSD', 'symbol': 'BTG/USD', 'base': 'BTG', 'quote': 'USD', 'baseId': 'BTG', 'quoteId': 'USD'},
'BTG/EUR': {'id': 'BTGEUR', 'symbol': 'BTG/EUR', 'base': 'BTG', 'quote': 'EUR', 'baseId': 'BTG', 'quoteId': 'EUR'},
'BTG/PLN': {'id': 'BTGPLN', 'symbol': 'BTG/PLN', 'base': 'BTG', 'quote': 'PLN', 'baseId': 'BTG', 'quoteId': 'PLN'},
'BTG/BTC': {'id': 'BTGBTC', 'symbol': 'BTG/BTC', 'base': 'BTG', 'quote': 'BTC', 'baseId': 'BTG', 'quoteId': 'BTC'},
'DASH/USD': {'id': 'DASHUSD', 'symbol': 'DASH/USD', 'base': 'DASH', 'quote': 'USD', 'baseId': 'DASH', 'quoteId': 'USD'},
'DASH/EUR': {'id': 'DASHEUR', 'symbol': 'DASH/EUR', 'base': 'DASH', 'quote': 'EUR', 'baseId': 'DASH', 'quoteId': 'EUR'},
'DASH/PLN': {'id': 'DASHPLN', 'symbol': 'DASH/PLN', 'base': 'DASH', 'quote': 'PLN', 'baseId': 'DASH', 'quoteId': 'PLN'},
'DASH/BTC': {'id': 'DASHBTC', 'symbol': 'DASH/BTC', 'base': 'DASH', 'quote': 'BTC', 'baseId': 'DASH', 'quoteId': 'BTC'},
'GAME/USD': {'id': 'GAMEUSD', 'symbol': 'GAME/USD', 'base': 'GAME', 'quote': 'USD', 'baseId': 'GAME', 'quoteId': 'USD'},
'GAME/EUR': {'id': 'GAMEEUR', 'symbol': 'GAME/EUR', 'base': 'GAME', 'quote': 'EUR', 'baseId': 'GAME', 'quoteId': 'EUR'},
'GAME/PLN': {'id': 'GAMEPLN', 'symbol': 'GAME/PLN', 'base': 'GAME', 'quote': 'PLN', 'baseId': 'GAME', 'quoteId': 'PLN'},
'GAME/BTC': {'id': 'GAMEBTC', 'symbol': 'GAME/BTC', 'base': 'GAME', 'quote': 'BTC', 'baseId': 'GAME', 'quoteId': 'BTC'},
'XRP/USD': {'id': 'XRPUSD', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'baseId': 'XRP', 'quoteId': 'USD'},
'XRP/EUR': {'id': 'XRPEUR', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'baseId': 'XRP', 'quoteId': 'EUR'},
'XRP/PLN': {'id': 'XRPPLN', 'symbol': 'XRP/PLN', 'base': 'XRP', 'quote': 'PLN', 'baseId': 'XRP', 'quoteId': 'PLN'},
'XRP/BTC': {'id': 'XRPBTC', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'baseId': 'XRP', 'quoteId': 'BTC'},
'XIN/BTC': {'id': 'XINBTC', 'symbol': 'XIN/BTC', 'base': 'XIN', 'quote': 'BTC', 'baseId': 'XIN', 'quoteId': 'BTC'},
},
'fees': {
'trading': {
'maker': 0.3 / 100,
'taker': 0.0043,
},
'funding': {
'withdraw': {
'BTC': 0.0009,
'LTC': 0.005,
'ETH': 0.00126,
'LSK': 0.2,
'BCH': 0.0006,
'GAME': 0.005,
'DASH': 0.001,
'BTG': 0.0008,
'PLN': 4,
'EUR': 1.5,
},
},
},
'exceptions': {
'400': ExchangeError,
'401': InvalidOrder, # Invalid order type
'402': InvalidOrder, # No orders with specified currencies
'403': InvalidOrder, # Invalid payment currency name
'404': InvalidOrder, # Error. Wrong transaction type
'405': InvalidOrder, # Order with self id doesn't exist
'406': InsufficientFunds,
'408': InvalidOrder,
'501': AuthenticationError,
'502': AuthenticationError,
'503': InvalidNonce,
'504': ExchangeError, # Invalid method
'505': AuthenticationError, # Key has no permission for self action
'506': AuthenticationError, # Account locked. Please contact with customer service
# codes 507 and 508 are not specified in their docs
'509': ExchangeError, # The BIC/SWIFT is required for self currency
'510': ExchangeError, # Invalid market name
},
})
async def fetch_balance(self, params={}):
response = await self.privatePostInfo()
if 'balances' in response:
balance = response['balances']
result = {'info': balance}
codes = list(self.currencies.keys())
for i in range(0, len(codes)):
code = codes[i]
currency = self.currencies[code]
id = currency['id']
account = self.account()
if id in balance:
account['free'] = float(balance[id]['available'])
account['used'] = float(balance[id]['locked'])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
raise ExchangeError(self.id + ' empty balance response ' + self.json(response))
async def fetch_order_book(self, symbol, limit=None, params={}):
orderbook = await self.publicGetIdOrderbook(self.extend({
'id': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
async def fetch_ticker(self, symbol, params={}):
ticker = await self.publicGetIdTicker(self.extend({
'id': self.market_id(symbol),
}, params))
timestamp = self.milliseconds()
baseVolume = self.safe_float(ticker, 'volume')
vwap = self.safe_float(ticker, 'vwap')
quoteVolume = baseVolume * vwap
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'max'),
'low': self.safe_float(ticker, 'min'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'average'),
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = trade['date'] * 1000
return {
'id': trade['tid'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['type'],
'price': trade['price'],
'amount': trade['amount'],
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = await self.publicGetIdTrades(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
market = self.market(symbol)
return self.privatePostTrade(self.extend({
'type': side,
'currency': market['baseId'],
'amount': amount,
'payment_currency': market['quoteId'],
'rate': price,
}, params))
async def cancel_order(self, id, symbol=None, params={}):
return await self.privatePostCancel({'id': id})
def is_fiat(self, currency):
fiatCurrencies = {
'USD': True,
'EUR': True,
'PLN': True,
}
if currency in fiatCurrencies:
return True
return False
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
method = None
currency = self.currency(code)
request = {
'currency': currency['id'],
'quantity': amount,
}
if self.is_fiat(code):
method = 'privatePostWithdraw'
# request['account'] = params['account'] # they demand an account number
# request['express'] = params['express'] # whatever it means, they don't explain
else:
method = 'privatePostTransfer'
if tag is not None:
address += '?dt=' + str(tag)
request['address'] = address
response = await getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
if api == 'public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params) + '.json'
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'method': path,
'moment': self.nonce(),
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return
if len(body) < 2:
return
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
if 'code' in response:
# 401 Invalid order type
# 402 No orders with specified currencies
# 403 Invalid payment currency name
# 404 Error. Wrong transaction type
# 405 Order with self id doesn't exist
# 504 Invalid method
# 505 Key has no permission for self action
# 506 Account locked. Please contact with customer service
# 509 The BIC/SWIFT is required for self currency
# 510 Invalid market name
#
code = response['code'] # always an integer
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in self.exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| true | true |
f7198ae184bcaa5b0b938cc560dc8df6ff0d66d1 | 93,728 | py | Python | keras/layers/recurrent.py | Duncanswilson/keras | 32aa192548b6b59bf407e583fbd246ba9f5f5676 | [
"MIT"
] | 1 | 2017-11-01T19:10:35.000Z | 2017-11-01T19:10:35.000Z | keras/layers/recurrent.py | dmaniry/keras | 32aa192548b6b59bf407e583fbd246ba9f5f5676 | [
"MIT"
] | null | null | null | keras/layers/recurrent.py | dmaniry/keras | 32aa192548b6b59bf407e583fbd246ba9f5f5676 | [
"MIT"
] | 1 | 2019-02-22T03:06:41.000Z | 2019-02-22T03:06:41.000Z | # -*- coding: utf-8 -*-
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import warnings
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from ..utils.generic_utils import has_arg
# Legacy support.
from ..legacy.layers import Recurrent
from ..legacy import interfaces
class StackedRNNCells(Layer):
    """Wrapper allowing a stack of RNN cells to behave as a single cell.

    Used to implement efficient stacked RNNs.

    # Arguments
        cells: List of RNN cell instances.

    # Examples

    ```python
        cells = [
            keras.layers.LSTMCell(output_dim),
            keras.layers.LSTMCell(output_dim),
            keras.layers.LSTMCell(output_dim),
        ]

        inputs = keras.Input((timesteps, input_dim))
        x = keras.layers.RNN(cells)(inputs)
    ```
    """

    def __init__(self, cells, **kwargs):
        # Validate that every entry quacks like an RNN cell up front.
        for cell in cells:
            if not hasattr(cell, 'call'):
                raise ValueError('All cells must have a `call` method. '
                                 'received cells:', cells)
            if not hasattr(cell, 'state_size'):
                raise ValueError('All cells must have a '
                                 '`state_size` attribute. '
                                 'received cells:', cells)
        self.cells = cells
        super(StackedRNNCells, self).__init__(**kwargs)

    @property
    def state_size(self):
        # States are a flat list
        # in reverse order of the cell stack.
        # This allows to preserve the requirement
        # `stack.state_size[0] == output_dim`.
        # e.g. states of a 2-layer LSTM would be
        # `[h2, c2, h1, c1]`
        # (assuming one LSTM has states [h, c])
        state_size = []
        for cell in self.cells[::-1]:
            if hasattr(cell.state_size, '__len__'):
                state_size += list(cell.state_size)
            else:
                state_size.append(cell.state_size)
        return tuple(state_size)

    def call(self, inputs, states, **kwargs):
        # Recover per-cell states from the flat, reverse-ordered list.
        nested_states = []
        for cell in self.cells[::-1]:
            if hasattr(cell.state_size, '__len__'):
                nested_states.append(states[:len(cell.state_size)])
                states = states[len(cell.state_size):]
            else:
                nested_states.append([states[0]])
                states = states[1:]
        nested_states = nested_states[::-1]
        # Call the cells in order and store the returned states.
        new_nested_states = []
        for cell, states in zip(self.cells, nested_states):
            inputs, states = cell.call(inputs, states, **kwargs)
            new_nested_states.append(states)
        # Format the new states as a flat list in reverse cell order.
        states = []
        for cell_states in new_nested_states[::-1]:
            states += cell_states
        return inputs, states

    def build(self, input_shape):
        # Each cell is built on the output shape of the previous cell.
        for cell in self.cells:
            if isinstance(cell, Layer):
                cell.build(input_shape)
            if hasattr(cell.state_size, '__len__'):
                output_dim = cell.state_size[0]
            else:
                output_dim = cell.state_size
            input_shape = (input_shape[0], input_shape[1], output_dim)
        self.built = True

    def get_config(self):
        cells = []
        for cell in self.cells:
            cells.append({'class_name': cell.__class__.__name__,
                          'config': cell.get_config()})
        config = {'cells': cells}
        base_config = super(StackedRNNCells, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        from . import deserialize as deserialize_layer
        cells = []
        for cell_config in config.pop('cells'):
            cells.append(deserialize_layer(cell_config,
                                           custom_objects=custom_objects))
        return cls(cells, **config)

    @property
    def trainable_weights(self):
        if not self.trainable:
            return []
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.trainable_weights
        return weights

    @property
    def non_trainable_weights(self):
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.non_trainable_weights
        if not self.trainable:
            # When the wrapper is frozen, all weights are non-trainable;
            # keep the trainable ones first to preserve ordering.
            trainable_weights = []
            for cell in self.cells:
                if isinstance(cell, Layer):
                    trainable_weights += cell.trainable_weights
            return trainable_weights + weights
        return weights

    def get_weights(self):
        """Retrieves the weights of the model.

        # Returns
            A flat list of Numpy arrays.
        """
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.weights
        return K.batch_get_value(weights)

    def set_weights(self, weights):
        """Sets the weights of the model.

        # Arguments
            weights: A list of Numpy arrays with shapes and types matching
                the output of `model.get_weights()`.
        """
        tuples = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                num_param = len(cell.weights)
                # Bug fix: slice into a separate variable. Previously
                # `weights` itself was truncated to the first cell's
                # parameters, making `weights[num_param:]` always empty, so
                # every cell after the first was silently left unset.
                cell_weights = weights[:num_param]
                for sw, w in zip(cell.weights, cell_weights):
                    tuples.append((sw, w))
                weights = weights[num_param:]
        K.batch_set_value(tuples)

    @property
    def losses(self):
        losses = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                losses += cell.losses
        return losses

    def get_losses_for(self, inputs=None):
        losses = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                cell_losses = cell.get_losses_for(inputs)
                losses += cell_losses
        return losses
class RNN(Layer):
    """Base class for recurrent layers.

    # Arguments
        cell: A RNN cell instance. A RNN cell is a class that has:
            - a `call(input_at_t, states_at_t)` method, returning
                `(output_at_t, states_at_t_plus_1)`. The call method of the
                cell can also take the optional argument `constants`, see
                section "Note on passing external constants" below.
            - a `state_size` attribute. This can be a single integer
                (single state) in which case it is
                the size of the recurrent state
                (which should be the same as the size of the cell output).
                This can also be a list/tuple of integers
                (one size per state). In this case, the first entry
                (`state_size[0]`) should be the same as
                the size of the cell output.
            It is also possible for `cell` to be a list of RNN cell instances,
            in which cases the cells get stacked on after the other in the RNN,
            implementing an efficient stacked RNN.
        return_sequences: Boolean. Whether to return the last output.
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.
        input_dim: dimensionality of the input (integer).
            This argument (or alternatively,
            the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
        input_length: Length of input sequences, to be specified
            when it is constant.
            This argument is required if you are going to connect
            `Flatten` then `Dense` layers upstream
            (without it, the shape of the dense outputs cannot be computed).
            Note that if the recurrent layer is not the first layer
            in your model, you would need to specify the input length
            at the level of the first layer
            (e.g. via the `input_shape` argument)

    # Input shape
        3D tensor with shape `(batch_size, timesteps, input_dim)`.

    # Output shape
        - if `return_state`: a list of tensors. The first tensor is
            the output. The remaining tensors are the last states,
            each with shape `(batch_size, units)`.
        - if `return_sequences`: 3D tensor with shape
            `(batch_size, timesteps, units)`.
        - else, 2D tensor with shape `(batch_size, units)`.

    # Masking
        This layer supports masking for input data with a variable number
        of timesteps. To introduce masks to your data,
        use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
        set to `True`.

    # Note on using statefulness in RNNs
        You can set RNN layers to be 'stateful', which means that the states
        computed for the samples in one batch will be reused as initial states
        for the samples in the next batch. This assumes a one-to-one mapping
        between samples in different successive batches.

        To enable statefulness:
            - specify `stateful=True` in the layer constructor.
            - specify a fixed batch size for your model, by passing
                if sequential model:
                    `batch_input_shape=(...)` to the first layer in your model.
                else for functional model with 1 or more Input layers:
                    `batch_shape=(...)` to all the first layers in your model.
                This is the expected shape of your inputs
                *including the batch size*.
                It should be a tuple of integers, e.g. `(32, 10, 100)`.
            - specify `shuffle=False` when calling fit().

        To reset the states of your model, call `.reset_states()` on either
        a specific layer, or on your entire model.

    # Note on specifying the initial state of RNNs
        You can specify the initial state of RNN layers symbolically by
        calling them with the keyword argument `initial_state`. The value of
        `initial_state` should be a tensor or list of tensors representing
        the initial state of the RNN layer.

        You can specify the initial state of RNN layers numerically by
        calling `reset_states` with the keyword argument `states`. The value of
        `states` should be a numpy array or list of numpy arrays representing
        the initial state of the RNN layer.

    # Note on passing external constants to RNNs
        You can pass "external" constants to the cell using the `constants`
        keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
        requires that the `cell.call` method accepts the same keyword argument
        `constants`. Such constants can be used to condition the cell
        transformation on additional static inputs (not changing over time),
        a.k.a. an attention mechanism.

    # Examples

    ```python
        # First, let's define a RNN Cell, as a layer subclass.

        class MinimalRNNCell(keras.layers.Layer):

            def __init__(self, units, **kwargs):
                self.units = units
                self.state_size = units
                super(MinimalRNNCell, self).__init__(**kwargs)

            def build(self, input_shape):
                self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                              initializer='uniform',
                                              name='kernel')
                self.recurrent_kernel = self.add_weight(
                    shape=(self.units, self.units),
                    initializer='uniform',
                    name='recurrent_kernel')
                self.built = True

            def call(self, inputs, states):
                prev_output = states[0]
                h = K.dot(inputs, self.kernel)
                output = h + K.dot(prev_output, self.recurrent_kernel)
                return output, [output]

        # Let's use this cell in a RNN layer:

        cell = MinimalRNNCell(32)
        x = keras.Input((None, 5))
        layer = RNN(cell)
        y = layer(x)

        # Here's how to use the cell to build a stacked RNN:

        cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
        x = keras.Input((None, 5))
        layer = RNN(cells)
        y = layer(x)
    ```
    """

    def __init__(self, cell,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        # A list/tuple of cells is wrapped into a single stacked cell.
        if isinstance(cell, (list, tuple)):
            cell = StackedRNNCells(cell)
        if not hasattr(cell, 'call'):
            raise ValueError('`cell` should have a `call` method. '
                             'The RNN was passed:', cell)
        if not hasattr(cell, 'state_size'):
            raise ValueError('The RNN cell should have '
                             'an attribute `state_size` '
                             '(tuple of integers, '
                             'one integer per RNN state).')
        super(RNN, self).__init__(**kwargs)
        self.cell = cell
        self.return_sequences = return_sequences
        self.return_state = return_state
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.unroll = unroll
        self.supports_masking = True
        self.input_spec = [InputSpec(ndim=3)]
        self.state_spec = None
        self._states = None
        self.constants_spec = None
        # Number of constants tensors appended to the inputs, set in __call__.
        self._num_constants = None

    @property
    def states(self):
        # Lazily report a list of None placeholders (one per cell state)
        # until real state variables are created (see reset_states).
        if self._states is None:
            if isinstance(self.cell.state_size, int):
                num_states = 1
            else:
                num_states = len(self.cell.state_size)
            return [None for _ in range(num_states)]
        return self._states

    @states.setter
    def states(self, states):
        self._states = states

    def compute_output_shape(self, input_shape):
        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        if hasattr(self.cell.state_size, '__len__'):
            state_size = self.cell.state_size
        else:
            state_size = [self.cell.state_size]
        # By convention the cell's output dim equals its first state size.
        output_dim = state_size[0]

        if self.return_sequences:
            output_shape = (input_shape[0], input_shape[1], output_dim)
        else:
            output_shape = (input_shape[0], output_dim)

        if self.return_state:
            state_shape = [(input_shape[0], dim) for dim in state_size]
            return [output_shape] + state_shape
        else:
            return output_shape

    def compute_mask(self, inputs, mask):
        if isinstance(mask, list):
            mask = mask[0]
        # The output mask is only propagated when returning full sequences;
        # state outputs never carry a mask.
        output_mask = mask if self.return_sequences else None
        if self.return_state:
            state_mask = [None for _ in self.states]
            return [output_mask] + state_mask
        else:
            return output_mask

    def build(self, input_shape):
        # Note input_shape will be list of shapes of initial states and
        # constants if these are passed in __call__.
        if self._num_constants is not None:
            constants_shape = input_shape[-self._num_constants:]
        else:
            constants_shape = None

        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        batch_size = input_shape[0] if self.stateful else None
        input_dim = input_shape[-1]
        self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))

        # allow cell (if layer) to build before we set or validate state_spec
        if isinstance(self.cell, Layer):
            step_input_shape = (input_shape[0],) + input_shape[2:]
            if constants_shape is not None:
                self.cell.build([step_input_shape] + constants_shape)
            else:
                self.cell.build(step_input_shape)

        # set or validate state_spec
        if hasattr(self.cell.state_size, '__len__'):
            state_size = list(self.cell.state_size)
        else:
            state_size = [self.cell.state_size]

        if self.state_spec is not None:
            # initial_state was passed in call, check compatibility
            if [spec.shape[-1] for spec in self.state_spec] != state_size:
                raise ValueError(
                    'An `initial_state` was passed that is not compatible with '
                    '`cell.state_size`. Received `state_spec`={}; '
                    'however `cell.state_size` is '
                    '{}'.format(self.state_spec, self.cell.state_size))
        else:
            self.state_spec = [InputSpec(shape=(None, dim))
                               for dim in state_size]
        if self.stateful:
            self.reset_states()

    def get_initial_state(self, inputs):
        # build an all-zero tensor of shape (samples, output_dim)
        initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
        initial_state = K.expand_dims(initial_state)  # (samples, 1)
        if hasattr(self.cell.state_size, '__len__'):
            return [K.tile(initial_state, [1, dim])
                    for dim in self.cell.state_size]
        else:
            return [K.tile(initial_state, [1, self.cell.state_size])]

    def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
        inputs, initial_state, constants = self._standardize_args(
            inputs, initial_state, constants)

        if initial_state is None and constants is None:
            return super(RNN, self).__call__(inputs, **kwargs)

        # If any of `initial_state` or `constants` are specified and are Keras
        # tensors, then add them to the inputs and temporarily modify the
        # input_spec to include them.

        additional_inputs = []
        additional_specs = []
        if initial_state is not None:
            kwargs['initial_state'] = initial_state
            additional_inputs += initial_state
            self.state_spec = [InputSpec(shape=K.int_shape(state))
                               for state in initial_state]
            additional_specs += self.state_spec
        if constants is not None:
            kwargs['constants'] = constants
            additional_inputs += constants
            self.constants_spec = [InputSpec(shape=K.int_shape(constant))
                                   for constant in constants]
            self._num_constants = len(constants)
            additional_specs += self.constants_spec
        # at this point additional_inputs cannot be empty
        is_keras_tensor = hasattr(additional_inputs[0], '_keras_history')
        for tensor in additional_inputs:
            if hasattr(tensor, '_keras_history') != is_keras_tensor:
                raise ValueError('The initial state or constants of an RNN'
                                 ' layer cannot be specified with a mix of'
                                 ' Keras tensors and non-Keras tensors')

        if is_keras_tensor:
            # Compute the full input spec, including state and constants
            full_input = [inputs] + additional_inputs
            full_input_spec = self.input_spec + additional_specs
            # Perform the call with temporarily replaced input_spec
            original_input_spec = self.input_spec
            self.input_spec = full_input_spec
            output = super(RNN, self).__call__(full_input, **kwargs)
            self.input_spec = original_input_spec
            return output
        else:
            return super(RNN, self).__call__(inputs, **kwargs)

    def call(self,
             inputs,
             mask=None,
             training=None,
             initial_state=None,
             constants=None):
        # input shape: `(samples, time (padded with zeros), input_dim)`
        # note that the .build() method of subclasses MUST define
        # self.input_spec and self.state_spec with complete input shapes.
        if isinstance(inputs, list):
            inputs = inputs[0]
        # Resolution order for the initial state: explicit argument, then
        # persisted stateful variables, then all-zeros.
        if initial_state is not None:
            pass
        elif self.stateful:
            initial_state = self.states
        else:
            initial_state = self.get_initial_state(inputs)

        if isinstance(mask, list):
            mask = mask[0]

        if len(initial_state) != len(self.states):
            raise ValueError('Layer has ' + str(len(self.states)) +
                             ' states but was passed ' +
                             str(len(initial_state)) +
                             ' initial states.')
        input_shape = K.int_shape(inputs)
        timesteps = input_shape[1]
        if self.unroll and timesteps in [None, 1]:
            raise ValueError('Cannot unroll a RNN if the '
                             'time dimension is undefined or equal to 1. \n'
                             '- If using a Sequential model, '
                             'specify the time dimension by passing '
                             'an `input_shape` or `batch_input_shape` '
                             'argument to your first layer. If your '
                             'first layer is an Embedding, you can '
                             'also use the `input_length` argument.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a `shape` '
                             'or `batch_shape` argument to your Input layer.')

        kwargs = {}
        if has_arg(self.cell.call, 'training'):
            kwargs['training'] = training

        if constants:
            if not has_arg(self.cell.call, 'constants'):
                raise ValueError('RNN cell does not support constants')

            def step(inputs, states):
                # Constants ride along at the end of the states list;
                # split them back out before calling the cell.
                constants = states[-self._num_constants:]
                states = states[:-self._num_constants]
                return self.cell.call(inputs, states, constants=constants,
                                      **kwargs)
        else:
            def step(inputs, states):
                return self.cell.call(inputs, states, **kwargs)

        last_output, outputs, states = K.rnn(step,
                                             inputs,
                                             initial_state,
                                             constants=constants,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             unroll=self.unroll,
                                             input_length=timesteps)
        if self.stateful:
            updates = []
            for i in range(len(states)):
                updates.append((self.states[i], states[i]))
            self.add_update(updates, inputs)

        if self.return_sequences:
            output = outputs
        else:
            output = last_output

        # Properly set learning phase
        if getattr(last_output, '_uses_learning_phase', False):
            output._uses_learning_phase = True
            for state in states:
                state._uses_learning_phase = True

        if self.return_state:
            if not isinstance(states, (list, tuple)):
                states = [states]
            else:
                states = list(states)
            return [output] + states
        else:
            return output

    def _standardize_args(self, inputs, initial_state, constants):
        """Standardize `__call__` to a single list of tensor inputs.

        When running a model loaded from file, the input tensors
        `initial_state` and `constants` can be passed to `RNN.__call__` as part
        of `inputs` instead of by the dedicated keyword arguments. This method
        makes sure the arguments are separated and that `initial_state` and
        `constants` are lists of tensors (or None).

        # Arguments
            inputs: tensor or list/tuple of tensors
            initial_state: tensor or list of tensors or None
            constants: tensor or list of tensors or None

        # Returns
            inputs: tensor
            initial_state: list of tensors or None
            constants: list of tensors or None
        """
        if isinstance(inputs, list):
            assert initial_state is None and constants is None
            # Inputs were flattened as [inputs] + initial_state + constants;
            # peel constants off the end first, then the states.
            if self._num_constants is not None:
                constants = inputs[-self._num_constants:]
                inputs = inputs[:-self._num_constants]
            if len(inputs) > 1:
                initial_state = inputs[1:]
            inputs = inputs[0]

        def to_list_or_none(x):
            if x is None or isinstance(x, list):
                return x
            if isinstance(x, tuple):
                return list(x)
            return [x]

        initial_state = to_list_or_none(initial_state)
        constants = to_list_or_none(constants)

        return inputs, initial_state, constants

    def reset_states(self, states=None):
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        batch_size = self.input_spec[0].shape[0]
        if not batch_size:
            raise ValueError('If a RNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors: \n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the batch size by passing a '
                             '`batch_shape` argument to your Input layer.')
        # initialize state if None
        if self.states[0] is None:
            if hasattr(self.cell.state_size, '__len__'):
                self.states = [K.zeros((batch_size, dim))
                               for dim in self.cell.state_size]
            else:
                self.states = [K.zeros((batch_size, self.cell.state_size))]
        elif states is None:
            # No explicit values given: zero out the existing variables.
            if hasattr(self.cell.state_size, '__len__'):
                for state, dim in zip(self.states, self.cell.state_size):
                    K.set_value(state, np.zeros((batch_size, dim)))
            else:
                K.set_value(self.states[0],
                            np.zeros((batch_size, self.cell.state_size)))
        else:
            if not isinstance(states, (list, tuple)):
                states = [states]
            if len(states) != len(self.states):
                raise ValueError('Layer ' + self.name + ' expects ' +
                                 str(len(self.states)) + ' states, '
                                 'but it received ' + str(len(states)) +
                                 ' state values. Input received: ' +
                                 str(states))
            for index, (value, state) in enumerate(zip(states, self.states)):
                if hasattr(self.cell.state_size, '__len__'):
                    dim = self.cell.state_size[index]
                else:
                    dim = self.cell.state_size
                if value.shape != (batch_size, dim):
                    raise ValueError('State ' + str(index) +
                                     ' is incompatible with layer ' +
                                     self.name + ': expected shape=' +
                                     str((batch_size, dim)) +
                                     ', found shape=' + str(value.shape))
                # TODO: consider batch calls to `set_value`.
                K.set_value(state, value)

    def get_config(self):
        config = {'return_sequences': self.return_sequences,
                  'return_state': self.return_state,
                  'go_backwards': self.go_backwards,
                  'stateful': self.stateful,
                  'unroll': self.unroll}
        if self._num_constants is not None:
            config['num_constants'] = self._num_constants

        cell_config = self.cell.get_config()
        config['cell'] = {'class_name': self.cell.__class__.__name__,
                          'config': cell_config}
        base_config = super(RNN, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        from . import deserialize as deserialize_layer
        cell = deserialize_layer(config.pop('cell'),
                                 custom_objects=custom_objects)
        num_constants = config.pop('num_constants', None)
        layer = cls(cell, **config)
        layer._num_constants = num_constants
        return layer

    @property
    def trainable_weights(self):
        if not self.trainable:
            return []
        if isinstance(self.cell, Layer):
            return self.cell.trainable_weights
        return []

    @property
    def non_trainable_weights(self):
        if isinstance(self.cell, Layer):
            if not self.trainable:
                return self.cell.weights
            return self.cell.non_trainable_weights
        return []

    @property
    def losses(self):
        if isinstance(self.cell, Layer):
            return self.cell.losses
        return []

    def get_losses_for(self, inputs=None):
        if isinstance(self.cell, Layer):
            cell_losses = self.cell.get_losses_for(inputs)
            return cell_losses + super(RNN, self).get_losses_for(inputs)
        return super(RNN, self).get_losses_for(inputs)
class SimpleRNNCell(Layer):
    """Cell class for SimpleRNN.

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
    """

    def __init__(self, units,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(SimpleRNNCell, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        # Dropout rates are clamped into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        # Single recurrent state whose size equals the output dimension.
        self.state_size = self.units
        # Dropout masks are created lazily on the first call and then
        # reused for every timestep (see call below).
        self._dropout_mask = None
        self._recurrent_dropout_mask = None

    def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        name='bias',
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def call(self, inputs, states, training=None):
        prev_output = states[0]
        # Generate the (cached) dropout masks once so the same mask is
        # applied at every timestep of the sequence.
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, K.shape(inputs)[-1]),
                self.dropout,
                training=training)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, self.units),
                self.recurrent_dropout,
                training=training)

        dp_mask = self._dropout_mask
        rec_dp_mask = self._recurrent_dropout_mask

        # output = activation(W x + U h_prev + b), with optional dropout
        # applied to the input and to the previous output.
        if dp_mask is not None:
            h = K.dot(inputs * dp_mask, self.kernel)
        else:
            h = K.dot(inputs, self.kernel)
        if self.bias is not None:
            h = K.bias_add(h, self.bias)

        if rec_dp_mask is not None:
            prev_output *= rec_dp_mask
        output = h + K.dot(prev_output, self.recurrent_kernel)
        if self.activation is not None:
            output = self.activation(output)

        # Properly set learning phase on output tensor.
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                output._uses_learning_phase = True
        return output, [output]

    def get_config(self):
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(SimpleRNNCell, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class SimpleRNN(RNN):
    """Fully-connected RNN where the output is to be fed back to input.
    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.
    """
    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        # `implementation` is accepted but ignored for backward
        # compatibility with configs saved by older Keras versions.
        if 'implementation' in kwargs:
            kwargs.pop('implementation')
            warnings.warn('The `implementation` argument '
                          'in `SimpleRNN` has been deprecated. '
                          'Please remove it from your layer call.')
        # Dropout is force-disabled on Theano; see the warning text.
        if K.backend() == 'theano':
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        # The layer is a thin RNN wrapper around a SimpleRNNCell that
        # holds all the per-step parameters.
        cell = SimpleRNNCell(units,
                             activation=activation,
                             use_bias=use_bias,
                             kernel_initializer=kernel_initializer,
                             recurrent_initializer=recurrent_initializer,
                             bias_initializer=bias_initializer,
                             kernel_regularizer=kernel_regularizer,
                             recurrent_regularizer=recurrent_regularizer,
                             bias_regularizer=bias_regularizer,
                             kernel_constraint=kernel_constraint,
                             recurrent_constraint=recurrent_constraint,
                             bias_constraint=bias_constraint,
                             dropout=dropout,
                             recurrent_dropout=recurrent_dropout)
        super(SimpleRNN, self).__init__(cell,
                                        return_sequences=return_sequences,
                                        return_state=return_state,
                                        go_backwards=go_backwards,
                                        stateful=stateful,
                                        unroll=unroll,
                                        **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)
    def call(self, inputs, mask=None, training=None, initial_state=None):
        """Delegate to the generic RNN call loop over the wrapped cell."""
        return super(SimpleRNN, self).call(inputs,
                                           mask=mask,
                                           training=training,
                                           initial_state=initial_state)
    # The properties below simply proxy the corresponding attributes of
    # the wrapped SimpleRNNCell.
    @property
    def units(self):
        return self.cell.units
    @property
    def activation(self):
        return self.cell.activation
    @property
    def use_bias(self):
        return self.cell.use_bias
    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer
    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer
    @property
    def bias_initializer(self):
        return self.cell.bias_initializer
    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer
    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer
    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer
    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint
    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint
    @property
    def bias_constraint(self):
        return self.cell.bias_constraint
    @property
    def dropout(self):
        return self.cell.dropout
    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout
    def get_config(self):
        """Serialize the layer with a flat config (no nested cell config)."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(SimpleRNN, self).get_config()
        # The cell is rebuilt from the flat config in __init__, so the
        # nested cell config from RNN.get_config is redundant.
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))
    @classmethod
    def from_config(cls, config):
        """Rebuild the layer, dropping the deprecated `implementation` key."""
        if 'implementation' in config:
            config.pop('implementation')
        return cls(**config)
class GRUCell(Layer):
    """Cell class for the GRU layer.
    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        implementation: Implementation mode, either 1 or 2.
            Mode 1 will structure its operations as a larger number of
            smaller dot products and additions, whereas mode 2 will
            batch them into fewer, larger operations. These modes will
            have different performance profiles on different hardware and
            for different applications.
    """
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(GRUCell, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Dropout rates are clamped into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        self.state_size = self.units
        # Dropout masks are created lazily in call() and cached here.
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
    def build(self, input_shape):
        """Create the weights: fused kernels of width 3 * units (z, r, h)."""
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(input_dim, self.units * 3),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 3),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units * 3,),
                                        name='bias',
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Per-gate views into the fused kernels: update (z), reset (r),
        # candidate (h). These are slices, not copies.
        self.kernel_z = self.kernel[:, :self.units]
        self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
        self.kernel_r = self.kernel[:, self.units: self.units * 2]
        self.recurrent_kernel_r = self.recurrent_kernel[:,
                                                        self.units:
                                                        self.units * 2]
        self.kernel_h = self.kernel[:, self.units * 2:]
        self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
        if self.use_bias:
            self.bias_z = self.bias[:self.units]
            self.bias_r = self.bias[self.units: self.units * 2]
            self.bias_h = self.bias[self.units * 2:]
        else:
            self.bias_z = None
            self.bias_r = None
            self.bias_h = None
        self.built = True
    def call(self, inputs, states, training=None):
        """Run one GRU step.

        # Arguments
            inputs: Input tensor for the current timestep.
            states: List with one tensor, the previous output `h_tm1`.
            training: Python boolean or tensor for the dropout phase.

        # Returns
            Tuple `(h, [h])` with the new hidden state.
        """
        h_tm1 = states[0]  # previous memory
        # Lazily build one dropout mask per gate (count=3: z, r, h).
        # NOTE(review): masks persist while non-None - presumably reset
        # externally between batches; confirm against the RNN wrapper.
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, K.shape(inputs)[-1]),
                self.dropout,
                training=training,
                count=3)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, self.units),
                self.recurrent_dropout,
                training=training,
                count=3)
        # dropout matrices for input units
        dp_mask = self._dropout_mask
        # dropout matrices for recurrent units
        rec_dp_mask = self._recurrent_dropout_mask
        if self.implementation == 1:
            # Mode 1: separate small matmuls per gate.
            if 0. < self.dropout < 1.:
                inputs_z = inputs * dp_mask[0]
                inputs_r = inputs * dp_mask[1]
                inputs_h = inputs * dp_mask[2]
            else:
                inputs_z = inputs
                inputs_r = inputs
                inputs_h = inputs
            x_z = K.dot(inputs_z, self.kernel_z)
            x_r = K.dot(inputs_r, self.kernel_r)
            x_h = K.dot(inputs_h, self.kernel_h)
            if self.use_bias:
                x_z = K.bias_add(x_z, self.bias_z)
                x_r = K.bias_add(x_r, self.bias_r)
                x_h = K.bias_add(x_h, self.bias_h)
            if 0. < self.recurrent_dropout < 1.:
                h_tm1_z = h_tm1 * rec_dp_mask[0]
                h_tm1_r = h_tm1 * rec_dp_mask[1]
                h_tm1_h = h_tm1 * rec_dp_mask[2]
            else:
                h_tm1_z = h_tm1
                h_tm1_r = h_tm1
                h_tm1_h = h_tm1
            # z: update gate, r: reset gate, hh: candidate state.
            z = self.recurrent_activation(x_z + K.dot(h_tm1_z,
                                                      self.recurrent_kernel_z))
            r = self.recurrent_activation(x_r + K.dot(h_tm1_r,
                                                      self.recurrent_kernel_r))
            hh = self.activation(x_h + K.dot(r * h_tm1_h,
                                             self.recurrent_kernel_h))
        else:
            # Mode 2: fused matmuls, then slice out the per-gate parts.
            if 0. < self.dropout < 1.:
                inputs *= dp_mask[0]
            matrix_x = K.dot(inputs, self.kernel)
            if self.use_bias:
                matrix_x = K.bias_add(matrix_x, self.bias)
            if 0. < self.recurrent_dropout < 1.:
                h_tm1 *= rec_dp_mask[0]
            # Only z and r use the plain recurrent state; the candidate
            # uses the reset-gated state and is computed separately below.
            matrix_inner = K.dot(h_tm1,
                                 self.recurrent_kernel[:, :2 * self.units])
            x_z = matrix_x[:, :self.units]
            x_r = matrix_x[:, self.units: 2 * self.units]
            recurrent_z = matrix_inner[:, :self.units]
            recurrent_r = matrix_inner[:, self.units: 2 * self.units]
            z = self.recurrent_activation(x_z + recurrent_z)
            r = self.recurrent_activation(x_r + recurrent_r)
            x_h = matrix_x[:, 2 * self.units:]
            recurrent_h = K.dot(r * h_tm1,
                                self.recurrent_kernel[:, 2 * self.units:])
            hh = self.activation(x_h + recurrent_h)
        # Interpolate between previous state and candidate.
        h = z * h_tm1 + (1 - z) * hh
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                h._uses_learning_phase = True
        return h, [h]
    def get_config(self):
        """Return the serializable configuration of this cell."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(GRUCell, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class GRU(RNN):
    """Gated Recurrent Unit - Cho et al. 2014.
    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        implementation: Implementation mode, either 1 or 2.
            Mode 1 will structure its operations as a larger number of
            smaller dot products and additions, whereas mode 2 will
            batch them into fewer, larger operations. These modes will
            have different performance profiles on different hardware and
            for different applications.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.
    # References
        - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
        - [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)
        - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
    """
    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        # Legacy `implementation=0` is mapped to the default mode 1
        # (the GRUCell below is still constructed with the passed value).
        if implementation == 0:
            warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`.'
                          'Please update your layer call.')
        # Dropout is force-disabled on Theano; see the warning text.
        if K.backend() == 'theano':
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        # The layer is a thin RNN wrapper around a GRUCell that holds all
        # the per-step parameters.
        cell = GRUCell(units,
                       activation=activation,
                       recurrent_activation=recurrent_activation,
                       use_bias=use_bias,
                       kernel_initializer=kernel_initializer,
                       recurrent_initializer=recurrent_initializer,
                       bias_initializer=bias_initializer,
                       kernel_regularizer=kernel_regularizer,
                       recurrent_regularizer=recurrent_regularizer,
                       bias_regularizer=bias_regularizer,
                       kernel_constraint=kernel_constraint,
                       recurrent_constraint=recurrent_constraint,
                       bias_constraint=bias_constraint,
                       dropout=dropout,
                       recurrent_dropout=recurrent_dropout,
                       implementation=implementation)
        super(GRU, self).__init__(cell,
                                  return_sequences=return_sequences,
                                  return_state=return_state,
                                  go_backwards=go_backwards,
                                  stateful=stateful,
                                  unroll=unroll,
                                  **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)
    def call(self, inputs, mask=None, training=None, initial_state=None):
        """Delegate to the generic RNN call loop over the wrapped cell."""
        return super(GRU, self).call(inputs,
                                     mask=mask,
                                     training=training,
                                     initial_state=initial_state)
    # The properties below simply proxy the corresponding attributes of
    # the wrapped GRUCell.
    @property
    def units(self):
        return self.cell.units
    @property
    def activation(self):
        return self.cell.activation
    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation
    @property
    def use_bias(self):
        return self.cell.use_bias
    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer
    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer
    @property
    def bias_initializer(self):
        return self.cell.bias_initializer
    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer
    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer
    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer
    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint
    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint
    @property
    def bias_constraint(self):
        return self.cell.bias_constraint
    @property
    def dropout(self):
        return self.cell.dropout
    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout
    @property
    def implementation(self):
        return self.cell.implementation
    def get_config(self):
        """Serialize the layer with a flat config (no nested cell config)."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(GRU, self).get_config()
        # The cell is rebuilt from the flat config in __init__, so the
        # nested cell config from RNN.get_config is redundant.
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))
    @classmethod
    def from_config(cls, config):
        """Rebuild the layer, mapping legacy `implementation=0` to 1."""
        if 'implementation' in config and config['implementation'] == 0:
            config['implementation'] = 1
        return cls(**config)
class LSTMCell(Layer):
    """Cell class for the LSTM layer.
    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        unit_forget_bias: Boolean.
            If True, add 1 to the bias of the forget gate at initialization.
            Setting it to true will also force `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        implementation: Implementation mode, either 1 or 2.
            Mode 1 will structure its operations as a larger number of
            smaller dot products and additions, whereas mode 2 will
            batch them into fewer, larger operations. These modes will
            have different performance profiles on different hardware and
            for different applications.
    """
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(LSTMCell, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Dropout rates are clamped into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        # Two states: hidden state h and carry (cell) state c.
        self.state_size = (self.units, self.units)
        # Dropout masks are created lazily in call() and cached here.
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
    def build(self, input_shape):
        """Create the weights: fused kernels of width 4 * units (i, f, c, o)."""
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(input_dim, self.units * 4),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            if self.unit_forget_bias:
                # Initialize the forget-gate slice of the bias to ones and
                # the remaining slices with the configured initializer.
                def bias_initializer(_, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units,), *args, **kwargs),
                        initializers.Ones()((self.units,), *args, **kwargs),
                        self.bias_initializer((self.units * 2,), *args, **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 4,),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Per-gate views into the fused kernels: input (i), forget (f),
        # cell candidate (c), output (o). These are slices, not copies.
        self.kernel_i = self.kernel[:, :self.units]
        self.kernel_f = self.kernel[:, self.units: self.units * 2]
        self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
        self.kernel_o = self.kernel[:, self.units * 3:]
        self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
        self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
        self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
        self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
        if self.use_bias:
            self.bias_i = self.bias[:self.units]
            self.bias_f = self.bias[self.units: self.units * 2]
            self.bias_c = self.bias[self.units * 2: self.units * 3]
            self.bias_o = self.bias[self.units * 3:]
        else:
            self.bias_i = None
            self.bias_f = None
            self.bias_c = None
            self.bias_o = None
        self.built = True
    def call(self, inputs, states, training=None):
        """Run one LSTM step.

        # Arguments
            inputs: Input tensor for the current timestep.
            states: List of two tensors: previous hidden state `h_tm1`
                and previous carry state `c_tm1`.
            training: Python boolean or tensor for the dropout phase.

        # Returns
            Tuple `(h, [h, c])` with the new hidden and carry states.
        """
        # Lazily build one dropout mask per gate (count=4: i, f, c, o).
        # NOTE(review): masks persist while non-None - presumably reset
        # externally between batches; confirm against the RNN wrapper.
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, K.shape(inputs)[-1]),
                self.dropout,
                training=training,
                count=4)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, self.units),
                self.recurrent_dropout,
                training=training,
                count=4)
        # dropout matrices for input units
        dp_mask = self._dropout_mask
        # dropout matrices for recurrent units
        rec_dp_mask = self._recurrent_dropout_mask
        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state
        if self.implementation == 1:
            # Mode 1: separate small matmuls per gate.
            if 0 < self.dropout < 1.:
                inputs_i = inputs * dp_mask[0]
                inputs_f = inputs * dp_mask[1]
                inputs_c = inputs * dp_mask[2]
                inputs_o = inputs * dp_mask[3]
            else:
                inputs_i = inputs
                inputs_f = inputs
                inputs_c = inputs
                inputs_o = inputs
            x_i = K.dot(inputs_i, self.kernel_i)
            x_f = K.dot(inputs_f, self.kernel_f)
            x_c = K.dot(inputs_c, self.kernel_c)
            x_o = K.dot(inputs_o, self.kernel_o)
            if self.use_bias:
                x_i = K.bias_add(x_i, self.bias_i)
                x_f = K.bias_add(x_f, self.bias_f)
                x_c = K.bias_add(x_c, self.bias_c)
                x_o = K.bias_add(x_o, self.bias_o)
            if 0 < self.recurrent_dropout < 1.:
                h_tm1_i = h_tm1 * rec_dp_mask[0]
                h_tm1_f = h_tm1 * rec_dp_mask[1]
                h_tm1_c = h_tm1 * rec_dp_mask[2]
                h_tm1_o = h_tm1 * rec_dp_mask[3]
            else:
                h_tm1_i = h_tm1
                h_tm1_f = h_tm1
                h_tm1_c = h_tm1
                h_tm1_o = h_tm1
            # i: input gate, f: forget gate, c: new carry, o: output gate.
            i = self.recurrent_activation(x_i + K.dot(h_tm1_i,
                                                      self.recurrent_kernel_i))
            f = self.recurrent_activation(x_f + K.dot(h_tm1_f,
                                                      self.recurrent_kernel_f))
            c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,
                                                            self.recurrent_kernel_c))
            o = self.recurrent_activation(x_o + K.dot(h_tm1_o,
                                                      self.recurrent_kernel_o))
        else:
            # Mode 2: one fused matmul, then slice out the per-gate parts.
            if 0. < self.dropout < 1.:
                inputs *= dp_mask[0]
            z = K.dot(inputs, self.kernel)
            if 0. < self.recurrent_dropout < 1.:
                h_tm1 *= rec_dp_mask[0]
            z += K.dot(h_tm1, self.recurrent_kernel)
            if self.use_bias:
                z = K.bias_add(z, self.bias)
            z0 = z[:, :self.units]
            z1 = z[:, self.units: 2 * self.units]
            z2 = z[:, 2 * self.units: 3 * self.units]
            z3 = z[:, 3 * self.units:]
            i = self.recurrent_activation(z0)
            f = self.recurrent_activation(z1)
            c = f * c_tm1 + i * self.activation(z2)
            o = self.recurrent_activation(z3)
        h = o * self.activation(c)
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                h._uses_learning_phase = True
        return h, [h, c]
    def get_config(self):
        """Return the serializable configuration of this cell."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(LSTMCell, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class LSTM(RNN):
    """Long-Short Term Memory layer - Hochreiter 1997.

    # Arguments
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use
            (see [activations](../activations.md)).
            If you pass None, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step
            (see [activations](../activations.md)).
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
            (see [initializers](../initializers.md)).
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state.
            (see [initializers](../initializers.md)).
        bias_initializer: Initializer for the bias vector
            (see [initializers](../initializers.md)).
        unit_forget_bias: Boolean.
            If True, add 1 to the bias of the forget gate at initialization.
            Setting it to true will also force `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix
            (see [regularizer](../regularizers.md)).
        bias_regularizer: Regularizer function applied to the bias vector
            (see [regularizer](../regularizers.md)).
        activity_regularizer: Regularizer function applied to
            the output of the layer (its "activation").
            (see [regularizer](../regularizers.md)).
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix
            (see [constraints](../constraints.md)).
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix
            (see [constraints](../constraints.md)).
        bias_constraint: Constraint function applied to the bias vector
            (see [constraints](../constraints.md)).
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        implementation: Implementation mode, either 1 or 2.
            Mode 1 will structure its operations as a larger number of
            smaller dot products and additions, whereas mode 2 will
            batch them into fewer, larger operations. These modes will
            have different performance profiles on different hardware and
            for different applications.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence.
        return_state: Boolean. Whether to return the last state
            in addition to the output.
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default False).
            If True, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.

    # References
        - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
        - [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)
        - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
        - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
    """

    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        if implementation == 0:
            # Fix: the original message concatenated to
            # "`implementation=1`.Please update" (missing space).
            warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`. '
                          'Please update your layer call.')
        if K.backend() == 'theano':
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        # The layer itself is a thin RNN wrapper around an LSTMCell that
        # holds all trainable configuration.
        cell = LSTMCell(units,
                        activation=activation,
                        recurrent_activation=recurrent_activation,
                        use_bias=use_bias,
                        kernel_initializer=kernel_initializer,
                        recurrent_initializer=recurrent_initializer,
                        unit_forget_bias=unit_forget_bias,
                        bias_initializer=bias_initializer,
                        kernel_regularizer=kernel_regularizer,
                        recurrent_regularizer=recurrent_regularizer,
                        bias_regularizer=bias_regularizer,
                        kernel_constraint=kernel_constraint,
                        recurrent_constraint=recurrent_constraint,
                        bias_constraint=bias_constraint,
                        dropout=dropout,
                        recurrent_dropout=recurrent_dropout,
                        implementation=implementation)
        super(LSTM, self).__init__(cell,
                                   return_sequences=return_sequences,
                                   return_state=return_state,
                                   go_backwards=go_backwards,
                                   stateful=stateful,
                                   unroll=unroll,
                                   **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)

    def call(self, inputs, mask=None, training=None, initial_state=None):
        """Run the wrapped cell over the time dimension of `inputs`."""
        return super(LSTM, self).call(inputs,
                                      mask=mask,
                                      training=training,
                                      initial_state=initial_state)

    # Read-only views of the wrapped cell's configuration.
    @property
    def units(self):
        return self.cell.units

    @property
    def activation(self):
        return self.cell.activation

    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def unit_forget_bias(self):
        return self.cell.unit_forget_bias

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    @property
    def implementation(self):
        return self.cell.implementation

    def get_config(self):
        """Serialize the layer's config; the cell entry of the base config
        is dropped because the cell is reconstructed from these keys."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(LSTM, self).get_config()
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        # Legacy configs may carry implementation 0; map it to 1.
        if 'implementation' in config and config['implementation'] == 0:
            config['implementation'] = 1
        return cls(**config)
def _generate_dropout_ones(inputs, dims):
    """Return an all-ones tensor of shape (batch_size_of(inputs), dims)."""
    if K.backend() != 'cntk':
        return K.ones((K.shape(inputs)[0], dims))
    # CNTK cannot instantiate `ones` with symbolic shapes yet, so build the
    # tensor by tiling a ones-like column instead. Remove this workaround
    # once CNTK supports symbolic shapes.
    column = K.ones_like(K.reshape(inputs[:, 0], (-1, 1)))
    return K.tile(column, (1, dims))
def _generate_dropout_mask(ones, rate, training=None, count=1):
    """Build `count` dropout masks from an all-ones tensor.

    Each mask applies `K.dropout(ones, rate)` in the training phase and
    passes `ones` through otherwise. Returns a single mask when
    `count == 1`, otherwise a list of `count` independent masks.
    """
    def _dropped():
        return K.dropout(ones, rate)

    def _one_mask():
        return K.in_train_phase(_dropped, ones, training=training)

    if count > 1:
        return [_one_mask() for _ in range(count)]
    return _one_mask()
| 43.614705 | 130 | 0.583998 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import warnings
from .. import backend as K
from .. import activations
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from ..utils.generic_utils import has_arg
from ..legacy.layers import Recurrent
from ..legacy import interfaces
class StackedRNNCells(Layer):
    """Wrapper allowing a list of RNN cells to behave as a single cell.

    Used to implement stacked RNNs: the wrapper exposes the combined
    `state_size` of all cells and threads the flat state list through
    each cell in turn on every step.

    # Arguments
        cells: List of RNN cell instances (objects with a `call` method
            and a `state_size` attribute).
    """

    def __init__(self, cells, **kwargs):
        for cell in cells:
            if not hasattr(cell, 'call'):
                raise ValueError('All cells must have a `call` method. '
                                 'received cells:', cells)
            if not hasattr(cell, 'state_size'):
                raise ValueError('All cells must have a '
                                 '`state_size` attribute. '
                                 'received cells:', cells)
        self.cells = cells
        super(StackedRNNCells, self).__init__(**kwargs)

    @property
    def state_size(self):
        # The flat state tuple is ordered with the *last* cell's states
        # first: cells are iterated in reverse when flattening.
        state_size = []
        for cell in self.cells[::-1]:
            if hasattr(cell.state_size, '__len__'):
                state_size += list(cell.state_size)
            else:
                state_size.append(cell.state_size)
        return tuple(state_size)

    def call(self, inputs, states, **kwargs):
        """Run one step through every cell, feeding each cell's output to
        the next and recollecting the per-cell states."""
        # Recover each cell's own state sublist from the flat state list
        # (which is ordered last-cell-first, matching `state_size`).
        nested_states = []
        for cell in self.cells[::-1]:
            if hasattr(cell.state_size, '__len__'):
                nested_states.append(states[:len(cell.state_size)])
                states = states[len(cell.state_size):]
            else:
                nested_states.append([states[0]])
                states = states[1:]
        nested_states = nested_states[::-1]
        # Call the cells in order, chaining outputs to inputs.
        new_nested_states = []
        for cell, states in zip(self.cells, nested_states):
            inputs, states = cell.call(inputs, states, **kwargs)
            new_nested_states.append(states)
        # Re-flatten the new states, restoring last-cell-first ordering.
        states = []
        for cell_states in new_nested_states[::-1]:
            states += cell_states
        return inputs, states

    def build(self, input_shape):
        """Build each cell in sequence; each cell's output dim becomes the
        input dim of the next."""
        for cell in self.cells:
            if isinstance(cell, Layer):
                cell.build(input_shape)
            if hasattr(cell.state_size, '__len__'):
                output_dim = cell.state_size[0]
            else:
                output_dim = cell.state_size
            input_shape = (input_shape[0], input_shape[1], output_dim)
        self.built = True

    def get_config(self):
        """Serialize the wrapper together with the config of every cell."""
        cells = []
        for cell in self.cells:
            cells.append({'class_name': cell.__class__.__name__,
                          'config': cell.get_config()})
        config = {'cells': cells}
        base_config = super(StackedRNNCells, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        from . import deserialize as deserialize_layer
        cells = []
        for cell_config in config.pop('cells'):
            cells.append(deserialize_layer(cell_config,
                                           custom_objects=custom_objects))
        return cls(cells, **config)

    @property
    def trainable_weights(self):
        if not self.trainable:
            return []
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.trainable_weights
        return weights

    @property
    def non_trainable_weights(self):
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.non_trainable_weights
        if not self.trainable:
            # When the wrapper is frozen, the would-be-trainable weights
            # are also reported as non-trainable, ahead of the rest.
            trainable_weights = []
            for cell in self.cells:
                if isinstance(cell, Layer):
                    trainable_weights += cell.trainable_weights
            return trainable_weights + weights
        return weights

    def get_weights(self):
        """Return the weights of all cells as one flat list of arrays."""
        weights = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                weights += cell.weights
        return K.batch_get_value(weights)

    def set_weights(self, weights):
        """Set the weights of all cells from one flat list of arrays.

        Bug fix: the original truncated `weights` in place
        (`weights = weights[:num_param]` followed by
        `weights = weights[num_param:]` left an empty list), so every
        cell after the first silently received no weights.
        """
        tuples = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                num_param = len(cell.weights)
                # Take this cell's slice without clobbering the remainder.
                cell_weights = weights[:num_param]
                for sw, w in zip(cell.weights, cell_weights):
                    tuples.append((sw, w))
                weights = weights[num_param:]
        K.batch_set_value(tuples)

    @property
    def losses(self):
        losses = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                cell_losses = cell.losses
                losses += cell_losses
        return losses

    def get_losses_for(self, inputs=None):
        losses = []
        for cell in self.cells:
            if isinstance(cell, Layer):
                cell_losses = cell.get_losses_for(inputs)
                losses += cell_losses
        return losses
class RNN(Layer):
    """Base class for recurrent layers: iterates a `cell` over the time
    dimension of its input.

    `cell` is any object with a `call(inputs, states)` method and a
    `state_size` attribute (an integer, or a tuple/list of integers, one
    per state tensor). A list/tuple of cells is wrapped into a
    `StackedRNNCells`.

    # Arguments
        cell: RNN cell instance, or list of cell instances.
        return_sequences: Boolean. Whether to return the full output
            sequence instead of only the last output.
        return_state: Boolean. Whether to also return the final state(s).
        go_backwards: Boolean. If True, process the input sequence
            backwards.
        stateful: Boolean. If True, the final states of one batch are
            reused as the initial states of the next (requires a fixed
            batch size).
        unroll: Boolean. If True, the loop over time is unrolled;
            only possible when the number of timesteps is known.
    """

    def __init__(self, cell,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        if isinstance(cell, (list, tuple)):
            # A list of cells behaves as one stacked cell.
            cell = StackedRNNCells(cell)
        if not hasattr(cell, 'call'):
            raise ValueError('`cell` should have a `call` method. '
                             'The RNN was passed:', cell)
        if not hasattr(cell, 'state_size'):
            raise ValueError('The RNN cell should have '
                             'an attribute `state_size` '
                             '(tuple of integers, '
                             'one integer per RNN state).')
        super(RNN, self).__init__(**kwargs)
        self.cell = cell
        self.return_sequences = return_sequences
        self.return_state = return_state
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.unroll = unroll
        self.supports_masking = True
        # Main input is rank-3: (batch, time, features).
        self.input_spec = [InputSpec(ndim=3)]
        self.state_spec = None
        self._states = None
        self.constants_spec = None
        # Number of trailing `constants` inputs appended in __call__.
        self._num_constants = None

    @property
    def states(self):
        # Before any state exists, expose one `None` per cell state.
        if self._states is None:
            if isinstance(self.cell.state_size, int):
                num_states = 1
            else:
                num_states = len(self.cell.state_size)
            return [None for _ in range(num_states)]
        return self._states

    @states.setter
    def states(self, states):
        self._states = states

    def compute_output_shape(self, input_shape):
        """Output is (batch, time, dim) or (batch, dim); state shapes are
        appended when `return_state` is set."""
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        if hasattr(self.cell.state_size, '__len__'):
            state_size = self.cell.state_size
        else:
            state_size = [self.cell.state_size]
        # Output dimensionality is taken from the first state size.
        output_dim = state_size[0]
        if self.return_sequences:
            output_shape = (input_shape[0], input_shape[1], output_dim)
        else:
            output_shape = (input_shape[0], output_dim)
        if self.return_state:
            state_shape = [(input_shape[0], dim) for dim in state_size]
            return [output_shape] + state_shape
        else:
            return output_shape

    def compute_mask(self, inputs, mask):
        """Propagate the input mask only when returning sequences; state
        outputs carry no mask."""
        if isinstance(mask, list):
            mask = mask[0]
        output_mask = mask if self.return_sequences else None
        if self.return_state:
            state_mask = [None for _ in self.states]
            return [output_mask] + state_mask
        else:
            return output_mask

    def build(self, input_shape):
        """Build the wrapped cell from the per-timestep input shape."""
        # When constants were passed in __call__, their shapes are the
        # trailing entries of input_shape.
        if self._num_constants is not None:
            constants_shape = input_shape[-self._num_constants:]
        else:
            constants_shape = None
        if isinstance(input_shape, list):
            input_shape = input_shape[0]
        batch_size = input_shape[0] if self.stateful else None
        input_dim = input_shape[-1]
        self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))
        if isinstance(self.cell, Layer):
            # The cell sees one timestep: drop the time axis.
            step_input_shape = (input_shape[0],) + input_shape[2:]
            if constants_shape is not None:
                self.cell.build([step_input_shape] + constants_shape)
            else:
                self.cell.build(step_input_shape)
        if hasattr(self.cell.state_size, '__len__'):
            state_size = list(self.cell.state_size)
        else:
            state_size = [self.cell.state_size]
        if self.state_spec is not None:
            # An initial_state was already supplied; check compatibility.
            if [spec.shape[-1] for spec in self.state_spec] != state_size:
                raise ValueError(
                    'An `initial_state` was passed that is not compatible with '
                    '`cell.state_size`. Received `state_spec`={}; '
                    'however `cell.state_size` is '
                    '{}'.format(self.state_spec, self.cell.state_size))
        else:
            self.state_spec = [InputSpec(shape=(None, dim))
                               for dim in state_size]
        if self.stateful:
            self.reset_states()

    def get_initial_state(self, inputs):
        """Build all-zero initial states, one (batch, dim) tensor per
        entry of `cell.state_size`."""
        # (samples, timesteps, input_dim) -> zeros of shape (samples, 1).
        initial_state = K.zeros_like(inputs)
        initial_state = K.sum(initial_state, axis=(1, 2))
        initial_state = K.expand_dims(initial_state)
        if hasattr(self.cell.state_size, '__len__'):
            return [K.tile(initial_state, [1, dim])
                    for dim in self.cell.state_size]
        else:
            return [K.tile(initial_state, [1, self.cell.state_size])]

    def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
        """Wrap Layer.__call__ so that `initial_state` and `constants`
        (when given as Keras tensors) become additional layer inputs."""
        inputs, initial_state, constants = self._standardize_args(
            inputs, initial_state, constants)
        if initial_state is None and constants is None:
            return super(RNN, self).__call__(inputs, **kwargs)
        additional_inputs = []
        additional_specs = []
        if initial_state is not None:
            kwargs['initial_state'] = initial_state
            additional_inputs += initial_state
            self.state_spec = [InputSpec(shape=K.int_shape(state))
                               for state in initial_state]
            additional_specs += self.state_spec
        if constants is not None:
            kwargs['constants'] = constants
            additional_inputs += constants
            self.constants_spec = [InputSpec(shape=K.int_shape(constant))
                                   for constant in constants]
            self._num_constants = len(constants)
            additional_specs += self.constants_spec
        # Extra inputs must be all Keras tensors or all non-Keras tensors.
        is_keras_tensor = hasattr(additional_inputs[0], '_keras_history')
        for tensor in additional_inputs:
            if hasattr(tensor, '_keras_history') != is_keras_tensor:
                raise ValueError('The initial state or constants of an RNN'
                                 ' layer cannot be specified with a mix of'
                                 ' Keras tensors and non-Keras tensors')
        if is_keras_tensor:
            # Temporarily widen input_spec so Layer.__call__ accepts the
            # extra inputs, then restore it.
            full_input = [inputs] + additional_inputs
            full_input_spec = self.input_spec + additional_specs
            original_input_spec = self.input_spec
            self.input_spec = full_input_spec
            output = super(RNN, self).__call__(full_input, **kwargs)
            self.input_spec = original_input_spec
            return output
        else:
            return super(RNN, self).__call__(inputs, **kwargs)

    def call(self,
             inputs,
             mask=None,
             training=None,
             initial_state=None,
             constants=None):
        """Iterate the cell over time via K.rnn and assemble outputs,
        states and (when stateful) state updates."""
        # `inputs` may be a list when states/constants ride along; the
        # main input is always the first entry.
        if isinstance(inputs, list):
            inputs = inputs[0]
        if initial_state is not None:
            pass
        elif self.stateful:
            initial_state = self.states
        else:
            initial_state = self.get_initial_state(inputs)
        if isinstance(mask, list):
            mask = mask[0]
        if len(initial_state) != len(self.states):
            raise ValueError('Layer has ' + str(len(self.states)) +
                             ' states but was passed ' +
                             str(len(initial_state)) +
                             ' initial states.')
        input_shape = K.int_shape(inputs)
        timesteps = input_shape[1]
        if self.unroll and timesteps in [None, 1]:
            raise ValueError('Cannot unroll a RNN if the '
                             'time dimension is undefined or equal to 1. \n'
                             '- If using a Sequential model, '
                             'specify the time dimension by passing '
                             'an `input_shape` or `batch_input_shape` '
                             'argument to your first layer. If your '
                             'first layer is an Embedding, you can '
                             'also use the `input_length` argument.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a `shape` '
                             'or `batch_shape` argument to your Input layer.')
        kwargs = {}
        if has_arg(self.cell.call, 'training'):
            kwargs['training'] = training
        if constants:
            if not has_arg(self.cell.call, 'constants'):
                raise ValueError('RNN cell does not support constants')

            def step(inputs, states):
                # Constants ride at the tail of the state list; split them
                # back out before calling the cell.
                constants = states[-self._num_constants:]
                states = states[:-self._num_constants]
                return self.cell.call(inputs, states, constants=constants,
                                      **kwargs)
        else:
            def step(inputs, states):
                return self.cell.call(inputs, states, **kwargs)
        last_output, outputs, states = K.rnn(step,
                                             inputs,
                                             initial_state,
                                             constants=constants,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             unroll=self.unroll,
                                             input_length=timesteps)
        if self.stateful:
            # Register updates that copy the final states back into the
            # persistent state variables.
            updates = []
            for i in range(len(states)):
                updates.append((self.states[i], states[i]))
            self.add_update(updates, inputs)
        if self.return_sequences:
            output = outputs
        else:
            output = last_output
        # Properly set learning phase on output and states.
        if getattr(last_output, '_uses_learning_phase', False):
            output._uses_learning_phase = True
            for state in states:
                state._uses_learning_phase = True
        if self.return_state:
            if not isinstance(states, (list, tuple)):
                states = [states]
            else:
                states = list(states)
            return [output] + states
        else:
            return output

    def _standardize_args(self, inputs, initial_state, constants):
        """Normalize `inputs`/`initial_state`/`constants`: when `inputs`
        is a list it carries [inputs, initial_state..., constants...];
        the extras are split out and each argument becomes a list (or
        None)."""
        if isinstance(inputs, list):
            assert initial_state is None and constants is None
            if self._num_constants is not None:
                constants = inputs[-self._num_constants:]
                inputs = inputs[:-self._num_constants]
            if len(inputs) > 1:
                initial_state = inputs[1:]
            inputs = inputs[0]

        def to_list_or_none(x):
            if x is None or isinstance(x, list):
                return x
            if isinstance(x, tuple):
                return list(x)
            return [x]

        initial_state = to_list_or_none(initial_state)
        constants = to_list_or_none(constants)
        return inputs, initial_state, constants

    def reset_states(self, states=None):
        """Reset the recorded states to zeros, or to the given numpy
        arrays (stateful layers only)."""
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        batch_size = self.input_spec[0].shape[0]
        if not batch_size:
            raise ValueError('If a RNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors: \n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the batch size by passing a '
                             '`batch_shape` argument to your Input layer.')
        # First call: create the state variables.
        if self.states[0] is None:
            if hasattr(self.cell.state_size, '__len__'):
                self.states = [K.zeros((batch_size, dim))
                               for dim in self.cell.state_size]
            else:
                self.states = [K.zeros((batch_size, self.cell.state_size))]
        elif states is None:
            # Zero out the existing state variables.
            if hasattr(self.cell.state_size, '__len__'):
                for state, dim in zip(self.states, self.cell.state_size):
                    K.set_value(state, np.zeros((batch_size, dim)))
            else:
                K.set_value(self.states[0],
                            np.zeros((batch_size, self.cell.state_size)))
        else:
            # Set the states to user-provided values, with shape checks.
            if not isinstance(states, (list, tuple)):
                states = [states]
            if len(states) != len(self.states):
                raise ValueError('Layer ' + self.name + ' expects ' +
                                 str(len(self.states)) + ' states, '
                                 'but it received ' + str(len(states)) +
                                 ' state values. Input received: ' +
                                 str(states))
            for index, (value, state) in enumerate(zip(states, self.states)):
                if hasattr(self.cell.state_size, '__len__'):
                    dim = self.cell.state_size[index]
                else:
                    dim = self.cell.state_size
                if value.shape != (batch_size, dim):
                    raise ValueError('State ' + str(index) +
                                     ' is incompatible with layer ' +
                                     self.name + ': expected shape=' +
                                     str((batch_size, dim)) +
                                     ', found shape=' + str(value.shape))
                K.set_value(state, value)

    def get_config(self):
        """Serialize the layer, embedding the cell's own config."""
        config = {'return_sequences': self.return_sequences,
                  'return_state': self.return_state,
                  'go_backwards': self.go_backwards,
                  'stateful': self.stateful,
                  'unroll': self.unroll}
        if self._num_constants is not None:
            config['num_constants'] = self._num_constants
        cell_config = self.cell.get_config()
        config['cell'] = {'class_name': self.cell.__class__.__name__,
                          'config': cell_config}
        base_config = super(RNN, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        from . import deserialize as deserialize_layer
        cell = deserialize_layer(config.pop('cell'),
                                 custom_objects=custom_objects)
        num_constants = config.pop('num_constants', None)
        layer = cls(cell, **config)
        layer._num_constants = num_constants
        return layer

    @property
    def trainable_weights(self):
        if not self.trainable:
            return []
        if isinstance(self.cell, Layer):
            return self.cell.trainable_weights
        return []

    @property
    def non_trainable_weights(self):
        if isinstance(self.cell, Layer):
            if not self.trainable:
                return self.cell.weights
            return self.cell.non_trainable_weights
        return []

    @property
    def losses(self):
        if isinstance(self.cell, Layer):
            return self.cell.losses
        return []

    def get_losses_for(self, inputs=None):
        if isinstance(self.cell, Layer):
            cell_losses = self.cell.get_losses_for(inputs)
            return cell_losses + super(RNN, self).get_losses_for(inputs)
        return super(RNN, self).get_losses_for(inputs)
class SimpleRNNCell(Layer):
    """Cell class for `SimpleRNN`: a fully-connected RNN cell whose
    output is fed back as the new state."""

    def __init__(self, units,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(SimpleRNNCell, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout fractions into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_size = self.units
        self._dropout_mask = None
        self._recurrent_dropout_mask = None

    def build(self, input_shape):
        """Create the input kernel, the recurrent kernel and the
        (optional) bias."""
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(
            shape=(input_dim, self.units),
            name='kernel',
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if not self.use_bias:
            self.bias = None
        else:
            self.bias = self.add_weight(
                shape=(self.units,),
                name='bias',
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint)
        self.built = True

    def call(self, inputs, states, training=None):
        """One step: output = activation(W.x + U.h_prev (+ b))."""
        h_prev = states[0]
        # Lazily build (and cache) the dropout masks on the first step.
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, K.shape(inputs)[-1]),
                self.dropout,
                training=training)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, self.units),
                self.recurrent_dropout,
                training=training)
        input_mask = self._dropout_mask
        recurrent_mask = self._recurrent_dropout_mask

        masked_inputs = inputs if input_mask is None else inputs * input_mask
        projected = K.dot(masked_inputs, self.kernel)
        if self.bias is not None:
            projected = K.bias_add(projected, self.bias)
        if recurrent_mask is not None:
            h_prev = h_prev * recurrent_mask
        output = projected + K.dot(h_prev, self.recurrent_kernel)
        if self.activation is not None:
            output = self.activation(output)
        # With dropout active and no explicit `training` flag, mark the
        # output as learning-phase dependent.
        if 0 < self.dropout + self.recurrent_dropout and training is None:
            output._uses_learning_phase = True
        return output, [output]

    def get_config(self):
        """Return the serializable configuration of this cell."""
        config = {
            'units': self.units,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'kernel_initializer':
                initializers.serialize(self.kernel_initializer),
            'recurrent_initializer':
                initializers.serialize(self.recurrent_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer),
            'kernel_regularizer':
                regularizers.serialize(self.kernel_regularizer),
            'recurrent_regularizer':
                regularizers.serialize(self.recurrent_regularizer),
            'bias_regularizer': regularizers.serialize(self.bias_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'recurrent_constraint':
                constraints.serialize(self.recurrent_constraint),
            'bias_constraint': constraints.serialize(self.bias_constraint),
            'dropout': self.dropout,
            'recurrent_dropout': self.recurrent_dropout,
        }
        merged = dict(super(SimpleRNNCell, self).get_config())
        merged.update(config)
        return merged
class SimpleRNN(RNN):
    """Fully-connected RNN where the output is fed back to the input.

    Thin `RNN` wrapper around a `SimpleRNNCell`; all configuration
    arguments are forwarded to the cell, and exposed back through
    read-only properties.
    """

    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        # `implementation` is obsolete for SimpleRNN: accept and discard.
        if 'implementation' in kwargs:
            kwargs.pop('implementation')
            warnings.warn('The `implementation` argument '
                          'in `SimpleRNN` has been deprecated. '
                          'Please remove it from your layer call.')
        if K.backend() == 'theano':
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        cell = SimpleRNNCell(units,
                             activation=activation,
                             use_bias=use_bias,
                             kernel_initializer=kernel_initializer,
                             recurrent_initializer=recurrent_initializer,
                             bias_initializer=bias_initializer,
                             kernel_regularizer=kernel_regularizer,
                             recurrent_regularizer=recurrent_regularizer,
                             bias_regularizer=bias_regularizer,
                             kernel_constraint=kernel_constraint,
                             recurrent_constraint=recurrent_constraint,
                             bias_constraint=bias_constraint,
                             dropout=dropout,
                             recurrent_dropout=recurrent_dropout)
        super(SimpleRNN, self).__init__(cell,
                                        return_sequences=return_sequences,
                                        return_state=return_state,
                                        go_backwards=go_backwards,
                                        stateful=stateful,
                                        unroll=unroll,
                                        **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)

    def call(self, inputs, mask=None, training=None, initial_state=None):
        """Run the wrapped cell over the time dimension of `inputs`."""
        return super(SimpleRNN, self).call(inputs,
                                           mask=mask,
                                           training=training,
                                           initial_state=initial_state)

    # Read-only views of the wrapped cell's configuration.
    @property
    def units(self):
        return self.cell.units

    @property
    def activation(self):
        return self.cell.activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    def get_config(self):
        """Serialize the layer's config; the cell entry of the base config
        is dropped because the cell is reconstructed from these keys."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(SimpleRNN, self).get_config()
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        # Drop the obsolete `implementation` key from legacy configs.
        if 'implementation' in config:
            config.pop('implementation')
        return cls(**config)
class GRUCell(Layer):
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        """Configure a GRU cell with `units` output dimensions.

        Activations, initializers, regularizers and constraints are
        resolved through their respective `get()` registries, so both
        string identifiers and instances are accepted.
        """
        super(GRUCell, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp dropout fractions into [0, 1].
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        # A GRU carries a single state tensor of size `units`.
        self.state_size = self.units
        # Dropout masks are built lazily on the first call() and cached.
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units * 3,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_z = self.kernel[:, :self.units]
self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
self.kernel_r = self.kernel[:, self.units: self.units * 2]
self.recurrent_kernel_r = self.recurrent_kernel[:,
self.units:
self.units * 2]
self.kernel_h = self.kernel[:, self.units * 2:]
self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
if self.use_bias:
self.bias_z = self.bias[:self.units]
self.bias_r = self.bias[self.units: self.units * 2]
self.bias_h = self.bias[self.units * 2:]
else:
self.bias_z = None
self.bias_r = None
self.bias_h = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0]
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, K.shape(inputs)[-1]),
self.dropout,
training=training,
count=3)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
_generate_dropout_ones(inputs, self.units),
self.recurrent_dropout,
training=training,
count=3)
dp_mask = self._dropout_mask
rec_dp_mask = self._recurrent_dropout_mask
if self.implementation == 1:
if 0. < self.dropout < 1.:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = K.dot(inputs_z, self.kernel_z)
x_r = K.dot(inputs_r, self.kernel_r)
x_h = K.dot(inputs_h, self.kernel_h)
if self.use_bias:
x_z = K.bias_add(x_z, self.bias_z)
x_r = K.bias_add(x_r, self.bias_r)
x_h = K.bias_add(x_h, self.bias_h)
if 0. < self.recurrent_dropout < 1.:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
z = self.recurrent_activation(x_z + K.dot(h_tm1_z,
self.recurrent_kernel_z))
r = self.recurrent_activation(x_r + K.dot(h_tm1_r,
self.recurrent_kernel_r))
hh = self.activation(x_h + K.dot(r * h_tm1_h,
self.recurrent_kernel_h))
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
matrix_x = K.dot(inputs, self.kernel)
if self.use_bias:
matrix_x = K.bias_add(matrix_x, self.bias)
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
matrix_inner = K.dot(h_tm1,
self.recurrent_kernel[:, :2 * self.units])
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units: 2 * self.units]
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units: 2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
x_h = matrix_x[:, 2 * self.units:]
recurrent_h = K.dot(r * h_tm1,
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
h = z * h_tm1 + (1 - z) * hh
if 0 < self.dropout + self.recurrent_dropout:
if training is None:
h._uses_learning_phase = True
return h, [h]
def get_config(self):
config = {'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GRU(RNN):
    """Gated Recurrent Unit layer.

    Thin wrapper that plugs a `GRUCell` into the generic `RNN`
    container; the read-only properties below simply proxy the
    hyperparameters of the underlying cell.
    """
    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        if implementation == 0:
            warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`.'
                          'Please update your layer call.')
        if K.backend() == 'theano':
            # Dropout cannot be honoured on the Theano backend; force it off.
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        cell = GRUCell(units,
                       activation=activation,
                       recurrent_activation=recurrent_activation,
                       use_bias=use_bias,
                       kernel_initializer=kernel_initializer,
                       recurrent_initializer=recurrent_initializer,
                       bias_initializer=bias_initializer,
                       kernel_regularizer=kernel_regularizer,
                       recurrent_regularizer=recurrent_regularizer,
                       bias_regularizer=bias_regularizer,
                       kernel_constraint=kernel_constraint,
                       recurrent_constraint=recurrent_constraint,
                       bias_constraint=bias_constraint,
                       dropout=dropout,
                       recurrent_dropout=recurrent_dropout,
                       implementation=implementation)
        super(GRU, self).__init__(cell,
                                  return_sequences=return_sequences,
                                  return_state=return_state,
                                  go_backwards=go_backwards,
                                  stateful=stateful,
                                  unroll=unroll,
                                  **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)
    def call(self, inputs, mask=None, training=None, initial_state=None):
        """Delegate to the generic RNN call."""
        return super(GRU, self).call(inputs,
                                     mask=mask,
                                     training=training,
                                     initial_state=initial_state)
    # Read-only proxies for the wrapped cell's hyperparameters.
    @property
    def units(self):
        return self.cell.units
    @property
    def activation(self):
        return self.cell.activation
    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation
    @property
    def use_bias(self):
        return self.cell.use_bias
    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer
    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer
    @property
    def bias_initializer(self):
        return self.cell.bias_initializer
    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer
    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer
    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer
    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint
    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint
    @property
    def bias_constraint(self):
        return self.cell.bias_constraint
    @property
    def dropout(self):
        return self.cell.dropout
    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout
    @property
    def implementation(self):
        return self.cell.implementation
    def get_config(self):
        """Serialize cell hyperparameters flattened into this layer's config."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(GRU, self).get_config()
        # The cell is reconstructed in __init__, so it is not serialized.
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))
    @classmethod
    def from_config(cls, config):
        # Legacy configs may carry implementation=0; map it to 1.
        if 'implementation' in config and config['implementation'] == 0:
            config['implementation'] = 1
        return cls(**config)
class LSTMCell(Layer):
    """Cell class for the LSTM layer (one recurrence step).

    Gate order inside the concatenated kernels is i, f, c, o
    (input, forget, candidate, output).  The recurrent state is the
    pair ``(h, c)``, each of shape ``(batch, units)``.
    """
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(LSTMCell, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Dropout fractions are clipped into the valid [0, 1] range.
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation
        # Two state tensors: hidden state h and carry (cell) state c.
        self.state_size = (self.units, self.units)
        # Dropout masks are built lazily on the first call() and cached.
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
    def build(self, input_shape):
        """Create the weights; the i, f, c, o kernels are concatenated."""
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(input_dim, self.units * 4),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)
        if self.use_bias:
            if self.unit_forget_bias:
                # Initialize only the forget-gate slice (second quarter)
                # with ones; the remaining slices use bias_initializer.
                def bias_initializer(_, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units,), *args, **kwargs),
                        initializers.Ones()((self.units,), *args, **kwargs),
                        self.bias_initializer((self.units * 2,), *args, **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 4,),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Per-gate views into the concatenated weight matrices.
        self.kernel_i = self.kernel[:, :self.units]
        self.kernel_f = self.kernel[:, self.units: self.units * 2]
        self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]
        self.kernel_o = self.kernel[:, self.units * 3:]
        self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
        self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]
        self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]
        self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
        if self.use_bias:
            self.bias_i = self.bias[:self.units]
            self.bias_f = self.bias[self.units: self.units * 2]
            self.bias_c = self.bias[self.units * 2: self.units * 3]
            self.bias_o = self.bias[self.units * 3:]
        else:
            self.bias_i = None
            self.bias_f = None
            self.bias_c = None
            self.bias_o = None
        self.built = True
    def call(self, inputs, states, training=None):
        """Run one LSTM step; returns ``(output, [h, c])``."""
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, K.shape(inputs)[-1]),
                self.dropout,
                training=training,
                count=4)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                _generate_dropout_ones(inputs, self.units),
                self.recurrent_dropout,
                training=training,
                count=4)
        dp_mask = self._dropout_mask
        rec_dp_mask = self._recurrent_dropout_mask
        h_tm1 = states[0]
        c_tm1 = states[1]
        if self.implementation == 1:
            # Implementation 1: separate, smaller matrix products per gate.
            if 0 < self.dropout < 1.:
                inputs_i = inputs * dp_mask[0]
                inputs_f = inputs * dp_mask[1]
                inputs_c = inputs * dp_mask[2]
                inputs_o = inputs * dp_mask[3]
            else:
                inputs_i = inputs
                inputs_f = inputs
                inputs_c = inputs
                inputs_o = inputs
            x_i = K.dot(inputs_i, self.kernel_i)
            x_f = K.dot(inputs_f, self.kernel_f)
            x_c = K.dot(inputs_c, self.kernel_c)
            x_o = K.dot(inputs_o, self.kernel_o)
            if self.use_bias:
                x_i = K.bias_add(x_i, self.bias_i)
                x_f = K.bias_add(x_f, self.bias_f)
                x_c = K.bias_add(x_c, self.bias_c)
                x_o = K.bias_add(x_o, self.bias_o)
            if 0 < self.recurrent_dropout < 1.:
                h_tm1_i = h_tm1 * rec_dp_mask[0]
                h_tm1_f = h_tm1 * rec_dp_mask[1]
                h_tm1_c = h_tm1 * rec_dp_mask[2]
                h_tm1_o = h_tm1 * rec_dp_mask[3]
            else:
                h_tm1_i = h_tm1
                h_tm1_f = h_tm1
                h_tm1_c = h_tm1
                h_tm1_o = h_tm1
            i = self.recurrent_activation(x_i + K.dot(h_tm1_i,
                                                      self.recurrent_kernel_i))
            f = self.recurrent_activation(x_f + K.dot(h_tm1_f,
                                                      self.recurrent_kernel_f))
            c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,
                                                            self.recurrent_kernel_c))
            o = self.recurrent_activation(x_o + K.dot(h_tm1_o,
                                                      self.recurrent_kernel_o))
        else:
            # Implementation 2: one batched product, then slice per gate.
            if 0. < self.dropout < 1.:
                inputs *= dp_mask[0]
            z = K.dot(inputs, self.kernel)
            if 0. < self.recurrent_dropout < 1.:
                h_tm1 *= rec_dp_mask[0]
            z += K.dot(h_tm1, self.recurrent_kernel)
            if self.use_bias:
                z = K.bias_add(z, self.bias)
            z0 = z[:, :self.units]
            z1 = z[:, self.units: 2 * self.units]
            z2 = z[:, 2 * self.units: 3 * self.units]
            z3 = z[:, 3 * self.units:]
            i = self.recurrent_activation(z0)
            f = self.recurrent_activation(z1)
            c = f * c_tm1 + i * self.activation(z2)
            o = self.recurrent_activation(z3)
        h = o * self.activation(c)
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                # Output depends on the learning phase when dropout is active.
                h._uses_learning_phase = True
        return h, [h, c]
    def get_config(self):
        """Return the cell's config merged over the base Layer config."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(LSTMCell, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class LSTM(RNN):
    """Long Short-Term Memory layer.

    Thin wrapper that plugs an `LSTMCell` into the generic `RNN`
    container; the read-only properties below simply proxy the
    hyperparameters of the underlying cell.
    """
    @interfaces.legacy_recurrent_support
    def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):
        if implementation == 0:
            warnings.warn('`implementation=0` has been deprecated, '
                          'and now defaults to `implementation=1`.'
                          'Please update your layer call.')
        if K.backend() == 'theano':
            # Dropout cannot be honoured on the Theano backend; force it off.
            warnings.warn(
                'RNN dropout is no longer supported with the Theano backend '
                'due to technical limitations. '
                'You can either set `dropout` and `recurrent_dropout` to 0, '
                'or use the TensorFlow backend.')
            dropout = 0.
            recurrent_dropout = 0.
        cell = LSTMCell(units,
                        activation=activation,
                        recurrent_activation=recurrent_activation,
                        use_bias=use_bias,
                        kernel_initializer=kernel_initializer,
                        recurrent_initializer=recurrent_initializer,
                        unit_forget_bias=unit_forget_bias,
                        bias_initializer=bias_initializer,
                        kernel_regularizer=kernel_regularizer,
                        recurrent_regularizer=recurrent_regularizer,
                        bias_regularizer=bias_regularizer,
                        kernel_constraint=kernel_constraint,
                        recurrent_constraint=recurrent_constraint,
                        bias_constraint=bias_constraint,
                        dropout=dropout,
                        recurrent_dropout=recurrent_dropout,
                        implementation=implementation)
        super(LSTM, self).__init__(cell,
                                   return_sequences=return_sequences,
                                   return_state=return_state,
                                   go_backwards=go_backwards,
                                   stateful=stateful,
                                   unroll=unroll,
                                   **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)
    def call(self, inputs, mask=None, training=None, initial_state=None):
        """Delegate to the generic RNN call."""
        return super(LSTM, self).call(inputs,
                                      mask=mask,
                                      training=training,
                                      initial_state=initial_state)
    # Read-only proxies for the wrapped cell's hyperparameters.
    @property
    def units(self):
        return self.cell.units
    @property
    def activation(self):
        return self.cell.activation
    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation
    @property
    def use_bias(self):
        return self.cell.use_bias
    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer
    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer
    @property
    def bias_initializer(self):
        return self.cell.bias_initializer
    @property
    def unit_forget_bias(self):
        return self.cell.unit_forget_bias
    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer
    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer
    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer
    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint
    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint
    @property
    def bias_constraint(self):
        return self.cell.bias_constraint
    @property
    def dropout(self):
        return self.cell.dropout
    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout
    @property
    def implementation(self):
        return self.cell.implementation
    def get_config(self):
        """Serialize cell hyperparameters flattened into this layer's config."""
        config = {'units': self.units,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout,
                  'implementation': self.implementation}
        base_config = super(LSTM, self).get_config()
        # The cell is reconstructed in __init__, so it is not serialized.
        del base_config['cell']
        return dict(list(base_config.items()) + list(config.items()))
    @classmethod
    def from_config(cls, config):
        # Legacy configs may carry implementation=0; map it to 1.
        if 'implementation' in config and config['implementation'] == 0:
            config['implementation'] = 1
        return cls(**config)
def _generate_dropout_ones(inputs, dims):
    """Return an all-ones tensor of shape (batch_size, dims).

    On the CNTK backend the ones are tiled from a column derived from
    `inputs` as a workaround; to be revisited once CNTK supports the
    direct construction.
    """
    if K.backend() != 'cntk':
        return K.ones((K.shape(inputs)[0], dims))
    column = K.ones_like(K.reshape(inputs[:, 0], (-1, 1)))
    return K.tile(column, (1, dims))
def _generate_dropout_mask(ones, rate, training=None, count=1):
    """Build dropout mask tensor(s) from a ones template.

    Returns a list of `count` masks when `count > 1`, otherwise a
    single mask.  Each mask applies dropout only in the training phase.
    """
    def dropped_inputs():
        return K.dropout(ones, rate)
    masks = [K.in_train_phase(dropped_inputs, ones, training=training)
             for _ in range(count)]
    return masks if count > 1 else masks[0]
| true | true |
f7198b1249cfc281e7acad93f4e91961e055e201 | 13,206 | py | Python | mirage/libs/ble_utils/scapy_btlejack_layers.py | HomeSen/mirage | 6beb4df508758bd152f5d929ba3e6353f161ef27 | [
"MIT"
] | null | null | null | mirage/libs/ble_utils/scapy_btlejack_layers.py | HomeSen/mirage | 6beb4df508758bd152f5d929ba3e6353f161ef27 | [
"MIT"
] | null | null | null | mirage/libs/ble_utils/scapy_btlejack_layers.py | HomeSen/mirage | 6beb4df508758bd152f5d929ba3e6353f161ef27 | [
"MIT"
] | null | null | null | from scapy.all import *
'''
This module contains some scapy definitions for communicating with a BTLEJack device.
'''
# High-level class of a BTLEJack packet (upper nibble of the type byte).
BTLEJACK_PACKETS_TYPES = {
	0x1 : "command",
	0x2 : "response",
	0x4 : "notification"
}
# Operation codes shared by commands and their matching responses
# (lower nibble of the type byte when packet_type <= 0x3).
BTLEJACK_PACKETS_OPCODES = {
	0x1 : "version",
	0x2 : "reset",
	0x3 : "scan_access_address",
	0x4 : "recover",
	0x5 : "recover_channel_map",
	0x6 : "recover_hop_interval",
	0x7 : "sniff_connection_requests",
	0x8 : "enable_jamming",
	0x9 : "enable_hijacking",
	0xa : "send_packet",
	0xb : "collaborative_channel_map",
	0xe : "debug",
	0xf : "verbose"
}
# Notification subtypes (lower nibble when packet_type == 0x4).
BTLEJACK_NOTIFICATION_TYPES = {
	0x0 : "access_address",
	0x1 : "crc",
	0x2 : "channel_map",
	0x3 : "hop_interval",
	0x4 : "hop_increment",
	0x5 : "packet",
	0x6 : "connection_request",
	0x7 : "packet_nordic",
	0x8 : "hijack_status",
	0x9 : "connection_lost",
	0xa : "advertisement"
}
class BTLEJack_Hdr(Packet):
	'''
	BTLEJack transport header.

	Wire format: magic (0xBC) | type/opcode byte | little-endian length |
	payload | checksum byte (XOR over the preceding bytes, seeded with
	0xFF).  The checksum travels last on the wire but is declared right
	after the header in ``fields_desc``; ``pre_dissect`` reorders the raw
	bytes accordingly.
	'''
	name = "BTLEJack Packet"
	fields_desc = [
		XByteField("magic",0xBC),
		BitEnumField("packet_type",None, 4, BTLEJACK_PACKETS_TYPES),
		ConditionalField(BitEnumField("opcode",None, 4, BTLEJACK_PACKETS_OPCODES), lambda pkt:pkt.packet_type <= 0x3),
		ConditionalField(BitEnumField("notification_type",None, 4, BTLEJACK_NOTIFICATION_TYPES), lambda pkt:pkt.packet_type == 0x4),
		LEShortField("length",None),
		XByteField("crc",None)
	]
	def pre_dissect(self,data):
		# Move the trailing checksum byte just after the 4-byte header so
		# the byte order matches the declaration order of fields_desc.
		return data[0:4] + data[-1:] + data[4:-1]
	def post_build(self,p,pay):
		# Fill in the checksum (XOR of all built bytes, seed 0xFF) and the
		# payload length when they were left unset, then append the
		# checksum at the end of the frame as the device expects.
		if self.crc is None:
			self.crc = 0xFF
			for byte in p+pay:
				self.crc ^= byte
		if self.length is None:
			self.length = len(pay)
			self.crc ^= self.length
		return p[0:2]+struct.pack('<H',self.length)+pay+struct.pack('B',self.crc)
# BTLEJack Commands (host -> device requests, packet_type=0x1).
# Fix: BTLEJack_Reset_Command was defined twice back-to-back with an
# identical body; the redundant duplicate definition has been removed.
class BTLEJack_Version_Command(Packet):
	name = "BTLEJack Version Command"
class BTLEJack_Reset_Command(Packet):
	name = "BTLEJack Reset Command"
class BTLEJack_Scan_Connections_Command(Packet):
	name = "BTLEJack Scan Connections Command"
class BTLEJack_Collaborative_Channel_Map_Command(Packet):
	name = "BTLEJack Collaborative Channel Map Command"
	fields_desc = [
		XLEIntField("access_address",None),
		LEX3BytesField("crc_init",None),
		ByteField("start_channel",0),
		ByteField("end_channel",37)
	]
class BTLEJack_Recover_Command(Packet):
	name = "BTLEJack Recover Command"
	# Dispatch byte selecting which recovery sub-command follows.
	fields_desc = [
		ByteEnumField("operation_type",None, {
		0x00 : "recover_crc_init",
		0x01 : "recover_channel_map",
		0x02 : "recover_hop"
		})
	]
class BTLEJack_Recover_Crcinit_Command(Packet):
	name = "BTLEJack Recover CRCInit Command"
	fields_desc = [
		XLEIntField("access_address",None)
	]
class BTLEJack_Recover_Channel_Map_Command(Packet):
	name = "BTLEJack Recover Channel Map Command"
	fields_desc = [
		XLEIntField("access_address",None),
		LEX3BytesField("crc_init",None),
		ByteField("start_channel",0),
		ByteField("end_channel",37),
		LEIntField("timeout",None)
	]
class BTLEJack_Recover_Hopping_Parameters_Command(Packet):
	name = "BTLEJack Recover Hopping Parameters Command"
	fields_desc = [
		XLEIntField("access_address",None),
		LEX3BytesField("crc_init",None),
		BTLEChanMapField("channel_map",None)
	]
class BTLEJack_Recover_Connection_AA_Command(Packet):
	name = "BTLEJack Recover Connection AA Command"
	fields_desc = [
		XLEIntField("access_address",None)
	]
class BTLEJack_Recover_Connection_AA_Chm_Command(Packet):
	name = "BTLEJack Recover Connection AA Chm Command"
	fields_desc = [
		XLEIntField("access_address",None),
		BTLEChanMapField("channel_map",None)
	]
class BTLEJack_Recover_Connection_AA_Chm_HopInterval_Command(Packet):
	name = "BTLEJack Recover Connection AA Chm Command"
	fields_desc = [
		XLEIntField("access_address",None),
		BTLEChanMapField("channel_map",None),
		XLEShortField("hop_interval",None)
	]
class BTLEJack_Sniff_Connection_Request_Command(Packet):
	name = "BTLEJack Sniff Connection Request Command"
	fields_desc = [
		BDAddrField("address",None),
		ByteField("channel",37)
	]
class BTLEJack_Sniff_Advertisements_Command(Packet):
	name = "BTLEJack Sniff Advertisements Command"
	fields_desc = [
		BDAddrField("address",None),
		ByteField("channel",37)
	]
class BTLEJack_Jam_Advertisements_Command(Packet):
	name = "BTLEJack Jam Advertisements Command"
	# Jam frames whose bytes at `offset` match `pattern`.
	fields_desc = [
		ByteField("channel",37),
		ByteField("offset",None),
		FieldLenField("pattern_length", None,fmt="B", length_of="pattern"),
		StrField("pattern",None)
	]
class BTLEJack_Enable_Jamming_Command(Packet):
	name = "BTLEJack Enable Jamming Command"
	fields_desc = [
		ByteEnumField("enabled",None,{0x00 : "no",0x01 : "yes"})
	]
class BTLEJack_Enable_Hijacking_Command(Packet):
	name = "BTLEJack Enable Hijacking Command"
	fields_desc = [
		ByteEnumField("enabled",None,{0x00 : "no",0x01 : "yes"})
	]
class BTLEJack_Send_Packet_Command(Packet):
	name = "BTLEJack Send Packet Command"
	fields_desc = [
		PacketField("ble_payload",None,BTLE_DATA)
	]
# BTLEJack Responses (device -> host replies, packet_type=0x2).
# Field-less classes are simple acknowledgements with no payload.
class BTLEJack_Send_Packet_Response(Packet):
	name = "BTLEJack Send Packet Response"
class BTLEJack_Enable_Jamming_Response(Packet):
	name = "BTLEJack Enable Jamming Response"
class BTLEJack_Enable_Hijacking_Response(Packet):
	name = "BTLEJack Enable Hijacking Response"
class BTLEJack_Recover_Response(Packet):
	name = "BTLEJack Recover Response"
class BTLEJack_Scan_Connections_Response(Packet):
	name = "BTLEJack Scan Connections Response"
class BTLEJack_Collaborative_Channel_Map_Response(Packet):
	name = "BTLEJack Collaborative Channel Map Response"
class BTLEJack_Version_Response(Packet):
	name = "BTLEJack Version Response"
	# Firmware version reported as major.minor.
	fields_desc = [
		ByteField("major",None),
		ByteField("minor",None)
	]
class BTLEJack_Reset_Response(Packet):
	name = "BTLEJack Reset Response"
class BTLEJack_Sniff_Connection_Request_Response(Packet):
	name = "BTLEJack Sniff Connection Request Response"
class BTLEJack_Sniff_Advertisements_Response(Packet):
	name = "BTLEJack Sniff Advertisements Response"
class BTLEJack_Jam_Advertisements_Response(Packet):
	name = "BTLEJack Jam Advertisements Response"
class BTLEJack_Verbose_Response(Packet):
	name = "BTLEJack Verbose Response"
	fields_desc = [StrField("message",None)]
class BTLEJack_Debug_Response(Packet):
	name = "BTLEJack Debug Response"
	fields_desc = [StrField("message",None)]
class BTLEJack_Recover_Connection_AA_Response(Packet):
	name = "BTLEJack Recover Connection AA Response"
	fields_desc = [
		XLEIntField("access_address",None)
	]
class BTLEJack_Recover_Connection_AA_Chm_Response(Packet):
	name = "BTLEJack Recover Connection AA Chm Response"
	fields_desc = [
		XLEIntField("access_address",None)
	]
# BTLEJack Notifications (asynchronous device -> host events,
# packet_type=0x4, discriminated by notification_type).
class BTLEJack_Access_Address_Notification(Packet):
	name = "BTLEJack Access Address Notification"
	fields_desc = [
		ByteField("channel",None),
		ByteField("rssi", None),
		XLEIntField("access_address",None)
	]
class BTLEJack_CRCInit_Notification(Packet):
	name = "BTLEJack CRCInit Notification"
	fields_desc = [
		XLEIntField("access_address",None),
		LEX3BytesField("crc_init",None),
		ByteField("unused",0)
	]
class BTLEJack_Channel_Map_Notification(Packet):
	name = "BTLEJack Channel Map Notification"
	fields_desc = [
		XLEIntField("access_address",None),
		BTLEChanMapField("channel_map",None)
	]
class BTLEJack_Hop_Interval_Notification(Packet):
	name = "BTLEJack Hop Interval Notification"
	fields_desc = [
		XLEIntField("access_address",None),
		XLEShortField("hop_interval",None)
	]
class BTLEJack_Hop_Increment_Notification(Packet):
	name = "BTLEJack Hop Increment Notification"
	fields_desc = [
		XLEIntField("access_address",None),
		ByteField("hop_increment",None)
	]
class BTLEJack_Nordic_Tap_Packet_Notification(Packet):
	name = "BTLEJack Nordic Tap Packet Notification"
	# Sniffed BLE data packet wrapped with capture metadata
	# (channel, RSSI, connection event counter, timestamp delta).
	fields_desc = [
		ByteField("header_length",None),
		ByteField("flags",None),
		ByteField("channel",None),
		ByteField("rssi",None),
		LEShortField("event_counter",None),
		LEIntField("delta", None),
		PacketField("ble_payload",None, BTLE_DATA)
	]
class BTLEJack_Hijack_Status_Notification(Packet):
	name = "BTLEJack Hijack Status Notification"
	fields_desc = [
		ByteEnumField("status",None, {0 : "success", 1 : "failure"})
	]
class BTLEJack_Connection_Lost_Notification(Packet):
	name = "BTLEJack Connection Lost Notification"
class BTLEJack_Advertisement_Notification(Packet):
	name = "BTLEJack Advertisement Notification"
	fields_desc = [
		PacketField("ble_payload",None,BTLE_ADV)
	]
class BTLEJack_Connection_Request_Notification(Packet):
	name = "BTLEJack Connection Request Notification"
	# Re-declares the BLE advertising header bits in front of the
	# captured CONNECT_REQ payload.
	fields_desc = [
		BitEnumField("RxAdd", 0, 1, {0: "public", 1: "random"}),
		BitEnumField("TxAdd", 0, 1, {0: "public", 1: "random"}),
		BitField("RFU", 0, 2),  # Unused
		BitEnumField("PDU_type", 0, 4, {0: "ADV_IND", 1: "ADV_DIRECT_IND", 2: "ADV_NONCONN_IND", 3: "SCAN_REQ",
										4: "SCAN_RSP", 5: "CONNECT_REQ", 6: "ADV_SCAN_IND"}),
		ByteField("payload_length", 0x22),
		PacketField("ble_payload",None,BTLE_CONNECT_REQ)
	]
# Binding BTLEJack Commands (host -> firmware, packet_type=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Version_Command, packet_type=0x1, opcode=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Reset_Command, packet_type=0x1, opcode=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Scan_Connections_Command, packet_type=0x1, opcode=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Collaborative_Channel_Map_Command, packet_type=0x1, opcode=0xb)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Command, packet_type=0x1, opcode=0x4)
# The generic recover command dispatches to a sub-command on its operation_type byte
bind_layers(BTLEJack_Recover_Command, BTLEJack_Recover_Crcinit_Command, operation_type=0x00)
bind_layers(BTLEJack_Recover_Command, BTLEJack_Recover_Channel_Map_Command, operation_type=0x01)
bind_layers(BTLEJack_Recover_Command, BTLEJack_Recover_Hopping_Parameters_Command, operation_type=0x02)
# Superseded by the operation_type-based dispatch above:
# bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Command, packet_type=0x1, opcode=0x4)
# bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Chm_Command, packet_type=0x1, opcode=0x5)
# bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Chm_HopInterval_Command, packet_type=0x1, opcode=0x6)
bind_layers(BTLEJack_Hdr, BTLEJack_Jam_Advertisements_Command, packet_type=0x1, opcode=0x5)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Connection_Request_Command, packet_type=0x1, opcode=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Advertisements_Command, packet_type=0x1, opcode=0xc)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Jamming_Command, packet_type=0x1, opcode=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Hijacking_Command, packet_type=0x1, opcode=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Send_Packet_Command, packet_type=0x1, opcode=0xa)

# Binding BTLEJack Responses (firmware -> host, packet_type=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Send_Packet_Response, packet_type=0x2, opcode=0xa)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Jamming_Response, packet_type=0x2, opcode=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Hijacking_Response, packet_type=0x2, opcode=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Connection_Request_Response, packet_type=0x2, opcode=0x7)
# NOTE(review): bound with packet_type=0x1 (command) unlike every other Response
# (0x2) -- looks suspicious; confirm against the BTLEJack firmware protocol.
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Advertisements_Response, packet_type=0x1, opcode=0xc)
# Disabled bindings for the legacy recover responses:
# bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Response, packet_type=0x2, opcode=0x4)
# bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Connection_AA_Chm_Response, packet_type=0x2, opcode=0x5)
# (a second, byte-identical bind of BTLEJack_Jam_Advertisements_Command that used
#  to sit here was removed; it was already registered in the command section above)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Response, packet_type=0x2, opcode=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Version_Response, packet_type=0x2, opcode=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Reset_Response, packet_type=0x2, opcode=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Scan_Connections_Response, packet_type=0x2, opcode=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Collaborative_Channel_Map_Response, packet_type=0x2, opcode=0xb)
bind_layers(BTLEJack_Hdr, BTLEJack_Debug_Response, packet_type=0x2, opcode=0xe)
bind_layers(BTLEJack_Hdr, BTLEJack_Verbose_Response, packet_type=0x2, opcode=0xf)

# Binding BTLEJack Notifications (async firmware events, packet_type=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Access_Address_Notification, packet_type=0x4, notification_type=0x0)
bind_layers(BTLEJack_Hdr, BTLEJack_CRCInit_Notification, packet_type=0x4, notification_type=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Channel_Map_Notification, packet_type=0x4, notification_type=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Hop_Interval_Notification, packet_type=0x4, notification_type=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Hop_Increment_Notification, packet_type=0x4, notification_type=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Connection_Request_Notification, packet_type=0x4, notification_type=0x6)
bind_layers(BTLEJack_Hdr, BTLEJack_Nordic_Tap_Packet_Notification, packet_type=0x4, notification_type=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Hijack_Status_Notification, packet_type=0x4, notification_type=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Connection_Lost_Notification, packet_type=0x4, notification_type=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Advertisement_Notification, packet_type=0x4, notification_type=0xa)
| 35.5 | 126 | 0.783659 | from scapy.all import *
BTLEJACK_PACKETS_TYPES = {
0x1 : "command",
0x2 : "response",
0x4 : "notification"
}
BTLEJACK_PACKETS_OPCODES = {
0x1 : "version",
0x2 : "reset",
0x3 : "scan_access_address",
0x4 : "recover",
0x5 : "recover_channel_map",
0x6 : "recover_hop_interval",
0x7 : "sniff_connection_requests",
0x8 : "enable_jamming",
0x9 : "enable_hijacking",
0xa : "send_packet",
0xb : "collaborative_channel_map",
0xe : "debug",
0xf : "verbose"
}
BTLEJACK_NOTIFICATION_TYPES = {
0x0 : "access_address",
0x1 : "crc",
0x2 : "channel_map",
0x3 : "hop_interval",
0x4 : "hop_increment",
0x5 : "packet",
0x6 : "connection_request",
0x7 : "packet_nordic",
0x8 : "hijack_status",
0x9 : "connection_lost",
0xa : "advertisement"
}
class BTLEJack_Hdr(Packet):
name = "BTLEJack Packet"
fields_desc = [
XByteField("magic",0xBC),
BitEnumField("packet_type",None, 4, BTLEJACK_PACKETS_TYPES),
ConditionalField(BitEnumField("opcode",None, 4, BTLEJACK_PACKETS_OPCODES), lambda pkt:pkt.packet_type <= 0x3),
ConditionalField(BitEnumField("notification_type",None, 4, BTLEJACK_NOTIFICATION_TYPES), lambda pkt:pkt.packet_type == 0x4),
LEShortField("length",None),
XByteField("crc",None)
]
def pre_dissect(self,data):
return data[0:4] + data[-1:] + data[4:-1]
def post_build(self,p,pay):
if self.crc is None:
self.crc = 0xFF
for byte in p+pay:
self.crc ^= byte
if self.length is None:
self.length = len(pay)
self.crc ^= self.length
return p[0:2]+struct.pack('<H',self.length)+pay+struct.pack('B',self.crc)
class BTLEJack_Version_Command(Packet):
name = "BTLEJack Version Command"
class BTLEJack_Reset_Command(Packet):
name = "BTLEJack Reset Command"
class BTLEJack_Reset_Command(Packet):
name = "BTLEJack Reset Command"
class BTLEJack_Scan_Connections_Command(Packet):
name = "BTLEJack Scan Connections Command"
class BTLEJack_Collaborative_Channel_Map_Command(Packet):
name = "BTLEJack Collaborative Channel Map Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("start_channel",0),
ByteField("end_channel",37)
]
class BTLEJack_Recover_Command(Packet):
name = "BTLEJack Recover Command"
fields_desc = [
ByteEnumField("operation_type",None, {
0x00 : "recover_crc_init",
0x01 : "recover_channel_map",
0x02 : "recover_hop"
})
]
class BTLEJack_Recover_Crcinit_Command(Packet):
name = "BTLEJack Recover CRCInit Command"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Channel_Map_Command(Packet):
name = "BTLEJack Recover Channel Map Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("start_channel",0),
ByteField("end_channel",37),
LEIntField("timeout",None)
]
class BTLEJack_Recover_Hopping_Parameters_Command(Packet):
name = "BTLEJack Recover Hopping Parameters Command"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Recover_Connection_AA_Command(Packet):
name = "BTLEJack Recover Connection AA Command"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Connection_AA_Chm_Command(Packet):
name = "BTLEJack Recover Connection AA Chm Command"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Recover_Connection_AA_Chm_HopInterval_Command(Packet):
name = "BTLEJack Recover Connection AA Chm Command"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None),
XLEShortField("hop_interval",None)
]
class BTLEJack_Sniff_Connection_Request_Command(Packet):
name = "BTLEJack Sniff Connection Request Command"
fields_desc = [
BDAddrField("address",None),
ByteField("channel",37)
]
class BTLEJack_Sniff_Advertisements_Command(Packet):
name = "BTLEJack Sniff Advertisements Command"
fields_desc = [
BDAddrField("address",None),
ByteField("channel",37)
]
class BTLEJack_Jam_Advertisements_Command(Packet):
name = "BTLEJack Jam Advertisements Command"
fields_desc = [
ByteField("channel",37),
ByteField("offset",None),
FieldLenField("pattern_length", None,fmt="B", length_of="pattern"),
StrField("pattern",None)
]
class BTLEJack_Enable_Jamming_Command(Packet):
name = "BTLEJack Enable Jamming Command"
fields_desc = [
ByteEnumField("enabled",None,{0x00 : "no",0x01 : "yes"})
]
class BTLEJack_Enable_Hijacking_Command(Packet):
name = "BTLEJack Enable Hijacking Command"
fields_desc = [
ByteEnumField("enabled",None,{0x00 : "no",0x01 : "yes"})
]
class BTLEJack_Send_Packet_Command(Packet):
name = "BTLEJack Send Packet Command"
fields_desc = [
PacketField("ble_payload",None,BTLE_DATA)
]
class BTLEJack_Send_Packet_Response(Packet):
name = "BTLEJack Send Packet Response"
class BTLEJack_Enable_Jamming_Response(Packet):
name = "BTLEJack Enable Jamming Response"
class BTLEJack_Enable_Hijacking_Response(Packet):
name = "BTLEJack Enable Hijacking Response"
class BTLEJack_Recover_Response(Packet):
name = "BTLEJack Recover Response"
class BTLEJack_Scan_Connections_Response(Packet):
name = "BTLEJack Scan Connections Response"
class BTLEJack_Collaborative_Channel_Map_Response(Packet):
name = "BTLEJack Collaborative Channel Map Response"
class BTLEJack_Version_Response(Packet):
name = "BTLEJack Version Response"
fields_desc = [
ByteField("major",None),
ByteField("minor",None)
]
class BTLEJack_Reset_Response(Packet):
name = "BTLEJack Reset Response"
class BTLEJack_Sniff_Connection_Request_Response(Packet):
name = "BTLEJack Sniff Connection Request Response"
class BTLEJack_Sniff_Advertisements_Response(Packet):
name = "BTLEJack Sniff Advertisements Response"
class BTLEJack_Jam_Advertisements_Response(Packet):
name = "BTLEJack Jam Advertisements Response"
class BTLEJack_Verbose_Response(Packet):
name = "BTLEJack Verbose Response"
fields_desc = [StrField("message",None)]
class BTLEJack_Debug_Response(Packet):
name = "BTLEJack Debug Response"
fields_desc = [StrField("message",None)]
class BTLEJack_Recover_Connection_AA_Response(Packet):
name = "BTLEJack Recover Connection AA Response"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Recover_Connection_AA_Chm_Response(Packet):
name = "BTLEJack Recover Connection AA Chm Response"
fields_desc = [
XLEIntField("access_address",None)
]
class BTLEJack_Access_Address_Notification(Packet):
name = "BTLEJack Access Address Notification"
fields_desc = [
ByteField("channel",None),
ByteField("rssi", None),
XLEIntField("access_address",None)
]
class BTLEJack_CRCInit_Notification(Packet):
name = "BTLEJack CRCInit Notification"
fields_desc = [
XLEIntField("access_address",None),
LEX3BytesField("crc_init",None),
ByteField("unused",0)
]
class BTLEJack_Channel_Map_Notification(Packet):
name = "BTLEJack Channel Map Notification"
fields_desc = [
XLEIntField("access_address",None),
BTLEChanMapField("channel_map",None)
]
class BTLEJack_Hop_Interval_Notification(Packet):
name = "BTLEJack Hop Interval Notification"
fields_desc = [
XLEIntField("access_address",None),
XLEShortField("hop_interval",None)
]
class BTLEJack_Hop_Increment_Notification(Packet):
name = "BTLEJack Hop Increment Notification"
fields_desc = [
XLEIntField("access_address",None),
ByteField("hop_increment",None)
]
class BTLEJack_Nordic_Tap_Packet_Notification(Packet):
name = "BTLEJack Nordic Tap Packet Notification"
fields_desc = [
ByteField("header_length",None),
ByteField("flags",None),
ByteField("channel",None),
ByteField("rssi",None),
LEShortField("event_counter",None),
LEIntField("delta", None),
PacketField("ble_payload",None, BTLE_DATA)
]
class BTLEJack_Hijack_Status_Notification(Packet):
name = "BTLEJack Hijack Status Notification"
fields_desc = [
ByteEnumField("status",None, {0 : "success", 1 : "failure"})
]
class BTLEJack_Connection_Lost_Notification(Packet):
name = "BTLEJack Connection Lost Notification"
class BTLEJack_Advertisement_Notification(Packet):
name = "BTLEJack Advertisement Notification"
fields_desc = [
PacketField("ble_payload",None,BTLE_ADV)
]
class BTLEJack_Connection_Request_Notification(Packet):
name = "BTLEJack Connection Request Notification"
fields_desc = [
BitEnumField("RxAdd", 0, 1, {0: "public", 1: "random"}),
BitEnumField("TxAdd", 0, 1, {0: "public", 1: "random"}),
BitField("RFU", 0, 2),
BitEnumField("PDU_type", 0, 4, {0: "ADV_IND", 1: "ADV_DIRECT_IND", 2: "ADV_NONCONN_IND", 3: "SCAN_REQ",
4: "SCAN_RSP", 5: "CONNECT_REQ", 6: "ADV_SCAN_IND"}),
ByteField("payload_length", 0x22),
PacketField("ble_payload",None,BTLE_CONNECT_REQ)
]
bind_layers(BTLEJack_Hdr, BTLEJack_Version_Command,packet_type=0x1, opcode=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Reset_Command,packet_type=0x1, opcode=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Scan_Connections_Command, packet_type=0x1,opcode=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Collaborative_Channel_Map_Command,packet_type=0x1,opcode=0xb)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Command,packet_type=0x1, opcode=0x4)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Crcinit_Command,operation_type=0x00)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Channel_Map_Command,operation_type=0x01)
bind_layers(BTLEJack_Recover_Command,BTLEJack_Recover_Hopping_Parameters_Command,operation_type=0x02)
bind_layers(BTLEJack_Hdr, BTLEJack_Jam_Advertisements_Command,packet_type=0x1, opcode=0x5)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Connection_Request_Command,packet_type=0x1,opcode=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Advertisements_Command,packet_type=0x1,opcode=0xc)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Jamming_Command,packet_type=0x1,opcode=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Hijacking_Command,packet_type=0x1,opcode=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Send_Packet_Command,packet_type=0x1,opcode=0xa)
bind_layers(BTLEJack_Hdr, BTLEJack_Send_Packet_Response,packet_type=0x2,opcode=0xa)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Jamming_Response,packet_type=0x2,opcode=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Enable_Hijacking_Response,packet_type=0x2,opcode=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Connection_Request_Response,packet_type=0x2, opcode=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Sniff_Advertisements_Response,packet_type=0x1,opcode=0xc)
bind_layers(BTLEJack_Hdr, BTLEJack_Jam_Advertisements_Command,packet_type=0x1,opcode=0x5)
bind_layers(BTLEJack_Hdr, BTLEJack_Recover_Response,packet_type=0x2, opcode=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Version_Response,packet_type=0x2, opcode=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Reset_Response,packet_type=0x2, opcode=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Scan_Connections_Response,packet_type=0x2, opcode=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Collaborative_Channel_Map_Response,packet_type=0x2, opcode=0xb)
bind_layers(BTLEJack_Hdr, BTLEJack_Debug_Response,packet_type=0x2, opcode=0xe)
bind_layers(BTLEJack_Hdr, BTLEJack_Verbose_Response,packet_type=0x2, opcode=0xf)
bind_layers(BTLEJack_Hdr, BTLEJack_Access_Address_Notification, packet_type=0x4, notification_type=0x0)
bind_layers(BTLEJack_Hdr, BTLEJack_CRCInit_Notification, packet_type=0x4, notification_type=0x1)
bind_layers(BTLEJack_Hdr, BTLEJack_Channel_Map_Notification, packet_type=0x4, notification_type=0x2)
bind_layers(BTLEJack_Hdr, BTLEJack_Hop_Interval_Notification, packet_type=0x4, notification_type=0x3)
bind_layers(BTLEJack_Hdr, BTLEJack_Hop_Increment_Notification, packet_type=0x4, notification_type=0x4)
bind_layers(BTLEJack_Hdr, BTLEJack_Nordic_Tap_Packet_Notification, packet_type=0x4, notification_type=0x7)
bind_layers(BTLEJack_Hdr, BTLEJack_Hijack_Status_Notification, packet_type=0x4, notification_type=0x8)
bind_layers(BTLEJack_Hdr, BTLEJack_Connection_Lost_Notification, packet_type=0x4, notification_type=0x9)
bind_layers(BTLEJack_Hdr, BTLEJack_Connection_Request_Notification, packet_type=0x4, notification_type=0x6)
bind_layers(BTLEJack_Hdr, BTLEJack_Advertisement_Notification, packet_type=0x4, notification_type=0xa)
| true | true |
f7198b76ba36f1f12ec60d6aea9e6f66c8d175da | 7,421 | py | Python | backend/server/models.py | thunderlink/thunderfish | a600021187a50bb078d9c36306564470cc6e9fd8 | [
"MIT"
] | 3 | 2019-04-18T04:45:27.000Z | 2019-11-06T18:17:29.000Z | backend/server/models.py | thunderlink/thunderfish | a600021187a50bb078d9c36306564470cc6e9fd8 | [
"MIT"
] | 59 | 2019-04-22T07:05:52.000Z | 2022-03-11T23:48:33.000Z | backend/server/models.py | thunderlink/thunderfish | a600021187a50bb078d9c36306564470cc6e9fd8 | [
"MIT"
] | 4 | 2019-04-24T05:49:21.000Z | 2019-11-21T00:26:00.000Z | from django.db import models
from django.contrib.auth.models import User
import re
from math import sqrt, pi
# Path to default image
DEFAULT_IMAGE = '../media/app_logo.png'
DEFAULT_PROFILE_IMG = 1
DEFAULT_MEETING_IMG = 2
MEDIA_URL = '/media/'
# Unique email for each user
User._meta.local_fields[7].__dict__['_unique'] = True
class Image(models.Model):
    """An uploaded image (used for profile and meeting photos)."""
    profile = models.ImageField(blank=True, null=False, default=DEFAULT_IMAGE)  # stored file
    title = models.CharField(max_length=100, blank=True)
    url = models.CharField(max_length=1000, blank=True, null=True)  # optional external URL

    def __str__(self):
        return str(self.id)
class Profile(models.Model):
    """Per-user application data, attached 1:1 to the auth ``User``."""
    GENDER_MALE = 0
    GENDER_FEMALE = 1
    GENDER_PRIVATE = 2
    GENDER_CHOICES = [(GENDER_MALE, 'Male'), (GENDER_FEMALE, 'Female'), (GENDER_PRIVATE, 'Private')]

    user = models.OneToOneField(User, on_delete=models.DO_NOTHING)
    nickname = models.CharField(max_length=20)
    photo = models.ForeignKey(Image, related_name="profile_photo", on_delete=models.CASCADE, default=DEFAULT_PROFILE_IMG)
    # email = models.EmailField(max_length=30)
    name = models.CharField(max_length=50)
    gender = models.IntegerField(choices=GENDER_CHOICES, default=GENDER_PRIVATE)
    region = models.CharField(max_length=100, blank=True)  # free-form; may be replaced by an API-backed value
    introduce = models.CharField(max_length=200, blank=True)  # short self-introduction

    def __str__(self):
        return self.nickname

    class Meta:
        ordering = ('name', )
class Meeting(models.Model):
    """A gathering posted by a host Profile that other profiles can apply to join."""
    STATUS_RECRUITING = 0
    STATUS_COMPLETE = 1
    STATUS_CANCELED = 2
    STATUS_CHOICES = [(STATUS_RECRUITING, 'Recruiting'), (STATUS_COMPLETE, 'Complete'), (STATUS_CANCELED, 'Canceled')]

    name = models.CharField(max_length=50)
    host = models.ForeignKey(Profile, related_name="meeting_hosted", on_delete=models.DO_NOTHING)
    date = models.DateTimeField('meeting date')
    posted_date = models.DateTimeField('posted date', auto_now_add=True)
    # applicants/members go through the Membership join table
    participant = models.ManyToManyField(Profile, through='Membership')
    max_participant = models.IntegerField()
    deadline = models.DateTimeField('meeting deadline')
    region = models.CharField(max_length=100, blank=True)
    photo = models.ForeignKey(Image, related_name="meeting_photo", on_delete=models.CASCADE, default=DEFAULT_MEETING_IMG)
    content = models.CharField(max_length=500)
    tag_set = models.ManyToManyField('Tag', blank=True)
    status = models.IntegerField(choices=STATUS_CHOICES)
    open_chat = models.URLField(max_length=100, blank=True)  # external chat-room URL
    latitude = models.DecimalField(max_digits=30, decimal_places=15, default=0, blank=True)
    longitude = models.DecimalField(max_digits=30, decimal_places=15, default=0, blank=True)

    def tag_save(self, tag_string):
        """
        Extract word tokens from ``self.content``, get-or-create a Tag for each
        and attach them to this meeting's tag_set.

        NOTE(review): the ``tag_string`` parameter is accepted but never used;
        tags are always extracted from ``self.content``.  Kept for backward
        compatibility with existing callers.
        """
        tags = re.findall(r'\b(\w+)\b', self.content)
        if not tags:
            return
        for t in tags:
            tag, tag_created = Tag.objects.get_or_create(name=t)
            self.tag_set.add(tag)

    def __str__(self):
        return self.name

    @staticmethod
    def distance_search(result, dist, lat, long):
        """
        Return ``[(meeting, distance_km)]`` for meetings in the ``result``
        queryset less than ``dist`` kilometers from (lat, long), ordered by
        increasing distance.

        NOTE(review): the formula treats a degree of longitude as equal to a
        degree of latitude (no cos(lat) correction) -- an approximation that
        over-estimates east-west distance away from the equator; confirm this
        is acceptable before relying on it for precise radii.
        """
        matches = []
        for meet in result:
            delta_phi = abs(float(meet.latitude) - lat) ** 2
            delta_theta = abs(float(meet.longitude) - long) ** 2
            # degrees -> km on a sphere of radius 6371 km
            calculated_distance = float(6371 * sqrt(delta_phi + delta_theta) * 2 * pi / 360)
            if calculated_distance <= dist:
                matches.append((result.get(pk=meet.id), calculated_distance))
        matches.sort(key=lambda item: item[1])
        return matches

    class Meta:
        ordering = ['-id']
class Tag(models.Model):
    """A unique tag word that can be attached to meetings."""
    name = models.CharField(max_length=100, unique=True)

    def __str__(self):
        return self.name
class Comment(models.Model):
    """A comment posted on a meeting page by a profile."""
    date = models.DateTimeField('commented date', auto_now_add=True)
    comment_text = models.CharField(max_length=1000, default="Test Text")
    # parent_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
    parent_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
    writer = models.ForeignKey(Profile, on_delete=models.CASCADE)

    def __str__(self):
        return self.comment_text

    # For notification 1 : New comment for host
    def save(self, *args, **kwargs):
        # Side effect: EVERY save (creation or edit) notifies the meeting host
        # of a new comment before persisting the comment itself.
        notification = Notification(meeting=self.parent_meeting, profile=self.parent_meeting.host, notification = Notification.NOTIFICATION_NEW_COMMENT_FOR_HOST)
        notification.save()
        super().save(*args, **kwargs)
# we should add url field.
class Notification(models.Model):
    """An in-app notification delivered to a profile about a meeting event."""
    # notification type codes
    NOTIFICATION_NEW_APPLY = 0
    NOTIFICATION_NEW_COMMENT_FOR_HOST = 1
    NOTIFICATION_APPLY_REJECTED = 2
    NOTIFICATION_APPLY_APPROVED = 3
    NOTIFICATION_CHOICES = [(NOTIFICATION_NEW_APPLY, 'new apply'), (NOTIFICATION_NEW_COMMENT_FOR_HOST, 'new comment for host'), (NOTIFICATION_APPLY_REJECTED, 'apply is rejected'), (NOTIFICATION_APPLY_APPROVED, 'apply is approved')]

    profile = models.ForeignKey(Profile, on_delete=models.CASCADE)  # recipient
    checked = models.BooleanField(default=False)  # True once the user has seen it
    meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, null=True)
    notification = models.IntegerField(choices=NOTIFICATION_CHOICES)

    def __str__(self):
        return str(self.profile)

    class Meta:
        # unread notifications first, newest first within each group
        ordering = ['checked', '-id']
class Membership(models.Model):
    """
    Join table between Profile and Meeting recording an application to join
    a meeting and its approval status.
    """
    STATUS_WAITING = 0
    STATUS_APPROVED = 1
    STATUS_REJECTED = 2
    STATUS_CHOICES = [(STATUS_WAITING, 'waiting'), (STATUS_APPROVED, 'approved'), (STATUS_REJECTED, 'rejected')]

    profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
    meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    status = models.IntegerField(choices=STATUS_CHOICES)
    message = models.CharField(max_length=500, null=True, blank=True)  # optional application message

    def __str__(self):
        return str(self.meeting.id) + '@' + str(self.profile.id)

    class Meta:
        unique_together = (
            ('profile', 'meeting')
        )

    def save(self, *args, **kwargs):
        """
        Persist the membership and emit the matching Notification:
        a brand-new application notifies the host; an update whose status is
        approved/rejected notifies the applicant.

        NOTE: the approval/rejection notification fires on every save while the
        status matches, not only on the transition.
        """
        if self.pk is None:
            # new application -> notify the meeting host
            notification = Notification(meeting=self.meeting, profile=self.meeting.host,
                                        notification=Notification.NOTIFICATION_NEW_APPLY)
            notification.save()
        elif self.status == self.STATUS_APPROVED:
            notification = Notification(meeting=self.meeting, profile=self.profile,
                                        notification=Notification.NOTIFICATION_APPLY_APPROVED)
            notification.save()
        elif self.status == self.STATUS_REJECTED:
            notification = Notification(meeting=self.meeting, profile=self.profile,
                                        notification=Notification.NOTIFICATION_APPLY_REJECTED)
            notification.save()
        super().save(*args, **kwargs)
| 41 | 229 | 0.694381 | from django.db import models
from django.contrib.auth.models import User
import re
from math import sqrt, pi
DEFAULT_IMAGE = '../media/app_logo.png'
DEFAULT_PROFILE_IMG = 1
DEFAULT_MEETING_IMG = 2
MEDIA_URL = '/media/'
User._meta.local_fields[7].__dict__['_unique'] = True
class Image(models.Model):
profile = models.ImageField(blank=True, null=False, default=DEFAULT_IMAGE)
title = models.CharField(max_length=100, blank=True)
url = models.CharField(max_length=1000, blank=True, null=True)
def __str__(self):
return str(self.id)
class Profile(models.Model):
GENDER_MALE = 0
GENDER_FEMALE = 1
GENDER_PRIVATE = 2
GENDER_CHOICES = [(GENDER_MALE, 'Male'), (GENDER_FEMALE, 'Female'), (GENDER_PRIVATE, 'Private')]
user = models.OneToOneField(User, on_delete=models.DO_NOTHING)
nickname = models.CharField(max_length=20)
photo = models.ForeignKey(Image, related_name="profile_photo", on_delete=models.CASCADE, default=DEFAULT_PROFILE_IMG)
name = models.CharField(max_length=50)
gender = models.IntegerField(choices=GENDER_CHOICES, default=GENDER_PRIVATE)
region = models.CharField(max_length=100, blank = True)
introduce = models.CharField(max_length=200, blank = True)
def __str__(self):
return self.nickname
class Meta:
ordering = ('name', )
class Meeting(models.Model):
STATUS_RECRUITING = 0
STATUS_COMPLETE = 1
STATUS_CANCELED = 2
STATUS_CHOICES = [(STATUS_RECRUITING, 'Recruiting'), (STATUS_COMPLETE, 'Complete'), (STATUS_CANCELED, 'Canceled')]
name = models.CharField(max_length=50)
host = models.ForeignKey(Profile, related_name="meeting_hosted", on_delete=models.DO_NOTHING)
date = models.DateTimeField('meeting date')
posted_date = models.DateTimeField('posted date', auto_now_add=True)
participant = models.ManyToManyField(Profile, through = 'Membership')
max_participant = models.IntegerField()
deadline = models.DateTimeField('meeting deadline')
region = models.CharField(max_length=100, blank=True)
photo = models.ForeignKey(Image, related_name="meeting_photo", on_delete=models.CASCADE, default=DEFAULT_MEETING_IMG)
content = models.CharField(max_length=500)
tag_set = models.ManyToManyField('Tag', blank=True)
status = models.IntegerField(choices=STATUS_CHOICES)
open_chat = models.URLField(max_length=100, blank=True)
latitude = models.DecimalField(max_digits=30, decimal_places=15, default=0, blank=True)
longitude = models.DecimalField(max_digits=30, decimal_places=15, default=0, blank=True)
def tag_save(self, tag_string):
tags = re.findall(r'\b(\w+)\b', self.content)
if not tags:
return
for t in tags:
tag, tag_created = Tag.objects.get_or_create(name=t)
self.tag_set.add(tag)
def __str__(self):
return self.name
@staticmethod
def distance_search(result, dist, lat, long):
delta_theta = abs(float(meet.longitude) - long) ** 2
calculated_distance = float(6371 * sqrt(delta_phi + delta_theta) * 2 * pi / 360)
if calculated_distance <= dist:
ret.append((result.get(pk=meet.id), calculated_distance))
ret.sort(key = lambda item : item[1])
print(ret)
return ret
class Meta:
ordering = ['-id']
class Tag(models.Model):
name = models.CharField(max_length=100, unique=True)
def __str__(self):
return self.name
class Comment(models.Model):
date = models.DateTimeField('commented date', auto_now_add=True)
comment_text = models.CharField(max_length=1000, default="Test Text")
parent_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
writer = models.ForeignKey(Profile, on_delete=models.CASCADE)
def __str__(self):
return self.comment_text
def save(self, *args, **kwargs):
notification = Notification(meeting=self.parent_meeting, profile=self.parent_meeting.host, notification = Notification.NOTIFICATION_NEW_COMMENT_FOR_HOST)
notification.save()
super().save(*args, **kwargs)
class Notification(models.Model):
NOTIFICATION_NEW_APPLY = 0
NOTIFICATION_NEW_COMMENT_FOR_HOST = 1
NOTIFICATION_APPLY_REJECTED = 2
NOTIFICATION_APPLY_APPROVED = 3
NOTIFICATION_CHOICES = [(NOTIFICATION_NEW_APPLY, 'new apply'), (NOTIFICATION_NEW_COMMENT_FOR_HOST, 'new comment for host'),(NOTIFICATION_APPLY_REJECTED, 'apply is rejected'),(NOTIFICATION_APPLY_APPROVED, 'apply is approved')]
profile = models.ForeignKey(Profile,on_delete=models.CASCADE)
checked = models.BooleanField(default=False)
meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, null=True)
notification = models.IntegerField(choices=NOTIFICATION_CHOICES)
def __str__(self):
return str(self.profile)
class Meta:
ordering = ['checked', '-id']
class Membership(models.Model):
STATUS_WAITING = 0
STATUS_APPROVED = 1
STATUS_REJECTED = 2
STATUS_CHOICES = [(STATUS_WAITING, 'waiting'), (STATUS_APPROVED, 'approved'), (STATUS_REJECTED, 'rejected')]
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
status = models.IntegerField(choices=STATUS_CHOICES)
message = models.CharField(max_length = 500, null=True, blank=True)
def __str__(self):
return str(self.meeting.id) + '@' + str(self.profile.id)
class Meta:
unique_together = (
('profile', 'meeting')
)
def save(self, *args, **kwargs):
if(self.pk==None):
notification = Notification(meeting=self.meeting, profile=self.meeting.host, notification = Notification.NOTIFICATION_NEW_APPLY)
notification.save()
else:
if(self.status == self.STATUS_CHOICES[1][0]):
notification = Notification(meeting=self.meeting, profile=self.profile, notification = Notification.NOTIFICATION_APPLY_APPROVED)
notification.save()
print("Notify")
elif(self.status == self.STATUS_CHOICES[2][0]):
notification = Notification(meeting = self.meeting, profile = self.profile, notification = Notification.NOTIFICATION_APPLY_REJECTED)
notification.save()
super().save(*args, **kwargs)
| true | true |
f7198bd1b623cee47276165d5348854e67b0535b | 45,311 | py | Python | pyNastran/dev/bdf_vectorized/cards/dynamic.py | Msegade/pyNastran | ae36548579c6bb2ee3a4fff207f7211c1986a5ab | [
"BSD-3-Clause"
] | null | null | null | pyNastran/dev/bdf_vectorized/cards/dynamic.py | Msegade/pyNastran | ae36548579c6bb2ee3a4fff207f7211c1986a5ab | [
"BSD-3-Clause"
] | null | null | null | pyNastran/dev/bdf_vectorized/cards/dynamic.py | Msegade/pyNastran | ae36548579c6bb2ee3a4fff207f7211c1986a5ab | [
"BSD-3-Clause"
] | 1 | 2020-10-04T19:28:07.000Z | 2020-10-04T19:28:07.000Z | # pylint: disable=C0103,R0902,R0904,R0914
"""
All dynamic control cards are defined in this file. This includes:
* FREQ
* FREQ1
* FREQ2 (not implemented)
* FREQ3
* FREQ4
* FREQ5 (not implemented)
* NLPCI
* NLPARM
* TSTEP
* TSTEPNL
All cards are BaseCard objects.
"""
from math import log, exp, ceil
from typing import TYPE_CHECKING

import numpy as np
from numpy import unique, hstack

from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.bdf_interface.assign_type import (
    integer, integer_or_blank, double, double_or_blank,
    string_or_blank, blank, fields, components_or_blank
)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
class DELAY(BaseCard):
    """
    Defines the time delay term tau in the equations of the dynamic loading
    function.

    +-------+-----+-----------+-----+--------+------+-----+--------+-----+
    |   1   |  2  |     3     |  4  |   5    |  6   |  7  |   8    |  9  |
    +=======+=====+===========+=====+========+======+=====+========+=====+
    | DELAY | SID | POINT ID1 | C1  |   T1   |  P2  | C2  |   T2   |     |
    +-------+-----+-----------+-----+--------+------+-----+--------+-----+
    """
    type = 'DELAY'

    def __init__(self, sid, nodes, components, delays, comment=''):
        """
        Creates a DELAY card.

        Parameters
        ----------
        sid : int
            identification number of the DELAY entry (> 0)
        nodes : list[int]
            grid, extra, or scalar point ids
        components : list[int]
            component per point (1-6 for grid points; 0 for extra/scalar)
        delays : list[float]
            time delay tau for each (point, component) pair
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        #: Identification number of DELAY entry. (Integer > 0)
        self.sid = sid
        #: Grid, extra, or scalar point identification number. (Integer > 0)
        self.nodes = nodes
        #: Component number. (Integers 1 through 6 for grid points; zero or
        #: blank for extra or scalar points)
        self.components = components
        #: Time delay (tau) for designated point Pi and component Ci. (Real)
        self.delays = delays

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a DELAY card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        nodes = [integer(card, 2, 'node')]
        components = [integer(card, 3, 'components')]
        delays = [double_or_blank(card, 4, 'delay')]
        assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
        if card.field(5):
            # optional second (point, component, delay) triplet
            nodes.append(integer(card, 5, 'node'))
            components.append(integer(card, 6, 'components'))
            delays.append(double_or_blank(card, 7, 'delay'))
            assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
        return DELAY(sid, nodes, components, delays, comment=comment)

    def add(self, delay):
        """Merges another DELAY card with the same sid into this one."""
        assert self.sid == delay.sid, 'sid=%s delay.sid=%s' % (self.sid, delay.sid)
        if delay.comment:
            # bug fix: hasattr('_comment') was missing the object argument and
            # raised TypeError whenever the incoming card carried a comment
            if hasattr(self, '_comment'):
                self._comment += delay.comment
            else:
                self._comment = delay.comment
        self.nodes += delay.nodes
        self.components += delay.components
        self.delays += delay.delays

    def get_delay_at_freq(self, freq):
        # NOTE(review): freq is currently ignored; the raw (nodes, components,
        # delays) triple is returned regardless of frequency
        return self.nodes, self.components, self.delays

    @property
    def node_ids(self):
        """
        Gets the point ids whether or not the nodes have been
        cross-referenced (plain ints vs. objects exposing ``.nid``).

        bug fix: the old implementation referenced ``node_id1``/``node_id2``
        properties that were commented out, so it always raised AttributeError.
        """
        return [node if isinstance(node, integer_types) else node.nid
                for node in self.nodes]

    def raw_fields(self):
        """Returns the card fields as a flat list: ['DELAY', sid, n1, c1, t1, ...]"""
        list_fields = ['DELAY', self.sid]
        for nid, comp, delay in zip(self.node_ids, self.components, self.delays):
            list_fields += [nid, comp, delay]
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Writes one single-triplet DELAY line per (point, component, delay)."""
        msg = self.comment
        node_ids = self.node_ids
        if size == 8:
            for nid, comp, delay in zip(node_ids, self.components, self.delays):
                msg += print_card_8(['DELAY', self.sid, nid, comp, delay])
        else:
            for nid, comp, delay in zip(node_ids, self.components, self.delays):
                msg += print_card_16(['DELAY', self.sid, nid, comp, delay])
        return msg
class DPHASE(BaseCard):
    """
    Defines the phase lead term (theta) for designated points and
    components in dynamic loading.

    +--------+-----+-----------+-----+------+------+-----+-----+-----+
    |    1   |  2  |     3     |  4  |   5  |   6  |  7  |  8  |  9  |
    +========+=====+===========+=====+======+======+=====+=====+=====+
    | DPHASE | SID | POINT ID1 |  C1 | TH1  |  P2  |  C2 | TH2 |     |
    +--------+-----+-----------+-----+------+------+-----+-----+-----+
    """
    type = 'DPHASE'

    def __init__(self, sid, nodes, components, phase_leads, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid                    # DPHASE set id
        self.nodes = nodes                # 1 or 2 point ids
        self.components = components      # component numbers (0-6)
        self.phase_leads = phase_leads    # phase lead per point

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a DPHASE card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        nodes = [integer(card, 2, 'node')]
        components = [integer(card, 3, 'components')]
        phase_leads = [double_or_blank(card, 4, 'phase_lead')]
        assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
        # fields 5-7 hold an optional second (point, component, lead) triple
        if card.field(5):
            nodes.append(integer(card, 5, 'node'))
            components.append(integer(card, 6, 'components'))
            phase_leads.append(double_or_blank(card, 7, 'phase_lead'))
            assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
        return DPHASE(sid, nodes, components, phase_leads, comment=comment)

    def add(self, dphase):
        """
        Merge another DPHASE card with the same sid into this one,
        concatenating its nodes/components/phase leads.
        """
        assert self.sid == dphase.sid, 'sid=%s dphase.sid=%s' % (self.sid, dphase.sid)
        if dphase.comment:
            # bug fix: hasattr() requires (object, name); the original
            # one-argument call raised TypeError whenever the incoming
            # card carried a comment
            if hasattr(self, '_comment'):
                self._comment += dphase.comment
            else:
                self._comment = dphase.comment
        self.nodes += dphase.nodes
        self.components += dphase.components
        self.phase_leads += dphase.phase_leads

    @property
    def node_ids(self):
        """
        Node ids referenced by the card (1 or 2 entries).

        bug fix: the ``node_id1``/``node_id2`` helper properties were
        commented out, so the original lookups raised AttributeError; read
        ``self.nodes`` directly, unwrapping GRID objects if present
        (cross_reference is currently commented out, so entries are
        normally plain integers).
        """
        nid0 = self.nodes[0]
        node_ids = [nid0 if isinstance(nid0, integer_types) else nid0.nid]
        if len(self.components) == 2:
            nid1 = self.nodes[1]
            node_ids.append(nid1 if isinstance(nid1, integer_types) else nid1.nid)
        return node_ids

    def raw_fields(self):
        """Fields in card order: DPHASE, SID, then (Pi, Ci, THi) per point."""
        list_fields = ['DPHASE', self.sid]
        for nid, comp, delay in zip(self.nodes, self.components, self.phase_leads):
            nidi = nid if isinstance(nid, integer_types) else nid.nid
            list_fields.extend([nidi, comp, delay])
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write one DPHASE line per point, in 8- or 16-char field format."""
        printer = print_card_8 if size == 8 else print_card_16
        msg = self.comment
        for nid, comp, delay in zip(self.node_ids, self.components, self.phase_leads):
            msg += printer(['DPHASE', self.sid, nid, comp, delay])
        return msg
class FREQ(BaseCard):
    """
    Defines a set of frequencies to be used in the solution of frequency
    response problems.

    +------+-----+-----+-----+------+-----+-----+-----+-----+
    |   1  |  2  |  3  |  4  |   5  |  6  |  7  |  8  |  9  |
    +======+=====+=====+=====+======+=====+=====+=====+=====+
    | FREQ | SID |  F1 |  F2 | etc. |     |     |     |     |
    +------+-----+-----+-----+------+-----+-----+-----+-----+
    """
    type = 'FREQ'

    def __init__(self, sid, freqs, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        # keep the list sorted and deduplicated
        self.freqs = np.unique(freqs)

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a FREQ card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        freqs = fields(double, card, 'freq', i=2, j=len(card))
        return FREQ(sid, freqs, comment=comment)

    def get_freqs(self):
        """Return the (sorted, unique) frequency array."""
        return self.freqs

    def add_frequencies(self, freqs):
        """
        Combines the frequencies from 1 FREQx object with another.
        All FREQi entries with the same frequency set identification numbers
        will be used. Duplicate frequencies will be ignored.

        Parameters
        ----------
        freqs : array-like of float
            the frequencies for a FREQx object
        """
        self.freqs = np.unique(np.hstack([self.freqs, freqs]))

    def add_frequency_object(self, freq):
        """
        Absorb the frequencies of another FREQx object.

        .. seealso:: :func:`add_frequencies`
        """
        self.add_frequencies(freq.freqs)

    def raw_fields(self):
        return ['FREQ', self.sid] + list(self.freqs)

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        printer = print_card_8 if size == 8 else print_card_16
        return self.comment + printer(self.repr_fields())
class FREQ1(FREQ):
    """
    Defines a set of frequencies to be used in the solution of frequency
    response problems by specification of a starting frequency, frequency
    increment, and the number of increments desired.

    +-------+-----+-----+-----+-----+-----+-----+-----+-----+
    |   1   |  2  |  3  |  4  |  5  |  6  |  7  |  8  |  9  |
    +=======+=====+=====+=====+=====+=====+=====+=====+=====+
    | FREQ1 | SID |  F1 |  DF | NDF |     |     |     |     |
    +-------+-----+-----+-----+-----+-----+-----+-----+-----+

    .. note:: this card rewrites as a FREQ card
    """
    type = 'FREQ1'

    def __init__(self, sid, f1, df, ndf, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        self.f1 = f1
        self.df = df
        self.ndf = ndf
        # expand the (start, step, count) spec into the explicit list
        self.freqs = unique([f1 + i * df for i in range(ndf)])

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a FREQ1 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        f1 = double_or_blank(card, 2, 'f1', 0.0)
        df = double(card, 3, 'df')
        ndf = integer_or_blank(card, 4, 'ndf', 1)
        assert len(card) <= 5, 'len(FREQ card) = %i\ncard=%s' % (len(card), card)
        return FREQ1(sid, f1, df, ndf, comment=comment)

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        printer = print_card_8 if size == 8 else print_card_16
        return self.comment + printer(self.repr_fields())
class FREQ2(FREQ):
    """
    Defines a set of frequencies to be used in the solution of frequency
    response problems by specification of a starting frequency, final
    frequency, and the number of logarithmic increments desired.

    +-------+-----+-----+-----+-----+-----+-----+-----+-----+
    |   1   |  2  |  3  |  4  |  5  |  6  |  7  |  8  |  9  |
    +=======+=====+=====+=====+=====+=====+=====+=====+=====+
    | FREQ2 | SID |  F1 |  F2 | NDF |     |     |     |     |
    +-------+-----+-----+-----+-----+-----+-----+-----+-----+

    .. note:: this card rewrites as a FREQ card
    """
    type = 'FREQ2'

    def __init__(self, sid, f1, f2, ndf=1, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        self.f1 = f1
        self.f2 = f2
        self.ndf = ndf
        # logarithmic spacing exponent; assumes f1 > 0 and f2 > 0 -- TODO confirm
        d = 1. / ndf * log(f2 / f1)
        self.freqs = np.unique([f1 * exp(i * d) for i in range(ndf)])  # 0 based index

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a FREQ2 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        f1 = double(card, 2, 'f1')  # default=0.0 ?
        f2 = double(card, 3, 'f2')
        ndf = integer_or_blank(card, 4, 'nf', 1)
        assert len(card) <= 5, 'len(FREQ2 card) = %i\ncard=%s' % (len(card), card)
        return FREQ2(sid, f1, f2, ndf, comment=comment)
class FREQ3(FREQ):
    """
    +-------+-----+------+-------+--------+-----+---------+
    |   1   |  2  |  3   |   4   |    5   |  6  |    7    |
    +=======+=====+======+=======+========+=====+=========+
    | FREQ3 | SID |  F1  |  F2   |  TYPE  | NEF | CLUSTER |
    +-------+-----+------+-------+--------+-----+---------+
    | FREQ3 |  6  | 20.0 | 200.0 | LINEAR | 10  |   2.0   |
    +-------+-----+------+-------+--------+-----+---------+
    """
    type = 'FREQ3'

    def __init__(self, sid, f1, f2=None, Type='LINEAR', nef=10, cluster=1.0, comment=''):
        """
        bug fix: the original signature was missing ``sid`` even though both
        the body (``self.sid = sid``) and ``add_card`` used it, so every
        construction raised NameError.
        """
        if comment:
            self.comment = comment
        if f2 is None:
            # F2 defaults to F1 when blank
            f2 = f1
        self.sid = sid
        self.f1 = f1
        self.f2 = f2
        self.Type = Type
        self.nef = nef
        self.cluster = cluster

    @classmethod
    def add_card(cls, card, comment=''):
        """Adds a FREQ3 card from ``BDF.add_card(...)``."""
        sid = integer(card, 1, 'sid')
        # bug fix: every field was read from index 1 (f1 was parsed out of the
        # sid field, f2 as an integer, ...); use the card's actual columns,
        # matching the table in the class docstring
        f1 = double(card, 2, 'f1')
        f2 = double_or_blank(card, 3, 'f2', f1)
        Type = string_or_blank(card, 4, 'Type', 'LINEAR')
        nef = integer_or_blank(card, 5, 'nef', 10)
        cluster = double_or_blank(card, 6, 'cluster', 1.0)
        # bug fix: forward the caller's comment instead of discarding it
        return FREQ3(sid, f1, f2, Type, nef, cluster, comment=comment)

    def raw_fields(self):
        return ['FREQ3', self.sid, self.f1, self.f2, self.Type, self.nef, self.cluster]

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
class FREQ4(FREQ):
    """
    Defines a set of frequencies used in the solution of modal frequency
    response problems by specifying the amount of 'spread' around each natural
    frequency and the number of equally spaced excitation frequencies within
    the spread.

    +-------+-----+-----+-----+------+-----+-----+-----+-----+
    |   1   |  2  |  3  |  4  |   5  |  6  |  7  |  8  |  9  |
    +=======+=====+=====+=====+======+=====+=====+=====+=====+
    | FREQ4 | SID |  F1 |  F2 | FSPD | NFM |     |     |     |
    +-------+-----+-----+-----+------+-----+-----+-----+-----+

    .. note:: this card rewrites as a FREQ card
    .. todo:: not done...
    """
    type = 'FREQ4'

    def __init__(self, sid, f1, f2, fspread, nfm, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        self.f1 = f1
        self.f2 = f2
        self.fspread = fspread
        self.nfm = nfm

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a FREQ4 card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        f1 = double_or_blank(card, 2, 'f1', 0.0)
        f2 = double_or_blank(card, 3, 'f2', 1.e20)
        fspread = double_or_blank(card, 4, 'fspd', 0.1)
        nfm = integer_or_blank(card, 5, 'nfm', 3)
        assert len(card) <= 6, 'len(FREQ card) = %i\ncard=%s' % (len(card), card)
        return FREQ4(sid, f1, f2, fspread, nfm, comment=comment)

    def raw_fields(self):
        return ['FREQ4', self.sid, self.f1, self.f2, self.fspread, self.nfm]

    def repr_fields(self):
        return self.raw_fields()

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        printer = print_card_8 if size == 8 else print_card_16
        return self.comment + printer(self.repr_fields())
#class FREQ5(FREQ):
#type = 'FREQ5'
#def __init__(self, card=None, data=None, comment=''):
#if comment:
# self.comment = comment
#raise NotImplementedError()
#def write_card(self, size: int=8, is_double: bool=False) -> str:
#card = self.repr_fields()
#if size == 8:
#return self.comment + print_card_8(card)
#return self.comment + print_card_16(card)
class NLPARM(BaseCard):
    """
    Defines a set of parameters for nonlinear static analysis iteration
    strategy.

    +--------+--------+------+------+---------+-------+---------+---------+--------+
    |    1   |    2   |   3  |   4  |    5    |   6   |    7    |    8    |    9   |
    +========+========+======+======+=========+=======+=========+=========+========+
    | NLPARM |   ID   | NINC |  DT  | KMETHOD | KSTEP | MAXITER |   CONV  | INTOUT |
    +--------+--------+------+------+---------+-------+---------+---------+--------+
    |        |  ESPU  | EPSP | EPSW |  MAXDIV | MAXQN |  MAXLS  | FSTRESS |  LSTOL |
    +--------+--------+------+------+---------+-------+---------+---------+--------+
    |        | MAXBIS |      |      |         |  MAXR |         |  RTOLB  |  CONV  |
    +--------+--------+------+------+---------+-------+---------+---------+--------+
    """
    type = 'NLPARM'
    def __init__(self, nlparm_id, ninc=10, dt=0.0, kmethod='AUTO', kstep=5,
                 max_iter=25, conv='PW', int_out='NO',
                 eps_u=0.01, eps_p=0.01, eps_w=0.01, max_div=3, max_qn=None, max_ls=4,
                 fstress=0.2, ls_tol=0.5, max_bisect=5, max_r=20., rtol_b=20., comment=''):
        """
        Creates an NLPARM card; the parameter names mirror the card fields
        shown in the class docstring.  ``max_qn=None`` means "pick the
        default": 0 for KMETHOD='PFNT', otherwise MAXITER (see below).
        """
        if comment:
            self.comment = comment
        # line 1 of the card
        self.nlparm_id = nlparm_id
        self.ninc = ninc
        self.dt = dt
        self.kmethod = kmethod
        self.kstep = kstep
        self.max_iter = max_iter
        self.conv = conv
        self.int_out = int_out
        # line 2
        self.eps_p = eps_p
        self.eps_u = eps_u
        self.eps_w = eps_w
        self.max_div = max_div
        self.max_qn = max_qn
        self.max_ls = max_ls
        self.fstress = fstress
        self.ls_tol = ls_tol
        # line 3
        self.max_bisect = max_bisect
        self.max_r = max_r
        self.rtol_b = rtol_b
        # resolve the MAXQN default; same rule as add_card below
        if self.max_qn is None:
            if kmethod == 'PFNT':
                self.max_qn = 0
            else:
                self.max_qn = max_iter
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a NLPARM card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        nlparm_id = integer(card, 1, 'nlparm_id')
        ninc = integer_or_blank(card, 2, 'ninc', 10)
        dt = double_or_blank(card, 3, 'dt', 0.0)
        kmethod = string_or_blank(card, 4, 'kmethod', 'AUTO')
        kstep = integer_or_blank(card, 5, 'kstep', 5)
        max_iter = integer_or_blank(card, 6, 'max_iter', 25)
        conv = string_or_blank(card, 7, 'conv', 'PW')
        int_out = string_or_blank(card, 8, 'intOut', 'NO')
        # line 2
        eps_u = double_or_blank(card, 9, 'eps_u', 0.01)
        eps_p = double_or_blank(card, 10, 'eps_p', 0.01)
        eps_w = double_or_blank(card, 11, 'eps_w', 0.01)
        max_div = integer_or_blank(card, 12, 'max_div', 3)
        # MAXQN default depends on the stiffness-update method
        if kmethod == 'PFNT':
            max_qn = integer_or_blank(card, 13, 'max_qn', 0)
        else:
            max_qn = integer_or_blank(card, 13, 'max_qn', max_iter)
        max_ls = integer_or_blank(card, 14, 'max_ls', 4)
        fstress = double_or_blank(card, 15, 'fstress', 0.2)
        ls_tol = double_or_blank(card, 16, 'ls_tol', 0.5)
        # line 3 (fields 18-20 and 22 are unused)
        max_bisect = integer_or_blank(card, 17, 'max_bisect', 5)
        max_r = double_or_blank(card, 21, 'max_r', 20.)
        rtol_b = double_or_blank(card, 23, 'rtol_b', 20.)
        assert len(card) <= 24, 'len(NLPARM card) = %i\ncard=%s' % (len(card), card)
        return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
                      int_out, eps_u, eps_p, eps_w, max_div,
                      max_qn, max_ls, fstress,
                      ls_tol, max_bisect, max_r,
                      rtol_b, comment=comment)
    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a NLPARM card from the OP2

        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card
        """
        (nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv, int_out, eps_u, eps_p,
         eps_w, max_div, max_qn, max_ls, fstress, ls_tol, max_bisect, max_r,
         rtol_b) = data
        # map the OP2 integer codes to the BDF string values
        if kmethod == 1:
            kmethod = 'AUTO'
        elif kmethod == 2:
            kmethod = 'ITER'
        elif kmethod == 4:
            kmethod = 'SEMI'
        elif kmethod == 3:
            kmethod = 'ADAPT'
        else:
            msg = 'nlparm_id=%s kmethod=%r data=%s' % (nlparm_id, kmethod, data)
            raise NotImplementedError(msg)
        if conv == 1:
            conv = 'W'
        elif conv == 2:
            conv = 'P'
        elif conv == 3:
            conv = 'PW'
        elif conv == 4:
            conv = 'U'
        elif conv == 5:
            conv = 'UW'
        elif conv == 6:
            conv = 'UP'
        elif conv == 7:
            conv = 'UPW'
        else:
            msg = 'nlparm_id=%s conv=%r data=%s' % (nlparm_id, conv, data)
            raise NotImplementedError(msg)
        if int_out == 0:
            int_out = 'NO'
        elif int_out == 1:
            int_out = 'YES'
        elif int_out == 2:
            int_out = 'ALL'
        else:
            msg = 'nlparm_id=%s int_out=%r data=%s' % (nlparm_id, int_out, data)
            raise NotImplementedError(msg)
        return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
                      int_out, eps_u, eps_p, eps_w, max_div,
                      max_qn, max_ls, fstress,
                      ls_tol, max_bisect, max_r,
                      rtol_b, comment=comment)
    def raw_fields(self):
        """Fields in write-order; the Nones are the card's unused columns."""
        list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.kmethod,
                       self.kstep, self.max_iter, self.conv, self.int_out, self.eps_u,
                       self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
                       self.fstress, self.ls_tol, self.max_bisect, None, None, None,
                       self.max_r, None, self.rtol_b]
        return list_fields
    def repr_fields(self):
        """Same as raw_fields, but with default-valued fields blanked out."""
        ninc = set_blank_if_default(self.ninc, 10)
        dt = set_blank_if_default(self.dt, 0.0)
        kmethod = set_blank_if_default(self.kmethod, 'AUTO')
        kstep = set_blank_if_default(self.kstep, 5)
        max_iter = set_blank_if_default(self.max_iter, 25)
        conv = set_blank_if_default(self.conv, 'PW')
        int_out = set_blank_if_default(self.int_out, 'NO')
        eps_u = set_blank_if_default(self.eps_u, 0.01)
        eps_p = set_blank_if_default(self.eps_p, 0.01)
        eps_w = set_blank_if_default(self.eps_w, 0.01)
        max_div = set_blank_if_default(self.max_div, 3)
        max_qn = set_blank_if_default(self.max_qn, self.max_iter)
        max_ls = set_blank_if_default(self.max_ls, 4)
        fstress = set_blank_if_default(self.fstress, 0.2)
        ls_tol = set_blank_if_default(self.ls_tol, 0.5)
        max_bisect = set_blank_if_default(self.max_bisect, 5)
        max_r = set_blank_if_default(self.max_r, 20.)
        rtol_b = set_blank_if_default(self.rtol_b, 20.)
        list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kmethod, kstep, max_iter,
                       conv, int_out, eps_u, eps_p, eps_w, max_div, max_qn, max_ls,
                       fstress, ls_tol, max_bisect, None, None, None, max_r, None,
                       rtol_b]
        return list_fields
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write the card in 8- or 16-character field format."""
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card) # having trouble with double precision...
        return self.comment + print_card_16(card)
class NLPCI(BaseCard):
    """
    NLPCI solution-control card; used together with the NLPARM entry of the
    same id (see the Nastran QRG for the field meanings).
    """
    type = 'NLPCI'

    def __init__(self, nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,
                 scale=0., desiter=12, mxinc=20, comment=''):
        if comment:
            self.comment = comment
        self.nlpci_id = nlpci_id
        self.Type = Type
        self.minalr = minalr
        self.maxalr = maxalr
        self.scale = scale
        self.desiter = desiter
        self.mxinc = mxinc

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a NLPCI card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        nlpci_id = integer(card, 1, 'nlpci_id')
        Type = string_or_blank(card, 2, 'Type', 'CRIS')
        minalr = double_or_blank(card, 3, 'minalr', 0.25)
        maxalr = double_or_blank(card, 4, 'maxalr', 4.0)
        scale = double_or_blank(card, 5, 'scale', 0.0)
        blank(card, 6, 'blank')  # field 6 must be empty
        desiter = integer_or_blank(card, 7, 'desiter', 12)
        mxinc = integer_or_blank(card, 8, 'mxinc', 20)
        return NLPCI(nlpci_id, Type=Type, minalr=minalr, maxalr=maxalr,
                     scale=scale, desiter=desiter, mxinc=mxinc, comment=comment)

    def raw_fields(self):
        return ['NLPCI', self.nlpci_id, self.Type, self.minalr,
                self.maxalr, self.scale, None, self.desiter, self.mxinc]

    def repr_fields(self):
        #minalr = set_blank_if_default(self.minalr, 0.25)
        return self.raw_fields()

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        printer = print_card_8 if size == 8 else print_card_16
        return self.comment + printer(self.repr_fields())
class TF(BaseCard):
    """
    Defines a dynamic transfer function of the form:
        (B0 + B1 p + B2 *p2)*ud  sum(A0_i + A1_i*p + A2_i*p2)*ui = 0

    +----+-----+-----+------+------+------+--------+----+----+
    |  1 |  2  |  3  |   4  |   5  |   6  |    7   |  8 |  9 |
    +====+=====+=====+======+======+======+========+====+====+
    | TF | SID |  GD |  CD  |  B0  |  B1  |   B2   |    |    |
    +----+-----+-----+------+------+------+--------+----+----+
    |    | G_1 | C_1 | A0_1 | A1_1 | A2_1 |  etc.  |    |    |
    +----+-----+-----+------+------+------+--------+----+----+
    """
    type = 'TF'

    def __init__(self, sid, nid0, c, b0, b1, b2, nids, components, a, comment=''):
        """Store the TF data; see the class docstring for the field layout."""
        if comment:
            self.comment = comment
        self.sid = sid
        self.nid0 = nid0
        self.c = c
        self.b0 = b0
        self.b1 = b1
        self.b2 = b2
        self.nids = nids
        self.components = components
        self.a = a  # one [a0, a1, a2] triple per (nid, component) pair

    def validate(self):
        pass
        #assert len(self.grids1) > 0, 'ngrids1=%s\n%s' % (len(self.grids1), str(self))

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TF card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        nid0 = integer(card, 2, 'nid0')
        # component 0 means an SPOINT/EPOINT
        c = components_or_blank(card, 3, 'components_0', 0)
        b0 = double_or_blank(card, 4, 'b0', 0.)
        b1 = double_or_blank(card, 5, 'b1', 0.)
        b2 = double_or_blank(card, 6, 'b2', 0.)
        # continuation rows start at field 9, 8 fields per row
        nfields = len(card) - 9
        nrows = nfields // 8
        if nfields % 8 > 0:
            nrows += 1
        nids = []
        components = []
        a = []
        for irow in range(nrows):
            j = irow * 8 + 9
            tag = irow + 1
            nids.append(integer(card, j, 'grid_%i' % tag))
            components.append(
                components_or_blank(card, j + 1, 'components_%i' % tag, 0))
            a.append([double_or_blank(card, j + 2, 'a0_%i' % tag, 0.),
                      double_or_blank(card, j + 3, 'a1_%i' % tag, 0.),
                      double_or_blank(card, j + 4, 'a2_%i' % tag, 0.)])
        return TF(sid, nid0, c, b0, b1, b2, nids, components, a,
                  comment=comment)

    def raw_fields(self):
        list_fields = ['TF', self.sid, self.nid0, self.c, self.b0, self.b1, self.b2, None, None]
        for grid, c, (a0, a1, a2) in zip(self.nids, self.components, self.a):
            list_fields.extend([grid, c, a0, a1, a2, None, None, None])
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        # double precision?
        printer = print_card_8 if size == 8 else print_card_16
        return self.comment + printer(self.repr_fields())
class TSTEP(BaseCard):
    """
    Transient Time Step

    Defines time step intervals at which a solution will be generated and
    output in transient analysis.

    +-------+------+------+------+------+-----+-----+-----+-----+
    |   1   |   2  |   3  |   4  |   5  |  6  |  7  |  8  |  9  |
    +=======+======+======+======+======+=====+=====+=====+=====+
    | TSTEP |  SID |  N1  |  DT1 |  NO1 |     |     |     |     |
    +-------+------+------+------+------+-----+-----+-----+-----+
    |       |      |  N2  |  DT2 |  NO2 |     |     |     |     |
    +-------+------+------+------+------+-----+-----+-----+-----+
    |       |      | etc. |      |      |     |     |     |     |
    +-------+------+------+------+------+-----+-----+-----+-----+

    +-------+------+------+------+------+-----+-----+-----+-----+
    |   1   |   2  |   3  |   4  |   5  |  6  |  7  |  8  |  9  |
    +=======+======+======+======+======+=====+=====+=====+=====+
    | TSTEP |  101 | 9000 | .001 | 9000 |     |     |     |     |
    +-------+------+------+------+------+-----+-----+-----+-----+
    |       |      | 1000 | .001 |   1  |     |     |     |     |
    +-------+------+------+------+------+-----+-----+-----+-----+
    """
    type = 'TSTEP'

    def __init__(self, sid, N, DT, NO, comment=''):
        """
        Creates a TSTEP card

        Parameters
        ----------
        sid : int
            the time step id
        N : List[int/None]
            number of time steps of value DTi (Integer > 1), one per card row
        DT : List[float/None]
            time increment per row
        NO : List[int/None]
            output skip factor per row; every NOi-th step is saved (default=1)
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        self.sid = sid
        self.N = N
        self.DT = DT
        self.NO = NO

    def validate(self):
        assert len(self.N) == len(self.DT), 'N=%s DT=%s' % (self.N, self.DT)
        assert len(self.N) == len(self.NO), 'N=%s NO=%s' % (self.N, self.NO)

    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TSTEP card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        N = []
        DT = []
        NO = []
        # one (N, DT, NO) triple per 8-field row
        nrows = int(ceil((len(card) - 1.) / 8.))
        for i in range(nrows):
            n = 8 * i + 1
            N.append(integer_or_blank(card, n + 1, 'N' + str(i), 1))
            DT.append(double_or_blank(card, n + 2, 'dt' + str(i), 0.))
            NO.append(integer_or_blank(card, n + 3, 'NO' + str(i), 1))
        return TSTEP(sid, N, DT, NO, comment=comment)

    def raw_fields(self):
        list_fields = ['TSTEP', self.sid]
        for (N, dt, no) in zip(self.N, self.DT, self.NO):
            list_fields.extend([N, dt, no, None, None, None, None, None])
        return list_fields

    def repr_fields(self):
        return self.raw_fields()

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        printer = print_card_8 if size == 8 else print_card_16
        return self.comment + printer(self.repr_fields())
class TSTEPNL(BaseCard):
    """
    Defines parametric controls and data for nonlinear transient structural or
    heat transfer analysis. TSTEPNL is intended for SOLs 129, 159, and 600.
    Parameters for Nonlinear Transient Analysis.

    +---------+--------+--------+-------+--------+--------+-------+---------+------+
    |    1    |    2   |    3   |   4   |    5   |    6   |   7   |    8    |  9   |
    +=========+========+========+=======+========+========+=======+=========+======+
    | TSTEPNL |   ID   |   NDT  |  DT   |   NO   | METHOD | KSTEP | MAXITER | CONV |
    +---------+--------+--------+-------+--------+--------+-------+---------+------+
    |         |  ESPU  |  EPSP  |  EPSW | MAXDIV |  MAXQN | MAXLS | FSTRESS |      |
    +---------+--------+--------+-------+--------+--------+-------+---------+------+
    |         | MAXBIS | ADJUST | MSTEP |   RB   |  MAXR  |  UTOL |  RTOLB  |      |
    +---------+--------+--------+-------+--------+--------+-------+---------+------+

    method = None for NX, but apparently TSTEP as well, which is not in the QRG
    """
    type = 'TSTEPNL'
    allowed_methods = ['AUTO', 'ITER', 'ADAPT', 'SEMI', 'FNT', 'PFNT',  # MSC
                       'TSTEP']  # NX
    def __init__(self, sid, ndt, dt, no, method='ADAPT', kstep=None,
                 max_iter=10, conv='PW', eps_u=1.e-2, eps_p=1.e-3,
                 eps_w=1.e-6, max_div=2, max_qn=10, max_ls=2,
                 fstress=0.2, max_bisect=5, adjust=5, mstep=None,
                 rb=0.6, max_r=32., utol=0.1, rtol_b=20.,
                 min_iter=None, comment=''):
        """
        Creates a TSTEPNL card

        Parameters
        ----------
        sid : int
            the time step id
        ndt : int
            number of time steps; must be >= 3 (asserted below)
        dt : float
            time increment; must be > 0. (asserted below)
        no : int
            output skip factor -- TODO confirm against the QRG
        method : str; default='ADAPT'
            one of ``allowed_methods``
        kstep : int; default=None
            method-dependent (see ``add_card`` for the per-method defaults)
        max_iter : int; default=10
            iteration limit -- TODO confirm
        conv : str; default='PW'
            convergence-criteria flag string
        eps_u : float; default=1.e-2
            convergence tolerance (displacement) -- TODO confirm
        eps_p : float; default=1.e-3
            convergence tolerance (load) -- TODO confirm
        eps_w : float; default=1.e-6
            convergence tolerance (work) -- TODO confirm
        max_div : int; default=2
            divergence limit -- TODO confirm
        max_qn : int; default=10
            quasi-Newton limit -- TODO confirm
        max_ls : int; default=2
            line-search limit -- TODO confirm
        fstress : float; default=0.2
            see the QRG
        max_bisect : int; default=5
            bisection limit -- TODO confirm
        adjust : int; default=5
            see the QRG
        mstep : int; default=None
            see the QRG
        rb : float; default=0.6
            see the QRG
        max_r : float; default=32.
            see the QRG
        utol : float; default=0.1
            see the QRG
        rtol_b : float; default=20.
            see the QRG
        min_iter : int; default=None
            not listed in all QRGs
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment

        # line 1
        self.sid = sid
        self.ndt = ndt
        self.dt = dt
        self.no = no
        self.method = method
        self.kstep = kstep
        self.max_iter = max_iter
        self.conv = conv

        # line 2
        self.eps_u = eps_u
        self.eps_p = eps_p
        self.eps_w = eps_w
        self.max_div = max_div
        self.max_qn = max_qn
        self.max_ls = max_ls
        self.fstress = fstress

        # line 3
        self.max_bisect = max_bisect
        self.adjust = adjust
        self.mstep = mstep
        self.rb = rb
        self.max_r = max_r
        self.utol = utol
        self.rtol_b = rtol_b
        self.min_iter = min_iter
        assert self.ndt >= 3
        assert self.dt > 0.
    def validate(self):
        # reject methods that are neither MSC nor NX spellings
        if self.method not in self.allowed_methods:
            msg = 'method=%r allowed_methods=[%s]' % (
                self.method, ', '.join(self.allowed_methods))
            raise ValueError(msg)
    @classmethod
    def add_card(cls, card, comment=''):
        """
        Adds a TSTEPNL card from ``BDF.add_card(...)``

        Parameters
        ----------
        card : BDFCard()
            a BDFCard object
        comment : str; default=''
            a comment for the card
        """
        sid = integer(card, 1, 'sid')
        ndt = integer(card, 2, 'ndt')
        dt = double(card, 3, 'dt')
        no = integer_or_blank(card, 4, 'no', 1)

        #: .. note:: not listed in all QRGs
        method = string_or_blank(card, 5, 'method', 'ADAPT')

        # KSTEP default depends on METHOD
        if method == 'ADAPT':
            kstep = integer_or_blank(card, 6, 'kStep', 2)
        elif method == 'ITER':
            kstep = integer_or_blank(card, 6, 'kStep', 10)
        elif method in ['AUTO', 'TSTEP', 'SEMI']:
            kstep = None
            #kstep = blank(card, 6, 'kStep') #: .. todo:: not blank
        else:
            msg = 'invalid TSTEPNL Method.  method=%r; allowed_methods=[%s]' % (
                method, ', '.join(cls.allowed_methods))
            raise RuntimeError(msg)
        max_iter = integer_or_blank(card, 7, 'maxIter', 10)
        conv = string_or_blank(card, 8, 'conv', 'PW')

        # line 2
        eps_u = double_or_blank(card, 9, 'epsU', 1.E-2)
        eps_p = double_or_blank(card, 10, 'epsP', 1.E-3)
        eps_w = double_or_blank(card, 11, 'epsW', 1.E-6)
        max_div = integer_or_blank(card, 12, 'maxDiv', 2)
        max_qn = integer_or_blank(card, 13, 'maxQn', 10)
        max_ls = integer_or_blank(card, 14, 'MaxLs', 2)
        fstress = double_or_blank(card, 15, 'fStress', 0.2)

        # line 3 (field 16 unused)
        max_bisect = integer_or_blank(card, 17, 'maxBisect', 5)
        adjust = integer_or_blank(card, 18, 'adjust', 5)
        mstep = integer_or_blank(card, 19, 'mStep')
        rb = double_or_blank(card, 20, 'rb', 0.6)
        max_r = double_or_blank(card, 21, 'maxR', 32.)
        utol = double_or_blank(card, 22, 'uTol', 0.1)
        rtol_b = double_or_blank(card, 23, 'rTolB', 20.)

        # not listed in all QRGs
        min_iter = integer_or_blank(card, 24, 'minIter')
        assert len(card) <= 25, 'len(TSTEPNL card) = %i\ncard=%s' % (len(card), card)
        return TSTEPNL(
            sid, ndt, dt, no, method, kstep, max_iter, conv,
            eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
            max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
            comment=comment)
    @classmethod
    def add_op2_data(cls, data, comment=''):
        """
        Adds a TSTEPNL card from the OP2

        Parameters
        ----------
        data : List[varies]
            a list of fields defined in OP2 format
        comment : str; default=''
            a comment for the card
        """
        (sid, ndt, dt, no, method, kstep, max_iter, conv, eps_u, eps_p, eps_w,
         max_div, max_qn, max_ls, fstress, max_bisect,
         adjust, mstep, rb, max_r, utol, rtol_b) = data

        # map the OP2 integer codes to the BDF string values; only the codes
        # below have been seen in practice
        if method == 1:
            method = 'AUTO'
        elif method == 3:
            method = 'ADAPT'
        else:
            raise NotImplementedError('tstepnl=%s method=%r data=%s' % (sid, method, data))

        if conv == 3:
            conv = 'PW'
        elif conv == 4:
            conv = 'U'
        #elif conv == 3:
            #conv = 'ADAPT'
        else:
            raise NotImplementedError('tstepnl=%s conv=%r data=%s' % (sid, conv, data))

        min_iter = None  # not listed in DMAP 2005
        return TSTEPNL(
            sid, ndt, dt, no, method, kstep, max_iter, conv,
            eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
            max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
            comment=comment)
        #self.sid = sid
        #self.ndt = ndt
        #self.dt = dt
        #self.no = no
        #self.method = method
        #self.kStep = kStep
        #self.maxIter = maxIter
        #self.conv = conv

        ## line 2
        #self.epsU = epsU
        #self.epsP = epsP
        #self.epsW = epsW
        #self.maxDiv = maxDiv
        #self.maxQn = maxQn
        #self.MaxLs = maxLs
        #self.fStress = fStress

        ## line 3
        #self.maxBisect = maxBisect
        #self.adjust = adjust
        #self.mStep = mStep
        #self.rb = rb
        #self.maxR = maxR
        #self.uTol = uTol
        #self.rTolB = rTolB
    def raw_fields(self):
        """Fields in write-order; the None is the card's unused 16th field."""
        list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, self.no,
                       self.method, self.kstep, self.max_iter, self.conv, self.eps_u,
                       self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
                       self.fstress, None, self.max_bisect, self.adjust, self.mstep,
                       self.rb, self.max_r, self.utol, self.rtol_b, self.min_iter]
        return list_fields
    def repr_fields(self):
        """Same as raw_fields, but with default-valued fields blanked out."""
        #no = set_blank_if_default(self.no,1)
        no = self.no
        method = set_blank_if_default(self.method, 'ADAPT')

        kstep = self.kstep
        #if self.method == 'ADAPT':
            #kStep = set_blank_if_default(self.kStep, 2)
        #elif self.method == 'ITER':
            #kStep = set_blank_if_default(self.kStep, 10)
        #else:
            #msg = 'invalid TSTEPNL Method.  method=|%s|' %(self.method)
            #raise RuntimeError(msg)

        #maxIter = set_blank_if_default(self.maxIter, 10)
        conv = set_blank_if_default(self.conv, 'PW')

        eps_u = set_blank_if_default(self.eps_u, 1e-2)
        eps_p = set_blank_if_default(self.eps_p, 1e-3)
        eps_w = set_blank_if_default(self.eps_w, 1e-6)
        max_div = set_blank_if_default(self.max_div, 2)
        max_qn = set_blank_if_default(self.max_qn, 10)
        max_ls = set_blank_if_default(self.max_ls, 2)
        fstress = set_blank_if_default(self.fstress, 0.2)

        max_bisect = set_blank_if_default(self.max_bisect, 5)
        adjust = set_blank_if_default(self.adjust, 5)
        rb = set_blank_if_default(self.rb, 0.6)
        max_r = set_blank_if_default(self.max_r, 32.)
        utol = set_blank_if_default(self.utol, 0.1)
        rtol_b = set_blank_if_default(self.rtol_b, 20.)

        list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, no, method,
                       kstep, self.max_iter, conv, eps_u, eps_p, eps_w, max_div, max_qn,
                       max_ls, fstress, None, max_bisect, adjust, self.mstep, rb,
                       max_r, utol, rtol_b, self.min_iter]
        return list_fields
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write the card in 8- or 16-character field format."""
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
| 35.016229 | 96 | 0.493809 |
from math import ceil, exp, log
from typing import TYPE_CHECKING

import numpy as np
from numpy import hstack, unique

from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.bdf_interface.assign_type import (
    integer, integer_or_blank, double, double_or_blank,
    string_or_blank, blank, fields, components_or_blank
)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16

if TYPE_CHECKING:
    from pyNastran.bdf.bdf import BDF
class DELAY(BaseCard):
    """Defines the time-delay term *tau* in the equations of the dynamic
    loading function.  One card stores one or two (node, component, delay)
    triplets.
    """
    type = 'DELAY'

    def __init__(self, sid, nodes, components, delays, comment=''):
        """
        Parameters
        ----------
        sid : int
            DELAY set id
        nodes : list[int]
            grid/scalar point ids
        components : list[int]
            components (0-6) on the corresponding nodes
        delays : list[float]
            time delays
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        self.sid = sid
        self.nodes = nodes
        self.components = components
        self.delays = delays

    @classmethod
    def add_card(cls, card, comment=''):
        """Create a DELAY from a BDF card (one or two triplets)."""
        sid = integer(card, 1, 'sid')
        nodes = [integer(card, 2, 'node')]
        components = [integer(card, 3, 'components')]
        delays = [double_or_blank(card, 4, 'delay')]
        assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
        if card.field(5):
            # optional second (node, component, delay) triplet
            nodes.append(integer(card, 5, 'node'))
            components.append(integer(card, 6, 'components'))
            delays.append(double_or_blank(card, 7, 'delay'))
            assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
        return DELAY(sid, nodes, components, delays, comment=comment)

    def add(self, delay):
        """Merge another DELAY card with the same sid into this one."""
        assert self.sid == delay.sid, 'sid=%s delay.sid=%s' % (self.sid, delay.sid)
        if delay.comment:
            # bug fix: hasattr takes (object, name); the original called
            # hasattr('_comment'), which raises TypeError at runtime
            if hasattr(self, '_comment'):
                self._comment += delay.comment
            else:
                self._comment = delay.comment
        self.nodes += delay.nodes
        self.components += delay.components
        self.delays += delay.delays

    def get_delay_at_freq(self, freq):
        # NOTE(review): ``freq`` is currently ignored; the raw triplets are
        # returned regardless of the requested frequency
        return self.nodes, self.components, self.delays

    @property
    def node_ids(self):
        # NOTE(review): relies on node_id1/node_id2 helpers defined outside
        # this chunk (presumably alongside the cross-referencing code)
        node_ids = [self.node_id1]
        if len(self.components) == 2:
            node_ids.append(self.node_id2)
        return node_ids

    def raw_fields(self):
        """Return the fields in their unmodified (card) form."""
        list_fields = ['DELAY', self.sid]
        for nid, comp, delay in zip(self.node_ids, self.components, self.delays):
            if isinstance(nid, integer_types):
                nidi = nid
            else:
                # cross-referenced GRID object -> use its integer id
                nidi = nid.nid
            list_fields += [nidi, comp, delay]
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write one DELAY card per stored triplet."""
        msg = self.comment
        node_ids = self.node_ids
        if size == 8:
            for nid, comp, delay in zip(node_ids, self.components, self.delays):
                msg += print_card_8(['DELAY', self.sid, nid, comp, delay])
        else:
            for nid, comp, delay in zip(node_ids, self.components, self.delays):
                msg += print_card_16(['DELAY', self.sid, nid, comp, delay])
        return msg
class DPHASE(BaseCard):
    """Defines the phase-lead term *theta* in the equation of the dynamic
    loading function.  One card stores one or two (node, component,
    phase_lead) triplets.
    """
    type = 'DPHASE'

    def __init__(self, sid, nodes, components, phase_leads, comment=''):
        """
        Parameters
        ----------
        sid : int
            DPHASE set id
        nodes : list[int]
            grid/scalar point ids
        components : list[int]
            components (0-6) on the corresponding nodes
        phase_leads : list[float]
            phase leads
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        self.sid = sid
        self.nodes = nodes
        self.components = components
        self.phase_leads = phase_leads

    @classmethod
    def add_card(cls, card, comment=''):
        """Create a DPHASE from a BDF card (one or two triplets)."""
        sid = integer(card, 1, 'sid')
        nodes = [integer(card, 2, 'node')]
        components = [integer(card, 3, 'components')]
        phase_leads = [double_or_blank(card, 4, 'phase_lead')]
        assert components[0] in [0, 1, 2, 3, 4, 5, 6], components
        if card.field(5):
            # optional second (node, component, phase_lead) triplet
            nodes.append(integer(card, 5, 'node'))
            components.append(integer(card, 6, 'components'))
            phase_leads.append(double_or_blank(card, 7, 'phase_lead'))
            assert components[1] in [0, 1, 2, 3, 4, 5, 6], components
        return DPHASE(sid, nodes, components, phase_leads, comment=comment)

    def add(self, dphase):
        """Merge another DPHASE card with the same sid into this one."""
        assert self.sid == dphase.sid, 'sid=%s dphase.sid=%s' % (self.sid, dphase.sid)
        if dphase.comment:
            # bug fix: hasattr takes (object, name); the original called
            # hasattr('_comment'), which raises TypeError at runtime
            if hasattr(self, '_comment'):
                self._comment += dphase.comment
            else:
                self._comment = dphase.comment
        self.nodes += dphase.nodes
        self.components += dphase.components
        self.phase_leads += dphase.phase_leads

    @property
    def node_ids(self):
        # NOTE(review): relies on node_id1/node_id2 helpers defined outside
        # this chunk (presumably alongside the cross-referencing code)
        node_ids = [self.node_id1]
        if len(self.components) == 2:
            node_ids.append(self.node_id2)
        return node_ids

    def raw_fields(self):
        """Return the fields in their unmodified (card) form."""
        list_fields = ['DPHASE', self.sid]
        for nid, comp, delay in zip(self.nodes, self.components, self.phase_leads):
            if isinstance(nid, integer_types):
                nidi = nid
            else:
                # cross-referenced GRID object -> use its integer id
                nidi = nid.nid
            list_fields += [nidi, comp, delay]
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write one DPHASE card per stored triplet."""
        msg = self.comment
        node_ids = self.node_ids
        if size == 8:
            for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):
                msg += print_card_8(['DPHASE', self.sid, nid, comp, delay])
        else:
            for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):
                msg += print_card_16(['DPHASE', self.sid, nid, comp, delay])
        return msg
class FREQ(BaseCard):
    """Defines a set of frequencies used to solve frequency-response
    problems.  The stored set is sorted and de-duplicated."""
    type = 'FREQ'

    def __init__(self, sid, freqs, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        self.freqs = np.unique(freqs)

    @classmethod
    def add_card(cls, card, comment=''):
        """Create a FREQ card: sid followed by an open-ended frequency list."""
        sid = integer(card, 1, 'sid')
        freqs = fields(double, card, 'freq', i=2, j=len(card))
        return FREQ(sid, freqs, comment=comment)

    def get_freqs(self):
        """Return the sorted, de-duplicated frequency array."""
        return self.freqs

    def add_frequencies(self, freqs):
        """Merge additional frequencies into the set."""
        self.freqs = unique(hstack([self.freqs, freqs]))

    def add_frequency_object(self, freq):
        """Merge the frequency set of another FREQ-style card."""
        self.add_frequencies(freq.freqs)

    def raw_fields(self):
        return ['FREQ', self.sid] + list(self.freqs)

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        card_fields = self.repr_fields()
        if size != 8:
            return self.comment + print_card_16(card_fields)
        return self.comment + print_card_8(card_fields)
class FREQ1(FREQ):
    """Defines a frequency set as an arithmetic progression:
    f = f1 + i*df for i in [0, ndf)."""
    type = 'FREQ1'

    def __init__(self, sid, f1, df, ndf, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        self.f1 = f1
        self.df = df
        self.ndf = ndf
        # expand the progression into an explicit, de-duplicated set
        self.freqs = unique([f1 + i * df for i in range(ndf)])

    @classmethod
    def add_card(cls, card, comment=''):
        sid = integer(card, 1, 'sid')
        f1 = double_or_blank(card, 2, 'f1', 0.0)
        df = double(card, 3, 'df')
        ndf = integer_or_blank(card, 4, 'ndf', 1)
        assert len(card) <= 5, 'len(FREQ card) = %i\ncard=%s' % (len(card), card)
        return FREQ1(sid, f1, df, ndf, comment=comment)

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        card_fields = self.repr_fields()
        if size != 8:
            return self.comment + print_card_16(card_fields)
        return self.comment + print_card_8(card_fields)
class FREQ2(FREQ):
    """Defines ``ndf`` logarithmically-spaced frequencies between f1 and f2
    (f2 itself is excluded from the expanded set)."""
    type = 'FREQ2'

    def __init__(self, sid, f1, f2, ndf=1, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        self.f1 = f1
        self.f2 = f2
        self.ndf = ndf
        # constant increment in log-space
        d = 1. / ndf * log(f2 / f1)
        self.freqs = np.unique([f1 * exp(i * d) for i in range(ndf)])

    @classmethod
    def add_card(cls, card, comment=''):
        sid = integer(card, 1, 'sid')
        f1 = double(card, 2, 'f1')
        f2 = double(card, 3, 'f2')
        ndf = integer_or_blank(card, 4, 'nf', 1)
        assert len(card) <= 5, 'len(FREQ2 card) = %i\ncard=%s' % (len(card), card)
        return FREQ2(sid, f1, f2, ndf, comment=comment)
class FREQ3(FREQ):
    """Defines a set of excitation frequencies spread between pairs of modal
    frequencies (LINEAR or LOG interpolation with optional clustering).
    """
    type = 'FREQ3'

    def __init__(self, sid, f1, f2=None, Type='LINEAR', nef=10, cluster=1.0,
                 comment=''):
        """
        Parameters
        ----------
        sid : int
            set id
        f1 : float
            lower bound of the frequency range
        f2 : float; default=None -> f1
            upper bound of the frequency range
        Type : str; default='LINEAR'
            LINEAR/LOG interpolation
        nef : int; default=10
            number of excitation frequencies per subrange
        cluster : float; default=1.0
            frequency clustering parameter
        comment : str; default=''
            a comment for the card
        """
        if comment:
            self.comment = comment
        if f2 is None:
            f2 = f1
        # bug fix: ``sid`` was referenced but missing from the signature,
        # so every construction raised NameError
        self.sid = sid
        self.f1 = f1
        self.f2 = f2
        self.Type = Type
        self.nef = nef
        self.cluster = cluster

    @classmethod
    def add_card(cls, card, comment=''):
        """Create a FREQ3 from a BDF card."""
        # bug fix: every field was previously read from position 1; use the
        # proper FREQ3 field positions
        sid = integer(card, 1, 'sid')
        f1 = double(card, 2, 'f1')
        # NOTE(review): f2 read as an integer matches the original call; a
        # double may be intended -- confirm against the QRG
        f2 = integer_or_blank(card, 3, 'f2', f1)
        Type = string_or_blank(card, 4, 'Type', 'LINEAR')
        nef = integer_or_blank(card, 5, 'nef', 10)
        cluster = double_or_blank(card, 6, 'cluster', 1.0)
        # bug fix: the comment was previously dropped (comment='')
        return FREQ3(sid, f1, f2, Type, nef, cluster, comment=comment)

    def raw_fields(self):
        return ['FREQ3', self.sid, self.f1, self.f2, self.Type, self.nef,
                self.cluster]

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
class FREQ4(FREQ):
    """Defines a set of frequencies by specifying a spread about each normal
    mode within the range f1 to f2."""
    type = 'FREQ4'

    def __init__(self, sid, f1, f2, fspread, nfm, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        self.f1 = f1
        self.f2 = f2
        self.fspread = fspread
        self.nfm = nfm

    @classmethod
    def add_card(cls, card, comment=''):
        sid = integer(card, 1, 'sid')
        f1 = double_or_blank(card, 2, 'f1', 0.0)
        f2 = double_or_blank(card, 3, 'f2', 1.e20)
        fspread = double_or_blank(card, 4, 'fspd', 0.1)
        nfm = integer_or_blank(card, 5, 'nfm', 3)
        assert len(card) <= 6, 'len(FREQ card) = %i\ncard=%s' % (len(card), card)
        return FREQ4(sid, f1, f2, fspread, nfm, comment=comment)

    def raw_fields(self):
        return ['FREQ4', self.sid, self.f1, self.f2, self.fspread,
                self.nfm]

    def repr_fields(self):
        return self.raw_fields()

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        card_fields = self.repr_fields()
        if size != 8:
            return self.comment + print_card_16(card_fields)
        return self.comment + print_card_8(card_fields)
class NLPARM(BaseCard):
    """Defines a set of parameters for nonlinear static analysis iteration
    strategy (MSC/NX Nastran NLPARM bulk data entry)."""
    type = 'NLPARM'

    def __init__(self, nlparm_id, ninc=10, dt=0.0, kmethod='AUTO', kstep=5,
                 max_iter=25, conv='PW', int_out='NO',
                 eps_u=0.01, eps_p=0.01, eps_w=0.01, max_div=3, max_qn=None, max_ls=4,
                 fstress=0.2, ls_tol=0.5, max_bisect=5, max_r=20., rtol_b=20., comment=''):
        """Store the NLPARM fields; defaults mirror the QRG defaults.

        ``max_qn=None`` is resolved at the end: 0 for the PFNT method,
        ``max_iter`` otherwise.
        """
        if comment:
            self.comment = comment
        self.nlparm_id = nlparm_id
        self.ninc = ninc
        self.dt = dt
        self.kmethod = kmethod
        self.kstep = kstep
        self.max_iter = max_iter
        self.conv = conv
        self.int_out = int_out

        # sub-case parameters (convergence tolerances)
        self.eps_p = eps_p
        self.eps_u = eps_u
        self.eps_w = eps_w
        self.max_div = max_div
        self.max_qn = max_qn
        self.max_ls = max_ls
        self.fstress = fstress

        # line-search / bisection / rotation controls
        self.ls_tol = ls_tol
        self.max_bisect = max_bisect
        self.max_r = max_r
        self.rtol_b = rtol_b

        # method-dependent default for the max quasi-Newton updates
        if self.max_qn is None:
            if kmethod == 'PFNT':
                self.max_qn = 0
            else:
                self.max_qn = max_iter

    @classmethod
    def add_card(cls, card, comment=''):
        """Create an NLPARM from a BDF card (fields read positionally)."""
        nlparm_id = integer(card, 1, 'nlparm_id')
        ninc = integer_or_blank(card, 2, 'ninc', 10)
        dt = double_or_blank(card, 3, 'dt', 0.0)
        kmethod = string_or_blank(card, 4, 'kmethod', 'AUTO')
        kstep = integer_or_blank(card, 5, 'kstep', 5)
        max_iter = integer_or_blank(card, 6, 'max_iter', 25)
        conv = string_or_blank(card, 7, 'conv', 'PW')
        int_out = string_or_blank(card, 8, 'intOut', 'NO')

        # line 2
        eps_u = double_or_blank(card, 9, 'eps_u', 0.01)
        eps_p = double_or_blank(card, 10, 'eps_p', 0.01)
        eps_w = double_or_blank(card, 11, 'eps_w', 0.01)
        max_div = integer_or_blank(card, 12, 'max_div', 3)

        # max_qn default is method-dependent (PFNT -> 0)
        if kmethod == 'PFNT':
            max_qn = integer_or_blank(card, 13, 'max_qn', 0)
        else:
            max_qn = integer_or_blank(card, 13, 'max_qn', max_iter)

        max_ls = integer_or_blank(card, 14, 'max_ls', 4)
        fstress = double_or_blank(card, 15, 'fstress', 0.2)
        ls_tol = double_or_blank(card, 16, 'ls_tol', 0.5)

        # line 3
        max_bisect = integer_or_blank(card, 17, 'max_bisect', 5)
        max_r = double_or_blank(card, 21, 'max_r', 20.)
        rtol_b = double_or_blank(card, 23, 'rtol_b', 20.)
        assert len(card) <= 24, 'len(NLPARM card) = %i\ncard=%s' % (len(card), card)
        return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
                      int_out, eps_u, eps_p, eps_w, max_div,
                      max_qn, max_ls, fstress,
                      ls_tol, max_bisect, max_r,
                      rtol_b, comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Create an NLPARM from OP2 integer-coded data, translating the
        integer codes back to the QRG string values."""
        (nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv, int_out, eps_u, eps_p,
         eps_w, max_div, max_qn, max_ls, fstress, ls_tol, max_bisect, max_r,
         rtol_b) = data

        # kmethod integer code -> string
        if kmethod == 1:
            kmethod = 'AUTO'
        elif kmethod == 2:
            kmethod = 'ITER'
        elif kmethod == 4:
            kmethod = 'SEMI'
        elif kmethod == 3:
            kmethod = 'ADAPT'
        else:
            msg = 'nlparm_id=%s kmethod=%r data=%s' % (nlparm_id, kmethod, data)
            raise NotImplementedError(msg)

        # conv integer code -> string
        if conv == 1:
            conv = 'W'
        elif conv == 2:
            conv = 'P'
        elif conv == 3:
            conv = 'PW'
        elif conv == 4:
            conv = 'U'
        elif conv == 5:
            conv = 'UW'
        elif conv == 6:
            conv = 'UP'
        elif conv == 7:
            conv = 'UPW'
        else:
            msg = 'nlparm_id=%s conv=%r data=%s' % (nlparm_id, conv, data)
            raise NotImplementedError(msg)

        # int_out integer code -> string
        if int_out == 0:
            int_out = 'NO'
        elif int_out == 1:
            int_out = 'YES'
        elif int_out == 2:
            int_out = 'ALL'
        else:
            msg = 'nlparm_id=%s int_out=%r data=%s' % (nlparm_id, int_out, data)
            raise NotImplementedError(msg)
        return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,
                      int_out, eps_u, eps_p, eps_w, max_div,
                      max_qn, max_ls, fstress,
                      ls_tol, max_bisect, max_r,
                      rtol_b, comment=comment)

    def raw_fields(self):
        """Return the fields in their unmodified (card) form."""
        list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.kmethod,
                       self.kstep, self.max_iter, self.conv, self.int_out, self.eps_u,
                       self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
                       self.fstress, self.ls_tol, self.max_bisect, None, None, None,
                       self.max_r, None, self.rtol_b]
        return list_fields

    def repr_fields(self):
        """Return the fields with default-valued entries blanked out."""
        ninc = set_blank_if_default(self.ninc, 10)
        dt = set_blank_if_default(self.dt, 0.0)
        kmethod = set_blank_if_default(self.kmethod, 'AUTO')
        kstep = set_blank_if_default(self.kstep, 5)
        max_iter = set_blank_if_default(self.max_iter, 25)
        conv = set_blank_if_default(self.conv, 'PW')
        int_out = set_blank_if_default(self.int_out, 'NO')
        eps_u = set_blank_if_default(self.eps_u, 0.01)
        eps_p = set_blank_if_default(self.eps_p, 0.01)
        eps_w = set_blank_if_default(self.eps_w, 0.01)
        max_div = set_blank_if_default(self.max_div, 3)
        # max_qn's default depends on max_iter (see __init__)
        max_qn = set_blank_if_default(self.max_qn, self.max_iter)
        max_ls = set_blank_if_default(self.max_ls, 4)
        fstress = set_blank_if_default(self.fstress, 0.2)
        ls_tol = set_blank_if_default(self.ls_tol, 0.5)
        max_bisect = set_blank_if_default(self.max_bisect, 5)
        max_r = set_blank_if_default(self.max_r, 20.)
        rtol_b = set_blank_if_default(self.rtol_b, 20.)

        list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kmethod, kstep, max_iter,
                       conv, int_out, eps_u, eps_p, eps_w, max_div, max_qn, max_ls,
                       fstress, ls_tol, max_bisect, None, None, None, max_r, None,
                       rtol_b]
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write the card in 8- or 16-character field format."""
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
class NLPCI(BaseCard):
    """Defines a set of parameters for the arc-length incremental solution
    strategy in nonlinear static analysis (companion card to NLPARM)."""
    type = 'NLPCI'

    def __init__(self, nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,
                 scale=0., desiter=12, mxinc=20, comment=''):
        if comment:
            self.comment = comment
        self.nlpci_id = nlpci_id
        self.Type = Type
        self.minalr = minalr
        self.maxalr = maxalr
        self.scale = scale
        self.desiter = desiter
        self.mxinc = mxinc

    @classmethod
    def add_card(cls, card, comment=''):
        """Create an NLPCI from a BDF card (field 6 must be blank)."""
        nlpci_id = integer(card, 1, 'nlpci_id')
        Type = string_or_blank(card, 2, 'Type', 'CRIS')
        minalr = double_or_blank(card, 3, 'minalr', 0.25)
        maxalr = double_or_blank(card, 4, 'maxalr', 4.0)
        scale = double_or_blank(card, 5, 'scale', 0.0)
        blank(card, 6, 'blank')
        desiter = integer_or_blank(card, 7, 'desiter', 12)
        mxinc = integer_or_blank(card, 8, 'mxinc', 20)
        return NLPCI(nlpci_id, Type=Type, minalr=minalr, maxalr=maxalr,
                     scale=scale, desiter=desiter, mxinc=mxinc, comment=comment)

    def raw_fields(self):
        return ['NLPCI', self.nlpci_id, self.Type, self.minalr,
                self.maxalr, self.scale, None, self.desiter, self.mxinc]

    def repr_fields(self):
        return self.raw_fields()

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        card_fields = self.repr_fields()
        if size != 8:
            return self.comment + print_card_16(card_fields)
        return self.comment + print_card_8(card_fields)
class TF(BaseCard):
    """Defines a dynamic transfer function:
    (B0 + B1*p + B2*p^2)*ud + sum((A0_i + A1_i*p + A2_i*p^2)*u_i) = 0."""
    type = 'TF'

    def __init__(self, sid, nid0, c, b0, b1, b2, nids, components, a, comment=''):
        """Store the TF fields.

        sid: set id; nid0/c: dependent grid + component; b0/b1/b2: the B
        polynomial coefficients; nids/components/a: parallel lists of the
        independent grids, their components, and [a0, a1, a2] triples.
        """
        if comment:
            self.comment = comment
        self.sid = sid
        self.nid0 = nid0
        self.c = c
        self.b0 = b0
        self.b1 = b1
        self.b2 = b2
        self.nids = nids
        self.components = components
        self.a = a

    def validate(self):
        # no cross-field checks are currently enforced
        pass

    @classmethod
    def add_card(cls, card, comment=''):
        """Create a TF from a BDF card: a fixed header, then one
        (grid, component, a0, a1, a2) row per 8-field continuation."""
        sid = integer(card, 1, 'sid')
        nid0 = integer(card, 2, 'nid0')
        # component 0 is the blank/scalar default
        c = components_or_blank(card, 3, 'components_0', 0)
        b0 = double_or_blank(card, 4, 'b0', 0.)
        b1 = double_or_blank(card, 5, 'b1', 0.)
        b2 = double_or_blank(card, 6, 'b2', 0.)

        # rows start at field 9; each continuation holds 8 fields
        nfields = len(card) - 9
        nrows = nfields // 8
        if nfields % 8 > 0:
            nrows += 1

        nids = []
        components = []
        a = []
        for irow in range(nrows):
            j = irow * 8 + 9
            nid = integer(card, j, 'grid_%i' % (irow + 1))
            component = components_or_blank(card, j + 1, 'components_%i' % (irow + 1), 0)
            a0 = double_or_blank(card, j + 2, 'a0_%i' % (irow + 1), 0.)
            a1 = double_or_blank(card, j + 3, 'a1_%i' % (irow + 1), 0.)
            a2 = double_or_blank(card, j + 4, 'a2_%i' % (irow + 1), 0.)
            nids.append(nid)
            components.append(component)
            a.append([a0, a1, a2])
        return TF(sid, nid0, c, b0, b1, b2, nids, components, a,
                  comment=comment)

    def raw_fields(self):
        """Return the fields in their unmodified (card) form; each grid row
        is padded to a full 8-field continuation."""
        list_fields = ['TF', self.sid, self.nid0, self.c, self.b0, self.b1, self.b2, None, None]
        for grid, c, (a0, a1, a2) in zip(self.nids, self.components, self.a):
            list_fields += [grid, c, a0, a1, a2, None, None, None]
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write the card in 8- or 16-character field format."""
        # double precision?
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
class TSTEP(BaseCard):
    """Transient time-step card: each row gives a number of steps N, a time
    increment DT, and an output skip factor NO."""
    type = 'TSTEP'

    def __init__(self, sid, N, DT, NO, comment=''):
        if comment:
            self.comment = comment
        self.sid = sid
        self.N = N
        self.DT = DT
        self.NO = NO

    def validate(self):
        """The three per-row lists must all have the same length."""
        assert len(self.N) == len(self.DT), 'N=%s DT=%s' % (self.N, self.DT)
        assert len(self.N) == len(self.NO), 'N=%s NO=%s' % (self.N, self.NO)

    @classmethod
    def add_card(cls, card, comment=''):
        """Create a TSTEP from a BDF card; one (N, DT, NO) triple per
        8-field row."""
        sid = integer(card, 1, 'sid')
        N = []
        DT = []
        NO = []
        nrows = int(ceil((len(card) - 1.) / 8.))
        for irow in range(nrows):
            base = 8 * irow + 1
            N.append(integer_or_blank(card, base + 1, 'N' + str(irow), 1))
            DT.append(double_or_blank(card, base + 2, 'dt' + str(irow), 0.))
            NO.append(integer_or_blank(card, base + 3, 'NO' + str(irow), 1))
        return TSTEP(sid, N, DT, NO, comment=comment)

    def raw_fields(self):
        list_fields = ['TSTEP', self.sid]
        for ni, dti, noi in zip(self.N, self.DT, self.NO):
            # pad each row to a full 8-field continuation
            list_fields += [ni, dti, noi, None, None, None, None, None]
        return list_fields

    def repr_fields(self):
        return self.raw_fields()

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        card_fields = self.repr_fields()
        if size != 8:
            return self.comment + print_card_16(card_fields)
        return self.comment + print_card_8(card_fields)
class TSTEPNL(BaseCard):
    """Defines parametric controls and data for nonlinear transient
    structural or heat-transfer analysis."""
    type = 'TSTEPNL'
    allowed_methods = ['AUTO', 'ITER', 'ADAPT', 'SEMI', 'FNT', 'PFNT',  # MSC
                       'TSTEP']  # NX

    def __init__(self, sid, ndt, dt, no, method='ADAPT', kstep=None,
                 max_iter=10, conv='PW', eps_u=1.e-2, eps_p=1.e-3,
                 eps_w=1.e-6, max_div=2, max_qn=10, max_ls=2,
                 fstress=0.2, max_bisect=5, adjust=5, mstep=None,
                 rb=0.6, max_r=32., utol=0.1, rtol_b=20.,
                 min_iter=None, comment=''):
        """Store the TSTEPNL fields.

        sid: set id; ndt: number of time steps (>= 3); dt: time increment
        (> 0); no: output skip factor; the remaining arguments mirror the
        TSTEPNL QRG fields and defaults.
        """
        if comment:
            self.comment = comment

        # sid = sid
        self.sid = sid
        self.ndt = ndt
        self.dt = dt
        self.no = no
        self.method = method
        self.kstep = kstep
        self.max_iter = max_iter
        self.conv = conv

        self.eps_u = eps_u
        self.eps_p = eps_p
        self.eps_w = eps_w
        self.max_div = max_div
        self.max_qn = max_qn
        self.max_ls = max_ls
        self.fstress = fstress

        self.max_bisect = max_bisect
        self.adjust = adjust
        self.mstep = mstep
        self.rb = rb
        self.max_r = max_r
        self.utol = utol
        self.rtol_b = rtol_b
        self.min_iter = min_iter
        assert self.ndt >= 3
        assert self.dt > 0.

    def validate(self):
        """Raise ValueError if the method is not a known TSTEPNL method."""
        if self.method not in self.allowed_methods:
            msg = 'method=%r allowed_methods=[%s]' % (
                self.method, ', '.join(self.allowed_methods))
            raise ValueError(msg)

    @classmethod
    def add_card(cls, card, comment=''):
        """Create a TSTEPNL from a BDF card."""
        sid = integer(card, 1, 'sid')
        ndt = integer(card, 2, 'ndt')
        dt = double(card, 3, 'dt')
        no = integer_or_blank(card, 4, 'no', 1)

        # the default for kstep depends on the method
        method = string_or_blank(card, 5, 'method', 'ADAPT')
        if method == 'ADAPT':
            kstep = integer_or_blank(card, 6, 'kStep', 2)
        elif method == 'ITER':
            kstep = integer_or_blank(card, 6, 'kStep', 10)
        elif method in ['AUTO', 'TSTEP', 'SEMI']:
            kstep = None
        else:
            # bug fix: this raise used to sit inside the branch above, so the
            # valid AUTO/TSTEP/SEMI methods always raised RuntimeError
            msg = 'invalid TSTEPNL Method. method=%r; allowed_methods=[%s]' % (
                method, ', '.join(cls.allowed_methods))
            raise RuntimeError(msg)

        max_iter = integer_or_blank(card, 7, 'maxIter', 10)
        conv = string_or_blank(card, 8, 'conv', 'PW')

        # line 2
        eps_u = double_or_blank(card, 9, 'epsU', 1.E-2)
        eps_p = double_or_blank(card, 10, 'epsP', 1.E-3)
        eps_w = double_or_blank(card, 11, 'epsW', 1.E-6)
        max_div = integer_or_blank(card, 12, 'maxDiv', 2)
        max_qn = integer_or_blank(card, 13, 'maxQn', 10)
        max_ls = integer_or_blank(card, 14, 'MaxLs', 2)
        fstress = double_or_blank(card, 15, 'fStress', 0.2)

        # line 3
        max_bisect = integer_or_blank(card, 17, 'maxBisect', 5)
        adjust = integer_or_blank(card, 18, 'adjust', 5)
        mstep = integer_or_blank(card, 19, 'mStep')
        rb = double_or_blank(card, 20, 'rb', 0.6)
        max_r = double_or_blank(card, 21, 'maxR', 32.)
        utol = double_or_blank(card, 22, 'uTol', 0.1)
        rtol_b = double_or_blank(card, 23, 'rTolB', 20.)
        min_iter = integer_or_blank(card, 24, 'minIter')
        assert len(card) <= 25, 'len(TSTEPNL card) = %i\ncard=%s' % (len(card), card)
        return TSTEPNL(
            sid, ndt, dt, no, method, kstep, max_iter, conv,
            eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
            max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
            comment=comment)

    @classmethod
    def add_op2_data(cls, data, comment=''):
        """Create a TSTEPNL from OP2 integer-coded data."""
        (sid, ndt, dt, no, method, kstep, max_iter, conv, eps_u, eps_p, eps_w,
         max_div, max_qn, max_ls, fstress, max_bisect,
         adjust, mstep, rb, max_r, utol, rtol_b) = data

        # method integer code -> string
        if method == 1:
            method = 'AUTO'
        elif method == 3:
            method = 'ADAPT'
        else:
            raise NotImplementedError('tstepnl=%s method=%r data=%s' % (sid, method, data))

        # conv integer code -> string
        if conv == 3:
            conv = 'PW'
        elif conv == 4:
            conv = 'U'
        else:
            raise NotImplementedError('tstepnl=%s conv=%r data=%s' % (sid, conv, data))

        # min_iter is not stored in the OP2 data
        min_iter = None
        return TSTEPNL(
            sid, ndt, dt, no, method, kstep, max_iter, conv,
            eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,
            max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,
            comment=comment)

    def raw_fields(self):
        """Return the fields in their unmodified (card) form."""
        list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, self.no,
                       self.method, self.kstep, self.max_iter, self.conv, self.eps_u,
                       self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,
                       self.fstress, None, self.max_bisect, self.adjust, self.mstep,
                       self.rb, self.max_r, self.utol, self.rtol_b, self.min_iter]
        return list_fields

    def repr_fields(self):
        """Return the fields with default-valued entries blanked out."""
        # ``no`` and ``kstep`` are written verbatim; kstep's default depends
        # on the method, so a single-default blanking would be wrong
        no = self.no
        method = set_blank_if_default(self.method, 'ADAPT')
        kstep = self.kstep
        conv = set_blank_if_default(self.conv, 'PW')
        eps_u = set_blank_if_default(self.eps_u, 1e-2)
        eps_p = set_blank_if_default(self.eps_p, 1e-3)
        eps_w = set_blank_if_default(self.eps_w, 1e-6)
        max_div = set_blank_if_default(self.max_div, 2)
        max_qn = set_blank_if_default(self.max_qn, 10)
        max_ls = set_blank_if_default(self.max_ls, 2)
        fstress = set_blank_if_default(self.fstress, 0.2)
        max_bisect = set_blank_if_default(self.max_bisect, 5)
        adjust = set_blank_if_default(self.adjust, 5)
        rb = set_blank_if_default(self.rb, 0.6)
        max_r = set_blank_if_default(self.max_r, 32.)
        utol = set_blank_if_default(self.utol, 0.1)
        rtol_b = set_blank_if_default(self.rtol_b, 20.)

        list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, no, method,
                       kstep, self.max_iter, conv, eps_u, eps_p, eps_w, max_div, max_qn,
                       max_ls, fstress, None, max_bisect, adjust, self.mstep, rb,
                       max_r, utol, rtol_b, self.min_iter]
        return list_fields

    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """Write the card in 8- or 16-character field format."""
        card = self.repr_fields()
        if size == 8:
            return self.comment + print_card_8(card)
        return self.comment + print_card_16(card)
| true | true |
f7198c8a3b00d357347baf407e57a7dd4b984119 | 620 | py | Python | polls/admin.py | Obsinqsob01/polls | 52f42029bd76e7a4f1dbdc947c5217ca9e2c0f1d | [
"MIT"
] | null | null | null | polls/admin.py | Obsinqsob01/polls | 52f42029bd76e7a4f1dbdc947c5217ca9e2c0f1d | [
"MIT"
] | null | null | null | polls/admin.py | Obsinqsob01/polls | 52f42029bd76e7a4f1dbdc947c5217ca9e2c0f1d | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Choice, Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
list_display = ('question_text', 'pub_date')
class QuestionAdmin(admin.ModelAdmin):
list_display = ('question_text', 'pub_date', 'was_published_recently')
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_filter = ['pub_date']
search_fields = ['question_text']
admin.site.register(Question, QuestionAdmin) | 29.52381 | 80 | 0.659677 | from django.contrib import admin
from .models import Choice, Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
list_display = ('question_text', 'pub_date')
class QuestionAdmin(admin.ModelAdmin):
list_display = ('question_text', 'pub_date', 'was_published_recently')
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
]
inlines = [ChoiceInline]
list_filter = ['pub_date']
search_fields = ['question_text']
admin.site.register(Question, QuestionAdmin) | true | true |
f7198cbf53eb86b681a5ce28880882ab6561e873 | 706 | py | Python | 2-add-two-numbers/2-add-two-numbers.py | Atri10/Leet-code---Atri_Patel | 49fc59b9147a44ab04a66128fbb2ef259b5f7b7c | [
"MIT"
] | 1 | 2021-10-10T20:21:18.000Z | 2021-10-10T20:21:18.000Z | 2-add-two-numbers/2-add-two-numbers.py | Atri10/Leet-code---Atri_Patel | 49fc59b9147a44ab04a66128fbb2ef259b5f7b7c | [
"MIT"
] | null | null | null | 2-add-two-numbers/2-add-two-numbers.py | Atri10/Leet-code---Atri_Patel | 49fc59b9147a44ab04a66128fbb2ef259b5f7b7c | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
        """Add two non-negative integers stored as reversed-digit lists.

        Walks both lists in lockstep with a running carry, emitting one
        digit node per step; runs until both lists and the carry are spent.
        """
        dummy = ListNode(-1)  # placeholder head; answer starts at dummy.next
        tail = dummy
        carry = 0
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| 27.153846 | 98 | 0.441926 |
class Solution:
    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
        # LeetCode 2: digits are stored least-significant first, so one pass
        # with a running carry adds the numbers like grade-school addition.
        n = cur = ListNode(-1)  # dummy head; the result starts at n.next
        carry = 0
        while l1 or l2 or carry:
            if l1:
                carry += l1.val
                l1 = l1.next
            if l2:
                carry += l2.val
                l2 = l2.next
            cur.next = ListNode(carry % 10)  # emit the current digit
            cur = cur.next
            carry = carry // 10  # carry into the next digit
        return n.next
| true | true |
f7198d790f74aa6993a89e96a1b3903ca05a53bc | 15,654 | py | Python | manim/scene/three_d_scene.py | behackl/manim | 3759b73d555792d077e1d77c854d5dbe88043b98 | [
"MIT"
] | 2 | 2020-11-17T19:00:44.000Z | 2021-10-17T16:14:55.000Z | manim/scene/three_d_scene.py | behackl/manim | 3759b73d555792d077e1d77c854d5dbe88043b98 | [
"MIT"
] | null | null | null | manim/scene/three_d_scene.py | behackl/manim | 3759b73d555792d077e1d77c854d5dbe88043b98 | [
"MIT"
] | null | null | null | """A scene suitable for rendering three-dimensional objects and animations."""
__all__ = ["ThreeDScene", "SpecialThreeDScene"]
from typing import Iterable, Optional, Sequence, Union
import numpy as np
from .. import config
from ..animation.animation import Animation
from ..animation.transform import ApplyMethod
from ..camera.three_d_camera import ThreeDCamera
from ..constants import DEGREES
from ..mobject.coordinate_systems import ThreeDAxes
from ..mobject.geometry import Line
from ..mobject.mobject import Mobject
from ..mobject.three_dimensions import Sphere
from ..mobject.types.vectorized_mobject import VectorizedPoint, VGroup
from ..mobject.value_tracker import ValueTracker
from ..scene.scene import Scene
from ..utils.config_ops import merge_dicts_recursively
class ThreeDScene(Scene):
"""
This is a Scene, with special configurations and properties that
make it suitable for Three Dimensional Scenes.
"""
    def __init__(
        self,
        camera_class=ThreeDCamera,
        ambient_camera_rotation=None,
        default_angled_camera_orientation_kwargs=None,
        **kwargs,
    ):
        """Create a scene driven by a :class:`ThreeDCamera`.

        Parameters
        ----------
        camera_class
            Camera type to instantiate; defaults to ThreeDCamera.
        ambient_camera_rotation
            Stored on the instance; not read elsewhere in this chunk.
        default_angled_camera_orientation_kwargs
            kwargs describing the default angled view; falls back to
            phi=70*DEGREES, theta=-135*DEGREES when None.
        """
        self.ambient_camera_rotation = ambient_camera_rotation
        if default_angled_camera_orientation_kwargs is None:
            default_angled_camera_orientation_kwargs = {
                "phi": 70 * DEGREES,
                "theta": -135 * DEGREES,
            }
        self.default_angled_camera_orientation_kwargs = (
            default_angled_camera_orientation_kwargs
        )
        super().__init__(camera_class=camera_class, **kwargs)
def set_camera_orientation(
self,
phi: Optional[float] = None,
theta: Optional[float] = None,
gamma: Optional[float] = None,
distance: Optional[float] = None,
frame_center: Optional[Union["Mobject", Sequence[float]]] = None,
):
"""
This method sets the orientation of the camera in the scene.
Parameters
----------
phi : int or float, optional
The polar angle i.e the angle between Z_AXIS and Camera through ORIGIN in radians.
theta : int or float, optional
The azimuthal angle i.e the angle that spins the camera around the Z_AXIS.
distance : int or float, optional
The radial distance between ORIGIN and Camera.
gamma : int or float, optional
The rotation of the camera about the vector from the ORIGIN to the Camera.
frame_center : list, tuple or np.array, optional
The new center of the camera frame in cartesian coordinates.
"""
if phi is not None:
self.renderer.camera.set_phi(phi)
if theta is not None:
self.renderer.camera.set_theta(theta)
if distance is not None:
self.renderer.camera.set_distance(distance)
if gamma is not None:
self.renderer.camera.set_gamma(gamma)
if frame_center is not None:
self.renderer.camera._frame_center.move_to(frame_center)
def begin_ambient_camera_rotation(self, rate=0.02, about="theta"):
"""
This method begins an ambient rotation of the camera about the Z_AXIS,
in the anticlockwise direction
Parameters
----------
rate : int or float, optional
The rate at which the camera should rotate about the Z_AXIS.
Negative rate means clockwise rotation.
about: (str)
one of 3 options: ["theta", "phi", "gamma"]. defaults to theta.
"""
# TODO, use a ValueTracker for rate, so that it
# can begin and end smoothly
if about.lower() == "phi":
x = self.renderer.camera.phi_tracker
elif about.lower() == "gamma":
x = self.renderer.camera.gamma_tracker
elif about.lower() == "theta":
x = self.renderer.camera.theta_tracker
else:
raise ValueError("Invalid ambient rotation angle.")
x.add_updater(lambda m, dt: m.increment_value(rate * dt))
self.add(x)
def stop_ambient_camera_rotation(self, about="theta"):
"""
This method stops all ambient camera rotation.
"""
if about.lower() == "phi":
x = self.renderer.camera.phi_tracker
elif about.lower() == "gamma":
x = self.renderer.camera.gamma_tracker
elif about.lower() == "theta":
x = self.renderer.camera.theta_tracker
else:
raise ValueError("Invalid ambient rotation angle.")
x.clear_updaters()
self.remove(x)
    def begin_3dillusion_camera_rotation(
        self, rate=1, origin_theta=-60 * DEGREES, origin_phi=75 * DEGREES
    ):
        """Start a gentle sinusoidal camera wobble around a base orientation.

        theta oscillates +/-0.2 rad about ``origin_theta`` and phi oscillates
        +/-0.1 rad about ``origin_phi``; ``rate`` scales the phase advance.
        Stop it with ``stop_3dillusion_camera_rotation``.
        """
        # phase accumulator for the theta oscillation (captured by closure)
        val_tracker_theta = ValueTracker(0)

        def update_theta(m, dt):
            # advance the phase, then set theta = origin + 0.2*sin(phase)
            val_tracker_theta.increment_value(dt * rate)
            val_for_left_right = 0.2 * np.sin(val_tracker_theta.get_value())
            return m.set_value(origin_theta + val_for_left_right)

        self.renderer.camera.theta_tracker.add_updater(update_theta)
        self.add(self.renderer.camera.theta_tracker)

        # phase accumulator for the phi oscillation
        val_tracker_phi = ValueTracker(0)

        def update_phi(m, dt):
            # advance the phase, then set phi = origin + 0.1*cos(phase)
            val_tracker_phi.increment_value(dt * rate)
            val_for_up_down = 0.1 * np.cos(val_tracker_phi.get_value())
            return m.set_value(origin_phi + val_for_up_down)

        self.renderer.camera.phi_tracker.add_updater(update_phi)
        self.add(self.renderer.camera.phi_tracker)
def stop_3dillusion_camera_rotation(self):
"""
This method stops all illusion camera rotations.
"""
self.renderer.camera.theta_tracker.clear_updaters()
self.remove(self.renderer.camera.theta_tracker)
self.renderer.camera.phi_tracker.clear_updaters()
self.remove(self.renderer.camera.phi_tracker)
def move_camera(
self,
phi: Optional[float] = None,
theta: Optional[float] = None,
gamma: Optional[float] = None,
distance: Optional[float] = None,
frame_center: Optional[Union["Mobject", Sequence[float]]] = None,
added_anims: Iterable["Animation"] = [],
**kwargs,
):
"""
This method animates the movement of the camera
to the given spherical coordinates.
Parameters
----------
phi : int or float, optional
The polar angle i.e the angle between Z_AXIS and Camera through ORIGIN in radians.
theta : int or float, optional
The azimuthal angle i.e the angle that spins the camera around the Z_AXIS.
distance : int or float, optional
The radial distance between ORIGIN and Camera.
gamma : int or float, optional
The rotation of the camera about the vector from the ORIGIN to the Camera.
frame_center : list, tuple or np.array, optional
The new center of the camera frame in cartesian coordinates.
added_anims : list, optional
Any other animations to be played at the same time.
"""
anims = []
value_tracker_pairs = [
(phi, self.renderer.camera.phi_tracker),
(theta, self.renderer.camera.theta_tracker),
(distance, self.renderer.camera.distance_tracker),
(gamma, self.renderer.camera.gamma_tracker),
]
for value, tracker in value_tracker_pairs:
if value is not None:
anims.append(ApplyMethod(tracker.set_value, value, **kwargs))
if frame_center is not None:
anims.append(
ApplyMethod(
self.renderer.camera._frame_center.move_to, frame_center, **kwargs
)
)
self.play(*anims + added_anims)
# These lines are added to improve performance. If manim thinks that frame_center is moving,
# it is required to redraw every object. These lines remove frame_center from the Scene once
# its animation is done, ensuring that manim does not think that it is moving. Since the
# frame_center is never actually drawn, this shouldn't break anything.
if frame_center is not None:
self.remove(self.renderer.camera._frame_center)
def get_moving_mobjects(self, *animations):
"""
This method returns a list of all of the Mobjects in the Scene that
are moving, that are also in the animations passed.
Parameters
----------
*animations : Animation
The animations whose mobjects will be checked.
"""
moving_mobjects = Scene.get_moving_mobjects(self, *animations)
camera_mobjects = self.renderer.camera.get_value_trackers() + [
self.renderer.camera._frame_center
]
if any([cm in moving_mobjects for cm in camera_mobjects]):
return self.mobjects
return moving_mobjects
def add_fixed_orientation_mobjects(self, *mobjects, **kwargs):
"""
This method is used to prevent the rotation and tilting
of mobjects as the camera moves around. The mobject can
still move in the x,y,z directions, but will always be
at the angle (relative to the camera) that it was at
when it was passed through this method.)
Parameters
----------
*mobjects : Mobject
The Mobject(s) whose orientation must be fixed.
**kwargs
Some valid kwargs are
use_static_center_func : bool
center_func : function
"""
self.add(*mobjects)
self.renderer.camera.add_fixed_orientation_mobjects(*mobjects, **kwargs)
def add_fixed_in_frame_mobjects(self, *mobjects):
"""
This method is used to prevent the rotation and movement
of mobjects as the camera moves around. The mobject is
essentially overlaid, and is not impacted by the camera's
movement in any way.
Parameters
----------
*mobjects : Mobjects
The Mobjects whose orientation must be fixed.
"""
self.add(*mobjects)
self.renderer.camera.add_fixed_in_frame_mobjects(*mobjects)
def remove_fixed_orientation_mobjects(self, *mobjects):
"""
This method "unfixes" the orientation of the mobjects
passed, meaning they will no longer be at the same angle
relative to the camera. This only makes sense if the
mobject was passed through add_fixed_orientation_mobjects first.
Parameters
----------
*mobjects : Mobjects
The Mobjects whose orientation must be unfixed.
"""
self.renderer.camera.remove_fixed_orientation_mobjects(*mobjects)
def remove_fixed_in_frame_mobjects(self, *mobjects):
"""
This method undoes what add_fixed_in_frame_mobjects does.
It allows the mobject to be affected by the movement of
the camera.
Parameters
----------
*mobjects : Mobjects
The Mobjects whose position and orientation must be unfixed.
"""
self.renderer.camera.remove_fixed_in_frame_mobjects(*mobjects)
##
def set_to_default_angled_camera_orientation(self, **kwargs):
"""
This method sets the default_angled_camera_orientation to the
keyword arguments passed, and sets the camera to that orientation.
Parameters
----------
**kwargs
Some recognised kwargs are phi, theta, distance, gamma,
which have the same meaning as the parameters in set_camera_orientation.
"""
config = dict(
self.default_camera_orientation_kwargs
) # Where doe this come from?
config.update(kwargs)
self.set_camera_orientation(**config)
class SpecialThreeDScene(ThreeDScene):
    """An extension of :class:`ThreeDScene` with more settings.

    It has some extra configuration for axes, spheres,
    and an override for low quality rendering. Further key differences
    are:

    * The camera shades applicable 3DMobjects by default,
      except if rendering in low quality.
    * Some default params for Spheres and Axes have been added.
    """

    def __init__(
        self,
        cut_axes_at_radius=True,
        camera_config=None,
        three_d_axes_config=None,
        sphere_config=None,
        default_angled_camera_position=None,
        low_quality_config=None,
        **kwargs,
    ):
        """
        Parameters mirror the attributes they set; any ``None`` config is
        replaced by the documented default below.

        The previous signature used dict literals as defaults.  Those are
        created once and shared between every instance, and since they are
        stored directly on ``self`` (e.g. ``self.sphere_config``), mutating
        them on one scene silently changed the defaults of all other scenes.
        Building them per call avoids that.
        """
        if camera_config is None:
            camera_config = {
                "should_apply_shading": True,
                "exponential_projection": True,
            }
        if three_d_axes_config is None:
            three_d_axes_config = {
                "num_axis_pieces": 1,
                "axis_config": {
                    "unit_size": 2,
                    "tick_frequency": 1,
                    "numbers_with_elongated_ticks": [0, 1, 2],
                    "stroke_width": 2,
                },
            }
        if sphere_config is None:
            sphere_config = {"radius": 2, "resolution": (24, 48)}
        if default_angled_camera_position is None:
            default_angled_camera_position = {
                "phi": 70 * DEGREES,
                "theta": -110 * DEGREES,
            }
        # When the scene is extracted with the -l flag, this configuration
        # overrides the ones above.
        if low_quality_config is None:
            low_quality_config = {
                "camera_config": {"should_apply_shading": False},
                "three_d_axes_config": {"num_axis_pieces": 1},
                "sphere_config": {"resolution": (12, 24)},
            }
        self.cut_axes_at_radius = cut_axes_at_radius
        self.camera_config = camera_config
        self.three_d_axes_config = three_d_axes_config
        self.sphere_config = sphere_config
        self.default_angled_camera_position = default_angled_camera_position
        self.low_quality_config = low_quality_config
        # NOTE(review): ``self.renderer`` is read before ``ThreeDScene.__init__``
        # runs, so it is presumably set up by earlier machinery — TODO confirm.
        if self.renderer.camera_config["pixel_width"] == config["pixel_width"]:
            _config = {}
        else:
            _config = self.low_quality_config
        _config = merge_dicts_recursively(_config, kwargs)
        ThreeDScene.__init__(self, **_config)

    def get_axes(self):
        """Return a set of 3D axes.

        Returns
        -------
        :class:`.ThreeDAxes`
            A set of 3D axes.
        """
        axes = ThreeDAxes(**self.three_d_axes_config)
        for axis in axes:
            if self.cut_axes_at_radius:
                # Split each axis at -1 and 1 so the unit sphere can occlude
                # the middle piece cleanly.
                p0 = axis.get_start()
                p1 = axis.number_to_point(-1)
                p2 = axis.number_to_point(1)
                p3 = axis.get_end()
                new_pieces = VGroup(Line(p0, p1), Line(p1, p2), Line(p2, p3))
                for piece in new_pieces:
                    piece.shade_in_3d = True
                new_pieces.match_style(axis.pieces)
                axis.pieces.submobjects = new_pieces.submobjects
            for tick in axis.tick_marks:
                tick.add(VectorizedPoint(1.5 * tick.get_center()))
        return axes

    def get_sphere(self, **kwargs):
        """
        Return a sphere with the passed keyword arguments as properties.

        Parameters
        ----------
        **kwargs
            Any valid parameter of :class:`~.Sphere` or :class:`~.Surface`.

        Returns
        -------
        :class:`~.Sphere`
            The sphere object.
        """
        # Renamed the local from ``config`` so it does not shadow the
        # module-level ``config`` import.
        sphere_kwargs = merge_dicts_recursively(self.sphere_config, kwargs)
        return Sphere(**sphere_kwargs)

    def get_default_camera_position(self):
        """
        Return the default angled camera position.

        Returns
        -------
        dict
            Dictionary of phi, theta, distance, and gamma.
        """
        return self.default_angled_camera_position

    def set_camera_to_default_position(self):
        """Set the camera to its default position."""
        self.set_camera_orientation(**self.default_angled_camera_position)
| 35.986207 | 100 | 0.622972 |
__all__ = ["ThreeDScene", "SpecialThreeDScene"]
from typing import Iterable, Optional, Sequence, Union
import numpy as np
from .. import config
from ..animation.animation import Animation
from ..animation.transform import ApplyMethod
from ..camera.three_d_camera import ThreeDCamera
from ..constants import DEGREES
from ..mobject.coordinate_systems import ThreeDAxes
from ..mobject.geometry import Line
from ..mobject.mobject import Mobject
from ..mobject.three_dimensions import Sphere
from ..mobject.types.vectorized_mobject import VectorizedPoint, VGroup
from ..mobject.value_tracker import ValueTracker
from ..scene.scene import Scene
from ..utils.config_ops import merge_dicts_recursively
class ThreeDScene(Scene):
    """A scene whose camera can be positioned and animated in 3D.

    Exposes helpers to set or animate the camera's spherical coordinates
    (phi, theta, gamma, distance) and to fix mobjects relative to the
    camera or to the frame.
    """

    def __init__(
        self,
        camera_class=ThreeDCamera,
        ambient_camera_rotation=None,
        default_angled_camera_orientation_kwargs=None,
        **kwargs,
    ):
        self.ambient_camera_rotation = ambient_camera_rotation
        if default_angled_camera_orientation_kwargs is None:
            # Fallback orientation used by set_to_default_angled_camera_orientation.
            default_angled_camera_orientation_kwargs = {
                "phi": 70 * DEGREES,
                "theta": -135 * DEGREES,
            }
        self.default_angled_camera_orientation_kwargs = (
            default_angled_camera_orientation_kwargs
        )
        super().__init__(camera_class=camera_class, **kwargs)

    def set_camera_orientation(
        self,
        phi: Optional[float] = None,
        theta: Optional[float] = None,
        gamma: Optional[float] = None,
        distance: Optional[float] = None,
        frame_center: Optional[Union["Mobject", Sequence[float]]] = None,
    ):
        """Set the camera's spherical coordinates; ``None`` leaves a value unchanged."""
        if phi is not None:
            self.renderer.camera.set_phi(phi)
        if theta is not None:
            self.renderer.camera.set_theta(theta)
        if distance is not None:
            self.renderer.camera.set_distance(distance)
        if gamma is not None:
            self.renderer.camera.set_gamma(gamma)
        if frame_center is not None:
            self.renderer.camera._frame_center.move_to(frame_center)

    def begin_ambient_camera_rotation(self, rate=0.02, about="theta"):
        """Start rotating the camera about one angle ("phi", "theta" or "gamma")
        by ``rate`` radians per second via a per-frame updater.
        """
        if about.lower() == "phi":
            x = self.renderer.camera.phi_tracker
        elif about.lower() == "gamma":
            x = self.renderer.camera.gamma_tracker
        elif about.lower() == "theta":
            x = self.renderer.camera.theta_tracker
        else:
            raise ValueError("Invalid ambient rotation angle.")
        # dt-scaled updater -> rotation speed is independent of frame rate.
        x.add_updater(lambda m, dt: m.increment_value(rate * dt))
        self.add(x)

    def stop_ambient_camera_rotation(self, about="theta"):
        """Stop a rotation started by :meth:`begin_ambient_camera_rotation`."""
        if about.lower() == "phi":
            x = self.renderer.camera.phi_tracker
        elif about.lower() == "gamma":
            x = self.renderer.camera.gamma_tracker
        elif about.lower() == "theta":
            x = self.renderer.camera.theta_tracker
        else:
            raise ValueError("Invalid ambient rotation angle.")
        x.clear_updaters()
        self.remove(x)

    def begin_3dillusion_camera_rotation(
        self, rate=1, origin_theta=-60 * DEGREES, origin_phi=75 * DEGREES
    ):
        """Start a gentle oscillation of theta and phi around the given origin
        angles, producing a swaying "3D illusion" effect.
        """
        val_tracker_theta = ValueTracker(0)

        def update_theta(m, dt):
            val_tracker_theta.increment_value(dt * rate)
            # Left/right sway of +/- 0.2 rad around origin_theta.
            val_for_left_right = 0.2 * np.sin(val_tracker_theta.get_value())
            return m.set_value(origin_theta + val_for_left_right)

        self.renderer.camera.theta_tracker.add_updater(update_theta)
        self.add(self.renderer.camera.theta_tracker)
        val_tracker_phi = ValueTracker(0)

        def update_phi(m, dt):
            val_tracker_phi.increment_value(dt * rate)
            # Up/down sway of +/- 0.1 rad around origin_phi.
            val_for_up_down = 0.1 * np.cos(val_tracker_phi.get_value())
            return m.set_value(origin_phi + val_for_up_down)

        self.renderer.camera.phi_tracker.add_updater(update_phi)
        self.add(self.renderer.camera.phi_tracker)

    def stop_3dillusion_camera_rotation(self):
        """Stop the oscillation started by :meth:`begin_3dillusion_camera_rotation`."""
        self.renderer.camera.theta_tracker.clear_updaters()
        self.remove(self.renderer.camera.theta_tracker)
        self.renderer.camera.phi_tracker.clear_updaters()
        self.remove(self.renderer.camera.phi_tracker)

    def move_camera(
        self,
        phi: Optional[float] = None,
        theta: Optional[float] = None,
        gamma: Optional[float] = None,
        distance: Optional[float] = None,
        frame_center: Optional[Union["Mobject", Sequence[float]]] = None,
        added_anims: Iterable["Animation"] = [],
        **kwargs,
    ):
        """Animate the camera to the given spherical coordinates; any
        ``added_anims`` are played at the same time.

        NOTE(review): the mutable default ``[]`` is shared between calls;
        it is only read here, but ``None`` plus an in-body default is safer.
        """
        anims = []
        value_tracker_pairs = [
            (phi, self.renderer.camera.phi_tracker),
            (theta, self.renderer.camera.theta_tracker),
            (distance, self.renderer.camera.distance_tracker),
            (gamma, self.renderer.camera.gamma_tracker),
        ]
        for value, tracker in value_tracker_pairs:
            if value is not None:
                anims.append(ApplyMethod(tracker.set_value, value, **kwargs))
        if frame_center is not None:
            anims.append(
                ApplyMethod(
                    self.renderer.camera._frame_center.move_to, frame_center, **kwargs
                )
            )
        self.play(*anims + added_anims)
        # Performance: while frame_center is considered "moving", every object
        # must be redrawn.  Removing it once its animation is done avoids that;
        # it is never actually drawn, so the removal is harmless.
        if frame_center is not None:
            self.remove(self.renderer.camera._frame_center)

    def get_moving_mobjects(self, *animations):
        """Return the moving mobjects of ``animations``; if any camera value
        tracker (or the frame center) moves, every mobject counts as moving.
        """
        moving_mobjects = Scene.get_moving_mobjects(self, *animations)
        camera_mobjects = self.renderer.camera.get_value_trackers() + [
            self.renderer.camera._frame_center
        ]
        if any([cm in moving_mobjects for cm in camera_mobjects]):
            return self.mobjects
        return moving_mobjects

    def add_fixed_orientation_mobjects(self, *mobjects, **kwargs):
        """Add mobjects whose on-screen angle stays fixed as the camera moves."""
        self.add(*mobjects)
        self.renderer.camera.add_fixed_orientation_mobjects(*mobjects, **kwargs)

    def add_fixed_in_frame_mobjects(self, *mobjects):
        """Add mobjects pinned to the frame, unaffected by camera movement."""
        self.add(*mobjects)
        self.renderer.camera.add_fixed_in_frame_mobjects(*mobjects)

    def remove_fixed_orientation_mobjects(self, *mobjects):
        """Undo :meth:`add_fixed_orientation_mobjects` for the given mobjects."""
        self.renderer.camera.remove_fixed_orientation_mobjects(*mobjects)

    def remove_fixed_in_frame_mobjects(self, *mobjects):
        """Undo :meth:`add_fixed_in_frame_mobjects` for the given mobjects."""
        self.renderer.camera.remove_fixed_in_frame_mobjects(*mobjects)

    ##
    def set_to_default_angled_camera_orientation(self, **kwargs):
        """Set the camera to the default angled orientation, overridden by kwargs."""
        # NOTE(review): ``__init__`` sets ``default_angled_camera_orientation_kwargs``;
        # the attribute read below does not exist and will raise AttributeError.
        config = dict(
            self.default_camera_orientation_kwargs
        ) # Where doe this come from?
        config.update(kwargs)
        self.set_camera_orientation(**config)
class SpecialThreeDScene(ThreeDScene):
    """A :class:`ThreeDScene` with extra configuration for axes and spheres,
    and a low-quality override used when rendering with the -l flag.
    """

    def __init__(
        self,
        cut_axes_at_radius=True,
        # NOTE(review): dict literals as default arguments are shared between
        # every instance and are stored directly on ``self`` below — mutating
        # e.g. ``self.sphere_config`` on one scene changes all scenes' defaults.
        camera_config={"should_apply_shading": True, "exponential_projection": True},
        three_d_axes_config={
            "num_axis_pieces": 1,
            "axis_config": {
                "unit_size": 2,
                "tick_frequency": 1,
                "numbers_with_elongated_ticks": [0, 1, 2],
                "stroke_width": 2,
            },
        },
        sphere_config={"radius": 2, "resolution": (24, 48)},
        default_angled_camera_position={
            "phi": 70 * DEGREES,
            "theta": -110 * DEGREES,
        },
        # When scene is extracted with -l flag, this
        # configuration will override the above configuration.
        low_quality_config={
            "camera_config": {"should_apply_shading": False},
            "three_d_axes_config": {"num_axis_pieces": 1},
            "sphere_config": {"resolution": (12, 24)},
        },
        **kwargs,
    ):
        self.cut_axes_at_radius = cut_axes_at_radius
        self.camera_config = camera_config
        self.three_d_axes_config = three_d_axes_config
        self.sphere_config = sphere_config
        self.default_angled_camera_position = default_angled_camera_position
        self.low_quality_config = low_quality_config
        # Full-size render keeps the rich config; otherwise fall back to the
        # low-quality overrides before merging in caller kwargs.
        if self.renderer.camera_config["pixel_width"] == config["pixel_width"]:
            _config = {}
        else:
            _config = self.low_quality_config
        _config = merge_dicts_recursively(_config, kwargs)
        ThreeDScene.__init__(self, **_config)

    def get_axes(self):
        """Return a :class:`.ThreeDAxes` built from ``three_d_axes_config``,
        optionally cut at radius 1 so a unit sphere can occlude the middle.
        """
        axes = ThreeDAxes(**self.three_d_axes_config)
        for axis in axes:
            if self.cut_axes_at_radius:
                p0 = axis.get_start()
                p1 = axis.number_to_point(-1)
                p2 = axis.number_to_point(1)
                p3 = axis.get_end()
                new_pieces = VGroup(Line(p0, p1), Line(p1, p2), Line(p2, p3))
                for piece in new_pieces:
                    piece.shade_in_3d = True
                new_pieces.match_style(axis.pieces)
                axis.pieces.submobjects = new_pieces.submobjects
            for tick in axis.tick_marks:
                tick.add(VectorizedPoint(1.5 * tick.get_center()))
        return axes

    def get_sphere(self, **kwargs):
        """Return a :class:`~.Sphere` using ``sphere_config`` merged with kwargs."""
        config = merge_dicts_recursively(self.sphere_config, kwargs)
        return Sphere(**config)

    def get_default_camera_position(self):
        """Return the default angled camera position dict (phi/theta)."""
        return self.default_angled_camera_position

    def set_camera_to_default_position(self):
        """Set the camera to its default position."""
        self.set_camera_orientation(**self.default_angled_camera_position)
| true | true |
f7198e330d6123f84319f87eb566ae8978c38f58 | 7,124 | py | Python | corehq/apps/reports/urls.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 1 | 2020-07-14T13:00:23.000Z | 2020-07-14T13:00:23.000Z | corehq/apps/reports/urls.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 94 | 2020-12-11T06:57:31.000Z | 2022-03-15T10:24:06.000Z | corehq/apps/reports/urls.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | import logging
from django.conf.urls import include, url
from django.core.exceptions import ImproperlyConfigured
from corehq.apps.reports.standard.forms.reports import ReprocessXFormErrorView
from corehq.apps.userreports.reports.view import (
ConfigurableReportView,
CustomConfigurableReportDispatcher,
)
from corehq.apps.userreports.views import (
ConfigureReport,
EditReportInBuilder,
ReportBuilderDataSourceSelect,
ReportBuilderPaywallActivatingSubscription,
ReportBuilderPaywallPricing,
ReportPreview,
)
from .dispatcher import (
CustomProjectReportDispatcher,
ProjectReportDispatcher,
)
from .filters import urls as filter_urls
from .util import get_installed_custom_modules
from .views import (
AddSavedReportConfigView,
CaseAttachmentsView,
CaseDataView,
EditFormInstance,
FormDataView,
MySavedReportsView,
ScheduledReportsView,
archive_form,
case_form_data,
case_forms,
case_property_changes,
case_property_names,
case_xml,
close_case_view,
delete_config,
delete_scheduled_report,
download_case_history,
download_form,
edit_case_view,
edit_form,
email_report,
export_case_transactions,
export_report,
project_health_user_details,
rebuild_case_view,
resave_case_view,
resave_form_view,
restore_edit,
send_test_scheduled_report,
unarchive_form,
undo_close_case_view,
view_scheduled_report,
)
# URL patterns for custom (per-project) reports; extended at the bottom of
# this module with the urls of each installed custom module.
custom_report_urls = [
    CustomProjectReportDispatcher.url_pattern(),
]

# Main report URL configuration, mounted by the enclosing Django project.
urlpatterns = [
    ConfigurableReportView.url_pattern(),
    CustomConfigurableReportDispatcher.url_pattern(),
    # Report Builder
    url(r'^builder/select_source/$', ReportBuilderDataSourceSelect.as_view(),
        name=ReportBuilderDataSourceSelect.urlname),
    url(r'^builder/configure/$', ConfigureReport.as_view(), name=ConfigureReport.urlname),
    url(r'^builder/preview/(?P<data_source>[\w\-]+)/$', ReportPreview.as_view(), name=ReportPreview.urlname),
    url(r'^builder/edit/(?P<report_id>[\w\-]+)/$', EditReportInBuilder.as_view(), name='edit_report_in_builder'),
    # NOTE(review): the two subscribe patterns below are missing the leading
    # '^' anchor that every sibling pattern has — confirm this is intended.
    url(r'builder/subscribe/pricing/$', ReportBuilderPaywallPricing.as_view(),
        name=ReportBuilderPaywallPricing.urlname),
    url(r'builder/subscribe/activating_subscription/$', ReportBuilderPaywallActivatingSubscription.as_view(),
        name=ReportBuilderPaywallActivatingSubscription.urlname),
    # Saved-report landing pages
    url(r'^$', MySavedReportsView.as_view(), name="reports_home"),
    url(r'^saved/', MySavedReportsView.as_view(), name=MySavedReportsView.urlname),
    url(r'^saved_reports', MySavedReportsView.as_view(), name="old_saved_reports"),
    # Case data pages and actions
    url(r'^case_data/(?P<case_id>[\w\-]+)/$', CaseDataView.as_view(), name=CaseDataView.urlname),
    url(r'^case_data/(?P<case_id>[\w\-]+)/forms/$', case_forms, name="single_case_forms"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/attachments/$',
        CaseAttachmentsView.as_view(), name=CaseAttachmentsView.urlname),
    url(r'^case_data/(?P<case_id>[\w\-]+)/view/xml/$', case_xml, name="single_case_xml"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/properties/$', case_property_names, name="case_property_names"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/history/$', download_case_history, name="download_case_history"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/edit/$', edit_case_view, name="edit_case"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/rebuild/$', rebuild_case_view, name="rebuild_case"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/resave/$', resave_case_view, name="resave_case"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/close/$', close_case_view, name="close_case"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/undo-close/(?P<xform_id>[\w\-:]+)/$',
        undo_close_case_view, name="undo_close_case"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/export_transactions/$',
        export_case_transactions, name="export_case_transactions"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/(?P<xform_id>[\w\-:]+)/$', case_form_data, name="case_form_data"),
    url(r'^case_data/(?P<case_id>[\w\-]+)/case_property/(?P<case_property_name>[\w_\-.]+)/$',
        case_property_changes, name="case_property_changes"),
    # Download and view form data
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/$', FormDataView.as_view(), name=FormDataView.urlname),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/download/$', download_form, name='download_form'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/edit/$', EditFormInstance.as_view(), name='edit_form_instance'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/restore_version/$', restore_edit, name='restore_edit'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/correct_data/$', edit_form, name='edit_form'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/archive/$', archive_form, name='archive_form'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/unarchive/$', unarchive_form, name='unarchive_form'),
    url(r'^form_data/(?P<instance_id>[\w\-:]+)/rebuild/$', resave_form_view, name='resave_form'),
    # project health ajax
    url(r'^project_health/ajax/(?P<user_id>[\w\-]+)/$', project_health_user_details,
        name='project_health_user_details'),
    # Full Excel export
    url(r'^full_excel_export/(?P<export_hash>[\w\-]+)/(?P<format>[\w\-]+)$', export_report, name="export_report"),
    # once off email
    url(r"^email_onceoff/(?P<report_slug>[\w_]+)/$", email_report, kwargs=dict(once=True), name='email_report'),
    url(r"^custom/email_onceoff/(?P<report_slug>[\w_]+)/$", email_report,
        kwargs=dict(report_type=CustomProjectReportDispatcher.prefix, once=True), name='email_onceoff'),
    # Saved reports
    url(r"^configs$", AddSavedReportConfigView.as_view(), name=AddSavedReportConfigView.name),
    url(r"^configs/(?P<config_id>[\w-]+)$", delete_config,
        name='delete_report_config'),
    # Scheduled reports
    url(r'^scheduled_reports/(?P<scheduled_report_id>[\w-]+)?$',
        ScheduledReportsView.as_view(), name=ScheduledReportsView.urlname),
    url(r'^scheduled_report/(?P<scheduled_report_id>[\w-]+)/delete$',
        delete_scheduled_report, name='delete_scheduled_report'),
    url(r'^send_test_scheduled_report/(?P<scheduled_report_id>[\w-]+)/$',
        send_test_scheduled_report, name='send_test_scheduled_report'),
    url(r'^view_scheduled_report/(?P<scheduled_report_id>[\w_]+)/$',
        view_scheduled_report, name='view_scheduled_report'),
    # V2 Reports
    url(r'^v2/', include('corehq.apps.reports.v2.urls')),
    # Internal Use
    url(r'^reprocess_error_form/$', ReprocessXFormErrorView.as_view(),
        name=ReprocessXFormErrorView.urlname),
    url(r'^custom/', include(custom_report_urls)),
    url(r'^filters/', include(filter_urls)),
    ProjectReportDispatcher.url_pattern(),
]
# Mount each installed custom module's urls under its own prefix; modules
# without a ``urls`` submodule are skipped with an informational log entry.
for module in get_installed_custom_modules():
    module_name = module.__name__.split('.')[-1]
    try:
        custom_report_urls += [
            url(r"^%s/" % module_name, include('{0}.urls'.format(module.__name__))),
        ]
    except ImproperlyConfigured:
        # Lazy %-style logging args: the message is only formatted if the
        # record is actually emitted (and a bad arg cannot raise here).
        logging.info("Module %s does not provide urls", module_name)
| 44.525 | 114 | 0.701291 | import logging
from django.conf.urls import include, url
from django.core.exceptions import ImproperlyConfigured
from corehq.apps.reports.standard.forms.reports import ReprocessXFormErrorView
from corehq.apps.userreports.reports.view import (
ConfigurableReportView,
CustomConfigurableReportDispatcher,
)
from corehq.apps.userreports.views import (
ConfigureReport,
EditReportInBuilder,
ReportBuilderDataSourceSelect,
ReportBuilderPaywallActivatingSubscription,
ReportBuilderPaywallPricing,
ReportPreview,
)
from .dispatcher import (
CustomProjectReportDispatcher,
ProjectReportDispatcher,
)
from .filters import urls as filter_urls
from .util import get_installed_custom_modules
from .views import (
AddSavedReportConfigView,
CaseAttachmentsView,
CaseDataView,
EditFormInstance,
FormDataView,
MySavedReportsView,
ScheduledReportsView,
archive_form,
case_form_data,
case_forms,
case_property_changes,
case_property_names,
case_xml,
close_case_view,
delete_config,
delete_scheduled_report,
download_case_history,
download_form,
edit_case_view,
edit_form,
email_report,
export_case_transactions,
export_report,
project_health_user_details,
rebuild_case_view,
resave_case_view,
resave_form_view,
restore_edit,
send_test_scheduled_report,
unarchive_form,
undo_close_case_view,
view_scheduled_report,
)
custom_report_urls = [
CustomProjectReportDispatcher.url_pattern(),
]
urlpatterns = [
ConfigurableReportView.url_pattern(),
CustomConfigurableReportDispatcher.url_pattern(),
url(r'^builder/select_source/$', ReportBuilderDataSourceSelect.as_view(),
name=ReportBuilderDataSourceSelect.urlname),
url(r'^builder/configure/$', ConfigureReport.as_view(), name=ConfigureReport.urlname),
url(r'^builder/preview/(?P<data_source>[\w\-]+)/$', ReportPreview.as_view(), name=ReportPreview.urlname),
url(r'^builder/edit/(?P<report_id>[\w\-]+)/$', EditReportInBuilder.as_view(), name='edit_report_in_builder'),
url(r'builder/subscribe/pricing/$', ReportBuilderPaywallPricing.as_view(),
name=ReportBuilderPaywallPricing.urlname),
url(r'builder/subscribe/activating_subscription/$', ReportBuilderPaywallActivatingSubscription.as_view(),
name=ReportBuilderPaywallActivatingSubscription.urlname),
url(r'^$', MySavedReportsView.as_view(), name="reports_home"),
url(r'^saved/', MySavedReportsView.as_view(), name=MySavedReportsView.urlname),
url(r'^saved_reports', MySavedReportsView.as_view(), name="old_saved_reports"),
url(r'^case_data/(?P<case_id>[\w\-]+)/$', CaseDataView.as_view(), name=CaseDataView.urlname),
url(r'^case_data/(?P<case_id>[\w\-]+)/forms/$', case_forms, name="single_case_forms"),
url(r'^case_data/(?P<case_id>[\w\-]+)/attachments/$',
CaseAttachmentsView.as_view(), name=CaseAttachmentsView.urlname),
url(r'^case_data/(?P<case_id>[\w\-]+)/view/xml/$', case_xml, name="single_case_xml"),
url(r'^case_data/(?P<case_id>[\w\-]+)/properties/$', case_property_names, name="case_property_names"),
url(r'^case_data/(?P<case_id>[\w\-]+)/history/$', download_case_history, name="download_case_history"),
url(r'^case_data/(?P<case_id>[\w\-]+)/edit/$', edit_case_view, name="edit_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/rebuild/$', rebuild_case_view, name="rebuild_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/resave/$', resave_case_view, name="resave_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/close/$', close_case_view, name="close_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/undo-close/(?P<xform_id>[\w\-:]+)/$',
undo_close_case_view, name="undo_close_case"),
url(r'^case_data/(?P<case_id>[\w\-]+)/export_transactions/$',
export_case_transactions, name="export_case_transactions"),
url(r'^case_data/(?P<case_id>[\w\-]+)/(?P<xform_id>[\w\-:]+)/$', case_form_data, name="case_form_data"),
url(r'^case_data/(?P<case_id>[\w\-]+)/case_property/(?P<case_property_name>[\w_\-.]+)/$',
case_property_changes, name="case_property_changes"),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/$', FormDataView.as_view(), name=FormDataView.urlname),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/download/$', download_form, name='download_form'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/edit/$', EditFormInstance.as_view(), name='edit_form_instance'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/restore_version/$', restore_edit, name='restore_edit'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/correct_data/$', edit_form, name='edit_form'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/archive/$', archive_form, name='archive_form'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/unarchive/$', unarchive_form, name='unarchive_form'),
url(r'^form_data/(?P<instance_id>[\w\-:]+)/rebuild/$', resave_form_view, name='resave_form'),
url(r'^project_health/ajax/(?P<user_id>[\w\-]+)/$', project_health_user_details,
name='project_health_user_details'),
url(r'^full_excel_export/(?P<export_hash>[\w\-]+)/(?P<format>[\w\-]+)$', export_report, name="export_report"),
url(r"^email_onceoff/(?P<report_slug>[\w_]+)/$", email_report, kwargs=dict(once=True), name='email_report'),
url(r"^custom/email_onceoff/(?P<report_slug>[\w_]+)/$", email_report,
kwargs=dict(report_type=CustomProjectReportDispatcher.prefix, once=True), name='email_onceoff'),
url(r"^configs$", AddSavedReportConfigView.as_view(), name=AddSavedReportConfigView.name),
url(r"^configs/(?P<config_id>[\w-]+)$", delete_config,
name='delete_report_config'),
url(r'^scheduled_reports/(?P<scheduled_report_id>[\w-]+)?$',
ScheduledReportsView.as_view(), name=ScheduledReportsView.urlname),
url(r'^scheduled_report/(?P<scheduled_report_id>[\w-]+)/delete$',
delete_scheduled_report, name='delete_scheduled_report'),
url(r'^send_test_scheduled_report/(?P<scheduled_report_id>[\w-]+)/$',
send_test_scheduled_report, name='send_test_scheduled_report'),
url(r'^view_scheduled_report/(?P<scheduled_report_id>[\w_]+)/$',
view_scheduled_report, name='view_scheduled_report'),
url(r'^v2/', include('corehq.apps.reports.v2.urls')),
url(r'^reprocess_error_form/$', ReprocessXFormErrorView.as_view(),
name=ReprocessXFormErrorView.urlname),
url(r'^custom/', include(custom_report_urls)),
url(r'^filters/', include(filter_urls)),
ProjectReportDispatcher.url_pattern(),
]
# Mount each installed custom module's urls under its own prefix; modules
# without a ``urls`` submodule are skipped with an informational log entry.
for module in get_installed_custom_modules():
    module_name = module.__name__.split('.')[-1]
    try:
        custom_report_urls += [
            url(r"^%s/" % module_name, include('{0}.urls'.format(module.__name__))),
        ]
    except ImproperlyConfigured:
        # Lazy %-style logging args instead of eager string interpolation.
        logging.info("Module %s does not provide urls", module_name)
| true | true |
f7198e35f24a43baae21005438b0076176ee416a | 561 | py | Python | oving_8_c.py | W3OP/Oving_9_round2 | 090cbc3b135840914659d50c6fa48ab756e5449e | [
"MIT"
] | null | null | null | oving_8_c.py | W3OP/Oving_9_round2 | 090cbc3b135840914659d50c6fa48ab756e5449e | [
"MIT"
] | null | null | null | oving_8_c.py | W3OP/Oving_9_round2 | 090cbc3b135840914659d50c6fa48ab756e5449e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 22 10:13:37 2021
@author: palme
"""
import oving_8_b as o8b

# Each round: (question, alternatives, correct answer, message on a right answer).
QUIZ_ROUNDS = [
    ("Hvor mange bein har en hest", [1, 2, 3, 4], 4, "Svaret er rett"),
    ("Hvilket land er i i nå?", ["norge", "sverie", "danmark"], 1, "Svaret er rett!"),
]

for round_no, (question, alternatives, correct, right_msg) in enumerate(QUIZ_ROUNDS):
    if round_no:
        # Separator between rounds (same output as the original script).
        print("\n \n")
    quiz = o8b.Quiz(question, alternatives, correct)
    print(quiz)
    answer = int(input("Svar: "))
    if quiz.svaret(answer):
        print(right_msg)
    else:
        print("Svaret er feil")
| 15.162162 | 77 | 0.611408 |
import oving_8_b as o8b
test = o8b.Quiz("Hvor mange bein har en hest", [1, 2, 3, 4],4)
print(test)
dude = int(input("Svar: "))
svar1 = test.svaret(dude)
if svar1:
print("Svaret er rett")
else:
print("Svaret er feil")
print("\n \n")
test2 = o8b.Quiz("Hvilket land er i i nå?", ["norge", "sverie", "danmark"],1)
print(test2)
dude2 = int(input("Svar: "))
svar2 = test2.svaret(dude2)
if svar2:
print("Svaret er rett!")
else:
print("Svaret er feil")
| true | true |
f7198ec98548e880b167ef7ccfc9be00d9b58137 | 5,121 | py | Python | zipkin/binding/pyramid/pyramidhook.py | Themimitoof/python-zipkin | f91169d044a49f641930bdfc456f34e497690fe8 | [
"Apache-2.0"
] | null | null | null | zipkin/binding/pyramid/pyramidhook.py | Themimitoof/python-zipkin | f91169d044a49f641930bdfc456f34e497690fe8 | [
"Apache-2.0"
] | null | null | null | zipkin/binding/pyramid/pyramidhook.py | Themimitoof/python-zipkin | f91169d044a49f641930bdfc456f34e497690fe8 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import time
import logging
from pyramid.tweens import INGRESS
from pyramid.settings import aslist
from zipkin import local
from zipkin.api import stack_trace
from zipkin.models import Trace, Annotation
from zipkin.util import int_or_none
from zipkin.client import log as zipkin_log
from zipkin.config import configure as configure_zk
log = logging.getLogger(__name__)
class AllTraceTweenView(object):
    """Pyramid tween that opens a Zipkin trace for every request and reports
    it once the response has been produced.
    """

    # Zipkin endpoint shared by all instances; set once by ``configure``.
    endpoint = None

    @classmethod
    def configure(cls, settings):
        """Configure the zipkin client from the Pyramid ``settings`` mapping
        and remember the resulting endpoint on the class.
        """
        default_name = "Registry"  # Keep compat with `registry.__name__` ?
        name = settings.get("zipkin.service_name", default_name)
        bindings = aslist(settings.get("zipkin.bindings", "requests celery xmlrpclib"))
        cls.endpoint = configure_zk(
            name,
            settings,
            use_requests="requests" in bindings,
            use_celery="celery" in bindings,
            use_xmlrpclib="xmlrpclib" in bindings,
        )

    def __init__(self, handler, registry):
        self.handler = handler
        # NOTE(review): a tween instance is created once per application and
        # reused for every request, so keeping the trace on ``self`` looks
        # unsafe under concurrent requests — confirm the deployment model.
        self.trace = None

    def track_start_request(self, request):
        """Create the trace for ``request`` — continuing an incoming trace if
        ``X-B3-*`` headers are present — and record the server-recv annotation.
        """
        headers = request.headers
        trace_name = request.path_qs
        if request.matched_route:
            # we only get a matched route if we've gone through the router.
            trace_name = request.matched_route.pattern
        trace = Trace(
            request.method + " " + trace_name,
            int_or_none(headers.get("X-B3-TraceId", None)),
            int_or_none(headers.get("X-B3-SpanId", None)),
            int_or_none(headers.get("X-B3-ParentSpanId", None)),
            endpoint=self.endpoint,
        )
        if "X-B3-TraceId" not in headers:
            log.info("no trace info from request: %s", request.path_qs)

        if request.matchdict:  # matchdict maybe none if no route is registered
            for k, v in request.matchdict.items():
                trace.record(Annotation.string("route.param.%s" % k, v))

        trace.record(Annotation.string("http.path", request.path_qs))

        log.info("new trace %r", trace.trace_id)
        # Push the trace on the thread-local stack so nested client calls
        # attach to it as children.
        stack_trace(trace)

        trace.record(Annotation.server_recv())
        self.trace = trace

    def track_end_request(self, request, response):
        """Close the trace, expose its id via the ``Trace-Id`` response
        header, and ship it to zipkin.
        """
        if self.trace:
            self.trace.record(Annotation.server_send())
            log.info("reporting trace %s", self.trace.name)
            response.headers["Trace-Id"] = str(self.trace.trace_id)
            zipkin_log(self.trace)

    def __call__(self, request):
        """Tween entry point: trace around the downstream ``handler``."""
        self.track_start_request(request)
        response = None
        try:
            response = self.handler(request)
        finally:
            # request.response in case an exception is raised ?
            self.track_end_request(request, response or request.response)
            # Reset the thread-local trace stack so it cannot leak into the
            # next request handled by this thread.
            local().reset()
            self.trace = None

        return response or request.response
class SlowQueryTweenView(AllTraceTweenView):
    """Variant of :class:`AllTraceTweenView` that only reports traces for
    requests that took longer than ``zipkin.slow_log_duration_exceed`` seconds.
    """

    # Threshold in seconds; stays ``None`` (reporting disabled) when the
    # setting is missing or invalid.
    max_duration = None

    @classmethod
    def configure(cls, settings):
        """Configure zipkin, then parse the slow-request threshold setting."""
        super(SlowQueryTweenView, cls).configure(settings)
        setting = settings.get("zipkin.slow_log_duration_exceed")
        if setting is None:
            log.error(
                "Missing setting 'zipkin.slow_log_duration_exceed' %r",
                list(settings.keys()),
            )
            return
        try:
            cls.max_duration = float(setting)
        except ValueError:
            log.error("Invalid setting 'zipkin.slow_log_duration_exceed'")

    def __init__(self, handler, registry):
        super(SlowQueryTweenView, self).__init__(handler, registry)
        # Wall-clock start of the current request; set in track_start_request.
        self.start = None

    def track_start_request(self, request):
        # Record the start time so the duration can be checked at the end.
        self.start = time.time()
        super(SlowQueryTweenView, self).track_start_request(request)

    def track_end_request(self, request, response):
        if self.max_duration is None:
            # unconfigured, we don't care
            return
        if self.start:
            duration = time.time() - self.start
            if duration > self.max_duration:
                # Only slow requests get reported upstream.
                super(SlowQueryTweenView, self).track_end_request(request, response)
def includeme(config):
    """Include the zipkin definitions.

    Resolves the ``zipkin.tween_factory`` setting ("all" or "slow_query"),
    configures the chosen tween class, and registers it under INGRESS so
    logging starts as early as possible in the pyramid stack.

    :param config: the pyramid ``Configurator``.
    """
    settings = config.registry.settings
    factory_name = settings.get("zipkin.tween_factory", "all")
    # BUG FIX: the previous ``assert factory_name in [...]`` made the
    # error-logging else branch below unreachable (and the assert itself is
    # stripped under ``python -O``); rely on the explicit check instead.
    factories = {
        "all": AllTraceTweenView,
        "slow_query": SlowQueryTweenView,
    }
    tween_factory = factories.get(factory_name)
    if tween_factory is None:
        log.error(
            "Invalid value for settings 'zipkin.tween_factory', "
            "should be all or slow_query, not %s",
            factory_name,
        )
        return
    tween_factory.configure(settings)
    config.add_tween(
        "{}.{}".format(tween_factory.__module__, tween_factory.__name__),
        under=INGRESS,
    )
| 32.617834 | 101 | 0.641672 | from __future__ import absolute_import
import time
import logging
from pyramid.tweens import INGRESS
from pyramid.settings import aslist
from zipkin import local
from zipkin.api import stack_trace
from zipkin.models import Trace, Annotation
from zipkin.util import int_or_none
from zipkin.client import log as zipkin_log
from zipkin.config import configure as configure_zk
log = logging.getLogger(__name__)
class AllTraceTweenView(object):
    """Pyramid tween that opens a zipkin trace for every incoming request.

    ``configure`` must be called once with the deployment settings before
    pyramid instantiates the tween.
    """

    # Zipkin endpoint shared by all instances; populated by ``configure``.
    endpoint = None

    @classmethod
    def configure(cls, settings):
        """Configure the zipkin client from pyramid settings.

        Reads the service name and which client libraries to instrument
        (requests / celery / xmlrpclib).
        """
        default_name = "Registry"
        name = settings.get("zipkin.service_name", default_name)
        bindings = aslist(settings.get("zipkin.bindings", "requests celery xmlrpclib"))
        cls.endpoint = configure_zk(
            name,
            settings,
            use_requests="requests" in bindings,
            use_celery="celery" in bindings,
            use_xmlrpclib="xmlrpclib" in bindings,
        )

    def __init__(self, handler, registry):
        # handler: the downstream pyramid handler wrapped by this tween.
        self.handler = handler
        # Trace associated with the in-flight request (None between requests).
        self.trace = None

    def track_start_request(self, request):
        """Open a trace for ``request``, honoring incoming B3 headers."""
        headers = request.headers
        # Prefer the route pattern over the raw path for a stable trace name.
        trace_name = request.path_qs
        if request.matched_route:
            trace_name = request.matched_route.pattern
        trace = Trace(
            request.method + " " + trace_name,
            int_or_none(headers.get("X-B3-TraceId", None)),
            int_or_none(headers.get("X-B3-SpanId", None)),
            int_or_none(headers.get("X-B3-ParentSpanId", None)),
            endpoint=self.endpoint,
        )
        if "X-B3-TraceId" not in headers:
            log.info("no trace info from request: %s", request.path_qs)
        if request.matchdict:  # matchdict maybe none if no route is registered
            for k, v in request.matchdict.items():
                trace.record(Annotation.string("route.param.%s" % k, v))
        trace.record(Annotation.string("http.path", request.path_qs))
        log.info("new trace %r", trace.trace_id)
        stack_trace(trace)
        trace.record(Annotation.server_recv())
        self.trace = trace

    def track_end_request(self, request, response):
        """Record ``server_send``, expose the trace id, flush the trace."""
        if self.trace:
            self.trace.record(Annotation.server_send())
            log.info("reporting trace %s", self.trace.name)
            response.headers["Trace-Id"] = str(self.trace.trace_id)
            zipkin_log(self.trace)

    def __call__(self, request):
        """Trace the wrapped handler for one request/response cycle."""
        self.track_start_request(request)
        response = None
        try:
            response = self.handler(request)
        finally:
            # request.response covers the case where the handler raised.
            self.track_end_request(request, response or request.response)
            local().reset()
            self.trace = None
        return response or request.response
class SlowQueryTweenView(AllTraceTweenView):
    """Variant tween that only reports traces for slow requests."""

    # Threshold in seconds; None (reporting disabled) until ``configure``
    # parses a valid float from the settings.
    max_duration = None

    @classmethod
    def configure(cls, settings):
        """Parse ``zipkin.slow_log_duration_exceed`` from the settings."""
        super(SlowQueryTweenView, cls).configure(settings)
        setting = settings.get("zipkin.slow_log_duration_exceed")
        if setting is None:
            log.error(
                "Missing setting 'zipkin.slow_log_duration_exceed' %r",
                list(settings.keys()),
            )
            return
        try:
            cls.max_duration = float(setting)
        except ValueError:
            log.error("Invalid setting 'zipkin.slow_log_duration_exceed'")

    def __init__(self, handler, registry):
        super(SlowQueryTweenView, self).__init__(handler, registry)
        # Wall-clock start time of the in-flight request.
        self.start = None

    def track_start_request(self, request):
        """Start the timer, then open the trace as usual."""
        self.start = time.time()
        super(SlowQueryTweenView, self).track_start_request(request)

    def track_end_request(self, request, response):
        """Report the trace only when the request exceeded the threshold."""
        if self.max_duration is None:
            # Unconfigured: slow-query reporting is disabled.
            return
        if self.start:
            duration = time.time() - self.start
            if duration > self.max_duration:
                super(SlowQueryTweenView, self).track_end_request(request, response)
def includeme(config):
    """Include the zipkin definitions.

    Reads ``zipkin.tween_factory`` from the registry settings ("all" or
    "slow_query"), configures the chosen tween class, and registers it
    under INGRESS so tracing starts as early as possible.
    """
    settings = config.registry.settings
    tween_factory = settings.get("zipkin.tween_factory", "all")
    # NOTE(review): this assert already guarantees membership, making the
    # else/log.error branch below unreachable; the assert also vanishes
    # under ``python -O`` — confirm which behavior is intended.
    assert tween_factory in ["all", "slow_query"]
    if tween_factory == "all":
        tween_factory = AllTraceTweenView
    elif tween_factory == "slow_query":
        tween_factory = SlowQueryTweenView
    else:
        log.error(
            "Invalid value for settings 'zipkin.tween_factory', should be all or slow_query, not %s",
            tween_factory,
        )
        return
    tween_factory.configure(settings)
    config.add_tween(
        "{}.{}".format(tween_factory.__module__, tween_factory.__name__),
        under=INGRESS,
    )
| true | true |
f7198ece6a41b7a5f0f2edead87cf05f2c1c0cd4 | 10,093 | py | Python | sdks/python/http_client/v1/polyaxon_sdk/models/v1_bayes.py | onilton/polyaxon | 3b0d7cbeead74e62eb0eedbb2962f605ebb9fa81 | [
"Apache-2.0"
] | null | null | null | sdks/python/http_client/v1/polyaxon_sdk/models/v1_bayes.py | onilton/polyaxon | 3b0d7cbeead74e62eb0eedbb2962f605ebb9fa81 | [
"Apache-2.0"
] | null | null | null | sdks/python/http_client/v1/polyaxon_sdk/models/v1_bayes.py | onilton/polyaxon | 3b0d7cbeead74e62eb0eedbb2962f605ebb9fa81 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.9.4
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
def _plain_property(name):
    """Build a pass-through property over the private attribute ``_<name>``."""
    private = '_' + name

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        setattr(self, private, value)

    return property(_get, _set, doc='The %s of this V1Bayes.' % name)


class V1Bayes(object):
    """OpenAPI model for the ``bayes`` hyperparameter-tuning matrix.

    Mirrors the ``V1Bayes`` schema of the Polyaxon REST API: every schema
    field is exposed as a plain read/write property backed by ``_<field>``,
    and only constructor arguments that are not None are assigned.
    """

    # Attribute name -> OpenAPI type; drives ``to_dict`` traversal.
    openapi_types = {
        'kind': 'str',
        'params': 'dict(str, object)',
        'num_initial_runs': 'int',
        'max_iterations': 'int',
        'utility_function': 'object',
        'metric': 'V1OptimizationMetric',
        'seed': 'int',
        'concurrency': 'int',
        'tuner': 'V1Tuner',
        'early_stopping': 'list[object]'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'kind': 'kind',
        'params': 'params',
        'num_initial_runs': 'numInitialRuns',
        'max_iterations': 'maxIterations',
        'utility_function': 'utilityFunction',
        'metric': 'metric',
        'seed': 'seed',
        'concurrency': 'concurrency',
        'tuner': 'tuner',
        'early_stopping': 'earlyStopping'
    }

    def __init__(self, kind='bayes', params=None, num_initial_runs=None,
                 max_iterations=None, utility_function=None, metric=None,
                 seed=None, concurrency=None, tuner=None, early_stopping=None,
                 local_vars_configuration=None):
        """V1Bayes - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Start every backing field at None, then route the non-None
        # constructor arguments through the public property setters.
        for attr in self.openapi_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        provided = {
            'kind': kind,
            'params': params,
            'num_initial_runs': num_initial_runs,
            'max_iterations': max_iterations,
            'utility_function': utility_function,
            'metric': metric,
            'seed': seed,
            'concurrency': concurrency,
            'tuner': tuner,
            'early_stopping': early_stopping,
        }
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    kind = _plain_property('kind')
    params = _plain_property('params')
    num_initial_runs = _plain_property('num_initial_runs')
    max_iterations = _plain_property('max_iterations')
    utility_function = _plain_property('utility_function')
    metric = _plain_property('metric')
    seed = _plain_property('seed')
    concurrency = _plain_property('concurrency')
    tuner = _plain_property('tuner')
    early_stopping = _plain_property('early_stopping')

    @staticmethod
    def _serialize(value):
        """Recursively convert nested models/containers to plain structures."""
        if isinstance(value, list):
            return [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
        if hasattr(value, 'to_dict'):
            return value.to_dict()
        if isinstance(value, dict):
            return {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                    for k, v in value.items()}
        return value

    def to_dict(self):
        """Returns the model properties as a dict."""
        return {attr: self._serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, V1Bayes):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, V1Bayes):
            return True
        return self.to_dict() != other.to_dict()
| 27.13172 | 237 | 0.593382 |
import pprint
import re
import six
from polyaxon_sdk.configuration import Configuration
def _plain_property(name):
    """Build a pass-through property over the private attribute ``_<name>``."""
    private = '_' + name

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        setattr(self, private, value)

    return property(_get, _set, doc='The %s of this V1Bayes.' % name)


class V1Bayes(object):
    """OpenAPI model for the ``bayes`` hyperparameter-tuning matrix.

    Mirrors the ``V1Bayes`` schema of the Polyaxon REST API: every schema
    field is exposed as a plain read/write property backed by ``_<field>``,
    and only constructor arguments that are not None are assigned.
    """

    # Attribute name -> OpenAPI type; drives ``to_dict`` traversal.
    openapi_types = {
        'kind': 'str',
        'params': 'dict(str, object)',
        'num_initial_runs': 'int',
        'max_iterations': 'int',
        'utility_function': 'object',
        'metric': 'V1OptimizationMetric',
        'seed': 'int',
        'concurrency': 'int',
        'tuner': 'V1Tuner',
        'early_stopping': 'list[object]'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'kind': 'kind',
        'params': 'params',
        'num_initial_runs': 'numInitialRuns',
        'max_iterations': 'maxIterations',
        'utility_function': 'utilityFunction',
        'metric': 'metric',
        'seed': 'seed',
        'concurrency': 'concurrency',
        'tuner': 'tuner',
        'early_stopping': 'earlyStopping'
    }

    def __init__(self, kind='bayes', params=None, num_initial_runs=None,
                 max_iterations=None, utility_function=None, metric=None,
                 seed=None, concurrency=None, tuner=None, early_stopping=None,
                 local_vars_configuration=None):
        """V1Bayes - a model defined in OpenAPI."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Start every backing field at None, then route the non-None
        # constructor arguments through the public property setters.
        for attr in self.openapi_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        provided = {
            'kind': kind,
            'params': params,
            'num_initial_runs': num_initial_runs,
            'max_iterations': max_iterations,
            'utility_function': utility_function,
            'metric': metric,
            'seed': seed,
            'concurrency': concurrency,
            'tuner': tuner,
            'early_stopping': early_stopping,
        }
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    kind = _plain_property('kind')
    params = _plain_property('params')
    num_initial_runs = _plain_property('num_initial_runs')
    max_iterations = _plain_property('max_iterations')
    utility_function = _plain_property('utility_function')
    metric = _plain_property('metric')
    seed = _plain_property('seed')
    concurrency = _plain_property('concurrency')
    tuner = _plain_property('tuner')
    early_stopping = _plain_property('early_stopping')

    @staticmethod
    def _serialize(value):
        """Recursively convert nested models/containers to plain structures."""
        if isinstance(value, list):
            return [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
        if hasattr(value, 'to_dict'):
            return value.to_dict()
        if isinstance(value, dict):
            return {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                    for k, v in value.items()}
        return value

    def to_dict(self):
        """Returns the model properties as a dict."""
        return {attr: self._serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, V1Bayes):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, V1Bayes):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
f7198f349b0048d3b6330725d65dfdf36b553ff4 | 1,458 | py | Python | soltrannet/__init__.py | hengwei-chan/molecular_attention_transformer | 29193d4155df528e3a6a0c1e0da39111d0b8db93 | [
"Apache-2.0"
] | 16 | 2021-03-10T17:10:06.000Z | 2022-03-16T13:07:58.000Z | soltrannet/__init__.py | hengwei-chan/molecular_attention_transformer | 29193d4155df528e3a6a0c1e0da39111d0b8db93 | [
"Apache-2.0"
] | null | null | null | soltrannet/__init__.py | hengwei-chan/molecular_attention_transformer | 29193d4155df528e3a6a0c1e0da39111d0b8db93 | [
"Apache-2.0"
] | 10 | 2021-06-01T03:36:08.000Z | 2022-03-18T16:58:25.000Z | from .predict import predict
import argparse
import sys, multiprocessing
import torch
def _parse_args():
    """Build the CLI parser for the SolTranNet predictor and parse argv."""
    parser = argparse.ArgumentParser(
        description="Run SolTranNet aqueous solubility predictor")
    # Positional input/output default to stdin/stdout so the tool can be piped.
    parser.add_argument(
        'input', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
        help='PATH to the file containing the SMILES you wish to use. Assumes the content is 1 SMILE per line.')
    parser.add_argument(
        'output', nargs='?', type=argparse.FileType('w'), default=sys.stdout,
        help='Name of the output file. Defaults to stdout.')
    parser.add_argument(
        '--batchsize', default=32, type=int,
        help='Batch size for the data loader. Defaults to 32.')
    parser.add_argument(
        '--cpus', default=multiprocessing.cpu_count(), type=int,
        help='Number of CPU cores to use for the data loader. Defaults to use all available cores. Pass 0 to only run on 1 CPU.')
    parser.add_argument(
        '--cpu_predict', action='store_true',
        help='Flag to force the predictions to be made on only the CPU. Default behavior is to use GPU if available.')
    return parser.parse_args()
def _run(args):
    """Predict solubility for every SMILES in ``args.input``, write CSV rows."""
    smiles = [line.rstrip() for line in args.input]
    kwargs = dict(batch_size=args.batchsize, num_workers=args.cpus)
    if args.cpu_predict:
        # Force inference on CPU even when a GPU is available.
        kwargs['device'] = torch.device('cpu')
    predictions = predict(smiles, **kwargs)
    # One CSV row per molecule: SMILES, predicted solubility, warning flags.
    for pred, smi, warn in predictions:
        args.output.write(f'{smi},{pred:.3f},{warn}\n')
| 52.071429 | 199 | 0.739369 | from .predict import predict
import argparse
import sys, multiprocessing
import torch
def _parse_args():
    """Parse the SolTranNet command-line arguments from ``sys.argv``."""
    parser=argparse.ArgumentParser(description="Run SolTranNet aqueous solubility predictor")
    # Positional input/output default to stdin/stdout so the tool can be piped.
    parser.add_argument('input',nargs='?',type=argparse.FileType('r'),default=sys.stdin,help='PATH to the file containing the SMILES you wish to use. Assumes the content is 1 SMILE per line.')
    parser.add_argument('output',nargs='?',type=argparse.FileType('w'),default=sys.stdout,help='Name of the output file. Defaults to stdout.')
    parser.add_argument('--batchsize',default=32,type=int,help='Batch size for the data loader. Defaults to 32.')
    parser.add_argument('--cpus',default=multiprocessing.cpu_count(),type=int,help='Number of CPU cores to use for the data loader. Defaults to use all available cores. Pass 0 to only run on 1 CPU.')
    parser.add_argument('--cpu_predict',action='store_true',help='Flag to force the predictions to be made on only the CPU. Default behavior is to use GPU if available.')
    args=parser.parse_args()
    return args
def _run(args):
    """Run solubility predictions for parsed CLI ``args``, writing CSV lines."""
    smiles=[x.rstrip() for x in args.input]
    if args.cpu_predict:
        # Force inference on CPU even when a GPU is available.
        predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus,device=torch.device('cpu'))
    else:
        predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus)
    # One CSV row per molecule: SMILES, predicted solubility, warning flags.
    for pred, smi, warn in predictions:
        args.output.write(f'{smi},{pred:.3f},{warn}\n')
| true | true |
f7198f927dcfc0aeb6186a86d48263d8c4b1d8eb | 5,831 | py | Python | src/garage/torch/algos/_utils.py | adibellathur/garage | 8394f0cf2b77c0a5b3a7b1ea977fa6cb3f9df0ca | [
"MIT"
] | 1 | 2020-02-19T00:01:29.000Z | 2020-02-19T00:01:29.000Z | src/garage/torch/algos/_utils.py | Ashutosh-Adhikari/garage | 482a26a07d46091f878c41b582f1478588e397ff | [
"MIT"
] | null | null | null | src/garage/torch/algos/_utils.py | Ashutosh-Adhikari/garage | 482a26a07d46091f878c41b582f1478588e397ff | [
"MIT"
] | 1 | 2020-02-13T12:05:35.000Z | 2020-02-13T12:05:35.000Z | """Utility functions used by PyTorch algorithms."""
import torch
import torch.nn.functional as F
class _Default:  # pylint: disable=too-few-public-methods
    """A wrapper class to represent default arguments.

    Args:
        val (object): Argument value.

    """

    def __init__(self, val):
        self.val = val


def make_optimizer(optimizer_type, module, **kwargs):
    """Create an optimizer for PyTorch algos.

    Args:
        optimizer_type (Union[type, tuple[type, dict]]): Type of optimizer.
            This can be an optimizer type such as 'torch.optim.Adam' or a
            tuple of type and dictionary, where dictionary contains arguments
            to initialize the optimizer e.g. (torch.optim.Adam, {'lr': 1e-3})
        module (torch.nn.Module): The module whose parameters needs to be
            optimized.
        kwargs (dict): Other keyword arguments to initialize optimizer. This
            is not used when `optimizer_type` is tuple.

    Returns:
        torch.optim.Optimizer: Constructed optimizer.

    Raises:
        ValueError: Raises value error when `optimizer_type` is tuple, and
            non-default argument is passed in `kwargs`.

    """
    if isinstance(optimizer_type, tuple):
        opt_type, opt_args = optimizer_type
        for name, arg in kwargs.items():
            if not isinstance(arg, _Default):
                # Explicit optimizer args conflict with the args embedded in
                # the (type, dict) tuple form.
                # BUG FIX: the message used to be a backslash-continued
                # literal that embedded a run of indentation spaces.
                raise ValueError(
                    'Should not specify {} and explicit optimizer args '
                    'at the same time'.format(name))
        return opt_type(module.parameters(), **opt_args)
    # Unwrap _Default placeholders; pass other kwargs through unchanged.
    opt_args = {
        name: (arg.val if isinstance(arg, _Default) else arg)
        for name, arg in kwargs.items()
    }
    return optimizer_type(module.parameters(), **opt_args)
def compute_advantages(discount, gae_lambda, max_path_length, baselines,
                       rewards):
    """Calculate advantages using Generalized Advantage Estimation (GAE).

    Advantages are a discounted cumulative sum of TD residuals. The sum is
    realized as a 2D convolution of the per-step deltas with the filter
    [1, (discount * gae_lambda), (discount * gae_lambda)^2, ...] of length
    ``max_path_length``.

    Args:
        discount (float): RL discount factor (i.e. gamma).
        gae_lambda (float): Lambda, as used for Generalized Advantage
            Estimation (GAE).
        max_path_length (int): Maximum length of a single rollout.
        baselines (torch.Tensor): A 2D vector of value function estimates
            with shape (N, T); entries past episode termination must be 0.
        rewards (torch.Tensor): A 2D vector of per-step rewards with shape
            (N, T); entries past episode termination must be 0.

    Returns:
        torch.Tensor: A 2D vector of calculated advantage values with shape
            (N, T); entries past episode termination are 0.
    """
    # Build the decaying-weight filter via a cumulative product, with a
    # leading 1 prepended by the pad.
    adv_filter = torch.full((1, 1, 1, max_path_length - 1),
                            discount * gae_lambda)
    adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)

    # TD residuals: r_t + discount * V(s_{t+1}) - V(s_t); the pad supplies
    # V = 0 past the end of each episode.
    deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)
    deltas = F.pad(deltas, (0, max_path_length - 1)).unsqueeze(0).unsqueeze(0)

    # Discounted cumulative sum realized as a convolution.
    # NOTE(review): .squeeze() drops ALL singleton dims, so the batch dim
    # disappears when N == 1 — confirm callers never pass a single episode.
    advantages = F.conv2d(deltas, adv_filter, stride=1).squeeze()
    return advantages
def pad_to_last(nums, total_length, axis=-1, val=0):
    """Pad `nums` with `val` along the given axis up to `total_length`.

    The length of the result in the given axis is ``total_length`` (the
    input is returned unchanged in that axis if it is already long enough).

    Args:
        nums (numpy.ndarray): The array to pad.
        total_length (int): The final width of the array along `axis`.
        axis (int): Axis along which padding is applied.
        val (int): The fill value used for the padded entries.

    Returns:
        torch.Tensor: Padded array.

    Raises:
        IndexError: If the input axis value is out of range of the nums array.
    """
    tensor = torch.Tensor(nums)
    axis = (axis + len(tensor.shape)) if axis < 0 else axis
    if len(tensor.shape) <= axis:
        raise IndexError('axis {} is out of range {}'.format(
            axis, tensor.shape))

    # F.pad's config lists (left, right) pairs starting from the LAST dim;
    # pick the "right" slot of the requested axis.
    padding_config = [0, 0] * len(tensor.shape)
    padding_idx = abs(axis - len(tensor.shape)) * 2 - 1
    # BUG FIX: pad amount is clamped at 0 (the old code used `val` as the
    # floor), and `val` is now actually passed to F.pad as the fill value
    # (previously it was silently ignored and 0 was always used).
    padding_config[padding_idx] = max(total_length - tensor.shape[axis], 0)
    return F.pad(tensor, padding_config, value=val)
def filter_valids(tensor, valids):
    """Slice each row of `tensor` down to its valid prefix.

    ``valids`` holds, per row, the index one past the last valid element.

    Args:
        tensor (torch.Tensor): The tensor to filter.
        valids (list[int]): Number of valid entries in each row.

    Returns:
        list[torch.Tensor]: One truncated row per entry of `valids`.
    """
    return [tensor[row][:length] for row, length in enumerate(valids)]
| 35.993827 | 79 | 0.630081 | import torch
import torch.nn.functional as F
class _Default:
    """A wrapper class marking a keyword argument as 'use the default value'."""

    def __init__(self, val):
        # val (object): the wrapped default value.
        self.val = val
def make_optimizer(optimizer_type, module, **kwargs):
    """Create an optimizer over ``module``'s parameters.

    ``optimizer_type`` is either an optimizer class, or a
    ``(class, kwargs_dict)`` tuple; in the tuple form, only ``_Default``
    placeholders may appear among the explicit keyword arguments.

    Raises:
        ValueError: if ``optimizer_type`` is a tuple and a non-default
            keyword argument is also given.
    """
    if isinstance(optimizer_type, tuple):
        opt_type, opt_args = optimizer_type
        for name, arg in kwargs.items():
            if not isinstance(arg, _Default):
                # NOTE(review): the backslash continuation embeds the next
                # line's leading spaces inside the message — confirm upstream.
                raise ValueError('Should not specify {} and explicit \
                    optimizer args at the same time'.format(name))
        return opt_type(module.parameters(), **opt_args)
    # Unwrap _Default placeholders; pass other kwargs through unchanged.
    opt_args = {}
    for name, arg in kwargs.items():
        if isinstance(arg, _Default):
            opt_args[name] = arg.val
        else:
            opt_args[name] = arg
    return optimizer_type(module.parameters(), **opt_args)
def compute_advantages(discount, gae_lambda, max_path_length, baselines,
                       rewards):
    """Compute GAE advantages for a batch of zero-padded episodes.

    Implements Generalized Advantage Estimation as a discounted cumulative
    sum: per-step TD residuals are convolved with the decaying filter
    [1, (discount*gae_lambda), (discount*gae_lambda)^2, ...].

    Args:
        discount (float): RL discount factor gamma.
        gae_lambda (float): GAE lambda.
        max_path_length (int): Maximum episode length T.
        baselines (torch.Tensor): (N, T) value estimates, zero past the end.
        rewards (torch.Tensor): (N, T) rewards, zero past the end.

    Returns:
        torch.Tensor: (N, T) advantage values (same padding convention).
    """
    # Decaying-weight filter built with cumprod; the pad prepends the 1.
    adv_filter = torch.full((1, 1, 1, max_path_length - 1),
                            discount * gae_lambda)
    adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)
    # TD residuals: r_t + discount * V(s_{t+1}) - V(s_t).
    deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)
    deltas = F.pad(deltas, (0, max_path_length - 1)).unsqueeze(0).unsqueeze(0)
    # Discounted cumulative sum realized as a 2D convolution.
    # NOTE(review): .squeeze() also drops the batch dim when N == 1.
    advantages = F.conv2d(deltas, adv_filter, stride=1).squeeze()
    return advantages
def pad_to_last(nums, total_length, axis=-1, val=0):
    """Pad `nums` along `axis` so that its length there is `total_length`.

    Raises:
        IndexError: if `axis` is out of range for `nums`.
    """
    tensor = torch.Tensor(nums)
    axis = (axis + len(tensor.shape)) if axis < 0 else axis
    if len(tensor.shape) <= axis:
        raise IndexError('axis {} is out of range {}'.format(
            axis, tensor.shape))
    # F.pad's config lists (left, right) pairs starting from the LAST dim.
    padding_config = [0, 0] * len(tensor.shape)
    padding_idx = abs(axis - len(tensor.shape)) * 2 - 1
    # NOTE(review): `val` is used as a floor for the pad AMOUNT here and is
    # never passed to F.pad, so padding always fills with 0 — this looks
    # like a bug; confirm the intended semantics of `val`.
    padding_config[padding_idx] = max(total_length - tensor.shape[axis], val)
    return F.pad(tensor, padding_config)
def filter_valids(tensor, valids):
    """Truncate each row i of `tensor` to its first `valids[i]` elements."""
    return [tensor[i][:valids[i]] for i in range(len(valids))]
| true | true |
f7198f9535491c7521d5ae47ee77aaa8910d0441 | 801 | py | Python | tests/test_export_id.py | David-Le-Nir/sphinxcontrib-needs | fe809445505fa1e9bf5963eab1d6283dad405e92 | [
"MIT"
] | null | null | null | tests/test_export_id.py | David-Le-Nir/sphinxcontrib-needs | fe809445505fa1e9bf5963eab1d6283dad405e92 | [
"MIT"
] | 2 | 2022-02-13T19:49:18.000Z | 2022-02-13T19:49:18.000Z | tests/test_export_id.py | David-Le-Nir/sphinxcontrib-needs | fe809445505fa1e9bf5963eab1d6283dad405e92 | [
"MIT"
] | null | null | null | import json
import os
from pathlib import Path
from sphinx_testing import with_app
@with_app(buildername="needs", srcdir="doc_test/doc_export_id")
def test_export_id(app, status, warning):
    """The `needs` builder must export the document's filters to needs.json."""
    app.build()
    raw = Path(app.outdir, "needs.json").read_text()
    assert "filters" in raw
    data = json.loads(raw)
    assert data is not None
    assert "created" in data
    exported_filters = data["versions"]["1.0"]["filters"]
    for filter_id in ("FLOW_1", "TABLE_1", "LIST_1"):
        assert filter_id in exported_filters
@with_app(buildername="html", srcdir="doc_test/doc_export_id")
def test_export_id_html(app, status, warning):
    """The html builder must not produce a needs.json export file."""
    app.build()
    needs_json = os.path.join(app.outdir, "needs.json")
    assert not os.path.exists(needs_json)
| 30.807692 | 69 | 0.716604 | import json
import os
from pathlib import Path
from sphinx_testing import with_app
@with_app(buildername="needs", srcdir="doc_test/doc_export_id")
def test_export_id(app, status, warning):
    """The `needs` builder exports FLOW_1/TABLE_1/LIST_1 filters to needs.json."""
    app.build()
    content = Path(app.outdir, "needs.json").read_text()
    assert "filters" in content
    content_obj = json.loads(content)
    assert content_obj is not None
    assert "created" in content_obj
    # Each exported filter is registered under the version it was built for.
    assert "FLOW_1" in content_obj["versions"]["1.0"]["filters"]
    assert "TABLE_1" in content_obj["versions"]["1.0"]["filters"]
    assert "LIST_1" in content_obj["versions"]["1.0"]["filters"]
@with_app(buildername="html", srcdir="doc_test/doc_export_id")
def test_export_id_html(app, status, warning):
    """The html builder must not create a needs.json export."""
    app.build()
    assert not os.path.exists(os.path.join(app.outdir, "needs.json"))
| true | true |
f719907ff48a40bf779cf6020839f0d298c921ad | 7,308 | py | Python | wavedata/tools/core/voxel_grid_2d.py | amuamushu/wavedata | 1745c646ff3a76b38a81c439a0edd900c986c9f7 | [
"MIT"
] | null | null | null | wavedata/tools/core/voxel_grid_2d.py | amuamushu/wavedata | 1745c646ff3a76b38a81c439a0edd900c986c9f7 | [
"MIT"
] | null | null | null | wavedata/tools/core/voxel_grid_2d.py | amuamushu/wavedata | 1745c646ff3a76b38a81c439a0edd900c986c9f7 | [
"MIT"
] | null | null | null | import numpy as np
from wavedata.wavedata.tools.core import geometry_utils
class VoxelGrid2D(object):
"""
Voxel grids represent occupancy info. The voxelize_2d method projects a point cloud
onto a plane, while saving height and point density information for each voxel.
"""
# Class Constants
VOXEL_EMPTY = -1
VOXEL_FILLED = 0
    def __init__(self):
        """Create an empty grid; fields are populated by ``voxelize_2d``."""
        # Quantization size of the voxel grid
        self.voxel_size = 0.0
        # Voxels at the most negative/positive xyz
        self.min_voxel_coord = np.array([])
        self.max_voxel_coord = np.array([])
        # Size of the voxel grid along each axis
        self.num_divisions = np.array([0, 0, 0])
        # Points in sorted order, to match the order of the voxels
        self.points = []
        # Indices of filled voxels
        self.voxel_indices = []
        # Max point height in projected voxel
        self.heights = []
        # Number of points corresponding to projected voxel
        self.num_pts_in_voxel = []
        # Full occupancy grid, VOXEL_EMPTY or VOXEL_FILLED
        self.leaf_layout_2d = []
def voxelize_2d(self, pts, voxel_size, extents=None,
ground_plane=None, create_leaf_layout=True):
"""Voxelizes the point cloud into a 2D voxel grid by
projecting it down into a flat plane, and stores the maximum
point height, and number of points corresponding to the voxel
:param pts: Point cloud as N x [x, y, z]
:param voxel_size: Quantization size for the grid
:param extents: Optional, specifies the full extents of the point cloud.
Used for creating same sized voxel grids.
:param ground_plane: Plane coefficients (a, b, c, d), xz plane used if
not specified
:param create_leaf_layout: Set this to False to create an empty
leaf_layout, which will save computation
time.
"""
# Check if points are 3D, otherwise early exit
if pts.shape[1] != 3:
raise ValueError("Points have the wrong shape: {}".format(
pts.shape))
self.voxel_size = voxel_size
# Discretize voxel coordinates to given quantization size
discrete_pts = np.floor(pts / voxel_size).astype(np.int32)
# Use Lex Sort, sort by x, then z, then y (
x_col = discrete_pts[:, 0]
y_col = discrete_pts[:, 1]
z_col = discrete_pts[:, 2]
sorted_order = np.lexsort((y_col, z_col, x_col))
# Save original points in sorted order
self.points = pts[sorted_order]
# Save discrete points in sorted order
discrete_pts = discrete_pts[sorted_order]
# Project all points to a 2D plane
discrete_pts_2d = discrete_pts.copy()
discrete_pts_2d[:, 1] = 0
# Format the array to c-contiguous array for unique function
contiguous_array = np.ascontiguousarray(discrete_pts_2d).view(
np.dtype((np.void, discrete_pts_2d.dtype.itemsize *
discrete_pts_2d.shape[1])))
# The new coordinates are the discretized array with its unique indexes
_, unique_indices = np.unique(contiguous_array, return_index=True)
# Sort unique indices to preserve order
unique_indices.sort()
voxel_coords = discrete_pts_2d[unique_indices]
# Number of points per voxel, last voxel calculated separately
num_points_in_voxel = np.diff(unique_indices)
num_points_in_voxel = np.append(num_points_in_voxel,
discrete_pts_2d.shape[0] -
unique_indices[-1])
if ground_plane is None:
# Use first point in voxel as highest point
height_in_voxel = self.points[unique_indices, 1]
else:
# Ground plane provided
height_in_voxel = geometry_utils.dist_to_plane(
ground_plane, self.points[unique_indices])
# Set the height and number of points for each voxel
self.heights = height_in_voxel
self.num_pts_in_voxel = num_points_in_voxel
# Find the minimum and maximum voxel coordinates
if extents is not None:
# Check provided extents
extents_transpose = np.array(extents).transpose()
if extents_transpose.shape != (2, 3):
raise ValueError("Extents are the wrong shape {}".format(
extents.shape))
# Set voxel grid extents
self.min_voxel_coord = np.floor(extents_transpose[0] / voxel_size)
self.max_voxel_coord = \
np.ceil((extents_transpose[1] / voxel_size) - 1)
self.min_voxel_coord[1] = 0
self.max_voxel_coord[1] = 0
# Check that points are bounded by new extents
if not (self.min_voxel_coord <= np.amin(voxel_coords,
axis=0)).all():
raise ValueError("Extents are smaller than min_voxel_coord")
if not (self.max_voxel_coord >= np.amax(voxel_coords,
axis=0)).all():
raise ValueError("Extents are smaller than max_voxel_coord")
else:
# Automatically calculate extents
self.min_voxel_coord = np.amin(voxel_coords, axis=0)
self.max_voxel_coord = np.amax(voxel_coords, axis=0)
# Get the voxel grid dimensions
self.num_divisions = ((self.max_voxel_coord - self.min_voxel_coord)
+ 1).astype(np.int32)
# Bring the min voxel to the origin
self.voxel_indices = (voxel_coords - self.min_voxel_coord).astype(int)
if create_leaf_layout:
# Create Voxel Object with -1 as empty/occluded, 0 as occupied
self.leaf_layout_2d = self.VOXEL_EMPTY * \
np.ones(self.num_divisions.astype(int))
# Fill out the leaf layout
self.leaf_layout_2d[self.voxel_indices[:, 0], 0,
self.voxel_indices[:, 2]] = \
self.VOXEL_FILLED
def map_to_index(self, map_index):
"""Converts map coordinate values to 1-based discretized grid index
coordinate. Note: Any values outside the extent of the grid will be
forced to be the maximum grid coordinate.
:param map_index: N x 2 points
:return: N x length(dim) (grid coordinate)
[] if min_voxel_coord or voxel_size or grid_index or dim is not set
"""
if self.voxel_size == 0 \
or len(self.min_voxel_coord) == 0 \
or len(map_index) == 0:
return []
num_divisions_2d = self.num_divisions[[0, 2]]
min_voxel_coord_2d = self.min_voxel_coord[[0, 2]]
# Truncate index (same as np.floor for positive values) and clip
# to valid voxel index range
indices = np.int32(map_index / self.voxel_size) - min_voxel_coord_2d
indices[:, 0] = np.clip(indices[:, 0], 0, num_divisions_2d[0])
indices[:, 1] = np.clip(indices[:, 1], 0, num_divisions_2d[1])
return indices
| 39.080214 | 87 | 0.601122 | import numpy as np
from wavedata.wavedata.tools.core import geometry_utils
class VoxelGrid2D(object):
VOXEL_EMPTY = -1
VOXEL_FILLED = 0
def __init__(self):
self.voxel_size = 0.0
self.min_voxel_coord = np.array([])
self.max_voxel_coord = np.array([])
self.num_divisions = np.array([0, 0, 0])
self.points = []
self.voxel_indices = []
self.heights = []
self.num_pts_in_voxel = []
self.leaf_layout_2d = []
def voxelize_2d(self, pts, voxel_size, extents=None,
ground_plane=None, create_leaf_layout=True):
if pts.shape[1] != 3:
raise ValueError("Points have the wrong shape: {}".format(
pts.shape))
self.voxel_size = voxel_size
discrete_pts = np.floor(pts / voxel_size).astype(np.int32)
x_col = discrete_pts[:, 0]
y_col = discrete_pts[:, 1]
z_col = discrete_pts[:, 2]
sorted_order = np.lexsort((y_col, z_col, x_col))
self.points = pts[sorted_order]
discrete_pts = discrete_pts[sorted_order]
discrete_pts_2d = discrete_pts.copy()
discrete_pts_2d[:, 1] = 0
contiguous_array = np.ascontiguousarray(discrete_pts_2d).view(
np.dtype((np.void, discrete_pts_2d.dtype.itemsize *
discrete_pts_2d.shape[1])))
_, unique_indices = np.unique(contiguous_array, return_index=True)
unique_indices.sort()
voxel_coords = discrete_pts_2d[unique_indices]
num_points_in_voxel = np.diff(unique_indices)
num_points_in_voxel = np.append(num_points_in_voxel,
discrete_pts_2d.shape[0] -
unique_indices[-1])
if ground_plane is None:
height_in_voxel = self.points[unique_indices, 1]
else:
height_in_voxel = geometry_utils.dist_to_plane(
ground_plane, self.points[unique_indices])
self.heights = height_in_voxel
self.num_pts_in_voxel = num_points_in_voxel
if extents is not None:
extents_transpose = np.array(extents).transpose()
if extents_transpose.shape != (2, 3):
raise ValueError("Extents are the wrong shape {}".format(
extents.shape))
self.min_voxel_coord = np.floor(extents_transpose[0] / voxel_size)
self.max_voxel_coord = \
np.ceil((extents_transpose[1] / voxel_size) - 1)
self.min_voxel_coord[1] = 0
self.max_voxel_coord[1] = 0
if not (self.min_voxel_coord <= np.amin(voxel_coords,
axis=0)).all():
raise ValueError("Extents are smaller than min_voxel_coord")
if not (self.max_voxel_coord >= np.amax(voxel_coords,
axis=0)).all():
raise ValueError("Extents are smaller than max_voxel_coord")
else:
self.min_voxel_coord = np.amin(voxel_coords, axis=0)
self.max_voxel_coord = np.amax(voxel_coords, axis=0)
self.num_divisions = ((self.max_voxel_coord - self.min_voxel_coord)
+ 1).astype(np.int32)
self.voxel_indices = (voxel_coords - self.min_voxel_coord).astype(int)
if create_leaf_layout:
self.leaf_layout_2d = self.VOXEL_EMPTY * \
np.ones(self.num_divisions.astype(int))
self.leaf_layout_2d[self.voxel_indices[:, 0], 0,
self.voxel_indices[:, 2]] = \
self.VOXEL_FILLED
def map_to_index(self, map_index):
if self.voxel_size == 0 \
or len(self.min_voxel_coord) == 0 \
or len(map_index) == 0:
return []
num_divisions_2d = self.num_divisions[[0, 2]]
min_voxel_coord_2d = self.min_voxel_coord[[0, 2]]
indices = np.int32(map_index / self.voxel_size) - min_voxel_coord_2d
indices[:, 0] = np.clip(indices[:, 0], 0, num_divisions_2d[0])
indices[:, 1] = np.clip(indices[:, 1], 0, num_divisions_2d[1])
return indices
| true | true |
f719919bea61d2bf5cccc3f7d4e1bee9157cfd2e | 1,230 | py | Python | service/scripts/resetadmin.py | OA-DeepGreen/jper | 042719a790a34f877050a32f896b947ce4407b4e | [
"Apache-2.0"
] | null | null | null | service/scripts/resetadmin.py | OA-DeepGreen/jper | 042719a790a34f877050a32f896b947ce4407b4e | [
"Apache-2.0"
] | 1 | 2022-02-03T12:35:18.000Z | 2022-02-03T12:35:18.000Z | service/scripts/resetadmin.py | OA-DeepGreen/jper | 042719a790a34f877050a32f896b947ce4407b4e | [
"Apache-2.0"
] | 3 | 2016-07-15T07:29:33.000Z | 2020-02-03T11:20:34.000Z | """
This is a script to reset the admin account in a live system.
On production this should be run once, and never again, as it removes the old
account and builds a new one in its place. This means no historical data will
be kept from the before time.
"""
from octopus.core import add_configuration, app
from service.models import Account
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # some general script running features
    parser.add_argument("-c", "--config", help="additional configuration to load (e.g. for testing)")
    args = parser.parse_args()
    if args.config:
        add_configuration(app, args.config)
    # Fetch the existing admin account, or start a fresh one if it is missing.
    a = Account.pull('admin')
    if not a:
        a = Account()
    # Hard-coded bootstrap credentials — the account is unconditionally
    # overwritten with these values, even if it already existed.
    username = 'admin'
    password = 'D33pGr33n'
    params = {
        "id": username,
        "role": ["admin"],
        "email": "green@deepgreen.org",
        "api_key": "admin",
        "password": password
    }
    a.add_account(params)
    a.save()
    print("superuser account reseted for user " + username + " with password " + password)
    print("THIS SUPERUSER ACCOUNT IS INSECURE! GENERATE A NEW PASSWORD FOR IT IMMEDIATELY! OR CREATE A NEW ACCOUNT AND DELETE THIS ONE...")
| 31.538462 | 139 | 0.664228 | from octopus.core import add_configuration, app
from service.models import Account
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="additional configuration to load (e.g. for testing)")
args = parser.parse_args()
if args.config:
add_configuration(app, args.config)
a = Account.pull('admin')
if not a:
a = Account()
username = 'admin'
password = 'D33pGr33n'
params = {
"id": username,
"role": ["admin"],
"email": "green@deepgreen.org",
"api_key": "admin",
"password": password
}
a.add_account(params)
a.save()
print("superuser account reseted for user " + username + " with password " + password)
print("THIS SUPERUSER ACCOUNT IS INSECURE! GENERATE A NEW PASSWORD FOR IT IMMEDIATELY! OR CREATE A NEW ACCOUNT AND DELETE THIS ONE...")
| true | true |
f719923795059f5abc5f26d2960058e68c7ca4e6 | 539 | py | Python | game_data/migrations/0003_auto_20210103_1621.py | cmerwin3/Adventure_Project | 1816978e952f1250049e8d1e7fcf172620903596 | [
"Apache-2.0"
] | null | null | null | game_data/migrations/0003_auto_20210103_1621.py | cmerwin3/Adventure_Project | 1816978e952f1250049e8d1e7fcf172620903596 | [
"Apache-2.0"
] | null | null | null | game_data/migrations/0003_auto_20210103_1621.py | cmerwin3/Adventure_Project | 1816978e952f1250049e8d1e7fcf172620903596 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.1 on 2021-01-03 22:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces GameData's numeric 'pin' field with a 'password' CharField.
    dependencies = [
        ('game_data', '0002_auto_20201220_2025'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='gamedata',
            name='pin',
        ),
        migrations.AddField(
            model_name='gamedata',
            # default=1 is only the one-off backfill value for existing rows
            # (preserve_default=False drops it afterwards); presumably it is
            # coerced to the string "1" by the CharField — confirm.
            name='password',
            field=models.CharField(default=1, max_length=30),
            preserve_default=False,
        ),
    ]
| 22.458333 | 61 | 0.575139 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game_data', '0002_auto_20201220_2025'),
]
operations = [
migrations.RemoveField(
model_name='gamedata',
name='pin',
),
migrations.AddField(
model_name='gamedata',
name='password',
field=models.CharField(default=1, max_length=30),
preserve_default=False,
),
]
| true | true |
f719927ab980abbbc3d3ffdce109f65dd7ddd35e | 118 | py | Python | framework/conf.py | shew91/Retropy | 9feb34855b997c48d93a5343a9842788d19582e6 | [
"MIT"
] | 13 | 2018-06-02T09:11:15.000Z | 2020-08-29T01:01:19.000Z | framework/conf.py | shew91/Retropy | 9feb34855b997c48d93a5343a9842788d19582e6 | [
"MIT"
] | 1 | 2021-01-17T14:03:13.000Z | 2021-01-17T14:03:13.000Z | framework/conf.py | shew91/Retropy | 9feb34855b997c48d93a5343a9842788d19582e6 | [
"MIT"
] | 6 | 2018-06-02T16:20:47.000Z | 2021-12-30T22:26:54.000Z | # (hack) Global configs
# Toggle the on-disk cache (consumers of these flags live elsewhere — confirm).
conf_cache_disk = True
# Toggle the in-memory cache.
conf_cache_memory = True
# When False, failed lookups are not cached.
conf_cache_fails = False
# Asset symbols to exclude; populated elsewhere at runtime — TODO confirm.
ignoredAssets = []
| 14.75 | 24 | 0.771186 |
conf_cache_disk = True
conf_cache_memory = True
conf_cache_fails = False
ignoredAssets = []
| true | true |
f71992c33b60881673856eebed695c0f089619b3 | 8,381 | py | Python | adwords_python3_examples_10.1.0/v201802/shopping/add_product_partition_tree.py | xyla-io/hazel | 260ce906761d8b808c21ca61b44cc71ca3329e8c | [
"MIT"
] | null | null | null | adwords_python3_examples_10.1.0/v201802/shopping/add_product_partition_tree.py | xyla-io/hazel | 260ce906761d8b808c21ca61b44cc71ca3329e8c | [
"MIT"
] | null | null | null | adwords_python3_examples_10.1.0/v201802/shopping/add_product_partition_tree.py | xyla-io/hazel | 260ce906761d8b808c21ca61b44cc71ca3329e8c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a ProductPartition tree.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import adwords
ADGROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
class ProductPartitionHelper(object):
  """Accumulates the AdGroupCriterion ADD operations for a ProductPartition tree."""

  def __init__(self, adgroup_id):
    """Initializer.

    Args:
      adgroup_id: The ID of the AdGroup that we wish to attach the partition
                  tree to.
    """
    # Until criteria exist on the server they have no real IDs, so nodes in a
    # single mutate request refer to each other with temporary IDs: any
    # negative integer, unique within the request. We hand them out
    # sequentially starting at -1.
    self.next_id = -1
    # Mutate operations accumulated so far, in creation order.
    self.operations = []
    self.adgroup_id = adgroup_id

  def CreateSubdivision(self, parent=None, value=None):
    """Creates a subdivision node.

    Args:
      parent: The node that should be this node's parent.
      value: The value being partitioned on.
    Returns:
      A new subdivision node.
    """
    node = {
        'xsi_type': 'ProductPartition',
        'partitionType': 'SUBDIVISION',
        'id': str(self.next_id)
    }
    # Only non-root nodes carry a parent link and a partitioned-on value.
    if parent is not None:
      node['parentCriterionId'] = parent['id']
      node['caseValue'] = value
    self.CreateAddOperation({
        'xsi_type': 'BiddableAdGroupCriterion',
        'adGroupId': self.adgroup_id,
        'criterion': node
    })
    self.next_id -= 1
    return node

  def CreateUnit(self, parent=None, value=None, bid_amount=None):
    """Creates a unit node.

    Args:
      parent: The node that should be this node's parent.
      value: The value being partitioned on.
      bid_amount: The amount to bid for matching products, in micros.
    Returns:
      A new unit node.
    """
    node = {
        'xsi_type': 'ProductPartition',
        'partitionType': 'UNIT'
    }
    # The root node has neither a parent nor a value.
    if parent is not None:
      node['parentCriterionId'] = parent['id']
      node['caseValue'] = value
    if bid_amount is not None and bid_amount > 0:
      # A positive bid makes this a biddable (positive) criterion.
      criterion = {
          'xsi_type': 'BiddableAdGroupCriterion',
          'biddingStrategyConfiguration': {
              'bids': [{
                  'xsi_type': 'CpcBid',
                  'bid': {
                      'xsi_type': 'Money',
                      'microAmount': str(bid_amount)
                  }
              }]
          }
      }
    else:
      # No (or non-positive) bid: matching products are excluded instead.
      criterion = {
          'xsi_type': 'NegativeAdGroupCriterion'
      }
    criterion['adGroupId'] = self.adgroup_id
    criterion['criterion'] = node
    self.CreateAddOperation(criterion)
    return node

  def GetOperations(self):
    """Returns the set of mutate operations needed to create the current tree.

    Returns:
      The set of operations
    """
    return self.operations

  def CreateAddOperation(self, criterion):
    """Wraps the given criterion in an ADD operation and records it.

    Args:
      criterion: The criterion we want to add.
    """
    self.operations.append({
        'operator': 'ADD',
        'operand': criterion
    })
def main(client, adgroup_id):
  """Runs the example.

  Builds a three-level partition tree on the ad group — split by product
  condition (NEW / USED / other), then by brand under "other condition",
  then by bidding category under "other brand" — sends it in one mutate
  request, and prints the resulting tree.
  """
  adgroup_criterion_service = client.GetService(
      'AdGroupCriterionService', version='v201802')
  helper = ProductPartitionHelper(adgroup_id)
  # The most trivial partition tree has only a unit node as the root, e.g.:
  # helper.CreateUnit(bid_amount=100000)
  root = helper.CreateSubdivision()
  new_product_canonical_condition = {
      'xsi_type': 'ProductCanonicalCondition',
      'condition': 'NEW'
  }
  used_product_canonical_condition = {
      'xsi_type': 'ProductCanonicalCondition',
      'condition': 'USED'
  }
  # A dimension dict with no value is the catch-all "everything else" case.
  other_product_canonical_condition = {
      'xsi_type': 'ProductCanonicalCondition',
  }
  helper.CreateUnit(root, new_product_canonical_condition, 200000)
  helper.CreateUnit(root, used_product_canonical_condition, 100000)
  other_condition = helper.CreateSubdivision(
      root, other_product_canonical_condition)
  cool_product_brand = {
      'xsi_type': 'ProductBrand',
      'value': 'CoolBrand'
  }
  cheap_product_brand = {
      'xsi_type': 'ProductBrand',
      'value': 'CheapBrand'
  }
  other_product_brand = {
      'xsi_type': 'ProductBrand',
  }
  helper.CreateUnit(other_condition, cool_product_brand, 900000)
  helper.CreateUnit(other_condition, cheap_product_brand, 10000)
  other_brand = helper.CreateSubdivision(other_condition, other_product_brand)
  # The value for the bidding category is a fixed ID for the 'Luggage & Bags'
  # category. You can retrieve IDs for categories from the ConstantDataService.
  # See the 'GetProductTaxonomy' example for more details.
  luggage_category = {
      'xsi_type': 'ProductBiddingCategory',
      'type': 'BIDDING_CATEGORY_L1',
      'value': '-5914235892932915235'
  }
  generic_category = {
      'xsi_type': 'ProductBiddingCategory',
      'type': 'BIDDING_CATEGORY_L1',
  }
  helper.CreateUnit(other_brand, luggage_category, 750000)
  helper.CreateUnit(other_brand, generic_category, 110000)
  # Make the mutate request
  result = adgroup_criterion_service.mutate(helper.GetOperations())
  children = {}
  root_node = None
  # For each criterion, make an array containing each of its children.
  # We always create the parent before the child, so we can rely on that here.
  for adgroup_criterion in result['value']:
    children[adgroup_criterion['criterion']['id']] = []
    if 'parentCriterionId' in adgroup_criterion['criterion']:
      children[adgroup_criterion['criterion']['parentCriterionId']].append(
          adgroup_criterion['criterion'])
    else:
      root_node = adgroup_criterion['criterion']
  # Show the tree
  DisplayTree(root_node, children)
def DisplayTree(node, children, level=0):
  """Recursively display a node and each of its children.

  Args:
    node: The node we're displaying the children of.
    children: Mapping from criterion ID to the list of child criteria.
    level: How deep in the tree we are (controls indentation).
  """
  node_type = ''
  value = ''
  case_value = node.get('caseValue')
  if case_value is not None:
    node_type = case_value['ProductDimension.Type']
    # A dimension without an explicit value is the catch-all 'OTHER' case.
    if node_type == 'ProductCanonicalCondition':
      value = case_value.get('condition', 'OTHER')
    elif node_type == 'ProductBiddingCategory':
      value = '%s(%s)' % (case_value['type'],
                          case_value.get('value', 'OTHER'))
    else:
      value = case_value.get('value', 'OTHER')
  print(('%sid: %s, node_type: %s, value: %s\n'
         % (' ' * level, node['id'], node_type, value)))
  for child in children[node['id']]:
    DisplayTree(child, children, level + 1)
if __name__ == '__main__':
  # Initialize client object.
  # LoadFromStorage reads credentials from the googleads.yaml file
  # (by default in the home directory — see the module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, ADGROUP_ID)
| 29.932143 | 79 | 0.681064 |
from googleads import adwords
ADGROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
class ProductPartitionHelper(object):
def __init__(self, adgroup_id):
self.next_id = -1
self.operations = []
self.adgroup_id = adgroup_id
def CreateSubdivision(self, parent=None, value=None):
division = {
'xsi_type': 'ProductPartition',
'partitionType': 'SUBDIVISION',
'id': str(self.next_id)
}
if parent is not None:
division['parentCriterionId'] = parent['id']
division['caseValue'] = value
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': self.adgroup_id,
'criterion': division
}
self.CreateAddOperation(adgroup_criterion)
self.next_id -= 1
return division
def CreateUnit(self, parent=None, value=None, bid_amount=None):
unit = {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
}
if parent is not None:
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value
if bid_amount is not None and bid_amount > 0:
bidding_strategy_configuration = {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'xsi_type': 'Money',
'microAmount': str(bid_amount)
}
}]
}
adgroup_criterion = {
'xsi_type': 'BiddableAdGroupCriterion',
'biddingStrategyConfiguration': bidding_strategy_configuration
}
else:
adgroup_criterion = {
'xsi_type': 'NegativeAdGroupCriterion'
}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit
def GetOperations(self):
return self.operations
def CreateAddOperation(self, criterion):
operation = {
'operator': 'ADD',
'operand': criterion
}
self.operations.append(operation)
def main(client, adgroup_id):
adgroup_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201802')
helper = ProductPartitionHelper(adgroup_id)
root = helper.CreateSubdivision()
new_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
'condition': 'NEW'
}
used_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
'condition': 'USED'
}
other_product_canonical_condition = {
'xsi_type': 'ProductCanonicalCondition',
}
helper.CreateUnit(root, new_product_canonical_condition, 200000)
helper.CreateUnit(root, used_product_canonical_condition, 100000)
other_condition = helper.CreateSubdivision(
root, other_product_canonical_condition)
cool_product_brand = {
'xsi_type': 'ProductBrand',
'value': 'CoolBrand'
}
cheap_product_brand = {
'xsi_type': 'ProductBrand',
'value': 'CheapBrand'
}
other_product_brand = {
'xsi_type': 'ProductBrand',
}
helper.CreateUnit(other_condition, cool_product_brand, 900000)
helper.CreateUnit(other_condition, cheap_product_brand, 10000)
other_brand = helper.CreateSubdivision(other_condition, other_product_brand)
luggage_category = {
'xsi_type': 'ProductBiddingCategory',
'type': 'BIDDING_CATEGORY_L1',
'value': '-5914235892932915235'
}
generic_category = {
'xsi_type': 'ProductBiddingCategory',
'type': 'BIDDING_CATEGORY_L1',
}
helper.CreateUnit(other_brand, luggage_category, 750000)
helper.CreateUnit(other_brand, generic_category, 110000)
result = adgroup_criterion_service.mutate(helper.GetOperations())
children = {}
root_node = None
for adgroup_criterion in result['value']:
children[adgroup_criterion['criterion']['id']] = []
if 'parentCriterionId' in adgroup_criterion['criterion']:
children[adgroup_criterion['criterion']['parentCriterionId']].append(
adgroup_criterion['criterion'])
else:
root_node = adgroup_criterion['criterion']
DisplayTree(root_node, children)
def DisplayTree(node, children, level=0):
value = ''
node_type = ''
if 'caseValue' in node:
case_value = node['caseValue']
node_type = case_value['ProductDimension.Type']
if node_type == 'ProductCanonicalCondition':
value = (case_value['condition'] if 'condition' in case_value
else 'OTHER')
elif node_type == 'ProductBiddingCategory':
value = '%s(%s)' % (case_value['type'], case_value['value']
if 'value' in case_value else 'OTHER')
else:
value = (case_value['value'] if 'value' in case_value else 'OTHER')
print(('%sid: %s, node_type: %s, value: %s\n'
% (' ' * level, node['id'], node_type, value)))
for child_node in children[node['id']]:
DisplayTree(child_node, children, level + 1)
if __name__ == '__main__':
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUP_ID)
| true | true |
f71992ce83b2d3db02c5c551a3d398f75815bd4c | 1,118 | py | Python | tests/test_integration.py | vadim2404/pybox | 3c4686245dca3d58afa5b923bcfede2172436bfd | [
"MIT"
] | null | null | null | tests/test_integration.py | vadim2404/pybox | 3c4686245dca3d58afa5b923bcfede2172436bfd | [
"MIT"
] | null | null | null | tests/test_integration.py | vadim2404/pybox | 3c4686245dca3d58afa5b923bcfede2172436bfd | [
"MIT"
] | null | null | null | from pybox.inject import Inject, InjectLazy
from pybox.service import IService, ServiceMode
class SingletonService(IService):
    """Service used with the container's default mode (presumably SINGLETON,
    given the assertions in __main__ — confirm against IService)."""
    def who_am_i(self):
        # Prints the instance identity so singleton sharing is visible.
        print(f'Singleton {id(self)}')
class FactoryService(IService):
    """Service declared with FACTORY mode: each injection point receives its
    own instance (see the `is not` assertion in __main__)."""
    # The factory instances still share one injected singleton dependency.
    singleton = Inject(SingletonService)
    @classmethod
    def service_mode(self):
        # NOTE(review): first parameter of a classmethod is conventionally
        # named `cls`; left as-is since renaming is a code change.
        return ServiceMode.FACTORY
    def who_am_i(self):
        # Prints the instance identity so per-injection instances are visible.
        print(f'Factory {id(self)}')
class A:
    """Consumer class exercising eager (Inject) and lazy (InjectLazy)
    injection of both singleton- and factory-mode services."""
    singleton1 = Inject(SingletonService)
    singleton2 = InjectLazy(SingletonService)
    factory1 = Inject(FactoryService)
    factory2 = InjectLazy(FactoryService)
    def who_am_i(self):
        print(f'A {id(self)}')
if __name__ == '__main__':
    a = A()
    # Eager and lazy injection of a singleton must resolve to one shared
    # instance; factory-mode injections must each get a distinct instance.
    assert a.singleton1 is a.singleton2
    assert isinstance(a.singleton1, SingletonService)
    assert isinstance(a.factory1, FactoryService)
    assert isinstance(a.factory2, FactoryService)
    assert a.factory1 is not a.factory2
    # Visual check: the printed ids demonstrate the sharing rules above.
    a.factory1.who_am_i()
    a.factory2.who_am_i()
    a.singleton1.who_am_i()
    a.singleton2.who_am_i()
    # Both factory instances should print the SAME singleton id here.
    a.factory1.singleton.who_am_i()
    a.factory2.singleton.who_am_i()
    a.who_am_i()
| 23.787234 | 53 | 0.701252 | from pybox.inject import Inject, InjectLazy
from pybox.service import IService, ServiceMode
class SingletonService(IService):
def who_am_i(self):
print(f'Singleton {id(self)}')
class FactoryService(IService):
singleton = Inject(SingletonService)
@classmethod
def service_mode(self):
return ServiceMode.FACTORY
def who_am_i(self):
print(f'Factory {id(self)}')
class A:
singleton1 = Inject(SingletonService)
singleton2 = InjectLazy(SingletonService)
factory1 = Inject(FactoryService)
factory2 = InjectLazy(FactoryService)
def who_am_i(self):
print(f'A {id(self)}')
if __name__ == '__main__':
a = A()
assert a.singleton1 is a.singleton2
assert isinstance(a.singleton1, SingletonService)
assert isinstance(a.factory1, FactoryService)
assert isinstance(a.factory2, FactoryService)
assert a.factory1 is not a.factory2
a.factory1.who_am_i()
a.factory2.who_am_i()
a.singleton1.who_am_i()
a.singleton2.who_am_i()
a.factory1.singleton.who_am_i()
a.factory2.singleton.who_am_i()
a.who_am_i()
| true | true |
f719931b5d6abfb3ad9bbf8bcd7dabd34ac4e957 | 1,023 | py | Python | stacked_queue/stack_queue.py | steveflys/data-structures-and-algorithms | 9c89cb24449ca7bc09578408cba3c877fe74e000 | [
"MIT"
] | null | null | null | stacked_queue/stack_queue.py | steveflys/data-structures-and-algorithms | 9c89cb24449ca7bc09578408cba3c877fe74e000 | [
"MIT"
] | 3 | 2018-05-01T18:07:50.000Z | 2018-05-11T16:52:16.000Z | stacked_queue/stack_queue.py | steveflys/data-structures-and-algorithms | 9c89cb24449ca7bc09578408cba3c877fe74e000 | [
"MIT"
] | null | null | null | from .node import Node
from .stack import Stack
class Stack_Queue:
    """FIFO queue built from two LIFO stacks.

    ``stack_back`` receives newly enqueued nodes (top = newest);
    ``stack_front`` holds elements in dequeue order (top = oldest).
    Elements are transferred from back to front only when the front stack
    is empty, making enqueue/dequeue amortized O(1).
    """
    def __init__(self):
        self.stack_front = Stack()  # outbound stack: top is the oldest element
        self.stack_back = Stack()   # inbound stack: top is the newest element
        self._size = 0              # number of elements currently in the queue
    def enqueue(self, val):
        """Add a node holding ``val`` to the back of the queue and increment ._size.

        Returns the newly created node; raises TypeError for a value Node
        rejects (e.g. None).
        """
        try:
            node = Node(val)
        except TypeError:
            raise TypeError('Cannot enqueue a value of none')
        node._next = self.stack_back.top
        self.stack_back.top = node
        self._size += 1
        return self.stack_back.top
    def dequeue(self):
        """Remove the node at the front of the queue, decrement ._size and return the value.

        Fixes two defects of the previous implementation: it shuttled every
        element between the two stacks on EVERY call (O(n) per dequeue), and
        it crashed with AttributeError (``None._next``) when removing the
        last remaining element. Now the back stack is reversed into the
        front stack only when the front stack is empty.
        """
        if self.stack_front.top is None:
            # Reverse the inbound stack so the oldest element ends up on top.
            while self.stack_back.top is not None:
                self.stack_front.push(self.stack_back.pop())
        # On an empty queue this delegates to Stack.pop's empty behavior.
        val = self.stack_front.pop()
        self._size -= 1
        return val
| 25.575 | 98 | 0.605083 | from .node import Node
from .stack import Stack
class Stack_Queue:
def __init__(self):
self.stack_front = Stack()
self.stack_back = Stack()
self._size = 0
def enqueue(self, val):
try:
node = Node(val)
except TypeError:
raise TypeError('Cannot enqueue a value of none')
node._next = self.stack_back.top
self.stack_back.top = node
self._size += 1
return self.stack_back.top
def dequeue(self):
while self.stack_back.top._next:
self.stack_front.push(self.stack_back.pop())
val = self.stack_back.pop()
while self.stack_front.top._next:
self.stack_back.push(self.stack_front.pop())
self.stack_back.push(self.stack_front.pop())
self._size -= 1
return val
| true | true |
f7199346c4d451ef333dfac98139b138cfe947b2 | 1,924 | py | Python | _discord.py | blairg23/discord-scheduler-bot | bd6bcc25b51b50c9eeca195adefe5cfc2eab4923 | [
"MIT"
] | null | null | null | _discord.py | blairg23/discord-scheduler-bot | bd6bcc25b51b50c9eeca195adefe5cfc2eab4923 | [
"MIT"
] | null | null | null | _discord.py | blairg23/discord-scheduler-bot | bd6bcc25b51b50c9eeca195adefe5cfc2eab4923 | [
"MIT"
] | null | null | null | import discord
import asyncio
from datetime import datetime
class Discord:
    # Singleton wrapper around a discord.Client; the class-level attributes
    # hold the single shared instance and its client.
    _instance = None
    client = None
    def __new__(class_, *args, **kwargs):
        # Return the one cached instance, creating it on first call.
        # (object.__new__ tolerates the extra args here only because
        # __init__ takes none — they are always empty.)
        if not isinstance(class_._instance, class_):
            class_._instance = object.__new__(class_, *args, **kwargs)
        return class_._instance
    def __init__(self):
        # Runs on every Discord() call, but only creates the client once.
        # Note: assignment shadows the class attribute on the (single) instance.
        if self.client is None:
            self.client = discord.Client()
        if self.client is not None:
            print("Discord bot pooling created successfully")
    def get_client(self):
        # Accessor for the underlying discord.Client.
        return self.client
    async def send_message(self, channel, content="", embed=None):
        '''Send a message to *channel*, swallowing any exception.

        Returns the sent message on success, or None if sending raised
        (every exception is silently ignored).
        '''
        try:
            return await self.client.send_message(channel, content=content, embed=embed)
        except Exception as e:
            pass
            #print("ERROR: cmonBruh (send_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    async def get_message(self, channel, id):
        '''Fetch message *id* from *channel*; returns None on any failure.'''
        msg = None
        try:
            msg = await self.client.get_message(channel, id)
        except Exception as e:
            pass
            #print("ERROR: SwiftStrike (get_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        return msg
    async def edit_message(self, message, new_content=None, embed=None):
        '''Edit *message*; returns the edited message, or None on any failure.'''
        msg = None
        try:
            msg = await self.client.edit_message(message, new_content=new_content, embed=embed)
        except Exception as e:
            pass
            #print("ERROR: :rage: (edit_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
return msg | 34.357143 | 118 | 0.576403 | import discord
import asyncio
from datetime import datetime
class Discord:
_instance = None
client = None
def __new__(class_, *args, **kwargs):
if not isinstance(class_._instance, class_):
class_._instance = object.__new__(class_, *args, **kwargs)
return class_._instance
def __init__(self):
if self.client is None:
self.client = discord.Client()
if self.client is not None:
print("Discord bot pooling created successfully")
def get_client(self):
return self.client
async def send_message(self, channel, content="", embed=None):
try:
return await self.client.send_message(channel, content=content, embed=embed)
except Exception as e:
pass
async def get_message(self, channel, id):
msg = None
try:
msg = await self.client.get_message(channel, id)
except Exception as e:
pass
return msg
async def edit_message(self, message, new_content=None, embed=None):
msg = None
try:
msg = await self.client.edit_message(message, new_content=new_content, embed=embed)
except Exception as e:
pass
return msg | true | true |
f719938d7b8a9a714e3e8d344249a6a2588ede43 | 3,085 | py | Python | src/app/routes.py | taishengG/jama-slack-integration | 746b7186ceaf955ca81e9e0ad4862141ce35eb8d | [
"MIT"
] | null | null | null | src/app/routes.py | taishengG/jama-slack-integration | 746b7186ceaf955ca81e9e0ad4862141ce35eb8d | [
"MIT"
] | null | null | null | src/app/routes.py | taishengG/jama-slack-integration | 746b7186ceaf955ca81e9e0ad4862141ce35eb8d | [
"MIT"
] | null | null | null | import os
import requests
import json
from flask import request, make_response
from app import app
from app import route_handler as rt_handle
"""
This module handles the "intake" of requests to the server.
The requests are then passed off the route_handler.py where arguments
are then parsed and passed off to other packages for the different
functionalities: comment, create, search.
All verification for reqets is made at this level.
Attributes:
base_url (String): Module level variable pulls in environment
variable (JAMA_URL). which is the url of the specified Jama
instance.
url_rule (String): Variable uses environment variable which stands
for the main/base url slug.
Example: URL_RULE="/jama"
"""
base_url = os.environ['JAMA_URL']
url_rule = os.environ['URL_RULE']
@app.route(url_rule + "/dialog", methods=['GET', 'PUT', 'POST'])
def jama_dialog():
"""API intake for dialog submissions from Slack.
Passes json payload off to route_handler, otherwise an error is
thrown.
Args:
None
Returns:
Response Class object
"""
if not rt_handle.verify_req(request):
return make_response("", 401)
print("DIALOG")
try:
submit_payload = json.loads(request.form['payload'])
return rt_handle.resolve_dialog_submit(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule + '/menu', methods=['GET', 'PUT', 'POST'])
def jama_menu():
"""API intake to pass off dynamic dialog data to Slack.
Passes json payload off to route_handler, otherwise an error is
thrown.
Args:
None
Returns:
Response Class object
"""
if not rt_handle.verify_req(request):
return make_response("", 401)
print("MENU")
try:
submit_payload = json.loads(request.form["payload"])
return rt_handle.resolve_menu_req(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule + '/bot', methods=['GET', 'PUT', 'POST'])
def jama_bot():
"""API intake to pass off slackbot data to Slack.
Passes json payload off to route_handler, otherwise an error is
thrown.
Args:
None
Returns:
Response Class object
"""
if not rt_handle.verify_req(request):
return make_response("", 401)
print("BOT")
try:
submit_payload = request.get_json()
return rt_handle.resolve_bot_req(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule, methods=['GET', 'PUT', 'POST'])
def jama():
"""API intake to pass off dynamic dialog data to Slack.
Passes json payload off to route_handler, otherwise an error is
thrown.
Args:
None
Returns:
Response Class object
"""
if not rt_handle.verify_req(request):
return make_response("", 401)
return rt_handle.resolve_jama_req(base_url, request)
| 24.879032 | 72 | 0.666775 | import os
import requests
import json
from flask import request, make_response
from app import app
from app import route_handler as rt_handle
base_url = os.environ['JAMA_URL']
url_rule = os.environ['URL_RULE']
@app.route(url_rule + "/dialog", methods=['GET', 'PUT', 'POST'])
def jama_dialog():
if not rt_handle.verify_req(request):
return make_response("", 401)
print("DIALOG")
try:
submit_payload = json.loads(request.form['payload'])
return rt_handle.resolve_dialog_submit(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule + '/menu', methods=['GET', 'PUT', 'POST'])
def jama_menu():
if not rt_handle.verify_req(request):
return make_response("", 401)
print("MENU")
try:
submit_payload = json.loads(request.form["payload"])
return rt_handle.resolve_menu_req(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule + '/bot', methods=['GET', 'PUT', 'POST'])
def jama_bot():
if not rt_handle.verify_req(request):
return make_response("", 401)
print("BOT")
try:
submit_payload = request.get_json()
return rt_handle.resolve_bot_req(base_url, submit_payload)
except Exception as err:
print(err)
return make_response("", 500)
@app.route(url_rule, methods=['GET', 'PUT', 'POST'])
def jama():
if not rt_handle.verify_req(request):
return make_response("", 401)
return rt_handle.resolve_jama_req(base_url, request)
| true | true |
f719944e384288656e4d709f07457f69d21c6a92 | 1,473 | pyw | Python | Tkinter/tk5.pyw | Jav10/Python | b419a86825313b8ee537757079c95f3097f4dbad | [
"MIT"
] | null | null | null | Tkinter/tk5.pyw | Jav10/Python | b419a86825313b8ee537757079c95f3097f4dbad | [
"MIT"
] | null | null | null | Tkinter/tk5.pyw | Jav10/Python | b419a86825313b8ee537757079c95f3097f4dbad | [
"MIT"
] | null | null | null | #GUI con TKinter
#Autor: Javier Arturo Hernández Sosa
#Fecha: 20/Sep/2017
#Descripcion: Curso Python FES Acatlán
from tkinter import *
#Definición de funciones
def suma():
r.set(x.get() + y.get())
def multi():
r.set(x.get() * y.get())
def resta():
r.set(x.get() - y.get())
def dividir():
r.set(x.get() / y.get())
#Ventana raíz
root = Tk()
#Configuración raíz
root.geometry("300x300")
root.title("Botones y funciones")
root.config(bd=15)
#variables para widgets
x = DoubleVar()
y = DoubleVar()
r = StringVar()
#Entradas y resultado
numero1 = Entry(root,textvariable=x, justify="center")
numero2 =Entry(root,textvariable=y, justify="center")
resultado = Entry(root, textvariable=r, justify="center", state="disabled") #stated para bloquear el widget
#Empaquetado
numero1.grid(row=0,column=0,padx=5,pady=5)
numero2.grid(row=0,column=1,padx=5,pady=5)
resultado.grid(row=3,column=0, columnspan=2,padx=5,pady=5) #Expandir columnas
#Botones
sumar = Button(root, text="Sumar", command=suma) #botones, command para pasar funcion
sumar.grid(row=1,column=0,padx=5,pady=5)
multiplicar = Button(root, text="Multiplicar", command=multi)
multiplicar.grid(row=1,column=1,padx=5,pady=5)
restar = Button(root, text="Restar", command=resta)
restar.grid(row=2,column=0,padx=5,pady=5)
dividir = Button(root, text="Dividir", command=dividir)
dividir.grid(row=2,column=1,padx=5,pady=5)
#loop principal
root.mainloop()
| 25.396552 | 108 | 0.696538 |
from tkinter import *
def suma():
r.set(x.get() + y.get())
def multi():
r.set(x.get() * y.get())
def resta():
r.set(x.get() - y.get())
def dividir():
r.set(x.get() / y.get())
root = Tk()
root.geometry("300x300")
root.title("Botones y funciones")
root.config(bd=15)
x = DoubleVar()
y = DoubleVar()
r = StringVar()
numero1 = Entry(root,textvariable=x, justify="center")
numero2 =Entry(root,textvariable=y, justify="center")
resultado = Entry(root, textvariable=r, justify="center", state="disabled")
numero1.grid(row=0,column=0,padx=5,pady=5)
numero2.grid(row=0,column=1,padx=5,pady=5)
resultado.grid(row=3,column=0, columnspan=2,padx=5,pady=5)
sumar = Button(root, text="Sumar", command=suma)
sumar.grid(row=1,column=0,padx=5,pady=5)
multiplicar = Button(root, text="Multiplicar", command=multi)
multiplicar.grid(row=1,column=1,padx=5,pady=5)
restar = Button(root, text="Restar", command=resta)
restar.grid(row=2,column=0,padx=5,pady=5)
dividir = Button(root, text="Dividir", command=dividir)
dividir.grid(row=2,column=1,padx=5,pady=5)
root.mainloop()
| true | true |
f719946d0d254ecdc9ccfe5fb6f0233c8c62eb2a | 1,485 | py | Python | src/data/dataset.py | zmcx16/ReclassifyAnimeCG | f5f95b229447564502564d9ffc7edf6215fec83d | [
"MIT"
] | 3 | 2021-10-30T10:13:40.000Z | 2021-12-12T10:26:14.000Z | src/data/dataset.py | zmcx16/ReclassifyAnimeCG | f5f95b229447564502564d9ffc7edf6215fec83d | [
"MIT"
] | null | null | null | src/data/dataset.py | zmcx16/ReclassifyAnimeCG | f5f95b229447564502564d9ffc7edf6215fec83d | [
"MIT"
] | null | null | null | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from data import get_train_transform, get_test_transform
class CustomDataset(Dataset):
img_aug = True
imgs = []
transform = None
def __init__(self, label_file, image_set, input_size):
with open(label_file, 'r', encoding="utf-8") as f:
self.imgs = list(map(lambda line: line.strip().split('|'), f))
if image_set == 'train':
self.transform = get_train_transform(size=input_size)
else:
self.transform = get_test_transform(size=input_size)
self.input_size = input_size
def __getitem__(self, index):
# print(self.imgs)
# print(index)
# print(len(self.imgs[index]))
img_path, label = self.imgs[index]
# print(img_path)
img = Image.open(img_path).convert('RGB')
if self.img_aug:
img = self.transform(img)
else:
img = np.array(img)
img = torch.from_numpy(img)
return img, torch.from_numpy(np.array(int(label)))
def __len__(self):
return len(self.imgs)
def get_datasets_and_dataloader(label_path, image_set, batch_size, input_size):
_dataset = CustomDataset(label_path, image_set=image_set, input_size=input_size)
_dataloader = DataLoader(_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
return _dataset, _dataloader
| 30.9375 | 90 | 0.658586 | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from data import get_train_transform, get_test_transform
class CustomDataset(Dataset):
img_aug = True
imgs = []
transform = None
def __init__(self, label_file, image_set, input_size):
with open(label_file, 'r', encoding="utf-8") as f:
self.imgs = list(map(lambda line: line.strip().split('|'), f))
if image_set == 'train':
self.transform = get_train_transform(size=input_size)
else:
self.transform = get_test_transform(size=input_size)
self.input_size = input_size
def __getitem__(self, index):
img_path, label = self.imgs[index]
img = Image.open(img_path).convert('RGB')
if self.img_aug:
img = self.transform(img)
else:
img = np.array(img)
img = torch.from_numpy(img)
return img, torch.from_numpy(np.array(int(label)))
def __len__(self):
return len(self.imgs)
def get_datasets_and_dataloader(label_path, image_set, batch_size, input_size):
_dataset = CustomDataset(label_path, image_set=image_set, input_size=input_size)
_dataloader = DataLoader(_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
return _dataset, _dataloader
| true | true |
f71994d1600fc241664b82c32779973864dfe5a1 | 337 | py | Python | AHtask2.py | Irinakene/AHtask | 6f776477c6867b8f7650394aac1c3292bced8ca9 | [
"MIT"
] | null | null | null | AHtask2.py | Irinakene/AHtask | 6f776477c6867b8f7650394aac1c3292bced8ca9 | [
"MIT"
] | null | null | null | AHtask2.py | Irinakene/AHtask | 6f776477c6867b8f7650394aac1c3292bced8ca9 | [
"MIT"
] | null | null | null | import csv
name = input('Enter your name: ')
email = input('Enter your email: ')
phone = input('Enter your phone: ')
githublink = input('Enter your githublink: ')
save = input('Save to CSV? ')
if save == 'yes':
file = open('results.csv', 'a')
csv_writer = csv.writer(file)
csv_writer.writerow([name, githublink, email, phone]) | 22.466667 | 54 | 0.664688 | import csv
name = input('Enter your name: ')
email = input('Enter your email: ')
phone = input('Enter your phone: ')
githublink = input('Enter your githublink: ')
save = input('Save to CSV? ')
if save == 'yes':
file = open('results.csv', 'a')
csv_writer = csv.writer(file)
csv_writer.writerow([name, githublink, email, phone]) | true | true |
f71997563231cf56306173544d65e6f6f5c14345 | 27,571 | py | Python | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | null | null | null | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 1 | 2021-05-31T08:56:01.000Z | 2021-05-31T08:56:01.000Z | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from functools import partial
from azure.core.tracing.decorator import distributed_trace
from ._shared import KeyVaultClientBase
from ._shared.exceptions import error_map as _error_map
from ._shared._polling import DeleteRecoverPollingMethod, KeyVaultOperationPoller
from ._models import KeyVaultKey, KeyProperties, DeletedKey
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
# pylint:disable=unused-import
from typing import Any, Optional, Union
from azure.core.paging import ItemPaged
from ._models import JsonWebKey
class KeyClient(KeyVaultClientBase):
"""A high-level interface for managing a vault's keys.
:param str vault_url: URL of the vault the client will access. This is also called the vault's "DNS Name".
:param credential: An object which can provide an access token for the vault, such as a credential from
:mod:`azure.identity`
:keyword api_version: version of the Key Vault API to use. Defaults to the most recent.
:paramtype api_version: ~azure.keyvault.keys.ApiVersion
:keyword transport: transport to use. Defaults to :class:`~azure.core.pipeline.transport.RequestsTransport`.
:paramtype transport: ~azure.core.pipeline.transport.HttpTransport
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_key_client]
:end-before: [END create_key_client]
:language: python
:caption: Create a new ``KeyClient``
:dedent: 4
"""
# pylint:disable=protected-access
@distributed_trace
def create_key(self, name, key_type, **kwargs):
# type: (str, Union[str, azure.keyvault.keys.KeyType], **Any) -> KeyVaultKey
"""Create a key or, if `name` is already in use, create a new version of the key.
Requires keys/create permission.
:param str name: The name of the new key.
:param key_type: The type of key to create
:type key_type: ~azure.keyvault.keys.KeyType or str
:keyword int size: Key size in bits. Applies only to RSA and symmetric keys. Consider using
:func:`create_rsa_key` or :func:`create_oct_key` instead.
:keyword curve: Elliptic curve name. Applies only to elliptic curve keys. Defaults to the NIST P-256
elliptic curve. To create an elliptic curve key, consider using :func:`create_ec_key` instead.
:paramtype curve: ~azure.keyvault.keys.KeyCurveName or str
:keyword int public_exponent: The RSA public exponent to use. Applies only to RSA keys created in a Managed HSM.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_key]
:end-before: [END create_key]
:language: python
:caption: Create a key
:dedent: 8
"""
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyCreateParameters(
kty=key_type,
key_size=kwargs.pop("size", None),
key_attributes=attributes,
key_ops=kwargs.pop("key_operations", None),
tags=kwargs.pop("tags", None),
curve=kwargs.pop("curve", None),
public_exponent=kwargs.pop("public_exponent", None)
)
bundle = self._client.create_key(
vault_base_url=self.vault_url,
key_name=name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def create_rsa_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new RSA key or, if `name` is already in use, create a new version of the key
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword int size: Key size in bits, for example 2048, 3072, or 4096.
:keyword int public_exponent: The RSA public exponent to use. Applies only to RSA keys created in a Managed HSM.
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_rsa_key]
:end-before: [END create_rsa_key]
:language: python
:caption: Create RSA key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="RSA-HSM" if hsm else "RSA", **kwargs)
@distributed_trace
def create_ec_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new elliptic curve key or, if `name` is already in use, create a new version of the key.
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword curve: Elliptic curve name. Defaults to the NIST P-256 elliptic curve.
:paramtype curve: ~azure.keyvault.keys.KeyCurveName or str
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_ec_key]
:end-before: [END create_ec_key]
:language: python
:caption: Create an elliptic curve key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="EC-HSM" if hsm else "EC", **kwargs)
@distributed_trace
def create_oct_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new octet sequence (symmetric) key or, if `name` is already in use, create a new version of the key.
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword int size: Key size in bits, for example 128, 192, or 256.
:keyword key_operations: Allowed key operations.
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_oct_key]
:end-before: [END create_oct_key]
:language: python
:caption: Create an octet sequence (symmetric) key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="oct-HSM" if hsm else "oct", **kwargs)
@distributed_trace
def begin_delete_key(self, name, **kwargs):
# type: (str, **Any) -> DeletedKey
"""Delete all versions of a key and its cryptographic material. Requires keys/delete permission.
When this method returns Key Vault has begun deleting the key. Deletion may take several seconds in a vault
with soft-delete enabled. This method therefore returns a poller enabling you to wait for deletion to complete.
:param str name: The name of the key to delete.
:returns: A poller for the delete key operation. The poller's `result` method returns the
:class:`~azure.keyvault.keys.DeletedKey` without waiting for deletion to complete. If the vault has
soft-delete enabled and you want to permanently delete the key with :func:`purge_deleted_key`, call the
poller's `wait` method first. It will block until the deletion is complete. The `wait` method requires
keys/get permission.
:rtype: ~azure.core.polling.LROPoller[~azure.keyvault.keys.DeletedKey]
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START delete_key]
:end-before: [END delete_key]
:language: python
:caption: Delete a key
:dedent: 8
"""
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
deleted_key = DeletedKey._from_deleted_key_bundle(
self._client.delete_key(self.vault_url, name, error_map=_error_map, **kwargs)
)
command = partial(self.get_deleted_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
# no recovery ID means soft-delete is disabled, in which case we initialize the poller as finished
finished=deleted_key.recovery_id is None,
command=command,
final_resource=deleted_key,
interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def get_key(self, name, version=None, **kwargs):
# type: (str, Optional[str], **Any) -> KeyVaultKey
"""Get a key's attributes and, if it's an asymmetric key, its public material. Requires keys/get permission.
:param str name: The name of the key to get.
:param str version: (optional) A specific version of the key to get. If not specified, gets the latest version
of the key.
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START get_key]
:end-before: [END get_key]
:language: python
:caption: Get a key
:dedent: 8
"""
bundle = self._client.get_key(self.vault_url, name, key_version=version or "", error_map=_error_map, **kwargs)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def get_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> DeletedKey
"""Get a deleted key. Possible only in a vault with soft-delete enabled. Requires keys/get permission.
:param str name: The name of the key
:returns: The deleted key
:rtype: ~azure.keyvault.keys.DeletedKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START get_deleted_key]
:end-before: [END get_deleted_key]
:language: python
:caption: Get a deleted key
:dedent: 8
"""
bundle = self._client.get_deleted_key(self.vault_url, name, error_map=_error_map, **kwargs)
return DeletedKey._from_deleted_key_bundle(bundle)
@distributed_trace
def list_deleted_keys(self, **kwargs):
# type: (**Any) -> ItemPaged[DeletedKey]
"""List all deleted keys, including the public part of each. Possible only in a vault with soft-delete enabled.
Requires keys/list permission.
:returns: An iterator of deleted keys
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.DeletedKey]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_deleted_keys]
:end-before: [END list_deleted_keys]
:language: python
:caption: List all the deleted keys
:dedent: 8
"""
return self._client.get_deleted_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [DeletedKey._from_deleted_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_keys(self, **kwargs):
# type: (**Any) -> ItemPaged[KeyProperties]
"""List identifiers and properties of all keys in the vault. Requires keys/list permission.
:returns: An iterator of keys without their cryptographic material or version information
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.KeyProperties]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_keys]
:end-before: [END list_keys]
:language: python
:caption: List all keys
:dedent: 8
"""
return self._client.get_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_key_versions(self, name, **kwargs):
# type: (str, **Any) -> ItemPaged[KeyProperties]
"""List the identifiers and properties of a key's versions. Requires keys/list permission.
:param str name: The name of the key
:returns: An iterator of keys without their cryptographic material
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.KeyProperties]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_properties_of_key_versions]
:end-before: [END list_properties_of_key_versions]
:language: python
:caption: List all versions of a key
:dedent: 8
"""
return self._client.get_key_versions(
self._vault_url,
name,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def purge_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> None
"""Permanently deletes a deleted key. Only possible in a vault with soft-delete enabled.
Performs an irreversible deletion of the specified key, without
possibility for recovery. The operation is not available if the
:py:attr:`~azure.keyvault.keys.KeyProperties.recovery_level` does not specify 'Purgeable'.
This method is only necessary for purging a key before its
:py:attr:`~azure.keyvault.keys.DeletedKey.scheduled_purge_date`.
Requires keys/purge permission.
:param str name: The name of the deleted key to purge
:returns: None
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. code-block:: python
# if the vault has soft-delete enabled, purge permanently deletes a deleted key
# (with soft-delete disabled, begin_delete_key is permanent)
key_client.purge_deleted_key("key-name")
"""
self._client.purge_deleted_key(vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs)
@distributed_trace
def begin_recover_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Recover a deleted key to its latest version. Possible only in a vault with soft-delete enabled.
Requires keys/recover permission.
When this method returns Key Vault has begun recovering the key. Recovery may take several seconds. This
method therefore returns a poller enabling you to wait for recovery to complete. Waiting is only necessary when
you want to use the recovered key in another operation immediately.
:param str name: The name of the deleted key to recover
:returns: A poller for the recovery operation. The poller's `result` method returns the recovered
:class:`~azure.keyvault.keys.KeyVaultKey` without waiting for recovery to complete. If you want to use the
recovered key immediately, call the poller's `wait` method, which blocks until the key is ready to use. The
`wait` method requires keys/get permission.
:rtype: ~azure.core.polling.LROPoller[~azure.keyvault.keys.KeyVaultKey]
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START recover_deleted_key]
:end-before: [END recover_deleted_key]
:language: python
:caption: Recover a deleted key
:dedent: 8
"""
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
recovered_key = KeyVaultKey._from_key_bundle(
self._client.recover_deleted_key(
vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs
)
)
command = partial(self.get_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
finished=False, command=command, final_resource=recovered_key, interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def update_key_properties(self, name, version=None, **kwargs):
# type: (str, Optional[str], **Any) -> KeyVaultKey
"""Change a key's properties (not its cryptographic material). Requires keys/update permission.
:param str name: The name of key to update
:param str version: (optional) The version of the key to update. If unspecified, the latest version is updated.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The updated key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START update_key]
:end-before: [END update_key]
:language: python
:caption: Update a key's attributes
:dedent: 8
"""
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyUpdateParameters(
key_ops=kwargs.pop("key_operations", None),
key_attributes=attributes,
tags=kwargs.pop("tags", None)
)
bundle = self._client.update_key(
self.vault_url,
name,
key_version=version or "",
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def backup_key(self, name, **kwargs):
    # type: (str, **Any) -> bytes
    """Back up a key in a protected form useable only by Azure Key Vault. Requires keys/backup permission.

    The backup is intended for copying a key between vaults owned by the same
    Azure subscription. Backup/restore cannot cross geopolitical boundaries:
    for example, a backup taken in a USA region cannot be restored in an EU
    region.

    :param str name: The name of the key to back up
    :rtype: bytes
    :raises:
        :class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    """
    result = self._client.backup_key(
        self.vault_url, name, error_map=_error_map, **kwargs
    )
    return result.value
@distributed_trace
def restore_key_backup(self, backup, **kwargs):
    # type: (bytes, **Any) -> KeyVaultKey
    """Restore a key backup to the vault. Requires keys/restore permission.

    All versions of the key are imported, along with its name, attributes, and
    access control policies. Restoring fails when the key's name is already in
    use, and the target vault must belong to the same Microsoft Azure
    subscription as the source vault.

    :param bytes backup: A key backup as returned by :func:`backup_key`
    :returns: The restored key
    :rtype: ~azure.keyvault.keys.KeyVaultKey
    :raises:
        :class:`~azure.core.exceptions.ResourceExistsError` if the backed up key's name is already in use,
        :class:`~azure.core.exceptions.HttpResponseError` for other errors
    """
    restore_parameters = self._models.KeyRestoreParameters(key_bundle_backup=backup)
    restored_bundle = self._client.restore_key(
        self.vault_url,
        parameters=restore_parameters,
        error_map=_error_map,
        **kwargs
    )
    return KeyVaultKey._from_key_bundle(restored_bundle)
@distributed_trace
def import_key(self, name, key, **kwargs):
    # type: (str, JsonWebKey, **Any) -> KeyVaultKey
    """Import a key created externally. Requires keys/import permission.

    If `name` is already in use, the key is imported as a new version of it.

    :param str name: Name for the imported key
    :param key: The JSON web key to import
    :type key: ~azure.keyvault.keys.JsonWebKey
    :keyword bool hardware_protected: Whether the key should be backed by a hardware security module
    :keyword bool enabled: Whether the key is enabled for use.
    :keyword tags: Application specific metadata in the form of key-value pairs.
    :paramtype tags: dict[str, str]
    :keyword ~datetime.datetime not_before: Not before date of the key in UTC
    :keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
    :returns: The imported key
    :rtype: ~azure.keyvault.keys.KeyVaultKey
    :raises: :class:`~azure.core.exceptions.HttpResponseError`
    """
    enabled = kwargs.pop("enabled", None)
    not_before = kwargs.pop("not_before", None)
    expires_on = kwargs.pop("expires_on", None)
    # Only build a KeyAttributes payload when the caller supplied at least one
    # attribute; otherwise the service receives no attributes object at all.
    if any(value is not None for value in (enabled, not_before, expires_on)):
        attributes = self._models.KeyAttributes(
            enabled=enabled, not_before=not_before, expires=expires_on
        )
    else:
        attributes = None
    import_parameters = self._models.KeyImportParameters(
        key=key._to_generated_model(),
        key_attributes=attributes,
        hsm=kwargs.pop("hardware_protected", None),
        tags=kwargs.pop("tags", None)
    )
    bundle = self._client.import_key(
        self.vault_url,
        name,
        parameters=import_parameters,
        error_map=_error_map,
        **kwargs
    )
    return KeyVaultKey._from_key_bundle(bundle)
| 45.875208 | 120 | 0.641507 |
from functools import partial
from azure.core.tracing.decorator import distributed_trace
from ._shared import KeyVaultClientBase
from ._shared.exceptions import error_map as _error_map
from ._shared._polling import DeleteRecoverPollingMethod, KeyVaultOperationPoller
from ._models import KeyVaultKey, KeyProperties, DeletedKey
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Optional, Union
from azure.core.paging import ItemPaged
from ._models import JsonWebKey
class KeyClient(KeyVaultClientBase):
@distributed_trace
def create_key(self, name, key_type, **kwargs):
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyCreateParameters(
kty=key_type,
key_size=kwargs.pop("size", None),
key_attributes=attributes,
key_ops=kwargs.pop("key_operations", None),
tags=kwargs.pop("tags", None),
curve=kwargs.pop("curve", None),
public_exponent=kwargs.pop("public_exponent", None)
)
bundle = self._client.create_key(
vault_base_url=self.vault_url,
key_name=name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def create_rsa_key(self, name, **kwargs):
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="RSA-HSM" if hsm else "RSA", **kwargs)
@distributed_trace
def create_ec_key(self, name, **kwargs):
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="EC-HSM" if hsm else "EC", **kwargs)
@distributed_trace
def create_oct_key(self, name, **kwargs):
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="oct-HSM" if hsm else "oct", **kwargs)
@distributed_trace
def begin_delete_key(self, name, **kwargs):
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
deleted_key = DeletedKey._from_deleted_key_bundle(
self._client.delete_key(self.vault_url, name, error_map=_error_map, **kwargs)
)
command = partial(self.get_deleted_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
finished=deleted_key.recovery_id is None,
command=command,
final_resource=deleted_key,
interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def get_key(self, name, version=None, **kwargs):
bundle = self._client.get_key(self.vault_url, name, key_version=version or "", error_map=_error_map, **kwargs)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def get_deleted_key(self, name, **kwargs):
bundle = self._client.get_deleted_key(self.vault_url, name, error_map=_error_map, **kwargs)
return DeletedKey._from_deleted_key_bundle(bundle)
@distributed_trace
def list_deleted_keys(self, **kwargs):
return self._client.get_deleted_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [DeletedKey._from_deleted_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_keys(self, **kwargs):
return self._client.get_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_key_versions(self, name, **kwargs):
return self._client.get_key_versions(
self._vault_url,
name,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def purge_deleted_key(self, name, **kwargs):
self._client.purge_deleted_key(vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs)
@distributed_trace
def begin_recover_deleted_key(self, name, **kwargs):
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
recovered_key = KeyVaultKey._from_key_bundle(
self._client.recover_deleted_key(
vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs
)
)
command = partial(self.get_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
finished=False, command=command, final_resource=recovered_key, interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def update_key_properties(self, name, version=None, **kwargs):
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyUpdateParameters(
key_ops=kwargs.pop("key_operations", None),
key_attributes=attributes,
tags=kwargs.pop("tags", None)
)
bundle = self._client.update_key(
self.vault_url,
name,
key_version=version or "",
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def backup_key(self, name, **kwargs):
backup_result = self._client.backup_key(self.vault_url, name, error_map=_error_map, **kwargs)
return backup_result.value
@distributed_trace
def restore_key_backup(self, backup, **kwargs):
bundle = self._client.restore_key(
self.vault_url,
parameters=self._models.KeyRestoreParameters(key_bundle_backup=backup),
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def import_key(self, name, key, **kwargs):
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyImportParameters(
key=key._to_generated_model(),
key_attributes=attributes,
hsm=kwargs.pop("hardware_protected", None),
tags=kwargs.pop("tags", None)
)
bundle = self._client.import_key(
self.vault_url,
name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
| true | true |
f7199764cac1f3e56cc1b5f43ff6f14fb40c8601 | 3,487 | py | Python | tests/test_cookies.py | tripsolutions/pyramid_jwt | 320ed080216971467ae5e12b1f9888b50a9a29b7 | [
"BSD-2-Clause"
] | null | null | null | tests/test_cookies.py | tripsolutions/pyramid_jwt | 320ed080216971467ae5e12b1f9888b50a9a29b7 | [
"BSD-2-Clause"
] | null | null | null | tests/test_cookies.py | tripsolutions/pyramid_jwt | 320ed080216971467ae5e12b1f9888b50a9a29b7 | [
"BSD-2-Clause"
] | null | null | null | import uuid
import pytest
from pyramid.interfaces import IAuthenticationPolicy
from webob import Request
from zope.interface.verify import verifyObject
from pyramid_jwt.policy import JWTCookieAuthenticationPolicy
@pytest.fixture(scope="module")
def principal():
    """Module-scoped fixture: a random principal identifier string."""
    return "{0}".format(uuid.uuid4())
def test_interface():
    """The policy must fully implement pyramid's IAuthenticationPolicy."""
    policy = JWTCookieAuthenticationPolicy("secret")
    verifyObject(IAuthenticationPolicy, policy)
def test_cookie(principal):
    """remember() produces a non-empty ("Set-Cookie", value) header pair."""
    policy = JWTCookieAuthenticationPolicy("secret")
    request = Request.blank("/")
    token = policy.create_token(principal)
    header = policy.remember(request, token).pop()
    assert len(header) == 2
    header_name, header_value = header
    assert header_name == "Set-Cookie"
    assert len(header_value) > 0
def test_cookie_name(principal):
    """A custom cookie_name becomes the name part of the emitted cookie."""
    policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth")
    request = Request.blank("/")
    token = policy.create_token(principal)
    _, cookie = policy.remember(request, token).pop()
    cookie_name = cookie.split("=", 1)[0]
    assert cookie_name == "auth"
def test_secure_cookie():
    """With https_only the cookie carries both the secure and HttpOnly flags."""
    policy = JWTCookieAuthenticationPolicy("secret", https_only=True)
    request = Request.blank("/")
    token = policy.create_token(str(uuid.uuid4()))
    _, cookie = policy.remember(request, token).pop()
    for flag in ("; secure;", "; HttpOnly"):
        assert flag in cookie
def test_insecure_cookie(principal):
    """Without https_only the cookie keeps HttpOnly but drops the secure flag."""
    policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
    request = Request.blank("/")
    token = policy.create_token(principal)
    _, cookie = policy.remember(request, token).pop()
    assert "; secure;" not in cookie
    assert "; HttpOnly" in cookie
def test_cookie_decode(principal):
    """A cookie produced by remember() decodes back to the principal's claims."""
    policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
    request = Request.blank("/")
    token = policy.create_token(principal)
    _, cookie = policy.remember(request, token).pop()
    cookie_name, _, rest = cookie.partition("=")
    cookie_value = rest.split(";", 1)[0]
    request.cookies = {cookie_name: cookie_value}
    claims = policy.get_claims(request)
    assert claims["sub"] == principal
def test_invalid_cookie_reissue(principal):
    """An unparseable cookie value yields no claims, even with reissue enabled."""
    policy = JWTCookieAuthenticationPolicy("secret", https_only=False, reissue_time=10)
    request = Request.blank("/")
    # Deliberately remember a value that is not a valid JWT.
    _, cookie = policy.remember(request, "invalid value").pop()
    cookie_name, _, rest = cookie.partition("=")
    request.cookies = {cookie_name: rest.split(";", 1)[0]}
    claims = policy.get_claims(request)
    assert not claims
def test_cookie_max_age(principal):
    """Setting an expiration adds Max-Age and expires attributes to the cookie."""
    policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", expiration=100)
    request = Request.blank("/")
    # NOTE(review): remember() is handed the raw principal here (not a token
    # from create_token, unlike the other tests) — confirm this is intended.
    _, cookie = policy.remember(request, principal).pop()
    meta = cookie.split("=", 1)[1].split(";", 1)[1]
    assert "Max-Age=100" in meta
    assert "expires" in meta
@pytest.mark.freeze_time
def test_expired_token(principal, freezer):
    """Once the configured expiration passes, the cookie's claims are rejected."""
    policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", expiration=1)
    request = Request.blank("/")
    token = policy.create_token(principal)
    _, cookie = policy.remember(request, token).pop()
    cookie_name, _, rest = cookie.partition("=")
    # Advance frozen time beyond the 1-second expiration window.
    freezer.tick(delta=2)
    request.cookies = {cookie_name: rest.split(";", 1)[0]}
    claims = policy.get_claims(request)
    assert claims == {}
| 29.058333 | 88 | 0.706051 | import uuid
import pytest
from pyramid.interfaces import IAuthenticationPolicy
from webob import Request
from zope.interface.verify import verifyObject
from pyramid_jwt.policy import JWTCookieAuthenticationPolicy
@pytest.fixture(scope="module")
def principal():
return str(uuid.uuid4())
def test_interface():
verifyObject(IAuthenticationPolicy, JWTCookieAuthenticationPolicy("secret"))
def test_cookie(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret")
token = policy.create_token(principal)
cookie = policy.remember(dummy_request, token).pop()
assert len(cookie) == 2
header, cookie = cookie
assert header == "Set-Cookie"
assert len(cookie) > 0
def test_cookie_name(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth")
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
assert name == "auth"
def test_secure_cookie():
policy = JWTCookieAuthenticationPolicy("secret", https_only=True)
dummy_request = Request.blank("/")
token = policy.create_token(str(uuid.uuid4()))
_, cookie = policy.remember(dummy_request, token).pop()
assert "; secure;" in cookie
assert "; HttpOnly" in cookie
def test_insecure_cookie(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
assert "; secure;" not in cookie
assert "; HttpOnly" in cookie
def test_cookie_decode(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False)
token = policy.create_token(principal)
header, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert claims["sub"] == principal
def test_invalid_cookie_reissue(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", https_only=False, reissue_time=10)
token = "invalid value"
header, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert not claims
def test_cookie_max_age(principal):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", expiration=100)
_, cookie = policy.remember(dummy_request, principal).pop()
_, value = cookie.split("=", 1)
_, meta = value.split(";", 1)
assert "Max-Age=100" in meta
assert "expires" in meta
@pytest.mark.freeze_time
def test_expired_token(principal, freezer):
dummy_request = Request.blank("/")
policy = JWTCookieAuthenticationPolicy("secret", cookie_name="auth", expiration=1)
token = policy.create_token(principal)
_, cookie = policy.remember(dummy_request, token).pop()
name, value = cookie.split("=", 1)
freezer.tick(delta=2)
value, _ = value.split(";", 1)
dummy_request.cookies = {name: value}
claims = policy.get_claims(dummy_request)
assert claims == {}
| true | true |
f719982c32746d402b0277ba15a13000bcc77119 | 94 | py | Python | my_classes/.history/ModulesPackages_PackageNamespaces/example3b/main_20210726185941.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/ModulesPackages_PackageNamespaces/example3b/main_20210726185941.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/ModulesPackages_PackageNamespaces/example3b/main_20210726185941.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null |
import sys
import importer
module1 = importer.import_('module1', 'module1_source.py', '.')
| 13.428571 | 63 | 0.723404 |
import sys
import importer
module1 = importer.import_('module1', 'module1_source.py', '.')
| true | true |
f719994b12c769c14062f52ec104eb9f369ef914 | 757 | py | Python | Exercicios Loop/exercicio 35 - secao 06.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | Exercicios Loop/exercicio 35 - secao 06.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | Exercicios Loop/exercicio 35 - secao 06.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | """
35 - Faça um programa que some os números impares contidos em um intervalo definido pelo usuário.
O usuário define o valor inicial do intervalo e o valor final deste intervalo e o programa deve
somar todos os números ímpares contidos neste intervalo (começando por um valor maior que o valor final)
deve ser escrito uma mensagem de erro na tela, "Intervalo de valores inválido" e o programa termina.
Exemplo de tela de saída:
Digite o valor inicial e valor final: 5 10
Soma dos ímpares neste intervalo: 21
"""
impar = 0
inicial, final = [int(x) for x in input("Digite o valor inicial e valor final: ").split()]
for i in list(range(inicial, final)):
if i % 2 != 0:
impar = impar + i
print(f'A soma dos ímpares neste intervalo é {impar}')
| 42.055556 | 104 | 0.73712 | impar = 0
inicial, final = [int(x) for x in input("Digite o valor inicial e valor final: ").split()]
for i in list(range(inicial, final)):
if i % 2 != 0:
impar = impar + i
print(f'A soma dos ímpares neste intervalo é {impar}')
| true | true |
f71999d547a46a0a1493f4a1de55c28d65419f04 | 421 | py | Python | strava/cli/activity/commands.py | dparret/strava-cli | 2426ea7f3fe4580aea352476b261cec31d3f0b11 | [
"MIT"
] | null | null | null | strava/cli/activity/commands.py | dparret/strava-cli | 2426ea7f3fe4580aea352476b261cec31d3f0b11 | [
"MIT"
] | null | null | null | strava/cli/activity/commands.py | dparret/strava-cli | 2426ea7f3fe4580aea352476b261cec31d3f0b11 | [
"MIT"
] | null | null | null | import click
from strava.commands import get_activity, get_constrain_activity, get_weekly_activity, get_lap_activity
@click.group(name='activity', help='[GROUP] Get the summary of one or multiple activities.')
def cli_activity():
    """Click group collecting the activity-summary subcommands."""
    pass


# Attach every activity subcommand to the group.
for _command in (
    get_activity,
    get_constrain_activity,
    get_weekly_activity,
    get_lap_activity,
):
    cli_activity.add_command(_command)
| 28.066667 | 103 | 0.83848 | import click
from strava.commands import get_activity, get_constrain_activity, get_weekly_activity, get_lap_activity
@click.group(name='activity', help='[GROUP] Get the summary of one or multiple activities.')
def cli_activity():
pass
cli_activity.add_command(get_activity)
cli_activity.add_command(get_constrain_activity)
cli_activity.add_command(get_weekly_activity)
cli_activity.add_command(get_lap_activity)
| true | true |
f7199aebd95eaaf673576198d3754ac18ebe3786 | 4,928 | py | Python | 3.Netdata_package/zipcontents/bin/netdata/usr/libexec/netdata/python.d/cpuidle.chart.py | NordicID/ar8x_samples | 2ac78750d6f4ff924628d1e225990f4bfcecfda0 | [
"MIT"
] | 4 | 2017-10-17T13:28:28.000Z | 2020-12-23T09:46:10.000Z | 3.Netdata_package/zipcontents/bin/netdata/usr/libexec/netdata/python.d/cpuidle.chart.py | NordicID/ar8x_samples | 2ac78750d6f4ff924628d1e225990f4bfcecfda0 | [
"MIT"
] | 8 | 2019-02-09T15:29:12.000Z | 2021-03-15T17:45:49.000Z | 3.Netdata_package/zipcontents/bin/netdata/usr/libexec/netdata/python.d/cpuidle.chart.py | NordicID/ar8x_samples | 2ac78750d6f4ff924628d1e225990f4bfcecfda0 | [
"MIT"
] | 3 | 2018-05-24T16:27:43.000Z | 2019-08-04T23:39:22.000Z | # -*- coding: utf-8 -*-
# Description: cpuidle netdata python.d module
# Author: Steven Noonan (tycho)
import glob
import os
import platform
import time
from base import SimpleService
import ctypes
# Raw access to the libc syscall(2) entry point.  Service.__gettid() uses it
# to obtain the calling *thread* id, which (per the comment in __gettid) the
# Python standard library does not expose here.
syscall = ctypes.CDLL('libc.so.6').syscall
# default module values (can be overridden per job in `config`)
# update_every = 2
class Service(SimpleService):
    """Collect per-CPU C-state residency from sysfs for netdata 'cpuidle' charts."""

    def __init__(self, configuration=None, name=None):
        prefix = os.getenv('NETDATA_HOST_PREFIX', "")
        if prefix.endswith('/'):
            prefix = prefix[:-1]
        self.sys_dir = prefix + "/sys/devices/system/cpu"
        self.schedstat_path = prefix + "/proc/schedstat"
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = []
        self.definitions = {}
        self._orig_name = ""
        # Maps cpu name -> {metric id -> sysfs 'time' file path}.
        self.assignment = {}

    def __gettid(self):
        """Return the calling thread's id via syscall(2), or None on unsupported machines."""
        # This is horrendous. We need the *thread id* (not the *process id*),
        # but there's no Python standard library way of doing that. If you need
        # to enable this module on a non-x86 machine type, you'll have to find
        # the Linux syscall number for gettid() and add it to the dictionary
        # below.
        syscalls = {
            'i386': 224,
            'x86_64': 186,
        }
        if platform.machine() not in syscalls:
            return None
        tid = syscall(syscalls[platform.machine()])
        return tid

    def __wake_cpus(self):
        """Briefly schedule this thread on each CPU so its idle counters update."""
        # Requires Python 3.3+. This will "tickle" each CPU to force it to
        # update its idle counters.
        if hasattr(os, 'sched_setaffinity'):
            pid = self.__gettid()
            save_affinity = os.sched_getaffinity(pid)

            for idx in range(0, len(self.assignment)):
                os.sched_setaffinity(pid, [idx])
                os.sched_getaffinity(pid)

            os.sched_setaffinity(pid, save_affinity)

    def __read_schedstat(self):
        """Return {cpu name: active time in microseconds} parsed from /proc/schedstat."""
        cpus = {}
        # Use a context manager so the handle is closed promptly instead of
        # waiting for garbage collection — this runs on every update cycle.
        with open(self.schedstat_path, 'r') as schedstat_file:
            for line in schedstat_file:
                if not line.startswith('cpu'):
                    continue
                line = line.rstrip().split()
                cpu = line[0]
                active_time = line[7]
                cpus[cpu] = int(active_time) // 1000
        return cpus

    def _get_data(self):
        """Collect the current residency counters for every discovered C-state."""
        results = {}

        # This line is critical for the stats to update. If we don't "tickle"
        # all the CPUs, then all the counters stop counting.
        self.__wake_cpus()

        # Use the kernel scheduler stats to determine how much time was spent
        # in C0 (active).
        schedstat = self.__read_schedstat()

        for cpu, metrics in self.assignment.items():
            update_time = schedstat[cpu]
            results[cpu + '_active_time'] = update_time

            for metric, path in metrics.items():
                # Close each sysfs file immediately rather than leaking the
                # handle until garbage collection.
                with open(path, 'r') as residency_file:
                    residency = int(residency_file.read())
                results[metric] = residency

        return results

    def check(self):
        """Discover C-states under sysfs and build chart definitions; False if unusable."""
        if self.__gettid() is None:
            self.error("Cannot get thread ID. Stats would be completely broken.")
            return False

        self._orig_name = self.chart_name

        for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
            # ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
            path_elem = path.split('/')
            cpu = path_elem[-4]
            state = path_elem[-2]
            with open(path, 'rt') as name_file:
                statename = name_file.read().rstrip()

            orderid = '%s_cpuidle' % (cpu,)
            if orderid not in self.definitions:
                self.order.append(orderid)
                active_name = '%s_active_time' % (cpu,)
                self.definitions[orderid] = {
                    'options': [None, 'C-state residency', 'time%', 'cpuidle', None, 'stacked'],
                    'lines': [
                        [active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
                    ],
                }
                self.assignment[cpu] = {}

            defid = '%s_%s_time' % (orderid, state)

            self.definitions[orderid]['lines'].append(
                [defid, statename, 'percentage-of-incremental-row', 1, 1]
            )

            self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])

        # Sort order by kernel-specified CPU index
        self.order.sort(key=lambda x: int(x.split('_')[0][3:]))

        if len(self.definitions) == 0:
            self.error("couldn't find cstate stats")
            return False

        return True

    def create(self):
        """Create charts under the shared 'cpu' chart name, then restore our own name."""
        self.chart_name = "cpu"
        status = SimpleService.create(self)
        self.chart_name = self._orig_name
        return status

    def update(self, interval):
        """Update charts under the shared 'cpu' chart name, then restore our own name."""
        self.chart_name = "cpu"
        status = SimpleService.update(self, interval=interval)
        self.chart_name = self._orig_name
        return status
# vim: set ts=4 sts=4 sw=4 et:
| 34.222222 | 96 | 0.565544 |
import glob
import os
import platform
import time
from base import SimpleService
import ctypes
syscall = ctypes.CDLL('libc.so.6').syscall
class Service(SimpleService):
def __init__(self, configuration=None, name=None):
prefix = os.getenv('NETDATA_HOST_PREFIX', "")
if prefix.endswith('/'):
prefix = prefix[:-1]
self.sys_dir = prefix + "/sys/devices/system/cpu"
self.schedstat_path = prefix + "/proc/schedstat"
SimpleService.__init__(self, configuration=configuration, name=name)
self.order = []
self.definitions = {}
self._orig_name = ""
self.assignment = {}
def __gettid(self):
# to enable this module on a non-x86 machine type, you'll have to find
syscalls = {
'i386': 224,
'x86_64': 186,
}
if platform.machine() not in syscalls:
return None
tid = syscall(syscalls[platform.machine()])
return tid
def __wake_cpus(self):
if hasattr(os, 'sched_setaffinity'):
pid = self.__gettid()
save_affinity = os.sched_getaffinity(pid)
for idx in range(0, len(self.assignment)):
os.sched_setaffinity(pid, [idx])
os.sched_getaffinity(pid)
os.sched_setaffinity(pid, save_affinity)
def __read_schedstat(self):
cpus = {}
for line in open(self.schedstat_path, 'r'):
if not line.startswith('cpu'):
continue
line = line.rstrip().split()
cpu = line[0]
active_time = line[7]
cpus[cpu] = int(active_time) // 1000
return cpus
def _get_data(self):
results = {}
# all the CPUs, then all the counters stop counting.
self.__wake_cpus()
# Use the kernel scheduler stats to determine how much time was spent
# in C0 (active).
schedstat = self.__read_schedstat()
for cpu, metrics in self.assignment.items():
update_time = schedstat[cpu]
results[cpu + '_active_time'] = update_time
for metric, path in metrics.items():
residency = int(open(path, 'r').read())
results[metric] = residency
return results
def check(self):
if self.__gettid() is None:
self.error("Cannot get thread ID. Stats would be completely broken.")
return False
self._orig_name = self.chart_name
for path in sorted(glob.glob(self.sys_dir + '/cpu*/cpuidle/state*/name')):
# ['', 'sys', 'devices', 'system', 'cpu', 'cpu0', 'cpuidle', 'state3', 'name']
path_elem = path.split('/')
cpu = path_elem[-4]
state = path_elem[-2]
statename = open(path, 'rt').read().rstrip()
orderid = '%s_cpuidle' % (cpu,)
if orderid not in self.definitions:
self.order.append(orderid)
active_name = '%s_active_time' % (cpu,)
self.definitions[orderid] = {
'options': [None, 'C-state residency', 'time%', 'cpuidle', None, 'stacked'],
'lines': [
[active_name, 'C0 (active)', 'percentage-of-incremental-row', 1, 1],
],
}
self.assignment[cpu] = {}
defid = '%s_%s_time' % (orderid, state)
self.definitions[orderid]['lines'].append(
[defid, statename, 'percentage-of-incremental-row', 1, 1]
)
self.assignment[cpu][defid] = '/'.join(path_elem[:-1] + ['time'])
# Sort order by kernel-specified CPU index
self.order.sort(key=lambda x: int(x.split('_')[0][3:]))
if len(self.definitions) == 0:
self.error("couldn't find cstate stats")
return False
return True
def create(self):
self.chart_name = "cpu"
status = SimpleService.create(self)
self.chart_name = self._orig_name
return status
def update(self, interval):
self.chart_name = "cpu"
status = SimpleService.update(self, interval=interval)
self.chart_name = self._orig_name
return status
| true | true |
f7199b4c4ff664a5de4259b1a156f514807f75ec | 358 | py | Python | Ch6/picnic_table.py | dmdinh22/ATBS | 3ddd331757cc434faa5f27997b178f8a39e3b5d2 | [
"MIT"
] | null | null | null | Ch6/picnic_table.py | dmdinh22/ATBS | 3ddd331757cc434faa5f27997b178f8a39e3b5d2 | [
"MIT"
] | null | null | null | Ch6/picnic_table.py | dmdinh22/ATBS | 3ddd331757cc434faa5f27997b178f8a39e3b5d2 | [
"MIT"
] | null | null | null | def print_picnic(itemsDict, leftWidth, rightWidth):
print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))
for k, v in itemsDict.items():
print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))
picnic_items = {'sandwiches': 4, 'apples': 12, 'cups': 4, 'cookies': 8000}
# Render the same data at two different column widths.
print_picnic(picnic_items, 12, 5)
print_picnic(picnic_items, 20, 6)
| 44.75 | 74 | 0.684358 | def print_picnic(itemsDict, leftWidth, rightWidth):
print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))
for k, v in itemsDict.items():
print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))
picnic_items = {'sandwiches': 4, 'apples': 12, 'cups': 4, 'cookies': 8000}
print_picnic(picnic_items, 12, 5)
print_picnic(picnic_items, 20, 6)
| true | true |
f7199b6017b06f096a888ac161723abab17bf6d1 | 80 | py | Python | notebooks/_solutions/13-raster-processing42.py | jorisvandenbossche/DS-python-geospatial | 893a12edc5c203a75815f6dcb5f1e18c577c8cd5 | [
"BSD-3-Clause"
] | 58 | 2020-10-09T10:10:59.000Z | 2022-03-07T14:58:07.000Z | notebooks/_solutions/13-raster-processing42.py | jorisvandenbossche/DS-python-geospatial | 893a12edc5c203a75815f6dcb5f1e18c577c8cd5 | [
"BSD-3-Clause"
] | 24 | 2020-09-30T19:57:14.000Z | 2021-10-05T07:21:09.000Z | notebooks/_solutions/13-raster-processing42.py | jorisvandenbossche/DS-python-geospatial | 893a12edc5c203a75815f6dcb5f1e18c577c8cd5 | [
"BSD-3-Clause"
] | 19 | 2020-10-05T09:32:18.000Z | 2022-03-20T00:09:14.000Z | green = geopandas.read_file("data/gent/vector/parken-gent.geojson")
green.head() | 40 | 67 | 0.7875 | green = geopandas.read_file("data/gent/vector/parken-gent.geojson")
green.head() | true | true |
f7199bd2f937de5095eb9d5c4cafe386b70039eb | 1,325 | py | Python | kale/util/ints.py | inan0812/kale-blockchain | 1b502fe21a4be10b4db0171c3a7030079dcefa1b | [
"Apache-2.0"
] | null | null | null | kale/util/ints.py | inan0812/kale-blockchain | 1b502fe21a4be10b4db0171c3a7030079dcefa1b | [
"Apache-2.0"
] | null | null | null | kale/util/ints.py | inan0812/kale-blockchain | 1b502fe21a4be10b4db0171c3a7030079dcefa1b | [
"Apache-2.0"
] | null | null | null | from typing import Any, BinaryIO
from kale.util.struct_stream import StructStream
# Fixed-width integer stream types.  Each subclass only supplies PACK, a
# struct format string ("!" = network/big-endian); the parse/serialize
# machinery comes from StructStream.
class int8(StructStream):
    PACK = "!b"  # signed 8-bit
class uint8(StructStream):
    PACK = "!B"  # unsigned 8-bit
class int16(StructStream):
    PACK = "!h"  # signed 16-bit
class uint16(StructStream):
    PACK = "!H"  # unsigned 16-bit
class int32(StructStream):
    PACK = "!l"  # signed 32-bit
class uint32(StructStream):
    PACK = "!L"  # unsigned 32-bit
class int64(StructStream):
    PACK = "!q"  # signed 64-bit
class uint64(StructStream):
    PACK = "!Q"  # unsigned 64-bit
class uint128(int):
    """Unsigned 128-bit integer with big-endian (de)serialization."""

    @classmethod
    def parse(cls, f: BinaryIO) -> Any:
        """Read exactly 16 big-endian bytes from *f* and return them as a uint128."""
        raw = f.read(16)
        assert len(raw) == 16
        value = int.from_bytes(raw, "big", signed=False)
        assert 0 <= value <= (2 ** 128) - 1
        return cls(value)

    def stream(self, f):
        """Write this value to *f* as 16 big-endian bytes."""
        assert 0 <= self <= (2 ** 128) - 1
        f.write(self.to_bytes(16, "big", signed=False))
class int512(int):
    """Signed 512-bit integer serialized as 65 big-endian bytes.

    65 bytes (not 64) are used so the sign bit has room.
    """

    @classmethod
    def parse(cls, f: BinaryIO) -> Any:
        """Read exactly 65 big-endian signed bytes from *f* and return an int512."""
        raw = f.read(65)
        assert len(raw) == 65
        value = int.from_bytes(raw, "big", signed=True)
        assert -(2 ** 512) <= value <= (2 ** 512) - 1
        return cls(value)

    def stream(self, f):
        """Write this value to *f* as 65 big-endian signed bytes."""
        assert -(2 ** 512) <= self <= (2 ** 512) - 1
        f.write(self.to_bytes(65, "big", signed=True))
| 20.384615 | 61 | 0.577358 | from typing import Any, BinaryIO
from kale.util.struct_stream import StructStream
class int8(StructStream):
PACK = "!b"
class uint8(StructStream):
PACK = "!B"
class int16(StructStream):
PACK = "!h"
class uint16(StructStream):
PACK = "!H"
class int32(StructStream):
PACK = "!l"
class uint32(StructStream):
PACK = "!L"
class int64(StructStream):
PACK = "!q"
class uint64(StructStream):
PACK = "!Q"
class uint128(int):
@classmethod
def parse(cls, f: BinaryIO) -> Any:
read_bytes = f.read(16)
assert len(read_bytes) == 16
n = int.from_bytes(read_bytes, "big", signed=False)
assert n <= (2 ** 128) - 1 and n >= 0
return cls(n)
def stream(self, f):
assert self <= (2 ** 128) - 1 and self >= 0
f.write(self.to_bytes(16, "big", signed=False))
class int512(int):
@classmethod
def parse(cls, f: BinaryIO) -> Any:
read_bytes = f.read(65)
assert len(read_bytes) == 65
n = int.from_bytes(read_bytes, "big", signed=True)
assert n <= (2 ** 512) - 1 and n >= -(2 ** 512)
return cls(n)
def stream(self, f):
assert self <= (2 ** 512) - 1 and self >= -(2 ** 512)
f.write(self.to_bytes(65, "big", signed=True))
| true | true |
f7199cc541ada1d15fae75b62fc319d80df9c669 | 428 | py | Python | src/upload/admin.py | bpilkerton/vendor-upload | ba43b620340c9fffd26cf6a8ee5bc9f97ffabda1 | [
"Unlicense"
] | null | null | null | src/upload/admin.py | bpilkerton/vendor-upload | ba43b620340c9fffd26cf6a8ee5bc9f97ffabda1 | [
"Unlicense"
] | null | null | null | src/upload/admin.py | bpilkerton/vendor-upload | ba43b620340c9fffd26cf6a8ee5bc9f97ffabda1 | [
"Unlicense"
] | null | null | null | from django.contrib import admin
from .models import Upload,VendorData
class UploadAdmin(admin.ModelAdmin):
    """Admin changelist configuration for uploaded files."""
    list_display = ('id','uploaded_file','uploaded_date')
class VendordataAdmin(admin.ModelAdmin):
    """Admin changelist configuration for vendor subscription records."""
    list_display = ('id','sub_id','first_name','last_name','status')
# Branding shown in the Django admin header.
admin.site.site_header = "Subscription Fulfillment Upload"
admin.site.register(Upload, UploadAdmin)
admin.site.register(VendorData, VendordataAdmin)
| 32.923077 | 68 | 0.785047 | from django.contrib import admin
from .models import Upload,VendorData
class UploadAdmin(admin.ModelAdmin):
list_display = ('id','uploaded_file','uploaded_date')
class VendordataAdmin(admin.ModelAdmin):
list_display = ('id','sub_id','first_name','last_name','status')
admin.site.site_header = "Subscription Fulfillment Upload"
admin.site.register(Upload, UploadAdmin)
admin.site.register(VendorData, VendordataAdmin)
| true | true |
f7199d3d3a6e51cfe86975c9d26b03a1bb377073 | 228 | py | Python | Django Rest Class Based API view/Person/admin.py | abhisheksahu92/Django-Rest-Framework | 45ddafb93ed1f2e232d2f537f144bf79cb30bf3d | [
"MIT"
] | null | null | null | Django Rest Class Based API view/Person/admin.py | abhisheksahu92/Django-Rest-Framework | 45ddafb93ed1f2e232d2f537f144bf79cb30bf3d | [
"MIT"
] | null | null | null | Django Rest Class Based API view/Person/admin.py | abhisheksahu92/Django-Rest-Framework | 45ddafb93ed1f2e232d2f537f144bf79cb30bf3d | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Person
# Register your models here.
# Registers Person with the admin site using this ModelAdmin configuration.
@admin.register(Person)
class PersonModel(admin.ModelAdmin):
    """Admin changelist configuration for Person records."""
    list_display = ['first_name','last_name','email','phone','date_of_birth']
from .models import Person
@admin.register(Person)
class PersonModel(admin.ModelAdmin):
list_display = ['first_name','last_name','email','phone','date_of_birth'] | true | true |
f7199de7d432eb5ce623737f74e8d53b751b22d7 | 9,267 | py | Python | ckan/views/admin.py | robin-NEC/ckan | 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | [
"BSD-3-Clause"
] | 1 | 2021-10-01T12:47:19.000Z | 2021-10-01T12:47:19.000Z | ckan/views/admin.py | robin-NEC/ckan | 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | [
"BSD-3-Clause"
] | null | null | null | ckan/views/admin.py | robin-NEC/ckan | 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | [
"BSD-3-Clause"
] | 2 | 2018-01-21T17:03:08.000Z | 2019-07-23T08:49:52.000Z | # encoding: utf-8
from __future__ import annotations
import logging
from typing import Any, Union, cast, List
from flask import Blueprint
from flask.views import MethodView
from flask.wrappers import Response
import ckan.lib.app_globals as app_globals
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.logic as logic
import ckan.model as model
import ckan.logic.schema
from ckan.common import g, _, config, request
from ckan.views.home import CACHE_PARAMETERS
from ckan.types import Context, Query
log = logging.getLogger(__name__)
# All admin routes below are mounted under the /ckan-admin prefix.
admin = Blueprint(u'admin', __name__, url_prefix=u'/ckan-admin')
def _get_sysadmins() -> "Query[model.User]":
    """Query all active users flagged as sysadmin."""
    return model.Session.query(model.User).filter(
        # type_ignore_reason: incomplete SQLAlchemy types
        model.User.sysadmin.is_(True),  # type: ignore
        model.User.state == u'active',
    )
def _get_config_options() -> dict[str, list[dict[str, str]]]:
homepages = [{
u'value': u'1',
u'text': (u'Introductory area, search, featured'
u' group and featured organization')
}, {
u'value': u'2',
u'text': (u'Search, stats, introductory area, '
u'featured organization and featured group')
}, {
u'value': u'3',
u'text': u'Search, introductory area and stats'
}]
return dict(homepages=homepages)
def _get_config_items() -> list[str]:
return [
u'ckan.site_title', u'ckan.main_css', u'ckan.site_description',
u'ckan.site_logo', u'ckan.site_about', u'ckan.site_intro_text',
u'ckan.site_custom_css', u'ckan.homepage_style'
]
@admin.before_request
def before_request() -> None:
    """Abort with 403 unless the current user is a sysadmin."""
    try:
        context = cast(
            Context,
            {"model": model, "user": g.user, "auth_user_obj": g.userobj}
        )
        logic.check_access(u'sysadmin', context)
    except logic.NotAuthorized:
        base.abort(403, _(u'Need to be system administrator to administer'))
def index() -> str:
    """Render the admin landing page listing current sysadmin user names."""
    sysadmin_names = [user.name for user in _get_sysadmins()]
    return base.render(
        u'admin/index.html',
        extra_vars={'sysadmins': sysadmin_names}
    )
class ResetConfigView(MethodView):
    """Confirm (GET) and perform (POST) a reset of the admin config
    overrides back to the values from the config file."""
    def get(self) -> Union[str, Response]:
        if u'cancel' in request.args:
            return h.redirect_to(u'admin.config')
        return base.render(u'admin/confirm_reset.html', extra_vars={})
    def post(self) -> Response:
        # remove the sysadmin-set overrides stored via system_info
        for item in _get_config_items():
            model.delete_system_info(item)
        # reload app globals so the config-file values take effect again
        app_globals.reset()
        return h.redirect_to(u'admin.config')
class ConfigView(MethodView):
    """Admin config form: show (GET) and update (POST) the sysadmin-editable
    site settings."""

    def get(self) -> str:
        items = _get_config_options()
        schema = ckan.logic.schema.update_configuration_schema()
        # pre-fill the form with the current value of each editable option
        data = {key: config.get(key) for key in schema}
        # renamed from `vars` to avoid shadowing the builtin
        template_vars: dict[str, Any] = dict(data=data, errors={}, **items)
        return base.render(u'admin/config.html', extra_vars=template_vars)

    def post(self) -> Union[str, Response]:
        try:
            req: dict[str, Any] = request.form.copy()
            req.update(request.files.to_dict())
            data_dict = logic.clean_dict(
                dict_fns.unflatten(
                    logic.tuplize_dict(
                        logic.parse_params(req,
                                           ignore_keys=CACHE_PARAMETERS))))
            del data_dict['save']
            # the action's return value was previously assigned but unused
            logic.get_action(u'config_option_update')({
                u'user': g.user
            }, data_dict)
        except logic.ValidationError as e:
            # re-render the form with the submitted values and the errors
            items = _get_config_options()
            data = request.form
            errors = e.error_dict
            error_summary = e.error_summary
            template_vars = dict(data=data,
                                 errors=errors,
                                 error_summary=error_summary,
                                 form_items=items,
                                 **items)
            return base.render(u'admin/config.html', extra_vars=template_vars)
        return h.redirect_to(u'admin.config')
class TrashView(MethodView):
    """Admin 'trash' page: lists deleted datasets, organizations and groups
    and permanently purges them on request."""

    def __init__(self):
        self.deleted_packages = self._get_deleted_datasets()
        self.deleted_orgs = model.Session.query(model.Group).filter_by(
            state=model.State.DELETED, is_organization=True)
        self.deleted_groups = model.Session.query(model.Group).filter_by(
            state=model.State.DELETED, is_organization=False)
        # entity type name -> iterable of deleted entities of that type
        self.deleted_entities = {
            u'package': self.deleted_packages,
            u'organization': self.deleted_orgs,
            u'group': self.deleted_groups
        }
        # user-facing confirmation/flash messages keyed by entity type
        self.messages = {
            u'confirm': {
                u'all': _(u'Are you sure you want to purge everything?'),
                u'package': _(u'Are you sure you want to purge datasets?'),
                u'organization':
                    _(u'Are you sure you want to purge organizations?'),
                u'group': _(u'Are you sure you want to purge groups?')
            },
            u'success': {
                u'package': _(u'{number} datasets have been purged'),
                u'organization': _(u'{number} organizations have been purged'),
                u'group': _(u'{number} groups have been purged')
            },
            u'empty': {
                u'package': _(u'There are no datasets to purge'),
                u'organization': _(u'There are no organizations to purge'),
                u'group': _(u'There are no groups to purge')
            }
        }

    def _get_deleted_datasets(
            self
    ) -> Union["Query[model.Package]", List[Any]]:
        """Deleted datasets come from the DB or the search index depending on
        whether deleted packages are removed from the index."""
        if config.get_value('ckan.search.remove_deleted_packages'):
            return self._get_deleted_datasets_from_db()
        else:
            return self._get_deleted_datasets_from_search_index()

    def _get_deleted_datasets_from_db(self) -> "Query[model.Package]":
        return model.Session.query(
            model.Package
        ).filter_by(
            state=model.State.DELETED
        )

    def _get_deleted_datasets_from_search_index(self) -> List[Any]:
        package_search = logic.get_action('package_search')
        search_params = {
            'fq': '+state:deleted',
            'include_private': True,
        }
        base_results = package_search(
            {'ignore_auth': True},
            search_params
        )
        return base_results['results']

    def get(self) -> str:
        ent_type = request.args.get(u'name')
        if ent_type:
            # ask for confirmation before purging a single entity type
            return base.render(u'admin/snippets/confirm_delete.html',
                               extra_vars={
                                   u'ent_type': ent_type,
                                   u'messages': self.messages})
        data = dict(data=self.deleted_entities, messages=self.messages)
        return base.render(u'admin/trash.html', extra_vars=data)

    def post(self) -> Response:
        if u'cancel' in request.form:
            return h.redirect_to(u'admin.trash')
        req_action = request.form.get(u'action', '')
        if req_action == u'all':
            self.purge_all()
        elif req_action in (u'package', u'organization', u'group'):
            self.purge_entity(req_action)
        else:
            h.flash_error(_(u'Action not implemented.'))
        return h.redirect_to(u'admin.trash')

    def purge_all(self):
        """Purge every deleted dataset, group and organization."""
        actions = (u'dataset_purge', u'group_purge', u'organization_purge')
        entities = (
            self.deleted_packages,
            self.deleted_groups,
            self.deleted_orgs
        )
        for action, deleted_entities in zip(actions, entities):
            for entity in deleted_entities:
                # entities may be model objects (DB query) or dicts (search index)
                ent_id = entity.id if hasattr(entity, 'id') \
                    else entity['id']  # type: ignore
                logic.get_action(action)(
                    {u'user': g.user}, {u'id': ent_id}
                )
        model.Session.remove()
        h.flash_success(_(u'Massive purge complete'))

    def purge_entity(self, ent_type: str):
        """Purge all deleted entities of the given type."""
        entities = self.deleted_entities[ent_type]
        # isinstance instead of `type(...) == list`: same behaviour, idiomatic
        number = len(entities) if isinstance(entities, list) else entities.count()
        for ent in entities:
            entity_id = ent.id if hasattr(ent, 'id') else ent['id']
            logic.get_action(self._get_purge_action(ent_type))(
                {u'user': g.user},
                {u'id': entity_id}
            )
        model.Session.remove()
        h.flash_success(self.messages[u'success'][ent_type].format(
            number=number
        ))

    @staticmethod
    def _get_purge_action(ent_type: str) -> str:
        """Map an entity type name to its purge action name."""
        actions = {
            "package": "dataset_purge",
            "organization": "organization_purge",
            "group": "group_purge",
        }
        return actions[ent_type]
# Route registrations for the admin blueprint (mounted at /ckan-admin).
admin.add_url_rule(
    u'/', view_func=index, methods=['GET'], strict_slashes=False
)
admin.add_url_rule(u'/reset_config',
                   view_func=ResetConfigView.as_view(str(u'reset_config')))
admin.add_url_rule(u'/config', view_func=ConfigView.as_view(str(u'config')))
admin.add_url_rule(u'/trash', view_func=TrashView.as_view(str(u'trash')))
| 33.698182 | 79 | 0.589403 |
from __future__ import annotations
import logging
from typing import Any, Union, cast, List
from flask import Blueprint
from flask.views import MethodView
from flask.wrappers import Response
import ckan.lib.app_globals as app_globals
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.logic as logic
import ckan.model as model
import ckan.logic.schema
from ckan.common import g, _, config, request
from ckan.views.home import CACHE_PARAMETERS
from ckan.types import Context, Query
log = logging.getLogger(__name__)
admin = Blueprint(u'admin', __name__, url_prefix=u'/ckan-admin')
def _get_sysadmins() -> "Query[model.User]":
q = model.Session.query(model.User).filter(
model.User.sysadmin.is_(True),
model.User.state == u'active')
return q
def _get_config_options() -> dict[str, list[dict[str, str]]]:
homepages = [{
u'value': u'1',
u'text': (u'Introductory area, search, featured'
u' group and featured organization')
}, {
u'value': u'2',
u'text': (u'Search, stats, introductory area, '
u'featured organization and featured group')
}, {
u'value': u'3',
u'text': u'Search, introductory area and stats'
}]
return dict(homepages=homepages)
def _get_config_items() -> list[str]:
return [
u'ckan.site_title', u'ckan.main_css', u'ckan.site_description',
u'ckan.site_logo', u'ckan.site_about', u'ckan.site_intro_text',
u'ckan.site_custom_css', u'ckan.homepage_style'
]
@admin.before_request
def before_request() -> None:
try:
context = cast(
Context,
{"model": model, "user": g.user, "auth_user_obj": g.userobj}
)
logic.check_access(u'sysadmin', context)
except logic.NotAuthorized:
base.abort(403, _(u'Need to be system administrator to administer'))
def index() -> str:
data = dict(sysadmins=[a.name for a in _get_sysadmins()])
return base.render(u'admin/index.html', extra_vars=data)
class ResetConfigView(MethodView):
def get(self) -> Union[str, Response]:
if u'cancel' in request.args:
return h.redirect_to(u'admin.config')
return base.render(u'admin/confirm_reset.html', extra_vars={})
def post(self) -> Response:
for item in _get_config_items():
model.delete_system_info(item)
app_globals.reset()
return h.redirect_to(u'admin.config')
class ConfigView(MethodView):
def get(self) -> str:
items = _get_config_options()
schema = ckan.logic.schema.update_configuration_schema()
data = {}
for key in schema:
data[key] = config.get(key)
vars: dict[str, Any] = dict(data=data, errors={}, **items)
return base.render(u'admin/config.html', extra_vars=vars)
def post(self) -> Union[str, Response]:
try:
req: dict[str, Any] = request.form.copy()
req.update(request.files.to_dict())
data_dict = logic.clean_dict(
dict_fns.unflatten(
logic.tuplize_dict(
logic.parse_params(req,
ignore_keys=CACHE_PARAMETERS))))
del data_dict['save']
data = logic.get_action(u'config_option_update')({
u'user': g.user
}, data_dict)
except logic.ValidationError as e:
items = _get_config_options()
data = request.form
errors = e.error_dict
error_summary = e.error_summary
vars = dict(data=data,
errors=errors,
error_summary=error_summary,
form_items=items,
**items)
return base.render(u'admin/config.html', extra_vars=vars)
return h.redirect_to(u'admin.config')
class TrashView(MethodView):
def __init__(self):
self.deleted_packages = self._get_deleted_datasets()
self.deleted_orgs = model.Session.query(model.Group).filter_by(
state=model.State.DELETED, is_organization=True)
self.deleted_groups = model.Session.query(model.Group).filter_by(
state=model.State.DELETED, is_organization=False)
self.deleted_entities = {
u'package': self.deleted_packages,
u'organization': self.deleted_orgs,
u'group': self.deleted_groups
}
self.messages = {
u'confirm': {
u'all': _(u'Are you sure you want to purge everything?'),
u'package': _(u'Are you sure you want to purge datasets?'),
u'organization':
_(u'Are you sure you want to purge organizations?'),
u'group': _(u'Are you sure you want to purge groups?')
},
u'success': {
u'package': _(u'{number} datasets have been purged'),
u'organization': _(u'{number} organizations have been purged'),
u'group': _(u'{number} groups have been purged')
},
u'empty': {
u'package': _(u'There are no datasets to purge'),
u'organization': _(u'There are no organizations to purge'),
u'group': _(u'There are no groups to purge')
}
}
def _get_deleted_datasets(
self
) -> Union["Query[model.Package]", List[Any]]:
if config.get_value('ckan.search.remove_deleted_packages'):
return self._get_deleted_datasets_from_db()
else:
return self._get_deleted_datasets_from_search_index()
def _get_deleted_datasets_from_db(self) -> "Query[model.Package]":
return model.Session.query(
model.Package
).filter_by(
state=model.State.DELETED
)
def _get_deleted_datasets_from_search_index(self) -> List[Any]:
package_search = logic.get_action('package_search')
search_params = {
'fq': '+state:deleted',
'include_private': True,
}
base_results = package_search(
{'ignore_auth': True},
search_params
)
return base_results['results']
def get(self) -> str:
ent_type = request.args.get(u'name')
if ent_type:
return base.render(u'admin/snippets/confirm_delete.html',
extra_vars={
u'ent_type': ent_type,
u'messages': self.messages})
data = dict(data=self.deleted_entities, messages=self.messages)
return base.render(u'admin/trash.html', extra_vars=data)
def post(self) -> Response:
if u'cancel' in request.form:
return h.redirect_to(u'admin.trash')
req_action = request.form.get(u'action', '')
if req_action == u'all':
self.purge_all()
elif req_action in (u'package', u'organization', u'group'):
self.purge_entity(req_action)
else:
h.flash_error(_(u'Action not implemented.'))
return h.redirect_to(u'admin.trash')
def purge_all(self):
actions = (u'dataset_purge', u'group_purge', u'organization_purge')
entities = (
self.deleted_packages,
self.deleted_groups,
self.deleted_orgs
)
for action, deleted_entities in zip(actions, entities):
for entity in deleted_entities:
ent_id = entity.id if hasattr(entity, 'id') \
else entity['id']
logic.get_action(action)(
{u'user': g.user}, {u'id': ent_id}
)
model.Session.remove()
h.flash_success(_(u'Massive purge complete'))
def purge_entity(self, ent_type: str):
entities = self.deleted_entities[ent_type]
number = len(entities) if type(entities) == list else entities.count()
for ent in entities:
entity_id = ent.id if hasattr(ent, 'id') else ent['id']
logic.get_action(self._get_purge_action(ent_type))(
{u'user': g.user},
{u'id': entity_id}
)
model.Session.remove()
h.flash_success(self.messages[u'success'][ent_type].format(
number=number
))
@staticmethod
def _get_purge_action(ent_type: str) -> str:
actions = {
"package": "dataset_purge",
"organization": "organization_purge",
"group": "group_purge",
}
return actions[ent_type]
admin.add_url_rule(
u'/', view_func=index, methods=['GET'], strict_slashes=False
)
admin.add_url_rule(u'/reset_config',
view_func=ResetConfigView.as_view(str(u'reset_config')))
admin.add_url_rule(u'/config', view_func=ConfigView.as_view(str(u'config')))
admin.add_url_rule(u'/trash', view_func=TrashView.as_view(str(u'trash')))
| true | true |
f7199e876ff568e200ceb2dbf17c8e228d670c71 | 1,919 | py | Python | test/Entry.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | 1 | 2019-09-18T06:37:02.000Z | 2019-09-18T06:37:02.000Z | test/Entry.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | test/Entry.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Entry.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that the Entry() global function and environment method work
correctly, and that the former does not try to expand construction
variables.
"""
# SCons functional test harness.
import TestSCons
test = TestSCons.TestSCons()
# SConstruct exercising the global Entry() function and the env.Entry()
# method (note: Python 2 era print statements inside the generated file).
test.write('SConstruct', """
env = Environment(FOO = 'fff', BAR = 'bbb')
print Entry('ddd')
print Entry('$FOO')
print Entry('${BAR}_$BAR')
print env.Entry('eee')
print env.Entry('$FOO')
print env.Entry('${BAR}_$BAR')
""")
# Expected output: the global Entry() leaves $FOO/${BAR} unexpanded, while
# env.Entry() substitutes them from the construction environment.
test.run(stdout = test.wrap_stdout(read_str = """\
ddd
$FOO
${BAR}_$BAR
eee
fff
bbb_bbb
""", build_str = """\
scons: `.' is up to date.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 29.523077 | 87 | 0.738927 |
__revision__ = "test/Entry.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(FOO = 'fff', BAR = 'bbb')
print Entry('ddd')
print Entry('$FOO')
print Entry('${BAR}_$BAR')
print env.Entry('eee')
print env.Entry('$FOO')
print env.Entry('${BAR}_$BAR')
""")
test.run(stdout = test.wrap_stdout(read_str = """\
ddd
$FOO
${BAR}_$BAR
eee
fff
bbb_bbb
""", build_str = """\
scons: `.' is up to date.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| true | true |
f719a19921e0717fd82f09f6ab40bd54a1718ceb | 18,564 | py | Python | sciencebeam_parser/models/model.py | elifesciences/sciencebeam-parser | 66964f283612b8d6fa8a23ad8790292c1ec07651 | [
"MIT"
] | 13 | 2021-08-04T12:11:17.000Z | 2022-03-28T20:41:20.000Z | sciencebeam_parser/models/model.py | elifesciences/sciencebeam-parser | 66964f283612b8d6fa8a23ad8790292c1ec07651 | [
"MIT"
] | 33 | 2021-08-05T08:37:59.000Z | 2022-03-29T18:42:09.000Z | sciencebeam_parser/models/model.py | elifesciences/sciencebeam-parser | 66964f283612b8d6fa8a23ad8790292c1ec07651 | [
"MIT"
] | 1 | 2022-01-05T14:53:06.000Z | 2022-01-05T14:53:06.000Z | import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass, field
from typing import (
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union
)
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_crf_lines
from sciencebeam_parser.utils.labels import get_split_prefix_label, strip_tag_prefix
from sciencebeam_parser.document.layout_document import (
LayoutToken,
LayoutLine,
LayoutBlock,
LayoutPage,
LayoutDocument
)
from sciencebeam_parser.models.data import (
AppFeaturesContext,
DocumentFeaturesContext,
LabeledLayoutModelData,
LayoutModelData,
ModelDataGenerator
)
from sciencebeam_parser.models.extract import ModelSemanticExtractor
from sciencebeam_parser.models.training_data import TeiTrainingDataGenerator
from sciencebeam_parser.document.semantic_document import SemanticContentWrapper
from sciencebeam_parser.models.model_impl import ModelImpl, T_ModelImplFactory
from sciencebeam_parser.utils.lazy import LazyLoaded, Preloadable
LOGGER = logging.getLogger(__name__)
# Generic type variables used by the label -> item mapping helpers below.
T = TypeVar('T')
U = TypeVar('U')
@dataclass
class LayoutModelLabel:
    """A single predicted label attached to a layout token (or line)."""
    label: str  # possibly prefixed tag (e.g. 'B-...'), or 'O'
    label_token_text: str  # the token text the label was predicted for
    # source line/token; excluded from repr to keep log output compact
    layout_line: Optional[LayoutLine] = field(repr=False, default=None)
    layout_token: Optional[LayoutToken] = field(repr=False, default=None)
class LabeledLayoutToken(NamedTuple):
    """A layout token paired with its predicted label."""
    label: str
    layout_token: LayoutToken
class NewDocumentMarker:
    """Sentinel type yielded between documents in flattened result streams."""
    pass
# singleton sentinel instance (detected via isinstance checks downstream)
NEW_DOCUMENT_MARKER = NewDocumentMarker()
def iter_entities_including_other(seq: List[str]) -> Iterable[Tuple[str, int, int]]:
    """Yield ``(tag, start, end)`` entity spans for a label sequence.

    Similar to ``get_entities``, but spans for the other (``O``) tag are
    yielded as well. A new span starts on a ``B-`` prefix or a tag change.
    """
    current_tag = 'O'
    current_start = 0
    for position, prefixed_label in enumerate(seq):
        prefix, plain_tag = get_split_prefix_label(prefixed_label)
        if prefix == 'B' or plain_tag != current_tag:
            if current_start < position:
                yield current_tag, current_start, position - 1
            current_tag = plain_tag
            current_start = position
    if current_start < len(seq):
        yield current_tag, current_start, len(seq) - 1
def get_entities_including_other(seq: List[str]) -> List[Tuple[str, int, int]]:
    """List version of ``iter_entities_including_other``."""
    return list(iter_entities_including_other(seq))
class LayoutDocumentLabelResult:
    """Labelling result for a layout document, with lookup of labels by tag
    and filtering of the document down to selected labels."""

    def __init__(
        self,
        layout_document: LayoutDocument,
        layout_model_label_iterable: Iterable[LayoutModelLabel]
    ):
        self.layout_document = layout_document
        self.layout_model_label_list = list(layout_model_label_iterable)
        # tag (without B-/I- prefix) -> labels carrying that tag
        self.layout_document_labels_by_label: Dict[str, List[LayoutModelLabel]] = (
            defaultdict(list)
        )
        for layout_model_label in self.layout_model_label_list:
            tag_without_prefix = strip_tag_prefix(layout_model_label.label)
            self.layout_document_labels_by_label[tag_without_prefix].append(
                layout_model_label
            )

    def get_available_labels(self) -> Set[str]:
        """Return the set of (unprefixed) tags present in the result."""
        return set(self.layout_document_labels_by_label.keys())

    def get_layout_document_labels_by_labels(self, labels: List[str]) -> List[LayoutModelLabel]:
        """Return all labels matching any of the given (unprefixed) tags."""
        if not labels:
            return []
        if len(labels) == 1:
            return self.layout_document_labels_by_label.get(labels[0], [])
        result: List[LayoutModelLabel] = []
        for label in labels:
            result.extend(self.layout_document_labels_by_label.get(label, []))
        return result

    def get_filtered_document_by_label(self, label: str) -> LayoutDocument:
        return self.get_filtered_document_by_labels([label])

    def get_filtered_document_by_labels(
        self,
        labels: List[str]
    ):  # pylint: disable=too-many-branches
        """Return a new LayoutDocument containing only the lines/tokens
        labelled with the given tags (token-level if token labels are
        available, otherwise line-level)."""
        layout_document = LayoutDocument(pages=[])
        layout_document_labels = self.get_layout_document_labels_by_labels(labels)
        if not layout_document_labels:
            LOGGER.warning(
                'no layout_lines_to_include found for: %r, available keys=%r',
                labels, self.layout_document_labels_by_label.keys()
            )
            return layout_document
        layout_token_ids_to_include = {
            id(layout_document_label.layout_token)
            for layout_document_label in layout_document_labels
            if layout_document_label.layout_token
        }
        LOGGER.debug('layout_tokens_to_include: %s', layout_token_ids_to_include)
        layout_line_ids_to_include: Set[int] = set()
        if not layout_token_ids_to_include:
            # no token-level labels: fall back to whole-line selection
            layout_line_ids_to_include = {
                id(layout_document_label.layout_line)
                for layout_document_label in layout_document_labels
                if layout_document_label.layout_line
            }
        LOGGER.debug('layout_line_ids_to_include: %s', layout_line_ids_to_include)
        result_page: Optional[LayoutPage] = None
        for page in self.layout_document.pages:  # pylint: disable=too-many-nested-blocks
            result_page = None
            result_block: Optional[LayoutBlock] = None
            for block in page.blocks:
                result_block = None
                for line in block.lines:
                    accepted_line: Optional[LayoutLine] = None
                    if layout_token_ids_to_include:
                        accepted_tokens: List[LayoutToken] = []
                        for token in line.tokens:
                            if id(token) in layout_token_ids_to_include:
                                accepted_tokens.append(token)
                        if not accepted_tokens:
                            continue
                        # fix: previously compared an int to a list (always
                        # False); reuse the original line when all of its
                        # tokens were accepted
                        if len(line.tokens) == len(accepted_tokens):
                            accepted_line = line
                        else:
                            accepted_line = LayoutLine(tokens=accepted_tokens)
                    else:
                        if id(line) not in layout_line_ids_to_include:
                            continue
                        accepted_line = line
                    # lazily create page/block containers on first accepted line
                    if result_page is None:
                        result_page = LayoutPage(blocks=[])
                        layout_document.pages.append(result_page)
                    if result_block is None:
                        result_block = LayoutBlock(lines=[])
                        result_page.blocks.append(result_block)
                    result_block.lines.append(accepted_line)
        return layout_document
def iter_entity_layout_blocks_for_labeled_layout_tokens(
    labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
    """Group labelled tokens into ``(tag, LayoutBlock)`` entity spans.

    Fix: materialise the iterable once. The previous implementation
    iterated it in two separate comprehensions, which silently produces
    no labels when a generator is passed in.
    """
    labeled_layout_token_list = list(labeled_layout_tokens)
    layout_tokens = [result.layout_token for result in labeled_layout_token_list]
    labels = [result.label for result in labeled_layout_token_list]
    LOGGER.debug('layout_tokens: %s', layout_tokens)
    LOGGER.debug('labels: %s', labels)
    for tag, start, end in get_entities_including_other(labels):
        yield tag, LayoutBlock.for_tokens(layout_tokens[start:end + 1])
def iter_entity_values_predicted_labels(
    tag_result: List[Tuple[str, str]]
) -> Iterable[Tuple[str, str]]:
    """Group ``(token, label)`` pairs into ``(tag, joined token text)`` spans.

    Fix: guard against an empty tag result — unpacking ``zip(*[])`` into
    two names raises ValueError.
    """
    if not tag_result:
        return
    tokens, labels = zip(*tag_result)
    LOGGER.debug('tokens: %s', tokens)
    LOGGER.debug('labels: %s', labels)
    for tag, start, end in get_entities_including_other(list(labels)):
        yield tag, ' '.join(tokens[start:end + 1])
def iter_labeled_layout_token_for_layout_model_label(
    layout_model_label_iterable: Iterable[LayoutModelLabel]
) -> Iterable[LabeledLayoutToken]:
    """Convert model labels to (label, token) pairs.

    Every label is required to carry a layout token.
    """
    for model_label in layout_model_label_iterable:
        token = model_label.layout_token
        assert token is not None
        yield LabeledLayoutToken(model_label.label, token)
def iter_data_lines_for_model_data_iterables(
    model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> Iterable[str]:
    """Yield the data lines of each document, separated by a blank line."""
    is_first_document = True
    for model_data_list in model_data_iterables:
        if not is_first_document:
            yield ''
        is_first_document = False
        for model_data in model_data_list:
            yield model_data.data_line
class Model(ABC, Preloadable):
    def __init__(
        self,
        model_impl_factory: Optional[T_ModelImplFactory],
        model_config: Optional[dict] = None
    ) -> None:
        """
        :param model_impl_factory: factory creating the model implementation
            on first use (required for `model_impl` access, see
            `_load_model_impl`)
        :param model_config: optional model configuration dict
        """
        self._model_impl_factory = model_impl_factory
        # defer creating the (potentially expensive) model impl until needed
        self._lazy_model_impl = LazyLoaded[ModelImpl](self._load_model_impl)
        self.model_config = model_config or {}
def __repr__(self) -> str:
return '%s(model_config=%r, loaded=%r)' % (
type(self).__name__, self.model_config, self._lazy_model_impl.is_loaded
)
    @abstractmethod
    def get_data_generator(
        self,
        document_features_context: DocumentFeaturesContext
    ) -> ModelDataGenerator:
        """Return the generator producing this model's input data lines."""
        pass
    # @abstractmethod
    def get_semantic_extractor(self) -> ModelSemanticExtractor:
        """Return the extractor mapping labelled entities to semantic content.

        Not abstract (yet): models without semantic extraction keep the
        default NotImplementedError behaviour.
        """
        raise NotImplementedError()
    # @abstractmethod
    def get_tei_training_data_generator(self) -> TeiTrainingDataGenerator:
        """Return the TEI training data generator for this model.

        Not abstract (yet): models without training data generation keep
        the default NotImplementedError behaviour.
        """
        raise NotImplementedError()
def _load_model_impl(self) -> ModelImpl:
assert self._model_impl_factory, 'model impl factory required'
LOGGER.info('creating model impl: %r', self._model_impl_factory)
model_impl = self._model_impl_factory()
if not isinstance(model_impl, ModelImpl):
raise TypeError('invalid model impl type: %r' % model_impl)
return model_impl
    @property
    def model_impl(self) -> ModelImpl:
        """The lazily created model implementation (created on first access)."""
        was_loaded = self._lazy_model_impl.is_loaded
        model_impl = self._lazy_model_impl.get()
        if was_loaded:
            # first load is logged in _load_model_impl; this logs re-use
            LOGGER.info('model impl already loaded: %r', model_impl)
        return model_impl
def preload(self):
model_impl = self.model_impl
model_impl.preload()
    def iter_semantic_content_for_entity_blocks(
        self,
        entity_tokens: Iterable[Tuple[str, LayoutBlock]],
        **kwargs
    ) -> Iterable[SemanticContentWrapper]:
        """Delegate (tag, block) pairs to this model's semantic extractor."""
        return self.get_semantic_extractor().iter_semantic_content_for_entity_blocks(
            entity_tokens,
            **kwargs
        )
    def predict_labels(
        self,
        texts: List[List[str]],
        features: List[List[List[str]]],
        output_format: Optional[str] = None
    ) -> List[List[Tuple[str, str]]]:
        """Run sequence labelling; returns (token_text, label) pairs per doc."""
        return self.model_impl.predict_labels(texts, features, output_format)
    def _iter_flat_label_model_data_lists_to(  # pylint: disable=too-many-locals
        self,
        model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
        item_factory: Callable[[str, LayoutModelData], T]
    ) -> Iterable[Union[T, NewDocumentMarker]]:
        """Tag multiple documents in one model call and yield
        ``item_factory(label, model_data)`` for every token, with
        NEW_DOCUMENT_MARKER yielded between documents.
        """
        # Note: currently we do need a list
        model_data_lists = list(model_data_list_iterable)
        if not model_data_lists:
            return
        data_lines = list(iter_data_lines_for_model_data_iterables(
            model_data_lists
        ))
        texts, features = load_data_crf_lines(data_lines)
        texts = texts.tolist()
        tag_result = self.predict_labels(
            texts=texts, features=features, output_format=None
        )
        if not tag_result:
            return
        # sanity check: one tag result per input document
        if len(tag_result) != len(model_data_lists):
            raise AssertionError('tag result does not match number of docs: %d != %d' % (
                len(tag_result), len(model_data_lists)
            ))
        for index, (doc_tag_result, model_data_list) in enumerate(
            zip(tag_result, model_data_lists)
        ):
            if index > 0:
                yield NEW_DOCUMENT_MARKER
            # sanity check: one label per token of the document
            if len(doc_tag_result) != len(model_data_list):
                raise AssertionError('doc tag result does not match data: %d != %d' % (
                    len(doc_tag_result), len(model_data_list)
                ))
            for token_tag_result, token_model_data in zip(doc_tag_result, model_data_list):
                label_token_text, token_label = token_tag_result
                # the labels must line up with the exact token texts sent
                if label_token_text != token_model_data.label_token_text:
                    raise AssertionError(
                        f'actual: {repr(label_token_text)}'
                        f', expected: {repr(token_model_data.label_token_text)}'
                    )
                yield item_factory(
                    token_label,
                    token_model_data
                )
def _iter_stacked_label_model_data_lists_to(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
item_factory: Callable[[str, LayoutModelData], T]
) -> Iterable[Sequence[T]]:
# Note: currently we do need a list
model_data_lists = list(model_data_list_iterable)
if not model_data_lists:
return
doc_items: List[T] = []
result_doc_count = 0
for item in self._iter_flat_label_model_data_lists_to(
model_data_lists,
item_factory=item_factory
):
if isinstance(item, NewDocumentMarker):
yield doc_items
doc_items = []
result_doc_count += 1
continue
doc_items.append(item)
if result_doc_count < len(model_data_lists):
yield doc_items
def iter_label_layout_documents(
self,
layout_documents: List[LayoutDocument],
app_features_context: AppFeaturesContext
) -> Iterable[List[LayoutModelLabel]]:
doc_layout_model_labels: List[LayoutModelLabel] = []
result_doc_count = 0
for layout_model_label in self._iter_label_layout_documents(
layout_documents,
app_features_context=app_features_context
):
if isinstance(layout_model_label, NewDocumentMarker):
yield doc_layout_model_labels
doc_layout_model_labels = []
result_doc_count += 1
continue
doc_layout_model_labels.append(layout_model_label)
if result_doc_count < len(layout_documents):
yield doc_layout_model_labels
def iter_label_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> Iterable[LayoutModelLabel]:
for layout_model_label in self._iter_label_layout_documents(
[layout_document],
app_features_context=app_features_context
):
assert isinstance(layout_model_label, LayoutModelLabel)
yield layout_model_label
def _iter_label_layout_documents( # pylint: disable=too-many-locals
self,
layout_documents: Iterable[LayoutDocument],
app_features_context: AppFeaturesContext
) -> Iterable[Union[LayoutModelLabel, NewDocumentMarker]]:
data_generator = self.get_data_generator(
document_features_context=DocumentFeaturesContext(
app_features_context=app_features_context
)
)
model_data_lists = [
list(data_generator.iter_model_data_for_layout_document(
layout_document
))
for layout_document in layout_documents
]
return self._iter_flat_label_model_data_lists_to(
model_data_lists,
lambda label, model_data: LayoutModelLabel(
label=label,
label_token_text=model_data.label_token_text,
layout_line=model_data.layout_line,
layout_token=model_data.layout_token
)
)
def iter_labeled_model_data_list_for_model_data_list_iterable(
self,
model_data_list_iterable: Iterable[Sequence[LayoutModelData]]
) -> Iterable[Sequence[LabeledLayoutModelData]]:
return self._iter_stacked_label_model_data_lists_to(
model_data_list_iterable,
lambda label, model_data: LabeledLayoutModelData.from_model_data(
model_data,
label=label
)
)
def get_label_layout_document_result(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> LayoutDocumentLabelResult:
return LayoutDocumentLabelResult(
layout_document=layout_document,
layout_model_label_iterable=self.iter_label_layout_document(
layout_document,
app_features_context=app_features_context
)
)
def iter_predict_labels_for_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> Iterable[LabeledLayoutToken]:
# Note: this should get merged with Model.iter_label_layout_document
yield from iter_labeled_layout_token_for_layout_model_label(
self.iter_label_layout_document(
layout_document,
app_features_context=app_features_context
)
)
def predict_labels_for_layout_document(
self,
layout_document: LayoutDocument,
app_features_context: AppFeaturesContext
) -> List[LabeledLayoutToken]:
return list(self.iter_predict_labels_for_layout_document(
layout_document,
app_features_context=app_features_context
))
def predict_labels_for_layout_documents(
self,
layout_documents: List[LayoutDocument],
app_features_context: AppFeaturesContext
) -> List[List[LabeledLayoutToken]]:
return [
list(iter_labeled_layout_token_for_layout_model_label(
layout_model_labels
))
for layout_model_labels in self.iter_label_layout_documents(
layout_documents,
app_features_context=app_features_context
)
]
def iter_entity_layout_blocks_for_labeled_layout_tokens(
self,
labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
return iter_entity_layout_blocks_for_labeled_layout_tokens(labeled_layout_tokens)
def iter_semantic_content_for_labeled_layout_tokens(
self,
labeled_layout_tokens: Iterable[LabeledLayoutToken],
**kwargs
) -> Iterable[SemanticContentWrapper]:
return self.iter_semantic_content_for_entity_blocks(
self.iter_entity_layout_blocks_for_labeled_layout_tokens(
labeled_layout_tokens
),
**kwargs
)
| 37.053892 | 96 | 0.652823 | import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass, field
from typing import (
Callable,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union
)
from sciencebeam_trainer_delft.sequence_labelling.reader import load_data_crf_lines
from sciencebeam_parser.utils.labels import get_split_prefix_label, strip_tag_prefix
from sciencebeam_parser.document.layout_document import (
LayoutToken,
LayoutLine,
LayoutBlock,
LayoutPage,
LayoutDocument
)
from sciencebeam_parser.models.data import (
AppFeaturesContext,
DocumentFeaturesContext,
LabeledLayoutModelData,
LayoutModelData,
ModelDataGenerator
)
from sciencebeam_parser.models.extract import ModelSemanticExtractor
from sciencebeam_parser.models.training_data import TeiTrainingDataGenerator
from sciencebeam_parser.document.semantic_document import SemanticContentWrapper
from sciencebeam_parser.models.model_impl import ModelImpl, T_ModelImplFactory
from sciencebeam_parser.utils.lazy import LazyLoaded, Preloadable
# Module-level logger plus generic type variables used by the helpers below.
LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
U = TypeVar('U')
@dataclass
class LayoutModelLabel:
    """A single predicted label together with the layout element it refers to."""
    label: str  # predicted tag for the token
    label_token_text: str  # the token text the label was predicted for
    # line/token references are excluded from repr to keep log output compact
    layout_line: Optional[LayoutLine] = field(repr=False, default=None)
    layout_token: Optional[LayoutToken] = field(repr=False, default=None)
class LabeledLayoutToken(NamedTuple):
    """Pairs a predicted label with the ``LayoutToken`` it applies to."""
    label: str
    layout_token: LayoutToken
class NewDocumentMarker:
    """Sentinel type; instances separate documents in flat result streams."""
    pass
# Shared sentinel instance yielded between documents in flat result streams.
NEW_DOCUMENT_MARKER = NewDocumentMarker()
def iter_entities_including_other(seq: List[str]) -> Iterable[Tuple[str, int, int]]:
    """Yield (tag, start_index, end_index) spans over *seq* (both indices inclusive).

    Unlike typical IOB entity iterators this also yields spans for the
    'O' (other) tag. A new span starts when the split prefix is 'B' or the
    bare tag changes.
    """
    current_tag = 'O'
    span_start = 0
    for position, prefixed_tag in enumerate(seq):
        prefix, bare_tag = get_split_prefix_label(prefixed_tag)
        starts_new_span = (prefix == 'B') or (bare_tag != current_tag)
        if starts_new_span:
            if span_start < position:
                # close the previous (non-empty) span
                yield current_tag, span_start, position - 1
            current_tag = bare_tag
            span_start = position
    if span_start < len(seq):
        # trailing span up to the end of the sequence
        yield current_tag, span_start, len(seq) - 1
def get_entities_including_other(seq: List[str]) -> List[Tuple[str, int, int]]:
    """List variant of ``iter_entities_including_other``."""
    entity_spans = iter_entities_including_other(seq)
    return list(entity_spans)
class LayoutDocumentLabelResult:
    """Holds the labels predicted for a layout document and supports
    filtering the document down to the elements carrying given labels."""
    def __init__(
        self,
        layout_document: LayoutDocument,
        layout_model_label_iterable: Iterable[LayoutModelLabel]
    ):
        self.layout_document = layout_document
        self.layout_model_label_list = list(layout_model_label_iterable)
        # labels grouped by prefix-stripped tag (see strip_tag_prefix)
        self.layout_document_labels_by_label: Dict[str, List[LayoutModelLabel]] = (
            defaultdict(list)
        )
        for layout_model_label in self.layout_model_label_list:
            tag_without_prefix = strip_tag_prefix(layout_model_label.label)
            self.layout_document_labels_by_label[tag_without_prefix].append(
                layout_model_label
            )
    def get_available_labels(self) -> Set[str]:
        """Return the set of (prefix-stripped) tags present in the result."""
        return set(self.layout_document_labels_by_label.keys())
    def get_layout_document_labels_by_labels(self, labels: List[str]) -> List[LayoutModelLabel]:
        """Return all labels matching any of *labels* (in the given label order)."""
        if not labels:
            return []
        if len(labels) == 1:
            return self.layout_document_labels_by_label.get(labels[0], [])
        result: List[LayoutModelLabel] = []
        for label in labels:
            result.extend(self.layout_document_labels_by_label.get(label, []))
        return result
    def get_filtered_document_by_label(self, label: str) -> LayoutDocument:
        """Single-label convenience wrapper around get_filtered_document_by_labels."""
        return self.get_filtered_document_by_labels([label])
    def get_filtered_document_by_labels(
        self,
        labels: List[str]
    ):
        """Return a new LayoutDocument containing only the lines/tokens labelled
        with any of *labels*.

        Filtering is token-based when the labels reference layout tokens;
        otherwise it falls back to whole-line filtering.
        """
        layout_document = LayoutDocument(pages=[])
        layout_document_labels = self.get_layout_document_labels_by_labels(labels)
        if not layout_document_labels:
            LOGGER.warning(
                'no layout_lines_to_include found for: %r, available keys=%r',
                labels, self.layout_document_labels_by_label.keys()
            )
            return layout_document
        # identity-based lookup: the same token/line objects are shared with
        # the source document
        layout_token_ids_to_include = {
            id(layout_document_label.layout_token)
            for layout_document_label in layout_document_labels
            if layout_document_label.layout_token
        }
        LOGGER.debug('layout_tokens_to_include: %s', layout_token_ids_to_include)
        layout_line_ids_to_include: Set[int] = set()
        if not layout_token_ids_to_include:
            layout_line_ids_to_include = {
                id(layout_document_label.layout_line)
                for layout_document_label in layout_document_labels
                if layout_document_label.layout_line
            }
        LOGGER.debug('layout_line_ids_to_include: %s', layout_line_ids_to_include)
        result_page: Optional[LayoutPage] = None
        for page in self.layout_document.pages:
            # result pages/blocks are created lazily so empty ones are not added
            result_page = None
            result_block: Optional[LayoutBlock] = None
            for block in page.blocks:
                result_block = None
                for line in block.lines:
                    accepted_line: Optional[LayoutLine] = None
                    if layout_token_ids_to_include:
                        accepted_tokens: List[LayoutToken] = []
                        for token in line.tokens:
                            if id(token) in layout_token_ids_to_include:
                                accepted_tokens.append(token)
                        if not accepted_tokens:
                            continue
                        # bug fix: previously compared len(line.tokens) with the
                        # accepted_tokens *list* (always False), so fully-accepted
                        # lines were needlessly copied instead of reused
                        if len(line.tokens) == len(accepted_tokens):
                            accepted_line = line
                        else:
                            accepted_line = LayoutLine(tokens=accepted_tokens)
                    else:
                        if id(line) not in layout_line_ids_to_include:
                            continue
                        accepted_line = line
                    if result_page is None:
                        result_page = LayoutPage(blocks=[])
                        layout_document.pages.append(result_page)
                    if result_block is None:
                        result_block = LayoutBlock(lines=[])
                        result_page.blocks.append(result_block)
                    result_block.lines.append(accepted_line)
        return layout_document
def iter_entity_layout_blocks_for_labeled_layout_tokens(
    labeled_layout_tokens: Iterable[LabeledLayoutToken]
) -> Iterable[Tuple[str, LayoutBlock]]:
    """Group consecutive equally-tagged tokens into (tag, LayoutBlock) pairs."""
    # Materialise first: the argument is only guaranteed to be iterable once;
    # the previous implementation iterated it twice, which would silently
    # produce empty labels for a one-shot iterator.
    labeled_layout_token_list = list(labeled_layout_tokens)
    layout_tokens = [result.layout_token for result in labeled_layout_token_list]
    labels = [result.label for result in labeled_layout_token_list]
    LOGGER.debug('layout_tokens: %s', layout_tokens)
    LOGGER.debug('labels: %s', labels)
    for tag, start, end in get_entities_including_other(list(labels)):
        yield tag, LayoutBlock.for_tokens(layout_tokens[start:end + 1])
def iter_entity_values_predicted_labels(
    tag_result: List[Tuple[str, str]]
) -> Iterable[Tuple[str, str]]:
    """Yield (tag, joined token text) for each contiguous entity span in *tag_result*."""
    # zip(*[]) would make the two-name unpacking below raise ValueError;
    # an empty tag result simply contains no entities.
    if not tag_result:
        return
    tokens, labels = zip(*tag_result)
    LOGGER.debug('tokens: %s', tokens)
    LOGGER.debug('labels: %s', labels)
    for tag, start, end in get_entities_including_other(list(labels)):
        yield tag, ' '.join(tokens[start:end + 1])
def iter_labeled_layout_token_for_layout_model_label(
    layout_model_label_iterable: Iterable[LayoutModelLabel]
) -> Iterable[LabeledLayoutToken]:
    """Convert ``LayoutModelLabel`` items into ``LabeledLayoutToken`` pairs.

    Every item must reference a layout token (line-only labels are not
    supported here).
    """
    for model_label in layout_model_label_iterable:
        token = model_label.layout_token
        assert token is not None
        yield LabeledLayoutToken(model_label.label, token)
def iter_data_lines_for_model_data_iterables(
    model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> Iterable[str]:
    """Flatten per-document model data into data lines.

    A single empty string is emitted between consecutive documents, acting as
    the document separator of the CRF data-line format.
    """
    is_first_document = True
    for model_data_list in model_data_iterables:
        if not is_first_document:
            yield ''
        is_first_document = False
        for model_data in model_data_list:
            yield model_data.data_line
class Model(ABC, Preloadable):
    """Base class for sequence-labelling layout models.

    Wraps a lazily created ``ModelImpl`` and provides helpers to run label
    prediction over layout documents and to regroup the flat results per
    document (documents are separated by ``NEW_DOCUMENT_MARKER``).
    """
    def __init__(
        self,
        model_impl_factory: Optional[T_ModelImplFactory],
        model_config: Optional[dict] = None
    ) -> None:
        # the factory is only invoked on first access of `model_impl`
        self._model_impl_factory = model_impl_factory
        self._lazy_model_impl = LazyLoaded[ModelImpl](self._load_model_impl)
        self.model_config = model_config or {}
    def __repr__(self) -> str:
        return '%s(model_config=%r, loaded=%r)' % (
            type(self).__name__, self.model_config, self._lazy_model_impl.is_loaded
        )
    @abstractmethod
    def get_data_generator(
        self,
        document_features_context: DocumentFeaturesContext
    ) -> ModelDataGenerator:
        """Return the generator producing model data for layout documents."""
        pass
    def get_semantic_extractor(self) -> ModelSemanticExtractor:
        """Return the semantic extractor; optional, overridden by subclasses."""
        raise NotImplementedError()
    def get_tei_training_data_generator(self) -> TeiTrainingDataGenerator:
        """Return the TEI training data generator; optional, overridden by subclasses."""
        raise NotImplementedError()
    def _load_model_impl(self) -> ModelImpl:
        """Create the model implementation via the configured factory."""
        assert self._model_impl_factory, 'model impl factory required'
        LOGGER.info('creating model impl: %r', self._model_impl_factory)
        model_impl = self._model_impl_factory()
        if not isinstance(model_impl, ModelImpl):
            raise TypeError('invalid model impl type: %r' % model_impl)
        return model_impl
    @property
    def model_impl(self) -> ModelImpl:
        """The model implementation; loaded lazily on first access."""
        was_loaded = self._lazy_model_impl.is_loaded
        model_impl = self._lazy_model_impl.get()
        if was_loaded:
            LOGGER.info('model impl already loaded: %r', model_impl)
        return model_impl
    def preload(self):
        """Force loading of the model implementation and preload it."""
        model_impl = self.model_impl
        model_impl.preload()
    def iter_semantic_content_for_entity_blocks(
        self,
        entity_tokens: Iterable[Tuple[str, LayoutBlock]],
        **kwargs
    ) -> Iterable[SemanticContentWrapper]:
        """Delegate semantic extraction of (tag, block) pairs to the extractor."""
        return self.get_semantic_extractor().iter_semantic_content_for_entity_blocks(
            entity_tokens,
            **kwargs
        )
    def predict_labels(
        self,
        texts: List[List[str]],
        features: List[List[List[str]]],
        output_format: Optional[str] = None
    ) -> List[List[Tuple[str, str]]]:
        """Run the underlying implementation's label prediction."""
        return self.model_impl.predict_labels(texts, features, output_format)
    def _iter_flat_label_model_data_lists_to(
        self,
        model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
        item_factory: Callable[[str, LayoutModelData], T]
    ) -> Iterable[Union[T, NewDocumentMarker]]:
        """Predict labels for all documents, yielding factory-built items.

        Yields NEW_DOCUMENT_MARKER between documents; raises AssertionError
        when the prediction output does not line up with the input data.
        """
        model_data_lists = list(model_data_list_iterable)
        if not model_data_lists:
            return
        data_lines = list(iter_data_lines_for_model_data_iterables(
            model_data_lists
        ))
        texts, features = load_data_crf_lines(data_lines)
        texts = texts.tolist()
        tag_result = self.predict_labels(
            texts=texts, features=features, output_format=None
        )
        if not tag_result:
            return
        if len(tag_result) != len(model_data_lists):
            raise AssertionError('tag result does not match number of docs: %d != %d' % (
                len(tag_result), len(model_data_lists)
            ))
        for index, (doc_tag_result, model_data_list) in enumerate(
            zip(tag_result, model_data_lists)
        ):
            # marker between documents only, not before the first one
            if index > 0:
                yield NEW_DOCUMENT_MARKER
            if len(doc_tag_result) != len(model_data_list):
                raise AssertionError('doc tag result does not match data: %d != %d' % (
                    len(doc_tag_result), len(model_data_list)
                ))
            for token_tag_result, token_model_data in zip(doc_tag_result, model_data_list):
                label_token_text, token_label = token_tag_result
                # sanity check: predicted token text must match the source data
                if label_token_text != token_model_data.label_token_text:
                    raise AssertionError(
                        f'actual: {repr(label_token_text)}'
                        f', expected: {repr(token_model_data.label_token_text)}'
                    )
                yield item_factory(
                    token_label,
                    token_model_data
                )
    def _iter_stacked_label_model_data_lists_to(
        self,
        model_data_list_iterable: Iterable[Sequence[LayoutModelData]],
        item_factory: Callable[[str, LayoutModelData], T]
    ) -> Iterable[Sequence[T]]:
        """Group the flat labelled stream back into one sequence per document."""
        model_data_lists = list(model_data_list_iterable)
        if not model_data_lists:
            return
        doc_items: List[T] = []
        result_doc_count = 0
        for item in self._iter_flat_label_model_data_lists_to(
            model_data_lists,
            item_factory=item_factory
        ):
            if isinstance(item, NewDocumentMarker):
                yield doc_items
                doc_items = []
                result_doc_count += 1
                continue
            doc_items.append(item)
        # the final document is not followed by a marker
        if result_doc_count < len(model_data_lists):
            yield doc_items
    def iter_label_layout_documents(
        self,
        layout_documents: List[LayoutDocument],
        app_features_context: AppFeaturesContext
    ) -> Iterable[List[LayoutModelLabel]]:
        """Label multiple documents; yields one label list per document."""
        doc_layout_model_labels: List[LayoutModelLabel] = []
        result_doc_count = 0
        for layout_model_label in self._iter_label_layout_documents(
            layout_documents,
            app_features_context=app_features_context
        ):
            if isinstance(layout_model_label, NewDocumentMarker):
                yield doc_layout_model_labels
                doc_layout_model_labels = []
                result_doc_count += 1
                continue
            doc_layout_model_labels.append(layout_model_label)
        # the final document is not followed by a marker
        if result_doc_count < len(layout_documents):
            yield doc_layout_model_labels
    def iter_label_layout_document(
        self,
        layout_document: LayoutDocument,
        app_features_context: AppFeaturesContext
    ) -> Iterable[LayoutModelLabel]:
        """Label a single document, yielding LayoutModelLabel items."""
        for layout_model_label in self._iter_label_layout_documents(
            [layout_document],
            app_features_context=app_features_context
        ):
            # a single input document cannot produce a NewDocumentMarker
            assert isinstance(layout_model_label, LayoutModelLabel)
            yield layout_model_label
    def _iter_label_layout_documents(
        self,
        layout_documents: Iterable[LayoutDocument],
        app_features_context: AppFeaturesContext
    ) -> Iterable[Union[LayoutModelLabel, NewDocumentMarker]]:
        """Generate model data per document and run flat labelling over it."""
        data_generator = self.get_data_generator(
            document_features_context=DocumentFeaturesContext(
                app_features_context=app_features_context
            )
        )
        model_data_lists = [
            list(data_generator.iter_model_data_for_layout_document(
                layout_document
            ))
            for layout_document in layout_documents
        ]
        return self._iter_flat_label_model_data_lists_to(
            model_data_lists,
            lambda label, model_data: LayoutModelLabel(
                label=label,
                label_token_text=model_data.label_token_text,
                layout_line=model_data.layout_line,
                layout_token=model_data.layout_token
            )
        )
    def iter_labeled_model_data_list_for_model_data_list_iterable(
        self,
        model_data_list_iterable: Iterable[Sequence[LayoutModelData]]
    ) -> Iterable[Sequence[LabeledLayoutModelData]]:
        """Label pre-generated model data, grouped per document."""
        return self._iter_stacked_label_model_data_lists_to(
            model_data_list_iterable,
            lambda label, model_data: LabeledLayoutModelData.from_model_data(
                model_data,
                label=label
            )
        )
    def get_label_layout_document_result(
        self,
        layout_document: LayoutDocument,
        app_features_context: AppFeaturesContext
    ) -> LayoutDocumentLabelResult:
        """Label the document and wrap the labels for label-based filtering."""
        return LayoutDocumentLabelResult(
            layout_document=layout_document,
            layout_model_label_iterable=self.iter_label_layout_document(
                layout_document,
                app_features_context=app_features_context
            )
        )
    def iter_predict_labels_for_layout_document(
        self,
        layout_document: LayoutDocument,
        app_features_context: AppFeaturesContext
    ) -> Iterable[LabeledLayoutToken]:
        """Yield a (label, token) pair for every token of the document."""
        yield from iter_labeled_layout_token_for_layout_model_label(
            self.iter_label_layout_document(
                layout_document,
                app_features_context=app_features_context
            )
        )
    def predict_labels_for_layout_document(
        self,
        layout_document: LayoutDocument,
        app_features_context: AppFeaturesContext
    ) -> List[LabeledLayoutToken]:
        """List variant of iter_predict_labels_for_layout_document."""
        return list(self.iter_predict_labels_for_layout_document(
            layout_document,
            app_features_context=app_features_context
        ))
    def predict_labels_for_layout_documents(
        self,
        layout_documents: List[LayoutDocument],
        app_features_context: AppFeaturesContext
    ) -> List[List[LabeledLayoutToken]]:
        """Label a batch of documents; one LabeledLayoutToken list each."""
        return [
            list(iter_labeled_layout_token_for_layout_model_label(
                layout_model_labels
            ))
            for layout_model_labels in self.iter_label_layout_documents(
                layout_documents,
                app_features_context=app_features_context
            )
        ]
    def iter_entity_layout_blocks_for_labeled_layout_tokens(
        self,
        labeled_layout_tokens: Iterable[LabeledLayoutToken]
    ) -> Iterable[Tuple[str, LayoutBlock]]:
        """Group labelled tokens into (tag, LayoutBlock) entity spans."""
        return iter_entity_layout_blocks_for_labeled_layout_tokens(labeled_layout_tokens)
    def iter_semantic_content_for_labeled_layout_tokens(
        self,
        labeled_layout_tokens: Iterable[LabeledLayoutToken],
        **kwargs
    ) -> Iterable[SemanticContentWrapper]:
        """Extract semantic content directly from labelled layout tokens."""
        return self.iter_semantic_content_for_entity_blocks(
            self.iter_entity_layout_blocks_for_labeled_layout_tokens(
                labeled_layout_tokens
            ),
            **kwargs
        )
| true | true |
f719a28a0f454eca48dc84c19a7a003b8073c988 | 225 | py | Python | tests/test_case_files/class_test_1.py | calkerns/dyc | ddc35e6c183137dc30b2a3a2f481098280167bd1 | [
"MIT"
] | 100 | 2019-04-04T23:38:20.000Z | 2022-03-30T18:14:16.000Z | tests/test_case_files/class_test_1.py | calkerns/dyc | ddc35e6c183137dc30b2a3a2f481098280167bd1 | [
"MIT"
] | 51 | 2019-04-04T20:18:47.000Z | 2021-10-05T17:17:20.000Z | tests/test_case_files/class_test_1.py | calkerns/dyc | ddc35e6c183137dc30b2a3a2f481098280167bd1 | [
"MIT"
] | 63 | 2019-04-04T20:38:57.000Z | 2021-05-25T02:23:16.000Z | class MyClass:
x = 1
class MyClass1(Parent1):
    """Fixture: single inheritance with one class attribute."""
    y = 1
class MyClass2(Parent1, Parent2):
    """Fixture: multiple inheritance with one class attribute."""
    z = 1
class MyClass3(Parent1):
    """Fixture: single inheritance with one class attribute."""
    a = 1
class MyClass4(Parent1, Parent2):
    """Fixture: multiple inheritance with one class attribute."""
    b = 1
| 10.714286 | 41 | 0.515556 | class MyClass:
x = 1
class MyClass1(Parent1):
    """Fixture: single inheritance with one class attribute."""
    y = 1
class MyClass2(Parent1, Parent2):
    """Fixture: multiple inheritance with one class attribute."""
    z = 1
class MyClass3(Parent1):
    """Fixture: single inheritance with one class attribute."""
    a = 1
class MyClass4(Parent1, Parent2):
    """Fixture: multiple inheritance with one class attribute."""
    b = 1
| true | true |
f719a460ed4a51e9b13467d22b0a48aecf11f8ca | 346 | py | Python | students/k3343/laboratory_works/Rolinskiy_Sergey/Laba_1/project_first_app/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 10 | 2020-03-20T09:06:12.000Z | 2021-07-27T13:06:02.000Z | students/k3343/laboratory_works/Rolinskiy_Sergey/Laba_1/project_first_app/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 134 | 2020-03-23T09:47:48.000Z | 2022-03-12T01:05:19.000Z | students/k3343/laboratory_works/Rolinskiy_Sergey/Laba_1/project_first_app/urls.py | TonikX/ITMO_ICT_-WebProgramming_2020 | ba566c1b3ab04585665c69860b713741906935a0 | [
"MIT"
] | 71 | 2020-03-20T12:45:56.000Z | 2021-10-31T19:22:25.000Z | from django.urls import path
from django.conf.urls import url
from project_first_app.views import *
# URL routes: home page, owner creation, login, and a per-id detail view.
urlpatterns = [
    path('',main,name='main'),
    path('createowner/',createowner,name='createowner'),
    path('login/',log_in,name='login'),
    path(r'<int:ho_id>',review,name='detail')
]
#path(r'getowners/<int:ow_id>',detail,name='detail'), | 31.454545 | 56 | 0.699422 | from django.urls import path
from django.conf.urls import url
from project_first_app.views import *
# URL routes: home page, owner creation, login, and a per-id detail view.
urlpatterns = [
    path('',main,name='main'),
    path('createowner/',createowner,name='createowner'),
    path('login/',log_in,name='login'),
    path(r'<int:ho_id>',review,name='detail')
]
| true | true |
f719a465158b15ac1c1bfd62374aefc6ed61f38a | 36,465 | py | Python | owslib/iso.py | peterataylor/OWSLib | 8c15832da0c27dadfb567929ddd52a7570b7c231 | [
"BSD-3-Clause"
] | 1 | 2015-03-16T05:22:04.000Z | 2015-03-16T05:22:04.000Z | owslib/iso.py | peterataylor/OWSLib | 8c15832da0c27dadfb567929ddd52a7570b7c231 | [
"BSD-3-Clause"
] | null | null | null | owslib/iso.py | peterataylor/OWSLib | 8c15832da0c27dadfb567929ddd52a7570b7c231 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
""" ISO metadata parser """
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
    """Build the prefix->URI namespace map used throughout this module.

    The default (None) namespace is mapped to gmd, so unprefixed paths
    resolve against it.
    """
    ns_registry = Namespaces()
    prefixes = ["gco", "gmd", "gml", "gml32", "gmx", "gts", "srv", "xlink"]
    namespace_map = ns_registry.get_namespaces(prefixes)
    namespace_map[None] = ns_registry.get_namespace("gmd")
    return namespace_map
# Module-wide namespace map (prefix -> URI) used by every XPath lookup below.
namespaces = get_namespaces()
class MD_Metadata(object):
    """ Process gmd:MD_Metadata """
    def __init__(self, md=None):
        # md: a gmd:MD_Metadata etree element (or an ElementTree wrapping one);
        # when None, all properties are initialised empty.
        if md is None:
            self.xml = None
            self.identifier = None
            self.parentidentifier = None
            self.language = None
            self.dataseturi = None
            self.languagecode = None
            self.datestamp = None
            self.charset = None
            self.hierarchy = None
            self.contact = []
            self.datetimestamp = None
            self.stdname = None
            self.stdver = None
            self.referencesystem = None
            self.identification = None
            self.serviceidentification = None
            self.identificationinfo = []
            self.distribution = None
            self.dataquality = None
        else:
            # keep the raw XML serialisation of the metadata record
            if hasattr(md, 'getroot'):  # standalone document
                self.xml = etree.tostring(md.getroot())
            else:  # part of a larger document
                self.xml = etree.tostring(md)
            # simple character-string properties
            val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
            self.identifier = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
            self.parentidentifier = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
            self.language = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
            self.dataseturi = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
            self.languagecode = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
            self.datestamp = util.testXMLValue(val)
            # fall back to a gco:DateTime date stamp when no plain gco:Date exists
            if not self.datestamp:
                val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
                self.datestamp = util.testXMLValue(val)
            # code-list valued properties
            self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
            self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))
            # all metadata contacts
            self.contact = []
            for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
                o = CI_ResponsibleParty(i)
                self.contact.append(o)
            val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
            self.datetimestamp = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
            self.stdname = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
            self.stdver = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
            if val is not None:
                self.referencesystem = MD_ReferenceSystem(val)
            else:
                self.referencesystem = None
            # TODO: merge .identificationinfo into .identification
            #warnings.warn(
            #    'the .identification and .serviceidentification properties will merge into '
            #    '.identification being a list of properties. This is currently implemented '
            #    'in .identificationinfo. '
            #    'Please see https://github.com/geopython/OWSLib/issues/38 for more information',
            #    FutureWarning)
            # legacy single-valued identification properties
            val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
            val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))
            if val is not None:
                self.identification = MD_DataIdentification(val, 'dataset')
                self.serviceidentification = None
            elif val2 is not None:
                self.identification = MD_DataIdentification(val2, 'service')
                self.serviceidentification = SV_ServiceIdentification(val2)
            else:
                self.identification = None
                self.serviceidentification = None
            # newer-style list of all identification sections (dataset or service)
            self.identificationinfo = []
            for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
                val = list(idinfo)[0]
                tagval = util.xmltag_split(val.tag)
                if tagval == 'MD_DataIdentification':
                    self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
                elif tagval == 'MD_ServiceIdentification':
                    self.identificationinfo.append(MD_DataIdentification(val, 'service'))
                elif tagval == 'SV_ServiceIdentification':
                    self.identificationinfo.append(SV_ServiceIdentification(val))
            # distribution and data-quality sections, when present
            val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
            if val is not None:
                self.distribution = MD_Distribution(val)
            else:
                self.distribution = None
            val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
            if val is not None:
                self.dataquality = DQ_DataQuality(val)
            else:
                self.dataquality = None
class CI_Date(object):
    """Parse a gmd:CI_Date element into flat ``date`` and ``type`` attributes."""
    def __init__(self, md=None):
        if md is None:
            self.date = None
            self.type = None
        else:
            # A CI_Date carries either a gco:Date or a gco:DateTime child;
            # the first element present wins, even if its text value is empty.
            self.date = None
            for date_path in ('gmd:date/gco:Date', 'gmd:date/gco:DateTime'):
                date_elem = md.find(util.nspath_eval(date_path, namespaces))
                if date_elem is not None:
                    self.date = util.testXMLValue(date_elem)
                    break
            type_elem = md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces))
            self.type = _testCodeListValue(type_elem)
class CI_ResponsibleParty(object):
    """Parse a gmd:CI_ResponsibleParty element into flat contact attributes."""
    def __init__(self, md=None):
        # attribute name -> XPath of each simple character-string property,
        # in the order the attributes are assigned
        simple_paths = (
            ('name', 'gmd:individualName/gco:CharacterString'),
            ('organization', 'gmd:organisationName/gco:CharacterString'),
            ('position', 'gmd:positionName/gco:CharacterString'),
            ('phone', 'gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:voice/gco:CharacterString'),
            ('fax', 'gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:facsimile/gco:CharacterString'),
            ('address', 'gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:deliveryPoint/gco:CharacterString'),
            ('city', 'gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:city/gco:CharacterString'),
            ('region', 'gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:administrativeArea/gco:CharacterString'),
            ('postcode', 'gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:postalCode/gco:CharacterString'),
            ('country', 'gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:country/gco:CharacterString'),
            ('email', 'gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:electronicMailAddress/gco:CharacterString'),
        )
        if md is None:
            for attr, _unused_path in simple_paths:
                setattr(self, attr, None)
            self.onlineresource = None
            self.role = None
        else:
            for attr, path in simple_paths:
                setattr(self, attr, util.testXMLValue(
                    md.find(util.nspath_eval(path, namespaces))
                ))
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:onlineResource/gmd:CI_OnlineResource', namespaces))
            if val is not None:
                self.onlineresource = CI_OnlineResource(val)
            else:
                self.onlineresource = None
            self.role = _testCodeListValue(md.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces)))
class MD_DataIdentification(object):
    """Process gmd:MD_DataIdentification: dataset-level identification metadata.

    Every public attribute is initialised (to None or an empty list) in both
    constructor branches, so callers can read any attribute safely regardless
    of what the source document contained.
    """
    def __init__(self, md=None, identtype=None):
        if md is None:
            self.identtype = None
            self.title = None
            self.alternatetitle = None
            self.aggregationinfo = None
            self.uricode = []
            self.uricodespace = []
            self.date = []
            self.datetype = []
            self.uselimitation = []
            self.accessconstraints = []
            self.classification = []
            self.otherconstraints = []
            self.securityconstraints = []
            self.useconstraints = []
            self.denominators = []
            self.distance = []
            self.uom = []
            self.resourcelanguage = []
            self.creator = None
            self.publisher = None
            self.originator = None
            self.edition = None
            self.abstract = None
            self.purpose = None
            self.status = None
            self.contact = []
            self.keywords = []
            self.topiccategory = []
            self.supplementalinformation = None
            self.extent = None
            self.bbox = None
            self.temporalextent_start = None
            self.temporalextent_end = None
        else:
            self.identtype = identtype
            val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
            self.title = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', namespaces))
            self.alternatetitle = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:aggregationInfo', namespaces))
            self.aggregationinfo = util.testXMLValue(val)
            self.uricode = []
            for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.uricode.append(val)
            self.uricodespace = []
            for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.uricodespace.append(val)
            self.date = []
            self.datetype = []
            for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
                self.date.append(CI_Date(i))
            self.uselimitation = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.uselimitation.append(val)
            self.accessconstraints = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.accessconstraints.append(val)
            self.classification = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.classification.append(val)
            self.otherconstraints = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.otherconstraints.append(val)
            self.securityconstraints = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:useLimitation', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.securityconstraints.append(val)
            self.useconstraints = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useConstraints/gmd:MD_RestrictionCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.useconstraints.append(val)
            self.denominators = []
            for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.denominators.append(val)
            self.distance = []
            self.uom = []
            for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.distance.append(val)
                self.uom.append(i.get("uom"))
            self.resourcelanguage = []
            for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.resourcelanguage.append(val)
            # Fix: always define these; the original only assigned them when a
            # pointOfContact with a recognised role code was found, leaving the
            # attributes unset (AttributeError on later access) otherwise.
            self.creator = None
            self.publisher = None
            self.originator = None
            # NOTE(review): the role code is located relative to
            # gmd:organisationName and the code-list value is then read from
            # that same organisationName element; this mirrors the original
            # logic exactly -- confirm against real documents before changing.
            val = md.find(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName', namespaces))
            if val is not None:
                val2 = val.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces))
                if val2 is not None:
                    clv = _testCodeListValue(val)
                    if clv == 'originator':
                        self.creator = util.testXMLValue(val)
                    elif clv == 'publisher':
                        self.publisher = util.testXMLValue(val)
                    elif clv == 'contributor':
                        self.originator = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:edition/gco:CharacterString', namespaces))
            self.edition = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
            self.abstract = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:purpose/gco:CharacterString', namespaces))
            self.purpose = util.testXMLValue(val)
            self.status = _testCodeListValue(md.find(util.nspath_eval('gmd:status/gmd:MD_ProgressCode', namespaces)))
            self.contact = []
            for i in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
                o = CI_ResponsibleParty(i)
                self.contact.append(o)
            self.keywords = []
            for i in md.findall(util.nspath_eval('gmd:descriptiveKeywords', namespaces)):
                mdkw = {}
                mdkw['type'] = _testCodeListValue(i.find(util.nspath_eval('gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', namespaces)))
                mdkw['thesaurus'] = {}
                val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
                mdkw['thesaurus']['title'] = util.testXMLValue(val)
                val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
                mdkw['thesaurus']['date'] = util.testXMLValue(val)
                val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
                mdkw['thesaurus']['datetype'] = util.testXMLValue(val)
                mdkw['keywords'] = []
                for k in i.findall(util.nspath_eval('gmd:MD_Keywords/gmd:keyword', namespaces)):
                    val = k.find(util.nspath_eval('gco:CharacterString', namespaces))
                    if val is not None:
                        val2 = util.testXMLValue(val)
                        if val2 is not None:
                            mdkw['keywords'].append(val2)
                self.keywords.append(mdkw)
            self.topiccategory = []
            for i in md.findall(util.nspath_eval('gmd:topicCategory/gmd:MD_TopicCategoryCode', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.topiccategory.append(val)
            val = md.find(util.nspath_eval('gmd:supplementalInformation/gco:CharacterString', namespaces))
            self.supplementalinformation = util.testXMLValue(val)
            # There may be multiple geographicElement; build the extent from
            # the one containing an EX_GeographicBoundingBox or an
            # EX_BoundingPolygon (EX_GeographicDescription not implemented).
            # Fix: initialise extent/bbox/temporal attributes so they exist
            # even when no gmd:extent / srv:extent elements are present.
            self.extent = None
            self.bbox = None
            self.temporalextent_start = None
            self.temporalextent_end = None
            val = None
            val2 = None
            val3 = None
            extents = md.findall(util.nspath_eval('gmd:extent', namespaces))
            extents.extend(md.findall(util.nspath_eval('srv:extent', namespaces)))
            for extent in extents:
                if val is None:
                    for e in extent.findall(util.nspath_eval('gmd:EX_Extent/gmd:geographicElement', namespaces)):
                        if e.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces)) is not None or e.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces)) is not None:
                            val = e
                            break
                    self.extent = EX_Extent(val)
                    self.bbox = self.extent.boundingBox  # for backwards compatibility
                if val2 is None:
                    val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', namespaces))
                    if val2 is None:
                        val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition', namespaces))
                    self.temporalextent_start = util.testXMLValue(val2)
                if val3 is None:
                    val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', namespaces))
                    if val3 is None:
                        val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition', namespaces))
                    self.temporalextent_end = util.testXMLValue(val3)
class MD_Distributor(object):
    """Parse a gmd:MD_Distributor element: distributor contact and online links."""
    def __init__(self, md=None):
        self.contact = None
        self.online = []
        if md is not None:
            contact_el = md.find(util.nspath_eval('gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty', namespaces))
            if contact_el is not None:
                self.contact = CI_ResponsibleParty(contact_el)
            online_path = util.nspath_eval('gmd:MD_Distributor/gmd:distributorTransferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces)
            self.online = [CI_OnlineResource(ol) for ol in md.findall(online_path)]
class MD_Distribution(object):
    """Parse a gmd:MD_Distribution element: format, version, distributors, links."""
    def __init__(self, md=None):
        if md is None:
            self.format = None
            self.version = None
            self.distributor = []
            self.online = []
        else:
            self.format = util.testXMLValue(md.find(
                util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', namespaces)))
            self.version = util.testXMLValue(md.find(
                util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:version/gco:CharacterString', namespaces)))
            self.distributor = [MD_Distributor(dist) for dist in
                                md.findall(util.nspath_eval('gmd:distributor', namespaces))]
            self.online = [CI_OnlineResource(ol) for ol in md.findall(
                util.nspath_eval('gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces))]
class DQ_DataQuality(object):
    """Process gmd:DQ_DataQuality: conformance results and lineage statement.

    With md=None all attributes are initialised empty; otherwise they are
    extracted from the gmd:report / gmd:lineage subtrees of *md*.
    """
    def __init__(self, md=None):
        if md is None:
            self.conformancetitle = []
            self.conformancedate = []
            self.conformancedatetype = []
            self.conformancedegree = []
            self.lineage = None
            self.specificationtitle = None
            self.specificationdate = []
        else:
            # Titles of the specifications this resource was tested against.
            self.conformancetitle = []
            for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.conformancetitle.append(val)
            self.conformancedate = []
            for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.conformancedate.append(val)
            self.conformancedatetype = []
            for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.conformancedatetype.append(val)
            # gmd:pass booleans: whether the resource passed each conformance test.
            self.conformancedegree = []
            for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.conformancedegree.append(val)
            val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', namespaces))
            self.lineage = util.testXMLValue(val)
            # First specification title only (conformancetitle above keeps all).
            val = md.find(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
            self.specificationtitle = util.testXMLValue(val)
            # NOTE(review): this reads the text of the gmd:CI_Date element
            # itself, not of a child date element -- likely returns element
            # text/whitespace; confirm intended behaviour upstream.
            self.specificationdate = []
            for i in md.findall(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.specificationdate.append(val)
class SV_ServiceIdentification(object):
    """Process srv:SV_ServiceIdentification: service type, extent, operations.

    ``identtype`` is always the literal 'service'.
    """
    def __init__(self, md=None):
        if md is None:
            self.identtype = 'service'
            self.type = None
            self.version = None
            self.fees = None
            self.bbox = None
            self.couplingtype = None
            self.operations = []
            self.operateson = []
        else:
            self.identtype = 'service'
            val = md.find(util.nspath_eval('srv:serviceType/gco:LocalName', namespaces))
            self.type = util.testXMLValue(val)
            val = md.find(util.nspath_eval('srv:serviceTypeVersion/gco:CharacterString', namespaces))
            self.version = util.testXMLValue(val)
            val = md.find(util.nspath_eval('srv:accessProperties/gmd:MD_StandardOrderProcess/gmd:fees/gco:CharacterString', namespaces))
            self.fees = util.testXMLValue(val)
            # Despite the name, bbox holds the whole EX_Extent (or None).
            val = md.find(util.nspath_eval('srv:extent/gmd:EX_Extent', namespaces))
            if val is not None:
                self.bbox = EX_Extent(val)
            else:
                self.bbox = None
            # NOTE(review): couplingType/SV_CouplingType are looked up in the
            # gmd: namespace here although they are srv: elements in the ISO
            # 19119 schema -- confirm against real service records.
            self.couplingtype = _testCodeListValue(md.find(util.nspath_eval('gmd:couplingType/gmd:SV_CouplingType', namespaces)))
            # One dict per operation: name, DCP codes, connect-point resources.
            self.operations = []
            for i in md.findall(util.nspath_eval('srv:containsOperations', namespaces)):
                tmp = {}
                val = i.find(util.nspath_eval('srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', namespaces))
                tmp['name'] = util.testXMLValue(val)
                tmp['dcplist'] = []
                for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:DCP', namespaces)):
                    tmp2 = _testCodeListValue(d.find(util.nspath_eval('srv:DCPList', namespaces)))
                    tmp['dcplist'].append(tmp2)
                tmp['connectpoint'] = []
                for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:connectPoint', namespaces)):
                    tmp3 = d.find(util.nspath_eval('gmd:CI_OnlineResource', namespaces))
                    tmp['connectpoint'].append(CI_OnlineResource(tmp3))
                self.operations.append(tmp)
            # One dict per operated-on dataset: uuidref plus xlink href/title.
            self.operateson = []
            for i in md.findall(util.nspath_eval('srv:operatesOn', namespaces)):
                tmp = {}
                tmp['uuidref'] = i.attrib.get('uuidref')
                tmp['href'] = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
                tmp['title'] = i.attrib.get(util.nspath_eval('xlink:title', namespaces))
                self.operateson.append(tmp)
class CI_OnlineResource(object):
    """Parse a gmd:CI_OnlineResource element: linkage URL and its metadata."""
    def __init__(self, md=None):
        self.url = None
        self.protocol = None
        self.name = None
        self.description = None
        self.function = None
        if md is not None:
            self.url = util.testXMLValue(md.find(
                util.nspath_eval('gmd:linkage/gmd:URL', namespaces)))
            self.protocol = util.testXMLValue(md.find(
                util.nspath_eval('gmd:protocol/gco:CharacterString', namespaces)))
            self.name = util.testXMLValue(md.find(
                util.nspath_eval('gmd:name/gco:CharacterString', namespaces)))
            self.description = util.testXMLValue(md.find(
                util.nspath_eval('gmd:description/gco:CharacterString', namespaces)))
            self.function = _testCodeListValue(md.find(
                util.nspath_eval('gmd:function/gmd:CI_OnLineFunctionCode', namespaces)))
class EX_GeographicBoundingBox(object):
    """Parse a gmd:EX_GeographicBoundingBox into min/max longitude/latitude."""
    def __init__(self, md=None):
        if md is None:
            self.minx = self.maxx = self.miny = self.maxy = None
        else:
            def _decimal(bound):
                # Text of the gco:Decimal under the given bound element.
                return util.testXMLValue(md.find(
                    util.nspath_eval(bound + '/gco:Decimal', namespaces)))
            self.minx = _decimal('gmd:westBoundLongitude')
            self.maxx = _decimal('gmd:eastBoundLongitude')
            self.miny = _decimal('gmd:southBoundLatitude')
            self.maxy = _decimal('gmd:northBoundLatitude')
class EX_Polygon(object):
    """Process a gml32:Polygon into exterior/interior coordinate rings.

    ``exterior_ring`` is a list of coordinate tuples (or None if absent);
    ``interior_rings`` is a list of such lists.
    """
    def __init__(self, md=None):
        # Fix: always define exterior_ring; the original left the attribute
        # unset when no exterior gml32:LinearRing was present.
        self.exterior_ring = None
        self.interior_rings = []
        if md is not None:
            linear_ring = md.find(util.nspath_eval('gml32:Polygon/gml32:exterior/gml32:LinearRing', namespaces))
            if linear_ring is not None:
                self.exterior_ring = self._coordinates_for_ring(linear_ring)
            interior_ring_elements = md.findall(util.nspath_eval('gml32:Polygon/gml32:interior', namespaces))
            for iring_element in interior_ring_elements:
                linear_ring = iring_element.find(util.nspath_eval('gml32:LinearRing', namespaces))
                self.interior_rings.append(self._coordinates_for_ring(linear_ring))
    def _coordinates_for_ring(self, linear_ring):
        """Return a list of float tuples from the ring's gml32:pos children."""
        coordinates = []
        positions = linear_ring.findall(util.nspath_eval('gml32:pos', namespaces))
        for pos in positions:
            tokens = pos.text.split()
            coords = tuple([float(t) for t in tokens])
            coordinates.append(coords)
        return coordinates
class EX_GeographicBoundingPolygon(object):
    """Parse a gmd:EX_BoundingPolygon element into EX_Polygon objects."""
    def __init__(self, md=None):
        if md is None:
            self.is_extent = None
            self.polygons = []
        else:
            self.is_extent = util.testXMLValue(
                md.find(util.nspath_eval('gmd:extentTypeCode', namespaces)))
            self.polygons = [EX_Polygon(poly) for poly in
                             md.findall(util.nspath_eval('gmd:polygon', namespaces))]
class EX_Extent(object):
    """Parse a geographic element into bounding box, polygon and description code."""
    def __init__(self, md=None):
        self.boundingBox = None
        self.boundingPolygon = None
        self.description_code = None
        if md is not None:
            bbox_el = md.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces))
            if bbox_el is not None:
                self.boundingBox = EX_GeographicBoundingBox(bbox_el)
            polygon_el = md.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces))
            if polygon_el is not None:
                self.boundingPolygon = EX_GeographicBoundingPolygon(polygon_el)
            code_el = md.find(util.nspath_eval('gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces))
            self.description_code = util.testXMLValue(code_el)
class MD_ReferenceSystem(object):
    """Process gmd:MD_ReferenceSystem: the coordinate reference system code."""
    def __init__(self, md):
        if md is None:
            # Fix: the original left ``code`` undefined for md=None, causing
            # AttributeError on later access.
            self.code = None
        else:
            val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces))
            self.code = util.testXMLValue(val)
def _testCodeListValue(elpath):
""" get gco:CodeListValue_Type attribute, else get text content """
if elpath is not None: # try to get @codeListValue
val = util.testXMLValue(elpath.attrib.get('codeListValue'), True)
if val is not None:
return val
else: # see if there is element text
return util.testXMLValue(elpath)
else:
return None
class CodelistCatalogue(object):
    """Process gmx:CT_CodelistCatalogue into nested dictionaries of code entries."""
    def __init__(self, ct):
        val = ct.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
        self.name = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:scope/gco:CharacterString', namespaces))
        self.scope = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:fieldOfApplication/gco:CharacterString', namespaces))
        self.fieldapp = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:versionNumber/gco:CharacterString', namespaces))
        self.version = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
        self.date = util.testXMLValue(val)
        # dictionaries: {dict gml:id: {'description', 'identifier',
        #                              'entries': {entry gml:id: {...}}}}
        self.dictionaries = {}
        for i in ct.findall(util.nspath_eval('gmx:codelistItem/gmx:CodeListDictionary', namespaces)):
            id = i.attrib.get(util.nspath_eval('gml32:id', namespaces))
            self.dictionaries[id] = {}
            val = i.find(util.nspath_eval('gml32:description', namespaces))
            self.dictionaries[id]['description'] = util.testXMLValue(val)
            val = i.find(util.nspath_eval('gml32:identifier', namespaces))
            self.dictionaries[id]['identifier'] = util.testXMLValue(val)
            self.dictionaries[id]['entries'] = {}
            for j in i.findall(util.nspath_eval('gmx:codeEntry', namespaces)):
                id2 = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get(util.nspath_eval('gml32:id', namespaces))
                self.dictionaries[id]['entries'][id2] = {}
                val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:description', namespaces))
                self.dictionaries[id]['entries'][id2]['description'] = util.testXMLValue(val)
                val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:identifier', namespaces))
                self.dictionaries[id]['entries'][id2]['identifier'] = util.testXMLValue(val)
                val = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get('codeSpace')
                self.dictionaries[id]['entries'][id2]['codespace'] = util.testXMLValue(val, True)
    def getcodelistdictionaries(self):
        """Return the ids of all codelist dictionaries in the catalogue."""
        return self.dictionaries.keys()
    def getcodedefinitionidentifiers(self, cdl):
        """Return code definition identifiers for dictionary *cdl*, or None.

        Fix: dict.has_key() was removed in Python 3; use the ``in`` operator.
        """
        if cdl in self.dictionaries:
            ids = []
            for i in self.dictionaries[cdl]['entries']:
                ids.append(self.dictionaries[cdl]['entries'][i]['identifier'])
            return ids
        else:
            return None
| 47.480469 | 225 | 0.604854 |
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
def get_namespaces():
    """Return the ISO prefix->URI map, with gmd as the default (None) namespace."""
    registry = Namespaces()
    ns = registry.get_namespaces(
        ["gco", "gmd", "gml", "gml32", "gmx", "gts", "srv", "xlink"])
    ns[None] = registry.get_namespace("gmd")
    return ns
# Module-level namespace map used by every parser class below.
namespaces = get_namespaces()
class MD_Metadata(object):
    """Process gmd:MD_Metadata: the root ISO 19139 metadata record.

    With md=None all attributes are initialised empty; otherwise *md* is an
    etree element (or ElementTree) whose subtrees are parsed into the
    helper classes defined in this module.
    """
    def __init__(self, md=None):
        if md is None:
            self.xml = None
            self.identifier = None
            self.parentidentifier = None
            self.language = None
            self.dataseturi = None
            self.languagecode = None
            self.datestamp = None
            self.charset = None
            self.hierarchy = None
            self.contact = []
            self.datetimestamp = None
            self.stdname = None
            self.stdver = None
            self.referencesystem = None
            self.identification = None
            self.serviceidentification = None
            self.identificationinfo = []
            self.distribution = None
            self.dataquality = None
        else:
            # Keep the raw serialised record for later use; accept either a
            # full ElementTree (has getroot) or a bare element.
            if hasattr(md, 'getroot'):
                self.xml = etree.tostring(md.getroot())
            else:
                self.xml = etree.tostring(md)
            val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
            self.identifier = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
            self.parentidentifier = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
            self.language = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
            self.dataseturi = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
            self.languagecode = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
            self.datestamp = util.testXMLValue(val)
            # Fall back to a gco:DateTime stamp when no plain gco:Date exists.
            if not self.datestamp:
                val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
                self.datestamp = util.testXMLValue(val)
            self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
            self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))
            self.contact = []
            for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
                o = CI_ResponsibleParty(i)
                self.contact.append(o)
            val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
            self.datetimestamp = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
            self.stdname = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
            self.stdver = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
            if val is not None:
                self.referencesystem = MD_ReferenceSystem(val)
            else:
                self.referencesystem = None
            # Legacy single identification/serviceidentification attributes;
            # identificationinfo below keeps the full list.
            val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
            val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))
            if val is not None:
                self.identification = MD_DataIdentification(val, 'dataset')
                self.serviceidentification = None
            elif val2 is not None:
                self.identification = MD_DataIdentification(val2, 'service')
                self.serviceidentification = SV_ServiceIdentification(val2)
            else:
                self.identification = None
                self.serviceidentification = None
            self.identificationinfo = []
            for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
                # Fix: guard against an empty gmd:identificationInfo element,
                # which previously raised IndexError on list(idinfo)[0].
                children = list(idinfo)
                if not children:
                    continue
                val = children[0]
                tagval = util.xmltag_split(val.tag)
                if tagval == 'MD_DataIdentification':
                    self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
                elif tagval == 'MD_ServiceIdentification':
                    self.identificationinfo.append(MD_DataIdentification(val, 'service'))
                elif tagval == 'SV_ServiceIdentification':
                    self.identificationinfo.append(SV_ServiceIdentification(val))
            val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
            if val is not None:
                self.distribution = MD_Distribution(val)
            else:
                self.distribution = None
            val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
            if val is not None:
                self.dataquality = DQ_DataQuality(val)
            else:
                self.dataquality = None
class CI_Date(object):
    """Parse a gmd:CI_Date element into a date string and a date-type code."""
    def __init__(self, md=None):
        self.date = None
        self.type = None
        if md is not None:
            # Try gco:Date first, then fall back to gco:DateTime.
            date_el = md.find(util.nspath_eval('gmd:date/gco:Date', namespaces))
            if date_el is None:
                date_el = md.find(util.nspath_eval('gmd:date/gco:DateTime', namespaces))
            if date_el is not None:
                self.date = util.testXMLValue(date_el)
            self.type = _testCodeListValue(
                md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces)))
class CI_ResponsibleParty(object):
    """Process gmd:CI_ResponsibleParty: a named contact with address and role.

    All attributes are plain strings (or None), except ``onlineresource``
    which is a CI_OnlineResource or None.
    """
    def __init__(self, md=None):
        if md is None:
            self.name = None
            self.organization = None
            self.position = None
            self.phone = None
            self.fax = None
            self.address = None
            self.city = None
            self.region = None
            self.postcode = None
            self.country = None
            self.email = None
            self.onlineresource = None
            self.role = None
        else:
            val = md.find(util.nspath_eval('gmd:individualName/gco:CharacterString', namespaces))
            self.name = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:organisationName/gco:CharacterString', namespaces))
            self.organization = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:positionName/gco:CharacterString', namespaces))
            self.position = util.testXMLValue(val)
            # Contact details all live under gmd:contactInfo/gmd:CI_Contact.
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:voice/gco:CharacterString', namespaces))
            self.phone = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:phone/gmd:CI_Telephone/gmd:facsimile/gco:CharacterString', namespaces))
            self.fax = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:deliveryPoint/gco:CharacterString', namespaces))
            self.address = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:city/gco:CharacterString', namespaces))
            self.city = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:administrativeArea/gco:CharacterString', namespaces))
            self.region = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:postalCode/gco:CharacterString', namespaces))
            self.postcode = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:country/gco:CharacterString', namespaces))
            self.country = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:address/gmd:CI_Address/gmd:electronicMailAddress/gco:CharacterString', namespaces))
            self.email = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contact/gmd:onlineResource/gmd:CI_OnlineResource', namespaces))
            if val is not None:
                self.onlineresource = CI_OnlineResource(val)
            else:
                self.onlineresource = None
            self.role = _testCodeListValue(md.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces)))
class MD_DataIdentification(object):
    """Process gmd:MD_DataIdentification: dataset-level identification metadata.

    Every public attribute is initialised (to None or an empty list) in both
    constructor branches, so callers can read any attribute safely regardless
    of what the source document contained.
    """
    def __init__(self, md=None, identtype=None):
        if md is None:
            self.identtype = None
            self.title = None
            self.alternatetitle = None
            self.aggregationinfo = None
            self.uricode = []
            self.uricodespace = []
            self.date = []
            self.datetype = []
            self.uselimitation = []
            self.accessconstraints = []
            self.classification = []
            self.otherconstraints = []
            self.securityconstraints = []
            self.useconstraints = []
            self.denominators = []
            self.distance = []
            self.uom = []
            self.resourcelanguage = []
            self.creator = None
            self.publisher = None
            self.originator = None
            self.edition = None
            self.abstract = None
            self.purpose = None
            self.status = None
            self.contact = []
            self.keywords = []
            self.topiccategory = []
            self.supplementalinformation = None
            self.extent = None
            self.bbox = None
            self.temporalextent_start = None
            self.temporalextent_end = None
        else:
            self.identtype = identtype
            val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
            self.title = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:alternateTitle/gco:CharacterString', namespaces))
            self.alternatetitle = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:aggregationInfo', namespaces))
            self.aggregationinfo = util.testXMLValue(val)
            self.uricode = []
            for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.uricode.append(val)
            self.uricodespace = []
            for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:RS_Identifier/gmd:codeSpace/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.uricodespace.append(val)
            self.date = []
            self.datetype = []
            for i in md.findall(util.nspath_eval('gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date', namespaces)):
                self.date.append(CI_Date(i))
            self.uselimitation = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.uselimitation.append(val)
            self.accessconstraints = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_RestrictionCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.accessconstraints.append(val)
            self.classification = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:accessConstraints/gmd:MD_ClassificationCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.classification.append(val)
            self.otherconstraints = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.otherconstraints.append(val)
            self.securityconstraints = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_SecurityConstraints/gmd:useLimitation', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.securityconstraints.append(val)
            self.useconstraints = []
            for i in md.findall(util.nspath_eval('gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:useConstraints/gmd:MD_RestrictionCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.useconstraints.append(val)
            self.denominators = []
            for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:equivalentScale/gmd:MD_RepresentativeFraction/gmd:denominator/gco:Integer', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.denominators.append(val)
            self.distance = []
            self.uom = []
            for i in md.findall(util.nspath_eval('gmd:spatialResolution/gmd:MD_Resolution/gmd:distance/gco:Distance', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.distance.append(val)
                self.uom.append(i.get("uom"))
            self.resourcelanguage = []
            for i in md.findall(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces)):
                val = _testCodeListValue(i)
                if val is not None:
                    self.resourcelanguage.append(val)
            # Fix: always define these; the original only assigned them when a
            # pointOfContact with a recognised role code was found, leaving the
            # attributes unset (AttributeError on later access) otherwise.
            self.creator = None
            self.publisher = None
            self.originator = None
            # NOTE(review): the role code is located relative to
            # gmd:organisationName and the code-list value is then read from
            # that same organisationName element; this mirrors the original
            # logic exactly -- confirm against real documents before changing.
            val = md.find(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty/gmd:organisationName', namespaces))
            if val is not None:
                val2 = val.find(util.nspath_eval('gmd:role/gmd:CI_RoleCode', namespaces))
                if val2 is not None:
                    clv = _testCodeListValue(val)
                    if clv == 'originator':
                        self.creator = util.testXMLValue(val)
                    elif clv == 'publisher':
                        self.publisher = util.testXMLValue(val)
                    elif clv == 'contributor':
                        self.originator = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:edition/gco:CharacterString', namespaces))
            self.edition = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:abstract/gco:CharacterString', namespaces))
            self.abstract = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:purpose/gco:CharacterString', namespaces))
            self.purpose = util.testXMLValue(val)
            self.status = _testCodeListValue(md.find(util.nspath_eval('gmd:status/gmd:MD_ProgressCode', namespaces)))
            self.contact = []
            for i in md.findall(util.nspath_eval('gmd:pointOfContact/gmd:CI_ResponsibleParty', namespaces)):
                o = CI_ResponsibleParty(i)
                self.contact.append(o)
            self.keywords = []
            for i in md.findall(util.nspath_eval('gmd:descriptiveKeywords', namespaces)):
                mdkw = {}
                mdkw['type'] = _testCodeListValue(i.find(util.nspath_eval('gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode', namespaces)))
                mdkw['thesaurus'] = {}
                val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
                mdkw['thesaurus']['title'] = util.testXMLValue(val)
                val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date', namespaces))
                mdkw['thesaurus']['date'] = util.testXMLValue(val)
                val = i.find(util.nspath_eval('gmd:MD_Keywords/gmd:thesaurusName/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode', namespaces))
                mdkw['thesaurus']['datetype'] = util.testXMLValue(val)
                mdkw['keywords'] = []
                for k in i.findall(util.nspath_eval('gmd:MD_Keywords/gmd:keyword', namespaces)):
                    val = k.find(util.nspath_eval('gco:CharacterString', namespaces))
                    if val is not None:
                        val2 = util.testXMLValue(val)
                        if val2 is not None:
                            mdkw['keywords'].append(val2)
                self.keywords.append(mdkw)
            self.topiccategory = []
            for i in md.findall(util.nspath_eval('gmd:topicCategory/gmd:MD_TopicCategoryCode', namespaces)):
                val = util.testXMLValue(i)
                if val is not None:
                    self.topiccategory.append(val)
            val = md.find(util.nspath_eval('gmd:supplementalInformation/gco:CharacterString', namespaces))
            self.supplementalinformation = util.testXMLValue(val)
            # There may be multiple geographicElement; build the extent from
            # the one containing an EX_GeographicBoundingBox or an
            # EX_BoundingPolygon (EX_GeographicDescription not implemented).
            # Fix: initialise extent/bbox/temporal attributes so they exist
            # even when no gmd:extent / srv:extent elements are present.
            self.extent = None
            self.bbox = None
            self.temporalextent_start = None
            self.temporalextent_end = None
            val = None
            val2 = None
            val3 = None
            extents = md.findall(util.nspath_eval('gmd:extent', namespaces))
            extents.extend(md.findall(util.nspath_eval('srv:extent', namespaces)))
            for extent in extents:
                if val is None:
                    for e in extent.findall(util.nspath_eval('gmd:EX_Extent/gmd:geographicElement', namespaces)):
                        if e.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces)) is not None or e.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces)) is not None:
                            val = e
                            break
                    self.extent = EX_Extent(val)
                    self.bbox = self.extent.boundingBox  # for backwards compatibility
                if val2 is None:
                    val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:beginPosition', namespaces))
                    if val2 is None:
                        val2 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:beginPosition', namespaces))
                    self.temporalextent_start = util.testXMLValue(val2)
                if val3 is None:
                    val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod/gml:endPosition', namespaces))
                    if val3 is None:
                        val3 = extent.find(util.nspath_eval('gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml32:TimePeriod/gml32:endPosition', namespaces))
                    self.temporalextent_end = util.testXMLValue(val3)
class MD_Distributor(object):
    """Parses a gmd:distributor node: distributor contact and online resources."""
    def __init__(self, md=None):
        """Build from *md* (an etree element) or with empty defaults when None."""
        self.contact = None
        self.online = []
        if md is None:
            return
        contact_el = md.find(util.nspath_eval('gmd:MD_Distributor/gmd:distributorContact/gmd:CI_ResponsibleParty', namespaces))
        if contact_el is not None:
            self.contact = CI_ResponsibleParty(contact_el)
        resource_els = md.findall(util.nspath_eval('gmd:MD_Distributor/gmd:distributorTransferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces))
        self.online = [CI_OnlineResource(ol) for ol in resource_els]
class MD_Distribution(object):
    """Parses a gmd:MD_Distribution element.

    Exposes the distribution format name/version, the distributors and the
    online transfer options.
    """
    def __init__(self, md=None):
        """Build from *md* (an etree element) or with empty defaults when None."""
        if md is None:
            # Dead ``pass`` statement that followed these assignments removed.
            self.format = None
            self.version = None
            self.distributor = []
            self.online = []
        else:
            val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString', namespaces))
            self.format = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:distributionFormat/gmd:MD_Format/gmd:version/gco:CharacterString', namespaces))
            self.version = util.testXMLValue(val)
            self.distributor = [MD_Distributor(dist) for dist in
                                md.findall(util.nspath_eval('gmd:distributor', namespaces))]
            self.online = [CI_OnlineResource(ol) for ol in
                           md.findall(util.nspath_eval('gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource', namespaces))]
class DQ_DataQuality(object):
    """Parses a gmd:DQ_DataQuality element: conformance results and lineage."""

    def _collect(self, md, path, extract):
        """Return the non-None results of extract() over every element at *path*."""
        found = []
        for element in md.findall(util.nspath_eval(path, namespaces)):
            value = extract(element)
            if value is not None:
                found.append(value)
        return found

    def __init__(self, md=None):
        """Build from *md* (an etree element) or with empty defaults when None."""
        if md is None:
            self.conformancetitle = []
            self.conformancedate = []
            self.conformancedatetype = []
            self.conformancedegree = []
            self.lineage = None
            self.specificationtitle = None
            self.specificationdate = []
            return
        self.conformancetitle = self._collect(
            md,
            'gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString',
            util.testXMLValue)
        self.conformancedate = self._collect(
            md,
            'gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date',
            util.testXMLValue)
        # Date types are codelist values, not plain character strings.
        self.conformancedatetype = self._collect(
            md,
            'gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:dateType/gmd:CI_DateTypeCode',
            _testCodeListValue)
        self.conformancedegree = self._collect(
            md,
            'gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean',
            util.testXMLValue)
        val = md.find(util.nspath_eval('gmd:lineage/gmd:LI_Lineage/gmd:statement/gco:CharacterString', namespaces))
        self.lineage = util.testXMLValue(val)
        val = md.find(util.nspath_eval('gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString', namespaces))
        self.specificationtitle = util.testXMLValue(val)
        self.specificationdate = self._collect(
            md,
            'gmd:report/gmd:DQ_DomainConsistency/gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date',
            util.testXMLValue)
class SV_ServiceIdentification(object):
    """Parses a srv:SV_ServiceIdentification metadata element.

    Collects the service type/version, fees, bounding extent, coupling type,
    the operations the service exposes and the resources it operates on.
    """
    def __init__(self, md=None):
        """Build from *md* (an etree element) or with empty defaults when None."""
        if md is None:
            self.identtype = 'service'
            self.type = None
            self.version = None
            self.fees = None
            self.bbox = None
            self.couplingtype = None
            self.operations = []
            self.operateson = []
        else:
            self.identtype = 'service'
            val = md.find(util.nspath_eval('srv:serviceType/gco:LocalName', namespaces))
            self.type = util.testXMLValue(val)
            val = md.find(util.nspath_eval('srv:serviceTypeVersion/gco:CharacterString', namespaces))
            self.version = util.testXMLValue(val)
            val = md.find(util.nspath_eval('srv:accessProperties/gmd:MD_StandardOrderProcess/gmd:fees/gco:CharacterString', namespaces))
            self.fees = util.testXMLValue(val)
            val = md.find(util.nspath_eval('srv:extent/gmd:EX_Extent', namespaces))
            if val is not None:
                self.bbox = EX_Extent(val)
            else:
                self.bbox = None
            self.couplingtype = _testCodeListValue(md.find(util.nspath_eval('gmd:couplingType/gmd:SV_CouplingType', namespaces)))
            # Each operation becomes a dict with keys 'name', 'dcplist'
            # (srv:DCP codelist values) and 'connectpoint'
            # (CI_OnlineResource objects).
            self.operations = []
            for i in md.findall(util.nspath_eval('srv:containsOperations', namespaces)):
                tmp = {}
                val = i.find(util.nspath_eval('srv:SV_OperationMetadata/srv:operationName/gco:CharacterString', namespaces))
                tmp['name'] = util.testXMLValue(val)
                tmp['dcplist'] = []
                for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:DCP', namespaces)):
                    tmp2 = _testCodeListValue(d.find(util.nspath_eval('srv:DCPList', namespaces)))
                    tmp['dcplist'].append(tmp2)
                tmp['connectpoint'] = []
                for d in i.findall(util.nspath_eval('srv:SV_OperationMetadata/srv:connectPoint', namespaces)):
                    tmp3 = d.find(util.nspath_eval('gmd:CI_OnlineResource', namespaces))
                    tmp['connectpoint'].append(CI_OnlineResource(tmp3))
                self.operations.append(tmp)
            # Operated-on resources are recorded as dicts of the element's
            # 'uuidref' and xlink 'href'/'title' attributes (values may be None).
            self.operateson = []
            for i in md.findall(util.nspath_eval('srv:operatesOn', namespaces)):
                tmp = {}
                tmp['uuidref'] = i.attrib.get('uuidref')
                tmp['href'] = i.attrib.get(util.nspath_eval('xlink:href', namespaces))
                tmp['title'] = i.attrib.get(util.nspath_eval('xlink:title', namespaces))
                self.operateson.append(tmp)
class CI_OnlineResource(object):
    """Parses a gmd:CI_OnlineResource element (a linkage URL and its metadata)."""
    def __init__(self, md=None):
        """Build from *md* (an etree element) or with all-None fields when None."""
        if md is None:
            self.url = None
            self.protocol = None
            self.name = None
            self.description = None
            self.function = None
            return
        # The first four fields are all simple character-string children.
        def _text(path):
            return util.testXMLValue(md.find(util.nspath_eval(path, namespaces)))
        self.url = _text('gmd:linkage/gmd:URL')
        self.protocol = _text('gmd:protocol/gco:CharacterString')
        self.name = _text('gmd:name/gco:CharacterString')
        self.description = _text('gmd:description/gco:CharacterString')
        self.function = _testCodeListValue(md.find(util.nspath_eval('gmd:function/gmd:CI_OnLineFunctionCode', namespaces)))
class EX_GeographicBoundingBox(object):
    """Parses a gmd:EX_GeographicBoundingBox into minx/maxx/miny/maxy."""

    # attribute name -> path of the decimal bound element holding its value
    _FIELDS = (
        ('minx', 'gmd:westBoundLongitude/gco:Decimal'),
        ('maxx', 'gmd:eastBoundLongitude/gco:Decimal'),
        ('miny', 'gmd:southBoundLatitude/gco:Decimal'),
        ('maxy', 'gmd:northBoundLatitude/gco:Decimal'),
    )

    def __init__(self, md=None):
        """Build from *md* (an etree element) or with all-None bounds when None."""
        for attr, path in self._FIELDS:
            if md is None:
                setattr(self, attr, None)
            else:
                setattr(self, attr, util.testXMLValue(md.find(util.nspath_eval(path, namespaces))))
class EX_Polygon(object):
    """Parses a gmd:polygon (gml 3.2) into exterior/interior coordinate rings.

    ``exterior_ring`` is a list of float coordinate tuples, or None when the
    polygon has no exterior LinearRing; ``interior_rings`` is a list of such
    coordinate lists, one per interior ring.
    """
    def __init__(self, md=None):
        """Build from *md* (an etree element) or with empty rings when None."""
        # Always initialise both attributes up front: previously, when *md*
        # was given but contained no exterior LinearRing, ``exterior_ring``
        # was never set and later attribute access raised AttributeError.
        self.exterior_ring = None
        self.interior_rings = []
        if md is None:
            return
        linear_ring = md.find(util.nspath_eval('gml32:Polygon/gml32:exterior/gml32:LinearRing', namespaces))
        if linear_ring is not None:
            self.exterior_ring = self._coordinates_for_ring(linear_ring)
        interior_ring_elements = md.findall(util.nspath_eval('gml32:Polygon/gml32:interior', namespaces))
        for iring_element in interior_ring_elements:
            linear_ring = iring_element.find(util.nspath_eval('gml32:LinearRing', namespaces))
            self.interior_rings.append(self._coordinates_for_ring(linear_ring))

    def _coordinates_for_ring(self, linear_ring):
        """Return the ring's gml32:pos coordinates as a list of float tuples."""
        coordinates = []
        positions = linear_ring.findall(util.nspath_eval('gml32:pos', namespaces))
        for pos in positions:
            tokens = pos.text.split()
            coords = tuple(float(t) for t in tokens)
            coordinates.append(coords)
        return coordinates
class EX_GeographicBoundingPolygon(object):
    """Parses a gmd:EX_BoundingPolygon into a list of EX_Polygon objects."""
    def __init__(self, md=None):
        """Build from *md* (an etree element) or with empty content when None."""
        if md is None:
            self.is_extent = None
            self.polygons = []
        else:
            extent_code = md.find(util.nspath_eval('gmd:extentTypeCode', namespaces))
            self.is_extent = util.testXMLValue(extent_code)
            polygon_els = md.findall(util.nspath_eval('gmd:polygon', namespaces))
            self.polygons = [EX_Polygon(p) for p in polygon_els]
class EX_Extent(object):
    """Parses a gmd:EX_Extent element: bounding box, bounding polygon and
    geographic description code."""
    def __init__(self, md=None):
        """Build from *md* (an etree element) or with all-None fields when None."""
        self.boundingBox = None
        self.boundingPolygon = None
        self.description_code = None
        if md is not None:
            # The non-None branch previously re-tested ``md is not None``;
            # that inner check was always true and has been removed.
            bboxElement = md.find(util.nspath_eval('gmd:EX_GeographicBoundingBox', namespaces))
            if bboxElement is not None:
                self.boundingBox = EX_GeographicBoundingBox(bboxElement)
            polygonElement = md.find(util.nspath_eval('gmd:EX_BoundingPolygon', namespaces))
            if polygonElement is not None:
                self.boundingPolygon = EX_GeographicBoundingPolygon(polygonElement)
            val = md.find(util.nspath_eval('gmd:EX_GeographicDescription/gmd:geographicIdentifier/gmd:MD_Identifier/gmd:code/gco:CharacterString', namespaces))
            self.description_code = util.testXMLValue(val)
class MD_ReferenceSystem(object):
    """Parses a gmd:MD_ReferenceSystem element (the reference-system code)."""
    def __init__(self, md):
        """Build from *md* (an etree element); ``code`` is None when *md* is None."""
        if md is None:
            # Previously this branch was a bare ``pass`` and left ``code``
            # unset, so attribute access on an empty instance raised
            # AttributeError; initialise it like the sibling classes do.
            self.code = None
        else:
            val = md.find(util.nspath_eval('gmd:referenceSystemIdentifier/gmd:RS_Identifier/gmd:code/gco:CharacterString', namespaces))
            self.code = util.testXMLValue(val)
def _testCodeListValue(elpath):
if elpath is not None:
val = util.testXMLValue(elpath.attrib.get('codeListValue'), True)
if val is not None:
return val
else:
return util.testXMLValue(elpath)
else:
return None
class CodelistCatalogue(object):
    """Parses a gmx codelist catalogue into named code dictionaries.

    ``dictionaries`` maps each codelist id to a dict with 'description',
    'identifier' and 'entries'; 'entries' maps each code-definition id to a
    dict with 'description', 'identifier' and 'codespace'.
    """
    def __init__(self, ct):
        """Build from *ct*, the catalogue's root etree element."""
        val = ct.find(util.nspath_eval('gmx:name/gco:CharacterString', namespaces))
        self.name = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:scope/gco:CharacterString', namespaces))
        self.scope = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:fieldOfApplication/gco:CharacterString', namespaces))
        self.fieldapp = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:versionNumber/gco:CharacterString', namespaces))
        self.version = util.testXMLValue(val)
        val = ct.find(util.nspath_eval('gmx:versionDate/gco:Date', namespaces))
        self.date = util.testXMLValue(val)
        self.dictionaries = {}
        for i in ct.findall(util.nspath_eval('gmx:codelistItem/gmx:CodeListDictionary', namespaces)):
            # Renamed from ``id``/``id2`` to avoid shadowing the builtin.
            dict_id = i.attrib.get(util.nspath_eval('gml32:id', namespaces))
            self.dictionaries[dict_id] = {}
            val = i.find(util.nspath_eval('gml32:description', namespaces))
            self.dictionaries[dict_id]['description'] = util.testXMLValue(val)
            val = i.find(util.nspath_eval('gml32:identifier', namespaces))
            self.dictionaries[dict_id]['identifier'] = util.testXMLValue(val)
            self.dictionaries[dict_id]['entries'] = {}
            for j in i.findall(util.nspath_eval('gmx:codeEntry', namespaces)):
                entry_id = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get(util.nspath_eval('gml32:id', namespaces))
                self.dictionaries[dict_id]['entries'][entry_id] = {}
                val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:description', namespaces))
                self.dictionaries[dict_id]['entries'][entry_id]['description'] = util.testXMLValue(val)
                val = j.find(util.nspath_eval('gmx:CodeDefinition/gml32:identifier', namespaces))
                self.dictionaries[dict_id]['entries'][entry_id]['identifier'] = util.testXMLValue(val)
                val = j.find(util.nspath_eval('gmx:CodeDefinition', namespaces)).attrib.get('codeSpace')
                self.dictionaries[dict_id]['entries'][entry_id]['codespace'] = util.testXMLValue(val, True)

    def getcodelistdictionaries(self):
        """Return the ids of all code-list dictionaries in the catalogue."""
        return self.dictionaries.keys()

    def getcodedefinitionidentifiers(self, cdl):
        """Return the identifiers of every entry of dictionary *cdl*, or None
        when the catalogue has no dictionary with that id."""
        # ``dict.has_key()`` was removed in Python 3; use the ``in`` operator.
        if cdl in self.dictionaries:
            ids = []
            for i in self.dictionaries[cdl]['entries']:
                ids.append(self.dictionaries[cdl]['entries'][i]['identifier'])
            return ids
        return None
| true | true |
f719a47cc5a7d23e73cc98dbe3e60cc827cae0aa | 3,057 | py | Python | fun.py | Grymlock/Guardian_Bot | 0fac4cd37038a46d1d8b6eed3fbb79832bd7abf9 | [
"MIT"
] | 1 | 2018-06-22T03:52:49.000Z | 2018-06-22T03:52:49.000Z | fun.py | Grymlock/Guardian_Bot | 0fac4cd37038a46d1d8b6eed3fbb79832bd7abf9 | [
"MIT"
] | null | null | null | fun.py | Grymlock/Guardian_Bot | 0fac4cd37038a46d1d8b6eed3fbb79832bd7abf9 | [
"MIT"
] | null | null | null | import discord
import constants as c
from discord.ext import commands
import random as r
# Meme images for the ``dab`` command; one is picked at random (index 0-3).
urls=['https://cdn.discordapp.com/attachments/433007901800398858/433047585121501194/maxresdefault.jpg','https://cdn.discordapp.com/attachments/442868510776098818/442879211296915466/9bt3n9w40bp01.jpg','https://cdn.discordapp.com/attachments/442323518860951589/443142715761360915/Dap.PNG',"https://cdn.discordapp.com/attachments/442323518860951589/443250907501821964/IMG_20180222_192827.jpg"]
# Words that trigger the joking "report" warning in ``Fun.on_message``.
badWords=["gamer","frick","fudge","heck","bubby"]
class Fun:
    """Joke/meme commands and chat-reaction event listeners for the bot."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def dab(self, ctx, *, member: discord.Member):
        "everybody pause at 1:18"
        try:
            # A hard-coded protected user id and the bot owner are exempt.
            if member.id == 426560497781833748 or member.id == c.owner_id:
                await ctx.send("haha no")
            else:
                em = discord.Embed(title="", description='')
                rand = r.randint(0, 3)
                em.set_image(url=str(urls[rand]))
                await ctx.send(embed=em)
                await ctx.send(str(member.mention))
        except Exception:
            # Narrowed from a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.
            await ctx.send("Invalid user")

    @commands.command()
    async def bruhcat(self, ctx):
        "bruh"
        catembed = discord.Embed()
        catembed.set_image(url="https://cdn.discordapp.com/attachments/444325494264037377/445300639631671296/bruh.gif")
        await ctx.send(embed=catembed)

    @commands.command()
    async def blicky(self, ctx):
        # Posts a fixed meme image (no docstring so the help text stays empty,
        # matching the original command).
        em = discord.Embed()
        em.set_image(url="https://cdn.discordapp.com/attachments/444325494264037377/445407209359409163/27c3yf.png")
        await ctx.send(embed=em)

    async def on_message(self, message):
        """React to trigger words, greetings and call-and-response phrases."""
        if message.author.bot:  # prevents the bot from reacting to itself
            pass
        else:
            for word in badWords:
                if message.content == (word):
                    await message.channel.send(f"Please do not use the word '{word}' or I will report you and block you")
            # Rare (1 in 2000) random heckle on any message.
            ran = r.randint(1, 2000)
            if ran == 1:
                await message.channel.send("^Are you listening to this retard lmao")
            if message.content == ("gm") or message.content == ("good morning"):
                await message.channel.send("Another day closer to death" + str(message.author.mention))
            if message.content == ("gn") or message.content == ("good night"):
                await message.channel.send("sleep tight boyo")
            if message.content == ("good bye"):
                await message.channel.send("bye loser")
            if message.content == ("what do we want"):
                await message.channel.send("Equality for women")
            if message.content == ("when do we want it"):
                await message.channel.send("Now")

    async def on_member_ban(self, guild, member):
        """Message the guild owner when the bot owner gets banned."""
        # ``member`` is a Member/User object; the old ``member == c.owner_id``
        # compared the object itself to an int id and could never be true
        # (``dab`` above correctly compares ``member.id``).
        if member.id == c.owner_id:
            await guild.owner.send("Can y'all stop banning my master")
def setup(bot):
    """Extension entry point: register the Fun cog on *bot*."""
    cog = Fun(bot)
    bot.add_cog(cog)
import constants as c
from discord.ext import commands
import random as r
urls=['https://cdn.discordapp.com/attachments/433007901800398858/433047585121501194/maxresdefault.jpg','https://cdn.discordapp.com/attachments/442868510776098818/442879211296915466/9bt3n9w40bp01.jpg','https://cdn.discordapp.com/attachments/442323518860951589/443142715761360915/Dap.PNG',"https://cdn.discordapp.com/attachments/442323518860951589/443250907501821964/IMG_20180222_192827.jpg"]
badWords=["gamer","frick","fudge","heck","bubby"]
class Fun:
def __init__(self,bot):
self.bot=bot
@commands.command()
async def dab(self,ctx, *, member: discord.Member):
try:
if member.id==426560497781833748 or member.id==c.owner_id:
await ctx.send("haha no")
else:
em=discord.Embed(title="",description='')
rand=r.randint(0,3)
em.set_image(url=str(urls[rand]))
await ctx.send(embed=em)
await ctx.send(str(member.mention))
except:
await ctx.send("Invalid user")
@commands.command()
async def bruhcat(self,ctx):
catembed=discord.Embed()
catembed.set_image(url="https://cdn.discordapp.com/attachments/444325494264037377/445300639631671296/bruh.gif")
await ctx.send(embed=catembed)
@commands.command()
async def blicky(self,ctx):
em=discord.Embed()
em.set_image(url="https://cdn.discordapp.com/attachments/444325494264037377/445407209359409163/27c3yf.png")
await ctx.send(embed=em)
async def on_message(self,message):
if message.author.bot:
pass
else:
for word in badWords:
if message.content==(word):
await message.channel.send(f"Please do not use the word '{word}' or I will report you and block you")
ran=r.randint(1,2000)
if ran==1:
await message.channel.send("^Are you listening to this retard lmao")
if message.content==("gm") or message.content==("good morning"):
await message.channel.send("Another day closer to death" + str(message.author.mention))
if message.content==("gn") or message.content==("good night"):
await message.channel.send("sleep tight boyo")
if message.content==("good bye"):
await message.channel.send("bye loser")
if message.content==("what do we want"):
await message.channel.send("Equality for women")
if message.content==("when do we want it"):
await message.channel.send("Now")
async def on_member_ban(self,guild,member):
if member==c.owner_id:
await guild.owner.send("Can y'all stop banning my master")
def setup(bot):
bot.add_cog(Fun(bot)) | true | true |
f719a5d3a4154a174de4fc3bb0bdc9ef6f49b521 | 1,012 | py | Python | test/schemes/test_qz.py | stormymcstorm/condensa | ee3bf993b0032e5d84aeb3cc7f0ddcdb8d846bd9 | [
"Apache-2.0"
] | 153 | 2019-05-29T15:10:38.000Z | 2022-03-05T05:20:55.000Z | test/schemes/test_qz.py | rogerxujiang/condensa | c7321e0a362f73eca9349769b341a7dd688ee1b9 | [
"Apache-2.0"
] | 5 | 2019-07-11T20:56:38.000Z | 2022-03-14T10:12:15.000Z | test/schemes/test_qz.py | rogerxujiang/condensa | c7321e0a362f73eca9349769b341a7dd688ee1b9 | [
"Apache-2.0"
] | 21 | 2019-05-30T22:21:54.000Z | 2022-03-14T07:06:52.000Z | # Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import condensa
from condensa import schemes
def test_float16(device):
    """Check the float16 Quantize scheme on *device*: ``pi`` converts a
    Linear layer's weights to float16 and ``delta`` restores float32."""
    scheme = schemes.Quantize(condensa.float16)
    fc = torch.nn.Linear(100, 10).float().to(device)
    scheme.pi(fc)
    assert fc.weight.dtype == torch.float16
    scheme.delta(fc)
    assert fc.weight.dtype == torch.float32
if __name__ == '__main__':
    test_float16('cpu')
    if torch.cuda.is_available():
        # The CUDA-guarded branch previously re-ran the test on 'cpu',
        # making the guard pointless; exercise the GPU path here.
        test_float16('cuda')
| 30.666667 | 74 | 0.733202 |
import torch
import condensa
from condensa import schemes
def test_float16(device):
scheme = schemes.Quantize(condensa.float16)
fc = torch.nn.Linear(100, 10).float().to(device)
scheme.pi(fc)
assert fc.weight.dtype == torch.float16
scheme.delta(fc)
assert fc.weight.dtype == torch.float32
if __name__ == '__main__':
test_float16('cpu')
if torch.cuda.is_available():
test_float16('cpu')
| true | true |
f719a5ec02915c2d40aa2c28ddf93147dd695082 | 6,791 | py | Python | objects/CSCG/_3d/forms/standard/base/export/field.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | 1 | 2020-10-14T12:48:35.000Z | 2020-10-14T12:48:35.000Z | objects/CSCG/_3d/forms/standard/base/export/field.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | objects/CSCG/_3d/forms/standard/base/export/field.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
] | null | null | null | """We want to export the field to some data files.
"""
from root.config.main import *
from screws.freeze.main import FrozenOnly
from screws.miscellaneous.timer import check_filename, check_no_splcharacter
from scipy.io import savemat
class _3dCSC_SF_Export_Field(FrozenOnly):
    """Exports the field of a 3d CSCG standard form to .txt or .mat files."""
    def __init__(self, sf):
        """Store the standard form *sf* to be exported and freeze the instance."""
        assert '3dCSCG_standard_form' in sf.standard_properties.tags
        self._sf_ = sf
        self._freeze_self_()

    def to_file(self, filename, numOfSamples=1e6, regions=None):
        """Reconstruct the form on a uniform sample grid and write it to file.

        :param filename: output file name; the extension selects the format
            ('txt' or 'mat'; defaults to 'txt' when no extension is given).
        :param numOfSamples: total number of sample points (int/float), or a
            length-3 sequence of per-axis sample counts.
        :param regions: region name(s) to export; all regions when None.
        """
        filename, extension = check_filename(filename)
        if extension is None: extension = 'txt'
        supported_formats = ('txt', 'mat')
        assert extension in supported_formats, \
            f"format={extension} is not among the supported formats {supported_formats}."

        # Normalise numOfSamples into a per-axis triple.
        if isinstance(numOfSamples, (int, float)):
            assert numOfSamples > 0, f"numOfSamples={numOfSamples} is wrong."
            numOfSamples = [numOfSamples, numOfSamples, numOfSamples]
        else:
            assert isinstance(numOfSamples, (tuple, list)) and len(numOfSamples) == 3, \
                f"numOfSamples={numOfSamples} wrong."
            for nos in numOfSamples:
                assert isinstance(nos, (int, float)) and nos > 0, f"numOfSamples={numOfSamples} wrong."

        mesh = self._sf_.mesh

        # Normalise regions into a list of valid, unique region names.
        if regions is None:
            regions = mesh.domain.regions.names
        elif isinstance(regions, str):
            regions = [regions,]
        else:
            pass
        assert isinstance(regions, (list, tuple)), f"regions={regions} is wrong."
        assert len(set(regions)) == len(regions), f"regions={regions} has repeated regions."
        for i, r in enumerate(regions):
            assert r in mesh.domain.regions, f"regions[{i}]={r} is wrong."

        # Build per-axis reference coordinates in (-1, 1) at element-cell
        # centres; density is chosen so the total sample count is roughly
        # numOfSamples spread over all elements.
        rst = list()
        for i in range(3):
            density = int((numOfSamples[i] / mesh.elements.GLOBAL_num) ** (1/3)) + 1
            interval = 2 / density
            rst.append(np.linspace(-1 + interval/2, 1-interval/2, density))

        xyz, v = self._sf_.reconstruct(*rst, regions=regions)

        # Gather xyz & v from all cores into the master core (X, Y, Z, V*).
        # Forms with k in (1, 2) carry three value components; otherwise one.
        if rAnk == mAster_rank:
            X = [None for _ in range(mesh.elements.GLOBAL_num)]
            Y = [None for _ in range(mesh.elements.GLOBAL_num)]
            Z = [None for _ in range(mesh.elements.GLOBAL_num)]
            Vx = [None for _ in range(mesh.elements.GLOBAL_num)]
            if self._sf_.k in (1, 2):
                Vy = [None for _ in range(mesh.elements.GLOBAL_num)]
                Vz = [None for _ in range(mesh.elements.GLOBAL_num)]
            for j in mesh.elements.indices:
                X[j] = xyz[j][0]
                Y[j] = xyz[j][1]
                Z[j] = xyz[j][2]
                Vx[j] = v[j][0]
                if self._sf_.k in (1, 2):
                    # noinspection PyUnboundLocalVariable
                    Vy[j] = v[j][1]
                    # noinspection PyUnboundLocalVariable
                    Vz[j] = v[j][2]
            # Receive the element data held by every slave core.
            for i in sLave_ranks:
                xyz, v = cOmm.recv(source=i, tag=0)
                for j in xyz:
                    X[j] = xyz[j][0]
                    Y[j] = xyz[j][1]
                    Z[j] = xyz[j][2]
                    Vx[j] = v[j][0]
                    if self._sf_.k in (1, 2):
                        Vy[j] = v[j][1]
                        Vz[j] = v[j][2]
            del xyz, v
        else:
            cOmm.send([xyz, v], dest=mAster_rank, tag=0)
            del xyz, v

        # Reshape (region-wise stack) and write the files in the master core.
        if rAnk == mAster_rank:
            if self._sf_.k in (1, 2):
                # noinspection PyUnboundLocalVariable
                X, Y, Z, Vx, Vy, Vz = mesh.do.regionwsie_stack(X, Y, Z, Vx, Vy, Vz)
            else:
                # noinspection PyUnboundLocalVariable
                X, Y, Z, V = mesh.do.regionwsie_stack(X, Y, Z, Vx)
            for rn in regions:
                assert rn in X and rn in Y and rn in Z, "Data not full!"
                x, y, z = X[rn], Y[rn], Z[rn]
                if self._sf_.k in (1, 2):
                    vx, vy, vz = Vx[rn], Vy[rn], Vz[rn]
                else:
                    # noinspection PyUnboundLocalVariable
                    vx = V[rn]

                # Compose the per-region file name.
                RN = rn[2:] # drop the first two characters, e.g. 'R:center' -> 'center'
                assert check_no_splcharacter(RN), f"region name={RN} wrong."
                FILE_NAME = filename + '__InRegion_' + RN
                if self._sf_.k in (1, 2):
                    FILE_NAME += '__x_y_z_vx_vy_vz'
                else:
                    FILE_NAME += '__x_y_z_v'
                FILE_NAME = FILE_NAME + '.' + extension

                # Save or write the data.
                if extension == 'txt':
                    # for .txt, flatten the data into columns
                    x = x.ravel(order='F')[:,np.newaxis]
                    y = y.ravel(order='F')[:,np.newaxis]
                    z = z.ravel(order='F')[:,np.newaxis]
                    if self._sf_.k in (1, 2):
                        vx = vx.ravel(order='F')[:,np.newaxis]
                        # noinspection PyUnboundLocalVariable
                        vy = vy.ravel(order='F')[:,np.newaxis]
                        # noinspection PyUnboundLocalVariable
                        vz = vz.ravel(order='F')[:,np.newaxis]
                    else:
                        vx = vx.ravel(order='F')[:,np.newaxis]
                    if self._sf_.k in (1, 2):
                        # noinspection PyUnboundLocalVariable
                        TO_BE_WRITTEN = np.hstack((x, y, z, vx, vy, vz))
                    else:
                        TO_BE_WRITTEN = np.hstack((x, y, z, vx))
                    # noinspection PyTypeChecker
                    np.savetxt(FILE_NAME, TO_BE_WRITTEN)
                elif extension == 'mat':
                    # for .mat, save the (3-d) arrays as named variables
                    m_dic = dict()
                    m_dic['x'] = x
                    m_dic['y'] = y
                    m_dic['z'] = z
                    if self._sf_.k in (1, 2):
                        m_dic['vx'] = vx
                        m_dic['vy'] = vy
                        m_dic['vz'] = vz
                    else:
                        m_dic['v'] = vx
                    savemat(FILE_NAME, m_dic)
                else:
                    raise Exception(f"Format=.{extension} is not supported.")
from root.config.main import *
from screws.freeze.main import FrozenOnly
from screws.miscellaneous.timer import check_filename, check_no_splcharacter
from scipy.io import savemat
class _3dCSC_SF_Export_Field(FrozenOnly):
def __init__(self, sf):
assert '3dCSCG_standard_form' in sf.standard_properties.tags
self._sf_ = sf
self._freeze_self_()
def to_file(self, filename, numOfSamples=1e6, regions=None):
filename, extension = check_filename(filename)
if extension is None: extension = 'txt'
supported_formats = ('txt', 'mat')
assert extension in supported_formats, \
f"format={extension} is not among the supported formats {supported_formats}."
if isinstance(numOfSamples, (int, float)):
assert numOfSamples > 0, f"numOfSamples={numOfSamples} is wrong."
numOfSamples = [numOfSamples, numOfSamples, numOfSamples]
else:
assert isinstance(numOfSamples, (tuple, list)) and len(numOfSamples) == 3, \
f"numOfSamples={numOfSamples} wrong."
for nos in numOfSamples:
assert isinstance(nos, (int, float)) and nos > 0, f"numOfSamples={numOfSamples} wrong."
mesh = self._sf_.mesh
if regions is None:
regions = mesh.domain.regions.names
elif isinstance(regions, str):
regions = [regions,]
else:
pass
assert isinstance(regions, (list, tuple)), f"regions={regions} is wrong."
assert len(set(regions)) == len(regions), f"regions={regions} has repeated regions."
for i, r in enumerate(regions):
assert r in mesh.domain.regions, f"regions[{i}]={r} is wrong."
rst = list()
for i in range(3):
density = int((numOfSamples[i] / mesh.elements.GLOBAL_num) ** (1/3)) + 1
interval = 2 / density
rst.append(np.linspace(-1 + interval/2, 1-interval/2, density))
xyz, v = self._sf_.reconstruct(*rst, regions=regions)
if rAnk == mAster_rank:
X = [None for _ in range(mesh.elements.GLOBAL_num)]
Y = [None for _ in range(mesh.elements.GLOBAL_num)]
Z = [None for _ in range(mesh.elements.GLOBAL_num)]
Vx = [None for _ in range(mesh.elements.GLOBAL_num)]
if self._sf_.k in (1, 2):
Vy = [None for _ in range(mesh.elements.GLOBAL_num)]
Vz = [None for _ in range(mesh.elements.GLOBAL_num)]
for j in mesh.elements.indices:
X[j] = xyz[j][0]
Y[j] = xyz[j][1]
Z[j] = xyz[j][2]
Vx[j] = v[j][0]
if self._sf_.k in (1, 2):
Vy[j] = v[j][1]
Vz[j] = v[j][2]
for i in sLave_ranks:
xyz, v = cOmm.recv(source=i, tag=0)
for j in xyz:
X[j] = xyz[j][0]
Y[j] = xyz[j][1]
Z[j] = xyz[j][2]
Vx[j] = v[j][0]
if self._sf_.k in (1, 2):
Vy[j] = v[j][1]
Vz[j] = v[j][2]
del xyz, v
else:
cOmm.send([xyz, v], dest=mAster_rank, tag=0)
del xyz, v
if rAnk == mAster_rank:
if self._sf_.k in (1, 2):
X, Y, Z, Vx, Vy, Vz = mesh.do.regionwsie_stack(X, Y, Z, Vx, Vy, Vz)
else:
X, Y, Z, V = mesh.do.regionwsie_stack(X, Y, Z, Vx)
for rn in regions:
assert rn in X and rn in Y and rn in Z, "Data not full!"
x, y, z = X[rn], Y[rn], Z[rn]
if self._sf_.k in (1, 2):
vx, vy, vz = Vx[rn], Vy[rn], Vz[rn]
else:
vx = V[rn]
RN = rn[2:]
assert check_no_splcharacter(RN), f"region name={RN} wrong."
FILE_NAME = filename + '__InRegion_' + RN
if self._sf_.k in (1, 2):
FILE_NAME += '__x_y_z_vx_vy_vz'
else:
FILE_NAME += '__x_y_z_v'
FILE_NAME = FILE_NAME + '.' + extension
if extension == 'txt':
# for .txt, we have to flat the data =====================
x = x.ravel(order='F')[:,np.newaxis]
y = y.ravel(order='F')[:,np.newaxis]
z = z.ravel(order='F')[:,np.newaxis]
if self._sf_.k in (1, 2):
vx = vx.ravel(order='F')[:,np.newaxis]
# noinspection PyUnboundLocalVariable
vy = vy.ravel(order='F')[:,np.newaxis]
# noinspection PyUnboundLocalVariable
vz = vz.ravel(order='F')[:,np.newaxis]
else:
vx = vx.ravel(order='F')[:,np.newaxis]
if self._sf_.k in (1, 2):
# noinspection PyUnboundLocalVariable
TO_BE_WRITTEN = np.hstack((x, y, z, vx, vy, vz))
else:
TO_BE_WRITTEN = np.hstack((x, y, z, vx))
# noinspection PyTypeChecker
np.savetxt(FILE_NAME, TO_BE_WRITTEN)
elif extension == 'mat':
# for .mat, we save 3-d arrays. ==========================
m_dic = dict()
m_dic['x'] = x
m_dic['y'] = y
m_dic['z'] = z
if self._sf_.k in (1, 2):
m_dic['vx'] = vx
m_dic['vy'] = vy
m_dic['vz'] = vz
else:
m_dic['v'] = vx
savemat(FILE_NAME, m_dic)
else:
raise Exception(f"Format=.{extension} is not supported.") | true | true |
f719a60077cb4b23bbe3c54efafc1d30bc3f8163 | 3,252 | py | Python | config.py | LongKt7/Face_Recognize_Pytorch | baa02e633d379abe1001c8b8acb942617177329c | [
"MIT"
] | 1 | 2019-03-13T16:05:11.000Z | 2019-03-13T16:05:11.000Z | config.py | LongKt7/Face_Recognize_Pytorch | baa02e633d379abe1001c8b8acb942617177329c | [
"MIT"
] | null | null | null | config.py | LongKt7/Face_Recognize_Pytorch | baa02e633d379abe1001c8b8acb942617177329c | [
"MIT"
] | 1 | 2019-03-15T09:09:08.000Z | 2019-03-15T09:09:08.000Z | from easydict import EasyDict as edict
# from pathlib import Path
import torch
import os
from torchvision import transforms as trans
from utils.constants import *
# Download commands (Dropbox links) for the pretrained weight files,
# indexed 0: MobileFaceNet, 1: IR-SE50, 2: IR50 — see get_config below.
list_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',
'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',
'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']
def get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25):
    """Build the runtime configuration.

    :param mode: 'app' (inference) or 'training_eval' (training settings).
    :param net_size: 'large' (IR backbones) or 'mobi' (MobileFaceNet).
    :param net_mode: 'ir_se' or 'ir'; only used when net_size == 'large'.
    :param use_mtcnn: truthy to enable MTCNN face detection.
    :param threshold: face-matching distance threshold.
    :return: an EasyDict with all settings.
    """
    conf = edict()
    conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    conf.input_size = [112, 112]
    conf.face_limit = 5
    conf.min_face_size = 30
    conf.mode = mode
    conf.net_size = net_size
    if mode =='app':
        # Error message previously said "cogfig.py"; fixed the typo.
        assert net_size in ['mobi', 'large', None], 'net_size should be mobi or large, please change in config.py'
        conf.use_tensor = True
        conf.work_path = WORK_PATH
        conf.model_path = '%s/models'%WORK_PATH
        conf.log_path = '%s/log'%WORK_PATH
        conf.save_path = '%s/save'%WORK_PATH
        conf.facebank_path = '%s/Face_bank'%WORK_PATH
        conf.threshold = threshold
        conf.use_mtcnn = bool(use_mtcnn)
        #when inference, at maximum detect 10 faces in one image, my laptop is slow
        conf.test_transform = trans.Compose([
                    trans.ToTensor(),
                    trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
                ])
        if net_size == 'large':
            conf.use_mobilfacenet = False
            if net_mode == 'ir_se':
                conf.net_mode = 'ir_se' # or 'ir'
                conf.weight_path = '%s/weights/model_ir_se50.pth'%WORK_PATH
                conf.url = list_model[1]
            else:
                conf.net_mode = 'ir' # or 'ir'
                conf.weight_path = '%s/weights/model_ir50.pth'%WORK_PATH
                conf.url = list_model[2]
        if net_size =='mobi':
            conf.use_mobilfacenet = True
            conf.weight_path = '%s/weights/model_mobilefacenet.pth'%WORK_PATH
            conf.url = list_model[0]
        conf.video_source = 0
    if mode =='training_eval':
        conf.lr = 1e-3
        conf.milestones = [18,30,42]
        conf.momentum = 0.9
        conf.pin_memory = True
        # conf.num_workers = 4 # when batchsize is 200
        conf.num_workers = 3
        conf.train_root = "/mnt/01D4A1D481139570/Dataset/Face/casia"
        conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt'
        conf.batch_size = 4
        conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'
        conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'
        conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'
        conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'
        conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'
        conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'
    return conf
import torch
import os
from torchvision import transforms as trans
from utils.constants import *
list_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',
'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',
'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']
def get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25):
    """Build the global configuration object for inference or training.

    Args:
        mode: 'app' for inference; 'training_eval' adds training settings.
        net_size: 'large' (ResNet-50 backbones) or 'mobi' (MobileFaceNet).
        net_mode: 'ir_se' or 'ir'; selects the large-backbone variant.
        use_mtcnn: truthy to enable MTCNN-based face detection.
        threshold: face-matching distance threshold.

    Returns:
        An EasyDict carrying device, path, model-weight and (optionally)
        training hyper-parameter fields.
    """
    conf = edict()
    conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    conf.input_size = [112, 112]
    conf.face_limit = 5
    conf.min_face_size = 30
    conf.mode = mode
    conf.net_size = net_size
    if mode =='app':
        assert net_size in ['mobi', 'large', None], 'net_size should be mobi or large, please change in cogfig.py'
    conf.use_tensor = True
    # All artefact paths hang off WORK_PATH (from utils.constants).
    conf.work_path = WORK_PATH
    conf.model_path = '%s/models'%WORK_PATH
    conf.log_path = '%s/log'%WORK_PATH
    conf.save_path = '%s/save'%WORK_PATH
    conf.facebank_path = '%s/Face_bank'%WORK_PATH
    conf.threshold = threshold
    if use_mtcnn:
        conf.use_mtcnn = True
    else:
        conf.use_mtcnn = False
    conf.test_transform = trans.Compose([
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    if net_size == 'large':
        conf.use_mobilfacenet = False
        if net_mode == 'ir_se':
            conf.net_mode = 'ir_se'
            conf.weight_path = '%s/weights/model_ir_se50.pth'%WORK_PATH
            conf.url = list_model[1]
        else:
            conf.net_mode = 'ir'
            conf.weight_path = '%s/weights/model_ir50.pth'%WORK_PATH
            conf.url = list_model[2]
    if net_size =='mobi':
        conf.use_mobilfacenet = True
        conf.weight_path = '%s/weights/model_mobilefacenet.pth'%WORK_PATH
        conf.url = list_model[0]
    conf.video_source = 0
    if mode =='training_eval':
        conf.lr = 1e-3
        conf.milestones = [18,30,42]
        conf.momentum = 0.9
        conf.pin_memory = True
        # BUG FIX: this line was corrupted to `rs = 3` by a comment-stripping
        # pass; the commented original reads `conf.num_workers = 3`.
        conf.num_workers = 3
        conf.train_root = "/mnt/01D4A1D481139570/Dataset/Face/casia"
        conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt'
        conf.batch_size = 4
        conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'
        conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'
        conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'
        conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'
        conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'
        conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'
    return conf
f719a616152547d0300a25992cdb6dbefb41b0a6 | 16,599 | py | Python | utils/tests/test_util.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | utils/tests/test_util.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | utils/tests/test_util.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | # Copyright 2019 Graphcore Ltd.
from statistics import mean
import numpy as np
import os
import re
import subprocess
import sys
import time
"""Library of utility functions common between frameworks"""
def parse_results_for_speed(output, iter_tolerance, speed_tolerance):
    """Scan *output* for '<sec/itr> sec/itr. <items/sec>' lines and verify each.

    Every matching line is checked against the given (min, max) tolerance
    ranges via ``_verify_model_numbers``.

    Raises:
        AssertionError: if no timing line is present, or a timing value
            falls outside its tolerance range.
    """
    timing_pattern = re.compile(r"([\d.]+) +sec/itr. +([\d.]+)")
    matched_any = False
    for line in output.split("\n"):
        match = timing_pattern.match(line)
        if match is None:
            continue
        matched_any = True
        sec_per_itr, items_per_sec = (float(g) for g in match.groups())
        _verify_model_numbers(
            iter_tolerance, sec_per_itr, speed_tolerance, items_per_sec, line
        )
    if not matched_any:
        raise AssertionError("No results detected in this run")
def parse_results_for_accuracy(output, expected_accuracies, acc_tolerance):
    """Collect accuracy figures from *output* and verify them.

    Two line formats are recognised:
      * '  Accuracy=97.5%'            (training-style output)
      * '... Validation accuracy: 97.5%' (evaluation-style output)

    Parsed values are compared against *expected_accuracies* with
    *acc_tolerance* applied symmetrically by ``_verify_model_accuracies``.

    Raises:
        AssertionError: if no accuracy lines are found, or the number of
            parsed values differs from the number of expected values.
    """
    accuracies = []
    for line in output.split("\n"):
        # Match once and reuse the result; the original ran the identical
        # regex twice on every training-style line.
        match = re.match(r" + Accuracy=+([\d.]+)%", line)
        if match:
            accuracies.append(float(match.group(1)))
        elif re.search(r"Validation accuracy", line):
            accuracy_str = re.search(r"accuracy:\s(.*)", line).group(1)
            accuracies.append(float(accuracy_str[:accuracy_str.rfind("%")]))
    if len(accuracies) == 0:
        raise AssertionError("No results detected in this run")
    elif len(accuracies) != len(expected_accuracies):
        raise AssertionError("Expected accuracies and parsed accuracies have"
                             " different lengths")
    _verify_model_accuracies(accuracies, expected_accuracies, acc_tolerance)
def _verify_model_numbers(iter_tolerance, iterations,
speed_tolerance, speed, line):
iter_error = ""
speed_error = ""
# Verify iteration speed
if iterations > iter_tolerance[1]:
iter_error = ("The time per iteration has regressed above"
" the tolerance maximum: " +
str(iter_tolerance[1]))
elif iterations < iter_tolerance[0]:
iter_error = ("Time taken to compete an iteration was "
"suspiciously fast. Please verify the model"
" is operating correctly and tune tolerances"
" accordingly.")
# Verify item processing speed
if speed < speed_tolerance[0]:
speed_error = ("The number of items processed per second"
" has regressed below the tolerance: " +
str(speed_tolerance[0]))
elif speed > speed_tolerance[1]:
speed_error = ("The number of items processed per second"
" was suspiciously high. Please verify the"
" model is behaving correctly and tune"
" tolerances accordingly.")
if iter_error and speed_error:
sys.stderr.write("\n".join([line, iter_error, speed_error]))
raise AssertionError("Timings out of tolerance range")
elif iter_error or speed_error:
sys.stderr.write(line)
raise AssertionError(iter_error + speed_error)
def _verify_model_accuracies(accuracies, expected_accuracy, acc_tolerance):
    """Asserts a list of accuracies is within a list of expected accuracies
    with a tolerance applied.

    Args:
        accuracies: A list of floats representing the accuracies (%) produced
            by the model at each step.
        expected_accuracy: A list of floats representing the expected
            accuracies (%) produced by the model at each step.
        acc_tolerance: A float representing a percentage tolerance applied on
            top of the expected accuracies that the accuracies produced by
            the model should sit within.

    Raises:
        Assertion Error: Accuracy produced by the model are not within
            the expected limits.
    """
    for iter_num in range(len(accuracies)):
        exp_acc = expected_accuracy[iter_num]
        # Pre-render the "expected" half of the failure message: the window
        # [exp_acc - tol, exp_acc + tol], printed to 2 decimal places.
        exp_acc_str = (
            "{0} = {1} +- {2} = [{3:.{5}f}, {4:.{5}f}]".format(
                "Expected accuracy (%)".ljust(22),
                exp_acc,
                acc_tolerance,
                exp_acc - acc_tolerance,
                exp_acc + acc_tolerance,
                2
            )
        )
        acc = accuracies[iter_num]
        acc_str = "{} = {:.{}f}".format(
            "Accuracy (%)".ljust(22),
            acc,
            2
        )
        full_acc_str = "{}\n{}".format(acc_str, exp_acc_str)
        # Below the window: a genuine accuracy regression.
        if acc < exp_acc - acc_tolerance:
            raise AssertionError(
                "After iteration {}, the model is less accurate"
                " than expected.\n"
                "{}".format(iter_num + 1, full_acc_str)
            )
        # Above the window: suspiciously good, flagged for manual review
        # (the tolerance is deliberately symmetric).
        elif acc > exp_acc + acc_tolerance:
            raise AssertionError(
                "After iteration {}, the model is producing an accuracy"
                " that is suspiciously high and should be reviewed.\n"
                "{}".format(iter_num + 1, full_acc_str)
            )
def assert_result_equals_tensor_value(output, tensor):
    """Searches for a single tensor result in the first line of the output

    Searches the first line of the string output for a line with format
    '[array([3., 8.], dtype=float32)]' and asserts its equal to the numpy
    tensor argument.

    Args:
        output: String containing the string representation of a numpy
            tensor
        tensor: numpy tensor representing the expected result

    Returns:
        None

    Raises:
        Assertion Error: Output is not in correct format
        Assertion Error: Output does not contain a string representation
            of a numpy array
        Assertion Error: Output numpy array does not equal the expected
            numpy array
    """
    # TODO - np representation over multiple lines
    # TODO - large np array output
    # TODO - multiple dimension np output
    list_regex = r"^\[.*?\]$"
    np_array_str_regex = r"array\(.*?, dtype=.*?\)$"
    first_line = output.split("\n")[0]
    if not re.match(list_regex, first_line):
        raise AssertionError(
            "Result not in expected string format."
            " Expecting stringified list "
            " eg. [array([3., 8.], dtype=float32)]"
        )

    contents = first_line[1:-1]
    if not re.match(np_array_str_regex, contents):
        raise AssertionError(
            "Expecting numpy representation "
            "array with dtype "
            "eg. array([3., 8.], dtype=float32)"
        )

    # BUG FIX: `contents` is a plain str, so the old failure message called
    # np.array_repr(contents), which raised AttributeError and masked the
    # intended AssertionError.  Format the string directly and repr the
    # expected tensor instead.
    assert contents == np.array_repr(tensor), (
        "Output value {} does not "
        "equal expected value {}".format(contents, np.array_repr(tensor))
    )
def parse_results_for_ipus_used(output):
    """Return the IPU count reported by a line such as ' On 2 IPUs.'.

    Raises:
        AssertionError: if no such line is present in *output*.
    """
    for line in output.split("\n"):
        found = re.match(r" On ([\d.]+) IPUs.", line)
        if found:
            return int(found.group(1))
    raise AssertionError("Expecting line detailing IPU usage "
                         "eg. ' On 2 IPUs.'")
def assert_shards(output, expected_shards):
    """Assert that the run reported exactly *expected_shards* IPUs in use."""
    used = parse_results_for_ipus_used(output)
    assert used == expected_shards
def get_final_accuracy(output):
    """Return the last 'Accuracy=NN%' value reported in *output*."""
    per_group = parse_results_with_regex(output, r"Accuracy=([\d.]+)\%")
    return per_group[0][-1]
def get_final_loss(output):
    """Return the last 'Loss=NN' value reported in *output*."""
    per_group = parse_results_with_regex(output, r"Loss=([\d.]+)")
    return per_group[0][-1]
def get_average_speeds(output):
    """Return the mean seconds/iteration and mean tokens/second.

    Args:
        output: String representing the output of a test.

    Returns:
        (mean sec/itr, mean tokens/sec) averaged over every timing line.
    """
    itr_sec_list, tokens_sec_list = parse_results_with_regex(
        output, r"([\d.]+) +sec/itr. +([\d.]+)")
    return mean(itr_sec_list), mean(tokens_sec_list)
def parse_results_with_regex(output, regex):
    """Collect every match of *regex* in *output*, grouped by capture position.

    Each line of *output* is searched once.  The return value holds one list
    per capture group: entry i contains the float values produced by group
    i+1, in the order the matching lines appeared.

    Args:
        output: String representing the output of a test.
        regex: Regex of result to find.

    Returns:
        A list of lists of floats, one inner list per capture group.

    Raises:
        AssertionError: no line of *output* matched *regex*.
    """
    columns = []
    for line in output.split("\n"):
        found = re.search(regex, line)
        if not found:
            continue
        # Size the column lists from the first matching line.
        if not columns:
            columns = [[] for _ in range(found.lastindex)]
        for group_index in range(found.lastindex):
            columns[group_index].append(float(found.group(group_index + 1)))
    if not columns:
        raise AssertionError("Regex {} not found in result".format(regex))
    return columns
def get_total_epochs(output):
    """Return the last epoch number reported by lines like 'Epoch #3'.

    Raises:
        AssertionError: if no 'Epoch #N' line appears in *output*.
    """
    epochs = None
    for line in output.split("\n"):
        # (\d+) rather than ([\d.]+): a stray captured '.' would crash int().
        epoch_match = re.search(r"Epoch #(\d+)", line)
        if epoch_match:
            epochs = int(epoch_match.group(1))
    # BUG FIX: test 'is None', not truthiness -- 'Epoch #0' is a legitimate
    # first epoch and must not be reported as missing.
    if epochs is None:
        raise AssertionError("Epochs not found in output, eg. "
                             "Epoch #3")
    return epochs
def assert_total_run_time(total_time, time_range):
    """Assert *total_time* (seconds) lies within the (min, max) *time_range*.

    Raises:
        AssertionError: if *total_time* is below the minimum or above the
            maximum bound.
    """
    lower_bound, upper_bound = time_range
    assert total_time >= lower_bound
    assert total_time <= upper_bound
def assert_final_accuracy(output, minimum, maximum):
    """Assert the final reported accuracy (%) lies within [minimum, maximum].

    Args:
        output: String representing the raw output of a model.
        minimum: lowest acceptable final accuracy, in percent.
        maximum: highest acceptable final accuracy, in percent.

    Raises:
        AssertionError: the final accuracy is outside the given bounds.
    """
    final = get_final_accuracy(output)
    assert final >= minimum
    assert final <= maximum
def run_python_script_helper(cwd, script, **kwargs):
    """Run *script* under the current major Python version and return stdout.

    Keyword arguments are flattened into alternating '--flag value' command
    line tokens; empty-string keys or values are dropped so optional flags
    can be disabled by passing "".

    Args:
        cwd: directory containing the python script.
        script: file name of the python script to run.
        kwargs: command-line argument pairs for the script.

    Returns:
        The captured stdout of the script (also echoed to this process's
        stdout).
    """
    interpreter = "python{}".format(sys.version_info[0])
    cmd = [interpreter, script]
    for key, value in kwargs.items():
        cmd.extend(str(token) for token in (key, value) if token != "")
    out = subprocess.check_output(cmd, cwd=cwd, universal_newlines=True)
    print(out)
    return out
def run_test_helper(subprocess_function, total_run_time=None,
                    total_run_time_tolerance=0.1, **kwargs):
    """Run *subprocess_function* with **kwargs and optionally time-check it.

    If *total_run_time* is given, the measured wall-clock duration must fall
    within total_run_time +- total_run_time_tolerance (relative), otherwise
    an AssertionError is raised.

    Args:
        subprocess_function: callable that launches the model under test.
        total_run_time: expected run time in seconds, or None to skip the
            timing check.
        total_run_time_tolerance: relative tolerance applied symmetrically
            around *total_run_time*.

    Returns:
        The raw output returned by *subprocess_function*.
    """
    started = time.time()
    output = subprocess_function(**kwargs)
    elapsed = time.time() - started

    if total_run_time:
        allowed_range = range_from_tolerances(
            total_run_time, total_run_time_tolerance
        )
        assert_total_run_time(elapsed, allowed_range)
    return output
def range_from_tolerances(value, tolerance):
    """Return the (minimum, maximum) window of *value* +- *tolerance*.

    Args:
        value: the mean value the tolerance is applied to.
        tolerance: relative tolerance (0.0-1.0) applied symmetrically.

    Returns:
        Tuple of floats: (value shrunk by tolerance, value grown by
        tolerance).
    """
    lower = get_minimum_with_tolerance(value, tolerance)
    upper = get_maximum_with_tolerance(value, tolerance)
    return (lower, upper)
def get_minimum_with_tolerance(value, tolerance):
    """Return *value* scaled down by the relative *tolerance* (0.0-1.0)."""
    scale = 1 - tolerance
    return value * scale
def get_maximum_with_tolerance(value, tolerance):
    """Return *value* scaled up by the relative *tolerance* (0.0-1.0)."""
    scale = 1 + tolerance
    return value * scale
def check_data_exists(data_path, expected_files_list):
    """Return True iff *data_path* exists and contains every expected file.

    Args:
        data_path: directory where the data is expected to be.
        expected_files_list: file names expected inside *data_path*.

    Returns:
        True when the directory and all listed files exist, else False.
    """
    if not os.path.exists(data_path):
        return False
    return all(
        os.path.isfile(os.path.join(data_path, name))
        for name in expected_files_list
    )
| 34.36646 | 85 | 0.636123 |
from statistics import mean
import numpy as np
import os
import re
import subprocess
import sys
import time
def parse_results_for_speed(output, iter_tolerance, speed_tolerance):
found_a_result = False
for line in output.split("\n"):
matches = re.match(r"([\d.]+) +sec/itr. +([\d.]+)", line)
if matches:
found_a_result = True
iterations, speed = matches.groups()
iterations = float(iterations)
speed = float(speed)
_verify_model_numbers(
iter_tolerance, iterations, speed_tolerance, speed, line
)
if not found_a_result:
raise AssertionError("No results detected in this run")
def parse_results_for_accuracy(output, expected_accuracies, acc_tolerance):
accuracies = []
for line in output.split("\n"):
if re.match(r" + Accuracy=+([\d.]+)%", line):
accuracy = float(re.match(r" + Accuracy=+([\d.]+)%", line).groups()[0])
accuracies.append(accuracy)
elif re.search(r"Validation accuracy", line):
accuracy_str = re.search(r"accuracy:\s(.*)", line).group(1)
accuracy = float(accuracy_str[:accuracy_str.rfind("%")])
accuracies.append(accuracy)
if len(accuracies) == 0:
raise AssertionError("No results detected in this run")
elif len(accuracies) != len(expected_accuracies):
raise AssertionError("Expected accuracies and parsed accuracies have"
" different lengths")
_verify_model_accuracies(accuracies, expected_accuracies, acc_tolerance)
def _verify_model_numbers(iter_tolerance, iterations,
speed_tolerance, speed, line):
iter_error = ""
speed_error = ""
if iterations > iter_tolerance[1]:
iter_error = ("The time per iteration has regressed above"
" the tolerance maximum: " +
str(iter_tolerance[1]))
elif iterations < iter_tolerance[0]:
iter_error = ("Time taken to compete an iteration was "
"suspiciously fast. Please verify the model"
" is operating correctly and tune tolerances"
" accordingly.")
if speed < speed_tolerance[0]:
speed_error = ("The number of items processed per second"
" has regressed below the tolerance: " +
str(speed_tolerance[0]))
elif speed > speed_tolerance[1]:
speed_error = ("The number of items processed per second"
" was suspiciously high. Please verify the"
" model is behaving correctly and tune"
" tolerances accordingly.")
if iter_error and speed_error:
sys.stderr.write("\n".join([line, iter_error, speed_error]))
raise AssertionError("Timings out of tolerance range")
elif iter_error or speed_error:
sys.stderr.write(line)
raise AssertionError(iter_error + speed_error)
def _verify_model_accuracies(accuracies, expected_accuracy, acc_tolerance):
for iter_num in range(len(accuracies)):
exp_acc = expected_accuracy[iter_num]
exp_acc_str = (
"{0} = {1} +- {2} = [{3:.{5}f}, {4:.{5}f}]".format(
"Expected accuracy (%)".ljust(22),
exp_acc,
acc_tolerance,
exp_acc - acc_tolerance,
exp_acc + acc_tolerance,
2
)
)
acc = accuracies[iter_num]
acc_str = "{} = {:.{}f}".format(
"Accuracy (%)".ljust(22),
acc,
2
)
full_acc_str = "{}\n{}".format(acc_str, exp_acc_str)
if acc < exp_acc - acc_tolerance:
raise AssertionError(
"After iteration {}, the model is less accurate"
" than expected.\n"
"{}".format(iter_num + 1, full_acc_str)
)
elif acc > exp_acc + acc_tolerance:
raise AssertionError(
"After iteration {}, the model is producing an accuracy"
" that is suspiciously high and should be reviewed.\n"
"{}".format(iter_num + 1, full_acc_str)
)
def assert_result_equals_tensor_value(output, tensor):
list_regex = r"^\[.*?\]$"
np_array_str_regex = r"array\(.*?, dtype=.*?\)$"
first_line = output.split("\n")[0]
if not re.match(list_regex, first_line):
raise AssertionError(
"Result not in expected string format."
" Expecting stringified list "
" eg. [array([3., 8.], dtype=float32)]"
)
contents = first_line[1:-1]
if not re.match(np_array_str_regex, contents):
raise AssertionError(
"Expecting numpy representation "
"array with dtype "
"eg. array([3., 8.], dtype=float32)"
)
assert contents == np.array_repr(tensor), (
"Output value {} does not "
"equal expected value {}".format(np.array_repr(contents), tensor)
)
def parse_results_for_ipus_used(output):
shards_regex = r" On ([\d.]+) IPUs."
for line in output.split("\n"):
matches = re.match(shards_regex, line)
if matches:
shards = matches.group(1)
return int(shards)
raise AssertionError("Expecting line detailing IPU usage "
"eg. ' On 2 IPUs.'")
def assert_shards(output, expected_shards):
actual_shards = parse_results_for_ipus_used(output)
assert actual_shards == expected_shards
def get_final_accuracy(output):
result_regex = r"Accuracy=([\d.]+)\%"
result_list = parse_results_with_regex(output, result_regex)
result = result_list[0]
return result[-1]
def get_final_loss(output):
result_regex = r"Loss=([\d.]+)"
result_list = parse_results_with_regex(output, result_regex)
result = result_list[0]
return result[-1]
def get_average_speeds(output):
result_regex = r"([\d.]+) +sec/itr. +([\d.]+)"
results = parse_results_with_regex(output, result_regex)
itr_sec_list = results[0]
tokens_sec_list = results[1]
return mean(itr_sec_list), mean(tokens_sec_list)
def parse_results_with_regex(output, regex):
results = []
for line in output.split("\n"):
matches = re.search(regex, line)
if matches:
number_of_results = matches.lastindex
if results == []:
results = [None] * number_of_results
for match_index in range(0, number_of_results):
result = float(matches.group(match_index + 1))
if results[match_index]:
results[match_index].append(result)
continue
results[match_index] = [result]
if results == []:
raise AssertionError("Regex {} not found in result".format(regex))
return results
def get_total_epochs(output):
epochs = None
for line in output.split("\n"):
epoch_match = re.search(r"Epoch #([\d.]+)", line)
if epoch_match:
epochs = int(epoch_match.group(1))
if not epochs:
raise AssertionError("Epochs not found in output, eg. "
"Epoch #3")
return epochs
def assert_total_run_time(total_time, time_range):
minimum_time = time_range[0]
maximum_time = time_range[1]
assert total_time >= minimum_time
assert total_time <= maximum_time
def assert_final_accuracy(output, minimum, maximum):
accuracy = get_final_accuracy(output)
assert accuracy >= minimum
assert accuracy <= maximum
def run_python_script_helper(cwd, script, **kwargs):
py_version = "python{}".format(sys.version_info[0])
cmd = [py_version, script]
if kwargs:
args = [
str(item) for sublist in kwargs.items() for item in sublist if item != ""
]
cmd.extend(args)
out = subprocess.check_output(cmd, cwd=cwd, universal_newlines=True)
print(out)
return out
def run_test_helper(subprocess_function, total_run_time=None,
total_run_time_tolerance=0.1, **kwargs):
start_time = time.time()
out = subprocess_function(**kwargs)
total_time = time.time() - start_time
if total_run_time:
total_run_time_range = range_from_tolerances(
total_run_time, total_run_time_tolerance
)
assert_total_run_time(total_time, total_run_time_range)
return out
def range_from_tolerances(value, tolerance):
return (
get_minimum_with_tolerance(value, tolerance),
get_maximum_with_tolerance(value, tolerance),
)
def get_minimum_with_tolerance(value, tolerance):
return value * (1 - tolerance)
def get_maximum_with_tolerance(value, tolerance):
return value * (1 + tolerance)
def check_data_exists(data_path, expected_files_list):
if os.path.exists(data_path):
for filename in expected_files_list:
if not os.path.isfile(os.path.join(data_path, filename)):
return False
return True
return False
| true | true |
f719a788aa6769dc9f43b9f60b9a57cc0504643a | 1,535 | py | Python | code/clients/requests.py | lpmatos/gitlab-analytics | 47a220bb54efa473f01bf033291f65b38accdbca | [
"MIT"
] | 2 | 2020-09-16T11:03:01.000Z | 2021-07-30T07:05:58.000Z | code/clients/requests.py | lpmatos/gitlab-analytics | 47a220bb54efa473f01bf033291f65b38accdbca | [
"MIT"
] | null | null | null | code/clients/requests.py | lpmatos/gitlab-analytics | 47a220bb54efa473f01bf033291f65b38accdbca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import annotations
import requests
from validators.url import URL
from abc import ABC, abstractmethod
from requests.adapters import HTTPAdapter
from typing import Text, NoReturn, Callable, Dict
from requests.packages.urllib3.util.retry import Retry
class RequestResponse:
    """Lightweight snapshot of an HTTP response: status, reason and JSON body."""

    def __init__(self, response: Text) -> NoReturn:
        # Capture everything of interest eagerly; the body stream is read
        # exactly once via .json().
        self.status = response.status_code
        self.reason = response.reason
        self.json = response.json()

    def get_json(self) -> Dict:
        """Return the decoded JSON payload captured at construction time."""
        return self.json
class RequestsImplementation(ABC):
    """Abstract base for HTTP clients built on ``requests``.

    Validates the target URL, optionally downgrades https to http, and
    creates either a plain session or one with retry/backoff behaviour.
    Expects ``logger``, ``retry``, ``is_secure`` and ``session`` keyword
    arguments.
    """

    def __init__(self, url: Text, *args, **kwargs) -> NoReturn:
        # NOTE(review): self.url is only assigned when validation passes;
        # an invalid url leaves the attribute unset -- confirm intended.
        if URL.url_validator(url):
            if not kwargs["is_secure"]:
                url = url.replace("https", "http")
            self.url = url
        self._logger = kwargs["logger"]
        if kwargs["retry"]:
            # BUG FIX: the session must be passed by keyword.  Passing it
            # positionally handed the Session object to the `retries`
            # parameter of requests_retry_session.
            self.session = self.requests_retry_session(session=kwargs["session"])
        else:
            self.session = requests.Session()

    @staticmethod
    def requests_retry_session(retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None) -> requests.Session():
        """Return *session* (or a fresh Session) mounted with retrying adapters.

        Connect/read failures and the listed HTTP status codes are retried
        with exponential backoff via urllib3's Retry.
        """
        session = session or requests.Session()
        retry = Retry(total=retries, read=retries,
                      connect=retries, backoff_factor=backoff_factor,
                      status_forcelist=status_forcelist,)
        adapter = HTTPAdapter(max_retries=retry)
        session.mount("http://", adapter)
        session.mount("https://", adapter)
        return session

    @abstractmethod
    def get(self) -> NoReturn:
        """Issue the GET request; implemented by concrete subclasses."""
        pass

    @property
    def logger(self) -> Callable:
        """Return the logger supplied at construction time."""
        return self._logger
| 28.962264 | 130 | 0.704235 |
from __future__ import annotations
import requests
from validators.url import URL
from abc import ABC, abstractmethod
from requests.adapters import HTTPAdapter
from typing import Text, NoReturn, Callable, Dict
from requests.packages.urllib3.util.retry import Retry
class RequestResponse:
def __init__(self, response: Text) -> NoReturn:
self.status = response.status_code
self.reason = response.reason
self.json = response.json()
def get_json(self) -> Dict:
return self.json
class RequestsImplementation(ABC):
def __init__(self, url: Text, *args, **kwargs) -> NoReturn:
if URL.url_validator(url):
if not kwargs["is_secure"]:
url = url.replace("https", "http")
self.url = url
self._logger = kwargs["logger"]
if kwargs["retry"]:
self.session = self.requests_retry_session(kwargs["session"])
else:
self.session = requests.Session()
@staticmethod
def requests_retry_session(retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None) -> requests.Session():
session = session or requests.Session()
retry = Retry(total=retries, read=retries,
connect=retries, backoff_factor=backoff_factor,
status_forcelist=status_forcelist,)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
@abstractmethod
def get(self) -> NoReturn:
pass
@property
def logger(self) -> Callable:
return self._logger
| true | true |
f719a9168a4d3106600fffcc47c14cc90f3cadc7 | 6,299 | py | Python | official/vision/detection/dataloader/tf_example_decoder.py | gujralsanyam22/models | d96f8f043dbe2b5ca8ea1785f57df8faf68d8875 | [
"Apache-2.0"
] | 153 | 2020-10-25T13:58:04.000Z | 2022-03-07T06:01:54.000Z | official/vision/detection/dataloader/tf_example_decoder.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | [
"Apache-2.0"
] | 11 | 2020-07-13T08:29:00.000Z | 2022-03-24T07:21:09.000Z | official/vision/detection/dataloader/tf_example_decoder.py | yangxl-2014-fe/models | 11ea5237818e791a5717716d5413977f4c4db1e3 | [
"Apache-2.0"
] | 23 | 2020-10-25T14:44:47.000Z | 2021-03-31T02:12:13.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow as tf
class TfExampleDecoder(object):
  """Tensorflow Example proto decoder."""

  def __init__(self, include_mask=False):
    # include_mask: when True, also declare and decode the per-instance
    # PNG-encoded segmentation masks stored under 'image/object/mask'.
    self._include_mask = include_mask
    # Feature spec consumed by tf.io.parse_single_example in decode().
    # Per-object fields are variable length because the number of boxes
    # differs per image.
    self._keys_to_features = {
        'image/encoded':
            tf.io.FixedLenFeature((), tf.string),
        'image/source_id':
            tf.io.FixedLenFeature((), tf.string),
        'image/height':
            tf.io.FixedLenFeature((), tf.int64),
        'image/width':
            tf.io.FixedLenFeature((), tf.int64),
        'image/object/bbox/xmin':
            tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/xmax':
            tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymin':
            tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymax':
            tf.io.VarLenFeature(tf.float32),
        'image/object/class/label':
            tf.io.VarLenFeature(tf.int64),
        'image/object/area':
            tf.io.VarLenFeature(tf.float32),
        'image/object/is_crowd':
            tf.io.VarLenFeature(tf.int64),
    }
    if include_mask:
      self._keys_to_features.update({
          'image/object/mask':
              tf.io.VarLenFeature(tf.string),
      })

  def _decode_image(self, parsed_tensors):
    """Decodes the image and set its static shape."""
    image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
    image.set_shape([None, None, 3])
    return image

  def _decode_boxes(self, parsed_tensors):
    """Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
    xmin = parsed_tensors['image/object/bbox/xmin']
    xmax = parsed_tensors['image/object/bbox/xmax']
    ymin = parsed_tensors['image/object/bbox/ymin']
    ymax = parsed_tensors['image/object/bbox/ymax']
    return tf.stack([ymin, xmin, ymax, xmax], axis=-1)

  def _decode_masks(self, parsed_tensors):
    """Decode a set of PNG masks to the tf.float32 tensors."""
    def _decode_png_mask(png_bytes):
      # Single-channel PNG -> [H, W] float mask.
      mask = tf.squeeze(
          tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
      mask = tf.cast(mask, dtype=tf.float32)
      mask.set_shape([None, None])
      return mask
    height = parsed_tensors['image/height']
    width = parsed_tensors['image/width']
    masks = parsed_tensors['image/object/mask']
    # Empty mask list decodes to a [0, H, W] tensor rather than failing.
    return tf.cond(
        pred=tf.greater(tf.size(input=masks), 0),
        true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
        false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))

  def _decode_areas(self, parsed_tensors):
    # Use the stored per-object areas when present; otherwise fall back to
    # the box areas (in the boxes' own coordinate units -- presumably
    # normalized coordinates; TODO confirm against the dataset writer).
    xmin = parsed_tensors['image/object/bbox/xmin']
    xmax = parsed_tensors['image/object/bbox/xmax']
    ymin = parsed_tensors['image/object/bbox/ymin']
    ymax = parsed_tensors['image/object/bbox/ymax']
    return tf.cond(
        tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
        lambda: parsed_tensors['image/object/area'],
        lambda: (xmax - xmin) * (ymax - ymin))

  def decode(self, serialized_example):
    """Decode the serialized example.

    Args:
      serialized_example: a single serialized tf.Example string.

    Returns:
      decoded_tensors: a dictionary of tensors with the following fields:
        - image: a uint8 tensor of shape [None, None, 3].
        - source_id: a string scalar tensor.
        - height: an integer scalar tensor.
        - width: an integer scalar tensor.
        - groundtruth_classes: a int64 tensor of shape [None].
        - groundtruth_is_crowd: a bool tensor of shape [None].
        - groundtruth_area: a float32 tensor of shape [None].
        - groundtruth_boxes: a float32 tensor of shape [None, 4].
        - groundtruth_instance_masks: a float32 tensor of shape
            [None, None, None].
        - groundtruth_instance_masks_png: a string tensor of shape [None].
    """
    parsed_tensors = tf.io.parse_single_example(
        serialized=serialized_example, features=self._keys_to_features)
    # VarLen features parse as SparseTensors; densify everything so the
    # decode helpers below can treat each field as a regular dense tensor.
    for k in parsed_tensors:
      if isinstance(parsed_tensors[k], tf.SparseTensor):
        if parsed_tensors[k].dtype == tf.string:
          parsed_tensors[k] = tf.sparse.to_dense(
              parsed_tensors[k], default_value='')
        else:
          parsed_tensors[k] = tf.sparse.to_dense(
              parsed_tensors[k], default_value=0)

    image = self._decode_image(parsed_tensors)
    boxes = self._decode_boxes(parsed_tensors)
    areas = self._decode_areas(parsed_tensors)
    # Missing is_crowd annotations default to all-False, shaped like the
    # class labels so downstream ops see one flag per box.
    is_crowds = tf.cond(
        tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
        lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
        lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool))  # pylint: disable=line-too-long
    if self._include_mask:
      masks = self._decode_masks(parsed_tensors)

    decoded_tensors = {
        'image': image,
        'source_id': parsed_tensors['image/source_id'],
        'height': parsed_tensors['image/height'],
        'width': parsed_tensors['image/width'],
        'groundtruth_classes': parsed_tensors['image/object/class/label'],
        'groundtruth_is_crowd': is_crowds,
        'groundtruth_area': areas,
        'groundtruth_boxes': boxes,
    }
    if self._include_mask:
      decoded_tensors.update({
          'groundtruth_instance_masks': masks,
          'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
      })
    return decoded_tensors
| 40.121019 | 122 | 0.657247 |
import tensorflow as tf
class TfExampleDecoder(object):
  """Decoder for serialized COCO-style detection tf.Example protos.

  Parses image bytes, normalized groundtruth boxes, class labels, areas,
  is_crowd flags and (optionally) per-instance PNG masks.
  """
  def __init__(self, include_mask=False):
    # include_mask: also parse per-instance PNG masks ('image/object/mask').
    self._include_mask = include_mask
    # Feature spec: scalar image/meta fields are fixed-length; per-object
    # fields are variable-length (one entry per annotated object).
    self._keys_to_features = {
        'image/encoded':
            tf.io.FixedLenFeature((), tf.string),
        'image/source_id':
            tf.io.FixedLenFeature((), tf.string),
        'image/height':
            tf.io.FixedLenFeature((), tf.int64),
        'image/width':
            tf.io.FixedLenFeature((), tf.int64),
        'image/object/bbox/xmin':
            tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/xmax':
            tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymin':
            tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymax':
            tf.io.VarLenFeature(tf.float32),
        'image/object/class/label':
            tf.io.VarLenFeature(tf.int64),
        'image/object/area':
            tf.io.VarLenFeature(tf.float32),
        'image/object/is_crowd':
            tf.io.VarLenFeature(tf.int64),
    }
    if include_mask:
      self._keys_to_features.update({
          'image/object/mask':
              tf.io.VarLenFeature(tf.string),
      })
  def _decode_image(self, parsed_tensors):
    """Decodes image bytes to a uint8 tensor of shape [None, None, 3]."""
    image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
    image.set_shape([None, None, 3])
    return image
  def _decode_boxes(self, parsed_tensors):
    """Stacks box coordinates into [N, 4] in [ymin, xmin, ymax, xmax] order."""
    xmin = parsed_tensors['image/object/bbox/xmin']
    xmax = parsed_tensors['image/object/bbox/xmax']
    ymin = parsed_tensors['image/object/bbox/ymin']
    ymax = parsed_tensors['image/object/bbox/ymax']
    return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
  def _decode_masks(self, parsed_tensors):
    """Decodes PNG instance masks into a float32 [N, height, width] tensor."""
    def _decode_png_mask(png_bytes):
      # Decode a single-channel PNG and drop the trailing channel axis.
      mask = tf.squeeze(
          tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
      mask = tf.cast(mask, dtype=tf.float32)
      mask.set_shape([None, None])
      return mask
    height = parsed_tensors['image/height']
    width = parsed_tensors['image/width']
    masks = parsed_tensors['image/object/mask']
    # tf.cond guards the zero-object case: map_fn over an empty string tensor
    # would be ill-typed, so return an empty [0, H, W] batch instead.
    return tf.cond(
        pred=tf.greater(tf.size(input=masks), 0),
        true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
        false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))
  def _decode_areas(self, parsed_tensors):
    """Returns stored areas, or box areas when the area field is empty."""
    xmin = parsed_tensors['image/object/bbox/xmin']
    xmax = parsed_tensors['image/object/bbox/xmax']
    ymin = parsed_tensors['image/object/bbox/ymin']
    ymax = parsed_tensors['image/object/bbox/ymax']
    # Fall back to width*height of the (normalized) boxes when areas are
    # missing from the record.
    return tf.cond(
        tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
        lambda: parsed_tensors['image/object/area'],
        lambda: (xmax - xmin) * (ymax - ymin))
  def decode(self, serialized_example):
    """Decode the serialized example.

    Args:
      serialized_example: a single serialized tf.Example string.

    Returns:
      decoded_tensors: a dictionary of tensors with the following fields:
        - image: a uint8 tensor of shape [None, None, 3].
        - source_id: a string scalar tensor.
        - height: an integer scalar tensor.
        - width: an integer scalar tensor.
        - groundtruth_classes: a int64 tensor of shape [None].
        - groundtruth_is_crowd: a bool tensor of shape [None].
        - groundtruth_area: a float32 tensor of shape [None].
        - groundtruth_boxes: a float32 tensor of shape [None, 4].
        - groundtruth_instance_masks: a float32 tensor of shape
            [None, None, None].
        - groundtruth_instance_masks_png: a string tensor of shape [None].
    """
    parsed_tensors = tf.io.parse_single_example(
        serialized=serialized_example, features=self._keys_to_features)
    # Densify the sparse per-object features so downstream ops see dense
    # tensors ('' / 0 defaults are never observed: lengths match).
    for k in parsed_tensors:
      if isinstance(parsed_tensors[k], tf.SparseTensor):
        if parsed_tensors[k].dtype == tf.string:
          parsed_tensors[k] = tf.sparse.to_dense(
              parsed_tensors[k], default_value='')
        else:
          parsed_tensors[k] = tf.sparse.to_dense(
              parsed_tensors[k], default_value=0)
    image = self._decode_image(parsed_tensors)
    boxes = self._decode_boxes(parsed_tensors)
    areas = self._decode_areas(parsed_tensors)
    # Default is_crowd to all-False (shaped like the labels) when absent.
    is_crowds = tf.cond(
        tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
        lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
        lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool))  # pylint: disable=line-too-long
    if self._include_mask:
      masks = self._decode_masks(parsed_tensors)
    decoded_tensors = {
        'image': image,
        'source_id': parsed_tensors['image/source_id'],
        'height': parsed_tensors['image/height'],
        'width': parsed_tensors['image/width'],
        'groundtruth_classes': parsed_tensors['image/object/class/label'],
        'groundtruth_is_crowd': is_crowds,
        'groundtruth_area': areas,
        'groundtruth_boxes': boxes,
    }
    if self._include_mask:
      decoded_tensors.update({
          'groundtruth_instance_masks': masks,
          'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
      })
    return decoded_tensors
| true | true |
f719a9bfc05dbb1ca8c4fffbbf92b7f387621266 | 859 | py | Python | taskobra/orm/components/cpu.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | null | null | null | taskobra/orm/components/cpu.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | 43 | 2020-02-06T22:23:42.000Z | 2020-04-29T23:56:43.000Z | taskobra/orm/components/cpu.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | 2 | 2020-02-06T21:01:42.000Z | 2020-02-06T23:43:11.000Z | # Libraries
from sqlalchemy import Column, Float, ForeignKey, Integer, String
# Taskobra
from taskobra.orm.components import Component
class CPU(Component):
    """SQLAlchemy ORM model for a CPU component.

    Joined-table subclass of ``Component``: it shares the base row's primary
    key and is discriminated by ``polymorphic_identity``.
    """
    __tablename__ = "CPU"
    # Primary key doubles as a foreign key to the Component base table row.
    unique_id = Column(Integer, ForeignKey("Component.unique_id"), primary_key=True)
    manufacturer = Column(String)
    model = Column(String)
    isa = Column(String)  # instruction set architecture, e.g. "x86_64"
    tdp = Column(Integer)  # thermal design power; presumably watts -- TODO confirm
    core_count = Column(Integer)
    threads_per_core = Column(Integer)
    nominal_frequency = Column(Float)  # GHz (see __repr__ formatting)
    maximum_frequency = Column(Float)
    __mapper_args__ = {
        "polymorphic_identity": __tablename__,
    }
    @property
    def threads(self):
        """Total hardware threads: core_count * threads_per_core."""
        return self.core_count * self.threads_per_core
    def __repr__(self):
        return f"<CPU({self.manufacturer} {self.model} ({self.core_count}/{self.threads}x{self.nominal_frequency} GHz {self.isa}))>"
| 29.62069 | 132 | 0.705471 |
from sqlalchemy import Column, Float, ForeignKey, Integer, String
from taskobra.orm.components import Component
class CPU(Component):
    """SQLAlchemy ORM model for a CPU component.

    Joined-table subclass of ``Component``: it shares the base row's primary
    key and is discriminated by ``polymorphic_identity``.
    """
    __tablename__ = "CPU"
    # Primary key doubles as a foreign key to the Component base table row.
    unique_id = Column(Integer, ForeignKey("Component.unique_id"), primary_key=True)
    manufacturer = Column(String)
    model = Column(String)
    isa = Column(String)  # instruction set architecture, e.g. "x86_64"
    tdp = Column(Integer)  # thermal design power; presumably watts -- TODO confirm
    core_count = Column(Integer)
    threads_per_core = Column(Integer)
    nominal_frequency = Column(Float)  # GHz (see __repr__ formatting)
    maximum_frequency = Column(Float)
    __mapper_args__ = {
        "polymorphic_identity": __tablename__,
    }
    @property
    def threads(self):
        """Total hardware threads: core_count * threads_per_core."""
        return self.core_count * self.threads_per_core
    def __repr__(self):
        return f"<CPU({self.manufacturer} {self.model} ({self.core_count}/{self.threads}x{self.nominal_frequency} GHz {self.isa}))>"
| true | true |
f719a9d668b8a403e901541f650b87db1bf30dbc | 1,112 | py | Python | music/migrations/0010_auto_20150427_2304.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 3 | 2015-06-16T11:12:29.000Z | 2019-05-03T09:09:21.000Z | music/migrations/0010_auto_20150427_2304.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 16 | 2015-08-18T14:35:55.000Z | 2021-06-10T17:31:04.000Z | music/migrations/0010_auto_20150427_2304.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 1 | 2016-10-19T14:48:52.000Z | 2016-10-19T14:48:52.000Z | from __future__ import unicode_literals
from django.db import models, migrations
def set_sources(apps, schema_editor):
    """Backfill: point every existing TemporaryMusic row at the Youtube source."""
    # Use the historical (migration-time) model classes rather than importing
    # the models directly, so this migration keeps working as models evolve.
    Source = apps.get_model("music", "Source")
    TemporaryMusic = apps.get_model("music", "TemporaryMusic")
    youtube = Source.objects.get(name="Youtube")
    for temporary_music in TemporaryMusic.objects.all():
        temporary_music.source = youtube
        temporary_music.save()
class Migration(migrations.Migration):
    """Adds TemporaryMusic.source, backfills it, then makes it non-nullable."""
    dependencies = [
        ('music', '0009_auto_20150427_2038'),
    ]
    operations = [
        # 1) Add the FK as nullable so existing rows remain valid.
        migrations.AddField(
            model_name='temporarymusic',
            name='source',
            field=models.ForeignKey(to='music.Source', null=True, on_delete=models.CASCADE),
        ),
        # 2) Backfill every row (set_sources assigns the Youtube source).
        migrations.RunPython(set_sources),
        # 3) Tighten the column to NOT NULL now that all rows have a value.
        migrations.AlterField(
            model_name='temporarymusic',
            name='source',
            field=models.ForeignKey(to='music.Source', on_delete=models.CASCADE),
        ),
    ]
| 30.888889 | 92 | 0.654676 | from __future__ import unicode_literals
from django.db import models, migrations
def set_sources(apps, schema_editor):
# version than this migration expects. We use the historical version.
Source = apps.get_model("music", "Source")
TemporaryMusic = apps.get_model("music", "TemporaryMusic")
youtube = Source.objects.get(name="Youtube")
for tempMusic in TemporaryMusic.objects.all():
tempMusic.source = youtube
tempMusic.save()
class Migration(migrations.Migration):
    """Adds TemporaryMusic.source, backfills it, then makes it non-nullable."""
    dependencies = [
        ('music', '0009_auto_20150427_2038'),
    ]
    operations = [
        # 1) Add the FK as nullable so existing rows remain valid.
        migrations.AddField(
            model_name='temporarymusic',
            name='source',
            field=models.ForeignKey(to='music.Source', null=True, on_delete=models.CASCADE),
        ),
        # 2) Backfill every row (set_sources assigns the Youtube source).
        migrations.RunPython(set_sources),
        # 3) Tighten the column to NOT NULL now that all rows have a value.
        migrations.AlterField(
            model_name='temporarymusic',
            name='source',
            field=models.ForeignKey(to='music.Source', on_delete=models.CASCADE),
        ),
    ]
| true | true |
f719aae1c7a532a452c6a6c2a3522f59f033bbfa | 1,533 | py | Python | tests/test_fieldtype_model.py | MasterScott/Formasaurus | d7d916237a6d2ca4c80c4c8ae5d66999c8beebed | [
"MIT"
] | 132 | 2015-04-18T01:53:52.000Z | 2022-03-31T08:33:26.000Z | tests/test_fieldtype_model.py | Eglet27/Formasaurus | d7d916237a6d2ca4c80c4c8ae5d66999c8beebed | [
"MIT"
] | 26 | 2015-07-08T20:09:26.000Z | 2022-03-03T16:50:08.000Z | tests/test_fieldtype_model.py | Eglet27/Formasaurus | d7d916237a6d2ca4c80c4c8ae5d66999c8beebed | [
"MIT"
] | 63 | 2015-02-17T08:41:00.000Z | 2022-03-31T08:58:18.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import itertools
import numpy as np
from sklearn_crfsuite.metrics import flat_accuracy_score
from formasaurus.fieldtype_model import (
train,
_PRECISE_C1_C2,
_REALISTIC_C1_C2,
get_Xy,
)
def test_training(storage, capsys):
    """End-to-end smoke test: train a small field-type CRF and check its output."""
    annotations = (a for a in storage.iter_annotations(
        simplify_form_types=True,
        simplify_field_types=True,
    ) if a.fields_annotated)
    annotations = list(itertools.islice(annotations, 0, 300))
    crf = train(
        annotations=annotations,
        use_precise_form_types=False,
        optimize_hyperparameters_iters=2,
        optimize_hyperparameters_folds=2,
        optimize_hyperparameters_jobs=-1,
        full_form_type_names=False,
        full_field_type_names=False
    )
    out, err = capsys.readouterr()
    assert 'Training on 300 forms' in out
    assert 'realistic form types' in out
    assert 'Best hyperparameters' in out
    assert 0.0 < crf.c1 < 2.5
    assert 0.0 < crf.c2 < 0.9
    # Bug fix: ``assert crf.c1, crf.c2 != X`` parses as ``assert crf.c1`` with
    # ``crf.c2 != X`` as the assertion *message*, so the intended comparison
    # was never checked.  Compare the (c1, c2) pair explicitly.
    assert (crf.c1, crf.c2) != _REALISTIC_C1_C2
    assert (crf.c1, crf.c2) != _PRECISE_C1_C2
    form_types = np.asarray([a.type for a in annotations])
    X, y = get_Xy(annotations, form_types, full_type_names=False)
    y_pred = crf.predict(X)
    score = flat_accuracy_score(y, y_pred)
    assert 0.9 < score < 1.0  # overfitting FTW!
    field_schema = storage.get_field_schema()
    short_names = set(field_schema.types_inv.keys())
    assert set(crf.classes_).issubset(short_names)
| 28.924528 | 65 | 0.701239 |
from __future__ import absolute_import, division
import itertools
import numpy as np
from sklearn_crfsuite.metrics import flat_accuracy_score
from formasaurus.fieldtype_model import (
train,
_PRECISE_C1_C2,
_REALISTIC_C1_C2,
get_Xy,
)
def test_training(storage, capsys):
    """End-to-end smoke test: train a small field-type CRF and check its output."""
    annotations = (a for a in storage.iter_annotations(
        simplify_form_types=True,
        simplify_field_types=True,
    ) if a.fields_annotated)
    annotations = list(itertools.islice(annotations, 0, 300))
    crf = train(
        annotations=annotations,
        use_precise_form_types=False,
        optimize_hyperparameters_iters=2,
        optimize_hyperparameters_folds=2,
        optimize_hyperparameters_jobs=-1,
        full_form_type_names=False,
        full_field_type_names=False
    )
    out, err = capsys.readouterr()
    assert 'Training on 300 forms' in out
    assert 'realistic form types' in out
    assert 'Best hyperparameters' in out
    assert 0.0 < crf.c1 < 2.5
    assert 0.0 < crf.c2 < 0.9
    # Bug fix: ``assert crf.c1, crf.c2 != X`` parses as ``assert crf.c1`` with
    # ``crf.c2 != X`` as the assertion *message*, so the intended comparison
    # was never checked.  Compare the (c1, c2) pair explicitly.
    assert (crf.c1, crf.c2) != _REALISTIC_C1_C2
    assert (crf.c1, crf.c2) != _PRECISE_C1_C2
    form_types = np.asarray([a.type for a in annotations])
    X, y = get_Xy(annotations, form_types, full_type_names=False)
    y_pred = crf.predict(X)
    score = flat_accuracy_score(y, y_pred)
    assert 0.9 < score < 1.0
    field_schema = storage.get_field_schema()
    short_names = set(field_schema.types_inv.keys())
    assert set(crf.classes_).issubset(short_names)
| true | true |
f719ac12ab39a81ed2df4d9c929c5f6b2e9f5724 | 2,399 | py | Python | Lib/glyphsLib/__main__.py | silnrsi/glyphsLib | fc9ac286874e30130679430b028a173062c311a0 | [
"Apache-2.0"
] | 1 | 2019-01-19T05:50:30.000Z | 2019-01-19T05:50:30.000Z | Lib/glyphsLib/__main__.py | DalavanCloud/glyphsLib | fc9ac286874e30130679430b028a173062c311a0 | [
"Apache-2.0"
] | null | null | null | Lib/glyphsLib/__main__.py | DalavanCloud/glyphsLib | fc9ac286874e30130679430b028a173062c311a0 | [
"Apache-2.0"
] | 1 | 2019-01-19T05:50:14.000Z | 2019-01-19T05:50:14.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import argparse
import glyphsLib
# argparse --help description for the Glyphs-to-UFO conversion CLI.
description = """\n
Converts a Glyphs.app source file into UFO masters
or UFO instances and MutatorMath designspace.
"""
def parse_options(args):
    """Build the CLI argument parser and parse *args* (a list of strings).

    Returns the argparse.Namespace with ``glyphs``, ``masters``,
    ``instances`` and ``round_instances`` attributes.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--version", action="version",
                        version='glyphsLib %s' % (glyphsLib.__version__))
    parser.add_argument("-g", "--glyphs", metavar="GLYPHS", required=True,
                        help="Glyphs file to convert.")
    # Fixed help-text typo: "Ouput" -> "Output".
    parser.add_argument("-m", "--masters", metavar="MASTERS",
                        default="master_ufo",
                        help="Output masters UFO to folder MASTERS. "
                             "(default: %(default)s)")
    # nargs="?": "-n" without a value uses const; omitting "-n" leaves None.
    parser.add_argument("-n", "--instances", metavar="INSTANCES", nargs="?",
                        const="instance_ufo", default=None,
                        help="Output and generate interpolated instances UFO "
                             "to folder INSTANCES. "
                             "(default: %(const)s)")
    parser.add_argument("-r", "--round-instances", action="store_true",
                        help="Apply integer rounding to all geometry when "
                             "interpolating")
    options = parser.parse_args(args)
    return options
def main(args=None):
    """CLI entry point: convert a .glyphs file to UFO masters or instances."""
    options = parse_options(args)
    if options.glyphs is None:
        return
    if options.instances is None:
        # No -n flag: only write the master UFOs.
        glyphsLib.build_masters(options.glyphs, options.masters)
    else:
        glyphsLib.build_instances(
            options.glyphs, options.masters, options.instances,
            round_geometry=options.round_instances)
# Allow direct script / ``python -m`` execution.
if __name__ == '__main__':
    main(sys.argv[1:])
| 38.693548 | 82 | 0.631513 |
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
import argparse
import glyphsLib
description = """\n
Converts a Glyphs.app source file into UFO masters
or UFO instances and MutatorMath designspace.
"""
def parse_options(args):
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--version", action="version",
version='glyphsLib %s' % (glyphsLib.__version__))
parser.add_argument("-g", "--glyphs", metavar="GLYPHS", required=True,
help="Glyphs file to convert.")
parser.add_argument("-m", "--masters", metavar="MASTERS",
default="master_ufo",
help="Ouput masters UFO to folder MASTERS. "
"(default: %(default)s)")
parser.add_argument("-n", "--instances", metavar="INSTANCES", nargs="?",
const="instance_ufo", default=None,
help="Output and generate interpolated instances UFO "
"to folder INSTANCES. "
"(default: %(const)s)")
parser.add_argument("-r", "--round-instances", action="store_true",
help="Apply integer rounding to all geometry when "
"interpolating")
options = parser.parse_args(args)
return options
def main(args=None):
opt = parse_options(args)
if opt.glyphs is not None:
if opt.instances is None:
glyphsLib.build_masters(opt.glyphs, opt.masters)
else:
glyphsLib.build_instances(opt.glyphs, opt.masters, opt.instances,
round_geometry=opt.round_instances)
if __name__ == '__main__':
main(sys.argv[1:])
| true | true |
f719ac201c882a4f33c304211ff792834b6fe5b0 | 640 | py | Python | fm2o2.py | dumpydog212/fm2o2 | b5e173735bb08466d6c20f7868725e627260dd88 | [
"MIT"
] | null | null | null | fm2o2.py | dumpydog212/fm2o2 | b5e173735bb08466d6c20f7868725e627260dd88 | [
"MIT"
] | null | null | null | fm2o2.py | dumpydog212/fm2o2 | b5e173735bb08466d6c20f7868725e627260dd88 | [
"MIT"
] | null | null | null | import glob
import os
from xml.dom import minidom
import xml.etree.ElementTree as ET
path = r"C:\Users\shamb\Desktop\dita_demo"
valid_path = r"C:\Users\shamb\Desktop\dita_demo_scrubbed"
wildcard = "*.xml"
full_path = os.path.join(path, wildcard)
os.makedirs(valid_path, exist_ok=True)
file_list = glob.glob(full_path)
print("The file set includes:")
for this_file in file_list:
print(this_file)
# mydoc = minidom.parse(this_file)
# print(type(mydoc))
tree = ET.parse(this_file)
root = tree.getroot()
print('\nAll item data:')
for elem in root:
for subelem in elem:
print(subelem.text) | 22.068966 | 57 | 0.696875 | import glob
import os
from xml.dom import minidom
import xml.etree.ElementTree as ET
path = r"C:\Users\shamb\Desktop\dita_demo"
valid_path = r"C:\Users\shamb\Desktop\dita_demo_scrubbed"
wildcard = "*.xml"
full_path = os.path.join(path, wildcard)
os.makedirs(valid_path, exist_ok=True)
file_list = glob.glob(full_path)
print("The file set includes:")
for this_file in file_list:
print(this_file)
tree = ET.parse(this_file)
root = tree.getroot()
print('\nAll item data:')
for elem in root:
for subelem in elem:
print(subelem.text) | true | true |
f719acd0bf5519f70da4e2324dadedc8b1906093 | 12,049 | py | Python | gooddata-afm-client/gooddata_afm_client/model/included_dimension_props.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 7 | 2022-01-24T16:27:06.000Z | 2022-02-25T10:18:49.000Z | gooddata-afm-client/gooddata_afm_client/model/included_dimension_props.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 29 | 2022-01-20T15:45:38.000Z | 2022-03-31T09:39:25.000Z | gooddata-afm-client/gooddata_afm_client/model/included_dimension_props.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 7 | 2022-01-20T07:11:15.000Z | 2022-03-09T14:50:17.000Z | """
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Contact: support@gooddata.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from gooddata_afm_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_afm_client.exceptions import ApiAttributeError
class IncludedDimensionProps(ModelNormal):
    """Auto-generated OpenAPI model (openapi-generator); regenerate, don't hand-edit.

    Carries ``dimensionAttributesValues``: a mapping from dimension attribute
    to the list of attribute values for which grand totals are computed.
    """

    allowed_values = {}

    validations = {}

    @cached_property
    def additional_properties_type():
        # Lazy: types referenced here may only be defined after class load.
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = True

    @cached_property
    def openapi_types():
        """Attribute name -> tuple of accepted types (lazy for self-references)."""
        return {
            'dimension_attributes_values': ({str: ([str],)},),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # python attribute name -> JSON key in the API payload
    attribute_map = {
        'dimension_attributes_values': 'dimensionAttributesValues',  # noqa: E501
    }

    read_only_vars = {}

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, dimension_attributes_values, *args, **kwargs):  # noqa: E501
        """Deserialize server data into a new instance (read-only attrs allowed).

        Args:
            dimension_attributes_values ({str: ([str],)}): attribute label ->
                attribute values for which the grand total is computed; an
                absent attribute means totals over all of its values (this
                also covers individual metrics, i.e. values of the
                "measureGroup" pseudo attribute).
        Keyword Args:
            _check_type, _spec_property_naming, _path_to_item, _configuration,
            _visited_composed_classes: deserialization controls consumed here.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.dimension_attributes_values = dimension_attributes_values
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # Unknown property and configuration says to drop it silently.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, dimension_attributes_values, *args, **kwargs):  # noqa: E501
        """Create an instance from user code (read-only attrs are rejected).

        Arguments mirror :meth:`_from_openapi_data`; see that method.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.dimension_attributes_values = dimension_attributes_values
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # Unknown property and configuration says to drop it silently.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| 45.813688 | 361 | 0.590256 |
import re
import sys
from gooddata_afm_client.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_afm_client.exceptions import ApiAttributeError
class IncludedDimensionProps(ModelNormal):
    """Auto-generated OpenAPI model (openapi-generator); regenerate, don't hand-edit.

    Carries ``dimensionAttributesValues``: a mapping from dimension attribute
    to the list of attribute values for which grand totals are computed.
    """

    allowed_values = {}

    validations = {}

    @cached_property
    def additional_properties_type():
        # Lazy: types referenced here may only be defined after class load.
        return (bool, date, datetime, dict, float, int, list, str, none_type,)

    _nullable = True

    @cached_property
    def openapi_types():
        """Attribute name -> tuple of accepted types (lazy for self-references)."""
        return {
            'dimension_attributes_values': ({str: ([str],)},),
        }

    @cached_property
    def discriminator():
        return None

    # python attribute name -> JSON key in the API payload
    attribute_map = {
        'dimension_attributes_values': 'dimensionAttributesValues',
    }

    read_only_vars = {}

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, dimension_attributes_values, *args, **kwargs):
        """Deserialize server data into a new instance (read-only attrs allowed)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.dimension_attributes_values = dimension_attributes_values
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # Unknown property and configuration says to drop it silently.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, dimension_attributes_values, *args, **kwargs):
        """Create an instance from user code (read-only attrs are rejected)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.dimension_attributes_values = dimension_attributes_values
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # Unknown property and configuration says to drop it silently.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| true | true |
f719ad57e58a44fc929ef55ed10a1ee635466eb2 | 326 | py | Python | setup.py | droberin/cyberdyne-dyndns | 7d495390413cff2829f6b00a482f7b9dff3dcb5a | [
"MIT"
] | null | null | null | setup.py | droberin/cyberdyne-dyndns | 7d495390413cff2829f6b00a482f7b9dff3dcb5a | [
"MIT"
] | null | null | null | setup.py | droberin/cyberdyne-dyndns | 7d495390413cff2829f6b00a482f7b9dff3dcb5a | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name='cyberdynedyndnscli',
version='0.1.0',
packages=['cyberdynedyndnscli'],
url='https://github.com/droberin/cyberdynedyndnscli',
license='MIT',
author='DRoBeR',
author_email='drober+software@gmail.com',
description='Cyberdyne.es Dynamic DNS client'
)
| 25.076923 | 57 | 0.699387 | from distutils.core import setup
setup(
name='cyberdynedyndnscli',
version='0.1.0',
packages=['cyberdynedyndnscli'],
url='https://github.com/droberin/cyberdynedyndnscli',
license='MIT',
author='DRoBeR',
author_email='drober+software@gmail.com',
description='Cyberdyne.es Dynamic DNS client'
)
| true | true |
f719ae112f660d822e36dfe8386ebed7cf3c5760 | 13,464 | py | Python | Doc/tools/extensions/pyspecific.py | deadsnakes/python3.4 | e8ac58ab083b57aa04b46c79f764c68bdab607a0 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Doc/tools/extensions/pyspecific.py | deadsnakes/python3.4 | e8ac58ab083b57aa04b46c79f764c68bdab607a0 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Doc/tools/extensions/pyspecific.py | deadsnakes/python3.4 | e8ac58ab083b57aa04b46c79f764c68bdab607a0 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pyspecific.py
~~~~~~~~~~~~~
Sphinx extension with Python doc-specific markup.
:copyright: 2008-2014 by Georg Brandl.
:license: Python license.
"""
import re
import codecs
from os import path
from time import asctime
from pprint import pformat
from docutils.io import StringOutput
from docutils.parsers.rst import Directive
from docutils.utils import new_document
from docutils import nodes, utils
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.util.nodes import split_explicit_title
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.text import TextWriter
from sphinx.writers.latex import LaTeXTranslator
from sphinx.domains.python import PyModulelevel, PyClassmember
# Support for checking for suspicious markup
import suspicious
ISSUE_URI = 'https://bugs.python.org/issue%s'
SOURCE_URI = 'https://github.com/python/cpython/tree/3.4/%s'
# monkey-patch reST parser to disable alphabetic and roman enumerated lists
from docutils.parsers.rst.states import Body
Body.enum.converters['loweralpha'] = \
Body.enum.converters['upperalpha'] = \
Body.enum.converters['lowerroman'] = \
Body.enum.converters['upperroman'] = lambda x: None
# monkey-patch HTML and LaTeX translators to keep doctest blocks in the
# doctest docs themselves
orig_visit_literal_block = HTMLTranslator.visit_literal_block
orig_depart_literal_block = LaTeXTranslator.depart_literal_block
def new_visit_literal_block(self, node):
meta = self.builder.env.metadata[self.builder.current_docname]
old_trim_doctest_flags = self.highlighter.trim_doctest_flags
if 'keepdoctest' in meta:
self.highlighter.trim_doctest_flags = False
try:
orig_visit_literal_block(self, node)
finally:
self.highlighter.trim_doctest_flags = old_trim_doctest_flags
def new_depart_literal_block(self, node):
meta = self.builder.env.metadata[self.curfilestack[-1]]
old_trim_doctest_flags = self.highlighter.trim_doctest_flags
if 'keepdoctest' in meta:
self.highlighter.trim_doctest_flags = False
try:
orig_depart_literal_block(self, node)
finally:
self.highlighter.trim_doctest_flags = old_trim_doctest_flags
HTMLTranslator.visit_literal_block = new_visit_literal_block
LaTeXTranslator.depart_literal_block = new_depart_literal_block
# Support for marking up and linking to bugs.python.org issues
def issue_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
issue = utils.unescape(text)
text = 'issue ' + issue
refnode = nodes.reference(text, text, refuri=ISSUE_URI % issue)
return [refnode], []
# Support for linking to Python source files easily
def source_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
has_t, title, target = split_explicit_title(text)
title = utils.unescape(title)
target = utils.unescape(target)
refnode = nodes.reference(title, title, refuri=SOURCE_URI % target)
return [refnode], []
# Support for marking up implementation details
class ImplementationDetail(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
def run(self):
pnode = nodes.compound(classes=['impl-detail'])
content = self.content
add_text = nodes.strong('CPython implementation detail:',
'CPython implementation detail:')
if self.arguments:
n, m = self.state.inline_text(self.arguments[0], self.lineno)
pnode.append(nodes.paragraph('', '', *(n + m)))
self.state.nested_parse(content, self.content_offset, pnode)
if pnode.children and isinstance(pnode[0], nodes.paragraph):
pnode[0].insert(0, add_text)
pnode[0].insert(1, nodes.Text(' '))
else:
pnode.insert(0, nodes.paragraph('', '', add_text))
return [pnode]
# Support for documenting decorators
class PyDecoratorMixin(object):
def handle_signature(self, sig, signode):
ret = super(PyDecoratorMixin, self).handle_signature(sig, signode)
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self):
return False
class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel):
def run(self):
# a decorator function is a function after all
self.name = 'py:function'
return PyModulelevel.run(self)
class PyDecoratorMethod(PyDecoratorMixin, PyClassmember):
def run(self):
self.name = 'py:method'
return PyClassmember.run(self)
class PyCoroutineMixin(object):
def handle_signature(self, sig, signode):
ret = super(PyCoroutineMixin, self).handle_signature(sig, signode)
signode.insert(0, addnodes.desc_annotation('coroutine ', 'coroutine '))
return ret
class PyCoroutineFunction(PyCoroutineMixin, PyModulelevel):
def run(self):
self.name = 'py:function'
return PyModulelevel.run(self)
class PyCoroutineMethod(PyCoroutineMixin, PyClassmember):
def run(self):
self.name = 'py:method'
return PyClassmember.run(self)
# Support for documenting version of removal in deprecations
class DeprecatedRemoved(Directive):
has_content = True
required_arguments = 2
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
_label = 'Deprecated since version %s, will be removed in version %s'
def run(self):
node = addnodes.versionmodified()
node.document = self.state.document
node['type'] = 'deprecated-removed'
version = (self.arguments[0], self.arguments[1])
node['version'] = version
text = self._label % version
if len(self.arguments) == 3:
inodes, messages = self.state.inline_text(self.arguments[2],
self.lineno+1)
para = nodes.paragraph(self.arguments[2], '', *inodes)
node.append(para)
else:
messages = []
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
if len(node):
if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
content = nodes.inline(node[0].rawsource, translatable=True)
content.source = node[0].source
content.line = node[0].line
content += node[0].children
node[0].replace_self(nodes.paragraph('', '', content))
node[0].insert(0, nodes.inline('', '%s: ' % text,
classes=['versionmodified']))
else:
para = nodes.paragraph('', '',
nodes.inline('', '%s.' % text,
classes=['versionmodified']))
node.append(para)
env = self.state.document.settings.env
env.note_versionchange('deprecated', version[0], node, self.lineno)
return [node] + messages
# Support for including Misc/NEWS
issue_re = re.compile('([Ii])ssue #([0-9]+)')
whatsnew_re = re.compile(r"(?im)^what's new in (.*?)\??$")
class MiscNews(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
fname = self.arguments[0]
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = path.dirname(path.abspath(source))
fpath = path.join(source_dir, fname)
self.state.document.settings.record_dependencies.add(fpath)
try:
fp = codecs.open(fpath, encoding='utf-8')
try:
content = fp.read()
finally:
fp.close()
except Exception:
text = 'The NEWS file is not available.'
node = nodes.strong(text, text)
return [node]
content = issue_re.sub(r'`\1ssue #\2 <https://bugs.python.org/\2>`__',
content)
content = whatsnew_re.sub(r'\1', content)
# remove first 3 lines as they are the main heading
lines = ['.. default-role:: obj', ''] + content.splitlines()[3:]
self.state_machine.insert_input(lines, fname)
return []
# Support for building "topic help" for pydoc
pydoc_topic_labels = [
'assert', 'assignment', 'atom-identifiers', 'atom-literals',
'attribute-access', 'attribute-references', 'augassign', 'binary',
'bitwise', 'bltin-code-objects', 'bltin-ellipsis-object',
'bltin-null-object', 'bltin-type-objects', 'booleans',
'break', 'callable-types', 'calls', 'class', 'comparisons', 'compound',
'context-managers', 'continue', 'conversions', 'customization', 'debugger',
'del', 'dict', 'dynamic-features', 'else', 'exceptions', 'execmodel',
'exprlists', 'floating', 'for', 'formatstrings', 'function', 'global',
'id-classes', 'identifiers', 'if', 'imaginary', 'import', 'in', 'integers',
'lambda', 'lists', 'naming', 'nonlocal', 'numbers', 'numeric-types',
'objects', 'operator-summary', 'pass', 'power', 'raise', 'return',
'sequence-types', 'shifting', 'slicings', 'specialattrs', 'specialnames',
'string-methods', 'strings', 'subscriptions', 'truth', 'try', 'types',
'typesfunctions', 'typesmapping', 'typesmethods', 'typesmodules',
'typesseq', 'typesseq-mutable', 'unary', 'while', 'with', 'yield'
]
class PydocTopicsBuilder(Builder):
name = 'pydoc-topics'
def init(self):
self.topics = {}
def get_outdated_docs(self):
return 'all pydoc topics'
def get_target_uri(self, docname, typ=None):
return '' # no URIs
def write(self, *ignored):
writer = TextWriter(self)
for label in self.status_iterator(pydoc_topic_labels,
'building topics... ',
length=len(pydoc_topic_labels)):
if label not in self.env.domaindata['std']['labels']:
self.warn('label %r not in documentation' % label)
continue
docname, labelid, sectname = self.env.domaindata['std']['labels'][label]
doctree = self.env.get_and_resolve_doctree(docname, self)
document = new_document('<section node>')
document.append(doctree.ids[labelid])
destination = StringOutput(encoding='utf-8')
writer.write(document, destination)
self.topics[label] = writer.output
def finish(self):
f = open(path.join(self.outdir, 'topics.py'), 'wb')
try:
f.write('# -*- coding: utf-8 -*-\n'.encode('utf-8'))
f.write(('# Autogenerated by Sphinx on %s\n' % asctime()).encode('utf-8'))
f.write(('topics = ' + pformat(self.topics) + '\n').encode('utf-8'))
finally:
f.close()
# Support for documenting Opcodes
opcode_sig_re = re.compile(r'(\w+(?:\+\d)?)(?:\s*\((.*)\))?')
def parse_opcode_signature(env, sig, signode):
"""Transform an opcode signature into RST nodes."""
m = opcode_sig_re.match(sig)
if m is None:
raise ValueError
opname, arglist = m.groups()
signode += addnodes.desc_name(opname, opname)
if arglist is not None:
paramlist = addnodes.desc_parameterlist()
signode += paramlist
paramlist += addnodes.desc_parameter(arglist, arglist)
return opname.strip()
# Support for documenting pdb commands
pdbcmd_sig_re = re.compile(r'([a-z()!]+)\s*(.*)')
# later...
# pdbargs_tokens_re = re.compile(r'''[a-zA-Z]+ | # identifiers
# [.,:]+ | # punctuation
# [\[\]()] | # parens
# \s+ # whitespace
# ''', re.X)
def parse_pdb_command(env, sig, signode):
"""Transform a pdb command signature into RST nodes."""
m = pdbcmd_sig_re.match(sig)
if m is None:
raise ValueError
name, args = m.groups()
fullname = name.replace('(', '').replace(')', '')
signode += addnodes.desc_name(name, name)
if args:
signode += addnodes.desc_addname(' '+args, ' '+args)
return fullname
def setup(app):
app.add_role('issue', issue_role)
app.add_role('source', source_role)
app.add_directive('impl-detail', ImplementationDetail)
app.add_directive('deprecated-removed', DeprecatedRemoved)
app.add_builder(PydocTopicsBuilder)
app.add_builder(suspicious.CheckSuspiciousMarkupBuilder)
app.add_description_unit('opcode', 'opcode', '%s (opcode)',
parse_opcode_signature)
app.add_description_unit('pdbcommand', 'pdbcmd', '%s (pdb command)',
parse_pdb_command)
app.add_description_unit('2to3fixer', '2to3fixer', '%s (2to3 fixer)')
app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction)
app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod)
app.add_directive_to_domain('py', 'coroutinefunction', PyCoroutineFunction)
app.add_directive_to_domain('py', 'coroutinemethod', PyCoroutineMethod)
app.add_directive('miscnews', MiscNews)
return {'version': '1.0', 'parallel_read_safe': True}
| 36.096515 | 86 | 0.635844 |
import re
import codecs
from os import path
from time import asctime
from pprint import pformat
from docutils.io import StringOutput
from docutils.parsers.rst import Directive
from docutils.utils import new_document
from docutils import nodes, utils
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.util.nodes import split_explicit_title
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.text import TextWriter
from sphinx.writers.latex import LaTeXTranslator
from sphinx.domains.python import PyModulelevel, PyClassmember
import suspicious
ISSUE_URI = 'https://bugs.python.org/issue%s'
SOURCE_URI = 'https://github.com/python/cpython/tree/3.4/%s'
from docutils.parsers.rst.states import Body
Body.enum.converters['loweralpha'] = \
Body.enum.converters['upperalpha'] = \
Body.enum.converters['lowerroman'] = \
Body.enum.converters['upperroman'] = lambda x: None
orig_visit_literal_block = HTMLTranslator.visit_literal_block
orig_depart_literal_block = LaTeXTranslator.depart_literal_block
def new_visit_literal_block(self, node):
meta = self.builder.env.metadata[self.builder.current_docname]
old_trim_doctest_flags = self.highlighter.trim_doctest_flags
if 'keepdoctest' in meta:
self.highlighter.trim_doctest_flags = False
try:
orig_visit_literal_block(self, node)
finally:
self.highlighter.trim_doctest_flags = old_trim_doctest_flags
def new_depart_literal_block(self, node):
meta = self.builder.env.metadata[self.curfilestack[-1]]
old_trim_doctest_flags = self.highlighter.trim_doctest_flags
if 'keepdoctest' in meta:
self.highlighter.trim_doctest_flags = False
try:
orig_depart_literal_block(self, node)
finally:
self.highlighter.trim_doctest_flags = old_trim_doctest_flags
HTMLTranslator.visit_literal_block = new_visit_literal_block
LaTeXTranslator.depart_literal_block = new_depart_literal_block
def issue_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
issue = utils.unescape(text)
text = 'issue ' + issue
refnode = nodes.reference(text, text, refuri=ISSUE_URI % issue)
return [refnode], []
def source_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
has_t, title, target = split_explicit_title(text)
title = utils.unescape(title)
target = utils.unescape(target)
refnode = nodes.reference(title, title, refuri=SOURCE_URI % target)
return [refnode], []
class ImplementationDetail(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
def run(self):
pnode = nodes.compound(classes=['impl-detail'])
content = self.content
add_text = nodes.strong('CPython implementation detail:',
'CPython implementation detail:')
if self.arguments:
n, m = self.state.inline_text(self.arguments[0], self.lineno)
pnode.append(nodes.paragraph('', '', *(n + m)))
self.state.nested_parse(content, self.content_offset, pnode)
if pnode.children and isinstance(pnode[0], nodes.paragraph):
pnode[0].insert(0, add_text)
pnode[0].insert(1, nodes.Text(' '))
else:
pnode.insert(0, nodes.paragraph('', '', add_text))
return [pnode]
class PyDecoratorMixin(object):
def handle_signature(self, sig, signode):
ret = super(PyDecoratorMixin, self).handle_signature(sig, signode)
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self):
return False
class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel):
def run(self):
self.name = 'py:function'
return PyModulelevel.run(self)
class PyDecoratorMethod(PyDecoratorMixin, PyClassmember):
def run(self):
self.name = 'py:method'
return PyClassmember.run(self)
class PyCoroutineMixin(object):
def handle_signature(self, sig, signode):
ret = super(PyCoroutineMixin, self).handle_signature(sig, signode)
signode.insert(0, addnodes.desc_annotation('coroutine ', 'coroutine '))
return ret
class PyCoroutineFunction(PyCoroutineMixin, PyModulelevel):
def run(self):
self.name = 'py:function'
return PyModulelevel.run(self)
class PyCoroutineMethod(PyCoroutineMixin, PyClassmember):
def run(self):
self.name = 'py:method'
return PyClassmember.run(self)
class DeprecatedRemoved(Directive):
has_content = True
required_arguments = 2
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
_label = 'Deprecated since version %s, will be removed in version %s'
def run(self):
node = addnodes.versionmodified()
node.document = self.state.document
node['type'] = 'deprecated-removed'
version = (self.arguments[0], self.arguments[1])
node['version'] = version
text = self._label % version
if len(self.arguments) == 3:
inodes, messages = self.state.inline_text(self.arguments[2],
self.lineno+1)
para = nodes.paragraph(self.arguments[2], '', *inodes)
node.append(para)
else:
messages = []
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
if len(node):
if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
content = nodes.inline(node[0].rawsource, translatable=True)
content.source = node[0].source
content.line = node[0].line
content += node[0].children
node[0].replace_self(nodes.paragraph('', '', content))
node[0].insert(0, nodes.inline('', '%s: ' % text,
classes=['versionmodified']))
else:
para = nodes.paragraph('', '',
nodes.inline('', '%s.' % text,
classes=['versionmodified']))
node.append(para)
env = self.state.document.settings.env
env.note_versionchange('deprecated', version[0], node, self.lineno)
return [node] + messages
issue_re = re.compile('([Ii])ssue #([0-9]+)')
whatsnew_re = re.compile(r"(?im)^what's new in (.*?)\??$")
class MiscNews(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
fname = self.arguments[0]
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = path.dirname(path.abspath(source))
fpath = path.join(source_dir, fname)
self.state.document.settings.record_dependencies.add(fpath)
try:
fp = codecs.open(fpath, encoding='utf-8')
try:
content = fp.read()
finally:
fp.close()
except Exception:
text = 'The NEWS file is not available.'
node = nodes.strong(text, text)
return [node]
content = issue_re.sub(r'`\1ssue
content)
content = whatsnew_re.sub(r'\1', content)
# remove first 3 lines as they are the main heading
lines = ['.. default-role:: obj', ''] + content.splitlines()[3:]
self.state_machine.insert_input(lines, fname)
return []
# Support for building "topic help" for pydoc
pydoc_topic_labels = [
'assert', 'assignment', 'atom-identifiers', 'atom-literals',
'attribute-access', 'attribute-references', 'augassign', 'binary',
'bitwise', 'bltin-code-objects', 'bltin-ellipsis-object',
'bltin-null-object', 'bltin-type-objects', 'booleans',
'break', 'callable-types', 'calls', 'class', 'comparisons', 'compound',
'context-managers', 'continue', 'conversions', 'customization', 'debugger',
'del', 'dict', 'dynamic-features', 'else', 'exceptions', 'execmodel',
'exprlists', 'floating', 'for', 'formatstrings', 'function', 'global',
'id-classes', 'identifiers', 'if', 'imaginary', 'import', 'in', 'integers',
'lambda', 'lists', 'naming', 'nonlocal', 'numbers', 'numeric-types',
'objects', 'operator-summary', 'pass', 'power', 'raise', 'return',
'sequence-types', 'shifting', 'slicings', 'specialattrs', 'specialnames',
'string-methods', 'strings', 'subscriptions', 'truth', 'try', 'types',
'typesfunctions', 'typesmapping', 'typesmethods', 'typesmodules',
'typesseq', 'typesseq-mutable', 'unary', 'while', 'with', 'yield'
]
class PydocTopicsBuilder(Builder):
name = 'pydoc-topics'
def init(self):
self.topics = {}
def get_outdated_docs(self):
return 'all pydoc topics'
def get_target_uri(self, docname, typ=None):
return '' # no URIs
def write(self, *ignored):
writer = TextWriter(self)
for label in self.status_iterator(pydoc_topic_labels,
'building topics... ',
length=len(pydoc_topic_labels)):
if label not in self.env.domaindata['std']['labels']:
self.warn('label %r not in documentation' % label)
continue
docname, labelid, sectname = self.env.domaindata['std']['labels'][label]
doctree = self.env.get_and_resolve_doctree(docname, self)
document = new_document('<section node>')
document.append(doctree.ids[labelid])
destination = StringOutput(encoding='utf-8')
writer.write(document, destination)
self.topics[label] = writer.output
def finish(self):
f = open(path.join(self.outdir, 'topics.py'), 'wb')
try:
f.write('
f.write(('
f.write(('topics = ' + pformat(self.topics) + '\n').encode('utf-8'))
finally:
f.close()
# Support for documenting Opcodes
opcode_sig_re = re.compile(r'(\w+(?:\+\d)?)(?:\s*\((.*)\))?')
def parse_opcode_signature(env, sig, signode):
m = opcode_sig_re.match(sig)
if m is None:
raise ValueError
opname, arglist = m.groups()
signode += addnodes.desc_name(opname, opname)
if arglist is not None:
paramlist = addnodes.desc_parameterlist()
signode += paramlist
paramlist += addnodes.desc_parameter(arglist, arglist)
return opname.strip()
# Support for documenting pdb commands
pdbcmd_sig_re = re.compile(r'([a-z()!]+)\s*(.*)')
# later...
# pdbargs_tokens_re = re.compile(r'''[a-zA-Z]+ | # identifiers
# [.,:]+ | # punctuation
# [\[\]()] | # parens
# \s+ # whitespace
# ''', re.X)
def parse_pdb_command(env, sig, signode):
m = pdbcmd_sig_re.match(sig)
if m is None:
raise ValueError
name, args = m.groups()
fullname = name.replace('(', '').replace(')', '')
signode += addnodes.desc_name(name, name)
if args:
signode += addnodes.desc_addname(' '+args, ' '+args)
return fullname
def setup(app):
app.add_role('issue', issue_role)
app.add_role('source', source_role)
app.add_directive('impl-detail', ImplementationDetail)
app.add_directive('deprecated-removed', DeprecatedRemoved)
app.add_builder(PydocTopicsBuilder)
app.add_builder(suspicious.CheckSuspiciousMarkupBuilder)
app.add_description_unit('opcode', 'opcode', '%s (opcode)',
parse_opcode_signature)
app.add_description_unit('pdbcommand', 'pdbcmd', '%s (pdb command)',
parse_pdb_command)
app.add_description_unit('2to3fixer', '2to3fixer', '%s (2to3 fixer)')
app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction)
app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod)
app.add_directive_to_domain('py', 'coroutinefunction', PyCoroutineFunction)
app.add_directive_to_domain('py', 'coroutinemethod', PyCoroutineMethod)
app.add_directive('miscnews', MiscNews)
return {'version': '1.0', 'parallel_read_safe': True}
| true | true |
f719ae360e05e3d0b1462b0875f0af93d02276fd | 5,643 | py | Python | airflow/executors/debug_executor.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 3 | 2019-12-11T15:54:13.000Z | 2021-05-24T20:21:08.000Z | airflow/executors/debug_executor.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 8 | 2021-02-08T20:40:47.000Z | 2022-03-29T22:27:53.000Z | airflow/executors/debug_executor.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 2 | 2021-01-11T13:53:03.000Z | 2021-10-02T05:06:34.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains DebugExecutor that is a single
process executor meaning it does not use multiprocessing.
"""
import threading
from typing import Any, Dict, List, Optional
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.models.taskinstance import TaskInstance, TaskInstanceKeyType
from airflow.utils.state import State
class DebugExecutor(BaseExecutor):
"""
This executor is meant for debugging purposes. It can be used with SQLite.
It executes one task instance at time. Additionally to support working
with sensors, all sensors ``mode`` will be automatically set to "reschedule".
"""
_terminated = threading.Event()
def __init__(self):
super().__init__()
self.tasks_to_run: List[TaskInstance] = []
# Place where we keep information for task instance raw run
self.tasks_params: Dict[TaskInstanceKeyType, Dict[str, Any]] = {}
self.fail_fast = conf.getboolean("debug", "fail_fast")
def execute_async(self, *args, **kwargs) -> None:
"""
The method is replaced by custom trigger_task implementation.
"""
def sync(self) -> None:
task_succeeded = True
while self.tasks_to_run:
ti = self.tasks_to_run.pop(0)
if self.fail_fast and not task_succeeded:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
continue
if self._terminated.is_set():
self.log.info(
"Executor is terminated! Stopping %s to %s", ti.key, State.FAILED
)
ti.set_state(State.FAILED)
self.change_state(ti.key, State.FAILED)
continue
task_succeeded = self._run_task(ti)
def _run_task(self, ti: TaskInstance) -> bool:
self.log.debug("Executing task: %s", ti)
key = ti.key
try:
params = self.tasks_params.pop(ti.key, {})
ti._run_raw_task( # pylint: disable=protected-access
job_id=ti.job_id, **params
)
self.change_state(key, State.SUCCESS)
return True
except Exception as e: # pylint: disable=broad-except
self.change_state(key, State.FAILED)
self.log.exception("Failed to execute task: %s.", str(e))
return False
def queue_task_instance(
self,
task_instance: TaskInstance,
mark_success: bool = False,
pickle_id: Optional[str] = None,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
pool: Optional[str] = None,
cfg_path: Optional[str] = None,
) -> None:
"""
Queues task instance with empty command because we do not need it.
"""
self.queue_command(
task_instance,
[str(task_instance)], # Just for better logging, it's not used anywhere
priority=task_instance.task.priority_weight_total,
queue=task_instance.task.queue,
)
# Save params for TaskInstance._run_raw_task
self.tasks_params[task_instance.key] = {
"mark_success": mark_success,
"pool": pool,
}
def trigger_tasks(self, open_slots: int) -> None:
"""
Triggers tasks. Instead of calling exec_async we just
add task instance to tasks_to_run queue.
:param open_slots: Number of open slots
"""
sorted_queue = sorted(
[(k, v) for k, v in self.queued_tasks.items()], # pylint: disable=unnecessary-comprehension
key=lambda x: x[1][1],
reverse=True,
)
for _ in range(min((open_slots, len(self.queued_tasks)))):
key, (_, _, _, ti) = sorted_queue.pop(0)
self.queued_tasks.pop(key)
self.running.add(key)
self.tasks_to_run.append(ti) # type: ignore
def end(self) -> None:
"""
When the method is called we just set states of queued tasks
to UPSTREAM_FAILED marking them as not executed.
"""
for ti in self.tasks_to_run:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
def terminate(self) -> None:
self._terminated.set()
def change_state(self, key: TaskInstanceKeyType, state: str) -> None:
self.log.debug("Popping %s from executor task queue.", key)
self.running.remove(key)
self.event_buffer[key] = state
| 37.370861 | 104 | 0.633174 |
import threading
from typing import Any, Dict, List, Optional
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.models.taskinstance import TaskInstance, TaskInstanceKeyType
from airflow.utils.state import State
class DebugExecutor(BaseExecutor):
_terminated = threading.Event()
def __init__(self):
super().__init__()
self.tasks_to_run: List[TaskInstance] = []
self.tasks_params: Dict[TaskInstanceKeyType, Dict[str, Any]] = {}
self.fail_fast = conf.getboolean("debug", "fail_fast")
def execute_async(self, *args, **kwargs) -> None:
def sync(self) -> None:
task_succeeded = True
while self.tasks_to_run:
ti = self.tasks_to_run.pop(0)
if self.fail_fast and not task_succeeded:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
continue
if self._terminated.is_set():
self.log.info(
"Executor is terminated! Stopping %s to %s", ti.key, State.FAILED
)
ti.set_state(State.FAILED)
self.change_state(ti.key, State.FAILED)
continue
task_succeeded = self._run_task(ti)
def _run_task(self, ti: TaskInstance) -> bool:
self.log.debug("Executing task: %s", ti)
key = ti.key
try:
params = self.tasks_params.pop(ti.key, {})
ti._run_raw_task(
job_id=ti.job_id, **params
)
self.change_state(key, State.SUCCESS)
return True
except Exception as e:
self.change_state(key, State.FAILED)
self.log.exception("Failed to execute task: %s.", str(e))
return False
def queue_task_instance(
self,
task_instance: TaskInstance,
mark_success: bool = False,
pickle_id: Optional[str] = None,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
pool: Optional[str] = None,
cfg_path: Optional[str] = None,
) -> None:
self.queue_command(
task_instance,
[str(task_instance)],
priority=task_instance.task.priority_weight_total,
queue=task_instance.task.queue,
)
# Save params for TaskInstance._run_raw_task
self.tasks_params[task_instance.key] = {
"mark_success": mark_success,
"pool": pool,
}
def trigger_tasks(self, open_slots: int) -> None:
sorted_queue = sorted(
[(k, v) for k, v in self.queued_tasks.items()], # pylint: disable=unnecessary-comprehension
key=lambda x: x[1][1],
reverse=True,
)
for _ in range(min((open_slots, len(self.queued_tasks)))):
key, (_, _, _, ti) = sorted_queue.pop(0)
self.queued_tasks.pop(key)
self.running.add(key)
self.tasks_to_run.append(ti) # type: ignore
def end(self) -> None:
for ti in self.tasks_to_run:
self.log.info("Setting %s to %s", ti.key, State.UPSTREAM_FAILED)
ti.set_state(State.UPSTREAM_FAILED)
self.change_state(ti.key, State.UPSTREAM_FAILED)
def terminate(self) -> None:
self._terminated.set()
def change_state(self, key: TaskInstanceKeyType, state: str) -> None:
self.log.debug("Popping %s from executor task queue.", key)
self.running.remove(key)
self.event_buffer[key] = state
| true | true |
f719af5392c1befb33e7fc5a3df49b8e3154b0ce | 2,063 | py | Python | aliyun-python-sdk-eas/aliyunsdkeas/request/v20210701/ListServicesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-eas/aliyunsdkeas/request/v20210701/ListServicesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-eas/aliyunsdkeas/request/v20210701/ListServicesRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkeas.endpoint import endpoint_data
class ListServicesRequest(RoaRequest):
	"""Auto-generated ROA-style request for the EAS ``ListServices`` API.

	Issues ``GET /api/v2/services`` against product ``eas``, API version
	2021-07-01, and exposes one getter/setter pair per query parameter.
	"""
	def __init__(self):
		RoaRequest.__init__(self, 'eas', '2021-07-01', 'ListServices','eas')
		self.set_uri_pattern('/api/v2/services')
		self.set_method('GET')
		# Populate endpoint data only when the installed core SDK version
		# exposes these attributes (kept for backward compatibility).
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_Filter(self):
		"""Return the current ``Filter`` query parameter, or ``None`` if unset."""
		return self.get_query_params().get('Filter')
	def set_Filter(self,Filter):
		"""Set the ``Filter`` query parameter."""
		self.add_query_param('Filter',Filter)
	def get_PageSize(self):
		"""Return the current ``PageSize`` query parameter, or ``None`` if unset."""
		return self.get_query_params().get('PageSize')
	def set_PageSize(self,PageSize):
		"""Set the ``PageSize`` query parameter."""
		self.add_query_param('PageSize',PageSize)
	def get_Sort(self):
		"""Return the current ``Sort`` query parameter, or ``None`` if unset."""
		return self.get_query_params().get('Sort')
	def set_Sort(self,Sort):
		"""Set the ``Sort`` query parameter."""
		self.add_query_param('Sort',Sort)
	def get_PageNumber(self):
		"""Return the current ``PageNumber`` query parameter, or ``None`` if unset."""
		return self.get_query_params().get('PageNumber')
	def set_PageNumber(self,PageNumber):
		"""Set the ``PageNumber`` query parameter."""
		self.add_query_param('PageNumber',PageNumber)
	def get_Order(self):
		"""Return the current ``Order`` query parameter, or ``None`` if unset."""
		return self.get_query_params().get('Order')
	def set_Order(self,Order):
		"""Set the ``Order`` query parameter."""
		self.add_query_param('Order',Order)
from aliyunsdkcore.request import RoaRequest
from aliyunsdkeas.endpoint import endpoint_data
class ListServicesRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'eas', '2021-07-01', 'ListServices','eas')
self.set_uri_pattern('/api/v2/services')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Filter(self):
return self.get_query_params().get('Filter')
def set_Filter(self,Filter):
self.add_query_param('Filter',Filter)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Sort(self):
return self.get_query_params().get('Sort')
def set_Sort(self,Sort):
self.add_query_param('Sort',Sort)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_Order(self):
return self.get_query_params().get('Order')
def set_Order(self,Order):
self.add_query_param('Order',Order) | true | true |
f719af5c196d30f0eb97eff99d60406c1d503639 | 1,912 | py | Python | tests/unit/recommenders/models/test_newsrec_utils.py | enowy/Recommenders | 60033231b9167438032843c23158c0c776856e0e | [
"MIT"
] | 10 | 2019-05-06T21:57:10.000Z | 2019-05-07T06:15:39.000Z | tests/unit/recommenders/models/test_newsrec_utils.py | enowy/Recommenders | 60033231b9167438032843c23158c0c776856e0e | [
"MIT"
] | 2 | 2022-01-19T20:24:51.000Z | 2022-02-18T20:25:24.000Z | tests/unit/recommenders/models/test_newsrec_utils.py | enowy/Recommenders | 60033231b9167438032843c23158c0c776856e0e | [
"MIT"
] | 3 | 2019-05-06T22:24:21.000Z | 2019-05-07T02:50:46.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pytest
try:
from recommenders.models.deeprec.deeprec_utils import download_deeprec_resources
from recommenders.models.newsrec.newsrec_utils import prepare_hparams, load_yaml
except ImportError:
pass # skip this import if we are in cpu environment
@pytest.mark.parametrize(
    "must_exist_attributes", ["wordEmb_file", "wordDict_file", "userDict_file"]
)
@pytest.mark.gpu
def test_prepare_hparams(must_exist_attributes, deeprec_resource_path):
    """prepare_hparams must expose each resource-file path it is given as an attribute.

    ``deeprec_resource_path`` is a pytest fixture (defined elsewhere) pointing at
    the local test-resource cache; the MIND demo utils bundle is downloaded on
    first use.
    """
    wordEmb_file = os.path.join(deeprec_resource_path, "mind", "utils", "embedding.npy")
    userDict_file = os.path.join(
        deeprec_resource_path, "mind", "utils", "uid2index.pkl"
    )
    wordDict_file = os.path.join(
        deeprec_resource_path, "mind", "utils", "word_dict.pkl"
    )
    yaml_file = os.path.join(deeprec_resource_path, "mind", "utils", r"nrms.yaml")
    # Fetch the demo bundle only when the config file is missing (first run).
    if not os.path.exists(yaml_file):
        download_deeprec_resources(
            r"https://recodatasets.z20.web.core.windows.net/newsrec/",
            os.path.join(deeprec_resource_path, "mind", "utils"),
            "MINDdemo_utils.zip",
        )
    hparams = prepare_hparams(
        yaml_file,
        wordEmb_file=wordEmb_file,
        wordDict_file=wordDict_file,
        userDict_file=userDict_file,
        epochs=1,
    )
    assert hasattr(hparams, must_exist_attributes)
@pytest.mark.gpu
def test_load_yaml_file(deeprec_resource_path):
    """load_yaml must parse the NRMS config file into a non-None object."""
    yaml_file = os.path.join(deeprec_resource_path, "mind", "utils", r"nrms.yaml")
    # Fetch the demo bundle only when the config file is missing (first run).
    if not os.path.exists(yaml_file):
        download_deeprec_resources(
            "https://recodatasets.z20.web.core.windows.net/newsrec/",
            os.path.join(deeprec_resource_path, "mind", "utils"),
            "MINDdemo_utils.zip",
        )
    config = load_yaml(yaml_file)
    assert config is not None
| 33.54386 | 88 | 0.69613 |
import os
import pytest
try:
from recommenders.models.deeprec.deeprec_utils import download_deeprec_resources
from recommenders.models.newsrec.newsrec_utils import prepare_hparams, load_yaml
except ImportError:
pass
@pytest.mark.parametrize(
    "must_exist_attributes", ["wordEmb_file", "wordDict_file", "userDict_file"]
)
@pytest.mark.gpu
def test_prepare_hparams(must_exist_attributes, deeprec_resource_path):
    """prepare_hparams must expose every resource-file path it receives."""
    utils_dir = os.path.join(deeprec_resource_path, "mind", "utils")
    resource_files = {
        "wordEmb_file": os.path.join(utils_dir, "embedding.npy"),
        "userDict_file": os.path.join(utils_dir, "uid2index.pkl"),
        "wordDict_file": os.path.join(utils_dir, "word_dict.pkl"),
    }
    yaml_file = os.path.join(utils_dir, r"nrms.yaml")
    # Download the MIND demo utils bundle on first run only.
    if not os.path.exists(yaml_file):
        download_deeprec_resources(
            r"https://recodatasets.z20.web.core.windows.net/newsrec/",
            utils_dir,
            "MINDdemo_utils.zip",
        )
    hparams = prepare_hparams(yaml_file, epochs=1, **resource_files)
    assert hasattr(hparams, must_exist_attributes)
@pytest.mark.gpu
def test_load_yaml_file(deeprec_resource_path):
    """load_yaml must parse the NRMS config into a non-None object."""
    utils_dir = os.path.join(deeprec_resource_path, "mind", "utils")
    yaml_file = os.path.join(utils_dir, r"nrms.yaml")
    if not os.path.exists(yaml_file):
        # Download the demo bundle that ships nrms.yaml (first run only).
        download_deeprec_resources(
            "https://recodatasets.z20.web.core.windows.net/newsrec/",
            utils_dir,
            "MINDdemo_utils.zip",
        )
    assert load_yaml(yaml_file) is not None
| true | true |
f719af7723defb10087e667c5753c6f31f956520 | 12,081 | py | Python | Self_Driving_Car/P1/LaneLines-P1/P1.py | Wentaobi/Udacity | 00af9c36b42d6bca5f2d42d2744efed2ddb51587 | [
"Apache-2.0"
] | null | null | null | Self_Driving_Car/P1/LaneLines-P1/P1.py | Wentaobi/Udacity | 00af9c36b42d6bca5f2d42d2744efed2ddb51587 | [
"Apache-2.0"
] | null | null | null | Self_Driving_Car/P1/LaneLines-P1/P1.py | Wentaobi/Udacity | 00af9c36b42d6bca5f2d42d2744efed2ddb51587 | [
"Apache-2.0"
] | null | null | null | #importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg');
#printing out some stats and plotting
print('This image is:', type(image), 'with dimesions:', image.shape)
plt.imshow(image); #call as plt.imshow(gray, cmap='gray') to show a grayscaled image
import math
def grayscale(img):
    """Convert an RGB image to single-channel grayscale.

    NOTE: to display the result with matplotlib use
    ``plt.imshow(gray, cmap='gray')``."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread():
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def hsv(img):
    """Convert an RGB image to HSV color space (used for color thresholding)."""
    return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
def canny(img, low_threshold, high_threshold):
    """Apply the Canny edge detector with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Smooth the image with a square Gaussian kernel of side ``kernel_size``."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices` (an int32 array of polygon point lists, as
    accepted by cv2.fillPoly). The rest of the image is set to black.
    """
    # Blank (all-black) mask with the same shape/dtype as the input.
    mask = np.zeros_like(img)

    # Fill color must match the channel count of the input image.
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    # Paint the polygon white on the mask...
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    # ...then keep only the pixels of `img` under the white region.
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def draw_lines(img, lines, color=(255, 0, 0), thickness=13):
    """Average the Hough segments into one left and one right lane line and
    draw them on ``img`` in place, extrapolated from the bottom edge up to
    ~57.5% of the image height.

    The segment with the largest slope seeds the left-lane cluster and the one
    with the smallest slope seeds the right-lane cluster; segments whose slope
    is within 0.15 and whose intercept is within 15% of the image width of a
    seed are averaged into that lane.

    Fixes over the previous version:
      * ``lines`` may be ``None`` or empty (cv2.HoughLinesP returns ``None``
        when nothing is detected) -- the frame is left untouched.
      * vertical segments (x2 == x1) are skipped instead of raising
        ZeroDivisionError.
      * a lane that could not be estimated is simply not drawn (previously a
        zero-initialized placeholder line was still passed to cv2.line).
    """
    if lines is None or len(lines) == 0:
        return  # nothing detected in this frame

    x_size = img.shape[1]
    y_size = img.shape[0]

    # Slope/intercept for every non-vertical segment.
    slope_intercepts = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # vertical segment: slope undefined
            slope = (y2 - y1) / (x2 - x1)
            slope_intercepts.append((slope, y1 - x1 * slope))
    if not slope_intercepts:
        return
    lines_slope_intercept = np.array(slope_intercepts)

    # Seeds: extreme slopes (max -> left lane, min -> right lane).
    max_slope_line = lines_slope_intercept[lines_slope_intercept.argmax(axis=0)[0]]
    min_slope_line = lines_slope_intercept[lines_slope_intercept.argmin(axis=0)[0]]

    left_slopes = []
    left_intercepts = []
    right_slopes = []
    right_intercepts = []
    # Cluster segments around the seeds.
    for slope, intercept in lines_slope_intercept:
        if abs(slope - max_slope_line[0]) < 0.15 and abs(intercept - max_slope_line[1]) < (0.15 * x_size):
            left_slopes.append(slope)
            left_intercepts.append(intercept)
        elif abs(slope - min_slope_line[0]) < 0.15 and abs(intercept - min_slope_line[1]) < (0.15 * x_size):
            right_slopes.append(slope)
            right_intercepts.append(intercept)

    # Average each cluster and extrapolate to the bottom edge / horizon line.
    new_lines = []
    if left_slopes:
        left_slope = sum(left_slopes) / len(left_slopes)
        left_intercept = sum(left_intercepts) / len(left_intercepts)
        left_bottom_x = (y_size - left_intercept) / left_slope
        left_top_x = (y_size * .575 - left_intercept) / left_slope
        if left_bottom_x >= 0:
            new_lines.append((left_bottom_x, y_size, left_top_x, y_size * .575))
    if right_slopes:
        right_slope = sum(right_slopes) / len(right_slopes)
        right_intercept = sum(right_intercepts) / len(right_intercepts)
        right_bottom_x = (y_size - right_intercept) / right_slope
        right_top_x = (y_size * .575 - right_intercept) / right_slope
        if right_bottom_x <= x_size:
            new_lines.append((right_bottom_x, y_size, right_top_x, y_size * .575))

    for x1, y1, x2, y2 in new_lines:
        cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform on a Canny edge image and return
    a blank color image with the averaged lane lines drawn on it.

    Fix: ``cv2.HoughLinesP`` returns ``None`` when no segment clears the
    threshold; the previous version passed that straight into ``draw_lines``
    and crashed on frames without detections.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    if lines is not None:
        draw_lines(line_img, lines)
    return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """
    `img` is the output of the hough_lines(), An image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.

    `initial_img` should be the image before any processing.

    The result image is computed as follows:

    initial_img * α + img * β + λ
    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, λ)
import os
os.listdir("test_images/")
#reading in an image
for index, img in enumerate(os.listdir("test_images/")):
image = mpimg.imread('test_images/' + img)
gray_img = grayscale(image)
hsv_img = hsv(image)
# define range of color in HSV
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
# Threshold the HSV image to get only yellow/white
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
# Bitwise-AND mask and original image
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
fig = plt.figure(figsize=(6,10))
plt.imshow(result, cmap="gray") #call as plt.imshow(gray, cmap='gray') to show a grayscaled image
#reading in an image
for index, img in enumerate(os.listdir("test_images2/")):
image = mpimg.imread('test_images2/' + img)
gray_img = grayscale(image)
hsv_img = hsv(image)
# define range of color in HSV
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
# Threshold the HSV image to get only yellow/white
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
# Bitwise-AND mask and original image
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
fig = plt.figure(figsize=(8,10))
plt.imshow(result, cmap="gray") #call as plt.imshow(gray, cmap='gray') to show a grayscaled image
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
# from IPython.display import HTML
def process_image(image):
    """Full lane-finding pipeline for a single RGB video frame.

    Returns a 3-channel color image: the input frame with the averaged
    left/right lane lines overlaid.
    """
    gray_img = grayscale(image)
    hsv_img = hsv(image)
    # HSV ranges for the two lane colors.
    lower_yel = np.array([20,100,100])
    upper_yel = np.array([30,255,255])
    lower_wht = np.array([0,0,235])
    upper_wht = np.array([255,255,255])
    # Threshold the HSV image to get only yellow/white pixels.
    yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
    white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
    # Combine both masks (bitwise OR), then brighten lane pixels on a
    # dimmed grayscale copy so edges on the lanes dominate.
    full_mask = cv2.bitwise_or(yellow_mask, white_mask)
    subdued_gray = (gray_img / 2).astype('uint8')
    boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
    kernel_size = 5
    blurred_img = gaussian_blur(boosted_lanes,kernel_size)
    canny_low_threshold = 60
    canny_high_threshold = 150
    edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
    # Trapezoidal region of interest: bottom corners up to ~57.5% height.
    x = edges_img.shape[1]
    y = edges_img.shape[0]
    vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
    masked_img = region_of_interest(edges_img, vertices)
    # Hough transform parameters, tuned for the project videos.
    hough_rho = 3
    hough_theta = np.pi/180
    hough_threshold = 70
    hough_min_line_length = 70
    hough_max_line_gap = 250
    hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
    result = weighted_img(hough_img,image)
    return result
white_output = 'white.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
# HTML("""
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(white_output))
yellow_output = 'yellow.mp4'
clip2 = VideoFileClip('solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
# HTML("""
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(yellow_output))
challenge_output = 'extra.mp4'
clip2 = VideoFileClip('challenge.mp4')
challenge_clip = clip2.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
#
# HTML("""
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(challenge_output))
| 35.848665 | 122 | 0.698121 |
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
image = mpimg.imread('test_images/solidWhiteRight.jpg');
print('This image is:', type(image), 'with dimesions:', image.shape)
plt.imshow(image);
import math
def grayscale(img):
    """Return a single-channel grayscale copy of an RGB image."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
def hsv(img):
    """Return the image converted from RGB to HSV color space."""
    converted = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    return converted
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth the image with a square Gaussian kernel of side ``kernel_size``."""
    kernel = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, kernel, 0)
def region_of_interest(img, vertices):
    """Black out everything outside the polygon defined by ``vertices``."""
    polygon_mask = np.zeros_like(img)
    # Fill value must match the channel count of the input image.
    if len(img.shape) > 2:
        fill_value = (255,) * img.shape[2]
    else:
        fill_value = 255
    cv2.fillPoly(polygon_mask, vertices, fill_value)
    return cv2.bitwise_and(img, polygon_mask)
def draw_lines(img, lines, color=(255, 0, 0), thickness=13):
    """Average the Hough segments into one left and one right lane line and
    draw them on ``img`` in place, extrapolated from the bottom edge up to
    ~57.5% of the image height.

    Segments are clustered around the max-slope (left) and min-slope (right)
    seeds: slope within 0.15 and intercept within 15% of the image width.

    Fixes: tolerates ``None``/empty ``lines`` (cv2.HoughLinesP returns
    ``None`` with no detections), skips vertical segments instead of raising
    ZeroDivisionError, and no longer draws zero-initialized placeholder lines
    when a lane could not be estimated.
    """
    if lines is None or len(lines) == 0:
        return  # nothing detected in this frame

    x_size = img.shape[1]
    y_size = img.shape[0]

    # Slope/intercept for every non-vertical segment.
    slope_intercepts = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # vertical segment: slope undefined
            slope = (y2 - y1) / (x2 - x1)
            slope_intercepts.append((slope, y1 - x1 * slope))
    if not slope_intercepts:
        return
    lines_slope_intercept = np.array(slope_intercepts)

    max_slope_line = lines_slope_intercept[lines_slope_intercept.argmax(axis=0)[0]]
    min_slope_line = lines_slope_intercept[lines_slope_intercept.argmin(axis=0)[0]]

    left_slopes = []
    left_intercepts = []
    right_slopes = []
    right_intercepts = []
    # Cluster segments around the extreme-slope seeds.
    for slope, intercept in lines_slope_intercept:
        if abs(slope - max_slope_line[0]) < 0.15 and abs(intercept - max_slope_line[1]) < (0.15 * x_size):
            left_slopes.append(slope)
            left_intercepts.append(intercept)
        elif abs(slope - min_slope_line[0]) < 0.15 and abs(intercept - min_slope_line[1]) < (0.15 * x_size):
            right_slopes.append(slope)
            right_intercepts.append(intercept)

    # Average each cluster and extrapolate to the bottom edge / horizon line.
    new_lines = []
    if left_slopes:
        left_slope = sum(left_slopes) / len(left_slopes)
        left_intercept = sum(left_intercepts) / len(left_intercepts)
        left_bottom_x = (y_size - left_intercept) / left_slope
        left_top_x = (y_size * .575 - left_intercept) / left_slope
        if left_bottom_x >= 0:
            new_lines.append((left_bottom_x, y_size, left_top_x, y_size * .575))
    if right_slopes:
        right_slope = sum(right_slopes) / len(right_slopes)
        right_intercept = sum(right_intercepts) / len(right_intercepts)
        right_bottom_x = (y_size - right_intercept) / right_slope
        right_top_x = (y_size * .575 - right_intercept) / right_slope
        if right_bottom_x <= x_size:
            new_lines.append((right_bottom_x, y_size, right_top_x, y_size * .575))

    for x1, y1, x2, y2 in new_lines:
        cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform and return a blank color image
    with the averaged lane lines drawn on it.

    Fix: ``cv2.HoughLinesP`` returns ``None`` when no segment clears the
    threshold; previously that was passed straight into ``draw_lines`` and
    crashed on frames without detections.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    if lines is not None:
        draw_lines(line_img, lines)
    return line_img
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """Blend: ``initial_img * α + img * β + λ`` (overlay lane lines on the frame)."""
    blended = cv2.addWeighted(initial_img, α, img, β, λ)
    return blended
import os
os.listdir("test_images/")
for index, img in enumerate(os.listdir("test_images/")):
image = mpimg.imread('test_images/' + img)
gray_img = grayscale(image)
hsv_img = hsv(image)
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
fig = plt.figure(figsize=(6,10))
plt.imshow(result, cmap="gray")
for index, img in enumerate(os.listdir("test_images2/")):
image = mpimg.imread('test_images2/' + img)
gray_img = grayscale(image)
hsv_img = hsv(image)
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
fig = plt.figure(figsize=(8,10))
plt.imshow(result, cmap="gray")
from moviepy.editor import VideoFileClip
def process_image(image):
gray_img = grayscale(image)
hsv_img = hsv(image)
lower_yel = np.array([20,100,100])
upper_yel = np.array([30,255,255])
lower_wht = np.array([0,0,235])
upper_wht = np.array([255,255,255])
yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)
white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)
full_mask = cv2.bitwise_or(yellow_mask, white_mask)
subdued_gray = (gray_img / 2).astype('uint8')
boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)
kernel_size = 5
blurred_img = gaussian_blur(boosted_lanes,kernel_size)
canny_low_threshold = 60
canny_high_threshold = 150
edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)
x = edges_img.shape[1]
y = edges_img.shape[0]
vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)
masked_img = region_of_interest(edges_img, vertices)
hough_rho = 3
hough_theta = np.pi/180
hough_threshold = 70
hough_min_line_length = 70
hough_max_line_gap = 250
hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)
result = weighted_img(hough_img,image)
return result
white_output = 'white.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image)
white_clip.write_videofile(white_output, audio=False)
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(white_output))
yellow_output = 'yellow.mp4'
clip2 = VideoFileClip('solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(yellow_output))
challenge_output = 'extra.mp4'
clip2 = VideoFileClip('challenge.mp4')
challenge_clip = clip2.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
# <video width="960" height="540" controls>
# <source src="{0}">
# </video>
# """.format(challenge_output))
| true | true |
f719afb71003662d81876c64edd582861d9f11a6 | 1,088 | py | Python | exercicios-Python/desaf045.py | marcelo-py/Exercicios-Python | d654d54821983897dbc377a2d3db97671dd75b5b | [
"MIT"
] | null | null | null | exercicios-Python/desaf045.py | marcelo-py/Exercicios-Python | d654d54821983897dbc377a2d3db97671dd75b5b | [
"MIT"
] | null | null | null | exercicios-Python/desaf045.py | marcelo-py/Exercicios-Python | d654d54821983897dbc377a2d3db97671dd75b5b | [
"MIT"
] | null | null | null | import random
from emoji import emojize
from time import sleep
itens = ('PEDRA', 'PAPEL', 'TESOURA')
print (emojize('''Suas opções:
[0] PEDRA :punch:
[1] PAPEL :hand:
[2] TESOURA :v:''',use_aliases=True))
escolha = int(input('Qual sua escolha? '))
computador = random.randint(0,2)
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!!!')
print('-='*20)
print('O computador escolheu {}'.format(itens[computador]))
if escolha == 0:
print('Você escolheu PEDRA')
if computador == 1:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 2:
print('Você ganhou!!!')
elif escolha == 1:
print('Você escolheu PAPEL')
if computador == 2:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 0 :
print('Você ganhou!!!')
elif escolha == 2:
print('Você escolheu TESOURA')
if computador == 0:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 1 :
print('Você ganhou!!!')
print('=-'*20)
| 25.302326 | 59 | 0.607537 | import random
from emoji import emojize
from time import sleep
itens = ('PEDRA', 'PAPEL', 'TESOURA')
print (emojize('''Suas opções:
[0] PEDRA :punch:
[1] PAPEL :hand:
[2] TESOURA :v:''',use_aliases=True))
escolha = int(input('Qual sua escolha? '))
computador = random.randint(0,2)
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!!!')
print('-='*20)
print('O computador escolheu {}'.format(itens[computador]))
if escolha == 0:
print('Você escolheu PEDRA')
if computador == 1:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 2:
print('Você ganhou!!!')
elif escolha == 1:
print('Você escolheu PAPEL')
if computador == 2:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 0 :
print('Você ganhou!!!')
elif escolha == 2:
print('Você escolheu TESOURA')
if computador == 0:
print('Você perdeu')
elif escolha == computador:
print('EMPATE')
elif computador == 1 :
print('Você ganhou!!!')
print('=-'*20)
| true | true |
f719afef6ce3f033481568e9522937db2bfbd069 | 86 | py | Python | my_exceptions.py | robert-dzikowski/api-smoke-test | 64394049ce82a0cf80fc128587a4a83e491725b7 | [
"MIT"
] | 1 | 2021-01-30T23:01:00.000Z | 2021-01-30T23:01:00.000Z | my_exceptions.py | robert-dzikowski/api-smoke-test | 64394049ce82a0cf80fc128587a4a83e491725b7 | [
"MIT"
] | null | null | null | my_exceptions.py | robert-dzikowski/api-smoke-test | 64394049ce82a0cf80fc128587a4a83e491725b7 | [
"MIT"
] | null | null | null | class TestFail(Exception):
"""
Exception raised when test has failed.
"""
| 17.2 | 42 | 0.627907 | class TestFail(Exception):
| true | true |
f719b0534049d456a9239569b20111fc6dcfa5fb | 292 | py | Python | esphome/components/json/__init__.py | TheEggi/esphomeyaml | 98e8cc1edc7b29891e8100eb484922e5c2d4fc33 | [
"MIT"
] | null | null | null | esphome/components/json/__init__.py | TheEggi/esphomeyaml | 98e8cc1edc7b29891e8100eb484922e5c2d4fc33 | [
"MIT"
] | null | null | null | esphome/components/json/__init__.py | TheEggi/esphomeyaml | 98e8cc1edc7b29891e8100eb484922e5c2d4fc33 | [
"MIT"
] | null | null | null | import esphome.codegen as cg
from esphome.core import coroutine_with_priority
json_ns = cg.esphome_ns.namespace('json')
# NOTE(review): priority 1.0 presumably runs this hook early, before
# components that depend on JSON support -- confirm against the scheduler.
@coroutine_with_priority(1.0)
def to_code(config):
    """Code-generation hook: pull in ArduinoJson and enable JSON support."""
    cg.add_library('ArduinoJson-esphomelib', '5.13.3')
    cg.add_define('USE_JSON')
    cg.add_global(json_ns.using)
| 24.333333 | 54 | 0.763699 | import esphome.codegen as cg
from esphome.core import coroutine_with_priority
json_ns = cg.esphome_ns.namespace('json')
@coroutine_with_priority(1.0)
def to_code(config):
    """Code-generation hook: register the ArduinoJson library, define
    USE_JSON, and bring the json namespace into the generated code."""
    cg.add_library('ArduinoJson-esphomelib', '5.13.3')
    cg.add_define('USE_JSON')
    cg.add_global(json_ns.using)
| true | true |
f719b0960e13ee24f7ce64d60d298220d2513dc0 | 53 | py | Python | shiftscheduler/gui/constants.py | c-rainbow/nurse-scheduling | 8537c875e46772700499a89dec3a30a796434fe0 | [
"MIT"
] | 2 | 2020-04-16T17:03:56.000Z | 2021-04-08T17:23:21.000Z | shiftscheduler/gui/constants.py | c-rainbow/nurse-scheduling | 8537c875e46772700499a89dec3a30a796434fe0 | [
"MIT"
] | null | null | null | shiftscheduler/gui/constants.py | c-rainbow/nurse-scheduling | 8537c875e46772700499a89dec3a30a796434fe0 | [
"MIT"
] | 1 | 2020-05-04T18:03:59.000Z | 2020-05-04T18:03:59.000Z |
# (label, glob-pattern) pairs -- presumably the `filetypes` argument for
# tkinter file dialogs; confirm at the call sites.
EXCEL_FILE_TYPE = (("Excel 2007 files","*.xlsx"),)
EXCEL_FILE_TYPE = (("Excel 2007 files","*.xlsx"),) | true | true |
f719b09aaa3ce37ed804af7fc5327f4ef6a12908 | 645 | py | Python | noxfile.py | HarshNarayanJha/diddi-and-the-bugs | 82af417a2ab324de7bde38736bfc42430b6b46fa | [
"MIT"
] | null | null | null | noxfile.py | HarshNarayanJha/diddi-and-the-bugs | 82af417a2ab324de7bde38736bfc42430b6b46fa | [
"MIT"
] | null | null | null | noxfile.py | HarshNarayanJha/diddi-and-the-bugs | 82af417a2ab324de7bde38736bfc42430b6b46fa | [
"MIT"
] | null | null | null | """
I use Nox here to reformat the code.
"""
import nox
files = ["noxfile.py", "main.py", "setup.py"]
@nox.session(name="keep-codebase-clean")
def keep_codebase_clean(session):
    """Install the dev requirements, then rewrite ``files`` with isort and black."""
    session.install("-r", "test-requirements.txt")
    session.run("isort", *files)
    session.run("black", *files)
@nox.session(name="check-quality")
def check_quality(session):
    """Run flake8 plus check-only isort/black; fails the session without modifying files."""
    session.install("-r", "test-requirements.txt")
    session.run("flake8", *files, "--max-line-length=127")
    session.run("isort", "--check-only", *files)
    session.run("black", "--check", *files)
| 26.875 | 59 | 0.632558 | import nox
files = ["noxfile.py", "main.py", "setup.py"]
@nox.session(name="keep-codebase-clean")
def keep_codebase_clean(session):
    """Install the dev tools, then apply isort and black to the tracked files."""
    session.install("-r", "test-requirements.txt")
    for formatter in ("isort", "black"):
        session.run(formatter, *files)
@nox.session(name="check-quality")
def check_quality(session):
    """Run flake8 plus check-only isort/black without modifying any file."""
    session.install("-r", "test-requirements.txt")
    checks = (
        ("flake8", *files, "--max-line-length=127"),
        ("isort", "--check-only", *files),
        ("black", "--check", *files),
    )
    for command in checks:
        session.run(*command)
| true | true |
f719b22b9c5885616b30cc4050c5cf2de4e5b710 | 1,553 | py | Python | services/storage/tests/helpers/utils_assert.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | services/storage/tests/helpers/utils_assert.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | services/storage/tests/helpers/utils_assert.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | from pprint import pformat
from aiohttp import web
from servicelib.aiohttp.rest_responses import unwrap_envelope
async def assert_status(
    response: web.Response, expected_cls: web.HTTPException, expected_msg: str = None
):
    """Assert that *response* matches *expected_cls* and return its envelope.

    Returns the ``(data, error)`` pair unwrapped from the JSON response body.
    """
    payload = await response.json()
    data, error = unwrap_envelope(payload)

    assert (
        response.status == expected_cls.status_code
    ), f"got {response.status}, expected {expected_cls.status_code}:\n data:{data},\n error:{error}"

    if issubclass(expected_cls, web.HTTPError):
        # Error responses must carry a populated error envelope.
        do_assert_error(data, error, expected_cls, expected_msg)
    elif issubclass(expected_cls, web.HTTPNoContent):
        # A 204 response carries neither data nor error.
        assert not data, pformat(data)
        assert not error, pformat(error)
    else:
        # Any other success carries data and no error.
        assert data is not None, pformat(data)
        assert not error, pformat(error)
        if expected_msg:
            assert expected_msg in data["message"]

    return data, error
async def assert_error(
    response: web.Response, expected_cls: web.HTTPException, expected_msg: str = None
):
    """Unwrap *response* and delegate the error checks to :func:`do_assert_error`."""
    payload = await response.json()
    return do_assert_error(*unwrap_envelope(payload), expected_cls, expected_msg)
def do_assert_error(
    data, error, expected_cls: web.HTTPException, expected_msg: str = None
):
    """Assert that ``(data, error)`` is an error envelope matching *expected_cls*.

    Checks that the envelope carries no data, exactly one error entry whose
    ``code`` equals the exception class name, and (optionally) that
    *expected_msg* appears in the error message.  Returns ``(data, error)``.
    """
    assert not data, pformat(data)
    assert error, pformat(error)

    # Informative assertion messages (resolves the old "improve error messages" TODO).
    errors = error["errors"]
    assert len(errors) == 1, f"expected exactly one error, got: {pformat(errors)}"

    err = errors[0]
    if expected_msg:
        assert (
            expected_msg in err["message"]
        ), f"{expected_msg!r} not found in {err['message']!r}"
    assert (
        expected_cls.__name__ == err["code"]
    ), f"expected code {expected_cls.__name__!r}, got {err['code']!r}"

    return data, error
| 28.759259 | 100 | 0.701223 | from pprint import pformat
from aiohttp import web
from servicelib.aiohttp.rest_responses import unwrap_envelope
async def assert_status(
response: web.Response, expected_cls: web.HTTPException, expected_msg: str = None
):
data, error = unwrap_envelope(await response.json())
assert (
response.status == expected_cls.status_code
), f"got {response.status}, expected {expected_cls.status_code}:\n data:{data},\n error:{error}"
if issubclass(expected_cls, web.HTTPError):
do_assert_error(data, error, expected_cls, expected_msg)
elif issubclass(expected_cls, web.HTTPNoContent):
assert not data, pformat(data)
assert not error, pformat(error)
else:
assert data is not None, pformat(data)
assert not error, pformat(error)
if expected_msg:
assert expected_msg in data["message"]
return data, error
async def assert_error(
response: web.Response, expected_cls: web.HTTPException, expected_msg: str = None
):
data, error = unwrap_envelope(await response.json())
return do_assert_error(data, error, expected_cls, expected_msg)
def do_assert_error(
data, error, expected_cls: web.HTTPException, expected_msg: str = None
):
assert not data, pformat(data)
assert error, pformat(error)
assert len(error["errors"]) == 1
err = error["errors"][0]
if expected_msg:
assert expected_msg in err["message"]
assert expected_cls.__name__ == err["code"]
return data, error
| true | true |
f719b3cf4408be63834d8d778dce83c706005a42 | 2,919 | py | Python | python/src/ties/util/version.py | Noblis/ties-lib | e7c6165ebcd80e11b792fd4bcddf6ce634da0c60 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-04-10T19:02:27.000Z | 2020-04-10T19:02:27.000Z | python/src/ties/util/version.py | Noblis/ties-lib | e7c6165ebcd80e11b792fd4bcddf6ce634da0c60 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/src/ties/util/version.py | Noblis/ties-lib | e7c6165ebcd80e11b792fd4bcddf6ce634da0c60 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | ################################################################################
# Copyright 2019 Noblis, Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
import argparse
from os.path import abspath, isfile
from pkg_resources import resource_filename
class VersionAction(argparse.Action):
    """Argparse action that prints the stored version string and exits.

    The flag consumes no command-line values (``nargs`` is forced to 0).
    """

    def __init__(self, option_strings, dest, version=None, **kwargs):
        # A version flag never takes an argument value.
        kwargs['nargs'] = 0
        self._version = version
        super().__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # parser.exit prints the message and raises SystemExit.
        parser.exit(message="{}\n".format(self._version))
def _get_version_number():
return '0.9.3'
def _read_build_resource(filename):
    """Return the stripped contents of a packaged build-info file, or None.

    Returns None when the resource file is absent or empty.  Shared by
    :func:`_get_build_number` and :func:`_get_build_time`, which previously
    duplicated this logic.
    """
    resource_path = abspath(resource_filename(__name__, filename))
    if not isfile(resource_path):
        return None
    with open(resource_path, 'r', encoding='utf-8') as f:
        contents = f.read().strip()
    # An empty file means "no build info recorded".
    return contents or None


def _get_build_number():
    """Return the build number recorded at package build time, if any."""
    return _read_build_resource('build_number.txt')


def _get_build_time():
    """Return the build timestamp recorded at package build time, if any."""
    return _read_build_resource('build_time.txt')
def version_string():
    """Compose a human-readable version string, including build metadata if present."""
    parts = ["version {}".format(_get_version_number())]

    build_number = _get_build_number()
    if build_number is not None:
        parts.append("build {}".format(build_number))

    build_time = _get_build_time()
    if build_time is not None:
        parts.append("built on {}".format(build_time))

    return "\n".join(parts)
| 39.445946 | 84 | 0.528606 | true | true | |
f719b481bbf26bf74e10817f58f02d7b6a184525 | 905 | py | Python | packages/pyre/xml/ElementDescriptor.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/xml/ElementDescriptor.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/xml/ElementDescriptor.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
from .Descriptor import Descriptor
class ElementDescriptor(Descriptor):
    """
    Metadata record for a document tag declared through a DTD.

    DTD subclasses use these descriptors to decorate the Document instance and
    the tag handlers with everything the Reader needs to process XML documents:
    the handler class for the tag and the tag's attribute descriptors.
    """

    # element meta data
    handler = None  # the Node descendant that handles parsing events for this element
    attributes = ()  # attribute descriptors that encode the document DTD

    # meta methods
    def __init__(self, *, tag, handler, root=False):
        super().__init__(name=tag)
        self.root = root
        self.handler = handler
        return
# end of file
| 25.857143 | 94 | 0.693923 |
from .Descriptor import Descriptor
class ElementDescriptor(Descriptor):
handler = None
attributes = ()
def __init__(self, *, tag, handler, root=False):
super().__init__(name=tag)
self.handler = handler
self.root = root
return
| true | true |
f719b4bd078cf626a5dea79e89509d44970085fe | 1,812 | py | Python | pliers/tests/extractors/api/test_clarifai_extractors.py | adelavega/pliers | dee21102689c77a56b7da48bf9a0ac10c90be0eb | [
"BSD-3-Clause"
] | null | null | null | pliers/tests/extractors/api/test_clarifai_extractors.py | adelavega/pliers | dee21102689c77a56b7da48bf9a0ac10c90be0eb | [
"BSD-3-Clause"
] | null | null | null | pliers/tests/extractors/api/test_clarifai_extractors.py | adelavega/pliers | dee21102689c77a56b7da48bf9a0ac10c90be0eb | [
"BSD-3-Clause"
] | null | null | null | from os.path import join
from ...utils import get_test_data_path
from pliers.extractors import ClarifaiAPIExtractor
from pliers.stimuli import ImageStim
from pliers.extractors.base import merge_results
import numpy as np
import pytest
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor():
    """Exercise ClarifaiAPIExtractor options on a single image stimulus."""
    image_dir = join(get_test_data_path(), 'image')
    stim = ImageStim(join(image_dir, 'apple.jpg'))
    result = ClarifaiAPIExtractor().transform(stim).to_df()
    assert result['apple'][0] > 0.5
    # DataFrame.ix was removed in pandas 1.0; use positional .iloc instead.
    assert result.iloc[:, 5].iloc[0] > 0.0

    # max_concepts caps the number of returned concept columns.
    result = ClarifaiAPIExtractor(max_concepts=5).transform(stim).to_df()
    assert result.shape == (1, 9)

    # min_value filters out low-confidence concepts (missing ones become NaN).
    result = ClarifaiAPIExtractor(
        min_value=0.9).transform(stim).to_df(object_id=False)
    assert all(np.isnan(d) or d > 0.9 for d in result.values[0, 3:])

    # select_concepts restricts the output to the requested concepts only.
    concepts = ['cat', 'dog']
    result = ClarifaiAPIExtractor(select_concepts=concepts).transform(stim)
    result = result.to_df()
    assert result.shape == (1, 6)
    assert 'cat' in result.columns and 'dog' in result.columns
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor_batch():
    """Run the extractor over a two-image batch and merge the results."""
    image_dir = join(get_test_data_path(), 'image')
    stim = ImageStim(join(image_dir, 'apple.jpg'))
    stim2 = ImageStim(join(image_dir, 'obama.jpg'))
    ext = ClarifaiAPIExtractor()
    results = merge_results(ext.transform([stim, stim2]))
    # At least one of the two images must score 'apple' highly.
    apple_scores = results['ClarifaiAPIExtractor#apple']
    assert apple_scores[0] > 0.5 or apple_scores[1] > 0.5
    # Video extraction is skipped because it takes too long to execute:
    # video = VideoStim(join(get_test_data_path(), 'video', 'small.mp4'))
    # results = ExtractorResult.merge_stims(ext.transform(video))
    # assert 'Lego' in results.columns and 'robot' in results.columns
| 38.553191 | 75 | 0.711921 | from os.path import join
from ...utils import get_test_data_path
from pliers.extractors import ClarifaiAPIExtractor
from pliers.stimuli import ImageStim
from pliers.extractors.base import merge_results
import numpy as np
import pytest
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor():
image_dir = join(get_test_data_path(), 'image')
stim = ImageStim(join(image_dir, 'apple.jpg'))
result = ClarifaiAPIExtractor().transform(stim).to_df()
assert result['apple'][0] > 0.5
assert result.ix[:, 5][0] > 0.0
result = ClarifaiAPIExtractor(max_concepts=5).transform(stim).to_df()
assert result.shape == (1, 9)
result = ClarifaiAPIExtractor(
min_value=0.9).transform(stim).to_df(object_id=False)
assert all(np.isnan(d) or d > 0.9 for d in result.values[0, 3:])
concepts = ['cat', 'dog']
result = ClarifaiAPIExtractor(select_concepts=concepts).transform(stim)
result = result.to_df()
assert result.shape == (1, 6)
assert 'cat' in result.columns and 'dog' in result.columns
@pytest.mark.skipif("'CLARIFAI_API_KEY' not in os.environ")
def test_clarifai_api_extractor_batch():
image_dir = join(get_test_data_path(), 'image')
stim = ImageStim(join(image_dir, 'apple.jpg'))
stim2 = ImageStim(join(image_dir, 'obama.jpg'))
ext = ClarifaiAPIExtractor()
results = ext.transform([stim, stim2])
results = merge_results(results)
assert results['ClarifaiAPIExtractor#apple'][0] > 0.5 or \
results['ClarifaiAPIExtractor#apple'][1] > 0.5
| true | true |
f719b4dc7ae13b6947c48e17f17fc0bd12e5e231 | 23,805 | py | Python | src/opendr/perception/object_tracking_2d/fair_mot/object_tracking_2d_fair_mot_learner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 3 | 2021-06-24T01:54:25.000Z | 2021-12-12T16:21:24.000Z | src/opendr/perception/object_tracking_2d/fair_mot/object_tracking_2d_fair_mot_learner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 79 | 2021-06-23T10:40:10.000Z | 2021-12-16T07:59:42.000Z | src/opendr/perception/object_tracking_2d/fair_mot/object_tracking_2d_fair_mot_learner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 5 | 2021-07-04T07:38:50.000Z | 2021-12-12T16:18:47.000Z | # Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import torch
import ntpath
import shutil
import numpy as np
import onnxruntime as ort
from torchvision.transforms import transforms as T
from opendr.engine.learners import Learner
from opendr.engine.datasets import DatasetIterator, ExternalDataset, MappedDatasetIterator
from opendr.perception.object_tracking_2d.logger import Logger
from opendr.perception.object_tracking_2d.datasets.mot_dataset import JointDataset, RawMotDatasetIterator
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.models.model import create_model
from opendr.perception.object_tracking_2d.fair_mot.algorithm.run import train, evaluate
from opendr.perception.object_tracking_2d.fair_mot.algorithm.load import load_from_checkpoint
from opendr.perception.object_tracking_2d.datasets.mot_dataset import letterbox, process as process_dataset
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.tracker.multitracker import JDETracker
from opendr.engine.data import Image
from opendr.engine.target import TrackingAnnotation, TrackingAnnotationList
from opendr.engine.constants import OPENDR_SERVER_URL
from urllib.request import urlretrieve
class ObjectTracking2DFairMotLearner(Learner):
    """OpenDR learner for 2D multi-object tracking based on the FairMOT architecture.

    Wraps model creation, training, evaluation, inference (via a JDE tracker),
    checkpointing, ONNX export, and pretrained-model download.
    """

    def __init__(
        self,
        lr=0.0001,
        iters=-1,
        batch_size=4,
        optimizer="adam",
        lr_schedule="",
        backbone="dla_34",
        network_head="",
        checkpoint_after_iter=0,
        checkpoint_load_iter=0,
        temp_path="",
        device="cuda",
        threshold=0.3,
        scale=1.0,
        lr_step=[20],
        head_conv=256,
        ltrb=True,
        num_classes=1,
        reg_offset=True,
        gpus=[0],
        num_workers=4,
        mse_loss=False,
        reg_loss='l1',
        dense_wh=False,
        cat_spec_wh=False,
        reid_dim=128,
        norm_wh=False,
        wh_weight=0.1,
        off_weight=1,
        id_weight=1,
        num_epochs=30,
        hm_weight=1,
        down_ratio=4,
        max_objs=500,
        track_buffer=30,
        image_mean=[0.408, 0.447, 0.47],
        image_std=[0.289, 0.274, 0.278],
        frame_rate=30,
        min_box_area=100,
    ):
        # Pass the shared parameters on super's constructor so they can get initialized as class attributes
        super(ObjectTracking2DFairMotLearner, self).__init__(
            lr=lr,
            iters=iters,
            batch_size=batch_size,
            optimizer=optimizer,
            lr_schedule=lr_schedule,
            backbone=backbone,
            network_head=network_head,
            checkpoint_after_iter=checkpoint_after_iter,
            checkpoint_load_iter=checkpoint_load_iter,
            temp_path=temp_path,
            device=device,
            threshold=threshold,
            scale=scale,
        )

        self.ltrb = ltrb
        self.head_conv = head_conv
        self.num_classes = num_classes
        self.reid_dim = reid_dim
        self.reg_offset = reg_offset
        self.gpus = gpus
        self.num_workers = num_workers
        self.mse_loss = mse_loss
        self.reg_loss = reg_loss
        self.dense_wh = dense_wh
        self.cat_spec_wh = cat_spec_wh
        self.norm_wh = norm_wh
        self.wh_weight = wh_weight
        self.off_weight = off_weight
        self.id_weight = id_weight
        self.num_epochs = num_epochs
        self.lr_step = lr_step
        self.hm_weight = hm_weight
        self.down_ratio = down_ratio
        self.max_objs = max_objs
        self.track_buffer = track_buffer
        self.image_mean = image_mean  # was assigned twice in the original; once is enough
        self.image_std = image_std
        self.frame_rate = frame_rate
        self.min_box_area = min_box_area

        # Split the batch across GPUs: the first GPU takes main_batch_size samples,
        # the remainder is distributed as evenly as possible over the other GPUs.
        main_batch_size = self.batch_size // len(self.gpus)
        rest_batch_size = (self.batch_size - main_batch_size)
        self.chunk_sizes = [main_batch_size]

        for i in range(len(self.gpus) - 1):
            worker_chunk_size = rest_batch_size // (len(self.gpus) - 1)
            if i < rest_batch_size % (len(self.gpus) - 1):
                worker_chunk_size += 1
            self.chunk_sizes.append(worker_chunk_size)

        self.__create_model()

    def save(self, path, verbose=False):
        """
        This method is used to save a trained model.
        Provided with the path, absolute or relative, including a *folder* name, it creates a directory with the name
        of the *folder* provided and saves the model inside with a proper format and a .json file with metadata.
        If self.optimize was ran previously, it saves the optimized ONNX model in a similar fashion, by copying it
        from the self.temp_path it was saved previously during conversion.
        :param path: for the model to be saved, including the folder name
        :type path: str
        :param verbose: whether to print success message or not, defaults to 'False'
        :type verbose: bool, optional
        """

        # NOTE: the original also tested a nonexistent `self.ort_session`, which
        # raised AttributeError instead of the intended warning.
        if self.model is None:
            raise UserWarning("No model is loaded, cannot save.")

        folder_name, _, tail = self.__extract_trailing(path)  # Extract trailing folder name from path
        # Also extract folder name without any extension if extension is erroneously provided
        folder_name_no_ext = folder_name.split(sep='.')[0]

        # Extract path without folder name, by removing folder name from original path
        path_no_folder_name = ''.join(path.rsplit(folder_name, 1))
        # If tail is '', then path was a/b/c/, which leaves a trailing double '/'
        if tail == '':
            path_no_folder_name = path_no_folder_name[0:-1]  # Remove one '/'

        # Create model directory
        new_path = path_no_folder_name + folder_name_no_ext
        os.makedirs(new_path, exist_ok=True)

        model_metadata = {"model_paths": [], "framework": "pytorch", "format": "", "has_data": False,
                          "inference_params": {}, "optimized": None, "optimizer_info": {}}

        if self.model.ort_session is None:
            # No ONNX session: persist the Pytorch weights.
            model_metadata["model_paths"] = [
                folder_name_no_ext + ".pth",
            ]
            model_metadata["optimized"] = False
            model_metadata["format"] = "pth"

            torch.save({
                'state_dict': self.model.state_dict()
            }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0]))
            if verbose:
                print("Saved Pytorch model.")
        else:
            # An optimized ONNX model exists in temp_path; copy it alongside the metadata.
            model_metadata["model_paths"] = [
                folder_name_no_ext + ".onnx"
            ]
            model_metadata["optimized"] = True
            model_metadata["format"] = "onnx"

            shutil.copy2(
                os.path.join(self.temp_path, "onnx_model_temp.onnx"),
                os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0])
            )
            if verbose:
                print("Saved ONNX model.")

        with open(os.path.join(new_path, folder_name_no_ext + ".json"), 'w') as outfile:
            json.dump(model_metadata, outfile)

    def load(
        self,
        path,
        verbose=False,
    ):
        """
        Loads the model from inside the path provided, based on the metadata .json file included.
        :param path: path of the directory the model was saved
        :type path: str
        :param verbose: whether to print success message or not, defaults to 'False'
        :type verbose: bool, optional
        """

        model_name, _, _ = self.__extract_trailing(path)  # Trailing folder name from the path provided

        with open(os.path.join(path, model_name + ".json")) as metadata_file:
            metadata = json.load(metadata_file)

        if not metadata["optimized"]:
            self.__load_from_pth(self.model, os.path.join(path, metadata["model_paths"][0]))
            if verbose:
                print("Loaded Pytorch model.")
        else:
            # Fixed: the original called an undefined `__load_rpn_from_onnx` method.
            self.__load_from_onnx(os.path.join(path, metadata["model_paths"][0]))
            if verbose:
                print("Loaded ONNX model.")

    def reset(self):
        """Reset the internal tracker state (track ids, buffers) between sequences."""
        self.tracker.reset()

    def fit(
        self,
        dataset,
        val_dataset=None,
        val_epochs=-1,
        logging_path=None,
        silent=False,
        verbose=False,
        train_split_paths=None,
        val_split_paths=None,
        resume_optimizer=False,
        nID=None
    ):
        """Train the model on *dataset*, optionally evaluating on *val_dataset*.

        Returns the last evaluation result produced during training.
        """

        if train_split_paths is None:
            train_split_paths = {
                "mot20": os.path.join(
                    "perception", "object_tracking_2d", "datasets", "splits", "mot20.train"
                )
            }

        if val_split_paths is None:
            val_split_paths = train_split_paths

        logger = Logger(silent, verbose, logging_path)

        (
            input_dataset_iterator,
            eval_dataset_iterator,
        ) = self._prepare_datasets(
            dataset,
            val_dataset,
            train_split_paths,
            val_split_paths,
            require_val_dataset=val_epochs > 0,
        )

        # Number of identities for the re-id head; derive it from the dataset if not given.
        if nID is None:
            nID = input_dataset_iterator.nID if hasattr(input_dataset_iterator, "nID") else dataset.nID

        checkpoints_path = os.path.join(self.temp_path, "checkpoints")
        if self.checkpoint_after_iter != 0 or self.checkpoint_load_iter != 0:
            os.makedirs(checkpoints_path, exist_ok=True)

        start_epoch = 0

        if self.checkpoint_load_iter != 0:
            # Resume training from the requested checkpoint.
            _, _, start_epoch = load_from_checkpoint(
                self.model, os.path.join(checkpoints_path, f"checkpoint_{self.checkpoint_load_iter}.pth"),
                self.model_optimizer, resume_optimizer, self.lr, self.lr_step, log=logger.log,
            )

        last_eval_result = train(
            self.model,
            self.infer,
            self.model_optimizer,
            input_dataset_iterator,
            eval_dataset_iterator,
            self.batch_size,
            self.num_workers,
            self.gpus,
            self.chunk_sizes,
            self.iters,
            "train",  # exp_id,
            self.device,
            silent,  # hide_data_time,
            1 if verbose else (-1 if silent else 10),  # print_iter,
            self.mse_loss,
            self.reg_loss,
            self.dense_wh,
            self.cat_spec_wh,
            self.reid_dim,
            nID,
            self.norm_wh,
            1,  # num_stack,
            self.wh_weight,
            self.off_weight,
            self.id_weight,
            self.num_epochs,
            self.lr_step,
            self.temp_path,
            self.lr,
            self.reg_offset,
            self.hm_weight,
            checkpoints_path,
            self.checkpoint_after_iter,
            start_epoch,
            val_epochs=val_epochs,
            log=logger.log,
        )

        logger.close()

        return last_eval_result

    def eval(
        self,
        dataset,
        val_split_paths=None,
        logging_path=None,
        silent=False,
        verbose=False,
    ):
        """Evaluate the current model on *dataset* and return the metrics dict."""

        logger = Logger(silent, verbose, logging_path)

        (
            _,
            eval_dataset_iterator,
        ) = self._prepare_datasets(
            None,
            dataset,
            None,
            val_split_paths,
            require_dataset=False,
        )

        result = evaluate(self.infer, dataset)

        logger.log(Logger.LOG_WHEN_NORMAL, result)

        logger.close()

        return result

    def infer(self, batch, frame_ids=None, img_size=(1088, 608)):
        """Run tracking on one image or a list of images.

        :param batch: an engine.Image or a list of engine.Image
        :param frame_ids: optional frame index per image (-1 when unknown)
        :param img_size: (width, height) the input is letterboxed to
        :return: a TrackingAnnotationList per image (single list for a single image)
        """

        if self.model is None:
            raise ValueError("No model loaded or created")

        self.model.eval()

        is_single_image = False

        if isinstance(batch, Image):
            batch = [batch]
            is_single_image = True
        elif not isinstance(batch, list):
            raise ValueError("Input batch should be an engine.Image or a list of engine.Image")

        if frame_ids is None:
            frame_ids = [-1] * len(batch)
        elif is_single_image:
            frame_ids = [frame_ids]

        results = []

        for image, frame_id in zip(batch, frame_ids):

            img0 = image.convert("channels_last", "bgr")  # BGR
            img, _, _, _ = letterbox(img0, height=img_size[1], width=img_size[0])

            # Normalize RGB
            img = img[:, :, ::-1].transpose(2, 0, 1)
            img = np.ascontiguousarray(img, dtype=np.float32)
            img /= 255.0

            blob = torch.from_numpy(img).to(self.device).unsqueeze(0)
            online_targets = self.tracker.update(blob, img0)
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                # Filter out degenerate (too small or clearly horizontal) boxes.
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)

            result = TrackingAnnotationList([
                TrackingAnnotation(
                    name=0,
                    top=tlwh[0],
                    left=tlwh[1],
                    width=tlwh[2],
                    height=tlwh[3],
                    id=id,
                    score=score,
                    frame=frame_id,
                ) for tlwh, id, score in zip(
                    online_tlwhs,
                    online_ids,
                    online_scores
                )
            ])

            results.append(result)

        if is_single_image:
            results = results[0]

        return results

    def optimize(self, do_constant_folding=False, img_size=(1088, 608), optimizable_dcn_v2=False):
        """
        Optimize method converts the model to ONNX format and saves the
        model in the parent directory defined by self.temp_path. The ONNX model is then loaded.
        :param do_constant_folding: whether to optimize constants, defaults to 'False'
        :type do_constant_folding: bool, optional
        """

        if not optimizable_dcn_v2:
            raise Exception("Can not optimize the model while DCNv2 implementation is not optimizable")

        if self.model is None:
            raise UserWarning("No model is loaded, cannot optimize. Load or train a model first.")
        if self.model.ort_session is not None:
            raise UserWarning("Model is already optimized in ONNX.")

        input_shape = [
            1,
            3,
            img_size[1],
            img_size[0],
        ]

        onnx_path = os.path.join(self.temp_path, "onnx_model_temp.onnx")

        try:
            self.__convert_to_onnx(input_shape, onnx_path, do_constant_folding)
        except FileNotFoundError:
            # Create temp directory and retry the export.
            # Fixed: the original retried via an undefined `__convert_rpn_to_onnx`.
            os.makedirs(self.temp_path, exist_ok=True)
            self.__convert_to_onnx(input_shape, onnx_path, do_constant_folding)

        # Fixed: load the file that was actually written, via the existing loader
        # (the original referenced "onnx_model_rpn_temp.onnx" and a missing method).
        self.__load_from_onnx(onnx_path)

    @staticmethod
    def download(model_name, path, server_url=None):
        """Download a pretrained model (weights + metadata) into *path*.

        Returns the directory the model files were placed in.
        """

        if server_url is None and model_name not in [
            "crowdhuman_dla34",
            "fairmot_dla34",
        ]:
            raise ValueError("Unknown model_name: " + model_name)

        os.makedirs(path, exist_ok=True)

        if server_url is None:
            server_url = os.path.join(
                OPENDR_SERVER_URL, "perception", "object_tracking_2d",
                "fair_mot"
            )

        url = os.path.join(
            server_url, model_name
        )

        model_dir = os.path.join(path, model_name)
        os.makedirs(model_dir, exist_ok=True)

        urlretrieve(os.path.join(
            url, model_name + ".json"
        ), os.path.join(
            model_dir, model_name + ".json"
        ))

        try:
            # Some models are published with a .pth extension, others with .tckpt.
            urlretrieve(os.path.join(
                url, model_name + ".pth"
            ), os.path.join(
                model_dir, model_name + ".pth"
            ))
        except Exception:
            urlretrieve(os.path.join(
                url, model_name + ".tckpt"
            ), os.path.join(
                model_dir, model_name + ".pth"
            ))

        print("Downloaded model", model_name, "to", model_dir)

        return model_dir

    def __convert_to_onnx(self, input_shape, output_name, do_constant_folding=False, verbose=False):
        """Export the current Pytorch model to ONNX at *output_name*."""
        inp = torch.randn(input_shape).to(self.device)
        input_names = ["data"]
        # torch.onnx.export expects sequences of names.
        output_names = list(self.heads.keys())

        torch.onnx.export(
            self.model, inp, output_name, verbose=verbose, enable_onnx_checker=True,
            do_constant_folding=do_constant_folding, input_names=input_names, output_names=output_names
        )

    def __load_from_onnx(self, path):
        """
        This method loads an ONNX model from the path provided into an onnxruntime inference session.

        :param path: path to ONNX model
        :type path: str
        """
        # Fixed: store the session where save()/optimize() look for it
        # (the original assigned to an unused `rpn_ort_session` attribute).
        self.model.ort_session = ort.InferenceSession(path)

        # The comments below are the alternative way to use the onnx model, it might be useful in the future
        # depending on how ONNX saving/loading will be implemented across the toolkit.
        # # Load the ONNX model
        # self.model = onnx.load(path)
        #
        # # Check that the IR is well formed
        # onnx.checker.check_model(self.model)
        #
        # # Print a human readable representation of the graph
        # onnx.helper.printable_graph(self.model.graph)

    def __load_from_pth(self, model, path, use_original_dict=False):
        """Load Pytorch weights from *path* into *model* on the configured device."""
        all_params = torch.load(path, map_location=self.device)
        model.load_state_dict(all_params if use_original_dict else all_params["state_dict"])

    def _prepare_datasets(
        self,
        dataset,
        val_dataset,
        train_split_paths,
        val_split_paths,
        require_dataset=True,
        require_val_dataset=True,
    ):
        """Build (train, eval) dataset iterators from ExternalDataset/DatasetIterator inputs.

        When val_dataset is None, the eval iterator is derived from the train
        ExternalDataset if possible, otherwise the train iterator is reused.
        """

        input_dataset_iterator = None
        eval_dataset_iterator = None

        if isinstance(dataset, ExternalDataset):

            dataset_path = dataset.path
            if dataset.dataset_type.lower() != "mot":
                raise ValueError(
                    "ExternalDataset (" + str(dataset) +
                    ") is given as a dataset, but it is not a MOT dataset")

            transforms = T.Compose([T.ToTensor()])
            input_dataset_iterator = JointDataset(
                dataset_path,
                train_split_paths,
                down_ratio=self.down_ratio,
                max_objects=self.max_objs,
                ltrb=self.ltrb,
                mse_loss=self.mse_loss,
                augment=False, transforms=transforms,
            )
        elif isinstance(dataset, DatasetIterator):
            input_dataset_iterator = MappedDatasetIterator(
                dataset,
                lambda d: process_dataset(
                    d[0], d[1], self.ltrb, self.down_ratio,
                    self.max_objs, self.num_classes, self.mse_loss
                )
            )
        else:
            if require_dataset or dataset is not None:
                raise ValueError(
                    "dataset parameter should be an ExternalDataset or a DatasetIterator"
                )

        if isinstance(val_dataset, ExternalDataset):

            val_dataset_path = val_dataset.path
            if val_dataset.dataset_type.lower() != "mot":
                raise ValueError(
                    "ExternalDataset (" + str(val_dataset) +
                    ") is given as a val_dataset, but it is not a MOT dataset"
                )

            eval_dataset_iterator = RawMotDatasetIterator(
                val_dataset_path,
                val_split_paths,
                down_ratio=self.down_ratio,
                max_objects=self.max_objs,
                ltrb=self.ltrb,
                mse_loss=self.mse_loss,
            )

        elif isinstance(val_dataset, DatasetIterator):
            eval_dataset_iterator = val_dataset
        elif val_dataset is None:
            if isinstance(dataset, ExternalDataset):
                val_dataset_path = dataset.path
                if dataset.dataset_type.lower() != "mot":
                    raise ValueError(
                        "ExternalDataset (" + str(dataset) +
                        ") is given as a dataset, but it is not a MOT dataset"
                    )

                eval_dataset_iterator = RawMotDatasetIterator(
                    val_dataset_path,
                    val_split_paths,
                    down_ratio=self.down_ratio,
                    max_objects=self.max_objs,
                    ltrb=self.ltrb,
                    mse_loss=self.mse_loss,
                )

            elif require_val_dataset:
                raise ValueError(
                    "val_dataset is None and can't be derived from" +
                    " the dataset object because the dataset is not an ExternalDataset"
                )
            else:
                eval_dataset_iterator = input_dataset_iterator
        else:
            raise ValueError(
                "val_dataset parameter should be an ExternalDataset or a DatasetIterator or None"
            )

        return input_dataset_iterator, eval_dataset_iterator

    def __create_model(self):
        """Instantiate the FairMOT network, its optimizer, and the JDE tracker."""
        heads = {
            'hm': self.num_classes,
            'wh': 2 if not self.ltrb else 4,
            'id': self.reid_dim
        }
        if self.reg_offset:
            heads.update({'reg': 2})

        self.heads = heads
        self.model = create_model(self.backbone, heads, self.head_conv)
        self.model.to(self.device)
        self.model.ort_session = None
        self.model.heads_names = heads.keys()

        self.model_optimizer = torch.optim.Adam(self.model.parameters(), self.lr)

        self.tracker = JDETracker(
            self.model,
            self.threshold,
            self.track_buffer,
            self.max_objs,
            self.image_mean,
            self.image_std,
            self.down_ratio,
            self.num_classes,
            self.reg_offset,
            self.ltrb,
            self.frame_rate,
        )

    @staticmethod
    def __extract_trailing(path):
        """
        Extracts the trailing folder name or filename from a path provided in an OS-generic way, also handling
        cases where the last trailing character is a separator. Returns the folder name and the split head and tail.
        :param path: the path to extract the trailing filename or folder name from
        :type path: str
        :return: the folder name, the head and tail of the path
        :rtype: tuple of three strings
        """
        head, tail = ntpath.split(path)
        folder_name = tail or ntpath.basename(head)  # handle both a/b/c and a/b/c/
        return folder_name, head, tail
| 34.650655 | 117 | 0.585003 |
import os
import json
import torch
import ntpath
import shutil
import numpy as np
import onnxruntime as ort
from torchvision.transforms import transforms as T
from opendr.engine.learners import Learner
from opendr.engine.datasets import DatasetIterator, ExternalDataset, MappedDatasetIterator
from opendr.perception.object_tracking_2d.logger import Logger
from opendr.perception.object_tracking_2d.datasets.mot_dataset import JointDataset, RawMotDatasetIterator
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.models.model import create_model
from opendr.perception.object_tracking_2d.fair_mot.algorithm.run import train, evaluate
from opendr.perception.object_tracking_2d.fair_mot.algorithm.load import load_from_checkpoint
from opendr.perception.object_tracking_2d.datasets.mot_dataset import letterbox, process as process_dataset
from opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.tracker.multitracker import JDETracker
from opendr.engine.data import Image
from opendr.engine.target import TrackingAnnotation, TrackingAnnotationList
from opendr.engine.constants import OPENDR_SERVER_URL
from urllib.request import urlretrieve
class ObjectTracking2DFairMotLearner(Learner):
def __init__(
self,
lr=0.0001,
iters=-1,
batch_size=4,
optimizer="adam",
lr_schedule="",
backbone="dla_34",
network_head="",
checkpoint_after_iter=0,
checkpoint_load_iter=0,
temp_path="",
device="cuda",
threshold=0.3,
scale=1.0,
lr_step=[20],
head_conv=256,
ltrb=True,
num_classes=1,
reg_offset=True,
gpus=[0],
num_workers=4,
mse_loss=False,
reg_loss='l1',
dense_wh=False,
cat_spec_wh=False,
reid_dim=128,
norm_wh=False,
wh_weight=0.1,
off_weight=1,
id_weight=1,
num_epochs=30,
hm_weight=1,
down_ratio=4,
max_objs=500,
track_buffer=30,
image_mean=[0.408, 0.447, 0.47],
image_std=[0.289, 0.274, 0.278],
frame_rate=30,
min_box_area=100,
):
super(ObjectTracking2DFairMotLearner, self).__init__(
lr=lr,
iters=iters,
batch_size=batch_size,
optimizer=optimizer,
lr_schedule=lr_schedule,
backbone=backbone,
network_head=network_head,
checkpoint_after_iter=checkpoint_after_iter,
checkpoint_load_iter=checkpoint_load_iter,
temp_path=temp_path,
device=device,
threshold=threshold,
scale=scale,
)
self.ltrb = ltrb
self.head_conv = head_conv
self.num_classes = num_classes
self.reid_dim = reid_dim
self.reg_offset = reg_offset
self.gpus = gpus
self.num_workers = num_workers
self.mse_loss = mse_loss
self.reg_loss = reg_loss
self.dense_wh = dense_wh
self.cat_spec_wh = cat_spec_wh
self.reid_dim = reid_dim
self.norm_wh = norm_wh
self.wh_weight = wh_weight
self.off_weight = off_weight
self.id_weight = id_weight
self.num_epochs = num_epochs
self.lr_step = lr_step
self.hm_weight = hm_weight
self.down_ratio = down_ratio
self.max_objs = max_objs
self.track_buffer = track_buffer
self.image_mean = image_mean
self.image_mean = image_mean
self.image_std = image_std
self.frame_rate = frame_rate
self.min_box_area = min_box_area
main_batch_size = self.batch_size // len(self.gpus)
rest_batch_size = (self.batch_size - main_batch_size)
self.chunk_sizes = [main_batch_size]
for i in range(len(self.gpus) - 1):
worker_chunk_size = rest_batch_size // (len(self.gpus) - 1)
if i < rest_batch_size % (len(self.gpus) - 1):
worker_chunk_size += 1
self.chunk_sizes.append(worker_chunk_size)
self.__create_model()
    def save(self, path, verbose=False):
        """Persist the current model under ``path`` as a model directory.

        Writes ``<path>/<name>.json`` metadata plus the weights: a ``.pth``
        state dict for a plain PyTorch model, or a copy of the temporary ONNX
        file when the model has an attached ONNX session.

        :param path: target directory; its trailing component is the model name
        :param verbose: if True, print a confirmation of what was saved
        :raises UserWarning: if no model is loaded
        """
        # NOTE(review): this guard reads ``self.ort_session`` while the branch
        # below reads ``self.model.ort_session`` -- confirm which attribute is
        # the intended "optimized" flag.
        if self.model is None and self.ort_session is None:
            raise UserWarning("No model is loaded, cannot save.")

        folder_name, _, tail = self.__extract_trailing(path)  # Extract trailing folder name from path
        # Also extract folder name without any extension if extension is erroneously provided
        folder_name_no_ext = folder_name.split(sep='.')[0]

        # Extract path without folder name, by removing folder name from original path
        path_no_folder_name = ''.join(path.rsplit(folder_name, 1))
        # If tail is '', then path was a/b/c/, which leaves a trailing double '/'
        if tail == '':
            path_no_folder_name = path_no_folder_name[0:-1]  # Remove one '/'

        # Create model directory
        new_path = path_no_folder_name + folder_name_no_ext
        os.makedirs(new_path, exist_ok=True)

        model_metadata = {"model_paths": [], "framework": "pytorch", "format": "", "has_data": False,
                          "inference_params": {}, "optimized": None, "optimizer_info": {}}

        if self.model.ort_session is None:
            # Plain PyTorch model: persist the state dict next to the metadata.
            model_metadata["model_paths"] = [
                folder_name_no_ext + ".pth",
            ]
            model_metadata["optimized"] = False
            model_metadata["format"] = "pth"

            torch.save({
                'state_dict': self.model.state_dict()
            }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0]))
            if verbose:
                print("Saved Pytorch model.")
        else:
            # Optimized model: copy the ONNX file produced earlier into place.
            model_metadata["model_paths"] = [
                folder_name_no_ext + ".onnx"
            ]
            model_metadata["optimized"] = True
            model_metadata["format"] = "onnx"

            shutil.copy2(
                os.path.join(self.temp_path, "onnx_model_temp.onnx"),
                os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0])
            )
            if verbose:
                print("Saved ONNX model.")

        with open(os.path.join(new_path, folder_name_no_ext + ".json"), 'w') as outfile:
            json.dump(model_metadata, outfile)
def load(
self,
path,
verbose=False,
):
model_name, _, _ = self.__extract_trailing(path) # Trailing folder name from the path provided
with open(os.path.join(path, model_name + ".json")) as metadata_file:
metadata = json.load(metadata_file)
if not metadata["optimized"]:
self.__load_from_pth(self.model, os.path.join(path, metadata["model_paths"][0]))
if verbose:
print("Loaded Pytorch model.")
else:
self.__load_rpn_from_onnx(os.path.join(path, metadata["model_paths"][0]))
if verbose:
print("Loaded ONNX model.")
    def reset(self):
        """Delegate to the JDE tracker's own reset (see __create_model for the
        tracker construction); the network weights are left untouched."""
        self.tracker.reset()
    def fit(
        self,
        dataset,
        val_dataset=None,
        val_epochs=-1,
        logging_path=None,
        silent=False,
        verbose=False,
        train_split_paths=None,
        val_split_paths=None,
        resume_optimizer=False,
        nID=None
    ):
        """Train the model on ``dataset``, optionally evaluating periodically.

        :param dataset: ExternalDataset in MOT format or a DatasetIterator
        :param val_dataset: optional validation dataset; when None it is
            derived from ``dataset`` by _prepare_datasets where possible
        :param val_epochs: evaluate every ``val_epochs`` epochs; <= 0 disables
            the validation-dataset requirement
        :param logging_path: optional directory passed to Logger
        :param silent: suppress console output
        :param verbose: increase console output
        :param train_split_paths: mapping dataset-name -> split file; defaults
            to the bundled mot20 train split
        :param val_split_paths: like ``train_split_paths`` for validation;
            defaults to ``train_split_paths``
        :param resume_optimizer: restore optimizer state when loading a
            checkpoint (only used when checkpoint_load_iter != 0)
        :param nID: number of identities for the re-id head; taken from the
            dataset when None
        :return: the last evaluation result produced by train()
        """
        if train_split_paths is None:
            train_split_paths = {
                "mot20": os.path.join(
                    "perception", "object_tracking_2d", "datasets", "splits", "mot20.train"
                )
            }

        if val_split_paths is None:
            val_split_paths = train_split_paths

        logger = Logger(silent, verbose, logging_path)

        (
            input_dataset_iterator,
            eval_dataset_iterator,
        ) = self._prepare_datasets(
            dataset,
            val_dataset,
            train_split_paths,
            val_split_paths,
            require_val_dataset=val_epochs > 0,
        )

        # Prefer the iterator's identity count; fall back to the raw dataset.
        if nID is None:
            nID = input_dataset_iterator.nID if hasattr(input_dataset_iterator, "nID") else dataset.nID

        checkpoints_path = os.path.join(self.temp_path, "checkpoints")
        if self.checkpoint_after_iter != 0 or self.checkpoint_load_iter != 0:
            os.makedirs(checkpoints_path, exist_ok=True)

        start_epoch = 0

        # Resume training from a saved checkpoint when requested.
        if self.checkpoint_load_iter != 0:
            _, _, start_epoch = load_from_checkpoint(
                self.model, os.path.join(checkpoints_path, f"checkpoint_{self.checkpoint_load_iter}.pth"),
                self.model_optimizer, resume_optimizer, self.lr, self.lr_step, log=logger.log,
            )

        last_eval_result = train(
            self.model,
            self.infer,
            self.model_optimizer,
            input_dataset_iterator,
            eval_dataset_iterator,
            self.batch_size,
            self.num_workers,
            self.gpus,
            self.chunk_sizes,
            self.iters,
            "train",  # exp_id,
            self.device,
            silent,  # hide_data_time,
            1 if verbose else (-1 if silent else 10),  # print_iter,
            self.mse_loss,
            self.reg_loss,
            self.dense_wh,
            self.cat_spec_wh,
            self.reid_dim,
            nID,
            self.norm_wh,
            1,  # num_stack,
            self.wh_weight,
            self.off_weight,
            self.id_weight,
            self.num_epochs,
            self.lr_step,
            self.temp_path,
            self.lr,
            self.reg_offset,
            self.hm_weight,
            checkpoints_path,
            self.checkpoint_after_iter,
            start_epoch,
            val_epochs=val_epochs,
            log=logger.log,
        )

        logger.close()

        return last_eval_result
    def eval(
        self,
        dataset,
        val_split_paths=None,
        logging_path=None,
        silent=False,
        verbose=False,
    ):
        """Evaluate the current model on ``dataset``.

        :param dataset: dataset to evaluate on
        :param val_split_paths: mapping dataset-name -> split file, forwarded
            to _prepare_datasets
        :param logging_path: optional directory passed to Logger
        :param silent: suppress console output
        :param verbose: increase console output
        :return: the value returned by evaluate()
        """
        logger = Logger(silent, verbose, logging_path)

        (
            _,
            eval_dataset_iterator,
        ) = self._prepare_datasets(
            None,
            dataset,
            None,
            val_split_paths,
            require_dataset=False,
        )

        # NOTE(review): eval_dataset_iterator is built above but evaluate() is
        # invoked with the raw ``dataset`` -- confirm whether the iterator was
        # meant to be passed here instead.
        result = evaluate(self.infer, dataset)

        logger.log(Logger.LOG_WHEN_NORMAL, result)
        logger.close()

        return result
    def infer(self, batch, frame_ids=None, img_size=(1088, 608)):
        """Run tracking on a single image or a batch of images.

        Each image is letterboxed to ``img_size``, normalized, and pushed
        through the JDE tracker; surviving targets are converted into a
        TrackingAnnotationList per image.

        :param batch: engine.Image or list of engine.Image
        :param frame_ids: optional frame id per image (a scalar for a single
            image, a list for a batch); defaults to -1 per image
        :param img_size: (width, height) used for letterboxing
        :return: one TrackingAnnotationList for a single image, otherwise a
            list of them (one per input image)
        :raises ValueError: if no model is loaded or ``batch`` has the wrong type
        """
        if self.model is None:
            raise ValueError("No model loaded or created")

        self.model.eval()

        is_single_image = False

        if isinstance(batch, Image):
            batch = [batch]
            is_single_image = True
        elif not isinstance(batch, list):
            raise ValueError("Input batch should be an engine.Image or a list of engine.Image")

        if frame_ids is None:
            frame_ids = [-1] * len(batch)
        elif is_single_image:
            frame_ids = [frame_ids]

        results = []

        for image, frame_id in zip(batch, frame_ids):
            img0 = image.convert("channels_last", "bgr")  # BGR
            img, _, _, _ = letterbox(img0, height=img_size[1], width=img_size[0])

            # Normalize RGB: BGR -> RGB, HWC -> CHW, scale to [0, 1].
            img = img[:, :, ::-1].transpose(2, 0, 1)
            img = np.ascontiguousarray(img, dtype=np.float32)
            img /= 255.0

            blob = torch.from_numpy(img).to(self.device).unsqueeze(0)
            online_targets = self.tracker.update(blob, img0)
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                # Discard boxes wider than 1.6x their height and boxes below
                # the configured minimum area.
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)

            # NOTE(review): top is assigned tlwh[0] and left tlwh[1]; if tlwh
            # is (x, y, w, h) these two may be swapped -- confirm against the
            # TrackingAnnotation convention.
            result = TrackingAnnotationList([
                TrackingAnnotation(
                    name=0,
                    top=tlwh[0],
                    left=tlwh[1],
                    width=tlwh[2],
                    height=tlwh[3],
                    id=id,
                    score=score,
                    frame=frame_id,
                ) for tlwh, id, score in zip(
                    online_tlwhs,
                    online_ids,
                    online_scores
                )
            ])

            results.append(result)

        if is_single_image:
            results = results[0]

        return results
def optimize(self, do_constant_folding=False, img_size=(1088, 608), optimizable_dcn_v2=False):
if not optimizable_dcn_v2:
raise Exception("Can not optimize the model while DCNv2 implementation is not optimizable")
if self.model is None:
raise UserWarning("No model is loaded, cannot optimize. Load or train a model first.")
if self.model.ort_session is not None:
raise UserWarning("Model is already optimized in ONNX.")
input_shape = [
1,
3,
img_size[1],
img_size[0],
]
try:
self.__convert_to_onnx(
input_shape,
os.path.join(self.temp_path, "onnx_model_temp.onnx"), do_constant_folding
)
except FileNotFoundError:
# Create temp directory
os.makedirs(self.temp_path, exist_ok=True)
self.__convert_rpn_to_onnx(
input_shape,
os.path.join(self.temp_path, "onnx_model_temp.onnx"), do_constant_folding
)
self.__load_rpn_from_onnx(os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"))
@staticmethod
def download(model_name, path, server_url=None):
if server_url is None and model_name not in [
"crowdhuman_dla34",
"fairmot_dla34",
]:
raise ValueError("Unknown model_name: " + model_name)
os.makedirs(path, exist_ok=True)
if server_url is None:
server_url = os.path.join(
OPENDR_SERVER_URL, "perception", "object_tracking_2d",
"fair_mot"
)
url = os.path.join(
server_url, model_name
)
model_dir = os.path.join(path, model_name)
os.makedirs(model_dir, exist_ok=True)
urlretrieve(os.path.join(
url, model_name + ".json"
), os.path.join(
model_dir, model_name + ".json"
))
try:
urlretrieve(os.path.join(
url, model_name + ".pth"
), os.path.join(
model_dir, model_name + ".pth"
))
except Exception:
urlretrieve(os.path.join(
url, model_name + ".tckpt"
), os.path.join(
model_dir, model_name + ".pth"
))
print("Downloaded model", model_name, "to", model_dir)
return model_dir
    def __convert_to_onnx(self, input_shape, output_name, do_constant_folding=False, verbose=False):
        """Export ``self.model`` to an ONNX file at ``output_name``.

        :param input_shape: shape of the random dummy input, e.g. [1, 3, H, W]
        :param output_name: path of the ONNX file to write
        :param do_constant_folding: forwarded to torch.onnx.export
        :param verbose: forwarded to torch.onnx.export
        """
        inp = torch.randn(input_shape).to(self.device)
        input_names = ["data"]
        # NOTE(review): self.heads is a dict; torch.onnx.export documents
        # output_names as a list of strings -- consider list(self.heads.keys()).
        output_names = self.heads.keys()

        torch.onnx.export(
            self.model, inp, output_name, verbose=verbose, enable_onnx_checker=True,
            do_constant_folding=do_constant_folding, input_names=input_names, output_names=output_names
        )
def __load_from_onnx(self, path):
self.model.rpn_ort_session = ort.InferenceSession(path)
# The comments below are the alternative way to use the onnx model, it might be useful in the future
# depending on how ONNX saving/loading will be implemented across the toolkit.
# # Load the ONNX model
# self.model = onnx.load(path)
#
# # Check that the IR is well formed
# onnx.checker.check_model(self.model)
#
# # Print a human readable representation of the graph
# onnx.helper.printable_graph(self.model.graph)
def __load_from_pth(self, model, path, use_original_dict=False):
all_params = torch.load(path, map_location=self.device)
model.load_state_dict(all_params if use_original_dict else all_params["state_dict"])
    def _prepare_datasets(
        self,
        dataset,
        val_dataset,
        train_split_paths,
        val_split_paths,
        require_dataset=True,
        require_val_dataset=True,
    ):
        """Resolve train/eval iterators from the supplied dataset objects.

        Each of ``dataset``/``val_dataset`` may be an ExternalDataset in MOT
        format, a DatasetIterator, or None. When ``val_dataset`` is None the
        evaluation iterator is derived from ``dataset`` if that is an
        ExternalDataset; otherwise it falls back to the train iterator unless
        ``require_val_dataset`` forces an error.

        :return: (input_dataset_iterator, eval_dataset_iterator)
        :raises ValueError: on unsupported dataset types or missing datasets
        """
        input_dataset_iterator = None
        eval_dataset_iterator = None

        if isinstance(dataset, ExternalDataset):
            dataset_path = dataset.path
            if dataset.dataset_type.lower() != "mot":
                raise ValueError(
                    "ExternalDataset (" + str(dataset) +
                    ") is given as a dataset, but it is not a MOT dataset")

            transforms = T.Compose([T.ToTensor()])
            input_dataset_iterator = JointDataset(
                dataset_path,
                train_split_paths,
                down_ratio=self.down_ratio,
                max_objects=self.max_objs,
                ltrb=self.ltrb,
                mse_loss=self.mse_loss,
                augment=False, transforms=transforms,
            )
        elif isinstance(dataset, DatasetIterator):
            # Wrap the user iterator so each (input, target) pair is run
            # through the FairMOT preprocessing.
            input_dataset_iterator = MappedDatasetIterator(
                dataset,
                lambda d: process_dataset(
                    d[0], d[1], self.ltrb, self.down_ratio,
                    self.max_objs, self.num_classes, self.mse_loss
                )
            )
        else:
            if require_dataset or dataset is not None:
                raise ValueError(
                    "dataset parameter should be an ExternalDataset or a DatasetIterator"
                )

        if isinstance(val_dataset, ExternalDataset):
            val_dataset_path = val_dataset.path
            if val_dataset.dataset_type.lower() != "mot":
                raise ValueError(
                    "ExternalDataset (" + str(val_dataset) +
                    ") is given as a val_dataset, but it is not a MOT dataset"
                )

            eval_dataset_iterator = RawMotDatasetIterator(
                val_dataset_path,
                val_split_paths,
                down_ratio=self.down_ratio,
                max_objects=self.max_objs,
                ltrb=self.ltrb,
                mse_loss=self.mse_loss,
            )

        elif isinstance(val_dataset, DatasetIterator):
            eval_dataset_iterator = val_dataset
        elif val_dataset is None:
            # No explicit validation dataset: try to reuse the train dataset.
            if isinstance(dataset, ExternalDataset):
                val_dataset_path = dataset.path
                if dataset.dataset_type.lower() != "mot":
                    raise ValueError(
                        "ExternalDataset (" + str(dataset) +
                        ") is given as a dataset, but it is not a MOT dataset"
                    )

                eval_dataset_iterator = RawMotDatasetIterator(
                    val_dataset_path,
                    val_split_paths,
                    down_ratio=self.down_ratio,
                    max_objects=self.max_objs,
                    ltrb=self.ltrb,
                    mse_loss=self.mse_loss,
                )
            elif require_val_dataset:
                raise ValueError(
                    "val_dataset is None and can't be derived from" +
                    " the dataset object because the dataset is not an ExternalDataset"
                )
            else:
                eval_dataset_iterator = input_dataset_iterator
        else:
            raise ValueError(
                "val_dataset parameter should be an ExternalDataset or a DatasetIterator or None"
            )

        return input_dataset_iterator, eval_dataset_iterator
    def __create_model(self):
        """Build the network, its Adam optimizer and the JDE tracker.

        Head channel counts: 'hm' has num_classes channels, 'wh' has 2 (or 4
        when ltrb is enabled), 'id' has reid_dim, and an optional 2-channel
        'reg' head is added when reg_offset is set.
        """
        heads = {
            'hm': self.num_classes,
            'wh': 2 if not self.ltrb else 4,
            'id': self.reid_dim
        }
        if self.reg_offset:
            heads.update({'reg': 2})

        self.heads = heads
        self.model = create_model(self.backbone, heads, self.head_conv)
        self.model.to(self.device)
        # None until an ONNX session is attached; save()/optimize() use this
        # attribute to decide whether the model is optimized.
        self.model.ort_session = None
        self.model.heads_names = heads.keys()

        self.model_optimizer = torch.optim.Adam(self.model.parameters(), self.lr)

        self.tracker = JDETracker(
            self.model,
            self.threshold,
            self.track_buffer,
            self.max_objs,
            self.image_mean,
            self.image_std,
            self.down_ratio,
            self.num_classes,
            self.reg_offset,
            self.ltrb,
            self.frame_rate,
        )
@staticmethod
def __extract_trailing(path):
head, tail = ntpath.split(path)
folder_name = tail or ntpath.basename(head)
return folder_name, head, tail
| true | true |
f719b58aacd4b24349689985096bc6a158cb01c2 | 2,736 | py | Python | tests/crawler/media/test_bcc.py | allenyummy/GoodInfo | 94ab7421d1377450ac4cfdfd6e4667fa52b20d0c | [
"MIT"
] | 1 | 2022-01-17T14:06:27.000Z | 2022-01-17T14:06:27.000Z | tests/crawler/media/test_bcc.py | allenyummy/GoodInfo | 94ab7421d1377450ac4cfdfd6e4667fa52b20d0c | [
"MIT"
] | 9 | 2021-08-12T07:39:01.000Z | 2021-08-20T08:38:29.000Z | tests/crawler/media/test_bcc.py | allenyummy/GoodInfo | 94ab7421d1377450ac4cfdfd6e4667fa52b20d0c | [
"MIT"
] | 1 | 2022-02-21T15:45:13.000Z | 2022-02-21T15:45:13.000Z | # encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Test NewsCrawler
import logging
import pytest
from collections import namedtuple
from src.crawler.media import bcc
from src.utils.struct import NewsStruct
logger = logging.getLogger(__name__)
TEST_DATA = namedtuple(
typename="TEST_DATA",
field_names=[
"name",
"link",
"expected_output",
],
)
TEST_DATA_1 = TEST_DATA(
name="中國廣播公司_1",
link="https://www.bcc.com.tw/newsView.6473942",
expected_output=NewsStruct(
title="「這家超商」6/23開賣快篩試劑 雙北2門市限量100盒",
content="\r\n 為了方便民眾居家檢測新冠肺炎,食藥署在19日公布核准5款家用快篩試劑,可就近到藥局、醫療器材販售業者,如藥妝店、醫療器材行、便利商店等商家選購。萊爾富位於雙北的2家門市明(23)日起將首度開賣家用快篩試劑,每店限量100盒,售完為止。萊爾富首度引進國產泰博科技的「福爾威創家用新型冠狀病毒抗原快速檢驗套組」,明天下午3點起,將在台北市迪化店、北縣五工店限量開賣,每盒5入售價1700元,每店限量100盒,不拆售。根據食藥署公布的指引,如果快篩陽性,居家檢疫或隔離者須先與衛生單位聯繫,一般民眾則到社區採檢院所採檢確認;如果是陰性,民眾仍要遵循防疫規範,做好個人防護,持續自我健康管理。(快篩試劑資料照)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2021/06/22 18:49 報導",
link="https://www.bcc.com.tw/newsView.6473942",
),
)
TEST_DATA_2 = TEST_DATA(
name="中國廣播公司_2",
link="https://www.bcc.com.tw/newsView.4839712",
expected_output=NewsStruct(
title="台積電衝關未成 聯電ADR爆漲股價再登新高",
content="\r\n 半導體類股正當紅,台積電今天(24日)早盤衝關500元短暫達標後拉回,聯電延續昨天的強勢,在ADR飆漲超過20%助威下,股價漲幅超過7%,最高攻至39.7元,市值擠下股王大立光,繼續成為台股人氣王。因為聯電的狂飆,大盤儘管稍事休息,拉回的幅度也很有限。(張佳琪報導)台股週一的兩大支柱台積電、聯電,週二股價兩樣情,台積電挑戰500元大關,早盤開盤隨即攻頂,但是衝高後買盤追價謹慎,導致股價翻黑呈現小跌。聯電因週一股價漲停板鎖住,美國ADR強漲20.24%,帶動股價開盤後強勢走高,隨即衝過39元一路向上,攻至39.7元,股價又改寫18年新高,且追價買單積極,漲幅超過7%,市值擠下股王大立光。讓股價瞬間點火爆衝的關鍵是美系外資分析師最新出具的報告大力看好聯電。理由是受惠於5G、AI、高速運算等發展,聯電產用率將提高至90%到95%,因此,8吋晶圓價格調漲、12吋晶圓產用率提升,以及28奈米拓展有成,推估聯電明後年資本支出將達12億美元,重申「買進」評等,目標價由32元上調至54.5元。分析師表示,三大法人週一同步大買聯電,週二的漲勢,內外資應都有貢獻。至於是否漲到外資報告訂下的目標價,分析師認為,以今年聯電EPS預估2.25元推算,如果漲到54.5元,本益比落在24倍,雖然高但不至於離譜,因此認為如果外資買盤力道夠強,目標價就可能達標。(圖:雅虎奇摩)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2020/11/24 11:26 報導",
link="https://www.bcc.com.tw/newsView.4839712",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
logger.warning("Init News Crawler ...")
return bcc.BCCNewsCrawler()
@pytest.mark.parametrize(
argnames="name, link, expected_output",
argvalues=[tuple(t) for t in TEST_DATA_LIST],
ids=[
f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
for t in TEST_DATA_LIST
],
)
def test_get_info(
newsCrawler,
name,
link,
expected_output,
):
output = newsCrawler.getInfo(link=link)
assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
| 36 | 652 | 0.69883 |
import logging
import pytest
from collections import namedtuple
from src.crawler.media import bcc
from src.utils.struct import NewsStruct
logger = logging.getLogger(__name__)
TEST_DATA = namedtuple(
typename="TEST_DATA",
field_names=[
"name",
"link",
"expected_output",
],
)
TEST_DATA_1 = TEST_DATA(
name="中國廣播公司_1",
link="https://www.bcc.com.tw/newsView.6473942",
expected_output=NewsStruct(
title="「這家超商」6/23開賣快篩試劑 雙北2門市限量100盒",
content="\r\n 為了方便民眾居家檢測新冠肺炎,食藥署在19日公布核准5款家用快篩試劑,可就近到藥局、醫療器材販售業者,如藥妝店、醫療器材行、便利商店等商家選購。萊爾富位於雙北的2家門市明(23)日起將首度開賣家用快篩試劑,每店限量100盒,售完為止。萊爾富首度引進國產泰博科技的「福爾威創家用新型冠狀病毒抗原快速檢驗套組」,明天下午3點起,將在台北市迪化店、北縣五工店限量開賣,每盒5入售價1700元,每店限量100盒,不拆售。根據食藥署公布的指引,如果快篩陽性,居家檢疫或隔離者須先與衛生單位聯繫,一般民眾則到社區採檢院所採檢確認;如果是陰性,民眾仍要遵循防疫規範,做好個人防護,持續自我健康管理。(快篩試劑資料照)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2021/06/22 18:49 報導",
link="https://www.bcc.com.tw/newsView.6473942",
),
)
TEST_DATA_2 = TEST_DATA(
name="中國廣播公司_2",
link="https://www.bcc.com.tw/newsView.4839712",
expected_output=NewsStruct(
title="台積電衝關未成 聯電ADR爆漲股價再登新高",
content="\r\n 半導體類股正當紅,台積電今天(24日)早盤衝關500元短暫達標後拉回,聯電延續昨天的強勢,在ADR飆漲超過20%助威下,股價漲幅超過7%,最高攻至39.7元,市值擠下股王大立光,繼續成為台股人氣王。因為聯電的狂飆,大盤儘管稍事休息,拉回的幅度也很有限。(張佳琪報導)台股週一的兩大支柱台積電、聯電,週二股價兩樣情,台積電挑戰500元大關,早盤開盤隨即攻頂,但是衝高後買盤追價謹慎,導致股價翻黑呈現小跌。聯電因週一股價漲停板鎖住,美國ADR強漲20.24%,帶動股價開盤後強勢走高,隨即衝過39元一路向上,攻至39.7元,股價又改寫18年新高,且追價買單積極,漲幅超過7%,市值擠下股王大立光。讓股價瞬間點火爆衝的關鍵是美系外資分析師最新出具的報告大力看好聯電。理由是受惠於5G、AI、高速運算等發展,聯電產用率將提高至90%到95%,因此,8吋晶圓價格調漲、12吋晶圓產用率提升,以及28奈米拓展有成,推估聯電明後年資本支出將達12億美元,重申「買進」評等,目標價由32元上調至54.5元。分析師表示,三大法人週一同步大買聯電,週二的漲勢,內外資應都有貢獻。至於是否漲到外資報告訂下的目標價,分析師認為,以今年聯電EPS預估2.25元推算,如果漲到54.5元,本益比落在24倍,雖然高但不至於離譜,因此認為如果外資買盤力道夠強,目標價就可能達標。(圖:雅虎奇摩)\r\n ",
keywords=None,
category=None,
media="中國廣播公司",
datetime="2020/11/24 11:26 報導",
link="https://www.bcc.com.tw/newsView.4839712",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
logger.warning("Init News Crawler ...")
return bcc.BCCNewsCrawler()
@pytest.mark.parametrize(
argnames="name, link, expected_output",
argvalues=[tuple(t) for t in TEST_DATA_LIST],
ids=[
f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
for t in TEST_DATA_LIST
],
)
def test_get_info(
newsCrawler,
name,
link,
expected_output,
):
output = newsCrawler.getInfo(link=link)
assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
| true | true |
f719b5a93057ca90d71d3ce08000892efc53327a | 659 | py | Python | 2-hard/following-integer/main.py | mpillar/codeeval | ad1fc5aea277575dcce6ad5db230d7a2bfe41eed | [
"Unlicense"
] | 21 | 2015-02-09T18:41:15.000Z | 2021-07-31T02:43:28.000Z | 2-hard/following-integer/main.py | mpillar/codeeval | ad1fc5aea277575dcce6ad5db230d7a2bfe41eed | [
"Unlicense"
] | null | null | null | 2-hard/following-integer/main.py | mpillar/codeeval | ad1fc5aea277575dcce6ad5db230d7a2bfe41eed | [
"Unlicense"
] | 37 | 2015-01-06T06:20:17.000Z | 2021-06-21T18:22:13.000Z | import sys
def get_digits_ignore_zero(x):
    """Return a {digit-char: count} mapping for x's decimal digits, skipping zeros."""
    counts = {}
    for ch in str(x):
        if ch != '0':
            counts[ch] = counts.get(ch, 0) + 1
    return counts
def following_integer(x):
    """Return the smallest integer > x whose nonzero-digit counts match x's."""
    target = get_digits_ignore_zero(x)
    candidate = x + 1
    # Brute-force upward scan until the digit multiset (zeros ignored) matches.
    while get_digits_ignore_zero(candidate) != target:
        candidate += 1
    return candidate
# Driver: read one integer per non-empty line of the file named by argv[1] and
# print, for each, the next larger integer with the same nonzero digits.
# Improvements: context manager guarantees the file is closed even on error,
# and the blank-line check uses truthiness instead of len() == 0.
with open(sys.argv[1], 'r') as test_cases:
    for test in test_cases:
        test = test.strip()
        if not test:
            continue
        print(following_integer(int(test)))
| 21.966667 | 47 | 0.576631 | import sys
def get_digits_ignore_zero(x):
digits = {}
for digit in str(x):
if digit == '0':
continue
if digit in digits:
digits[digit] += 1
else:
digits[digit] = 1
return digits
def following_integer(x):
original_digits = get_digits_ignore_zero(x)
while True:
x += 1
digits = get_digits_ignore_zero(x)
if original_digits == digits:
return x
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
test = test.strip()
if len(test) == 0:
continue
test = int(test)
print(following_integer(test))
test_cases.close()
| true | true |
f719b60f710335528b05a8c8cbb30e8033fe17df | 13,939 | py | Python | tests/base_test_class.py | uncycler/django-DefectDojo | d7523e1dc34af47185830c13bfa7aedfc667dd60 | [
"BSD-3-Clause"
] | 3 | 2020-10-27T08:58:03.000Z | 2021-04-28T14:20:16.000Z | tests/base_test_class.py | uncycler/django-DefectDojo | d7523e1dc34af47185830c13bfa7aedfc667dd60 | [
"BSD-3-Clause"
] | 82 | 2020-11-06T22:34:05.000Z | 2021-08-10T16:30:48.000Z | tests/base_test_class.py | uncycler/django-DefectDojo | d7523e1dc34af47185830c13bfa7aedfc667dd60 | [
"BSD-3-Clause"
] | 2 | 2022-02-07T09:57:28.000Z | 2022-03-11T08:42:59.000Z | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoAlertPresentException
import unittest
import os
import re
# import time
# Module-level singletons: the Chrome driver and its options are created once
# in BaseTestCase.setUpClass and shared by every test class in the run.
dd_driver = None
dd_driver_options = None
class BaseTestCase(unittest.TestCase):
    """Shared base for DefectDojo selenium UI tests.

    Manages a single headless Chrome instance for the whole run (module-level
    ``dd_driver``), provides login/navigation helpers, system-setting toggles,
    and fails a test when the browser console logged a SEVERE error.
    Requires DD_BASE_URL, DD_ADMIN_USER and DD_ADMIN_PASSWORD in the
    environment.
    """

    @classmethod
    def setUpClass(cls):
        """Create (once) and attach the shared headless Chrome driver."""
        global dd_driver
        if not dd_driver:
            # setupModule and tearDownModule are not working in our scenario, so for now we use setupClass and a global variable
            # global variables are dirty, but in unit tests scenario's like these they are acceptable
            print('launching browser for: ', cls.__name__)
            global dd_driver_options
            dd_driver_options = Options()

            # headless means no UI, if you want to see what is happening remove headless. Adding detach will leave the window open after the test
            dd_driver_options.add_argument("--headless")
            # dd_driver_options.add_experimental_option("detach", True)

            # the next 2 maybe needed in some scenario's for example on WSL or other headless situations
            dd_driver_options.add_argument("--no-sandbox")
            # dd_driver_options.add_argument("--disable-dev-shm-usage")

            dd_driver_options.add_argument("--disable-gpu")  # on windows sometimes chrome can't start with certain gpu driver versions, even in headless mode

            # start maximized or at least with sufficient with because datatables will hide certain controls when the screen is too narrow
            dd_driver_options.add_argument("--window-size=1280,1024")
            # dd_driver_options.add_argument("--start-maximized")

            dd_driver_options.set_capability("acceptInsecureCerts", True)

            # some extra logging can be turned on if you want to query the browser javascripe console in your tests
            desired = webdriver.DesiredCapabilities.CHROME
            desired['goog:loggingPrefs'] = {'browser': 'ALL'}

            # change path of chromedriver according to which directory you have chromedriver.
            print('starting chromedriver with options: ', vars(dd_driver_options), desired)
            dd_driver = webdriver.Chrome('chromedriver', chrome_options=dd_driver_options, desired_capabilities=desired)
            # best practice is only use explicit waits
            dd_driver.implicitly_wait(1)

        cls.driver = dd_driver
        cls.base_url = os.environ['DD_BASE_URL']

    def setUp(self):
        """Reset per-test state and clear the browser console."""
        self.verificationErrors = []
        self.accept_next_alert = True
        self.accept_javascript_errors = False
        self.driver.execute_script("console.clear()")
        # clear browser console logs?

    def login_page(self):
        """Log in with the admin credentials from the environment and assert
        that no 'wrong credentials' banner is shown. Returns the driver."""
        driver = self.driver
        driver.get(self.base_url + "login")
        driver.find_element_by_id("id_username").clear()
        driver.find_element_by_id("id_username").send_keys(os.environ['DD_ADMIN_USER'])
        driver.find_element_by_id("id_password").clear()
        driver.find_element_by_id("id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
        driver.find_element_by_css_selector("button.btn.btn-success").click()

        self.assertFalse(self.is_element_by_css_selector_present('.alert-danger', 'Please enter a correct username and password'))
        return driver

    def goto_product_overview(self, driver):
        """Open the product list and wait for its datatable to render."""
        driver.get(self.base_url + "product")
        self.wait_for_datatable_if_content("no_products", "products_wrapper")

    def goto_component_overview(self, driver):
        """Open the components overview page."""
        driver.get(self.base_url + "components")

    def goto_active_engagements_overview(self, driver):
        """Open the active engagements page (no datatable wait, see issue link)."""
        # return self.goto_engagements_internal(driver, 'engagement')
        # engagement overview doesn't seem to have the datatables yet modifying the DOM
        # https://github.com/DefectDojo/django-DefectDojo/issues/2173
        driver.get(self.base_url + 'engagement')
        # self.goto_engagements_internal(driver, 'engagement')
        return driver

    def goto_all_engagements_overview(self, driver):
        """Open the 'all engagements' page and wait for its datatable."""
        return self.goto_engagements_internal(driver, 'engagements_all')

    def goto_engagements_internal(self, driver, rel_url):
        """Open ``rel_url`` and wait for the engagements datatable if present."""
        driver.get(self.base_url + rel_url)
        self.wait_for_datatable_if_content("no_engagements", "engagements_wrapper")
        return driver

    def goto_all_findings_list(self, driver):
        """Open the findings list and wait for its datatable."""
        driver.get(self.base_url + "finding")
        self.wait_for_datatable_if_content("no_findings", "open_findings_wrapper")

    def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
        """If the 'empty list' placeholder is absent, wait (up to 30s) for the
        datatable wrapper element that javascript inserts after page load."""
        no_content = None
        try:
            no_content = self.driver.find_element_by_id(no_content_id)
        except:
            # NOTE(review): bare except deliberately treats 'element missing'
            # as 'there is content'.
            pass

        if no_content is None:
            # wait for product_wrapper div as datatables javascript modifies the DOM on page load.
            WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, wrapper_id)))

    def is_element_by_css_selector_present(self, selector, text=None):
        """Return True if an element matches ``selector`` and (when given)
        contains ``text`` in its visible text."""
        elems = self.driver.find_elements_by_css_selector(selector)
        if len(elems) == 0:
            # print('no elements!')
            return False

        if text is None:
            return True

        for elem in elems:
            print(elem.text)
            if text in elem.text:
                # print('contains!')
                return True

        # print('text mismatch!')
        return False

    def is_success_message_present(self, text=None):
        """True if a green success banner (optionally containing text) is shown."""
        return self.is_element_by_css_selector_present('.alert-success', text=text)

    def is_error_message_present(self, text=None):
        """True if a red error banner (optionally containing text) is shown."""
        return self.is_element_by_css_selector_present('.alert-danger', text=text)

    def is_text_present_on_page(self, text):
        """Regex-search ``text`` in the page's <body> text."""
        # DEBUG: couldn't find: Product type added successfully. path: //*[contains(text(),'Product type added successfully.')]
        # can't get this xpath to work
        # path = "//*[contains(text(), '" + text + "')]"
        # elems = self.driver.find_elements_by_xpath(path)
        # if len(elems) == 0:
        #     print("DEBUG: couldn't find: ", text, "path: ", path)

        body = self.driver.find_element_by_tag_name("body")
        return re.search(text, body.text)

    def element_exists_by_id(self, id):
        """True if any element with the given DOM id exists."""
        elems = self.driver.find_elements_by_id(id)
        return len(elems) > 0

    def change_system_setting(self, id, enable=True):
        """Toggle the system-settings checkbox ``id`` to ``enable``, save when
        a change was needed, and assert/return the resulting state."""
        print("changing system setting " + id + " enable: " + str(enable))
        driver = self.login_page()
        driver.get(self.base_url + 'system_settings')

        is_enabled = driver.find_element_by_id(id).is_selected()

        if (enable and not is_enabled) or (not enable and is_enabled):
            # driver.find_element_by_xpath('//*[@id=' + id + ']').click()
            driver.find_element_by_id(id).click()
            # save settings
            driver.find_element_by_css_selector("input.btn.btn-primary").click()

        # check if it's enabled after reload
        is_enabled = driver.find_element_by_id(id).is_selected()

        if enable:
            self.assertTrue(is_enabled)

        if not enable:
            self.assertFalse(is_enabled)

        return is_enabled

    def enable_system_setting(self, id):
        """Turn the given system setting on."""
        return self.change_system_setting(id, enable=True)

    def disable_system_setting(self, id):
        """Turn the given system setting off."""
        return self.change_system_setting(id, enable=False)

    def enable_jira(self):
        """Enable the JIRA integration system setting."""
        return self.enable_system_setting('id_enable_jira')

    def disable_jira(self):
        """Disable the JIRA integration system setting."""
        return self.disable_system_setting('id_enable_jira')

    def disable_github(self):
        """Disable the GitHub integration system setting."""
        return self.disable_system_setting('id_enable_github')

    def enable_github(self):
        """Enable the GitHub integration system setting."""
        return self.enable_system_setting('id_enable_github')

    def enable_block_execution(self):
        """Check 'block execution' on the admin profile so async work (dedupe,
        notifications, rules, ...) runs synchronously during tests."""
        # we set the admin user (ourselves) to have block_execution checked
        # this will force dedupe to happen synchronously, among other things like notifications, rules, ...
        driver = self.login_page()
        driver.get(self.base_url + 'profile')
        if not driver.find_element_by_id('id_block_execution').is_selected():
            driver.find_element_by_xpath('//*[@id="id_block_execution"]').click()
            # save settings
            driver.find_element_by_css_selector("input.btn.btn-primary").click()
            # check if it's enabled after reload
            self.assertTrue(driver.find_element_by_id('id_block_execution').is_selected())
        return driver

    def is_alert_present(self):
        """True if a javascript alert dialog is currently open."""
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException:
            return False
        return True

    def close_alert_and_get_its_text(self):
        """Accept or dismiss the open alert (per accept_next_alert) and return
        its text; accept_next_alert is reset to True afterwards."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def assertNoConsoleErrors(self):
        """
        Sample output for levels (i.e. errors are SEVERE)
        {'level': 'DEBUG', 'message': 'http://localhost:8080/product/type/4/edit 560:12 "debug"', 'source': 'console-api', 'timestamp': 1583952828410}
        {'level': 'INFO', 'message': 'http://localhost:8080/product/type/4/edit 561:16 "info"', 'source': 'console-api', 'timestamp': 1583952828410}
        {'level': 'WARNING', 'message': 'http://localhost:8080/product/type/4/edit 562:16 "warning"', 'source': 'console-api', 'timestamp': 1583952828410}
        {'level': 'SEVERE', 'message': 'http://localhost:8080/product/type/4/edit 563:16 "error"', 'source': 'console-api', 'timestamp': 1583952828410}
        """
        for entry in WebdriverOnlyNewLogFacade(self.driver).get_log('browser'):
            """
            images are not working in current docker/travis deployment, so ignore those 404s
            see: https://github.com/DefectDojo/django-DefectDojo/issues/2045
            examples:
            http://localhost:8080/static/dojo/img/zoom-in.cur - Failed to load resource: the server responded with a status of 404 (Not Found)
            http://localhost:8080/media/CACHE/images/finding_images/1bf9c0b1-5ed1-4b4e-9551-bcbfd198b90a/7d8d9af058566b8f2fe6548d96c63237.jpg - Failed to load resource: the server responded with a status of 404 (Not Found)
            """
            accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)'
            # accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)|(bootstrap\-chosen\.css\.map)'

            if (entry['level'] == 'SEVERE'):
                # print(self.driver.current_url)  # TODO actually this seems to be the previous url
                # self.driver.save_screenshot("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.png")
                # with open("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.html", "w") as f:
                #     f.write(self.driver.page_source)
                print(entry)
                print('There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens')
                print('Currently there is no reliable way to find out at which url the error happened, but it could be: .' + self.driver.current_url)

                if self.accept_javascript_errors:
                    print('WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!')
                elif re.search(accepted_javascript_messages, entry['message']):
                    print('WARNING: skipping javascript errors related to finding images, see https://github.com/DefectDojo/django-DefectDojo/issues/2045')
                else:
                    self.assertNotEqual(entry['level'], 'SEVERE')

        return True

    def tearDown(self):
        """Fail the test on console SEVERE errors or recorded verification errors."""
        self.assertNoConsoleErrors()

        self.assertEqual([], self.verificationErrors)

    @classmethod
    def tearDownDriver(cls):
        """Quit the shared browser unless the 'detach' option keeps it open."""
        print('tearDownDriver: ', cls.__name__)
        global dd_driver
        if dd_driver:
            if not dd_driver_options.experimental_options or not dd_driver_options.experimental_options['detach']:
                print('closing browser')
                dd_driver.quit()
class WebdriverOnlyNewLogFacade(object):
    """Wrap a WebDriver so get_log() only yields entries newer than the last
    call on this facade instance.

    Fix: the high-water mark used to live in a class attribute
    (``last_timestamp = 0``) that doubled as implicit per-instance state; it
    is now initialized explicitly per instance in __init__.
    """

    def __init__(self, webdriver):
        self._webdriver = webdriver
        # Timestamp of the newest log entry seen so far by this instance.
        self.last_timestamp = 0

    def get_log(self, log_type):
        """Return the entries from webdriver.get_log(log_type) whose timestamp
        is strictly greater than the newest timestamp previously seen, and
        advance the high-water mark to the newest timestamp in this batch."""
        entries = self._webdriver.get_log(log_type)
        fresh = [entry for entry in entries if entry["timestamp"] > self.last_timestamp]
        if entries:
            newest = max(entry["timestamp"] for entry in entries)
            if newest > self.last_timestamp:
                self.last_timestamp = newest
        return fresh
def on_exception_html_source_logger(func):
    """Decorator for selenium test steps: on any exception, dump the current
    URL and page source (to stdout and selenium_page_source.html) so CI
    failures can be diagnosed, then re-raise the original exception.
    """
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception:
            print("exception occured at url:", self.driver.current_url)
            print("page source:", self.driver.page_source)
            # Fix: context manager guarantees the dump file is flushed and
            # closed; the previous open() leaked the handle on re-raise.
            with open("selenium_page_source.html", "w", encoding='utf-8') as f:
                f.writelines(self.driver.page_source)
            # time.sleep(30)
            raise  # bare raise preserves the original traceback
    return wrapper
| 43.423676 | 222 | 0.65801 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoAlertPresentException
import unittest
import os
import re
dd_driver = None
dd_driver_options = None
class BaseTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
global dd_driver
if not dd_driver:
print('launching browser for: ', cls.__name__)
global dd_driver_options
dd_driver_options = Options()
# headless means no UI, if you want to see what is happening remove headless. Adding detach will leave the window open after the test
dd_driver_options.add_argument("--headless")
# dd_driver_options.add_experimental_option("detach", True)
# the next 2 maybe needed in some scenario's for example on WSL or other headless situations
dd_driver_options.add_argument("--no-sandbox")
dd_driver_options.add_argument("--disable-gpu")
# start maximized or at least with sufficient with because datatables will hide certain controls when the screen is too narrow
dd_driver_options.add_argument("--window-size=1280,1024")
# dd_driver_options.add_argument("--start-maximized")
dd_driver_options.set_capability("acceptInsecureCerts", True)
# some extra logging can be turned on if you want to query the browser javascripe console in your tests
desired = webdriver.DesiredCapabilities.CHROME
desired['goog:loggingPrefs'] = {'browser': 'ALL'}
# change path of chromedriver according to which directory you have chromedriver.
print('starting chromedriver with options: ', vars(dd_driver_options), desired)
dd_driver = webdriver.Chrome('chromedriver', chrome_options=dd_driver_options, desired_capabilities=desired)
# best practice is only use explicit waits
dd_driver.implicitly_wait(1)
cls.driver = dd_driver
cls.base_url = os.environ['DD_BASE_URL']
def setUp(self):
self.verificationErrors = []
self.accept_next_alert = True
self.accept_javascript_errors = False
self.driver.execute_script("console.clear()")
# clear browser console logs?
def login_page(self):
driver = self.driver
driver.get(self.base_url + "login")
driver.find_element_by_id("id_username").clear()
driver.find_element_by_id("id_username").send_keys(os.environ['DD_ADMIN_USER'])
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
driver.find_element_by_css_selector("button.btn.btn-success").click()
self.assertFalse(self.is_element_by_css_selector_present('.alert-danger', 'Please enter a correct username and password'))
return driver
def goto_product_overview(self, driver):
driver.get(self.base_url + "product")
self.wait_for_datatable_if_content("no_products", "products_wrapper")
def goto_component_overview(self, driver):
driver.get(self.base_url + "components")
def goto_active_engagements_overview(self, driver):
# return self.goto_engagements_internal(driver, 'engagement')
# engagement overview doesn't seem to have the datatables yet modifying the DOM
driver.get(self.base_url + 'engagement')
return driver
def goto_all_engagements_overview(self, driver):
return self.goto_engagements_internal(driver, 'engagements_all')
def goto_engagements_internal(self, driver, rel_url):
driver.get(self.base_url + rel_url)
self.wait_for_datatable_if_content("no_engagements", "engagements_wrapper")
return driver
def goto_all_findings_list(self, driver):
driver.get(self.base_url + "finding")
self.wait_for_datatable_if_content("no_findings", "open_findings_wrapper")
def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
no_content = None
try:
no_content = self.driver.find_element_by_id(no_content_id)
except:
pass
if no_content is None:
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, wrapper_id)))
def is_element_by_css_selector_present(self, selector, text=None):
elems = self.driver.find_elements_by_css_selector(selector)
if len(elems) == 0:
return False
if text is None:
return True
for elem in elems:
print(elem.text)
if text in elem.text:
return True
return False
def is_success_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-success', text=text)
def is_error_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-danger', text=text)
def is_text_present_on_page(self, text):
# can't get this xpath to work
body = self.driver.find_element_by_tag_name("body")
return re.search(text, body.text)
def element_exists_by_id(self, id):
elems = self.driver.find_elements_by_id(id)
return len(elems) > 0
def change_system_setting(self, id, enable=True):
print("changing system setting " + id + " enable: " + str(enable))
driver = self.login_page()
driver.get(self.base_url + 'system_settings')
is_enabled = driver.find_element_by_id(id).is_selected()
if (enable and not is_enabled) or (not enable and is_enabled):
# driver.find_element_by_xpath('//*[@id=' + id + ']').click()
driver.find_element_by_id(id).click()
# save settings
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# check if it's enabled after reload
is_enabled = driver.find_element_by_id(id).is_selected()
if enable:
self.assertTrue(is_enabled)
if not enable:
self.assertFalse(is_enabled)
return is_enabled
def enable_system_setting(self, id):
return self.change_system_setting(id, enable=True)
def disable_system_setting(self, id):
return self.change_system_setting(id, enable=False)
def enable_jira(self):
return self.enable_system_setting('id_enable_jira')
def disable_jira(self):
return self.disable_system_setting('id_enable_jira')
def disable_github(self):
return self.disable_system_setting('id_enable_github')
def enable_github(self):
return self.enable_system_setting('id_enable_github')
def enable_block_execution(self):
driver = self.login_page()
driver.get(self.base_url + 'profile')
if not driver.find_element_by_id('id_block_execution').is_selected():
driver.find_element_by_xpath('//*[@id="id_block_execution"]').click()
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.assertTrue(driver.find_element_by_id('id_block_execution').is_selected())
return driver
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def assertNoConsoleErrors(self):
for entry in WebdriverOnlyNewLogFacade(self.driver).get_log('browser'):
accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)'
# accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)|(bootstrap\-chosen\.css\.map)'
if (entry['level'] == 'SEVERE'):
# print(self.driver.current_url) # TODO actually this seems to be the previous url
# self.driver.save_screenshot("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.png")
# with open("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.html", "w") as f:
# f.write(self.driver.page_source)
print(entry)
print('There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens')
print('Currently there is no reliable way to find out at which url the error happened, but it could be: .' + self.driver.current_url)
if self.accept_javascript_errors:
print('WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!')
elif re.search(accepted_javascript_messages, entry['message']):
print('WARNING: skipping javascript errors related to finding images, see https://github.com/DefectDojo/django-DefectDojo/issues/2045')
else:
self.assertNotEqual(entry['level'], 'SEVERE')
return True
def tearDown(self):
self.assertNoConsoleErrors()
self.assertEqual([], self.verificationErrors)
@classmethod
def tearDownDriver(cls):
print('tearDownDriver: ', cls.__name__)
global dd_driver
if dd_driver:
if not dd_driver_options.experimental_options or not dd_driver_options.experimental_options['detach']:
print('closing browser')
dd_driver.quit()
class WebdriverOnlyNewLogFacade(object):
last_timestamp = 0
def __init__(self, webdriver):
self._webdriver = webdriver
def get_log(self, log_type):
last_timestamp = self.last_timestamp
entries = self._webdriver.get_log(log_type)
filtered = []
for entry in entries:
# check the logged timestamp against the
# stored timestamp
if entry["timestamp"] > self.last_timestamp:
filtered.append(entry)
# save the last timestamp only if newer
# in this set of logs
if entry["timestamp"] > last_timestamp:
last_timestamp = entry["timestamp"]
# store the very last timestamp
self.last_timestamp = last_timestamp
return filtered
def on_exception_html_source_logger(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
print("exception occured at url:", self.driver.current_url)
print("page source:", self.driver.page_source)
f = open("selenium_page_source.html", "w", encoding='utf-8')
f.writelines(self.driver.page_source)
# time.sleep(30)
raise(e)
return wrapper
| true | true |
f719b6cebef2b6af3c2533bfa679463c3243666f | 397 | py | Python | Code/Assignment/Assignment/asgi.py | vedez/SDEV2004 | b028c8454ddca9a1abeb95df95e7f189867dd346 | [
"MIT"
] | null | null | null | Code/Assignment/Assignment/asgi.py | vedez/SDEV2004 | b028c8454ddca9a1abeb95df95e7f189867dd346 | [
"MIT"
] | null | null | null | Code/Assignment/Assignment/asgi.py | vedez/SDEV2004 | b028c8454ddca9a1abeb95df95e7f189867dd346 | [
"MIT"
] | null | null | null | """
ASGI config for Assignment project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Assignment.settings')
application = get_asgi_application()
| 23.352941 | 78 | 0.788413 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Assignment.settings')
application = get_asgi_application()
| true | true |
f719b6d868fa7d2ce1c38e9b3db6ae27ddd83ee7 | 1,459 | py | Python | python/vanitygen_onion.py | 5kyc0d3r/Junk | f95fc9beaaf5f234102e213bd977de51cafdcebe | [
"MIT"
] | null | null | null | python/vanitygen_onion.py | 5kyc0d3r/Junk | f95fc9beaaf5f234102e213bd977de51cafdcebe | [
"MIT"
] | null | null | null | python/vanitygen_onion.py | 5kyc0d3r/Junk | f95fc9beaaf5f234102e213bd977de51cafdcebe | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
MIT License
Copyright (c) 2017 5kyc0d3r
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# This script helps you generate a customized .onion domain for your hidden service on the tor network.
# This should not be used if you require high performance for the domain generation process because
# it will be very slow since it was written in Python. However, Cython support will be added soon which
# will significantly boost the domain generation process.
| 47.064516 | 103 | 0.797807 | true | true | |
f719b711c4580588d5faede2a699731e7e1104b7 | 73,903 | py | Python | src/sage/rings/derivation.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/rings/derivation.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/rings/derivation.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | r"""
Derivations
Let `A` be a ring and `B` be a bimodule over `A`.
A derivation `d : A \to B` is an additive map that satisfies
the Leibniz rule
.. MATH::
d(xy) = x d(y) + d(x) y.
If `B` is an algebra over `A` and if we are given in addition a
ring homomorphism `\theta : A \to B`, a twisted derivation with respect
to `\theta` (or a `\theta`-derivation) is an additive map `d : A \to B`
such that
.. MATH::
d(xy) = \theta(x) d(y) + d(x) y.
When `\theta` is the morphism defining the structure of `A`-algebra
on `B`, a `\theta`-derivation is nothing but a derivation.
In general, if `\iota : A \to B` denotes the defining morphism above,
one easily checks that `\theta - \iota` is a `\theta`-derivation.
This file provides support for derivations and twisted derivations
over commutative rings with values in algebras (i.e. we require
that `B` is a commutative `A`-algebra).
In this case, the set of derivations (resp. `\theta`-derivations)
is a module over `B`.
Given a ring `A`, the module of derivations over `A` can be created
as follows::
sage: A.<x,y,z> = QQ[]
sage: M = A.derivation_module()
sage: M
Module of derivations over Multivariate Polynomial Ring in x, y, z over Rational Field
The method :meth:`~sage.rings.derivation.RingDerivationModule.gens`
returns the generators of this module::
sage: A.<x,y,z> = QQ[]
sage: M = A.derivation_module()
sage: M.gens()
(d/dx, d/dy, d/dz)
We can combine them in order to create all derivations::
sage: d = 2*M.gen(0) + z*M.gen(1) + (x^2 + y^2)*M.gen(2)
sage: d
2*d/dx + z*d/dy + (x^2 + y^2)*d/dz
and now play with them::
sage: d(x + y + z)
x^2 + y^2 + z + 2
sage: P = A.random_element()
sage: Q = A.random_element()
sage: d(P*Q) == P*d(Q) + d(P)*Q
True
Alternatively we can use the method
:meth:`~sage.rings.ring.CommutativeRing.derivation`
of the ring `A` to create derivations::
sage: Dx = A.derivation(x); Dx
d/dx
sage: Dy = A.derivation(y); Dy
d/dy
sage: Dz = A.derivation(z); Dz
d/dz
sage: A.derivation([2, z, x^2+y^2])
2*d/dx + z*d/dy + (x^2 + y^2)*d/dz
Sage knows moreover that `M` is a Lie algebra::
sage: M.category()
Join of Category of lie algebras with basis over Rational Field
and Category of modules with basis over Multivariate Polynomial Ring in x, y, z over Rational Field
Computations of Lie brackets are implemented as well::
sage: Dx.bracket(Dy)
0
sage: d.bracket(Dx)
-2*x*d/dz
At the creation of a module of derivations, a codomain can be specified::
sage: B = A.fraction_field()
sage: A.derivation_module(B)
Module of derivations from Multivariate Polynomial Ring in x, y, z over Rational Field
to Fraction Field of Multivariate Polynomial Ring in x, y, z over Rational Field
Alternatively, one can specify a morphism `f` with domain `A`.
In this case, the codomain of the derivations is the codomain of
`f` but the latter is viewed as an algebra over `A` through the
homomorphism `f`.
This construction is useful, for example, if we want to work with
derivations on `A` at a certain point, e.g. `(0,1,2)`. Indeed,
in order to achieve this, we first define the evaluation map at
this point::
sage: ev = A.hom([QQ(0), QQ(1), QQ(2)])
sage: ev
Ring morphism:
From: Multivariate Polynomial Ring in x, y, z over Rational Field
To: Rational Field
Defn: x |--> 0
y |--> 1
z |--> 2
Now we use this ring homomorphism to define a structure of `A`-algebra
on `\QQ` and then build the following module of derivations::
sage: M = A.derivation_module(ev)
sage: M
Module of derivations from Multivariate Polynomial Ring in x, y, z over Rational Field to Rational Field
sage: M.gens()
(d/dx, d/dy, d/dz)
Elements in `M` then act as derivations at `(0,1,2)`::
sage: Dx = M.gen(0)
sage: Dy = M.gen(1)
sage: Dz = M.gen(2)
sage: f = x^2 + y^2 + z^2
sage: Dx(f) # = 2*x evaluated at (0,1,2)
0
sage: Dy(f) # = 2*y evaluated at (0,1,2)
2
sage: Dz(f) # = 2*z evaluated at (0,1,2)
4
Twisted derivations are handled similarly::
sage: theta = B.hom([B(y),B(z),B(x)])
sage: theta
Ring endomorphism of Fraction Field of Multivariate Polynomial Ring in x, y, z over Rational Field
Defn: x |--> y
y |--> z
z |--> x
sage: M = B.derivation_module(twist=theta)
sage: M
Module of twisted derivations over Fraction Field of Multivariate Polynomial Ring
in x, y, z over Rational Field (twisting morphism: x |--> y, y |--> z, z |--> x)
Over a field, one proves that every `\theta`-derivation is a multiple
of `\theta - id`, so that::
sage: d = M.gen(); d
[x |--> y, y |--> z, z |--> x] - id
and then::
sage: d(x)
-x + y
sage: d(y)
-y + z
sage: d(z)
x - z
sage: d(x + y + z)
0
AUTHOR:
- Xavier Caruso (2018-09)
"""
# ***************************************************************************
# Copyright (C) 2018 Xavier Caruso <xavier.caruso@normalesup.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
# ***************************************************************************
from sage.structure.richcmp import op_EQ, op_NE
from sage.structure.unique_representation import UniqueRepresentation
from sage.sets.family import Family
from sage.modules.module import Module
from sage.structure.element import ModuleElement
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring import PolynomialRing_general
from sage.rings.polynomial.multi_polynomial_ring_base import MPolynomialRing_base
from sage.rings.power_series_ring import PowerSeriesRing_generic
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.rings.fraction_field import FractionField_generic
from sage.rings.quotient_ring import QuotientRing_generic
from sage.rings.polynomial.polynomial_quotient_ring import PolynomialQuotientRing_generic
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing_generic
from sage.rings.padics.padic_generic import pAdicGeneric
from sage.categories.number_fields import NumberFields
from sage.categories.finite_fields import FiniteFields
from sage.categories.modules import Modules
from sage.categories.modules_with_basis import ModulesWithBasis
from sage.categories.lie_algebras import LieAlgebras
from sage.categories.map import Map
from sage.categories.rings import Rings
from sage.misc.latex import latex
class RingDerivationModule(Module, UniqueRepresentation):
"""
A class for modules of derivations over a commutative ring.
"""
    def __init__(self, domain, codomain, twist=None):
        """
        Initialize this module of derivations.

        INPUT:

        - ``domain`` -- a commutative ring, the domain of the derivations

        - ``codomain`` -- either a commutative algebra over ``domain``, or
          a ring homomorphism whose domain coerces from ``domain`` (in the
          latter case, the codomain of the derivations is the codomain of
          the morphism, viewed as an algebra through it)

        - ``twist`` -- a ring homomorphism or ``None`` (default: ``None``);
          the twisting morphism

        TESTS::

            sage: A.<x,y> = QQ[]
            sage: M = A.derivation_module()
            sage: TestSuite(M).run()

            sage: from sage.rings.derivation import RingDerivationModule
            sage: R5.<x> = GF(5)[]
            sage: R25.<x> = GF(25)[]
            sage: R7.<x> = GF(7)[]

            sage: RingDerivationModule(R5, R25)
            Module of derivations from Univariate Polynomial Ring in x over Finite Field of size 5 to Univariate Polynomial Ring in x over Finite Field in z2 of size 5^2
            sage: RingDerivationModule(R5, R5^2)
            Traceback (most recent call last):
            ...
            TypeError: the codomain must be an algebra over the domain or a morphism with the correct domain
            sage: RingDerivationModule(R5, R7)
            Traceback (most recent call last):
            ...
            TypeError: the codomain must be an algebra over the domain or a morphism with the correct domain

            sage: theta = R5.hom([R5.gen()^2])
            sage: RingDerivationModule(R5, R25, twist=theta)
            Module of twisted derivations from Univariate Polynomial Ring in x over Finite Field of size 5 to Univariate Polynomial Ring in x over Finite Field in z2 of size 5^2 (twisting morphism: x |--> x^2)
            sage: RingDerivationModule(R7, R7, twist=theta)
            Traceback (most recent call last):
            ...
            TypeError: the domain of the derivation must coerce to the domain of the twisting homomorphism
        """
        if domain not in Rings().Commutative():
            raise TypeError("the domain must be a commutative ring")
        # Normalize ``codomain``: accept either an algebra over the domain
        # (use the coercion map as defining morphism) or a ring morphism
        # (possibly precomposed with a coercion so that it starts at ``domain``).
        if codomain in Rings().Commutative() and codomain.has_coerce_map_from(domain):
            defining_morphism = codomain.coerce_map_from(domain)
        elif (isinstance(codomain,Map)
              and codomain.category_for().is_subcategory(Rings())
              and codomain.domain().has_coerce_map_from(domain)):
            if codomain.domain() is domain:
                defining_morphism = codomain
            else:
                defining_morphism = codomain * codomain.domain().coerce_map_from(domain)
            codomain = defining_morphism.codomain()
        else:
            raise TypeError("the codomain must be an algebra over the domain"
                            " or a morphism with the correct domain")
        if twist is not None:
            # Normalize ``twist`` so that it goes from ``domain`` to
            # ``codomain``, composing with coercion maps on both sides.
            if not (isinstance(twist, Map) and twist.category_for().is_subcategory(Rings())):
                raise TypeError("the twisting homomorphism must be an homomorphism of rings")
            if twist.domain() is not domain:
                map = twist.domain().coerce_map_from(domain)
                if map is None:
                    raise TypeError("the domain of the derivation must coerce"
                                    " to the domain of the twisting homomorphism")
                twist = twist * map
            if twist.codomain() is not codomain:
                map = codomain.coerce_map_from(twist.codomain())
                if map is None:
                    raise TypeError("the codomain of the twisting homomorphism"
                                    " must coerce to the codomain of the derivation")
                twist = map * twist
            # We check if the twisting morphism is the defining morphism;
            # if so, the derivations are untwisted and ``twist`` is dropped.
            try:
                if twist == defining_morphism:
                    twist = None
                else:
                    # Fallback comparison on generators when equality of
                    # morphisms is not decidable directly.
                    for g in domain.gens():
                        if twist(g) != defining_morphism(g):
                            break
                    else:
                        twist = None
            except (AttributeError, NotImplementedError):
                pass
        self._domain = domain
        self._codomain = codomain
        self._defining_morphism = defining_morphism
        self._twist = twist
        self._base_derivation = None
        self._gens = None
        self._basis = self._dual_basis = None
        # Currently basis and gens play exactly the same role because
        # the only rings that are supported lead to free modules of derivations.
        # So the code is a bit redundant but we expect to be able to cover more
        # rings (with non free modules of derivations) in a near future.
        # ``_constants`` is a pair (ring, flag); the flag records whether the
        # ring is provably the full ring of constants.
        self._constants = (ZZ, False)
        if twist is not None:
            # Twisted case: over a field, every theta-derivation is a
            # multiple of theta - id, hence a single generator.
            self.Element = RingDerivationWithTwist_generic
            if domain.is_field():
                self._gens = [ 1 ]
                self._basis = [ 1 ]
        elif (domain is ZZ or domain in NumberFields() or domain in FiniteFields()
              or isinstance(domain, IntegerModRing_generic)
              or (isinstance(domain, pAdicGeneric) and (domain.is_field() or domain.absolute_e() == 1))):
            # Rings with only the zero derivation.
            self.Element = RingDerivationWithoutTwist_zero
            self._gens = [ ]
            self._basis = [ ]
            self._dual_basis = [ ]
            self._constants = (domain, True)
        elif (isinstance(domain, (PolynomialRing_general, MPolynomialRing_base, PowerSeriesRing_generic, LaurentSeriesRing))
              or (isinstance(domain, FractionField_generic)
                  and isinstance(domain.ring(), (PolynomialRing_general, MPolynomialRing_base)))):
            # Polynomial-like rings: derivations of the base ring together
            # with the partial derivatives d/dx for each variable x.
            self._base_derivation = RingDerivationModule(domain.base_ring(), defining_morphism)
            self.Element = RingDerivationWithoutTwist_function
            try:
                self._gens = self._base_derivation.gens() + domain.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = tuple(self._base_derivation.basis()) + domain.gens()
                self._dual_basis = tuple(self._base_derivation.dual_basis()) + domain.gens()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            if domain.characteristic() == 0:
                self._constants = (constants, sharp)
            else:
                # in this case, the constants are polynomials in x^p
                # TODO: implement this
                self._constants = (constants, False)
        elif isinstance(domain, FractionField_generic):
            # Fraction field: derivations extend uniquely from the ring
            # by the quotient rule.
            self._base_derivation = RingDerivationModule(domain.ring(), defining_morphism)
            self.Element = RingDerivationWithoutTwist_fraction_field
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants.fraction_field(), False)
        elif isinstance(domain, PolynomialQuotientRing_generic):
            # Quotient of a polynomial ring: only derivations killing the
            # modulus descend to the quotient; others are not supported.
            self._base_derivation = RingDerivationModule(domain.base(), defining_morphism)
            modulus = domain.modulus()
            for der in self._base_derivation.gens():
                if der(modulus) != 0:
                    raise NotImplementedError("derivations over quotient rings"
                                              " are not fully supported")
            self.Element = RingDerivationWithoutTwist_quotient
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants, False)  # can we do better?
        elif isinstance(domain, QuotientRing_generic):
            # General quotient ring: same restriction, the derivations of
            # the cover ring must kill every generator of the defining ideal.
            self._base_derivation = RingDerivationModule(domain.cover_ring(), defining_morphism)
            if any(der(modulus) != 0 for modulus in domain.defining_ideal().gens()
                   for der in self._base_derivation.gens()):
                raise NotImplementedError("derivations over quotient rings"
                                          " are not fully supported")
            self.Element = RingDerivationWithoutTwist_quotient
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants, False)  # can we do better?
        else:
            raise NotImplementedError("derivations over this ring is not implemented")
        # Pick the category: a basis upgrades Modules to ModulesWithBasis,
        # and untwisted derivations of a ring into itself form a Lie algebra.
        if self._basis is None:
            category = Modules(codomain)
        else:
            category = ModulesWithBasis(codomain)
        if self._twist is None and domain is codomain:
            category &= LieAlgebras(self._constants[0])
        Module.__init__(self, codomain, category=category)
        # Wrap the raw generators/basis into actual derivation elements.
        if self._gens is not None:
            self._gens = [self.element_class(self, x) for x in self._gens]
        if self._basis is not None:
            self._basis = [self.element_class(self, x) for x in self._basis]
        if self._dual_basis is not None:
            self._dual_basis = [domain(x) for x in self._dual_basis]
def __hash__(self):
"""
Return a hash of ``self``.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: M = R.derivation_module()
sage: hash(M) == hash((M.domain(), M.codomain(), M.twisting_morphism()))
True
"""
return hash((self._domain, self._codomain, self._twist))
def _coerce_map_from_(self, R):
"""
Return ``True`` if there is a coercion map from ``R``
to this module.
EXAMPLES::
sage: A.<x> = QQ[]
sage: B.<y> = A[]
sage: M1 = A.derivation_module(); M1
Module of derivations over Univariate Polynomial Ring in x over Rational Field
sage: M2 = A.derivation_module(B); M2
Module of derivations from Univariate Polynomial Ring in x over Rational Field
to Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field
sage: M1._coerce_map_from_(M2) is None
True
sage: M1.has_coerce_map_from(M2)
False
sage: M2.has_coerce_map_from(M1)
True
sage: M1.has_coerce_map_from(ZZ)
False
sage: M1.has_coerce_map_from(QQ)
False
sage: M1.has_coerce_map_from(A)
False
"""
if isinstance(R, RingDerivationModule):
if R.domain().has_coerce_map_from(self._domain) and self._codomain.has_coerce_map_from(R.codomain()):
morR = R.defining_morphism()
morS = self._defining_morphism
try:
# this test is not perfect
for g in self._domain.gens():
if morR(g) != morS(g):
return False
return True
except (AttributeError, NotImplementedError):
pass
return super(RingDerivationModule, self)._coerce_map_from_(R)
def _repr_(self):
"""
Return a string representation of this module of derivations.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: R.derivation_module()
Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
sage: theta = R.hom([y,x])
sage: R.derivation_module(twist=theta)
Module of twisted derivations over Multivariate Polynomial Ring in x, y
over Integer Ring (twisting morphism: x |--> y, y |--> x)
"""
t = ""
if self._twist is None:
s = "Module of derivations"
else:
s = "Module of twisted derivations"
try:
t = " (twisting morphism: %s)" % self._twist._repr_short()
except AttributeError:
pass
if self._domain is self._codomain:
s += " over %s" % self._domain
else:
s += " from %s to %s" % (self._domain, self._codomain)
return s + t
def domain(self):
"""
Return the domain of the derivations in this module.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: M = R.derivation_module(); M
Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
sage: M.domain()
Multivariate Polynomial Ring in x, y over Integer Ring
"""
return self._domain
def codomain(self):
"""
Return the codomain of the derivations in this module.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: M = R.derivation_module(); M
Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
sage: M.codomain()
Multivariate Polynomial Ring in x, y over Integer Ring
"""
return self._codomain
def defining_morphism(self):
"""
Return the morphism defining the structure of algebra
of the codomain over the domain.
EXAMPLES::
sage: R.<x> = QQ[]
sage: M = R.derivation_module()
sage: M.defining_morphism()
Identity endomorphism of Univariate Polynomial Ring in x over Rational Field
sage: S.<y> = R[]
sage: M = R.derivation_module(S)
sage: M.defining_morphism()
Polynomial base injection morphism:
From: Univariate Polynomial Ring in x over Rational Field
To: Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field
sage: ev = R.hom([QQ(0)])
sage: M = R.derivation_module(ev)
sage: M.defining_morphism()
Ring morphism:
From: Univariate Polynomial Ring in x over Rational Field
To: Rational Field
Defn: x |--> 0
"""
return self._defining_morphism
def twisting_morphism(self):
r"""
Return the twisting homomorphism of the derivations in this module.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: theta = R.hom([y,x])
sage: M = R.derivation_module(twist=theta); M
Module of twisted derivations over Multivariate Polynomial Ring in x, y
over Integer Ring (twisting morphism: x |--> y, y |--> x)
sage: M.twisting_morphism()
Ring endomorphism of Multivariate Polynomial Ring in x, y over Integer Ring
Defn: x |--> y
y |--> x
When the derivations are untwisted, this method returns nothing::
sage: M = R.derivation_module()
sage: M.twisting_morphism()
"""
return self._twist
def ngens(self):
r"""
Return the number of generators of this module of derivations.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: M = R.derivation_module(); M
Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
sage: M.ngens()
2
Indeed, generators are::
sage: M.gens()
(d/dx, d/dy)
We check that, for a nontrivial twist over a field, the module of
twisted derivation is a vector space of dimension 1 generated by
``twist - id``::
sage: K = R.fraction_field()
sage: theta = K.hom([K(y),K(x)])
sage: M = K.derivation_module(twist=theta); M
Module of twisted derivations over Fraction Field of Multivariate Polynomial
Ring in x, y over Integer Ring (twisting morphism: x |--> y, y |--> x)
sage: M.ngens()
1
sage: M.gen()
[x |--> y, y |--> x] - id
"""
if self._gens is None:
raise NotImplementedError("generators are not implemented for this derivation module")
return len(self._gens)
def gens(self):
r"""
Return the generators of this module of derivations.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: M = R.derivation_module(); M
Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
sage: M.gens()
(d/dx, d/dy)
We check that, for a nontrivial twist over a field, the module of
twisted derivation is a vector space of dimension 1 generated by
``twist - id``::
sage: K = R.fraction_field()
sage: theta = K.hom([K(y),K(x)])
sage: M = K.derivation_module(twist=theta); M
Module of twisted derivations over Fraction Field of Multivariate Polynomial
Ring in x, y over Integer Ring (twisting morphism: x |--> y, y |--> x)
sage: M.gens()
([x |--> y, y |--> x] - id,)
"""
if self._gens is None:
raise NotImplementedError("generators are not implemented for this derivation module")
return tuple(self._gens)
def gen(self, n=0):
    r"""
    Return the ``n``-th generator of this module of derivations.

    INPUT:

    - ``n`` -- an integer (default: ``0``)

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: M = R.derivation_module(); M
        Module of derivations over Multivariate Polynomial Ring in x, y over Integer Ring
        sage: M.gen()
        d/dx
        sage: M.gen(1)
        d/dy
    """
    generators = self._gens
    if generators is None:
        raise NotImplementedError("generators are not implemented for this derivation module")
    # EAFP: let indexing decide validity (this also accepts the usual
    # Python negative indices, as the original behavior did).
    try:
        return generators[n]
    except IndexError:
        raise ValueError("generator not defined")
def basis(self):
    r"""
    Return a basis of this module of derivations.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: M = R.derivation_module()
        sage: M.basis()
        Family (d/dx, d/dy)
    """
    elements = self._basis
    if elements is None:
        raise NotImplementedError("basis is not implemented for this derivation module")
    return Family(elements)
def dual_basis(self):
    r"""
    Return the dual basis of the canonical basis of this module of
    derivations (which is that returned by the method :meth:`basis`).

    .. NOTE::

        The dual basis of `(d_1, \dots, d_n)` is a family
        `(x_1, \ldots, x_n)` of elements in the domain such
        that `d_i(x_i) = 1` and `d_i(x_j) = 0` if `i \neq j`.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: M = R.derivation_module()
        sage: M.basis()
        Family (d/dx, d/dy)
        sage: M.dual_basis()
        Family (x, y)
    """
    if self._dual_basis is None:
        # Fixed error message: it previously said "basis is not
        # implemented" (copy-paste from :meth:`basis`), which was
        # misleading for this method.
        raise NotImplementedError("dual basis is not implemented for this derivation module")
    return Family(self._dual_basis)
def ring_of_constants(self):
    r"""
    Return the subring of the domain consisting of elements
    `x` such that `d(x) = 0` for all derivation `d` in this module.

    EXAMPLES::

        sage: R.<x,y> = QQ[]
        sage: M = R.derivation_module()
        sage: M.basis()
        Family (d/dx, d/dy)
        sage: M.ring_of_constants()
        Rational Field
    """
    # ``_constants`` stores a pair: the ring itself and a flag telling
    # whether its computation is actually implemented.
    ring, computable = self._constants[0], self._constants[1]
    if not computable:
        raise NotImplementedError("the computation of the ring of constants"
                                  " is not implemented for this derivation module")
    return ring
def random_element(self, *args, **kwds):
    r"""
    Return a random derivation in this module.

    Extra positional and keyword arguments are forwarded to the
    ``random_element`` method of the codomain.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: M = R.derivation_module()
        sage: M.random_element()  # random
        (x^2 + x*y - 3*y^2 + x + 1)*d/dx + (-2*x^2 + 3*x*y + 10*y^2 + 2*x + 8)*d/dy
    """
    if self._gens is None:
        raise NotImplementedError("generators are not implemented for this derivation module")
    # Draw one random coefficient per generator, then build the element.
    coordinates = [self._codomain.random_element(*args, **kwds)
                   for _ in self._gens]
    return self(coordinates)
def some_elements(self):
    r"""
    Return a list of elements of this module.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: M = R.derivation_module()
        sage: M.some_elements()
        [d/dx, d/dy, x*d/dx, x*d/dy, y*d/dx, y*d/dy]
    """
    if self._gens is None:
        # Bug fix: this method must return a collection of elements;
        # previously it returned the bare element itself, unlike every
        # other branch which returns a sequence.
        return [self.an_element()]
    if self._dual_basis is None:
        return self._gens
    # Generators together with their products by the dual-basis elements.
    return self._gens + [f * D for f in self._dual_basis for D in self._gens]
# The class RingDerivation does not derive from Map (or RingMap)
# because we don't want to see derivations as morphisms in some
# category since they are not stable by composition.
class RingDerivation(ModuleElement):
    r"""
    An abstract class for twisted and untwisted derivations over
    commutative rings.

    TESTS::

        sage: R.<x,y> = ZZ[]
        sage: f = R.derivation(x) + 2*R.derivation(y); f
        d/dx + 2*d/dy
        sage: f(x*y)
        2*x + y
    """
    def __call__(self, x):
        """
        Return the image of ``x`` under this derivation.

        The argument is first converted into the domain of this
        derivation; the actual computation is delegated to ``_call_``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = x*R.derivation(x) + y*R.derivation(y)
            sage: f(x^2 + 3*x*y - y^2)
            2*x^2 + 6*x*y - 2*y^2
        """
        element = self.parent().domain()(x)
        return self._call_(element)

    def domain(self):
        """
        Return the domain of this derivation.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: f = R.derivation(y); f
            d/dy
            sage: f.domain()
            Multivariate Polynomial Ring in x, y over Rational Field
            sage: f.domain() is R
            True
        """
        # The domain is carried by the parent (the derivation module).
        return self.parent().domain()

    def codomain(self):
        """
        Return the codomain of this derivation.

        EXAMPLES::

            sage: R.<x> = QQ[]
            sage: f = R.derivation(); f
            d/dx
            sage: f.codomain()
            Univariate Polynomial Ring in x over Rational Field
            sage: f.codomain() is R
            True

        ::

            sage: S.<y> = R[]
            sage: M = R.derivation_module(S)
            sage: M.random_element().codomain()
            Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field
            sage: M.random_element().codomain() is S
            True
        """
        # The codomain is carried by the parent as well.
        return self.parent().codomain()
class RingDerivationWithoutTwist(RingDerivation):
    """
    An abstract class for untwisted derivations.
    """
    def _repr_(self):
        r"""
        Return a string representation of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: R.derivation(x)
            d/dx
            sage: R.derivation(y)
            d/dy
        """
        parent = self.parent()
        try:
            dual_basis = parent.dual_basis()
        except NotImplementedError:
            # No dual basis: we cannot display coefficients, so fall
            # back to a generic description.
            return "A derivation on %s" % parent.domain()
        coeffs = self.list()
        s = ""
        # Assemble a signed sum "c1*d/dx1 + c2*d/dx2 + ...", choosing
        # per-coefficient formatting (sign folding, parenthesization).
        for i in range(len(dual_basis)):
            c = coeffs[i]
            sc = str(c)
            if sc == "0":
                continue
            ddx = "d/d%s" % dual_basis[i]
            if sc == "1":
                s += " + " + ddx
            elif sc == "-1":
                s += " - " + ddx
            elif c._is_atomic() and sc[0] != "-":
                s += " + %s*%s" % (sc, ddx)
            elif (-c)._is_atomic():
                # Negative atomic coefficient: fold the sign into the
                # separator to print "- c*d/dx" rather than "+ (-c)*d/dx".
                s += " - %s*%s" % (-c, ddx)
            else:
                s += " + (%s)*%s" % (sc, ddx)
        # Strip the leading separator produced by the loop.
        if s[:3] == " + ":
            return s[3:]
        elif s[:3] == " - ":
            return "-" + s[3:]
        elif s == "":
            return "0"
        else:
            return s

    def _latex_(self):
        r"""
        Return a LaTeX representation of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: ddx = R.derivation(x)
            sage: ddy = R.derivation(y)
            sage: latex(ddx)
            \frac{d}{dx}
            sage: latex(ddy)
            \frac{d}{dy}
            sage: latex(ddx + ddy)
            \frac{d}{dx} + \frac{d}{dy}
        """
        # Same structure as ``_repr_`` but with LaTeX markup.
        parent = self.parent()
        try:
            dual_basis = parent.dual_basis()
        except NotImplementedError:
            return "\\text{A derivation on } %s" % latex(parent.domain())
        coeffs = self.list()
        s = ""
        for i in range(len(dual_basis)):
            c = coeffs[i]
            sc = str(c)
            if sc == "0":
                continue
            ddx = "\\frac{d}{d%s}" % latex(dual_basis[i])
            if sc == "1":
                s += " + " + ddx
            elif sc == "-1":
                s += " - " + ddx
            elif c._is_atomic() and sc[0] != "-":
                s += " + %s %s" % (sc, ddx)
            elif (-c)._is_atomic():
                s += " - %s %s" % (-c, ddx)
            else:
                s += " + \\left(%s\\right) %s" % (sc, ddx)
        if s[:3] == " + ":
            return s[3:]
        elif s[:3] == " - ":
            return "-" + s[3:]
        elif s == "":
            return "0"
        else:
            return s

    def list(self):
        """
        Return the list of coefficient of this derivation
        on the canonical basis.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)
            sage: R.derivation(x).list()
            [1, 0]
            sage: R.derivation(y).list()
            [0, 1]
            sage: f = x*R.derivation(x) + y*R.derivation(y); f
            x*d/dx + y*d/dy
            sage: f.list()
            [x, y]
        """
        # The i-th coefficient is the image of the i-th dual-basis element.
        parent = self.parent()
        return [self(x) for x in parent.dual_basis()]

    def monomial_coefficients(self):
        r"""
        Return dictionary of nonzero coordinates (on the canonical
        basis) of this derivation.

        More precisely, this returns a dictionary whose keys are indices
        of basis elements and whose values are the corresponding coefficients.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)
            sage: R.derivation(x).monomial_coefficients()
            {0: 1}
            sage: R.derivation(y).monomial_coefficients()
            {1: 1}
            sage: f = x*R.derivation(x) + y*R.derivation(y); f
            x*d/dx + y*d/dy
            sage: f.monomial_coefficients()
            {0: x, 1: y}
        """
        dual_basis = self.parent().dual_basis()
        # NOTE(review): the local name ``dict`` shadows the builtin;
        # consider renaming it (e.g. ``coeffs``) in a future cleanup.
        dict = { }
        for i in range(len(dual_basis)):
            c = self(dual_basis[i])
            if c != 0:
                dict[i] = c
        return dict

    def is_zero(self):
        """
        Return ``True`` if this derivation is zero.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(); f
            d/dx
            sage: f.is_zero()
            False
            sage: (f-f).is_zero()
            True
        """
        # Zero iff every coefficient on the canonical basis vanishes.
        for c in self.list():
            if not c.is_zero():
                return False
        return True

    def _richcmp_(self, other, op):
        """
        Compare this derivation with ``other`` according
        to the comparison operator ``op``.

        Only equality and inequality are supported; other comparisons
        return ``NotImplemented``.

        EXAMPLES::

            sage: R.<x,y,z> = GF(5)[]
            sage: D = sum(v*R.derivation(v) for v in R.gens()); D
            x*d/dx + y*d/dy + z*d/dz
            sage: D.pth_power() == D
            True
        """
        if op == op_EQ:
            if isinstance(other, RingDerivationWithoutTwist):
                # Equal iff coefficient lists agree on the canonical basis.
                return self.list() == other.list()
            else:
                return False
        if op == op_NE:
            if isinstance(other, RingDerivationWithoutTwist):
                return self.list() != other.list()
            else:
                return True
        return NotImplemented

    def _bracket_(self, other):
        """
        Return the Lie bracket (that is the commutator) of
        this derivation and ``other``.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx._bracket_(Dy)
            0
            sage: Dx.bracket(x*Dy)
            d/dy

        TESTS::

            sage: M = R.derivation_module()
            sage: X = M.random_element()
            sage: X.bracket(X)
            0

        We check the Jacobi identity::

            sage: Y = M.random_element()
            sage: Z = M.random_element()
            sage: X.bracket(Y.bracket(Z)) + Y.bracket(Z.bracket(X)) + Z.bracket(X.bracket(Y))
            0

        and the product rule::

            sage: f = R.random_element()
            sage: X.bracket(f*Y) == X(f)*Y + f*X.bracket(Y)
            True
        """
        parent = self.parent()
        # The commutator only makes sense for endo-derivations.
        if parent.domain() is not parent.codomain():
            raise TypeError("the bracket is only defined for derivations with same domain and codomain")
        # Coefficients of [self, other] on the dual basis.
        arg = [ ]
        for x in parent.dual_basis():
            arg.append(self(other(x)) - other(self(x)))
        return parent(arg)

    def pth_power(self):
        r"""
        Return the `p`-th power of this derivation where `p`
        is the characteristic of the domain.

        .. NOTE::

            Leibniz rule implies that this is again a derivation.

        EXAMPLES::

            sage: R.<x,y> = GF(5)[]
            sage: Dx = R.derivation(x)
            sage: Dx.pth_power()
            0
            sage: (x*Dx).pth_power()
            x*d/dx
            sage: (x^6*Dx).pth_power()
            x^26*d/dx
            sage: Dy = R.derivation(y)
            sage: (x*Dx + y*Dy).pth_power()
            x*d/dx + y*d/dy

        An error is raised if the domain has characteristic zero::

            sage: R.<x,y> = QQ[]
            sage: Dx = R.derivation(x)
            sage: Dx.pth_power()
            Traceback (most recent call last):
            ...
            TypeError: the domain of the derivation must have positive and prime characteristic

        or if the characteristic is not a prime number::

            sage: R.<x,y> = Integers(10)[]
            sage: Dx = R.derivation(x)
            sage: Dx.pth_power()
            Traceback (most recent call last):
            ...
            TypeError: the domain of the derivation must have positive and prime characteristic

        TESTS::

            sage: R.<x,y> = GF(3)[]
            sage: D = R.derivation_module().random_element()
            sage: Dp = D.pth_power()
            sage: f = R.random_element()
            sage: Dp(f) == D(D(D(f)))
            True
            sage: D.bracket(Dp)
            0
        """
        parent = self.parent()
        if parent.domain() is not parent.codomain():
            raise TypeError("the derivation must have the same domain and codomain")
        p = parent.domain().characteristic()
        # ``p.is_prime()`` is False for 0, so this also rejects
        # characteristic-zero domains.
        if not p.is_prime():
            raise TypeError("the domain of the derivation must have positive and prime characteristic")
        # Coefficient on the dual-basis element x is self applied p times.
        arg = [ ]
        for x in parent.dual_basis():
            res = x
            for _ in range(p):
                res = self(res)
            arg.append(res)
        return parent(arg)

    def precompose(self, morphism):
        r"""
        Return the derivation obtained by applying first
        ``morphism`` and then this derivation.

        INPUT:

        - ``morphism`` -- a homomorphism of rings whose codomain is
          the domain of this derivation or a ring that coerces to
          the domain of this derivation

        EXAMPLES::

            sage: A.<x> = QQ[]
            sage: B.<x,y> = QQ[]
            sage: D = B.derivation(x) - 2*x*B.derivation(y); D
            d/dx - 2*x*d/dy

        When restricting to ``A``, the term ``d/dy`` disappears
        (since it vanishes on ``A``)::

            sage: D.precompose(A)
            d/dx

        If we restrict to another well chosen subring, the derivation vanishes::

            sage: C.<t> = QQ[]
            sage: f = C.hom([x^2 + y]); f
            Ring morphism:
              From: Univariate Polynomial Ring in t over Rational Field
              To:   Multivariate Polynomial Ring in x, y over Rational Field
              Defn: t |--> x^2 + y
            sage: D.precompose(f)
            0

        Note that this method cannot be used to compose derivations::

            sage: D.precompose(D)
            Traceback (most recent call last):
            ...
            TypeError: you must give an homomorphism of rings

        TESTS::

            sage: D.precompose(C)
            Traceback (most recent call last):
            ...
            TypeError: the given ring does not coerce to the domain of the derivation
        """
        parent = self.parent()
        # A bare ring is accepted as shorthand for its coercion map
        # into the domain, when such a map exists.
        if morphism in Rings().Commutative():
            if parent.domain().has_coerce_map_from(morphism):
                morphism = parent.domain().coerce_map_from(morphism)
            else:
                raise TypeError("the given ring does not coerce to the domain of the derivation")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        # The result lives in the derivation module over the new domain,
        # with defining morphism (self's defining morphism) o morphism.
        M = RingDerivationModule(morphism.domain(), parent.defining_morphism() * morphism)
        arg = [ ]
        for x in M.dual_basis():
            arg.append(self(morphism(x)))
        return M(arg)

    def postcompose(self, morphism):
        """
        Return the derivation obtained by applying first
        this derivation and then ``morphism``.

        INPUT:

        - ``morphism`` -- a homomorphism of rings whose domain is
          the codomain of this derivation or a ring into which the
          codomain of this derivation coerces

        EXAMPLES::

            sage: A.<x,y>= QQ[]
            sage: ev = A.hom([QQ(0), QQ(1)])
            sage: Dx = A.derivation(x)
            sage: Dy = A.derivation(y)

        We can define the derivation at `(0,1)` just by postcomposing
        with ``ev``::

            sage: dx = Dx.postcompose(ev)
            sage: dy = Dy.postcompose(ev)
            sage: f = x^2 + y^2
            sage: dx(f)
            0
            sage: dy(f)
            2

        Note that we cannot avoid the creation of the evaluation morphism:
        if we pass in ``QQ`` instead, an error is raised since there is
        no coercion morphism from ``A`` to ``QQ``::

            sage: Dx.postcompose(QQ)
            Traceback (most recent call last):
            ...
            TypeError: the codomain of the derivation does not coerce to the given ring

        Note that this method cannot be used to compose derivations::

            sage: Dx.precompose(Dy)
            Traceback (most recent call last):
            ...
            TypeError: you must give an homomorphism of rings
        """
        parent = self.parent()
        # Mirror of ``precompose``: a bare ring stands for the coercion
        # map from the codomain into it.
        if morphism in Rings().Commutative():
            if morphism.has_coerce_map_from(parent.codomain()):
                morphism = morphism.coerce_map_from(parent.codomain())
            else:
                raise TypeError("the codomain of the derivation does not coerce to the given ring")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(parent.domain(), morphism * parent.defining_morphism())
        arg = [ ]
        for x in M.dual_basis():
            arg.append(morphism(self(x)))
        return M(arg)

    def extend_to_fraction_field(self):
        r"""
        Return the extension of this derivation to fraction fields of
        the domain and the codomain.

        EXAMPLES::

            sage: S.<x> = QQ[]
            sage: d = S.derivation()
            sage: d
            d/dx

            sage: D = d.extend_to_fraction_field()
            sage: D
            d/dx
            sage: D.domain()
            Fraction Field of Univariate Polynomial Ring in x over Rational Field
            sage: D(1/x)
            -1/x^2
        """
        parent = self.parent()
        domain = parent.domain().fraction_field()
        codomain = parent.codomain().fraction_field()
        M = RingDerivationModule(domain, codomain)
        # Prefer direct conversion; fall back to rebuilding from the
        # coefficient list if the module cannot convert the element.
        try:
            return M(self)
        except (ValueError, NotImplementedError):
            return M(self.list())
class RingDerivationWithoutTwist_zero(RingDerivationWithoutTwist):
    """
    This class can only represent the zero derivation.

    It is used when the parent is the zero derivation module
    (e.g., when its domain is ``ZZ``, ``QQ``, a finite field, etc.)
    """
    def __init__(self, parent, arg=None):
        """
        Initialize this derivation.

        INPUT:

        - ``arg`` -- anything convertible to the zero derivation;
          a nonzero value raises an error

        TESTS::

            sage: M = ZZ.derivation_module()
            sage: der = M(); der
            0
            sage: from sage.rings.derivation import RingDerivationWithoutTwist_zero
            sage: isinstance(der, RingDerivationWithoutTwist_zero)
            True
            sage: TestSuite(der).run()
        """
        # A one-element list containing a derivation is unwrapped.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        # Only falsy values or the zero derivation are acceptable here.
        if arg and not (isinstance(arg, RingDerivation) and arg.is_zero()):
            raise ValueError("unable to create the derivation")
        RingDerivation.__init__(self, parent)

    def _repr_(self):
        """
        Return a string representation of this derivation.

        EXAMPLES::

            sage: M = ZZ.derivation_module()
            sage: M()
            0
        """
        return "0"

    def _latex_(self):
        """
        Return a LaTeX representation of this derivation.

        EXAMPLES::

            sage: M = ZZ.derivation_module()
            sage: latex(M())
            0
        """
        return "0"

    def __hash__(self):
        """
        Return a hash of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        # ``list()`` is empty here, so all zero derivations hash alike.
        return hash(tuple(self.list()))

    def _add_(self, other):
        """
        Return the sum of this derivation and ``other``.

        Since this derivation is zero, the sum is ``other``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx + Dy
            d/dx + d/dy
        """
        return other

    def _sub_(self, other):
        """
        Return the difference of this derivation and ``other``.

        Since this derivation is zero, the difference is ``-other``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx - Dy
            d/dx - d/dy
        """
        return -other

    def _neg_(self):
        """
        Return the opposite of this derivation.

        The zero derivation is its own opposite.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: -Dx
            -d/dx
        """
        return self

    def _lmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.

        Any scalar multiple of the zero derivation is zero.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dx * 2
            2*d/dx
            sage: Dx * x^2
            x^2*d/dx
        """
        return self

    def _rmul_(self, left):
        """
        Return the product of this derivation by the scalar ``factor``.

        Any scalar multiple of the zero derivation is zero.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: 2 * Dx
            2*d/dx
            sage: x^2 * Dx
            x^2*d/dx
        """
        return self

    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation, which
        is always zero.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = x*R.derivation(x) + y*R.derivation(y)
            sage: f(x^2 + 3*x*y - y^2)
            2*x^2 + 6*x*y - 2*y^2
        """
        return self.parent().codomain().zero()

    def _bracket_(self, other):
        """
        Return the Lie bracket (that is the commutator) of
        this derivation and ``other``.

        The bracket with the zero derivation is always zero.

        EXAMPLES::

            sage: R.<x,y> = QQ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx._bracket_(Dy)
            0
        """
        return self

    def is_zero(self):
        """
        Return ``True`` if this derivation vanishes.

        Always ``True`` for this class.

        EXAMPLES::

            sage: M = QQ.derivation_module()
            sage: M().is_zero()
            True
        """
        return True

    def list(self):
        """
        Return the list of coefficient of this derivation
        on the canonical basis.

        The zero derivation module has an empty basis.

        EXAMPLES::

            sage: M = QQ.derivation_module()
            sage: M().list()
            []
        """
        return []
class RingDerivationWithoutTwist_wrapper(RingDerivationWithoutTwist):
    """
    This class is a wrapper for derivation.

    It is useful for changing the parent without changing the
    computation rules for derivations. It is used for derivations
    over fraction fields and quotient rings.

    All arithmetic is delegated to the wrapped ``_base_derivation``.
    """
    def __init__(self, parent, arg=None):
        """
        Initialize this derivation.

        TESTS::

            sage: from sage.rings.derivation import RingDerivationWithoutTwist_wrapper
            sage: R.<x,y> = GF(5)[]
            sage: S = R.quo([x^5, y^5])
            sage: M = S.derivation_module()
            sage: der = M.random_element()
            sage: isinstance(der, RingDerivationWithoutTwist_wrapper)
            True
            sage: TestSuite(der).run()
        """
        # A one-element list containing a derivation is unwrapped.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if isinstance(arg, RingDerivationWithoutTwist_wrapper):
            # Share the underlying derivation of another wrapper.
            self._base_derivation = arg._base_derivation
        else:
            # Otherwise let the parent's base module build it.
            self._base_derivation = parent._base_derivation(arg)
        RingDerivation.__init__(self, parent)

    def __hash__(self):
        """
        Return a hash of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        return hash(tuple(self.list()))

    def _add_(self, other):
        """
        Return the sum of this derivation and ``other``.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: Dy = S.derivation(y)
            sage: Dx + Dy
            d/dx + d/dy
        """
        # Delegate to the wrapped derivations, rewrap in this parent.
        return type(self)(self.parent(), self._base_derivation + other._base_derivation)

    def _sub_(self, other):
        """
        Return the difference of this derivation and ``other``.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: Dy = S.derivation(y)
            sage: Dx - Dy
            d/dx - d/dy
        """
        return type(self)(self.parent(), self._base_derivation - other._base_derivation)

    def _neg_(self):
        """
        Return the opposite of this derivation.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: -Dx
            -d/dx
        """
        return type(self)(self.parent(), -self._base_derivation)

    def _lmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: Dx * 2
            2*d/dx
            sage: Dx * x^2
            x^2*d/dx
        """
        return type(self)(self.parent(), self._base_derivation * factor)

    def _rmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: Dx = S.derivation(x)
            sage: 2 * Dx
            2*d/dx
            sage: x^2 * Dx
            x^2*d/dx
        """
        return type(self)(self.parent(), factor * self._base_derivation)

    def list(self):
        """
        Return the list of coefficient of this derivation
        on the canonical basis.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: M = S.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)
            sage: S.derivation(x).list()
            [1, 0]
            sage: S.derivation(y).list()
            [0, 1]
            sage: f = x*S.derivation(x) + y*S.derivation(y); f
            x*d/dx + y*d/dy
            sage: f.list()
            [x, y]
        """
        # Coefficients are those of the wrapped derivation.
        return self._base_derivation.list()
class RingDerivationWithoutTwist_function(RingDerivationWithoutTwist):
    """
    A class for untwisted derivations over rings whose elements
    are either polynomials, rational fractions, power series or
    Laurent series.

    A derivation is stored as a derivation of the base ring
    (``_base_derivation``) plus the images of the generators of the
    domain (``_images``).
    """
    def __init__(self, parent, arg=None):
        """
        Initialize this derivation.

        INPUT:

        - ``arg`` -- ``None`` (derivation with respect to the first
          generator), a generator of the domain, a list of images of
          the generators (possibly preceded by data for the base
          derivation), or another derivation to convert

        TESTS::

            sage: R.<x,y> = ZZ[]
            sage: R.derivation(x)
            d/dx
            sage: der = R.derivation([1,2])
            sage: der
            d/dx + 2*d/dy
            sage: TestSuite(der).run()
        """
        domain = parent.domain()
        codomain = parent.codomain()
        ngens = domain.ngens()
        # Default: zero base derivation, all generators mapped to zero.
        self._base_derivation = parent._base_derivation()
        self._images = [codomain.zero() for _ in range(ngens)]
        if arg is None:
            # No argument: derivation with respect to the first generator.
            arg = domain.gen()
        # A one-element list containing a derivation is unwrapped.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if not arg:
            # Falsy argument: keep the zero derivation set up above.
            pass
        elif (isinstance(arg, RingDerivationWithoutTwist_function)
              and parent.has_coerce_map_from(arg.parent())):
            # Convert a compatible derivation: map its data into this parent.
            self._base_derivation = parent._base_derivation(arg._base_derivation)
            self._images = [codomain(x) for x in arg._images]
        elif isinstance(arg, (tuple, list)):
            # The LAST ``ngens`` entries are the images of the generators;
            # any leading entries parametrize the base derivation.
            if len(arg) < ngens:
                raise ValueError("the number of images is incorrect")
            self._base_derivation = parent._base_derivation(arg[:-ngens])
            self._images = [codomain(x) for x in arg[-ngens:]]
        else:
            # A single generator yields the corresponding partial derivative;
            # anything else is delegated to the base derivation module.
            for i in range(ngens):
                if arg == domain.gen(i):
                    self._base_derivation = parent._base_derivation()
                    self._images[i] = codomain.one()
                    break
            else:
                self._base_derivation = parent._base_derivation(arg)
        RingDerivation.__init__(self, parent)

    def __hash__(self):
        """
        Return a hash of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        return hash(tuple(self.list()))

    def _add_(self, other):
        """
        Return the sum of this derivation and ``other``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx + Dy
            d/dx + d/dy
        """
        # Add componentwise: base derivations and generator images.
        base_derivation = self._base_derivation + other._base_derivation
        im = [ self._images[i] + other._images[i] for i in range(self.parent().domain().ngens()) ]
        return type(self)(self.parent(), [base_derivation] + im)

    def _sub_(self, other):
        """
        Return the subtraction of this derivation and ``other``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dy = R.derivation(y)
            sage: Dx - Dy
            d/dx - d/dy
        """
        base_derivation = self._base_derivation - other._base_derivation
        im = [ self._images[i] - other._images[i] for i in range(self.parent().domain().ngens()) ]
        return type(self)(self.parent(), [base_derivation] + im)

    def _rmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: 2 * Dx
            2*d/dx
            sage: x^2 * Dx
            x^2*d/dx
        """
        factor = self.parent().codomain()(factor)
        base_derivation = factor * self._base_derivation
        im = [ factor*x for x in self._images ]
        return type(self)(self.parent(), [base_derivation] + im)

    def _lmul_(self, factor):
        """
        Return the product of this derivation by the scalar ``factor``.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: Dx = R.derivation(x)
            sage: Dx * 2
            2*d/dx
            sage: Dx * x^2
            x^2*d/dx
        """
        # Scalar multiplication is commutative here.
        return self._rmul_(factor)

    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: D = x*R.derivation(x) + y*R.derivation(y)
            sage: D(x^2 + 3*x*y - y^2)
            2*x^2 + 6*x*y - 2*y^2
        """
        parent = self.parent()
        domain = parent.domain()
        codomain = parent.codomain()
        defining_morphism = parent.defining_morphism()
        if isinstance(domain, FractionField_generic):
            # Quotient rule: d(num/den) = (d(num)*den - num*d(den)) / den^2,
            # where d here is the base derivation applied coefficientwise.
            num = x.numerator()
            den = x.denominator()
            u = defining_morphism(num)
            v = defining_morphism(den)
            up = num.map_coefficients(self._base_derivation, codomain)(*domain.gens())
            vp = den.map_coefficients(self._base_derivation, codomain)(*domain.gens())
            res = (up*v - u*vp) / (v*v)
        else:
            # Apply the base derivation to the coefficients of x.
            res = x.map_coefficients(self._base_derivation, codomain)(*domain.gens())
        # Add the contribution of each generator via the chain rule.
        for i in range(len(self._images)):
            res += defining_morphism(x.derivative(domain.gen(i))) * self._images[i]
        return res

    def is_zero(self):
        """
        Return ``True`` if this derivation is zero.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(); f
            d/dx
            sage: f.is_zero()
            False
            sage: (f-f).is_zero()
            True
        """
        # Zero iff both the base derivation and all images vanish.
        if not self._base_derivation.is_zero():
            return False
        return all(im == 0 for im in self._images)

    def list(self):
        """
        Return the list of coefficient of this derivation
        on the canonical basis.

        EXAMPLES::

            sage: R.<x,y> = GF(5)[[]]
            sage: M = R.derivation_module()
            sage: M.basis()
            Family (d/dx, d/dy)
            sage: R.derivation(x).list()
            [1, 0]
            sage: R.derivation(y).list()
            [0, 1]
            sage: f = x*R.derivation(x) + y*R.derivation(y); f
            x*d/dx + y*d/dy
            sage: f.list()
            [x, y]
        """
        # Base-ring coefficients first, then images of the generators.
        return self._base_derivation.list() + self._images
class RingDerivationWithoutTwist_fraction_field(RingDerivationWithoutTwist_wrapper):
    """
    This class handles derivations over fraction fields.

    The underlying derivation on the base ring is extended to
    fractions through the quotient rule.
    """
    def __hash__(self):
        """
        Return a hash of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        return hash(tuple(self.list()))

    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: K = R.fraction_field()
            sage: f = K.derivation(); f
            d/dx
            sage: f(1/x)
            (-1)/x^2
        """
        defining_morphism = self.parent().defining_morphism()
        num = x.numerator()
        den = x.denominator()
        u = defining_morphism(num)
        v = defining_morphism(den)
        up = self._base_derivation(u)
        vp = self._base_derivation(v)
        # Quotient rule: d(u/v) = (d(u)*v - u*d(v)) / v^2.
        return (up*v - u*vp) / (v*v)
class RingDerivationWithoutTwist_quotient(RingDerivationWithoutTwist_wrapper):
    """
    This class handles derivations over quotient rings.
    """
    def __hash__(self):
        """
        Return a hash of this derivation.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: f = R.derivation(x)
            sage: hash(f)  # random
            3713081631936575706
        """
        coefficients = tuple(self.list())
        return hash(coefficients)

    def _call_(self, x):
        """
        Return the image of ``x`` under this derivation.

        EXAMPLES::

            sage: R.<X,Y> = GF(5)[]
            sage: S.<x,y> = R.quo([X^5, Y^5])
            sage: f = x^3*S.derivation(); f
            x^3*d/dx
            sage: f(x^3)
            0
        """
        # Apply the base derivation to an arbitrary lift of ``x``
        # in the cover ring.
        lifted = x.lift()
        return self._base_derivation(lifted)
class RingDerivationWithTwist_generic(RingDerivation):
r"""
The class handles `\theta`-derivations of the form
`\lambda (\theta - \iota)` (where `\iota` is the defining
morphism of the codomain over the domain) for a scalar
`\lambda` varying in the codomain.
"""
def __init__(self, parent, scalar=0):
    """
    Initialize this derivation.

    INPUT:

    - ``scalar`` -- an element of the codomain (default: ``0``);
      the derivation represented is ``scalar * (twist - id)``

    TESTS::

        sage: R.<x,y> = ZZ[]
        sage: theta = R.hom([y,x])
        sage: R.derivation(twist=theta)
        0
        sage: R.derivation(1, twist=theta)
        [x |--> y, y |--> x] - id
        sage: der = R.derivation(x, twist=theta)
        sage: TestSuite(der).run()
    """
    # Convert the scalar into the codomain once, up front.
    self._scalar = parent.codomain()(scalar)
    RingDerivation.__init__(self, parent)
def __hash__(self):
    """
    Return a hash of this derivation.

    The hash only depends on the defining scalar.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: theta = R.hom([y,x])
        sage: f = R.derivation(1, twist=theta)
        sage: hash(f)  # random
        -6511057926760520014
    """
    return hash(self._scalar)
def _repr_(self):
    r"""
    Return a string representation of this derivation.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: theta = R.hom([y,x])
        sage: R.derivation(1, twist=theta)
        [x |--> y, y |--> x] - id
    """
    scalar = self._scalar
    sc = str(scalar)
    if sc == "0":
        return "0"
    defining_morphism = self.parent().defining_morphism()
    twisting_morphism = self.parent().twisting_morphism()
    # Short printable names for the two morphisms; fall back to
    # generic labels if they cannot be rendered.
    try:
        if defining_morphism.is_identity():
            sdef = "id"
        else:
            sdef = "[%s]" % defining_morphism._repr_short()
    except (AttributeError, NotImplementedError):
        sdef = "defining_morphism"
    try:
        stwi = "[%s]" % twisting_morphism._repr_short()
    except AttributeError:
        stwi = "twisting_morphism"
    # Choose the scalar prefix: fold trivial scalars and signs,
    # parenthesize non-atomic expressions.
    if sc == "1":
        return "%s - %s" % (stwi, sdef)
    elif sc == "-1":
        s = "-"
    elif scalar._is_atomic():
        s = "%s*" % sc
    elif (-scalar)._is_atomic():
        s = "-%s*" % (-scalar)
    else:
        s = "(%s)*" % sc
    return "%s(%s - %s)" % (s, stwi, sdef)
def _latex_(self):
    r"""
    Return a LaTeX representation of this derivation.

    EXAMPLES::

        sage: k.<a> = GF(5^3)
        sage: Frob = k.frobenius_endomorphism()
        sage: der = k.derivation(a+1, twist=Frob)
        sage: latex(der)
        \left(a + 1\right) \left(\left[a \mapsto a^{5}\right] - \text{id}\right)
    """
    # Mirrors ``_repr_`` with LaTeX markup.
    scalar = self._scalar
    sc = str(scalar)
    if sc == "0":
        return "0"
    defining_morphism = self.parent().defining_morphism()
    twisting_morphism = self.parent().twisting_morphism()
    try:
        if defining_morphism.is_identity():
            sdef = "\\text{id}"
        else:
            sdef = "\\left[%s\\right]" % latex(defining_morphism)
    except (AttributeError, NotImplementedError):
        sdef = "\\text{defining morphism}"
    try:
        stwi = "\\left[%s\\right]" % latex(twisting_morphism)
    except AttributeError:
        stwi = "\\text{twisting morphism}"
    if sc == "1":
        return "%s - %s" % (stwi, sdef)
    elif sc == "-1":
        s = "-"
    elif scalar._is_atomic():
        s = "%s " % sc
    elif (-scalar)._is_atomic():
        s = "-%s " % (-scalar)
    else:
        s = "\\left(%s\\right) " % sc
    return "%s \\left(%s - %s\\right)" % (s, stwi, sdef)
def _add_(self, other):
    """
    Return the sum of this derivation and ``other``.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: theta = R.hom([y,x])
        sage: der1 = R.derivation(x, twist=theta); der1
        x*([x |--> y, y |--> x] - id)
        sage: der2 = R.derivation(y, twist=theta); der2
        y*([x |--> y, y |--> x] - id)
        sage: der1 + der2
        (x + y)*([x |--> y, y |--> x] - id)
    """
    # Adding twisted derivations amounts to adding their scalars.
    total = self._scalar + other._scalar
    return type(self)(self.parent(), total)
def _sub_(self, other):
    """
    Return the subtraction of this derivation and ``other``.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: theta = R.hom([y,x])
        sage: der1 = R.derivation(x, twist=theta); der1
        x*([x |--> y, y |--> x] - id)
        sage: der2 = R.derivation(y, twist=theta); der2
        y*([x |--> y, y |--> x] - id)
        sage: der1 - der2
        (x - y)*([x |--> y, y |--> x] - id)

    TESTS::

        sage: der1 - der1
        0
        sage: der2 - der2
        0
    """
    # Subtracting twisted derivations amounts to subtracting scalars.
    difference = self._scalar - other._scalar
    return type(self)(self.parent(), difference)
def _rmul_(self, factor):
    """
    Return the product of this derivation by the scalar ``factor``.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: theta = R.hom([y,x])
        sage: D = R.derivation(x, twist=theta); D
        x*([x |--> y, y |--> x] - id)
        sage: y * D
        x*y*([x |--> y, y |--> x] - id)
    """
    # Scaling a twisted derivation scales its defining scalar.
    scaled = factor * self._scalar
    return type(self)(self.parent(), scaled)
def _lmul_(self, factor):
    """
    Return the product of this derivation by the scalar ``factor``.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: theta = R.hom([y,x])
        sage: D = R.derivation(x, twist=theta); D
        x*([x |--> y, y |--> x] - id)
        sage: D * y
        x*y*([x |--> y, y |--> x] - id)
    """
    # Scalar multiplication is commutative here, so reuse ``_rmul_``.
    return self._rmul_(factor)
def _call_(self, x):
    """
    Return the image of ``x`` under this derivation.

    EXAMPLES::

        sage: R.<x,y> = ZZ[]
        sage: theta = R.hom([y,x])
        sage: f = R.derivation(1, twist=theta); f
        [x |--> y, y |--> x] - id
        sage: f(x)
        -x + y
    """
    parent = self.parent()
    twist = parent.twisting_morphism()
    iota = parent.defining_morphism()
    # The derivation is scalar * (twist - iota).
    return self._scalar * (twist(x) - iota(x))
def list(self):
    """
    Return the list of coefficient of this twisted derivation
    on the canonical basis.

    EXAMPLES::

        sage: R.<x,y> = QQ[]
        sage: K = R.fraction_field()
        sage: theta = K.hom([y,x])
        sage: M = K.derivation_module(twist=theta)
        sage: M.basis()
        Family (twisting_morphism - id,)
        sage: f = (x+y) * M.gen()
        sage: f
        (x + y)*(twisting_morphism - id)
        sage: f.list()
        [x + y]
    """
    # The module is one-dimensional: the only coordinate is the scalar.
    return [self._scalar]
    def precompose(self, morphism):
        r"""
        Return the twisted derivation obtained by applying first
        ``morphism`` and then this twisted derivation.

        INPUT:

        - ``morphism`` -- a homomorphism of rings whose codomain is
          the domain of this derivation, or a ring that coerces to
          the domain of this derivation

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: D = R.derivation(x, twist=theta); D
            x*([x |--> y, y |--> x] - id)
            sage: f = R.hom([x^2, y^3])
            sage: g = D.precompose(f); g
            x*([x |--> y^2, y |--> x^3] - [x |--> x^2, y |--> y^3])

        Observe that `g` is no longer a `\theta`-derivation but
        a `(\theta \circ f)`-derivation::

            sage: g.parent().twisting_morphism()
            Ring endomorphism of Multivariate Polynomial Ring in x, y over Integer Ring
              Defn: x |--> y^2
                    y |--> x^3
        """
        parent = self.parent()
        # Accept either a ring (use its coercion into our domain) or an
        # actual ring homomorphism.
        if morphism in Rings().Commutative():
            if parent.domain().has_coerce_map_from(morphism):
                morphism = parent.domain().coerce_map_from(morphism)
            else:
                raise TypeError("the given ring does not coerce to the domain of the derivation")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        # Both the defining and the twisting morphisms are composed with
        # ``morphism`` on the right; the scalar is unchanged.
        M = RingDerivationModule(morphism.domain(), parent.defining_morphism() * morphism,
                                 parent.twisting_morphism() * morphism)
        return M(self._scalar)
    def postcompose(self, morphism):
        r"""
        Return the twisted derivation obtained by applying first
        this twisted derivation and then ``morphism``.

        INPUT:

        - ``morphism`` -- a homomorphism of rings whose domain is
          the codomain of this derivation, or a ring into which the
          codomain of this derivation coerces

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: D = R.derivation(x, twist=theta); D
            x*([x |--> y, y |--> x] - id)
            sage: f = R.hom([x^2, y^3])
            sage: g = D.postcompose(f); g
            x^2*([x |--> y^3, y |--> x^2] - [x |--> x^2, y |--> y^3])

        Observe that `g` is no longer a `\theta`-derivation but
        a `(f \circ \theta)`-derivation::

            sage: g.parent().twisting_morphism()
            Ring endomorphism of Multivariate Polynomial Ring in x, y over Integer Ring
              Defn: x |--> y^3
                    y |--> x^2
        """
        parent = self.parent()
        # Accept either a ring (use the coercion from our codomain) or an
        # actual ring homomorphism.
        if morphism in Rings().Commutative():
            if morphism.has_coerce_map_from(parent.codomain()):
                morphism = morphism.coerce_map_from(parent.codomain())
            else:
                raise TypeError("the codomain of the derivation does not coerce to the given ring")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        # Both structure morphisms are composed with ``morphism`` on the
        # left, and the scalar is pushed through ``morphism`` as well.
        M = RingDerivationModule(parent.domain(), morphism * parent.defining_morphism(),
                                 morphism * parent.twisting_morphism())
        return M(morphism(self._scalar))
def _richcmp_(self, other, op):
"""
Compare this derivation with ``other`` according
to the comparison operator ``op``.
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: theta = R.hom([y,x])
sage: Dx = R.derivation(x, twist=theta); Dx
x*([x |--> y, y |--> x] - id)
sage: Dy = R.derivation(y, twist=theta); Dy
y*([x |--> y, y |--> x] - id)
sage: D = R.derivation(x+y, twist=theta); D
(x + y)*([x |--> y, y |--> x] - id)
sage: Dx == Dy
False
sage: D == Dx + Dy
True
sage: D != Dy
True
"""
if op == op_EQ:
if isinstance(other, RingDerivationWithTwist_generic):
return self._scalar == other._scalar
else:
return False
if op == op_NE:
if isinstance(other, RingDerivationWithTwist_generic):
return self._scalar != other._scalar
else:
return True
return NotImplemented
    def extend_to_fraction_field(self):
        r"""
        Return the extension of this derivation to fraction fields of
        the domain and the codomain.

        EXAMPLES::

            sage: R.<x,y> = ZZ[]
            sage: theta = R.hom([y,x])
            sage: d = R.derivation(x, twist=theta)
            sage: d
            x*([x |--> y, y |--> x] - id)
            sage: D = d.extend_to_fraction_field()
            sage: D
            x*([x |--> y, y |--> x] - id)
            sage: D.domain()
            Fraction Field of Multivariate Polynomial Ring in x, y over Integer Ring
            sage: D(1/x)
            (x - y)/y
        """
        parent = self.parent()
        domain = parent.domain().fraction_field()
        codomain = parent.codomain().fraction_field()
        # The twisting morphism must be extended alongside the rings.
        twist = parent.twisting_morphism().extend_to_fraction_field()
        M = RingDerivationModule(domain, codomain, twist)
        return M(codomain(self._scalar))
| 31.663668 | 209 | 0.529153 |
from sage.structure.richcmp import op_EQ, op_NE
from sage.structure.unique_representation import UniqueRepresentation
from sage.sets.family import Family
from sage.modules.module import Module
from sage.structure.element import ModuleElement
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring import PolynomialRing_general
from sage.rings.polynomial.multi_polynomial_ring_base import MPolynomialRing_base
from sage.rings.power_series_ring import PowerSeriesRing_generic
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.rings.fraction_field import FractionField_generic
from sage.rings.quotient_ring import QuotientRing_generic
from sage.rings.polynomial.polynomial_quotient_ring import PolynomialQuotientRing_generic
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing_generic
from sage.rings.padics.padic_generic import pAdicGeneric
from sage.categories.number_fields import NumberFields
from sage.categories.finite_fields import FiniteFields
from sage.categories.modules import Modules
from sage.categories.modules_with_basis import ModulesWithBasis
from sage.categories.lie_algebras import LieAlgebras
from sage.categories.map import Map
from sage.categories.rings import Rings
from sage.misc.latex import latex
class RingDerivationModule(Module, UniqueRepresentation):
    """
    Module of (possibly twisted) derivations from a commutative ring
    into an algebra over it.

    The module is constructed from a ``domain``, a ``codomain`` (either
    an algebra over the domain or a ring morphism), and an optional
    twisting ring homomorphism.
    """
    def __init__(self, domain, codomain, twist=None):
        """
        Initialize this module of derivations.

        Normalizes ``codomain`` into a defining morphism, normalizes the
        ``twist`` so that it shares domain and codomain with it, and then
        dispatches on the structure of ``domain`` to pick the concrete
        element class and to compute generators / (dual) basis.
        """
        if domain not in Rings().Commutative():
            raise TypeError("the domain must be a commutative ring")
        # Accept either an algebra over the domain or an explicit morphism
        # as the codomain; in both cases derive the defining morphism.
        if codomain in Rings().Commutative() and codomain.has_coerce_map_from(domain):
            defining_morphism = codomain.coerce_map_from(domain)
        elif (isinstance(codomain,Map)
              and codomain.category_for().is_subcategory(Rings())
              and codomain.domain().has_coerce_map_from(domain)):
            if codomain.domain() is domain:
                defining_morphism = codomain
            else:
                defining_morphism = codomain * codomain.domain().coerce_map_from(domain)
            codomain = defining_morphism.codomain()
        else:
            raise TypeError("the codomain must be an algebra over the domain"
                            " or a morphism with the correct domain")
        if twist is not None:
            if not (isinstance(twist, Map) and twist.category_for().is_subcategory(Rings())):
                raise TypeError("the twisting homomorphism must be an homomorphism of rings")
            # Pre/post-compose with coercions so that the twist has exactly
            # the same domain and codomain as the defining morphism.
            # NOTE(review): the local name ``map`` shadows the builtin.
            if twist.domain() is not domain:
                map = twist.domain().coerce_map_from(domain)
                if map is None:
                    raise TypeError("the domain of the derivation must coerce"
                                    " to the domain of the twisting homomorphism")
                twist = twist * map
            if twist.codomain() is not codomain:
                map = codomain.coerce_map_from(twist.codomain())
                if map is None:
                    raise TypeError("the codomain of the twisting homomorphism"
                                    " must coerce to the codomain of the derivation")
                twist = map * twist
            # Drop a twist that is (detectably) equal to the defining
            # morphism: the derivation is then untwisted.
            try:
                if twist == defining_morphism:
                    twist = None
                else:
                    for g in domain.gens():
                        if twist(g) != defining_morphism(g):
                            break
                    else:
                        twist = None
            except (AttributeError, NotImplementedError):
                pass
        self._domain = domain
        self._codomain = codomain
        self._defining_morphism = defining_morphism
        self._twist = twist
        self._base_derivation = None
        self._gens = None
        self._basis = self._dual_basis = None
        # ``_constants`` is a pair (ring, flag); the flag records whether
        # that ring is known to be the full ring of constants.
        self._constants = (ZZ, False)
        # Dispatch on the structure of the domain to choose the concrete
        # element class and compute generators / basis when possible.
        if twist is not None:
            self.Element = RingDerivationWithTwist_generic
            if domain.is_field():
                self._gens = [ 1 ]
                self._basis = [ 1 ]
        elif (domain is ZZ or domain in NumberFields() or domain in FiniteFields()
              or isinstance(domain, IntegerModRing_generic)
              or (isinstance(domain, pAdicGeneric) and (domain.is_field() or domain.absolute_e() == 1))):
            # Rings that only admit the zero derivation.
            self.Element = RingDerivationWithoutTwist_zero
            self._gens = [ ]
            self._basis = [ ]
            self._dual_basis = [ ]
            self._constants = (domain, True)
        elif (isinstance(domain, (PolynomialRing_general, MPolynomialRing_base, PowerSeriesRing_generic, LaurentSeriesRing))
              or (isinstance(domain, FractionField_generic)
                  and isinstance(domain.ring(), (PolynomialRing_general, MPolynomialRing_base)))):
            # (Multivariate) polynomial-like rings: derivations are given
            # by a derivation of the base ring plus images of the gens.
            self._base_derivation = RingDerivationModule(domain.base_ring(), defining_morphism)
            self.Element = RingDerivationWithoutTwist_function
            try:
                self._gens = self._base_derivation.gens() + domain.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = tuple(self._base_derivation.basis()) + domain.gens()
                self._dual_basis = tuple(self._base_derivation.dual_basis()) + domain.gens()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            if domain.characteristic() == 0:
                self._constants = (constants, sharp)
            else:
                self._constants = (constants, False)
        elif isinstance(domain, FractionField_generic):
            # Derivations extend uniquely from a ring to its fraction field.
            self._base_derivation = RingDerivationModule(domain.ring(), defining_morphism)
            self.Element = RingDerivationWithoutTwist_fraction_field
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants.fraction_field(), False)
        elif isinstance(domain, PolynomialQuotientRing_generic):
            # Only derivations killing the modulus descend to the quotient.
            self._base_derivation = RingDerivationModule(domain.base(), defining_morphism)
            modulus = domain.modulus()
            for der in self._base_derivation.gens():
                if der(modulus) != 0:
                    raise NotImplementedError("derivations over quotient rings"
                                              " are not fully supported")
            self.Element = RingDerivationWithoutTwist_quotient
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants, False)
        elif isinstance(domain, QuotientRing_generic):
            # Same idea for generic quotients: every generator derivation
            # must annihilate every defining relation.
            self._base_derivation = RingDerivationModule(domain.cover_ring(), defining_morphism)
            if any(der(modulus) != 0 for modulus in domain.defining_ideal().gens()
                   for der in self._base_derivation.gens()):
                raise NotImplementedError("derivations over quotient rings"
                                          " are not fully supported")
            self.Element = RingDerivationWithoutTwist_quotient
            try:
                self._gens = self._base_derivation.gens()
            except NotImplementedError:
                pass
            try:
                self._basis = self._base_derivation.basis()
                self._dual_basis = self._base_derivation.dual_basis()
            except NotImplementedError:
                pass
            constants, sharp = self._base_derivation._constants
            self._constants = (constants, False)
        else:
            raise NotImplementedError("derivations over this ring is not implemented")
        # Pick the category; untwisted endo-derivations form a Lie algebra
        # over the (known part of the) ring of constants.
        if self._basis is None:
            category = Modules(codomain)
        else:
            category = ModulesWithBasis(codomain)
        if self._twist is None and domain is codomain:
            category &= LieAlgebras(self._constants[0])
        Module.__init__(self, codomain, category=category)
        # Wrap the raw generator/basis data into actual elements.
        if self._gens is not None:
            self._gens = [self.element_class(self, x) for x in self._gens]
        if self._basis is not None:
            self._basis = [self.element_class(self, x) for x in self._basis]
        if self._dual_basis is not None:
            self._dual_basis = [domain(x) for x in self._dual_basis]
    def __hash__(self):
        """Return a hash compatible with the unique-representation key."""
        return hash((self._domain, self._codomain, self._twist))
    def _coerce_map_from_(self, R):
        """
        Allow coercion from another derivation module when the rings
        coerce appropriately and the defining morphisms agree on the
        generators of our domain.
        """
        if isinstance(R, RingDerivationModule):
            if R.domain().has_coerce_map_from(self._domain) and self._codomain.has_coerce_map_from(R.codomain()):
                morR = R.defining_morphism()
                morS = self._defining_morphism
                try:
                    for g in self._domain.gens():
                        if morR(g) != morS(g):
                            return False
                    return True
                except (AttributeError, NotImplementedError):
                    pass
        return super(RingDerivationModule, self)._coerce_map_from_(R)
    def _repr_(self):
        """Return a string representation of this module."""
        t = ""
        if self._twist is None:
            s = "Module of derivations"
        else:
            s = "Module of twisted derivations"
            try:
                t = " (twisting morphism: %s)" % self._twist._repr_short()
            except AttributeError:
                pass
        if self._domain is self._codomain:
            s += " over %s" % self._domain
        else:
            s += " from %s to %s" % (self._domain, self._codomain)
        return s + t
    def domain(self):
        """Return the ring on which the derivations act."""
        return self._domain
    def codomain(self):
        """Return the ring in which the derivations take values."""
        return self._codomain
    def defining_morphism(self):
        """Return the structural morphism from domain to codomain."""
        return self._defining_morphism
    def twisting_morphism(self):
        """Return the twisting morphism, or ``None`` when untwisted."""
        return self._twist
    def ngens(self):
        """Return the number of generators of this module."""
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return len(self._gens)
    def gens(self):
        """Return the tuple of generators of this module."""
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return tuple(self._gens)
    def gen(self, n=0):
        """Return the ``n``-th generator of this module."""
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        try:
            return self._gens[n]
        except IndexError:
            raise ValueError("generator not defined")
    def basis(self):
        """Return a basis of this module as a family."""
        if self._basis is None:
            raise NotImplementedError("basis is not implemented for this derivation module")
        return Family(self._basis)
    def dual_basis(self):
        """Return the dual basis (elements of the domain) as a family."""
        if self._dual_basis is None:
            raise NotImplementedError("basis is not implemented for this derivation module")
        return Family(self._dual_basis)
    def ring_of_constants(self):
        """Return the ring of constants, when it is known exactly."""
        if not self._constants[1]:
            raise NotImplementedError("the computation of the ring of constants"
                                      " is not implemented for this derivation module")
        return self._constants[0]
    def random_element(self, *args, **kwds):
        """Return a random derivation; extra arguments are forwarded
        to ``random_element`` of the codomain."""
        if self._gens is None:
            raise NotImplementedError("generators are not implemented for this derivation module")
        return self([ self._codomain.random_element(*args, **kwds) for _ in range(len(self._gens)) ])
    def some_elements(self):
        """Return a list of typical elements of this module."""
        if self._gens is None:
            return self.an_element()
        if self._dual_basis is None:
            return self._gens
        return self._gens + [f * D for f in self._dual_basis for D in self._gens]
# NOTE(review): fragment of a stripped comment — presumably: derivations
# do not form a category since they are not stable under composition.
class RingDerivation(ModuleElement):
    """
    Abstract base class for ring derivations.

    Concrete subclasses implement ``_call_``; this class provides the
    public calling convention and accessors for the acting rings.
    """
    def __call__(self, x):
        """Coerce ``x`` into the domain and apply the derivation."""
        return self._call_(self.parent().domain()(x))
    def domain(self):
        """Return the ring on which this derivation acts."""
        return self.parent().domain()
    def codomain(self):
        """Return the ring in which this derivation takes values."""
        return self.parent().codomain()
class RingDerivationWithoutTwist(RingDerivation):
    """
    Base class for untwisted ring derivations.

    Elements are determined by their values on the dual basis of the
    parent module (typically the ring generators).
    """
    def _repr_(self):
        """Return a string representation as a sum of ``c*d/dx`` terms."""
        parent = self.parent()
        try:
            dual_basis = parent.dual_basis()
        except NotImplementedError:
            return "A derivation on %s" % parent.domain()
        coeffs = self.list()
        s = ""
        for i in range(len(dual_basis)):
            c = coeffs[i]
            sc = str(c)
            if sc == "0":
                continue
            ddx = "d/d%s" % dual_basis[i]
            # Choose the rendering of the coefficient: omit 1/-1, avoid
            # parentheses for atomic coefficients.
            if sc == "1":
                s += " + " + ddx
            elif sc == "-1":
                s += " - " + ddx
            elif c._is_atomic() and sc[0] != "-":
                s += " + %s*%s" % (sc, ddx)
            elif (-c)._is_atomic():
                s += " - %s*%s" % (-c, ddx)
            else:
                s += " + (%s)*%s" % (sc, ddx)
        # Strip the leading separator produced by the loop.
        if s[:3] == " + ":
            return s[3:]
        elif s[:3] == " - ":
            return "-" + s[3:]
        elif s == "":
            return "0"
        else:
            return s
    def _latex_(self):
        """Return a LaTeX representation (same structure as ``_repr_``)."""
        parent = self.parent()
        try:
            dual_basis = parent.dual_basis()
        except NotImplementedError:
            return "\\text{A derivation on } %s" % latex(parent.domain())
        coeffs = self.list()
        s = ""
        for i in range(len(dual_basis)):
            c = coeffs[i]
            sc = str(c)
            if sc == "0":
                continue
            ddx = "\\frac{d}{d%s}" % latex(dual_basis[i])
            if sc == "1":
                s += " + " + ddx
            elif sc == "-1":
                s += " - " + ddx
            elif c._is_atomic() and sc[0] != "-":
                s += " + %s %s" % (sc, ddx)
            elif (-c)._is_atomic():
                s += " - %s %s" % (-c, ddx)
            else:
                s += " + \\left(%s\\right) %s" % (sc, ddx)
        if s[:3] == " + ":
            return s[3:]
        elif s[:3] == " - ":
            return "-" + s[3:]
        elif s == "":
            return "0"
        else:
            return s
    def list(self):
        """Return the coefficients of this derivation on the dual basis."""
        parent = self.parent()
        return [self(x) for x in parent.dual_basis()]
    def monomial_coefficients(self):
        """Return the nonzero coefficients as a dict indexed by position."""
        dual_basis = self.parent().dual_basis()
        # NOTE(review): the local name ``dict`` shadows the builtin.
        dict = { }
        for i in range(len(dual_basis)):
            c = self(dual_basis[i])
            if c != 0:
                dict[i] = c
        return dict
    def is_zero(self):
        """Return ``True`` if this derivation vanishes on the dual basis."""
        for c in self.list():
            if not c.is_zero():
                return False
        return True
    def _richcmp_(self, other, op):
        """Compare derivations by their coefficient lists (==/!= only)."""
        if op == op_EQ:
            if isinstance(other, RingDerivationWithoutTwist):
                return self.list() == other.list()
            else:
                return False
        if op == op_NE:
            if isinstance(other, RingDerivationWithoutTwist):
                return self.list() != other.list()
            else:
                return True
        return NotImplemented
    def _bracket_(self, other):
        """Return the Lie bracket ``[self, other] = self∘other - other∘self``."""
        parent = self.parent()
        if parent.domain() is not parent.codomain():
            raise TypeError("the bracket is only defined for derivations with same domain and codomain")
        arg = [ ]
        for x in parent.dual_basis():
            arg.append(self(other(x)) - other(self(x)))
        return parent(arg)
    def pth_power(self):
        """
        Return the ``p``-th power of this derivation, where ``p`` is the
        (prime) characteristic of the domain; it is again a derivation.
        """
        parent = self.parent()
        if parent.domain() is not parent.codomain():
            raise TypeError("the derivation must have the same domain and codomain")
        p = parent.domain().characteristic()
        if not p.is_prime():
            raise TypeError("the domain of the derivation must have positive and prime characteristic")
        arg = [ ]
        for x in parent.dual_basis():
            # Apply the derivation p times to each dual-basis element.
            res = x
            for _ in range(p):
                res = self(res)
            arg.append(res)
        return parent(arg)
    def precompose(self, morphism):
        """Return the derivation ``self ∘ morphism``."""
        parent = self.parent()
        if morphism in Rings().Commutative():
            if parent.domain().has_coerce_map_from(morphism):
                morphism = parent.domain().coerce_map_from(morphism)
            else:
                raise TypeError("the given ring does not coerce to the domain of the derivation")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(morphism.domain(), parent.defining_morphism() * morphism)
        arg = [ ]
        for x in M.dual_basis():
            arg.append(self(morphism(x)))
        return M(arg)
    def postcompose(self, morphism):
        """Return the derivation ``morphism ∘ self``."""
        parent = self.parent()
        if morphism in Rings().Commutative():
            if morphism.has_coerce_map_from(parent.codomain()):
                morphism = morphism.coerce_map_from(parent.codomain())
            else:
                raise TypeError("the codomain of the derivation does not coerce to the given ring")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(parent.domain(), morphism * parent.defining_morphism())
        arg = [ ]
        for x in M.dual_basis():
            arg.append(morphism(self(x)))
        return M(arg)
    def extend_to_fraction_field(self):
        """Return the extension of this derivation to the fraction fields."""
        parent = self.parent()
        domain = parent.domain().fraction_field()
        codomain = parent.codomain().fraction_field()
        M = RingDerivationModule(domain, codomain)
        try:
            return M(self)
        except (ValueError, NotImplementedError):
            # Fall back to reconstructing from the coefficient list.
            return M(self.list())
class RingDerivationWithoutTwist_zero(RingDerivationWithoutTwist):
    """
    The zero derivation, used over rings that admit no nonzero
    derivation (e.g. ``ZZ``, number fields, finite fields).
    """
    def __init__(self, parent, arg=None):
        """Initialize the zero derivation; any nonzero ``arg`` is rejected."""
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if arg and not (isinstance(arg, RingDerivation) and arg.is_zero()):
            raise ValueError("unable to create the derivation")
        RingDerivation.__init__(self, parent)
    def _repr_(self):
        """Return the string ``"0"``."""
        return "0"
    def _latex_(self):
        """Return the LaTeX string ``"0"``."""
        return "0"
    def __hash__(self):
        """Return a hash based on the (empty) coefficient list."""
        return hash(tuple(self.list()))
    def _add_(self, other):
        # 0 + other == other
        return other
    def _sub_(self, other):
        # 0 - other == -other
        return -other
    def _neg_(self):
        return self
    def _lmul_(self, factor):
        return self
    def _rmul_(self, left):
        return self
    def _call_(self, x):
        """Return zero in the codomain for every input."""
        return self.parent().codomain().zero()
    def _bracket_(self, other):
        # The bracket with the zero derivation is zero.
        return self
    def is_zero(self):
        return True
    def list(self):
        """Return the empty coefficient list."""
        return []
class RingDerivationWithoutTwist_wrapper(RingDerivationWithoutTwist):
    """
    A derivation defined by delegating to a derivation on a related
    "base" ring (cover ring, underlying ring of a fraction field, ...).
    """
    def __init__(self, parent, arg=None):
        """Wrap ``arg`` as an element of ``parent._base_derivation``."""
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if isinstance(arg, RingDerivationWithoutTwist_wrapper):
            # Reuse the underlying base derivation directly.
            self._base_derivation = arg._base_derivation
        else:
            self._base_derivation = parent._base_derivation(arg)
        RingDerivation.__init__(self, parent)
    def __hash__(self):
        return hash(tuple(self.list()))
    def _add_(self, other):
        # Arithmetic is performed on the wrapped base derivations.
        return type(self)(self.parent(), self._base_derivation + other._base_derivation)
    def _sub_(self, other):
        return type(self)(self.parent(), self._base_derivation - other._base_derivation)
    def _neg_(self):
        return type(self)(self.parent(), -self._base_derivation)
    def _lmul_(self, factor):
        return type(self)(self.parent(), self._base_derivation * factor)
    def _rmul_(self, factor):
        return type(self)(self.parent(), factor * self._base_derivation)
    def list(self):
        """Return the coefficient list of the wrapped derivation."""
        return self._base_derivation.list()
class RingDerivationWithoutTwist_function(RingDerivationWithoutTwist):
    """
    A derivation over a (multivariate) polynomial-like ring, stored as a
    derivation of the base ring together with the images of the ring
    generators.
    """
    def __init__(self, parent, arg=None):
        """
        Initialize from ``arg``, which may be ``None`` (d/dx for the
        first generator), another derivation, a list of images, or a
        single generator / base-ring datum.
        """
        domain = parent.domain()
        codomain = parent.codomain()
        ngens = domain.ngens()
        self._base_derivation = parent._base_derivation()
        self._images = [codomain.zero() for _ in range(ngens)]
        if arg is None:
            # Default: differentiation with respect to the first generator.
            arg = domain.gen()
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        if not arg:
            pass
        elif (isinstance(arg, RingDerivationWithoutTwist_function)
              and parent.has_coerce_map_from(arg.parent())):
            # Copy-convert another derivation of the same shape.
            self._base_derivation = parent._base_derivation(arg._base_derivation)
            self._images = [codomain(x) for x in arg._images]
        elif isinstance(arg, (tuple, list)):
            # The last ``ngens`` entries are the generator images; the
            # rest define the base-ring derivation.
            if len(arg) < ngens:
                raise ValueError("the number of images is incorrect")
            self._base_derivation = parent._base_derivation(arg[:-ngens])
            self._images = [codomain(x) for x in arg[-ngens:]]
        else:
            # A single generator means d/d(that generator).
            for i in range(ngens):
                if arg == domain.gen(i):
                    self._base_derivation = parent._base_derivation()
                    self._images[i] = codomain.one()
                    break
            else:
                self._base_derivation = parent._base_derivation(arg)
        RingDerivation.__init__(self, parent)
    def __hash__(self):
        return hash(tuple(self.list()))
    def _add_(self, other):
        """Add componentwise: base derivations and generator images."""
        base_derivation = self._base_derivation + other._base_derivation
        im = [ self._images[i] + other._images[i] for i in range(self.parent().domain().ngens()) ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _sub_(self, other):
        """Subtract componentwise."""
        base_derivation = self._base_derivation - other._base_derivation
        im = [ self._images[i] - other._images[i] for i in range(self.parent().domain().ngens()) ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _rmul_(self, factor):
        """Scale the base derivation and every generator image."""
        factor = self.parent().codomain()(factor)
        base_derivation = factor * self._base_derivation
        im = [ factor*x for x in self._images ]
        return type(self)(self.parent(), [base_derivation] + im)
    def _lmul_(self, factor):
        return self._rmul_(factor)
    def _call_(self, x):
        """
        Evaluate by the Leibniz rule: differentiate the coefficients with
        the base derivation, then add the partial derivatives times the
        generator images (with the quotient rule over fraction fields).
        """
        parent = self.parent()
        domain = parent.domain()
        codomain = parent.codomain()
        defining_morphism = parent.defining_morphism()
        if isinstance(domain, FractionField_generic):
            num = x.numerator()
            den = x.denominator()
            u = defining_morphism(num)
            v = defining_morphism(den)
            up = num.map_coefficients(self._base_derivation, codomain)(*domain.gens())
            vp = den.map_coefficients(self._base_derivation, codomain)(*domain.gens())
            # Quotient rule: (u/v)' = (u'v - uv') / v^2.
            res = (up*v - u*vp) / (v*v)
        else:
            res = x.map_coefficients(self._base_derivation, codomain)(*domain.gens())
        for i in range(len(self._images)):
            res += defining_morphism(x.derivative(domain.gen(i))) * self._images[i]
        return res
    def is_zero(self):
        """Return ``True`` iff both the base part and all images vanish."""
        if not self._base_derivation.is_zero():
            return False
        return all(im == 0 for im in self._images)
    def list(self):
        """Return base-ring coefficients followed by generator images."""
        return self._base_derivation.list() + self._images
class RingDerivationWithoutTwist_fraction_field(RingDerivationWithoutTwist_wrapper):
    """
    Derivation over a fraction field, extending a derivation of the
    underlying ring via the quotient rule.
    """
    def __hash__(self):
        return hash(tuple(self.list()))
    def _call_(self, x):
        """Evaluate on ``x = num/den`` using the quotient rule."""
        defining_morphism = self.parent().defining_morphism()
        num = x.numerator()
        den = x.denominator()
        u = defining_morphism(num)
        v = defining_morphism(den)
        up = self._base_derivation(num)
        vp = self._base_derivation(den)
        # (u/v)' = (u'v - uv') / v^2
        return (up*v - u*vp) / (v*v)
class RingDerivationWithoutTwist_quotient(RingDerivationWithoutTwist_wrapper):
    """
    Derivation over a quotient ring, computed by lifting to the cover
    ring and applying the base derivation (the constructor of the parent
    checks that the base derivation kills the defining relations).
    """
    def __hash__(self):
        return hash(tuple(self.list()))
    def _call_(self, x):
        """Evaluate by lifting ``x`` to the cover ring."""
        return self._base_derivation(x.lift())
class RingDerivationWithTwist_generic(RingDerivation):
    """
    A twisted derivation of the form ``scalar * (twist - defining_morphism)``.

    Each element is entirely determined by its ``scalar`` in the codomain.
    """
    def __init__(self, parent, scalar=0):
        """Initialize from ``scalar``, coerced into the codomain."""
        codomain = parent.codomain()
        self._scalar = codomain(scalar)
        RingDerivation.__init__(self, parent)
    def __hash__(self):
        return hash(self._scalar)
    def _repr_(self):
        """Return a string like ``c*([twist] - id)``."""
        scalar = self._scalar
        sc = str(scalar)
        if sc == "0":
            return "0"
        defining_morphism = self.parent().defining_morphism()
        twisting_morphism = self.parent().twisting_morphism()
        # Short names for the two morphisms, with graceful fallbacks.
        try:
            if defining_morphism.is_identity():
                sdef = "id"
            else:
                sdef = "[%s]" % defining_morphism._repr_short()
        except (AttributeError, NotImplementedError):
            sdef = "defining_morphism"
        try:
            stwi = "[%s]" % twisting_morphism._repr_short()
        except AttributeError:
            stwi = "twisting_morphism"
        # Render the scalar: omit 1, fold -1 into a sign, parenthesize
        # non-atomic expressions.
        if sc == "1":
            return "%s - %s" % (stwi, sdef)
        elif sc == "-1":
            s = "-"
        elif scalar._is_atomic():
            s = "%s*" % sc
        elif (-scalar)._is_atomic():
            s = "-%s*" % (-scalar)
        else:
            s = "(%s)*" % sc
        return "%s(%s - %s)" % (s, stwi, sdef)
    def _latex_(self):
        """Return a LaTeX representation (same structure as ``_repr_``)."""
        scalar = self._scalar
        sc = str(scalar)
        if sc == "0":
            return "0"
        defining_morphism = self.parent().defining_morphism()
        twisting_morphism = self.parent().twisting_morphism()
        try:
            if defining_morphism.is_identity():
                sdef = "\\text{id}"
            else:
                sdef = "\\left[%s\\right]" % latex(defining_morphism)
        except (AttributeError, NotImplementedError):
            sdef = "\\text{defining morphism}"
        try:
            stwi = "\\left[%s\\right]" % latex(twisting_morphism)
        except AttributeError:
            stwi = "\\text{twisting morphism}"
        if sc == "1":
            return "%s - %s" % (stwi, sdef)
        elif sc == "-1":
            s = "-"
        elif scalar._is_atomic():
            s = "%s " % sc
        elif (-scalar)._is_atomic():
            s = "-%s " % (-scalar)
        else:
            s = "\\left(%s\\right) " % sc
        return "%s \\left(%s - %s\\right)" % (s, stwi, sdef)
    def _add_(self, other):
        """Add by adding the defining scalars."""
        return type(self)(self.parent(), self._scalar + other._scalar)
    def _sub_(self, other):
        """Subtract by subtracting the defining scalars."""
        return type(self)(self.parent(), self._scalar - other._scalar)
    def _rmul_(self, factor):
        """Scale the defining scalar."""
        return type(self)(self.parent(), factor * self._scalar)
    def _lmul_(self, factor):
        return self._rmul_(factor)
    def _call_(self, x):
        """Return ``scalar * (twist(x) - defining_morphism(x))``."""
        parent = self.parent()
        return self._scalar * (parent.twisting_morphism()(x) - parent.defining_morphism()(x))
    def list(self):
        """Return the one-element coefficient list ``[scalar]``."""
        return [ self._scalar ]
    def precompose(self, morphism):
        """Return ``self ∘ morphism``; the scalar is unchanged and both
        structural morphisms are right-composed with ``morphism``."""
        parent = self.parent()
        if morphism in Rings().Commutative():
            if parent.domain().has_coerce_map_from(morphism):
                morphism = parent.domain().coerce_map_from(morphism)
            else:
                raise TypeError("the given ring does not coerce to the domain of the derivation")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(morphism.domain(), parent.defining_morphism() * morphism,
                                 parent.twisting_morphism() * morphism)
        return M(self._scalar)
    def postcompose(self, morphism):
        """Return ``morphism ∘ self``; the scalar is mapped through
        ``morphism`` and both structural morphisms are left-composed."""
        parent = self.parent()
        if morphism in Rings().Commutative():
            if morphism.has_coerce_map_from(parent.codomain()):
                morphism = morphism.coerce_map_from(parent.codomain())
            else:
                raise TypeError("the codomain of the derivation does not coerce to the given ring")
        elif not (isinstance(morphism, Map) and morphism.category_for().is_subcategory(Rings())):
            raise TypeError("you must give an homomorphism of rings")
        M = RingDerivationModule(parent.domain(), morphism * parent.defining_morphism(),
                                 morphism * parent.twisting_morphism())
        return M(morphism(self._scalar))
    def _richcmp_(self, other, op):
        """Compare by the defining scalars (==/!= only)."""
        if op == op_EQ:
            if isinstance(other, RingDerivationWithTwist_generic):
                return self._scalar == other._scalar
            else:
                return False
        if op == op_NE:
            if isinstance(other, RingDerivationWithTwist_generic):
                return self._scalar != other._scalar
            else:
                return True
        return NotImplemented
    def extend_to_fraction_field(self):
        """Return the extension of this derivation to the fraction fields,
        extending the twisting morphism accordingly."""
        parent = self.parent()
        domain = parent.domain().fraction_field()
        codomain = parent.codomain().fraction_field()
        twist = parent.twisting_morphism().extend_to_fraction_field()
        M = RingDerivationModule(domain, codomain, twist)
        return M(codomain(self._scalar))
| true | true |
f719b7c6bd2479d28d7a6679e56b280ca817a0bb | 1,669 | py | Python | py/orbit/py_linac/overlapping_fields/jparc_enge_func_factory.py | LeoRya/py-orbit | 340b14b6fd041ed8ec2cc25b0821b85742aabe0c | [
"MIT"
] | 17 | 2018-02-09T23:39:06.000Z | 2022-03-04T16:27:04.000Z | py/orbit/py_linac/overlapping_fields/jparc_enge_func_factory.py | LeoRya/py-orbit | 340b14b6fd041ed8ec2cc25b0821b85742aabe0c | [
"MIT"
] | 22 | 2017-05-31T19:40:14.000Z | 2021-09-24T22:07:47.000Z | py/orbit/py_linac/overlapping_fields/jparc_enge_func_factory.py | LeoRya/py-orbit | 340b14b6fd041ed8ec2cc25b0821b85742aabe0c | [
"MIT"
] | 37 | 2016-12-08T19:39:35.000Z | 2022-02-11T19:59:34.000Z | #!/usr/bin/env python
#--------------------------------------------------------------
# This is a Enge Function Factory specific for the J-PARC. Some
# Enge's function parameters are defined by the aperture and length,
# and others are defined by the field distribution formula from Trace3D
# documentation.
#--------------------------------------------------------------
import math
import sys
import os
from overlapping_quad_fields_lib import PMQ_Trace3D_Function
from overlapping_quad_fields_lib import EngeFunction
from overlapping_quad_fields_lib import SimpleQuadFieldFunc
def JPARC_EngeFunctionFactory(quad):
	"""
	Generate the longitudinal field-profile function for a J-PARC quad.

	Returns a ``PMQ_Trace3D_Function`` for permanent-magnet quads
	(identified by the ``radIn``/``radOut`` parameters), an
	``EngeFunction`` for quads carrying an ``aperture`` parameter, and
	otherwise aborts via ``orbitFinalize`` and returns ``None``.
	"""
	name = quad.getName()
	length_param = quad.getLength()
	#---- general PMQ function described in Trace3D documentation
	if(quad.hasParam("radIn") and quad.hasParam("radOut")):
		radIn = quad.getParam("radIn")
		radOut = quad.getParam("radOut")
		# DTQ01 needs a looser cutoff than the default 1%.
		cutoff_level = 0.01
		if(name == "LI_DTL1:DTQ01"): cutoff_level = 0.02
		func = PMQ_Trace3D_Function(length_param,radIn,radOut,cutoff_level)
		return func
	#----- general Enge's Function
	if(quad.hasParam("aperture")):
		acceptance_diameter_param = quad.getParam("aperture")
		cutoff_level = 0.01
		func = EngeFunction(length_param,acceptance_diameter_param,cutoff_level)
		return func
	else:
		# BUG FIX: the message previously named SNS_EngeFunctionFactory
		# (copy-paste from the SNS factory); this is the J-PARC one.
		msg = "JPARC_EngeFunctionFactory Python function. "
		msg += os.linesep
		msg += "Cannot create the EngeFunction for the quad!"
		msg += os.linesep
		msg = msg + "quad name = " + name
		msg = msg + os.linesep
		msg = msg + "It does not have the aperture parameter!"
		msg = msg + os.linesep
		orbitFinalize(msg)
		return None
| 34.061224 | 74 | 0.688436 |
# and others are defined by the field distribution formula from Trace3D
# documentation.
#--------------------------------------------------------------
import math
import sys
import os
from overlapping_quad_fields_lib import PMQ_Trace3D_Function
from overlapping_quad_fields_lib import EngeFunction
from overlapping_quad_fields_lib import SimpleQuadFieldFunc
def JPARC_EngeFunctionFactory(quad):
	"""
	Build the field-profile function for a J-PARC quad.

	Permanent-magnet quads (with ``radIn``/``radOut``) get the Trace3D
	PMQ profile; quads with an ``aperture`` parameter get an Enge
	function; anything else aborts via ``orbitFinalize``.
	"""
	quad_name = quad.getName()
	quad_length = quad.getLength()
	# Permanent-magnet quad: profile from the Trace3D model.
	if quad.hasParam("radIn") and quad.hasParam("radOut"):
		cutoff = 0.02 if quad_name == "LI_DTL1:DTQ01" else 0.01
		return PMQ_Trace3D_Function(quad_length, quad.getParam("radIn"),
		                            quad.getParam("radOut"), cutoff)
	# Electromagnetic quad: Enge function sized by the aperture.
	if quad.hasParam("aperture"):
		return EngeFunction(quad_length, quad.getParam("aperture"), 0.01)
	# No usable parameters: report the problem and bail out.
	parts = ["SNS_EngeFunctionFactory Python function. ",
	         "Cannot create the EngeFunction for the quad!",
	         "quad name = " + quad.getName(),
	         "It does not have the aperture parameter!",
	         ""]
	orbitFinalize(os.linesep.join(parts))
	return None
return None
| true | true |
f719b9a65c9a3077b966cb0086383cf3d2d3c035 | 498 | py | Python | meiduo_mall/utils/secret.py | liusudo123/meiduo_project | 3bf92fff56bf47777795cf9078ff285eb004b81f | [
"MIT"
] | null | null | null | meiduo_mall/utils/secret.py | liusudo123/meiduo_project | 3bf92fff56bf47777795cf9078ff285eb004b81f | [
"MIT"
] | null | null | null | meiduo_mall/utils/secret.py | liusudo123/meiduo_project | 3bf92fff56bf47777795cf9078ff285eb004b81f | [
"MIT"
] | null | null | null | # 1.装包
# 2.导包
from django.conf import settings
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
# 3.实例化
# 4.加密解密
class SecretOauth(object):
    """Sign and verify JSON payloads with the Django ``SECRET_KEY``.

    Tokens produced by :meth:`dumps` expire after one hour.
    """

    def _serializer(self):
        # Fresh serializer per call; 3600 s expiry for every token.
        return Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)

    def dumps(self, data):
        """Serialize and sign ``data``; return the token as ``str``."""
        return self._serializer().dumps(data).decode()

    def loads(self, data):
        """Verify a signed token and return the original payload."""
        return self._serializer().loads(data)
| 21.652174 | 71 | 0.670683 |
from django.conf import settings
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
class SecretOauth(object):
    """Sign/verify JSON payloads using the project ``SECRET_KEY``."""
    def dumps(self, data):
        """Serialize and sign ``data``; return the token as ``str``."""
        # Tokens expire after 3600 seconds.
        s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
        result = s.dumps(data)
        return result.decode()
    def loads(self, data):
        """Verify a signed token and return the original payload."""
        s = Serializer(secret_key=settings.SECRET_KEY, expires_in=3600)
        result = s.loads(data)
        return result
| true | true |
f719b9b7e40ad20e1eac164cd3eb7a2cf77da67a | 3,848 | py | Python | Phys_Seg/run.py | pedrob37/Phys_Seg | 7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee | [
"Apache-2.0"
] | 1 | 2021-09-27T09:58:56.000Z | 2021-09-27T09:58:56.000Z | Phys_Seg/run.py | pedrob37/Phys_Seg | 7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee | [
"Apache-2.0"
] | null | null | null | Phys_Seg/run.py | pedrob37/Phys_Seg | 7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee | [
"Apache-2.0"
] | null | null | null | import torch
import numpy as np
import SimpleITK as sitk
from Phys_Seg.data_loading import load_and_preprocess, save_segmentation_nifti, read_file, save_img
from Phys_Seg.predict_case import predict_phys_seg, physics_preprocessing, image_preprocessing
import importlib
from Phys_Seg.utils import postprocess_prediction, get_params_fname, maybe_download_parameters
from network_architecture import nnUNet
import os
import Phys_Seg
def apply_phys_seg(img, out_fname):
    """Round-trip an image through SimpleITK, preserving spatial metadata.

    Reads ``img`` from disk, copies its voxel data into a fresh image that
    inherits the original's origin/spacing/direction, and writes the result
    to ``out_fname``.
    """
    source = sitk.ReadImage(img)
    voxels = sitk.GetArrayFromImage(source)
    rewritten = sitk.GetImageFromArray(voxels)
    rewritten.CopyInformation(source)
    sitk.WriteImage(rewritten, out_fname)
def run_phys_seg(mri_fnames, output_fnames, sequence='MPRAGE', physics_params=None,
                 # config_file=os.path.join(Phys_Seg.__path__[0], "config.py"),
                 device=None, overwrite=True):
    """
    Segment one or more MR volumes with the (optionally physics-aware) nnUNet.

    :param mri_fnames: str or list/tuple of str
    :param output_fnames: str or list/tuple of str. If list: must have the same length as mri_fnames
    :param sequence: MPRAGE or SPGR (for now)
    :param physics_params: string representation of a list of acquisition
        parameters (it is passed to eval() below); None disables the
        physics branch of the network
    :param device: either int (for device id) or 'cpu'
    :param overwrite: True or False; if False, files whose output already
        exists are skipped
    :return: None; segmentations are written to output_fnames
    """
    # Number of acquisition parameters fed to the physics sub-network.
    physics_input_size = {'MPRAGE': 4,
                          'SPGR': 6}
    # Load in model weights
    maybe_download_parameters(sequence=sequence, physics_flag=True if physics_params else False)
    params_file = get_params_fname(sequence=sequence, physics_flag=True if physics_params else False)
    net = nnUNet(1, 4, physics_flag=True if physics_params else False,
                 physics_input=physics_input_size[sequence],
                 physics_output=40)
    if device == "cpu":
        net = net.cpu()
    else:
        net.cuda(device)
        # NOTE(review): device_ids=[device, 1-device] assumes exactly two
        # GPUs with ids 0 and 1 -- confirm before running on other setups.
        net = torch.nn.DataParallel(net, device_ids=[device, int(1-device)])
        net.to(f'cuda:{net.device_ids[0]}')
    # net = torch.nn.DataParallel(net)
    # Accept a single filename as well as a sequence of filenames.
    if not isinstance(mri_fnames, (list, tuple)):
        mri_fnames = [mri_fnames]
    if not isinstance(output_fnames, (list, tuple)):
        output_fnames = [output_fnames]
    # Load the checkpoint onto CPU; its weights are applied to the net below.
    params = torch.load(params_file, map_location=lambda storage, loc: storage)
    for in_fname, out_fname in zip(mri_fnames, output_fnames):
        if overwrite or not (os.path.isfile(out_fname)):
            print("File:", in_fname)
            print("preprocessing...")
            try:
                data, aff = read_file(in_fname)
            except RuntimeError:
                print("\nERROR\nCould not read file", in_fname, "\n")
                continue
            except AssertionError as e:
                print(e)
                continue
            # Process data
            if physics_params is not None:
                # SECURITY: eval() on caller-supplied text executes arbitrary
                # code -- only pass trusted parameter strings here.
                physics_params = eval(physics_params)
                # Convert TR to pTD
                physics_params[1] = physics_params[1] - physics_params[0]
                print(physics_params)
                processed_physics = physics_preprocessing(np.array(physics_params), sequence)
            else:
                processed_physics = None
            data = image_preprocessing(patient_data=data)
            print("prediction (CNN id)...")
            net.load_state_dict(params['model_state_dict'])
            net.eval()
            seg = predict_phys_seg(net=net,
                                   patient_data=data,
                                   processed_physics=processed_physics,
                                   main_device=device)
            print("exporting segmentation...")
            save_segmentation_nifti(seg, aff, out_fname)
            # apply_phys_seg(in_fname, out_fname)
| 38.09901 | 114 | 0.64527 | import torch
import numpy as np
import SimpleITK as sitk
from Phys_Seg.data_loading import load_and_preprocess, save_segmentation_nifti, read_file, save_img
from Phys_Seg.predict_case import predict_phys_seg, physics_preprocessing, image_preprocessing
import importlib
from Phys_Seg.utils import postprocess_prediction, get_params_fname, maybe_download_parameters
from network_architecture import nnUNet
import os
import Phys_Seg
def apply_phys_seg(img, out_fname):
img_itk = sitk.ReadImage(img)
img_npy = sitk.GetArrayFromImage(img_itk)
out = sitk.GetImageFromArray(img_npy)
out.CopyInformation(img_itk)
sitk.WriteImage(out, out_fname)
def run_phys_seg(mri_fnames, output_fnames, sequence='MPRAGE', physics_params=None,
device=None, overwrite=True):
physics_input_size = {'MPRAGE': 4,
'SPGR': 6}
maybe_download_parameters(sequence=sequence, physics_flag=True if physics_params else False)
params_file = get_params_fname(sequence=sequence, physics_flag=True if physics_params else False)
net = nnUNet(1, 4, physics_flag=True if physics_params else False,
physics_input=physics_input_size[sequence],
physics_output=40)
if device == "cpu":
net = net.cpu()
else:
net.cuda(device)
net = torch.nn.DataParallel(net, device_ids=[device, int(1-device)])
net.to(f'cuda:{net.device_ids[0]}')
if not isinstance(mri_fnames, (list, tuple)):
mri_fnames = [mri_fnames]
if not isinstance(output_fnames, (list, tuple)):
output_fnames = [output_fnames]
params = torch.load(params_file, map_location=lambda storage, loc: storage)
for in_fname, out_fname in zip(mri_fnames, output_fnames):
if overwrite or not (os.path.isfile(out_fname)):
print("File:", in_fname)
print("preprocessing...")
try:
data, aff = read_file(in_fname)
except RuntimeError:
print("\nERROR\nCould not read file", in_fname, "\n")
continue
except AssertionError as e:
print(e)
continue
if physics_params is not None:
physics_params = eval(physics_params)
physics_params[1] = physics_params[1] - physics_params[0]
print(physics_params)
processed_physics = physics_preprocessing(np.array(physics_params), sequence)
else:
processed_physics = None
data = image_preprocessing(patient_data=data)
print("prediction (CNN id)...")
net.load_state_dict(params['model_state_dict'])
net.eval()
seg = predict_phys_seg(net=net,
patient_data=data,
processed_physics=processed_physics,
main_device=device)
print("exporting segmentation...")
save_segmentation_nifti(seg, aff, out_fname)
| true | true |
f719bb906e369e26b721b5b82e53ff4644582d3b | 3,541 | py | Python | lzo_indexer/indexer.py | krux/python-lzo-indexer | 21fdd821a38d9b941c02036b7f30a15891311a7d | [
"Apache-2.0"
] | 8 | 2015-09-12T17:11:00.000Z | 2021-04-22T01:35:26.000Z | lzo_indexer/indexer.py | krux/python-lzo-indexer | 21fdd821a38d9b941c02036b7f30a15891311a7d | [
"Apache-2.0"
] | null | null | null | lzo_indexer/indexer.py | krux/python-lzo-indexer | 21fdd821a38d9b941c02036b7f30a15891311a7d | [
"Apache-2.0"
] | 4 | 2015-06-18T01:04:19.000Z | 2018-09-28T16:33:54.000Z | import struct
from collections import namedtuple
from StringIO import StringIO
# Magic string expected at the start of the file to verify it's LZO
_LZO_MAGIC = bytearray("\x89LZO\x00\r\n\x1a\n")
_COMPRESSION_CHECKSUMS = (0x02, 0x200) # ADLER32 CRC32
_DECOMPRESSION_CHECKSUMS = (0x01, 0x100) # ADLER32 CRC32
def _parse_header(lzo_file):
    """Parse and verify the header of an LZO file, returning a tuple
    of the number of compressed/decompressed checksums expected at the
    end of each block.

    Leaves the file positioned just past the header, i.e. at the first
    block header.
    """
    if lzo_file.tell() != 0:
        raise Exception("File object must be at offset 0")
    # Parse the header
    if lzo_file.read(9) != _LZO_MAGIC:
        raise Exception("Invalid lzo file")
    # Ignore a bunch of values from the header
    # TODO: We should validate these
    lzop_version = lzo_file.read(2)
    library_version = lzo_file.read(2)
    extract_version = lzo_file.read(2)
    method = lzo_file.read(1)
    level = lzo_file.read(1)
    # Checksum flags: big-endian 32-bit bitfield whose adler32/crc32 bits
    # determine how many 4-byte checksums follow each data block.
    flags, = struct.unpack(">I", lzo_file.read(4))
    num_compressed_checksums = 0
    for idx, flag in enumerate(_COMPRESSION_CHECKSUMS):
        if (flag & flags) != 0:
            num_compressed_checksums += 1
    num_decompressed_checksums = 0
    for idx, flag in enumerate(_DECOMPRESSION_CHECKSUMS):
        if (flag & flags) != 0:
            num_decompressed_checksums += 1
    # Parse out the mode/mtime/gmtdiff values we're not interested in
    mode = lzo_file.read(4)
    mtime = lzo_file.read(4)
    gmtdiff = lzo_file.read(4)
    # Extract the filename (single length byte followed by that many bytes)
    filename_length = ord(lzo_file.read(1))
    if filename_length > 0:
        filename = str(lzo_file.read(filename_length))
    # TODO: Verify the header checksum against these bytes
    lzo_file.read(4)
    # Process extra header field for lzo < 1.08. This is a checksum that
    # needs to also be validated
    if (flags & 0x00000040) != 0:
        size, = struct.unpack(">I", lzo_file.read(4))
        if size > 0:
            lzo_file.read(size)
        lzo_file.read(4)
    return num_compressed_checksums, num_decompressed_checksums
def get_lzo_blocks(lzo_file):
    """Return a generator containing all of the block offsets for each
    compressed block of data in the LZO file.

    Each yielded offset points at a block's 8-byte size header, so a reader
    can seek there and start decompressing that block.
    """
    num_compressed_chksms, num_decompressed_chksms = _parse_header(lzo_file)
    while True:
        # Block header: uncompressed size then compressed size (">I" each);
        # an uncompressed size of zero marks the end of the stream.
        decompressed_blocksize, = struct.unpack(">I", lzo_file.read(4))
        if decompressed_blocksize == 0:
            break
        compressed_blocksize, = struct.unpack(">I", lzo_file.read(4))
        num_chksms_to_skip = num_decompressed_chksms
        # NOTE(review): per the lzop format, checksums of the *compressed*
        # data are stored only when the block actually compressed (sizes
        # differ); this branch adds them when the sizes are equal instead.
        # Harmless with lzop's default flags (no compressed checksums), but
        # confirm the condition against archives written with extra
        # checksum flags before relying on it.
        if decompressed_blocksize == compressed_blocksize:
            num_chksms_to_skip += num_compressed_chksms
        skip = 4 * num_chksms_to_skip
        position = lzo_file.tell()
        block_start = position - 8  # Rewind back to before the block headers
        next_block = position + compressed_blocksize + skip
        yield block_start
        lzo_file.seek(next_block)  # Seek to the next block
def index_lzo_string(string):
    """Index an in-memory LZO archive.

    Returns the index as a string: one packed big-endian 64-bit offset per
    compressed block (see ``index_lzo_file``).
    """
    source = StringIO(string)
    destination = StringIO()
    index_lzo_file(source, destination)
    return destination.getvalue()
def index_lzo_file(lzo_file, index_file):
    """Write the block index for ``lzo_file`` into ``index_file``.

    Appends each compressed block's byte offset as a big-endian unsigned
    64-bit integer and returns ``index_file``.
    """
    offsets = get_lzo_blocks(lzo_file)
    index_file.writelines(struct.pack(">Q", offset) for offset in offsets)
    return index_file
| 29.508333 | 79 | 0.680316 | import struct
from collections import namedtuple
from StringIO import StringIO
_LZO_MAGIC = bytearray("\x89LZO\x00\r\n\x1a\n")
_COMPRESSION_CHECKSUMS = (0x02, 0x200) # ADLER32 CRC32
_DECOMPRESSION_CHECKSUMS = (0x01, 0x100) # ADLER32 CRC32
def _parse_header(lzo_file):
if lzo_file.tell() != 0:
raise Exception("File object must be at offset 0")
# Parse the header
if lzo_file.read(9) != _LZO_MAGIC:
raise Exception("Invalid lzo file")
# Ignore a bunch of values from the header
# TODO: We should validate these
lzop_version = lzo_file.read(2)
library_version = lzo_file.read(2)
extract_version = lzo_file.read(2)
method = lzo_file.read(1)
level = lzo_file.read(1)
# Checksum flags
flags, = struct.unpack(">I", lzo_file.read(4))
num_compressed_checksums = 0
for idx, flag in enumerate(_COMPRESSION_CHECKSUMS):
if (flag & flags) != 0:
num_compressed_checksums += 1
num_decompressed_checksums = 0
for idx, flag in enumerate(_DECOMPRESSION_CHECKSUMS):
if (flag & flags) != 0:
num_decompressed_checksums += 1
# Parse out the mode/mtime/gmtdiff values we're not interested in
mode = lzo_file.read(4)
mtime = lzo_file.read(4)
gmtdiff = lzo_file.read(4)
filename_length = ord(lzo_file.read(1))
if filename_length > 0:
filename = str(lzo_file.read(filename_length))
lzo_file.read(4)
if (flags & 0x00000040) != 0:
size, = struct.unpack(">I", lzo_file.read(4))
if size > 0:
lzo_file.read(size)
lzo_file.read(4)
return num_compressed_checksums, num_decompressed_checksums
def get_lzo_blocks(lzo_file):
num_compressed_chksms, num_decompressed_chksms = _parse_header(lzo_file)
while True:
decompressed_blocksize, = struct.unpack(">I", lzo_file.read(4))
if decompressed_blocksize == 0:
break
compressed_blocksize, = struct.unpack(">I", lzo_file.read(4))
num_chksms_to_skip = num_decompressed_chksms
if decompressed_blocksize == compressed_blocksize:
num_chksms_to_skip += num_compressed_chksms
skip = 4 * num_chksms_to_skip
position = lzo_file.tell()
block_start = position - 8
next_block = position + compressed_blocksize + skip
yield block_start
lzo_file.seek(next_block)
def index_lzo_string(string):
index = StringIO()
index_lzo_file(StringIO(string), index)
return index.getvalue()
def index_lzo_file(lzo_file, index_file):
for block_offset in get_lzo_blocks(lzo_file):
index_file.write(struct.pack(">Q", block_offset))
return index_file
| true | true |
f719bbd224fa1f348d74df1adf6270da318609b3 | 1,028 | py | Python | reference/ddtrace/ext/aws.py | stschenk/opentelemetry-python-contrib | 28c1331e571d386baab74f5028e3268e4bfda4cd | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | reference/ddtrace/ext/aws.py | stschenk/opentelemetry-python-contrib | 28c1331e571d386baab74f5028e3268e4bfda4cd | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-12T17:59:41.000Z | 2020-12-12T18:54:03.000Z | reference/ddtrace/ext/aws.py | stschenk/opentelemetry-python-contrib | 28c1331e571d386baab74f5028e3268e4bfda4cd | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-10-22T04:16:33.000Z | 2020-10-22T04:16:33.000Z | from ..utils.formats import flatten_dict
DENYLIST_ENDPOINT = ['kms', 'sts']
DENYLIST_ENDPOINT_TAGS = {
's3': ['params.Body'],
}
def truncate_arg_value(value, max_len=1024):
    """Truncate ``bytes`` values longer than ``max_len`` down to ``b'...'``.

    Non-bytes values (including long ``str``) are returned untouched. This
    keeps oversized payloads such as an S3 ``put_object`` 'Body' out of tags.
    """
    oversized_bytes = isinstance(value, bytes) and len(value) > max_len
    return b'...' if oversized_bytes else value
def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):
    """Tag ``span`` with the traced call arguments for an AWS endpoint.

    Endpoints in ``DENYLIST_ENDPOINT`` are never tagged; individual keys in
    ``DENYLIST_ENDPOINT_TAGS`` (e.g. s3 'params.Body') are dropped, and
    oversized bytes values are truncated before tagging.
    """
    if endpoint_name in DENYLIST_ENDPOINT:
        return
    denylisted_keys = DENYLIST_ENDPOINT_TAGS.get(endpoint_name, [])
    traced = {name: value
              for name, value in zip(args_names, args)
              if name in args_traced}
    flat = flatten_dict(traced)
    span.set_tags({key: truncate_arg_value(value)
                   for key, value in flat.items()
                   if key not in denylisted_keys})
REGION = 'aws.region'
AGENT = 'aws.agent'
OPERATION = 'aws.operation'
| 25.7 | 74 | 0.622568 | from ..utils.formats import flatten_dict
DENYLIST_ENDPOINT = ['kms', 'sts']
DENYLIST_ENDPOINT_TAGS = {
's3': ['params.Body'],
}
def truncate_arg_value(value, max_len=1024):
if isinstance(value, bytes) and len(value) > max_len:
return b'...'
return value
def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):
if endpoint_name not in DENYLIST_ENDPOINT:
denylisted = DENYLIST_ENDPOINT_TAGS.get(endpoint_name, [])
tags = dict(
(name, value)
for (name, value) in zip(args_names, args)
if name in args_traced
)
tags = flatten_dict(tags)
tags = {
k: truncate_arg_value(v)
for k, v in tags.items()
if k not in denylisted
}
span.set_tags(tags)
REGION = 'aws.region'
AGENT = 'aws.agent'
OPERATION = 'aws.operation'
| true | true |
f719bbfb410401300cb793e160dd34ffe11f0df1 | 426 | py | Python | list_comprehensions.py | rjayasin/list-comprehension | 6937f4f6dec8b1b8722c31356db32de18795de8b | [
"MIT"
] | null | null | null | list_comprehensions.py | rjayasin/list-comprehension | 6937f4f6dec8b1b8722c31356db32de18795de8b | [
"MIT"
] | null | null | null | list_comprehensions.py | rjayasin/list-comprehension | 6937f4f6dec8b1b8722c31356db32de18795de8b | [
"MIT"
] | null | null | null | import math
# Compute primes below 50 via list difference (sieve out the multiples).
# From http://www.secnetix.de/olli/Python/list_comprehensions.hawk
noprimes = [j for i in range(2, 8) for j in range(i*2, 50, i)]
difference = [x for x in range(2, 50) if x not in noprimes]
# print(difference)

# Trial-division version. The range starts at 2: the previous range(1, 51)
# wrongly classified 1 as prime (it has no candidate divisors in
# range(2, ...)), but 1 is not a prime number.
primes = [x for x in range(2, 51)
          if not any(x % y == 0 for y in range(2, int(math.sqrt(x) + 1)))]
# print(primes)
| 35.5 | 105 | 0.692488 | import math
noprimes = [j for i in range(2, 8) for j in range(i*2, 50, i)]
difference = [x for x in range(2, 50) if x not in noprimes]
primes = [x for x in range(1, 51) if not any([y for y in range(2, int(math.sqrt(x) + 1)) if x % y == 0])]
| true | true |
f719bcfdda7fd95388f3a3f5283d672ebcdb37cb | 5,859 | py | Python | apps/translations/tests/test_helpers.py | Joergen/olympia | eb84203469adbb6584e50d7bb6f9de7f20980dac | [
"BSD-3-Clause"
] | 1 | 2015-10-29T06:55:20.000Z | 2015-10-29T06:55:20.000Z | apps/translations/tests/test_helpers.py | magopian/olympia | 70cad15111a89e3d5c715cbade8925b12d1b98dc | [
"BSD-3-Clause"
] | null | null | null | apps/translations/tests/test_helpers.py | magopian/olympia | 70cad15111a89e3d5c715cbade8925b12d1b98dc | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.utils import translation
import jingo
import pytest
from mock import Mock, patch
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Addon
from translations import helpers
from translations.fields import save_signal
from translations.models import PurifiedTranslation
from translations.tests.testapp.models import TranslatedModel
pytestmark = pytest.mark.django_db
def super():
jingo.load_helpers()
def test_locale_html():
"""Test HTML attributes for languages different than the site language"""
testfield = Mock()
# same language: no need for attributes
this_lang = translation.get_language()
testfield.locale = this_lang
s = helpers.locale_html(testfield)
assert not s, 'no special HTML attributes for site language'
# non-rtl language
testfield.locale = 'de'
s = helpers.locale_html(testfield)
eq_(s, ' lang="de" dir="ltr"')
# rtl language
for lang in settings.RTL_LANGUAGES:
testfield.locale = lang
s = helpers.locale_html(testfield)
eq_(s, ' lang="%s" dir="rtl"' % testfield.locale)
def test_locale_html_xss():
"""Test for nastiness-removal in the transfield's locale"""
testfield = Mock()
# same language: no need for attributes
testfield.locale = '<script>alert(1)</script>'
s = helpers.locale_html(testfield)
assert '<script>' not in s
assert '<script>alert(1)</script>' in s
def test_empty_locale_html():
"""locale_html must still work if field is None."""
s = helpers.locale_html(None)
assert not s, 'locale_html on None must be empty.'
def test_truncate_purified_field():
s = '<i>one</i><i>two</i>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(6) }}').render({'s': t})
eq_(actual, s)
def test_truncate_purified_field_xss():
"""Truncating should not introduce xss issues."""
s = 'safe <script>alert("omg")</script>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(100) }}').render({'s': t})
eq_(actual, 'safe <script>alert("omg")</script>')
actual = jingo.env.from_string('{{ s|truncate(5) }}').render({'s': t})
eq_(actual, 'safe ...')
def test_clean():
# Links are not mangled, bad HTML is escaped, newlines are slimmed.
s = '<ul><li><a href="#woo">\n\nyeah</a></li>\n\n<li><script></li></ul>'
eq_(helpers.clean(s),
'<ul><li><a href="#woo">\n\nyeah</a></li><li><script></li></ul>')
def test_clean_in_template():
s = '<a href="#woo">yeah</a>'
eq_(jingo.env.from_string('{{ s|clean }}').render({'s': s}), s)
def test_no_links():
s = 'a <a href="http://url.link">http://example.com</a>, http://text.link'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}),
'a http://example.com, http://text.link')
# Bad markup.
s = '<http://bad.markup.com'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}), '')
# Bad markup.
s = 'some text <http://bad.markup.com'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}),
'some text')
def test_l10n_menu():
# No remove_locale_url provided.
menu = helpers.l10n_menu({})
assert 'data-rm-locale=""' in menu, menu
# Specific remove_locale_url provided (eg for user).
menu = helpers.l10n_menu({}, remove_locale_url='/some/url/')
assert 'data-rm-locale="/some/url/"' in menu, menu
# Use the remove_locale_url taken from the addon in the context.
menu = helpers.l10n_menu({'addon': Addon()},
remove_locale_url='some/url/')
assert 'data-rm-locale="/developers/addon/None/rmlocale"' in menu, menu
@patch.object(settings, 'AMO_LANGUAGES', ('de', 'en-US', 'es', 'fr', 'pt-BR'))
class TestAllLocales(amo.tests.TestCase):
def test_all_locales_none(self):
addon = None
field_name = 'description'
eq_(helpers.all_locales(addon, field_name), None)
addon = Mock()
field_name = 'description'
del addon.description
eq_(helpers.all_locales(addon, field_name), None)
def test_all_locales(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': 'Spoon'
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr">Spoon</span>' in result
def test_all_locales_empty(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': ''
}
# Pretend the TranslateModel instance was saved to force Translation
# objects to be saved.
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr"></span>' in result
result = helpers.all_locales(obj, 'description', prettify_empty=True)
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span class="empty" lang="fr">None</span>' in result
| 33.672414 | 79 | 0.635774 | from django.conf import settings
from django.utils import translation
import jingo
import pytest
from mock import Mock, patch
from nose.tools import eq_
import amo
import amo.tests
from addons.models import Addon
from translations import helpers
from translations.fields import save_signal
from translations.models import PurifiedTranslation
from translations.tests.testapp.models import TranslatedModel
pytestmark = pytest.mark.django_db
def super():
jingo.load_helpers()
def test_locale_html():
testfield = Mock()
this_lang = translation.get_language()
testfield.locale = this_lang
s = helpers.locale_html(testfield)
assert not s, 'no special HTML attributes for site language'
testfield.locale = 'de'
s = helpers.locale_html(testfield)
eq_(s, ' lang="de" dir="ltr"')
for lang in settings.RTL_LANGUAGES:
testfield.locale = lang
s = helpers.locale_html(testfield)
eq_(s, ' lang="%s" dir="rtl"' % testfield.locale)
def test_locale_html_xss():
testfield = Mock()
testfield.locale = '<script>alert(1)</script>'
s = helpers.locale_html(testfield)
assert '<script>' not in s
assert '<script>alert(1)</script>' in s
def test_empty_locale_html():
s = helpers.locale_html(None)
assert not s, 'locale_html on None must be empty.'
def test_truncate_purified_field():
s = '<i>one</i><i>two</i>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(6) }}').render({'s': t})
eq_(actual, s)
def test_truncate_purified_field_xss():
s = 'safe <script>alert("omg")</script>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(100) }}').render({'s': t})
eq_(actual, 'safe <script>alert("omg")</script>')
actual = jingo.env.from_string('{{ s|truncate(5) }}').render({'s': t})
eq_(actual, 'safe ...')
def test_clean():
s = '<ul><li><a href="#woo">\n\nyeah</a></li>\n\n<li><script></li></ul>'
eq_(helpers.clean(s),
'<ul><li><a href="#woo">\n\nyeah</a></li><li><script></li></ul>')
def test_clean_in_template():
s = '<a href="#woo">yeah</a>'
eq_(jingo.env.from_string('{{ s|clean }}').render({'s': s}), s)
def test_no_links():
s = 'a <a href="http://url.link">http://example.com</a>, http://text.link'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}),
'a http://example.com, http://text.link')
s = '<http://bad.markup.com'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}), '')
s = 'some text <http://bad.markup.com'
eq_(jingo.env.from_string('{{ s|no_links }}').render({'s': s}),
'some text')
def test_l10n_menu():
menu = helpers.l10n_menu({})
assert 'data-rm-locale=""' in menu, menu
menu = helpers.l10n_menu({}, remove_locale_url='/some/url/')
assert 'data-rm-locale="/some/url/"' in menu, menu
menu = helpers.l10n_menu({'addon': Addon()},
remove_locale_url='some/url/')
assert 'data-rm-locale="/developers/addon/None/rmlocale"' in menu, menu
@patch.object(settings, 'AMO_LANGUAGES', ('de', 'en-US', 'es', 'fr', 'pt-BR'))
class TestAllLocales(amo.tests.TestCase):
def test_all_locales_none(self):
addon = None
field_name = 'description'
eq_(helpers.all_locales(addon, field_name), None)
addon = Mock()
field_name = 'description'
del addon.description
eq_(helpers.all_locales(addon, field_name), None)
def test_all_locales(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': 'Spoon'
}
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr">Spoon</span>' in result
def test_all_locales_empty(self):
obj = TranslatedModel()
obj.description = {
'en-US': 'There',
'es': 'Is No',
'fr': ''
}
save_signal(sender=TranslatedModel, instance=obj)
result = helpers.all_locales(obj, 'description')
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span lang="fr"></span>' in result
result = helpers.all_locales(obj, 'description', prettify_empty=True)
assert u'<div class="trans" data-name="description">' in result
assert u'<span lang="en-us">There</span>' in result
assert u'<span lang="es">Is No</span>' in result
assert u'<span class="empty" lang="fr">None</span>' in result
| true | true |
f719bd0e61d8fc8ee4756b2db46ad0dfa8dfa39d | 6,499 | py | Python | twisted/test/test_text.py | sxamit/twisted | 30f6966329c857c3631c60aeb420d84d7828e01e | [
"MIT",
"Unlicense"
] | 1 | 2017-08-07T14:52:02.000Z | 2017-08-07T14:52:02.000Z | Lib/site-packages/twisted/test/test_text.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/twisted/test/test_text.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | 1 | 2018-11-07T12:52:07.000Z | 2018-11-07T12:52:07.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.text}.
"""
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python import text
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
class WrapTests(unittest.TestCase):
    """
    Tests for L{text.wordWrap}.
    """
    def setUp(self):
        self.lineWidth = 72
        self.sampleSplitText = sampleText.split()
        self.output = text.wordWrap(sampleText, self.lineWidth)

    def _wrappedWords(self):
        # All words of the wrapped output, in order.
        return [word for line in self.output for word in line.split()]

    def test_wordCount(self):
        """
        Wrapping preserves the number of words.
        """
        self.assertEqual(len(self._wrappedWords()),
                         len(self.sampleSplitText))

    def test_wordMatch(self):
        """
        Wrapping preserves the words themselves, in order.
        """
        # Using assertEqual here prints out some rather too long lists.
        self.assertTrue(self.sampleSplitText == self._wrappedWords())

    def test_lineLength(self):
        """
        No wrapped line exceeds the requested width.
        """
        failures = [len(line) for line in self.output
                    if len(line) > self.lineWidth]
        if failures:
            self.fail("%d of %d lines were too long.\n"
                      "%d < %s" % (len(failures), len(self.output),
                                   self.lineWidth, failures))

    def test_doubleNewline(self):
        """
        Allow paragraphs delimited by two \ns.
        """
        result = text.wordWrap("et\n\nphone\nhome.", self.lineWidth)
        self.assertEqual(result, ["et", "", "phone home.", ""])
class LineTests(unittest.TestCase):
    """
    Tests for L{isMultiline} and L{endsInNewline}.
    """
    def test_isMultiline(self):
        """
        L{text.isMultiline} returns C{True} iff the string contains a newline.
        """
        self.assertTrue(text.isMultiline('This code\n "breaks."'))
        self.assertFalse(text.isMultiline('This code does not "break."'))

    def test_endsInNewline(self):
        """
        L{text.endsInNewline} returns C{True} iff the string ends in a newline.
        """
        self.assertTrue(text.endsInNewline('newline\n'))
        self.assertFalse(text.endsInNewline('oldline'))
class StringyStringTests(unittest.TestCase):
    """
    Tests for L{text.stringyString}.
    """
    def test_tuple(self):
        """
        Tuple elements are displayed on separate lines.
        """
        self.assertEqual(text.stringyString(('a', 'b')), '(a,\n b,)\n')

    def test_dict(self):
        """
        Dict elements are displayed using C{str()}.
        """
        self.assertEqual(text.stringyString({'a': 0}), '{a: 0}')

    def test_list(self):
        """
        List elements are displayed on separate lines using C{str()}.
        """
        self.assertEqual(text.stringyString(['a', 'b']), '[a,\n b,]\n')
class SplitTests(unittest.TestCase):
    """
    Tests for L{text.splitQuoted}.
    """
    def test_oneWord(self):
        """
        Splitting strings with one-word phrases.
        """
        self.assertEqual(text.splitQuoted('This code "works."'),
                         ['This', 'code', 'works.'])

    def test_multiWord(self):
        """
        A quoted multi-word phrase stays a single token.
        """
        self.assertEqual(text.splitQuoted('The "hairy monkey" likes pie.'),
                         ['The', 'hairy monkey', 'likes', 'pie.'])

    # Some of the many tests that would fail:
    #def test_preserveWhitespace(self):
    #    phrase = '"MANY     SPACES"'
    #    s = 'With %s between.' % (phrase,)
    #    r = text.splitQuoted(s)
    #    self.assertEqual(['With', phrase, 'between.'], r)
    #def test_escapedSpace(self):
    #    s = r"One\ Phrase"
    #    r = text.splitQuoted(s)
    #    self.assertEqual(["One Phrase"], r)
class StrFileTests(unittest.TestCase):
    """
    Tests for L{text.strFile}, which reports whether a string occurs in a
    file-like object. The third argument appears to toggle case
    sensitivity (False -> case-insensitive; see test_insensitive) --
    confirm against L{twisted.python.text.strFile}.
    """
    def setUp(self):
        # A fresh in-memory "file" is created for every test.
        self.io = StringIO("this is a test string")
    def tearDown(self):
        pass
    # Single-character probes.
    def test_1_f(self):
        self.assertEqual(False, text.strFile("x", self.io))
    def test_1_1(self):
        self.assertEqual(True, text.strFile("t", self.io))
    def test_1_2(self):
        self.assertEqual(True, text.strFile("h", self.io))
    def test_1_3(self):
        self.assertEqual(True, text.strFile("i", self.io))
    def test_1_4(self):
        self.assertEqual(True, text.strFile("s", self.io))
    def test_1_5(self):
        self.assertEqual(True, text.strFile("n", self.io))
    def test_1_6(self):
        self.assertEqual(True, text.strFile("g", self.io))
    # Three-character probes, including ones spanning a word boundary.
    def test_3_1(self):
        self.assertEqual(True, text.strFile("thi", self.io))
    def test_3_2(self):
        self.assertEqual(True, text.strFile("his", self.io))
    def test_3_3(self):
        self.assertEqual(True, text.strFile("is ", self.io))
    def test_3_4(self):
        self.assertEqual(True, text.strFile("ing", self.io))
    def test_3_f(self):
        self.assertEqual(False, text.strFile("bla", self.io))
    # Long probes, up to and beyond the length of the file itself.
    def test_large_1(self):
        self.assertEqual(True, text.strFile("this is a test", self.io))
    def test_large_2(self):
        self.assertEqual(True, text.strFile("is a test string", self.io))
    def test_large_f(self):
        self.assertEqual(False, text.strFile("ds jhfsa k fdas", self.io))
    def test_overlarge_f(self):
        self.assertEqual(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io))
    def test_self(self):
        self.assertEqual(True, text.strFile("this is a test string", self.io))
    def test_insensitive(self):
        self.assertEqual(True, text.strFile("ThIs is A test STRING", self.io, False))
| 26.744856 | 103 | 0.59086 |
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python import text
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
class WrapTests(unittest.TestCase):
    """Tests for text.wordWrap()."""

    def setUp(self):
        self.lineWidth = 72
        self.sampleSplitText = sampleText.split()
        self.output = text.wordWrap(sampleText, self.lineWidth)

    def test_wordCount(self):
        """Wrapping preserves the total number of words."""
        words = [w for line in self.output for w in line.split()]
        self.assertEqual(len(words), len(self.sampleSplitText))

    def test_wordMatch(self):
        """Wrapping preserves the words themselves, in order."""
        words = [w for line in self.output for w in line.split()]
        # assertEqual (not assertTrue on ==) so a failure shows both lists.
        self.assertEqual(self.sampleSplitText, words)

    def test_lineLength(self):
        """No wrapped line may exceed the requested width."""
        failures = [len(line) for line in self.output
                    if len(line) > self.lineWidth]
        if failures:
            self.fail("%d of %d lines were too long.\n"
                      "%d < %s" % (len(failures), len(self.output),
                                   self.lineWidth, failures))

    def test_doubleNewline(self):
        """A blank line in the input yields a paragraph break in the output."""
        sampleText = "et\n\nphone\nhome."
        result = text.wordWrap(sampleText, self.lineWidth)
        self.assertEqual(result, ["et", "", "phone home.", ""])
class LineTests(unittest.TestCase):
    """Tests for the line-inspection helpers in twisted.python.text."""

    def test_isMultiline(self):
        """isMultiline() is true iff the string contains a newline."""
        multi = 'This code\n "breaks."'
        self.assertTrue(text.isMultiline(multi))
        single = 'This code does not "break."'
        self.assertFalse(text.isMultiline(single))

    def test_endsInNewline(self):
        """endsInNewline() is true iff the string ends with a newline."""
        terminated = 'newline\n'
        self.assertTrue(text.endsInNewline(terminated))
        unterminated = 'oldline'
        self.assertFalse(text.endsInNewline(unterminated))
class StringyStringTests(unittest.TestCase):
    """Tests for text.stringyString() over the basic container types."""

    def test_tuple(self):
        """Tuples render with parentheses and trailing commas."""
        rendered = text.stringyString(('a', 'b'))
        self.assertEqual(rendered, '(a,\n b,)\n')

    def test_dict(self):
        """Dicts render as brace-wrapped key/value pairs."""
        rendered = text.stringyString({'a': 0})
        self.assertEqual(rendered, '{a: 0}')

    def test_list(self):
        """Lists render with brackets and trailing commas."""
        rendered = text.stringyString(['a', 'b'])
        self.assertEqual(rendered, '[a,\n b,]\n')
class SplitTests(unittest.TestCase):
    """Tests for text.splitQuoted()."""

    def test_oneWord(self):
        """Quotes around a single word are stripped."""
        result = text.splitQuoted('This code "works."')
        self.assertEqual(['This', 'code', 'works.'], result)

    def test_multiWord(self):
        """A quoted phrase stays together as one token."""
        result = text.splitQuoted('The "hairy monkey" likes pie.')
        self.assertEqual(['The', 'hairy monkey', 'likes', 'pie.'], result)
class StrFileTests(unittest.TestCase):
    """Tests for text.strFile(): substring search over a file-like object."""

    def setUp(self):
        # The haystack every test searches; recreated per test so the
        # file position never leaks between tests.
        self.io = StringIO("this is a test string")

    def tearDown(self):
        pass

    # --- single-character needles ---
    def test_1_f(self):
        # "x" never occurs in the haystack.
        self.assertEqual(False, text.strFile("x", self.io))
    def test_1_1(self):
        self.assertEqual(True, text.strFile("t", self.io))
    def test_1_2(self):
        self.assertEqual(True, text.strFile("h", self.io))
    def test_1_3(self):
        self.assertEqual(True, text.strFile("i", self.io))
    def test_1_4(self):
        self.assertEqual(True, text.strFile("s", self.io))
    def test_1_5(self):
        self.assertEqual(True, text.strFile("n", self.io))
    def test_1_6(self):
        self.assertEqual(True, text.strFile("g", self.io))

    # --- three-character needles ---
    def test_3_1(self):
        self.assertEqual(True, text.strFile("thi", self.io))
    def test_3_2(self):
        self.assertEqual(True, text.strFile("his", self.io))
    def test_3_3(self):
        self.assertEqual(True, text.strFile("is ", self.io))
    def test_3_4(self):
        self.assertEqual(True, text.strFile("ing", self.io))
    def test_3_f(self):
        self.assertEqual(False, text.strFile("bla", self.io))

    # --- needles comparable to, or longer than, the haystack ---
    def test_large_1(self):
        self.assertEqual(True, text.strFile("this is a test", self.io))
    def test_large_2(self):
        self.assertEqual(True, text.strFile("is a test string", self.io))
    def test_large_f(self):
        self.assertEqual(False, text.strFile("ds jhfsa k fdas", self.io))
    def test_overlarge_f(self):
        # Needle longer than the whole file can never match.
        self.assertEqual(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io))
    def test_self(self):
        # The full file contents match themselves.
        self.assertEqual(True, text.strFile("this is a test string", self.io))
    def test_insensitive(self):
        # Third argument False selects a case-insensitive search.
        self.assertEqual(True, text.strFile("ThIs is A test STRING", self.io, False))
| true | true |
f719bed52604d78cd372c38b0ba41bc4f013d7b2 | 311 | py | Python | routes/show_bp.py | Silve1ra/fyyur | 580562cc592d587c9bed4f080b856664abb9f70d | [
"MIT"
] | 1 | 2021-09-17T11:56:38.000Z | 2021-09-17T11:56:38.000Z | routes/show_bp.py | Silve1ra/fyyur | 580562cc592d587c9bed4f080b856664abb9f70d | [
"MIT"
] | null | null | null | routes/show_bp.py | Silve1ra/fyyur | 580562cc592d587c9bed4f080b856664abb9f70d | [
"MIT"
] | null | null | null | from flask import Blueprint
from controllers.show import shows, create_shows, create_show_submission

# Blueprint grouping the "show" pages; the application registers it
# elsewhere, so only route wiring happens in this module.
show_bp = Blueprint('show_bp', __name__)

# route() is applied as a plain call (decorator form) because the view
# functions are imported, not defined in this file.
show_bp.route('/', methods=['GET'])(shows)
show_bp.route('/create', methods=['GET'])(create_shows)
show_bp.route('/create', methods=['POST'])(create_show_submission)
| 31.1 | 72 | 0.762058 | from flask import Blueprint
from controllers.show import shows, create_shows, create_show_submission

# Blueprint collecting the show-related routes; registered by the app.
show_bp = Blueprint('show_bp', __name__)

# Bind each imported view function to its URL rule.  add_url_rule() with
# the endpoint set to the function name is exactly what route()(func)
# expands to internally, so behavior is unchanged.
show_bp.add_url_rule('/', 'shows', shows, methods=['GET'])
show_bp.add_url_rule('/create', 'create_shows', create_shows, methods=['GET'])
show_bp.add_url_rule('/create', 'create_show_submission', create_show_submission, methods=['POST'])
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.