code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import os.path
from unittest import TestCase
import numpy as np
import pandas as pd
from ipycli.notebookmanager import NotebookManager
from IPython.utils.tempdir import TemporaryDirectory
class TestCLI(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_new_notebook(self):
with TemporaryDirectory() as td:
km = NotebookManager(notebook_dir=td)
filename = 'Untitled0.ipynb'
filepath = os.path.join(td, filename)
notebook_id = km.new_notebook(ndir=td)
assert os.path.isfile(filepath)
# Now make sure path mapping works
assert km.path_mapping[notebook_id] == filepath
assert km.find_path(notebook_id) == filepath
def test_new_notebook_name(self):
with TemporaryDirectory() as td:
km = NotebookManager(notebook_dir=td)
filename = 'new_test.ipynb'
filepath = os.path.join(td, filename)
notebook_id = km.new_notebook(ndir=td, name=filename)
assert os.path.isfile(filepath)
# Now make sure path mapping works
assert km.path_mapping[notebook_id] == filepath
assert km.find_path(notebook_id) == filepath
assert filepath in km.pathed_notebooks.values()
def test_notebook_list(self):
with TemporaryDirectory() as td:
km = NotebookManager(notebook_dir=td)
filename = 'new_test.ipynb'
filepath = os.path.join(td, filename)
notebook_id = km.new_notebook(ndir=td, name=filename)
n = {'name':filepath, 'notebook_id':notebook_id}
correct = []
correct.append(n)
nlist = km.list_notebooks()
assert nlist[0]['name'] == correct[0]['name']
assert nlist[0]['notebook_id'] == correct[0]['notebook_id']
def test_delete_notebook(self):
with TemporaryDirectory() as td:
km = NotebookManager(notebook_dir=td)
filename = 'new_test.ipynb'
filepath = os.path.join(td, filename)
notebook_id = km.new_notebook(ndir=td, name=filename)
assert os.path.isfile(filepath)
# Now make sure path mapping works
assert km.path_mapping[notebook_id] == filepath
assert km.find_path(notebook_id) == filepath
assert notebook_id in km.mapping
assert notebook_id in km.path_mapping
assert notebook_id in km.rev_mapping.values()
km.delete_notebook(notebook_id)
assert notebook_id not in km.mapping
assert notebook_id not in km.path_mapping
assert notebook_id not in km.rev_mapping.values()
assert not os.path.isfile(filepath)
def test_existing_notebook(self):
# Create a dir with notebooks
td = TemporaryDirectory()
ndir = td.__enter__()
km = NotebookManager(notebook_dir=ndir)
filename = 'new_test.ipynb'
filepath = os.path.join(ndir, filename)
notebook_id = km.new_notebook(ndir=ndir, name=filename)
td2 = TemporaryDirectory()
ndir2 = td2.__enter__()
nbm = NotebookManager(notebook_dir=ndir2)
assert nbm.notebook_dir != km.notebook_dir
assert filepath not in nbm.path_mapping.values()
assert filepath not in nbm.pathed_notebooks.values()
nbm.get_pathed_notebook(filepath)
assert nbm.path_mapping.values()[0] == filepath
assert filepath in nbm.pathed_notebooks.values()
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False) | ipycli/tests/test_cli.py | import os.path
from unittest import TestCase
import numpy as np
import pandas as pd
from ipycli.notebookmanager import NotebookManager
from IPython.utils.tempdir import TemporaryDirectory
class TestCLI(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_new_notebook(self):
with TemporaryDirectory() as td:
km = NotebookManager(notebook_dir=td)
filename = 'Untitled0.ipynb'
filepath = os.path.join(td, filename)
notebook_id = km.new_notebook(ndir=td)
assert os.path.isfile(filepath)
# Now make sure path mapping works
assert km.path_mapping[notebook_id] == filepath
assert km.find_path(notebook_id) == filepath
def test_new_notebook_name(self):
with TemporaryDirectory() as td:
km = NotebookManager(notebook_dir=td)
filename = 'new_test.ipynb'
filepath = os.path.join(td, filename)
notebook_id = km.new_notebook(ndir=td, name=filename)
assert os.path.isfile(filepath)
# Now make sure path mapping works
assert km.path_mapping[notebook_id] == filepath
assert km.find_path(notebook_id) == filepath
assert filepath in km.pathed_notebooks.values()
def test_notebook_list(self):
with TemporaryDirectory() as td:
km = NotebookManager(notebook_dir=td)
filename = 'new_test.ipynb'
filepath = os.path.join(td, filename)
notebook_id = km.new_notebook(ndir=td, name=filename)
n = {'name':filepath, 'notebook_id':notebook_id}
correct = []
correct.append(n)
nlist = km.list_notebooks()
assert nlist[0]['name'] == correct[0]['name']
assert nlist[0]['notebook_id'] == correct[0]['notebook_id']
def test_delete_notebook(self):
with TemporaryDirectory() as td:
km = NotebookManager(notebook_dir=td)
filename = 'new_test.ipynb'
filepath = os.path.join(td, filename)
notebook_id = km.new_notebook(ndir=td, name=filename)
assert os.path.isfile(filepath)
# Now make sure path mapping works
assert km.path_mapping[notebook_id] == filepath
assert km.find_path(notebook_id) == filepath
assert notebook_id in km.mapping
assert notebook_id in km.path_mapping
assert notebook_id in km.rev_mapping.values()
km.delete_notebook(notebook_id)
assert notebook_id not in km.mapping
assert notebook_id not in km.path_mapping
assert notebook_id not in km.rev_mapping.values()
assert not os.path.isfile(filepath)
def test_existing_notebook(self):
# Create a dir with notebooks
td = TemporaryDirectory()
ndir = td.__enter__()
km = NotebookManager(notebook_dir=ndir)
filename = 'new_test.ipynb'
filepath = os.path.join(ndir, filename)
notebook_id = km.new_notebook(ndir=ndir, name=filename)
td2 = TemporaryDirectory()
ndir2 = td2.__enter__()
nbm = NotebookManager(notebook_dir=ndir2)
assert nbm.notebook_dir != km.notebook_dir
assert filepath not in nbm.path_mapping.values()
assert filepath not in nbm.pathed_notebooks.values()
nbm.get_pathed_notebook(filepath)
assert nbm.path_mapping.values()[0] == filepath
assert filepath in nbm.pathed_notebooks.values()
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False) | 0.377426 | 0.371194 |
from enum import Enum
from dsp_lab import *
class Process(Enum):
DSP_OPEN_FILE = 0
DSP_SAVE_FILE = 1
DSP_PLOT_TIME_DOMAIN = 2
DSP_PLOT_FREQUENCY_DOMAIN = 3
DSP_FILTER = 4
DSP_ROOT_MEAN_SQUARE_ERROR = 5
def banner():
"""
Banner initial
"""
print("-----------------------------------------")
print("Welcome Digital Signal Processing Lab")
print("Universidade Luterana do Brasil 2019/2")
print('')
print("Professor: <NAME>")
print("Alunos: <NAME> e <NAME>")
print("-----------------------------------------")
print("This Project remove interferente signal")
print("-----------------------------------------")
def show_process_menu() -> Process:
"""
Show main menu
:return:
"""
print('\n\r')
for op in Process:
print(f'{op.value} - {op.name}.')
print('Q - EXIT.')
option = input('Option:')
if option is 'Q' or option is 'q':
print("Bye Bye! :)")
exit(0)
return Process(int(option))
def show_filter_type() -> filter_type:
"""
Menu filter type
:return:
"""
print('\n\r')
for op in filter_type:
print(f'{op.value} - {op.name}.')
print('Q - EXIT.')
option = input('Option:')
if option is 'Q' or option is 'q':
return None
return filter_type(int(option))
def file(message=None):
"""
Menu Open File
:param message:
:return:
"""
if message is None:
print("Enter file name:")
else:
print(message)
option = input()
return option
def main():
"""
Main Signal Processing Lab.
"""
# Create instance for dsp lab
dsp = dsp_lab()
# Show main menu
banner()
while True:
try:
# Show menu
opt = show_process_menu()
# Open file
if opt is Process.DSP_OPEN_FILE:
print('Open file')
filename = input('Filename:')
audio_samples, sample_rate, duration = dsp.open(filename=filename)
if audio_samples is not None:
print(f'Audio Number Samples: {len(audio_samples)}')
print(f'Sample Rate: {sample_rate}')
print(f'Duration: {duration}s')
else:
print('Fail open file')
# Save file
elif opt is Process.DSP_SAVE_FILE:
print('Save file')
if len(dsp.audio_samples):
filename = input('Filename:')
dsp.save(filename=filename, audio_samples=dsp.audio_samples)
else:
print('Not samples for save')
# Plot audio time domain
elif opt is Process.DSP_PLOT_TIME_DOMAIN:
print("Print Frequency Domain")
dsp.time_domain(audio_samples)
elif opt is Process.DSP_PLOT_FREQUENCY_DOMAIN:
print('Print Fast Transform Fourier (FFT)')
dsp.frequency_domain(audio_samples)
# Process audio filters
elif opt is Process.DSP_FILTER:
print("Processing filter...")
option = show_filter_type()
filtred_audio = dsp.filter(option)
print('Save file')
if filtred_audio is not None and len(filtred_audio):
filename = input('Filename:')
dsp.save(filename=filename, audio_samples=filtred_audio)
dsp.time_domain(filtred_audio)
dsp.frequency_domain(filtred_audio)
else:
print("Error um process filter")
#Calculate Root mean square
elif opt is Process.DSP_ROOT_MEAN_SQUARE_ERROR:
file_original = file("Filename original:")
file_filtred = file("Filename filtred:")
audio_samples_original, sample_rate, duration = dsp.open(filename=file_original)
audio_samples_filtred, sample_rate, duration = dsp.open(filename=file_filtred)
error = dsp.root_square_mean_error(audio_samples_original, audio_samples_filtred)
print(f'Error: {error}')
print(f'Percentage: {error * 100}%')
except Exception as e:
raise e
if __name__ == '__main__':
main() | signal_processing_lab.py | from enum import Enum
from dsp_lab import *
class Process(Enum):
DSP_OPEN_FILE = 0
DSP_SAVE_FILE = 1
DSP_PLOT_TIME_DOMAIN = 2
DSP_PLOT_FREQUENCY_DOMAIN = 3
DSP_FILTER = 4
DSP_ROOT_MEAN_SQUARE_ERROR = 5
def banner():
"""
Banner initial
"""
print("-----------------------------------------")
print("Welcome Digital Signal Processing Lab")
print("Universidade Luterana do Brasil 2019/2")
print('')
print("Professor: <NAME>")
print("Alunos: <NAME> e <NAME>")
print("-----------------------------------------")
print("This Project remove interferente signal")
print("-----------------------------------------")
def show_process_menu() -> Process:
"""
Show main menu
:return:
"""
print('\n\r')
for op in Process:
print(f'{op.value} - {op.name}.')
print('Q - EXIT.')
option = input('Option:')
if option is 'Q' or option is 'q':
print("Bye Bye! :)")
exit(0)
return Process(int(option))
def show_filter_type() -> filter_type:
"""
Menu filter type
:return:
"""
print('\n\r')
for op in filter_type:
print(f'{op.value} - {op.name}.')
print('Q - EXIT.')
option = input('Option:')
if option is 'Q' or option is 'q':
return None
return filter_type(int(option))
def file(message=None):
"""
Menu Open File
:param message:
:return:
"""
if message is None:
print("Enter file name:")
else:
print(message)
option = input()
return option
def main():
"""
Main Signal Processing Lab.
"""
# Create instance for dsp lab
dsp = dsp_lab()
# Show main menu
banner()
while True:
try:
# Show menu
opt = show_process_menu()
# Open file
if opt is Process.DSP_OPEN_FILE:
print('Open file')
filename = input('Filename:')
audio_samples, sample_rate, duration = dsp.open(filename=filename)
if audio_samples is not None:
print(f'Audio Number Samples: {len(audio_samples)}')
print(f'Sample Rate: {sample_rate}')
print(f'Duration: {duration}s')
else:
print('Fail open file')
# Save file
elif opt is Process.DSP_SAVE_FILE:
print('Save file')
if len(dsp.audio_samples):
filename = input('Filename:')
dsp.save(filename=filename, audio_samples=dsp.audio_samples)
else:
print('Not samples for save')
# Plot audio time domain
elif opt is Process.DSP_PLOT_TIME_DOMAIN:
print("Print Frequency Domain")
dsp.time_domain(audio_samples)
elif opt is Process.DSP_PLOT_FREQUENCY_DOMAIN:
print('Print Fast Transform Fourier (FFT)')
dsp.frequency_domain(audio_samples)
# Process audio filters
elif opt is Process.DSP_FILTER:
print("Processing filter...")
option = show_filter_type()
filtred_audio = dsp.filter(option)
print('Save file')
if filtred_audio is not None and len(filtred_audio):
filename = input('Filename:')
dsp.save(filename=filename, audio_samples=filtred_audio)
dsp.time_domain(filtred_audio)
dsp.frequency_domain(filtred_audio)
else:
print("Error um process filter")
#Calculate Root mean square
elif opt is Process.DSP_ROOT_MEAN_SQUARE_ERROR:
file_original = file("Filename original:")
file_filtred = file("Filename filtred:")
audio_samples_original, sample_rate, duration = dsp.open(filename=file_original)
audio_samples_filtred, sample_rate, duration = dsp.open(filename=file_filtred)
error = dsp.root_square_mean_error(audio_samples_original, audio_samples_filtred)
print(f'Error: {error}')
print(f'Percentage: {error * 100}%')
except Exception as e:
raise e
if __name__ == '__main__':
main() | 0.619586 | 0.143968 |
__version__ = "0.7.0"
from __future__ import print_function
import config
import yaml
import html2text
from os import path as p
from itertools import imap, repeat
from flask import Flask, g, render_template, url_for, Response, request
from flask.ext.bootstrap import Bootstrap
from flask.ext.markdown import Markdown
from flask_weasyprint import HTML, render_pdf
from weasyprint.css import find_stylesheets
from app.tables import TableExtension
from datetime import timedelta, date as d
def _get_styles(app, style_urls):
"""Gets the content of the given list of style URLs."""
styles = []
for style_url in style_urls:
with app.test_client() as c:
response = c.get(style_url)
styles.append(response.data)
return styles
def create_app(config_mode=None, config_file=None):
"""Create webapp instance"""
# Flask application
app = Flask(__name__)
Bootstrap(app)
md = Markdown(app, extensions=['toc'])
md.register_extension(TableExtension)
if config_mode:
app.config.from_object(getattr(config, config_mode))
elif config_file:
app.config.from_pyfile(config_file)
else:
app.config.from_envvar('APP_SETTINGS', silent=True)
table = app.config['TABLE']
@app.before_request
def before_request():
# set g variables
stream = file(app.config['INFO_PATH'], 'r')
[setattr(g, k, v) for k, v in yaml.safe_load(stream).items()]
g.site = app.config['SITE']
g.valid_until = (d.today() + timedelta(days=g.days_valid)).strftime(
"%B %d, %Y")
# Views
@app.route('/<style>/')
@app.route('/<style>/<source>/')
def index(style, source=None):
source = source or request.args.get('source')
if source:
parent = p.dirname(p.dirname(__file__))
path = p.join(parent, source)
stream = file(path, 'r')
items = yaml.safe_load(stream).items()
[setattr(g, k, v) for k, v in items]
return render_template('%s.html' % style).replace('<table>', table)
@app.route('/render/<style>/')
@app.route('/render/<style>/<otype>/')
def render(style, otype=None):
otype = otype or request.args.get('type', 'html')
source = request.args.get('source')
if source:
parent = p.dirname(p.dirname(__file__))
path = p.join(parent, source)
stream = file(path, 'r')
items = yaml.safe_load(stream).items()
[setattr(g, k, v) for k, v in items]
if otype.startswith('html'):
html = render_template('%s.html' % style).replace('<table>', table)
html_doc = HTML(string=html)
stylesheets = find_stylesheets(
html_doc.root_element,
html_doc.media_type,
html_doc.url_fetcher,
)
urls = [sheet.base_url for sheet in stylesheets]
style_urls = filter(lambda x: x.endswith('css'), urls)
styles = _get_styles(app, style_urls)
kwargs = {'styles': styles}
if source:
[setattr(g, k, v) for k, v in items]
return render_template('%s.html' % style, **kwargs).replace(
'<table>', table)
elif otype.startswith('md'):
h = html2text.HTML2Text()
# h.ignore_links = True
h.ignore_emphasis = True
h.body_width = 65
return h.handle(render_template('%s.html' % style))
elif otype.startswith('pdf'):
kwargs = {'to_print': True}
return render_pdf(url_for('index', style=style))
elif otype.startswith('png'):
kwargs = {'to_print': True}
html = render_template('%s.html' % style, **kwargs).replace(
'<table>', table)
html_doc = HTML(string=html)
return Response(html_doc.write_png(), mimetype='image/png')
else:
pass
return app | app/__init__.py | __version__ = "0.7.0"
from __future__ import print_function
import config
import yaml
import html2text
from os import path as p
from itertools import imap, repeat
from flask import Flask, g, render_template, url_for, Response, request
from flask.ext.bootstrap import Bootstrap
from flask.ext.markdown import Markdown
from flask_weasyprint import HTML, render_pdf
from weasyprint.css import find_stylesheets
from app.tables import TableExtension
from datetime import timedelta, date as d
def _get_styles(app, style_urls):
"""Gets the content of the given list of style URLs."""
styles = []
for style_url in style_urls:
with app.test_client() as c:
response = c.get(style_url)
styles.append(response.data)
return styles
def create_app(config_mode=None, config_file=None):
"""Create webapp instance"""
# Flask application
app = Flask(__name__)
Bootstrap(app)
md = Markdown(app, extensions=['toc'])
md.register_extension(TableExtension)
if config_mode:
app.config.from_object(getattr(config, config_mode))
elif config_file:
app.config.from_pyfile(config_file)
else:
app.config.from_envvar('APP_SETTINGS', silent=True)
table = app.config['TABLE']
@app.before_request
def before_request():
# set g variables
stream = file(app.config['INFO_PATH'], 'r')
[setattr(g, k, v) for k, v in yaml.safe_load(stream).items()]
g.site = app.config['SITE']
g.valid_until = (d.today() + timedelta(days=g.days_valid)).strftime(
"%B %d, %Y")
# Views
@app.route('/<style>/')
@app.route('/<style>/<source>/')
def index(style, source=None):
source = source or request.args.get('source')
if source:
parent = p.dirname(p.dirname(__file__))
path = p.join(parent, source)
stream = file(path, 'r')
items = yaml.safe_load(stream).items()
[setattr(g, k, v) for k, v in items]
return render_template('%s.html' % style).replace('<table>', table)
@app.route('/render/<style>/')
@app.route('/render/<style>/<otype>/')
def render(style, otype=None):
otype = otype or request.args.get('type', 'html')
source = request.args.get('source')
if source:
parent = p.dirname(p.dirname(__file__))
path = p.join(parent, source)
stream = file(path, 'r')
items = yaml.safe_load(stream).items()
[setattr(g, k, v) for k, v in items]
if otype.startswith('html'):
html = render_template('%s.html' % style).replace('<table>', table)
html_doc = HTML(string=html)
stylesheets = find_stylesheets(
html_doc.root_element,
html_doc.media_type,
html_doc.url_fetcher,
)
urls = [sheet.base_url for sheet in stylesheets]
style_urls = filter(lambda x: x.endswith('css'), urls)
styles = _get_styles(app, style_urls)
kwargs = {'styles': styles}
if source:
[setattr(g, k, v) for k, v in items]
return render_template('%s.html' % style, **kwargs).replace(
'<table>', table)
elif otype.startswith('md'):
h = html2text.HTML2Text()
# h.ignore_links = True
h.ignore_emphasis = True
h.body_width = 65
return h.handle(render_template('%s.html' % style))
elif otype.startswith('pdf'):
kwargs = {'to_print': True}
return render_pdf(url_for('index', style=style))
elif otype.startswith('png'):
kwargs = {'to_print': True}
html = render_template('%s.html' % style, **kwargs).replace(
'<table>', table)
html_doc = HTML(string=html)
return Response(html_doc.write_png(), mimetype='image/png')
else:
pass
return app | 0.357568 | 0.055183 |
import gtk
import gobject
import tempfile
import os
from plugins import get_plugin_by_type
from file_chooser_dlg import File_Chooser, FILE_CHOOSER_TYPE_FILE
from camera import Camera, Camera_Exception, DEFAULT_RESOLUTION
from support import warning, debug
from ossupport import xclose, xremove
from proximateprotocol import PLUGIN_TYPE_NOTIFICATION, MAX_FACE_DIMENSION, \
TP_FACE_SIZE
from guiutils import scale_image, compress_jpeg
class Picture_Choose_Dialog:
""" This class is used for previewing and selecting the profile picture.
Uses File_Chooser to select the picture. """
def __init__(self, gui, got_picture_cb):
self.notify = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION).notify
self.filename = None
self.gui = gui
self.tempfile = None # file to be removed when dialog is closed
self.got_picture_cb = got_picture_cb
self.dialog = gtk.Dialog("Select Profile Picture",
gui.get_main_window(),
gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL,
(gtk.STOCK_OK, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
self.dialog.set_border_width(5)
self.dialog.vbox.set_spacing(2)
self.dialog.action_area.set_layout(gtk.BUTTONBOX_END)
self.dialog.set_position(gtk.WIN_POS_CENTER)
self.initialize_widgets()
self.dialog.connect("response", self.response_handler)
self.dialog.connect("delete-event", self.dialog_deleted)
def initialize_widgets(self):
self.profile_image = gtk.Image()
self.profile_image.set_size_request(300, 300)
self.profile_image.set_from_stock(gtk.STOCK_ORIENTATION_PORTRAIT, 4)
self.browse_button = gtk.Button("Browse")
self.take_photo = gtk.Button("Take photo")
self.clear_image = gtk.Button('Clear image')
self.vbox1 = gtk.VBox()
self.vbox1.pack_start(self.profile_image)
self.vbox1.pack_start(self.browse_button, False, True)
self.vbox1.pack_start(self.take_photo, False, True)
self.vbox1.pack_start(self.clear_image, False, True)
self.dialog.vbox.pack_start(self.vbox1)
self.browse_button.connect("clicked", self.browse_button_clicked)
self.take_photo.connect("clicked", self.take_photo_clicked)
self.clear_image.connect('clicked', self.clear_image_clicked)
def response_handler(self, widget, response_id, *args):
""" Handles dialog responses """
if response_id == gtk.RESPONSE_OK:
self.got_picture_cb(self.filename)
self.dialog.hide()
return True
def dialog_deleted(self, dialog, event):
return True
def show(self):
self.dialog.show_all()
def close(self):
self.remove_temp()
self.dialog.destroy()
def browse_button_clicked(self, widget):
file_dlg = File_Chooser(self.gui.main_window, FILE_CHOOSER_TYPE_FILE, False, self.browse_chooser_cb)
file_dlg.add_supported_pixbuf_formats()
#self.dialog.hide()
def browse_chooser_cb(self, filename, ctx):
#self.dialog.show()
if filename == None:
return
# checking if we have to scale the picture down
# also checking if it even is a picture
try:
pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
except gobject.GError:
self.notify("Error: Invalid image file", True)
return
larger_dimension = max((pixbuf.get_width(), pixbuf.get_height()))
if os.path.getsize(filename) <= TP_FACE_SIZE and \
larger_dimension <= MAX_FACE_DIMENSION:
# use the picture directly without recompression
self.remove_temp()
self.set_picture(filename)
else:
# need to recompress the picture
pixbuf = scale_image(pixbuf, MAX_FACE_DIMENSION)
if not self.compress_jpeg(pixbuf):
self.notify("Error: Unable to compress JPEG picture", True)
def remove_temp(self):
if self.tempfile != None:
if not xremove(self.tempfile):
warning("Unable to remove a scaled picture\n")
self.tempfile = None
def take_photo_clicked(self, widget):
self.camera_dialog = Camera_Dialog(self.dialog, DEFAULT_RESOLUTION,
self.got_photo)
def got_photo(self, pixbuf):
if pixbuf:
pixbuf = scale_image(pixbuf, MAX_FACE_DIMENSION)
if not self.compress_jpeg(pixbuf):
self.notify("Error: Unable to compress JPEG picture", True)
self.camera_dialog = None
def clear_image_clicked(self, widget):
self.remove_temp()
self.set_picture(None)
def set_picture(self, fname):
self.filename = fname
self.profile_image.set_from_file(fname)
def compress_jpeg(self, pixbuf):
(fd, filename) = tempfile.mkstemp(prefix = 'proximate-tmp-profile-pic-')
xclose(fd)
if not compress_jpeg(pixbuf, filename, TP_FACE_SIZE):
return False
self.remove_temp()
self.tempfile = filename
self.set_picture(filename)
return True
class Camera_Dialog:
def __init__(self, profile_dialog, resolution, got_photo_cb):
self.cb = got_photo_cb
self.dialog = gtk.Dialog('Camera', profile_dialog,
gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL)
self.dialog.set_has_separator(False)
self.image = gtk.DrawingArea()
self.image.set_size_request(resolution[0], resolution[1])
self.help_text = gtk.Label('Click to take picture')
try:
self.camera = Camera(resolution, self.image)
except Camera_Exception:
debug('profile dialog: Unable to initialize camera\n')
self.camera = None
self.help_text.set_label('No camera found')
self.image_hbox = gtk.HBox()
self.image_hbox.pack_start(gtk.HBox())
self.image_hbox.pack_start(self.image, False, False)
self.image_hbox.pack_start(gtk.HBox())
if self.camera != None:
self.dialog.vbox.pack_start(self.image_hbox)
self.dialog.vbox.pack_start(self.help_text, False, True)
self.close_button = gtk.Button('Close')
self.dialog.vbox.pack_start(self.close_button, False, True)
self.close_button.connect('clicked', self.close_clicked)
self.dialog.connect('response', self.dialog_response)
self.image.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.image.connect('button-press-event', self.image_clicked)
self.dialog.show_all()
def close_clicked(self, widget):
self.close()
def dialog_response(self, widget, response_id):
self.close()
def close(self):
if self.camera:
self.camera.stop()
if self.camera.buffer:
pixbuf = gtk.gdk.pixbuf_new_from_data(self.camera.buffer,
gtk.gdk.COLORSPACE_RGB, False, 8, self.camera.width,
self.camera.height, 3*self.camera.width)
self.cb(pixbuf)
else:
self.cb(None)
self.dialog.destroy()
def image_clicked(self, widget, data=None):
if self.camera:
self.camera.take_photo() | pic_choose_dlg.py | import gtk
import gobject
import tempfile
import os
from plugins import get_plugin_by_type
from file_chooser_dlg import File_Chooser, FILE_CHOOSER_TYPE_FILE
from camera import Camera, Camera_Exception, DEFAULT_RESOLUTION
from support import warning, debug
from ossupport import xclose, xremove
from proximateprotocol import PLUGIN_TYPE_NOTIFICATION, MAX_FACE_DIMENSION, \
TP_FACE_SIZE
from guiutils import scale_image, compress_jpeg
class Picture_Choose_Dialog:
""" This class is used for previewing and selecting the profile picture.
Uses File_Chooser to select the picture. """
def __init__(self, gui, got_picture_cb):
self.notify = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION).notify
self.filename = None
self.gui = gui
self.tempfile = None # file to be removed when dialog is closed
self.got_picture_cb = got_picture_cb
self.dialog = gtk.Dialog("Select Profile Picture",
gui.get_main_window(),
gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL,
(gtk.STOCK_OK, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
self.dialog.set_border_width(5)
self.dialog.vbox.set_spacing(2)
self.dialog.action_area.set_layout(gtk.BUTTONBOX_END)
self.dialog.set_position(gtk.WIN_POS_CENTER)
self.initialize_widgets()
self.dialog.connect("response", self.response_handler)
self.dialog.connect("delete-event", self.dialog_deleted)
def initialize_widgets(self):
self.profile_image = gtk.Image()
self.profile_image.set_size_request(300, 300)
self.profile_image.set_from_stock(gtk.STOCK_ORIENTATION_PORTRAIT, 4)
self.browse_button = gtk.Button("Browse")
self.take_photo = gtk.Button("Take photo")
self.clear_image = gtk.Button('Clear image')
self.vbox1 = gtk.VBox()
self.vbox1.pack_start(self.profile_image)
self.vbox1.pack_start(self.browse_button, False, True)
self.vbox1.pack_start(self.take_photo, False, True)
self.vbox1.pack_start(self.clear_image, False, True)
self.dialog.vbox.pack_start(self.vbox1)
self.browse_button.connect("clicked", self.browse_button_clicked)
self.take_photo.connect("clicked", self.take_photo_clicked)
self.clear_image.connect('clicked', self.clear_image_clicked)
def response_handler(self, widget, response_id, *args):
""" Handles dialog responses """
if response_id == gtk.RESPONSE_OK:
self.got_picture_cb(self.filename)
self.dialog.hide()
return True
def dialog_deleted(self, dialog, event):
return True
def show(self):
self.dialog.show_all()
def close(self):
self.remove_temp()
self.dialog.destroy()
def browse_button_clicked(self, widget):
file_dlg = File_Chooser(self.gui.main_window, FILE_CHOOSER_TYPE_FILE, False, self.browse_chooser_cb)
file_dlg.add_supported_pixbuf_formats()
#self.dialog.hide()
def browse_chooser_cb(self, filename, ctx):
#self.dialog.show()
if filename == None:
return
# checking if we have to scale the picture down
# also checking if it even is a picture
try:
pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
except gobject.GError:
self.notify("Error: Invalid image file", True)
return
larger_dimension = max((pixbuf.get_width(), pixbuf.get_height()))
if os.path.getsize(filename) <= TP_FACE_SIZE and \
larger_dimension <= MAX_FACE_DIMENSION:
# use the picture directly without recompression
self.remove_temp()
self.set_picture(filename)
else:
# need to recompress the picture
pixbuf = scale_image(pixbuf, MAX_FACE_DIMENSION)
if not self.compress_jpeg(pixbuf):
self.notify("Error: Unable to compress JPEG picture", True)
def remove_temp(self):
if self.tempfile != None:
if not xremove(self.tempfile):
warning("Unable to remove a scaled picture\n")
self.tempfile = None
def take_photo_clicked(self, widget):
self.camera_dialog = Camera_Dialog(self.dialog, DEFAULT_RESOLUTION,
self.got_photo)
def got_photo(self, pixbuf):
if pixbuf:
pixbuf = scale_image(pixbuf, MAX_FACE_DIMENSION)
if not self.compress_jpeg(pixbuf):
self.notify("Error: Unable to compress JPEG picture", True)
self.camera_dialog = None
def clear_image_clicked(self, widget):
self.remove_temp()
self.set_picture(None)
def set_picture(self, fname):
self.filename = fname
self.profile_image.set_from_file(fname)
def compress_jpeg(self, pixbuf):
(fd, filename) = tempfile.mkstemp(prefix = 'proximate-tmp-profile-pic-')
xclose(fd)
if not compress_jpeg(pixbuf, filename, TP_FACE_SIZE):
return False
self.remove_temp()
self.tempfile = filename
self.set_picture(filename)
return True
class Camera_Dialog:
def __init__(self, profile_dialog, resolution, got_photo_cb):
self.cb = got_photo_cb
self.dialog = gtk.Dialog('Camera', profile_dialog,
gtk.DIALOG_DESTROY_WITH_PARENT | gtk.DIALOG_MODAL)
self.dialog.set_has_separator(False)
self.image = gtk.DrawingArea()
self.image.set_size_request(resolution[0], resolution[1])
self.help_text = gtk.Label('Click to take picture')
try:
self.camera = Camera(resolution, self.image)
except Camera_Exception:
debug('profile dialog: Unable to initialize camera\n')
self.camera = None
self.help_text.set_label('No camera found')
self.image_hbox = gtk.HBox()
self.image_hbox.pack_start(gtk.HBox())
self.image_hbox.pack_start(self.image, False, False)
self.image_hbox.pack_start(gtk.HBox())
if self.camera != None:
self.dialog.vbox.pack_start(self.image_hbox)
self.dialog.vbox.pack_start(self.help_text, False, True)
self.close_button = gtk.Button('Close')
self.dialog.vbox.pack_start(self.close_button, False, True)
self.close_button.connect('clicked', self.close_clicked)
self.dialog.connect('response', self.dialog_response)
self.image.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.image.connect('button-press-event', self.image_clicked)
self.dialog.show_all()
def close_clicked(self, widget):
self.close()
def dialog_response(self, widget, response_id):
self.close()
def close(self):
if self.camera:
self.camera.stop()
if self.camera.buffer:
pixbuf = gtk.gdk.pixbuf_new_from_data(self.camera.buffer,
gtk.gdk.COLORSPACE_RGB, False, 8, self.camera.width,
self.camera.height, 3*self.camera.width)
self.cb(pixbuf)
else:
self.cb(None)
self.dialog.destroy()
def image_clicked(self, widget, data=None):
if self.camera:
self.camera.take_photo() | 0.381911 | 0.050635 |
import RPi.GPIO as GPIO
import time
import sys
import subprocess, os
import signal
import pygame
from pygame.locals import *
pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
pygame.init() #initialize pygame
pygame.display.init()
GPIO.setmode(GPIO.BCM)
# set up buttons to monitor (compare to boswell.py though!)
quit_button = 17
current_question = 0
change_question = False
GPIO.setup(quit_button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Set up all questions
img_names = []
sound_names = []
qcodes = []
img_names.append("images/john_quinn.png")
sound_names.append("audio/alexa-please_tell_me_about.ogg")
qcodes.append("jquinn1961")
img_names.append("images/judy_wedding_photo.png")
sound_names.append("audio/silence.ogg")
qcodes.append("judywed1961")
img_names.append("images/question01.png")
sound_names.append("audio/willa_question01.ogg")
qcodes.append("worldchange")
img_names.append("images/question03.png")
sound_names.append("audio/silence.ogg")
qcodes.append("neighborhood")
img_names.append("images/question04.png")
sound_names.append("audio/alexa-earliestmemory.ogg")
qcodes.append("earliestmemory")
img_names.append("images/question05.png")
sound_names.append("audio/alexa-bestfriend.ogg")
qcodes.append("bestfriend")
max_question = len(img_names)
print "max_question = " , max_question
# Display logo screen
imgSurf = pygame.image.load ('images/boswell_startup_screen.png')
pygame.mixer.music.load("audio/silence.ogg")
pygame.mouse.set_visible(False)
screen = pygame.display.set_mode ( imgSurf.get_size(), pygame.FULLSCREEN )
screen.blit ( imgSurf, ( 0, 0 ) )
pygame.display.flip()
pygame.mixer.music.play()
rpistr = "sudo python boswell.py"
p=subprocess.Popen(rpistr,shell=True, preexec_fn=os.setsid)
while pygame.mixer.music.get_busy() == True:
continue
while True:
quit_state = GPIO.input(quit_button)
if quit_state == False:
os.killpg(p.pid, signal.SIGTERM)
pygame.quit()
sys.exit(0)
if change_question == True:
change_question = False
current_question = current_question + 1
if current_question > max_question:
current_question = 1
imgSurf = pygame.image.load (img_names[current_question-1]) # load the appropriate image
pygame.mixer.music.load(sound_names[current_question-1]) # load the question audio
screen = pygame.display.set_mode ( imgSurf.get_size(), pygame.FULLSCREEN )
screen.blit ( imgSurf, ( 0, 0 ) )
pygame.display.flip()
pygame.display.update()
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
# pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
change_question = True | launcher.py | import RPi.GPIO as GPIO
import time
import sys
import subprocess, os
import signal
import pygame
from pygame.locals import *
pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
pygame.init() #initialize pygame
pygame.display.init()
GPIO.setmode(GPIO.BCM)
# set up buttons to monitor (compare to boswell.py though!)
quit_button = 17
current_question = 0
change_question = False
GPIO.setup(quit_button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Set up all questions
img_names = []
sound_names = []
qcodes = []
img_names.append("images/john_quinn.png")
sound_names.append("audio/alexa-please_tell_me_about.ogg")
qcodes.append("jquinn1961")
img_names.append("images/judy_wedding_photo.png")
sound_names.append("audio/silence.ogg")
qcodes.append("judywed1961")
img_names.append("images/question01.png")
sound_names.append("audio/willa_question01.ogg")
qcodes.append("worldchange")
img_names.append("images/question03.png")
sound_names.append("audio/silence.ogg")
qcodes.append("neighborhood")
img_names.append("images/question04.png")
sound_names.append("audio/alexa-earliestmemory.ogg")
qcodes.append("earliestmemory")
img_names.append("images/question05.png")
sound_names.append("audio/alexa-bestfriend.ogg")
qcodes.append("bestfriend")
max_question = len(img_names)
print "max_question = " , max_question
# Display logo screen
imgSurf = pygame.image.load ('images/boswell_startup_screen.png')
pygame.mixer.music.load("audio/silence.ogg")
pygame.mouse.set_visible(False)
screen = pygame.display.set_mode ( imgSurf.get_size(), pygame.FULLSCREEN )
screen.blit ( imgSurf, ( 0, 0 ) )
pygame.display.flip()
pygame.mixer.music.play()
rpistr = "sudo python boswell.py"
p=subprocess.Popen(rpistr,shell=True, preexec_fn=os.setsid)
while pygame.mixer.music.get_busy() == True:
continue
while True:
quit_state = GPIO.input(quit_button)
if quit_state == False:
os.killpg(p.pid, signal.SIGTERM)
pygame.quit()
sys.exit(0)
if change_question == True:
change_question = False
current_question = current_question + 1
if current_question > max_question:
current_question = 1
imgSurf = pygame.image.load (img_names[current_question-1]) # load the appropriate image
pygame.mixer.music.load(sound_names[current_question-1]) # load the question audio
screen = pygame.display.set_mode ( imgSurf.get_size(), pygame.FULLSCREEN )
screen.blit ( imgSurf, ( 0, 0 ) )
pygame.display.flip()
pygame.display.update()
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
# pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
change_question = True | 0.061073 | 0.065995 |
"""Implements a class to be used for unit testing.
"""
import datetime
from tlsmate.cert_chain import CertChain
from tlsmate.server_profile import SPObject, ProfileSchema, ServerProfileSchema
from tlsmate import utils
from marshmallow import fields
import pytest
class SPUnitTest(SPObject):
pass
class SPUnitTestSchema(ProfileSchema):
__profile_class__ = SPUnitTest
unit_test_1 = fields.Integer()
unit_test_2 = fields.String()
@ServerProfileSchema.augment
class SPUnitTestAugment(ProfileSchema):
unit_test = fields.Nested(SPUnitTestSchema)
def test_cert_paras(tlsmate, guballa_de_pem, quo_vadis_root_ca3):
chain = CertChain()
for cert in (quo_vadis_root_ca3, guballa_de_pem):
chain.append_pem_cert(cert.as_bytes())
prof = tlsmate.server_profile
prof.allocate_versions()
prof.append_unique_cert_chain(chain)
quo_vadis = prof.cert_chains[0].cert_chain[0]
guballa = prof.cert_chains[0].cert_chain[1]
cert_policies = quo_vadis.extensions[1].cert_policies
explicit_text = cert_policies[0].policy_qualifiers[0].explicit_text
assert type(explicit_text) is str
assert len(explicit_text)
text = cert_policies[0].policy_qualifiers[1].text
assert type(text) is str
assert len(text)
signed_ct = guballa.extensions[8].signed_certificate_timestamps
assert len(signed_ct) == 2
for ct in signed_ct:
assert ct.entry_type == "PRE_CERTIFICATE"
assert type(ct.log_id) is bytes
assert len(ct.log_id)
assert type(ct.timestamp) is datetime.datetime
assert ct.version == "v1"
def test_augment_profile(tlsmate):
tlsmate.server_profile.unit_test = SPUnitTest(unit_test_1=1, unit_test_2="hello")
data = tlsmate.server_profile.make_serializable()
assert "unit_test" in data
assert data["unit_test"]["unit_test_1"] == 1
assert data["unit_test"]["unit_test_2"] == "hello"
def test_deserialize_profile_ok(tlsmate):
data = {"unit_test": {"unit_test_1": 1, "unit_test_2": "hello"}}
tlsmate.server_profile.load(data)
assert tlsmate.server_profile.unit_test.unit_test_1 == 1
assert tlsmate.server_profile.unit_test.unit_test_2 == "hello"
def test_deserialize_profile_nok(tlsmate):
data = {
"unit_test": {"unit_test_1": 1, "unit_test_2": "hello"},
"too_much": "outch",
}
with pytest.raises(ValueError, match="fields not defined in schema"):
tlsmate.server_profile.load(data)
def test_deserialize_full_profile(tlsmate, server_profile):
tlsmate.server_profile.load(utils.deserialize_data(server_profile)) | tests/modules/test_module_server_profile.py | """Implements a class to be used for unit testing.
"""
import datetime
from tlsmate.cert_chain import CertChain
from tlsmate.server_profile import SPObject, ProfileSchema, ServerProfileSchema
from tlsmate import utils
from marshmallow import fields
import pytest
class SPUnitTest(SPObject):
pass
class SPUnitTestSchema(ProfileSchema):
__profile_class__ = SPUnitTest
unit_test_1 = fields.Integer()
unit_test_2 = fields.String()
@ServerProfileSchema.augment
class SPUnitTestAugment(ProfileSchema):
unit_test = fields.Nested(SPUnitTestSchema)
def test_cert_paras(tlsmate, guballa_de_pem, quo_vadis_root_ca3):
chain = CertChain()
for cert in (quo_vadis_root_ca3, guballa_de_pem):
chain.append_pem_cert(cert.as_bytes())
prof = tlsmate.server_profile
prof.allocate_versions()
prof.append_unique_cert_chain(chain)
quo_vadis = prof.cert_chains[0].cert_chain[0]
guballa = prof.cert_chains[0].cert_chain[1]
cert_policies = quo_vadis.extensions[1].cert_policies
explicit_text = cert_policies[0].policy_qualifiers[0].explicit_text
assert type(explicit_text) is str
assert len(explicit_text)
text = cert_policies[0].policy_qualifiers[1].text
assert type(text) is str
assert len(text)
signed_ct = guballa.extensions[8].signed_certificate_timestamps
assert len(signed_ct) == 2
for ct in signed_ct:
assert ct.entry_type == "PRE_CERTIFICATE"
assert type(ct.log_id) is bytes
assert len(ct.log_id)
assert type(ct.timestamp) is datetime.datetime
assert ct.version == "v1"
def test_augment_profile(tlsmate):
tlsmate.server_profile.unit_test = SPUnitTest(unit_test_1=1, unit_test_2="hello")
data = tlsmate.server_profile.make_serializable()
assert "unit_test" in data
assert data["unit_test"]["unit_test_1"] == 1
assert data["unit_test"]["unit_test_2"] == "hello"
def test_deserialize_profile_ok(tlsmate):
data = {"unit_test": {"unit_test_1": 1, "unit_test_2": "hello"}}
tlsmate.server_profile.load(data)
assert tlsmate.server_profile.unit_test.unit_test_1 == 1
assert tlsmate.server_profile.unit_test.unit_test_2 == "hello"
def test_deserialize_profile_nok(tlsmate):
data = {
"unit_test": {"unit_test_1": 1, "unit_test_2": "hello"},
"too_much": "outch",
}
with pytest.raises(ValueError, match="fields not defined in schema"):
tlsmate.server_profile.load(data)
def test_deserialize_full_profile(tlsmate, server_profile):
tlsmate.server_profile.load(utils.deserialize_data(server_profile)) | 0.650023 | 0.486941 |
import json
import os
from shutil import rmtree
from tempfile import mkdtemp
from couchapp.localdoc import LocalDoc
def test_load_ignores_non_exist():
doc = LocalDoc('/mock/app', create=False)
assert doc.ignores == []
class testIgnores(object):
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def test_load_ignore(self):
func = self.check_ignore
yield func, '[42]', [42]
yield func, '["foo", "bar"]', ['foo', 'bar']
content = '''
[
"magic", // comments are allowed
"answer"
]
'''
yield func, content, ['magic', 'answer']
content = '''
[
"magic", /* comments are allowed */
"answer"
]
'''
yield func, content, ['magic', 'answer']
content = '''
[
"magic", /* comments are allowed */
"answer" // remix
]
'''
yield func, content, ['magic', 'answer']
content = '''
[
"magic"
/*
"answer"
*/
]
'''
yield func, content, ['magic']
content = '''
[
"^regex$", /* comment */
"answer"
]
'''
yield func, content, ['^regex$', 'answer']
content = '''
[
"/*regex", /* comment */
"answer//" // comment
]
'''
yield func, content, ['/*regex', 'answer//']
def check_ignore(self, content, ans):
# prepare ignore file
path = os.path.join(self.dir, '.couchappignore')
with open(path, 'w') as f:
f.write(content)
doc = LocalDoc(self.dir, create=False)
assert doc.ignores == ans
class testGetId(object):
'''
The test cases of ``LocalDoc.get_id``
'''
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def test_idfile(self):
f = self.check_idfile
yield f, 'magic_id', 'magic_id'
yield f, 'magic_id', 'magic_id', 'wb'
yield f, ' magic_id', 'magic_id'
yield f, ' magic_id', 'magic_id', 'wb'
yield f, 'magic_id ', 'magic_id'
yield f, 'magic_id ', 'magic_id', 'wb'
yield f, ' magic_id ', 'magic_id'
yield f, ' magic_id ', 'magic_id', 'wb'
yield f, 'magic_id\n', 'magic_id'
yield f, 'magic_id\n', 'magic_id', 'wb'
yield f, 'magic_id\n\r', 'magic_id'
yield f, 'magic_id\n\r', 'magic_id', 'wb'
yield f, 'magic_id\r', 'magic_id'
yield f, 'magic_id\r', 'magic_id', 'wb'
yield f, 'magic_id \n', 'magic_id'
yield f, 'magic_id \n', 'magic_id', 'wb'
yield f, 'magic_id \n\r', 'magic_id'
yield f, 'magic_id \n\r', 'magic_id', 'wb'
yield f, 'magic_id \r ', 'magic_id'
yield f, 'magic_id \r ', 'magic_id', 'wb'
f = self.check_not_idfile
yield f, '\nmagic_id', 'magic_id'
yield f, '\n\rmagic_id', 'magic_id'
yield f, '\nmagic_id\n', 'magic_id'
def check_idfile(self, content, ans, mode='w'):
# create ``_id`` file
p = os.path.join(self.dir, '_id')
with open(p, mode) as idfile:
idfile.write(content)
doc = LocalDoc(self.dir, create=False)
assert doc.get_id() == ans, doc.get_id()
def check_not_idfile(self, content, ans, mode='w'):
# create ``_id`` file
p = os.path.join(self.dir, '_id')
with open(p, mode) as idfile:
idfile.write(content)
doc = LocalDoc(self.dir, create=False)
assert doc.get_id() != ans, doc.get_id()
def test_dirname(self):
'''
If the ``_id`` file does not eixsts
'''
dirname = os.path.split(self.dir)[-1]
doc = LocalDoc(self.dir, is_ddoc=False)
assert doc.get_id() == dirname
doc = LocalDoc(self.dir, is_ddoc=True)
ans = '_design/{0}'.format(dirname)
assert doc.get_id() == ans
class testCreate(object):
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def exists(self, filename):
return os.path.exists(os.path.join(self.dir, filename))
def test_create(self):
doc = LocalDoc(self.dir, create=True)
assert self.exists('.couchapprc')
assert self.exists('.couchappignore')
def test_create_nothing(self):
# .couchapprc already exists
path = os.path.join(self.dir, '.couchapprc')
with open(path, 'w') as f:
f.write('{}')
doc = LocalDoc(self.dir, create=True)
assert self.exists('.couchapprc')
assert not self.exists('.couchappignore')
def test_check_ignore():
f = check_check_ignore
ignores = ['.*\.bak']
yield f, ignores, 'magic.bak', True
yield f, ignores, 'magicbak', False
yield f, ignores, 'bar/magic.bak', True
ignores = ['bar']
yield f, ignores, 'bar', True
yield f, ignores, 'bar/', True
yield f, ignores, 'bar.txt', False
yield f, ignores, 'magic_bar', False
yield f, ignores, 'foo/bar', True
yield f, ignores, 'foo/qaz/bar', True
yield f, ignores, 'foo/bar/app.js', True
yield f, ignores, 'bar/app.js', True
yield f, ignores, 'bar/foo.txt', True
yield f, ignores, 'magic_bar/app.js', False
yield f, ignores, 'bar_magic/app.js', False
# the result should be same as ``['bar']``,
# the ``$`` is include by default
ignores = ['bar$']
yield f, ignores, 'bar', True
yield f, ignores, 'bar/', True
yield f, ignores, 'bar.txt', False
yield f, ignores, 'magic_bar', False
yield f, ignores, 'foo/bar', True
yield f, ignores, 'foo/qaz/bar', True
yield f, ignores, 'foo/bar/app.js', True
yield f, ignores, 'bar/app.js', True
yield f, ignores, 'bar/foo.txt', True
yield f, ignores, 'magic_bar/app.js', False
yield f, ignores, 'bar_magic/app.js', False
ignores = ['foo/bar']
yield f, ignores, 'foo/bar', True
yield f, ignores, 'qaz/foo/bar', True
yield f, ignores, 'foo/bar/', True
yield f, ignores, 'qaz/foo/bar/', True
yield f, ignores, 'foo/bar/app.js', True
yield f, ignores, 'qaz/foo/bar/app.js', True
ignores = ['foo/.*bar']
yield f, ignores, 'foo/magic_bar', True
yield f, ignores, 'foo/magic_bar/', True
yield f, ignores, 'foo/magic_bar/app.js', True
yield f, ignores, 'foo/magic/bar/', True
yield f, ignores, 'foo/magic/bar/app.js', True
yield f, ignores, 'foo/magic/long/long/bar', True
yield f, ignores, 'foo/magic/long/long/bar/app.js', True
yield f, ignores, 'foobar', False
yield f, ignores, 'qaz/foo/magic_bar', True
yield f, ignores, 'qaz/foo/magic_bar/', True
yield f, ignores, 'qaz/foo/magic_bar/app.js', True
yield f, ignores, 'qaz/foo/magic/bar/', True
yield f, ignores, 'qaz/foo/magic/bar/app.js', True
yield f, ignores, 'qaz/foo/magic/long/long/bar', True
yield f, ignores, 'qaz/foo/magic/long/long/bar/app.js', True
yield f, ignores, 'qaz_foo/magic_bar', False
yield f, ignores, 'qaz_foo/magic_bar/', False
yield f, ignores, 'qaz_foo/magic_bar/app.js', False
yield f, ignores, 'qaz_foo/magic/bar/', False
yield f, ignores, 'qaz_foo/magic/bar/app.js', False
yield f, ignores, 'qaz_foo/magic/long/long/bar', False
yield f, ignores, 'qaz_foo/magic/long/long/bar/app.js', False
yield f, ignores, 'foo/magic_bar_', False
yield f, ignores, 'foo/magic_bar_/', False
yield f, ignores, 'foo/magic_bar_/app.js', False
yield f, ignores, 'foo/magic/bar_/', False
yield f, ignores, 'foo/magic/bar_/app.js', False
yield f, ignores, 'foo/magic/long/long/bar_', False
yield f, ignores, 'foo/magic/long/long/bar_/app.js', False
ignores = ['foo/.*/bar']
yield f, ignores, 'foo/magic_bar', False
yield f, ignores, 'foo/magic_bar/', False
yield f, ignores, 'foo/magic_bar/app.js', False
yield f, ignores, 'foo/magic/bar/', True
yield f, ignores, 'foo/magic/bar/app.js', True
yield f, ignores, 'foo/magic/long/long/bar', True
yield f, ignores, 'foo/magic/long/long/bar/app.js', True
yield f, ignores, 'foobar', False
yield f, ignores, 'qaz/foo/magic_bar', False
yield f, ignores, 'qaz/foo/magic_bar/', False
yield f, ignores, 'qaz/foo/magic_bar/app.js', False
yield f, ignores, 'qaz/foo/magic/bar/', True
yield f, ignores, 'qaz/foo/magic/bar/app.js', True
yield f, ignores, 'qaz/foo/magic/long/long/bar', True
yield f, ignores, 'qaz/foo/magic/long/long/bar/app.js', True
yield f, ignores, 'qaz_foo/magic_bar', False
yield f, ignores, 'qaz_foo/magic_bar/', False
yield f, ignores, 'qaz_foo/magic_bar/app.js', False
yield f, ignores, 'qaz_foo/magic/bar/', False
yield f, ignores, 'qaz_foo/magic/bar/app.js', False
yield f, ignores, 'qaz_foo/magic/long/long/bar', False
yield f, ignores, 'qaz_foo/magic/long/long/bar/app.js', False
yield f, ignores, 'foo/magic/bar_', False
yield f, ignores, 'foo/magic/bar_/', False
yield f, ignores, 'foo/magic/bar_/app.js', False
yield f, ignores, 'foo/magic/long/long/bar_', False
yield f, ignores, 'foo/magic/long/long/bar_/app.js', False
ignores = ['/foo/bar']
yield f, ignores, 'foo/bar', True
yield f, ignores, 'foo/bar/app.js', True
yield f, ignores, 'qaz/foo/bar', False
yield f, ignores, 'qaz/foo/bar/app.js', False
ignores = [u'測試'] # unicode testing
yield f, ignores, u'測試', True
yield f, ignores, u'測 試', False
yield f, ignores, u'測試/app.js', True
yield f, ignores, u'測試資料夾', False
yield f, ignores, u'測試.txt', False
yield f, ignores, u'foo/測試', True
yield f, ignores, u'foo/測 試', False
yield f, ignores, u'foo/測試/app.js', True
yield f, ignores, u'foo/測試資料夾', False
yield f, ignores, u'foo/測試.txt', False
def check_check_ignore(ignores, path, ans):
doc = LocalDoc('/mock/app', create=False)
doc.ignores = ignores
assert doc.check_ignore(path) is ans
def test_meta_to_fields():
f = check_meta_to_fields
yield f, ({}, {}), ({'couchapp': {}}, {})
yield f, ({}, []), ({'couchapp': {'meta': []}},
{'meta': []})
yield f, ({}, [42]), ({'couchapp': {'meta': [42]}},
{'meta': [42]})
yield f, ({}, 'magic'), ({'couchapp': {'meta': 'magic'}},
{'meta': 'magic'})
yield f, ({}, {'signatures': 42}), ({'couchapp': {}}, {})
yield f, ({}, {'manifest': 42}), ({'couchapp': {}}, {})
yield f, ({}, {'objects': 42}), ({'couchapp': {}}, {})
yield f, ({}, {'object': 42}), ({'couchapp': {'object': 42}},
{'object': 42})
yield f, ({}, {'length': 42}), ({'couchapp': {}}, {})
yield (f,
({'couchapp': {'magic': 42}}, {'foo': 'bar'}),
({'couchapp': {'magic': 42, 'foo': 'bar'}}, {'foo': 'bar'}))
def check_meta_to_fields(input, ans):
output = LocalDoc._meta_to_fields(*input)
assert ans == output
# check the is a copy of dict
for i, o in zip(input, output):
assert i is not o
class TestEncodeContent(object):
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def test_json_suffix(self):
f = self.check_json_suffix
yield f, '{"magic": 42}', {'magic': 42}
yield f, '"magic"', "magic"
yield f, '[1, 2, 3]', [1, 2, 3]
yield f, '{}{}', ''
def check_json_suffix(self, content, ans):
name = 'magic.json'
p = os.path.join(self.dir, name)
with open(p, 'w') as f:
f.write(content)
content = LocalDoc._encode_content(name, p)
assert content == ans
def test_text_file(self):
f = self.check_text_file
yield f, b'readme'
yield f, b'readme\ntopic\n'
yield f, b'readme\ntopic\n '
yield f, b'測試'
yield f, b'測試\n測試\n'
yield f, b'測試\n測試\n '
def check_text_file(self, ans):
name = 'README.rst'
p = os.path.join(self.dir, name)
with open(p, 'wb') as f:
f.write(ans)
content = LocalDoc._encode_content(name, p)
assert content == ans.decode('utf8'), content
def test_bin_file(self):
f = self.check_bin_file
yield f, b'\xb4\xfa\xb8\xd5', 'tPq41Q=='
yield f, b'\xb4\xfa\xb8\xd5\xb4', 'tPq41bQ='
def check_bin_file(self, bits, ans):
name = 'a.out'
p = os.path.join(self.dir, name)
with open(p, 'wb') as f:
f.write(bits)
content = LocalDoc._encode_content(name, p)
assert content == 'base64-encoded;' + ans
class TestDirToFieds(object):
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def test_main(self):
f = self.check
# assume ``docdir`` is same as ``current_dir``
yield f, [], ['.foo'], {}, []
yield f, [], ['.foo', '.bar'], {}, []
yield f, ['test'], ['.foo', 'test/.bar'], {'test': {}}, ['test/']
f = self.check_ignore
yield f, ['README'], [], ['README'], {}, []
yield f, ['README'], ['d'], ['d/README', 'README'], {'d': {}}, ['d/']
yield (
f, ['README'], ['d'],
['d/README', 'README', '.foo', 'd/.bar'], # files
{'d': {}}, # fields
['d/']) # manifest
f = self.check
yield f, ['_foo'], [], {}, []
yield f, ['_foo', '_bar'], [], {}, []
yield f, ['_foo', '_bar'], ['_foo/foo'], {}, []
yield f, ['_foo', '_bar'], ['_foo/foo', '_bar/bar'], {}, []
yield (
f, ['d/_foo'], [],
{'d': {'_foo': {}}},
['d/', 'd/_foo/'])
yield (
f, ['d/_foo'], ['d/_foo/README'],
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
yield (
f, ['d/_foo', '_bar'],
['d/_foo/README', '_bar/bar'], # files
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
yield (
f, ['d/_foo', '_bar', '.foo'],
['d/_foo/README', '_bar/bar', '.foo/foo'], # files
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
yield (
f, ['d/_foo', '_bar', '.foo'],
['d/_foo/README', '_bar/bar', '_bar/_bar', '.foo/foo'], # files
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
yield (
f, ['d/_foo', '_bar', '.foo'],
['d/_foo/README', 'd/_foo/.foo', '_bar/bar', '_bar/_bar',
'.foo/foo'], # files
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
# test cases for ``_attachments``
yield (
f, ['_attachments'],
['_attachments/README'], # files
{},
[])
yield (
f, ['_attachments', 'd'],
['_attachments/README'], # files
{'d': {}},
['d/'])
yield (
f, ['_attachments', 'd/_foo'],
['_attachments/README', 'd/_foo/foo'], # files
{'d': {'_foo': {'foo': ''}}},
['d/', 'd/_foo/', 'd/_foo/foo'])
yield (
f, ['_attachments', 'd/_foo', 'd/_attachments'],
['_attachments/README', 'd/_foo/foo',
'd/_attachments/README'], # files
{'d': {'_foo': {'foo': ''}}},
['d/', 'd/_foo/', 'd/_foo/foo'])
# test cases for ``couchapp/``
yield (
f, ['couchapp'],
['couchapp/README'], # files
{'couchapp': {'README': ''}},
['couchapp/', 'couchapp/README'])
yield (
f, ['couchapp/_foo'],
['couchapp/README', 'couchapp/_foo/foo'], # files
{'couchapp': {'README': '', '_foo': {'foo': ''}}},
['couchapp/', 'couchapp/_foo/', 'couchapp/_foo/foo',
'couchapp/README'])
# test cases for ``couchapp.json``
f = self.check_capp_json
yield (
f, {},
[], ['couchapp.json'],
{'couchapp': {}},
['couchapp.json'])
yield (
f, {'foo': 'bar', 'ans': 42},
[], ['couchapp.json'],
{'couchapp': {'foo': 'bar', 'ans': 42}},
['couchapp.json'])
yield (
f, 'string',
[], ['couchapp.json'],
{'couchapp': {'meta': 'string'}},
['couchapp.json'])
yield (
f, ['list', 1, 2, 3],
[], ['couchapp.json'],
{'couchapp': {'meta': ['list', 1, 2, 3]}},
['couchapp.json'])
yield (
f, {'signatures': 42},
[], ['couchapp.json'],
{'couchapp': {}},
['couchapp.json'])
yield (
f, {'signatures': 42, 'foo': 'bar'},
[], ['couchapp.json'],
{'couchapp': {'foo': 'bar'}},
['couchapp.json'])
# test cases for name collision
f = self.check
yield(
f, [], ['README', 'README.rst'],
{'README': ''},
['README'])
def check(self, dirs, files, fields, manifest):
[os.makedirs(os.path.join(self.dir, d)) for d in dirs]
[open(os.path.join(self.dir, f), 'a').close() for f in files]
out_m = []
out_f = LocalDoc(self.dir).dir_to_fields(self.dir, manifest=out_m)
assert out_f == fields
assert set(out_m) == set(manifest)
def check_ignore(self, ignores, *args):
with open(os.path.join(self.dir, '.couchappignore'), 'w') as f:
f.write(json.dumps(ignores))
self.check(*args)
def check_capp_json(self, content, *args):
with open(os.path.join(self.dir, 'couchapp.json'), 'w') as f:
f.write(json.dumps(content))
self.check(*args) | tests/test_localdoc.py |
import json
import os
from shutil import rmtree
from tempfile import mkdtemp
from couchapp.localdoc import LocalDoc
def test_load_ignores_non_exist():
doc = LocalDoc('/mock/app', create=False)
assert doc.ignores == []
class testIgnores(object):
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def test_load_ignore(self):
func = self.check_ignore
yield func, '[42]', [42]
yield func, '["foo", "bar"]', ['foo', 'bar']
content = '''
[
"magic", // comments are allowed
"answer"
]
'''
yield func, content, ['magic', 'answer']
content = '''
[
"magic", /* comments are allowed */
"answer"
]
'''
yield func, content, ['magic', 'answer']
content = '''
[
"magic", /* comments are allowed */
"answer" // remix
]
'''
yield func, content, ['magic', 'answer']
content = '''
[
"magic"
/*
"answer"
*/
]
'''
yield func, content, ['magic']
content = '''
[
"^regex$", /* comment */
"answer"
]
'''
yield func, content, ['^regex$', 'answer']
content = '''
[
"/*regex", /* comment */
"answer//" // comment
]
'''
yield func, content, ['/*regex', 'answer//']
def check_ignore(self, content, ans):
# prepare ignore file
path = os.path.join(self.dir, '.couchappignore')
with open(path, 'w') as f:
f.write(content)
doc = LocalDoc(self.dir, create=False)
assert doc.ignores == ans
class testGetId(object):
'''
The test cases of ``LocalDoc.get_id``
'''
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def test_idfile(self):
f = self.check_idfile
yield f, 'magic_id', 'magic_id'
yield f, 'magic_id', 'magic_id', 'wb'
yield f, ' magic_id', 'magic_id'
yield f, ' magic_id', 'magic_id', 'wb'
yield f, 'magic_id ', 'magic_id'
yield f, 'magic_id ', 'magic_id', 'wb'
yield f, ' magic_id ', 'magic_id'
yield f, ' magic_id ', 'magic_id', 'wb'
yield f, 'magic_id\n', 'magic_id'
yield f, 'magic_id\n', 'magic_id', 'wb'
yield f, 'magic_id\n\r', 'magic_id'
yield f, 'magic_id\n\r', 'magic_id', 'wb'
yield f, 'magic_id\r', 'magic_id'
yield f, 'magic_id\r', 'magic_id', 'wb'
yield f, 'magic_id \n', 'magic_id'
yield f, 'magic_id \n', 'magic_id', 'wb'
yield f, 'magic_id \n\r', 'magic_id'
yield f, 'magic_id \n\r', 'magic_id', 'wb'
yield f, 'magic_id \r ', 'magic_id'
yield f, 'magic_id \r ', 'magic_id', 'wb'
f = self.check_not_idfile
yield f, '\nmagic_id', 'magic_id'
yield f, '\n\rmagic_id', 'magic_id'
yield f, '\nmagic_id\n', 'magic_id'
def check_idfile(self, content, ans, mode='w'):
# create ``_id`` file
p = os.path.join(self.dir, '_id')
with open(p, mode) as idfile:
idfile.write(content)
doc = LocalDoc(self.dir, create=False)
assert doc.get_id() == ans, doc.get_id()
def check_not_idfile(self, content, ans, mode='w'):
# create ``_id`` file
p = os.path.join(self.dir, '_id')
with open(p, mode) as idfile:
idfile.write(content)
doc = LocalDoc(self.dir, create=False)
assert doc.get_id() != ans, doc.get_id()
def test_dirname(self):
'''
If the ``_id`` file does not eixsts
'''
dirname = os.path.split(self.dir)[-1]
doc = LocalDoc(self.dir, is_ddoc=False)
assert doc.get_id() == dirname
doc = LocalDoc(self.dir, is_ddoc=True)
ans = '_design/{0}'.format(dirname)
assert doc.get_id() == ans
class testCreate(object):
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def exists(self, filename):
return os.path.exists(os.path.join(self.dir, filename))
def test_create(self):
doc = LocalDoc(self.dir, create=True)
assert self.exists('.couchapprc')
assert self.exists('.couchappignore')
def test_create_nothing(self):
# .couchapprc already exists
path = os.path.join(self.dir, '.couchapprc')
with open(path, 'w') as f:
f.write('{}')
doc = LocalDoc(self.dir, create=True)
assert self.exists('.couchapprc')
assert not self.exists('.couchappignore')
def test_check_ignore():
f = check_check_ignore
ignores = ['.*\.bak']
yield f, ignores, 'magic.bak', True
yield f, ignores, 'magicbak', False
yield f, ignores, 'bar/magic.bak', True
ignores = ['bar']
yield f, ignores, 'bar', True
yield f, ignores, 'bar/', True
yield f, ignores, 'bar.txt', False
yield f, ignores, 'magic_bar', False
yield f, ignores, 'foo/bar', True
yield f, ignores, 'foo/qaz/bar', True
yield f, ignores, 'foo/bar/app.js', True
yield f, ignores, 'bar/app.js', True
yield f, ignores, 'bar/foo.txt', True
yield f, ignores, 'magic_bar/app.js', False
yield f, ignores, 'bar_magic/app.js', False
# the result should be same as ``['bar']``,
# the ``$`` is include by default
ignores = ['bar$']
yield f, ignores, 'bar', True
yield f, ignores, 'bar/', True
yield f, ignores, 'bar.txt', False
yield f, ignores, 'magic_bar', False
yield f, ignores, 'foo/bar', True
yield f, ignores, 'foo/qaz/bar', True
yield f, ignores, 'foo/bar/app.js', True
yield f, ignores, 'bar/app.js', True
yield f, ignores, 'bar/foo.txt', True
yield f, ignores, 'magic_bar/app.js', False
yield f, ignores, 'bar_magic/app.js', False
ignores = ['foo/bar']
yield f, ignores, 'foo/bar', True
yield f, ignores, 'qaz/foo/bar', True
yield f, ignores, 'foo/bar/', True
yield f, ignores, 'qaz/foo/bar/', True
yield f, ignores, 'foo/bar/app.js', True
yield f, ignores, 'qaz/foo/bar/app.js', True
ignores = ['foo/.*bar']
yield f, ignores, 'foo/magic_bar', True
yield f, ignores, 'foo/magic_bar/', True
yield f, ignores, 'foo/magic_bar/app.js', True
yield f, ignores, 'foo/magic/bar/', True
yield f, ignores, 'foo/magic/bar/app.js', True
yield f, ignores, 'foo/magic/long/long/bar', True
yield f, ignores, 'foo/magic/long/long/bar/app.js', True
yield f, ignores, 'foobar', False
yield f, ignores, 'qaz/foo/magic_bar', True
yield f, ignores, 'qaz/foo/magic_bar/', True
yield f, ignores, 'qaz/foo/magic_bar/app.js', True
yield f, ignores, 'qaz/foo/magic/bar/', True
yield f, ignores, 'qaz/foo/magic/bar/app.js', True
yield f, ignores, 'qaz/foo/magic/long/long/bar', True
yield f, ignores, 'qaz/foo/magic/long/long/bar/app.js', True
yield f, ignores, 'qaz_foo/magic_bar', False
yield f, ignores, 'qaz_foo/magic_bar/', False
yield f, ignores, 'qaz_foo/magic_bar/app.js', False
yield f, ignores, 'qaz_foo/magic/bar/', False
yield f, ignores, 'qaz_foo/magic/bar/app.js', False
yield f, ignores, 'qaz_foo/magic/long/long/bar', False
yield f, ignores, 'qaz_foo/magic/long/long/bar/app.js', False
yield f, ignores, 'foo/magic_bar_', False
yield f, ignores, 'foo/magic_bar_/', False
yield f, ignores, 'foo/magic_bar_/app.js', False
yield f, ignores, 'foo/magic/bar_/', False
yield f, ignores, 'foo/magic/bar_/app.js', False
yield f, ignores, 'foo/magic/long/long/bar_', False
yield f, ignores, 'foo/magic/long/long/bar_/app.js', False
ignores = ['foo/.*/bar']
yield f, ignores, 'foo/magic_bar', False
yield f, ignores, 'foo/magic_bar/', False
yield f, ignores, 'foo/magic_bar/app.js', False
yield f, ignores, 'foo/magic/bar/', True
yield f, ignores, 'foo/magic/bar/app.js', True
yield f, ignores, 'foo/magic/long/long/bar', True
yield f, ignores, 'foo/magic/long/long/bar/app.js', True
yield f, ignores, 'foobar', False
yield f, ignores, 'qaz/foo/magic_bar', False
yield f, ignores, 'qaz/foo/magic_bar/', False
yield f, ignores, 'qaz/foo/magic_bar/app.js', False
yield f, ignores, 'qaz/foo/magic/bar/', True
yield f, ignores, 'qaz/foo/magic/bar/app.js', True
yield f, ignores, 'qaz/foo/magic/long/long/bar', True
yield f, ignores, 'qaz/foo/magic/long/long/bar/app.js', True
yield f, ignores, 'qaz_foo/magic_bar', False
yield f, ignores, 'qaz_foo/magic_bar/', False
yield f, ignores, 'qaz_foo/magic_bar/app.js', False
yield f, ignores, 'qaz_foo/magic/bar/', False
yield f, ignores, 'qaz_foo/magic/bar/app.js', False
yield f, ignores, 'qaz_foo/magic/long/long/bar', False
yield f, ignores, 'qaz_foo/magic/long/long/bar/app.js', False
yield f, ignores, 'foo/magic/bar_', False
yield f, ignores, 'foo/magic/bar_/', False
yield f, ignores, 'foo/magic/bar_/app.js', False
yield f, ignores, 'foo/magic/long/long/bar_', False
yield f, ignores, 'foo/magic/long/long/bar_/app.js', False
ignores = ['/foo/bar']
yield f, ignores, 'foo/bar', True
yield f, ignores, 'foo/bar/app.js', True
yield f, ignores, 'qaz/foo/bar', False
yield f, ignores, 'qaz/foo/bar/app.js', False
ignores = [u'測試'] # unicode testing
yield f, ignores, u'測試', True
yield f, ignores, u'測 試', False
yield f, ignores, u'測試/app.js', True
yield f, ignores, u'測試資料夾', False
yield f, ignores, u'測試.txt', False
yield f, ignores, u'foo/測試', True
yield f, ignores, u'foo/測 試', False
yield f, ignores, u'foo/測試/app.js', True
yield f, ignores, u'foo/測試資料夾', False
yield f, ignores, u'foo/測試.txt', False
def check_check_ignore(ignores, path, ans):
doc = LocalDoc('/mock/app', create=False)
doc.ignores = ignores
assert doc.check_ignore(path) is ans
def test_meta_to_fields():
f = check_meta_to_fields
yield f, ({}, {}), ({'couchapp': {}}, {})
yield f, ({}, []), ({'couchapp': {'meta': []}},
{'meta': []})
yield f, ({}, [42]), ({'couchapp': {'meta': [42]}},
{'meta': [42]})
yield f, ({}, 'magic'), ({'couchapp': {'meta': 'magic'}},
{'meta': 'magic'})
yield f, ({}, {'signatures': 42}), ({'couchapp': {}}, {})
yield f, ({}, {'manifest': 42}), ({'couchapp': {}}, {})
yield f, ({}, {'objects': 42}), ({'couchapp': {}}, {})
yield f, ({}, {'object': 42}), ({'couchapp': {'object': 42}},
{'object': 42})
yield f, ({}, {'length': 42}), ({'couchapp': {}}, {})
yield (f,
({'couchapp': {'magic': 42}}, {'foo': 'bar'}),
({'couchapp': {'magic': 42, 'foo': 'bar'}}, {'foo': 'bar'}))
def check_meta_to_fields(input, ans):
output = LocalDoc._meta_to_fields(*input)
assert ans == output
# check the is a copy of dict
for i, o in zip(input, output):
assert i is not o
class TestEncodeContent(object):
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def test_json_suffix(self):
f = self.check_json_suffix
yield f, '{"magic": 42}', {'magic': 42}
yield f, '"magic"', "magic"
yield f, '[1, 2, 3]', [1, 2, 3]
yield f, '{}{}', ''
def check_json_suffix(self, content, ans):
name = 'magic.json'
p = os.path.join(self.dir, name)
with open(p, 'w') as f:
f.write(content)
content = LocalDoc._encode_content(name, p)
assert content == ans
def test_text_file(self):
f = self.check_text_file
yield f, b'readme'
yield f, b'readme\ntopic\n'
yield f, b'readme\ntopic\n '
yield f, b'測試'
yield f, b'測試\n測試\n'
yield f, b'測試\n測試\n '
def check_text_file(self, ans):
name = 'README.rst'
p = os.path.join(self.dir, name)
with open(p, 'wb') as f:
f.write(ans)
content = LocalDoc._encode_content(name, p)
assert content == ans.decode('utf8'), content
def test_bin_file(self):
f = self.check_bin_file
yield f, b'\xb4\xfa\xb8\xd5', 'tPq41Q=='
yield f, b'\xb4\xfa\xb8\xd5\xb4', 'tPq41bQ='
def check_bin_file(self, bits, ans):
name = 'a.out'
p = os.path.join(self.dir, name)
with open(p, 'wb') as f:
f.write(bits)
content = LocalDoc._encode_content(name, p)
assert content == 'base64-encoded;' + ans
class TestDirToFieds(object):
def setUp(self):
self.dir = mkdtemp()
def tearDown(self):
rmtree(self.dir)
def test_main(self):
f = self.check
# assume ``docdir`` is same as ``current_dir``
yield f, [], ['.foo'], {}, []
yield f, [], ['.foo', '.bar'], {}, []
yield f, ['test'], ['.foo', 'test/.bar'], {'test': {}}, ['test/']
f = self.check_ignore
yield f, ['README'], [], ['README'], {}, []
yield f, ['README'], ['d'], ['d/README', 'README'], {'d': {}}, ['d/']
yield (
f, ['README'], ['d'],
['d/README', 'README', '.foo', 'd/.bar'], # files
{'d': {}}, # fields
['d/']) # manifest
f = self.check
yield f, ['_foo'], [], {}, []
yield f, ['_foo', '_bar'], [], {}, []
yield f, ['_foo', '_bar'], ['_foo/foo'], {}, []
yield f, ['_foo', '_bar'], ['_foo/foo', '_bar/bar'], {}, []
yield (
f, ['d/_foo'], [],
{'d': {'_foo': {}}},
['d/', 'd/_foo/'])
yield (
f, ['d/_foo'], ['d/_foo/README'],
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
yield (
f, ['d/_foo', '_bar'],
['d/_foo/README', '_bar/bar'], # files
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
yield (
f, ['d/_foo', '_bar', '.foo'],
['d/_foo/README', '_bar/bar', '.foo/foo'], # files
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
yield (
f, ['d/_foo', '_bar', '.foo'],
['d/_foo/README', '_bar/bar', '_bar/_bar', '.foo/foo'], # files
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
yield (
f, ['d/_foo', '_bar', '.foo'],
['d/_foo/README', 'd/_foo/.foo', '_bar/bar', '_bar/_bar',
'.foo/foo'], # files
{'d': {'_foo': {'README': ''}}},
['d/', 'd/_foo/', 'd/_foo/README'])
# test cases for ``_attachments``
yield (
f, ['_attachments'],
['_attachments/README'], # files
{},
[])
yield (
f, ['_attachments', 'd'],
['_attachments/README'], # files
{'d': {}},
['d/'])
yield (
f, ['_attachments', 'd/_foo'],
['_attachments/README', 'd/_foo/foo'], # files
{'d': {'_foo': {'foo': ''}}},
['d/', 'd/_foo/', 'd/_foo/foo'])
yield (
f, ['_attachments', 'd/_foo', 'd/_attachments'],
['_attachments/README', 'd/_foo/foo',
'd/_attachments/README'], # files
{'d': {'_foo': {'foo': ''}}},
['d/', 'd/_foo/', 'd/_foo/foo'])
# test cases for ``couchapp/``
yield (
f, ['couchapp'],
['couchapp/README'], # files
{'couchapp': {'README': ''}},
['couchapp/', 'couchapp/README'])
yield (
f, ['couchapp/_foo'],
['couchapp/README', 'couchapp/_foo/foo'], # files
{'couchapp': {'README': '', '_foo': {'foo': ''}}},
['couchapp/', 'couchapp/_foo/', 'couchapp/_foo/foo',
'couchapp/README'])
# test cases for ``couchapp.json``
f = self.check_capp_json
yield (
f, {},
[], ['couchapp.json'],
{'couchapp': {}},
['couchapp.json'])
yield (
f, {'foo': 'bar', 'ans': 42},
[], ['couchapp.json'],
{'couchapp': {'foo': 'bar', 'ans': 42}},
['couchapp.json'])
yield (
f, 'string',
[], ['couchapp.json'],
{'couchapp': {'meta': 'string'}},
['couchapp.json'])
yield (
f, ['list', 1, 2, 3],
[], ['couchapp.json'],
{'couchapp': {'meta': ['list', 1, 2, 3]}},
['couchapp.json'])
yield (
f, {'signatures': 42},
[], ['couchapp.json'],
{'couchapp': {}},
['couchapp.json'])
yield (
f, {'signatures': 42, 'foo': 'bar'},
[], ['couchapp.json'],
{'couchapp': {'foo': 'bar'}},
['couchapp.json'])
# test cases for name collision
f = self.check
yield(
f, [], ['README', 'README.rst'],
{'README': ''},
['README'])
def check(self, dirs, files, fields, manifest):
[os.makedirs(os.path.join(self.dir, d)) for d in dirs]
[open(os.path.join(self.dir, f), 'a').close() for f in files]
out_m = []
out_f = LocalDoc(self.dir).dir_to_fields(self.dir, manifest=out_m)
assert out_f == fields
assert set(out_m) == set(manifest)
def check_ignore(self, ignores, *args):
with open(os.path.join(self.dir, '.couchappignore'), 'w') as f:
f.write(json.dumps(ignores))
self.check(*args)
def check_capp_json(self, content, *args):
with open(os.path.join(self.dir, 'couchapp.json'), 'w') as f:
f.write(json.dumps(content))
self.check(*args) | 0.398875 | 0.222162 |
import re
from studioqt import QtCore
class SearchFilter(QtCore.QObject):
searchChanged = QtCore.Signal()
class Operator:
OR = " or "
AND = " and "
def __init__(self, pattern, spaceOperator=Operator.AND):
"""
:type pattern: str
:type spaceOperator: SearchFilter.Operator
"""
QtCore.QObject.__init__(self)
self._matches = 0
self._pattern = None
self._resolvedPattern = None
self._spaceOperator = spaceOperator
self.setPattern(pattern)
def pattern(self):
"""
Return the pattern for the search filter.
:rtype: str
"""
return self._pattern
def setPattern(self, pattern):
"""
Set the pattern for the search filter.
:type pattern: str
"""
self._pattern = pattern
self._searchChanged()
def _searchChanged(self):
"""
Triggered when the search filter changes.
:rtype: None
"""
self.resolvePattern()
self.searchChanged.emit()
def resolvedPattern(self):
"""
Return the resolved pattern.
:rtype: str
"""
return self._resolvedPattern
def setResolvedPattern(self, resolvedPattern):
"""
Set the resolved pattern.
:type resolvedPattern: str
:rtype: None
"""
self._resolvedPattern = resolvedPattern
def spaceOperator(self):
"""
Return the operator for all white spaces in the pattern.
:rtype: SearchFilter.Operator
"""
return self._spaceOperator
def setSpaceOperator(self, operator):
"""
Set the operator for all white spaces in the pattern.
:type: SearchFilter.Operator
"""
self._spaceOperator = operator
self._searchChanged()
def settings(self):
"""
Return the state of the search filter as a dict object.
:rtype: dict
"""
settings = {}
settings["pattern"] = self.pattern()
settings["spaceOperator"] = self.spaceOperator()
return settings
def setSettings(self, settings):
"""
Set the state of the search filter from a dict object.
:type settings: dict
:rtype: None
"""
pattern = settings.get("pattern", "")
self.setPattern(pattern)
spaceOperator = settings.get("spaceOperator", self.Operator.AND)
self.setSpaceOperator(spaceOperator)
def resolvePattern(self):
"""
Resolve the pattern to speed up the match method.
:rtype: None
"""
pattern = self.pattern()
spaceOperator = self.spaceOperator()
pattern = pattern.strip()
# Case-sensitive is not supported
pattern = pattern.lower()
# Remove all double spaces.
pattern = re.sub(' +', ' ', pattern)
# Replace all white spaces with the space operator
pattern = pattern.replace(self.Operator.OR, "_OR_")
pattern = pattern.replace(self.Operator.AND, "_AND_")
pattern = pattern.replace(" ", spaceOperator)
pattern = pattern.replace("_OR_", self.Operator.OR)
pattern = pattern.replace("_AND_", self.Operator.AND)
self.setResolvedPattern(pattern)
def matches(self):
"""
Return the number of matches from the last match.
:rtype: int
"""
return self._matches
def match(self, text):
"""
Match the given text to the resolved pattern.
:type text: str
:rtype: bool
"""
match = False
matches = 0
pattern = self.resolvedPattern()
groups = pattern.split(self.Operator.OR)
for group in groups:
match = True
labels = [label.lower() for label in group.split(self.Operator.AND)]
for label in labels:
if label not in text.lower():
matches += 1
match = False
break
matches += 1
if match:
break
matches += 1
if not match:
matches = 0
self._matches = matches
return match | zfused_maya/zfused_maya/tool/animation/studiolibrary/packages/studioqt/widgets/searchwidget/searchfilter.py | import re
from studioqt import QtCore
class SearchFilter(QtCore.QObject):
searchChanged = QtCore.Signal()
class Operator:
OR = " or "
AND = " and "
def __init__(self, pattern, spaceOperator=Operator.AND):
"""
:type pattern: str
:type spaceOperator: SearchFilter.Operator
"""
QtCore.QObject.__init__(self)
self._matches = 0
self._pattern = None
self._resolvedPattern = None
self._spaceOperator = spaceOperator
self.setPattern(pattern)
def pattern(self):
"""
Return the pattern for the search filter.
:rtype: str
"""
return self._pattern
def setPattern(self, pattern):
"""
Set the pattern for the search filter.
:type pattern: str
"""
self._pattern = pattern
self._searchChanged()
def _searchChanged(self):
"""
Triggered when the search filter changes.
:rtype: None
"""
self.resolvePattern()
self.searchChanged.emit()
def resolvedPattern(self):
"""
Return the resolved pattern.
:rtype: str
"""
return self._resolvedPattern
def setResolvedPattern(self, resolvedPattern):
"""
Set the resolved pattern.
:type resolvedPattern: str
:rtype: None
"""
self._resolvedPattern = resolvedPattern
def spaceOperator(self):
"""
Return the operator for all white spaces in the pattern.
:rtype: SearchFilter.Operator
"""
return self._spaceOperator
def setSpaceOperator(self, operator):
"""
Set the operator for all white spaces in the pattern.
:type: SearchFilter.Operator
"""
self._spaceOperator = operator
self._searchChanged()
def settings(self):
"""
Return the state of the search filter as a dict object.
:rtype: dict
"""
settings = {}
settings["pattern"] = self.pattern()
settings["spaceOperator"] = self.spaceOperator()
return settings
def setSettings(self, settings):
"""
Set the state of the search filter from a dict object.
:type settings: dict
:rtype: None
"""
pattern = settings.get("pattern", "")
self.setPattern(pattern)
spaceOperator = settings.get("spaceOperator", self.Operator.AND)
self.setSpaceOperator(spaceOperator)
def resolvePattern(self):
"""
Resolve the pattern to speed up the match method.
:rtype: None
"""
pattern = self.pattern()
spaceOperator = self.spaceOperator()
pattern = pattern.strip()
# Case-sensitive is not supported
pattern = pattern.lower()
# Remove all double spaces.
pattern = re.sub(' +', ' ', pattern)
# Replace all white spaces with the space operator
pattern = pattern.replace(self.Operator.OR, "_OR_")
pattern = pattern.replace(self.Operator.AND, "_AND_")
pattern = pattern.replace(" ", spaceOperator)
pattern = pattern.replace("_OR_", self.Operator.OR)
pattern = pattern.replace("_AND_", self.Operator.AND)
self.setResolvedPattern(pattern)
def matches(self):
"""
Return the number of matches from the last match.
:rtype: int
"""
return self._matches
def match(self, text):
"""
Match the given text to the resolved pattern.
:type text: str
:rtype: bool
"""
match = False
matches = 0
pattern = self.resolvedPattern()
groups = pattern.split(self.Operator.OR)
for group in groups:
match = True
labels = [label.lower() for label in group.split(self.Operator.AND)]
for label in labels:
if label not in text.lower():
matches += 1
match = False
break
matches += 1
if match:
break
matches += 1
if not match:
matches = 0
self._matches = matches
return match | 0.742608 | 0.342984 |
import os
import json
from datetime import datetime
from ckan.lib.redis import connect_to_redis
def get_config(config_name):
"""
Retrieves a specific section of the config by its name. The config is
retrieved from Redis, as it is cached there for up to 24 hours.
:param str config_name: the name of the config key
:rtype: dict[str, list of str|dict[str, list of str|dict[str, dict]]
"""
_load_config_file()
return json.loads(redis_conn.get(config_key + config_name))
def in_list(name, list_type, value):
"""
Checks whether or not a given value is part of a given list.
The lists to check against are stored in the Redis instance configured in
the CKAN `production.ini` file. So in order to check against the list, the
list will first be retrieved from Redis.
:param str name: The name of the list
:param str list_type: The type of the list
:param str value: The value to search for
:rtype: bool
:return: Whether or not the value is contained in the list
"""
_load_config_file()
return value in json.loads(
redis_conn.get(redis_key + list_type + '.' + name)
)
def _load_list(name, local_name, list_type='vocabulary'):
"""
Loads the requested list from the local filesystem. The identifiers of all
the entries in the list are put into a list, this list is then returned.
See also: https://waardelijsten.dcat-ap-donl.nl
Support list_types:
- vocabulary, found in `'ckanext/dataoverheid/resources/vocabularies/*'`
- taxonomy, found in `'ckanext/dataoverheid/resources/taxonomies/*'`
Will raise a Exception under the following conditions:
- No list is found locally with the given name
- The local list contains content that could not be parsed as valid JSON
:param str name: The name of the list to load
:param str local_name: The name of the list on the filesystem
:param str list_type: The type of list to load
:rtype: list of str
:return: The entries of the loaded list
"""
types_map = {
'vocabulary': 'vocabularies',
'taxonomy': 'taxonomies'
}
special = [
'CKAN:License',
'Overheid:License'
]
try:
list_type = types_map.get(list_type)
filepath = os.path.join(os.path.dirname(__file__), '..', '..',
'resources', list_type, local_name)
with open(filepath, 'r') as file_contents:
parsed = json.loads(file_contents.read())
try:
return [block['id'] for block in parsed] if name in special \
else parsed.keys()
except KeyError:
raise Exception(name + ' is malformed')
except KeyError:
raise Exception('the requested vocabulary ' + name + ' does not exist '
'or is not supported')
def _load_config_file():
"""
Loads the contents from the ckanext-dataoverheid configuration file and
stores it in Redis for later use. Additionally all the vocabularies and
taxonomies used in DCAT-AP-DONL are stored in Redis so that they can be made
available to CKAN during the package validation process.
The Redis cache will be updated once every 24 hours on the first request of
the day.
:rtype: None
"""
current_date = str(datetime.strftime(datetime.now(), '%Y%m%d'))
cache_key = redis_key + '_cache_date'
if current_date == redis_conn.get(cache_key):
return
filepath = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..',
'config.json')
with open(filepath, 'r') as config_file:
config_keys = [
'validation', 'transformations', 'dcat', 'solr',
'properties_to_remove'
]
contents = json.load(config_file)
redis_conn.set(cache_key, current_date)
for redis_config_key in config_keys:
redis_conn.set(config_key + redis_config_key,
json.dumps(contents.get(redis_config_key)))
[redis_conn.set(redis_key + 'vocabulary.{0}'.format(key),
json.dumps(_load_list(key, voc['local'], 'vocabulary')))
for key, voc in contents.get('validation')['vocabularies'].iteritems()]
[redis_conn.set(redis_key + 'taxonomy.{0}'.format(key),
json.dumps(_load_list(key, tax['local'], 'taxonomy')))
for key, tax in contents.get('validation')['taxonomies'].iteritems()]
redis_key = 'ckanext.dataoverheid:'
config_key = redis_key + 'config.'
redis_conn = connect_to_redis()
_load_config_file() | ckanext/dataoverheid/logic/helpers/config.py |
import os
import json
from datetime import datetime
from ckan.lib.redis import connect_to_redis
def get_config(config_name):
"""
Retrieves a specific section of the config by its name. The config is
retrieved from Redis, as it is cached there for up to 24 hours.
:param str config_name: the name of the config key
:rtype: dict[str, list of str|dict[str, list of str|dict[str, dict]]
"""
_load_config_file()
return json.loads(redis_conn.get(config_key + config_name))
def in_list(name, list_type, value):
"""
Checks whether or not a given value is part of a given list.
The lists to check against are stored in the Redis instance configured in
the CKAN `production.ini` file. So in order to check against the list, the
list will first be retrieved from Redis.
:param str name: The name of the list
:param str list_type: The type of the list
:param str value: The value to search for
:rtype: bool
:return: Whether or not the value is contained in the list
"""
_load_config_file()
return value in json.loads(
redis_conn.get(redis_key + list_type + '.' + name)
)
def _load_list(name, local_name, list_type='vocabulary'):
"""
Loads the requested list from the local filesystem. The identifiers of all
the entries in the list are put into a list, this list is then returned.
See also: https://waardelijsten.dcat-ap-donl.nl
Support list_types:
- vocabulary, found in `'ckanext/dataoverheid/resources/vocabularies/*'`
- taxonomy, found in `'ckanext/dataoverheid/resources/taxonomies/*'`
Will raise a Exception under the following conditions:
- No list is found locally with the given name
- The local list contains content that could not be parsed as valid JSON
:param str name: The name of the list to load
:param str local_name: The name of the list on the filesystem
:param str list_type: The type of list to load
:rtype: list of str
:return: The entries of the loaded list
"""
types_map = {
'vocabulary': 'vocabularies',
'taxonomy': 'taxonomies'
}
special = [
'CKAN:License',
'Overheid:License'
]
try:
list_type = types_map.get(list_type)
filepath = os.path.join(os.path.dirname(__file__), '..', '..',
'resources', list_type, local_name)
with open(filepath, 'r') as file_contents:
parsed = json.loads(file_contents.read())
try:
return [block['id'] for block in parsed] if name in special \
else parsed.keys()
except KeyError:
raise Exception(name + ' is malformed')
except KeyError:
raise Exception('the requested vocabulary ' + name + ' does not exist '
'or is not supported')
def _load_config_file():
"""
Loads the contents from the ckanext-dataoverheid configuration file and
stores it in Redis for later use. Additionally all the vocabularies and
taxonomies used in DCAT-AP-DONL are stored in Redis so that they can be made
available to CKAN during the package validation process.
The Redis cache will be updated once every 24 hours on the first request of
the day.
:rtype: None
"""
current_date = str(datetime.strftime(datetime.now(), '%Y%m%d'))
cache_key = redis_key + '_cache_date'
if current_date == redis_conn.get(cache_key):
return
filepath = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..',
'config.json')
with open(filepath, 'r') as config_file:
config_keys = [
'validation', 'transformations', 'dcat', 'solr',
'properties_to_remove'
]
contents = json.load(config_file)
redis_conn.set(cache_key, current_date)
for redis_config_key in config_keys:
redis_conn.set(config_key + redis_config_key,
json.dumps(contents.get(redis_config_key)))
[redis_conn.set(redis_key + 'vocabulary.{0}'.format(key),
json.dumps(_load_list(key, voc['local'], 'vocabulary')))
for key, voc in contents.get('validation')['vocabularies'].iteritems()]
[redis_conn.set(redis_key + 'taxonomy.{0}'.format(key),
json.dumps(_load_list(key, tax['local'], 'taxonomy')))
for key, tax in contents.get('validation')['taxonomies'].iteritems()]
redis_key = 'ckanext.dataoverheid:'
config_key = redis_key + 'config.'
redis_conn = connect_to_redis()
_load_config_file() | 0.656438 | 0.305918 |
from django.shortcuts import render,redirect
from post.models import Post, Comment
# 向上取整
from math import ceil
from post.helper import page_cach,read_count
from post.helper import top_n
from user.helper import login_required
# Create your views here.
# 帖子列表操作
@page_cach(60)
def post_list(request):
# 获取到当前的页码
page = int(request.GET.get('page',1))
# 获取所有的帖子总数
total = Post.objects.count()
# 每页显示的帖子数
per_page = 10
# 显示的所有页数
pages = ceil(total/per_page)
# 按照索引进行分页
start = (page - 1) * per_page
end = start + per_page
posts = Post.objects.all().order_by('-id')[start:end]
# int 不能进行遍历,需要用range()转化下
return render(request, 'post_list.html' ,{'posts':posts, 'pages':range(pages)})
# 创建帖子的操作
@login_required
def create_post(request):
uid = request.session.get('uid')
if request.method == 'POST':
title = request.POST.get('title')
content = request.POST.get('content')
# 根据title和content创建帖子
post = Post.objects.create(uid=uid, title=title, content=content)
# 创建完成之后跳转到阅读页面
return redirect('/post/read/?post_id=%s' %post.id)
return render(request, 'create_post.html')
# 修改帖子的操作
@login_required
def edit_post(request):
if request.method == 'POST':
# 先获取要修改的数据(通过hidden里面要提交的post_id获取的)
post_id = int(request.POST.get('post_id'))
post = Post.objects.get(id=post_id)
# 将要修改的内容存入数据库
post.title = request.POST.get('title')
post.content = request.POST.get('content')
post.save()
return redirect('/post/read/?post_id=%s' %post.id)
else:
post_id = int(request.GET.get('post_id'))
post = Post.objects.get(id=post_id)
return render(request, 'edit_post.html', {'post':post})
# 阅读帖子的操作
# 给帖子添加缓存
@read_count
@page_cach(5)
def read_post(request):
# 获取post_id
post_id = int(request.GET.get('post_id'))
# 根据请求参数所携带的post_id查找到对应的post
post = Post.objects.get(id=post_id)
return render(request, 'read_post.html', {'post':post})
# 搜索帖子的操作
def search_post(request):
if request.method == 'POST':
# 获取关键字
keyword = request.POST.get('keyword')
# 根据关键字查询到所有符合条件的文章
posts = Post.objects.get(content__contains=keyword)
return render(request, 'search.html', {'posts':posts})
return render(request, 'search.html', {})
def top10(request):
rank_data = top_n(10)
return render(request,'top10.html',{'rank_data':rank_data})
@login_required
def comment(request):
uid = request.session.get('uid')
post_id = request.POST.get('post_id')
content = request.POST.get('content')
Comment.objects.create(uid=uid, post_id=post_id, content=content)
return redirect('/post/read/post_id=%s'%post_id) | post/views.py | from django.shortcuts import render,redirect
from post.models import Post, Comment
# 向上取整
from math import ceil
from post.helper import page_cach,read_count
from post.helper import top_n
from user.helper import login_required
# Create your views here.
# 帖子列表操作
@page_cach(60)
def post_list(request):
# 获取到当前的页码
page = int(request.GET.get('page',1))
# 获取所有的帖子总数
total = Post.objects.count()
# 每页显示的帖子数
per_page = 10
# 显示的所有页数
pages = ceil(total/per_page)
# 按照索引进行分页
start = (page - 1) * per_page
end = start + per_page
posts = Post.objects.all().order_by('-id')[start:end]
# int 不能进行遍历,需要用range()转化下
return render(request, 'post_list.html' ,{'posts':posts, 'pages':range(pages)})
# 创建帖子的操作
@login_required
def create_post(request):
uid = request.session.get('uid')
if request.method == 'POST':
title = request.POST.get('title')
content = request.POST.get('content')
# 根据title和content创建帖子
post = Post.objects.create(uid=uid, title=title, content=content)
# 创建完成之后跳转到阅读页面
return redirect('/post/read/?post_id=%s' %post.id)
return render(request, 'create_post.html')
# 修改帖子的操作
@login_required
def edit_post(request):
if request.method == 'POST':
# 先获取要修改的数据(通过hidden里面要提交的post_id获取的)
post_id = int(request.POST.get('post_id'))
post = Post.objects.get(id=post_id)
# 将要修改的内容存入数据库
post.title = request.POST.get('title')
post.content = request.POST.get('content')
post.save()
return redirect('/post/read/?post_id=%s' %post.id)
else:
post_id = int(request.GET.get('post_id'))
post = Post.objects.get(id=post_id)
return render(request, 'edit_post.html', {'post':post})
# 阅读帖子的操作
# 给帖子添加缓存
@read_count
@page_cach(5)
def read_post(request):
# 获取post_id
post_id = int(request.GET.get('post_id'))
# 根据请求参数所携带的post_id查找到对应的post
post = Post.objects.get(id=post_id)
return render(request, 'read_post.html', {'post':post})
# 搜索帖子的操作
def search_post(request):
if request.method == 'POST':
# 获取关键字
keyword = request.POST.get('keyword')
# 根据关键字查询到所有符合条件的文章
posts = Post.objects.get(content__contains=keyword)
return render(request, 'search.html', {'posts':posts})
return render(request, 'search.html', {})
def top10(request):
rank_data = top_n(10)
return render(request,'top10.html',{'rank_data':rank_data})
@login_required
def comment(request):
uid = request.session.get('uid')
post_id = request.POST.get('post_id')
content = request.POST.get('content')
Comment.objects.create(uid=uid, post_id=post_id, content=content)
return redirect('/post/read/post_id=%s'%post_id) | 0.28279 | 0.066055 |
import os
import re
import logging
import traceback
from subprocess import call
from time import time
from pyramid.view import view_config
from pyramid.response import Response
from mist.monitor import config
from mist.monitor import methods
from mist.monitor import graphite
from mist.monitor.model import get_all_machines
from mist.monitor.exceptions import MistError
from mist.monitor.exceptions import RequiredParameterMissingError
from mist.monitor.exceptions import MachineNotFoundError
from mist.monitor.exceptions import ForbiddenError
from mist.monitor.exceptions import UnauthorizedError
from mist.monitor.exceptions import BadRequestError
# Module-level logger for this views module.
log = logging.getLogger(__name__)
# Shared 200 "OK" response returned by views that carry no payload.
# NOTE(review): a single Response instance is reused across requests;
# pyramid responses are mutable — confirm no view/tween mutates it.
OK = Response("OK", 200)
@view_config(context=Exception)
def exception_handler_mist(exc, request):
    """Translate raised exceptions into proper HTTP responses.

    Special pyramid exception view: it is invoked whenever any other
    view raises an exception ``exc`` for which ``isinstance(exc,
    context)`` holds.
    """
    if isinstance(exc, MistError):
        # Expected application errors map directly onto an HTTP status
        # via their http_code attribute.
        log.info("MistError: %r", exc)
        return Response(str(exc), exc.http_code)
    # Anything else is a programming error that should never happen.
    trace = traceback.format_exc()
    log.critical("Uncaught non-mist exception? WTF!\n%s", trace)
    return Response("Internal Server Error", 500)
@view_config(route_name='machines', request_method='GET', renderer='json')
def list_machines(request):
    """Lists machines with monitoring.

    Returns a dict with uuid's as keys and machine dicts as values,
    each carrying the machine's full list of rule ids.
    """
    # Build one entry per machine with *all* of its rule ids. The
    # previous nested dict comprehension re-assigned the machine's key
    # once per rule, so only the last rule survived, and machines
    # without any rules were omitted from the result entirely.
    return {machine.uuid: {'rules': list(machine.rules)}
            for machine in get_all_machines()}
@view_config(route_name='machine', request_method='PUT')
def add_machine(request):
    """Adds machine to monitored list."""
    # The machine uuid comes from the URL, the collectd password from
    # the request params.
    machine_uuid = request.matchdict['machine']
    password = request.params.get('passwd')
    log.info("Adding machine %s to monitor list" % (machine_uuid))
    if not password:
        # The password is mandatory for enabling monitoring.
        raise RequiredParameterMissingError('passwd')
    methods.add_machine(machine_uuid, password)
    return OK
@view_config(route_name='machine', request_method='DELETE')
def remove_machine(request):
    """Removes machine from monitored list."""
    machine_uuid = request.matchdict['machine']
    log.info("Removing machine %s from monitor list" % (machine_uuid))
    methods.remove_machine(machine_uuid)
    return OK
@view_config(route_name='rule', request_method='PUT')
def add_rule(request):
    """Add or update rule.

    This will create a new condition that will start being checked with
    clear history, even if the rule is not actually being changed.

    Raises RequiredParameterMissingError when 'metric' or 'operator' is
    missing and BadRequestError when 'value' is absent or not a number.
    """
    uuid = request.matchdict['machine']
    rule_id = request.matchdict['rule']
    params = request.json_body
    for key in ["metric", "operator"]:
        if not params.get(key):
            raise RequiredParameterMissingError(key)
    metric = params["metric"]
    operator = params["operator"]
    # Read the raw value first. The previous code referenced the (still
    # unassigned) `value` local inside the except clause, raising
    # NameError instead of the intended BadRequestError, and a missing
    # 'value' key escaped as an uncaught KeyError.
    raw_value = params.get("value")
    try:
        value = float(raw_value)
    except (ValueError, TypeError):
        raise BadRequestError("Invalid value type %r" % raw_value)
    reminder_list = params.get("reminder_list")
    reminder_offset = params.get("reminder_offset")
    aggregate = params.get("aggregate")
    methods.add_rule(uuid, rule_id, metric, operator, value,
                     aggregate=aggregate, reminder_list=reminder_list,
                     reminder_offset=reminder_offset)
    return OK
@view_config(route_name='rule', request_method='DELETE')
def remove_rule(request):
    """Removes rule and corresponding condition."""
    # Both identifiers come straight from the matched route.
    machine_uuid = request.matchdict['machine']
    methods.remove_rule(machine_uuid, request.matchdict['rule'])
    return OK
def _parse_get_stats_params(request):
try:
params = request.json_body
metrics = params.get('metrics', [])
uuids = params.get('uuids', [])
except:
params = request.params
metrics = params.getall('metric')
uuids = params.getall('uuid')
start = params.get('start')
stop = params.get('stop')
interval_str = str(params.get('step', ''))
if re.match("^[0-9]+(\.[0-9]+)?$", interval_str):
seconds = int(interval_str)
log.info('seconds: %d', seconds)
log.info('start: %s', start)
for key in sorted(config.RETENTIONS.keys()):
log.info('testing key, tstamp: %s %s', key, time()-key)
log.info('period interval %s', config.RETENTIONS[key])
if int(start) >= time() - key:
if seconds < config.RETENTIONS[key]:
raise BadRequestError(
"Requested resolution is too high for specified time "
"range, try zooming out."
)
log.info('step is ok')
break
interval_str = "%ssec" % seconds
elif re.match("^[0-9]+m$", interval_str):
interval_str += 'in'
return uuids, metrics, start, stop, interval_str
@view_config(route_name='stats', request_method='GET', renderer='json')
def get_stats(request):
    """Returns all stats for a machine, the client will draw them."""
    # The machine uuid comes from the URL; everything else from params.
    parsed = _parse_get_stats_params(request)
    _, metrics, start, stop, interval_str = parsed
    return methods.get_stats(request.matchdict['machine'],
                             metrics, start, stop, interval_str)
@view_config(route_name='load', request_method='GET', renderer='json')
def get_load(request):
    """Returns shortterm load for many machines"""
    # Only the uuids and the time window matter for this endpoint.
    uuids, _unused, start, stop, step = _parse_get_stats_params(request)
    return methods.get_load(uuids, start, stop, step)
@view_config(route_name='cores', request_method='GET', renderer='json')
def get_cores(request):
    """Returns number of cores for many machines"""
    # Metrics from the query are ignored; cores are implied by the route.
    uuids, _unused, start, stop, step = _parse_get_stats_params(request)
    return methods.get_cores(uuids, start, stop, step)
@view_config(route_name='find_metrics', request_method='GET', renderer='json')
def find_metrics(request):
    """Return the metrics available for the machine in the URL."""
    return methods.find_metrics(request.matchdict['machine'])
@view_config(route_name='reset', request_method='POST')
def reset_hard(request):
    """Reset mist.monitor with data provided from mist.core

    This is a special view that will cause monitor to drop all known data
    for machines, rules and conditions, will repopulate itself with the data
    provided in the request and will restart collectd and mist.alert.

    For security reasons, a special non empty key needs to be specified in
    settings.py and sent along with the reset request.
    """
    params = request.json_body
    # NOTE(review): `data` is bound with a default of {} but never used;
    # the call below reads params['data'] directly, which raises KeyError
    # when 'data' is absent — confirm whether the default was intended.
    key, data = params.get('key'), params.get('data', {})
    if not config.RESET_KEY:
        # Refuse outright unless the deployment explicitly enabled resets.
        raise ForbiddenError("Reset functionality not enabled.")
    if key != config.RESET_KEY:
        raise UnauthorizedError("Wrong reset key provided.")
    methods.reset_hard(params['data'])
    return OK | src/mist/monitor/views.py | import os
import re
import logging
import traceback
from subprocess import call
from time import time
from pyramid.view import view_config
from pyramid.response import Response
from mist.monitor import config
from mist.monitor import methods
from mist.monitor import graphite
from mist.monitor.model import get_all_machines
from mist.monitor.exceptions import MistError
from mist.monitor.exceptions import RequiredParameterMissingError
from mist.monitor.exceptions import MachineNotFoundError
from mist.monitor.exceptions import ForbiddenError
from mist.monitor.exceptions import UnauthorizedError
from mist.monitor.exceptions import BadRequestError
# Module-level logger shared by all views in this file.
log = logging.getLogger(__name__)
# Canned 200 response used by views that only need to signal success.
OK = Response("OK", 200)
@view_config(context=Exception)
def exception_handler_mist(exc, request):
    """Here we catch exceptions and transform them to proper http responses

    This is a special pyramid view that gets triggered whenever an exception
    is raised from any other view. It catches all exceptions exc where
    isinstance(exc, context) is True.
    """
    # non-mist exceptions. that shouldn't happen! never!
    if not isinstance(exc, MistError):
        trace = traceback.format_exc()
        log.critical("Uncaught non-mist exception? WTF!\n%s", trace)
        # Hide internals from the client; the traceback is in the log above.
        return Response("Internal Server Error", 500)
    # mist exceptions are ok.
    log.info("MistError: %r", exc)
    # translate it to HTTP response based on http_code attribute
    return Response(str(exc), exc.http_code)
@view_config(route_name='machines', request_method='GET', renderer='json')
def list_machines(request):
    """Lists machines with monitoring.

    Returns a dict with uuid's as keys and machine dicts as values.
    """
    # The previous double comprehension re-assigned each machine's entry
    # once per rule, so only the LAST rule id survived, and machines with
    # no rules were dropped from the response entirely.
    return {machine.uuid: {'rules': list(machine.rules)}
            for machine in get_all_machines()}
@view_config(route_name='machine', request_method='PUT')
def add_machine(request):
    """Adds machine to monitored list.

    Raises:
        RequiredParameterMissingError: if no collectd passwd is supplied.
    """
    uuid = request.matchdict['machine']
    passwd = request.params.get('passwd')
    # Lazy %-style logging args: formatting is skipped when INFO is disabled.
    log.info("Adding machine %s to monitor list", uuid)
    if not passwd:
        raise RequiredParameterMissingError('passwd')
    methods.add_machine(uuid, passwd)
    return OK
@view_config(route_name='machine', request_method='DELETE')
def remove_machine(request):
    """Remove the machine given in the URL from the monitored list."""
    machine_uuid = request.matchdict['machine']
    log.info("Removing machine %s from monitor list" % (machine_uuid))
    methods.remove_machine(machine_uuid)
    return OK
@view_config(route_name='rule', request_method='PUT')
def add_rule(request):
    """Add or update rule.

    This will create a new condition that will start being checked with clear
    history, even if the rule is not actually being changed.

    Raises:
        RequiredParameterMissingError: if 'metric' or 'operator' is missing.
        BadRequestError: if 'value' is missing or not a number.
    """
    uuid = request.matchdict['machine']
    rule_id = request.matchdict['rule']
    params = request.json_body
    for key in ["metric", "operator"]:
        if not params.get(key):
            raise RequiredParameterMissingError(key)
    metric = params["metric"]
    operator = params["operator"]
    try:
        value = float(params["value"])
    except (KeyError, ValueError, TypeError):
        # The original referenced the unbound local `value` in this branch,
        # which raised NameError instead of the intended BadRequestError.
        # KeyError is also caught so a missing 'value' gets a 400, not a 500.
        raise BadRequestError("Invalid value type %r" % params.get("value"))
    reminder_list = params.get("reminder_list")
    reminder_offset = params.get("reminder_offset")
    aggregate = params.get("aggregate")
    methods.add_rule(uuid, rule_id, metric, operator, value,
                     aggregate=aggregate, reminder_list=reminder_list,
                     reminder_offset=reminder_offset)
    return OK
@view_config(route_name='rule', request_method='DELETE')
def remove_rule(request):
    """Remove a rule and its corresponding condition."""
    matchdict = request.matchdict
    methods.remove_rule(matchdict['machine'], matchdict['rule'])
    return OK
def _parse_get_stats_params(request):
try:
params = request.json_body
metrics = params.get('metrics', [])
uuids = params.get('uuids', [])
except:
params = request.params
metrics = params.getall('metric')
uuids = params.getall('uuid')
start = params.get('start')
stop = params.get('stop')
interval_str = str(params.get('step', ''))
if re.match("^[0-9]+(\.[0-9]+)?$", interval_str):
seconds = int(interval_str)
log.info('seconds: %d', seconds)
log.info('start: %s', start)
for key in sorted(config.RETENTIONS.keys()):
log.info('testing key, tstamp: %s %s', key, time()-key)
log.info('period interval %s', config.RETENTIONS[key])
if int(start) >= time() - key:
if seconds < config.RETENTIONS[key]:
raise BadRequestError(
"Requested resolution is too high for specified time "
"range, try zooming out."
)
log.info('step is ok')
break
interval_str = "%ssec" % seconds
elif re.match("^[0-9]+m$", interval_str):
interval_str += 'in'
return uuids, metrics, start, stop, interval_str
@view_config(route_name='stats', request_method='GET', renderer='json')
def get_stats(request):
    """Return all stats for one machine so the client can draw them."""
    machine_uuid = request.matchdict['machine']
    # Drop the parsed uuid list: the target machine is fixed by the URL.
    _, metrics, start, stop, step = _parse_get_stats_params(request)
    return methods.get_stats(machine_uuid, metrics, start, stop, step)
@view_config(route_name='load', request_method='GET', renderer='json')
def get_load(request):
    """Return shortterm load for many machines."""
    parsed = _parse_get_stats_params(request)
    # parsed[1] holds metrics, which this view does not use.
    return methods.get_load(parsed[0], parsed[2], parsed[3], parsed[4])
@view_config(route_name='cores', request_method='GET', renderer='json')
def get_cores(request):
    """Return the core count for many machines."""
    uuids, _unused_metrics, start, stop, step = _parse_get_stats_params(request)
    return methods.get_cores(uuids, start, stop, step)
@view_config(route_name='find_metrics', request_method='GET', renderer='json')
def find_metrics(request):
    """List available metrics for the machine in the URL."""
    return methods.find_metrics(request.matchdict['machine'])
@view_config(route_name='reset', request_method='POST')
def reset_hard(request):
    """Reset mist.monitor with data provided from mist.core

    This is a special view that will cause monitor to drop all known data
    for machines, rules and conditions, will repopulate itself with the data
    provided in the request and will restart collectd and mist.alert.

    For security reasons, a special non empty key needs to be specified in
    settings.py and sent along with the reset request.

    Raises:
        ForbiddenError: if no RESET_KEY is configured at all.
        UnauthorizedError: if the supplied key does not match.
    """
    params = request.json_body
    key, data = params.get('key'), params.get('data', {})
    if not config.RESET_KEY:
        raise ForbiddenError("Reset functionality not enabled.")
    if key != config.RESET_KEY:
        raise UnauthorizedError("Wrong reset key provided.")
    # Use the already-extracted `data` (which defaults to {}) instead of
    # re-indexing params['data'], which raised KeyError when 'data' was absent.
    methods.reset_hard(data)
    return OK
import os
import sys
import unittest
import PRESUBMIT
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..',
'..', '..'))
from PRESUBMIT_test_mocks import (MockInputApi, MockOutputApi, MockAffectedFile)
class AccessibilityEventsTestIncludesAndroidTest(unittest.TestCase):
    """Presubmit tests: adding or removing an accessibility event test html
    file must be accompanied by a change to the Android events test."""

    def _check(self, expected, affected_files):
        # Shared driver: the six tests previously duplicated this exact
        # boilerplate (mock setup, presubmit call, assertEqual message).
        mock_input_api = MockInputApi()
        mock_input_api.files = affected_files
        msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
            mock_input_api, MockOutputApi())
        self.assertEqual(expected, len(msgs),
                         'Expected %d messages, found %d: %s'
                         % (expected, len(msgs), msgs))

    # Test that no warning is raised when the Android file is also modified.
    def testAndroidChangeIncluded(self):
        self._check(0, [
            MockAffectedFile('content/test/data/accessibility/event/foo.html',
                             [''], action='A'),
            MockAffectedFile(
                'accessibility/WebContentsAccessibilityEventsTest.java',
                [''], action='M')
        ])

    # Test that a warning is raised when the Android file is not modified.
    def testAndroidChangeMissing(self):
        self._check(1, [
            MockAffectedFile('content/test/data/accessibility/event/foo.html',
                             [''], action='A'),
        ])

    # Test that Android change is not required when no html file is added/removed.
    def testIgnoreNonHtmlFiles(self):
        self._check(0, [
            MockAffectedFile('content/test/data/accessibility/event/foo.txt',
                             [''], action='A'),
            MockAffectedFile('content/test/data/accessibility/event/foo.cc',
                             [''], action='A'),
            MockAffectedFile('content/test/data/accessibility/event/foo.h',
                             [''], action='A'),
            MockAffectedFile('content/test/data/accessibility/event/foo.py',
                             [''], action='A')
        ])

    # Test that Android change is not required for unrelated html files.
    def testIgnoreNonRelatedHtmlFiles(self):
        self._check(0, [
            MockAffectedFile('content/test/data/accessibility/aria/foo.html',
                             [''], action='A'),
            MockAffectedFile('content/test/data/accessibility/html/foo.html',
                             [''], action='A'),
            MockAffectedFile('chrome/tests/data/accessibility/foo.html',
                             [''], action='A')
        ])

    # Test that only modifying an html file will not trigger the warning.
    def testIgnoreModifiedFiles(self):
        self._check(0, [
            MockAffectedFile('content/test/data/accessibility/event/foo.html',
                             [''], action='M')
        ])

    # Test that deleting an html file will trigger the warning.
    def testAndroidChangeMissingOnDeletedFile(self):
        self._check(1, [
            MockAffectedFile('content/test/data/accessibility/event/foo.html',
                             [], action='D')
        ])
# Allow running this test file directly.
# (Dataset junk fused into the unittest.main() line has been removed.)
if __name__ == '__main__':
    unittest.main()
import os
import sys
import unittest
import PRESUBMIT
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..',
'..', '..'))
from PRESUBMIT_test_mocks import (MockInputApi, MockOutputApi, MockAffectedFile)
class AccessibilityEventsTestIncludesAndroidTest(unittest.TestCase):
    """Presubmit tests: adding or removing an accessibility event test html
    file must be accompanied by a change to the Android events test."""

    # Test that no warning is raised when the Android file is also modified.
    def testAndroidChangeIncluded(self):
        mock_input_api = MockInputApi()
        mock_input_api.files = [
            MockAffectedFile('content/test/data/accessibility/event/foo.html',
                             [''], action='A'),
            MockAffectedFile(
                'accessibility/WebContentsAccessibilityEventsTest.java',
                [''], action='M')
        ]
        msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
            mock_input_api, MockOutputApi())
        self.assertEqual(0, len(msgs),
                         'Expected %d messages, found %d: %s'
                         % (0, len(msgs), msgs))

    # Test that a warning is raised when the Android file is not modified.
    def testAndroidChangeMissing(self):
        mock_input_api = MockInputApi()
        mock_input_api.files = [
            MockAffectedFile('content/test/data/accessibility/event/foo.html',
                             [''], action='A'),
        ]
        msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
            mock_input_api, MockOutputApi())
        self.assertEqual(1, len(msgs),
                         'Expected %d messages, found %d: %s'
                         % (1, len(msgs), msgs))

    # Test that Android change is not required when no html file is added/removed.
    def testIgnoreNonHtmlFiles(self):
        mock_input_api = MockInputApi()
        mock_input_api.files = [
            MockAffectedFile('content/test/data/accessibility/event/foo.txt',
                             [''], action='A'),
            MockAffectedFile('content/test/data/accessibility/event/foo.cc',
                             [''], action='A'),
            MockAffectedFile('content/test/data/accessibility/event/foo.h',
                             [''], action='A'),
            MockAffectedFile('content/test/data/accessibility/event/foo.py',
                             [''], action='A')
        ]
        msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
            mock_input_api, MockOutputApi())
        self.assertEqual(0, len(msgs),
                         'Expected %d messages, found %d: %s'
                         % (0, len(msgs), msgs))

    # Test that Android change is not required for unrelated html files.
    def testIgnoreNonRelatedHtmlFiles(self):
        mock_input_api = MockInputApi()
        mock_input_api.files = [
            MockAffectedFile('content/test/data/accessibility/aria/foo.html',
                             [''], action='A'),
            MockAffectedFile('content/test/data/accessibility/html/foo.html',
                             [''], action='A'),
            MockAffectedFile('chrome/tests/data/accessibility/foo.html',
                             [''], action='A')
        ]
        msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
            mock_input_api, MockOutputApi())
        self.assertEqual(0, len(msgs),
                         'Expected %d messages, found %d: %s'
                         % (0, len(msgs), msgs))

    # Test that only modifying an html file will not trigger the warning.
    def testIgnoreModifiedFiles(self):
        mock_input_api = MockInputApi()
        mock_input_api.files = [
            MockAffectedFile('content/test/data/accessibility/event/foo.html',
                             [''], action='M')
        ]
        msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
            mock_input_api, MockOutputApi())
        self.assertEqual(0, len(msgs),
                         'Expected %d messages, found %d: %s'
                         % (0, len(msgs), msgs))

    # Test that deleting an html file will trigger the warning.
    def testAndroidChangeMissingOnDeletedFile(self):
        mock_input_api = MockInputApi()
        mock_input_api.files = [
            MockAffectedFile('content/test/data/accessibility/event/foo.html',
                             [], action='D')
        ]
        msgs = PRESUBMIT.CheckAccessibilityEventsTestIncludesAndroid(
            mock_input_api, MockOutputApi())
        self.assertEqual(1, len(msgs),
                         'Expected %d messages, found %d: %s'
                         % (1, len(msgs), msgs))
# Allow running this test file directly.
# (Dataset junk fused into the unittest.main() line has been removed.)
if __name__ == '__main__':
    unittest.main()
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
import net.nn as nn
def vq_encoder_spec(x, ema=None, nr_channel=128, nr_res_block=2, nr_res_channel=64, embedding_dim=64,
                    num_embeddings=512, commitment_cost=0.25, decay=0.99, is_training=False):
    """
    Two-level VQ encoder: downsamples x, vector-quantizes a coarse "top"
    code and a finer "bottom" code against separate codebooks.

    Input:
        Tensor x of shape (N,H,W,3) (e.g. (128,256,256,3))
    Output:
        Tensor enc_t of shape (N,H//8,W//8,C) (e.g. (128,32,32,64))
        Tensor enc_b of shape (N,H//4,W//4,C) (e.g. (128,64,64,64))
        Tensor quant_t of shape (N,H//8,W//8,C) (e.g. (128,32,32,64))
        Tensor quant_b of shape (N,H//4,W//4,C) (e.g. (128,64,64,64))
        Tensor loss of shape (1,)
        Tensor idx_t of shape (N,H//8,W//8) (e.g. (128,32,32))
        Tensor idx_b of shape (N,H//4,W//4) (e.g. (128,64,64))
        Tensor embed_t of shape (C,K) (e.g. (64,512))
        Tensor embed_b of shape (C,K) (e.g. (64,512))
    """
    counters = {}
    with arg_scope([nn.conv2d, nn.deconv2d, nn.vector_quantize], counters=counters, ema=ema):
        # Bottom encoder: two stride-2 convs downsample to H//4 x W//4.
        enc_b = nn.conv2d(x, nr_channel//2, filter_size=[4,4], stride=[2,2])
        enc_b = tf.nn.elu(enc_b)
        enc_b = nn.conv2d(enc_b, nr_channel, filter_size=[4,4], stride=[2,2])
        enc_b = tf.nn.elu(enc_b)
        enc_b = nn.conv2d(enc_b, nr_channel)
        for rep in range(nr_res_block):
            enc_b = nn.resnet(enc_b, num_res_channel=nr_res_channel, nonlinearity=tf.nn.elu)
        enc_b = tf.nn.elu(enc_b)
        # Top encoder: one further stride-2 conv downsamples to H//8 x W//8.
        enc_t = nn.conv2d(enc_b, nr_channel//2, filter_size=[4,4], stride=[2,2])
        enc_t = tf.nn.elu(enc_t)
        enc_t = nn.conv2d(enc_t, nr_channel)
        for rep in range(nr_res_block):
            enc_t = nn.resnet(enc_t, num_res_channel=nr_res_channel, nonlinearity=tf.nn.elu)
        enc_t = tf.nn.elu(enc_t)
        enc_t = nn.conv2d(enc_t, embedding_dim, filter_size=[1,1])
        # Vector quantization with top codebook
        quant_t, diff_t, idx_t, embed_t = nn.vector_quantize(enc_t, embedding_dim=embedding_dim,
                                                             num_embeddings=num_embeddings, commitment_cost=commitment_cost,
                                                             decay=decay, is_training=is_training)
        # Top decoder: upsamples the quantized top code back to bottom resolution.
        dec_t = nn.conv2d(quant_t, nr_channel)
        for rep in range(nr_res_block):
            dec_t = nn.resnet(dec_t, num_res_channel=nr_res_channel, nonlinearity=tf.nn.elu)
        dec_t = tf.nn.elu(dec_t)
        dec_t = nn.deconv2d(dec_t, nr_channel, filter_size=[4,4], stride=[2,2])
        # NOTE: enc_b is rebound here - the bottom code is conditioned on the
        # decoded top code before being quantized.
        enc_b = tf.concat([enc_b, dec_t], -1)
        enc_b = nn.conv2d(enc_b, embedding_dim, filter_size=[1,1])
        # Vector quantization with bottom codebook
        quant_b, diff_b, idx_b, embed_b = nn.vector_quantize(enc_b, embedding_dim=embedding_dim,
                                                             num_embeddings=num_embeddings, commitment_cost=commitment_cost,
                                                             decay=decay, is_training=is_training)
    # 'loss' is the sum of the two quantizer losses returned by nn.vector_quantize.
    return {'enc_t': enc_t, 'enc_b': enc_b, 'quant_t': quant_t, 'quant_b': quant_b, 'loss': diff_t + diff_b,
            'idx_t': idx_t, 'idx_b': idx_b, 'embed_t': embed_t, 'embed_b': embed_b}
def vq_decoder_spec(quant_t, quant_b, ema=None, nr_channel=128, nr_res_block=2, nr_res_channel=64, embedding_dim=64):
    """
    Decode the quantized top/bottom feature maps back to an image.

    Input:
        Tensor quant_t of shape (N,H//8,W//8,C) (e.g. (128,32,32,64))
        Tensor quant_b of shape (N,H//4,W//4,C) (e.g. (128,64,64,64))
    Output:
        Tensor dec_b of shape (N,H,W,3) (e.g. (128,256,256,3))
    """
    counters = {}
    with arg_scope([nn.conv2d, nn.deconv2d], counters=counters, ema=ema):
        # Upsample the top code to the bottom resolution and fuse the two.
        quant_t = nn.deconv2d(quant_t, embedding_dim, filter_size=[4,4], stride=[2,2])
        dec_b = tf.concat([quant_b, quant_t], -1)
        dec_b = nn.conv2d(dec_b, nr_channel)
        for rep in range(nr_res_block):
            dec_b = nn.resnet(dec_b, num_res_channel=nr_res_channel, nonlinearity=tf.nn.elu)
        dec_b = tf.nn.elu(dec_b)
        # Two stride-2 deconvolutions bring the map back to full resolution.
        dec_b = nn.deconv2d(dec_b, nr_channel//2, filter_size=[4,4], stride=[2,2])
        dec_b = tf.nn.elu(dec_b)
        dec_b = nn.deconv2d(dec_b, 3, filter_size=[4,4], stride=[2,2])
    # Dataset junk fused into this return line has been removed.
    return {'dec_b': dec_b}

import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import arg_scope
import net.nn as nn
def vq_encoder_spec(x, ema=None, nr_channel=128, nr_res_block=2, nr_res_channel=64, embedding_dim=64,
                    num_embeddings=512, commitment_cost=0.25, decay=0.99, is_training=False):
    """
    Two-level VQ encoder: downsamples x, vector-quantizes a coarse "top"
    code and a finer "bottom" code against separate codebooks.

    Input:
        Tensor x of shape (N,H,W,3) (e.g. (128,256,256,3))
    Output:
        Tensor enc_t of shape (N,H//8,W//8,C) (e.g. (128,32,32,64))
        Tensor enc_b of shape (N,H//4,W//4,C) (e.g. (128,64,64,64))
        Tensor quant_t of shape (N,H//8,W//8,C) (e.g. (128,32,32,64))
        Tensor quant_b of shape (N,H//4,W//4,C) (e.g. (128,64,64,64))
        Tensor loss of shape (1,)
        Tensor idx_t of shape (N,H//8,W//8) (e.g. (128,32,32))
        Tensor idx_b of shape (N,H//4,W//4) (e.g. (128,64,64))
        Tensor embed_t of shape (C,K) (e.g. (64,512))
        Tensor embed_b of shape (C,K) (e.g. (64,512))
    """
    counters = {}
    with arg_scope([nn.conv2d, nn.deconv2d, nn.vector_quantize], counters=counters, ema=ema):
        # Bottom encoder: two stride-2 convs downsample to H//4 x W//4.
        enc_b = nn.conv2d(x, nr_channel//2, filter_size=[4,4], stride=[2,2])
        enc_b = tf.nn.elu(enc_b)
        enc_b = nn.conv2d(enc_b, nr_channel, filter_size=[4,4], stride=[2,2])
        enc_b = tf.nn.elu(enc_b)
        enc_b = nn.conv2d(enc_b, nr_channel)
        for rep in range(nr_res_block):
            enc_b = nn.resnet(enc_b, num_res_channel=nr_res_channel, nonlinearity=tf.nn.elu)
        enc_b = tf.nn.elu(enc_b)
        # Top encoder: one further stride-2 conv downsamples to H//8 x W//8.
        enc_t = nn.conv2d(enc_b, nr_channel//2, filter_size=[4,4], stride=[2,2])
        enc_t = tf.nn.elu(enc_t)
        enc_t = nn.conv2d(enc_t, nr_channel)
        for rep in range(nr_res_block):
            enc_t = nn.resnet(enc_t, num_res_channel=nr_res_channel, nonlinearity=tf.nn.elu)
        enc_t = tf.nn.elu(enc_t)
        enc_t = nn.conv2d(enc_t, embedding_dim, filter_size=[1,1])
        # Vector quantization with top codebook
        quant_t, diff_t, idx_t, embed_t = nn.vector_quantize(enc_t, embedding_dim=embedding_dim,
                                                             num_embeddings=num_embeddings, commitment_cost=commitment_cost,
                                                             decay=decay, is_training=is_training)
        # Top decoder: upsamples the quantized top code back to bottom resolution.
        dec_t = nn.conv2d(quant_t, nr_channel)
        for rep in range(nr_res_block):
            dec_t = nn.resnet(dec_t, num_res_channel=nr_res_channel, nonlinearity=tf.nn.elu)
        dec_t = tf.nn.elu(dec_t)
        dec_t = nn.deconv2d(dec_t, nr_channel, filter_size=[4,4], stride=[2,2])
        # NOTE: enc_b is rebound here - the bottom code is conditioned on the
        # decoded top code before being quantized.
        enc_b = tf.concat([enc_b, dec_t], -1)
        enc_b = nn.conv2d(enc_b, embedding_dim, filter_size=[1,1])
        # Vector quantization with bottom codebook
        quant_b, diff_b, idx_b, embed_b = nn.vector_quantize(enc_b, embedding_dim=embedding_dim,
                                                             num_embeddings=num_embeddings, commitment_cost=commitment_cost,
                                                             decay=decay, is_training=is_training)
    # 'loss' is the sum of the two quantizer losses returned by nn.vector_quantize.
    return {'enc_t': enc_t, 'enc_b': enc_b, 'quant_t': quant_t, 'quant_b': quant_b, 'loss': diff_t + diff_b,
            'idx_t': idx_t, 'idx_b': idx_b, 'embed_t': embed_t, 'embed_b': embed_b}
def vq_decoder_spec(quant_t, quant_b, ema=None, nr_channel=128, nr_res_block=2, nr_res_channel=64, embedding_dim=64):
    """
    Decode the quantized top/bottom feature maps back to an image.

    Input:
        Tensor quant_t of shape (N,H//8,W//8,C) (e.g. (128,32,32,64))
        Tensor quant_b of shape (N,H//4,W//4,C) (e.g. (128,64,64,64))
    Output:
        Tensor dec_b of shape (N,H,W,3) (e.g. (128,256,256,3))
    """
    counters = {}
    with arg_scope([nn.conv2d, nn.deconv2d], counters=counters, ema=ema):
        # Upsample the top code to the bottom resolution and fuse the two.
        quant_t = nn.deconv2d(quant_t, embedding_dim, filter_size=[4,4], stride=[2,2])
        dec_b = tf.concat([quant_b, quant_t], -1)
        dec_b = nn.conv2d(dec_b, nr_channel)
        for rep in range(nr_res_block):
            dec_b = nn.resnet(dec_b, num_res_channel=nr_res_channel, nonlinearity=tf.nn.elu)
        dec_b = tf.nn.elu(dec_b)
        # Two stride-2 deconvolutions bring the map back to full resolution.
        dec_b = nn.deconv2d(dec_b, nr_channel//2, filter_size=[4,4], stride=[2,2])
        dec_b = tf.nn.elu(dec_b)
        dec_b = nn.deconv2d(dec_b, 3, filter_size=[4,4], stride=[2,2])
    # Dataset junk fused into this return line has been removed.
    return {'dec_b': dec_b}
import numpy as np
from settings import same_grid_dist_ratio
class SudokuVideo:
def __init__(self, grid):
self.grid_raw = grid
self.grid = np.zeros((9, 9), dtype=int)
self.init_grid(grid)
self.grid_solved = np.zeros((9, 9), dtype=int)
self.isConfident = False
self.isSolved = False
self.nbr_apparition = 1
self.last_apparition = 0
self.TL = 0
self.TR = 0
self.BR = 0
self.BL = 0
self.w = 0
self.h = 0
def get_limits(self):
return self.TL, self.TR, self.BR, self.BL
def set_limits(self, points):
self.TL = points[0]
self.TR = points[1]
self.BR = points[2]
self.BL = points[3]
self.w = ((self.TR[0] - self.TL[0]) + (self.BR[0] - self.BL[0])) / 2
self.h = ((self.TR[1] - self.TL[1]) + (self.BR[1] - self.BL[1])) / 2
def __str__(self):
string = "-" * 18
for y in range(9):
string += "\n|"
for x in range(9):
string += str(self.grid[y, x]) + "|"
string += "\n"
string += "-" * 18
return string
def init_grid(self, grid):
for y in range(9):
for x in range(9):
value = grid[y][x]
self.grid[y, x] = value
def is_filled(self):
return self.isSolved
def incr_last_apparition(self):
self.last_apparition += 1
def incr_nbr_apparition(self):
self.nbr_apparition += 1
def is_same_grid(self, points):
thresh_dist = 0.03 * (self.w + self.h)
points_grid = self.get_limits()
for i in range(4):
if np.linalg.norm(points_grid[i] - points[i]) > thresh_dist:
return False
self.last_apparition = 0
self.set_limits(points)
return True
def is_same_grid_v2(self, points):
thresh_dist = same_grid_dist_ratio * (self.w + self.h)
is_same = []
points_grid = self.get_limits()
for i in range(4):
is_same.append(np.linalg.norm(points_grid[i] - points[i]) < thresh_dist)
if sum(is_same) < 3:
return False
if sum(is_same) == 3:
false_value_ind = np.argmin(is_same)
points[false_value_ind] = points_grid[false_value_ind]
self.last_apparition = 0
self.set_limits(points)
return True | src/solving_objects/SudokuVideo.py | import numpy as np
from settings import same_grid_dist_ratio
class SudokuVideo:
def __init__(self, grid):
self.grid_raw = grid
self.grid = np.zeros((9, 9), dtype=int)
self.init_grid(grid)
self.grid_solved = np.zeros((9, 9), dtype=int)
self.isConfident = False
self.isSolved = False
self.nbr_apparition = 1
self.last_apparition = 0
self.TL = 0
self.TR = 0
self.BR = 0
self.BL = 0
self.w = 0
self.h = 0
def get_limits(self):
return self.TL, self.TR, self.BR, self.BL
def set_limits(self, points):
self.TL = points[0]
self.TR = points[1]
self.BR = points[2]
self.BL = points[3]
self.w = ((self.TR[0] - self.TL[0]) + (self.BR[0] - self.BL[0])) / 2
self.h = ((self.TR[1] - self.TL[1]) + (self.BR[1] - self.BL[1])) / 2
def __str__(self):
string = "-" * 18
for y in range(9):
string += "\n|"
for x in range(9):
string += str(self.grid[y, x]) + "|"
string += "\n"
string += "-" * 18
return string
def init_grid(self, grid):
for y in range(9):
for x in range(9):
value = grid[y][x]
self.grid[y, x] = value
def is_filled(self):
return self.isSolved
def incr_last_apparition(self):
self.last_apparition += 1
def incr_nbr_apparition(self):
self.nbr_apparition += 1
def is_same_grid(self, points):
thresh_dist = 0.03 * (self.w + self.h)
points_grid = self.get_limits()
for i in range(4):
if np.linalg.norm(points_grid[i] - points[i]) > thresh_dist:
return False
self.last_apparition = 0
self.set_limits(points)
return True
def is_same_grid_v2(self, points):
thresh_dist = same_grid_dist_ratio * (self.w + self.h)
is_same = []
points_grid = self.get_limits()
for i in range(4):
is_same.append(np.linalg.norm(points_grid[i] - points[i]) < thresh_dist)
if sum(is_same) < 3:
return False
if sum(is_same) == 3:
false_value_ind = np.argmin(is_same)
points[false_value_ind] = points_grid[false_value_ind]
self.last_apparition = 0
self.set_limits(points)
return True | 0.638723 | 0.258095 |
from logging import getLogger
from pymcuprog.pymcuprog_errors import PymcuprogError
from . import constants
class UpdiDatalink:
    """
    UPDI data link class handles the UPDI data protocol within the device
    """

    # An LDCS instruction always returns exactly one data byte.
    LDCS_RESPONSE_BYTES = 1

    def __init__(self):
        self.logger = getLogger(__name__)
        self.updi_phy = None

    def set_physical(self, physical):
        """
        Inject a serial-port based physical layer for use by this DL

        :param physical: physical-layer object providing send/receive/sib
        """
        self.updi_phy = physical

    def _init_session_parameters(self):
        """
        Set the inter-byte delay bit and disable collision detection
        """
        self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT)
        self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT)

    def init_datalink(self):
        """
        Init DL layer

        :raises PymcuprogError: if UPDI cannot be brought up even after a
            double-break recovery attempt
        """
        self._init_session_parameters()
        # Check
        if not self._check_datalink():
            # Send double break if all is not well, and re-check
            self.updi_phy.send_double_break()
            self._init_session_parameters()
            if not self._check_datalink():
                raise PymcuprogError("UPDI initialisation failed")

    def _check_datalink(self):
        """
        Check UPDI by loading CS STATUSA

        :return: True if the device answered with a nonzero STATUSA
        """
        try:
            if self.ldcs(constants.UPDI_CS_STATUSA) != 0:
                self.logger.info("UPDI init OK")
                return True
        except PymcuprogError:
            self.logger.warning("Check failed")
            return False
        self.logger.info("UPDI not OK - reinitialisation required")
        return False

    def ldcs(self, address):
        """
        Load data from Control/Status space

        :param address: address to load
        :return: value of the CS register
        :raises PymcuprogError: on a short response
        """
        self.logger.debug("LDCS from 0x%02X", address)
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)])
        response = self.updi_phy.receive(self.LDCS_RESPONSE_BYTES)
        numbytes_received = len(response)
        if numbytes_received != self.LDCS_RESPONSE_BYTES:
            raise PymcuprogError("Unexpected number of bytes in response: "
                                 "{} byte(s) expected {} byte(s)".format(numbytes_received, self.LDCS_RESPONSE_BYTES))
        return response[0]

    def stcs(self, address, value):
        """
        Store a value to Control/Status space

        :param address: address to store to
        :param value: value to write
        """
        self.logger.debug("STCS to 0x%02X", address)
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value])

    def ld_ptr_inc(self, size):
        """
        Loads a number of bytes from the pointer location with pointer post-increment

        :param size: number of bytes to load
        :return: values read
        """
        self.logger.debug("LD8 from ptr++")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
                            constants.UPDI_DATA_8])
        return self.updi_phy.receive(size)

    def ld_ptr_inc16(self, words):
        """
        Load a 16-bit word value from the pointer location with pointer post-increment

        :param words: number of words to load
        :return: values read
        """
        self.logger.debug("LD16 from ptr++")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
                            constants.UPDI_DATA_16])
        # Two bytes per word on the wire.
        return self.updi_phy.receive(words << 1)

    def st_ptr_inc(self, data):
        """
        Store data to the pointer location with pointer post-increment

        :param data: data to store
        :raises PymcuprogError: if any byte is not ACKed
        """
        self.logger.debug("ST8 to *ptr++")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8,
                            data[0]])
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("ACK error with st_ptr_inc")
        # Each subsequent byte is sent on its own and individually ACKed.
        num = 1
        while num < len(data):
            self.updi_phy.send([data[num]])
            response = self.updi_phy.receive(1)
            if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
                raise PymcuprogError("Error with st_ptr_inc")
            num += 1

    def st_ptr_inc16(self, data):
        """
        Store a 16-bit word value to the pointer location with pointer post-increment

        :param data: data to store (byte list; two bytes per word)
        :raises PymcuprogError: if any word is not ACKed
        """
        self.logger.debug("ST16 to *ptr++")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC |
                            constants.UPDI_DATA_16, data[0], data[1]])
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("ACK error with st_ptr_inc16")
        # Each subsequent word (2 bytes) is sent and individually ACKed.
        num = 2
        while num < len(data):
            self.updi_phy.send([data[num], data[num + 1]])
            response = self.updi_phy.receive(1)
            if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
                raise PymcuprogError("Error with st_ptr_inc16")
            num += 2

    def repeat(self, repeats):
        """
        Store a value to the repeat counter

        :param repeats: number of repeats requested
        :raises PymcuprogError: if the repeat count does not fit in one byte
        """
        self.logger.debug("Repeat %d", repeats)
        if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE:
            self.logger.error("Invalid repeat count of %d", repeats)
            # Raise the module's own error type (previously a bare Exception)
            # so callers catching PymcuprogError see this failure too.
            raise PymcuprogError("Invalid repeat count!")
        repeats -= 1
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE,
                            repeats & 0xFF])

    def read_sib(self):
        """
        Read the SIB
        """
        return self.updi_phy.sib()

    def key(self, size, key):
        """
        Write a key

        :param size: size of key (0=64B, 1=128B, 2=256B)
        :param key: key value
        :raises PymcuprogError: if the key length does not match the size code
        """
        self.logger.debug("Writing key")
        if len(key) != 8 << size:
            raise PymcuprogError("Invalid KEY length!")
        self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY | size])
        # Keys are transmitted in reverse byte order.
        self.updi_phy.send(list(reversed(list(key))))

    def _st_data_phase(self, values):
        """
        Performs data phase of transaction:
        * receive ACK
        * send data
        * receive ACK

        :param values: bytearray of value(s) to send
        :raises PymcuprogError: if either ACK is missing
        """
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("Error with st")
        self.updi_phy.send(values)
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("Error with st")
class UpdiDatalink16bit(UpdiDatalink):
    """
    UPDI data link layer in 16-bit version
    This means that all addresses and pointers contain 2 bytes
    """

    def __init__(self):
        UpdiDatalink.__init__(self)
        self.logger = getLogger(__name__)

    @staticmethod
    def _address_bytes(address):
        """Split a 16-bit address into its little-endian byte pair."""
        return address & 0xFF, (address >> 8) & 0xFF

    # pylint: disable=invalid-name
    def ld(self, address):
        """
        Load a single byte direct from a 16-bit address

        :param address: address to load from
        :return: value read
        """
        self.logger.info("LD from 0x{0:06X}".format(address))
        low, high = self._address_bytes(address)
        frame = [constants.UPDI_PHY_SYNC,
                 constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
                 low, high]
        self.updi_phy.send(frame)
        return self.updi_phy.receive(1)[0]

    def ld16(self, address):
        """
        Load a 16-bit word directly from a 16-bit address

        :param address: address to load from
        :return: values read
        """
        self.logger.info("LD from 0x{0:06X}".format(address))
        low, high = self._address_bytes(address)
        frame = [constants.UPDI_PHY_SYNC,
                 constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
                 low, high]
        self.updi_phy.send(frame)
        return self.updi_phy.receive(2)

    # pylint: disable=invalid-name
    def st(self, address, value):
        """
        Store a single byte value directly to a 16-bit address

        :param address: address to write to
        :param value: value to write
        """
        self.logger.info("ST to 0x{0:06X}".format(address))
        low, high = self._address_bytes(address)
        frame = [constants.UPDI_PHY_SYNC,
                 constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
                 low, high]
        self.updi_phy.send(frame)
        return self._st_data_phase([value & 0xFF])

    def st16(self, address, value):
        """
        Store a 16-bit word value directly to a 16-bit address

        :param address: address to write to
        :param value: value to write
        """
        self.logger.info("ST to 0x{0:06X}".format(address))
        low, high = self._address_bytes(address)
        frame = [constants.UPDI_PHY_SYNC,
                 constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
                 low, high]
        self.updi_phy.send(frame)
        return self._st_data_phase([value & 0xFF, (value >> 8) & 0xFF])

    def st_ptr(self, address):
        """
        Set the pointer location

        :param address: address to write
        """
        self.logger.info("ST to ptr")
        low, high = self._address_bytes(address)
        frame = [constants.UPDI_PHY_SYNC,
                 constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16,
                 low, high]
        self.updi_phy.send(frame)
        response = self.updi_phy.receive(1)
        if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
            raise PymcuprogError("Error with st_ptr")
class UpdiDatalink24bit(UpdiDatalink):
"""
UPDI data link layer in 24-bit version
This means that all addresses and pointers contain 3 bytes
"""
def __init__(self):
UpdiDatalink.__init__(self)
self.logger = getLogger(__name__)
# pylint: disable=invalid-name
def ld(self, address):
"""
Load a single byte direct from a 24-bit address
:param address: address to load from
:return: value read
"""
self.logger.info("LD from 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
return self.updi_phy.receive(1)[0]
def ld16(self, address):
"""
Load a 16-bit word directly from a 24-bit address
:param address: address to load from
:return: values read
"""
self.logger.info("LD from 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
return self.updi_phy.receive(2)
# pylint: disable=invalid-name
def st(self, address, value):
"""
Store a single byte value directly to a 24-bit address
:param address: address to write to
:param value: value to write
"""
self.logger.info("ST to 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
return self._st_data_phase([value & 0xFF])
def st16(self, address, value):
"""
Store a 16-bit word value directly to a 24-bit address
:param address: address to write to
:param value: value to write
"""
self.logger.info("ST to 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
return self._st_data_phase([value & 0xFF, (value >> 8) & 0xFF])
def st_ptr(self, address):
"""
Set the pointer location
:param address: address to write
"""
self.logger.info("ST to ptr")
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("Error with st_ptr") | pymcuprog/serialupdi/link.py | from logging import getLogger
from pymcuprog.pymcuprog_errors import PymcuprogError
from . import constants
class UpdiDatalink:
"""
UPDI data link class handles the UPDI data protocol within the device
"""
LDCS_RESPONSE_BYTES = 1
def __init__(self):
self.logger = getLogger(__name__)
self.updi_phy = None
def set_physical(self, physical):
"""
Inject a serial-port based physical layer for use by this DL
"""
self.updi_phy = physical
def _init_session_parameters(self):
"""
Set the inter-byte delay bit and disable collision detection
"""
self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT)
self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT)
def init_datalink(self):
"""
Init DL layer
"""
self._init_session_parameters()
# Check
if not self._check_datalink():
# Send double break if all is not well, and re-check
self.updi_phy.send_double_break()
self._init_session_parameters()
if not self._check_datalink():
raise PymcuprogError("UPDI initialisation failed")
def _check_datalink(self):
"""
Check UPDI by loading CS STATUSA
"""
try:
if self.ldcs(constants.UPDI_CS_STATUSA) != 0:
self.logger.info("UPDI init OK")
return True
except PymcuprogError:
self.logger.warning("Check failed")
return False
self.logger.info("UPDI not OK - reinitialisation required")
return False
def ldcs(self, address):
"""
Load data from Control/Status space
:param address: address to load
"""
self.logger.debug("LDCS from 0x%02X", address)
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)])
response = self.updi_phy.receive(self.LDCS_RESPONSE_BYTES)
numbytes_received = len(response)
if numbytes_received != self.LDCS_RESPONSE_BYTES:
raise PymcuprogError("Unexpected number of bytes in response: "
"{} byte(s) expected {} byte(s)".format(numbytes_received, self.LDCS_RESPONSE_BYTES))
return response[0]
def stcs(self, address, value):
"""
Store a value to Control/Status space
:param address: address to store to
:param value: value to write
"""
self.logger.debug("STCS to 0x%02X", address)
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value])
def ld_ptr_inc(self, size):
"""
Loads a number of bytes from the pointer location with pointer post-increment
:param size: number of bytes to load
:return: values read
"""
self.logger.debug("LD8 from ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
constants.UPDI_DATA_8])
return self.updi_phy.receive(size)
def ld_ptr_inc16(self, words):
"""
Load a 16-bit word value from the pointer location with pointer post-increment
:param words: number of words to load
:return: values read
"""
self.logger.debug("LD16 from ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
constants.UPDI_DATA_16])
return self.updi_phy.receive(words << 1)
def st_ptr_inc(self, data):
"""
Store data to the pointer location with pointer post-increment
:param data: data to store
"""
self.logger.debug("ST8 to *ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8,
data[0]])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("ACK error with st_ptr_inc")
num = 1
while num < len(data):
self.updi_phy.send([data[num]])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("Error with st_ptr_inc")
num += 1
def st_ptr_inc16(self, data):
"""
Store a 16-bit word value to the pointer location with pointer post-increment
:param data: data to store
"""
self.logger.debug("ST16 to *ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC |
constants.UPDI_DATA_16, data[0], data[1]])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("ACK error with st_ptr_inc16")
num = 2
while num < len(data):
self.updi_phy.send([data[num], data[num + 1]])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("Error with st_ptr_inc16")
num += 2
def repeat(self, repeats):
"""
Store a value to the repeat counter
:param repeats: number of repeats requested
"""
self.logger.debug("Repeat %d", repeats)
if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE:
self.logger.error("Invalid repeat count of %d", repeats)
raise Exception("Invalid repeat count!")
repeats -= 1
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE,
repeats & 0xFF])
def read_sib(self):
"""
Read the SIB
"""
return self.updi_phy.sib()
def key(self, size, key):
"""
Write a key
:param size: size of key (0=64B, 1=128B, 2=256B)
:param key: key value
"""
self.logger.debug("Writing key")
if len(key) != 8 << size:
raise PymcuprogError("Invalid KEY length!")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY | size])
self.updi_phy.send(list(reversed(list(key))))
def _st_data_phase(self, values):
"""
Performs data phase of transaction:
* receive ACK
* send data
:param values: bytearray of value(s) to send
"""
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("Error with st")
self.updi_phy.send(values)
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("Error with st")
class UpdiDatalink16bit(UpdiDatalink):
"""
UPDI data link layer in 16-bit version
This means that all addresses and pointers contain 2 bytes
"""
def __init__(self):
UpdiDatalink.__init__(self)
self.logger = getLogger(__name__)
# pylint: disable=invalid-name
def ld(self, address):
"""
Load a single byte direct from a 16-bit address
:param address: address to load from
:return: value read
"""
self.logger.info("LD from 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF])
return self.updi_phy.receive(1)[0]
def ld16(self, address):
"""
Load a 16-bit word directly from a 16-bit address
:param address: address to load from
:return: values read
"""
self.logger.info("LD from 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
return self.updi_phy.receive(2)
# pylint: disable=invalid-name
def st(self, address, value):
"""
Store a single byte value directly to a 16-bit address
:param address: address to write to
:param value: value to write
"""
self.logger.info("ST to 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF])
return self._st_data_phase([value & 0xFF])
def st16(self, address, value):
"""
Store a 16-bit word value directly to a 16-bit address
:param address: address to write to
:param value: value to write
"""
self.logger.info("ST to 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
return self._st_data_phase([value & 0xFF, (value >> 8) & 0xFF])
def st_ptr(self, address):
"""
Set the pointer location
:param address: address to write
"""
self.logger.info("ST to ptr")
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("Error with st_ptr")
class UpdiDatalink24bit(UpdiDatalink):
"""
UPDI data link layer in 24-bit version
This means that all addresses and pointers contain 3 bytes
"""
def __init__(self):
UpdiDatalink.__init__(self)
self.logger = getLogger(__name__)
# pylint: disable=invalid-name
def ld(self, address):
"""
Load a single byte direct from a 24-bit address
:param address: address to load from
:return: value read
"""
self.logger.info("LD from 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
return self.updi_phy.receive(1)[0]
def ld16(self, address):
"""
Load a 16-bit word directly from a 24-bit address
:param address: address to load from
:return: values read
"""
self.logger.info("LD from 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
return self.updi_phy.receive(2)
# pylint: disable=invalid-name
def st(self, address, value):
"""
Store a single byte value directly to a 24-bit address
:param address: address to write to
:param value: value to write
"""
self.logger.info("ST to 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
return self._st_data_phase([value & 0xFF])
def st16(self, address, value):
"""
Store a 16-bit word value directly to a 24-bit address
:param address: address to write to
:param value: value to write
"""
self.logger.info("ST to 0x{0:06X}".format(address))
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
return self._st_data_phase([value & 0xFF, (value >> 8) & 0xFF])
def st_ptr(self, address):
"""
Set the pointer location
:param address: address to write
"""
self.logger.info("ST to ptr")
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise PymcuprogError("Error with st_ptr") | 0.765155 | 0.259718 |
import numpy as np
from cloudvolume import CloudVolume
from .cube import Cube
from .error import CVDBError
class CloudVolumeDB:
"""
Wrapper interface for cloudvolume read access to bossDB.
"""
def __init__(self, cv_config=None):
self.cv_config = cv_config
# Main READ interface method
def cutout(
self,
resource,
corner,
extent,
resolution,
time_sample_range=None,
filter_ids=None,
iso=False,
access_mode="cache",
):
"""Extract a cube of arbitrary size. Need not be aligned to cuboid boundaries.
corner represents the location of the cutout and extent the size. As an example in 1D, if asking for
a corner of 3 and extent of 2, this would be the values at 3 and 4.
Args:
resource (spdb.project.BossResource): Data model info based on the request or target resource
corner ((int, int, int)): the xyz location of the corner of the cutout
extent ((int, int, int)): the xyz extents
resolution (int): the resolution level
time_sample_range : ignored
filter_ids (optional[list]): ignored
iso (bool): ignored
access_mode (str): ignored
Returns:
cube.Cube: The cutout data stored in a Cube instance
Raises:
(CVDBError)
"""
channel = resource.get_channel()
out_cube = Cube.create_cube(resource, extent)
# NOTE: Refer to Tim's changes for channel method to check storage type.
if channel.storage_type != "cloudvol":
raise CVDBError(
f"Storage type {channel.storage_type} not configured for cloudvolume.",
701,
)
# NOTE: Refer to Tim's changes for S3 bucket and path.
try:
# Accessing HTTPS version of dataset. This is READ-ONLY and PUBLIC-ONLY, but much faster to download.
vol = CloudVolume(
f"s3://{channel.bucket}/{channel.cv_path}",
mip=resolution,
use_https=True,
fill_missing=True,
)
# Data is downloaded by providing XYZ indicies.
data = vol[
corner[0] : corner[0] + extent[0],
corner[1] : corner[1] + extent[1],
corner[2] : corner[2] + extent[2],
]
# Data returned as cloudvolume VolumeCutout object in XYZT order.
# Here we recast it to numpy array and transpose it to TZYX order.
data = np.array(data.T)
except Exception as e:
raise CVDBError(f"Error downloading cloudvolume data: {e}")
out_cube.set_data(data)
return out_cube
# Main WRITE interface method
def write_cuboid(
self,
resource,
corner,
resolution,
cuboid_data,
time_sample_start=0,
iso=False,
to_black=False,
):
""" Write a 3D/4D volume to the key-value store. Used by API/cache in consistent mode as it reconciles writes
If cuboid_data.ndim == 4, data in time-series format - assume t,z,y,x
If cuboid_data.ndim == 3, data not in time-series format - assume z,y,x
Args:
resource (project.BossResource): Data model info based on the request or target resource
corner ((int, int, int)): the xyz locatiotn of the corner of the cuout
resolution (int): the resolution level
cuboid_data (numpy.ndarray): Matrix of data to write as cuboids
time_sample_start (int): if cuboid_data.ndim == 3, the time sample for the data
if cuboid_data.ndim == 4, the time sample for cuboid_data[0, :, :, :]
iso (bool): Flag indicating if you want to write to the "isotropic" version of a channel, if available
to_black (bool): Flag indicating is this cuboid is a cutout_to_black cuboid.
Returns:
None
"""
raise NotImplementedError | cvdb/cloudvolumedb.py |
import numpy as np
from cloudvolume import CloudVolume
from .cube import Cube
from .error import CVDBError
class CloudVolumeDB:
"""
Wrapper interface for cloudvolume read access to bossDB.
"""
def __init__(self, cv_config=None):
self.cv_config = cv_config
# Main READ interface method
def cutout(
self,
resource,
corner,
extent,
resolution,
time_sample_range=None,
filter_ids=None,
iso=False,
access_mode="cache",
):
"""Extract a cube of arbitrary size. Need not be aligned to cuboid boundaries.
corner represents the location of the cutout and extent the size. As an example in 1D, if asking for
a corner of 3 and extent of 2, this would be the values at 3 and 4.
Args:
resource (spdb.project.BossResource): Data model info based on the request or target resource
corner ((int, int, int)): the xyz location of the corner of the cutout
extent ((int, int, int)): the xyz extents
resolution (int): the resolution level
time_sample_range : ignored
filter_ids (optional[list]): ignored
iso (bool): ignored
access_mode (str): ignored
Returns:
cube.Cube: The cutout data stored in a Cube instance
Raises:
(CVDBError)
"""
channel = resource.get_channel()
out_cube = Cube.create_cube(resource, extent)
# NOTE: Refer to Tim's changes for channel method to check storage type.
if channel.storage_type != "cloudvol":
raise CVDBError(
f"Storage type {channel.storage_type} not configured for cloudvolume.",
701,
)
# NOTE: Refer to Tim's changes for S3 bucket and path.
try:
# Accessing HTTPS version of dataset. This is READ-ONLY and PUBLIC-ONLY, but much faster to download.
vol = CloudVolume(
f"s3://{channel.bucket}/{channel.cv_path}",
mip=resolution,
use_https=True,
fill_missing=True,
)
# Data is downloaded by providing XYZ indicies.
data = vol[
corner[0] : corner[0] + extent[0],
corner[1] : corner[1] + extent[1],
corner[2] : corner[2] + extent[2],
]
# Data returned as cloudvolume VolumeCutout object in XYZT order.
# Here we recast it to numpy array and transpose it to TZYX order.
data = np.array(data.T)
except Exception as e:
raise CVDBError(f"Error downloading cloudvolume data: {e}")
out_cube.set_data(data)
return out_cube
# Main WRITE interface method
def write_cuboid(
self,
resource,
corner,
resolution,
cuboid_data,
time_sample_start=0,
iso=False,
to_black=False,
):
""" Write a 3D/4D volume to the key-value store. Used by API/cache in consistent mode as it reconciles writes
If cuboid_data.ndim == 4, data in time-series format - assume t,z,y,x
If cuboid_data.ndim == 3, data not in time-series format - assume z,y,x
Args:
resource (project.BossResource): Data model info based on the request or target resource
corner ((int, int, int)): the xyz locatiotn of the corner of the cuout
resolution (int): the resolution level
cuboid_data (numpy.ndarray): Matrix of data to write as cuboids
time_sample_start (int): if cuboid_data.ndim == 3, the time sample for the data
if cuboid_data.ndim == 4, the time sample for cuboid_data[0, :, :, :]
iso (bool): Flag indicating if you want to write to the "isotropic" version of a channel, if available
to_black (bool): Flag indicating is this cuboid is a cutout_to_black cuboid.
Returns:
None
"""
raise NotImplementedError | 0.863334 | 0.421373 |
import os
import zipfile
import pathlib
from time import time
from io import BytesIO
import requests
from psycopg2 import sql
from flask import Blueprint, request, jsonify, send_file
from app.config import config
from app.auth_utils import auth_user
from Database.postgres import Postgres_db
from Drive.tools import allowed_file
manage_storage_bp = Blueprint('manage_storage', __name__)
@manage_storage_bp.route('/create_folder', methods=['POST'])
@auth_user(name_func='create_folder')
def create_folder(user):
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
os.makedirs(path, exist_ok=True)
return jsonify(True)
@manage_storage_bp.route('/get_file', methods=['POST'])
@auth_user(name_func='get_file')
def get_file(user):
"""Download a file."""
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
with zipfile.ZipFile(path, 'r') as z:
for filename in z.namelist( ):
return send_file(
BytesIO(z.read(filename)),
attachment_filename=filename,
as_attachment=True
)
@manage_storage_bp.route('/get_files_in_directory', methods=['POST'])
@auth_user(name_func='get_files_in_directory')
def get_files_in_directory(user):
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
vozvrat = []
with os.scandir(path) as listOfEntries:
for entry in listOfEntries:
if entry.is_file():
vozvrat.append({
"title": entry.name,
"path": entry.path[len(config['APP']['PATH_STORAGE']) + len(user.get_username()):],
"size": entry.stat(follow_symlinks=False).st_size,
"type": "file"
})
elif entry.is_dir():
vozvrat.append({
"title": entry.name,
"path": entry.path[len(config['APP']['PATH_STORAGE']) + len(user.get_username()):],
"size": entry.stat(follow_symlinks=False).st_size,
"type": "dir"
})
return jsonify(vozvrat)
@manage_storage_bp.route('/del_object', methods=['DELETE'])
@auth_user(name_func='del_object')
def del_object(user):
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
try:
database = Postgres_db()
except TypeError:
return jsonify({"message": "Нет подключения к БД"})
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
file_size = os.stat(path).st_size
if os.path.isdir(path):
os.removedirs(path)
elif os.path.isfile(path):
os.remove(path)
free_space_kbyte = database.select_data(sql.SQL("""
UPDATE users
SET free_space_kbyte = (
SELECT free_space_kbyte
FROM users
WHERE id={user_id}
) + {file_size}
WHERE id={user_id} RETURNING free_space_kbyte;""").format(
user_id=sql.Literal(user.get_id()),
file_size=sql.Literal(file_size)
))
if type(free_space_kbyte) == list:
return jsonify({
"free_space_kbyte": free_space_kbyte[0][0]
})
else:
return jsonify(free_space_kbyte)
@manage_storage_bp.route('/share', methods=['POST'])
@auth_user(name_func='share_object')
def share_object(user):
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
payload = {
"route": f"/{user.get_username()}{file_path}"
}
if os.path.isdir(path):
payload["type"] = "dir"
elif os.path.isfile(path):
payload["type"] = "file"
r = requests.Request(method='GET', url=f"http://{config['APP']['URL_SERVICE']}/share", params=payload).prepare()
return jsonify(r.url)
@manage_storage_bp.route('/share', methods=['GET'])
@auth_user(name_func='get_share_object')
def get_share_object(user):
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{request.args.get('route')}")
type_obj = request.args.get('type')
if type_obj == "dir" and os.path.isdir(path):
vozvrat = []
with os.scandir(path) as listOfEntries:
for entry in listOfEntries:
if entry.is_file():
vozvrat.append({
"title": entry.name,
"path": entry.path[len(config['APP']['PATH_STORAGE']) + len(user.get_username()):],
"size": entry.stat(follow_symlinks=False).st_size // 1024,
"type": "file"
})
elif entry.is_dir():
vozvrat.append({
"title": entry.name,
"path": entry.path[len(config['APP']['PATH_STORAGE']) + len(user.get_username()):],
"size": entry.stat(follow_symlinks=False).st_size // 1024,
"type": "dir"
})
return vozvrat
elif type_obj == "file" and os.path.isfile(path):
with zipfile.ZipFile(path, 'r') as z:
for filename in z.namelist( ):
return send_file(
BytesIO(z.read(filename)),
attachment_filename=filename,
as_attachment=True
)
return jsonify(False), 404 | backend/Drive/manage_storage.py | import os
import zipfile
import pathlib
from time import time
from io import BytesIO
import requests
from psycopg2 import sql
from flask import Blueprint, request, jsonify, send_file
from app.config import config
from app.auth_utils import auth_user
from Database.postgres import Postgres_db
from Drive.tools import allowed_file
manage_storage_bp = Blueprint('manage_storage', __name__)
@manage_storage_bp.route('/create_folder', methods=['POST'])
@auth_user(name_func='create_folder')
def create_folder(user):
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
os.makedirs(path, exist_ok=True)
return jsonify(True)
@manage_storage_bp.route('/get_file', methods=['POST'])
@auth_user(name_func='get_file')
def get_file(user):
"""Download a file."""
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
with zipfile.ZipFile(path, 'r') as z:
for filename in z.namelist( ):
return send_file(
BytesIO(z.read(filename)),
attachment_filename=filename,
as_attachment=True
)
@manage_storage_bp.route('/get_files_in_directory', methods=['POST'])
@auth_user(name_func='get_files_in_directory')
def get_files_in_directory(user):
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
vozvrat = []
with os.scandir(path) as listOfEntries:
for entry in listOfEntries:
if entry.is_file():
vozvrat.append({
"title": entry.name,
"path": entry.path[len(config['APP']['PATH_STORAGE']) + len(user.get_username()):],
"size": entry.stat(follow_symlinks=False).st_size,
"type": "file"
})
elif entry.is_dir():
vozvrat.append({
"title": entry.name,
"path": entry.path[len(config['APP']['PATH_STORAGE']) + len(user.get_username()):],
"size": entry.stat(follow_symlinks=False).st_size,
"type": "dir"
})
return jsonify(vozvrat)
@manage_storage_bp.route('/del_object', methods=['DELETE'])
@auth_user(name_func='del_object')
def del_object(user):
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
try:
database = Postgres_db()
except TypeError:
return jsonify({"message": "Нет подключения к БД"})
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
file_size = os.stat(path).st_size
if os.path.isdir(path):
os.removedirs(path)
elif os.path.isfile(path):
os.remove(path)
free_space_kbyte = database.select_data(sql.SQL("""
UPDATE users
SET free_space_kbyte = (
SELECT free_space_kbyte
FROM users
WHERE id={user_id}
) + {file_size}
WHERE id={user_id} RETURNING free_space_kbyte;""").format(
user_id=sql.Literal(user.get_id()),
file_size=sql.Literal(file_size)
))
if type(free_space_kbyte) == list:
return jsonify({
"free_space_kbyte": free_space_kbyte[0][0]
})
else:
return jsonify(free_space_kbyte)
@manage_storage_bp.route('/share', methods=['POST'])
@auth_user(name_func='share_object')
def share_object(user):
json = request.get_json(silent=True)
if not json:
return jsonify({"message": "JSON не найден"}), 204
file_path = json.get('file_path')
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{user.get_username()}{file_path}")
payload = {
"route": f"/{user.get_username()}{file_path}"
}
if os.path.isdir(path):
payload["type"] = "dir"
elif os.path.isfile(path):
payload["type"] = "file"
r = requests.Request(method='GET', url=f"http://{config['APP']['URL_SERVICE']}/share", params=payload).prepare()
return jsonify(r.url)
@manage_storage_bp.route('/share', methods=['GET'])
@auth_user(name_func='get_share_object')
def get_share_object(user):
path = os.path.join(f"{config['APP']['PATH_STORAGE']}{request.args.get('route')}")
type_obj = request.args.get('type')
if type_obj == "dir" and os.path.isdir(path):
vozvrat = []
with os.scandir(path) as listOfEntries:
for entry in listOfEntries:
if entry.is_file():
vozvrat.append({
"title": entry.name,
"path": entry.path[len(config['APP']['PATH_STORAGE']) + len(user.get_username()):],
"size": entry.stat(follow_symlinks=False).st_size // 1024,
"type": "file"
})
elif entry.is_dir():
vozvrat.append({
"title": entry.name,
"path": entry.path[len(config['APP']['PATH_STORAGE']) + len(user.get_username()):],
"size": entry.stat(follow_symlinks=False).st_size // 1024,
"type": "dir"
})
return vozvrat
elif type_obj == "file" and os.path.isfile(path):
with zipfile.ZipFile(path, 'r') as z:
for filename in z.namelist( ):
return send_file(
BytesIO(z.read(filename)),
attachment_filename=filename,
as_attachment=True
)
return jsonify(False), 404 | 0.252016 | 0.093306 |
import sys
from instrument_lookup import hex_to_instrument, instrument_to_hex
class MIDIFile:
MTrk = ["4d", "54", "72", "6b"]
MThd = ["4d", "54", "68", "06"]
END_TRACK = ["ff", "2f", "00"]
def __init__(self):
self.hex_array = []
self.meta_events = {}
def read_file(self, input_file_name: str) -> None:
""" Read a midi file and store it into the MIDIFile instance."""
with open(input_file_name, "rb") as input_file:
binary_string = input_file.read()
hex_string = binary_string.hex(" ")
self.hex_array = hex_string.split(" ")
self.read_header()
def read_header(self) -> None:
"""Reads header information into format, ntracks, and tickdiv."""
position = 4
header_string = ""
for hex_byte in self.read_bytes(4, 4):
header_string = header_string + hex_byte
position = 8
header_length = int(header_string, 16)
header_data = self.read_bytes(8, header_length)
self.format = self.htoi("".join(header_data[0:2]))
self.num_tracks = self.htoi("".join(header_data[2:4]))
timing = self.htoi("".join(header_data[4:6]))
# The top bit of a 16-bit number determines the timing format.
if (timing > 32768):
self.timing = "timecode"
timing = timing - 32768
else:
self.timing = "metrical"
self.tickdiv = timing
def hexarray_to_binary(self) -> bytes:
"""Converts an array of HEX values into a binary sting."""
hex_string = ''.join(self.hex_array)
binary_string = bytes.fromhex(hex_string)
return binary_string
def write_file(self, output_file_name: str) -> None:
"""Writes the MIDIFile instance to the output file."""
with open(output_file_name, "wb") as output_file:
output_file.write(self.hexarray_to_binary())
def find_start_track(self, start: int) -> int:
"""Returns the index after the start track and length of the track."""
#print("searching")
for index in range(start, len(self.hex_array) - 4):
if self.read_bytes(index, 4) == self.MTrk:
return index + 7
return -1
def find_end_track(self, start: int) -> int:
""" Returns the index right after an end of track sequence."""
for index in range(start, len(self.hex_array) - 3):
if (self.read_bytes(index, 3) == self.END_TRACK):
return index + 3
return -1
def find_byte_sequence(self, start: int, byte_sequence: list) -> int:
"""Returns the index after the start track and length of the track."""
#print("searching")
for index in range(start, len(self.hex_array) - len(byte_sequence)):
if self.read_bytes(index, len(byte_sequence)) == byte_sequence:
return index
return -1
def list_instruments(self) -> dict:
""" Returns the instrument titles being used in the midi file in a list.
"""
strings = {}
search_index = self.find_end_track(0)
search_index = self.find_start_track(search_index)
while search_index < len(self.hex_array):
if (search_index == -1):
break
if (self.hex_array[search_index][0] == "c"):
channel = self.hex_array[search_index]
instrument_type = self.hex_array[search_index+1]
strings[channel] = hex_to_instrument[instrument_type]
search_index = self.find_start_track(search_index)
else:
search_index = search_index + 1
return strings
def change_instrument(self, channel: str, instrument_name: str) -> None:
    """Changes the instrument for a channel to the specified instrument.

    ``instrument_name`` may be a two-character hex byte, or a name that
    is translated through ``instrument_to_hex``.
    """
    if (len(instrument_name) != 2):
        instrument_name = instrument_to_hex[instrument_name]
    search_index = self.find_end_track(0)
    search_index = self.find_start_track(search_index)
    while search_index < len(self.hex_array):
        # NOTE(review): "54" is presumably meant to detect a track header
        # ("MTrk" contains 54), but a bare data byte 0x54 would also
        # trigger this 7-byte skip -- confirm.
        if (self.hex_array[search_index] == "54"):
            search_index = search_index + 7
        if (self.hex_array[search_index] == channel):
            self.hex_array[search_index+1] = instrument_name
            return
        search_index = search_index + 1
def read_bytes(self, start_position: int, number_of_bytes: int) -> list:
    """Read a certain number of bytes from the hex_array starting at
    start position.

    Raises IndexError when the requested range runs past the end.
    """
    output = []
    end_position = start_position + number_of_bytes
    for i in range(start_position, end_position):
        output.append(self.hex_array[i])
    return output
def htoi(self, hex_string: str) -> int:
    """Converts a hex_string (e.g. "ff") to an integer (255)."""
    return int(hex_string, 16)
def read_meta_events(self):
    """Reads all meta events into a dictionary.

    Populates ``self.meta_events`` with {type byte: decoded payload} for
    every known text and numeric meta event found in the file.
    """
    for type_byte in MetaEvent.TEXT_EVENTS:
        event = MetaEvent(type_byte)
        if (event.read_event(self)):
            self.meta_events[type_byte] = event.data
    for type_byte in MetaEvent.NUMERIC_EVENTS:
        event = MetaEvent(type_byte)
        if (event.read_event(self)):
            self.meta_events[type_byte] = event.data
class Event:
    """Base class for a single event found in a MIDI file."""

    def __init__(self, start_byte: str) -> None:
        # The status byte that introduces this event in the byte stream.
        self.start_byte = start_byte

    def htoi(self, hex_string: str) -> int:
        """Converts a hex_string to an integer."""
        value = int(hex_string, 16)
        return value

    def hex_to_char(self, hex_string: str) -> str:
        """Converts a hex_string to a Unicode character (string)."""
        code_point = self.htoi(hex_string)
        return chr(code_point)
class MIDIEvent(Event):
    """A single MIDI Event."""

    def __init__(self, start_byte: str) -> None:
        super().__init__(start_byte)

    def parse_delta_time(self, start):
        """Reads the delta time of an event."""
        # TODO: not implemented yet -- the docstring is the entire body.
class SysExEvent(Event):
    """A single system exclusive Event."""
    # SysEx payloads are not parsed yet; only the start byte is stored.

    def __init__(self, start_byte: str) -> None:
        super().__init__(start_byte)
class MetaEvent(Event):
    """A single Meta Event.

    Meta events have the byte layout: ff <type> <length> <data...>.
    Sequence Specific events and the End of Track marker are not handled.
    """

    # Type bytes whose payload is text (e.g. 03 = track name per the SMF spec).
    TEXT_EVENTS = ["01", "02", "03", "04", "05", "06", "07", "08", "09"]
    # Type bytes whose payload is numeric (e.g. 51 = tempo, 58 = time signature).
    NUMERIC_EVENTS = ["00", "20", "21", "51", "54", "58", "59"]

    def __init__(self, type_byte: str) -> None:
        super().__init__("ff")  # every meta event starts with the ff byte
        self.type = type_byte

    def read_event(self, midi: "MIDIFile") -> bool:
        """Reads the meta-event from MIDIFile midi.

        Returns True and stores the decoded payload in ``self.data`` when
        the event is found; returns False otherwise.
        """
        search = [self.start_byte, self.type]
        # Meta events live inside tracks, so begin after the first header.
        start = midi.find_start_track(0)
        if start == -1:
            # No track header at all: do not scan from a bogus index.
            return False
        index = midi.find_byte_sequence(start, search)
        if index == -1:
            return False
        length = self.htoi(midi.hex_array[index + 2])
        data = midi.read_bytes(index + 3, length)
        if self.type in self.TEXT_EVENTS:
            return self.read_event_text(data)
        return self.read_event_numeric(data)

    def read_event_text(self, data: list) -> bool:
        """Decodes a text payload and stores it as a string in self.data."""
        self.data = "".join(self.hex_to_char(byte) for byte in data)
        return True

    def read_event_numeric(self, data: list) -> bool:
        """Decodes a numeric payload; layout depends on its length/type."""
        if len(data) == 1:
            self.data = self.htoi(data[0])
        elif self.type == "59":
            # Key signature: two independent bytes.
            self.data = [self.htoi(data[0]), self.htoi(data[1])]
        elif len(data) in (2, 3):
            # Multi-byte big-endian integer (e.g. 3-byte tempo).
            self.data = self.htoi("".join(data))
        elif len(data) in (4, 5):
            # A list of independent byte values (e.g. SMPTE offset).
            self.data = [self.htoi(byte) for byte in data]
        else:
            return False
        return True
if __name__ == "__main__":
    # CLI: "python <script> inst <midi-file>" prints the instruments and
    # header info; with no arguments, a smoke test runs against "mary.mid".
    if (len(sys.argv) > 1):
        if (sys.argv[1] == "inst"):
            a = MIDIFile()
            a.read_file(sys.argv[2])
            #print(a.list_instruments())
            print(a.list_instruments())
            print(a.format, a.num_tracks, a.timing, a.tickdiv)
    else:
        a = MIDIFile()
        a.read_file("mary.mid")
        print(a.read_bytes(0,6))
        print(a.read_bytes(0,5))
#print(a.format, a.num_tracks, a.timing, a.tickdiv) | pymiditools.py | import sys
from instrument_lookup import hex_to_instrument, instrument_to_hex
class MIDIFile:
    """An in-memory representation of a Standard MIDI File.

    The file is held in ``hex_array``: a list of two-character lowercase
    hex strings, one per byte of the file.
    """

    # Byte signatures used while scanning the file.
    MTrk = ["4d", "54", "72", "6b"]  # "MTrk" track-chunk header
    # NOTE(review): spells "MTh\x06", not "MThd" ("64") -- unused here, confirm.
    MThd = ["4d", "54", "68", "06"]
    END_TRACK = ["ff", "2f", "00"]  # end-of-track meta event

    def __init__(self):
        self.hex_array = []
        self.meta_events = {}

    def read_file(self, input_file_name: str) -> None:
        """Read a midi file and store it into the MIDIFile instance."""
        with open(input_file_name, "rb") as input_file:
            binary_string = input_file.read()
        # bytes.hex(sep) requires Python 3.8+.
        hex_string = binary_string.hex(" ")
        self.hex_array = hex_string.split(" ")
        self.read_header()

    def read_header(self) -> None:
        """Reads header info into format, num_tracks, timing and tickdiv."""
        # Bytes 4-7 hold the header chunk length (big-endian).
        header_length = self.htoi("".join(self.read_bytes(4, 4)))
        header_data = self.read_bytes(8, header_length)
        self.format = self.htoi("".join(header_data[0:2]))
        self.num_tracks = self.htoi("".join(header_data[2:4]))
        timing = self.htoi("".join(header_data[4:6]))
        # The top bit of the 16-bit timing word selects the timing format.
        # (was "> 32768": the boundary value 0x8000 itself has the bit set)
        if timing >= 32768:
            self.timing = "timecode"
            timing = timing - 32768
        else:
            self.timing = "metrical"
        self.tickdiv = timing

    def hexarray_to_binary(self) -> bytes:
        """Converts the hex array back into a binary string."""
        return bytes.fromhex("".join(self.hex_array))

    def write_file(self, output_file_name: str) -> None:
        """Writes the MIDIFile instance to the output file."""
        with open(output_file_name, "wb") as output_file:
            output_file.write(self.hexarray_to_binary())

    def find_start_track(self, start: int) -> int:
        """Returns the index just past a track header found at/after start.

        The result skips the 4 signature bytes plus 3 bytes of the track
        length (index + 7).  Returns -1 when no track header is found.
        """
        # "+ 1": the last valid window starts at len - 4 (was an off-by-one
        # that missed a signature flush at the end of the array).
        for index in range(start, len(self.hex_array) - 4 + 1):
            if self.read_bytes(index, 4) == self.MTrk:
                return index + 7
        return -1

    def find_end_track(self, start: int) -> int:
        """ Returns the index right after an end of track sequence."""
        # "+ 1" lets a marker that sits flush at the end of the file match.
        for index in range(start, len(self.hex_array) - 3 + 1):
            if self.read_bytes(index, 3) == self.END_TRACK:
                return index + 3
        return -1

    def find_byte_sequence(self, start: int, byte_sequence: list) -> int:
        """Returns the index of byte_sequence at/after start, or -1."""
        seq_len = len(byte_sequence)
        # "+ 1": include the final alignment (same off-by-one as above).
        for index in range(start, len(self.hex_array) - seq_len + 1):
            if self.read_bytes(index, seq_len) == byte_sequence:
                return index
        return -1

    def list_instruments(self) -> dict:
        """Returns a dict mapping channel status byte -> instrument title."""
        strings = {}
        # Skip past the first end-of-track, then scan from the next track.
        search_index = self.find_end_track(0)
        search_index = self.find_start_track(search_index)
        while search_index < len(self.hex_array):
            if search_index == -1:
                break
            # "cX" is treated as a program change on channel X; the next
            # byte holds the instrument number.
            if self.hex_array[search_index][0] == "c":
                channel = self.hex_array[search_index]
                instrument_type = self.hex_array[search_index + 1]
                strings[channel] = hex_to_instrument[instrument_type]
                # Assumes one program change per track.
                search_index = self.find_start_track(search_index)
            else:
                search_index = search_index + 1
        return strings

    def change_instrument(self, channel: str, instrument_name: str) -> None:
        """Changes the instrument for a channel to the given instrument.

        ``instrument_name`` may be a two-character hex byte or a name
        translated through ``instrument_to_hex``.
        """
        if len(instrument_name) != 2:
            instrument_name = instrument_to_hex[instrument_name]
        search_index = self.find_end_track(0)
        search_index = self.find_start_track(search_index)
        while search_index < len(self.hex_array):
            # NOTE(review): a bare data byte 0x54 also triggers this
            # 7-byte "track header" skip -- looks fragile, confirm.
            if self.hex_array[search_index] == "54":
                search_index = search_index + 7
            if self.hex_array[search_index] == channel:
                self.hex_array[search_index + 1] = instrument_name
                return
            search_index = search_index + 1

    def read_bytes(self, start_position: int, number_of_bytes: int) -> list:
        """Read a certain number of bytes from the hex_array starting at
        start position.  Raises IndexError past the end of the array."""
        output = []
        end_position = start_position + number_of_bytes
        for i in range(start_position, end_position):
            output.append(self.hex_array[i])
        return output

    def htoi(self, hex_string: str) -> int:
        """Converts a hex_string (e.g. "ff") to an integer (255)."""
        return int(hex_string, 16)

    def read_meta_events(self):
        """Reads all known meta events into ``self.meta_events``."""
        # Text events first, then numeric -- same order as before.
        for type_byte in MetaEvent.TEXT_EVENTS + MetaEvent.NUMERIC_EVENTS:
            event = MetaEvent(type_byte)
            if event.read_event(self):
                self.meta_events[type_byte] = event.data
class Event:
    """Base class for a single event found in a MIDI file."""

    def __init__(self, start_byte: str) -> None:
        # The status byte that introduces this event in the byte stream.
        self.start_byte = start_byte

    def htoi(self, hex_string: str) -> int:
        """Converts a hex_string to an integer."""
        value = int(hex_string, 16)
        return value

    def hex_to_char(self, hex_string: str) -> str:
        """Converts a hex_string to a Unicode character (string)."""
        code_point = self.htoi(hex_string)
        return chr(code_point)
class MIDIEvent(Event):
    """A single MIDI Event."""

    def __init__(self, start_byte: str) -> None:
        super().__init__(start_byte)

    def parse_delta_time(self, start):
        """Reads the delta time of an event."""
        # TODO: not implemented yet -- the docstring is the entire body.
class SysExEvent(Event):
    """A single system exclusive Event."""
    # SysEx payloads are not parsed yet; only the start byte is stored.

    def __init__(self, start_byte: str) -> None:
        super().__init__(start_byte)
class MetaEvent(Event):
    """A single Meta Event.

    Meta events have the byte layout: ff <type> <length> <data...>.
    Sequence Specific events and the End of Track marker are not handled.
    """

    # Type bytes whose payload is text (e.g. 03 = track name per the SMF spec).
    TEXT_EVENTS = ["01", "02", "03", "04", "05", "06", "07", "08", "09"]
    # Type bytes whose payload is numeric (e.g. 51 = tempo, 58 = time signature).
    NUMERIC_EVENTS = ["00", "20", "21", "51", "54", "58", "59"]

    def __init__(self, type_byte: str) -> None:
        super().__init__("ff")  # every meta event starts with the ff byte
        self.type = type_byte

    def read_event(self, midi: "MIDIFile") -> bool:
        """Reads the meta-event from MIDIFile midi.

        Returns True and stores the decoded payload in ``self.data`` when
        the event is found; returns False otherwise.
        """
        search = [self.start_byte, self.type]
        # Meta events live inside tracks, so begin after the first header.
        start = midi.find_start_track(0)
        if start == -1:
            # No track header at all: do not scan from a bogus index.
            return False
        index = midi.find_byte_sequence(start, search)
        if index == -1:
            return False
        length = self.htoi(midi.hex_array[index + 2])
        data = midi.read_bytes(index + 3, length)
        if self.type in self.TEXT_EVENTS:
            return self.read_event_text(data)
        return self.read_event_numeric(data)

    def read_event_text(self, data: list) -> bool:
        """Decodes a text payload and stores it as a string in self.data."""
        self.data = "".join(self.hex_to_char(byte) for byte in data)
        return True

    def read_event_numeric(self, data: list) -> bool:
        """Decodes a numeric payload; layout depends on its length/type."""
        if len(data) == 1:
            self.data = self.htoi(data[0])
        elif self.type == "59":
            # Key signature: two independent bytes.
            self.data = [self.htoi(data[0]), self.htoi(data[1])]
        elif len(data) in (2, 3):
            # Multi-byte big-endian integer (e.g. 3-byte tempo).
            self.data = self.htoi("".join(data))
        elif len(data) in (4, 5):
            # A list of independent byte values (e.g. SMPTE offset).
            self.data = [self.htoi(byte) for byte in data]
        else:
            return False
        return True
if __name__ == "__main__":
    # CLI: "python <script> inst <midi-file>" prints the instruments and
    # header info; with no arguments, a smoke test runs against "mary.mid".
    if (len(sys.argv) > 1):
        if (sys.argv[1] == "inst"):
            a = MIDIFile()
            a.read_file(sys.argv[2])
            #print(a.list_instruments())
            print(a.list_instruments())
            print(a.format, a.num_tracks, a.timing, a.tickdiv)
    else:
        a = MIDIFile()
        a.read_file("mary.mid")
        print(a.read_bytes(0,6))
        print(a.read_bytes(0,5))
#print(a.format, a.num_tracks, a.timing, a.tickdiv) | 0.434221 | 0.452475 |
import datetime
import json
import os
from flask import jsonify
from flask import request
from flask.views import MethodView
from common.config import DOC_DIR, DOC_TEMPLATE_DIR
from common.constant import EFFECT_TIME_NOW
class ResignDirectorHandler(MethodView):
    """Generates a director/supervisor resignation announcement as .docx.

    POST expects a JSON body with an "items" list, one entry per person
    (keys: sex, lastname, firstname, job, position, reason, date,
    effect_time, company).  The rendered document is stored under DOC_DIR
    and a JSON payload with a download URL is returned.
    """

    methods = ['GET', 'POST']

    def post(self):
        if request.data:
            data_json = json.loads(request.data)
        else:
            # Some clients post the JSON document as the sole form key.
            # dict.keys() is a non-indexable view on Python 3, so the old
            # ".keys()[0]" raised TypeError; use an iterator instead.
            data_json = json.loads(next(iter(request.form.to_dict())))
        from docxtpl import DocxTemplate
        tpl = DocxTemplate('%s/resign_tpl.docx' % DOC_TEMPLATE_DIR)
        # Parameters shared by every person in the notice.
        effect_time = data_json['items'][0]['effect_time']
        date = data_json['items'][0]['date']
        time_format = datetime.datetime.strptime(date, '%Y-%m-%d')
        date = time_format.strftime('%d %B %Y')
        company = data_json['items'][0]['company']
        # Notice-type labels (currently unused -- kept for reference).
        notice_type_dic = {
            1: u'董事辞任',
            2: u'监事辞任',
            3: u'委任董事',
            4: u'委任监事',
            5: u'委任职工董事',
            6: u'委任职工监事',
            7: u'变更董事',
            8: u'变更监事',
        }
        # Maps the numeric "job" code to the English title used in the text.
        job_dic = {
            1: "Non-Executive Director",
            2: "Supervisory Committee",
            3: "Supervisory Committee",
            4: "Supervisory Committee",
            5: "Supervisory Committee",
            6: "Supervisory Committee",
        }
        single_person_flag = len(data_json['items']) == 1
        # Build the title.
        title = job_dic[data_json['items'][0]['job']] if single_person_flag else "Directors"
        # Build the first paragraph.
        first_a = ""
        for item in data_json['items']:
            sex_he = 'he' if item['sex'] == 1 else 'she'
            sex_his = 'his' if item['sex'] == 1 else 'her'
            sex_Mr = 'Mr. %s' % item['lastname'] if item['sex'] == 1 else 'Ms. %s' % item['lastname']
            sex_Mr_long = 'Mr. %s %s' % (item['lastname'], item['firstname']) if item['sex'] == 1 else 'Ms. %s%s' % (
                item['lastname'], item['firstname'])
            first_a += u"a resignation letter from %s (“%s”), informing the Board of %s resignation from the position" \
                       u" as the %s of the Company due to %s " % \
                       (sex_Mr_long, sex_Mr, sex_his, item['position'], item['reason'])
            first_a += ', and '
        # Drop the trailing ", and " (6 characters) after the last item.
        first_a = first_a[0:-6]
        if effect_time == EFFECT_TIME_NOW:
            first_b = "The resignation takes effect immediately"
        else:
            _t = u'、'.join([job_dic[v["job"]] for v in data_json["items"]])
            first_b = u"The resignation will take effect upon the election of the new %s of the Company." % _t
        # Build the second paragraph.
        if len(data_json['items']) == 1:
            item = data_json["items"][0]
            sex_he = 'he' if item['sex'] == 1 else 'she'
            sex_his = 'his' if item['sex'] == 1 else 'her'
            sex_Mr = 'Mr.' + item['lastname'] if item['sex'] == 1 else 'Ms.' + item['lastname']
            second_a = "%s " % (sex_Mr)
            second_b = "%s has" % (sex_he)
            third_a = "%s for %s" % (sex_Mr, sex_his)
            third_b = "%s" % (sex_his)
        else:
            tmp_str = ""
            for item in data_json['items']:
                sex_Mr = 'Mr. ' + item['lastname'] if item['sex'] == 1 else 'Ms. ' + item['lastname']
                tmp_str += "%s and " % sex_Mr
            # Drop the trailing "and " after the last name.
            tmp_str = tmp_str[0:-4]
            second_a = "Each of %s " % (tmp_str)
            second_b = "they have"
            third_a = "their"
            third_b = "their"
        context = data_json
        context['in_europe'] = True
        context['is_paid'] = False
        context['title'] = title
        context['date'] = date
        context['company'] = company
        context['first_a'] = first_a
        context['first_b'] = first_b
        context['second_a'] = second_a
        context['second_b'] = second_b
        context['third_a'] = third_a
        context['third_b'] = third_b
        tpl.render(context)
        if not os.path.exists(DOC_DIR):
            os.mkdir(DOC_DIR)
        # Save the generated document under a timestamped name.
        # NOTE(review): ":" in the filename is invalid on Windows -- confirm OS.
        now = datetime.datetime.now().strftime("%Y%m%d:%H%M%S")
        new_filename = 'resign_director%s.docx' % now
        new_filepath = '%s/%s' % (DOC_DIR, new_filename)
        tpl.save(new_filepath)
        return jsonify({"msg": "ok", "code": 0, "url": "/download/%s" % new_filename})
import json
import os
from flask import jsonify
from flask import request
from flask.views import MethodView
from common.config import DOC_DIR, DOC_TEMPLATE_DIR
from common.constant import EFFECT_TIME_NOW
class ResignDirectorHandler(MethodView):
    """Generates a director/supervisor resignation announcement as .docx.

    POST expects a JSON body with an "items" list, one entry per person
    (keys: sex, lastname, firstname, job, position, reason, date,
    effect_time, company).  The rendered document is stored under DOC_DIR
    and a JSON payload with a download URL is returned.
    """

    methods = ['GET', 'POST']

    def post(self):
        if request.data:
            data_json = json.loads(request.data)
        else:
            # Some clients post the JSON document as the sole form key.
            # dict.keys() is a non-indexable view on Python 3, so the old
            # ".keys()[0]" raised TypeError; use an iterator instead.
            data_json = json.loads(next(iter(request.form.to_dict())))
        from docxtpl import DocxTemplate
        tpl = DocxTemplate('%s/resign_tpl.docx' % DOC_TEMPLATE_DIR)
        # Parameters shared by every person in the notice.
        effect_time = data_json['items'][0]['effect_time']
        date = data_json['items'][0]['date']
        time_format = datetime.datetime.strptime(date, '%Y-%m-%d')
        date = time_format.strftime('%d %B %Y')
        company = data_json['items'][0]['company']
        # Notice-type labels (currently unused -- kept for reference).
        notice_type_dic = {
            1: u'董事辞任',
            2: u'监事辞任',
            3: u'委任董事',
            4: u'委任监事',
            5: u'委任职工董事',
            6: u'委任职工监事',
            7: u'变更董事',
            8: u'变更监事',
        }
        # Maps the numeric "job" code to the English title used in the text.
        job_dic = {
            1: "Non-Executive Director",
            2: "Supervisory Committee",
            3: "Supervisory Committee",
            4: "Supervisory Committee",
            5: "Supervisory Committee",
            6: "Supervisory Committee",
        }
        single_person_flag = len(data_json['items']) == 1
        # Build the title.
        title = job_dic[data_json['items'][0]['job']] if single_person_flag else "Directors"
        # Build the first paragraph.
        first_a = ""
        for item in data_json['items']:
            sex_he = 'he' if item['sex'] == 1 else 'she'
            sex_his = 'his' if item['sex'] == 1 else 'her'
            sex_Mr = 'Mr. %s' % item['lastname'] if item['sex'] == 1 else 'Ms. %s' % item['lastname']
            sex_Mr_long = 'Mr. %s %s' % (item['lastname'], item['firstname']) if item['sex'] == 1 else 'Ms. %s%s' % (
                item['lastname'], item['firstname'])
            first_a += u"a resignation letter from %s (“%s”), informing the Board of %s resignation from the position" \
                       u" as the %s of the Company due to %s " % \
                       (sex_Mr_long, sex_Mr, sex_his, item['position'], item['reason'])
            first_a += ', and '
        # Drop the trailing ", and " (6 characters) after the last item.
        first_a = first_a[0:-6]
        if effect_time == EFFECT_TIME_NOW:
            first_b = "The resignation takes effect immediately"
        else:
            _t = u'、'.join([job_dic[v["job"]] for v in data_json["items"]])
            first_b = u"The resignation will take effect upon the election of the new %s of the Company." % _t
        # Build the second paragraph.
        if len(data_json['items']) == 1:
            item = data_json["items"][0]
            sex_he = 'he' if item['sex'] == 1 else 'she'
            sex_his = 'his' if item['sex'] == 1 else 'her'
            sex_Mr = 'Mr.' + item['lastname'] if item['sex'] == 1 else 'Ms.' + item['lastname']
            second_a = "%s " % (sex_Mr)
            second_b = "%s has" % (sex_he)
            third_a = "%s for %s" % (sex_Mr, sex_his)
            third_b = "%s" % (sex_his)
        else:
            tmp_str = ""
            for item in data_json['items']:
                sex_Mr = 'Mr. ' + item['lastname'] if item['sex'] == 1 else 'Ms. ' + item['lastname']
                tmp_str += "%s and " % sex_Mr
            # Drop the trailing "and " after the last name.
            tmp_str = tmp_str[0:-4]
            second_a = "Each of %s " % (tmp_str)
            second_b = "they have"
            third_a = "their"
            third_b = "their"
        context = data_json
        context['in_europe'] = True
        context['is_paid'] = False
        context['title'] = title
        context['date'] = date
        context['company'] = company
        context['first_a'] = first_a
        context['first_b'] = first_b
        context['second_a'] = second_a
        context['second_b'] = second_b
        context['third_a'] = third_a
        context['third_b'] = third_b
        tpl.render(context)
        if not os.path.exists(DOC_DIR):
            os.mkdir(DOC_DIR)
        # Save the generated document under a timestamped name.
        # NOTE(review): ":" in the filename is invalid on Windows -- confirm OS.
        now = datetime.datetime.now().strftime("%Y%m%d:%H%M%S")
        new_filename = 'resign_director%s.docx' % now
        new_filepath = '%s/%s' % (DOC_DIR, new_filename)
        tpl.save(new_filepath)
        return jsonify({"msg": "ok", "code": 0, "url": "/download/%s" % new_filename})
import json
import argparse
import string
def get_parser():
    """Builds the command line argument parser."""
    parser = argparse.ArgumentParser(description="Twitter Downloader")
    parser.add_argument("-fn", "--fname", dest="fname")
    parser.add_argument("-d", "--data-dir", dest="data_dir",
                        help="Output/Data Directory")
    return parser
def full_version_json(source_file, data_dir):
    """Append every tweet, pretty-printed, to <data_dir>/<source_file>_pretty.json.

    NOTE(review): the input path is hard-coded to 'data/stream_movie.json';
    ``source_file`` only names the output file.  Confirm whether the input
    should be derived from ``source_file`` instead.
    """
    outfile = "%s/%s_pretty.json" % (data_dir, source_file)
    # Open the output once (the old code re-opened it for every tweet)
    # and stream the input line by line instead of slurping it.
    with open('data/stream_movie.json', 'r') as f, open(outfile, 'a') as ff:
        for tweet in f:
            t = json.loads(tweet)
            ff.write(json.dumps(t, indent=4))  # pretty-print
            ff.write('\n')
def required_version_json(source_file, data_dir):
    """Append only the required tweet fields, pretty-printed, to
    <data_dir>/<source_file>_pretty_required.json.

    NOTE(review): input path hard-coded, as in full_version_json.
    """
    # The fields to keep (was nine separate "if k == ..." statements).
    wanted = ("user", "id", "lang", "text", "created_at", "favorite_count",
              "retweet_count", "favorited", "retweeted")
    outfile = "%s/%s_pretty_required.json" % (data_dir, source_file)
    # Open the output once and stream the input line by line.
    with open('data/stream_movie.json', 'r') as f, open(outfile, 'a') as ff:
        for tweet in f:
            t = json.loads(tweet)
            new_t = {k: v for k, v in t.items() if k in wanted}
            ff.write(json.dumps(new_t, sort_keys=True, indent=4))  # pretty-print
            ff.write('\n')
# created_at: the date of creation
# favorite_count, retweet_count: the number of favourites and retweets
# favorited, retweeted: boolean stating whether the authenticated user (you) have favourited or retweeted this tweet
# lang: acronym for the language (e.g. “en” for english)
# id: the tweet identifier
# place, coordinates, geo: geo-location information if available
# user: the author’s full profile
# entities: list of entities like URLs, @-mentions, hashtags and symbols
# in_reply_to_user_id: user identifier if the tweet is a reply to a specific user
# in_reply_to_status_id: status identifier id the tweet is a reply to a specific status
if __name__ == '__main__':
    # Parse -fn/-d from the command line and emit the pretty variant(s).
    parser = get_parser()
    args = parser.parse_args()
    full_version_json(args.fname, args.data_dir)
required_version_json(args.fname, args.data_dir) | pretty_required_json.py |
import json
import argparse
import string
def get_parser():
    """Builds the command line argument parser."""
    parser = argparse.ArgumentParser(description="Twitter Downloader")
    parser.add_argument("-fn", "--fname", dest="fname")
    parser.add_argument("-d", "--data-dir", dest="data_dir",
                        help="Output/Data Directory")
    return parser
def full_version_json(source_file, data_dir):
    """Append every tweet, pretty-printed, to <data_dir>/<source_file>_pretty.json.

    NOTE(review): the input path is hard-coded to 'data/stream_movie.json';
    ``source_file`` only names the output file.  Confirm whether the input
    should be derived from ``source_file`` instead.
    """
    outfile = "%s/%s_pretty.json" % (data_dir, source_file)
    # Open the output once (the old code re-opened it for every tweet)
    # and stream the input line by line instead of slurping it.
    with open('data/stream_movie.json', 'r') as f, open(outfile, 'a') as ff:
        for tweet in f:
            t = json.loads(tweet)
            ff.write(json.dumps(t, indent=4))  # pretty-print
            ff.write('\n')
def required_version_json(source_file, data_dir):
    """Append only the required tweet fields, pretty-printed, to
    <data_dir>/<source_file>_pretty_required.json.

    NOTE(review): input path hard-coded, as in full_version_json.
    """
    # The fields to keep (was nine separate "if k == ..." statements).
    wanted = ("user", "id", "lang", "text", "created_at", "favorite_count",
              "retweet_count", "favorited", "retweeted")
    outfile = "%s/%s_pretty_required.json" % (data_dir, source_file)
    # Open the output once and stream the input line by line.
    with open('data/stream_movie.json', 'r') as f, open(outfile, 'a') as ff:
        for tweet in f:
            t = json.loads(tweet)
            new_t = {k: v for k, v in t.items() if k in wanted}
            ff.write(json.dumps(new_t, sort_keys=True, indent=4))  # pretty-print
            ff.write('\n')
# created_at: the date of creation
# favorite_count, retweet_count: the number of favourites and retweets
# favorited, retweeted: boolean stating whether the authenticated user (you) have favourited or retweeted this tweet
# lang: acronym for the language (e.g. “en” for english)
# id: the tweet identifier
# place, coordinates, geo: geo-location information if available
# user: the author’s full profile
# entities: list of entities like URLs, @-mentions, hashtags and symbols
# in_reply_to_user_id: user identifier if the tweet is a reply to a specific user
# in_reply_to_status_id: status identifier id the tweet is a reply to a specific status
if __name__ == '__main__':
    # Parse -fn/-d from the command line and emit the pretty variant(s).
    parser = get_parser()
    args = parser.parse_args()
    full_version_json(args.fname, args.data_dir)
required_version_json(args.fname, args.data_dir) | 0.35488 | 0.134747 |
from __future__ import absolute_import, print_function
import uuid
from invenio_pidstore.models import PersistentIdentifier
from invenio_records_rest.schemas import Nested, StrictKeysMixin
from invenio_records_rest.schemas.fields import DateString, GenFunction, \
SanitizedHTML, SanitizedUnicode
from marshmallow import ValidationError, fields, missing, validate
from invenio_communities.api import Community
def pid_from_context_or_rec(data_value, context, **kwargs):
    """Get PID from marshmallow context."""
    ctx = context or {}
    pid = ctx.get('pid')
    pid_value = getattr(pid, 'pid_value', None) or data_value
    if not pid_value:
        raise ValidationError('Missing data for required field.')
    if not pid:
        # No PID in context: make sure the requested ID is still free.
        existing = PersistentIdentifier.query.filter_by(
            pid_type='comid', pid_value=pid_value).one_or_none()
        if existing:
            raise ValidationError(
                'ID "{}" is already assigned to a community.'.format(pid_value))
    return pid_value
def load_creator(_, context):
    """Load the record creator."""
    record = context.get('record')
    # Prefer the creator already stored on the record being updated.
    if record:
        return record.get('created_by', missing)
    # TODO a validation error must be raised in each case
    return context.get('user_id', missing)
def serialize_creator(record, context):
    """Serialize the record creator."""
    return record.get('created_by', missing)
class CommunitySchemaMetadataV1(StrictKeysMixin):
    """Community metadata schema."""

    # Dumped back out under the literal "$schema" key.
    schema_ = fields.Str(attribute="$schema", dump_to="$schema")
    # Community identifier; uniqueness-checked on load.
    id = GenFunction(
        deserialize=pid_from_context_or_rec,
        serialize=pid_from_context_or_rec  # to be added only when loading
    )
    title = SanitizedUnicode(required=True)
    description = SanitizedHTML()
    curation_policy = SanitizedHTML()
    page = SanitizedHTML()
    type = fields.Str(required=True, validate=validate.OneOf([
        'organization',
        'event',
        'topic',
        'project',
    ]))
    alternate_identifiers = fields.List(fields.Raw())
    website = fields.Url()
    funding = fields.List(fields.String())
    domain = fields.List(fields.String())
    verified = fields.Boolean()
    visibility = fields.Str(validate=validate.OneOf([
        'public',
        'private',
        'hidden',
    ]))
    member_policy = fields.Str(validate=validate.OneOf([
        'open',
        'closed',
    ]))
    record_policy = fields.Str(validate=validate.OneOf([
        'open',
        'closed',
        'restricted',
    ]))
    archived = fields.Boolean()
    # Filled from the marshmallow context (user_id) or the existing record.
    created_by = GenFunction(
        deserialize=load_creator,
        serialize=serialize_creator
    )
class CommunitySchemaV1(StrictKeysMixin):
    """Schema for the community metadata."""

    # Envelope fields are computed by the service and only serialized.
    created = fields.Str(dump_only=True)
    revision = fields.Integer(dump_only=True)
    updated = fields.Str(dump_only=True)
    links = fields.Raw(dump_only=True)
metadata = fields.Nested(CommunitySchemaMetadataV1) | invenio_communities/marshmallow/json.py | from __future__ import absolute_import, print_function
import uuid
from invenio_pidstore.models import PersistentIdentifier
from invenio_records_rest.schemas import Nested, StrictKeysMixin
from invenio_records_rest.schemas.fields import DateString, GenFunction, \
SanitizedHTML, SanitizedUnicode
from marshmallow import ValidationError, fields, missing, validate
from invenio_communities.api import Community
def pid_from_context_or_rec(data_value, context, **kwargs):
    """Get PID from marshmallow context."""
    ctx = context or {}
    pid = ctx.get('pid')
    pid_value = getattr(pid, 'pid_value', None) or data_value
    if not pid_value:
        raise ValidationError('Missing data for required field.')
    if not pid:
        # No PID in context: make sure the requested ID is still free.
        existing = PersistentIdentifier.query.filter_by(
            pid_type='comid', pid_value=pid_value).one_or_none()
        if existing:
            raise ValidationError(
                'ID "{}" is already assigned to a community.'.format(pid_value))
    return pid_value
def load_creator(_, context):
    """Load the record creator."""
    record = context.get('record')
    # Prefer the creator already stored on the record being updated.
    if record:
        return record.get('created_by', missing)
    # TODO a validation error must be raised in each case
    return context.get('user_id', missing)
def serialize_creator(record, context):
    """Serialize the record creator."""
    return record.get('created_by', missing)
class CommunitySchemaMetadataV1(StrictKeysMixin):
    """Community metadata schema."""

    # Dumped back out under the literal "$schema" key.
    schema_ = fields.Str(attribute="$schema", dump_to="$schema")
    # Community identifier; uniqueness-checked on load.
    id = GenFunction(
        deserialize=pid_from_context_or_rec,
        serialize=pid_from_context_or_rec  # to be added only when loading
    )
    title = SanitizedUnicode(required=True)
    description = SanitizedHTML()
    curation_policy = SanitizedHTML()
    page = SanitizedHTML()
    type = fields.Str(required=True, validate=validate.OneOf([
        'organization',
        'event',
        'topic',
        'project',
    ]))
    alternate_identifiers = fields.List(fields.Raw())
    website = fields.Url()
    funding = fields.List(fields.String())
    domain = fields.List(fields.String())
    verified = fields.Boolean()
    visibility = fields.Str(validate=validate.OneOf([
        'public',
        'private',
        'hidden',
    ]))
    member_policy = fields.Str(validate=validate.OneOf([
        'open',
        'closed',
    ]))
    record_policy = fields.Str(validate=validate.OneOf([
        'open',
        'closed',
        'restricted',
    ]))
    archived = fields.Boolean()
    # Filled from the marshmallow context (user_id) or the existing record.
    created_by = GenFunction(
        deserialize=load_creator,
        serialize=serialize_creator
    )
class CommunitySchemaV1(StrictKeysMixin):
    """Schema for the community metadata."""

    # Envelope fields are computed by the service and only serialized.
    created = fields.Str(dump_only=True)
    revision = fields.Integer(dump_only=True)
    updated = fields.Str(dump_only=True)
    links = fields.Raw(dump_only=True)
metadata = fields.Nested(CommunitySchemaMetadataV1) | 0.505127 | 0.17522 |
import asyncio
def merge_nodes(a, b):
    """Recursively and non-destructively merges two nodes. Returns the newly
    created node.
    """
    if a is None or b is None:
        return b if a is None else a
    # Keep the smaller item on top (min-heap order; ties keep "a").
    lo, hi = (a, b) if a[0] <= b[0] else (b, a)
    # The skew-heap trick: merge into the old right child, then swap sides.
    return lo[0], merge_nodes(hi, lo[2]), lo[1]
def pop_node(root):
    """Removes the top element from the root of the tree. Returns the element
    and the merged subtrees.
    """
    # Discard the root node; its two children merge into the new tree.
    return root[0], merge_nodes(root[1], root[2])
def explain_node_str(root, indent=0):
    """Returns an indented outline-style representation of the subtree."""
    pad = " " * indent
    text = f"{pad}Node<item={root[0]}>"
    # Leaf nodes end the line immediately; inner nodes list their children.
    if not root[1] and not root[2]:
        return text + "\n"
    text += ":\n"
    if root[1]:
        text += f"{pad} -Left:\n"
        text += explain_node_str(root[1], indent + 1)
    if root[2]:
        text += f"{pad} -Right:\n"
        text += explain_node_str(root[2], indent + 1)
    return text
class SkewHeap:
    """A min heap / priority queue built on the skew-heap merge algorithm.

    Every operation is expressed through merging, which amortizes the cost
    of rebalancing and also makes it cheap to combine two heaps
    non-destructively.  Items come out in ascending order; implement
    __gt__ on the item class to control the ordering.
    """

    def __init__(self):
        self.size = 0
        self.root = None

    def __repr__(self):
        text = f"SkewHeap<size={self.size}>:\n"
        if self.root is None:
            text += " (Empty)"
        else:
            text += explain_node_str(self.root, 1)
        return text

    def __str__(self):
        return self.__repr__()

    @classmethod
    def merge(cls, *heaps):
        """Non-destructively merges *heaps into a single, new heap. Returns the
        new heap.

        newheap = SkewHeap.merge(a, b, c, ...)
        """
        merged = SkewHeap()
        for heap in heaps:
            merged.size += heap.size
            merged.root = merge_nodes(merged.root, heap.root)
        return merged

    @property
    def is_empty(self):
        """True when the heap holds no elements."""
        return self.size == 0

    def put(self, *args):
        """Adds one or more new elements. Returns the heap's new size."""
        for element in args:
            # A fresh leaf node merged into the existing tree.
            self.root = merge_nodes(self.root, [element, None, None])
            self.size += 1
        return self.size

    def take(self):
        """Removes and returns the smallest element, or None when empty."""
        if self.is_empty:
            return None
        self.size -= 1
        smallest, self.root = pop_node(self.root)
        return smallest

    def peek(self):
        """Returns the smallest element without removing it, or None."""
        return None if self.is_empty else self.root[0]

    def adopt(self, *heaps):
        """Merges the elements from additional heaps into this one. The other
        heaps are left intact.
        """
        for heap in heaps:
            self.size += heap.size
            self.root = merge_nodes(self.root, heap.root)
        return self.size

    def items(self):
        """Yields the heap's elements in ascending order (consuming them)."""
        while not self.is_empty:
            yield self.take()

    def drain(self):
        """Removes and returns all elements from the heap as a list."""
        return list(self.items())
class AsyncSkewHeap:
    """A SkewHeap whose contents can be accessed asynchronously. Calls
    to take() will block until an element is available.
    """

    def __init__(self):
        super().__init__()
        self.heap = SkewHeap()
        # Set by shutdown(); never cleared afterwards.
        self.ev = asyncio.Event()
        # Counts elements available to take(); starts at zero (empty).
        self.sem = asyncio.Semaphore(0)

    @property
    def is_empty(self):
        """True when the heap is empty."""
        return self.heap.is_empty

    @property
    def is_shutdown(self):
        """True once the heap has been shutdown with shutdown()."""
        return self.ev.is_set()

    def shutdown(self):
        """Shutting down the heap will awaken all pending calls to take(),
        returning None to them. Future callers to take() will receive immediate
        results. Items may still be added to the heap, but it will no longer
        block when calling take().
        """
        # NOTE(review): this only sets the event.  Callers already blocked in
        # take() are parked on self.sem, which is not released here, so they
        # do not appear to be awakened as the docstring claims -- confirm.
        self.ev.set()

    async def join(self):
        """Blocks until the queue has been shut down."""
        if not self.is_shutdown:
            await self.ev.wait()

    async def take(self):
        """Returns the next item in the queue, blocking until one is available
        if necessary.
        """
        if self.is_shutdown:
            # After shutdown, bypass the semaphore and return immediately
            # (None when the heap is empty).
            return self.heap.take()
        async with self.sem:
            return self.heap.take()

    def put(self, *args):
        """Adds any number of items to the queue."""
        for item in args:
            self.heap.put(item)
            # One semaphore permit per element so a waiter can consume it.
            if not self.is_shutdown:
                self.sem.release()

    def adopt(self, *heaps):
        """Merges other heaps into this one. The other heaps are left intact.
        """
        prev_size = self.heap.size
        self.heap.adopt(*heaps)
        # Release once per newly adopted element.
        for _ in range(0, self.heap.size - prev_size):
            self.sem.release()
def merge_nodes(a, b):
    """Recursively and non-destructively merge two skew-heap nodes.

    A node is an (item, left, right) triple (or None for an empty tree).
    The smaller root wins, and the winner's subtrees are swapped on the
    way down -- the swap is what keeps a skew heap loosely balanced.
    Returns the merged node.
    """
    if a is None or b is None:
        return a if b is None else b
    winner, loser = (a, b) if not a[0] > b[0] else (b, a)
    return winner[0], merge_nodes(loser, winner[2]), winner[1]
def pop_node(root):
    """Split the top element off *root*.

    Returns an (item, new_root) pair where new_root is the merge of the
    two subtrees. *root* must not be None.
    """
    return root[0], merge_nodes(root[1], root[2])
def explain_node_str(root, indent=0):
    """Render *root* and its descendants as an indented outline string."""
    pad = " " * indent
    left, right = root[1], root[2]
    buf = f"{pad}Node<item={root[0]}>"
    # Leaves end the line immediately; interior nodes get a colon and
    # one labelled section per present child.
    buf += ":\n" if (left or right) else "\n"
    if left:
        buf += f"{pad} -Left:\n" + explain_node_str(left, indent + 1)
    if right:
        buf += f"{pad} -Right:\n" + explain_node_str(right, indent + 1)
    return buf
class SkewHeap:
    """A skew heap is a min heap or priority queue which amortizes the cost of
    rebalancing using an elegant merge algorithm. All operations on a skew heap
    are defined in terms of the merge algorithm.

    An interesting side effect of this is that skew heaps can be quickly and
    easily merged non-destructively.

    Items added to the heap will be returned in order from lowest to highest.
    To control the ordering, implement __gt__ on the class of the items being
    inserted.
    """

    def __init__(self):
        self.size = 0    # number of elements currently stored
        self.root = None # top node as an (item, left, right) triple, or None

    def __repr__(self):
        buf = f"SkewHeap<size={self.size}>:\n"
        if self.root is None:
            buf += " (Empty)"
        else:
            buf += explain_node_str(self.root, 1)
        return buf

    def __str__(self):
        return self.__repr__()

    @classmethod
    def merge(cls, *heaps):
        """Non-destructively merges *heaps into a single, new heap. Returns the
        new heap.

            newheap = SkewHeap.merge(a, b, c, ...)
        """
        # Delegate to adopt() so the size/root bookkeeping lives in one
        # place; cls() (rather than a hard-coded SkewHeap()) also keeps
        # subclasses working.
        merged = cls()
        merged.adopt(*heaps)
        return merged

    @property
    def is_empty(self):
        """Returns True if there are no elements in the heap."""
        return self.size == 0

    def put(self, *args):
        """Adds one or more new elements to the heap. Returns the heap's new
        size.
        """
        for item in args:
            self.root = merge_nodes(self.root, [item, None, None])
            self.size += 1
        return self.size

    def take(self):
        """Removes and returns the top element from the heap. Returns None
        if the heap is empty.
        """
        if self.is_empty:
            return None
        self.size -= 1
        item, self.root = pop_node(self.root)
        return item

    def peek(self):
        """Returns the top element from the heap without removing it. Returns
        None if the heap is empty.
        """
        if self.is_empty:
            return None
        return self.root[0]

    def adopt(self, *heaps):
        """Merges the elements from additional heaps into this one. The other
        heaps are left intact. Returns the new size.
        """
        for h in heaps:
            self.size += h.size
            self.root = merge_nodes(self.root, h.root)
        return self.size

    def items(self):
        """Returns a generator of elements in the heap, yielded in ascending
        order; each yielded element is removed from the heap.
        """
        while not self.is_empty:
            yield self.take()

    def drain(self):
        """Removes and returns all elements from the heap as a list."""
        return list(self.items())
class AsyncSkewHeap:
"""A SkewHeap whose contents can be accessed asynchronously. Calls
to take() will block until an element is available.
"""
def __init__(self):
super().__init__()
self.heap = SkewHeap()
self.ev = asyncio.Event()
self.sem = asyncio.Semaphore(0)
@property
def is_empty(self):
"""True when the heap is empty."""
return self.heap.is_empty
@property
def is_shutdown(self):
"""True once the heap has been shutdown with shutdown()."""
return self.ev.is_set()
def shutdown(self):
"""Shutting down the heap will awaken all pending calls to take(),
returning None to them. Future callers to take() will receive immediate
results. Items may still be added to the heap, but it will no longer
block when calling take().
"""
self.ev.set()
async def join(self):
"""Blocks until the queue has been shut down."""
if not self.is_shutdown:
await self.ev.wait()
async def take(self):
"""Returns the next item in the queue, blocking until one is available
if necessary.
"""
if self.is_shutdown:
return self.heap.take()
async with self.sem:
return self.heap.take()
def put(self, *args):
"""Adds any number of items to the queue."""
for item in args:
self.heap.put(item)
if not self.is_shutdown:
self.sem.release()
def adopt(self, *heaps):
"""Merges other heaps into this one. The other heaps are left intact.
"""
prev_size = self.heap.size
self.heap.adopt(*heaps)
for _ in range(0, self.heap.size - prev_size):
self.sem.release() | 0.781414 | 0.661732 |
import platform
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
class SiteElement:
    """Defines site elements in a structured way and provides a convenient
    means for element manipulations (clicking, entering text, etc.)
    """

    def __init__(self, by, locator):
        self.by = by            # selenium locator strategy (a By.* value)
        self.locator = locator  # locator string used with that strategy

    def loc_it(self, driver):
        """
        Identifies element on page, based on an element locator.
        Waits (up to 10 seconds) until an element becomes available &
        visible in DOM, and then until it becomes clickable.
        Raises TimeoutException (after logging the locator) otherwise.
        """
        wait = WebDriverWait(driver, 10)
        try:
            wait.until(EC.visibility_of_element_located((self.by, self.locator)))
            target_el = wait.until(EC.element_to_be_clickable((self.by, self.locator)))
        except TimeoutException as e:
            # Log the failing locator before re-raising, for debuggability.
            print(
                "\nUnable to locate element by {}, "
                "locator: '{}'".format(self.by, self.locator)
            )
            raise e
        return target_el

    def exists(self, driver):
        """
        Returns True if the element becomes visible and clickable within
        3 seconds, False otherwise.
        """
        wait = WebDriverWait(driver, 3)
        try:
            wait.until(EC.visibility_of_element_located((self.by, self.locator)))
            target_el = wait.until(EC.element_to_be_clickable((self.by, self.locator)))
            return True
        except TimeoutException as e:
            return False

    def is_visible(self, driver):
        """
        Checks if element is visible on the page (no explicit wait).
        """
        target_el = driver.find_element(self.by, self.locator)
        return target_el.is_displayed()

    def is_selected(self, driver):
        """
        Checks if element is selected (checkbox/radio/option state).
        """
        target_el = driver.find_element(self.by, self.locator)
        return target_el.is_selected()

    def click(self, driver):
        """Identifies an element on the page. After identification
        the element is then clicked.
        """
        target_el = self.loc_it(driver)
        target_el.click()

    def double_click(self, driver):
        """
        Double click on element.
        """
        target_el = self.loc_it(driver)
        actionchains = ActionChains(driver)
        actionchains.double_click(target_el).perform()

    def javascript_click(self, driver):
        """
        Clicks an element using JavaScript
        """
        target_el = self.loc_it(driver)
        driver.execute_script("arguments[0].click();", target_el)

    def submit(self, driver):
        """Send ENTER to element, simulates submit"""
        target_el = self.loc_it(driver)
        target_el.send_keys(Keys.ENTER)

    def multi_click(self, driver):
        """Clicks an element while holding the control key, as to enable
        a multi-selection
        """
        target_el = self.loc_it(driver)
        actions = ActionChains(driver)
        actions.move_to_element(target_el)
        actions.key_down(Keys.LEFT_CONTROL)
        actions.click(target_el)
        actions.key_up(Keys.LEFT_CONTROL)
        actions.perform()

    def range_click(self, driver):
        """Clicks an element while holding the shift key, as to enable
        a range selection
        """
        target_el = self.loc_it(driver)
        actions = ActionChains(driver)
        actions.move_to_element(target_el)
        actions.key_down(Keys.LEFT_SHIFT)
        actions.click(target_el)
        actions.key_up(Keys.LEFT_SHIFT)
        actions.perform()

    def passive_click(self, driver):
        """Identifies an element on the page. After identification
        the element is then clicked, regardless if it is "interactable"
        or not
        """
        target_el = self.loc_it(driver)
        ActionChains(driver).move_to_element(target_el).click(target_el).perform()

    def clear_all_text(self, driver):
        """Uses the Ctrl+A (Cmd+A on macOS) keys combination to select all
        text before using BACKSPACE key to delete it
        """
        target_el = self.loc_it(driver)
        if platform.system() == "Darwin":  # MacOs
            ctrl_key = Keys.COMMAND
        else:
            ctrl_key = Keys.CONTROL
        ActionChains(driver).move_to_element(target_el).key_down(ctrl_key).send_keys(
            "a"
        ).key_up(ctrl_key).send_keys(Keys.BACKSPACE).perform()

    def clear_text(self, driver, size):
        """Uses backspace *size* times to clear text from a field,
        starting from the end of the field"""
        target_el = self.loc_it(driver)
        target_el.send_keys(Keys.END)
        for i in range(0, size):
            target_el.send_keys(Keys.BACK_SPACE)

    def select_option(self, driver, select_choice):
        """Selects an option from a dropdown element by its value attribute"""
        target_el = self.loc_it(driver)
        select_el = Select(target_el)
        select_el.select_by_value(select_choice)

    def select_option_text(self, driver, select_choice):
        """Selects an option from dropdown given visible text"""
        target_el = self.loc_it(driver)
        select_el = Select(target_el)
        select_el.select_by_visible_text(select_choice)

    def scroll_to(self, driver):
        """After element identification, the window is scrolled
        such that the element becomes visible in the window
        """
        target_el = self.loc_it(driver)
        # Accessing this property triggers the scroll as a side effect.
        target_el.location_once_scrolled_into_view

    def scroll_right(self, driver):
        """Scroll right using Keys.ARROW_RIGHT
        and a hold of one second
        """
        target_el = self.loc_it(driver)
        actions = ActionChains(driver)
        actions.move_to_element(target_el)
        actions.key_down(Keys.ARROW_RIGHT)
        actions.perform()
        time.sleep(1)
        actions = ActionChains(driver)
        actions.key_up(Keys.ARROW_RIGHT)
        actions.perform()

    def inject_text(self, driver, field_text):
        """Enters text into a field or other input-capable html
        element using send keys, one character at a time
        """
        target_el = self.loc_it(driver)
        for i in range(0, len(field_text)):
            target_el.send_keys(field_text[i])

    def set_path(self, driver, field_text):
        """Enters text into a field or other input-capable html
        element using send keys, best for setting path to files for upload
        """
        target_el = self.loc_it(driver)
        target_el.send_keys(field_text)

    def iframe_in(self, driver):
        """Switches driver focus to an iframe within a page"""
        target_el = self.loc_it(driver)
        driver.switch_to.frame(target_el)

    def iframe_out(self, driver):
        """Switches driver focus out of iframe and back to the
        main page
        """
        driver.switch_to.parent_frame()

    def get_attribute(self, driver, attribute):
        """Returns any attribute of website element"""
        target_el = self.loc_it(driver)
        return target_el.get_attribute(attribute)

    def get_text(self, driver):
        """Returns content text of website element"""
        target_el = self.loc_it(driver)
        return target_el.text

    def get_value(self, driver):
        """Returns the "value" attribute of website element"""
        target_el = self.loc_it(driver)
        return target_el.get_attribute("value")

    def get_href(self, driver, base_url=None):
        """Returns element href link, with relative links expanded
        into an absolute link.
        NOTE(review): base_url must be supplied when the href is
        relative, otherwise the concatenation raises TypeError.
        """
        target_el = self.loc_it(driver)
        target_href = target_el.get_attribute("href")
        if target_href[0] == "/":
            target_href = base_url + target_href
        return target_href

    def get_bag_url(self, driver, base_url=None):
        """Returns the element's data-bag-url link, with relative links
        expanded into an absolute link.
        NOTE(review): base_url must be supplied when the link is
        relative, otherwise the concatenation raises TypeError.
        """
        target_el = self.loc_it(driver)
        target_href = target_el.get_attribute("data-bag-url")
        if target_href[0] == "/":
            target_href = base_url + target_href
        return target_href

    def get_child_count(self, driver):
        """Returns the number of child elements (at any depth), given a
        parent element specification.
        NOTE(review): find_elements_by_xpath was removed in Selenium 4;
        migrate to find_elements(By.XPATH, ...) if upgrading.
        """
        target_el = self.loc_it(driver)
        return len(target_el.find_elements_by_xpath(".//*"))

    def get_immediate_child_count(self, driver):
        """Returns the number of immediate child elements, given a parent
        element specification.
        NOTE(review): find_elements_by_xpath was removed in Selenium 4;
        migrate to find_elements(By.XPATH, ...) if upgrading.
        """
        target_el = self.loc_it(driver)
        return len(target_el.find_elements_by_xpath("*"))

    def get_class(self, driver):
        """Returns the element's class attribute string."""
        target_el = self.loc_it(driver)
        target_class = target_el.get_attribute("class")
        return target_class

    def get_style(self, driver):
        """Returns the element's inline style attribute string."""
        target_el = self.loc_it(driver)
        target_style = target_el.get_attribute("style")
        return target_style

    def wait_on_visibility(self, driver, max_time):
        """Blocks up to max_time seconds until the element is visible."""
        locator = self.by, self.locator
        WebDriverWait(driver, max_time).until(EC.visibility_of_element_located(locator))

    def right_click(self, driver):
        """Context-clicks (right-clicks) the element."""
        target_el = self.loc_it(driver)
        actions = ActionChains(driver)
        actions.context_click(target_el)
        actions.perform()
class SiteElementsCollection:
    """
    Provides a way to locate all page elements which are identified by a
    common locator.
    """

    def __init__(self, by, locator):
        self.by = by            # selenium locator strategy (a By.* value)
        self.locator = locator  # locator string shared by all elements

    def loc_them(self, driver):
        """
        Finds all elements on a page that match a given locator.
        Waits (up to 30 seconds) until all elements become visible in a DOM.
        Raises TimeoutException (after logging the locator) otherwise.
        """
        wait = WebDriverWait(driver, 30)
        try:
            elements = wait.until(
                EC.visibility_of_all_elements_located((self.by, self.locator))
            )
        except TimeoutException as e:
            # Log the failing locator before re-raising, for debuggability.
            print(
                "\nUnable to locate elements by {}, "
                "locator: '{}'".format(self.by, self.locator)
            )
            raise e
        return elements

    def items(self, driver):
        """Convenience alias for loc_them()."""
        return self.loc_them(driver)
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
class SiteElement:
"""Defines site elements in a structured way and provides a convenient
means for element manipulations (clicking, entering text, etc.)
"""
def __init__(self, by, locator):
self.by = by
self.locator = locator
def loc_it(self, driver):
"""
Identifies element on page, based on an element locator.
Waits until an element becomes available & visible in DOM, and
then until it becomes clickable.
"""
wait = WebDriverWait(driver, 10)
try:
wait.until(EC.visibility_of_element_located((self.by, self.locator)))
target_el = wait.until(EC.element_to_be_clickable((self.by, self.locator)))
except TimeoutException as e:
print(
"\nUnable to locate element by {}, "
"locator: '{}'".format(self.by, self.locator)
)
raise e
return target_el
def exists(self, driver):
"""
Checks if element is visible on the page.
"""
wait = WebDriverWait(driver, 3)
try:
wait.until(EC.visibility_of_element_located((self.by, self.locator)))
target_el = wait.until(EC.element_to_be_clickable((self.by, self.locator)))
return True
except TimeoutException as e:
return False
def is_visible(self, driver):
"""
Checks if element is visible on the page.
"""
target_el = driver.find_element(self.by, self.locator)
return target_el.is_displayed()
def is_selected(self, driver):
"""
Checks if element is visible on the page.
"""
target_el = driver.find_element(self.by, self.locator)
return target_el.is_selected()
def click(self, driver):
"""Identifies an element on the page. After identification
the element is then clicked.
"""
target_el = self.loc_it(driver)
target_el.click()
def double_click(self, driver):
"""
Double click on element.
"""
target_el = self.loc_it(driver)
actionchains = ActionChains(driver)
actionchains.double_click(target_el).perform()
def javascript_click(self, driver):
"""
Clicks an element using JavaScript
"""
target_el = self.loc_it(driver)
driver.execute_script("arguments[0].click();", target_el)
def submit(self, driver):
"""Send ENTER to element, simulates submit"""
target_el = self.loc_it(driver)
target_el.send_keys(Keys.ENTER)
def multi_click(self, driver):
"""Clicks an element while holding the control key, as to enable
a multi-selection
"""
target_el = self.loc_it(driver)
actions = ActionChains(driver)
actions.move_to_element(target_el)
actions.key_down(Keys.LEFT_CONTROL)
actions.click(target_el)
actions.key_up(Keys.LEFT_CONTROL)
actions.perform()
def range_click(self, driver):
"""Clicks an element while holding the control key, as to enable
a range selection
"""
target_el = self.loc_it(driver)
actions = ActionChains(driver)
actions.move_to_element(target_el)
actions.key_down(Keys.LEFT_SHIFT)
actions.click(target_el)
actions.key_up(Keys.LEFT_SHIFT)
actions.perform()
def passive_click(self, driver):
"""Identifies an element on the page. After identification
the element is then clicked, regardless if it is "interactable"
or not
"""
target_el = self.loc_it(driver)
ActionChains(driver).move_to_element(target_el).click(target_el).perform()
def clear_all_text(self, driver):
"""Uses the Ctrl+A keys combination to select all text before using
BACKSPACE key to delete it
"""
target_el = self.loc_it(driver)
if platform.system() == "Darwin": # MacOs
ctrl_key = Keys.COMMAND
else:
ctrl_key = Keys.CONTROL
ActionChains(driver).move_to_element(target_el).key_down(ctrl_key).send_keys(
"a"
).key_up(ctrl_key).send_keys(Keys.BACKSPACE).perform()
def clear_text(self, driver, size):
"""Uses backspace to clear text from a field"""
target_el = self.loc_it(driver)
target_el.send_keys(Keys.END)
for i in range(0, size):
target_el.send_keys(Keys.BACK_SPACE)
def select_option(self, driver, select_choice):
"""Selects an option from a dropdown element"""
target_el = self.loc_it(driver)
select_el = Select(target_el)
select_el.select_by_value(select_choice)
def select_option_text(self, driver, select_choice):
"""Selects an option from dropdown given visible text"""
target_el = self.loc_it(driver)
select_el = Select(target_el)
select_el.select_by_visible_text(select_choice)
def scroll_to(self, driver):
"""After element identification, the window is scrolled
such that the element becomes visible in the window
"""
target_el = self.loc_it(driver)
target_el.location_once_scrolled_into_view
def scroll_right(self, driver):
"""Scroll right using Keys.ARROW_RIGHT
and a hold of one second
"""
target_el = self.loc_it(driver)
actions = ActionChains(driver)
actions.move_to_element(target_el)
actions.key_down(Keys.ARROW_RIGHT)
actions.perform()
time.sleep(1)
actions = ActionChains(driver)
actions.key_up(Keys.ARROW_RIGHT)
actions.perform()
def inject_text(self, driver, field_text):
"""Enters text into a field or other input-capable html
element using send keys
"""
target_el = self.loc_it(driver)
for i in range(0, len(field_text)):
target_el.send_keys(field_text[i])
def set_path(self, driver, field_text):
"""Enters text into a field or other input-capable html
element using send keys, best for setting path to files for upload
"""
target_el = self.loc_it(driver)
target_el.send_keys(field_text)
def iframe_in(self, driver):
"""Switches driver focus to an iframe within a page"""
target_el = self.loc_it(driver)
driver.switch_to.frame(target_el)
def iframe_out(self, driver):
"""Switches driver focus out of iframe and back to the
main page
"""
driver.switch_to.parent_frame()
def get_attribute(self, driver, attribute):
"""Returns any attribute of website element"""
target_el = self.loc_it(driver)
return target_el.get_attribute(attribute)
def get_text(self, driver):
"""Returns content text of website element"""
target_el = self.loc_it(driver)
return target_el.text
def get_value(self, driver):
"""Returns content text of website element"""
target_el = self.loc_it(driver)
return target_el.get_attribute("value")
def get_href(self, driver, base_url=None):
"""Returns element href link, with relative links expanded
into an absolute link
"""
target_el = self.loc_it(driver)
target_href = target_el.get_attribute("href")
if target_href[0] == "/":
target_href = base_url + target_href
return target_href
def get_bag_url(self, driver, base_url=None):
"""Returns element href link, with relative links expanded
into an absolute link
"""
target_el = self.loc_it(driver)
target_href = target_el.get_attribute("data-bag-url")
if target_href[0] == "/":
target_href = base_url + target_href
return target_href
def get_child_count(self, driver):
"""Returns the number of child elements, given a parent
element specification
"""
target_el = self.loc_it(driver)
return len(target_el.find_elements_by_xpath(".//*"))
def get_immediate_child_count(self, driver):
"""Returns the number of immediate child elements, given a parent
element specification
"""
target_el = self.loc_it(driver)
return len(target_el.find_elements_by_xpath("*"))
def get_class(self, driver):
target_el = self.loc_it(driver)
target_class = target_el.get_attribute("class")
return target_class
def get_style(self, driver):
target_el = self.loc_it(driver)
target_style = target_el.get_attribute("style")
return target_style
def wait_on_visibility(self, driver, max_time):
locator = self.by, self.locator
WebDriverWait(driver, max_time).until(EC.visibility_of_element_located(locator))
def right_click(self, driver):
target_el = self.loc_it(driver)
actions = ActionChains(driver)
actions.context_click(target_el)
actions.perform()
class SiteElementsCollection:
"""
Provides a way to locate all page elements which are identified by a
common locator.
"""
def __init__(self, by, locator):
self.by = by
self.locator = locator
def loc_them(self, driver):
"""
Finds all elements on a page that match a given locator.
Waits until all elements become visible in a DOM.
"""
wait = WebDriverWait(driver, 30)
try:
elements = wait.until(
EC.visibility_of_all_elements_located((self.by, self.locator))
)
except TimeoutException as e:
print(
"\nUnable to locate elements by {}, "
"locator: '{}'".format(self.by, self.locator)
)
raise e
return elements
def items(self, driver):
return self.loc_them(driver) | 0.559771 | 0.194559 |
# Import modules
import pytest
import numpy as np
# Import from package
from pyswarms.single import GlobalBestPSO, LocalBestPSO, GeneralOptimizerPSO
from pyswarms.discrete import BinaryPSO
from pyswarms.utils.functions.single_obj import sphere_func
from pyswarms.backend.topology import Star, Ring, Pyramid, Random, VonNeumann
@pytest.fixture(scope="module")
def general_opt_history(topology):
    """GeneralOptimizerPSO instance optimized for 1000 iterations, used to
    check recorded history."""
    optimizer = GeneralOptimizerPSO(
        10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}, topology=topology
    )
    optimizer.optimize(sphere_func, 1000, verbose=0)
    return optimizer
@pytest.fixture(scope="module")
def general_opt_reset(topology):
    """GeneralOptimizerPSO instance that has been run then reset, used to
    check default values."""
    optimizer = GeneralOptimizerPSO(
        10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}, topology=topology
    )
    optimizer.optimize(sphere_func, 10, verbose=0)
    optimizer.reset()
    return optimizer
@pytest.fixture(scope="module")
def gbest_history():
    """GlobalBestPSO instance optimized for 1000 iterations, used to check
    recorded history."""
    optimizer = GlobalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5})
    optimizer.optimize(sphere_func, 1000, verbose=0)
    return optimizer
@pytest.fixture(scope="module")
def gbest_reset():
    """GlobalBestPSO instance that has been run then reset, used to check
    default values."""
    optimizer = GlobalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5})
    optimizer.optimize(sphere_func, 10, verbose=0)
    optimizer.reset()
    return optimizer
@pytest.fixture(scope="module")
def lbest_history():
    """LocalBestPSO instance optimized for 1000 iterations, used to check
    recorded history."""
    optimizer = LocalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
    optimizer.optimize(sphere_func, 1000, verbose=0)
    return optimizer
@pytest.fixture(scope="module")
def lbest_reset():
    """LocalBestPSO instance that has been run then reset, used to check
    default values."""
    optimizer = LocalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
    optimizer.optimize(sphere_func, 10, verbose=0)
    optimizer.reset()
    return optimizer
@pytest.fixture(scope="module")
def binary_history():
    """BinaryPSO instance optimized for 1000 iterations, used to check
    recorded history."""
    optimizer = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
    optimizer.optimize(sphere_func, 1000, verbose=0)
    return optimizer
@pytest.fixture(scope="module")
def binary_reset():
    """BinaryPSO instance that has been run then reset, used to check
    default values."""
    optimizer = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
    optimizer.optimize(sphere_func, 10, verbose=0)
    optimizer.reset()
    return optimizer
@pytest.fixture
def options():
    """Default options dictionary for most PSO use-cases."""
    return {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2, "r": 1}
@pytest.fixture(params=[
    Star(),
    Ring(static=False), Ring(static=True),
    Pyramid(static=False), Pyramid(static=True),
    Random(static=False), Random(static=True),
    VonNeumann()
])
def topology(request):
    """Supplies each supported topology instance in turn (parametrized)."""
    return request.param
import pytest
import numpy as np
# Import from package
from pyswarms.single import GlobalBestPSO, LocalBestPSO, GeneralOptimizerPSO
from pyswarms.discrete import BinaryPSO
from pyswarms.utils.functions.single_obj import sphere_func
from pyswarms.backend.topology import Star, Ring, Pyramid, Random, VonNeumann
@pytest.fixture(scope="module")
def general_opt_history(topology):
"""Returns a GeneralOptimizerPSO instance run for 1000 iterations for checking
history"""
pso = GeneralOptimizerPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}, topology=topology)
pso.optimize(sphere_func, 1000, verbose=0)
return pso
@pytest.fixture(scope="module")
def general_opt_reset(topology):
"""Returns a GeneralOptimizerPSO instance that has been run and reset to check
default value"""
pso = GeneralOptimizerPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}, topology=topology)
pso.optimize(sphere_func, 10, verbose=0)
pso.reset()
return pso
@pytest.fixture(scope="module")
def gbest_history():
"""Returns a GlobalBestPSO instance run for 1000 iterations for checking
history"""
pso = GlobalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5})
pso.optimize(sphere_func, 1000, verbose=0)
return pso
@pytest.fixture(scope="module")
def gbest_reset():
"""Returns a GlobalBestPSO instance that has been run and reset to check
default value"""
pso = GlobalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5})
pso.optimize(sphere_func, 10, verbose=0)
pso.reset()
return pso
@pytest.fixture(scope="module")
def lbest_history():
"""Returns a LocalBestPSO instance run for 1000 iterations for checking
history"""
pso = LocalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
pso.optimize(sphere_func, 1000, verbose=0)
return pso
@pytest.fixture(scope="module")
def lbest_reset():
"""Returns a LocalBestPSO instance that has been run and reset to check
default value"""
pso = LocalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
pso.optimize(sphere_func, 10, verbose=0)
pso.reset()
return pso
@pytest.fixture(scope="module")
def binary_history():
"""Returns a BinaryPSO instance run for 1000 iterations for checking
history"""
pso = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
pso.optimize(sphere_func, 1000, verbose=0)
return pso
@pytest.fixture(scope="module")
def binary_reset():
"""Returns a BinaryPSO instance that has been run and reset to check
default value"""
pso = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
pso.optimize(sphere_func, 10, verbose=0)
pso.reset()
return pso
@pytest.fixture
def options():
"""Default options dictionary for most PSO use-cases"""
options_ = {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2, "r": 1}
return options_
@pytest.fixture(params=[
Star(),
Ring(static=False), Ring(static=True),
Pyramid(static=False), Pyramid(static=True),
Random(static=False), Random(static=True),
VonNeumann()
])
def topology(request):
"""Parametrized topology parameter"""
topology_ = request.param
return topology_ | 0.721449 | 0.516291 |
import cuflow as cu
import dip
import sot
__VERSION__ = "1.0.0"
"""
RPi dimensions:
https://www.raspberrypi.org/documentation/hardware/raspberrypi/mechanical/rpi_MECH_4b_4p0.pdf
| GPIO | pin | color | function |
| ---- | --- | ------ | ------------------- |
| 14 | 8 | yellow | C2C: RESET |
| 17 | 11 | green | C2D: C2D |
| 18 | 12 | yellow | VS: VCC sense |
| 12 | 32 | blue | 1K: 1khz reference |
| 6 | 31 | | relay control |
1 3v3 Power
2 5v Power
3 BCM 2 (SDA)
4 5v Power
5 BCM 3 (SCL)
6 Ground
7 BCM 4 (GPCLK0)
8 BCM 14 (TXD)
9 Ground
10 BCM 15 (RXD)
11 BCM 17
12 BCM 18 (PWM0)
13 BCM 27
14 Ground
15 BCM 22
16 BCM 23
17 3v3 Power
18 BCM 24
19 BCM 10 (MOSI)
20 Ground
21 BCM 9 (MISO)
22 BCM 25
23 BCM 11 (SCLK)
24 BCM 8 (CE0)
25 Ground
26 BCM 7 (CE1)
27 BCM 0 (ID_SD)
28 BCM 1 (ID_SC)
29 BCM 5
30 Ground
31 BCM 6
32 BCM 12 (PWM0)
33 BCM 13 (PWM1)
34 Ground
35 BCM 19 (MISO)
36 BCM 16
37 BCM 26
38 BCM 20 (MOSI)
39 Ground
40 BCM 21 (SCLK)
"""
def thermal(t, layer, d = 1.3):
    """Connect terminal *t* to copper pour *layer* through a thermal relief
    of diameter *d*."""
    t.setname(layer).thermal(d).wire(layer = layer)
if __name__ == "__main__":
    # 65 x 56 board outline -- matches the Raspberry Pi dimensions linked
    # in the module docstring.
    brd = cu.Board(
        (65, 56),
        trace = 0.2,
        space = cu.inches(1 / 20) - 0.2,
        via_hole = 0.3,
        via = 0.6,
        via_space = cu.mil(5),
        silk = cu.mil(6))
    WW = 0.6 # wide wire width

    # J1: 40-pin RPi GPIO header. Ground pins tie to the bottom pour
    # (GBL), 5V pins to the top pour (GTL).
    dc = brd.DC((3.5 + 29, 56 - 3.5)).left(90)
    j1 = dip.HDR40(dc)
    for pin in "6 9 14 20 25 30 34 39".split():
        thermal(j1.s(pin), "GBL")
    for pin in "2 4".split():
        thermal(j1.s(pin), "GTL")

    # Escape the four signal pins (see the pin table in the module
    # docstring) and gather them into a routing river.
    route = (8, 12, 11, 32)
    tt = [j1.s(str(i)) for i in route]
    for t in tt:
        pn = int(t.name)
        # Even pins sit on the outer row; they take a 45-degree jog.
        if (pn % 2) == 0:
            t.left(45).forward(cu.inches(.0707)).left(45)
        else:
            t.left(90)
        t.forward(2)
    cu.extend2(tt)
    rv1 = brd.enriver90(tt, 90)
    rv1.w("l 90")
    rv1.wire()

    # J2: screw terminal; K1: reed relay; R1: 10 W resistor.
    j2 = dip.Screw2(brd.DC((60, 42)).left(90))
    thermal(j2.s("1"), "GBL", 2)
    thermal(j2.s("2"), "GTL", 2)
    k1 = dip.ReedRelay(brd.DC((40, 36)).left(90))
    thermal(k1.pads[1], "GTL")
    r1 = dip.Res10W(brd.DC((34, 25)))
    k1.pads[0].left(90).setwidth(WW).setlayer("GBL").goto(r1.pads[0]).wire()
    rv = rv1
    rv.w("f 3 l 90")

    # T1 + R: SOT-23 relay driver with a "2K3" series resistor, driven
    # from header pin 31 (BCM 6, per the table above).
    p = k1.pads[2].copy()
    p.w("l 90 f 4.5 l 180")
    t1 = sot.SOT23(p)
    t1.s("2").w("r 90 f 1 .")
    t1.s("3").goto(k1.pads[2]).wire()
    p.w("l 90 f 4 l 90")
    r = cu.R0402(p, "2K3")
    r.pads[0].goto(t1.s("1")).wire()
    p = r.pads[1]
    p.w("o")
    j1.s("31").left(90).goto(p).wire()

    # Four 2.7 mm mounting holes at the corner positions.
    for x in (0, 58):
        for y in (0, 49):
            brd.hole((3.5 + x, 3.5 + y), 2.7, 6)

    # J3: 7-pin auxiliary header, silk-labelled on both sides of each pad.
    j3 = dip.Hdr_1_7(brd.DC((46, 5)).left(90))
    for p,lbl in zip(j3.pads, ('GND', 'C2C', 'C2D', 'VS', '1K', 'L-', 'L+')):
        for a in (-90, 90):
            p.copy().right(a).forward(3.5).text(lbl)
    thermal(j3.pads[0], "GBL")
    [p.w("l 90 f 5") for p in j3.pads[1:]]
    [p.setwidth(WW).forward(6) for p in j3.pads[-2:]]
    rv3 = brd.enriver90(j3.pads[4:0:-1], -90)
    rv.meet(rv3.wire())
    j3.pads[5].goto(k1.pads[3]).wire()
    j3.pads[6].goto(r1.pads[1]).wire()

    brd.outline()
    if 1:
        brd.space = cu.mil(12) # XXX hack: widen clearance before pouring fills
        brd.fill_any("GTL", "GTL")
        brd.fill_any("GBL", "GBL")
    brd.save("pihat")
    brd.postscript("pihat.ps")
import dip
import sot
__VERSION__ = "1.0.0"
"""
RPi dimensions:
https://www.raspberrypi.org/documentation/hardware/raspberrypi/mechanical/rpi_MECH_4b_4p0.pdf
| GPIO | pin | color | function |
| ---- | --- | ------ | ------------------- |
| 14 | 8 | yellow | C2C: RESET |
| 17 | 11 | green | C2D: C2D |
| 18 | 12 | yellow | VS: VCC sense |
| 12 | 32 | blue | 1K: 1khz reference |
| 6 | 31 | | relay control |
1 3v3 Power
2 5v Power
3 BCM 2 (SDA)
4 5v Power
5 BCM 3 (SCL)
6 Ground
7 BCM 4 (GPCLK0)
8 BCM 14 (TXD)
9 Ground
10 BCM 15 (RXD)
11 BCM 17
12 BCM 18 (PWM0)
13 BCM 27
14 Ground
15 BCM 22
16 BCM 23
17 3v3 Power
18 BCM 24
19 BCM 10 (MOSI)
20 Ground
21 BCM 9 (MISO)
22 BCM 25
23 BCM 11 (SCLK)
24 BCM 8 (CE0)
25 Ground
26 BCM 7 (CE1)
27 BCM 0 (ID_SD)
28 BCM 1 (ID_SC)
29 BCM 5
30 Ground
31 BCM 6
32 BCM 12 (PWM0)
33 BCM 13 (PWM1)
34 Ground
35 BCM 19 (MISO)
36 BCM 16
37 BCM 26
38 BCM 20 (MOSI)
39 Ground
40 BCM 21 (SCLK)
"""
def thermal(t, layer, d = 1.3):
    """Tie pad *t* into the copper pour on *layer* with a thermal relief.

    Args:
        t: pad (cuflow turtle) to connect.
        layer: copper layer name, e.g. "GTL" (top) or "GBL" (bottom).
        d: relief size passed to ``thermal`` — presumably mm; confirm in cuflow.
    """
    t.setname(layer).thermal(d).wire(layer = layer)
if __name__ == "__main__":
    # NOTE(review): indentation below is reconstructed from a flattened dump —
    # confirm loop/conditional nesting against the original file.
    # 65 x 56 mm two-layer board (Raspberry Pi HAT outline per the mechanical
    # drawing referenced in the module docstring).
    brd = cu.Board(
        (65, 56),
        trace = 0.2,
        space = cu.inches(1 / 20) - 0.2,
        via_hole = 0.3,
        via = 0.6,
        via_space = cu.mil(5),
        silk = cu.mil(6))
    WW = 0.6 # wide wire width
    # 40-pin GPIO header; ground pins tie to the bottom pour, 5 V pins to top.
    dc = brd.DC((3.5 + 29, 56 - 3.5)).left(90)
    j1 = dip.HDR40(dc)
    for pin in "6 9 14 20 25 30 34 39".split():
        thermal(j1.s(pin), "GBL")
    for pin in "2 4".split():
        thermal(j1.s(pin), "GTL")
    # Escape the four signal pins (header pin numbers 8/12/11/32, see the
    # GPIO table in the module docstring) into a signal river.
    route = (8, 12, 11, 32)
    tt = [j1.s(str(i)) for i in route]
    for t in tt:
        pn = int(t.name)
        if (pn % 2) == 0:
            # Even pins sit on the other header row: diagonal jog to line up.
            t.left(45).forward(cu.inches(.0707)).left(45)
        else:
            t.left(90)
        t.forward(2)
    cu.extend2(tt)
    rv1 = brd.enriver90(tt, 90)
    rv1.w("l 90")
    rv1.wire()
    # Screw terminal: pin 1 on the ground (bottom) pour, pin 2 on the top pour.
    j2 = dip.Screw2(brd.DC((60, 42)).left(90))
    thermal(j2.s("1"), "GBL", 2)
    thermal(j2.s("2"), "GTL", 2)
    k1 = dip.ReedRelay(brd.DC((40, 36)).left(90))
    thermal(k1.pads[1], "GTL")
    r1 = dip.Res10W(brd.DC((34, 25)))
    k1.pads[0].left(90).setwidth(WW).setlayer("GBL").goto(r1.pads[0]).wire()
    rv = rv1
    rv.w("f 3 l 90")
    # Relay driver: SOT-23 transistor with a base resistor (marking "2K3"),
    # switched from header pin 31 (relay control per the docstring table).
    p = k1.pads[2].copy()
    p.w("l 90 f 4.5 l 180")
    t1 = sot.SOT23(p)
    t1.s("2").w("r 90 f 1 .")
    t1.s("3").goto(k1.pads[2]).wire()
    p.w("l 90 f 4 l 90")
    r = cu.R0402(p, "2K3")
    r.pads[0].goto(t1.s("1")).wire()
    p = r.pads[1]
    p.w("o")
    j1.s("31").left(90).goto(p).wire()
    # Four mounting holes on a 58 x 49 mm grid, inset 3.5 mm from the corners.
    for x in (0, 58):
        for y in (0, 49):
            brd.hole((3.5 + x, 3.5 + y), 2.7, 6)
    # 7-pin output header with silkscreen labels on both sides of each pad.
    j3 = dip.Hdr_1_7(brd.DC((46, 5)).left(90))
    for p,lbl in zip(j3.pads, ('GND', 'C2C', 'C2D', 'VS', '1K', 'L-', 'L+')):
        for a in (-90, 90):
            p.copy().right(a).forward(3.5).text(lbl)
    thermal(j3.pads[0], "GBL")
    [p.w("l 90 f 5") for p in j3.pads[1:]]
    [p.setwidth(WW).forward(6) for p in j3.pads[-2:]]
    rv3 = brd.enriver90(j3.pads[4:0:-1], -90)
    rv.meet(rv3.wire())
    j3.pads[5].goto(k1.pads[3]).wire()
    j3.pads[6].goto(r1.pads[1]).wire()
    brd.outline()
    if 1:
        brd.space = cu.mil(12) # XXX hack the clearance before pouring copper
        brd.fill_any("GTL", "GTL")
        brd.fill_any("GBL", "GBL")
    brd.save("pihat")
    brd.postscript("pihat.ps")
import configparser
import wmi
import csv
import logging
import logging.handlers
import os
import sys
from ServerObj import ServerObj
from Storage import *

# Resolve config/log/csv paths relative to this script so it works from any CWD.
path_current_directory = os.path.dirname(__file__)
path_config_file = os.path.join(path_current_directory, 'config.ini')
config = configparser.ConfigParser()
config.read(path_config_file)

# Log to a file and mirror everything to stdout.
console_handler = logging.StreamHandler(sys.stdout)
logfile = os.path.join(path_current_directory, 'log', config['Default']['logFile'])
os.makedirs(os.path.dirname(logfile), exist_ok=True)
logging.basicConfig(filename=logfile, filemode='w', level=logging.DEBUG)
log = logging.getLogger("serveragent")
log.addHandler(console_handler)
log.info(path_config_file)

servers = []
serverList = config['Default']['serverList']
serversFile = os.path.join(path_current_directory, 'csv', config['Default']['serverCSV'])
serverDisksFile = os.path.join(path_current_directory, 'csv', config['Default']['serverDisksCSV'])
os.makedirs(os.path.dirname(serversFile), exist_ok=True)
os.makedirs(os.path.dirname(serverDisksFile), exist_ok=True)

serverNames = serverList.split(',')
for serverName in serverNames:
    try:
        # NOTE(review): the password config key ('w<PASSWORD>') was garbled by
        # redaction in this copy — restore the real key name before running.
        conn = wmi.WMI(serverName, user=config['Default']['wmiUser'], password=config['Default']['w<PASSWORD>'])
        log.info('Connected: ' + serverName)
        cs = conn.Win32_ComputerSystem()
        # BUG FIX: this result used to be bound to the name ``os``, shadowing
        # the ``os`` module imported above; use a distinct name instead.
        win_os = conn.Win32_OperatingSystem()
        memTotal = int(int(cs[0].TotalPhysicalMemory) / 1024 / 1024)  # bytes -> MB
        memFree = int(int(win_os[0].FreePhysicalMemory) / 1024)       # KB -> MB
        server = ServerObj()
        server.name = serverName
        server.os = win_os[0].Caption
        server.totalPhysicalMemory = memTotal
        server.freePhysicalMemory = memFree
        # DriveType=3 restricts the WMI query to local fixed disks.
        for disk in conn.Win32_LogicalDisk(DriveType=3):
            d = {"ID": disk.DeviceID,
                 "DiskSize": format(int(disk.Size) / 1000000000, '.2f'),
                 "DiskFreeSpace": format(int(disk.FreeSpace) / 1000000000, '.2f')}
            server.disks.append(d)
        servers.append(server)
    except Exception as e:
        # Keep polling the remaining servers even when one is unreachable.
        log.error(e)

fieldnames = ("Server", "OS", "Total Physical Memory MB", "Free Physical Memory MB", "Date")
Storage.csvFileHeader(serversFile, fieldnames)
fieldnames = ("Server", "Disk ID", "Disk Size GB", "Disk Free Space GB", "Date")
Storage.csvFileHeader(serverDisksFile, fieldnames)
Storage.csv(servers,serversFile,serverDisksFile) | ServerAgent.py | import configparser
import wmi
import csv
import logging
import logging.handlers
import os
import sys
from ServerObj import ServerObj
from Storage import *
path_current_directory = os.path.dirname(__file__)
path_config_file = os.path.join(path_current_directory, 'config.ini')
config = configparser.ConfigParser()
config.read(path_config_file)
console_handler = logging.StreamHandler(sys.stdout)
logfile = os.path.join(path_current_directory, 'log', config['Default']['logFile'])
os.makedirs(os.path.dirname(logfile), exist_ok=True)
logging.basicConfig(filename=logfile, filemode='w', level=logging.DEBUG)
log = logging.getLogger("serveragent")
log.addHandler(console_handler)
log.info(path_config_file)
servers = []
serverList = config['Default']['serverList']
serversFile = os.path.join(path_current_directory, 'csv', config['Default']['serverCSV'])
serverDisksFile = os.path.join(path_current_directory, 'csv', config['Default']['serverDisksCSV'])
os.makedirs(os.path.dirname(serversFile), exist_ok=True)
os.makedirs(os.path.dirname(serverDisksFile), exist_ok=True)
serverNames = serverList.split(',')
for serverName in serverNames:
try:
conn = wmi.WMI(serverName, user=config['Default']['wmiUser'], password=config['Default']['w<PASSWORD>'])
log.info('Connected: ' + serverName)
cs = conn.Win32_ComputerSystem()
os = conn.Win32_OperatingSystem()
memTotal = int(int(cs[0].TotalPhysicalMemory)/1024/1024)
memFree = int(int(os[0].FreePhysicalMemory)/1024)
server = ServerObj()
server.name = serverName
server.os = os[0].Caption
server.totalPhysicalMemory = memTotal
server.freePhysicalMemory = memFree
for disk in conn.Win32_LogicalDisk (DriveType=3):
d = {"ID": disk.DeviceID, "DiskSize": format(int(disk.Size)/1000000000,'.2f'), "DiskFreeSpace": format(int(disk.FreeSpace)/1000000000,'.2f')}
server.disks.append(d)
servers.append(server)
except Exception as e:
log.error(e)
fieldnames = ("Server", "OS","Total Physical Memory MB", "Free Physical Memory MB", "Date")
Storage.csvFileHeader(serversFile, fieldnames)
fieldnames = ("Server", "Disk ID","Disk Size GB", "Disk Free Space GB", "Date")
Storage.csvFileHeader(serverDisksFile, fieldnames)
Storage.csv(servers,serversFile,serverDisksFile) | 0.075766 | 0.037187 |
from typing import ContextManager
from ipywidgets import widgets
from puzzle.constraints import constraints
from puzzle.problems import problem
from puzzle.puzzlepedia import _bind, _common, _widget_util, \
annotation_widget, \
debug_data_widget, meta_problem, table_widget
from puzzle.puzzlepedia._bind import widget_observable
from puzzle.steps import step
_MAX_RESULTS = 30
def ProblemWidget(mp: meta_problem.MetaProblem):
  """Factory for IPython widgets, pretending to be real widget.

  Assembles the UI for one MetaProblem: a dropdown of candidate problem
  kinds, a best-solution text box, per-step interactive controls, a table
  of scored solutions, and a captured-output area with a clear button.
  """
  capture = widgets.Output()
  items = []
  options = {}
  for p in mp:
    # 'p' is instance of problem.Problem.
    options[p.kind] = p
  # Dropdown.
  dropdown = widgets.Dropdown(options=options)
  items.append(dropdown)
  dropdown_source = widget_observable(dropdown)
  # Interactive information appears between dropdown + solution and the
  # table of solutions.
  interactive_information = widgets.VBox([])
  # Best solution.
  best_solution = widgets.Text()
  items.append(best_solution)

  def _on_problem_kind_change(p: problem.Problem) -> None:
    # Re-render both the solutions table and the per-step controls when the
    # user picks a different problem interpretation.
    _update_solutions_for_problem(solutions_table, best_solution, p)
    _update_interactive_information_for_problem(
        interactive_information, p, capture)

  dropdown_source.subscribe(_on_problem_kind_change)
  best_solution_source = widget_observable(best_solution)

  def _on_best_solution_change(solution: str) -> None:
    # Editing the text box commits that string as the chosen solution.
    mp.solution = solution

  best_solution_source.subscribe(_on_best_solution_change)
  solutions_table = table_widget.TableWidget()
  if mp.peek():
    # Seed the UI from the currently-preferred problem, if any.
    _update_solutions_for_problem(
        solutions_table, best_solution, mp.peek())
    _update_interactive_information_for_problem(
        interactive_information, mp.peek(), capture)
  for p in mp:
    # Keep the table in sync with every candidate problem's updates.
    p.subscribe(_bind.callback_without_event(
        _update_solutions_for_problem, solutions_table, best_solution, p))
  clear_output_button = widgets.Button(description='clear output')
  # Show/hide the clear button as captured output appears. ("ouptuts" sic.)
  ouptuts_changed = _bind.widget_observable(capture, 'outputs')
  ouptuts_changed.subscribe(_bind.callback_without_event(
      _update_clear_button_visibility, clear_output_button, capture))
  _update_clear_button_visibility(clear_output_button, capture)
  clear_output_button.on_click(
      _bind.callback_without_event(capture.clear_output))
  return widgets.VBox([
      widgets.HBox(items), interactive_information, solutions_table, capture,
      clear_output_button,
  ])
def _update_solutions_for_problem(
    table: table_widget.TableWidget,
    best_solution: widgets.Text,
    p: problem.Problem) -> None:
  """Refill *table* (and the best-solution box) from *p*'s current solutions."""
  solutions = p.solutions()
  if solutions.peek():
    best_solution.value = solutions.peek()
  rows = []
  for solution, score in solutions.items():
    if len(rows) >= _MAX_RESULTS:
      break
    notes = '<br />'.join(p.notes_for(solution))
    rows.append([round(score, 3), _common.preformat_html(solution), notes])
  table.update_data(rows, headers=['score', 'solution', 'notes'])
def _update_interactive_information_for_problem(
    interactive_information: widgets.VBox,
    p: problem.Problem,
    capture: ContextManager):
  """Rebuild the per-step accordion (constraint tabs + debug data) for *p*."""
  accordion_children = []
  steps = list(p.steps())
  for s in steps:
    step_tabs_children = []
    for group in s.constraints():
      child_constraints = []
      group_container = widgets.VBox(child_constraints)
      _update_annotations_for_group(group_container, group, capture)
      # Re-render this group's controls whenever its constraints change.
      group.subscribe(_bind.callback_without_event(
          _update_annotations_for_group, group_container, group, capture))
      step_tabs_children.append(group_container)
    step_tabs = widgets.Tab(step_tabs_children)
    for i, group in enumerate(s.constraints()):
      step_tabs.set_title(i, _common.format_label(group.__class__.__name__))
    # Debug data lives in an initially-collapsed accordion; its content is
    # only materialized once the accordion is expanded (selected_index set).
    debug_data_container = widgets.VBox([])
    debug_data_accordion = widgets.Accordion([debug_data_container])
    debug_data_accordion.set_title(0, 'debug data')
    debug_data_accordion.selected_index = None
    _update_debug_data_for_problem(
        debug_data_container, debug_data_accordion, s, capture)
    # Refresh the debug view when the accordion is opened, and whenever the
    # problem or the step publishes a change. ("accordian" sic.)
    accordian_selected_index_changed = _bind.widget_observable(
        debug_data_accordion, 'selected_index')
    accordian_selected_index_changed.subscribe(_bind.callback_without_event(
        _update_debug_data_for_problem, debug_data_container,
        debug_data_accordion, s, capture))
    p.subscribe(_bind.callback_without_event(
        _update_debug_data_for_problem, debug_data_container,
        debug_data_accordion, s, capture))
    s.subscribe(_bind.callback_without_event(
        _update_debug_data_for_problem, debug_data_container,
        debug_data_accordion, s, capture))
    step_tabs = widgets.VBox([step_tabs, debug_data_accordion])
    accordion_children.append(step_tabs)
  accordion = widgets.Accordion(children=accordion_children)
  for i, s in enumerate(steps):
    accordion.set_title(i, _common.format_label(str(s)))
  interactive_information.children = (accordion,)
def _update_annotations_for_group(
    annotations_container: widgets.VBox,
    group: constraints.Constraints,
    capture: ContextManager) -> None:
  """Rebuild the annotation widgets shown for *group* inside its container."""
  replacements = [
      annotation_widget.AnnotationWidget(
          annotation, group, key, value, docs, capture)
      for key, value, annotation, docs in group
  ]
  _widget_util.merge_assign_children(annotations_container, replacements)
def _update_debug_data_for_problem(
    debug_data_container: widgets.VBox,
    debug_data_accordion: widgets.Accordion,
    s: step.Step,
    capture: ContextManager,
):
  """Materialize the debug widget for *s*, but only while its accordion is open."""
  # TODO: Diff.
  if debug_data_accordion.selected_index is None:
    return
  debug_data_container.children = (
      debug_data_widget.DebugDataWidget(s, capture),)
def _update_clear_button_visibility(
    clear_button: widgets.Button, output: widgets.Output) -> None:
  """Show the clear button only while *output* holds captured content."""
  clear_button.layout.display = 'block' if output.outputs else 'none'
from ipywidgets import widgets
from puzzle.constraints import constraints
from puzzle.problems import problem
from puzzle.puzzlepedia import _bind, _common, _widget_util, \
annotation_widget, \
debug_data_widget, meta_problem, table_widget
from puzzle.puzzlepedia._bind import widget_observable
from puzzle.steps import step
_MAX_RESULTS = 30
def ProblemWidget(mp: meta_problem.MetaProblem):
"""Factory for IPython widgets, pretending to be real widget."""
capture = widgets.Output()
items = []
options = {}
for p in mp:
# 'p' is instance of problem.Problem.
options[p.kind] = p
# Dropdown.
dropdown = widgets.Dropdown(options=options)
items.append(dropdown)
dropdown_source = widget_observable(dropdown)
# Interactive information appears between dropdown + solution and the
# table of solutions.
interactive_information = widgets.VBox([])
# Best solution.
best_solution = widgets.Text()
items.append(best_solution)
def _on_problem_kind_change(p: problem.Problem) -> None:
_update_solutions_for_problem(solutions_table, best_solution, p)
_update_interactive_information_for_problem(
interactive_information, p, capture)
dropdown_source.subscribe(_on_problem_kind_change)
best_solution_source = widget_observable(best_solution)
def _on_best_solution_change(solution: str) -> None:
mp.solution = solution
best_solution_source.subscribe(_on_best_solution_change)
solutions_table = table_widget.TableWidget()
if mp.peek():
_update_solutions_for_problem(
solutions_table, best_solution, mp.peek())
_update_interactive_information_for_problem(
interactive_information, mp.peek(), capture)
for p in mp:
p.subscribe(_bind.callback_without_event(
_update_solutions_for_problem, solutions_table, best_solution, p))
clear_output_button = widgets.Button(description='clear output')
ouptuts_changed = _bind.widget_observable(capture, 'outputs')
ouptuts_changed.subscribe(_bind.callback_without_event(
_update_clear_button_visibility, clear_output_button, capture))
_update_clear_button_visibility(clear_output_button, capture)
clear_output_button.on_click(
_bind.callback_without_event(capture.clear_output))
return widgets.VBox([
widgets.HBox(items), interactive_information, solutions_table, capture,
clear_output_button,
])
def _update_solutions_for_problem(
table: table_widget.TableWidget,
best_solution: widgets.Text,
p: problem.Problem) -> None:
solutions = p.solutions()
if solutions.peek():
best_solution.value = solutions.peek()
headers = ['score', 'solution', 'notes']
data = []
for i, (solution, score) in enumerate(solutions.items()):
if i >= _MAX_RESULTS:
break
data.append([
round(score, 3),
_common.preformat_html(solution),
'<br />'.join(p.notes_for(solution))
])
table.update_data(data, headers=headers)
def _update_interactive_information_for_problem(
interactive_information: widgets.VBox,
p: problem.Problem,
capture: ContextManager):
accordion_children = []
steps = list(p.steps())
for s in steps:
step_tabs_children = []
for group in s.constraints():
child_constraints = []
group_container = widgets.VBox(child_constraints)
_update_annotations_for_group(group_container, group, capture)
group.subscribe(_bind.callback_without_event(
_update_annotations_for_group, group_container, group, capture))
step_tabs_children.append(group_container)
step_tabs = widgets.Tab(step_tabs_children)
for i, group in enumerate(s.constraints()):
step_tabs.set_title(i, _common.format_label(group.__class__.__name__))
debug_data_container = widgets.VBox([])
debug_data_accordion = widgets.Accordion([debug_data_container])
debug_data_accordion.set_title(0, 'debug data')
debug_data_accordion.selected_index = None
_update_debug_data_for_problem(
debug_data_container, debug_data_accordion, s, capture)
accordian_selected_index_changed = _bind.widget_observable(
debug_data_accordion, 'selected_index')
accordian_selected_index_changed.subscribe(_bind.callback_without_event(
_update_debug_data_for_problem, debug_data_container,
debug_data_accordion, s, capture))
p.subscribe(_bind.callback_without_event(
_update_debug_data_for_problem, debug_data_container,
debug_data_accordion, s, capture))
s.subscribe(_bind.callback_without_event(
_update_debug_data_for_problem, debug_data_container,
debug_data_accordion, s, capture))
step_tabs = widgets.VBox([step_tabs, debug_data_accordion])
accordion_children.append(step_tabs)
accordion = widgets.Accordion(children=accordion_children)
for i, s in enumerate(steps):
accordion.set_title(i, _common.format_label(str(s)))
interactive_information.children = (accordion,)
def _update_annotations_for_group(
annotations_container: widgets.VBox,
group: constraints.Constraints,
capture: ContextManager) -> None:
children = []
for key, value, annotation, docs in group:
children.append(annotation_widget.AnnotationWidget(
annotation, group, key, value, docs, capture))
_widget_util.merge_assign_children(annotations_container, children)
def _update_debug_data_for_problem(
debug_data_container: widgets.VBox,
debug_data_accordion: widgets.Accordion,
s: step.Step,
capture: ContextManager,
):
# TODO: Diff.
if debug_data_accordion.selected_index is not None:
debug_widget = debug_data_widget.DebugDataWidget(s, capture)
debug_data_container.children = (debug_widget,)
def _update_clear_button_visibility(
clear_button: widgets.Button, output: widgets.Output) -> None:
if output.outputs:
clear_button.layout.display = 'block'
else:
clear_button.layout.display = 'none' | 0.632957 | 0.140661 |
import json
from django.urls import reverse
from rest_framework.test import APITestCase, APIClient
from core.models import Author
from core.tests.factories import AuthorFactory
from users.tests.factories import UserFactory, TokenFactory
class AuthorUpdateTestCase(APITestCase):
    """PATCH /v1/authors/<pk> behaviour for anonymous, non-admin and admin users."""

    def setUp(self):
        self.client = APIClient()
        author = AuthorFactory(name='<NAME>')
        self.url = reverse('v1:author-detail', kwargs={'pk': author.pk})

    def _authenticate(self, is_staff):
        # Attach a token for a fresh user with the given staff flag.
        user = UserFactory(is_staff=is_staff)
        token = TokenFactory(user=user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

    def _patch_name(self):
        # Issue the name-update PATCH request shared by every test below.
        body = {
            'name': '<NAME>',
        }
        return self.client.patch(self.url, json.dumps(body), content_type='application/json')

    def test_author_update_returns_401_given_anonymous_request(self):
        response = self._patch_name()
        self.assertEqual(401, response.status_code)

    def test_author_update_returns_403_given_non_admin_user(self):
        self._authenticate(is_staff=False)
        response = self._patch_name()
        self.assertEqual(403, response.status_code)

    def test_author_update_returns_200_given_valid_input(self):
        self._authenticate(is_staff=True)
        response = self._patch_name()
        self.assertEqual(200, response.status_code)

    def test_author_update_updates_a_author_given_valid_input(self):
        self._authenticate(is_staff=True)
        self._patch_name()
        self.assertTrue(Author.objects.filter(name='<NAME>').exists())
from django.urls import reverse
from rest_framework.test import APITestCase, APIClient
from core.models import Author
from core.tests.factories import AuthorFactory
from users.tests.factories import UserFactory, TokenFactory
class AuthorUpdateTestCase(APITestCase):
def setUp(self):
self.client = APIClient()
author = AuthorFactory(name='<NAME>')
self.url = reverse('v1:author-detail', kwargs={'pk': author.pk})
def test_author_update_returns_401_given_anonymous_request(self):
body = {
'name': '<NAME>',
}
response = self.client.patch(self.url, json.dumps(body), content_type='application/json')
self.assertEqual(401, response.status_code)
def test_author_update_returns_403_given_non_admin_user(self):
user = UserFactory(is_staff=False)
token = TokenFactory(user=user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
body = {
'name': '<NAME>',
}
response = self.client.patch(self.url, json.dumps(body), content_type='application/json')
self.assertEqual(403, response.status_code)
def test_author_update_returns_200_given_valid_input(self):
user = UserFactory(is_staff=True)
token = TokenFactory(user=user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
body = {
'name': '<NAME>',
}
response = self.client.patch(self.url, json.dumps(body), content_type='application/json')
self.assertEqual(200, response.status_code)
def test_author_update_updates_a_author_given_valid_input(self):
user = UserFactory(is_staff=True)
token = TokenFactory(user=user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
body = {
'name': '<NAME>',
}
self.client.patch(self.url, json.dumps(body), content_type='application/json')
self.assertTrue(Author.objects.filter(name='<NAME>').exists()) | 0.431345 | 0.123471 |
# Copyright 2020 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
Python version of tokenizer.pl
"""
import sys
import codecs
import argparse
def io_wrapper(io_str, mode):
    """Open *io_str* as a UTF-8 text stream for reading or writing.

    Args:
        io_str: Path of a file, or "-" for stdin/stdout.
        mode: "r" to read or "w" to write.

    Returns:
        Tuple ``(std, stream)`` where *std* is True when the stream wraps
        stdin/stdout (and therefore must not be closed by the caller).

    Raises:
        RuntimeError: If *mode* is neither "r" nor "w".  (Previously this
        check was only applied on the stdin/stdout path, so a bad mode with
        a real path was passed straight to ``codecs.open``.)
    """
    if mode not in ["r", "w"]:
        raise RuntimeError(f"Unknown IO mode: {mode}")
    if io_str != "-":
        return False, codecs.open(io_str, mode, encoding="utf-8")
    if mode == "w":
        stream = codecs.getwriter("utf-8")(sys.stdout.buffer)
    else:
        stream = codecs.getreader("utf-8")(sys.stdin.buffer)
    return True, stream
def run(args):
    """Tokenize args.src_txt into args.dst_tok, optionally dumping a vocabulary.

    NOTE(review): indentation reconstructed from a flattened dump; the exact
    nesting of the vocab-setup branch is inferred — confirm against upstream.
    """
    src_std, src = io_wrapper(args.src_txt, "r")
    dst_std, dst = io_wrapper(args.dst_tok, "w")

    def add_to_vocab(vocab, units):
        # Assign each previously-unseen unit the next integer id; no-op when
        # vocabulary dumping is disabled (vocab is None).
        if vocab is None:
            return
        for unit in units:
            if unit not in vocab:
                vocab[unit] = len(vocab)

    sp_mdl = None
    vocab = None
    add_units = None
    if args.unit == "subword":
        # Subword mode delegates all segmentation to a sentencepiece model.
        if not args.spm:
            raise RuntimeError("Missing --spm when choose subword unit")
        import sentencepiece as sp
        sp_mdl = sp.SentencePieceProcessor(model_file=args.spm)
    else:
        if args.add_units:
            add_units = args.add_units.split(",")
        if args.dump_vocab:
            vocab = {}
        # Seed the vocabulary with the extra units and the space symbol.
        if add_units:
            print(f"Add units: {add_units} to vocabulary")
            add_to_vocab(vocab, add_units)
        if args.space:
            add_to_vocab(vocab, [args.space])
    filter_units = args.filter_units.split(",")
    print(f"Filter units: {filter_units}")
    for raw_line in src:
        line = raw_line.strip()
        raw_tokens = line.split()
        if args.text_format == "kaldi":
            # Kaldi format: first token is the utterance id; echo it + a tab.
            sets = raw_tokens[1:]
            dst.write(f"{raw_tokens[0]}\t")
        else:
            sets = raw_tokens
        kept_tokens = []
        for n, tok in enumerate(sets):
            # remove tokens
            is_filter_tok = tok in filter_units
            if is_filter_tok and args.unit != "char":
                continue
            # word => char (filtered units are kept whole in char mode)
            if args.unit == "char" and not is_filter_tok:
                toks = [t for t in tok]
            else:
                toks = [tok]
            kept_tokens += toks
            add_to_vocab(vocab, toks)
            # Insert the space symbol between (not after) word boundaries.
            if args.space and n != len(sets) - 1:
                kept_tokens += [args.space]
        if args.unit == "subword":
            kept_tokens = sp_mdl.encode(" ".join(kept_tokens), out_type=str)
        dst.write(" ".join(kept_tokens) + "\n")
    if vocab:
        _, dump_vocab = io_wrapper(args.dump_vocab, "w")
        for unit, idx in vocab.items():
            dump_vocab.write(f"{unit} {idx}\n")
        print(f"Dump vocabulary to {args.dump_vocab} with {len(vocab)} units")
        dump_vocab.close()
    # Only close streams we opened ourselves, never stdin/stdout.
    if not src_std:
        src.close()
    if not dst_std:
        dst.close()
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the tokenizer.
    parser = argparse.ArgumentParser(
        description="Tokenize the text to modeling units, e.g., "
        "character, phoneme, word, subword, ...")
    parser.add_argument("src_txt",
                        type=str,
                        help="Source text file (Kaldi format or not)")
    parser.add_argument("dst_tok",
                        type=str,
                        help="Output text file (Kaldi format or not)")
    parser.add_argument("--text-format",
                        type=str,
                        default="kaldi",
                        choices=["kaldi", "raw"],
                        help="Format of the text file. "
                        "The kaldi format begins with the utterance ID")
    parser.add_argument("--spm",
                        type=str,
                        default="",
                        help="Path of the sentencepiece's model "
                        "if we choose subword unit")
    parser.add_argument("--filter-units",
                        type=str,
                        default="",
                        help="Filter the units if needed, "
                        "each unit is separated via \',\'")
    parser.add_argument("--unit",
                        type=str,
                        default="char",
                        choices=["char", "word", "subword"],
                        help="Type of the modeling unit")
    parser.add_argument("--space",
                        type=str,
                        default="",
                        help="If not none, insert space "
                        "symbol between each units")
    parser.add_argument("--add-units",
                        type=str,
                        default="",
                        help="Add units to vocabulary set, "
                        "e.g., <sos>, <eos>, <unk>")
    parser.add_argument("--dump-vocab",
                        type=str,
                        default="",
                        help="If not none, dump out the vocabulary set")
    args = parser.parse_args()
    run(args)
# Copyright 2020 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
Python version of tokenizer.pl
"""
import sys
import codecs
import argparse
def io_wrapper(io_str, mode):
"""
Wrapper for IO stream
"""
if io_str != "-":
std = False
stream = codecs.open(io_str, mode, encoding="utf-8")
else:
std = True
if mode not in ["r", "w"]:
raise RuntimeError(f"Unknown IO mode: {mode}")
if mode == "w":
stream = codecs.getwriter("utf-8")(sys.stdout.buffer)
else:
stream = codecs.getreader("utf-8")(sys.stdin.buffer)
return std, stream
def run(args):
src_std, src = io_wrapper(args.src_txt, "r")
dst_std, dst = io_wrapper(args.dst_tok, "w")
def add_to_vocab(vocab, units):
if vocab is None:
return
for unit in units:
if unit not in vocab:
vocab[unit] = len(vocab)
sp_mdl = None
vocab = None
add_units = None
if args.unit == "subword":
if not args.spm:
raise RuntimeError("Missing --spm when choose subword unit")
import sentencepiece as sp
sp_mdl = sp.SentencePieceProcessor(model_file=args.spm)
else:
if args.add_units:
add_units = args.add_units.split(",")
if args.dump_vocab:
vocab = {}
if add_units:
print(f"Add units: {add_units} to vocabulary")
add_to_vocab(vocab, add_units)
if args.space:
add_to_vocab(vocab, [args.space])
filter_units = args.filter_units.split(",")
print(f"Filter units: {filter_units}")
for raw_line in src:
line = raw_line.strip()
raw_tokens = line.split()
if args.text_format == "kaldi":
sets = raw_tokens[1:]
dst.write(f"{raw_tokens[0]}\t")
else:
sets = raw_tokens
kept_tokens = []
for n, tok in enumerate(sets):
# remove tokens
is_filter_tok = tok in filter_units
if is_filter_tok and args.unit != "char":
continue
# word => char
if args.unit == "char" and not is_filter_tok:
toks = [t for t in tok]
else:
toks = [tok]
kept_tokens += toks
add_to_vocab(vocab, toks)
if args.space and n != len(sets) - 1:
kept_tokens += [args.space]
if args.unit == "subword":
kept_tokens = sp_mdl.encode(" ".join(kept_tokens), out_type=str)
dst.write(" ".join(kept_tokens) + "\n")
if vocab:
_, dump_vocab = io_wrapper(args.dump_vocab, "w")
for unit, idx in vocab.items():
dump_vocab.write(f"{unit} {idx}\n")
print(f"Dump vocabulary to {args.dump_vocab} with {len(vocab)} units")
dump_vocab.close()
if not src_std:
src.close()
if not dst_std:
dst.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Tokenize the text to modeling units, e.g., "
"character, phoneme, word, subword, ...")
parser.add_argument("src_txt",
type=str,
help="Source text file (Kaldi format or not)")
parser.add_argument("dst_tok",
type=str,
help="Output text file (Kaldi format or not)")
parser.add_argument("--text-format",
type=str,
default="kaldi",
choices=["kaldi", "raw"],
help="Format of the text file. "
"The kaldi format begins with the utterance ID")
parser.add_argument("--spm",
type=str,
default="",
help="Path of the sentencepiece's model "
"if we choose subword unit")
parser.add_argument("--filter-units",
type=str,
default="",
help="Filter the units if needed, "
"each unit is separated via \',\'")
parser.add_argument("--unit",
type=str,
default="char",
choices=["char", "word", "subword"],
help="Type of the modeling unit")
parser.add_argument("--space",
type=str,
default="",
help="If not none, insert space "
"symbol between each units")
parser.add_argument("--add-units",
type=str,
default="",
help="Add units to vocabulary set, "
"e.g., <sos>, <eos>, <unk>")
parser.add_argument("--dump-vocab",
type=str,
default="",
help="If not none, dump out the vocabulary set")
args = parser.parse_args()
run(args) | 0.45641 | 0.196537 |
import sqlite3
class UserRepository:
def __init__(self, config):
connection = config["Database"]["connection"]
assert connection
self.connection_string = connection
def _connection(self):
return sqlite3.connect(self.connection_string)
def list_user(self):
"""
Query for a list of user
:return: a list tuple (user_name, user_pass, note)
"""
query = "SELECT user_name, user_pass, note FROM users"
result = []
with self._connection() as connection:
cursor = connection.cursor()
cursor.execute(query)
for row in cursor.fetchall():
result.append(
(row[0], row[1], row[2])
)
return result
def get_user(self, user_name):
"""
Find a user base on user name
:param user_name: target user name
:return: tuple of (username, password, note) or None
"""
query = "SELECT user_name, user_pass, note FROM users WHERE user_name = :user_name"
params = {"user_name": user_name}
with self._connection() as connection:
cursor = connection.cursor()
cursor.execute(query, params)
found_user = cursor.fetchone()
if found_user is not None:
return found_user[0], found_user[1], found_user[2]
else:
return None
def create_user(self, user_name, password, note):
"""
Create an entry of user
:param user_name: user's user name
:param password: <PASSWORD>
:param note: note of the user
:return: if the operation success or not
"""
query = "INSERT INTO users (user_name, user_pass, note) VALUES (:user_name, :password, :note);"
params = {
"user_name": user_name,
"password": password,
"note": note
}
with self._connection() as connection:
cursor = connection.cursor()
cursor.execute(query, params)
row_effected = cursor.rowcount
return row_effected == 1
def edit_user(self, user_name, password, note):
"""
Update a user
:param user_name: user name of the user
:param password: <PASSWORD>
:param note: note of the user
:return: if the operation success of not
"""
query = "UPDATE users SET user_pass = :password, note = :note WHERE user_name = :user_name"
params = {
"user_name": user_name,
"password": password,
"note": note
}
with self._connection() as connection:
cursor = connection.cursor()
cursor.execute(query, params)
row_effected = cursor.rowcount
return row_effected == 1
def delete_user(self, user_name):
"""
Delete a user
:param user_name: user name of the user
:return: if the operation success or not
"""
query = "DELETE FROM users WHERE user_name = :user_name"
params = {"user_name": user_name}
with self._connection() as connection:
cursor = connection.cursor()
cursor.execute(query, params)
row_effected = cursor.rowcount
return row_effected == 1
def get_role(self, user_name, role_name):
"""
Return a single mapping of user role
:param user_name: user name of the target mapping
:param role_name: role name of the target mapping
:return: a tuple (user_name, role_name) of found mapping or None
"""
query = "SELECT user_name, role_name FROM user_roles WHERE user_name = :user_name AND role_name = :role_name"
params = {
"user_name": user_name,
"role_name": role_name
}
with self._connection() as connection:
cursor = connection.cursor()
cursor.execute(query, params)
found_mapping = cursor.fetchone()
if found_mapping is not None:
return found_mapping[0], found_mapping(1)
else:
return None
def get_roles(self, user_name):
"""
Return a list of role name for a user
:param user_name: user name of the target user
:return: a list of tuple (username, role name) for a user or an empty list
"""
query = "SELECT user_name, role_name FROM user_roles WHERE user_name = :user_name"
params = {"user_name": user_name}
result = []
with self._connection() as connection:
cursor = connection.cursor()
for row in cursor.execute(query, params):
result.append(
(row[0], row[1])
)
return result
def create_role(self, user_name, role_name):
"""
Create a role for target user
:param user_name: user name of the user
:param role_name: role name of the user
:return: if the operation success of not
"""
query = "INSERT INTO user_roles (user_name, role_name) VALUES (:user_name, :role_name)"
params = {
"user_name": user_name,
"role_name": role_name
}
with self._connection() as connection:
cursor = connection.cursor()
cursor.execute(query, params)
row_count = cursor.rowcount
return row_count == 1
def delete_role(self, user_name, role_name):
    """Detach *role_name* from *user_name* in the user_roles table.

    :param user_name: user name of the user
    :param role_name: role name to detach
    :return: True when exactly one row was deleted, False otherwise
    """
    sql = "DELETE FROM user_roles WHERE user_name = :user_name AND role_name = :role_name"
    values = {"user_name": user_name, "role_name": role_name}
    with self._connection() as connection:
        cursor = connection.cursor()
        cursor.execute(sql, values)
        return cursor.rowcount == 1
class UserRepository:
    """SQLite-backed repository for users and their role mappings.

    Expects two tables to already exist in the configured database:
      * ``users(user_name, user_pass, note)``
      * ``user_roles(user_name, role_name)``

    Every operation opens a fresh connection; the ``with`` block commits
    the transaction on success (sqlite3 connections are transaction
    context managers).
    """

    def __init__(self, config):
        """Read the connection string from ``config["Database"]["connection"]``."""
        connection = config["Database"]["connection"]
        # NOTE: assert is stripped under ``python -O``; kept as-is so the
        # exception type seen by existing callers does not change.
        assert connection
        self.connection_string = connection

    def _connection(self):
        """Open a new connection to the configured SQLite database."""
        return sqlite3.connect(self.connection_string)

    def list_user(self):
        """
        Query for the full list of users.
        :return: a list of tuples (user_name, user_pass, note)
        """
        query = "SELECT user_name, user_pass, note FROM users"
        result = []
        with self._connection() as connection:
            cursor = connection.cursor()
            cursor.execute(query)
            for row in cursor.fetchall():
                result.append((row[0], row[1], row[2]))
        return result

    def get_user(self, user_name):
        """
        Find a user based on user name.
        :param user_name: target user name
        :return: tuple of (user_name, user_pass, note) or None
        """
        query = "SELECT user_name, user_pass, note FROM users WHERE user_name = :user_name"
        params = {"user_name": user_name}
        with self._connection() as connection:
            cursor = connection.cursor()
            cursor.execute(query, params)
            found_user = cursor.fetchone()
            if found_user is not None:
                return found_user[0], found_user[1], found_user[2]
            else:
                return None

    def create_user(self, user_name, password, note):
        """
        Create an entry for a user.
        :param user_name: user's user name
        :param password: user's password
        :param note: note of the user
        :return: True when exactly one row was inserted
        """
        query = "INSERT INTO users (user_name, user_pass, note) VALUES (:user_name, :password, :note);"
        params = {
            "user_name": user_name,
            "password": password,
            "note": note
        }
        with self._connection() as connection:
            cursor = connection.cursor()
            cursor.execute(query, params)
            return cursor.rowcount == 1

    def edit_user(self, user_name, password, note):
        """
        Update a user's password and note.
        :param user_name: user name of the user
        :param password: new password
        :param note: new note of the user
        :return: True when exactly one row was updated
        """
        query = "UPDATE users SET user_pass = :password, note = :note WHERE user_name = :user_name"
        params = {
            "user_name": user_name,
            "password": password,
            "note": note
        }
        with self._connection() as connection:
            cursor = connection.cursor()
            cursor.execute(query, params)
            return cursor.rowcount == 1

    def delete_user(self, user_name):
        """
        Delete a user.
        :param user_name: user name of the user
        :return: True when exactly one row was deleted
        """
        query = "DELETE FROM users WHERE user_name = :user_name"
        params = {"user_name": user_name}
        with self._connection() as connection:
            cursor = connection.cursor()
            cursor.execute(query, params)
            return cursor.rowcount == 1

    def get_role(self, user_name, role_name):
        """
        Return a single mapping of user role.
        :param user_name: user name of the target mapping
        :param role_name: role name of the target mapping
        :return: a tuple (user_name, role_name) of the found mapping, or None
        """
        query = ("SELECT user_name, role_name FROM user_roles "
                 "WHERE user_name = :user_name AND role_name = :role_name")
        params = {
            "user_name": user_name,
            "role_name": role_name
        }
        with self._connection() as connection:
            cursor = connection.cursor()
            cursor.execute(query, params)
            found_mapping = cursor.fetchone()
            if found_mapping is not None:
                # BUG FIX: was ``found_mapping(1)`` which called the tuple
                # and raised TypeError whenever a mapping was found.
                return found_mapping[0], found_mapping[1]
            else:
                return None

    def get_roles(self, user_name):
        """
        Return the list of role mappings for a user.
        :param user_name: user name of the target user
        :return: a list of tuples (user_name, role_name), possibly empty
        """
        query = "SELECT user_name, role_name FROM user_roles WHERE user_name = :user_name"
        params = {"user_name": user_name}
        result = []
        with self._connection() as connection:
            cursor = connection.cursor()
            for row in cursor.execute(query, params):
                result.append((row[0], row[1]))
        return result

    def create_role(self, user_name, role_name):
        """
        Create a role for the target user.
        :param user_name: user name of the user
        :param role_name: role name of the user
        :return: True when exactly one row was inserted
        """
        query = "INSERT INTO user_roles (user_name, role_name) VALUES (:user_name, :role_name)"
        params = {
            "user_name": user_name,
            "role_name": role_name
        }
        with self._connection() as connection:
            cursor = connection.cursor()
            cursor.execute(query, params)
            return cursor.rowcount == 1

    def delete_role(self, user_name, role_name):
        """
        Delete a role from the target user.
        :param user_name: user name of the user
        :param role_name: role name to remove
        :return: True when exactly one row was deleted
        """
        query = "DELETE FROM user_roles WHERE user_name = :user_name AND role_name = :role_name"
        params = {
            "user_name": user_name,
            "role_name": role_name
        }
        with self._connection() as connection:
            cursor = connection.cursor()
            cursor.execute(query, params)
            return cursor.rowcount == 1
import struct
import unittest
from datasketch.partition_minhash import PartitionMinHash, BetterWeightedPartitionMinHash
class FakeHash(object):
    """Deterministic stand-in for a hash object, backed by a plain integer."""

    def __init__(self, h):
        """Store the integer that plays the role of the hash value."""
        self.h = h

    def digest(self):
        """Return the stored integer packed as 8 little-endian bytes."""
        return struct.pack('<Q', self.h)
class TestPartitionMinhash(unittest.TestCase):
    """Unit tests for PartitionMinHash and BetterWeightedPartitionMinHash."""

    def test_init(self):
        """Two freshly built sketches with the same k compare equal."""
        m1 = PartitionMinHash(4)
        m2 = PartitionMinHash(4)
        self.assertEqual(m1, m2)
        self.assertEqual(m1.k_val, m2.k_val)
        # NOTE(review): this compares m1.partitions with itself, which is
        # vacuously true — probably meant m2.partitions; confirm.
        self.assertEqual(m1.partitions, m1.partitions)

    def test_is_empty(self):
        """A sketch is empty until the first update."""
        m = PartitionMinHash(4, hashobj=FakeHash)
        self.assertTrue(m.is_empty())
        m.update(1)
        self.assertFalse(m.is_empty())

    def test_update(self):
        """Updating one of two equal sketches makes them differ."""
        m1 = PartitionMinHash(4, hashobj=FakeHash)
        m2 = PartitionMinHash(4, hashobj=FakeHash)
        m1.update(12)
        self.assertTrue(m1 != m2)

    def test_jaccard(self):
        """Jaccard estimates for a growing pair of plain sketches."""
        m1 = PartitionMinHash(4, hashobj=FakeHash)
        m2 = PartitionMinHash(4, hashobj=FakeHash)
        # Both empty: defined as identical.
        self.assertEqual(m1.jaccard(m2), 1.0)
        m2.update(12)
        self.assertEqual(m1.jaccard(m2), 0.0)
        m1.update(13)
        self.assertEqual(m1.jaccard(m2), 0.0)
        m1.update(12)
        self.assertEqual(m1.jaccard(m2), 0.75)
        m2.update(13)
        self.assertEqual(m1.jaccard(m2), 1.0)
        m1.update(14)
        self.assertEqual(m1.jaccard(m2), 2./3)
        m2.update(14)
        self.assertEqual(m1.jaccard(m2), 1.0)

    def test_better_weighting_jaccard(self):
        """Same sequence as test_jaccard but with the better-weighted variant,
        which yields a different estimate (0.50) at the asymmetric step."""
        m1 = BetterWeightedPartitionMinHash(4, hashobj=FakeHash)
        m2 = BetterWeightedPartitionMinHash(4, hashobj=FakeHash)
        self.assertEqual(m1.jaccard(m2), 1.0)
        m2.update(12)
        self.assertEqual(m1.jaccard(m2), 0.0)
        m1.update(13)
        self.assertEqual(m1.jaccard(m2), 0.0)
        m1.update(12)
        self.assertEqual(m1.jaccard(m2), 0.50)
        m2.update(13)
        self.assertEqual(m1.jaccard(m2), 1.0)
        m1.update(14)
        self.assertEqual(m1.jaccard(m2), 2./3)
        m2.update(14)
        self.assertEqual(m1.jaccard(m2), 1.0)

    def test_eq(self):
        """Equality depends on the set of updates, not their order."""
        m1 = PartitionMinHash(4, hashobj=FakeHash)
        m2 = PartitionMinHash(4, hashobj=FakeHash)
        m3 = PartitionMinHash(4, hashobj=FakeHash)
        m4 = PartitionMinHash(4, hashobj=FakeHash)
        m5 = PartitionMinHash(4, hashobj=FakeHash)
        m1.update(11)
        m2.update(12)
        m3.update(11)
        m4.update(11)
        m5.update(11)
        self.assertNotEqual(m1, m2)
        self.assertEqual(m1, m3)
        self.assertEqual(m1, m4)
        self.assertEqual(m1, m5)
        # After cross-updating, both have seen {11, 12} and are equal again.
        m1.update(12)
        m2.update(11)
        self.assertEqual(m1, m2)
# Allow running this test module directly.
# FIX: removed dataset-extraction residue ("| test/... | import struct")
# that was fused onto the final line and broke the syntax.
if __name__ == "__main__":
    unittest.main()
import unittest
from datasketch.partition_minhash import PartitionMinHash, BetterWeightedPartitionMinHash
class FakeHash(object):
    """Deterministic stand-in for a hash object, backed by a plain integer."""

    def __init__(self, h):
        """Store the integer that plays the role of the hash value."""
        self.h = h

    def digest(self):
        """Return the stored integer packed as 8 little-endian bytes."""
        return struct.pack('<Q', self.h)
class TestPartitionMinhash(unittest.TestCase):
    """Unit tests for PartitionMinHash and BetterWeightedPartitionMinHash."""

    def test_init(self):
        """Two freshly built sketches with the same k compare equal."""
        m1 = PartitionMinHash(4)
        m2 = PartitionMinHash(4)
        self.assertEqual(m1, m2)
        self.assertEqual(m1.k_val, m2.k_val)
        # NOTE(review): this compares m1.partitions with itself, which is
        # vacuously true — probably meant m2.partitions; confirm.
        self.assertEqual(m1.partitions, m1.partitions)

    def test_is_empty(self):
        """A sketch is empty until the first update."""
        m = PartitionMinHash(4, hashobj=FakeHash)
        self.assertTrue(m.is_empty())
        m.update(1)
        self.assertFalse(m.is_empty())

    def test_update(self):
        """Updating one of two equal sketches makes them differ."""
        m1 = PartitionMinHash(4, hashobj=FakeHash)
        m2 = PartitionMinHash(4, hashobj=FakeHash)
        m1.update(12)
        self.assertTrue(m1 != m2)

    def test_jaccard(self):
        """Jaccard estimates for a growing pair of plain sketches."""
        m1 = PartitionMinHash(4, hashobj=FakeHash)
        m2 = PartitionMinHash(4, hashobj=FakeHash)
        # Both empty: defined as identical.
        self.assertEqual(m1.jaccard(m2), 1.0)
        m2.update(12)
        self.assertEqual(m1.jaccard(m2), 0.0)
        m1.update(13)
        self.assertEqual(m1.jaccard(m2), 0.0)
        m1.update(12)
        self.assertEqual(m1.jaccard(m2), 0.75)
        m2.update(13)
        self.assertEqual(m1.jaccard(m2), 1.0)
        m1.update(14)
        self.assertEqual(m1.jaccard(m2), 2./3)
        m2.update(14)
        self.assertEqual(m1.jaccard(m2), 1.0)

    def test_better_weighting_jaccard(self):
        """Same sequence as test_jaccard but with the better-weighted variant,
        which yields a different estimate (0.50) at the asymmetric step."""
        m1 = BetterWeightedPartitionMinHash(4, hashobj=FakeHash)
        m2 = BetterWeightedPartitionMinHash(4, hashobj=FakeHash)
        self.assertEqual(m1.jaccard(m2), 1.0)
        m2.update(12)
        self.assertEqual(m1.jaccard(m2), 0.0)
        m1.update(13)
        self.assertEqual(m1.jaccard(m2), 0.0)
        m1.update(12)
        self.assertEqual(m1.jaccard(m2), 0.50)
        m2.update(13)
        self.assertEqual(m1.jaccard(m2), 1.0)
        m1.update(14)
        self.assertEqual(m1.jaccard(m2), 2./3)
        m2.update(14)
        self.assertEqual(m1.jaccard(m2), 1.0)

    def test_eq(self):
        """Equality depends on the set of updates, not their order."""
        m1 = PartitionMinHash(4, hashobj=FakeHash)
        m2 = PartitionMinHash(4, hashobj=FakeHash)
        m3 = PartitionMinHash(4, hashobj=FakeHash)
        m4 = PartitionMinHash(4, hashobj=FakeHash)
        m5 = PartitionMinHash(4, hashobj=FakeHash)
        m1.update(11)
        m2.update(12)
        m3.update(11)
        m4.update(11)
        m5.update(11)
        self.assertNotEqual(m1, m2)
        self.assertEqual(m1, m3)
        self.assertEqual(m1, m4)
        self.assertEqual(m1, m5)
        # After cross-updating, both have seen {11, 12} and are equal again.
        m1.update(12)
        m2.update(11)
        self.assertEqual(m1, m2)
# Allow running this test module directly.
# FIX: removed dataset-extraction residue ("| 0.709623 | 0.572185")
# that was fused onto the final line and broke the syntax.
if __name__ == "__main__":
    unittest.main()
import six
import logging
import numpy as np
import re
# Python 3 removed ``basestring``; alias it to ``str`` so the
# isinstance check in is_string() works on both major versions.
if not six.PY2:
    basestring = str
def is_string(s):
    """Return True when *s* is a string (``basestring`` on PY2, ``str`` on PY3)."""
    return isinstance(s, basestring)
def strQ2B(ustring):
    """Convert full-width (zenkaku) characters in *ustring* to half-width.

    :param ustring: text that may contain full-width characters
    :return: the converted text
    """
    # BUG FIX: ``unichr`` does not exist on Python 3 (this module aliases
    # ``basestring`` for PY3 but still called ``unichr`` here, raising
    # NameError on every call).  Resolve the right builtin once.
    try:
        _to_char = unichr          # Python 2
    except NameError:
        _to_char = chr             # Python 3
    rstring = ''
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 12288:
            # Full-width space maps directly to the ASCII space.
            inside_code = 32
        elif 65281 <= inside_code <= 65374:
            # Other full-width forms are a fixed offset from ASCII.
            inside_code -= 65248
        rstring += _to_char(inside_code)
    return rstring
def string_matching(s, keywords):
    """Return True when *s* matches at least one pattern in *keywords*."""
    return any(re.search(pattern, s) for pattern in keywords)
class Progress:
    """Lightweight progress reporter (simpler, more controllable than tqdm).

    iterable: the iterable being consumed;
    period: report progress every *period* items;
    steps: total number of steps (used when the iterable has no ``__len__``);
    desc: optional prefix for the log message.
    """

    def __init__(self, iterable, period=1, steps=None, desc=None):
        self.iterable = iterable
        self.period = period
        # Prefer the iterable's own length when it exposes one.
        self.steps = len(iterable) if hasattr(iterable, '__len__') else steps
        self.desc = desc
        if self.steps:
            template = u'%s/%s passed' % ('%s', self.steps)
        else:
            template = u'%s passed'
        if self.desc:
            template = self.desc + ' - ' + template
        self._format_ = template
        self.logger = logging.getLogger()

    def __iter__(self):
        for count, item in enumerate(self.iterable, start=1):
            if count % self.period == 0:
                self.logger.info(self._format_ % count)
            yield item
def parallel_apply(func,
                   iterable,
                   workers,
                   max_queue_size,
                   callback=None,
                   dummy=False):
    """Apply *func* to every element of *iterable* with multiple
    processes or threads.

    Note this apply is asynchronous and unordered: inputs a, b, c may
    produce outputs func(c), func(a), func(b).

    Parameters:
        dummy: False uses multiprocessing, True uses threads;
        callback: callback invoked on each single output; when None,
            all outputs are collected and returned as a list.
    """
    if dummy:
        from multiprocessing.dummy import Pool, Queue
    else:
        from multiprocessing import Pool, Queue
    in_queue, out_queue = Queue(max_queue_size), Queue()
    def worker_step(in_queue, out_queue):
        # Wrap the single-step function into an endless consume loop.
        while True:
            d = in_queue.get()
            r = func(d)
            out_queue.put(r)
    # Start the worker processes/threads; each runs worker_step forever.
    pool = Pool(workers, worker_step, (in_queue, out_queue))
    if callback is None:
        results = []
    # Drain whatever is currently in the output queue; returns how many
    # items were consumed so the caller can balance in/out counters.
    def process_out_queue():
        out_count = 0
        for _ in range(out_queue.qsize()):
            d = out_queue.get()
            out_count += 1
            if callback is None:
                results.append(d)
            else:
                callback(d)
        return out_count
    # Feed inputs and collect outputs, never letting in_queue block.
    in_count, out_count = 0, 0
    for d in iterable:
        in_count += 1
        while True:
            try:
                in_queue.put(d, block=False)
                break
            except six.moves.queue.Full:
                # Input queue full: make room by draining outputs first.
                out_count += process_out_queue()
        if in_count % max_queue_size == 0:
            out_count += process_out_queue()
    # Wait until every submitted input has produced an output.
    while out_count != in_count:
        out_count += process_out_queue()
    pool.terminate()
    if callback is None:
        return results
def get_all_attributes(something):
    """Collect every public attribute and method of *something* into a dict."""
    result = {}
    for name in dir(something):
        # Skip anything whose name starts with an underscore.
        if not name.startswith('_'):
            result[name] = getattr(something, name)
    return result
def sequence_padding(inputs, length=None, padding=0):
    """Numpy helper: pad (or truncate) every sequence in *inputs* to one length.

    When *length* is None, the longest input sequence sets the target length.
    """
    if length is None:
        length = max(len(seq) for seq in inputs)
    rows = []
    for seq in inputs:
        if len(seq) < length:
            rows.append(np.concatenate([seq, [padding] * (length - len(seq))]))
        else:
            rows.append(seq[:length])
    return np.array(rows)
def is_one_of(x, ys):
    """Identity-based membership test.

    Equivalent to ``x in ys`` but compares with ``is`` only, so it never
    invokes ``__eq__`` (which can raise for some objects).
    """
    return any(x is y for y in ys)
import six
import logging
import numpy as np
import re
# Python 3 removed ``basestring``; alias it to ``str`` so the
# isinstance check in is_string() works on both major versions.
if not six.PY2:
    basestring = str
def is_string(s):
    """Return True when *s* is a string (``basestring`` on PY2, ``str`` on PY3)."""
    return isinstance(s, basestring)
def strQ2B(ustring):
    """Convert full-width (zenkaku) characters in *ustring* to half-width.

    :param ustring: text that may contain full-width characters
    :return: the converted text
    """
    # BUG FIX: ``unichr`` does not exist on Python 3 (this module aliases
    # ``basestring`` for PY3 but still called ``unichr`` here, raising
    # NameError on every call).  Resolve the right builtin once.
    try:
        _to_char = unichr          # Python 2
    except NameError:
        _to_char = chr             # Python 3
    rstring = ''
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 12288:
            # Full-width space maps directly to the ASCII space.
            inside_code = 32
        elif 65281 <= inside_code <= 65374:
            # Other full-width forms are a fixed offset from ASCII.
            inside_code -= 65248
        rstring += _to_char(inside_code)
    return rstring
def string_matching(s, keywords):
    """Return True when *s* matches at least one pattern in *keywords*."""
    return any(re.search(pattern, s) for pattern in keywords)
class Progress:
    """Lightweight progress reporter (simpler, more controllable than tqdm).

    iterable: the iterable being consumed;
    period: report progress every *period* items;
    steps: total number of steps (used when the iterable has no ``__len__``);
    desc: optional prefix for the log message.
    """

    def __init__(self, iterable, period=1, steps=None, desc=None):
        self.iterable = iterable
        self.period = period
        # Prefer the iterable's own length when it exposes one.
        self.steps = len(iterable) if hasattr(iterable, '__len__') else steps
        self.desc = desc
        if self.steps:
            template = u'%s/%s passed' % ('%s', self.steps)
        else:
            template = u'%s passed'
        if self.desc:
            template = self.desc + ' - ' + template
        self._format_ = template
        self.logger = logging.getLogger()

    def __iter__(self):
        for count, item in enumerate(self.iterable, start=1):
            if count % self.period == 0:
                self.logger.info(self._format_ % count)
            yield item
def parallel_apply(func,
                   iterable,
                   workers,
                   max_queue_size,
                   callback=None,
                   dummy=False):
    """Apply *func* to every element of *iterable* with multiple
    processes or threads.

    Note this apply is asynchronous and unordered: inputs a, b, c may
    produce outputs func(c), func(a), func(b).

    Parameters:
        dummy: False uses multiprocessing, True uses threads;
        callback: callback invoked on each single output; when None,
            all outputs are collected and returned as a list.
    """
    if dummy:
        from multiprocessing.dummy import Pool, Queue
    else:
        from multiprocessing import Pool, Queue
    in_queue, out_queue = Queue(max_queue_size), Queue()
    def worker_step(in_queue, out_queue):
        # Wrap the single-step function into an endless consume loop.
        while True:
            d = in_queue.get()
            r = func(d)
            out_queue.put(r)
    # Start the worker processes/threads; each runs worker_step forever.
    pool = Pool(workers, worker_step, (in_queue, out_queue))
    if callback is None:
        results = []
    # Drain whatever is currently in the output queue; returns how many
    # items were consumed so the caller can balance in/out counters.
    def process_out_queue():
        out_count = 0
        for _ in range(out_queue.qsize()):
            d = out_queue.get()
            out_count += 1
            if callback is None:
                results.append(d)
            else:
                callback(d)
        return out_count
    # Feed inputs and collect outputs, never letting in_queue block.
    in_count, out_count = 0, 0
    for d in iterable:
        in_count += 1
        while True:
            try:
                in_queue.put(d, block=False)
                break
            except six.moves.queue.Full:
                # Input queue full: make room by draining outputs first.
                out_count += process_out_queue()
        if in_count % max_queue_size == 0:
            out_count += process_out_queue()
    # Wait until every submitted input has produced an output.
    while out_count != in_count:
        out_count += process_out_queue()
    pool.terminate()
    if callback is None:
        return results
def get_all_attributes(something):
    """Collect every public attribute and method of *something* into a dict."""
    result = {}
    for name in dir(something):
        # Skip anything whose name starts with an underscore.
        if not name.startswith('_'):
            result[name] = getattr(something, name)
    return result
def sequence_padding(inputs, length=None, padding=0):
    """Numpy helper: pad (or truncate) every sequence in *inputs* to one length.

    When *length* is None, the longest input sequence sets the target length.
    """
    if length is None:
        length = max(len(seq) for seq in inputs)
    rows = []
    for seq in inputs:
        if len(seq) < length:
            rows.append(np.concatenate([seq, [padding] * (length - len(seq))]))
        else:
            rows.append(seq[:length])
    return np.array(rows)
def is_one_of(x, ys):
    """Identity-based membership test.

    Equivalent to ``x in ys`` but compares with ``is`` only, so it never
    invokes ``__eq__`` (which can raise for some objects).
    """
    return any(x is y for y in ys)
import pandas as pd
import re
import os
from pptx import Presentation
from pptx.util import Inches, Pt
from pptx.enum.shapes import MSO_SHAPE
from pptx.enum.text import PP_ALIGN
from pptx.dml.color import RGBColor
from pptx.oxml.xmlchemy import OxmlElement
# Base directory prefix for bundled assets (empty = current directory).
d = ''
# Glasswall palette
dark_blue = RGBColor(14, 61, 90)
green_blue = RGBColor(26, 145, 154)
white = RGBColor(255, 255, 255)
blue1 = RGBColor(22, 94, 122)
# table colors:
gray = RGBColor(191, 191, 191)
blue2 = RGBColor(45, 92, 117)
# rag colors:
green = RGBColor(0, 204, 153)  # (0, 255, 0)
amber = RGBColor(255, 204, 0)  # (255, 153, 51)
red = RGBColor(255, 102, 102)  # (255, 0, 0)
# Letter font
gw_font = 'Arial'
def examine_template():
    """
    Print default Master's slides information, title, subtitle, placeholders, etc.
    """
    prs = Presentation()
    for idx in range(11):
        layout_slide = prs.slides.add_slide(prs.slide_layouts[idx])
        print('Master Slide ' + str(idx))
        for ph in layout_slide.placeholders:
            print('%d, %s' % (ph.placeholder_format.idx, ph.name))
def logo(slide, img_path=d + 'gw.png', place='top right'):
    """
    Insert logo in slide.
    :param slide: slide from presentation
    :param img_path: path to image file, Glasswall logo for default
    :param place: placement: 'top right', 'center' or 'top left'
    :raises ValueError: if *place* is not one of the supported positions
    """
    if place == 'top right':
        # Logo size
        width = Inches(1.2)
        height = Inches(0.6)  # width half
        top = Inches(0.1)
        left = Inches(10.0) - width - Inches(0.2)
    elif place == 'center':
        width = Inches(6.0)
        height = Inches(3.0)  # width half
        left = (Inches(10.0) - width) / 2
        top = (Inches(7.5) - height) / 2
    elif place == 'top left':
        width = Inches(1.25)
        height = Inches(0.625)  # width half
        top = Inches(0.25)
        left = Inches(0.3)
    else:
        # BUG FIX: an unknown *place* used to fall through and crash with
        # NameError on add_picture; fail fast with a clear error instead.
        raise ValueError(
            "place must be 'top right', 'center' or 'top left', got %r" % (place,))
    slide.shapes.add_picture(img_path, left, top, width, height)
def set_background_color(slide, bg_color=dark_blue):
    """
    Set slide background color.
    :param slide: slide from presentation
    :param bg_color: background color
    """
    fill(slide.background, bg_color)
def fill(shape, fill_color=dark_blue):
    """
    Fill a shape with a solid color.
    :param shape: MSO_SHAPE shape (MSO_SHAPE.RECTANGLE, MSO_SHAPE.ELLIPSE, etc)
    :param fill_color: fill color
    """
    shape_fill = shape.fill
    shape_fill.solid()
    shape_fill.fore_color.rgb = fill_color
def wrap_by_word(s, n):
    """
    Return *s* with a newline inserted after every *n* words.
    :param s: string
    :param n: integer, number of words per line
    :return: re-wrapped string (every emitted chunk ends with a newline)
    """
    words = s.split()
    chunks = [' '.join(words[start:start + n]) for start in range(0, len(words), n)]
    return ''.join(chunk + '\n' for chunk in chunks)
def wrap_by_char(s, n):
    """
    Return *s* re-wrapped so no line exceeds *n* characters.
    Existing line breaks are preserved as wrap boundaries.
    """
    # BUG FIX: ``textwrap`` was used without ever being imported in this
    # module, so every call raised NameError.  Import it locally.
    import textwrap
    return '\n'.join(
        piece
        for line in s.splitlines()
        for piece in textwrap.wrap(line, width=n)
    )
def get_data():
    """Download the GW releases JSON and flatten it into a DataFrame.

    :return: DataFrame with columns Repo, Date, Version, Hash, Notes —
        one row per release plus one extra row per sub-repo release,
        with duplicate rows dropped.
    """
    # Create URL to JSON file (alternatively this can be a filepath)
    url = 'https://wmwaredata.s3.us-east-2.amazonaws.com/gw_releases.json'
    # Load the first sheet of the JSON file into a data frame
    df = pd.read_json(url, orient='columns')
    df = df.rename(columns={'sub_repo_commit_url': 'sub_repo_url'})
    repos = []
    dates = []
    tags = []
    hashes = []
    descriptions = []
    for i in range(len(df)):
        # Repo: "name\n\nurl" combined into a single cell
        repo = df['repo_name'].iloc[i] + '\n\n' + df['repo_url'].iloc[i]
        repos.append(repo)
        # Date: split ISO timestamp "YYYY-MM-DDTHH:MM:SSZ" into two lines
        d = df['release_date'].iloc[i]
        if d is not None:
            d = d.split('T')
            date = d[0] + '\n\n' + d[1][:-1]  # [:-1] drops the trailing 'Z'
        else:
            date = ''
        dates.append(date)
        # Version / Tag
        t = df['version'].iloc[i]
        tags.append(t)
        # Hash
        h = df['hash'].iloc[i]
        hashes.append(h)
        # Notes / Description: strip HTML tags, then wrap every 20 words
        content = re.sub('<.*?>', '', df['release_notes'].iloc[i])
        des = wrap_by_word(content, n=20)
        descriptions.append(des)
        # Sub Repo: emit an extra row reusing date/tag/notes of the parent
        s_name = df['sub_repo_name'].iloc[i]
        if s_name is not None:
            s_repo = s_name + '\n\n' + df['sub_repo_url'].iloc[i]
            repos.append(s_repo)
            # date
            dates.append(date)
            # tag
            tags.append(t)
            # Sub Hash
            # NOTE(review): sub_hash is appended conditionally while the
            # other column lists are appended unconditionally — if sub_hash
            # is ever None the lists fall out of step and the DataFrame
            # construction below fails; confirm upstream data always has
            # sub_hash alongside sub_repo_name.
            s_h = df['sub_hash'].iloc[i]
            if s_h is not None:
                hashes.append(s_h)
            # notes
            descriptions.append(des)
    df = pd.DataFrame()
    df['Repo'] = repos
    df['Date'] = dates
    df['Version'] = tags
    df['Hash'] = hashes
    df['Notes'] = descriptions
    # Sort columns
    df = df[['Repo', 'Date', 'Version', 'Hash', 'Notes']]
    # drop repeated rows
    df1 = df.drop_duplicates().reset_index(drop=True)
    return df1
def make_presentation(output_to, single=False, dm=False):
    """
    Autocreate a PowerPoint presentation of GW releases.
    :param output_to: path to save the output pptx file
    :param single: bool, create a single presentation and save it to the single folder if true
    :param dm: bool, create a presentation per delivery manager and save it to the dm folder if true
    """
    prs = Presentation()
    releases = get_data()
    # PROJECT SLIDES — one slide per flattened release row.
    for idx in range(len(releases)):
        add_project_slide(prs, releases, idx)
    prs.save(output_to)
def add_project_slide(prs, df, row_index):
    """
    Add one release slide (title, logo and table of matching rows) to the deck.
    :param prs: presentation
    :param df: pandas dataframe with presentation information
    :param row_index: index of the row with the information corresponding to the slide
    """
    # Individual fields of this release.  Only ``hash`` is used below;
    # the rest are kept around (see the commented-out title code).
    repo = df.iloc[row_index]['Repo']
    date = df.iloc[row_index]['Date']
    version = df.iloc[row_index]['Version']
    hash = df.iloc[row_index]['Hash']  # NOTE: shadows the builtin ``hash``
    notes = df.iloc[row_index]['Notes']
    # Layout 5 of the default template is "Title Only".
    title_only_slide_layout = prs.slide_layouts[5]
    slide = prs.slides.add_slide(title_only_slide_layout)
    title = slide.shapes.title.text = "GW Releases"
    #set_background_color(slide)
    logo(slide)
    shapes = slide.shapes
    # TITLE
    #title = shapes.title
    #title = '\n GW Releases' + repo.upper() + '\n'
    #text_settings(title, i=0)
    #text_settings(title, i=1)
    #text_settings(title, i=2, font_size=Pt(26))
    #text_settings(title, i=4, font_size=Pt(24), font_color=green_blue)
    # All rows sharing this release's hash go into one table.
    rnr = df[(df['Hash'] == hash)].reset_index()
    if len(rnr) > 0:
        add_table(shapes, rnr, blue1)
def text_settings(
        shape,
        i=0,
        alignment=PP_ALIGN.LEFT,
        font_color=white,
        font_size=Pt(9),
        font=gw_font,
        bold=False):
    """
    Format shape's text with alignment, font, font color and size, etc.
    :param shape: MSO_SHAPE shape (MSO_SHAPE.RECTANGLE, MSO_SHAPE.ELLIPSE, etc)
    :param i: paragraph (line) position
    :param alignment: alignment (PP_ALIGN.LEFT, PP_ALIGN.CENTER, etc.)
    :param font_color: font color
    :param font_size: font size
    :param font: letter font
    :param bold: bool, use bold letters if true
    """
    text = shape.text_frame.paragraphs[i]
    text.alignment = alignment
    text.font.name = font
    text.font.size = font_size
    text.font.color.rgb = font_color
    text.font.bold = bold
    # NOTE(review): this loop re-sets size and color on EVERY paragraph,
    # overriding the font_size/font_color just applied to paragraph i —
    # confirm this blanket 9pt-white pass is intentional.
    for paragraph in shape.text_frame.paragraphs:
        paragraph.font.size = Pt(9)
        paragraph.font.color.rgb = RGBColor(255, 255, 255)
def add_table(
        shapes,
        df,
        table_color,
        top=Inches(1.5),
        col_width=Inches(3.0),
        left=Inches(0.3),
        width=Inches(3.5),
        height=Inches(0.5)):
    """
    Add the releases table to a slide.
    :param shapes: shapes attribute from slide (an attribute of the presentation)
    :param df: pandas dataframe with Repo/Date/Version/Hash/Notes columns
    :param table_color: table color (currently unused, kept for compatibility)
    :param top: distance (in inches) to top edge of slide (slides are 10 x 7.5 inches)
    :param col_width: column width for the Hash and Notes columns
    :param left: distance (in inches) to left edge of slide
    :param width: table width
    :param height: table height
    """
    # REFACTOR: the five copy-pasted per-column stanzas are collapsed into
    # data-driven loops; output is identical.
    headers = ['Repo', 'Date', 'Version', 'Hash', 'Notes']
    rows = len(df) + 1
    cols = len(headers)
    table = shapes.add_table(rows, cols, left, top, width, height).table
    # set column widths
    widths = [Inches(1.5), Inches(1.0), Inches(1.0), col_width, col_width]
    for j, w in enumerate(widths):
        table.columns[j].width = w
    # write column headings
    for j, header in enumerate(headers):
        table.cell(0, j).text = header.capitalize()
    # write body cells; the Repo column (j == 0) uses a smaller font so
    # long repository URLs still fit.
    for i in range(1, rows):
        for j, header in enumerate(headers):
            cell = table.cell(i, j)
            cell.text = df[header][i - 1]
            fill(cell, blue2)
            size = Pt(5) if j == 0 else Pt(9)
            text_settings(cell, alignment=PP_ALIGN.CENTER, font_size=size)
            set_cell_border(cell, blue2, white)
def set_cell_border(
        cell,
        border_color_LR,
        border_color_TB,
        border_width='12700'):
    """
    Format cell borders by injecting line elements into the cell's OXML.
    :param cell: cell from table
    :param border_color_LR: left and right border color
    :param border_color_TB: top and bottom border color
    :param border_width: border width in EMU as a string (12700 EMU = 1 pt)
    """
    # convert RGB to hex as required by the <a:srgbClr> val attribute
    border_color_LR = '%02x%02x%02x' % border_color_LR
    border_color_TB = '%02x%02x%02x' % border_color_TB
    # order matches ``lines`` below: left, right, top, bottom
    colors = [
        border_color_LR,
        border_color_LR,
        border_color_TB,
        border_color_TB]
    tc = cell._tc  # underlying table-cell XML element
    tcPr = tc.get_or_add_tcPr()
    lines = ['a:lnL', 'a:lnR', 'a:lnT', 'a:lnB']
    for line, color in zip(lines, colors):
        # One <a:lnX> element per side: solid single line, flat caps.
        ln = SubElement(
            tcPr,
            line,
            w=border_width,
            cap='flat',
            cmpd='sng',
            algn='ctr')
        solidFill = SubElement(ln, 'a:solidFill')
        srgbClr = SubElement(solidFill, 'a:srgbClr', val=color)
        prstDash = SubElement(ln, 'a:prstDash', val='solid')
        round_ = SubElement(ln, 'a:round')
        headEnd = SubElement(ln, 'a:headEnd', type='none', w='med', len='med')
        tailEnd = SubElement(ln, 'a:tailEnd', type='none', w='med', len='med')
def SubElement(parent, tagname, **kwargs):
    """Create an OXML element, set its attributes, append it to *parent*.

    :return: the newly created child element
    """
    child = OxmlElement(tagname)
    child.attrib.update(kwargs)
    parent.append(child)
    return child
import re
import os
from pptx import Presentation
from pptx.util import Inches, Pt
from pptx.enum.shapes import MSO_SHAPE
from pptx.enum.text import PP_ALIGN
from pptx.dml.color import RGBColor
from pptx.oxml.xmlchemy import OxmlElement
# Base directory prefix for bundled assets (empty = current directory).
d = ''
# Glasswall palette
dark_blue = RGBColor(14, 61, 90)
green_blue = RGBColor(26, 145, 154)
white = RGBColor(255, 255, 255)
blue1 = RGBColor(22, 94, 122)
# table colors:
gray = RGBColor(191, 191, 191)
blue2 = RGBColor(45, 92, 117)
# rag colors:
green = RGBColor(0, 204, 153)  # (0, 255, 0)
amber = RGBColor(255, 204, 0)  # (255, 153, 51)
red = RGBColor(255, 102, 102)  # (255, 0, 0)
# Letter font
gw_font = 'Arial'
def examine_template():
    """
    Print default Master's slides information, title, subtitle, placeholders, etc.
    """
    prs = Presentation()
    for idx in range(11):
        layout_slide = prs.slides.add_slide(prs.slide_layouts[idx])
        print('Master Slide ' + str(idx))
        for ph in layout_slide.placeholders:
            print('%d, %s' % (ph.placeholder_format.idx, ph.name))
def logo(slide, img_path=d + 'gw.png', place='top right'):
    """
    Insert logo in slide.
    :param slide: slide from presentation
    :param img_path: path to image file, Glasswall logo for default
    :param place: placement: 'top right', 'center' or 'top left'
    :raises ValueError: if *place* is not one of the supported positions
    """
    if place == 'top right':
        # Logo size
        width = Inches(1.2)
        height = Inches(0.6)  # width half
        top = Inches(0.1)
        left = Inches(10.0) - width - Inches(0.2)
    elif place == 'center':
        width = Inches(6.0)
        height = Inches(3.0)  # width half
        left = (Inches(10.0) - width) / 2
        top = (Inches(7.5) - height) / 2
    elif place == 'top left':
        width = Inches(1.25)
        height = Inches(0.625)  # width half
        top = Inches(0.25)
        left = Inches(0.3)
    else:
        # BUG FIX: an unknown *place* used to fall through and crash with
        # NameError on add_picture; fail fast with a clear error instead.
        raise ValueError(
            "place must be 'top right', 'center' or 'top left', got %r" % (place,))
    slide.shapes.add_picture(img_path, left, top, width, height)
def set_background_color(slide, bg_color=dark_blue):
    """
    Set slide background color.
    :param slide: slide from presentation
    :param bg_color: background color
    """
    fill(slide.background, bg_color)
def fill(shape, fill_color=dark_blue):
    """
    Fill a shape with a solid color.
    :param shape: MSO_SHAPE shape (MSO_SHAPE.RECTANGLE, MSO_SHAPE.ELLIPSE, etc)
    :param fill_color: fill color
    """
    shape_fill = shape.fill
    shape_fill.solid()
    shape_fill.fore_color.rgb = fill_color
def wrap_by_word(s, n):
    """
    Return *s* with a newline inserted after every *n* words.
    :param s: string
    :param n: integer, number of words per line
    :return: re-wrapped string (every emitted chunk ends with a newline)
    """
    words = s.split()
    chunks = [' '.join(words[start:start + n]) for start in range(0, len(words), n)]
    return ''.join(chunk + '\n' for chunk in chunks)
def wrap_by_char(s, n):
    """
    Return *s* re-wrapped so no line exceeds *n* characters.
    Existing line breaks are preserved as wrap boundaries.
    """
    # BUG FIX: ``textwrap`` was used without ever being imported in this
    # module, so every call raised NameError.  Import it locally.
    import textwrap
    return '\n'.join(
        piece
        for line in s.splitlines()
        for piece in textwrap.wrap(line, width=n)
    )
def get_data():
    """Download the GW releases JSON and flatten it into a DataFrame.

    :return: DataFrame with columns Repo, Date, Version, Hash, Notes —
        one row per release plus one extra row per sub-repo release,
        with duplicate rows dropped.
    """
    # Create URL to JSON file (alternatively this can be a filepath)
    url = 'https://wmwaredata.s3.us-east-2.amazonaws.com/gw_releases.json'
    # Load the first sheet of the JSON file into a data frame
    df = pd.read_json(url, orient='columns')
    df = df.rename(columns={'sub_repo_commit_url': 'sub_repo_url'})
    repos = []
    dates = []
    tags = []
    hashes = []
    descriptions = []
    for i in range(len(df)):
        # Repo: "name\n\nurl" combined into a single cell
        repo = df['repo_name'].iloc[i] + '\n\n' + df['repo_url'].iloc[i]
        repos.append(repo)
        # Date: split ISO timestamp "YYYY-MM-DDTHH:MM:SSZ" into two lines
        d = df['release_date'].iloc[i]
        if d is not None:
            d = d.split('T')
            date = d[0] + '\n\n' + d[1][:-1]  # [:-1] drops the trailing 'Z'
        else:
            date = ''
        dates.append(date)
        # Version / Tag
        t = df['version'].iloc[i]
        tags.append(t)
        # Hash
        h = df['hash'].iloc[i]
        hashes.append(h)
        # Notes / Description: strip HTML tags, then wrap every 20 words
        content = re.sub('<.*?>', '', df['release_notes'].iloc[i])
        des = wrap_by_word(content, n=20)
        descriptions.append(des)
        # Sub Repo: emit an extra row reusing date/tag/notes of the parent
        s_name = df['sub_repo_name'].iloc[i]
        if s_name is not None:
            s_repo = s_name + '\n\n' + df['sub_repo_url'].iloc[i]
            repos.append(s_repo)
            # date
            dates.append(date)
            # tag
            tags.append(t)
            # Sub Hash
            # NOTE(review): sub_hash is appended conditionally while the
            # other column lists are appended unconditionally — if sub_hash
            # is ever None the lists fall out of step and the DataFrame
            # construction below fails; confirm upstream data always has
            # sub_hash alongside sub_repo_name.
            s_h = df['sub_hash'].iloc[i]
            if s_h is not None:
                hashes.append(s_h)
            # notes
            descriptions.append(des)
    df = pd.DataFrame()
    df['Repo'] = repos
    df['Date'] = dates
    df['Version'] = tags
    df['Hash'] = hashes
    df['Notes'] = descriptions
    # Sort columns
    df = df[['Repo', 'Date', 'Version', 'Hash', 'Notes']]
    # drop repeated rows
    df1 = df.drop_duplicates().reset_index(drop=True)
    return df1
def make_presentation(output_to, single=False, dm=False):
    """
    Autocreate a PowerPoint presentation of GW releases.
    :param output_to: path to save the output pptx file
    :param single: bool, create a single presentation and save it to the single folder if true
    :param dm: bool, create a presentation per delivery manager and save it to the dm folder if true
    """
    prs = Presentation()
    releases = get_data()
    # PROJECT SLIDES — one slide per flattened release row.
    for idx in range(len(releases)):
        add_project_slide(prs, releases, idx)
    prs.save(output_to)
def add_project_slide(prs, df, row_index):
    """
    Add one release slide (title, logo and table of matching rows) to the deck.
    :param prs: presentation
    :param df: pandas dataframe with presentation information
    :param row_index: index of the row with the information corresponding to the slide
    """
    # Individual fields of this release.  Only ``hash`` is used below;
    # the rest are kept around (see the commented-out title code).
    repo = df.iloc[row_index]['Repo']
    date = df.iloc[row_index]['Date']
    version = df.iloc[row_index]['Version']
    hash = df.iloc[row_index]['Hash']  # NOTE: shadows the builtin ``hash``
    notes = df.iloc[row_index]['Notes']
    # Layout 5 of the default template is "Title Only".
    title_only_slide_layout = prs.slide_layouts[5]
    slide = prs.slides.add_slide(title_only_slide_layout)
    title = slide.shapes.title.text = "GW Releases"
    #set_background_color(slide)
    logo(slide)
    shapes = slide.shapes
    # TITLE
    #title = shapes.title
    #title = '\n GW Releases' + repo.upper() + '\n'
    #text_settings(title, i=0)
    #text_settings(title, i=1)
    #text_settings(title, i=2, font_size=Pt(26))
    #text_settings(title, i=4, font_size=Pt(24), font_color=green_blue)
    # All rows sharing this release's hash go into one table.
    rnr = df[(df['Hash'] == hash)].reset_index()
    if len(rnr) > 0:
        add_table(shapes, rnr, blue1)
def text_settings(
        shape,
        i=0,
        alignment=PP_ALIGN.LEFT,
        font_color=white,
        font_size=Pt(9),
        font=gw_font,
        bold=False):
    """
    Format shape's text with alignment, font, font color and size, etc.
    :param shape: MSO_SHAPE shape (MSO_SHAPE.RECTANGLE, MSO_SHAPE.ELLIPSE, etc)
    :param i: paragraph (line) position
    :param alignment: alignment (PP_ALIGN.LEFT, PP_ALIGN.CENTER, etc.)
    :param font_color: font color
    :param font_size: font size
    :param font: letter font
    :param bold: bool, use bold letters if true
    """
    text = shape.text_frame.paragraphs[i]
    text.alignment = alignment
    text.font.name = font
    text.font.size = font_size
    text.font.color.rgb = font_color
    text.font.bold = bold
    # NOTE(review): this loop re-sets size and color on EVERY paragraph,
    # overriding the font_size/font_color just applied to paragraph i —
    # confirm this blanket 9pt-white pass is intentional.
    for paragraph in shape.text_frame.paragraphs:
        paragraph.font.size = Pt(9)
        paragraph.font.color.rgb = RGBColor(255, 255, 255)
def add_table(
        shapes,
        df,
        table_color,
        top=Inches(1.5),
        col_width=Inches(3.0),
        left=Inches(0.3),
        width=Inches(3.5),
        height=Inches(0.5)):
    """
    Add a release table (Repo/Date/Version/Hash/Notes) to a slide.

    :param shapes: shapes attribute of the slide receiving the table
    :param df: pandas dataframe with 'Repo', 'Date', 'Version', 'Hash' and
        'Notes' columns (one row per release)
    :param table_color: table color (currently unused; cells are filled blue2)
    :param top: distance (in inches) to top edge of slide (slides are 10 x 7.5 in)
    :param col_width: width of the two wide columns (Hash, Notes)
    :param left: distance (in inches) to left edge of slide
    :param width: overall table width
    :param height: overall table height
    """
    columns = ['Repo', 'Date', 'Version', 'Hash', 'Notes']
    rows = len(df) + 1  # one extra row for the header
    table = shapes.add_table(rows, len(columns), left, top, width, height).table
    # Column widths: the first three are fixed, the last two configurable.
    widths = [Inches(1.5), Inches(1.0), Inches(1.0), col_width, col_width]
    for col_index, col_w in enumerate(widths):
        table.columns[col_index].width = col_w
    # Header row (unformatted, matching the previous behavior).
    for col_index, col_name in enumerate(columns):
        table.cell(0, col_index).text = col_name.capitalize()
    # Body cells share identical formatting, except the Repo column which
    # uses 5pt text; this loop replaces five copy-pasted stanzas.
    for row_index in range(1, rows):
        for col_index, col_name in enumerate(columns):
            cell = table.cell(row_index, col_index)
            cell.text = df[col_name][row_index - 1]
            fill(cell, blue2)
            if col_index == 0:
                text_settings(cell, alignment=PP_ALIGN.CENTER, font_size=Pt(5))
            else:
                text_settings(cell, alignment=PP_ALIGN.CENTER)
            set_cell_border(cell, blue2, white)
def set_cell_border(
        cell,
        border_color_LR,
        border_color_TB,
        border_width='12700'):
    """
    Draw borders around a table cell.

    :param cell: cell from a pptx table
    :param border_color_LR: RGB tuple for the left and right borders
    :param border_color_TB: RGB tuple for the top and bottom borders
    :param border_width: border width (EMU) as a string
    """
    def to_hex(rgb):
        # convert an (r, g, b) tuple to a 6-digit hex string
        return '%02x%02x%02x' % rgb

    tcPr = cell._tc.get_or_add_tcPr()
    edges = [
        ('a:lnL', to_hex(border_color_LR)),
        ('a:lnR', to_hex(border_color_LR)),
        ('a:lnT', to_hex(border_color_TB)),
        ('a:lnB', to_hex(border_color_TB)),
    ]
    for tag, hex_color in edges:
        ln = SubElement(
            tcPr,
            tag,
            w=border_width,
            cap='flat',
            cmpd='sng',
            algn='ctr')
        solid_fill = SubElement(ln, 'a:solidFill')
        SubElement(solid_fill, 'a:srgbClr', val=hex_color)
        SubElement(ln, 'a:prstDash', val='solid')
        SubElement(ln, 'a:round')
        SubElement(ln, 'a:headEnd', type='none', w='med', len='med')
        SubElement(ln, 'a:tailEnd', type='none', w='med', len='med')
def SubElement(parent, tagname, **kwargs):
    """Create an OOXML element, set its attributes from *kwargs*, attach it
    to *parent*, and return it."""
    child = OxmlElement(tagname)
    child.attrib.update(kwargs)
    parent.append(child)
    return child
from django.conf import settings
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from django.contrib import messages
from django.core.cache import caches
from django.views.decorators.cache import cache_page
from django.contrib.postgres.search import SearchVector
from datetime import date
import time
import logging
from .models import Poster, Conference
from .forms import PDFForm
from .utils import email_log
logger = logging.getLogger(__name__)
# User-facing flash messages shown via django.contrib.messages during the
# poster upload flow (see upload()).
USR_FAILED = "Your poster was uploaded successfully, but we had trouble converting it. We will look into it and activate your poster within the next few hours."
USR_FAILED_MULTIPAGE = "It looks like you might have uploaded a multi-page document. We can only handle single page posters, at the moment."
USR_SUCCESS = "Your poster was uploaded successfully! It will appear on the front page within a minute or two."
USR_INVALID_FILE = "Please upload a valid file."
USR_EXISTING_FILE = "Your poster has been uploaded already. You may update it by uploading a new file."
#TODO: change to generic views
@cache_page(settings.CACHE_TTL, cache='index')
def index(request, conference_id=None):
    """Render the poster grid: site-wide (newest 256) or for one conference."""
    if conference_id is not None:
        conference = get_object_or_404(Conference, slug=conference_id)
        posters = Poster.objects.filter(active=True, conference=conference).prefetch_related('authors')
        context = {'poster_list': posters, 'conference': conference}
    else:
        posters = Poster.objects.filter(active=True).order_by('-pub_date')[:256].prefetch_related('authors')
        context = {'poster_list': posters}
    return render(request, 'pages/index.html', context)
def search(request):
    """
    Full-text search over poster title, conference name and author names.

    Reads the ``q`` GET parameter; a missing parameter is treated as an
    empty query instead of raising MultiValueDictKeyError (a 500).
    """
    query = request.GET.get('q', '')
    search_vector = SearchVector('title', 'conference__name', 'authors__name')
    poster_list = Poster.objects.annotate(search=search_vector).distinct('pk').filter(active=True, search=query)
    return render(request, 'pages/index.html', {'poster_list': poster_list, 'search': True})
def detail(request, slug):
    """Show a single poster's detail page; 404 when the slug is unknown."""
    return render(request, 'pages/detail.html',
                  {'poster': get_object_or_404(Poster, slug=slug)})
def upload(request, access_key):
    """
    Handle a poster PDF upload identified by its private access key.

    GET renders the upload form.  POST validates and stores the PDF, then
    attempts to generate a preview: a TypeError from generate_preview() is
    reported as a multi-page document, any other failure deactivates the
    poster (USR_FAILED promises manual follow-up).  Every POST clears the
    cached front page and sends a log email, success or not.
    """
    poster = get_object_or_404(Poster, access_key=access_key)
    log_email = email_log.LogEmail(poster)
    form = PDFForm(instance=poster)
    if poster.pdf:
        messages.info(request, USR_EXISTING_FILE)
    if request.method == 'POST':
        try:
            log_email.add_message('INFO: uploaded')
            form = PDFForm(request.POST, request.FILES, instance=poster)
            if form.is_valid():
                log_email.add_message('INFO: valid file')
                form.save()
                try:
                    poster.generate_preview()
                    poster.active = True
                    poster.pub_date = date.today()
                    poster.save()
                    messages.success(request, USR_SUCCESS)
                    log_email.add_message('INFO: conversion successful')
                    return redirect('detail', slug=poster.slug)
                except TypeError:
                    # Per the messages below, TypeError is taken to mean a
                    # multi-page document; fall through to re-render the form.
                    poster.active = False
                    poster.save()
                    logger.exception('ERR: failed to convert PDF (id %s) -- likely multi-page document' % poster.pk)
                    messages.error(request, USR_FAILED_MULTIPAGE)
                    log_email.add_message('ERR: conversion failed -- likely multi-page document')
                except Exception:
                    # Was a bare ``except:``; Exception avoids swallowing
                    # SystemExit/KeyboardInterrupt while keeping best-effort behavior.
                    poster.active = False
                    poster.save()
                    logger.exception('ERR: failed to convert PDF (id %s)' % poster.pk)
                    messages.warning(request, USR_FAILED)
                    log_email.add_message('ERR: conversion failed')
                    return redirect('detail', slug=poster.slug)
            else:
                log_email.add_message('ERR: invalid file')
                messages.error(request, USR_INVALID_FILE)
        finally:
            # Any POST invalidates the cached index and reports by email.
            caches['index'].clear()
            log_email.send()
    form.active = False  # flag consumed by the template (forms have no native 'active')
    return render(request, 'pages/upload.html', {'form': form, 'poster': poster})
@cache_page(3600, cache='default')
def sitemap(request):
    """Render the XML sitemap listing all active posters, newest first."""
    posters = Poster.objects.filter(active=True).order_by('-pub_date')
    return render(request, 'sitemap.xml', {'poster_list': posters},
                  content_type='text/xml')
def rss(request):
pass | posters/views.py | from django.conf import settings
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from django.contrib import messages
from django.core.cache import caches
from django.views.decorators.cache import cache_page
from django.contrib.postgres.search import SearchVector
from datetime import date
import time
import logging
from .models import Poster, Conference
from .forms import PDFForm
from .utils import email_log
logger = logging.getLogger(__name__)
USR_FAILED = "Your poster was uploaded successfully, but we had trouble converting it. We will look into it and activate your poster within the next few hours."
USR_FAILED_MULTIPAGE = "It looks like you might have uploaded a multi-page document. We can only handle single page posters, at the moment."
USR_SUCCESS = "Your poster was uploaded successfully! It will appear on the front page within a minute or two."
USR_INVALID_FILE = "Please upload a valid file."
USR_EXISTING_FILE = "Your poster has been uploaded already. You may update it by uploading a new file."
#TODO: change to generic views
@cache_page(settings.CACHE_TTL, cache='index')
def index(request, conference_id=None):
if conference_id is None:
poster_list = Poster.objects.filter(active=True).order_by('-pub_date')[:256].prefetch_related('authors')
return render(request, 'pages/index.html', {'poster_list': poster_list})
else:
conference = get_object_or_404(Conference, slug=conference_id)
poster_list = Poster.objects.filter(active=True, conference=conference).prefetch_related('authors')
return render(request, 'pages/index.html', {'poster_list': poster_list, 'conference': conference})
def search(request):
search_vector = SearchVector('title', 'conference__name', 'authors__name')
poster_list = Poster.objects.annotate(search=search_vector).distinct('pk').filter(active=True, search=request.GET['q'])
return render(request, 'pages/index.html', {'poster_list': poster_list, 'search': True})
def detail(request, slug):
poster = get_object_or_404(Poster, slug=slug)
return render(request, 'pages/detail.html', {'poster': poster})
def upload(request, access_key):
poster = get_object_or_404(Poster, access_key=access_key)
log_email = email_log.LogEmail(poster)
form = PDFForm(instance=poster)
if poster.pdf:
messages.info(request, USR_EXISTING_FILE)
if request.method == 'POST':
try:
log_email.add_message('INFO: uploaded')
form = PDFForm(request.POST, request.FILES, instance=poster)
if form.is_valid():
log_email.add_message('INFO: valid file')
form.save()
try:
poster.generate_preview()
poster.active = True
poster.pub_date = date.today()
poster.save()
messages.success(request, USR_SUCCESS)
log_email.add_message('INFO: conversion successful')
return redirect('detail', slug=poster.slug)
except TypeError:
poster.active = False
poster.save()
logger.exception('ERR: failed to convert PDF (id %s) -- likely multi-page document' % poster.pk)
messages.error(request, USR_FAILED_MULTIPAGE)
log_email.add_message('ERR: conversion failed -- likely multi-page document')
except:
poster.active = False
poster.save()
logger.exception('ERR: failed to convert PDF (id %s)' % poster.pk)
messages.warning(request, USR_FAILED)
log_email.add_message('ERR: conversion failed')
return redirect('detail', slug=poster.slug)
else:
log_email.add_message('ERR: invalid file')
messages.error(request, USR_INVALID_FILE)
finally:
caches['index'].clear()
log_email.send()
form.active = False
return render(request, 'pages/upload.html', {'form': form, 'poster': poster})
@cache_page(3600, cache='default')
def sitemap(request):
poster_list = Poster.objects.filter(active=True).order_by('-pub_date')
return render(request, 'sitemap.xml', {'poster_list': poster_list}, content_type='text/xml')
def rss(request):
pass | 0.216674 | 0.061819 |
import os
import warnings
import dj_database_url
import raven
import yaml
from django.urls import reverse_lazy
from promgen.plugins import apps_from_setuptools
from promgen.version import __version__
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_DIR = os.environ['CONFIG_DIR']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
if not SECRET_KEY:
    warnings.warn('Unset SECRET_KEY setting to random for now')
    # Taken from Django's generation function
    from django.utils.crypto import get_random_string
    # NOTE(review): a fresh random key is generated on every process start,
    # which invalidates existing sessions/signed cookies -- dev use only.
    SECRET_KEY = get_random_string(50, '<KEY>@#$%^&*(-_=+)')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.path.exists(os.path.join(CONFIG_DIR, 'DEBUG'))
# Settings for Prometheus paths and such
PROMGEN_CONFIG = os.path.join(CONFIG_DIR, 'promgen.yml')
if os.path.exists(PROMGEN_CONFIG):
    with open(PROMGEN_CONFIG) as fp:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from the config file.
        PROMGEN = yaml.safe_load(fp)
else:
    PROMGEN = {}
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = apps_from_setuptools + [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'social_django',
'promgen',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'promgen.middleware.PromgenMiddleware',
]
SOCIAL_AUTH_RAISE_EXCEPTIONS = DEBUG
LOGIN_URL = reverse_lazy('login')
LOGIN_REDIRECT_URL = reverse_lazy('home')
LOGOUT_REDIRECT_URL = reverse_lazy('home')
ROOT_URLCONF = 'promgen.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'promgen.context_processors.settings_in_view',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'promgen.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {'default': dj_database_url.config(
env='DATABASE_URL',
default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
)}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.expanduser('~/.cache/promgen')
SITE_ID = 1
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
if 'SENTRY_DSN' in os.environ:
    INSTALLED_APPS += ['raven.contrib.django.raven_compat']
    try:
        _RELEASE = raven.fetch_git_sha(BASE_DIR)
    except Exception:
        # Not running from a git checkout (fetch_git_sha raised); fall back
        # to the packaged version string.  Was a bare ``except:``.
        _RELEASE = __version__
    RAVEN_CONFIG = {
        'dsn': os.environ['SENTRY_DSN'],
        'release': _RELEASE,
    }
    # Route every ERROR-level log record to Sentry.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {
            'sentry': {
                'level': 'ERROR',
                'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
                'dsn': os.environ['SENTRY_DSN'],
            },
        },
        'loggers': {
            '': {
                'handlers': ['sentry'],
                'level': 'ERROR',
                'propagate': True,
            },
        },
    }
# If CELERY_BROKER_URL is set in our environment, then we configure celery as
# expected. If it is not configured, then we set CELERY_TASK_ALWAYS_EAGER to
# force celery to run all tasks in the same process (effectively runs each task
# as a normal function)
if 'CELERY_BROKER_URL' in os.environ:
    CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
else:
    # Eager mode: tasks execute synchronously in-process (dev/test friendly).
    CELERY_TASK_ALWAYS_EAGER = True
if DEBUG:
    # django-debug-toolbar is an optional development dependency: enable it
    # when importable, otherwise run without it.
    try:
        import debug_toolbar  # NOQA
        INSTALLED_APPS += ['debug_toolbar']
        MIDDLEWARE = ['debug_toolbar.middleware.DebugToolbarMiddleware'] + MIDDLEWARE
        INTERNAL_IPS = ['127.0.0.1']
    except ImportError:
        # Was a bare ``except:``; only a missing package should be tolerated.
        pass
# Load overrides from PROMGEN to replace Django settings
for k, v in PROMGEN.pop('django', {}).items():
globals()[k] = v | promgen/settings.py | import os
import warnings
import dj_database_url
import raven
import yaml
from django.urls import reverse_lazy
from promgen.plugins import apps_from_setuptools
from promgen.version import __version__
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_DIR = os.environ['CONFIG_DIR']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
if not SECRET_KEY:
warnings.warn('Unset SECRET_KEY setting to random for now')
# Taken from Django's generation function
from django.utils.crypto import get_random_string
SECRET_KEY = get_random_string(50, '<KEY>@#$%^&*(-_=+)')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.path.exists(os.path.join(CONFIG_DIR, 'DEBUG'))
# Settings for Prometheus paths and such
PROMGEN_CONFIG = os.path.join(CONFIG_DIR, 'promgen.yml')
if os.path.exists(PROMGEN_CONFIG):
with open(PROMGEN_CONFIG) as fp:
PROMGEN = yaml.load(fp)
else:
PROMGEN = {}
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = apps_from_setuptools + [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'social_django',
'promgen',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'promgen.middleware.PromgenMiddleware',
]
SOCIAL_AUTH_RAISE_EXCEPTIONS = DEBUG
LOGIN_URL = reverse_lazy('login')
LOGIN_REDIRECT_URL = reverse_lazy('home')
LOGOUT_REDIRECT_URL = reverse_lazy('home')
ROOT_URLCONF = 'promgen.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'promgen.context_processors.settings_in_view',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'promgen.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {'default': dj_database_url.config(
env='DATABASE_URL',
default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
)}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.expanduser('~/.cache/promgen')
SITE_ID = 1
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
if 'SENTRY_DSN' in os.environ:
INSTALLED_APPS += ['raven.contrib.django.raven_compat']
try:
_RELEASE = raven.fetch_git_sha(BASE_DIR)
except:
_RELEASE = __version__
RAVEN_CONFIG = {
'dsn': os.environ['SENTRY_DSN'],
'release': _RELEASE,
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
'dsn': os.environ['SENTRY_DSN'],
},
},
'loggers': {
'': {
'handlers': ['sentry'],
'level': 'ERROR',
'propagate': True,
},
},
}
# If CELERY_BROKER_URL is set in our environment, then we configure celery as
# expected. If it is not configured, then we set CELERY_TASK_ALWAYS_EAGER to
# force celery to run all tasks in the same process (effectively runs each task
# as a normal function)
if 'CELERY_BROKER_URL' in os.environ:
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
else:
CELERY_TASK_ALWAYS_EAGER = True
if DEBUG:
try:
import debug_toolbar # NOQA
INSTALLED_APPS += ['debug_toolbar']
MIDDLEWARE = ['debug_toolbar.middleware.DebugToolbarMiddleware'] + MIDDLEWARE
INTERNAL_IPS = ['127.0.0.1']
except:
pass
# Load overrides from PROMGEN to replace Django settings
for k, v in PROMGEN.pop('django', {}).items():
globals()[k] = v | 0.331444 | 0.059401 |
import re
import csv
import hashlib
import numpy as np
from .errors import AnnotationsError
# Header row of the Tidy CSV layout; gene symbols follow these three columns.
# Written by write_gene_sets_tidycsv and expected by read_gene_sets_tidycsv.
GENESETS_TIDYCSV_HEADER = [
    "gene_set_description",
    "gene_set_name",
    "differential_expression"
]
def read_gene_sets_tidycsv(gs_locator, context=None):
    """
    Read & parse the Tidy CSV format, applying de-duping rules.

    The file has a mandatory header row; each data row is::

        gene_set_description, gene_set_name, differential_expression, gene, gene, ...

    When differential_expression is true ("TRUE"/"True"/"true") the marker
    ``//;;//`` is appended to the description.  Gene symbols run to the end
    of the row; an empty cell terminates the list.  Rows with three or fewer
    cells are skipped.

    :param gs_locator: object whose ``local_handle()`` context manager
        yields a local filename
    :param context: unused; kept for interface compatibility
    :return: dict of the shape::

        {
            <gene set description>: {
                <gene set name>: [<gene symbol>, ...],
                ...
            },
            ...
        }

        Sets sharing description and name are merged (union of symbols).
    """
    class _TidyDialect(csv.excel):
        skipinitialspace = False

    gene_sets = {}
    with gs_locator.local_handle() as fname:
        header_read = False
        with open(fname, newline="") as f:
            reader = csv.reader(f, dialect=_TidyDialect())
            for row in reader:
                # Skip the header and any row too short to carry data.
                if len(row) <= 3 or not header_read:
                    header_read = True
                    continue
                geneset_description, geneset_name, diff_exp = row[:3]
                if diff_exp in ("TRUE", "True", "true"):
                    geneset_description += "//;;//"
                gene_symbols = row[3:]
                try:
                    # An empty cell marks the end of the gene list.
                    gene_symbols = gene_symbols[:gene_symbols.index("")]
                except ValueError:
                    # Was a bare ``except:``; index() only raises ValueError.
                    pass
                gs = gene_sets.setdefault(geneset_description, {})
                if geneset_name in gs:
                    gene_symbols = list(set(gene_symbols).union(gs[geneset_name]))
                gs[geneset_name] = gene_symbols
    return gene_sets
def write_gene_sets_tidycsv(f, genesets):
    """
    Serialize the internal gene sets mapping (as produced by
    read_gene_set_tidycsv) into the simple Tidy CSV layout.
    """
    writer = csv.writer(f, dialect="excel")
    writer.writerow(GENESETS_TIDYCSV_HEADER)
    for description, sets in genesets.items():
        # The '__DEG__' marker in the description encodes the
        # differential-expression flag.
        diff_exp = '__DEG__' in description
        clean_description = description.split('__DEG__')[0]
        for name, genes in sets.items():
            prefix = [clean_description, name, diff_exp]
            writer.writerow(prefix + genes if genes else prefix)
def summarizeQueryHash(raw_query):
    """Generate a cache key (SHA-1 hex digest) from the raw query bytes."""
    digest = hashlib.sha1(raw_query)
    return digest.hexdigest()
def validate_gene_sets(genesets, var_names, context=None):
    """
    Check validity of gene sets; return the (possibly cleaned) gene sets,
    else raise an error.

    :param genesets: dict of dicts, ``{description: {name: [gene symbol, ...]}}``
    :param var_names: collection of known gene symbols (the var index)
    :param context: optional dict; its ``"messagefn"`` entry is used to
        report warnings
    :raises ValueError: if *genesets* is not a dict, a gene list is not a
        list, or a gene symbol is not a non-empty string
    :raises KeyError: if a gene set name is not a non-empty string

    Rules:
    0. Gene set names must be non-empty strings. [error]
    1. Gene symbols must be non-empty strings. [error]
    2. Gene symbols must be part of var_names; unknown symbols are removed
       with a warning. [warning]
    3. Duplicated symbols within a set are de-duped with a warning. [warning]
    """
    messagefn = context["messagefn"] if context else (lambda x: None)
    if not isinstance(genesets, dict):
        raise ValueError("Gene sets must be a dict.")
    # Pass 1: validate every gene set name before mutating anything.
    for description, sets in genesets.items():
        for name in sets:
            # isinstance instead of ``type(name) != str`` (idiomatic, and
            # accepts str subclasses).
            if not isinstance(name, str) or len(name) == 0:
                raise KeyError("Gene set names must be non-null string.")
    # Pass 2: validate and clean the gene symbol lists in place.
    for description, sets in genesets.items():
        for name, genes in sets.items():
            if not isinstance(genes, list):
                raise ValueError("Gene set genes field must be a list")
            gene_symbol_already_seen = set()
            new_genes = []
            for gene_symbol in genes:
                if not isinstance(gene_symbol, str) or len(gene_symbol) == 0:
                    raise ValueError("Gene symbol must be non-null string.")
                if gene_symbol in gene_symbol_already_seen:
                    # duplicate check
                    messagefn(
                        f"Warning: a duplicate of gene {gene_symbol} was found in gene set {description}:{name}, "
                        "and will be ignored."
                    )
                    continue
                if gene_symbol not in var_names:
                    messagefn(
                        f"Warning: {gene_symbol}, used in gene set {description}:{name}, "
                        "was not found in the dataset and will be ignored."
                    )
                    continue
                gene_symbol_already_seen.add(gene_symbol)
                new_genes.append(gene_symbol)
            sets[name] = new_genes
    return genesets
return genesets | backend/common/genesets.py | import re
import csv
import hashlib
import numpy as np
from .errors import AnnotationsError
GENESETS_TIDYCSV_HEADER = [
"gene_set_description",
"gene_set_name",
"differential_expression"
]
def read_gene_sets_tidycsv(gs_locator, context=None):
"""
Read & parse the Tidy CSV format, applying validation checks for mandatory
values, and de-duping rules.
Format is a four-column CSV, with a mandatory header row, and optional "#" prefixed
comments. Format:
gene_set_name, gene_set_description, gene_symbol, gene_description
gene_set_name must be non-null; others are optional.
Returns: a dictionary of the shape (values in angle-brackets vary):
{
<string, a gene set name>: {
"geneset_name": <string, a gene set name>,
"geneset_description": <a string or None>,
"genes": [
{
"gene_symbol": <string, a gene symbol or name>,
"gene_description": <a string or None>
},
...
]
},
...
}
"""
class myDialect(csv.excel):
skipinitialspace = False
gene_sets = {}
with gs_locator.local_handle() as fname:
header_read = False
with open(fname, newline="") as f:
reader = csv.reader(f, dialect=myDialect())
for row in reader:
if len(row) <= 3 or not header_read:
header_read = True
continue
geneset_description, geneset_name, diffExp = row[:3]
x = "//;;//" if (diffExp=="TRUE" or diffExp == "True" or diffExp == "true") else ""
geneset_description+=x
gene_symbols = row[3:]
try:
gene_symbols = gene_symbols[:gene_symbols.index("")]
except:
pass
if geneset_description in gene_sets:
gs = gene_sets[geneset_description]
else:
gs = gene_sets[geneset_description] = {}
if geneset_name in gs:
gene_symbols = list(set(gene_symbols).union(gs[geneset_name]))
gs[geneset_name] = gene_symbols
return gene_sets
def write_gene_sets_tidycsv(f, genesets):
"""
Convert the internal gene sets format (returned by read_gene_set_tidycsv) into
the simple Tidy CSV.
"""
writer = csv.writer(f, dialect="excel")
writer.writerow(GENESETS_TIDYCSV_HEADER)
for k1 in genesets.keys():
for k2 in genesets[k1].keys():
genes = genesets[k1].get(k2,None)
k3 ='__DEG__' in k1
knew = k1.split('__DEG__')[0]
if not genes:
writer.writerow([knew, k2, k3])
else:
writer.writerow([knew, k2, k3]+genes)
def summarizeQueryHash(raw_query):
""" generate a cache key (hash) from the raw query string """
return hashlib.sha1(raw_query).hexdigest()
def validate_gene_sets(genesets, var_names, context=None):
"""
Check validity of gene sets, return if correct, else raise error.
May also modify the gene set for conditions that should be resolved,
but which do not warrant a hard error.
Argument gene sets may be either the REST OTA format (list of dicts) or the internal
format (dict of dicts, keyed by the gene set name).
Will return a modified gene sets (eg, remove warnings) of the same type as the
provided argument. Ie, dict->dict, list->list
Rules:
0. All gene set names must be unique. [error]
1. Gene set names must conform to the following: [error]
* Names must be comprised of 1 or more ASCII characters 32-126
* No leading or trailing spaces (ASCII 32)
* No multi-space (ASCII 32) runs
2. Gene symbols must be part of the current var_index. [warning]
If gene symbol is not in the var_index, generate a warning and remove the symbol
from the gene sets.
3. Gene symbols must not be duplicated in a gene set. [warning]
Duplications will be silently de-duped.
Items marked [error] will generate a hard error, causing the validation to fail.
Items marked [warning] will generate a warning, and will be resolved without failing
the validation (typically by removing the offending item from the gene sets).
"""
messagefn = context["messagefn"] if context else (lambda x: None)
# accept genesets args as either the internal (dict) or REST (list) format,
# as they are identical except for the dict being keyed by geneset_name.
if not isinstance(genesets, dict):
raise ValueError("Gene sets must be a dict.")
for k1 in genesets.keys():
for name in genesets[k1].keys():
if type(name) != str or len(name) == 0:
raise KeyError("Gene set names must be non-null string.")
for k1 in genesets.keys():
for k2 in genesets[k1].keys():
genes = genesets[k1][k2]
if not isinstance(genes, list):
raise ValueError("Gene set genes field must be a list")
gene_symbol_already_seen = set()
new_genes = []
for gene_symbol in genes:
if not isinstance(gene_symbol, str) or len(gene_symbol) == 0:
raise ValueError("Gene symbol must be non-null string.")
if gene_symbol in gene_symbol_already_seen:
# duplicate check
messagefn(
f"Warning: a duplicate of gene {gene_symbol} was found in gene set {k1}:{k2}, "
"and will be ignored."
)
continue
if gene_symbol not in var_names:
messagefn(
f"Warning: {gene_symbol}, used in gene set {k1}:{k2}, "
"was not found in the dataset and will be ignored."
)
continue
gene_symbol_already_seen.add(gene_symbol)
new_genes.append(gene_symbol)
genesets[k1][k2] = new_genes
return genesets | 0.580709 | 0.475118 |
from typing import List, Dict, Any
from pydantic import ValidationError
from starlette.exceptions import HTTPException
from robot_server.service.json_api.errors import ErrorResponse, Error, \
ErrorSource
class V1HandlerError(Exception):
    """An exception raised in order to produce a V1BasicResponse response"""

    def __init__(self, status_code, message):
        # Echo the HTTP status and human-readable message back to the handler.
        self.message = message
        self.status_code = status_code
class RobotServerError(Exception):
    """Exception raised to produce a json:api error response.

    Attributes carried for the handler:
        status_code: HTTP status code to respond with.
        error: json:api ``Error`` payload describing the failure.
    """
    def __init__(self, status_code: int, error: Error):
        self.status_code = status_code
        self.error = error
def transform_http_exception_to_json_api_errors(exception: HTTPException) \
        -> ErrorResponse:
    """
    Object marshalling for http exceptions (these errors come back
    differently than validation errors), e.g. invalid json in request body.
    """
    return ErrorResponse(errors=[
        Error(
            status=str(exception.status_code),
            detail=exception.detail,
            title='Bad Request',
        )
    ])
def transform_validation_error_to_json_api_errors(
        status_code: int,
        exception: ValidationError
) -> ErrorResponse:
    """
    Object marshalling for validation errors: format pydantic validation
    errors to the expected json:api response shape.
    """
    status = str(status_code)

    def as_error(err) -> Error:
        # json:api source pointers are '/'-joined location paths.
        pointer = '/' + '/'.join(str(node) for node in err['loc'])
        return Error(
            status=status,
            detail=err.get('msg'),
            source=ErrorSource(pointer=pointer),
            title=err.get('type'),
        )

    return ErrorResponse(
        errors=[as_error(err) for err in exception.errors()]
    )
def consolidate_fastapi_response(all_exceptions: List[Dict[str, Any]]) -> str:
"""
Consolidate the default fastAPI response so it can be returned as a string.
Default schema of fastAPI exception response is:
{
'loc': ('body',
'<outer_scope1>',
'<outer_scope2>',
'<inner_param>'),
'msg': '<the_error_message>',
'type': '<expected_type>'
}
In order to create a meaningful V1-style response, we consolidate the
above response into a string of shape:
'<outer_scope1>.<outer_scope2>.<inner_param>: <the_error_message>'
"""
# Pick just the error message while discarding v2 response items
def error_to_str(error: dict) -> str:
err_node = ".".join(str(loc) for loc in error['loc'] if loc != 'body')
res = ": ".join([err_node, error["msg"]])
return res
all_errs = ". ".join(error_to_str(exc) for exc in all_exceptions)
return all_errs | robot-server/robot_server/service/errors.py | from typing import List, Dict, Any
from pydantic import ValidationError
from starlette.exceptions import HTTPException
from robot_server.service.json_api.errors import ErrorResponse, Error, \
ErrorSource
class V1HandlerError(Exception):
"""An exception raised in order to produce a V1BasicResponse response"""
def __init__(self, status_code, message):
self.status_code = status_code
self.message = message
class RobotServerError(Exception):
def __init__(self, status_code: int, error: Error):
self.status_code = status_code
self.error = error
def transform_http_exception_to_json_api_errors(exception: HTTPException) \
-> ErrorResponse:
"""
Object marshalling for http exceptions (these errors come back differently
than validation errors). e.g. invalid json in request body.
"""
request_error = Error(
status=str(exception.status_code),
detail=exception.detail,
title='Bad Request'
)
return ErrorResponse(errors=[request_error])
def transform_validation_error_to_json_api_errors(
status_code: int,
exception: ValidationError
) -> ErrorResponse:
"""
Object marshalling for validation errors. format pydantic validation
errors to expected json:api response shape.
"""
def transform_error(error):
return Error(
status=str(status_code),
detail=error.get('msg'),
source=ErrorSource(pointer='/' + '/'.join(
str(node) for node in error['loc'])),
title=error.get('type')
)
return ErrorResponse(
errors=[transform_error(error) for error in exception.errors()]
)
def consolidate_fastapi_response(all_exceptions: List[Dict[str, Any]]) -> str:
"""
Consolidate the default fastAPI response so it can be returned as a string.
Default schema of fastAPI exception response is:
{
'loc': ('body',
'<outer_scope1>',
'<outer_scope2>',
'<inner_param>'),
'msg': '<the_error_message>',
'type': '<expected_type>'
}
In order to create a meaningful V1-style response, we consolidate the
above response into a string of shape:
'<outer_scope1>.<outer_scope2>.<inner_param>: <the_error_message>'
"""
# Pick just the error message while discarding v2 response items
def error_to_str(error: dict) -> str:
err_node = ".".join(str(loc) for loc in error['loc'] if loc != 'body')
res = ": ".join([err_node, error["msg"]])
return res
all_errs = ". ".join(error_to_str(exc) for exc in all_exceptions)
return all_errs | 0.857186 | 0.196209 |
import time
import grovepi
class Grove4DigitDisplay:
def __init__(self, pin = 5):
"""
initialize 4 digit display at pin = 5 by default
connect to grovePi port D5
"""
self.display = pin
grovepi.pinMode(self.display, "OUTPUT")
grovepi.fourDigit_init(self.display)
def setBrightness(self, value = 0):
"""
set brightness of the 4 digit display 0 - 8
brightness set to 0 by default
"""
grovepi.fourDigit_brightness(self.display, value)
def setNumber(self, value = 0, leading_zero = 1):
"""
display a number on 4 digit display
by default display number 0 without leading zeroes
"""
grovepi.fourDigit_number(self.display, value, leading_zero)
def setDigit(self, position = 0, digit = 0):
"""
set a particular digit at a particular position
position 0 - 3 (0 = leftmost position)
digit 0 - 15 (0-9A-F)
by default set digit 0 at position 0
"""
grovepi.fourDigit_digit(self.display, position, digit)
def setScore(self, left_score = 0, right_score = 0):
"""
display score , i.e two 2 digit values separated by :
by default display 00:00
"""
grovepi.fourDigit_score(self.display, left_score, right_score)
def monitorAnalog(self, pin = 0 , seconds = 0):
"""
monitor and display the value of an analog pin for some nuber of seconds
by default monitor analog pin 0 for 0 seconds
"""
grovepi.fourDigit_monitor(self.display, pin, seconds)
def allOn(self):
"""
switch all lights on
"""
grovepi.fourDigit_on(self.display)
def allOff(self):
"""
switch all lights off
"""
grovepi.fourDigit_off(self.display)
if __name__ == "__main__":
print "initialize"
four_digit_display = Grove4DigitDisplay()
print "set brightness"
four_digit_display.setBrightness()
while True:
print "set number 5 without leading zeros"
four_digit_display.setNumber(5, 1)
time.sleep(0.5)
print "set number 5 with leading zeros"
four_digit_display.setNumber(5, 0)
time.sleep(0.5)
print "set digits ABCD"
four_digit_display.setDigit(0, 10) #A
four_digit_display.setDigit(1, 11) #B
four_digit_display.setDigit(2, 12) #C
four_digit_display.setDigit(3, 13) #D
time.sleep(0.5)
print "set score 07:03"
four_digit_display.setScore(7, 3)
time.sleep(0.5)
print "turn all lights on"
four_digit_display.allOn()
time.sleep(0.5)
print "turn all lights off"
four_digit_display.allOff()
time.sleep(0.5)
print "done" | cloudmesh/pi/grove_4_digit_display.py | import time
import grovepi
class Grove4DigitDisplay:
def __init__(self, pin = 5):
"""
initialize 4 digit display at pin = 5 by default
connect to grovePi port D5
"""
self.display = pin
grovepi.pinMode(self.display, "OUTPUT")
grovepi.fourDigit_init(self.display)
def setBrightness(self, value = 0):
"""
set brightness of the 4 digit display 0 - 8
brightness set to 0 by default
"""
grovepi.fourDigit_brightness(self.display, value)
def setNumber(self, value = 0, leading_zero = 1):
"""
display a number on 4 digit display
by default display number 0 without leading zeroes
"""
grovepi.fourDigit_number(self.display, value, leading_zero)
def setDigit(self, position = 0, digit = 0):
"""
set a particular digit at a particular position
position 0 - 3 (0 = leftmost position)
digit 0 - 15 (0-9A-F)
by default set digit 0 at position 0
"""
grovepi.fourDigit_digit(self.display, position, digit)
def setScore(self, left_score = 0, right_score = 0):
"""
display score , i.e two 2 digit values separated by :
by default display 00:00
"""
grovepi.fourDigit_score(self.display, left_score, right_score)
def monitorAnalog(self, pin = 0 , seconds = 0):
"""
monitor and display the value of an analog pin for some nuber of seconds
by default monitor analog pin 0 for 0 seconds
"""
grovepi.fourDigit_monitor(self.display, pin, seconds)
def allOn(self):
"""
switch all lights on
"""
grovepi.fourDigit_on(self.display)
def allOff(self):
"""
switch all lights off
"""
grovepi.fourDigit_off(self.display)
if __name__ == "__main__":
print "initialize"
four_digit_display = Grove4DigitDisplay()
print "set brightness"
four_digit_display.setBrightness()
while True:
print "set number 5 without leading zeros"
four_digit_display.setNumber(5, 1)
time.sleep(0.5)
print "set number 5 with leading zeros"
four_digit_display.setNumber(5, 0)
time.sleep(0.5)
print "set digits ABCD"
four_digit_display.setDigit(0, 10) #A
four_digit_display.setDigit(1, 11) #B
four_digit_display.setDigit(2, 12) #C
four_digit_display.setDigit(3, 13) #D
time.sleep(0.5)
print "set score 07:03"
four_digit_display.setScore(7, 3)
time.sleep(0.5)
print "turn all lights on"
four_digit_display.allOn()
time.sleep(0.5)
print "turn all lights off"
four_digit_display.allOff()
time.sleep(0.5)
print "done" | 0.39036 | 0.548915 |
from typing import (
Optional,
Tuple,
)
import numpy as np
from packaging import version
from pandas.core.exchange.dataframe_protocol import (
Buffer,
DlpackDeviceType,
)
_NUMPY_HAS_DLPACK = version.parse(np.__version__) >= version.parse("1.22.0")
class PandasBuffer(Buffer):
"""
Data in the buffer is guaranteed to be contiguous in memory.
"""
def __init__(self, x: np.ndarray, allow_copy: bool = True) -> None:
"""
Handle only regular columns (= numpy arrays) for now.
"""
if not x.strides == (x.dtype.itemsize,):
# The protocol does not support strided buffers, so a copy is
# necessary. If that's not allowed, we need to raise an exception.
if allow_copy:
x = x.copy()
else:
raise RuntimeError(
"Exports cannot be zero-copy in the case "
"of a non-contiguous buffer"
)
# Store the numpy array in which the data resides as a private
# attribute, so we can use it to retrieve the public attributes
self._x = x
@property
def bufsize(self) -> int:
"""
Buffer size in bytes.
"""
return self._x.size * self._x.dtype.itemsize
@property
def ptr(self) -> int:
"""
Pointer to start of the buffer as an integer.
"""
return self._x.__array_interface__["data"][0]
def __dlpack__(self):
"""
Represent this structure as DLPack interface.
"""
if _NUMPY_HAS_DLPACK:
return self._x.__dlpack__()
raise NotImplementedError("__dlpack__")
def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]:
"""
Device type and device ID for where the data in the buffer resides.
"""
return (DlpackDeviceType.CPU, None)
def __repr__(self) -> str:
return (
"PandasBuffer("
+ str(
{
"bufsize": self.bufsize,
"ptr": self.ptr,
"device": self.__dlpack_device__()[0].name,
}
)
+ ")"
) | pandas/core/exchange/buffer.py | from typing import (
Optional,
Tuple,
)
import numpy as np
from packaging import version
from pandas.core.exchange.dataframe_protocol import (
Buffer,
DlpackDeviceType,
)
_NUMPY_HAS_DLPACK = version.parse(np.__version__) >= version.parse("1.22.0")
class PandasBuffer(Buffer):
"""
Data in the buffer is guaranteed to be contiguous in memory.
"""
def __init__(self, x: np.ndarray, allow_copy: bool = True) -> None:
"""
Handle only regular columns (= numpy arrays) for now.
"""
if not x.strides == (x.dtype.itemsize,):
# The protocol does not support strided buffers, so a copy is
# necessary. If that's not allowed, we need to raise an exception.
if allow_copy:
x = x.copy()
else:
raise RuntimeError(
"Exports cannot be zero-copy in the case "
"of a non-contiguous buffer"
)
# Store the numpy array in which the data resides as a private
# attribute, so we can use it to retrieve the public attributes
self._x = x
@property
def bufsize(self) -> int:
"""
Buffer size in bytes.
"""
return self._x.size * self._x.dtype.itemsize
@property
def ptr(self) -> int:
"""
Pointer to start of the buffer as an integer.
"""
return self._x.__array_interface__["data"][0]
def __dlpack__(self):
"""
Represent this structure as DLPack interface.
"""
if _NUMPY_HAS_DLPACK:
return self._x.__dlpack__()
raise NotImplementedError("__dlpack__")
def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]:
"""
Device type and device ID for where the data in the buffer resides.
"""
return (DlpackDeviceType.CPU, None)
def __repr__(self) -> str:
return (
"PandasBuffer("
+ str(
{
"bufsize": self.bufsize,
"ptr": self.ptr,
"device": self.__dlpack_device__()[0].name,
}
)
+ ")"
) | 0.871064 | 0.285339 |
from __future__ import absolute_import, print_function
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
from model_utils.models import TimeStampedModel
from rest_framework.reverse import reverse
from ..files.models import RelatedFile
class Portfolio(TimeStampedModel):
name = models.CharField(max_length=255, help_text=_('The name of the portfolio'))
creator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='portfolios')
accounts_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='accounts_file_portfolios')
location_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='location_file_portfolios')
reinsurance_info_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='reinsurance_info_file_portfolios')
reinsurance_scope_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='reinsurance_scope_file_portfolios')
def __str__(self):
return self.name
def get_absolute_url(self, request=None):
return reverse('portfolio-detail', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_create_analysis_url(self, request=None):
return reverse('portfolio-create-analysis', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_accounts_file_url(self, request=None):
return reverse('portfolio-accounts-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_location_file_url(self, request=None):
return reverse('portfolio-location-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_reinsurance_info_file_url(self, request=None):
return reverse('portfolio-reinsurance-info-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_reinsurance_scope_file_url(self, request=None):
return reverse('portfolio-reinsurance-scope-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_storage_url(self, request=None):
return reverse('portfolio-storage-links', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
class PortfolioStatus(TimeStampedModel):
def __str__(self):
pass
@receiver(post_delete, sender=Portfolio)
def delete_connected_files(sender, instance, **kwargs):
""" Post delete handler to clear out any dangaling analyses files
"""
files_for_removal = [
'accounts_file',
'location_file',
'reinsurance_info_file',
'reinsurance_scope_file',
]
for ref in files_for_removal:
file_ref = getattr(instance, ref)
if file_ref:
file_ref.delete() | src/server/oasisapi/portfolios/models.py | from __future__ import absolute_import, print_function
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
from model_utils.models import TimeStampedModel
from rest_framework.reverse import reverse
from ..files.models import RelatedFile
class Portfolio(TimeStampedModel):
name = models.CharField(max_length=255, help_text=_('The name of the portfolio'))
creator = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='portfolios')
accounts_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='accounts_file_portfolios')
location_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='location_file_portfolios')
reinsurance_info_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='reinsurance_info_file_portfolios')
reinsurance_scope_file = models.ForeignKey(RelatedFile, on_delete=models.CASCADE, blank=True, null=True, default=None, related_name='reinsurance_scope_file_portfolios')
def __str__(self):
return self.name
def get_absolute_url(self, request=None):
return reverse('portfolio-detail', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_create_analysis_url(self, request=None):
return reverse('portfolio-create-analysis', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_accounts_file_url(self, request=None):
return reverse('portfolio-accounts-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_location_file_url(self, request=None):
return reverse('portfolio-location-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_reinsurance_info_file_url(self, request=None):
return reverse('portfolio-reinsurance-info-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_reinsurance_scope_file_url(self, request=None):
return reverse('portfolio-reinsurance-scope-file', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
def get_absolute_storage_url(self, request=None):
return reverse('portfolio-storage-links', kwargs={'version': 'v1', 'pk': self.pk}, request=request)
class PortfolioStatus(TimeStampedModel):
def __str__(self):
pass
@receiver(post_delete, sender=Portfolio)
def delete_connected_files(sender, instance, **kwargs):
""" Post delete handler to clear out any dangaling analyses files
"""
files_for_removal = [
'accounts_file',
'location_file',
'reinsurance_info_file',
'reinsurance_scope_file',
]
for ref in files_for_removal:
file_ref = getattr(instance, ref)
if file_ref:
file_ref.delete() | 0.579757 | 0.069038 |
import sys
import os
import argparse
import importlib
import getpass
from datetime import datetime
import logging
import subprocess
import socket
import boto3
from click import echo
from drift.management.gittools import get_branch, get_commit, get_repo_url, get_git_version
from drift.utils import pretty, set_pretty_settings, PRETTY_FORMATTER, PRETTY_STYLE
from driftconfig.util import get_default_drift_config_and_source, ConfigNotFound
from drift.flaskfactory import AppRootNotFound
def get_commands():
commands = [
f[:-3]
for f in os.listdir(os.path.join(os.path.dirname(__file__), "commands"))
if not f.startswith("_") and f.endswith(".py")
]
return commands
def execute_cmd():
try:
return do_execute_cmd(sys.argv[1:])
except AppRootNotFound as e:
# A very common case that needs pretty printing
echo(str(e))
except KeyboardInterrupt:
echo(" Aborting because you said so.")
def do_execute_cmd(argv):
valid_commands = get_commands()
parser = argparse.ArgumentParser(description="")
parser.add_argument(
'--tier',
help="Specify which tenant to use. Will override any other settings."
)
parser.add_argument(
'--tenant', '-t',
help="Specify which tenant to use. Will override any other settings."
)
parser.add_argument(
'--config',
help="Specify which config source to use. Will override 'DRIFT_CONFIG_URL' environment variable."
)
parser.add_argument(
"--loglevel", '-l',
help="Logging level name. Default is WARNING.", default='WARNING'
)
parser.add_argument(
'--formatter',
help="Specify which formatter to use for text output. Default is {}.".format(
PRETTY_FORMATTER)
)
parser.add_argument(
'--style',
help="Specify which style to use for text output. Default is {}.".format(
PRETTY_STYLE)
)
parser.add_argument("-v", "--verbose", help="I am verbose!", action="store_true")
subparsers = parser.add_subparsers(help="sub-command help", dest="cmd")
subparsers.required = True
for cmd in valid_commands:
module = importlib.import_module("drift.management.commands." + cmd)
subparser = subparsers.add_parser(cmd, help="Subcommands for {}".format(cmd))
if hasattr(module, "get_options"):
module.get_options(subparser)
subparser.set_defaults(func=module.run_command)
args = parser.parse_args(argv)
if args.loglevel:
logging.basicConfig(level=args.loglevel)
if args.config:
os.environ['DRIFT_CONFIG_URL'] = args.config
try:
conf, source = get_default_drift_config_and_source()
echo("Drift configuration source: {!r}".format(source))
except ConfigNotFound:
pass
set_pretty_settings(formatter=args.formatter, style=args.style)
if args.tier:
os.environ['DRIFT_TIER'] = args.tier
echo("Tier set to {!r}.".format(args.tier))
if args.tenant:
os.environ['DRIFT_DEFAULT_TENANT'] = args.tenant
echo("Default tenant set to {!r}.".format(args.tenant))
if 'DRIFT_APP_ROOT' in os.environ:
echo("App root set: DRIFT_APP_ROOT={!r}".format(os.environ['DRIFT_APP_ROOT']))
args.func(args)
def get_app_version():
"""
Return the version of the current app.
It's gotten by running: python setup.py --version
"""
# HACK: Get app root:
from drift.utils import get_app_root
app_root = get_app_root()
p = subprocess.Popen(
[sys.executable, 'setup.py', '--version'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=app_root
)
out, err = p.communicate()
out, err = (str(s.decode("utf-8")) for s in (out, err))
if p.returncode != 0:
raise RuntimeError(
"Can't get version of this deployable. Error: {} - {}".format(p.returncode, err)
)
version = out.strip()
return version
def check_connectivity(instances):
SSH_PORT = 22
for inst in instances:
ip_address = inst.private_ip_address
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
result = sock.connect_ex((ip_address, SSH_PORT))
if result != 0:
raise RuntimeError("Unable to connect to '%s'. Is your VPN connection active?" % ip_address)
def get_ec2_instances(region, tier, service_name):
"""
Returns all EC2 instances on the specified region, tier and service.
Raises an error if any of the instances are not reachable in SSH
"""
filters = {
'tag:service-name': service_name,
"instance-state-name": "running",
"tag:tier": tier,
}
echo("Finding ec2 instances in region {!r} from filters: {!r}".format(region, filters))
conn = boto3.client('ec2', region_name=region)
reservations = conn.get_all_reservations(filters=filters)
instances = [i for r in reservations for i in r.instances]
if not instances:
raise RuntimeError("Found no running ec2 instances in region '%s', tier '%s' and service '%s'" % (region, tier, service_name))
check_connectivity(instances)
return instances
def create_deployment_manifest(method, comment=None, deployable_name=None):
"""Returns a dict describing the current deployable."""
git_version = get_git_version()
git_commit = get_commit()
info = {
'method': method,
'deployable': deployable_name,
'version': get_app_version(),
'username': getpass.getuser(),
'comment': comment,
'datetime': datetime.utcnow().isoformat(),
'git_branch': get_branch(),
'git_commit': git_commit,
'git_commit_url': get_repo_url() + "/commit/" + git_commit,
'git_release': git_version['tag'] if git_version else 'untagged-branch',
}
return info | drift/management/__init__.py | import sys
import os
import argparse
import importlib
import getpass
from datetime import datetime
import logging
import subprocess
import socket
import boto3
from click import echo
from drift.management.gittools import get_branch, get_commit, get_repo_url, get_git_version
from drift.utils import pretty, set_pretty_settings, PRETTY_FORMATTER, PRETTY_STYLE
from driftconfig.util import get_default_drift_config_and_source, ConfigNotFound
from drift.flaskfactory import AppRootNotFound
def get_commands():
commands = [
f[:-3]
for f in os.listdir(os.path.join(os.path.dirname(__file__), "commands"))
if not f.startswith("_") and f.endswith(".py")
]
return commands
def execute_cmd():
try:
return do_execute_cmd(sys.argv[1:])
except AppRootNotFound as e:
# A very common case that needs pretty printing
echo(str(e))
except KeyboardInterrupt:
echo(" Aborting because you said so.")
def do_execute_cmd(argv):
valid_commands = get_commands()
parser = argparse.ArgumentParser(description="")
parser.add_argument(
'--tier',
help="Specify which tenant to use. Will override any other settings."
)
parser.add_argument(
'--tenant', '-t',
help="Specify which tenant to use. Will override any other settings."
)
parser.add_argument(
'--config',
help="Specify which config source to use. Will override 'DRIFT_CONFIG_URL' environment variable."
)
parser.add_argument(
"--loglevel", '-l',
help="Logging level name. Default is WARNING.", default='WARNING'
)
parser.add_argument(
'--formatter',
help="Specify which formatter to use for text output. Default is {}.".format(
PRETTY_FORMATTER)
)
parser.add_argument(
'--style',
help="Specify which style to use for text output. Default is {}.".format(
PRETTY_STYLE)
)
parser.add_argument("-v", "--verbose", help="I am verbose!", action="store_true")
subparsers = parser.add_subparsers(help="sub-command help", dest="cmd")
subparsers.required = True
for cmd in valid_commands:
module = importlib.import_module("drift.management.commands." + cmd)
subparser = subparsers.add_parser(cmd, help="Subcommands for {}".format(cmd))
if hasattr(module, "get_options"):
module.get_options(subparser)
subparser.set_defaults(func=module.run_command)
args = parser.parse_args(argv)
if args.loglevel:
logging.basicConfig(level=args.loglevel)
if args.config:
os.environ['DRIFT_CONFIG_URL'] = args.config
try:
conf, source = get_default_drift_config_and_source()
echo("Drift configuration source: {!r}".format(source))
except ConfigNotFound:
pass
set_pretty_settings(formatter=args.formatter, style=args.style)
if args.tier:
os.environ['DRIFT_TIER'] = args.tier
echo("Tier set to {!r}.".format(args.tier))
if args.tenant:
os.environ['DRIFT_DEFAULT_TENANT'] = args.tenant
echo("Default tenant set to {!r}.".format(args.tenant))
if 'DRIFT_APP_ROOT' in os.environ:
echo("App root set: DRIFT_APP_ROOT={!r}".format(os.environ['DRIFT_APP_ROOT']))
args.func(args)
def get_app_version():
"""
Return the version of the current app.
It's gotten by running: python setup.py --version
"""
# HACK: Get app root:
from drift.utils import get_app_root
app_root = get_app_root()
p = subprocess.Popen(
[sys.executable, 'setup.py', '--version'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=app_root
)
out, err = p.communicate()
out, err = (str(s.decode("utf-8")) for s in (out, err))
if p.returncode != 0:
raise RuntimeError(
"Can't get version of this deployable. Error: {} - {}".format(p.returncode, err)
)
version = out.strip()
return version
def check_connectivity(instances):
SSH_PORT = 22
for inst in instances:
ip_address = inst.private_ip_address
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
result = sock.connect_ex((ip_address, SSH_PORT))
if result != 0:
raise RuntimeError("Unable to connect to '%s'. Is your VPN connection active?" % ip_address)
def get_ec2_instances(region, tier, service_name):
"""
Returns all EC2 instances on the specified region, tier and service.
Raises an error if any of the instances are not reachable in SSH
"""
filters = {
'tag:service-name': service_name,
"instance-state-name": "running",
"tag:tier": tier,
}
echo("Finding ec2 instances in region {!r} from filters: {!r}".format(region, filters))
conn = boto3.client('ec2', region_name=region)
reservations = conn.get_all_reservations(filters=filters)
instances = [i for r in reservations for i in r.instances]
if not instances:
raise RuntimeError("Found no running ec2 instances in region '%s', tier '%s' and service '%s'" % (region, tier, service_name))
check_connectivity(instances)
return instances
def create_deployment_manifest(method, comment=None, deployable_name=None):
"""Returns a dict describing the current deployable."""
git_version = get_git_version()
git_commit = get_commit()
info = {
'method': method,
'deployable': deployable_name,
'version': get_app_version(),
'username': getpass.getuser(),
'comment': comment,
'datetime': datetime.utcnow().isoformat(),
'git_branch': get_branch(),
'git_commit': git_commit,
'git_commit_url': get_repo_url() + "/commit/" + git_commit,
'git_release': git_version['tag'] if git_version else 'untagged-branch',
}
return info | 0.388618 | 0.06216 |
import sys
from PyQt5 import QtCore, QtGui, QtWidgets, uic
import database_receita
from datetime import date, datetime
# Path to the Qt Designer .ui file describing this screen's layout.
qt_tela_inicial = "telas/tela_gerenciar_fabricacao.ui"
# uic.loadUiType compiles the .ui file at import time and returns the
# generated form class (Ui_MainWindow) plus the Qt base class it extends.
Ui_MainWindow, QtBaseClass = uic.loadUiType(qt_tela_inicial)
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Management screen for production batches ("fabricacoes").

    Lets the user browse batches filtered by sale status, load one batch
    into the edit widgets by double-clicking it, and edit or delete it.

    NOTE(review): several methods (editar, combo_ingrediente_selecionado,
    carrega_embalagens, excluir_item) operate on packaging ("embalagens")
    widgets and look copied from another screen — confirm they match the
    widgets actually present in tela_gerenciar_fabricacao.ui.
    """

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        # Wire widget signals to their handlers.
        self.btn_cancelar.pressed.connect(self.cancelar_edicao)
        self.btn_voltar.pressed.connect(self.fechar_tela)
        self.btn_editar.pressed.connect(self.editar)
        self.btn_excluir.pressed.connect(self.excluir_item)
        self.combo_status.currentIndexChanged.connect(self.carrega_fabricacoes)
        self.list_fabricacoes.itemDoubleClicked.connect(self.iniciar_edicao)

    def iniciar_edicao(self, item):
        """Load the double-clicked batch into the edit widgets.

        List items are formatted '<id> - <yyyy/MM/dd> - <extra>'. The list
        is disabled while editing; the edit widgets and action buttons are
        enabled and filled from the database record.
        """
        id_fabricacao, data, _ = item.text().split(' - ')
        self.list_fabricacoes.setEnabled(False)
        self.date_data.setDate(datetime.strptime(data, '%Y/%m/%d'))
        rendimento, unidade, tempo = database_receita.select_fabricacao_por_id(id_fabricacao)
        self.txt_rendimento.setPlaceholderText(rendimento)
        self.txt_rendimento.setText(rendimento)
        self.txt_rendimento.setEnabled(True)
        self.txt_unidade.setPlaceholderText(unidade)
        self.txt_unidade.setText(unidade)
        self.txt_unidade.setEnabled(True)
        self.spin_tempo.setValue(int(tempo))
        self.spin_tempo.setEnabled(True)
        self.btn_editar.setEnabled(True)
        self.btn_excluir.setEnabled(True)
        self.btn_cancelar.setEnabled(True)

    def cancelar_edicao(self):
        """Reset every edit widget to its default and re-enable the list."""
        self.list_fabricacoes.setEnabled(True)
        data_hoje = QtCore.QDate.fromString(str(date.today()), 'yyyy-MM-dd')
        self.date_data.setDate(data_hoje)
        self.txt_rendimento.clear()
        self.txt_rendimento.setPlaceholderText('')
        self.txt_rendimento.setEnabled(False)
        self.txt_unidade.clear()
        self.spin_tempo.setValue(0)
        self.spin_tempo.setEnabled(False)
        self.btn_editar.setEnabled(False)
        self.btn_excluir.setEnabled(False)
        self.btn_cancelar.setEnabled(False)

    def editar(self):
        """Persist the edited packaging record, unless it would duplicate
        an existing (size, ingredient, brand) combination."""
        id_ingrediente = str(self.combo_ingrediente.currentText()).split(' - ')[0]
        id_embalagem = str(self.list_embalagens.selectedItems()[0].text()).split(' - ')[0]
        tamanho = self.txt_tamanho.text()
        id_marca = database_receita.select_marca_por_nome(self.txt_marca.text())
        if not database_receita.verifica_embalagem_duplicada(tamanho, id_ingrediente, id_marca):
            database_receita.update_embalagem(id_embalagem, id_marca, tamanho)
            self.limpar()

    def combo_ingrediente_selecionado(self, item):
        """Show the selected ingredient's unit and reload its packagings.

        Combo items are '<code> - <name> - <unit>'; an item that does not
        match that shape (e.g. the empty selection) just clears the unit.
        """
        try:
            codigo, _, unidade = str(self.combo_ingrediente.currentText()).split(' - ')
            self.txt_unidade.setText(unidade)
            self.carrega_embalagens(codigo)
        except Exception:
            self.txt_unidade.clear()

    def carrega_embalagens(self, id_ingrediente):
        """Repopulate the packagings list for one ingredient.

        Items come back formatted 'COD - MARCA - TAMANHO - UNIDADE'.
        """
        self.list_embalagens.clear()
        lista_embalagens = database_receita.select_embalagens_nomes_por_ingrediente(id_ingrediente)
        self.list_embalagens.addItems(lista_embalagens)

    def carrega_fabricacoes(self, indice):
        """Repopulate the batch list according to the status filter.

        Combo items are '<code> - <label>'. BUG FIX: QComboBox.itemText()
        returns a string, but the original compared it to the ints 0 and 1,
        so those branches could never match and the 'else' branch always
        ran. The codes are now compared as strings.
        """
        self.list_fabricacoes.clear()
        status = self.combo_status.itemText(indice).split(' - ')[0]
        if status == '0':
            ids = database_receita.select_fabricacoes_vendidas()
        elif status == '1':
            ids = database_receita.select_fabricacoes_ids()
        else:
            # NOTE(review): this keeps the original set difference
            # (sold ids not present in all ids), but for a "not sold"
            # filter the operands look inverted — confirm intent.
            vendidas = database_receita.select_fabricacoes_vendidas()
            todas = database_receita.select_fabricacoes_ids()
            ids = [fabri for fabri in vendidas if fabri not in todas]
        lista_fabricacoes = database_receita.select_fabricacoes_por_lista_ids(ids)
        self.list_fabricacoes.addItems(lista_fabricacoes)

    def fechar_tela(self):
        """Close this window."""
        self.close()

    def limpar(self):
        """Clear the packagings list and reset the edit widgets."""
        self.list_embalagens.clear()
        self.cancelar_edicao()

    def excluir_item(self):
        """Delete the selected packaging entry, then reset the form.

        BUG FIX: the original also called self.carrega_tb_dados(), a method
        that does not exist on this class; the resulting AttributeError was
        silently swallowed by a bare 'except:'. The call is removed — the
        form reset below happened on both paths anyway.
        """
        try:
            item_selec = self.list_embalagens.selectedItems()[0].text()
            cod = item_selec.split(' - ')[0]
            database_receita.delete_embalagem(cod)
            self.cancelar_edicao()
            self.limpar()
        except Exception:
            # No selection (IndexError) or a DB failure: just reset the form.
            self.cancelar_edicao()
            self.limpar()
from PyQt5 import QtCore, QtGui, QtWidgets, uic
import database_receita
from datetime import date, datetime
qt_tela_inicial = "telas/tela_gerenciar_fabricacao.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qt_tela_inicial)
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Screen to manage 'fabricacoes' (production batches).

    Lets the user filter batches by sale status, edit a batch's yield,
    unit and production time, and delete packaging entries.
    """

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        # Wire buttons and widgets to their handlers.
        self.btn_cancelar.pressed.connect(self.cancelar_edicao)
        self.btn_voltar.pressed.connect(self.fechar_tela)
        self.btn_editar.pressed.connect(self.editar)
        self.btn_excluir.pressed.connect(self.excluir_item)
        self.combo_status.currentIndexChanged.connect(self.carrega_fabricacoes)
        self.list_fabricacoes.itemDoubleClicked.connect(self.iniciar_edicao)

    def iniciar_edicao(self, item):
        """Load the double-clicked batch into the edit widgets."""
        item = item.text()
        # List rows look like '<id> - <date> - <...>'.
        id_fabricacao, data, _ = item.split(' - ')
        self.list_fabricacoes.setEnabled(False)
        data_fabricacao = datetime.strptime(data, '%Y/%m/%d')
        self.date_data.setDate(data_fabricacao)
        rendimento, unidade, tempo = database_receita.select_fabricacao_por_id(id_fabricacao)
        self.txt_rendimento.setPlaceholderText(rendimento)
        self.txt_rendimento.setText(rendimento)
        self.txt_rendimento.setEnabled(True)
        self.txt_unidade.setPlaceholderText(unidade)
        self.txt_unidade.setText(unidade)
        self.txt_unidade.setEnabled(True)
        self.spin_tempo.setValue(int(tempo))
        self.spin_tempo.setEnabled(True)
        self.btn_editar.setEnabled(True)
        self.btn_excluir.setEnabled(True)
        self.btn_cancelar.setEnabled(True)

    def cancelar_edicao(self):
        """Reset the edit widgets back to their disabled default state."""
        self.list_fabricacoes.setEnabled(True)
        data_hoje = str(date.today())
        data_hoje = QtCore.QDate.fromString(data_hoje, 'yyyy-MM-dd')
        self.date_data.setDate(data_hoje)
        self.txt_rendimento.clear()
        self.txt_rendimento.setPlaceholderText('')
        self.txt_rendimento.setEnabled(False)
        self.txt_unidade.clear()
        self.spin_tempo.setValue(0)
        self.spin_tempo.setEnabled(False)
        self.btn_editar.setEnabled(False)
        self.btn_excluir.setEnabled(False)
        self.btn_cancelar.setEnabled(False)

    def editar(self):
        """Persist an edited packaging entry if it is not a duplicate."""
        # NOTE(review): this handler reads combo_ingrediente / txt_tamanho /
        # txt_marca, which no other code on this screen touches — it looks
        # copied from the packaging screen; confirm the .ui file defines
        # these widgets.
        id_ingrediente = str(self.combo_ingrediente.currentText()).split(' - ')[0]
        id_embalagem = str(self.list_embalagens.selectedItems()[0].text()).split(' - ')[0]
        tamanho = self.txt_tamanho.text()
        nome_marca = self.txt_marca.text()
        id_marca = database_receita.select_marca_por_nome(nome_marca)
        if not database_receita.verifica_embalagem_duplicada(tamanho, id_ingrediente, id_marca):
            database_receita.update_embalagem(id_embalagem, id_marca, tamanho)
        self.limpar()

    def combo_ingrediente_selecionado(self, item):
        """Show the selected ingredient's unit and load its packages."""
        try:
            # Combo entries look like '<code> - <name> - <unit>'.
            codigo, _, unidade = str(self.combo_ingrediente.currentText()).split(' - ')
            self.txt_unidade.setText(unidade)
            self.carrega_embalagens(codigo)
        except Exception:
            # Malformed/empty combo text: just clear the unit field.
            self.txt_unidade.clear()

    def carrega_embalagens(self, id_ingrediente):
        """Fill the packaging list (rows: COD - MARCA - TAMANHO - UNIDADE)."""
        self.list_embalagens.clear()
        lista_embalagens = database_receita.select_embalagens_nomes_por_ingrediente(id_ingrediente)
        self.list_embalagens.addItems(lista_embalagens)

    def carrega_fabricacoes(self, indice):
        """Populate the batch list according to the status combo selection.

        Combo items look like '<code> - <label>'; code '0' selects sold
        batches, '1' selects all batches, anything else selects the batches
        not sold yet.
        """
        self.list_fabricacoes.clear()
        status = self.combo_status.itemText(indice).split(' - ')[0]
        # BUG FIX: `status` is a string, so the old comparisons against the
        # integers 0/1 were always False and every selection fell into the
        # `else` branch.
        if status == '0':
            ids_vendidas = database_receita.select_fabricacoes_vendidas()
            lista_fabricacoes = database_receita.select_fabricacoes_por_lista_ids(ids_vendidas)
        elif status == '1':
            ids_todas = database_receita.select_fabricacoes_ids()
            lista_fabricacoes = database_receita.select_fabricacoes_por_lista_ids(ids_todas)
        else:
            vendidas = database_receita.select_fabricacoes_vendidas()
            todas = database_receita.select_fabricacoes_ids()
            # BUG FIX: "not sold" is all-minus-sold; the old loop scanned the
            # sold list for ids missing from the full list, which is always
            # empty. (Debug print removed.)
            nao_vendidas = [fab for fab in todas if fab not in vendidas]
            lista_fabricacoes = database_receita.select_fabricacoes_por_lista_ids(nao_vendidas)
        self.list_fabricacoes.addItems(lista_fabricacoes)

    def fechar_tela(self):
        """Close this window."""
        self.close()

    def limpar(self):
        """Clear the packaging list and reset the edit widgets."""
        self.list_embalagens.clear()
        self.cancelar_edicao()

    def excluir_item(self):
        """Delete the selected packaging entry; no-op if nothing selected."""
        try:
            item_selec = self.list_embalagens.selectedItems()[0].text()
            cod = item_selec.split(' - ')[0]
            database_receita.delete_embalagem(cod)
        except IndexError:
            # Nothing selected in the list.
            pass
        # BUG FIX: the old code called self.carrega_tb_dados(), which does
        # not exist on this class; the AttributeError was silently swallowed
        # by a bare `except`. The UI reset below is what effectively ran in
        # both branches, so it is now done unconditionally.
        self.cancelar_edicao()
        self.limpar()
import json
import sys
import argparse
import requests
import datetime
from markdown import markdown
from weasyprint import HTML
from thehive4py.api import TheHiveApi
HTML_TEMPLATE = u"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="stylesheet" type="text/css" href="./codehilite.css">
</head>
<body>
{}
</body>
</html>
"""
class TheHiveExtendedApi(TheHiveApi):
    """TheHiveApi extended with case-task and task-log search endpoints."""

    def get_case_tasks(self, caseId):
        """Search all tasks belonging to a case.

        :param caseId: Case identifier
        :return: requests.Response whose JSON body is the case task list
        """
        req = self.url + "/api/case/task/_search"
        params = {
            "range": "all",
            "sort": "startDate"
        }
        data = {
            'query': {
                '_parent': {
                    '_type': 'case',
                    '_query': {
                        '_id': caseId
                    }
                }
            }
        }
        try:
            # BUG FIX: `params` was built but never sent (and "range=all" was
            # hard-coded into the URL instead); pass it like get_task_logs
            # does so pagination and sort order actually apply.
            return requests.post(req, json=data, params=params, proxies=self.proxies, auth=self.auth)
        except requests.exceptions.RequestException as e:
            sys.exit("Error: {}".format(e))

    def get_task_logs(self, taskId):
        """Search all log records of a task.

        :param taskId: Task identifier
        :return: requests.Response whose JSON body is the task log list
        """
        req = self.url + "/api/case/task/log/_search"
        params = {
            "range": "all",
            "sort": "startDate"
        }
        data = {
            "query": {
                "_and": [{
                    "_parent": {
                        "_type": "case_task",
                        "_query": {
                            "_id": taskId
                        }
                    }
                }]
            }
        }
        try:
            return requests.post(req, json=data, params=params, proxies=self.proxies, auth=self.auth)
        except requests.exceptions.RequestException as e:
            sys.exit("Error: {}".format(e))
class TheHiveRetriever:
    """Fetches a TheHive case and renders it as markdown / PDF."""

    def __init__(self, host, user, password, proxies=None):
        self.api = TheHiveExtendedApi(host, user, password, proxies=proxies)

    def fetch_case(self, case_id):
        """Return the whole case (description, observables, tasks) as markdown."""
        case = self.api.get_case(case_id).json()
        title = case['title']
        description = case['description']
        # created_date = case['createdAt']
        # severity = case['severity']
        # tags = case['tags']
        observables = self.fetch_observables(case_id)
        tasks = self.fetch_tasks(case_id)
        # FIX: `unicode()` only exists on Python 2; a u'' literal is valid on
        # both Python 2 and 3.
        case_markdown = u''
        case_markdown += u'{}\n{}\n\n'.format(title, '---')
        case_markdown += u'{}\n\n'.format(description)
        if observables:
            case_markdown += observables
        if tasks:
            case_markdown += tasks
        return case_markdown

    def fetch_observables(self, case_id):
        """Return the case observables as a markdown table, or None if empty."""
        obs = self.api.get_case_observables(case_id).json()
        if not obs:
            return None
        observables_markdown = u'## Observables\n\nData | Type | Message | Analysis\n---|---|---|---\n'
        for artifact in obs:
            # BUG FIX: each table row must end with a newline; without it all
            # observables were concatenated into a single markdown row.
            observables_markdown += u'{} | {} | {} | {}\n'.format(artifact['data'],
                                                                  artifact['dataType'],
                                                                  artifact['message'],
                                                                  artifact['reports'] if artifact['reports'] else 'Not available')
        return observables_markdown + '\n'

    def fetch_tasks(self, case_id):
        """Return all case tasks (with their logs) as markdown, or None."""
        tasks = self.api.get_case_tasks(case_id).json()
        if not tasks:
            return None
        # FIX: u'' literal instead of the Python-2-only unicode() builtin.
        tasks_markdown = u'## Tasks\n\n'
        for task in tasks:
            tasks_markdown += u'### {}\n\n'.format(task['title'])
            tasks_markdown += self.fetch_task_logs(task['id'])
        return tasks_markdown

    def fetch_task_logs(self, task_id):
        """Return one task's log entries as markdown sections."""
        logs = self.api.get_task_logs(task_id).json()
        task_log_markdown = u''
        for log in logs:
            # startDate is epoch milliseconds; render as ISO-8601 UTC.
            date = datetime.datetime.utcfromtimestamp(log['startDate']/1000).strftime('%Y-%m-%dT%H:%M:%SZ')
            task_log_markdown += u'{} ({})\n---\n\n{}\n\n'.format(
                log['createdBy'],
                date,
                log['message']
            )
        return task_log_markdown

    def case_to_pdf(self, case_id, output_filename):
        """Render the case markdown to HTML and write it as a PDF file."""
        case_md = self.fetch_case(case_id)
        case_html = HTML_TEMPLATE.format(markdown(case_md, output_format='html5'))
        with open(output_filename, "w+b") as out:
            HTML(string=case_html).write_pdf(out)
        return True
# CLI entry point: fetch one TheHive case and render it to a PDF file.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--url', required=True, type=str, help='TheHive server URL')
    parser.add_argument('-u', '--user', required=True, type=str, help='Username')
    parser.add_argument('-p', '--password', required=True, type=str, help='User password')
    parser.add_argument('-c', '--case', required=True, type=str, help='Case ID, could be retrieved from case URL')
    parser.add_argument('-o', '--output', required=True, type=str, help='PDF output filename')
    args = parser.parse_args()
    the_hive = TheHiveRetriever(args.url, args.user, args.password)
    # case_to_pdf always returns True on success; failures raise/exit earlier.
    if the_hive.case_to_pdf(args.case, args.output):
        print("Successfully written report to {}".format(args.output))
import json
import sys
import argparse
import requests
import datetime
from markdown import markdown
from weasyprint import HTML
from thehive4py.api import TheHiveApi
HTML_TEMPLATE = u"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="stylesheet" type="text/css" href="./codehilite.css">
</head>
<body>
{}
</body>
</html>
"""
class TheHiveExtendedApi(TheHiveApi):
def get_case_tasks(self, caseId):
"""
:param caseId: Case identifier
:return: request.response object with case tasks list
"""
req = self.url + "/api/case/task/_search?range=all"
params = {
"range": "all",
"sort": "startDate"
}
data = {
'query': {
'_parent': {
'_type': 'case',
'_query': {
'_id': caseId
}
}
}
}
try:
return requests.post(req, json=data, proxies=self.proxies, auth=self.auth)
except requests.exceptions.RequestException as e:
sys.exit("Error: {}".format(e))
def get_task_logs(self, taskId):
"""
:param taskId: Task identifier
:return: request.response with task log records list
"""
req = self.url + "/api/case/task/log/_search"
params = {
"range": "all",
"sort": "startDate"
}
data = {
"query": {
"_and": [{
"_parent": {
"_type": "case_task",
"_query": {
"_id": taskId
}
}
}]
}
}
try:
return requests.post(req, json=data, params=params, proxies=self.proxies, auth=self.auth)
except requests.exceptions.RequestException as e:
sys.exit("Error: {}".format(e))
class TheHiveRetriever:
def __init__(self, host, user, password, proxies=None):
self.api = TheHiveExtendedApi(host, user, password, proxies=proxies)
def fetch_case(self, case_id):
case = self.api.get_case(case_id).json()
title = case['title']
description = case['description']
# created_date = case['createdAt']
# severity = case['severity']
# tags = case['tags']
observables = self.fetch_observables(case_id)
tasks = self.fetch_tasks(case_id)
case_markdown = unicode()
case_markdown += u'{}\n{}\n\n'.format(title, '---')
case_markdown += u'{}\n\n'.format(description)
if observables:
case_markdown += observables
if tasks:
case_markdown += tasks
return case_markdown
def fetch_observables(self, case_id):
obs = self.api.get_case_observables(case_id).json()
if not obs:
return None
observables_markdown = u'## Observables\n\nData | Type | Message | Analysis\n---|---|---|---\n'
for artifact in obs:
observables_markdown += u'{} | {} | {} | {}'.format(artifact['data'],
artifact['dataType'],
artifact['message'],
artifact['reports'] if artifact['reports'] else 'Not available')
return observables_markdown + '\n\n'
def fetch_tasks(self, case_id):
tasks = self.api.get_case_tasks(case_id).json()
if not tasks:
return None
tasks_markdown = unicode('## Tasks\n\n')
for task in tasks:
tasks_markdown += u'### {}\n\n'.format(task['title'])
tasks_markdown += self.fetch_task_logs(task['id'])
return tasks_markdown
def fetch_task_logs(self, task_id):
logs = self.api.get_task_logs(task_id).json()
task_log_markdown = u''
for log in logs:
date = datetime.datetime.utcfromtimestamp(log['startDate']/1000).strftime('%Y-%m-%dT%H:%M:%SZ')
task_log_markdown += u'{} ({})\n---\n\n{}\n\n'.format(
log['createdBy'],
date,
log['message']
)
return task_log_markdown
def case_to_pdf(self, case_id, output_filename):
case_md = self.fetch_case(case_id)
case_html = HTML_TEMPLATE.format(markdown(case_md, output_format='html5'))
with open(output_filename, "w+b") as out:
HTML(string=case_html).write_pdf(out)
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--url', required=True, type=str, help='TheHive server URL')
parser.add_argument('-u', '--user', required=True, type=str, help='Username')
parser.add_argument('-p', '--password', required=True, type=str, help='User password')
parser.add_argument('-c', '--case', required=True, type=str, help='Case ID, could be retrieved from case URL')
parser.add_argument('-o', '--output', required=True, type=str, help='PDF output filename')
args = parser.parse_args()
the_hive = TheHiveRetriever(args.url, args.user, args.password)
if the_hive.case_to_pdf(args.case, args.output):
print("Successfully written report to {}".format(args.output)) | 0.341143 | 0.08043 |
import os
import re
import redis
class RedisClient(object):
    """Thin convenience wrapper around a redis-py connection.

    The target DB is selected via the ``db`` constructor parameter;
    connection details come from the REDIS_JSON_CREDENTIALS environment
    variable, falling back to a local instance.
    """

    # Well-known database indices used by the application.
    WIKI_AUGMENTED_DB = 3
    WIKI_PAGE_DB = 4
    WIKI_SEARCH_DB = 5

    def __init__(self,
                 db: int = 0,
                 decode_responses: bool = True):
        """
        Created:
            29-May-2019
            <EMAIL>
        Updated:
            04-Dec-2019
            <EMAIL>
            * Use CredentialsFromJson
        Updated:
            23-Jan-2020
            <EMAIL>
            * Honor de DB parameter even when using an url. from_url ignores
              its param if already specified in the url
        """
        from ..dto import CredentialsFromJson
        url = None
        ca_file = None
        if 'REDIS_JSON_CREDENTIALS' in os.environ:
            credentials = CredentialsFromJson(os.environ['REDIS_JSON_CREDENTIALS'],
                                             'rediss')
            url = credentials.url
            ca_file = credentials.ca_file
        if not url:
            url = 'redis://localhost:6379/0'
        # Force the requested db index into the url (from_url would otherwise
        # keep the db already embedded in it).
        url = re.sub(r'/\d*$', f'/{db}', url)
        options = {
            'decode_responses': decode_responses
        }
        if url.startswith('rediss:') and ca_file:
            options['ssl_ca_certs'] = ca_file
        self.redis = redis.from_url(url, **options)
        self.url = CredentialsFromJson.sanitize_url(url, 'rediss')

    def size(self) -> int:
        """Return the number of keys in the current database."""
        return self.redis.dbsize()

    def clear(self):
        """Delete every key in the current database."""
        for key in self.redis.keys():
            self.redis.delete(key)

    def set(self,
            a_key: str,
            value: str) -> None:
        self.redis.set(a_key, value)

    def get(self,
            key) -> str:
        return self.redis.get(key)

    def has(self,
            key) -> bool:
        # FIX: redis EXISTS returns an integer count; coerce to match the
        # declared bool return type.
        return bool(self.redis.exists(key))

    def set_list(self,
                 a_key: str,
                 a_list: list) -> None:
        """Store a_list under a_key unless the key already exists."""
        if not self.has(a_key):
            self.redis.rpush(a_key, *a_list)

    def get_list(self,
                 a_key: str) -> list:
        # FIX: -1 is Redis shorthand for the last element, so this returns
        # the whole list instead of capping it at an arbitrary 9999999.
        return self.redis.lrange(a_key, 0, -1)

    def set_dict(self,
                 a_key: str,
                 a_dict: dict) -> None:
        """Store a_dict as a hash under a_key unless the key already exists."""
        if not self.has(a_key):
            # NOTE(review): hmset is deprecated in redis-py 3.x in favour of
            # hset(key, mapping=...); kept for compatibility with the pinned
            # client version — confirm before upgrading.
            self.redis.hmset(a_key, a_dict)

    def get_dict(self,
                 a_key: str) -> dict:
        return self.redis.hgetall(a_key)
import os
import re
import redis
class RedisClient(object):
""" Redis Client """
WIKI_AUGMENTED_DB = 3
WIKI_PAGE_DB = 4
WIKI_SEARCH_DB = 5
def __init__(self,
db: int = 0,
decode_responses: bool = True):
"""
Created:
29-May-2019
<EMAIL>
Updated:
04-Dec-2019
<EMAIL>
* Use CredentialsFromJson
Updated:
23-Jan-2020
<EMAIL>
* Honor de DB parameter even when using an url. from_url ignores
its param if already specified in the url
"""
from ..dto import CredentialsFromJson
url = None
ca_file = None
if 'REDIS_JSON_CREDENTIALS' in os.environ:
credentials = CredentialsFromJson(os.environ['REDIS_JSON_CREDENTIALS'],
'rediss')
url = credentials.url
ca_file = credentials.ca_file
if not url:
url = 'redis://localhost:6379/0'
url = re.sub(r'/\d*$', f'/{db}', url)
options = {
'decode_responses': decode_responses
}
if url.startswith('rediss:') and ca_file:
options['ssl_ca_certs'] = ca_file
self.redis = redis.from_url(url, **options)
self.url = CredentialsFromJson.sanitize_url(url, 'rediss')
def size(self) -> int:
return self.redis.dbsize()
def clear(self):
for key in self.redis.keys():
self.redis.delete(key)
def set(self,
a_key: str,
value: str) -> None:
self.redis.set(a_key, value)
def get(self,
key) -> str:
return self.redis.get(key)
def has(self,
key) -> bool:
return self.redis.exists(key)
def set_list(self,
a_key: str,
a_list: list) -> None:
if not self.has(a_key):
self.redis.rpush(a_key, *a_list)
def get_list(self,
a_key: str) -> list:
return self.redis.lrange(a_key, 0, 9999999) # I don't like this either ...
def set_dict(self,
a_key: str,
a_dict: dict) -> None:
if not self.has(a_key):
self.redis.hmset(a_key, a_dict)
def get_dict(self,
a_key: str) -> dict:
return self.redis.hgetall(a_key) | 0.509276 | 0.1178 |
from fabric.api import env
from fabric.api import settings
from cloudferrylib.utils import cmd_cfg
from cloudferrylib.utils import driver_transporter
from cloudferrylib.utils import rbd_util
from cloudferrylib.utils import utils
LOG = utils.get_log(__name__)
class SSHCephToCeph(driver_transporter.DriverTransporter):
    """Transfers Ceph RBD volumes between clouds over SSH.

    Pipes ``rbd export-diff`` on the source host into ``rbd import-diff``
    running (via ssh) on the destination host.
    """

    def transfer(self, data, snapshot=None, snapshot_type=1):
        """Copy one volume (or a snapshot diff) from src to dst cloud.

        :param data: dict with 'path_src'/'path_dst' and optional
            'host_src'/'host_dst' overrides.
        :param snapshot: optional snapshot descriptor with 'name' (and
            'prev_snapname' when snapshot_type == 2).
        :param snapshot_type: 1 = diff up to the snapshot, 2 = diff from the
            previous snapshot, 3 = diff from a start point.
        :raises ValueError: for an unsupported snapshot_type.
        """
        host_src = (data.get('host_src') if data.get('host_src')
                    else self.src_cloud.getIpSsh())
        host_dst = (data.get('host_dst') if data.get('host_dst')
                    else self.dst_cloud.getIpSsh())
        # BUG FIX: `with (settings(...), utils.forward_agent(...)):` builds a
        # *tuple* and raises TypeError at runtime on the Python versions this
        # code targets; multiple context managers must use the comma form.
        with settings(host_string=host_src,
                      connection_attempts=env.connection_attempts), \
                utils.forward_agent(env.key_filename):
            rbd_import_diff = rbd_util.RbdUtil.rbd_import_diff_cmd
            ssh_cmd = cmd_cfg.ssh_cmd
            ssh_rbd_import_diff = ssh_cmd(host_dst, rbd_import_diff)
            if snapshot:
                process_params = [snapshot['name'], data['path_src'], '-', '-',
                                  data['path_dst']]
                if snapshot_type == 1:
                    rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_snap_cmd
                elif snapshot_type == 2:
                    rbd_export_diff = \
                        rbd_util.RbdUtil.rbd_export_diff_from_snap_cmd
                    process_params.insert(0, snapshot['prev_snapname'])
                elif snapshot_type == 3:
                    rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_cmd
                else:
                    # BUG FIX: ValueError was given logging-style printf args
                    # ("msg", value); format the message explicitly.
                    raise ValueError(
                        "Unsupported snapshot type %s" % snapshot_type)
            else:
                rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_cmd
                process_params = [data['path_src'], '-', '-', data['path_dst']]
            # Chain export-diff | ssh import-diff and run it on the src host.
            process = rbd_export_diff >> ssh_rbd_import_diff
            process = process(*process_params)
            self.src_cloud.ssh_util.execute(process)
from fabric.api import env
from fabric.api import settings
from cloudferrylib.utils import cmd_cfg
from cloudferrylib.utils import driver_transporter
from cloudferrylib.utils import rbd_util
from cloudferrylib.utils import utils
LOG = utils.get_log(__name__)
class SSHCephToCeph(driver_transporter.DriverTransporter):
def transfer(self, data, snapshot=None, snapshot_type=1):
host_src = (data.get('host_src') if data.get('host_src')
else self.src_cloud.getIpSsh())
host_dst = (data.get('host_dst') if data.get('host_dst')
else self.dst_cloud.getIpSsh())
with (settings(host_string=host_src,
connection_attempts=env.connection_attempts),
utils.forward_agent(env.key_filename)):
rbd_import_diff = rbd_util.RbdUtil.rbd_import_diff_cmd
ssh_cmd = cmd_cfg.ssh_cmd
ssh_rbd_import_diff = ssh_cmd(host_dst, rbd_import_diff)
if snapshot:
process_params = [snapshot['name'], data['path_src'], '-', '-',
data['path_dst']]
if snapshot_type == 1:
rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_snap_cmd
elif snapshot_type == 2:
rbd_export_diff = \
rbd_util.RbdUtil.rbd_export_diff_from_snap_cmd
process_params.insert(0, snapshot['prev_snapname'])
elif snapshot_type == 3:
rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_from_cmd
else:
raise ValueError("Unsupported snapshot type %s",
snapshot_type)
else:
rbd_export_diff = rbd_util.RbdUtil.rbd_export_diff_cmd
process_params = [data['path_src'], '-', '-', data['path_dst']]
process = rbd_export_diff >> ssh_rbd_import_diff
process = process(*process_params)
self.src_cloud.ssh_util.execute(process) | 0.258794 | 0.128662 |
import logging
from argparse import ArgumentParser
from datetime import datetime
from lib.base_test import StatelessTest
from lib.gtpu import GTPU
from lib.utils import list_port_status
from lib.xnt import analysis_report_pcap
from scapy.layers.all import IP, TCP, UDP, Ether
from trex_stl_lib.api import STLPktBuilder, STLStream, STLTXCont
SOURCE_MAC = "00:00:00:00:00:01"
DEST_MAC = "00:00:00:00:00:03"
SOURCE_IP = "192.168.10.1"
DEST_IP = "192.168.30.1"
INNER_SRC_IP = "10.240.0.1"
INNER_DEST_IP = "8.8.8.8"
SENDER_PORTS = [0]
INT_COLLECTPR_PORTS = [3]
class IntSingleFlow(StatelessTest):
    """Stateless TRex test: send one flow and capture INT reports.

    Generates a single TCP / UDP / GTPU-encapsulated flow on the sender
    port, captures what the INT collector port receives, writes it to a
    pcap file and prints an analysis report.
    """

    @classmethod
    def setup_subparser(cls, parser: ArgumentParser) -> None:
        """Register this test's CLI options on *parser*."""
        parser.add_argument("--duration", type=int, help="Test duration", default=5)
        parser.add_argument(
            "--mult", type=str, help="Traffic multiplier", default="1pps"
        )
        parser.add_argument("--pkt-type", type=str, help="Packet type", default="tcp")

    def get_sample_packet(self, pkt_type):
        # Build the template packet for the requested encapsulation; the
        # ("*" * 1500) payload pads frames to near-MTU size.
        if pkt_type == "tcp":
            return Ether() / IP(src=SOURCE_IP, dst=DEST_IP) / TCP() / ("*" * 1500)
        elif pkt_type == "gtpu-udp":
            return (
                Ether()
                / IP(src=SOURCE_IP, dst=DEST_IP)
                / UDP()
                / GTPU()
                / IP()
                / UDP()
                / ("*" * 1500)
            )
        else:
            # Any other value falls back to plain UDP.
            return Ether() / IP(src=SOURCE_IP, dst=DEST_IP) / UDP() / ("*" * 1500)

    def start(self, args) -> None:
        """Run the test: start traffic, capture collector RX, report."""
        pkt = self.get_sample_packet(args.pkt_type)
        if not pkt:
            # NOTE(review): get_sample_packet always returns a packet, so
            # this guard is dead code; `return 1` also conflicts with the
            # declared `-> None` — confirm intent.
            return 1
        stream = STLStream(packet=STLPktBuilder(pkt=pkt, vm=[]), mode=STLTXCont())
        logging.info("Setting up ports")
        self.client.add_streams(stream, ports=SENDER_PORTS)
        # Capture a small multiple of the send rate so the buffer bounds RAM.
        pkt_capture_limit = args.duration * 3
        logging.info(
            "Start capturing first %s RX packet from INT collector", pkt_capture_limit
        )
        # Service mode is required by TRex before attaching a capture.
        self.client.set_service_mode(ports=INT_COLLECTPR_PORTS, enabled=True)
        # 32766 is the INT report destination UDP port.
        capture = self.client.start_capture(
            rx_ports=INT_COLLECTPR_PORTS,
            limit=pkt_capture_limit,
            bpf_filter="udp and dst port 32766",
        )
        logging.info(
            "Starting traffic, duration: %ds, throughput: %s", args.duration, args.mult
        )
        self.client.start(ports=SENDER_PORTS, mult=args.mult, duration=args.duration)
        logging.info("Waiting until all traffic stop")
        self.client.wait_on_traffic(ports=SENDER_PORTS)
        logging.info("Stop capturing packet from INT collector port")
        # Timestamped pcap name so repeated runs do not overwrite each other.
        output = "/tmp/int-single-flow-{}-{}.pcap".format(
            args.pkt_type, datetime.now().strftime("%Y%m%d-%H%M%S")
        )
        self.client.stop_capture(capture["id"], output)
        analysis_report_pcap(output)
        list_port_status(self.client.get_stats())
import logging
from argparse import ArgumentParser
from datetime import datetime
from lib.base_test import StatelessTest
from lib.gtpu import GTPU
from lib.utils import list_port_status
from lib.xnt import analysis_report_pcap
from scapy.layers.all import IP, TCP, UDP, Ether
from trex_stl_lib.api import STLPktBuilder, STLStream, STLTXCont
SOURCE_MAC = "00:00:00:00:00:01"
DEST_MAC = "00:00:00:00:00:03"
SOURCE_IP = "192.168.10.1"
DEST_IP = "192.168.30.1"
INNER_SRC_IP = "10.240.0.1"
INNER_DEST_IP = "8.8.8.8"
SENDER_PORTS = [0]
INT_COLLECTPR_PORTS = [3]
class IntSingleFlow(StatelessTest):
@classmethod
def setup_subparser(cls, parser: ArgumentParser) -> None:
parser.add_argument("--duration", type=int, help="Test duration", default=5)
parser.add_argument(
"--mult", type=str, help="Traffic multiplier", default="1pps"
)
parser.add_argument("--pkt-type", type=str, help="Packet type", default="tcp")
def get_sample_packet(self, pkt_type):
if pkt_type == "tcp":
return Ether() / IP(src=SOURCE_IP, dst=DEST_IP) / TCP() / ("*" * 1500)
elif pkt_type == "gtpu-udp":
return (
Ether()
/ IP(src=SOURCE_IP, dst=DEST_IP)
/ UDP()
/ GTPU()
/ IP()
/ UDP()
/ ("*" * 1500)
)
else:
return Ether() / IP(src=SOURCE_IP, dst=DEST_IP) / UDP() / ("*" * 1500)
def start(self, args) -> None:
pkt = self.get_sample_packet(args.pkt_type)
if not pkt:
return 1
stream = STLStream(packet=STLPktBuilder(pkt=pkt, vm=[]), mode=STLTXCont())
logging.info("Setting up ports")
self.client.add_streams(stream, ports=SENDER_PORTS)
pkt_capture_limit = args.duration * 3
logging.info(
"Start capturing first %s RX packet from INT collector", pkt_capture_limit
)
self.client.set_service_mode(ports=INT_COLLECTPR_PORTS, enabled=True)
capture = self.client.start_capture(
rx_ports=INT_COLLECTPR_PORTS,
limit=pkt_capture_limit,
bpf_filter="udp and dst port 32766",
)
logging.info(
"Starting traffic, duration: %ds, throughput: %s", args.duration, args.mult
)
self.client.start(ports=SENDER_PORTS, mult=args.mult, duration=args.duration)
logging.info("Waiting until all traffic stop")
self.client.wait_on_traffic(ports=SENDER_PORTS)
logging.info("Stop capturing packet from INT collector port")
output = "/tmp/int-single-flow-{}-{}.pcap".format(
args.pkt_type, datetime.now().strftime("%Y%m%d-%H%M%S")
)
self.client.stop_capture(capture["id"], output)
analysis_report_pcap(output)
list_port_status(self.client.get_stats()) | 0.561696 | 0.110136 |
import backend.container_service.cluster_tools.constants
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the cluster-tools app: the `Tool` catalogue and the
    # per-cluster `InstalledTool` installation records.

    initial = True

    dependencies = [
    ]

    operations = [
        # Catalogue of deployable components (Helm charts).
        migrations.CreateModel(
            name='Tool',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('chart_name', models.CharField(max_length=128, unique=True)),
                ('name', models.CharField(max_length=64, verbose_name='组件名')),
                ('default_version', models.CharField(max_length=64, verbose_name='单个组件的默认版本')),
                ('default_values', models.TextField(blank=True, help_text='组件启用时需要额外设置的变量值,文本内容格式为 yaml', null=True)),
                ('extra_options', models.TextField(default='')),
                ('namespace', models.CharField(default='bcs-system', max_length=64)),
                ('description', models.TextField(blank=True, help_text='组件功能介绍', null=True)),
                ('help_link', models.CharField(blank=True, max_length=255, null=True)),
                ('logo', models.TextField(blank=True, null=True, verbose_name='图片 logo')),
                ('version', models.CharField(max_length=64, verbose_name='组件库的版本')),
            ],
        ),
        # One record per (tool, project, cluster) installation.
        migrations.CreateModel(
            name='InstalledTool',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creator', models.CharField(max_length=64, verbose_name='创建者')),
                ('updator', models.CharField(max_length=64, verbose_name='修改者')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # Soft-delete flags rather than row removal.
                ('is_deleted', models.BooleanField(default=False)),
                ('deleted_time', models.DateTimeField(blank=True, null=True)),
                # 53 is the Helm release-name length limit.
                ('release_name', models.CharField(max_length=53)),
                ('project_id', models.CharField(max_length=32)),
                ('cluster_id', models.CharField(max_length=32)),
                ('chart_url', models.CharField(max_length=255)),
                ('values', models.TextField(blank=True, help_text='组件启用或更新时设置的变量值,文本内容格式为 yaml', null=True)),
                ('extra_options', models.TextField(default='')),
                ('namespace', models.CharField(max_length=64)),
                # NOTE(review): the default is the enum *member*
                # ToolStatus['PENDING'], not its .value — confirm Django
                # serializes it to the intended 'pending' string.
                ('status', models.CharField(choices=[('pending', 'pending'), ('deployed', 'deployed'), ('failed', 'failed'), ('unknown', 'unknown')], default=backend.container_service.cluster_tools.constants.ToolStatus['PENDING'], max_length=32)),
                ('message', models.TextField(default='', verbose_name='记录错误信息')),
                ('tool', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='cluster_tools.tool')),
            ],
            options={
                'unique_together': {('tool', 'project_id', 'cluster_id')},
            },
        ),
    ]
import backend.container_service.cluster_tools.constants
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Tool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chart_name', models.CharField(max_length=128, unique=True)),
('name', models.CharField(max_length=64, verbose_name='组件名')),
('default_version', models.CharField(max_length=64, verbose_name='单个组件的默认版本')),
('default_values', models.TextField(blank=True, help_text='组件启用时需要额外设置的变量值,文本内容格式为 yaml', null=True)),
('extra_options', models.TextField(default='')),
('namespace', models.CharField(default='bcs-system', max_length=64)),
('description', models.TextField(blank=True, help_text='组件功能介绍', null=True)),
('help_link', models.CharField(blank=True, max_length=255, null=True)),
('logo', models.TextField(blank=True, null=True, verbose_name='图片 logo')),
('version', models.CharField(max_length=64, verbose_name='组件库的版本')),
],
),
migrations.CreateModel(
name='InstalledTool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creator', models.CharField(max_length=64, verbose_name='创建者')),
('updator', models.CharField(max_length=64, verbose_name='修改者')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('deleted_time', models.DateTimeField(blank=True, null=True)),
('release_name', models.CharField(max_length=53)),
('project_id', models.CharField(max_length=32)),
('cluster_id', models.CharField(max_length=32)),
('chart_url', models.CharField(max_length=255)),
('values', models.TextField(blank=True, help_text='组件启用或更新时设置的变量值,文本内容格式为 yaml', null=True)),
('extra_options', models.TextField(default='')),
('namespace', models.CharField(max_length=64)),
('status', models.CharField(choices=[('pending', 'pending'), ('deployed', 'deployed'), ('failed', 'failed'), ('unknown', 'unknown')], default=backend.container_service.cluster_tools.constants.ToolStatus['PENDING'], max_length=32)),
('message', models.TextField(default='', verbose_name='记录错误信息')),
('tool', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='cluster_tools.tool')),
],
options={
'unique_together': {('tool', 'project_id', 'cluster_id')},
},
),
] | 0.416559 | 0.197987 |
import talos as ta
import pandas as pd
from talos.model.normalizers import lr_normalizer
from keras.models import Sequential
from keras.layers import Dropout, Dense
from keras.optimizers import Adam, Nadam
from keras.activations import softmax
from keras.losses import categorical_crossentropy, logcosh
x, y = ta.datasets.iris()
def iris_model(x_train, y_train, x_val, y_val, params):
    """Build, compile and fit a one-hidden-layer classifier for Talos.

    Returns (history, model) as Talos's Scan expects; all hyperparameters
    come from the *params* dict supplied by the scan.
    """
    net = Sequential()
    net.add(Dense(params['first_neuron'],
                  input_dim=x_train.shape[1],
                  activation='relu'))
    net.add(Dropout(params['dropout']))
    net.add(Dense(y_train.shape[1], activation=params['last_activation']))

    optimizer_cls = params['optimizer']
    learning_rate = lr_normalizer(params['lr'], optimizer_cls)
    net.compile(optimizer=optimizer_cls(lr=learning_rate),
                loss=params['loss'],
                metrics=['acc'])

    history = net.fit(x_train, y_train,
                      batch_size=params['batch_size'],
                      epochs=params['epochs'],
                      verbose=0,
                      validation_data=[x_val, y_val])
    return history, net
# --- Talos hyperparameter-search demo on the iris dataset ---
# Parameter space: tuples are (min, max, steps) ranges, lists are discrete
# choices. ('weight_regulizer' [sic] is unused by iris_model.)
p = {'lr': (0.1, 10, 10),
     'first_neuron':[4, 8, 16, 32, 64, 128],
     'batch_size': [2, 3, 4],
     'epochs': [200],
     'dropout': (0, 0.40, 10),
     'optimizer': [Adam, Nadam],
     'loss': ['categorical_crossentropy'],
     'last_activation': ['softmax'],
     'weight_regulizer': [None]}
# Run the scan; grid_downsample=.01 samples 1% of the full parameter grid.
h = ta.Scan(x, y, params=p,
            model=iris_model,
            dataset_name='iris',
            experiment_no='1',
            grid_downsample=.01)
# accessing the results data frame
print(h.data.head())
# accessing epoch entropy values for each round
print(h.peak_epochs_df)
# access the summary details
print(h.details)
# use Scan object as input
r = ta.Reporting(h)
# use filename as input (the csv written by the scan above)
r = ta.Reporting('iris_1.csv')
# access the dataframe with the results
r.data.head(-3)
# get the number of rounds in the Scan
r.rounds()
# get the highest result ('val_acc' by default)
r.high()
# get the highest result for any metric
r.high('acc')
# get the round with the best result
r.rounds2high()
# get the best paramaters
r.best_params()
# get correlation for hyperparameters against a metric
r.correlate('val_loss')
# a regression plot for two dimensions
r.plot_regs()
# line plot
r.plot_line()
# up to two dimensional kernel density estimator
r.plot_kde('val_acc')
# a simple histogram
r.plot_hist(bins=50)
# heatmap correlation
r.plot_corr()
# a four dimensional bar grid
r.plot_bars('batch_size', 'val_acc', 'first_neuron', 'lr')
# k-fold evaluation of the best model found by the scan
e = ta.Evaluate(h)
e.evaluate(x, y, folds=10, average='macro')
# package the experiment (model + metadata) into iris.zip ...
ta.Deploy(h, 'iris')
# ... and restore it again
iris = ta.Restore('iris.zip')
# make predictions with the model
iris.model.predict(x)
# get the meta-data for the experiment
print(iris.details)
# get the hyperparameter space boundary
print(iris.params)
# sample of x and y data
print(iris.x)
print(iris.y)
# the results dataframe
print(iris.results) | playground/talos_reporting_sample.py | import talos as ta
import pandas as pd
from talos.model.normalizers import lr_normalizer
from keras.models import Sequential
from keras.layers import Dropout, Dense
from keras.optimizers import Adam, Nadam
from keras.activations import softmax
from keras.losses import categorical_crossentropy, logcosh
x, y = ta.datasets.iris()
def iris_model(x_train, y_train, x_val, y_val, params):
model = Sequential()
model.add(Dense(params['first_neuron'],
input_dim=x_train.shape[1],
activation='relu'))
model.add(Dropout(params['dropout']))
model.add(Dense(y_train.shape[1],
activation=params['last_activation']))
model.compile(optimizer=params['optimizer'](lr=lr_normalizer(params['lr'], params['optimizer'])),
loss=params['loss'],
metrics=['acc'])
out = model.fit(x_train, y_train,
batch_size=params['batch_size'],
epochs=params['epochs'],
verbose=0,
validation_data=[x_val, y_val])
return out, model
p = {'lr': (0.1, 10, 10),
'first_neuron':[4, 8, 16, 32, 64, 128],
'batch_size': [2, 3, 4],
'epochs': [200],
'dropout': (0, 0.40, 10),
'optimizer': [Adam, Nadam],
'loss': ['categorical_crossentropy'],
'last_activation': ['softmax'],
'weight_regulizer': [None]}
h = ta.Scan(x, y, params=p,
model=iris_model,
dataset_name='iris',
experiment_no='1',
grid_downsample=.01)
# accessing the results data frame
print(h.data.head())
# accessing epoch entropy values for each round
print(h.peak_epochs_df)
# access the summary details
print(h.details)
# use Scan object as input
r = ta.Reporting(h)
# use filename as input
r = ta.Reporting('iris_1.csv')
# access the dataframe with the results
r.data.head(-3)
# get the number of rounds in the Scan
r.rounds()
# get the highest result ('val_acc' by default)
r.high()
# get the highest result for any metric
r.high('acc')
# get the round with the best result
r.rounds2high()
# get the best paramaters
r.best_params()
# get correlation for hyperparameters against a metric
r.correlate('val_loss')
# a regression plot for two dimensions
r.plot_regs()
# line plot
r.plot_line()
# up to two dimensional kernel density estimator
r.plot_kde('val_acc')
# a simple histogram
r.plot_hist(bins=50)
# heatmap correlation
r.plot_corr()
# a four dimensional bar grid
r.plot_bars('batch_size', 'val_acc', 'first_neuron', 'lr')
e = ta.Evaluate(h)
e.evaluate(x, y, folds=10, average='macro')
ta.Deploy(h, 'iris')
iris = ta.Restore('iris.zip')
# make predictions with the model
iris.model.predict(x)
# get the meta-data for the experiment
print(iris.details)
# get the hyperparameter space boundary
print(iris.params)
# sample of x and y data
print(iris.x)
print(iris.y)
# the results dataframe
print(iris.results) | 0.879082 | 0.392599 |
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.estimator import (
Estimator,
ModeKeys,
TrainSpec,
EvalSpec,
EstimatorSpec,
)
from galileo.platform.default_values import DefaultValues
from galileo.platform.log import log
from galileo.platform.export import export
from galileo.framework.python.utils.save_embedding import save_embedding
from galileo.framework.tf.python.tf_trainer import TFTrainer
from galileo.framework.tf.python.hooks.hooks import (
get_train_hooks,
get_evaluate_hooks,
get_predict_hooks,
)
@export('galileo.tf')
class EstimatorTrainer(TFTrainer):
r'''
\brief Trainer for tf estimator
attention API: galileo.tf.EstimatorTrainer
'''
def init_model(self, **kwargs):
super().init_model(**kwargs)
if 1 == self.config['num_workers'] and 0 == self.config['num_ps']:
# remove TF_CONFIG when num_workers==1 and num_ps==0
# otherwise assert error not _is_device_list_single_worker(devices)
os.environ['TF_CONFIG'] = '{}'
self.model_args['is_add_metrics'] = False
batch_num = self.run_config.get('batch_num')
log_steps = self.run_config.get('log_steps', DefaultValues.LOG_STEPS)
log_max_times_per_epoch = self.run_config.get(
'log_max_times_per_epoch', DefaultValues.LOG_MAX_TIMES_PER_EPOCH)
save_checkpoint_epochs = self.run_config.get('save_checkpoint_epochs')
keep_checkpoint_max = self.run_config.get('keep_checkpoint_max', 5)
if batch_num and batch_num > 0:
# avoid too much batch log
log_steps = min(log_steps, batch_num)
if batch_num // log_steps > log_max_times_per_epoch:
log_steps = batch_num // log_max_times_per_epoch
if save_checkpoint_epochs and save_checkpoint_epochs > 0:
save_checkpoints_steps = save_checkpoint_epochs * batch_num
else:
save_checkpoints_steps = None
self.run_config['log_steps'] = log_steps
tensorboard_steps = self.run_config.get('tensorboard_steps', 'epoch')
if 'epoch' == tensorboard_steps:
tensorboard_steps = batch_num if batch_num and batch_num > 0 else 100
elif 'batch' == tensorboard_steps:
tensorboard_steps = 1
else:
tensorboard_steps = int(tensorboard_steps)
rel_model_dir = os.path.relpath(self.model_dir)
# RunConfig will parse TF_CONFIG
self.estimator_config = tf.estimator.RunConfig(
model_dir=rel_model_dir,
train_distribute=self.strategy,
eval_distribute=self.strategy,
save_checkpoints_steps=save_checkpoints_steps,
keep_checkpoint_max=keep_checkpoint_max,
log_step_count_steps=None,
save_summary_steps=tensorboard_steps)
if self.inputs is not None:
self.inputs_dict = {
ModeKeys.TRAIN: self.inputs.train_data,
ModeKeys.EVAL: self.inputs.evaluate_data,
ModeKeys.PREDICT: self.inputs.predict_data,
}
def create_estimator(self):
self.estimator = Estimator(self.model_fn,
config=self.estimator_config,
model_dir=None,
params=None,
warm_start_from=None)
custom_metric_fn = self.run_config.get('custom_metric_fn')
if callable(custom_metric_fn):
self.estimator = tf.estimator.add_metrics(self.estimator,
custom_metric_fn)
def model_fn(self, features, labels, mode):
self.model = self.model_class(**self.model_args)
r'''
the metric_objs of model must be a function, not a `Metric` class
for tf/estimator version 2.3.0
'''
if hasattr(self.model, 'metric_objs'):
for name, mo in self.model.metric_objs.items():
if isinstance(mo, tf.keras.metrics.Metric):
raise ValueError(f'metric {name} for estimator must be a '
f'function, not a Metric class {mo}')
outputs = self.model(features, training=mode == ModeKeys.TRAIN)
if mode == ModeKeys.PREDICT:
return EstimatorSpec(
mode,
predictions=outputs,
prediction_hooks=get_predict_hooks(**self.run_config))
loss = outputs.pop('loss')
logits = outputs.pop('logits', None)
if mode == ModeKeys.EVAL:
r'''
eval_metric_ops is dict of metric results keyed by name.
The values of the dict can be one of the following: (1) instance of
`Metric` class. (2) Results of calling a metric function, namely a
`(metric_tensor, update_op)` tuple.
when metric results is returned by model, value must be tensor
returned by a function, not a `Metric` class for tf version 2.3.0
'''
eval_metric_ops = {}
for name, o in outputs.items():
if tf.is_tensor(o):
eval_metric_ops[name] = tf.compat.v1.metrics.mean(o)
if len(eval_metric_ops) == 0:
eval_metric_ops = None
return EstimatorSpec(
mode,
loss=loss,
predictions={'logits': logits},
eval_metric_ops=eval_metric_ops,
evaluation_hooks=get_evaluate_hooks(**self.run_config))
optimizer = self.get_optimizer()
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer.iterations = global_step
trainable_variables = self.model.trainable_variables
update_ops = self.model.updates
train_op = optimizer.get_updates(loss, trainable_variables)[0]
if update_ops is not None and len(update_ops) > 0:
train_op = tf.group(train_op, *update_ops)
log_tensor_dict = dict(loss=loss, step=global_step)
log_tensor_dict.update(outputs)
train_hooks = get_train_hooks(log_tensor_dict, **self.run_config)
return EstimatorSpec(
mode,
loss=loss,
predictions={'logits': logits},
train_op=train_op,
training_hooks=train_hooks,
)
def get_dataset(self, mode, input_context=None):
# args from self.config and self.run_config
batch_size = self.run_config['batch_size']
if self.should_dist_dataset and self.strategy is not None:
batch_size *= self.strategy.num_replicas_in_sync
inputs_args = dict(
distribution_strategy=self.config['distribution_strategy'],
num_workers=self.config['num_workers'],
task_id=self.config['task_id'],
batch_size=batch_size,
max_id=self.run_config.get('max_id'),
input_context=input_context,
)
if self.inputs is not None:
self.inputs.config.update(inputs_args)
dataset = self.inputs_dict[mode]()
else:
if not callable(self.run_config.get('inputs_fn')):
raise ValueError('inputs_fn must be specified and callable'
'when self.inputs is None')
kwargs = self.run_config.copy()
kwargs.update(inputs_args)
kwargs['mode'] = mode
dataset = self.run_config['inputs_fn'](**kwargs)
if self.should_dist_dataset and self.strategy is not None:
dataset = self.strategy.experimental_distribute_dataset(dataset)
return dataset
def do_train(self):
self.create_estimator()
max_steps = None
num_epochs = self.run_config.get('num_epochs')
if self.config['task_type'] != 'ps':
batch_num = self.run_config.get('batch_num')
assert batch_num and batch_num > 0
max_steps = batch_num * num_epochs
log.info(f'start train model {self.model_name}, '
f'epochs: {num_epochs}, steps per epoch: {batch_num}, '
f'all steps: {max_steps}')
eval_hooks = self.run_config.get('eval_hooks')
exporters = self.run_config.get('eval_exporters')
throttle_secs = self.run_config.get('eval_throttle_secs') or 600
estimator_hooks_fn = self.run_config.get('estimator_hooks_fn') or (
lambda **kwargs: [])
train_spec = TrainSpec(
self.get_dataset,
max_steps=max_steps,
hooks=estimator_hooks_fn(estimator=self.estimator,
**self.run_config))
eval_spec = EvalSpec(self.get_dataset,
steps=None,
hooks=eval_hooks,
exporters=exporters,
throttle_secs=throttle_secs)
tf.estimator.train_and_evaluate(self.estimator, train_spec, eval_spec)
def do_evaluate(self):
if self.config['task_type'] == 'ps':
log.info(f'parameter server exits when evaluate')
return
log.info(f'starting evaluate model {self.model_name}')
self.estimator_config = self.estimator_config.replace(
eval_distribute=None)
self.create_estimator()
outputs = self.estimator.evaluate(self.get_dataset, steps=None)
log.info(f'evaluate output: {outputs}')
return outputs
def do_predict(self):
if self.config['task_type'] == 'ps':
log.info(f'parameter server exits when predict')
return
self.create_estimator()
save_predict_dir = os.path.join(self.model_dir, 'predict_results')
os.makedirs(save_predict_dir, exist_ok=True)
log.info(f'starting save predict outputs to {save_predict_dir}')
save_predict_fn = self.run_config.get('save_predict_fn')
task_id = self.config['task_id']
outputs = self.estimator.predict(self.get_dataset)
ids = []
embeddings = []
ret_outputs = []
for output in outputs:
if 'ids' in output and 'embeddings' in output:
ids.append(output['ids'])
embeddings.append(output['embeddings'])
ret_outputs.append(output)
if ids and embeddings:
embeddings = np.stack(embeddings, axis=0)
if not callable(save_predict_fn):
save_predict_fn = save_embedding
save_predict_fn(ids, embeddings, save_predict_dir, task_id)
return ret_outputs, task_id
export('galileo.tf').var('Trainer', EstimatorTrainer) | galileo/framework/tf/python/estimator_trainer.py |
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.estimator import (
Estimator,
ModeKeys,
TrainSpec,
EvalSpec,
EstimatorSpec,
)
from galileo.platform.default_values import DefaultValues
from galileo.platform.log import log
from galileo.platform.export import export
from galileo.framework.python.utils.save_embedding import save_embedding
from galileo.framework.tf.python.tf_trainer import TFTrainer
from galileo.framework.tf.python.hooks.hooks import (
get_train_hooks,
get_evaluate_hooks,
get_predict_hooks,
)
@export('galileo.tf')
class EstimatorTrainer(TFTrainer):
r'''
\brief Trainer for tf estimator
attention API: galileo.tf.EstimatorTrainer
'''
def init_model(self, **kwargs):
super().init_model(**kwargs)
if 1 == self.config['num_workers'] and 0 == self.config['num_ps']:
# remove TF_CONFIG when num_workers==1 and num_ps==0
# otherwise assert error not _is_device_list_single_worker(devices)
os.environ['TF_CONFIG'] = '{}'
self.model_args['is_add_metrics'] = False
batch_num = self.run_config.get('batch_num')
log_steps = self.run_config.get('log_steps', DefaultValues.LOG_STEPS)
log_max_times_per_epoch = self.run_config.get(
'log_max_times_per_epoch', DefaultValues.LOG_MAX_TIMES_PER_EPOCH)
save_checkpoint_epochs = self.run_config.get('save_checkpoint_epochs')
keep_checkpoint_max = self.run_config.get('keep_checkpoint_max', 5)
if batch_num and batch_num > 0:
# avoid too much batch log
log_steps = min(log_steps, batch_num)
if batch_num // log_steps > log_max_times_per_epoch:
log_steps = batch_num // log_max_times_per_epoch
if save_checkpoint_epochs and save_checkpoint_epochs > 0:
save_checkpoints_steps = save_checkpoint_epochs * batch_num
else:
save_checkpoints_steps = None
self.run_config['log_steps'] = log_steps
tensorboard_steps = self.run_config.get('tensorboard_steps', 'epoch')
if 'epoch' == tensorboard_steps:
tensorboard_steps = batch_num if batch_num and batch_num > 0 else 100
elif 'batch' == tensorboard_steps:
tensorboard_steps = 1
else:
tensorboard_steps = int(tensorboard_steps)
rel_model_dir = os.path.relpath(self.model_dir)
# RunConfig will parse TF_CONFIG
self.estimator_config = tf.estimator.RunConfig(
model_dir=rel_model_dir,
train_distribute=self.strategy,
eval_distribute=self.strategy,
save_checkpoints_steps=save_checkpoints_steps,
keep_checkpoint_max=keep_checkpoint_max,
log_step_count_steps=None,
save_summary_steps=tensorboard_steps)
if self.inputs is not None:
self.inputs_dict = {
ModeKeys.TRAIN: self.inputs.train_data,
ModeKeys.EVAL: self.inputs.evaluate_data,
ModeKeys.PREDICT: self.inputs.predict_data,
}
def create_estimator(self):
self.estimator = Estimator(self.model_fn,
config=self.estimator_config,
model_dir=None,
params=None,
warm_start_from=None)
custom_metric_fn = self.run_config.get('custom_metric_fn')
if callable(custom_metric_fn):
self.estimator = tf.estimator.add_metrics(self.estimator,
custom_metric_fn)
def model_fn(self, features, labels, mode):
self.model = self.model_class(**self.model_args)
r'''
the metric_objs of model must be a function, not a `Metric` class
for tf/estimator version 2.3.0
'''
if hasattr(self.model, 'metric_objs'):
for name, mo in self.model.metric_objs.items():
if isinstance(mo, tf.keras.metrics.Metric):
raise ValueError(f'metric {name} for estimator must be a '
f'function, not a Metric class {mo}')
outputs = self.model(features, training=mode == ModeKeys.TRAIN)
if mode == ModeKeys.PREDICT:
return EstimatorSpec(
mode,
predictions=outputs,
prediction_hooks=get_predict_hooks(**self.run_config))
loss = outputs.pop('loss')
logits = outputs.pop('logits', None)
if mode == ModeKeys.EVAL:
r'''
eval_metric_ops is dict of metric results keyed by name.
The values of the dict can be one of the following: (1) instance of
`Metric` class. (2) Results of calling a metric function, namely a
`(metric_tensor, update_op)` tuple.
when metric results is returned by model, value must be tensor
returned by a function, not a `Metric` class for tf version 2.3.0
'''
eval_metric_ops = {}
for name, o in outputs.items():
if tf.is_tensor(o):
eval_metric_ops[name] = tf.compat.v1.metrics.mean(o)
if len(eval_metric_ops) == 0:
eval_metric_ops = None
return EstimatorSpec(
mode,
loss=loss,
predictions={'logits': logits},
eval_metric_ops=eval_metric_ops,
evaluation_hooks=get_evaluate_hooks(**self.run_config))
optimizer = self.get_optimizer()
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer.iterations = global_step
trainable_variables = self.model.trainable_variables
update_ops = self.model.updates
train_op = optimizer.get_updates(loss, trainable_variables)[0]
if update_ops is not None and len(update_ops) > 0:
train_op = tf.group(train_op, *update_ops)
log_tensor_dict = dict(loss=loss, step=global_step)
log_tensor_dict.update(outputs)
train_hooks = get_train_hooks(log_tensor_dict, **self.run_config)
return EstimatorSpec(
mode,
loss=loss,
predictions={'logits': logits},
train_op=train_op,
training_hooks=train_hooks,
)
def get_dataset(self, mode, input_context=None):
# args from self.config and self.run_config
batch_size = self.run_config['batch_size']
if self.should_dist_dataset and self.strategy is not None:
batch_size *= self.strategy.num_replicas_in_sync
inputs_args = dict(
distribution_strategy=self.config['distribution_strategy'],
num_workers=self.config['num_workers'],
task_id=self.config['task_id'],
batch_size=batch_size,
max_id=self.run_config.get('max_id'),
input_context=input_context,
)
if self.inputs is not None:
self.inputs.config.update(inputs_args)
dataset = self.inputs_dict[mode]()
else:
if not callable(self.run_config.get('inputs_fn')):
raise ValueError('inputs_fn must be specified and callable'
'when self.inputs is None')
kwargs = self.run_config.copy()
kwargs.update(inputs_args)
kwargs['mode'] = mode
dataset = self.run_config['inputs_fn'](**kwargs)
if self.should_dist_dataset and self.strategy is not None:
dataset = self.strategy.experimental_distribute_dataset(dataset)
return dataset
def do_train(self):
self.create_estimator()
max_steps = None
num_epochs = self.run_config.get('num_epochs')
if self.config['task_type'] != 'ps':
batch_num = self.run_config.get('batch_num')
assert batch_num and batch_num > 0
max_steps = batch_num * num_epochs
log.info(f'start train model {self.model_name}, '
f'epochs: {num_epochs}, steps per epoch: {batch_num}, '
f'all steps: {max_steps}')
eval_hooks = self.run_config.get('eval_hooks')
exporters = self.run_config.get('eval_exporters')
throttle_secs = self.run_config.get('eval_throttle_secs') or 600
estimator_hooks_fn = self.run_config.get('estimator_hooks_fn') or (
lambda **kwargs: [])
train_spec = TrainSpec(
self.get_dataset,
max_steps=max_steps,
hooks=estimator_hooks_fn(estimator=self.estimator,
**self.run_config))
eval_spec = EvalSpec(self.get_dataset,
steps=None,
hooks=eval_hooks,
exporters=exporters,
throttle_secs=throttle_secs)
tf.estimator.train_and_evaluate(self.estimator, train_spec, eval_spec)
def do_evaluate(self):
if self.config['task_type'] == 'ps':
log.info(f'parameter server exits when evaluate')
return
log.info(f'starting evaluate model {self.model_name}')
self.estimator_config = self.estimator_config.replace(
eval_distribute=None)
self.create_estimator()
outputs = self.estimator.evaluate(self.get_dataset, steps=None)
log.info(f'evaluate output: {outputs}')
return outputs
def do_predict(self):
if self.config['task_type'] == 'ps':
log.info(f'parameter server exits when predict')
return
self.create_estimator()
save_predict_dir = os.path.join(self.model_dir, 'predict_results')
os.makedirs(save_predict_dir, exist_ok=True)
log.info(f'starting save predict outputs to {save_predict_dir}')
save_predict_fn = self.run_config.get('save_predict_fn')
task_id = self.config['task_id']
outputs = self.estimator.predict(self.get_dataset)
ids = []
embeddings = []
ret_outputs = []
for output in outputs:
if 'ids' in output and 'embeddings' in output:
ids.append(output['ids'])
embeddings.append(output['embeddings'])
ret_outputs.append(output)
if ids and embeddings:
embeddings = np.stack(embeddings, axis=0)
if not callable(save_predict_fn):
save_predict_fn = save_embedding
save_predict_fn(ids, embeddings, save_predict_dir, task_id)
return ret_outputs, task_id
export('galileo.tf').var('Trainer', EstimatorTrainer) | 0.74512 | 0.210868 |
import builtins
import inspect
import sys
from collections import deque
from typing import Any, Callable, Dict, List, Optional, Union, cast
from pydoc_fork import settings
from pydoc_fork.inspector.custom_types import TypeLike
from pydoc_fork.inspector.utils import (
_split_list,
classify_class_attrs,
classname,
getdoc,
sort_attributes,
visiblename,
)
from pydoc_fork.reporter import inline_styles
from pydoc_fork.reporter.format_data import document_data
from pydoc_fork.reporter.format_other import docother
from pydoc_fork.reporter.formatter_html import markup, section
def classlink(the_object: Union[TypeLike, type], modname: str) -> str:
"""Make a link for a class."""
name, module = the_object.__name__, sys.modules.get(the_object.__module__)
if hasattr(module, name) and getattr(module, name) is the_object:
return f'<a href="{module.__name__}.html#{name}">{classname(cast(TypeLike, the_object), modname)}</a>'
return classname(the_object, modname)
# noinspection PyBroadException
def docclass(
the_object: TypeLike,
name: str = "",
mod: str = "",
funcs: Optional[Dict[str, str]] = None,
classes: Optional[Dict[str, str]] = None,
) -> str:
"""Produce HTML documentation for a class object."""
funcs = funcs or {}
classes = classes or {}
real_name = the_object.__name__
name = name or real_name
bases = the_object.__bases__
contents: List[str] = []
push = contents.append
class HorizontalRule:
"""Cute little class to pump out a horizontal rule between sections."""
def __init__(self) -> None:
self.need_one = 0
def maybe(self) -> None:
"""Skip"""
if self.need_one:
push("<hr>\n")
self.need_one = 1
# pylint:disable=invalid-name
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(cast(type, the_object)))
if len(mro) > 2:
hr.maybe()
push("<dl><dt>Method resolution order:</dt>\n")
for base in mro:
push(f"<dd>{classlink(base, the_object.__module__)}</dd>\n")
push("</dl>\n")
def spill(
msg: str, attrs_in: List[Any], predicate: Callable[[Any], Any]
) -> List[Any]:
"""Not sure"""
ok, attrs = _split_list(attrs_in, predicate)
if ok:
hr.maybe()
push(msg)
for name, _, _, value in ok:
# noinspection PyBroadException
try:
value = getattr(the_object, name)
except Exception: # nosec
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(
document_data(
value,
name,
# mod, unused
)
)
else:
# circular ref
# pylint: disable=import-outside-toplevel
from pydoc_fork.reporter.format_page import document
push(
document(
value, name, mod, funcs, classes, module_dict, the_object
)
)
push("\n")
return attrs
def spilldescriptors(
msg: str,
attrs_in: List[Any], # Tuple[str, str, type, "object"]
predicate: Callable[[Any], bool],
) -> List[Any]:
"""Not sure"""
ok, attrs = _split_list(attrs_in, predicate)
if ok:
hr.maybe()
push(msg)
for name, _, _, value in ok:
push(
document_data(
value,
name,
# mod, ignored
)
)
return attrs
def spilldata(
msg: str, attrs_in: List[Any], predicate: Callable[[Any], bool]
) -> List[Any]:
"""Not sure"""
ok, attrs = _split_list(attrs_in, predicate)
if ok:
hr.maybe()
push(msg)
for name, _, __, value in ok:
base = docother(
getattr(the_object, name),
name,
# mod ignored
)
found_doc = getdoc(value)
if not found_doc:
push(f"<dl><dt>{base}</dl>\n")
else:
found_doc = markup(getdoc(value), funcs, classes, module_dict)
found_doc = f"<dd><tt>{found_doc}</tt>"
push(f"<dl><dt>{base}{found_doc}</dl>\n")
push("\n")
return attrs
attrs = [
(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(the_object)
if visiblename(name, obj=the_object)
]
module_dict = {}
for key, _, _, value in attrs:
module_dict[key] = anchor = "#" + name + "-" + key
try:
value = getattr(the_object, name)
except Exception: # nosec
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass # nosec
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
module_dict[value] = anchor
except TypeError:
pass # nosec
while attrs:
if mro:
this_class = mro.popleft()
else:
this_class = attrs[0][2]
is_this_class: Callable[[Any], Any] = lambda t: t[2] is this_class
attrs, inherited = _split_list(attrs, is_this_class)
if the_object is not builtins.object and this_class is builtins.object:
attrs = inherited
continue
if this_class is the_object:
tag = "defined here"
else:
tag = f"inherited from {classlink(this_class, the_object.__module__)}"
tag += ":<br>\n"
sort_attributes(attrs, the_object)
# feature to remove typing annotations cruft.
for kind in attrs.copy():
module_name = inspect.getmodule(kind)
if module_name and module_name.__name__ in settings.SKIP_MODULES:
attrs.remove(kind)
# Pump out the attrs, segregated by kind.
is_method: Callable[[Any], Any] = lambda t: t[1] == "method"
attrs = spill(f"Methods {tag}", attrs, is_method)
is_class: Callable[[Any], Any] = lambda t: t[1] == "class method"
attrs = spill(f"Class methods {tag}", attrs, is_class)
is_static: Callable[[Any], Any] = lambda t: t[1] == "static method"
attrs = spill(f"Static methods {tag}", attrs, is_static)
is_read_only: Callable[[Any], Any] = lambda t: t[1] == "readonly property"
attrs = spilldescriptors(
f"Readonly properties {tag}",
attrs,
is_read_only,
)
is_data_descriptor: Callable[[Any], Any] = lambda t: t[1] == "data descriptor"
attrs = spilldescriptors(f"Data descriptors {tag}", attrs, is_data_descriptor)
is_data: Callable[[Any], Any] = lambda t: t[1] == "data"
attrs = spilldata(f"Data and other attributes {tag}", attrs, is_data)
assert not attrs # nosec
attrs = inherited
contents_as_string = "".join(contents) # type got redefined
if name == real_name:
title = f'<a name="{name}">class <strong>{real_name}</strong></a>'
else:
title = f'<strong>{name}</strong> = <a name="{name}">class {real_name}</a>'
if bases:
parents = []
for base in bases:
parents.append(classlink(base, the_object.__module__))
title = title + f"({', '.join(parents)})"
decl = ""
try:
signature = inspect.signature(the_object)
except (ValueError, TypeError):
signature = None
if signature:
argument_specification = str(signature)
if argument_specification and argument_specification != "()":
# this will cause double escape on ->
# escape(argument_specification)
decl = name + argument_specification + "\n\n"
doc = getdoc(the_object)
if decl:
doc = decl + (doc or "")
doc = markup(doc, funcs, classes, module_dict)
doc = doc and f"<tt>{doc}<br> </tt>"
return section(title, "#000000", "#ffc8d8", contents_as_string, 3, doc)
def format_tree(tree: List[Any], modname: str, parent: Optional[Any] = None) -> str:
"""
Creates a representation of class inheritance.
"""
# """Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ""
for entry in tree:
class_object = entry
# pylint: disable=unidiomatic-typecheck
if type(entry) is type(()): # noqa - not sure of switching to isinstance
class_object, bases = entry
result = (
result + f'<dt><span style="font-family:{inline_styles.SAN_SERIF}">'
)
result = result + classlink(class_object, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(classlink(base, modname))
result = result + "(" + ", ".join(parents) + ")"
result = result + "\n</span></dt>"
elif type(entry) is type([]): # noqa - not sure of switching to isinstance
tree = format_tree(entry, modname, class_object)
result = result + f"<dd>\n{tree}</dd>\n"
return f"<dl>\n{result}</dl>\n" | pydoc_fork/reporter/format_class.py | import builtins
import inspect
import sys
from collections import deque
from typing import Any, Callable, Dict, List, Optional, Union, cast
from pydoc_fork import settings
from pydoc_fork.inspector.custom_types import TypeLike
from pydoc_fork.inspector.utils import (
_split_list,
classify_class_attrs,
classname,
getdoc,
sort_attributes,
visiblename,
)
from pydoc_fork.reporter import inline_styles
from pydoc_fork.reporter.format_data import document_data
from pydoc_fork.reporter.format_other import docother
from pydoc_fork.reporter.formatter_html import markup, section
def classlink(the_object: Union[TypeLike, type], modname: str) -> str:
"""Make a link for a class."""
name, module = the_object.__name__, sys.modules.get(the_object.__module__)
if hasattr(module, name) and getattr(module, name) is the_object:
return f'<a href="{module.__name__}.html#{name}">{classname(cast(TypeLike, the_object), modname)}</a>'
return classname(the_object, modname)
# noinspection PyBroadException
def docclass(
the_object: TypeLike,
name: str = "",
mod: str = "",
funcs: Optional[Dict[str, str]] = None,
classes: Optional[Dict[str, str]] = None,
) -> str:
"""Produce HTML documentation for a class object."""
funcs = funcs or {}
classes = classes or {}
real_name = the_object.__name__
name = name or real_name
bases = the_object.__bases__
contents: List[str] = []
push = contents.append
class HorizontalRule:
"""Cute little class to pump out a horizontal rule between sections."""
def __init__(self) -> None:
self.need_one = 0
def maybe(self) -> None:
"""Skip"""
if self.need_one:
push("<hr>\n")
self.need_one = 1
# pylint:disable=invalid-name
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(cast(type, the_object)))
if len(mro) > 2:
hr.maybe()
push("<dl><dt>Method resolution order:</dt>\n")
for base in mro:
push(f"<dd>{classlink(base, the_object.__module__)}</dd>\n")
push("</dl>\n")
def spill(
msg: str, attrs_in: List[Any], predicate: Callable[[Any], Any]
) -> List[Any]:
"""Not sure"""
ok, attrs = _split_list(attrs_in, predicate)
if ok:
hr.maybe()
push(msg)
for name, _, _, value in ok:
# noinspection PyBroadException
try:
value = getattr(the_object, name)
except Exception: # nosec
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(
document_data(
value,
name,
# mod, unused
)
)
else:
# circular ref
# pylint: disable=import-outside-toplevel
from pydoc_fork.reporter.format_page import document
push(
document(
value, name, mod, funcs, classes, module_dict, the_object
)
)
push("\n")
return attrs
def spilldescriptors(
msg: str,
attrs_in: List[Any], # Tuple[str, str, type, "object"]
predicate: Callable[[Any], bool],
) -> List[Any]:
"""Not sure"""
ok, attrs = _split_list(attrs_in, predicate)
if ok:
hr.maybe()
push(msg)
for name, _, _, value in ok:
push(
document_data(
value,
name,
# mod, ignored
)
)
return attrs
def spilldata(
msg: str, attrs_in: List[Any], predicate: Callable[[Any], bool]
) -> List[Any]:
"""Not sure"""
ok, attrs = _split_list(attrs_in, predicate)
if ok:
hr.maybe()
push(msg)
for name, _, __, value in ok:
base = docother(
getattr(the_object, name),
name,
# mod ignored
)
found_doc = getdoc(value)
if not found_doc:
push(f"<dl><dt>{base}</dl>\n")
else:
found_doc = markup(getdoc(value), funcs, classes, module_dict)
found_doc = f"<dd><tt>{found_doc}</tt>"
push(f"<dl><dt>{base}{found_doc}</dl>\n")
push("\n")
return attrs
attrs = [
(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(the_object)
if visiblename(name, obj=the_object)
]
module_dict = {}
for key, _, _, value in attrs:
module_dict[key] = anchor = "#" + name + "-" + key
try:
value = getattr(the_object, name)
except Exception: # nosec
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass # nosec
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
module_dict[value] = anchor
except TypeError:
pass # nosec
while attrs:
if mro:
this_class = mro.popleft()
else:
this_class = attrs[0][2]
is_this_class: Callable[[Any], Any] = lambda t: t[2] is this_class
attrs, inherited = _split_list(attrs, is_this_class)
if the_object is not builtins.object and this_class is builtins.object:
attrs = inherited
continue
if this_class is the_object:
tag = "defined here"
else:
tag = f"inherited from {classlink(this_class, the_object.__module__)}"
tag += ":<br>\n"
sort_attributes(attrs, the_object)
# feature to remove typing annotations cruft.
for kind in attrs.copy():
module_name = inspect.getmodule(kind)
if module_name and module_name.__name__ in settings.SKIP_MODULES:
attrs.remove(kind)
# Pump out the attrs, segregated by kind.
is_method: Callable[[Any], Any] = lambda t: t[1] == "method"
attrs = spill(f"Methods {tag}", attrs, is_method)
is_class: Callable[[Any], Any] = lambda t: t[1] == "class method"
attrs = spill(f"Class methods {tag}", attrs, is_class)
is_static: Callable[[Any], Any] = lambda t: t[1] == "static method"
attrs = spill(f"Static methods {tag}", attrs, is_static)
is_read_only: Callable[[Any], Any] = lambda t: t[1] == "readonly property"
attrs = spilldescriptors(
f"Readonly properties {tag}",
attrs,
is_read_only,
)
is_data_descriptor: Callable[[Any], Any] = lambda t: t[1] == "data descriptor"
attrs = spilldescriptors(f"Data descriptors {tag}", attrs, is_data_descriptor)
is_data: Callable[[Any], Any] = lambda t: t[1] == "data"
attrs = spilldata(f"Data and other attributes {tag}", attrs, is_data)
assert not attrs # nosec
attrs = inherited
contents_as_string = "".join(contents) # type got redefined
if name == real_name:
title = f'<a name="{name}">class <strong>{real_name}</strong></a>'
else:
title = f'<strong>{name}</strong> = <a name="{name}">class {real_name}</a>'
if bases:
parents = []
for base in bases:
parents.append(classlink(base, the_object.__module__))
title = title + f"({', '.join(parents)})"
decl = ""
try:
signature = inspect.signature(the_object)
except (ValueError, TypeError):
signature = None
if signature:
argument_specification = str(signature)
if argument_specification and argument_specification != "()":
# this will cause double escape on ->
# escape(argument_specification)
decl = name + argument_specification + "\n\n"
doc = getdoc(the_object)
if decl:
doc = decl + (doc or "")
doc = markup(doc, funcs, classes, module_dict)
doc = doc and f"<tt>{doc}<br> </tt>"
return section(title, "#000000", "#ffc8d8", contents_as_string, 3, doc)
def format_tree(tree: List[Any], modname: str, parent: Optional[Any] = None) -> str:
    """
    Creates a representation of class inheritance.

    ``tree`` is the nested structure produced by ``inspect.getclasstree()``:
    items are either ``(class, bases)`` tuples or nested lists holding the
    subclasses of the preceding tuple's class. Returns an HTML <dl> fragment.
    """
    result = ""
    for entry in tree:
        class_object = entry
        # pylint: disable=unidiomatic-typecheck
        # Exact type checks kept deliberately: getclasstree() emits plain
        # tuples and lists, and subclasses must not be treated as entries.
        if type(entry) is tuple:  # noqa
            class_object, bases = entry
            result += f'<dt><span style="font-family:{inline_styles.SAN_SERIF}">'
            result += classlink(class_object, modname)
            if bases and bases != (parent,):
                # Annotate the class with links to its direct bases.
                parents = [classlink(base, modname) for base in bases]
                result += "(" + ", ".join(parents) + ")"
            result += "\n</span></dt>"
        elif type(entry) is list:  # noqa
            # Recurse; the previous tuple's class is this subtree's parent.
            # Use a fresh name instead of rebinding ``tree`` — the old code
            # shadowed the parameter while it was still being iterated.
            subtree = format_tree(entry, modname, class_object)
            result += f"<dd>\n{subtree}</dd>\n"
    return f"<dl>\n{result}</dl>\n"
from flask import Flask
from flask import render_template
from flask import Response
import sqlite3
import random
import io
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
app = Flask(__name__)
@app.route("/")
def cv_index():
cvs = get_cv()
res = ""
res += f"<h2>Кол-во резюме по годам</h2>"
for i, cv in enumerate(cvs):
res += f"<p>{cv['substr(dateModify,1,4)']} - {cv['count(dateModify)']}</p>"
return res
@app.route("/dashboard")
def dashboard():
con = sqlite3.connect('works.sqlite')
res = con.execute('select substr(dateModify,1,4), count(dateModify) '
'from works where dateModify is not null group by substr(dateModify,1,4);').fetchall()
con.close()
return render_template('d1.html',
cvs=get_cv(),
labels=[row[0] for row in res],
data=[row[1] for row in res]
)
def dict_factory(cursor, row):
    """sqlite3 row factory mapping column names to row values.

    (Adapted from the sqlite3 documentation.)
    """
    return {description[0]: row[index]
            for index, description in enumerate(cursor.description)}
def get_cv():
    """Return per-year CV counts from works.sqlite as a list of dicts.

    Each dict maps the raw SQL expressions ('substr(dateModify,1,4)' and
    'count(dateModify)') to their values, via dict_factory.
    """
    con = sqlite3.connect('works.sqlite')
    try:
        con.row_factory = dict_factory
        res = list(con.execute('select substr(dateModify,1,4), count(dateModify) '
                               'from works where dateModify is not null group by substr(dateModify,1,4);'))
    finally:
        # Ensure the connection is released even when the query raises.
        con.close()
    return res
@app.route('/plot.png')
def plot_png():
    """Serve the demo random plot as a PNG image."""
    buffer = io.BytesIO()
    FigureCanvas(create_figure()).print_png(buffer)
    return Response(buffer.getvalue(), mimetype='image/png')
def create_figure():
    """Build a matplotlib Figure with 100 random points on one axes."""
    figure = Figure()
    ax = figure.add_subplot(1, 1, 1)
    x_values = range(100)
    y_values = [random.randint(1, 50) for _ in x_values]
    ax.plot(x_values, y_values)
    return figure
@app.route("/statistic")
def statistic():
job_titles = get_field('jobTitle')
qualifications = get_field('qualification')
res = ""
people_count = count_people_with_non_matched_fields(job_titles, qualifications)
res += f"<p>Из {people_count[1]} людей не совпадают профессия и должность у {people_count[0]}</p>"
res += f"\n<p>Список зарплат у людей со скиллами в python:</p>"
python_salaries = get_python_salary()
for i in python_salaries:
res += f"<p>{i[0]} руб.</p>"
return res
def get_field(field):
    """Return all values of column *field* from works as a list of 1-tuples.

    *field* is interpolated into the SQL text (column names cannot be bound
    as parameters), so anything that is not a plain identifier is rejected
    to prevent SQL injection.

    Raises:
        ValueError: if *field* is not a valid identifier.
    """
    if not str(field).isidentifier():
        raise ValueError(f"invalid field name: {field!r}")
    con = sqlite3.connect('works.sqlite')
    try:
        return list(con.execute(f'select {field} from works'))
    finally:
        con.close()
def get_python_salary():
    """Return salaries of works rows whose skills mention 'python'.

    The match is case-insensitive (instr on lower(skills)); rows with NULL
    skills are excluded. Each result is a 1-tuple (salary,).
    """
    con = sqlite3.connect('works.sqlite')
    try:
        return list(con.execute("select salary from works where skills is"
                                " not null and instr(lower(skills),'python')"))
    finally:
        # Close on both success and failure; the old code leaked on error.
        con.close()
def count_people_with_non_matched_fields(field1, field2):
    """Count row pairs whose values share no common word in either direction.

    *field1* and *field2* are parallel sequences of 1-tuples (DB rows).
    Returns (mismatched_count, total_pairs).
    """
    def words_overlap(a, b):
        # Same rule as find_match: any word of *a* (hyphens treated as
        # spaces) occurring as a substring of *b*, case-insensitively.
        haystack = str(b).lower()
        return any(word in haystack
                   for word in str(a).lower().replace('-', ' ').split())

    mismatched, total = 0, 0
    for left_row, right_row in zip(field1, field2):
        total += 1
        left, right = left_row[0], right_row[0]
        if not words_overlap(left, right) and not words_overlap(right, left):
            mismatched += 1
    return mismatched, total
def find_match(f1, f2):
    """Return True if any word of *f1* occurs as a substring of *f2*.

    Comparison is case-insensitive and hyphens in *f1* split words.
    """
    haystack = str(f2).lower()
    words = str(f1).lower().replace('-', ' ').split()
    return any(word in haystack for word in words)
app.run(debug=True) | main.py | from flask import Flask
from flask import render_template
from flask import Response
import sqlite3
import random
import io
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
app = Flask(__name__)
@app.route("/")
def cv_index():
cvs = get_cv()
res = ""
res += f"<h2>Кол-во резюме по годам</h2>"
for i, cv in enumerate(cvs):
res += f"<p>{cv['substr(dateModify,1,4)']} - {cv['count(dateModify)']}</p>"
return res
@app.route("/dashboard")
def dashboard():
con = sqlite3.connect('works.sqlite')
res = con.execute('select substr(dateModify,1,4), count(dateModify) '
'from works where dateModify is not null group by substr(dateModify,1,4);').fetchall()
con.close()
return render_template('d1.html',
cvs=get_cv(),
labels=[row[0] for row in res],
data=[row[1] for row in res]
)
def dict_factory(cursor, row):
# обертка для преобразования
# полученной строки. (взята из документации)
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def get_cv():
con = sqlite3.connect('works.sqlite')
con.row_factory = dict_factory
res = list(con.execute('select substr(dateModify,1,4), count(dateModify) '
'from works where dateModify is not null group by substr(dateModify,1,4);'))
con.close()
return res
@app.route('/plot.png')
def plot_png():
fig = create_figure()
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype='image/png')
def create_figure():
fig = Figure()
axis = fig.add_subplot(1, 1, 1)
xs = range(100)
ys = [random.randint(1, 50) for x in xs]
axis.plot(xs, ys)
return fig
@app.route("/statistic")
def statistic():
job_titles = get_field('jobTitle')
qualifications = get_field('qualification')
res = ""
people_count = count_people_with_non_matched_fields(job_titles, qualifications)
res += f"<p>Из {people_count[1]} людей не совпадают профессия и должность у {people_count[0]}</p>"
res += f"\n<p>Список зарплат у людей со скиллами в python:</p>"
python_salaries = get_python_salary()
for i in python_salaries:
res += f"<p>{i[0]} руб.</p>"
return res
def get_field(field):
con = sqlite3.connect('works.sqlite')
res = list(con.execute(f'select {field} from works'))
con.close()
return res
def get_python_salary():
con = sqlite3.connect('works.sqlite')
res = list(con.execute("select salary from works where skills is"
" not null and instr(lower(skills),'python')"))
con.close()
return res
def count_people_with_non_matched_fields(field1, field2):
res_count, total = 0, 0
for (f1, f2) in zip(field1, field2):
total += 1
if not find_match(f1[0], f2[0]) and not find_match(f2[0], f1[0]):
res_count += 1
return res_count, total
def find_match(f1, f2):
arr1 = str(f1).lower().replace('-', ' ').split()
for word in arr1:
if word in str(f2).lower():
return True
return False
app.run(debug=True) | 0.404155 | 0.155431 |
from typing import Tuple
import matplotlib.pyplot as plt
import pandas as pd
from polar_bearings.opt_pah_finder_robotics.potential_field_planning import (
potential_field_planning,
)
def main(
    filepath: str = "ice_thickness_01-01-2020.csv",
    rescaling_factor: int = 2,
    grid_size: float = 0.1,
    robot_radius: float = 0.01,
):
    """Loads the ice thickness data and plans a route over safe ice."""
    ice_df = pd.read_csv(filepath)
    # Subsample every rescaling_factor-th row to shrink the search space.
    subsampled = ice_df.iloc[::rescaling_factor, :]
    gx, gy, sx, sy, ox, oy = process_data(subsampled)
    plt.grid(True)
    plt.axis("equal")
    # Plan the path; the planner draws onto the current pyplot figure.
    _, _ = potential_field_planning(sx, sy, gx, gy, ox, oy, grid_size, robot_radius)
    plt.show()
def process_data(
    single_day_df: pd.DataFrame,
    safety_threshold: float = 1.0,
):
    """Rescales data, then provides the coordinates needed for the pathfinder.

    Returns (gx, gy, sx, sy, ox, oy): goal, start, and obstacle coordinates.
    """
    sx, sy, gx, gy = find_start_end(single_day_df)
    # NaN thickness marks land; fill with the threshold so those cells are
    # not classified as dangerous ice below.
    single_day_df = single_day_df.fillna(safety_threshold)
    thin_ice = single_day_df[single_day_df.sithick < safety_threshold]
    ox = thin_ice.longitude.values.tolist()
    oy = thin_ice.latitude.values.tolist()
    print(f"{len(ox)}/{len(single_day_df)} co-ordinates considered as dangerous ice.")
    return gx, gy, sx, sy, ox, oy
def find_closest(df, lat, lon):
    """Return the row of *df* minimising the L1 distance to (lat, lon)."""
    l1_distance = (df["latitude"] - lat).abs() + (df["longitude"] - lon).abs()
    best_index = l1_distance.idxmin()
    return df.loc[best_index]
def find_start_end(df_rescaled: pd.DataFrame) -> Tuple[int, int, int, int]:
    """Finds start and end points of ulukhaktok and sachs harbour, snapped to
    the closest grid cells present in *df_rescaled*.

    Returns (sx, sy, gx, gy): start longitude/latitude then goal
    longitude/latitude.
    """
    # NOTE: the old no-op self-assignments of the longitude/latitude columns
    # were removed; they only risked a pandas SettingWithCopyWarning.
    # Reference coordinates (lat, lon) of the two communities.
    ulukhaktok_y, ulukhaktok_x = (
        70.74025296172513,
        -117.77122885607929,
    )
    sachs_y, sachs_x = 71.98715823380064, -125.24848194895534
    closest = find_closest(df_rescaled, ulukhaktok_y, ulukhaktok_x)
    sy, sx = closest["latitude"], closest["longitude"]
    closest = find_closest(df_rescaled, sachs_y, sachs_x)
    gy, gx = closest["latitude"], closest["longitude"]
    return sx, sy, gx, gy
import matplotlib.pyplot as plt
import pandas as pd
from polar_bearings.opt_pah_finder_robotics.potential_field_planning import (
potential_field_planning,
)
def main(
filepath: str = "ice_thickness_01-01-2020.csv",
rescaling_factor: int = 2,
grid_size: float = 0.1,
robot_radius: float = 0.01,
):
"""Loads the ice thickness data and plans a route over safe ice."""
df = pd.read_csv(filepath)
df_rescaled = df.iloc[::rescaling_factor, :]
gx, gy, sx, sy, ox, oy = process_data(df_rescaled)
plt.grid(True)
plt.axis("equal")
# path generation
_, _ = potential_field_planning(sx, sy, gx, gy, ox, oy, grid_size, robot_radius)
plt.show()
def process_data(
single_day_df: pd.DataFrame,
safety_threshold: float = 1.0,
):
"""Rescales data, then provides the coordinates needed for the pathfinder."""
sx, sy, gx, gy = find_start_end(single_day_df)
single_day_df = single_day_df.fillna(safety_threshold) # NaN values are land
unsafe = single_day_df[single_day_df.sithick < safety_threshold]
ox = unsafe.longitude.values.tolist()
oy = unsafe.latitude.values.tolist()
print(f"{len(ox)}/{len(single_day_df)} co-ordinates considered as dangerous ice.")
return gx, gy, sx, sy, ox, oy
def find_closest(df, lat, lon):
dist = (df["latitude"] - lat).abs() + (df["longitude"] - lon).abs()
return df.loc[dist.idxmin()]
def find_start_end(df_rescaled: pd.DataFrame) -> Tuple[int, int, int, int]:
"""Finds start and end points of ulukhaktok and sachs harbour, then scales their coordinate values to the origin."""
df_rescaled["longitude"] = df_rescaled.longitude
df_rescaled["latitude"] = df_rescaled.latitude
ulukhaktok_y, ulukhaktok_x = (
70.74025296172513,
-117.77122885607929,
)
sachs_y, sachs_x = 71.98715823380064, -125.24848194895534
closest = find_closest(df_rescaled, ulukhaktok_y, ulukhaktok_x)
sy, sx = closest["latitude"], closest["longitude"]
closest = find_closest(df_rescaled, sachs_y, sachs_x)
gy, gx = closest["latitude"], closest["longitude"]
return sx, sy, gx, gy | 0.937139 | 0.640776 |
from QuantTorch.functions.terner_connect import TernaryConnectDeterministic, TernaryConnectStochastic
import torch
import pytest
def equals(a, b, epsilon=1e-12):
    """Elementwise approximate equality: True iff all |a - b| < epsilon."""
    difference = torch.abs(a - b)
    return torch.all(difference < epsilon)
def test_terner_connect_det_forward():
    """Deterministic ternarisation maps values to {-1, 0, 1}.

    Per the expected tensors below, |x| >= 0.5 snaps to +/-1 (0.5 itself
    rounds to 1) and smaller magnitudes snap to 0; shape is preserved for
    both 1-D and 2-D inputs.
    """
    x_1 = torch.Tensor([0.75,0.5,0.25,0.0,-1,-0.2])
    x_2 = torch.Tensor([1,0,0.51,0.1,0,-1,-0.2,.7]).view(2,4)
    y_1_expected = torch.Tensor([1,1,0,0,-1,0])
    y_2_expected = torch.Tensor([1,0,1,0.,0,-1,0,1]).view(2,4)
    y_1 = TernaryConnectDeterministic.apply(x_1)
    y_2 = TernaryConnectDeterministic.apply(x_2)
    assert equals(
        y_1,
        y_1_expected
    )
    assert equals(
        y_2,
        y_2_expected
    )
def test_terner_connect_sto_forward():
    """Stochastic ternarisation yields values in {-1, 0, 1} with mean ~ x."""
    x = torch.Tensor([1,0,0.45,-1,-0.9]).view(1,-1)
    results = list()
    for i in range(1000):
        temp_result = TernaryConnectStochastic.apply(x)
        # Every sampled value must be exactly -1, 0 or 1.
        # BUG FIX: the old check multiplied the "==1" and "==0" masks, which
        # is always all-False (a value cannot equal both), so it never failed.
        is_valid = (
            torch.lt(torch.abs(temp_result - 1), 1e-8)
            | torch.lt(torch.abs(temp_result), 1e-8)
            | torch.lt(torch.abs(temp_result + 1), 1e-8)
        )
        assert torch.all(is_valid)
        results.append(temp_result)
    result = torch.cat(results, 0)
    result = torch.sum(result, 0) / 1000
    # The empirical mean over 1000 draws should approximate x
    # (stochastic test: loose 5e-2 tolerance).
    assert equals(
        result,
        torch.Tensor([1,0,0.45,-1,-0.9]).view(1,-1),
        5e-2)
@pytest.mark.parametrize("inputs, weight", [
    [torch.FloatTensor(6).uniform_(-10, 10).view(3,2),torch.FloatTensor(6).uniform_(-1, 1).view(2,3)],
    [torch.FloatTensor(5).uniform_(-7, 8).view(1,5),torch.FloatTensor(5).uniform_(-1,1).view(5,1)],
    [torch.FloatTensor(10).uniform_(50, -10).view(2,5),torch.FloatTensor(5).uniform_(-1,1).view(5,1)]
])
def test_terner_connect_det_backward(inputs, weight):
    """Gradients flow through the deterministic ternariser unchanged.

    The expected values below assume a straight-through backward for the
    weights, which are all sampled in [-1, 1] — TODO confirm against
    TernaryConnectDeterministic.backward.
    """
    #setup all vars
    inputs_var_1 = torch.autograd.Variable(inputs, requires_grad=True)
    weight_var_1 = torch.autograd.Variable(weight, requires_grad=True)
    inputs_var_2 = torch.autograd.Variable(inputs, requires_grad=True)
    weight_var_2 = torch.autograd.Variable(weight, requires_grad=True)
    # loss_1 = sum(x @ ternarize(w)); with a pass-through gradient the
    # expected dL/dw is the column sums of x broadcast over w's columns.
    loss_1 = torch.sum(torch.mm(inputs,TernaryConnectDeterministic.apply(weight_var_1) ))
    loss_1.backward()
    assert equals(
        weight_var_1.grad,
        torch.transpose(torch.sum(inputs_var_1,0, keepdim=True),1,0).repeat(1, weight.shape[-1])
    )
    # loss_2 = sum((x @ ternarize(w))^2); expected dL/dw = x^T @ (2 * x @ w).
    loss_2_temp = torch.mm(inputs_var_2,TernaryConnectDeterministic.apply(weight_var_2) )
    loss_2 = torch.sum(torch.pow(loss_2_temp, 2))
    loss_2.backward()
    assert equals(
        weight_var_2.grad,
        torch.mm(inputs_var_2.transpose(1,0),2*loss_2_temp )
    )
def test_terner_connect_det_backward_bis():
    """Inputs with magnitude above 1 receive zero gradient (clipped STE)."""
    x = torch.autograd.Variable(torch.Tensor([2,1.0,0.0,-1,-3]), requires_grad=True)
    loss = torch.sum(TernaryConnectDeterministic.apply(x))
    loss.backward()
    # x[0] = 2 and x[4] = -3 lie outside [-1, 1]: their gradient must be 0.
    assert equals(x.grad[0], 0)
    assert equals(x.grad[4], 0)
@pytest.mark.parametrize("inputs, weight", [
    [torch.FloatTensor(6).uniform_(-10, 10).view(3,2),torch.FloatTensor(6).uniform_(-1, 1).view(2,3)],
    [torch.FloatTensor(5).uniform_(-7, 8).view(1,5),torch.FloatTensor(5).uniform_(-1,1).view(5,1)],
    [torch.FloatTensor(10).uniform_(50, -10).view(2,5),torch.FloatTensor(5).uniform_(-1,1).view(5,1)]
])
def test_terner_connect_sto_backward(inputs, weight):
    """Gradients flow through the stochastic ternariser unchanged.

    Mirrors test_terner_connect_det_backward: expected values assume a
    straight-through backward for weights sampled in [-1, 1] — TODO confirm
    against TernaryConnectStochastic.backward.
    """
    #setup all vars
    inputs_var_1 = torch.autograd.Variable(inputs, requires_grad=True)
    weight_var_1 = torch.autograd.Variable(weight, requires_grad=True)
    inputs_var_2 = torch.autograd.Variable(inputs, requires_grad=True)
    weight_var_2 = torch.autograd.Variable(weight, requires_grad=True)
    # loss_1 = sum(x @ ternarize(w)); expected dL/dw is the column sums of x
    # broadcast over w's columns.
    loss_1 = torch.sum(torch.mm(inputs,TernaryConnectStochastic.apply(weight_var_1) ))
    loss_1.backward()
    assert equals(
        weight_var_1.grad,
        torch.transpose(torch.sum(inputs_var_1,0, keepdim=True),1,0).repeat(1, weight.shape[-1])
    )
    # loss_2 = sum((x @ ternarize(w))^2); expected dL/dw = x^T @ (2 * x @ w).
    loss_2_temp = torch.mm(inputs_var_2,TernaryConnectStochastic.apply(weight_var_2) )
    loss_2 = torch.sum(torch.pow(loss_2_temp, 2))
    loss_2.backward()
    assert equals(
        weight_var_2.grad,
        torch.mm(inputs_var_2.transpose(1,0),2*loss_2_temp )
    )
import torch
import pytest
def equals(a, b, epsilon=1e-12):
return torch.all(torch.lt( torch.abs(a-b), epsilon ))
def test_terner_connect_det_forward():
x_1 = torch.Tensor([0.75,0.5,0.25,0.0,-1,-0.2])
x_2 = torch.Tensor([1,0,0.51,0.1,0,-1,-0.2,.7]).view(2,4)
y_1_expected = torch.Tensor([1,1,0,0,-1,0])
y_2_expected = torch.Tensor([1,0,1,0.,0,-1,0,1]).view(2,4)
y_1 = TernaryConnectDeterministic.apply(x_1)
y_2 = TernaryConnectDeterministic.apply(x_2)
assert equals(
y_1,
y_1_expected
)
assert equals(
y_2,
y_2_expected
)
def test_terner_connect_sto_forward():
x = torch.Tensor([1,0,0.45,-1,-0.9]).view(1,-1)
results = list()
for i in range(1000):
temp_result = TernaryConnectStochastic.apply(x)
# Tensor must have only -1 , 0 , 1 values
assert not torch.any(torch.lt(torch.abs(temp_result-1),1e-8)*torch.lt(torch.abs(temp_result),1e-8))
results.append(temp_result)
result = torch.cat(results,0 )
result = torch.sum(result, 0)/1000
assert equals(
result,
torch.Tensor([1,0,0.45,-1,-0.9]).view(1,-1),
5e-2)
@pytest.mark.parametrize("inputs, weight", [
[torch.FloatTensor(6).uniform_(-10, 10).view(3,2),torch.FloatTensor(6).uniform_(-1, 1).view(2,3)],
[torch.FloatTensor(5).uniform_(-7, 8).view(1,5),torch.FloatTensor(5).uniform_(-1,1).view(5,1)],
[torch.FloatTensor(10).uniform_(50, -10).view(2,5),torch.FloatTensor(5).uniform_(-1,1).view(5,1)]
])
def test_terner_connect_det_backward(inputs, weight):
#setup all vars
inputs_var_1 = torch.autograd.Variable(inputs, requires_grad=True)
weight_var_1 = torch.autograd.Variable(weight, requires_grad=True)
inputs_var_2 = torch.autograd.Variable(inputs, requires_grad=True)
weight_var_2 = torch.autograd.Variable(weight, requires_grad=True)
loss_1 = torch.sum(torch.mm(inputs,TernaryConnectDeterministic.apply(weight_var_1) ))
loss_1.backward()
assert equals(
weight_var_1.grad,
torch.transpose(torch.sum(inputs_var_1,0, keepdim=True),1,0).repeat(1, weight.shape[-1])
)
loss_2_temp = torch.mm(inputs_var_2,TernaryConnectDeterministic.apply(weight_var_2) )
loss_2 = torch.sum(torch.pow(loss_2_temp, 2))
loss_2.backward()
assert equals(
weight_var_2.grad,
torch.mm(inputs_var_2.transpose(1,0),2*loss_2_temp )
)
def test_terner_connect_det_backward_bis():
x = torch.autograd.Variable(torch.Tensor([2,1.0,0.0,-1,-3]), requires_grad=True)
loss = torch.sum(TernaryConnectDeterministic.apply(x))
loss.backward()
assert equals(
x.grad[0],
0)
assert equals(
x.grad[4],
0)
@pytest.mark.parametrize("inputs, weight", [
[torch.FloatTensor(6).uniform_(-10, 10).view(3,2),torch.FloatTensor(6).uniform_(-1, 1).view(2,3)],
[torch.FloatTensor(5).uniform_(-7, 8).view(1,5),torch.FloatTensor(5).uniform_(-1,1).view(5,1)],
[torch.FloatTensor(10).uniform_(50, -10).view(2,5),torch.FloatTensor(5).uniform_(-1,1).view(5,1)]
])
def test_terner_connect_sto_backward(inputs, weight):
#setup all vars
inputs_var_1 = torch.autograd.Variable(inputs, requires_grad=True)
weight_var_1 = torch.autograd.Variable(weight, requires_grad=True)
inputs_var_2 = torch.autograd.Variable(inputs, requires_grad=True)
weight_var_2 = torch.autograd.Variable(weight, requires_grad=True)
loss_1 = torch.sum(torch.mm(inputs,TernaryConnectStochastic.apply(weight_var_1) ))
loss_1.backward()
assert equals(
weight_var_1.grad,
torch.transpose(torch.sum(inputs_var_1,0, keepdim=True),1,0).repeat(1, weight.shape[-1])
)
loss_2_temp = torch.mm(inputs_var_2,TernaryConnectStochastic.apply(weight_var_2) )
loss_2 = torch.sum(torch.pow(loss_2_temp, 2))
loss_2.backward()
assert equals(
weight_var_2.grad,
torch.mm(inputs_var_2.transpose(1,0),2*loss_2_temp )
) | 0.735547 | 0.644589 |
import os
import pytest
from intervaltree import Interval
from viridian_workflow import self_qc, primers
this_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(this_dir, "data", "primers")
class StatsTest:
    """Minimal stand-in for self_qc's per-position stats object.

    Always reports the verdict it was constructed with, so tests can force
    a position to pass or fail masking.
    """

    def __init__(self, fail):
        # fail: the canned verdict check_for_failure() will always return.
        self.fail = fail
        self.log = []
        self.config = self_qc.default_config

    def check_for_failure(self, **kwargs):
        """Ignore all kwargs and return the preconfigured verdict."""
        return self.fail
def test_cigar_tuple_construction():
    """cigar_to_alts expands CIGAR tuples into per-reference-position alts.

    The default convention here is (length, op) tuples; with pysam=True the
    tuples are (op, length) — the third and fourth cases below encode the
    same alignment both ways and expect identical output.
    """
    # Perfect 3-base match.
    ref = "AAA"
    query = "AAA"
    cigar = [
        (3, 0),
    ]
    assert self_qc.cigar_to_alts(ref, query, cigar) == [(0, "A"), (1, "A"), (2, "A")]
    # Insertion in the query: inserted bases do not consume ref positions.
    ref = "AAA"
    query = "ATTAA"
    cigar = [(1, 0), (2, 1), (2, 0)]
    assert self_qc.cigar_to_alts(ref, query, cigar) == [
        (0, "A"),
        # (1, "TT"),
        (1, "A"),
        (2, "A"),
    ]
    # Deletion from the query: deleted ref positions are reported as "-".
    ref = "ATTAA"
    query = "AAA"
    cigar = [(1, 0), (2, 2), (2, 0)]
    assert self_qc.cigar_to_alts(ref, query, cigar) == [
        (0, "A"),
        (1, "-"),
        (2, "-"),
        (3, "A"),
        (4, "A"),
    ]
    # Same deletion, pysam-style (op, length) tuples.
    ref = "ATTAA"
    query = "AAA"
    cigar = [(0, 1), (2, 2), (0, 2)]
    assert self_qc.cigar_to_alts(ref, query, cigar, pysam=True) == [
        (0, "A"),
        (1, "-"),
        (2, "-"),
        (3, "A"),
        (4, "A"),
    ]
    # q_pos offsets into the query past the soft-clipped prefix "GGG".
    ref = "AAAA"
    query = "GGGAAAA"
    cigar = [(3, 4), (4, 0)]
    assert self_qc.cigar_to_alts(ref, query, cigar, q_pos=3) == [
        (0, "A"),
        (1, "A"),
        (2, "A"),
        (3, "A"),
    ]
def test_mappy_cigar_liftover():
    """Smoke test: a realistic pysam-style CIGAR over a real sequence.

    Only checks that cigar_to_alts runs without raising on a self-alignment
    with soft-clips, insertions and deletions — there are no assertions.
    NOTE(review): ``amplicon`` is created but never used.
    """
    amplicon = primers.Amplicon("test_amplicon")
    seq = "CTTCAGGTGATGGCACAACAAGTCCTATTTGAACATAGACTCACGAGATTGCGGTTATACTTTCGAAAATGGGAATCTGGAGTAAAAGACTAAAGTTAGATACACAGTTGCTTCACTTCAGACTATTACCAGCTGTACTCAACTCAATTGAGTACAGACACTGGTGTTGAACATGTGCCATCTTCTTCATCTACAATAAAATTGTTGATGAGCCTGAAGAACATGGTCCAATTCACACAACGACGGTTCATCCGGAGTTGTTAATCCAGTAATGGAACCAATTTATGATGAACCGACGACGACTACTAGCGTGCCTTTGTGTTACTCAAGCTGATGAGTACGAACTTATGTACTCATTCGTTTCGGGAAGAGACAGGTACGTTAATAGTTAATAGCGTACTTCTTTTTCTTGCTTTCGT"
    # pysam-style (op, length) tuples: 4 = soft-clip, 0 = match,
    # 1 = insertion, 2 = deletion.
    cigar = [
        (4, 32),
        (0, 29),
        (2, 2),
        (0, 7),
        (1, 1),
        (0, 4),
        (1, 1),
        (0, 8),
        (2, 1),
        (0, 11),
        (1, 3),
        (0, 1),
        (2, 1),
        (0, 26),
        (1, 1),
        (0, 8),
        (2, 1),
        (0, 76),
        (1, 2),
        (0, 46),
        (1, 1),
        (0, 4),
        (2, 1),
        (0, 11),
        (2, 1),
        (0, 77),
        (1, 2),
        (0, 5),
        (2, 1),
        (0, 40),
        (1, 1),
        (0, 54),
        (4, 70),
    ]
    self_qc.cigar_to_alts(seq, seq, cigar, pysam=True)
def test_bias_test():
    """Disabled sanity checks for self_qc.test_bias thresholding.

    NOTE(review): the early return below disables this test; the assertions
    are dead code until the TODO is resolved.
    """
    return True  # TODO resolve
    # Expected behaviour: counts far from 50% are not flagged, counts near
    # 50% are — TODO confirm the intended semantics of test_bias.
    assert not self_qc.test_bias(10, 100, threshold=0.3)
    assert not self_qc.test_bias(90, 100, threshold=0.3)
    assert self_qc.test_bias(40, 100, threshold=0.3)
    assert self_qc.test_bias(60, 100, threshold=0.3)
def test_stat_evaluation():
    """Disabled test of Stats failure detection on a 20% alt pileup.

    NOTE(review): the early return below disables this test; everything
    after it is dead code. pileup0 and pileup100 are built but never used.
    """
    return True  # resolve
    fwd = self_qc.BaseProfile(False, True, "test_amplicon1")
    rev = self_qc.BaseProfile(False, False, "test_amplicon2")
    # 20% alt alleles
    pileup20 = ["A", "A", "C", "T", "A", "A", "A", "A", "A", "A"]
    # 0% alt alleles
    pileup0 = ["A", "A", "A", "A", "A", "A", "A", "A", "A", "A"]
    # 100% alt alleles
    pileup100 = ["T", "T", "T", "G", "G", "G", "T", "G", "C", "C"]
    stats = self_qc.Stats()
    # Record each pileup base on both strands, treating "A" as the reference.
    for base in pileup20:
        if base != "A":
            stats.add_alt(fwd)
            stats.add_alt(rev)
        else:
            stats.add_ref(fwd)
            stats.add_ref(rev)
    assert stats.check_for_failure(bias_threshold=0.3)
def test_masking():
    """mask_sequence replaces positions whose stats report failure with N."""
    failing = StatsTest(True)
    passing = StatsTest(False)

    # Only the failing position (index 4) is masked.
    masked, _ = self_qc.mask_sequence("ATCATC", {0: passing, 4: failing})
    assert masked == "ATCANC"

    # Both tracked positions fail and are masked.
    masked, _ = self_qc.mask_sequence("ATCATC", {0: failing, 4: failing})
    assert masked == "NTCANC"
import pytest
from intervaltree import Interval
from viridian_workflow import self_qc, primers
this_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(this_dir, "data", "primers")
class StatsTest:
def __init__(self, fail):
self.fail = fail
self.log = []
self.config = self_qc.default_config
def check_for_failure(self, **kwargs):
return self.fail
def test_cigar_tuple_construction():
ref = "AAA"
query = "AAA"
cigar = [
(3, 0),
]
assert self_qc.cigar_to_alts(ref, query, cigar) == [(0, "A"), (1, "A"), (2, "A")]
ref = "AAA"
query = "ATTAA"
cigar = [(1, 0), (2, 1), (2, 0)]
assert self_qc.cigar_to_alts(ref, query, cigar) == [
(0, "A"),
# (1, "TT"),
(1, "A"),
(2, "A"),
]
ref = "ATTAA"
query = "AAA"
cigar = [(1, 0), (2, 2), (2, 0)]
assert self_qc.cigar_to_alts(ref, query, cigar) == [
(0, "A"),
(1, "-"),
(2, "-"),
(3, "A"),
(4, "A"),
]
ref = "ATTAA"
query = "AAA"
cigar = [(0, 1), (2, 2), (0, 2)]
assert self_qc.cigar_to_alts(ref, query, cigar, pysam=True) == [
(0, "A"),
(1, "-"),
(2, "-"),
(3, "A"),
(4, "A"),
]
ref = "AAAA"
query = "GGGAAAA"
cigar = [(3, 4), (4, 0)]
assert self_qc.cigar_to_alts(ref, query, cigar, q_pos=3) == [
(0, "A"),
(1, "A"),
(2, "A"),
(3, "A"),
]
def test_mappy_cigar_liftover():
amplicon = primers.Amplicon("test_amplicon")
seq = "CTTCAGGTGATGGCACAACAAGTCCTATTTGAACATAGACTCACGAGATTGCGGTTATACTTTCGAAAATGGGAATCTGGAGTAAAAGACTAAAGTTAGATACACAGTTGCTTCACTTCAGACTATTACCAGCTGTACTCAACTCAATTGAGTACAGACACTGGTGTTGAACATGTGCCATCTTCTTCATCTACAATAAAATTGTTGATGAGCCTGAAGAACATGGTCCAATTCACACAACGACGGTTCATCCGGAGTTGTTAATCCAGTAATGGAACCAATTTATGATGAACCGACGACGACTACTAGCGTGCCTTTGTGTTACTCAAGCTGATGAGTACGAACTTATGTACTCATTCGTTTCGGGAAGAGACAGGTACGTTAATAGTTAATAGCGTACTTCTTTTTCTTGCTTTCGT"
cigar = [
(4, 32),
(0, 29),
(2, 2),
(0, 7),
(1, 1),
(0, 4),
(1, 1),
(0, 8),
(2, 1),
(0, 11),
(1, 3),
(0, 1),
(2, 1),
(0, 26),
(1, 1),
(0, 8),
(2, 1),
(0, 76),
(1, 2),
(0, 46),
(1, 1),
(0, 4),
(2, 1),
(0, 11),
(2, 1),
(0, 77),
(1, 2),
(0, 5),
(2, 1),
(0, 40),
(1, 1),
(0, 54),
(4, 70),
]
self_qc.cigar_to_alts(seq, seq, cigar, pysam=True)
def test_bias_test():
return True # TODO resolve
assert not self_qc.test_bias(10, 100, threshold=0.3)
assert not self_qc.test_bias(90, 100, threshold=0.3)
assert self_qc.test_bias(40, 100, threshold=0.3)
assert self_qc.test_bias(60, 100, threshold=0.3)
def test_stat_evaluation():
return True # resolve
fwd = self_qc.BaseProfile(False, True, "test_amplicon1")
rev = self_qc.BaseProfile(False, False, "test_amplicon2")
# 20% alt alleles
pileup20 = ["A", "A", "C", "T", "A", "A", "A", "A", "A", "A"]
# 0% alt alleles
pileup0 = ["A", "A", "A", "A", "A", "A", "A", "A", "A", "A"]
# 100% alt alleles
pileup100 = ["T", "T", "T", "G", "G", "G", "T", "G", "C", "C"]
stats = self_qc.Stats()
for base in pileup20:
if base != "A":
stats.add_alt(fwd)
stats.add_alt(rev)
else:
stats.add_ref(fwd)
stats.add_ref(rev)
assert stats.check_for_failure(bias_threshold=0.3)
def test_masking():
fail = StatsTest(True)
succeed = StatsTest(False)
sequence = "ATCATC"
stats = {0: succeed, 4: fail}
masked, _ = self_qc.mask_sequence(sequence, stats)
assert masked == "ATCANC"
sequence = "ATCATC"
stats = {0: fail, 4: fail}
masked, _ = self_qc.mask_sequence(sequence, stats)
assert masked == "NTCANC" | 0.449876 | 0.703282 |
import os
import sys
import logging
from argparse import ArgumentParser
ROOT = os.path.dirname(os.path.realpath(__file__))
# Try to load modules from our current env first
sys.path.insert(0, os.path.join(ROOT, ".."))
from burpui_monitor.tools.logging import logger
logger.init_logger(config=dict(level=logging.CRITICAL))
def parse_args(name=None):
    """Parse the command line for the monitor.

    name: program name to display in help/version output; defaults to
    "bui-monitor". Exits the process after printing the version when -V is
    given. Returns the parsed argparse namespace.
    """
    mname = name
    if not name:
        mname = "bui-monitor"
    parser = ArgumentParser(prog=mname)
    # -v may be repeated; argparse counts occurrences into options.log.
    parser.add_argument(
        "-v",
        "--verbose",
        dest="log",
        help="increase output verbosity (e.g., -vv is more verbose than -v)",
        action="count",
    )
    parser.add_argument(
        "-V",
        "--version",
        dest="version",
        help="print version and exit",
        action="store_true",
    )
    parser.add_argument(
        "-c",
        "--config",
        dest="config",
        help="burp-ui configuration file",
        metavar="<CONFIG>",
    )
    parser.add_argument(
        "-l",
        "--logfile",
        dest="logfile",
        help="output logs in defined file",
        metavar="<FILE>",
    )
    options = parser.parse_args()
    if options.version:
        # Imported lazily so --version works even before full package setup.
        from burpui_monitor import __title__
        from burpui_monitor.desc import __version__, __release__
        ver = "{}: v{}".format(mname or __title__, __version__)
        if options.log:
            # Verbose version output also shows the release tag.
            ver = "{} ({})".format(ver, __release__)
        print(ver)
        sys.exit(0)
    return options
def main():
    """Entry point: parse the command line and launch the monitor."""
    monitor(parse_args())
def monitor(options=None):
    """Locate the configuration file and run the monitor pool under trio."""
    import trio
    from burpui_monitor.engines.monitor import MonitorPool
    from burpui_monitor.utils import lookup_file

    if not options:
        options = parse_args(name="bui-monitor")

    # Use the explicit config when given, otherwise probe the defaults.
    if options.config:
        conf = lookup_file(options.config, guess=False)
    else:
        conf = lookup_file(["buimonitor.cfg", "buimonitor.sample.cfg"])
    check_config(conf)

    pool = MonitorPool(conf, options.log, options.logfile)
    trio.run(pool.run)
def check_config(conf):
    """Validate that *conf* refers to an existing configuration file.

    Raises IOError when *conf* is empty/None or the file does not exist.
    """
    if conf and os.path.isfile(conf):
        return
    if not conf:
        raise IOError("No configuration file found")
    raise IOError("File does not exist: '{0}'".format(conf))
if __name__ == "__main__":
main() | pkgs/burp-ui-monitor/burpui_monitor-decoy/__main__.py | import os
import sys
import logging
from argparse import ArgumentParser
ROOT = os.path.dirname(os.path.realpath(__file__))
# Try to load modules from our current env first
sys.path.insert(0, os.path.join(ROOT, ".."))
from burpui_monitor.tools.logging import logger
logger.init_logger(config=dict(level=logging.CRITICAL))
def parse_args(name=None):
mname = name
if not name:
mname = "bui-monitor"
parser = ArgumentParser(prog=mname)
parser.add_argument(
"-v",
"--verbose",
dest="log",
help="increase output verbosity (e.g., -vv is more verbose than -v)",
action="count",
)
parser.add_argument(
"-V",
"--version",
dest="version",
help="print version and exit",
action="store_true",
)
parser.add_argument(
"-c",
"--config",
dest="config",
help="burp-ui configuration file",
metavar="<CONFIG>",
)
parser.add_argument(
"-l",
"--logfile",
dest="logfile",
help="output logs in defined file",
metavar="<FILE>",
)
options = parser.parse_args()
if options.version:
from burpui_monitor import __title__
from burpui_monitor.desc import __version__, __release__
ver = "{}: v{}".format(mname or __title__, __version__)
if options.log:
ver = "{} ({})".format(ver, __release__)
print(ver)
sys.exit(0)
return options
def main():
"""
Main function
"""
options = parse_args()
monitor(options)
def monitor(options=None):
import trio
from burpui_monitor.engines.monitor import MonitorPool
from burpui_monitor.utils import lookup_file
if not options:
options = parse_args(name="bui-monitor")
conf = ["buimonitor.cfg", "buimonitor.sample.cfg"]
if options.config:
conf = lookup_file(options.config, guess=False)
else:
conf = lookup_file(conf)
check_config(conf)
monitor = MonitorPool(conf, options.log, options.logfile)
trio.run(monitor.run)
def check_config(conf):
if not conf:
raise IOError("No configuration file found")
if not os.path.isfile(conf):
raise IOError("File does not exist: '{0}'".format(conf))
if __name__ == "__main__":
main() | 0.294114 | 0.064418 |
import argparse
import csv
import math
import pathlib
import sys
from typing import List
from pynapl.APL import APL
from pynapl.APLPyConnect import Connection
LANGUAGES = ["en", "fr", "es", "pt"]
DATA_FOLDER = pathlib.Path(__file__).parent / "data"
FILE_NAME_TEMPLATE = "{lang}_trigram_count_filtered.tsv"
def init_data(apl: Connection.APL) -> List[int]:
    """Initialise the data arrays on the APL side.

    As a side effect, defines {lang}_trigrams and {lang}_counts arrays in
    the APL instance for each language: the trigrams array is a nested
    character vector and the counts array a simple integer vector, one item
    longer than the trigrams (an extra 1 is appended at the end).

    Returns the total trigram count per language, in LANGUAGES order.
    """
    totals = []
    for lang in LANGUAGES:
        lang_total = 0
        trigrams: List[str] = []
        counts: List[int] = []
        tsv_path = DATA_FOLDER / FILE_NAME_TEMPLATE.format(lang=lang)
        with open(tsv_path, "r") as tsv_file:
            for trigram, count in csv.reader(tsv_file, delimiter="\t"):
                trigrams.append(trigram)
                lang_total += int(count)
                # Add-one smoothing: unseen trigrams fall through to the
                # trailing 1 appended on the APL side.
                counts.append(int(count) + 1)
        totals.append(lang_total)
        _ = apl.eval(f"{lang}_trigrams ← ⊃∆", trigrams)
        _ = apl.eval(f"{lang}_counts ← 1,⍨⊃∆", counts)
    return totals
def get_counts(apl: Connection.APL, sentence: str, language: str) -> List[int]:
    """Return the trigram counts for each trigram of a sentence."""
    apl_code = f"{language}_counts[{language}_trigrams ⍳ 3,/⊃∆]"
    return apl.eval(apl_code, sentence.lower())
def recognise_sentence(apl: Connection.APL, totals: List[int], sentence: str) -> str:
    """Performs automatic language recognition on the given sentence."""
    lowered = sentence.lower()
    best_lang = None
    best_score = float("-inf")
    for lang, total in zip(LANGUAGES, totals):
        # Log-probability of the sentence under this language's trigram model.
        score = sum(math.log(count / total)
                    for count in get_counts(apl, lowered, lang))
        # Strict '>' keeps the first language on ties, like the original max().
        if score > best_score:
            best_lang, best_score = lang, score
    return best_lang
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--sentence", help="Sentence to recognise.")
parser.add_argument(
"-i",
"--interactive",
help="Enter interactive mode.",
action="store_true"
)
args = parser.parse_args()
if not args.sentence and not args.interactive:
sys.exit()
apl = APL()
totals = init_data(apl)
if args.sentence:
print(recognise_sentence(apl, totals, args.sentence))
if args.interactive:
print("Type sentences to be recognised:")
sentence = input(" >> ")
while sentence:
print(recognise_sentence(apl, totals, sentence))
sentence = input(" >> ") | demos/language_recogniser/recogniser.py | import argparse
import csv
import math
import pathlib
import sys
from typing import List
from pynapl.APL import APL
from pynapl.APLPyConnect import Connection
LANGUAGES = ["en", "fr", "es", "pt"]
DATA_FOLDER = pathlib.Path(__file__).parent / "data"
FILE_NAME_TEMPLATE = "{lang}_trigram_count_filtered.tsv"
def init_data(apl: Connection.APL) -> List[int]:
"""Initialise the data arrays on the APL side.
As a side effect, this function defines some arrays on the APL instance.
For each language, {lang}_trigrams and {lang}_counts arrays are created.
The trigrams array is a nested character vector,
and the counts array is a simple integer vector.
The counts vector is one item longer than the trigrams array,
having an extra 1 at the end.
Returns an integer list, with the total trigram count for each language.
"""
totals = []
for lang in LANGUAGES:
total = 0
trigrams, counts = [], []
with open(DATA_FOLDER / FILE_NAME_TEMPLATE.format(lang=lang), "r") as f:
reader = csv.reader(f, delimiter="\t")
for trigram, count in reader:
trigrams.append(trigram)
total += int(count)
counts.append(int(count) + 1)
totals.append(total)
_ = apl.eval(f"{lang}_trigrams ← ⊃∆", trigrams)
_ = apl.eval(f"{lang}_counts ← 1,⍨⊃∆", counts)
return totals
def get_counts(apl: Connection.APL, sentence: str, language: str) -> List[int]:
    """Return the trigram counts for each trigram of a sentence."""
    # 3,/ builds all trigram windows; ⍳ looks each one up in the trigram table.
    lookup = f"{language}_counts[{language}_trigrams ⍳ 3,/⊃∆]"
    return apl.eval(lookup, sentence.lower())
def recognise_sentence(apl: Connection.APL, totals: List[int], sentence: str) -> str:
    """Performs automatic language recognition on the given sentence."""
    lowered = sentence.lower()
    best_lang = None
    best_score = float("-inf")
    for lang, total in zip(LANGUAGES, totals):
        # Sum of log-probabilities of the sentence's trigrams under this language.
        score = sum(math.log(count / total) for count in get_counts(apl, lowered, lang))
        # Strict > keeps the first language on ties, matching argmax semantics.
        if score > best_score:
            best_lang = lang
            best_score = score
    return best_lang
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--sentence", help="Sentence to recognise.")
    parser.add_argument(
        "-i",
        "--interactive",
        help="Enter interactive mode.",
        action="store_true"
    )
    args = parser.parse_args()
    # Nothing to do unless at least one of the two modes was requested.
    if not (args.sentence or args.interactive):
        sys.exit()
    apl = APL()
    totals = init_data(apl)
    if args.sentence:
        print(recognise_sentence(apl, totals, args.sentence))
    if args.interactive:
        print("Type sentences to be recognised:")
        # Keep prompting until the user submits an empty line.
        for sentence in iter(lambda: input(" >> "), ""):
            print(recognise_sentence(apl, totals, sentence))
import filecmp
import os
import shutil
import tempfile
import unittest
import json_summary_combiner
class TestJsonSummaryCombiner(unittest.TestCase):
def setUp(self):
self._test_data_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'test_data', 'combiner')
self._actual_html_dir = tempfile.mkdtemp()
self._absolute_url = 'http://dummy-link.foobar/'
self._render_pictures_args = '--test1=test --test2=test --test3'
self._nopatch_gpu = 'False'
self._withpatch_gpu = 'True'
def tearDown(self):
shutil.rmtree(self._actual_html_dir)
def test_CombineJsonSummaries_WithDifferences(self):
worker_name_to_info = json_summary_combiner.CombineJsonSummaries(
os.path.join(self._test_data_dir, 'differences'))
for worker_name, worker_info in worker_name_to_info.items():
worker_num = worker_name[-1]
file_count = 0
for file_info in worker_info.failed_files:
file_count += 1
self.assertEquals(file_info.file_name,
'file%s_%s.png' % (worker_name, file_count))
self.assertEquals(file_info.skp_location,
'http://storage.cloud.google.com/dummy-bucket/skps'
'/%s/file%s_.skp' % (worker_name, worker_name))
self.assertEquals(file_info.num_pixels_differing,
int('%s%s1' % (worker_num, file_count)))
self.assertEquals(file_info.percent_pixels_differing,
int('%s%s2' % (worker_num, file_count)))
self.assertEquals(file_info.max_diff_per_channel,
int('%s%s4' % (worker_num, file_count)))
self.assertEquals(
worker_info.skps_location,
'gs://dummy-bucket/skps/%s' % worker_name)
self.assertEquals(
worker_info.files_location_nopatch,
'gs://dummy-bucket/output-dir/%s/nopatch-images' % worker_name)
self.assertEquals(
worker_info.files_location_diffs,
'gs://dummy-bucket/output-dir/%s/diffs' % worker_name)
self.assertEquals(
worker_info.files_location_whitediffs,
'gs://dummy-bucket/output-dir/%s/whitediffs' % worker_name)
def test_CombineJsonSummaries_NoDifferences(self):
worker_name_to_info = json_summary_combiner.CombineJsonSummaries(
os.path.join(self._test_data_dir, 'no_output'))
self.assertEquals(worker_name_to_info, {})
def _get_test_worker_name_to_info(self):
worker_name_to_info = {
'worker1': json_summary_combiner.WorkerInfo(
worker_name='worker1',
failed_files=[
json_summary_combiner.FileInfo(
'fileworker1_1.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker1/'
'fileworker1_.skp',
111, 112, 114, 115),
json_summary_combiner.FileInfo(
'fileworker1_2.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker1/'
'fileworker1_.skp',
121, 122, 124, 125)],
skps_location='gs://dummy-bucket/skps/worker1',
files_location_diffs='gs://dummy-bucket/worker1/diffs',
files_location_whitediffs='gs://dummy-bucket/worker1/whitediffs',
files_location_nopatch='gs://dummy-bucket/worker1/nopatch',
files_location_withpatch='gs://dummy-bucket/worker1/withpatch'),
'worker2': json_summary_combiner.WorkerInfo(
worker_name='worker2',
failed_files=[
json_summary_combiner.FileInfo(
'fileworker2_1.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker2/'
'fileworker2_.skp',
211, 212, 214, 215)],
skps_location='gs://dummy-bucket/skps/worker2',
files_location_diffs='gs://dummy-bucket/worker2/diffs',
files_location_whitediffs='gs://dummy-bucket/worker2/whitediffs',
files_location_nopatch='gs://dummy-bucket/worker2/nopatch',
files_location_withpatch='gs://dummy-bucket/worker2/withpatch'),
'worker3': json_summary_combiner.WorkerInfo(
worker_name='worker3',
failed_files=[
json_summary_combiner.FileInfo(
'fileworker3_1.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
'fileworker3_.skp',
311, 312, 314, 315),
json_summary_combiner.FileInfo(
'fileworker3_2.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
'fileworker3_.skp',
321, 322, 324, 325),
json_summary_combiner.FileInfo(
'fileworker3_3.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
'fileworker3_.skp',
331, 332, 334, 335),
json_summary_combiner.FileInfo(
'fileworker3_4.png',
'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
'fileworker3_.skp',
341, 342, 344, 345)],
skps_location='gs://dummy-bucket/skps/worker3',
files_location_diffs='gs://dummy-bucket/worker3/diffs',
files_location_whitediffs='gs://dummy-bucket/worker3/whitediffs',
files_location_nopatch='gs://dummy-bucket/worker3/nopatch',
files_location_withpatch='gs://dummy-bucket/worker3/withpatch')
}
return worker_name_to_info
def test_OutputToHTML_WithDifferences_WithAbsoluteUrl(self):
worker_name_to_info = self._get_test_worker_name_to_info()
json_summary_combiner.OutputToHTML(
worker_name_to_info=worker_name_to_info,
output_html_dir=self._actual_html_dir,
absolute_url=self._absolute_url,
render_pictures_args=self._render_pictures_args,
nopatch_gpu=self._nopatch_gpu,
withpatch_gpu=self._withpatch_gpu)
html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
'differences_with_url')
for html_file in ('index.html', 'list_of_all_files.html',
'fileworker1_1.png.html', 'fileworker1_2.png.html',
'fileworker2_1.png.html', 'fileworker3_1.png.html',
'fileworker3_2.png.html', 'fileworker3_3.png.html',
'fileworker3_4.png.html'):
self.assertTrue(
filecmp.cmp(os.path.join(html_expected_dir, html_file),
os.path.join(self._actual_html_dir, html_file)))
def test_OutputToHTML_WithDifferences_WithNoUrl(self):
worker_name_to_info = self._get_test_worker_name_to_info()
json_summary_combiner.OutputToHTML(
worker_name_to_info=worker_name_to_info,
output_html_dir=self._actual_html_dir,
absolute_url='',
render_pictures_args=self._render_pictures_args,
nopatch_gpu=self._nopatch_gpu,
withpatch_gpu=self._withpatch_gpu)
html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
'differences_no_url')
for html_file in ('index.html', 'list_of_all_files.html',
'fileworker1_1.png.html', 'fileworker1_2.png.html',
'fileworker2_1.png.html', 'fileworker3_1.png.html',
'fileworker3_2.png.html', 'fileworker3_3.png.html',
'fileworker3_4.png.html'):
self.assertTrue(
filecmp.cmp(os.path.join(html_expected_dir, html_file),
os.path.join(self._actual_html_dir, html_file)))
def test_OutputToHTML_NoDifferences(self):
json_summary_combiner.OutputToHTML(
worker_name_to_info={},
output_html_dir=self._actual_html_dir,
absolute_url='',
render_pictures_args=self._render_pictures_args,
nopatch_gpu=self._nopatch_gpu,
withpatch_gpu=self._withpatch_gpu)
html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
'nodifferences')
self.assertTrue(
filecmp.cmp(os.path.join(html_expected_dir, 'index.html'),
os.path.join(self._actual_html_dir, 'index.html')))
if __name__ == '__main__':
unittest.main() | ct/py/json_summary_combiner_test.py | import filecmp
import os
import shutil
import tempfile
import unittest
import json_summary_combiner
class TestJsonSummaryCombiner(unittest.TestCase):
    """Tests for json_summary_combiner's summary merging and HTML output.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual`` throughout; behaviour is unchanged.
    """

    def setUp(self):
        # Static fixtures live next to this file; HTML output goes to a
        # throwaway directory that tearDown removes.
        self._test_data_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'test_data', 'combiner')
        self._actual_html_dir = tempfile.mkdtemp()
        self._absolute_url = 'http://dummy-link.foobar/'
        self._render_pictures_args = '--test1=test --test2=test --test3'
        self._nopatch_gpu = 'False'
        self._withpatch_gpu = 'True'

    def tearDown(self):
        shutil.rmtree(self._actual_html_dir)

    def test_CombineJsonSummaries_WithDifferences(self):
        """Combined per-worker info must reflect the fixture naming scheme."""
        worker_name_to_info = json_summary_combiner.CombineJsonSummaries(
            os.path.join(self._test_data_dir, 'differences'))
        for worker_name, worker_info in worker_name_to_info.items():
            worker_num = worker_name[-1]
            file_count = 0
            for file_info in worker_info.failed_files:
                file_count += 1
                self.assertEqual(file_info.file_name,
                                 'file%s_%s.png' % (worker_name, file_count))
                self.assertEqual(file_info.skp_location,
                                 'http://storage.cloud.google.com/dummy-bucket/skps'
                                 '/%s/file%s_.skp' % (worker_name, worker_name))
                self.assertEqual(file_info.num_pixels_differing,
                                 int('%s%s1' % (worker_num, file_count)))
                self.assertEqual(file_info.percent_pixels_differing,
                                 int('%s%s2' % (worker_num, file_count)))
                self.assertEqual(file_info.max_diff_per_channel,
                                 int('%s%s4' % (worker_num, file_count)))
            self.assertEqual(
                worker_info.skps_location,
                'gs://dummy-bucket/skps/%s' % worker_name)
            self.assertEqual(
                worker_info.files_location_nopatch,
                'gs://dummy-bucket/output-dir/%s/nopatch-images' % worker_name)
            self.assertEqual(
                worker_info.files_location_diffs,
                'gs://dummy-bucket/output-dir/%s/diffs' % worker_name)
            self.assertEqual(
                worker_info.files_location_whitediffs,
                'gs://dummy-bucket/output-dir/%s/whitediffs' % worker_name)

    def test_CombineJsonSummaries_NoDifferences(self):
        """An input directory with no summaries yields an empty mapping."""
        worker_name_to_info = json_summary_combiner.CombineJsonSummaries(
            os.path.join(self._test_data_dir, 'no_output'))
        self.assertEqual(worker_name_to_info, {})

    def _get_test_worker_name_to_info(self):
        """Build a deterministic worker -> WorkerInfo fixture for HTML tests."""
        worker_name_to_info = {
            'worker1': json_summary_combiner.WorkerInfo(
                worker_name='worker1',
                failed_files=[
                    json_summary_combiner.FileInfo(
                        'fileworker1_1.png',
                        'http://storage.cloud.google.com/dummy-bucket/skps/worker1/'
                        'fileworker1_.skp',
                        111, 112, 114, 115),
                    json_summary_combiner.FileInfo(
                        'fileworker1_2.png',
                        'http://storage.cloud.google.com/dummy-bucket/skps/worker1/'
                        'fileworker1_.skp',
                        121, 122, 124, 125)],
                skps_location='gs://dummy-bucket/skps/worker1',
                files_location_diffs='gs://dummy-bucket/worker1/diffs',
                files_location_whitediffs='gs://dummy-bucket/worker1/whitediffs',
                files_location_nopatch='gs://dummy-bucket/worker1/nopatch',
                files_location_withpatch='gs://dummy-bucket/worker1/withpatch'),
            'worker2': json_summary_combiner.WorkerInfo(
                worker_name='worker2',
                failed_files=[
                    json_summary_combiner.FileInfo(
                        'fileworker2_1.png',
                        'http://storage.cloud.google.com/dummy-bucket/skps/worker2/'
                        'fileworker2_.skp',
                        211, 212, 214, 215)],
                skps_location='gs://dummy-bucket/skps/worker2',
                files_location_diffs='gs://dummy-bucket/worker2/diffs',
                files_location_whitediffs='gs://dummy-bucket/worker2/whitediffs',
                files_location_nopatch='gs://dummy-bucket/worker2/nopatch',
                files_location_withpatch='gs://dummy-bucket/worker2/withpatch'),
            'worker3': json_summary_combiner.WorkerInfo(
                worker_name='worker3',
                failed_files=[
                    json_summary_combiner.FileInfo(
                        'fileworker3_1.png',
                        'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
                        'fileworker3_.skp',
                        311, 312, 314, 315),
                    json_summary_combiner.FileInfo(
                        'fileworker3_2.png',
                        'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
                        'fileworker3_.skp',
                        321, 322, 324, 325),
                    json_summary_combiner.FileInfo(
                        'fileworker3_3.png',
                        'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
                        'fileworker3_.skp',
                        331, 332, 334, 335),
                    json_summary_combiner.FileInfo(
                        'fileworker3_4.png',
                        'http://storage.cloud.google.com/dummy-bucket/skps/worker3/'
                        'fileworker3_.skp',
                        341, 342, 344, 345)],
                skps_location='gs://dummy-bucket/skps/worker3',
                files_location_diffs='gs://dummy-bucket/worker3/diffs',
                files_location_whitediffs='gs://dummy-bucket/worker3/whitediffs',
                files_location_nopatch='gs://dummy-bucket/worker3/nopatch',
                files_location_withpatch='gs://dummy-bucket/worker3/withpatch')
        }
        return worker_name_to_info

    def test_OutputToHTML_WithDifferences_WithAbsoluteUrl(self):
        """Generated HTML must match the golden files for the absolute-URL case."""
        worker_name_to_info = self._get_test_worker_name_to_info()
        json_summary_combiner.OutputToHTML(
            worker_name_to_info=worker_name_to_info,
            output_html_dir=self._actual_html_dir,
            absolute_url=self._absolute_url,
            render_pictures_args=self._render_pictures_args,
            nopatch_gpu=self._nopatch_gpu,
            withpatch_gpu=self._withpatch_gpu)
        html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
                                         'differences_with_url')
        for html_file in ('index.html', 'list_of_all_files.html',
                          'fileworker1_1.png.html', 'fileworker1_2.png.html',
                          'fileworker2_1.png.html', 'fileworker3_1.png.html',
                          'fileworker3_2.png.html', 'fileworker3_3.png.html',
                          'fileworker3_4.png.html'):
            self.assertTrue(
                filecmp.cmp(os.path.join(html_expected_dir, html_file),
                            os.path.join(self._actual_html_dir, html_file)))

    def test_OutputToHTML_WithDifferences_WithNoUrl(self):
        """Generated HTML must match the golden files for the no-URL case."""
        worker_name_to_info = self._get_test_worker_name_to_info()
        json_summary_combiner.OutputToHTML(
            worker_name_to_info=worker_name_to_info,
            output_html_dir=self._actual_html_dir,
            absolute_url='',
            render_pictures_args=self._render_pictures_args,
            nopatch_gpu=self._nopatch_gpu,
            withpatch_gpu=self._withpatch_gpu)
        html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
                                         'differences_no_url')
        for html_file in ('index.html', 'list_of_all_files.html',
                          'fileworker1_1.png.html', 'fileworker1_2.png.html',
                          'fileworker2_1.png.html', 'fileworker3_1.png.html',
                          'fileworker3_2.png.html', 'fileworker3_3.png.html',
                          'fileworker3_4.png.html'):
            self.assertTrue(
                filecmp.cmp(os.path.join(html_expected_dir, html_file),
                            os.path.join(self._actual_html_dir, html_file)))

    def test_OutputToHTML_NoDifferences(self):
        """An empty mapping still produces the golden index page."""
        json_summary_combiner.OutputToHTML(
            worker_name_to_info={},
            output_html_dir=self._actual_html_dir,
            absolute_url='',
            render_pictures_args=self._render_pictures_args,
            nopatch_gpu=self._nopatch_gpu,
            withpatch_gpu=self._withpatch_gpu)
        html_expected_dir = os.path.join(self._test_data_dir, 'html_outputs',
                                         'nodifferences')
        self.assertTrue(
            filecmp.cmp(os.path.join(html_expected_dir, 'index.html'),
                        os.path.join(self._actual_html_dir, 'index.html')))
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
from heapq import heappush, heappop
# The Maze
# DFS is faster to get to destination
class Solution(object):
def hasPath(self, maze, start, destination):
"""
:type maze: List[List[int]]
:type start: List[int]
:type destination: List[int]
:rtype: bool
"""
visited = set()
return self.dfs(maze, tuple(start), tuple(destination), visited)
def dfs(self, maze, curr, dest, visited):
# early stop condition
if curr in visited:
return False
# valid result condition
if curr == dest:
return True
visited.add(curr)
# recursion definition
for coor in self.next_coors(curr[0], curr[1], maze):
if self.dfs(maze, coor, dest, visited):
return True
return False
def next_coors(self, i, j, maze):
m, n = len(maze), len(maze[0])
coors = []
for dx, dy in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
x, y = i, j
while 0 <= x + dx < m and 0 <= y + dy < n and maze[x + dx][y + dy] == 0:
x += dx
y += dy
if not (i == x and j == y):
coors.append((x, y))
return coors
# The Maze II
# BFS with priority queue is faster in this case
class Solution(object):
def shortestDistance(self, maze, start, destination):
"""
:type maze: List[List[int]]
:type start: List[int]
:type destination: List[int]
:rtype: int
"""
visited = set()
pq = [(0, start[0], start[1])]
while pq:
# means every coordinate starts the search with shortest distance
# and the same coordinate with longer distance will be deduplicated
# thus overall forming a shortest path
dist, i, j = heappop(pq)
if (i, j) in visited:
continue
if [i, j] == destination:
return dist
visited.add((i, j))
for coor in self.next_coors(i, j, maze):
new_dist = dist + abs(coor[0] - i) + abs(coor[1] - j)
heappush(pq, (new_dist, coor[0], coor[1]))
return -1
# same as Q1
def next_coors(self, i, j, maze):
m, n = len(maze), len(maze[0])
coors = []
for dx, dy in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
x, y = i, j
while 0 <= x + dx < m and 0 <= y + dy < n and maze[x + dx][y + dy] == 0:
x += dx
y += dy
if not (i == x and j == y):
coors.append((x, y))
return coors
# The Maze III
# BFS with prioritiy queue, ball will fall into hole if rolled to that position
# similar approach as Q2
class Solution(object):
def findShortestWay(self, maze, ball, hole):
"""
:type maze: List[List[int]]
:type ball: List[int]
:type hole: List[int]
:rtype: str
"""
m, n = len(maze), len(maze[0])
visited = set()
pq = [(0, '', ball[0], ball[1])]
while pq:
dist, path, i, j = heappop(pq)
if (i, j) in visited:
continue
if [i, j] == hole:
return path
visited.add((i, j))
for x, y, direction in self.next_coors(maze, i, j, hole):
new_dist = dist + abs(x - i) + abs(y - j)
new_path = path + direction
heappush(pq, (new_dist, new_path, x, y))
return 'impossible'
def next_coors(self, maze, i, j, hole):
m, n = len(maze), len(maze[0])
directions = {
'r': (0, 1),
'l': (0, -1),
'd': (1, 0),
'u': (-1, 0)
}
coors = []
for d in directions:
dx, dy = directions[d]
x, y = i, j
while 0 <= x + dx < m and 0 <= y + dy < n and maze[x + dx][y + dy] == 0:
x += dx
y += dy
# stop when reaching the hole
if [x, y] == hole:
break
if not (x == i and y == j)
coors.append((x, y, d))
return coors | algorithms/bfs/the_maze.py | from heapq import heappush, heappop
# The Maze
# DFS is faster to get to destination
class Solution(object):
    def hasPath(self, maze, start, destination):
        """
        :type maze: List[List[int]]
        :type start: List[int]
        :type destination: List[int]
        :rtype: bool
        """
        return self.dfs(maze, tuple(start), tuple(destination), set())

    def dfs(self, maze, curr, dest, visited):
        """Depth-first search over stop positions; True when dest is reachable."""
        if curr in visited:
            # Already explored from this stop without success.
            return False
        if curr == dest:
            return True
        visited.add(curr)
        # Recurse into every cell the ball can roll to from the current stop;
        # any() short-circuits exactly like the original early return.
        return any(
            self.dfs(maze, stop, dest, visited)
            for stop in self.next_coors(curr[0], curr[1], maze)
        )

    def next_coors(self, i, j, maze):
        """Return every cell where a ball rolled from (i, j) can stop."""
        rows, cols = len(maze), len(maze[0])
        stops = []
        for di, dj in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            r, c = i, j
            # Keep rolling while the next cell is inside the maze and open.
            while 0 <= r + di < rows and 0 <= c + dj < cols and maze[r + di][c + dj] == 0:
                r += di
                c += dj
            if (r, c) != (i, j):
                stops.append((r, c))
        return stops
# The Maze II
# BFS with priority queue is faster in this case
class Solution(object):
    def shortestDistance(self, maze, start, destination):
        """
        :type maze: List[List[int]]
        :type start: List[int]
        :type destination: List[int]
        :rtype: int
        """
        # Dijkstra over "stop" cells: the first time a cell is popped it is
        # reached via a shortest path, so later (longer) entries are skipped.
        settled = set()
        frontier = [(0, start[0], start[1])]
        while frontier:
            dist, row, col = heappop(frontier)
            if (row, col) in settled:
                continue
            if [row, col] == destination:
                return dist
            settled.add((row, col))
            for stop_row, stop_col in self.next_coors(row, col, maze):
                rolled = abs(stop_row - row) + abs(stop_col - col)
                heappush(frontier, (dist + rolled, stop_row, stop_col))
        return -1

    # same as Q1
    def next_coors(self, i, j, maze):
        """Return every cell where a ball rolled from (i, j) can stop."""
        rows, cols = len(maze), len(maze[0])
        stops = []
        for di, dj in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            r, c = i, j
            while 0 <= r + di < rows and 0 <= c + dj < cols and maze[r + di][c + dj] == 0:
                r += di
                c += dj
            if (r, c) != (i, j):
                stops.append((r, c))
        return stops
# The Maze III
# BFS with prioritiy queue, ball will fall into hole if rolled to that position
# similar approach as Q2
class Solution(object):
    def findShortestWay(self, maze, ball, hole):
        """
        :type maze: List[List[int]]
        :type ball: List[int]
        :type hole: List[int]
        :rtype: str
        """
        visited = set()
        # Heap entries are (distance, path, row, col): equally short routes
        # resolve to the lexicographically smallest instruction string.
        pq = [(0, '', ball[0], ball[1])]
        while pq:
            dist, path, i, j = heappop(pq)
            if (i, j) in visited:
                continue
            if [i, j] == hole:
                return path
            visited.add((i, j))
            for x, y, direction in self.next_coors(maze, i, j, hole):
                new_dist = dist + abs(x - i) + abs(y - j)
                heappush(pq, (new_dist, path + direction, x, y))
        return 'impossible'

    def next_coors(self, maze, i, j, hole):
        """Return (row, col, direction) stops reachable by rolling from (i, j).

        Unlike the plain maze problems, the roll stops early when the ball
        passes over the hole.
        """
        m, n = len(maze), len(maze[0])
        directions = {
            'r': (0, 1),
            'l': (0, -1),
            'd': (1, 0),
            'u': (-1, 0)
        }
        coors = []
        for d in directions:
            dx, dy = directions[d]
            x, y = i, j
            while 0 <= x + dx < m and 0 <= y + dy < n and maze[x + dx][y + dy] == 0:
                x += dx
                y += dy
                # Stop rolling when the ball reaches the hole.
                if [x, y] == hole:
                    break
            # Fix: the original line was missing its trailing colon (SyntaxError).
            if not (x == i and y == j):
                coors.append((x, y, d))
        return coors
import random
humanMales = ['Kharmat', 'Dalba', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
humanFemales = ['Vurnan', 'Ulbuh', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', 'Share Bhalme', '<NAME>', '<NAME>']
halflingMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', 'N<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halflingFemales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halforcMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halforcFemales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halfelfMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halfelfFemales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
gnomeMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
gnomeFemales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
elfMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', 'Corio<NAME>igohhih', 'Neironthalmas Komrol']
elfFemales = ['<NAME>', '<NAME>', '<NAME>', 'Inthe Shaniol', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
dwarfMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
dwarfFemales = ['<NAME>', '<NAME>', 'So Deeppunch', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
genders = [
#Weighted to allow for a 1/40 chance of a non-binary NPC, a 20/40 chance
#of a female NPC, and a 19/40 chance of a male NPC.
'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male',
'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female',
'non-binary']
#Proof of concept using a list for now, need to find a way to generate
#names en-masse and either create them on the fly or put a bunch in a DB
#Also, an androgynous set of names would be nice, but for now just choose
#randomly between male and female names if gender is not male/female
class Name():
def __init__(self, inName, inGender, finalRace):
self.finalName = inName
self.finalGender = inGender
self.finalRace = finalRace
def get_gender(self):
if self.finalGender == 'rand':
self.finalGender = random.choice(genders)
def get_name(self):
if self.finalName != 'rand':
pass
elif self.finalGender == 'male':
self.finalName = random.choice(humanMales + halfingMales + halforcMales + halfelfMales + gnomeMales + elfMales + dwarfMales)
elif self.finalGender == 'female':
self.finalName = random.choice(humanFemales + halflingFemales + halforcFemales + halfelfFemales + gnomeFemales + elfFemales + dwarfFemales)
else:
self.finalName = random.choice(humanMales + humanFemales + halfingMales + halflingFemales + halforcMales + halforcFemales + halfelfMales + halfelfFemales + gnomeMales + gnomeFemales + elfMales + elfFemales + dwarfMales + dwarfFemales)
def generate(self):
self.get_gender()
self.get_name()
return (self.finalName, self.finalGender)
class HumanName(Name):
def get_name(self):
if self.finalName != 'rand':
pass
elif self.finalGender == 'male':
self.finalName = random.choice(humanMales)
elif self.finalGender == 'female':
self.finalName = random.choice(humanFemales)
else:
self.finalName = random.choice((humanMales + humanFemales))
class HalflingName(Name):
def get_name(self):
if self.finalName != 'rand':
pass
elif self.finalGender == 'male':
self.finalName = random.choice(halflingMales)
elif self.finalGender == 'female':
self.finalName = random.choice(halflingFemales)
else:
self.finalName = random.choice(halflingMales + halflingFemales)
class HalfOrcName(Name):
def get_name(self):
if self.finalName != 'rand':
pass
elif self.finalGender == 'male':
self.finalName = random.choice(halforcMales)
elif self.finalGender == 'female':
self.finalName = random.choice(halforcFemales)
else:
self.finalName = random.choice(halforcMales + halforcFemales)
class HalfElfName(Name):
def get_name(self):
if self.finalName != 'rand':
pass
elif self.finalGender == 'male':
self.finalName = random.choice(halfelfMales)
elif self.finalGender == 'female':
self.finalName = random.choice(halfelfFemales)
else:
self.finalName = random.choice(halfelfMales + halfelfFemales)
class GnomeName(Name):
def get_name(self):
if self.finalName != 'rand':
pass
elif self.finalGender == 'male':
self.finalName = random.choice(gnomeMales)
elif self.finalGender == 'female':
self.finalName = random.choice(gnomeFemales)
else:
self.finalName = random.choice(gnomeMales + gnomeFemales)
class ElfName(Name):
def get_name(self):
if self.finalName != 'rand':
pass
elif self.finalGender == 'male':
self.finalName = random.choice(elfMales)
elif self.finalGender == 'female':
self.finalName = random.choice(elfFemales)
else:
self.finalName = random.choice(elfMales + elfFemales)
class DwarfName(Name):
def get_name(self):
if self.finalName != 'rand':
pass
elif self.finalGender == 'male':
self.finalName = random.choice(dwarfMales)
elif self.finalGender == 'female':
self.finalName = random.choice(dwarfFemales)
else:
self.finalName = random.choice(dwarfMales + dwarfFemales) | namegen.py | import random
humanMales = ['Kharmat', 'Dalba', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
humanFemales = ['Vurnan', 'Ulbuh', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', 'Share Bhalme', '<NAME>', '<NAME>']
halflingMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', 'N<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halflingFemales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halforcMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halforcFemales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halfelfMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
halfelfFemales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
gnomeMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
gnomeFemales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
elfMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', 'Corio<NAME>igohhih', 'Neironthalmas Komrol']
elfFemales = ['<NAME>', '<NAME>', '<NAME>', 'Inthe Shaniol', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
dwarfMales = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
dwarfFemales = ['<NAME>', '<NAME>', 'So Deeppunch', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']
genders = [
#Weighted to allow for a 1/40 chance of a non-binary NPC, a 20/40 chance
#of a female NPC, and a 19/40 chance of a male NPC.
'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male', 'male',
'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female', 'female',
'non-binary']
#Proof of concept using a list for now, need to find a way to generate
#names en-masse and either create them on the fly or put a bunch in a DB
#Also, an androgynous set of names would be nice, but for now just choose
#randomly between male and female names if gender is not male/female
class Name():
    """Generate an (NPC name, gender) pair; race subclasses narrow the pools."""

    def __init__(self, inName, inGender, finalRace):
        self.finalName = inName      # 'rand' or a caller-supplied fixed name
        self.finalGender = inGender  # 'rand', 'male', 'female', or 'non-binary'
        self.finalRace = finalRace

    def get_gender(self):
        """Resolve a 'rand' gender using the weighted genders list."""
        if self.finalGender == 'rand':
            self.finalGender = random.choice(genders)

    def get_name(self):
        """Resolve a 'rand' name from every race's pool for the chosen gender."""
        if self.finalName != 'rand':
            pass
        elif self.finalGender == 'male':
            # Fix: 'halfingMales' was a NameError typo for 'halflingMales'.
            self.finalName = random.choice(humanMales + halflingMales + halforcMales + halfelfMales + gnomeMales + elfMales + dwarfMales)
        elif self.finalGender == 'female':
            self.finalName = random.choice(humanFemales + halflingFemales + halforcFemales + halfelfFemales + gnomeFemales + elfFemales + dwarfFemales)
        else:
            # Non-binary (or anything else): draw from the combined pools.
            # Fix: same 'halfingMales' typo as above.
            self.finalName = random.choice(humanMales + humanFemales + halflingMales + halflingFemales + halforcMales + halforcFemales + halfelfMales + halfelfFemales + gnomeMales + gnomeFemales + elfMales + elfFemales + dwarfMales + dwarfFemales)

    def generate(self):
        """Resolve gender, then name; return (finalName, finalGender)."""
        self.get_gender()
        self.get_name()
        return (self.finalName, self.finalGender)
class HumanName(Name):
    """Name generator restricted to the human name pools."""

    def get_name(self):
        if self.finalName != 'rand':
            return  # Caller supplied a fixed name; nothing to resolve.
        pool = {'male': humanMales, 'female': humanFemales}.get(
            self.finalGender, humanMales + humanFemales)
        self.finalName = random.choice(pool)
class HalflingName(Name):
    """Name generator restricted to the halfling name pools."""

    def get_name(self):
        if self.finalName != 'rand':
            return  # Caller supplied a fixed name; nothing to resolve.
        pool = {'male': halflingMales, 'female': halflingFemales}.get(
            self.finalGender, halflingMales + halflingFemales)
        self.finalName = random.choice(pool)
class HalfOrcName(Name):
    """Name generator restricted to the half-orc name pools."""

    def get_name(self):
        if self.finalName != 'rand':
            return  # Caller supplied a fixed name; nothing to resolve.
        pool = {'male': halforcMales, 'female': halforcFemales}.get(
            self.finalGender, halforcMales + halforcFemales)
        self.finalName = random.choice(pool)
class HalfElfName(Name):
    """Name generator restricted to the half-elf name pools."""

    def get_name(self):
        if self.finalName != 'rand':
            return  # Caller supplied a fixed name; nothing to resolve.
        pool = {'male': halfelfMales, 'female': halfelfFemales}.get(
            self.finalGender, halfelfMales + halfelfFemales)
        self.finalName = random.choice(pool)
class GnomeName(Name):
    """Name generator restricted to the gnome name pools."""

    def get_name(self):
        if self.finalName != 'rand':
            return  # Caller supplied a fixed name; nothing to resolve.
        pool = {'male': gnomeMales, 'female': gnomeFemales}.get(
            self.finalGender, gnomeMales + gnomeFemales)
        self.finalName = random.choice(pool)
class ElfName(Name):
    """Name generator restricted to the elf name pools."""

    def get_name(self):
        if self.finalName != 'rand':
            return  # Caller supplied a fixed name; nothing to resolve.
        pool = {'male': elfMales, 'female': elfFemales}.get(
            self.finalGender, elfMales + elfFemales)
        self.finalName = random.choice(pool)
class DwarfName(Name):
    """Name generator restricted to the dwarf name pools."""

    def get_name(self):
        if self.finalName != 'rand':
            return  # Caller supplied a fixed name; nothing to resolve.
        pool = {'male': dwarfMales, 'female': dwarfFemales}.get(
            self.finalGender, dwarfMales + dwarfFemales)
        self.finalName = random.choice(pool)
# Import necessary libraries
import subprocess # Runs commands and gets output
import socket # Used to test internet connection
import os # Used to run system commands and checks if run as root user
import getpass # Used to hide user input in password field

# Require elevated privileges: SUDO_USER is only set when launched via sudo.
# NOTE(review): this detects *sudo* specifically -- a genuine root login (no
# sudo) would also be rejected; os.geteuid() == 0 would be the broader check.
user = os.getenv("SUDO_USER")
if user is None: # Not started through sudo
    print("\n Execute as \033[1;31;48msudo\033[1;37;48m\n") # Ask the user to re-run with sudo
    exit() # Closes program
def internet(host="8.8.8.8", port=53, timeout=3):
    """Return True when a TCP connection to host:port succeeds within timeout.

    The defaults probe Google's public DNS resolver on port 53 as a cheap
    "is the internet reachable?" check. On failure the reason is printed
    and False is returned instead of raising.
    """
    try:
        # create_connection applies the timeout to this socket only -- the
        # original mutated the process-wide default via setdefaulttimeout() --
        # and the with-block closes the socket (the original leaked it).
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError as ex:
        print(ex)
        return False
# Enter wifi name here.
search = "tamulink-wpa"  # SSID to search for.
ssid = search  # Also accepts parts of SSID like "tam" or "link"


def _connman_services():
    """Return `connmanctl services` output as a list of decoded text lines."""
    listing = subprocess.Popen(["connmanctl", "services"], stdout=subprocess.PIPE)
    out, _ = listing.communicate()
    return out.decode(errors="replace").splitlines()


ssids = _connman_services()
print("\n\033[1;32;40mSearching For SSID:\033[1;37;40m", ssid)
os.system("echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf")  # Force Google DNS
while len(ssids) <= 1:  # Nothing useful found yet
    print("Scanning for more SSIDS")
    # BUG FIX: the original called subprocess.call("connmanctl", " scan", " wifi"),
    # passing the arguments as separate call() parameters; they must be one list.
    subprocess.call(["connmanctl", "scan", "wifi"])
    # BUG FIX: the original never refreshed `ssids`, so this loop could not end.
    ssids = _connman_services()
print("Have ", len(ssids), " ssids.")
for line in ssids:
    if ssid in line:
        # Keep the matching service line, e.g. "*AO tamulink-wpa wifi_..._ieee8021x"
        ssid = line
ssid = ssid.split()
if len(ssid) > 2:
    del ssid[0]  # Drop the leading state-flags column ("*AO" etc.) when present
# assumes ssid[0] is the human-readable name and ssid[1] the connman service id
# (single-word SSIDs only, as in the original) -- TODO confirm for other networks
print("\033[1;32;40m\nFound: \n\033[1;37;40m" + ssid[0] + "\t\t" + ssid[1])
username = input("\n\033[1;37;40mPlease enter your username: \033[0;37;40m")
password = getpass.getpass(prompt='\033[1;37;40mPlease enter your password: \033[0;37;40m', stream=None)
config_path = "/var/lib/connman/" + ssid[1] + ".config"
print("\033[1;32;40m\nGenerated Configuration File Path: \n\033[1;37;40m" + config_path + "\n")
# BUG FIX: str.splits() does not exist; the method is str.split().
connmanctl_ssid = ssid[1].split("_")
# SECURITY NOTE(review): the passphrase is stored in clear text, as connman's
# config format requires; the file should get restrictive permissions.
with open(config_path, "w") as config_file:  # `with` guarantees the handle is closed
    config_file.write("[service_" + ssid[1] + "]\n")  # detailed ssid as section title
    config_file.write("Type = wifi\n")  # this is a WiFi service
    config_file.write("SSID = " + connmanctl_ssid[2] + "\n")  # hex-encoded SSID field
    config_file.write("EAP = peap\n")  # encapsulation type
    config_file.write("Phase2 = MSCHAPV2\n")  # inner authentication
    config_file.write("Identity= " + username + "\n")  # network username
    config_file.write("Passphrase= " + password + "\n")  # network password
os.system("sudo systemctl restart connman")  # Restart connman so it finds the new configuration file
# Import necessary libraries
import subprocess # Runs commands and gets output
import socket # Used to test internet connection
import os # Used to run system commands and checks if run as root user
import getpass # Used to hide user input in password field
# Check if the user that executed this program is root
user = os.getenv("SUDO_USER")
if user is None: # If not root
print("\n Execute as \033[1;31;48msudo\033[1;37;48m\n") # Prints request to run as root
exit() # Closes program
def internet(host="8.8.8.8", port=53, timeout=3): # Function to check if internet connection is successful
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except Exception as ex:
print(ex)
return False
# Enter wifi name here.
search = "tamulink-wpa" # SSID to seach for tamulink-wpa.
ssid = search # Also accepts parts of SSID like, "tam" or "link"
scan = subprocess.Popen(["connmanctl", "services"], stdout=subprocess.PIPE) # Run connmanctl services and get output
ssids = scan.stdout.readlines() # Break output into list named "ssids"
print("\n\033[1;32;40mSearching For SSID:\033[1;37;40m", ssid) # Print SSID that is being searched for
os.system("echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf") # Writes "nameserver 8.8.8.8" to /etc/resolv.conf
while len(ssids) <= 1: # Checks how many SSIDs where found during scan
print("Scanning for more SSIDS") # Scans again if no SSIDs were found
subprocess.call("connmanctl", " scan", " wifi") # Runs system command to scan for more ssids
print("Have ", len(ssids), " ssids.") # Prints number of SSIDs found after scan
for x in range(len(ssids)): # Looks at each SSID in list and converts each list entry to string
ssids[x] = str(ssids[x]) # Convert to string
for idx, val in enumerate(ssids): # Search through list for specified SSID or specified part of SSID
if ssid in val: # If found SSID
ssid = val # Replace specified SSID with found SSID
ssid = ssid[0:len(ssid)-3].split() # Split SSID string along space chars and store in list
del ssid[0] # Delete first entry in list because it's an artifact from
# bytes to sting in lines 44 and 45
print("\033[1;32;40m\nFound: \n\033[1;37;40m" + ssid[0] + "\t\t" + ssid[1]) # Output SSID found matching search string
username = input("\n\033[1;37;40mPlease enter your username: \033[0;37;40m") # Ask for tamulink-wpa username
password = getpass.getpass(prompt='\033[1;37;40mPlease enter your password: \033[0;37;40m', stream=None) # Ask for tamulink-wpa password
config_path = "/var/lib/connman/" + ssid[1] + ".config" # Generate connmanctl wifi configuration file path and name
file = open(config_path, "w") # Open file using generated name and path, will create if file is non-existant
print("\033[1;32;40m\nGenerated Configuration File Path: \n\033[1;37;40m" + config_path + "\n") # Tell user configuration file name and path
connmanctl_ssid = ssid[1].splits("_") # Split connmanctl wifi scan wireless details
file.write("[service_" + ssid[1] + "]\n") # Use tamulink detailed ssid for file title
file.write("Type = wifi\n") # this is a WiFi file
file.write("SSID = " + connmanctl_ssid[2] + "\n") # Set SSID to detailed SSID
file.write("EAP = peap\n") # Set type of encapsulation
file.write("Phase2 = MSCHAPV2\n") # Set type of authentication for network
file.write("Identity= " + username + "\n") # Set network username
file.write("Passphrase= " + password + "\n") # Set network password
file.close() # Close file
os.system("sudo systemctl restart connman") # Restart connman service so it finds new configuration file | 0.263315 | 0.077832 |
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wishlist import exceptions
class WishListItem(models.Model):
    """A single product saved to a user's wish list.

    One row per (user, product) pair; ``unique_together`` prevents the same
    product from being wish-listed twice by the same user.
    """

    # Owning user; deleting the user cascades to their wish-list items.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='wishlist_items',
        verbose_name=_('Owner'), on_delete=models.CASCADE)
    # Wish-listed product; deleting the product removes the item as well.
    product = models.ForeignKey(
        'products.Product', verbose_name=_('Product'),
        on_delete=models.CASCADE)
    # Set once on INSERT (auto_now_add) and not editable afterwards.
    date_created = models.DateTimeField(
        _('Date created'), auto_now_add=True, editable=False)

    def __str__(self):
        """Display the item as its product's string form."""
        return str(self.product)

    class Meta:
        unique_together = ['user', 'product']
        verbose_name = _('Wish list item')
        verbose_name_plural = _('Wish list items')
class WishList(object):
    """Lazy, cached view over one user's WishListItem rows.

    Items are fetched once per instance and cached in a dict keyed by
    product id; mutating methods keep the cache in sync. Read helpers
    (``has_product``, ``__len__``) degrade gracefully for anonymous users,
    while any access through ``_items`` raises ``UserIsNotAuthenticated``.
    """

    # Sentinel marking "cache not built yet" (replaces hasattr() probing).
    _UNSET = object()

    def __init__(self, user):
        self._user = user
        self._items_cache = self._UNSET

    @property
    def _items(self):
        """Mapping {product_id: WishListItem}; built on first access.

        Raises exceptions.UserIsNotAuthenticated for anonymous users.
        """
        if not self._user.is_authenticated:
            raise exceptions.UserIsNotAuthenticated()
        if self._items_cache is self._UNSET:
            self._items_cache = {
                i.product_id: i for i in
                self._user.wishlist_items.all().select_related('product')
            }
        return self._items_cache

    def add(self, product):
        """Create an item for *product*; raises ProductAlreadyAdded on duplicates."""
        if product.id in self._items:
            raise exceptions.ProductAlreadyAdded()
        item = self._user.wishlist_items.create(product=product)
        self._items_cache[product.id] = item

    def remove(self, product_id):
        """Delete the item for *product_id*; raises ItemDoesNotExists if absent."""
        if product_id not in self._items:
            raise exceptions.ItemDoesNotExists()
        self._items[product_id].delete()
        del self._items[product_id]

    def has_product(self, product_id):
        """True when the (authenticated) user has wish-listed *product_id*."""
        if not self._user.is_authenticated:
            return False
        return product_id in self._items

    def __iter__(self):
        return iter(self._items.values())

    def __len__(self):
        if self._user.is_authenticated:
            return len(self._items)
        # BUG FIX: the original line was corrupted by extraction junk
        # ("return 0 | wishlist/models.py | ..."); restored to `return 0`.
        return 0
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wishlist import exceptions
class WishListItem(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='wishlist_items',
verbose_name=_('Owner'), on_delete=models.CASCADE)
product = models.ForeignKey(
'products.Product', verbose_name=_('Product'),
on_delete=models.CASCADE)
date_created = models.DateTimeField(
_('Date created'), auto_now_add=True, editable=False)
def __str__(self):
return str(self.product)
class Meta:
unique_together = ['user', 'product']
verbose_name = _('Wish list item')
verbose_name_plural = _('Wish list items')
class WishList(object):
def __init__(self, user):
self._user = user
@property
def _items(self):
if not self._user.is_authenticated:
raise exceptions.UserIsNotAuthenticated()
if not hasattr(self, '_items_cache'):
self._items_cache = {
i.product_id: i for i in
self._user.wishlist_items.all().select_related('product')
}
return self._items_cache
def add(self, product):
if product.id in self._items:
raise exceptions.ProductAlreadyAdded()
item = self._user.wishlist_items.create(product=product)
self._items_cache[product.id] = item
def remove(self, product_id):
if product_id not in self._items:
raise exceptions.ItemDoesNotExists()
self._items[product_id].delete()
del self._items[product_id]
def has_product(self, product_id):
if not self._user.is_authenticated:
return False
return product_id in self._items
def __iter__(self):
return iter(self._items.values())
def __len__(self):
if self._user.is_authenticated:
return len(self._items)
return 0 | 0.565299 | 0.07333 |
from rnd_game import RNDGame
from player import Player
from rpsls_game import RockPaperScissorsLizardSpockGame
def enter_username():
    """Prompt on stdin until a non-empty username is entered and return it.

    The original wrapped input() in a try/except that could never trigger
    for a plain string (input() already returns str, so str() cannot fail),
    carried an unreachable ``break`` after ``return``, and silently accepted
    empty names; blank input is now re-prompted instead.
    """
    while True:
        username = input().strip()
        if username:
            return username
        print("please insert a useful username")
def enter_game_selection():
    """Read stdin until the user enters '1' or '2' and return that choice.

    The caller is expected to print the menu first (MenuStarter does).
    BUG FIX: the original carried three menu prints *after* the
    ``while True`` loop -- unreachable dead code -- plus unreachable
    ``break`` statements after each ``return``; both have been removed.
    """
    while True:
        selection = input().strip()
        if selection in ('1', '2'):
            return selection
        print("Sorry, {0} is not a option, please try again".format(selection))
class MenuStarter:
    """Interactive entry point: greets the player and launches one of two games.

    The entire flow (prompting, game construction, game start) runs inside
    the constructor, so instantiating MenuStarter plays a full session.
    """

    def __init__(self) -> object:
        print(
            "Welcome to the first exercise - it consists of 'Random Number Game' und 'Rock, paper, scissors ("
            "extended with Spock and Lizard) '")
        print("Please enter your nickname")
        player1 = Player(enter_username())
        print("Welcome " + player1.name + " to the first exercise :)")
        print("Select the game you want to play")
        print("Select 1 and Enter for 'Random Number Game'")
        print("Select 2 und Enter für 'Rock, Paper, Scissors, Lizard, Spock'")
        check = enter_game_selection()
        if check == '1':
            print("'Random Number Game' is your selection")
            g = RNDGame(player1.name)
            g.start_game()
        elif check == '2':
            print("'Rock, Paper, Scissors, Lizard, Spock' is your selection")
            print("Please enter name of the second player")
            player2 = Player(enter_username())
            g = RockPaperScissorsLizardSpockGame(player1, player2)
            print("OK let's start the game " + player1.name + " vs. " + player2.name + " have fun")
            # BUG FIX: the original final line was corrupted by extraction junk
            # ("... | menu_starter.py | from rnd_game import RNDGame"); restored.
            g.start_game(player1, player2)
from player import Player
from rpsls_game import RockPaperScissorsLizardSpockGame
def enter_username():
while True:
try:
username_input = str(input().strip())
return username_input
break
except:
print("please insert a useful username")
def enter_game_selection():
while True:
game_input = str(input()).strip()
if game_input == '1':
return game_input
break
elif game_input == '2':
return game_input
break
print("Sorry, {0} is not a option, please try again".format(str(game_input)))
print("Please choose a game")
print("Press 1 and Enter for 'Random Number Game'")
print("Press 2 and Enter for 'Rock, Paper, Scissors, Lizard, Spock'")
class MenuStarter:
def __init__(self) -> object:
print(
"Welcome to the first exercise - it consists of 'Random Number Game' und 'Rock, paper, scissors ("
"extended with Spock and Lizard) '")
print("Please enter your nickname")
player1 = Player(enter_username())
print("Welcome " + player1.name + " to the first exercise :)")
print("Select the game you want to play")
print("Select 1 and Enter for 'Random Number Game'")
print("Select 2 und Enter für 'Rock, Paper, Scissors, Lizard, Spock'")
check = enter_game_selection()
if check == '1':
print("'Random Number Game' is your selection")
g = RNDGame(player1.name)
g.start_game()
elif check == '2':
print("'Rock, Paper, Scissors, Lizard, Spock' is your selection")
print("Please enter name of the second player")
player2 = Player(enter_username())
g = RockPaperScissorsLizardSpockGame(player1, player2)
print("OK let's start the game " + player1.name + " vs. " + player2.name + " have fun")
g.start_game(player1, player2) | 0.293607 | 0.220531 |
import itertools
import random
import subprocess
import os
from absl import logging, flags, app
from multiprocessing import Queue, Manager
from pathos import multiprocessing
import traceback
import time
import sys

# Experiment configuration. The single CLI argument is the log directory.
log_dir = sys.argv[1]
num_gpus = 2
# One worker slot per GPU plus one spare.
max_worker_num = num_gpus * 1 + 1
nb_train_steps = 400
meta_update_freq = 1
actor_update_freq = 1
batch_size = 1024
num_envs = 10
# Each launched job is: COMMAND1 + env name + per-env flags + COMMAND2.
COMMAND1 = f"python3 experiments/run_hiro.py {log_dir}"
COMMAND2 = f"--alg TD3 --evaluate --n_training 1 --verbose 1 --relative_goals --off_policy_corrections --eval_deterministic --num_envs {num_envs} --actor_lr 1e-4 --critic_lr 1e-4 --use_huber --target_noise_clip 0.5 --batch_size {batch_size} --tau 0.05 --gamma 0.99 --nb_train_steps {nb_train_steps} --meta_update_freq {meta_update_freq} --actor_update_freq {actor_update_freq} --intrinsic_reward_scale 1.0 --meta_period 3 --buffer_size 500000 --noise 0.1"
# Per-environment settings, index-aligned with `envs`.
envs = ["GoalTask", "KickBallTask"]
total_steps = [4950000, 4950000]
horizons = [100, 200]
nb_rollout_steps = [10 * 100, 10 * 200]
def _init_device_queue(max_worker_num):
    """Build a managed queue holding one GPU index per worker slot.

    Indices are assigned round-robin over the module-level ``num_gpus``
    so workers spread evenly across the available devices.
    """
    manager = Manager()
    queue = manager.Queue()
    for slot in range(max_worker_num):
        queue.put(slot % num_gpus)
    return queue
def run():
    """Launch every configured training command (three repeats per env),
    throttled by a pool of ``max_worker_num`` worker processes.
    """
    process_pool = multiprocessing.Pool(
        processes=max_worker_num, maxtasksperchild=1)
    device_queue = _init_device_queue(max_worker_num)

    # BUG FIX: the original used `i` for both loops, so the inner enumerate()
    # clobbered the outer repeat counter; the repeat index is now distinct.
    for _repeat in range(3):
        for i, env in enumerate(envs):
            command = "%s %s --total_steps %d --horizon %d --nb_rollout_steps %d %s" % (
                COMMAND1, env, total_steps[i], horizons[i], nb_rollout_steps[i], COMMAND2)
            process_pool.apply_async(
                func=_worker,
                args=[command, device_queue],
                error_callback=lambda e: logging.error(e))

    process_pool.close()
    process_pool.join()
def _worker(command, device_queue):
    """Run one training command on a GPU index taken from the shared queue.

    Sleeps a random 0-15 s first to stagger process launches.
    """
    try:
        # sleep for random seconds to avoid crowded launching
        time.sleep(random.uniform(0, 15))
        device = device_queue.get()
        try:
            logging.set_verbosity(logging.INFO)
            logging.info("command %s" % command)
            # NOTE(review): `command` is interpolated into a shell string;
            # acceptable here only because every input is hard-coded above.
            os.system("CUDA_VISIBLE_DEVICES=%d " % device + command)
        finally:
            # BUG FIX: always return the device, even if logging or os.system
            # raises, so a GPU slot is never leaked from the queue.
            device_queue.put(device)
    except Exception as e:
        logging.info(traceback.format_exc())
        raise e
# Script entry point. BUG FIX: extraction junk
# ("run() | run_socialbot_evals.py | import itertools") removed from this line.
run()
import random
import subprocess
import os
from absl import logging, flags, app
from multiprocessing import Queue, Manager
from pathos import multiprocessing
import traceback
import time
import sys
log_dir = sys.argv[1]
num_gpus = 2
max_worker_num = num_gpus * 1 + 1
nb_train_steps = 400
meta_update_freq = 1
actor_update_freq = 1
batch_size = 1024
num_envs = 10
COMMAND1 = f"python3 experiments/run_hiro.py {log_dir}"
COMMAND2 = f"--alg TD3 --evaluate --n_training 1 --verbose 1 --relative_goals --off_policy_corrections --eval_deterministic --num_envs {num_envs} --actor_lr 1e-4 --critic_lr 1e-4 --use_huber --target_noise_clip 0.5 --batch_size {batch_size} --tau 0.05 --gamma 0.99 --nb_train_steps {nb_train_steps} --meta_update_freq {meta_update_freq} --actor_update_freq {actor_update_freq} --intrinsic_reward_scale 1.0 --meta_period 3 --buffer_size 500000 --noise 0.1"
envs = ["GoalTask", "KickBallTask"]
total_steps = [4950000, 4950000]
horizons = [100, 200]
nb_rollout_steps = [10 * 100, 10 * 200]
def _init_device_queue(max_worker_num):
m = Manager()
device_queue = m.Queue()
for i in range(max_worker_num):
idx = i % num_gpus
device_queue.put(idx)
return device_queue
def run():
"""Run trainings with all possible parameter combinations in
the configured space.
"""
process_pool = multiprocessing.Pool(
processes=max_worker_num, maxtasksperchild=1)
device_queue = _init_device_queue(max_worker_num)
for i in range(3):
for i, env in enumerate(envs):
command = "%s %s --total_steps %d --horizon %d --nb_rollout_steps %d %s" % (COMMAND1, env, total_steps[i], horizons[i], nb_rollout_steps[i], COMMAND2)
process_pool.apply_async(
func=_worker,
args=[command, device_queue],
error_callback=lambda e: logging.error(e))
process_pool.close()
process_pool.join()
def _worker(command, device_queue):
# sleep for random seconds to avoid crowded launching
try:
time.sleep(random.uniform(0, 15))
device = device_queue.get()
logging.set_verbosity(logging.INFO)
logging.info("command %s" % command)
os.system("CUDA_VISIBLE_DEVICES=%d " % device + command)
device_queue.put(device)
except Exception as e:
logging.info(traceback.format_exc())
raise e
run() | 0.239883 | 0.105995 |
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import pandautils as pup
from sklearn.metrics import roc_curve
import cPickle
def plotROC(test_ntuple_path):#, picklename):
    '''
    Definition:
    -----------
    Plot ROC curve comparisons (b vs light and b vs c) between the old
    mv2c10 contained in the ntuple branch and three newly evaluated taggers
    (c00 / c07 / c15) loaded from pickle files.
    Both the .root file and the pickled mv2 arrays are assumed to be
    event-flat, not jet-flat.

    Args:
    -----
    test_ntuple_path: string, the path to the root files used for evaluation

    NOTE(review): the `picklename` argument is commented out and the pickle
    paths below are hard-coded ('val_Alessandro_c??.pkl') -- adjust before
    reusing elsewhere. This is Python 2 code (print statements, cPickle).
    '''
    # -- import the root file into a df
    print 'Opening files'
    df = pup.root2panda(test_ntuple_path, 'bTag_AntiKt2PV0TrackJets', branches=['jet_mv2c10', 'jet_LabDr_HadF'])
    # -- extract the old mv2c10 branch for comparison
    oldMV2 = pup.flatten(df['jet_mv2c10'])
    # -- extract the truth labels
    truthflav = pup.flatten(df['jet_LabDr_HadF'])
    # -- open the pickles produced by evaluate_and_store (hard-coded paths, see note above)
    print 'Importing pickle'
    c00 = pup.flatten(cPickle.load(open('val_Alessandro_c00.pkl', 'rb')))
    c07 = pup.flatten(cPickle.load(open('val_Alessandro_c07.pkl', 'rb')))
    c15 = pup.flatten(cPickle.load(open('val_Alessandro_c15.pkl', 'rb')))
    # -- this allows you to check performance on b VS light
    # -- change it, if you want to look at a different performance
    # (presumably label codes: 0 = light, 4 = c, 5 = b -- inferred from the
    # 'bl'/'bc' selections below; verify against the ntuple documentation)
    print 'Slicing'
    bl_selection = (truthflav == 0) | (truthflav == 5)
    print 'Plotting'
    plot(bl_selection, 'bl', truthflav, oldMV2, c00, c07, c15)
    print 'Slicing'
    bc_selection = (truthflav == 4) | (truthflav == 5)
    print 'Plotting'
    plot(bc_selection, 'bc', truthflav, oldMV2, c00, c07, c15)
def plot(selection, ID, truthflav, oldMV2, c00, c07, c15):
    '''
    Draw (and save as ROC<ID>.pdf) b-tagging ROC curves -- b-efficiency vs
    background rejection -- for the stored mv2c10 and the three retrained
    taggers, restricted to the jets picked out by `selection`.

    Args:
    -----
    selection: boolean array selecting the signal+background jets to compare
    ID:        'bl' (b vs light) or 'bc' (b vs c); controls y-label and y-range
    truthflav: per-jet truth flavour labels (5 = b is used as pos_label)
    oldMV2, c00, c07, c15: per-jet discriminant scores, same length as truthflav
    '''
    # -- calculate the points that make up a roc curve
    old_fpr, old_eff, _ = roc_curve(truthflav[selection], oldMV2[selection], pos_label=5)
    c00_fpr, c00_eff, _ = roc_curve(truthflav[selection], c00[selection], pos_label=5)
    c07_fpr, c07_eff, _ = roc_curve(truthflav[selection], c07[selection], pos_label=5)
    c15_fpr, c15_eff, _ = roc_curve(truthflav[selection], c15[selection], pos_label=5)
    # -- PLOTTING!
    # -- settings
    matplotlib.rcParams.update({'font.size': 18})
    fig = plt.figure(figsize=(11.69, 8.27), dpi=100)
    # -- add as many curves as you want here
    # -- note: to plot rejection, take 1/false_positive_rate
    plt.plot(old_eff, 1/old_fpr, label='mv2c10 branch', color='black')
    plt.plot(c00_eff, 1/c00_fpr, label='new c00')
    plt.plot(c07_eff, 1/c07_fpr, label='new c07')
    plt.plot(c15_eff, 1/c15_fpr, label='new c15')
    # -- more settings
    plt.xlim(xmin=0.6)
    plt.yscale('log')
    plt.xlabel(r'$\varepsilon_b$')
    if ID == 'bl':
        plt.ylabel((r'$1/\varepsilon_u$'))
        plt.ylim(ymax=1000)
    elif ID == 'bc':
        plt.ylabel((r'$1/\varepsilon_c$'))
        plt.ylim(ymax=20)
    plt.grid(which='both')
    plt.legend() # display legend on plot
    plt.show() # open window to show plot
    fig.savefig('ROC'+ID+'.pdf') # save plot as a pdf
if __name__ == '__main__':
import sys
import argparse
# -- read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="input .root file name")
#parser.add_argument("picklename", help="path to the .pkl file with the evaluation results")
args = parser.parse_args()
# -- pass arguments to main
sys.exit(plotROC(args.filename))#, args.picklename)) | trackjets/plotROC.py | import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import pandautils as pup
from sklearn.metrics import roc_curve
import cPickle
def plotROC(test_ntuple_path):#, picklename):
'''
Definition:
-----------
Plot a ROC curve comparison between the old mv2c10 contained in the branch and the newly evaluated one,
which is loaded in from a pickle file.
Both the .root file and the pickled mv2 array are assumed to be event-flat, not jet-flat.
Args:
-----
test_ntuple_path: string, the path to the root files used for evaluation
picklename: string, the path to the pickle containing the new output of your retrained mv2
'''
# -- import the root file into a df
print 'Opening files'
df = pup.root2panda(test_ntuple_path, 'bTag_AntiKt2PV0TrackJets', branches=['jet_mv2c10', 'jet_LabDr_HadF'])
# -- extract the old mv2c10 branch for comparison
oldMV2 = pup.flatten(df['jet_mv2c10'])
# -- extract the truth labels
truthflav = pup.flatten(df['jet_LabDr_HadF'])
# -- open the pickle produced by evaluate_and_store
print 'Importing pickle'
c00 = pup.flatten(cPickle.load(open('val_Alessandro_c00.pkl', 'rb')))
c07 = pup.flatten(cPickle.load(open('val_Alessandro_c07.pkl', 'rb')))
c15 = pup.flatten(cPickle.load(open('val_Alessandro_c15.pkl', 'rb')))
# -- this allows you to check performance on b VS light
# -- change it, if you want to look at a different performance
print 'Slicing'
bl_selection = (truthflav == 0) | (truthflav == 5)
print 'Plotting'
plot(bl_selection, 'bl', truthflav, oldMV2, c00, c07, c15)
print 'Slicing'
bc_selection = (truthflav == 4) | (truthflav == 5)
print 'Plotting'
plot(bc_selection, 'bc', truthflav, oldMV2, c00, c07, c15)
def plot(selection, ID, truthflav, oldMV2, c00, c07, c15):
# -- calculate the points that make up a roc curve
old_fpr, old_eff, _ = roc_curve(truthflav[selection], oldMV2[selection], pos_label=5)
c00_fpr, c00_eff, _ = roc_curve(truthflav[selection], c00[selection], pos_label=5)
c07_fpr, c07_eff, _ = roc_curve(truthflav[selection], c07[selection], pos_label=5)
c15_fpr, c15_eff, _ = roc_curve(truthflav[selection], c15[selection], pos_label=5)
# -- PLOTTING!
# -- settings
matplotlib.rcParams.update({'font.size': 18})
fig = plt.figure(figsize=(11.69, 8.27), dpi=100)
# -- add as many curves as you want here
# -- note: to plot rejection, take 1/false_positive_rate
plt.plot(old_eff, 1/old_fpr, label='mv2c10 branch', color='black')
plt.plot(c00_eff, 1/c00_fpr, label='new c00')
plt.plot(c07_eff, 1/c07_fpr, label='new c07')
plt.plot(c15_eff, 1/c15_fpr, label='new c15')
# -- more settings
plt.xlim(xmin=0.6)
plt.yscale('log')
plt.xlabel(r'$\varepsilon_b$')
if ID == 'bl':
plt.ylabel((r'$1/\varepsilon_u$'))
plt.ylim(ymax=1000)
elif ID == 'bc':
plt.ylabel((r'$1/\varepsilon_c$'))
plt.ylim(ymax=20)
plt.grid(which='both')
plt.legend() # display legend on plot
plt.show() # open window to show plot
fig.savefig('ROC'+ID+'.pdf') # save plot as a pdf
# -----------------------------------------------------------------
if __name__ == '__main__':
import sys
import argparse
# -- read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="input .root file name")
#parser.add_argument("picklename", help="path to the .pkl file with the evaluation results")
args = parser.parse_args()
# -- pass arguments to main
sys.exit(plotROC(args.filename))#, args.picklename)) | 0.439627 | 0.457621 |
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Rectangle
FRAME_DELTA = 500 # milliseconds between animation frames
animationYear = 1999  # year currently displayed; update() advances and wraps it
fig, ax, = plt.subplots()
# Title artist, re-filled each frame with the current year.
animationTitle = ax.text(0.5, 0.85, "", transform=ax.transAxes, ha="center", fontsize=20)
index = 0  # row counter while reading the CSV (one row per food type)
N = 15  # number of food categories / bars
ind = np.arange(N)  # x positions of the category ticks
width = 0.75  # bar width
# One Rectangle per food category; bar heights are driven by the animation.
animate01 = Rectangle((-.35, 0), width, 0)
animate02 = Rectangle((.65, 0), width, 0)
animate03 = Rectangle((1.65, 0), width, 0)
animate04 = Rectangle((2.65, 0), width, 0)
animate05 = Rectangle((3.65, 0), width, 0)
animate06 = Rectangle((4.65, 0), width, 0)
animate07 = Rectangle((5.65, 0), width, 0)
animate08 = Rectangle((6.65, 0), width, 0)
animate09 = Rectangle((7.65, 0), width, 0)
animate10 = Rectangle((8.65, 0), width, 0)
animate11 = Rectangle((9.65, 0), width, 0)
animate12 = Rectangle((10.65, 0), width, 0)
animate13 = Rectangle((11.65, 0), width, 0)
animate14 = Rectangle((12.65, 0), width, 0)
animate15 = Rectangle((13.65, 0), width, 0)
years = np.zeros(19)  # the 19 year labels (1999-2017) seen in the CSV header
foodName = []  # food-type names, in CSV row order
# One 15-element price array per year (data99 = 1999 ... data17 = 2017).
data99 = np.zeros(15)
data00 = np.zeros(15)
data01 = np.zeros(15)
data02 = np.zeros(15)
data03 = np.zeros(15)
data04 = np.zeros(15)
data05 = np.zeros(15)
data06 = np.zeros(15)
data07 = np.zeros(15)
data08 = np.zeros(15)
data09 = np.zeros(15)
data10 = np.zeros(15)
data11 = np.zeros(15)
data12 = np.zeros(15)
data13 = np.zeros(15)
data14 = np.zeros(15)
data15 = np.zeros(15)
data16 = np.zeros(15)
data17 = np.zeros(15)
# Map each CSV year-column name to its destination array so a single dict
# lookup replaces the original 19-branch if/elif chain.
_year_columns = {
    str(1999 + off): arr for off, arr in enumerate([
        data99, data00, data01, data02, data03, data04, data05, data06,
        data07, data08, data09, data10, data11, data12, data13, data14,
        data15, data16, data17,
    ])
}

with open("food_imports.csv", 'r') as fil:
    data = csv.DictReader(fil, delimiter=',')
    for row in data:
        foodName.append(row['Food Type'])
        del(row['Food Type'])
        for year, dollarAmount in row.items():
            # Strip thousands separators; skip empty cells and unknown columns
            # (the original's elif chain ignored them in the same way).
            temp = dollarAmount.replace(',', '')
            if len(temp) != 0 and year in _year_columns:
                _year_columns[year][index] = temp
                years[int(year) - 1999] = year
        index = index + 1

# Frame sequence for the animation: one price array per year, 1999..2017.
dataSet = [data99, data00, data01, data02, data03, data04, data05, data06,
           data07, data08, data09, data10, data11, data12, data13, data14,
           data15, data16, data17]
def init():
    """Animation init: fix the axes limits, zero every bar's height and attach
    each bar patch to the axes; returns all animated artists for blitting."""
    ax.set_xlim(-1, 15)
    ax.set_ylim(0.0, 25000)
    bars = (animate01, animate02, animate03, animate04, animate05,
            animate06, animate07, animate08, animate09, animate10,
            animate11, animate12, animate13, animate14, animate15)
    for bar in bars:
        bar.set_height(0)
        ax.add_patch(bar)
    return (animationTitle,) + bars
def update(price):
    """Animation step: show one year's import prices on the 15 bars.

    `price` is one row of dataSet; the module-level `animationYear` counter
    supplies the title text, advances each frame and wraps 2017 -> 1999.
    """
    global animationYear
    animationTitle.set_text('Food Imports for {}'.format(animationYear))
    bars = (animate01, animate02, animate03, animate04, animate05,
            animate06, animate07, animate08, animate09, animate10,
            animate11, animate12, animate13, animate14, animate15)
    for bar, height in zip(bars, price):
        bar.set_height(height)
    animationYear += 1
    if animationYear > 2017:
        animationYear = 1999
    return (animationTitle,) + bars
# Axis labelling: one tick per food category, line-wrapped to fit.
ax.set_ylabel('Price ($) in Millions')
ax.set_xticks(ind)
ax.set_xticklabels(('Live\nmeat\nanimals', 'Meats', 'Fish\nand\nshellfish', 'Dairy', 'Vegies', 'Fruits', 'Nuts',
                    'Coffee,\ntea, and\nspices', 'Grains', 'Veg.\noils', 'Sugar\nand\ncandy',
                    'Cocoa\nand\nchoc.', 'Other\nedible\nprod.', 'Bev.', ' Liquors'))
# Keep a reference to the animation so it is not garbage-collected mid-run.
ani = animation.FuncAnimation(fig, update, frames=dataSet, init_func=init, interval=FRAME_DELTA, blit=True)
# BUG FIX: the original final line was corrupted by extraction junk
# ("plt.show() | Food Imports/foodImportsAnimation.py | import csv"); restored.
plt.show()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Rectangle
FRAME_DELTA = 500 # milliseconds
animationYear = 1999
fig, ax, = plt.subplots()
animationTitle = ax.text(0.5, 0.85, "", transform=ax.transAxes, ha="center", fontsize=20)
index = 0
N = 15
ind = np.arange(N)
width = 0.75
animate01 = Rectangle((-.35, 0), width, 0)
animate02 = Rectangle((.65, 0), width, 0)
animate03 = Rectangle((1.65, 0), width, 0)
animate04 = Rectangle((2.65, 0), width, 0)
animate05 = Rectangle((3.65, 0), width, 0)
animate06 = Rectangle((4.65, 0), width, 0)
animate07 = Rectangle((5.65, 0), width, 0)
animate08 = Rectangle((6.65, 0), width, 0)
animate09 = Rectangle((7.65, 0), width, 0)
animate10 = Rectangle((8.65, 0), width, 0)
animate11 = Rectangle((9.65, 0), width, 0)
animate12 = Rectangle((10.65, 0), width, 0)
animate13 = Rectangle((11.65, 0), width, 0)
animate14 = Rectangle((12.65, 0), width, 0)
animate15 = Rectangle((13.65, 0), width, 0)
years = np.zeros(19)
foodName = []
data99 = np.zeros(15)
data00 = np.zeros(15)
data01 = np.zeros(15)
data02 = np.zeros(15)
data03 = np.zeros(15)
data04 = np.zeros(15)
data05 = np.zeros(15)
data06 = np.zeros(15)
data07 = np.zeros(15)
data08 = np.zeros(15)
data09 = np.zeros(15)
data10 = np.zeros(15)
data11 = np.zeros(15)
data12 = np.zeros(15)
data13 = np.zeros(15)
data14 = np.zeros(15)
data15 = np.zeros(15)
data16 = np.zeros(15)
data17 = np.zeros(15)
with open("food_imports.csv", 'r') as fil:
data = csv.DictReader(fil, delimiter=',')
for row in data:
foodName.append(row['Food Type'])
del(row['Food Type'])
for year, dollarAmount in row.items():
temp = dollarAmount.replace(',', '')
if len(temp) != 0:
if year == '1999':
data99[index] = temp
years[0] = year
elif year == '2000':
data00[index] = temp
years[1] = year
elif year == '2001':
data01[index] = temp
years[2] = year
elif year == '2002':
data02[index] = temp
years[3] = year
elif year == '2003':
data03[index] = temp
years[4] = year
elif year == '2004':
data04[index] = temp
years[5] = year
elif year == '2005':
data05[index] = temp
years[6] = year
elif year == '2006':
data06[index] = temp
years[7] = year
elif year == '2007':
data07[index] = temp
years[8] = year
elif year == '2008':
data08[index] = temp
years[9] = year
elif year == '2009':
data09[index] = temp
years[10] = year
elif year == '2010':
data10[index] = temp
years[11] = year
elif year == '2011':
data11[index] = temp
years[12] = year
elif year == '2012':
data12[index] = temp
years[13] = year
elif year == '2013':
data13[index] = temp
years[14] = year
elif year == '2014':
data14[index] = temp
years[15] = year
elif year == '2015':
data15[index] = temp
years[16] = year
elif year == '2016':
data16[index] = temp
years[17] = year
elif year == '2017':
data17[index] = temp
years[18] = year
index = index + 1
dataSet = [data99, data00, data01, data02, data03, data04, data05, data06, data07, data08, data09, data10, data11,
data12, data13, data14, data15, data16, data17]
def init():
    """Animation init: fix the axis limits, zero every bar, attach bars to the axes."""
    ax.set_xlim(-1, 15)
    ax.set_ylim(0.0, 25000)
    bars = (animate01, animate02, animate03, animate04, animate05,
            animate06, animate07, animate08, animate09, animate10,
            animate11, animate12, animate13, animate14, animate15)
    # Reset all bar heights first, then register every patch with the axes,
    # mirroring the original two-phase setup.
    for bar in bars:
        bar.set_height(0)
    for bar in bars:
        ax.add_patch(bar)
    return (animationTitle,) + bars
def update(price):
    """Animation step: retitle for the current year and resize all 15 bars.

    `price` is the 15-element array for one year (one frame of dataSet).
    """
    global animationYear
    animationTitle.set_text('Food Imports for {}'.format(animationYear))
    bars = (animate01, animate02, animate03, animate04, animate05,
            animate06, animate07, animate08, animate09, animate10,
            animate11, animate12, animate13, animate14, animate15)
    for bar, value in zip(bars, price):
        bar.set_height(value)
    # Advance the year counter, wrapping back to 1999 after the last frame.
    animationYear += 1
    if animationYear > 2017:
        animationYear = 1999
    return (animationTitle,) + bars
# Axis labelling: prices are in millions of dollars, one tick per food group.
ax.set_ylabel('Price ($) in Millions')
ax.set_xticks(ind)
ax.set_xticklabels(('Live\nmeat\nanimals', 'Meats', 'Fish\nand\nshellfish', 'Dairy', 'Vegies', 'Fruits', 'Nuts',
                    'Coffee,\ntea, and\nspices', 'Grains', 'Veg.\noils', 'Sugar\nand\ncandy',
                    'Cocoa\nand\nchoc.', 'Other\nedible\nprod.', 'Bev.', ' Liquors'))
# Drive the bar animation: one frame per year's 15-element data array.
ani = animation.FuncAnimation(fig, update, frames=dataSet, init_func=init, interval=FRAME_DELTA, blit=True)
plt.show() | 0.347759 | 0.530723 |
from .Dark_Neuron_CNN import Dark_Neuron
import tensorflow as tf # Powerful Framework for Deep Learning
import os # For Searching Folder within the system
from .models import Create_Model, Train_Model # Script containing Different Models
from .Preprocessing_Image import Preprocess_Image #Preprocessing Image Script
from .Prediction import Prediction
import matplotlib.pyplot as plt
class Classify_Images(Dark_Neuron):
    """Image-classification workflow built on the Dark_Neuron base class.

    Attributes:
        working_directory: directory containing the raw data.
        output_directory: directory that results are posted to.

    Methods:
        Preprocess_the_Image, Create_the_Model, Train_the_Model,
        Predict_from_the_Model, Visualize_the_Metrics,
        Visualize_the_Predictions, load_model
    """

    def __init__(self, working_directory, output_directory):
        """Store the working/output directories and initialise the base class.

        Arguments:
            working_directory: directory containing the raw data.
            output_directory: directory results will be written to.
        """
        Dark_Neuron.__init__(self, working_directory, output_directory)
        self.working_directory = working_directory
        self.output_directory = output_directory

    def load_model(self, user_model_name):
        """Load a saved Keras model named `user_model_name` from working_directory."""
        user_model_path = os.path.join(self.working_directory, user_model_name)
        return tf.keras.models.load_model(user_model_path)

    def Preprocess_the_Image(self, method, train, num_classes=2, batch_size=32,
                             target_image_size=(224, 224, 3), model_name=None,
                             user_model=None, image_path=None, grayscale=None,
                             training_image_directory=None,
                             validation_image_directory=None, dataframe=None,
                             test_image_directory=None, x_train=None, x_test=None,
                             y_train=None, y_test=None, x_col=None, y_col=None,
                             split=0.1, image_directory=None, input_tensor=None):
        """Preprocess images and return data generators.

        Returns training + validation generators (plus class indices where the
        generator provides them) when `train` is True, otherwise a test
        generator — or a single preprocessed image for method='image'.

        Arguments:
            method: how images flow in — 'directory', 'dataframe', 'point'
                or 'image'.
            train: True to build training/validation data, False for test data.
            model_name: name of a predefined architecture (optional).
            num_classes: number of target classes.
            batch_size: generator batch size.
            training_image_directory / validation_image_directory /
            test_image_directory: used by method='directory'.
            dataframe, x_col, y_col, image_directory, split: used by
                method='dataframe'.
            x_train, y_train, x_test, y_test: used by method='point'.
            image_path, grayscale: used by method='image'.

        Raises:
            ValueError: for an unknown `method`, or method='image' without
                an image_path.
        """
        self.train = train
        self.target_image_size = target_image_size
        self.model_name = model_name
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.method = method
        self.training_directory = training_image_directory
        self.validation_directory = validation_image_directory
        self.test_directory = test_image_directory
        self.dataframe = dataframe
        self.x_train = x_train
        self.x_test = x_test
        self.y_train = y_train
        self.y_test = y_test
        self.x_col_name = x_col
        self.y_col_name = y_col
        self.split = split
        self.image_directory = image_directory
        self.input_tensor = input_tensor
        self.image_path = image_path
        # Simplified from an if/else that assigned user_model or None — the
        # plain assignment is equivalent.
        self.user_model = user_model
        # Preprocessor configured once; each branch below just picks the flow.
        preprocessing = Preprocess_Image(model_name=self.model_name, user_model=self.user_model,
                                         target_image_size=self.target_image_size,
                                         num_classes=self.num_classes, batch_size=self.batch_size,
                                         training=self.train, method=self.method,
                                         working_directory=self.working_directory)
        if self.method == 'directory':
            print('\n\t\t-----Getting Images From Directory------\n')
            if self.train:  # From Preprocessing_Image.py File
                train_data, validation_data = preprocessing.Get_Images_from_Directory(
                    self.training_directory, self.validation_directory, self.test_directory)
                print('\n\t\t-------Training Data Generated--------\n')
                return train_data, validation_data, (train_data.class_indices)
            else:
                test_data = preprocessing.Get_Images_from_Directory(
                    self.training_directory, self.validation_directory, self.test_directory)
                print('\n\t\t-------Test Data Generated--------\n')
                return test_data
        elif self.method == 'dataframe':
            print('\n\t\t-----Getting Images From DataFrame------\n')
            if self.train:
                train_data, validation_data = preprocessing.Get_Images_from_DataFrame(
                    self.dataframe, self.x_col_name, self.split, self.y_col_name, self.image_directory)
                print('\n\t\t-----Training Data Generated------\n')
                return train_data, validation_data, (train_data.class_indices)
            else:
                test_data = preprocessing.Get_Images_from_DataFrame(
                    self.dataframe, self.x_col_name, self.split, self.y_col_name, self.image_directory)
                print('\n\t\t-----Test Data Generated------\n')
                return test_data
        elif self.method == 'point':
            print('\n\t\t-----Getting Images From Points------\n')
            if self.train:
                train_data, validation_data = preprocessing.Get_Data(
                    self.x_train, self.y_train, self.x_test, self.y_test)
                print('\n\t\t------Training Data Generated-------\n')
                return train_data, validation_data
            else:
                test_data = preprocessing.Get_Data(
                    self.x_train, self.y_train, self.x_test, self.y_test)
                print('\n\t\t---------Test Data Generated--------\n')
                return test_data
        elif self.method == 'image':
            if image_path is None:
                raise ValueError('Provide Image Path for Image Prediction or If it is containing in a directory having multiple ',
                                 'images, then set method = "directory"')
            print('\n\t\t----------Getting Image -------------\n')
            image = preprocessing.Get_Image(image_path=image_path, model_name=self.model_name,
                                            user_model=user_model, grayscale=grayscale)
            return image
        else:
            raise ValueError('Invalid Method Input --Must be from "directory","dataframe","point","image"')

    def Create_the_Model(self):
        """Instantiate the predefined architecture selected by self.model_name.

        Returns:
            An uncompiled/untrained model ready for Train_the_Model.

        Raises:
            ValueError: if the target image size is below the architecture's
                minimum, or if self.model_name matches no known architecture
                (previously this fell through and silently returned None).
        """
        print('\n\t\t--------------Model Creation Phase-----------\n')
        model_init = Create_Model(working_directory=self.working_directory,
                                  image_shape=self.target_image_size,
                                  train=self.train, input_tensor=self.input_tensor)
        # Each branch first validates the minimum input size documented for
        # the architecture, then builds it.
        if self.model_name in ['mobilenetv2', 'MobileNetV2', 'mobilenet_v2', 'MobileNet_V2']:
            if self.target_image_size[0] < 32 or self.target_image_size[1] < 32:
                Model_Target_Value_Checker()  # Raises ValueError (see below)
            print('\n\t\t-------MobileNetV2 Model Initiated Successfully----------\n')
            return model_init.MobileNetV2()
        if self.model_name in ['inceptionv3', 'InceptionV3', 'inception_v3', 'Inception_V3']:
            if self.target_image_size[0] < 75 or self.target_image_size[1] < 75:
                Model_Target_Value_Checker()  # Raises ValueError (see below)
            print('\n\t\t-------InceptiontV3 Model Initiated Successfully----------\n')
            return model_init.InceptionV3()
        if self.model_name in ['resnet50', 'ResNet50', 'Resnet50']:
            if self.target_image_size[0] < 32 or self.target_image_size[1] < 32:
                Model_Target_Value_Checker()  # Raises ValueError (see below)
            print('\n\t\t-------Resnet50 Model Initiated Successfully----------\n')
            return model_init.ResNet50()
        if self.model_name in ['Xception', 'xception']:
            if self.target_image_size[0] < 71 or self.target_image_size[1] < 71:
                Model_Target_Value_Checker()  # Raises ValueError (see below)
            print('\n\t\t-------Xception Model Initiated Successfully----------\n')
            return model_init.Xception()
        if self.model_name in ['VGG16', 'Vgg16', 'vgg16']:
            if self.target_image_size[0] < 32 or self.target_image_size[1] < 32:
                Model_Target_Value_Checker()  # Raises ValueError (see below)
            print('\n\t\t-------VGG16 Model Initiated Successfully----------\n')
            return model_init.VGG16()
        if self.model_name in ['VGG19', 'Vgg19', 'vgg19']:
            if self.target_image_size[0] < 32 or self.target_image_size[1] < 32:
                Model_Target_Value_Checker()  # Raises ValueError (see below)
            print('\n\t\t-------VGG19 Model Initiated Successfully----------\n')
            return model_init.VGG19()
        # Fix: unknown names previously returned None silently; fail loudly.
        raise ValueError('Unsupported model_name: {}'.format(self.model_name))

    def Train_the_Model(self, model, rebuild=False, train_data_object=None,
                        validation_data_object=None, test_data_object=None,
                        epochs=10, optimizer='adam', loss='binary_crossentropy',
                        fine_tuning=False, layers=20, metrics='accuracy',
                        validation_steps=80, save_model=True, steps_per_epoch=50,
                        callbacks=None):
        """Train `model` via models.Train_Model and keep its history.

        The training history is stored on self.model_history for
        Visualize_the_Metrics; the (possibly rebuilt) trained model is
        returned.
        """
        print('\n\t\t------------Model Training To be Start---------------')
        history, model = Train_Model(model=model, rebuild=rebuild, num_classes=self.num_classes,
                                     train_data_object=train_data_object,
                                     working_directory=self.working_directory,
                                     output_directory=self.output_directory,
                                     loss=loss, epochs=epochs, optimizer=optimizer,
                                     metrics=metrics, validation_data_object=validation_data_object,
                                     fine_tuning=fine_tuning, layers=layers,
                                     validation_steps=validation_steps, save_model=save_model,
                                     steps_per_epoch=steps_per_epoch, callbacks=callbacks)
        self.model_history = history
        return model

    def Visualize_the_Metrics(self):
        """Plot loss/accuracy curves from the last Train_the_Model run.

        Validation plots are skipped when no validation history exists.
        """
        # NOTE(review): assumes history keys 'loss'/'acc' — newer Keras
        # records 'accuracy'; confirm against the metrics used in training.
        plt.plot(self.model_history.history['loss'], label='Training Loss')
        plt.plot(self.model_history.history['acc'], label='Training Accuracy')
        plt.title('Training Loss vs Training Accuracy')
        plt.legend()
        plt.show()
        # Validation curves only exist when a validation set was supplied; a
        # missing key raises KeyError, which we deliberately treat as "skip".
        # (Fix: was a bare `except: pass`, which also hid real plotting bugs.)
        try:
            plt.plot(self.model_history.history['val_loss'], label='Test Loss')
            plt.plot(self.model_history.history['loss'], label='Training Loss')
            plt.title('Training Loss vs Validation Loss')
            plt.legend()
            plt.show()
            plt.plot(self.model_history.history['acc'], label='Training Accuracy')
            plt.plot(self.model_history.history['val_acc'], label='Validation Accuracy')
            plt.title('Training Accuracy vs Validation Accuracy')
            plt.legend()
            plt.show()
            plt.plot(self.model_history.history['val_loss'], label='Validation_Loss')
            plt.plot(self.model_history.history['val_acc'], label='Validation_Accuracy')
            plt.title('Validation Loss vs Validation Accuracy')
            plt.legend()
            plt.show()
        except KeyError:
            pass

    def Predict_from_the_Model(self, labels=None, generator=None, img=None, top=5, model=None):
        """Run prediction and return a list of (file, label, score) entries.

        Arguments:
            labels: class labels for mapping predicted indices.
            generator: test data generator (batch prediction), or
            img: a single preprocessed image.
            top: number of highest probabilities to keep.
            model: trained model; ignored when a user model was supplied to
                Preprocess_the_Image.

        Raises:
            ValueError: when no user model is set and `model` is None.
        """
        self.generator = generator
        self.img = img
        prediction = Prediction(working_directory=self.working_directory, labels=labels,
                                method=self.method, model_name=self.model_name,
                                user_model=self.user_model, img=img, top=top,
                                image_directory=self.image_path)
        # The user-supplied model (if any) always wins over the argument.
        if self.user_model is not None:
            model = self.user_model
        elif model is None:
            raise ValueError('Provide Model, model argument should not be empty')
        predicted_indices, predictions = prediction.prediction(method=self.method, model=model,
                                                               img=img, data_generator=generator)
        label_score = prediction.label_class_provider(label=labels, predictions=predictions,
                                                      predicted_indices=predicted_indices,
                                                      generator=generator, img=img)
        print('\n\t\t--------------Generating Predictions with Score----------------')
        self.label_score = label_score
        if len(label_score) == 0:
            print('\n\t\t----------No Predictions-----------')
            return label_score
        else:
            print(f'\n\t\t------------Found {len(label_score)} Predicitons-------')
            return label_score

    def Visualize_the_Predictions(self, number=6):
        """Show up to `number` predicted images with their label and score."""
        if number > len(self.label_score):
            number = len(self.label_score)
        if number == 0:
            print('No predictions to Show')
        else:
            if self.generator is not None:
                # Generator entries are file names relative to test_directory.
                for label_score in self.label_score[:number]:
                    filepath = os.path.join(self.test_directory, label_score[0])
                    img = plt.imread(filepath)
                    plt.imshow(img)
                    plt.title(f'Predicted:{label_score[1].title()} ---- Score: {label_score[2]*100}')
                    plt.show()
            elif self.img is not None:
                # Single-image entries already carry a full file path.
                for label_score in self.label_score[:number]:
                    filepath = label_score[0]
                    img = plt.imread(filepath)
                    plt.imshow(img)
                    plt.title(f'Predicted:{label_score[1].title()} ---- Score: {label_score[2]*100}')
                    plt.show()
def Model_Target_Value_Checker():
raise ValueError('Try with Different Model.Get '
'information on Keras Documentation\n'
'The Lowest Dimensions allowed for Different Model are : \n'
'Try to change in Preprocess Images Process \n'
'MobileNetV2 --> (32,32) \n'
'InceptionV3 --> (75,75)\n'
'Resnet50 --> (32,32) \n'
'Xception --> (71,71) \n'
'VGG16 --> (32,32) \n'
'VGG19 --> (32,32) \n'
) | build/lib/DarkNeurons/Classification.py | from .Dark_Neuron_CNN import Dark_Neuron
import tensorflow as tf # Powerful Framework for Deep Learning
import os # For Searching Folder within the system
from .models import Create_Model, Train_Model # Script containing Different Models
from .Preprocessing_Image import Preprocess_Image #Preprocessing Image Script
from .Prediction import Prediction
import matplotlib.pyplot as plt
class Classify_Images(Dark_Neuron):
"""
This Class will have Following Properties:
Attributes:
--Working Directory
-- Output Directory
-- train --> Whether to train to predict
Methods:
--Preprocess_the_Images
--Create_the_Model
--Train_the_Model
--Predict_from_the_Model
--Visualize_the_Metric
"""
def __init__(self,working_directory,output_directory):
"""
In this function we will call Parent Function containing other Function
and Define other variables.
Arguments:
---------
working_directory --> Directory Containing Raw Data
output_directory --> Output Sirectory to which Results will be posted
Output:
------
None
"""
Dark_Neuron.__init__(self,working_directory,output_directory)
self.working_directory = working_directory
self.output_directory = output_directory
def load_model(self,user_model_name):
user_model_path = os.path.join(self.working_directory,user_model_name)
return tf.keras.models.load_model(user_model_path)
"""
Defining Preprocess Function to Preprocess the Images with Different Flow Method
"""
def Preprocess_the_Image(self,method,train,num_classes=2,batch_size=32,target_image_size=(224,224,3),model_name = None,user_model = None,image_path=None,grayscale=None,training_image_directory=None,validation_image_directory=None,dataframe=None,
test_image_directory=None,x_train=None,x_test=None,y_train=None,y_test=None,x_col=None,y_col = None,split=0.1,image_directory=None,input_tensor=None):
"""
This function Will do image processing and return training Data Generator, Validation Data Generator
and Test Data Generator on the Basis of Training Argument whether it is True Or False.
Arguments:
model_name --> Name for The Predefined Architecture
num_classes --> Number of Classes
batch_size --> Batch Size
method --> Method by which Images will flow in the Function --> directory,dataframe,point
training_image_directory --> Directory for method Directory
validation_image_directory --> (Optional) For method Directory
Outputs:
It will Return the Data Generator for Train and Test
"""
self.train = train
self.target_image_size = target_image_size
self.model_name = model_name
self.num_classes = num_classes
self.batch_size = batch_size
self.method = method
self.training_directory = training_image_directory
self.validation_directory = validation_image_directory
self.test_directory = test_image_directory
self.dataframe = dataframe
self.x_train = x_train
self.x_test = x_test
self.y_train = y_train
self.y_test = y_test
self.x_col_name = x_col
self.y_col_name = y_col
self.split = split
self.image_directory = image_directory
self.input_tensor = input_tensor
self.image_path = image_path
if user_model is not None:
self.user_model = user_model
else:
self.user_model = None
#Defining Variables for Preprocessing
preprocessing = Preprocess_Image(model_name=self.model_name,user_model=self.user_model,target_image_size = self.target_image_size,num_classes = self.num_classes,batch_size=self.batch_size,training=self.train,method=self.method,working_directory = self.working_directory)
#Getting results based on Different flow methods
if self.method == 'directory':
print('\n\t\t-----Getting Images From Directory------\n')
if self.train:# From Preprocessing_Image.py File
train_data,validation_data = preprocessing.Get_Images_from_Directory(self.training_directory,self.validation_directory,self.test_directory)
print('\n\t\t-------Training Data Generated--------\n')
return train_data,validation_data,(train_data.class_indices)
else:
test_data = preprocessing.Get_Images_from_Directory(self.training_directory,self.validation_directory,
self.test_directory)
print('\n\t\t-------Test Data Generated--------\n')
return test_data
elif self.method=='dataframe':
print('\n\t\t-----Getting Images From DataFrame------\n')
if self.train:
train_data,validation_data = preprocessing.Get_Images_from_DataFrame(self.dataframe,self.x_col_name,self.split,self.y_col_name,self.image_directory)
print('\n\t\t-----Training Data Generated------\n')
return train_data,validation_data,(train_data.class_indices)
else:
test_data = preprocessing.Get_Images_from_DataFrame(self.dataframe,self.x_col_name,self.split,self.y_col_name,self.image_directory)
print('\n\t\t-----Test Data Generated------\n')
return test_data
elif self.method=='point':
print('\n\t\t-----Getting Images From Points------\n')
if self.train:
train_data,validation_data = preprocessing.Get_Data(self.x_train,self.y_train,self.x_test,self.y_test)
print('\n\t\t------Training Data Generated-------\n')
return train_data,validation_data
else:
test_data = preprocessing.Get_Data(self.x_train,self.y_train,self.x_test,self.y_test)
print('\n\t\t---------Test Data Generated--------\n')
return test_data
elif self.method == 'image':
if image_path is None:
raise ValueError('Provide Image Path for Image Prediction or If it is containing in a directory having multiple ',
'images, then set method = "directory"')
print('\n\t\t----------Getting Image -------------\n')
image = preprocessing.Get_Image(image_path = image_path,model_name = self.model_name,user_model=user_model,
grayscale=grayscale)
return image
else:
raise ValueError('Invalid Method Input --Must be from "directory","dataframe","point","image"')
def Create_the_Model(self):
"""
This Function will be used for Initialisation of Model according to Model name Given
Arguments:
None
Returns:
It will return the model for Training the model
"""
print('\n\t\t--------------Model Creation Phase-----------\n')
model_init = Create_Model(working_directory=self.working_directory,image_shape = self.target_image_size,train = self.train,input_tensor=self.input_tensor)
# Defining Model based on Model name:
if self.model_name in ['mobilenetv2','MobileNetV2','mobilenet_v2','MobileNet_V2']:
# Checking whether Target Image size is within bounds for Predefined Architecture
if self.target_image_size[0] <32 or self.target_image_size[1]<32:
Model_Target_Value_Checker() #Check the Function Below which Raise the Value Error
print('\n\t\t-------MobileNetV2 Model Initiated Successfully----------\n')
return model_init.MobileNetV2()
if self.model_name in ['inceptionv3','InceptionV3','inception_v3','Inception_V3']:
# Checking whether Target Image size is within bounds for Predefined Architecture
if self.target_image_size[0] <75 or self.target_image_size[1]<75:
Model_Target_Value_Checker() #Check the Function Below which Raise the Value Error
print('\n\t\t-------InceptiontV3 Model Initiated Successfully----------\n')
return model_init.InceptionV3()
if self.model_name in ['resnet50','ResNet50','Resnet50']:
# Checking whether Target Image size is within bounds for Predefined Architecture
if self.target_image_size[0] <32 or self.target_image_size[1]<32:
Model_Target_Value_Checker() #Check the Function Below which Raise the Value Error
print('\n\t\t-------Resnet50 Model Initiated Successfully----------\n')
return model_init.ResNet50()
if self.model_name in ['Xception','xception']:
# Checking whether Target Image size is within bounds for Predefined Architecture
if self.target_image_size[0] <71 or self.target_image_size[1]<71:
Model_Target_Value_Checker() #Check the Function Below which Raise the Value Error
print('\n\t\t-------Xception Model Initiated Successfully----------\n')
return model_init.Xception()
if self.model_name in ['VGG16','Vgg16','vgg16']:
# Checking whether Target Image size is within bounds for Predefined Architecture
if self.target_image_size[0] <32 or self.target_image_size[1]<32:
Model_Target_Value_Checker() #Check the Function Below which Raise the Value Error
print('\n\t\t-------VGG16 Model Initiated Successfully----------\n')
return model_init.VGG16()
if self.model_name in ['VGG19','Vgg19','vgg19']:
# Checking whether Target Image size is within bounds for Predefined Architecture
if self.target_image_size[0] <32 or self.target_image_size[1]<32:
Model_Target_Value_Checker() #Check the Function Below which Raise the Value Error
print('\n\t\t-------VGG19 Model Initiated Successfully----------\n')
return model_init.VGG19()
def Train_the_Model(self,model,rebuild=False,train_data_object=None,validation_data_object=None,test_data_object=None,epochs = 10,optimizer='adam',loss = 'binary_crossentropy',fine_tuning = False,layers = 20,metrics='accuracy',validation_steps=80,save_model = True,steps_per_epoch = 50,callbacks=None):
"""
This function will call up the Initialised Model
"""
print('\n\t\t------------Model Training To be Start---------------')
history,model = Train_Model(model=model,rebuild=rebuild,num_classes = self.num_classes,train_data_object=train_data_object,
working_directory = self.working_directory,output_directory = self.output_directory,loss = loss,epochs=epochs,
optimizer = optimizer,metrics = metrics,validation_data_object = validation_data_object,fine_tuning = fine_tuning,
layers = layers,validation_steps=validation_steps,save_model=save_model,steps_per_epoch = steps_per_epoch,callbacks=callbacks)
self.model_history = history
return model
def Visualize_the_Metrics(self):
import matplotlib.pyplot as plt
# Plot for Training Loss and Training Accuracy
plt.plot(self.model_history.history['loss'],label='Training Loss')
plt.plot(self.model_history.history['acc'],label = 'Training Accuracy')
plt.title('Training Loss vs Training Accuracy')
plt.legend()
plt.show()
#excetuted when validation set will be there
try:
# Training Loss vs Vaidation Loss
plt.plot(self.model_history.history['val_loss'],label='Test Loss')
plt.plot(self.model_history.history['loss'],label = 'Training Loss')
plt.title('Training Loss vs Validation Loss')
plt.legend()
plt.show()
plt.plot(self.model_history.history['acc'],label='Training Accuracy')
plt.plot(self.model_history.history['val_acc'],label = 'Validation Accuracy')
plt.title('Training Accuracy vs Validation Accuracy')
plt.legend()
plt.show()
plt.plot(self.model_history.history['val_loss'],label='Validation_Loss')
plt.plot(self.model_history.history['val_acc'],label = 'Validation_Accuracy')
plt.title('Validation Loss vs Validation Accuracy')
plt.legend()
plt.show()
except:
pass
def Predict_from_the_Model(self,labels=None,generator=None,img = None,top = 5,model=None):
"""
This Function will be used to predict the classes from Model
Arguments:
preprocessed_image --> preprocessed_image suitable for model
model --> model get from trained part
top --> Number of High Probabilities
Return:
Classes
"""
self.generator = generator
self.img = img
prediction = Prediction(working_directory = self.working_directory,labels = labels,method = self.method,
model_name = self.model_name,user_model=self.user_model,
img = img,top=top,image_directory=self.image_path)
if self.user_model is not None:
model = self.user_model
else:
if model is None:
raise ValueError('Provide Model, model argument should not be empty')
model = model
predicted_indices,predictions = prediction.prediction(method=self.method,model=model,img=img,data_generator=generator)
label_score = prediction.label_class_provider(label=labels,predictions=predictions,predicted_indices=predicted_indices,
generator=generator,img=img)
print('\n\t\t--------------Generating Predictions with Score----------------')
self.label_score = label_score
if len(label_score) == 0:
print('\n\t\t----------No Predictions-----------')
return label_score
else:
print(f'\n\t\t------------Found {len(label_score)} Predicitons-------' )
return label_score
def Visualize_the_Predictions(self,number=6):
if number > len(self.label_score):
number = len(self.label_score)
if number ==0:
print('No predictions to Show')
else:
if self.generator is not None:
for label_score in self.label_score[:number]:
filepath = os.path.join(self.test_directory,label_score[0])
img = plt.imread(filepath)
plt.imshow(img)
plt.title(f'Predicted:{label_score[1].title()} ---- Score: {label_score[2]*100}')
plt.show()
elif self.img is not None:
for label_score in self.label_score[:number]:
filepath = label_score[0]
img = plt.imread(filepath)
plt.imshow(img)
plt.title(f'Predicted:{label_score[1].title()} ---- Score: {label_score[2]*100}')
plt.show()
def Model_Target_Value_Checker():
raise ValueError('Try with Different Model.Get '
'information on Keras Documentation\n'
'The Lowest Dimensions allowed for Different Model are : \n'
'Try to change in Preprocess Images Process \n'
'MobileNetV2 --> (32,32) \n'
'InceptionV3 --> (75,75)\n'
'Resnet50 --> (32,32) \n'
'Xception --> (71,71) \n'
'VGG16 --> (32,32) \n'
'VGG19 --> (32,32) \n'
) | 0.636127 | 0.389605 |
from django.shortcuts import render
from django.views import View
import requests
ACCUWEATHER_API_KEY = "<KEY>"
ACCUWEATHER_CITY_URL = "http://dataservice.accuweather.com/locations/v1/cities/search?apikey={}&q={}&language=en-us"
ACCUWEATHER_WEATHER_URL = "http://dataservice.accuweather.com/currentconditions/v1/{}?apikey={}&language=en-us"
OPENWEATHERMAP_API_KEY = "<KEY>"
OPENWEATHERMAP_WEATHER_URL = "http://api.openweathermap.org/data/2.5/weather?q={}&appid={}"
class ApiException(Exception):
    """Raised when an upstream weather API call fails.

    Carries the HTTP status code so the view can report it.
    """

    def __init__(self, status_code):
        # Kept as a plain attribute assignment: callers read e.status_code.
        self.status_code = status_code

    def __str__(self):
        code = self.status_code
        return "Error {}".format(code)
class HomeView(View):
    """Renders the search form (GET) and the combined weather results (POST)."""

    @staticmethod
    def get(request):
        """Show the city search form."""
        return render(request, "index.html", {})

    @staticmethod
    def post(request):
        """Look the requested city up on both providers and render the result.

        Any ApiException from either provider is turned into the error page.
        """
        city = request.POST.get("city", None)
        try:
            # AccuWeather is queried first, matching the display order.
            datapoints = [get_accuweather(city), get_openweathermap(city)]
        except ApiException as e:
            error_context = {
                "status_code": e.status_code,
                "error_msg": "We probably couldn't find the city you requested. Sorry about that!"
            }
            return render(request, "error.html", error_context)
        return render(request, "weather.html", {"city": city, "datapoints": datapoints})
def get_accuweather(city):
    """Fetch current conditions for `city` from AccuWeather.

    Resolves the city to an AccuWeather location key first, then fetches the
    current conditions for that key.

    Returns:
        dict with "source", "conditions" and "temperature" (metric, int).

    Raises:
        ApiException: when either HTTP call fails, or the city search
            returns no match.
    """
    city_url = ACCUWEATHER_CITY_URL.format(ACCUWEATHER_API_KEY, city)
    city_rsp = requests.get(city_url)
    # Fix: previously an HTTP error or an unknown city crashed here with an
    # uncaught exception (e.g. IndexError on an empty result list); surface
    # both as ApiException so the view can render its error page.
    if city_rsp.status_code != 200:
        raise ApiException(city_rsp.status_code)
    city_results = city_rsp.json()
    if not city_results:
        raise ApiException(404)
    city_key = city_results[0]["Key"]
    weather_url = ACCUWEATHER_WEATHER_URL.format(city_key, ACCUWEATHER_API_KEY)
    weather_rsp = requests.get(weather_url)
    if weather_rsp.status_code != 200:
        raise ApiException(weather_rsp.status_code)
    weather_data = weather_rsp.json()[0]
    conditions = weather_data["WeatherText"]
    temperature = weather_data["Temperature"]["Metric"]["Value"]
    return {
        "source": "AccuWeather",
        "conditions": conditions,
        "temperature": int(temperature),
    }
def get_openweathermap(city):
kelvin_to_celsius = lambda k: k - 273.15
weather_url = OPENWEATHERMAP_WEATHER_URL.format(city, OPENWEATHERMAP_API_KEY)
weather_rsp = requests.get(weather_url)
if weather_rsp.status_code != 200:
raise ApiException(weather_rsp.status_code)
weather_data = weather_rsp.json()
conditions = weather_data["weather"][0]["main"]
temperature_k = weather_data["main"]["temp"]
temperature_c = kelvin_to_celsius(temperature_k)
return {
"source": "OpenWeatherMap",
"conditions": conditions,
"temperature": int(temperature_c),
} | weather/weather/views.py | from django.shortcuts import render
from django.views import View
import requests
ACCUWEATHER_API_KEY = "<KEY>"
ACCUWEATHER_CITY_URL = "http://dataservice.accuweather.com/locations/v1/cities/search?apikey={}&q={}&language=en-us"
ACCUWEATHER_WEATHER_URL = "http://dataservice.accuweather.com/currentconditions/v1/{}?apikey={}&language=en-us"
OPENWEATHERMAP_API_KEY = "<KEY>"
OPENWEATHERMAP_WEATHER_URL = "http://api.openweathermap.org/data/2.5/weather?q={}&appid={}"
class ApiException(Exception):
def __init__(self, status_code):
self.status_code = status_code
def __str__(self):
return "Error {}".format(self.status_code)
class HomeView(View):
@staticmethod
def get(request):
template = "index.html"
context = {}
return render(request, template, context)
@staticmethod
def post(request):
city = request.POST.get("city", None)
try:
accuweather_data = get_accuweather(city)
openweathermap_data = get_openweathermap(city)
except ApiException as e:
template = "error.html"
context = {
"status_code": e.status_code,
"error_msg": "We probably couldn't find the city you requested. Sorry about that!"
}
return render(request, template, context)
weather_datapoints = [
accuweather_data,
openweathermap_data,
]
template = "weather.html"
context = {
"city": city,
"datapoints": weather_datapoints,
}
return render(request, template, context)
def get_accuweather(city):
city_url = ACCUWEATHER_CITY_URL.format(ACCUWEATHER_API_KEY, city)
city_rsp = requests.get(city_url)
city_data = city_rsp.json()[0]
city_key = city_data["Key"]
weather_url = ACCUWEATHER_WEATHER_URL.format(city_key, ACCUWEATHER_API_KEY)
weather_rsp = requests.get(weather_url)
if weather_rsp.status_code != 200:
raise ApiException(weather_rsp.status_code)
weather_data = weather_rsp.json()[0]
conditions = weather_data["WeatherText"]
temperature = weather_data["Temperature"]["Metric"]["Value"]
return {
"source": "AccuWeather",
"conditions": conditions,
"temperature": int(temperature),
}
def get_openweathermap(city):
kelvin_to_celsius = lambda k: k - 273.15
weather_url = OPENWEATHERMAP_WEATHER_URL.format(city, OPENWEATHERMAP_API_KEY)
weather_rsp = requests.get(weather_url)
if weather_rsp.status_code != 200:
raise ApiException(weather_rsp.status_code)
weather_data = weather_rsp.json()
conditions = weather_data["weather"][0]["main"]
temperature_k = weather_data["main"]["temp"]
temperature_c = kelvin_to_celsius(temperature_k)
return {
"source": "OpenWeatherMap",
"conditions": conditions,
"temperature": int(temperature_c),
} | 0.586523 | 0.087759 |
class Queue:
""" Queue is a collection of entities that are maintained in a sequence
and can be modified by the addition of entities at one end of the
sequence and removal from the other end of the sequence.
The order in which elements come off of a queue are
First In, First Out (FIFO).
https://en.wikipedia.org/wiki/Queue_(abstract_data_type)
"""
def __init__(self):
""" Initializing empty queue."""
self.items = []
def enqueue(self, item):
""" Takes in an item and inserts that item into the 0th index of the
list that is representing the Queue.
The runtime is O(n), or linear time, because inserting into the 0th
index of a list forces all the other items in the list to move one
index to the right.
:param item: item to be inserted in queue
"""
self.items.insert(0, item)
def dequeue(self):
""" Returns and removes the front-most item of the Queue, which is
represented by the last items in the list.
The runtime is O(1), or constant time, because indexing to the end of
a list happens in constant time.
:return: front-most item of the queue or None, if the queue is empty
"""
if not self.items:
raise IndexError('queue is empty')
return self.items.pop()
def peek(self):
""" Returns the last item in the list, which represents the front-most
item in the Queue.
The runtime is O(1), or constant time, because we are just indexing to
the last item of the list and returning the value found there.
:return: front-most item of the queue or None, if the queue is empty
"""
if not self.items:
raise IndexError('queue is empty')
return self.items[-1]
def size(self):
""" Returns the size of the Queue, which is represent bu the length
of the list
The runtime is O(1), or constant time, because we're only returning
the length.
:return: length of list
:rtype: int """
return len(self.items)
def is_empty(self):
""" Returns a boolean value expressing whether or not the list
representing the Queue is empty.
Runs in constant time, because it's only checking for equality.
:return: returns true if stack is empty, else false
"""
return self.items == [] | queue/library/queue.py | class Queue:
""" Queue is a collection of entities that are maintained in a sequence
and can be modified by the addition of entities at one end of the
sequence and removal from the other end of the sequence.
The order in which elements come off of a queue are
First In, First Out (FIFO).
https://en.wikipedia.org/wiki/Queue_(abstract_data_type)
"""
def __init__(self):
""" Initializing empty queue."""
self.items = []
def enqueue(self, item):
""" Takes in an item and inserts that item into the 0th index of the
list that is representing the Queue.
The runtime is O(n), or linear time, because inserting into the 0th
index of a list forces all the other items in the list to move one
index to the right.
:param item: item to be inserted in queue
"""
self.items.insert(0, item)
def dequeue(self):
""" Returns and removes the front-most item of the Queue, which is
represented by the last items in the list.
The runtime is O(1), or constant time, because indexing to the end of
a list happens in constant time.
:return: front-most item of the queue or None, if the queue is empty
"""
if not self.items:
raise IndexError('queue is empty')
return self.items.pop()
def peek(self):
""" Returns the last item in the list, which represents the front-most
item in the Queue.
The runtime is O(1), or constant time, because we are just indexing to
the last item of the list and returning the value found there.
:return: front-most item of the queue or None, if the queue is empty
"""
if not self.items:
raise IndexError('queue is empty')
return self.items[-1]
def size(self):
""" Returns the size of the Queue, which is represent bu the length
of the list
The runtime is O(1), or constant time, because we're only returning
the length.
:return: length of list
:rtype: int """
return len(self.items)
def is_empty(self):
""" Returns a boolean value expressing whether or not the list
representing the Queue is empty.
Runs in constant time, because it's only checking for equality.
:return: returns true if stack is empty, else false
"""
return self.items == [] | 0.898944 | 0.912942 |
import re
from datetime import datetime
from sqlalchemy import Column,Integer, String, DateTime, Sequence, Index, \
UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Lexicon(Base):
__tablename__ = 'lexicon_2_1'
id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
surface = Column(String(64), nullable=False, index=True)
pos = Column(String(64), nullable=False, index=True)
semantic_class = Column(String(16), default="*", nullable=False)
read = Column(String(64), nullable=False)
type_name = Column(String(16), name='type', default="*", nullable=False)
start_pos = Column(String(16), default="*", nullable=False)
end_pos = Column(String(16), default="*", nullable=False)
expression = Column(String(128), default="*", nullable=False)
class_name = Column(String(64), name='class', index=True)
is_available = Column(Integer, nullable=False, index=True)
is_inspected = Column(Integer, nullable=False, index=True)
last_modified = Column(DateTime,
default=datetime.now(),
nullable=False,
index=True)
comment = Column(String(256))
__table_args__ = (UniqueConstraint('surface',
'pos',
'semantic_class',
name='idx_surface_pos_semantic_class'),)
def __init__(self,
surface,
pos,
read,
semantic_class='*',
type_name='*',
start_pos='*',
end_pos='*',
expression='*',
class_name=None,
is_available=1,
is_inspected=0,
last_modified=datetime.now()):
self.surface = surface
self.pos = pos
self.semantic_class = semantic_class
self.read = read
self.type_name = type_name
self.start_pos = start_pos
self.end_pos = end_pos
self.expression = expression
self.class_name = class_name if class_name else '*'
self.is_available = is_available if is_available else 1
self.is_inspected = is_inspected if is_inspected else 0
def __repr__(self):
return '<' + ','.join((self.surface,
self.pos,
self.semantic_class,
self.read,
self.type_name,
self.start_pos,
self.end_pos,
self.expression,
self.class_name,
str(self.is_available),
str(self.is_inspected))) + '>' | utils/dictionary/lexicon.py | import re
from datetime import datetime
from sqlalchemy import Column,Integer, String, DateTime, Sequence, Index, \
UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Lexicon(Base):
__tablename__ = 'lexicon_2_1'
id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
surface = Column(String(64), nullable=False, index=True)
pos = Column(String(64), nullable=False, index=True)
semantic_class = Column(String(16), default="*", nullable=False)
read = Column(String(64), nullable=False)
type_name = Column(String(16), name='type', default="*", nullable=False)
start_pos = Column(String(16), default="*", nullable=False)
end_pos = Column(String(16), default="*", nullable=False)
expression = Column(String(128), default="*", nullable=False)
class_name = Column(String(64), name='class', index=True)
is_available = Column(Integer, nullable=False, index=True)
is_inspected = Column(Integer, nullable=False, index=True)
last_modified = Column(DateTime,
default=datetime.now(),
nullable=False,
index=True)
comment = Column(String(256))
__table_args__ = (UniqueConstraint('surface',
'pos',
'semantic_class',
name='idx_surface_pos_semantic_class'),)
def __init__(self,
surface,
pos,
read,
semantic_class='*',
type_name='*',
start_pos='*',
end_pos='*',
expression='*',
class_name=None,
is_available=1,
is_inspected=0,
last_modified=datetime.now()):
self.surface = surface
self.pos = pos
self.semantic_class = semantic_class
self.read = read
self.type_name = type_name
self.start_pos = start_pos
self.end_pos = end_pos
self.expression = expression
self.class_name = class_name if class_name else '*'
self.is_available = is_available if is_available else 1
self.is_inspected = is_inspected if is_inspected else 0
def __repr__(self):
return '<' + ','.join((self.surface,
self.pos,
self.semantic_class,
self.read,
self.type_name,
self.start_pos,
self.end_pos,
self.expression,
self.class_name,
str(self.is_available),
str(self.is_inspected))) + '>' | 0.452778 | 0.24159 |
import cv2
import numpy as np
from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape
from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, RealTimePlot
from src.DroneVision.DroneVision_src.hardware.PyQtImage import PyQtImage
from CameraCalibration import CameraCalibration
from src.bin.SaveParameters import SaveParameters
'''
@brief Class for calibrating the stereo vision system.
@param me_master (True if this instance is master, False for slave)
@param settings_inst (CALIB settings)
@param reset (True/False)
@param plot_figure (optional plot figure (default=None))
@param use_PyQt (default=True)
'''
class StereoCalibration():
def __init__(self, me_master, settings_inst, reset, plot_figure=None, use_PyQt=True):
'''CONSTRUCTOR'''
self.__saveParameters = SaveParameters(settings_inst.GetSettings('calib_save_folder'), settings_inst.GetSettings('calib_save_fname_stereo'), settings_inst.GetSettings('save_calib_param_to_json'))
self.__leftCameraCalibration = CameraCalibration(settings_inst, settings_inst.GetSettings('calib_img_folder_left_cam'), settings_inst.GetSettings('calib_save_fname_left_cam'), reset, plot_figure=plot_figure)
self.__rightCameraCalibration = CameraCalibration(settings_inst, settings_inst.GetSettings('calib_img_folder_right_cam'), settings_inst.GetSettings('calib_save_fname_right_cam'), reset, plot_figure=plot_figure)
self.__show_chessboard_img = settings_inst.GetSettings('calib_show_imgs')
self.__baseline = settings_inst.GetSettings('baseline')
self.__stereo_calib_reset = reset
self.__me_master = me_master
self.__stereo_calibrated = False
self.__plot_figure = plot_figure
self.__use_PyQt = use_PyQt
self.__calib_params = {}
def CalibrateStereoVisionSystem(self, force_calibration=False, default_frame_shape=(-1,-1)):
'''
@brief Calibrate stereo vision system, with full calibration of cameras as well.
@param force_calibration (True/False for forcing new full calibration)
@param default_frame_shape (Default desired frame shape of processed frames.
Given as a tuple of (height, width).
The rectification vectors will automatically be adjusted to incoming frame shapes (only ones for a new shape), but it is time consuming to compute.
Set (-1,-1) to not change the precomputed intrinsic parameters (default))
'''
self.__leftCameraCalibration.CalibrateCameraDistortion(force_calibration=force_calibration)
self.__rightCameraCalibration.CalibrateCameraDistortion(force_calibration=force_calibration)
self.__stereo_calibrated = True
new_calibration = False
if not(self.LoadStereoParameters()) or self.__stereo_calib_reset or force_calibration:
new_calibration = True
self.StereoCalibrate()
self.StereoRectify()
self.SaveStereoParameters()
self.InitUndistortRectifyMapStereo()
if new_calibration:
self.ShowTestCalibImage()
if default_frame_shape[0] > 0:
self.SetIntrinsicStereoScale(default_frame_shape)
def GetNewRealTimePlot(self):
'''
@brief Get new realtime plot figure
@return realtime plot figure
'''
if self.__plot_figure != None:
realTimePlot = self.__plot_figure
realTimePlot(reset=True)
else:
if self.__use_PyQt:
realTimePlot = PyQtImage(True)
else:
realTimePlot = RealTimePlot()
return realTimePlot
def ShowTestCalibImage(self):
'''
@brief Show test image in plot
'''
if self.__show_chessboard_img:
touple_frames = []
if self.__me_master:
side_txt = 'left'
test_img_fname = self.__leftCameraCalibration.GetDistorionCalibImages()[0]
else:
side_txt = 'right'
test_img_fname = self.__rightCameraCalibration.GetDistorionCalibImages()[0]
test_img = GetImage(test_img_fname)
headline = '[{0}] before shape {1}'.format(side_txt, test_img.shape)
touple_frames.append((headline, test_img))
test_und_img = self.Undistort(test_img)
headline = '[{0}] After undistort shape {1}'.format(side_txt, test_und_img.shape)
touple_frames.append((headline, test_und_img))
realTimePlot = self.GetNewRealTimePlot()
realTimePlot(touple_frames, 'calibration_result')
def AssertStereoCalibrated(self):
'''
@brief Assert that the stereo vision system is calibrated.
Raises Exception if it is not calibrated.
'''
if not(self.GetStereoCalibrated()):
raise Exception('Stereo is not calibrated. Run CalibrateStereoVisionSystem().')
def CheckIntrinsicStereoScale(self, frame_size):
'''
@brief Check intrinsic stereo scale
@return True/False
'''
return self.__leftCameraCalibration.CheckIntrinsicScale(frame_size)
def GetLeftCameraCalibrationInstance(self):
'''
@brief Get left camera calibration instance
@return leftCameraCalibration
'''
return self.__leftCameraCalibration
def GetRightCameraCalibrationInstance(self):
'''
@brief Get right camera calibration instance
@return rightCameraCalibration
'''
return self.__rightCameraCalibration
def GetBaseline(self):
'''
@brief Get baseline between cameras in mm
@return baseline
'''
return float(self.__baseline)
def GetPixelBaseline(self):
'''
@brief Get baseline between camers in pixel units
'''
self.AssertStereoCalibrated()
return self.__calib_params['P2'][0,3]*-1 # Projection matrix give negated baseline seen from the right camera.
def GetFocalLength(self):
'''
@brief Get original focal length in mm
@return focal_length
'''
return self.__leftCameraCalibration.GetFocalLength()
def GetPixelFocalLength(self):
'''
@brief Get focal length in camera pixel units
@return f_x, f_y, f_z
'''
self.AssertStereoCalibrated()
f_x = self.__calib_params['P1'][0,0]
f_y = self.__calib_params['P1'][1,1]
f_z = (f_x + f_y)/2.0
return f_x, f_y, f_z
def GetProjectionMatrices(self):
'''
@brief Get projection matrices (P1 and P2)
@return P1, P2
'''
return self.__calib_params['P1'], self.__calib_params['P2']
def GetDisparityToDepthMatrix(self):
'''
@brief Get disparity to depth transformation matrix (Q)
@return Q
'''
return self.__calib_params['Q']
def GetStereoCalibrated(self):
'''
@brief Check if stereo vision system is calibrated
@return True/False
'''
return self.__stereo_calibrated
def AssertSameStereoSize(self):
'''
@brief Assert that both cameras have same image dimensions
'''
left_imageSize = self.__leftCameraCalibration.GetImageSize()
right_imageSize = self.__rightCameraCalibration.GetImageSize()
if not(left_imageSize[0] == right_imageSize[0]) or not(left_imageSize[1] == right_imageSize[1]):
raise ValueError('Left and right image dimensions do not match!' )
def StereoCalibrate(self):
'''
@brief Calibrates the stereo camera first, and then computes rectification transforms for each head of a calibrated stereo camera.
Computes rotation matrix (R), translation vector (T), essential matrix (E) and fundamental matrix (F)
'''
self.AssertSameStereoSize()
cameraMatrix1, distCoeffs1 = self.__leftCameraCalibration.GetIntrinsicParameters()
cameraMatrix2, distCoeffs2 = self.__rightCameraCalibration.GetIntrinsicParameters()
objectPoints = self.__leftCameraCalibration.GetObjectPoints()
imagePoints1 = self.__leftCameraCalibration.GetImagePoints()
imagePoints2 = self.__rightCameraCalibration.GetImagePoints()
imageSize = self.__leftCameraCalibration.GetImageSize()
stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
#stereocalib_flags = cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_SAME_FOCAL_LENGTH | cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5
#stereocalib_flags = cv2.CALIB_FIX_INTRINSIC | cv2.CALIB_SAME_FOCAL_LENGTH
stereocalib_flags = cv2.CALIB_FIX_INTRINSIC | cv2.CALIB_ZERO_DISPARITY | cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objectPoints, \
imagePoints1, \
imagePoints2, \
cameraMatrix1, \
distCoeffs1, \
cameraMatrix2, \
distCoeffs2, \
(imageSize[1], imageSize[0]), \
criteria=stereocalib_criteria, \
flags=stereocalib_flags)
if not(retval):
raise Exception('Stereo calibration failed!')
# Store params in dictionary
self.__calib_params['cameraMatrix1'] = cameraMatrix1
self.__calib_params['distCoeffs1'] = distCoeffs1
self.__calib_params['cameraMatrix2'] = cameraMatrix2
self.__calib_params['distCoeffs2'] = distCoeffs2
self.__calib_params['R'] = R
self.__calib_params['T'] = T
self.__calib_params['E'] = E
self.__calib_params['F'] = F
def StereoRectify(self, frame_size=None, rectify_scale=0.0):
'''
@brief Rectify the stereopsis system.
Computes rectification transform (rotation matrices - 3x3) (R), projection matrices 3x4 (P) and disparity to depth mapping matrix 4x4 (Q)
rectify_scale: 0 = full crop, 1 = no crop
If rectify_scale = 1, all pixels are retained with some extra black images.
If rectify_scale = 0, it returns undistorted image with minimum unwanted pixels.
@param frame_size ((height, width) If None, then stored left frame size is used (default=None))
@param rectify_scale (default=0.0)
'''
if not(isinstance(frame_size, tuple)) and not(isinstance(frame_size, list)):
frame_size = self.__leftCameraCalibration.GetImageSize()
if not(self.CheckIntrinsicStereoScale(frame_size)):
self.__leftCameraCalibration.RectifyCamera(frame_size)
self.__rightCameraCalibration.RectifyCamera(frame_size)
self.StereoCalibrate()
self.__calib_params['R'], self.__calib_params['T'] = self.ComputeTranslationAndRotationMatrices()
self.__calib_params['R1'], self.__calib_params['R2'], self.__calib_params['P1'], self.__calib_params['P2'], self.__calib_params['Q'], self.__calib_params['roi1'], self.__calib_params['roi2'] = cv2.stereoRectify(self.__calib_params['cameraMatrix1'], self.__calib_params['distCoeffs1'], self.__calib_params['cameraMatrix2'], self.__calib_params['distCoeffs2'], (frame_size[1], frame_size[0]), self.__calib_params['R'], self.__calib_params['T'], alpha=rectify_scale)
def InitUndistortRectifyMapStereo(self):
'''
@brief Compute rectification maps
'''
frame_size = self.__leftCameraCalibration.GetImageSize()
self.__left_rectify_maps = cv2.initUndistortRectifyMap(self.__calib_params['cameraMatrix1'], self.__calib_params['distCoeffs1'], self.__calib_params['R1'], self.__calib_params['P1'], (frame_size[1], frame_size[0]), cv2.CV_16SC2)
self.__right_rectify_maps = cv2.initUndistortRectifyMap(self.__calib_params['cameraMatrix2'], self.__calib_params['distCoeffs2'], self.__calib_params['R2'], self.__calib_params['P2'], (frame_size[1], frame_size[0]), cv2.CV_16SC2)
def ComputeTranslationAndRotationMatrices(self):
'''
@brief Compute the translation vector (T) and rotation vector (R) on perfectly horizontally aligned cameras.
@return R, T
'''
R = np.eye(3) # Perfectly aligned cameras
T = np.zeros((3,1))
T_x = self.GetBaseline() # horizontal baseline in mm
T[0,0] = -T_x
return R, T
def SetIntrinsicStereoScale(self, frame_size):
'''
@brief Set new intrinsic scale parameters for a new frame shape (if it is different from the current parameters).
Calibration is invariant to scale, but intrinsic parameters are not.
@param frame_size (Tuple as (height, width))
'''
if not(self.CheckIntrinsicStereoScale(frame_size)):
self.StereoRectify(frame_size)
self.InitUndistortRectifyMapStereo()
def Undistort(self, frame):
'''
@brief Stereo undistorting
@return undistorted frame
'''
self.AssertStereoCalibrated()
if not(self.CheckIntrinsicStereoScale(GetShape(frame))):
self.SetIntrinsicStereoScale(GetShape(frame))
if self.__me_master:
und_frame = cv2.remap(frame, self.__left_rectify_maps[0], self.__left_rectify_maps[1], cv2.INTER_LANCZOS4)
else:
und_frame = cv2.remap(frame, self.__right_rectify_maps[0], self.__right_rectify_maps[1], cv2.INTER_LANCZOS4)
return self.CropUndistortedFrame(und_frame)
def CropUndistortedFrame(self, und_frame):
'''
@brief Crop undistorted frame
@param und_frame
@return und_frame Cropped undistorted frame
'''
if self.__me_master:
x, y, w, h = self.__calib_params['roi1']
else:
x, y, w, h = self.__calib_params['roi2']
und_frame = und_frame[y:y+h, x:x+w]
return und_frame
def SaveStereoParameters(self):
'''
@brief Save stereo parameters for later use.
'''
self.__saveParameters.Save(self.__calib_params)
def LoadStereoParameters(self):
'''
@brief Load stereo parameters
@return True/False - stereo parameters loaded successfully.
'''
ok, self.__calib_params = self.__saveParameters.Load()
return ok | src/DroneVision/DroneVision_src/imgProcessing/CameraCalibration/StereoCalibration.py | import cv2
import numpy as np
from src.DroneVision.DroneVision_src.imgProcessing.frameTools.frameTools import GetShape
from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, RealTimePlot
from src.DroneVision.DroneVision_src.hardware.PyQtImage import PyQtImage
from CameraCalibration import CameraCalibration
from src.bin.SaveParameters import SaveParameters
'''
@brief Class for calibrating the stereo vision system.
@param me_master (True if this instance is master, False for slave)
@param settings_inst (CALIB settings)
@param reset (True/False)
@param plot_figure (optional plot figure (default=None))
@param use_PyQt (default=True)
'''
class StereoCalibration():
def __init__(self, me_master, settings_inst, reset, plot_figure=None, use_PyQt=True):
'''CONSTRUCTOR'''
self.__saveParameters = SaveParameters(settings_inst.GetSettings('calib_save_folder'), settings_inst.GetSettings('calib_save_fname_stereo'), settings_inst.GetSettings('save_calib_param_to_json'))
self.__leftCameraCalibration = CameraCalibration(settings_inst, settings_inst.GetSettings('calib_img_folder_left_cam'), settings_inst.GetSettings('calib_save_fname_left_cam'), reset, plot_figure=plot_figure)
self.__rightCameraCalibration = CameraCalibration(settings_inst, settings_inst.GetSettings('calib_img_folder_right_cam'), settings_inst.GetSettings('calib_save_fname_right_cam'), reset, plot_figure=plot_figure)
self.__show_chessboard_img = settings_inst.GetSettings('calib_show_imgs')
self.__baseline = settings_inst.GetSettings('baseline')
self.__stereo_calib_reset = reset
self.__me_master = me_master
self.__stereo_calibrated = False
self.__plot_figure = plot_figure
self.__use_PyQt = use_PyQt
self.__calib_params = {}
def CalibrateStereoVisionSystem(self, force_calibration=False, default_frame_shape=(-1,-1)):
'''
@brief Calibrate stereo vision system, with full calibration of cameras as well.
@param force_calibration (True/False for forcing new full calibration)
@param default_frame_shape (Default desired frame shape of processed frames.
Given as a tuple of (height, width).
The rectification vectors will automatically be adjusted to incoming frame shapes (only ones for a new shape), but it is time consuming to compute.
Set (-1,-1) to not change the precomputed intrinsic parameters (default))
'''
self.__leftCameraCalibration.CalibrateCameraDistortion(force_calibration=force_calibration)
self.__rightCameraCalibration.CalibrateCameraDistortion(force_calibration=force_calibration)
self.__stereo_calibrated = True
new_calibration = False
if not(self.LoadStereoParameters()) or self.__stereo_calib_reset or force_calibration:
new_calibration = True
self.StereoCalibrate()
self.StereoRectify()
self.SaveStereoParameters()
self.InitUndistortRectifyMapStereo()
if new_calibration:
self.ShowTestCalibImage()
if default_frame_shape[0] > 0:
self.SetIntrinsicStereoScale(default_frame_shape)
def GetNewRealTimePlot(self):
'''
@brief Get new realtime plot figure
@return realtime plot figure
'''
if self.__plot_figure != None:
realTimePlot = self.__plot_figure
realTimePlot(reset=True)
else:
if self.__use_PyQt:
realTimePlot = PyQtImage(True)
else:
realTimePlot = RealTimePlot()
return realTimePlot
def ShowTestCalibImage(self):
'''
@brief Show test image in plot
'''
if self.__show_chessboard_img:
touple_frames = []
if self.__me_master:
side_txt = 'left'
test_img_fname = self.__leftCameraCalibration.GetDistorionCalibImages()[0]
else:
side_txt = 'right'
test_img_fname = self.__rightCameraCalibration.GetDistorionCalibImages()[0]
test_img = GetImage(test_img_fname)
headline = '[{0}] before shape {1}'.format(side_txt, test_img.shape)
touple_frames.append((headline, test_img))
test_und_img = self.Undistort(test_img)
headline = '[{0}] After undistort shape {1}'.format(side_txt, test_und_img.shape)
touple_frames.append((headline, test_und_img))
realTimePlot = self.GetNewRealTimePlot()
realTimePlot(touple_frames, 'calibration_result')
def AssertStereoCalibrated(self):
'''
@brief Assert that the stereo vision system is calibrated.
Raises Exception if it is not calibrated.
'''
if not(self.GetStereoCalibrated()):
raise Exception('Stereo is not calibrated. Run CalibrateStereoVisionSystem().')
def CheckIntrinsicStereoScale(self, frame_size):
'''
@brief Check intrinsic stereo scale
@return True/False
'''
return self.__leftCameraCalibration.CheckIntrinsicScale(frame_size)
def GetLeftCameraCalibrationInstance(self):
'''
@brief Get left camera calibration instance
@return leftCameraCalibration
'''
return self.__leftCameraCalibration
def GetRightCameraCalibrationInstance(self):
'''
@brief Get right camera calibration instance
@return rightCameraCalibration
'''
return self.__rightCameraCalibration
def GetBaseline(self):
'''
@brief Get baseline between cameras in mm
@return baseline
'''
return float(self.__baseline)
def GetPixelBaseline(self):
'''
@brief Get baseline between camers in pixel units
'''
self.AssertStereoCalibrated()
return self.__calib_params['P2'][0,3]*-1 # Projection matrix give negated baseline seen from the right camera.
def GetFocalLength(self):
'''
@brief Get original focal length in mm
@return focal_length
'''
return self.__leftCameraCalibration.GetFocalLength()
def GetPixelFocalLength(self):
'''
@brief Get focal length in camera pixel units
@return f_x, f_y, f_z
'''
self.AssertStereoCalibrated()
f_x = self.__calib_params['P1'][0,0]
f_y = self.__calib_params['P1'][1,1]
f_z = (f_x + f_y)/2.0
return f_x, f_y, f_z
def GetProjectionMatrices(self):
'''
@brief Get projection matrices (P1 and P2)
@return P1, P2
'''
return self.__calib_params['P1'], self.__calib_params['P2']
def GetDisparityToDepthMatrix(self):
'''
@brief Get disparity to depth transformation matrix (Q)
@return Q
'''
return self.__calib_params['Q']
def GetStereoCalibrated(self):
'''
@brief Check if stereo vision system is calibrated
@return True/False
'''
return self.__stereo_calibrated
def AssertSameStereoSize(self):
'''
@brief Assert that both cameras have same image dimensions
'''
left_imageSize = self.__leftCameraCalibration.GetImageSize()
right_imageSize = self.__rightCameraCalibration.GetImageSize()
if not(left_imageSize[0] == right_imageSize[0]) or not(left_imageSize[1] == right_imageSize[1]):
raise ValueError('Left and right image dimensions do not match!' )
def StereoCalibrate(self):
'''
@brief Calibrates the stereo camera first, and then computes rectification transforms for each head of a calibrated stereo camera.
Computes rotation matrix (R), translation vector (T), essential matrix (E) and fundamental matrix (F)
'''
self.AssertSameStereoSize()
cameraMatrix1, distCoeffs1 = self.__leftCameraCalibration.GetIntrinsicParameters()
cameraMatrix2, distCoeffs2 = self.__rightCameraCalibration.GetIntrinsicParameters()
objectPoints = self.__leftCameraCalibration.GetObjectPoints()
imagePoints1 = self.__leftCameraCalibration.GetImagePoints()
imagePoints2 = self.__rightCameraCalibration.GetImagePoints()
imageSize = self.__leftCameraCalibration.GetImageSize()
stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
#stereocalib_flags = cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_SAME_FOCAL_LENGTH | cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5
#stereocalib_flags = cv2.CALIB_FIX_INTRINSIC | cv2.CALIB_SAME_FOCAL_LENGTH
stereocalib_flags = cv2.CALIB_FIX_INTRINSIC | cv2.CALIB_ZERO_DISPARITY | cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objectPoints, \
imagePoints1, \
imagePoints2, \
cameraMatrix1, \
distCoeffs1, \
cameraMatrix2, \
distCoeffs2, \
(imageSize[1], imageSize[0]), \
criteria=stereocalib_criteria, \
flags=stereocalib_flags)
if not(retval):
raise Exception('Stereo calibration failed!')
# Store params in dictionary
self.__calib_params['cameraMatrix1'] = cameraMatrix1
self.__calib_params['distCoeffs1'] = distCoeffs1
self.__calib_params['cameraMatrix2'] = cameraMatrix2
self.__calib_params['distCoeffs2'] = distCoeffs2
self.__calib_params['R'] = R
self.__calib_params['T'] = T
self.__calib_params['E'] = E
self.__calib_params['F'] = F
def StereoRectify(self, frame_size=None, rectify_scale=0.0):
'''
@brief Rectify the stereopsis system.
Computes rectification transform (rotation matrices - 3x3) (R), projection matrices 3x4 (P) and disparity to depth mapping matrix 4x4 (Q)
rectify_scale: 0 = full crop, 1 = no crop
If rectify_scale = 1, all pixels are retained with some extra black images.
If rectify_scale = 0, it returns undistorted image with minimum unwanted pixels.
@param frame_size ((height, width) If None, then stored left frame size is used (default=None))
@param rectify_scale (default=0.0)
'''
if not(isinstance(frame_size, tuple)) and not(isinstance(frame_size, list)):
frame_size = self.__leftCameraCalibration.GetImageSize()
if not(self.CheckIntrinsicStereoScale(frame_size)):
self.__leftCameraCalibration.RectifyCamera(frame_size)
self.__rightCameraCalibration.RectifyCamera(frame_size)
self.StereoCalibrate()
self.__calib_params['R'], self.__calib_params['T'] = self.ComputeTranslationAndRotationMatrices()
self.__calib_params['R1'], self.__calib_params['R2'], self.__calib_params['P1'], self.__calib_params['P2'], self.__calib_params['Q'], self.__calib_params['roi1'], self.__calib_params['roi2'] = cv2.stereoRectify(self.__calib_params['cameraMatrix1'], self.__calib_params['distCoeffs1'], self.__calib_params['cameraMatrix2'], self.__calib_params['distCoeffs2'], (frame_size[1], frame_size[0]), self.__calib_params['R'], self.__calib_params['T'], alpha=rectify_scale)
def InitUndistortRectifyMapStereo(self):
'''
@brief Compute rectification maps
'''
frame_size = self.__leftCameraCalibration.GetImageSize()
self.__left_rectify_maps = cv2.initUndistortRectifyMap(self.__calib_params['cameraMatrix1'], self.__calib_params['distCoeffs1'], self.__calib_params['R1'], self.__calib_params['P1'], (frame_size[1], frame_size[0]), cv2.CV_16SC2)
self.__right_rectify_maps = cv2.initUndistortRectifyMap(self.__calib_params['cameraMatrix2'], self.__calib_params['distCoeffs2'], self.__calib_params['R2'], self.__calib_params['P2'], (frame_size[1], frame_size[0]), cv2.CV_16SC2)
def ComputeTranslationAndRotationMatrices(self):
    '''
    @brief Compute extrinsics for perfectly horizontally aligned cameras.

    The relative rotation is the identity; the translation is the
    horizontal baseline along the negative x axis.
    @return (R, T): R is a 3x3 identity matrix, T a 3x1 vector in mm
    '''
    rotation = np.identity(3)
    translation = np.zeros((3, 1))
    # The second camera sits one baseline to the side of the first.
    translation[0, 0] = -self.GetBaseline()
    return rotation, translation
def SetIntrinsicStereoScale(self, frame_size):
    '''
    @brief Re-scale the intrinsic parameters for a new frame shape.

    Calibration is invariant to scale, but the intrinsic matrices are not,
    so a resolution change requires re-rectification and new remap tables.
    Does nothing when the stored parameters already match frame_size.
    @param frame_size (Tuple as (height, width))
    '''
    if not(self.CheckIntrinsicStereoScale(frame_size)):
        self.StereoRectify(frame_size)
        self.InitUndistortRectifyMapStereo()
def Undistort(self, frame):
    '''
    @brief Undistort and rectify a frame from this camera.

    Re-scales the intrinsics first if the frame shape differs from the
    calibrated shape. The left rectify maps are applied when this
    instance is the master camera, otherwise the right maps.
    @param frame Input image
    @return Undistorted frame, cropped to the valid ROI
    '''
    self.AssertStereoCalibrated()
    # Lazily adapt the calibration to the incoming frame resolution.
    if not(self.CheckIntrinsicStereoScale(GetShape(frame))):
        self.SetIntrinsicStereoScale(GetShape(frame))
    if self.__me_master:
        und_frame = cv2.remap(frame, self.__left_rectify_maps[0], self.__left_rectify_maps[1], cv2.INTER_LANCZOS4)
    else:
        und_frame = cv2.remap(frame, self.__right_rectify_maps[0], self.__right_rectify_maps[1], cv2.INTER_LANCZOS4)
    return self.CropUndistortedFrame(und_frame)
def CropUndistortedFrame(self, und_frame):
    '''
    @brief Crop an undistorted frame to its valid region of interest.

    The ROI (x, y, width, height) comes from cv2.stereoRectify:
    'roi1' for the master camera, 'roi2' for the other one.
    @param und_frame Undistorted frame
    @return und_frame Cropped undistorted frame
    '''
    if self.__me_master:
        x, y, w, h = self.__calib_params['roi1']
    else:
        x, y, w, h = self.__calib_params['roi2']
    und_frame = und_frame[y:y+h, x:x+w]
    return und_frame
def SaveStereoParameters(self):
    '''
    @brief Persist the current stereo calibration parameters for later use.
    '''
    self.__saveParameters.Save(self.__calib_params)
def LoadStereoParameters(self):
    '''
    @brief Load previously saved stereo calibration parameters.

    Replaces the in-memory parameter dictionary with the stored one.
    @return True/False - stereo parameters loaded successfully.
    '''
    # FIX: stray dataset residue ("| 0.570092 | 0.273975") had been fused
    # onto the return statement, turning it into a runtime TypeError.
    ok, self.__calib_params = self.__saveParameters.Load()
    return ok
from sys import stdout, stderr
import csv
from argparse import ArgumentParser
from io import TextIOWrapper
from copy import deepcopy
from datetime import timedelta
from dateutil import parser
def command_line_options()->dict:
    """Parse the command line arguments and return them as a dict."""
    # NOTE: this local 'parser' intentionally shadows the module-level
    # dateutil 'parser' import, but only inside this function.
    parser = ArgumentParser(prog="obd_log_to_csv",
                            description="""Telemetry CSV To Delta CSV
                            generates values indicating the rate of change for
                            identified columns. All original columns pass through
                            unmolested. The delta columns are added columns.
                            """)
    parser.add_argument(
        "--input_csv_file",
        help="""
        CSV file generated by obd_log_to_csv.obd_log_to_csv that includes the header.
        That is, each column in the file has a valid text column name in the first row.
        """,
    )
    parser.add_argument(
        "--delta",
        help="""
        Comma separated list of commands where successive pairs of non-null return values would
        be used to calculate the rate of change between the two return values. e.g.
        "SPEED,FUEL_LEVEL,THROTTLE_POSITION". Calculated from
        "(second-return-value - first-return-value) / (second-iso_ts_post - first-iso_ts_post)".
        Applied in this way, delta SPEED would represent acceleration.
        The results will be in a column headed by delta-COMMAND_NAME. e.g. delta SPEED column name
        would be "delta-SPEED".
        """,
    )
    parser.add_argument(
        "--output_csv_file",
        help="""CSV output file.
        File can be either a full or relative path name.
        If the file already exists, it will be overwritten.
        Do not make the input and output file the same.
        Bad things will happen. Defaults to stdout (terminal output).
        """,
        default="stdout"
    )
    parser.add_argument(
        "--verbose",
        help="Turn verbose output on. Default is off.",
        default=False,
        action='store_true'
    )
    return vars(parser.parse_args())
def delta_column_names(delta_columns:list) -> list:
    """Map each command name to its derived "delta-<NAME>" column header."""
    return ["delta-" + column for column in delta_columns]
def delta(input_csv_file, output_csv_file, delta_columns, verbose=False):
    """Copy the input CSV to the output, appending one "delta-<NAME>"
    rate-of-change column per requested command.

    For each command the rate is computed between the current row and the
    most recent earlier row where the command had a non-empty value:

        (value2 - value1) / (iso_ts_post of row2 - iso_ts_pre of row1)

    in units per second. Rows with no earlier reference value (or an empty
    current value) get None in the delta column.

    input_csv_file: readable file object positioned at a CSV header row
    output_csv_file: writable file object
    delta_columns: list of command/column names to differentiate
    verbose: echo every row to stderr when True
    Raises ValueError when a requested column (or either timestamp column)
    is missing from the input header.
    """
    delta_column_name_list = delta_column_names(delta_columns)
    reader = csv.DictReader(input_csv_file)
    field_names = reader.fieldnames
    # Fail fast if the requested columns (or timestamps) aren't in the input.
    for name in (delta_columns + ['iso_ts_pre', 'iso_ts_post', ]):
        if name not in field_names:
            raise ValueError(f"delta column '{name}' missing from CSV input file")
    all_field_names = field_names + delta_column_name_list
    writer = csv.DictWriter(output_csv_file, fieldnames=all_field_names)
    writer.writeheader()
    # Most recent non-empty value (and its timestamps) seen per command.
    delta_first = {}
    for in_row in reader:
        if verbose:
            print(f"in_row: {in_row}", file=stderr)
        # The original row passes through unmolested.
        out_row = deepcopy(in_row)
        for name in delta_columns:
            if (
                name in delta_first and
                delta_first[name]['value'] and
                in_row[name]
            ):
                v1 = float(delta_first[name]['value'])
                v2 = float(in_row[name])
                t1 = parser.isoparse(delta_first[name]['iso_ts_pre'])
                t2 = parser.isoparse(in_row['iso_ts_post'])
                # BUG FIX: the elapsed time must be converted to seconds by
                # *dividing* the microsecond count by 1e6; the old code
                # multiplied, shrinking every rate by a factor of 1e12.
                elapsed_seconds = (t2 - t1).total_seconds()
                # Guard: identical timestamps would otherwise raise
                # ZeroDivisionError; emit None for an undefined rate.
                out_row[f"delta-{name}"] = (v2 - v1) / elapsed_seconds if elapsed_seconds else None
            else:
                out_row[f"delta-{name}"] = None
            if in_row[name]:
                # Remember this value as the reference for the next delta.
                delta_first[name] = {
                    'value': in_row[name],
                    'iso_ts_pre': in_row['iso_ts_pre'],
                    'iso_ts_post': in_row['iso_ts_post'],
                }
        if verbose:
            print(f"out_row: {out_row}", file=stderr)
        writer.writerow(out_row)
def main():
    """Command line entry point: parse options and stream the delta CSV."""
    args = command_line_options()
    input_csv_file_name = args['input_csv_file']
    output_csv_file_name = args['output_csv_file']
    verbose = args['verbose']
    # --delta is a comma separated list; no flag means no delta columns.
    delta_columns = (args['delta']).split(sep=',') if args['delta'] else []
    if verbose:
        print(f"verbose: {args['verbose']}", file=stderr)
        print(f"input csv file: {input_csv_file_name}", file=stderr)
        print(f"output csv file: {output_csv_file_name}", file=stderr)
        print(f"delta: {delta_columns}", file=stderr)
    # "stdout" is a sentinel file name meaning: write to the terminal.
    if output_csv_file_name != "stdout":
        with open(output_csv_file_name, "w") as output_csv_file:
            with open(input_csv_file_name, "r") as input_csv_file:
                delta(input_csv_file, output_csv_file, delta_columns, verbose=verbose)
    else:
        with open(input_csv_file_name, "r") as input_csv_file:
            delta(input_csv_file, stdout, delta_columns, verbose=verbose)
if __name__ == "__main__":
main() | obd_log_to_csv/csv_to_delta_csv.py | from sys import stdout, stderr
import csv
from argparse import ArgumentParser
from io import TextIOWrapper
from copy import deepcopy
from datetime import timedelta
from dateutil import parser
def command_line_options()->dict:
parser = ArgumentParser(prog="obd_log_to_csv",
description="""Telemetry CSV To Delta CSV
generates values indicating the rate of change for
identified columns. All original columns pass through
unmolested. The delta columns are added columns.
""")
parser.add_argument(
"--input_csv_file",
help="""
CSV file generated by obd_log_to_csv.obd_log_to_csv that includes the header.
That is, each column in the file has a valid text column name in the first row.
""",
)
parser.add_argument(
"--delta",
help="""
Comma separated list of commands where successive pairs of non-null return values would
be used to calculate the rate of change between the two return values. e.g.
"SPEED,FUEL_LEVEL,THROTTLE_POSITION". Calculated from
"(second-return-value - first-return-value) / (second-iso_ts_post - first-iso_ts_post)".
Applied in this way, delta SPEED would represent acceleration.
The results will be in a column headed by delta-COMMAND_NAME. e.g. delta SPEED column name
would be "delta-SPEED".
""",
)
parser.add_argument(
"--output_csv_file",
help="""CSV output file.
File can be either a full or relative path name.
If the file already exists, it will be overwritten.
Do not make the input and output file the same.
Bad things will happen. Defaults to stdout (terminal output).
""",
default="stdout"
)
parser.add_argument(
"--verbose",
help="Turn verbose output on. Default is off.",
default=False,
action='store_true'
)
return vars(parser.parse_args())
def delta_column_names(delta_columns:list) -> list:
return [f"delta-{name}" for name in delta_columns]
def delta(input_csv_file, output_csv_file, delta_columns, verbose=False):
    """Copy the input CSV to the output, appending one "delta-<NAME>"
    rate-of-change column per requested command.

    For each command the rate is computed between the current row and the
    most recent earlier row where the command had a non-empty value:

        (value2 - value1) / (iso_ts_post of row2 - iso_ts_pre of row1)

    in units per second. Rows with no earlier reference value (or an empty
    current value) get None in the delta column.

    input_csv_file: readable file object positioned at a CSV header row
    output_csv_file: writable file object
    delta_columns: list of command/column names to differentiate
    verbose: echo every row to stderr when True
    Raises ValueError when a requested column (or either timestamp column)
    is missing from the input header.
    """
    delta_column_name_list = delta_column_names(delta_columns)
    reader = csv.DictReader(input_csv_file)
    field_names = reader.fieldnames
    # Fail fast if the requested columns (or timestamps) aren't in the input.
    for name in (delta_columns + ['iso_ts_pre', 'iso_ts_post', ]):
        if name not in field_names:
            raise ValueError(f"delta column '{name}' missing from CSV input file")
    all_field_names = field_names + delta_column_name_list
    writer = csv.DictWriter(output_csv_file, fieldnames=all_field_names)
    writer.writeheader()
    # Most recent non-empty value (and its timestamps) seen per command.
    delta_first = {}
    for in_row in reader:
        if verbose:
            print(f"in_row: {in_row}", file=stderr)
        # The original row passes through unmolested.
        out_row = deepcopy(in_row)
        for name in delta_columns:
            if (
                name in delta_first and
                delta_first[name]['value'] and
                in_row[name]
            ):
                v1 = float(delta_first[name]['value'])
                v2 = float(in_row[name])
                t1 = parser.isoparse(delta_first[name]['iso_ts_pre'])
                t2 = parser.isoparse(in_row['iso_ts_post'])
                # BUG FIX: the elapsed time must be converted to seconds by
                # *dividing* the microsecond count by 1e6; the old code
                # multiplied, shrinking every rate by a factor of 1e12.
                elapsed_seconds = (t2 - t1).total_seconds()
                # Guard: identical timestamps would otherwise raise
                # ZeroDivisionError; emit None for an undefined rate.
                out_row[f"delta-{name}"] = (v2 - v1) / elapsed_seconds if elapsed_seconds else None
            else:
                out_row[f"delta-{name}"] = None
            if in_row[name]:
                # Remember this value as the reference for the next delta.
                delta_first[name] = {
                    'value': in_row[name],
                    'iso_ts_pre': in_row['iso_ts_pre'],
                    'iso_ts_post': in_row['iso_ts_post'],
                }
        if verbose:
            print(f"out_row: {out_row}", file=stderr)
        writer.writerow(out_row)
def main():
args = command_line_options()
input_csv_file_name = args['input_csv_file']
output_csv_file_name = args['output_csv_file']
verbose = args['verbose']
delta_columns = (args['delta']).split(sep=',') if args['delta'] else []
if verbose:
print(f"verbose: {args['verbose']}", file=stderr)
print(f"input csv file: {input_csv_file_name}", file=stderr)
print(f"output csv file: {output_csv_file_name}", file=stderr)
print(f"delta: {delta_columns}", file=stderr)
if output_csv_file_name != "stdout":
with open(output_csv_file_name, "w") as output_csv_file:
with open(input_csv_file_name, "r") as input_csv_file:
delta(input_csv_file, output_csv_file, delta_columns, verbose=verbose)
else:
with open(input_csv_file_name, "r") as input_csv_file:
delta(input_csv_file, stdout, delta_columns, verbose=verbose)
# Script entry point.
# FIX: dataset residue ("| 0.504394 | 0.374648") had been fused onto the
# main() call; restore the plain guarded invocation.
if __name__ == "__main__":
    main()
# Ansible module documentation (parsed by Ansible's doc tooling; must stay
# valid YAML). Fixes: "and and" typo, a dangling bare "uuid:" key, the
# missing (required) compute_resource option, and flattened indentation.
DOCUMENTATION = '''
---
module: foreman_image
short_description:
- Manage Foreman images using Foreman API v2.
description:
- Create, update and delete Foreman images using Foreman API v2
options:
  name:
    description: Image name as used in Foreman
    required: true
  state:
    description: image state
    required: false
    default: 'present'
    choices: ['present', 'absent']
  compute_resource:
    description: Name of the compute resource the image belongs to
    required: true
  operatingsystem:
    description: Operatingsystem used on the image
    required: True
  architecture:
    description: Architecture the image is for
    required: True
  uuid:
    description: UUID of the image
    required: True
  user:
    description: User used to log into the image
    required: False
    default: root
  foreman_host:
    description: Hostname or IP address of Foreman system
    required: false
    default: 127.0.0.1
  foreman_port:
    description: Port of Foreman API
    required: false
    default: 443
  foreman_user:
    description: Username to be used to authenticate on Foreman
    required: true
  foreman_pass:
    description: Password to be used to authenticate user on Foreman
    required: true
  foreman_ssl:
    description: Enable SSL when connecting to Foreman API
    required: false
    default: true
notes:
- Requires the python-foreman package to be installed. See https://github.com/Nosmoht/python-foreman.
version_added: "2.0"
author: "<NAME> <<EMAIL>>"
'''
# Usage example shown by ansible-doc.
# FIX: the example omitted compute_resource, which the argument_spec
# declares required - as written it would fail immediately.
EXAMPLES = '''
- name: Ensure Debian Jessie Image
  foreman_image:
    name: Debian Jessie Minimal
    compute_resource: libvirt-01
    architecture: x86_64
    operatingsystem: DebianJessie
    uuid: /path/to/image
    state: present
    foreman_host: 127.0.0.1
    foreman_port: 443
    foreman_user: admin
    foreman_pass: secret
'''
try:
from foreman.foreman import *
foremanclient_found = True
except ImportError:
foremanclient_found = False
def get_resources(resource_type, resource_func, resource_name, search_field='name'):
    """Look up a single Foreman resource by name (or another search field).

    Returns None when no name was given; fails the module when the search
    raises a ForemanError or yields nothing.
    """
    if not resource_name:
        return None
    query = {search_field: resource_name}
    try:
        found = resource_func(data=query)
        if not found:
            module.fail_json(
                msg='Could not find resource type {resource_type} specified as {name}'.format(
                    resource_type=resource_type, name=resource_name))
    except ForemanError as e:
        module.fail_json(msg='Could not search resource type {resource_type} specified as {name}: {error}'.format(
            resource_type=resource_type, name=resource_name, error=e.message))
    return found
def ensure():
    """Converge the Foreman image to the requested state.

    Reads everything from the global module params; uses the global
    theforeman client. Returns (changed, image).
    """
    name = module.params['name']
    compute_resource_name = module.params['compute_resource']
    state = module.params['state']
    data = dict(name=name)
    # Images live under a compute resource; resolve it first.
    try:
        compute_resource = theforeman.search_compute_resource(data=dict(name=compute_resource_name))
    except ForemanError as e:
        module.fail_json(msg='Could not find compute resource {0}: {1}'.format(compute_resource_name, e.message))
    if not compute_resource:
        module.fail_json(msg='Could not find compute resource {0}'.format(compute_resource_name))
    cid = compute_resource['id']
    # Find the image by name among the compute resource's images (None if absent).
    try:
        images = theforeman.get_compute_resource_images(compute_resource['id'])
        for i in images:
            if i['name'] == name:
                image = i
                break
        else:
            image = None
    except ForemanError as e:
        module.fail_json(msg='Could not get images: {0}'.format(e.message))
    if state == 'absent':
        # Delete when present; otherwise nothing to do.
        if image:
            try:
                image = theforeman.delete_compute_resource_image(cid, image.get('id'))
                return True, image
            except ForemanError as e:
                module.fail_json(msg='Could not delete image: {0}'.format(e.message))
        return False, image
    # state == 'present': build the desired attribute set.
    data['compute_resource_id'] = cid
    data['uuid'] = module.params['uuid']
    data['username'] = module.params['user']
    if module.params['password']:
        data['password'] = module.params['password']
    # Architecture is searched by name, operating system by its title.
    data['architecture_id'] = get_resources(resource_type='architecture',
                                            resource_func=theforeman.search_architecture,
                                            resource_name=module.params['architecture'])['id']
    data['operatingsystem_id'] = get_resources(resource_type='operatingsystem',
                                               resource_func=theforeman.search_operatingsystem,
                                               resource_name=module.params['operatingsystem'],
                                               search_field='title')['id']
    if not image:
        # Create the missing image.
        try:
            image = theforeman.create_compute_resource_image(compute_resource_id=cid,
                                                             data=data)
            return True, image
        except ForemanError as e:
            module.fail_json(msg='Could not create image: {0}'.format(e.message))
    else:
        # Update only when some desired attribute differs from the current one.
        data['id'] = image['id']
        if not all(data[key] == image.get(key, data[key]) for key in data.keys()):
            try:
                new_data = dict(compute_resource_id=cid, id=image['id'], image=data)
                image = theforeman.update_compute_resource_image(compute_resource_id=cid,
                                                                 data=new_data)
                return True, image
            except ForemanError as e:
                module.fail_json(msg='Could not update image: {0}'.format(e.message))
    return False, image
def main():
    """Ansible entry point: declare the argument spec, connect to Foreman
    and converge the image to the requested state."""
    global module
    global theforeman
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            compute_resource=dict(type='str', required=True),
            architecture=dict(type='str', required=True),
            # BUG FIX: the spec previously read operatingsystem='str'
            # instead of type='str', leaving the option untyped.
            operatingsystem=dict(type='str', required=True),
            uuid=dict(type='str', required=True),
            user=dict(type='str', default='root'),
            password=dict(type='str', default=None, no_log=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            foreman_host=dict(type='str', default='127.0.0.1'),
            foreman_port=dict(type='str', default='443'),
            foreman_user=dict(type='str', required=True),
            foreman_pass=dict(type='str', required=True, no_log=True),
            foreman_ssl=dict(type='bool', default=True)
        ),
    )
    if not foremanclient_found:
        module.fail_json(msg='python-foreman module is required. See https://github.com/Nosmoht/python-foreman.')
    foreman_host = module.params['foreman_host']
    foreman_port = module.params['foreman_port']
    foreman_user = module.params['foreman_user']
    foreman_pass = module.params['foreman_pass']
    foreman_ssl = module.params['foreman_ssl']
    # BUG FIX: restore the password argument - a redaction placeholder
    # (<PASSWORD>) had replaced the variable, which is a syntax error.
    theforeman = Foreman(hostname=foreman_host,
                         port=foreman_port,
                         username=foreman_user,
                         password=foreman_pass,
                         ssl=foreman_ssl)
    changed, image = ensure()
    module.exit_json(changed=changed, image=image)
from ansible.module_utils.basic import *
# Script entry point.
# FIX: dataset residue ("| foreman_image.py |") had been fused onto the
# main() call; restore the plain guarded invocation.
if __name__ == '__main__':
    main()
DOCUMENTATION = '''
---
module: foreman_image
short_description:
- Manage Foreman images using Foreman API v2.
description:
- Create, update and and delete Foreman images using Foreman API v2
options:
name:
description: Image name as used in Foreman
required: true
state:
description: image state
required: false
default: 'present'
choices: ['present', 'absent']
uuid:
operatingsystem:
description: Operatingsystem used on the image
required: True
architecture:
description: Architecture the image is for
required: True
uuid:
description: UUID of the image
required: True
user:
description: User used to log into the image
required: False
default: root
foreman_host:
description: Hostname or IP address of Foreman system
required: false
default: 127.0.0.1
foreman_port:
description: Port of Foreman API
required: false
default: 443
foreman_user:
description: Username to be used to authenticate on Foreman
required: true
foreman_pass:
description: Password to be used to authenticate user on Foreman
required: true
foreman_ssl:
description: Enable SSL when connecting to Foreman API
required: false
default: true
notes:
- Requires the python-foreman package to be installed. See https://github.com/Nosmoht/python-foreman.
version_added: "2.0"
author: "<NAME> <<EMAIL>>"
'''
EXAMPLES = '''
- name: Ensure Debian Jessie Image
foreman_image:
name: Debian Jessie Minimal
architecture: x86_64
operatingsystem: DebianJessie
uuid: /path/to/image
state: present
foreman_host: 127.0.0.1
foreman_port: 443
foreman_user: admin
foreman_pass: secret
'''
try:
from foreman.foreman import *
foremanclient_found = True
except ImportError:
foremanclient_found = False
def get_resources(resource_type, resource_func, resource_name, search_field='name'):
if not resource_name:
return None
search_data = dict()
search_data[search_field] = resource_name
try:
resource = resource_func(data=search_data)
if not resource:
module.fail_json(
msg='Could not find resource type {resource_type} specified as {name}'.format(
resource_type=resource_type, name=resource_name))
except ForemanError as e:
module.fail_json(msg='Could not search resource type {resource_type} specified as {name}: {error}'.format(
resource_type=resource_type, name=resource_name, error=e.message))
return resource
def ensure():
name = module.params['name']
compute_resource_name = module.params['compute_resource']
state = module.params['state']
data = dict(name=name)
try:
compute_resource = theforeman.search_compute_resource(data=dict(name=compute_resource_name))
except ForemanError as e:
module.fail_json(msg='Could not find compute resource {0}: {1}'.format(compute_resource_name, e.message))
if not compute_resource:
module.fail_json(msg='Could not find compute resource {0}'.format(compute_resource_name))
cid = compute_resource['id']
try:
images = theforeman.get_compute_resource_images(compute_resource['id'])
for i in images:
if i['name'] == name:
image = i
break
else:
image = None
except ForemanError as e:
module.fail_json(msg='Could not get images: {0}'.format(e.message))
if state == 'absent':
if image:
try:
image = theforeman.delete_compute_resource_image(cid, image.get('id'))
return True, image
except ForemanError as e:
module.fail_json(msg='Could not delete image: {0}'.format(e.message))
return False, image
data['compute_resource_id'] = cid
data['uuid'] = module.params['uuid']
data['username'] = module.params['user']
if module.params['password']:
data['password'] = module.params['password']
data['architecture_id'] = get_resources(resource_type='architecture',
resource_func=theforeman.search_architecture,
resource_name=module.params['architecture'])['id']
data['operatingsystem_id'] = get_resources(resource_type='operatingsystem',
resource_func=theforeman.search_operatingsystem,
resource_name=module.params['operatingsystem'],
search_field='title')['id']
if not image:
try:
image = theforeman.create_compute_resource_image(compute_resource_id=cid,
data=data)
return True, image
except ForemanError as e:
module.fail_json(msg='Could not create image: {0}'.format(e.message))
else:
data['id'] = image['id']
if not all(data[key] == image.get(key, data[key]) for key in data.keys()):
try:
new_data = dict(compute_resource_id=cid, id=image['id'], image=data)
image = theforeman.update_compute_resource_image(compute_resource_id=cid,
data=new_data)
return True, image
except ForemanError as e:
module.fail_json(msg='Could not update image: {0}'.format(e.message))
return False, image
def main():
    """Ansible entry point: declare the argument spec, connect to Foreman
    and converge the image to the requested state."""
    global module
    global theforeman
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            compute_resource=dict(type='str', required=True),
            architecture=dict(type='str', required=True),
            # BUG FIX: the spec previously read operatingsystem='str'
            # instead of type='str', leaving the option untyped.
            operatingsystem=dict(type='str', required=True),
            uuid=dict(type='str', required=True),
            user=dict(type='str', default='root'),
            password=dict(type='str', default=None, no_log=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            foreman_host=dict(type='str', default='127.0.0.1'),
            foreman_port=dict(type='str', default='443'),
            foreman_user=dict(type='str', required=True),
            foreman_pass=dict(type='str', required=True, no_log=True),
            foreman_ssl=dict(type='bool', default=True)
        ),
    )
    if not foremanclient_found:
        module.fail_json(msg='python-foreman module is required. See https://github.com/Nosmoht/python-foreman.')
    foreman_host = module.params['foreman_host']
    foreman_port = module.params['foreman_port']
    foreman_user = module.params['foreman_user']
    foreman_pass = module.params['foreman_pass']
    foreman_ssl = module.params['foreman_ssl']
    # BUG FIX: restore the password argument - a redaction placeholder
    # (<PASSWORD>) had replaced the variable, which is a syntax error.
    theforeman = Foreman(hostname=foreman_host,
                         port=foreman_port,
                         username=foreman_user,
                         password=foreman_pass,
                         ssl=foreman_ssl)
    changed, image = ensure()
    module.exit_json(changed=changed, image=image)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | 0.564339 | 0.452657 |
from django.contrib.auth import get_user_model, authenticate
from rest_framework import serializers
from core.models import CommunityMember, Community
class RegisterSerializer(serializers.ModelSerializer):
    """Serializer for creating (registering) a new user account."""

    class Meta:
        model = get_user_model()
        fields = ('name', 'email', 'password',)
        extra_kwargs = {
            # Never echo the password back in API responses.
            'password': {
                'write_only': True,
                'style': {'input_type': 'password'}
            }
        }

    def create(self, validated_data):
        # create_user hashes the password instead of storing it verbatim.
        return get_user_model().objects.create_user(**validated_data)
class LoginSerializer(serializers.Serializer):
    """Validates email/password credentials and resolves the user."""

    email = serializers.EmailField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        # Passwords may legitimately begin/end with whitespace.
        trim_whitespace=False
    )

    def validate(self, attrs):
        """Authenticate the credentials; attach the user to attrs on success."""
        email = attrs.get('email')
        password = attrs.get('password')
        # NOTE(review): passing the email as username assumes the configured
        # auth backend authenticates by email - confirm backend setup.
        user = authenticate(
            request=self.context.get('request'),
            username=email,
            password=password
        )
        if not user:
            msg = 'User not Found, Please Try Different User'
            raise serializers.ValidationError(
                {'message': msg},
                code='authentication'
            )
        attrs['user'] = user
        return attrs
class CommunityProfileSerializer(serializers.ModelSerializer):
    """Read-only representation of a community (id, name, image)."""

    class Meta:
        model = Community
        fields = ('id', 'name', 'image',)
        # BUG FIX: DRF expects 'read_only_fields' (plural); the misspelled
        # 'read_only_field' attribute was silently ignored.
        read_only_fields = ('id', 'name', 'image',)
class CommunityMemberSerializer(serializers.ModelSerializer):
    """Membership row exposing its (read-only) community."""

    community = CommunityProfileSerializer(read_only=True)

    class Meta:
        model = CommunityMember
        fields = ('community',)
        # BUG FIX: 'read_only_fields' (plural) is the attribute DRF honours;
        # the misspelled 'read_only_field' was silently ignored.
        read_only_fields = ('community',)
class ProfileSerializer(serializers.ModelSerializer):
    """Read/update serializer for a user profile, including the communities
    the user belongs to (read-only, derived)."""

    community = serializers.SerializerMethodField()

    class Meta:
        model = get_user_model()
        fields = ('name', 'email', 'exp', 'about', 'age',
                  'password', 'image', 'community',)
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}

    def get_community(self, profile):
        """Return the serialized communities this user is a member of."""
        community_member = CommunityMember.objects.filter(user=profile)
        communities = Community.objects.filter(communitymember__in=community_member)
        serializer = CommunityProfileSerializer(instance=communities, many=True)
        return serializer.data

    def update(self, instance, validated_data):
        """Update the user; hash the password via set_password when supplied."""
        password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)
        if password:
            user.set_password(password)
            user.save()
        return user
from rest_framework import serializers
from core.models import CommunityMember, Community
class RegisterSerializer(serializers.ModelSerializer):
""" serializer for register class """
class Meta:
model = get_user_model()
fields = ('name', 'email', 'password',)
extra_kwargs = {
'password': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
def create(self, validated_data):
return get_user_model().objects.create_user(**validated_data)
class LoginSerializer(serializers.Serializer):
""" serializer for user login """
email = serializers.EmailField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
""" validate and authenticate the user """
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = 'User not Found, Please Try Different User'
raise serializers.ValidationError(
{'message': msg},
code='authentication'
)
attrs['user'] = user
return attrs
class CommunityProfileSerializer(serializers.ModelSerializer):
""" community profile serializer """
class Meta:
model = Community
fields = ('id', 'name', 'image',)
read_only_field = ('id', 'name', 'image',)
class CommunityMemberSerializer(serializers.ModelSerializer):
""" community profile serializer """
community = CommunityProfileSerializer(read_only=True)
class Meta:
model = CommunityMember
fields = ('community',)
read_only_field = ('community',)
class ProfileSerializer(serializers.ModelSerializer):
""" User Profile Serializer """
community = serializers.SerializerMethodField()
class Meta:
model = get_user_model()
fields = ('name', 'email', 'exp', 'about', 'age',
'password', 'image', 'community',)
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def get_community(self, profile):
community_member = CommunityMember.objects.filter(user=profile)
communities = Community.objects.filter(communitymember__in=community_member)
serializer = CommunityProfileSerializer(instance=communities, many=True)
return serializer.data
def update(self, instance, validated_data):
""" update user """
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user | 0.695028 | 0.071656 |
# imports
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/tinkeringtech/rda5807m.git"
import time
# Registers definitions
FREQ_STEPS = 10
RADIO_REG_CHIPID = 0x00
RADIO_REG_CTRL = 0x02
RADIO_REG_CTRL_OUTPUT = 0x8000
RADIO_REG_CTRL_UNMUTE = 0x4000
RADIO_REG_CTRL_MONO = 0x2000
RADIO_REG_CTRL_BASS = 0x1000
RADIO_REG_CTRL_SEEKUP = 0x0200
RADIO_REG_CTRL_SEEK = 0x0100
RADIO_REG_CTRL_RDS = 0x0008
RADIO_REG_CTRL_NEW = 0x0004
RADIO_REG_CTRL_RESET = 0x0002
RADIO_REG_CTRL_ENABLE = 0x0001
RADIO_REG_CHAN = 0x03
RADIO_REG_CHAN_SPACE = 0x0003
RADIO_REG_CHAN_SPACE_100 = 0x0000
RADIO_REG_CHAN_BAND = 0x000C
RADIO_REG_CHAN_BAND_FM = 0x0000
RADIO_REG_CHAN_BAND_FMWORLD = 0x0008
RADIO_REG_CHAN_TUNE = 0x0010
RADIO_REG_CHAN_NR = 0x7FC0
RADIO_REG_R4 = 0x04
RADIO_REG_R4_EM50 = 0x0800
RADIO_REG_R4_SOFTMUTE = 0x0200
RADIO_REG_R4_AFC = 0x0100
RADIO_REG_VOL = 0x05
RADIO_REG_VOL_VOL = 0x000F
RADIO_REG_RA = 0x0A
RADIO_REG_RA_RDS = 0x8000
RADIO_REG_RA_RDSBLOCK = 0x0800
RADIO_REG_RA_STEREO = 0x0400
RADIO_REG_RA_NR = 0x03FF
RADIO_REG_RA_STC = 0x4000
RADIO_REG_RA_SF = 0x2000
RADIO_REG_RB = 0x0B
RADIO_REG_RB_FMTRUE = 0x0100
RADIO_REG_RB_FMREADY = 0x0080
# Radio class definition
class Radio:
"""
A class for communicating with the rda5807m chip
...
Attributes
----------
registers : list
virtual registers
address : int
chip's address
maxvolume : int
maximum volume
freqLow, freqHigh, freqSteps : int
min and max frequency for FM band, and frequency steps
board : busio.i2c object
used for i2c communication
frequency : int
current chip frequency
volume : int
current chip volume
bassBoost : boolean
toggle bass boost on the chip
mute : boolean
toggle mute/unmute
softMute : boolean
toggle soft mute (mute if signal strength too low)
mono : boolean
toggle stereo mode
rds : boolean
toggle rds
tuned : boolean
is chip tuned
band : string
selected band (FM or FMWORLD)
"""
# Initialize virtual registers
registers = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Chip constants
I2C_SEQ = 0x10
I2C_REG = 0x11
maxvolume = 15
# FMWORLD Band
freqLow = 8700
freqHigh = 10800
freqSteps = 10
# Set default frequency and volume
def __init__(self, i2c, frequency=10000, volume=1):
    """Initialise the chip and tune to the given frequency/volume.

    i2c: bus object used for all chip communication
    frequency: initial frequency in 10 kHz units (10000 -> 100.00 MHz)
    volume: initial volume (0..maxvolume)
    """
    self.i2c = i2c
    self.frequency = frequency
    # Basic audio info
    self.volume = volume
    self.bassBoost = False
    self.mute = False
    self.softMute = False
    # Radio features from the chip
    self.mono = False
    self.rds = False
    self.tuned = False
    # Band - Default FMWORLD
    # 1. FM
    # 2. FMWORLD
    self.band = "FMWORLD"
    # Saves register values to the virtual registers and programs defaults.
    self.setup()
    # FIX: removed a leftover debug print ("Got to point 1!") that polluted
    # stdout on every construction.
    self.tune()  # Apply volume and frequency
def setup(self):
    """Reset the chip and program the initial register configuration.

    Sequence: reset+enable with audio output unmuted, set the default
    volume register, then re-write the control register with reset
    cleared and RDS enabled, and finally switch on bass boost.
    """
    # Initialize virtual registers
    self.registers[RADIO_REG_CHIPID] = 0x58
    self.registers[RADIO_REG_CTRL] = (RADIO_REG_CTRL_RESET | RADIO_REG_CTRL_ENABLE) | (
        RADIO_REG_CTRL_UNMUTE | RADIO_REG_CTRL_OUTPUT)
    # self.registers[RADIO_REG_R4] = RADIO_REG_R4_EM50
    # Initialized to volume - 6 by default
    self.registers[RADIO_REG_VOL] = 0x84D1
    # Other registers are already set to zero
    # Update registers
    self._saveRegister(RADIO_REG_CTRL)
    self._saveRegister(RADIO_REG_VOL)
    # Re-write CTRL with the RESET bit cleared and RDS enabled.
    self.registers[
        RADIO_REG_CTRL] = RADIO_REG_CTRL_ENABLE | RADIO_REG_CTRL_NEW | RADIO_REG_CTRL_RDS | RADIO_REG_CTRL_UNMUTE | RADIO_REG_CTRL_OUTPUT
    self._saveRegister(RADIO_REG_CTRL)
    # Turn on bass boost and rds
    self.setBassBoost(True)
    self.rds = True
    self.mute = False
def tune(self):
    """Apply the stored frequency and volume to the chip and mark it tuned."""
    self.setFreq(self.frequency)
    self.setVolume(self.volume)
    self.tuned = True
def setFreq(self, freq):
    """Tune the chip to freq (10 kHz units), clamped to the band limits.

    Writes the channel number with the TUNE bit set, waits for the chip
    to settle, then reads the actually-tuned frequency back into
    self.frequency.
    """
    # Clamp to the supported band.
    if freq < self.freqLow:
        freq = self.freqLow
    elif freq > self.freqHigh:
        freq = self.freqHigh
    self.frequency = freq
    # Channel number = offset from the band start in freqSteps (10) units.
    newChannel = (freq - self.freqLow) // 10
    regChannel = RADIO_REG_CHAN_TUNE  # Enable tuning
    regChannel = regChannel | (newChannel << 6)  # CHAN field starts at bit 6
    # Enable output, unmute
    self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | (
        RADIO_REG_CTRL_OUTPUT | RADIO_REG_CTRL_UNMUTE | RADIO_REG_CTRL_RDS | RADIO_REG_CTRL_ENABLE)
    self._saveRegister(RADIO_REG_CTRL)
    # Save frequency to register
    self.registers[RADIO_REG_CHAN] = regChannel
    self._saveRegister(RADIO_REG_CHAN)
    time.sleep(0.2)  # allow the chip time to tune - presumably per datasheet
    # Adjust volume
    self._saveRegister(RADIO_REG_VOL)
    time.sleep(0.3)
    # Read back the frequency the chip actually tuned to.
    self.getFreq()
def getFreq(self):
    """Read the currently tuned frequency (10 kHz units) from register RA."""
    #self.writeBytes(bytes([RADIO_REG_RA]))
    self.registers[RADIO_REG_RA] = self._read16()
    # The lower 10 bits of RA hold the channel offset from the band start.
    ch = self.registers[RADIO_REG_RA] & RADIO_REG_RA_NR
    self.frequency = self.freqLow + ch * 10
    return self.frequency
def formatFreq(self):
    """Return the current frequency as a readable string, e.g. '101.10 Mhz'.

    The stored frequency is in 10 kHz units, so the decimal point goes
    before the last two digits.
    """
    digits = str(self.frequency)
    return digits[:-2] + "." + digits[-2:] + " Mhz"
def setBand(self, band):
    """Select the tuner band: "FM", or anything else selects FMWORLD.

    Writes the band selection together with 100 kHz channel spacing to
    the channel register.
    """
    self.band = band
    if band == "FM":
        r = RADIO_REG_CHAN_BAND_FM
    else:
        r = RADIO_REG_CHAN_BAND_FMWORLD
    self.registers[RADIO_REG_CHAN] = (r | RADIO_REG_CHAN_SPACE_100)
    self._saveRegister(RADIO_REG_CHAN)
def term(self):
    """Terminate all receiver functions: silence the output and zero the
    control register (clearing the ENABLE bit)."""
    self.setVolume(0)
    self.registers[RADIO_REG_CTRL] = 0x0000
    # BUG FIX: the original line read 'self._saveRegisters' - a bare
    # attribute reference that never called anything, so the zeroed
    # control register was never written to the chip (the method used
    # everywhere else in this class is _saveRegister).
    self._saveRegister(RADIO_REG_CTRL)
def setBassBoost(self, switchOn):
    """Enable/disable bass boost by toggling the BASS bit in the control register."""
    self.bassBoost = switchOn
    regCtrl = self.registers[RADIO_REG_CTRL]
    if switchOn:
        regCtrl = regCtrl | RADIO_REG_CTRL_BASS
    else:
        regCtrl = regCtrl & (~RADIO_REG_CTRL_BASS)
    self.registers[RADIO_REG_CTRL] = regCtrl
    self._saveRegister(RADIO_REG_CTRL)
def setMono(self, switchOn):
    """Force mono output (True) or allow stereo (False)."""
    self.mono = switchOn
    # NOTE(review): the SEEK bit is cleared here, presumably to cancel any
    # seek in progress before changing the audio mode - confirm against
    # the RDA5807M datasheet.
    self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_SEEK)
    if switchOn:
        self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_MONO
    else:
        self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_MONO)
    self._saveRegister(RADIO_REG_CTRL)
def setMute(self, switchOn):
# Switches mute off or on
self.mute = switchOn
if (switchOn):
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_UNMUTE)
else:
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_UNMUTE
self._saveRegister(RADIO_REG_CTRL)
def setSoftMute(self, switchOn):
# Switches soft mute off or on
self.softMute = switchOn
if switchOn:
self.registers[RADIO_REG_R4] = self.registers[RADIO_REG_R4] | RADIO_REG_R4_SOFTMUTE
else:
self.registers[RADIO_REG_R4] = self.registers[RADIO_REG_R4] & (~RADIO_REG_R4_SOFTMUTE)
self._saveRegister(RADIO_REG_R4)
def softReset(self):
# Soft reset chip
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_RESET
self._saveRegister(RADIO_REG_CTRL)
time.sleep(2)
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_RESET)
self._saveRegister(RADIO_REG_CTRL)
def seekUp(self):
# Start seek mode upwards
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_SEEKUP
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_SEEK
self._saveRegister(RADIO_REG_CTRL)
# Wait until scan is over
time.sleep(1)
self.getFreq()
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_SEEK)
self._saveRegister(RADIO_REG_CTRL)
def seekDown(self):
# Start seek mode downwards
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_SEEKUP)
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_SEEK
self._saveRegister(RADIO_REG_CTRL)
# Wait until scan is over
time.sleep(1)
self.getFreq()
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_SEEK)
self._saveRegister(RADIO_REG_CTRL)
def setVolume(self, volume):
    """Set the output volume, clamped to the valid range 0..maxvolume.

    Bug fix: negative values were previously OR-ed straight into the
    volume register, corrupting its other bit fields; they are now
    clamped to 0.
    """
    if volume > self.maxvolume:
        volume = self.maxvolume
    elif volume < 0:
        volume = 0
    self.volume = volume
    # Clear the volume bits, then set the new level.
    self.registers[RADIO_REG_VOL] = self.registers[RADIO_REG_VOL] & (~RADIO_REG_VOL_VOL)
    self.registers[RADIO_REG_VOL] = self.registers[RADIO_REG_VOL] | volume
    self._saveRegister(RADIO_REG_VOL)
def getRssi(self):
# Get the current signal strength
#self.writeBytes(bytes([RADIO_REG_RB]))
self.registers[RADIO_REG_RB]=self._readRegisters(0xb)
self.rssi = self.registers[RADIO_REG_RB] >> 10
return self.rssi
def getRadioInfo(self):
# Reads info from chip and saves it into virtual memory
self._readRegisters()
if self.registers[RADIO_REG_RA] & RADIO_REG_RA_RDS:
self.rds = True
self.rssi = self.registers[RADIO_REG_RB] >> 10
if self.registers[RADIO_REG_RB] & RADIO_REG_RB_FMTRUE:
self.tuned = True
if self.registers[RADIO_REG_CTRL] & RADIO_REG_CTRL_MONO:
self.mono = True
def _saveRegister(self, regN):
# Write register from memory to receiver
regVal=bytearray(2)
regVal1 = self.registers[regN] # 16 bit value in list
regVal[0] = regVal1 >> 8
regVal[1] = regVal1 & 255
#write to i2c address with particular register and value
self.i2c.writeto_mem(self.I2C_REG, regN,regVal)
def _saveRegisters(self):
#save data into register 2 to 7
for i in range(2, 7):
self._saveRegister(i)
def _read16(self):
    # Reads two bytes from the sequential-access I2C address and
    # returns them as one big-endian 16 bit integer.
    # NOTE(review): the buffer is 4 bytes but only the first two are
    # used -- presumably the extra bytes are read and discarded;
    # confirm against the chip's sequential-read protocol.
    result = bytearray(4)
    self.i2c.readfrom_into(self.I2C_SEQ, result)
    return result[0] * 256 + result[1]
def _readRegisters(self,reg):
#redfrom_mem_into reg,memadd,buffer
result = bytearray(2)
self.i2c.readfrom_mem_into(self.I2C_REG , reg,result)
return result[0] * 256 + result[1] | rda5807m.py | # imports
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/tinkeringtech/rda5807m.git"
import time
# Registers definitions
FREQ_STEPS = 10
RADIO_REG_CHIPID = 0x00
RADIO_REG_CTRL = 0x02
RADIO_REG_CTRL_OUTPUT = 0x8000
RADIO_REG_CTRL_UNMUTE = 0x4000
RADIO_REG_CTRL_MONO = 0x2000
RADIO_REG_CTRL_BASS = 0x1000
RADIO_REG_CTRL_SEEKUP = 0x0200
RADIO_REG_CTRL_SEEK = 0x0100
RADIO_REG_CTRL_RDS = 0x0008
RADIO_REG_CTRL_NEW = 0x0004
RADIO_REG_CTRL_RESET = 0x0002
RADIO_REG_CTRL_ENABLE = 0x0001
RADIO_REG_CHAN = 0x03
RADIO_REG_CHAN_SPACE = 0x0003
RADIO_REG_CHAN_SPACE_100 = 0x0000
RADIO_REG_CHAN_BAND = 0x000C
RADIO_REG_CHAN_BAND_FM = 0x0000
RADIO_REG_CHAN_BAND_FMWORLD = 0x0008
RADIO_REG_CHAN_TUNE = 0x0010
RADIO_REG_CHAN_NR = 0x7FC0
RADIO_REG_R4 = 0x04
RADIO_REG_R4_EM50 = 0x0800
RADIO_REG_R4_SOFTMUTE = 0x0200
RADIO_REG_R4_AFC = 0x0100
RADIO_REG_VOL = 0x05
RADIO_REG_VOL_VOL = 0x000F
RADIO_REG_RA = 0x0A
RADIO_REG_RA_RDS = 0x8000
RADIO_REG_RA_RDSBLOCK = 0x0800
RADIO_REG_RA_STEREO = 0x0400
RADIO_REG_RA_NR = 0x03FF
RADIO_REG_RA_STC = 0x4000
RADIO_REG_RA_SF = 0x2000
RADIO_REG_RB = 0x0B
RADIO_REG_RB_FMTRUE = 0x0100
RADIO_REG_RB_FMREADY = 0x0080
# Radio class definition
class Radio:
"""
A class for communicating with the rda5807m chip
...
Attributes
----------
registers : list
virtual registers
address : int
chip's address
maxvolume : int
maximum volume
freqLow, freqHigh, freqSteps : int
min and max frequency for FM band, and frequency steps
board : busio.i2c object
used for i2c communication
frequency : int
current chip frequency
volume : int
current chip volume
bassBoost : boolean
toggle bass boost on the chip
mute : boolean
toggle mute/unmute
softMute : boolean
toggle soft mute (mute if signal strength too low)
mono : boolean
toggle stereo mode
rds : boolean
toggle rds
tuned : boolean
is chip tuned
band : string
selected band (FM or FMWORLD)
"""
# Initialize virtual registers
registers = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Chip constants
I2C_SEQ = 0x10
I2C_REG = 0x11
maxvolume = 15
# FMWORLD Band
freqLow = 8700
freqHigh = 10800
freqSteps = 10
# Set default frequency and volume
def __init__(self, i2c, frequency=10000, volume=1):
    """Create the driver, program the default registers, and tune.

    :param i2c: initialized I2C bus object
    :param frequency: initial frequency in 10 kHz units (10000 = 100.00)
    :param volume: initial volume, 0..15
    """
    self.i2c = i2c
    self.frequency = frequency
    # Basic audio info
    self.volume = volume
    self.bassBoost = False
    self.mute = False
    self.softMute = False
    # Radio features from the chip
    self.mono = False
    self.rds = False
    self.tuned = False
    # Band - Default FMWORLD
    # 1. FM
    # 2. FMWORLD
    self.band = "FMWORLD"
    # Save default register values to the virtual registers and write
    # them out, then apply frequency and volume.
    self.setup()
    # (removed leftover debug print "Got to point 1!")
    self.tune()
def setup(self):
# Initialize registers
self.registers[RADIO_REG_CHIPID] = 0x58
self.registers[RADIO_REG_CTRL] = (RADIO_REG_CTRL_RESET | RADIO_REG_CTRL_ENABLE) | (
RADIO_REG_CTRL_UNMUTE | RADIO_REG_CTRL_OUTPUT)
# self.registers[RADIO_REG_R4] = RADIO_REG_R4_EM50
# Initialized to volume - 6 by default
self.registers[RADIO_REG_VOL] = 0x84D1
# Other registers are already set to zero
# Update registers
self._saveRegister(RADIO_REG_CTRL)
self._saveRegister(RADIO_REG_VOL)
self.registers[
RADIO_REG_CTRL] = RADIO_REG_CTRL_ENABLE | RADIO_REG_CTRL_NEW | RADIO_REG_CTRL_RDS | RADIO_REG_CTRL_UNMUTE | RADIO_REG_CTRL_OUTPUT
self._saveRegister(RADIO_REG_CTRL)
# Turn on bass boost and rds
self.setBassBoost(True)
self.rds = True
self.mute = False
def tune(self):
# Tunes radio to current frequency and volume
self.setFreq(self.frequency)
self.setVolume(self.volume)
self.tuned = True
def setFreq(self, freq):
# Sets frequency to freq
if freq < self.freqLow:
freq = self.freqLow
elif freq > self.freqHigh:
freq = self.freqHigh
self.frequency = freq
newChannel = (freq - self.freqLow) // 10
regChannel = RADIO_REG_CHAN_TUNE # Enable tuning
regChannel = regChannel | (newChannel << 6)
# Enable output, unmute
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | (
RADIO_REG_CTRL_OUTPUT | RADIO_REG_CTRL_UNMUTE | RADIO_REG_CTRL_RDS | RADIO_REG_CTRL_ENABLE)
self._saveRegister(RADIO_REG_CTRL)
# Save frequency to register
self.registers[RADIO_REG_CHAN] = regChannel
self._saveRegister(RADIO_REG_CHAN)
time.sleep(0.2)
# Adjust volume
self._saveRegister(RADIO_REG_VOL)
time.sleep(0.3)
# Get frequnecy
self.getFreq()
def getFreq(self):
# Read register RA
#self.writeBytes(bytes([RADIO_REG_RA]))
self.registers[RADIO_REG_RA] = self._read16()
ch = self.registers[RADIO_REG_RA] & RADIO_REG_RA_NR
self.frequency = self.freqLow + ch * 10
return self.frequency
def formatFreq(self):
    """Return the current frequency as a human-readable string.

    The stored frequency is an integer in units of 10 kHz (e.g. 8700),
    so a decimal separator is inserted before the last two digits,
    yielding e.g. "87.00 Mhz".
    """
    digits = str(self.frequency)
    # Insert the decimal point two places from the right.
    return digits[:-2] + "." + digits[-2:] + " Mhz"
def setBand(self, band):
# Changes bands to FM or FMWORLD
self.band = band
if band == "FM":
r = RADIO_REG_CHAN_BAND_FM
else:
r = RADIO_REG_CHAN_BAND_FMWORLD
self.registers[RADIO_REG_CHAN] = (r | RADIO_REG_CHAN_SPACE_100)
self._saveRegister(RADIO_REG_CHAN)
def term(self):
    """Shut the receiver down: mute the audio and clear the control register.

    Bug fix: the original referenced ``self._saveRegisters`` without
    calling it, so the cleared control register was never written to
    the chip.
    """
    self.setVolume(0)
    self.registers[RADIO_REG_CTRL] = 0x0000
    self._saveRegisters()
def setBassBoost(self, switchOn):
# Switches bass boost to true or false
self.bassBoost = switchOn
regCtrl = self.registers[RADIO_REG_CTRL]
if switchOn:
regCtrl = regCtrl | RADIO_REG_CTRL_BASS
else:
regCtrl = regCtrl & (~RADIO_REG_CTRL_BASS)
self.registers[RADIO_REG_CTRL] = regCtrl
self._saveRegister(RADIO_REG_CTRL)
def setMono(self, switchOn):
# Switches mono to 0 or 1
self.mono = switchOn
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_SEEK)
if switchOn:
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_MONO
else:
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_MONO)
self._saveRegister(RADIO_REG_CTRL)
def setMute(self, switchOn):
# Switches mute off or on
self.mute = switchOn
if (switchOn):
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_UNMUTE)
else:
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_UNMUTE
self._saveRegister(RADIO_REG_CTRL)
def setSoftMute(self, switchOn):
# Switches soft mute off or on
self.softMute = switchOn
if switchOn:
self.registers[RADIO_REG_R4] = self.registers[RADIO_REG_R4] | RADIO_REG_R4_SOFTMUTE
else:
self.registers[RADIO_REG_R4] = self.registers[RADIO_REG_R4] & (~RADIO_REG_R4_SOFTMUTE)
self._saveRegister(RADIO_REG_R4)
def softReset(self):
# Soft reset chip
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_RESET
self._saveRegister(RADIO_REG_CTRL)
time.sleep(2)
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_RESET)
self._saveRegister(RADIO_REG_CTRL)
def seekUp(self):
# Start seek mode upwards
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_SEEKUP
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_SEEK
self._saveRegister(RADIO_REG_CTRL)
# Wait until scan is over
time.sleep(1)
self.getFreq()
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_SEEK)
self._saveRegister(RADIO_REG_CTRL)
def seekDown(self):
# Start seek mode downwards
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_SEEKUP)
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] | RADIO_REG_CTRL_SEEK
self._saveRegister(RADIO_REG_CTRL)
# Wait until scan is over
time.sleep(1)
self.getFreq()
self.registers[RADIO_REG_CTRL] = self.registers[RADIO_REG_CTRL] & (~RADIO_REG_CTRL_SEEK)
self._saveRegister(RADIO_REG_CTRL)
def setVolume(self, volume):
    """Set the output volume, clamped to the valid range 0..maxvolume.

    Bug fix: negative values were previously OR-ed straight into the
    volume register, corrupting its other bit fields; they are now
    clamped to 0.
    """
    if volume > self.maxvolume:
        volume = self.maxvolume
    elif volume < 0:
        volume = 0
    self.volume = volume
    # Clear the volume bits, then set the new level.
    self.registers[RADIO_REG_VOL] = self.registers[RADIO_REG_VOL] & (~RADIO_REG_VOL_VOL)
    self.registers[RADIO_REG_VOL] = self.registers[RADIO_REG_VOL] | volume
    self._saveRegister(RADIO_REG_VOL)
def getRssi(self):
    # Get the current signal strength from status register RB (0x0B).
    # NOTE(review): ``>> 10`` keeps the top 6 bits of the register;
    # the RDA5807M datasheet places RSSI in bits 15:9 of 0x0B (which
    # would need ``>> 9``) -- confirm against the datasheet.
    #self.writeBytes(bytes([RADIO_REG_RB]))
    self.registers[RADIO_REG_RB]=self._readRegisters(0xb)
    self.rssi = self.registers[RADIO_REG_RB] >> 10
    return self.rssi
def getRadioInfo(self):
# Reads info from chip and saves it into virtual memory
self._readRegisters()
if self.registers[RADIO_REG_RA] & RADIO_REG_RA_RDS:
self.rds = True
self.rssi = self.registers[RADIO_REG_RB] >> 10
if self.registers[RADIO_REG_RB] & RADIO_REG_RB_FMTRUE:
self.tuned = True
if self.registers[RADIO_REG_CTRL] & RADIO_REG_CTRL_MONO:
self.mono = True
def _saveRegister(self, regN):
# Write register from memory to receiver
regVal=bytearray(2)
regVal1 = self.registers[regN] # 16 bit value in list
regVal[0] = regVal1 >> 8
regVal[1] = regVal1 & 255
#write to i2c address with particular register and value
self.i2c.writeto_mem(self.I2C_REG, regN,regVal)
def _saveRegisters(self):
    # Write the virtual registers out to the chip.
    # NOTE(review): range(2, 7) writes registers 2..6 only, while the
    # comment below says "2 to 7"; if register 7 should be written as
    # well this needs range(2, 8) -- confirm against the datasheet.
    #save data into register 2 to 7
    for i in range(2, 7):
        self._saveRegister(i)
def _read16(self):
# Reads two bytes, returns as one 16 bit integer
result = bytearray(4)
self.i2c.readfrom_into(self.I2C_SEQ, result)
return result[0] * 256 + result[1]
def _readRegisters(self,reg):
#redfrom_mem_into reg,memadd,buffer
result = bytearray(2)
self.i2c.readfrom_mem_into(self.I2C_REG , reg,result)
return result[0] * 256 + result[1] | 0.551815 | 0.38798 |
import numpy as np
import tensorflow as tf
from keras import layers, models
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
from scipy import ndimage
from vit_keras import vit
from ...backbone.utils import patch_config
from ...common import ConvBnRelu
@register_keras_serializable(package='SegMe>TriTrans')
class VisionTransformer(layers.Layer):
    """Wraps a pretrained ViT-B/16 backbone, re-patched at build time so
    the patch embedding becomes a 1x1/stride-1 convolution at the input
    resolution, followed by a DecoderCup that turns the token sequence
    back into a square feature map.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Expect NHWC image batches.
        self.input_spec = layers.InputSpec(ndim=4)

    @shape_type_conversion
    def build(self, input_shape):
        width, height, channels = input_shape[1:]
        if width is None or height is None or channels is None:
            raise ValueError('Width, height and channel dimensions of the inputs should be defined. Found `None`.')
        if width != height:
            raise ValueError('Only square images supported. Provided: {}.'.format(input_shape))
        self.input_spec = layers.InputSpec(ndim=4, axes={1: width, 2: height, 3: channels})
        # Load the pretrained backbone and drop the class token from its
        # output sequence.
        base_model = vit.vit_b16(
            image_size=(224, 224), pretrained=True, include_top=False, pretrained_top=False, weights='imagenet21k')
        outputs = base_model.get_layer(name='Transformer/encoder_norm').output
        outputs = layers.Lambda(lambda v: v[:, 1:, ...], name='ExtractFeatures')(outputs)
        base_model = models.Model(inputs=base_model.inputs, outputs=outputs)
        # Rebuild the model config for the target input size with a 1x1,
        # stride-1 patch embedding.
        base_config = base_model.get_config()
        vit_config = patch_config(base_config, [0], 'batch_input_shape', (None, width, height, channels))
        vit_config = patch_config(vit_config, ['embedding'], 'kernel_size', (1, 1))
        vit_config = patch_config(vit_config, ['embedding'], 'strides', (1, 1))
        vit_config = patch_config(vit_config, [2], 'target_shape', lambda old: (width * height,) + old[-1:])
        vit_model = models.Model.from_config(vit_config)

        def _ext_weight(wb, wv):
            # Map a pretrained weight (wb) onto the rebuilt model's
            # corresponding slot (wv).
            if wb.shape == wv.shape:
                return wb
            if (16, 16, 3) == wb.shape[:3] and (1, 1) == wv.shape[:2]:
                # embedding
                # will be trained from scratch
                return wv
            if 3 == len(wb.shape) and wb.shape[0] == wv.shape[0] == 1 and wb.shape[2] == wv.shape[2]:
                # posembed_input
                # Resize the pretrained position grid to the new token
                # count with linear interpolation; keep the token row.
                token, grid = wb[0, :1], wb[0, 1:]
                sin = int(np.sqrt(grid.shape[0]))
                zoom = (width / sin, height / sin, 1)
                grid = grid.reshape(sin, sin, -1)
                grid = ndimage.zoom(grid, zoom, order=1).reshape(width * height, -1)
                combo = np.concatenate([token, grid], axis=0)[None]
                assert combo.shape == wv.shape
                return combo
            return wb  # will raise error if something changes

        base_weights = base_model.get_weights()
        vit_weights = [_ext_weight(wb, wv) for wb, wv in zip(base_weights, vit_model.get_weights())]
        vit_model.set_weights(vit_weights)
        self.vit = vit_model
        self.decoder = DecoderCup(channels)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        outputs = self.vit(inputs)
        outputs = self.decoder(outputs)
        return outputs

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        output_shape = self.vit.compute_output_shape(input_shape)
        output_shape = self.decoder.compute_output_shape(output_shape)
        return output_shape
@register_keras_serializable(package='SegMe>TriTrans')
class DecoderCup(layers.Layer):
    """Reshapes a (batch, tokens, channels) transformer output back into
    a square (batch, H, W, channels) feature map and refines it with a
    3x3 Conv-BN-ReLU block.

    Fix: removed dataset-join garbage that had corrupted the final
    ``return config`` line of ``get_config``.
    """

    def __init__(self, filters, **kwargs):
        super().__init__(**kwargs)
        self.input_spec = layers.InputSpec(ndim=3)
        self.filters = filters  # output channels of the conv block

    @shape_type_conversion
    def build(self, input_shape):
        width_height, channels = input_shape[1:]
        if width_height is None or channels is None:
            raise ValueError('Width/height and channel dimensions of the inputs should be defined. Found `None`.')
        if width_height != int(width_height ** 0.5) ** 2:
            raise ValueError('Provided input can\'t be reshaped to square image.')
        self.input_spec = layers.InputSpec(ndim=3, axes={1: width_height, 2: channels})
        # Side length of the square feature map.
        self.width_height = int(width_height ** 0.5)
        self.channels = channels
        self.conv = ConvBnRelu(self.filters, kernel_size=3)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        outputs = tf.reshape(inputs, [-1, self.width_height, self.width_height, self.channels])
        outputs = self.conv(outputs)
        return outputs

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        width_height = int(input_shape[1] ** 0.5)
        return input_shape[:1] + (width_height, width_height, self.filters)

    def get_config(self):
        config = super().get_config()
        config.update({'filters': self.filters})
        return config
import tensorflow as tf
from keras import layers, models
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
from scipy import ndimage
from vit_keras import vit
from ...backbone.utils import patch_config
from ...common import ConvBnRelu
@register_keras_serializable(package='SegMe>TriTrans')
class VisionTransformer(layers.Layer):
    """Wraps a pretrained ViT-B/16 backbone, re-patched at build time so
    the patch embedding becomes a 1x1/stride-1 convolution at the input
    resolution, followed by a DecoderCup that turns the token sequence
    back into a square feature map.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Expect NHWC image batches.
        self.input_spec = layers.InputSpec(ndim=4)

    @shape_type_conversion
    def build(self, input_shape):
        width, height, channels = input_shape[1:]
        if width is None or height is None or channels is None:
            raise ValueError('Width, height and channel dimensions of the inputs should be defined. Found `None`.')
        if width != height:
            raise ValueError('Only square images supported. Provided: {}.'.format(input_shape))
        self.input_spec = layers.InputSpec(ndim=4, axes={1: width, 2: height, 3: channels})
        # Load the pretrained backbone and drop the class token from its
        # output sequence.
        base_model = vit.vit_b16(
            image_size=(224, 224), pretrained=True, include_top=False, pretrained_top=False, weights='imagenet21k')
        outputs = base_model.get_layer(name='Transformer/encoder_norm').output
        outputs = layers.Lambda(lambda v: v[:, 1:, ...], name='ExtractFeatures')(outputs)
        base_model = models.Model(inputs=base_model.inputs, outputs=outputs)
        # Rebuild the model config for the target input size with a 1x1,
        # stride-1 patch embedding.
        base_config = base_model.get_config()
        vit_config = patch_config(base_config, [0], 'batch_input_shape', (None, width, height, channels))
        vit_config = patch_config(vit_config, ['embedding'], 'kernel_size', (1, 1))
        vit_config = patch_config(vit_config, ['embedding'], 'strides', (1, 1))
        vit_config = patch_config(vit_config, [2], 'target_shape', lambda old: (width * height,) + old[-1:])
        vit_model = models.Model.from_config(vit_config)

        def _ext_weight(wb, wv):
            # Map a pretrained weight (wb) onto the rebuilt model's
            # corresponding slot (wv).
            if wb.shape == wv.shape:
                return wb
            if (16, 16, 3) == wb.shape[:3] and (1, 1) == wv.shape[:2]:
                # embedding
                # will be trained from scratch
                return wv
            if 3 == len(wb.shape) and wb.shape[0] == wv.shape[0] == 1 and wb.shape[2] == wv.shape[2]:
                # posembed_input
                # Resize the pretrained position grid to the new token
                # count with linear interpolation; keep the token row.
                token, grid = wb[0, :1], wb[0, 1:]
                sin = int(np.sqrt(grid.shape[0]))
                zoom = (width / sin, height / sin, 1)
                grid = grid.reshape(sin, sin, -1)
                grid = ndimage.zoom(grid, zoom, order=1).reshape(width * height, -1)
                combo = np.concatenate([token, grid], axis=0)[None]
                assert combo.shape == wv.shape
                return combo
            return wb  # will raise error if something changes

        base_weights = base_model.get_weights()
        vit_weights = [_ext_weight(wb, wv) for wb, wv in zip(base_weights, vit_model.get_weights())]
        vit_model.set_weights(vit_weights)
        self.vit = vit_model
        self.decoder = DecoderCup(channels)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        outputs = self.vit(inputs)
        outputs = self.decoder(outputs)
        return outputs

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        output_shape = self.vit.compute_output_shape(input_shape)
        output_shape = self.decoder.compute_output_shape(output_shape)
        return output_shape
@register_keras_serializable(package='SegMe>TriTrans')
class DecoderCup(layers.Layer):
    """Reshapes a (batch, tokens, channels) transformer output back into
    a square (batch, H, W, channels) feature map and refines it with a
    3x3 Conv-BN-ReLU block.

    Fix: removed dataset-join garbage that had corrupted the final
    ``return config`` line of ``get_config``.
    """

    def __init__(self, filters, **kwargs):
        super().__init__(**kwargs)
        self.input_spec = layers.InputSpec(ndim=3)
        self.filters = filters  # output channels of the conv block

    @shape_type_conversion
    def build(self, input_shape):
        width_height, channels = input_shape[1:]
        if width_height is None or channels is None:
            raise ValueError('Width/height and channel dimensions of the inputs should be defined. Found `None`.')
        if width_height != int(width_height ** 0.5) ** 2:
            raise ValueError('Provided input can\'t be reshaped to square image.')
        self.input_spec = layers.InputSpec(ndim=3, axes={1: width_height, 2: channels})
        # Side length of the square feature map.
        self.width_height = int(width_height ** 0.5)
        self.channels = channels
        self.conv = ConvBnRelu(self.filters, kernel_size=3)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        outputs = tf.reshape(inputs, [-1, self.width_height, self.width_height, self.channels])
        outputs = self.conv(outputs)
        return outputs

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        width_height = int(input_shape[1] ** 0.5)
        return input_shape[:1] + (width_height, width_height, self.filters)

    def get_config(self):
        config = super().get_config()
        config.update({'filters': self.filters})
        return config
from cores import cor, limpa
from espacos import tio, tracos
def calculoImposto(taxas):
    """Prompt for one trade and return (fee subtotal, ticker code).

    Each entry of ``taxas`` is the day's total amount for one fee type;
    it is converted into a fraction of the day's total traded value
    (module-level ``total``) and applied to this trade's value.  IRRF
    only applies to sales, so it is removed again for buys ("C").
    """
    # Trade information (ticker, value, buy/sell)
    acao = input('Digite o código da ação: ')
    valorAcao = float(input('Digite o valor da operação: '))
    tipo = input('C/V? ').upper()
    print()
    # Fee calculation: fee rate = day's fee amount / day's total value
    porcentagemL = taxas[0] / total
    porcentagemE = taxas[1] / total
    porcentagemC = taxas[2] / total
    porcentagemIS = taxas[3] / total
    porcentagemB = taxas[4] / total
    porcentagemIR = taxas[5] / total
    taxaL = valorAcao * porcentagemL
    taxaE = valorAcao * porcentagemE
    taxaC = valorAcao * porcentagemC
    taxaIS = valorAcao * porcentagemIS
    taxaB = valorAcao * porcentagemB
    taxaIR = valorAcao * porcentagemIR
    subTotal = taxaL + taxaE + taxaC + taxaIS + taxaB + taxaIR
    # Buys ("C") are not subject to IRRF, so take it back out.
    if tipo == 'C': subTotal -= taxaIR
    # Print the fee subtotal (colored output)
    print(f'{cor(1)}Subtotal das taxas = {subTotal:.2f}{cor(1,37)}')
    return subTotal, acao
def recebeTaxas():
    """Prompt for the day's six fee amounts and return them as a list."""
    print('-' * 50)
    prompts = (
        f'{cor(1,37)}Taxa de liquidação: ',
        'Emolumentos: ',
        'Taxa de corretagem: ',
        'Taxa de ISS: ',
        'Taxa Bovespa: ',
        'Taxa de IRRF: ',
    )
    valores = [float(input(mensagem)) for mensagem in prompts]
    print('-' * 50)
    return valores
# ---- interactive entry point ----
print(cor(1))
tracos('IMPOSTO DE MOVIMENTO DE RENDA VARIÁVEL', 50)
total = float(input('Digite o valor total da operação do dia: '))
taxas = recebeTaxas()
subtotais, acoes = [], []
while True:
    valorTaxa, acao = calculoImposto(taxas)
    subtotais.append(valorTaxa)
    acoes.append(acao)
    print('-'*40)
    resposta = input('Deseja calcular mais alguma ação? [S/N]: ').upper()
    # Bug fix: keep prompting until the answer is valid.  The original
    # asked again once but discarded the re-entered value and looped
    # anyway (and ``in 'SN'`` also accepted the empty string).
    while resposta not in ('S', 'N'):
        resposta = input('Digite uma resposta válida[S/N]: ').upper()
    if resposta == 'N':
        limpa()
        print()
        break
print(f'{cor(1)}{"-"*15}RESULTADO{"-"*15}')
print()
print(f'Os valores dos impostos são:')
# Print each trade's fee subtotal (corrupted trailing line repaired).
for indice, (nome, valor) in enumerate(zip(acoes, subtotais), start=1):
    print(f'{indice}° - {nome}: {valor:.2f}')
from espacos import tio, tracos
def calculoImposto(taxas):
    """Prompt for one trade and return (fee subtotal, ticker code).

    Each entry of ``taxas`` is the day's total amount for one fee type;
    it is converted into a fraction of the day's total traded value
    (module-level ``total``) and applied to this trade's value.  IRRF
    only applies to sales, so it is removed again for buys ("C").
    """
    # Trade information (ticker, value, buy/sell)
    acao = input('Digite o código da ação: ')
    valorAcao = float(input('Digite o valor da operação: '))
    tipo = input('C/V? ').upper()
    print()
    # Fee calculation: fee rate = day's fee amount / day's total value
    porcentagemL = taxas[0] / total
    porcentagemE = taxas[1] / total
    porcentagemC = taxas[2] / total
    porcentagemIS = taxas[3] / total
    porcentagemB = taxas[4] / total
    porcentagemIR = taxas[5] / total
    taxaL = valorAcao * porcentagemL
    taxaE = valorAcao * porcentagemE
    taxaC = valorAcao * porcentagemC
    taxaIS = valorAcao * porcentagemIS
    taxaB = valorAcao * porcentagemB
    taxaIR = valorAcao * porcentagemIR
    subTotal = taxaL + taxaE + taxaC + taxaIS + taxaB + taxaIR
    # Buys ("C") are not subject to IRRF, so take it back out.
    if tipo == 'C': subTotal -= taxaIR
    # Print the fee subtotal (colored output)
    print(f'{cor(1)}Subtotal das taxas = {subTotal:.2f}{cor(1,37)}')
    return subTotal, acao
def recebeTaxas():
    """Prompt for the day's six fee amounts and return them as a list."""
    print('-' * 50)
    prompts = (
        f'{cor(1,37)}Taxa de liquidação: ',
        'Emolumentos: ',
        'Taxa de corretagem: ',
        'Taxa de ISS: ',
        'Taxa Bovespa: ',
        'Taxa de IRRF: ',
    )
    valores = [float(input(mensagem)) for mensagem in prompts]
    print('-' * 50)
    return valores
# ---- interactive entry point ----
print(cor(1))
tracos('IMPOSTO DE MOVIMENTO DE RENDA VARIÁVEL', 50)
total = float(input('Digite o valor total da operação do dia: '))
taxas = recebeTaxas()
subtotais, acoes = [], []
while True:
    valorTaxa, acao = calculoImposto(taxas)
    subtotais.append(valorTaxa)
    acoes.append(acao)
    print('-'*40)
    resposta = input('Deseja calcular mais alguma ação? [S/N]: ').upper()
    # Bug fix: keep prompting until the answer is valid.  The original
    # asked again once but discarded the re-entered value and looped
    # anyway (and ``in 'SN'`` also accepted the empty string).
    while resposta not in ('S', 'N'):
        resposta = input('Digite uma resposta válida[S/N]: ').upper()
    if resposta == 'N':
        limpa()
        print()
        break
print(f'{cor(1)}{"-"*15}RESULTADO{"-"*15}')
print()
print(f'Os valores dos impostos são:')
# Print each trade's fee subtotal (corrupted trailing line repaired).
for indice, (nome, valor) in enumerate(zip(acoes, subtotais), start=1):
    print(f'{indice}° - {nome}: {valor:.2f}')
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from persine.bridges.youtube import YoutubeBridge
@pytest.fixture
def driver():
    """Provide a headless, muted Chrome WebDriver.

    NOTE(review): the driver is never quit; consider a yield fixture
    with ``driver.quit()`` teardown to avoid leaking browser processes.
    """
    options = Options()
    options.add_argument("--headless")
    options.add_argument("--mute-audio")
    # options.add_extension("ublock-origin.crx")
    # Let videos start playing without a user gesture.
    options.add_argument("--autoplay-policy=no-user-gesture-required")
    return webdriver.Chrome(options=options)
def test_player_data(driver):
    """The YouTube player API exposes the expected video metadata."""
    bridge = YoutubeBridge(driver)
    driver.get("https://www.youtube.com/watch?v=1kIQT7uUiME")
    res = bridge._YoutubeBridge__get_player_data()
    comps = {
        "title": "Land of Talk - Some Are Lakes [Official Music Video]",
        "video_id": "1kIQT7uUiME",
        "author": "Saddle Creek",
    }
    # Fix: compare the expected value directly; the original loop
    # ignored ``value`` and re-indexed comps.
    for key, value in comps.items():
        assert res[key] == value
def test_video_data(driver):
    """The bridge scrapes video-page metadata and recommendations."""
    bridge = YoutubeBridge(driver)
    bridge.run("https://www.youtube.com/watch?v=1kIQT7uUiME")
    res = bridge._YoutubeBridge__get_video_data()
    comps = {
        "page_type": "video",
        "title": "Land of Talk - Some Are Lakes [Official Music Video]",
        "id": "1kIQT7uUiME",
        "channel_name": "Saddle Creek",
        "channel_url": "https://www.youtube.com/channel/UCW7MRMCxD5dbOU7TQaCAMLQ",  # noqa: E501
    }
    # Fix: compare the expected value directly; the original loop
    # ignored ``value`` and re-indexed comps.
    for key, value in comps.items():
        assert res[key] == value
    assert len(res["recommendations"]) > 0
def test_recommendation_scraper(driver):
    """Sidebar recommendations are present and fully populated."""
    bridge = YoutubeBridge(driver)
    bridge.run("https://www.youtube.com/watch?v=1kIQT7uUiME")
    recs = bridge._YoutubeBridge__scrape_sidebar()
    assert len(recs) > 5
    for rec in recs:
        for field in ("item_type", "title", "url"):
            assert rec[field] is not None
def test_likes_v_dislikes(driver):
    """Like and dislike counts are scraped as distinct values.

    NOTE(review): this assumes the test video never has exactly equal
    like/dislike counts -- a potentially flaky assumption; confirm.
    """
    bridge = YoutubeBridge(driver)
    bridge.run("https://www.youtube.com/watch?v=1kIQT7uUiME")
    data = bridge._YoutubeBridge__get_player_page_data()
    assert data['dislike_count'] != data['like_count']
def test_homepage_scraper(driver):
    """Homepage recommendations are present and fully populated."""
    bridge = YoutubeBridge(driver)
    bridge.run("https://www.youtube.com/")
    recs = bridge._YoutubeBridge__scrape_homepage()
    assert len(recs) > 5
    for rec in recs:
        for field in ("item_type", "title", "url"):
            assert rec[field] is not None
def test_search_results(driver):
    """Search scraping returns multiple fully-populated results.

    Fix: removed dataset-join garbage that had corrupted the final
    assertion line.
    """
    bridge = YoutubeBridge(driver)
    bridge.run("youtube:search?lofi")
    recs = bridge._YoutubeBridge__scrape_search_results()
    assert len(recs) > 5
    for rec in recs:
        assert rec["item_type"] is not None
        assert rec["title"] is not None
        assert rec["url"] is not None
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from persine.bridges.youtube import YoutubeBridge
@pytest.fixture
def driver():
    """Provide a headless, muted Chrome WebDriver.

    NOTE(review): the driver is never quit; consider a yield fixture
    with ``driver.quit()`` teardown to avoid leaking browser processes.
    """
    options = Options()
    options.add_argument("--headless")
    options.add_argument("--mute-audio")
    # options.add_extension("ublock-origin.crx")
    # Let videos start playing without a user gesture.
    options.add_argument("--autoplay-policy=no-user-gesture-required")
    return webdriver.Chrome(options=options)
def test_player_data(driver):
    """The YouTube player API exposes the expected video metadata."""
    bridge = YoutubeBridge(driver)
    driver.get("https://www.youtube.com/watch?v=1kIQT7uUiME")
    res = bridge._YoutubeBridge__get_player_data()
    comps = {
        "title": "Land of Talk - Some Are Lakes [Official Music Video]",
        "video_id": "1kIQT7uUiME",
        "author": "Saddle Creek",
    }
    # Fix: compare the expected value directly; the original loop
    # ignored ``value`` and re-indexed comps.
    for key, value in comps.items():
        assert res[key] == value
def test_video_data(driver):
    """The bridge scrapes video-page metadata and recommendations."""
    bridge = YoutubeBridge(driver)
    bridge.run("https://www.youtube.com/watch?v=1kIQT7uUiME")
    res = bridge._YoutubeBridge__get_video_data()
    comps = {
        "page_type": "video",
        "title": "Land of Talk - Some Are Lakes [Official Music Video]",
        "id": "1kIQT7uUiME",
        "channel_name": "Saddle Creek",
        "channel_url": "https://www.youtube.com/channel/UCW7MRMCxD5dbOU7TQaCAMLQ",  # noqa: E501
    }
    # Fix: compare the expected value directly; the original loop
    # ignored ``value`` and re-indexed comps.
    for key, value in comps.items():
        assert res[key] == value
    assert len(res["recommendations"]) > 0
def test_recommendation_scraper(driver):
    """Sidebar recommendations are present and fully populated."""
    bridge = YoutubeBridge(driver)
    bridge.run("https://www.youtube.com/watch?v=1kIQT7uUiME")
    recs = bridge._YoutubeBridge__scrape_sidebar()
    assert len(recs) > 5
    for rec in recs:
        for field in ("item_type", "title", "url"):
            assert rec[field] is not None
def test_likes_v_dislikes(driver):
    """Like and dislike counts are scraped as distinct values.

    NOTE(review): this assumes the test video never has exactly equal
    like/dislike counts -- a potentially flaky assumption; confirm.
    """
    bridge = YoutubeBridge(driver)
    bridge.run("https://www.youtube.com/watch?v=1kIQT7uUiME")
    data = bridge._YoutubeBridge__get_player_page_data()
    assert data['dislike_count'] != data['like_count']
def test_homepage_scraper(driver):
    """Homepage recommendations are present and fully populated."""
    bridge = YoutubeBridge(driver)
    bridge.run("https://www.youtube.com/")
    recs = bridge._YoutubeBridge__scrape_homepage()
    assert len(recs) > 5
    for rec in recs:
        for field in ("item_type", "title", "url"):
            assert rec[field] is not None
def test_search_results(driver):
    """Search scraping returns multiple fully-populated results.

    Fix: removed dataset-join garbage that had corrupted the final
    assertion line.
    """
    bridge = YoutubeBridge(driver)
    bridge.run("youtube:search?lofi")
    recs = bridge._YoutubeBridge__scrape_search_results()
    assert len(recs) > 5
    for rec in recs:
        assert rec["item_type"] is not None
        assert rec["title"] is not None
        assert rec["url"] is not None
__author__ = "Alex 'CubOfJudahsLion' Feterman"
__url__ = ("blender", "http://www.blender.org", "Author's homepage, http://geocities.com/cubofjudahslion")
__version__ = "0.1.2"
__bpydoc__ = """\
xmesh_import.py | Python Script for Blender3D | imports a VegaStrike .xmesh
Copyright (C)2005 Alex 'CubOfJudahsLion' Feterman
<p>This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
<p>This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
<p>You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
<p>Usage:<br>
Execute this script from the "File->Import" menu and choose a
Xmesh file to open.
<p>Notes:<br>
Generates UV mappings, but for the texture to be activated, go to
the texture buttons and
"""
import Blender
from Blender import Image, Texture, Material, Object, NMesh, Types, sys
import xml.sax
import meshtools
import os.path
from string import lower
locationDir = [] # registers the search of the results for images
def SetLocationDir(fileName, aDir, fileList):
    """
    os.path.walk visitor: if fileName exists in aDir, append its full
    path to the module-level ``locationDir`` result list.  ``fileList``
    is the walk-supplied directory listing and is unused here.
    """
    global locationDir
    fullPath = os.path.join(aDir, fileName)
    if os.path.isfile(fullPath):
        locationDir.append(fullPath)
def FindTexture(path, fileName):
    """
    Locate fileName next to the mesh (in path) or anywhere under the
    VegaStrike ../../textures tree; return the full path, or None if
    not found.

    Bug fix: ``locationDir`` is now cleared before each search.  It was
    previously only ever appended to, so every call after the first
    returned the first call's result.
    """
    sourcePath = os.path.join(path, fileName)
    if os.path.isfile(sourcePath):
        # the file sits right next to the mesh
        return sourcePath
    else:
        # otherwise scan the VS textures folder hierarchy
        global locationDir
        del locationDir[:]  # reset results from any previous search
        searchBaseDir = os.path.normpath(os.path.join(path, '..', '..', 'textures'))
        os.path.walk(searchBaseDir, SetLocationDir, fileName)
        if len(locationDir) > 0:
            return locationDir[0]
        else:
            return None
class XMeshHandler(xml.sax.handler.ContentHandler):
    """
    SAX content handler that accumulates geometry, UV and material data
    from a VegaStrike XMesh file and, when the document ends, realizes
    it as a Blender mesh with textures and a material applied.
    """
    # Class-level placeholder; actual texture search results live in the
    # module-level locationDir list (see FindTexture).
    locationDir = None

    def __init__(self, filename):
        # find where the directory path ends
        # (both un*x and win accounted for)
        self.path, simpleFile = os.path.split(sys.expandpath(filename))
        self.objName = os.path.splitext(simpleFile)[0]
        # material values (to be checked later)
        self.faces = []     # per-face lists of indices into self.verts
        self.verts = []     # (x, y, z) vertex tuples
        self.uvs = []       # (u, v) tuples, addressed through self.faceuvs
        self.faceuvs = []   # per-face lists of indices into self.uvs
        # Chain-initialize every material/texture field to None; each one
        # is only filled in if the matching XML element/attribute appears.
        self.alpha =\
        self.rgbCol =\
        self.amb =\
        self.emit =\
        self.colorTexture =\
        self.specTexture =\
        self.spec =\
        self.specCol = None
        # finally, start chronometer
        sys.time()

    def startDocument(self):
        """
        Callback. Invoked when the parsing starts. Used to
        display notification of process initiation.
        """
        print "Loading file..."
        Blender.Window.DrawProgressBar(0.0, "Loading file...")

    def endDocument(self):
        """
        Invoked when mesh processing is done. Used for realizing
        the mesh from collected vertex/faces and texturizing info.
        """
        # report
        print "Finished loading file, constructing mesh..."
        Blender.Window.DrawProgressBar(0.9, "Building mesh...")
        # build object
        meshtools.create_mesh(self.verts, self.faces, self.objName, self.faceuvs, self.uvs)
        print "Done, object built"
        # load corresponding images and set texture
        Blender.Window.DrawProgressBar(0.95, "Loading/Applying Texture...")
        colorTex, specTex = None, None
        # convert images into textures
        if self.colorTexture:
            colTexFName = FindTexture(self.path, self.colorTexture)
            if colTexFName != None:
                colorImg = Image.Load(colTexFName)
                colorTex = Texture.New(self.objName + ".col.tx")
                colorTex.type = Texture.Types.IMAGE
                colorTex.image = colorImg
        if self.specTexture:
            specTexFName = FindTexture(self.path, self.specTexture)
            if specTexFName != None:
                specImg = Image.Load(specTexFName)
                specTex = Texture.New(self.objName + ".spe.tx")
                specTex.type = Texture.Types.IMAGE
                specTex.image = specImg
        # make material with them and all other previously collected data
        mat = Material.New(self.objName + ".mat")
        mat.mode |= Material.Modes.TEXFACE | Material.Modes.SHADOW | Material.Modes.TRACEABLE | Material.Modes.ZTRANSP
        mat.specTransp = 1.0
        # Only override Blender's material defaults for values the file
        # actually supplied (all fields start as None).
        if self.alpha : mat.alpha = self.alpha
        if self.rgbCol : mat.rgbCol = self.rgbCol
        if self.amb : mat.amb = self.amb
        if self.emit : mat.emit = self.emit
        if self.spec : mat.spec = self.spec
        if self.specCol : mat.specCol = self.specCol
        # Texture slot 0: color map; slot 1: specular map
        if colorTex:
            mat.setTexture(0, colorTex, Texture.TexCo.UV, Texture.MapTo.COL)
        if specTex:
            mat.setTexture(1, specTex, Texture.TexCo.UV, Texture.MapTo.SPEC)
        # apply to mesh
        obj = Object.Get(self.objName)
        mesh = obj.data
        # mesh.mode = NMesh.Modes.NOVNORMALSFLIP
        # uncomment the following if you want models automatically sub-surfaced
        """for currFace in mesh.faces:
            currFace.smooth = 1
        mesh.setSubDivLevels([1,2])
        mesh.setMode("SubSurf", "TwoSided")"""
        mesh.setMode("TwoSided")
        mesh.addMaterial(mat)
        mesh.update(1)
        # Done, notify user
        Blender.Window.DrawProgressBar(1.0, "Done.")

    def startElement(self, pname, attrMixed):
        """
        Receives pre-parsed data for every geometry/texture
        datum in the mesh.
        Like blender, wings3d and vegastrike are also opengl apps.
        the internal format described by the xml tags is similar to
        that of blender. see the xmesh format description and the opengl
        red/blue books for structure and mapping details.
        """
        # we translate everything to lowercase (element name and attributes)
        name = lower(pname)
        attr = {}
        for ik, iv in attrMixed.items():
            attr[lower(ik)] = iv
        # pre-parse attributes if available
        if name == "mesh":
            if "texture" in attr:
                self.colorTexture = attr["texture"]
                print "* color tex:", self.colorTexture
            if "texture1" in attr:
                self.specTexture = attr["texture1"]
                print "* spec tex:", self.specTexture
        elif name == "points":
            print "Reading vertex coordinates..."
            Blender.Window.DrawProgressBar(0.1, "Reading vertexes...")
        elif name == "location":
            self.verts.append( (float(attr["x"]), float(attr["y"]), float(attr["z"])) )
        elif name == "polygons":
            print "Reading faces..."
            Blender.Window.DrawProgressBar(0.25, "Reading faces...")
        elif name == "tri" or name == "quad" or name == "trifan":
            # start a new face: vertex indices and per-vertex UVs are
            # collected by the nested "vertex" elements below
            self.faceVerts = []
            self.facevUVs = []
        elif name == "vertex":
            self.faceVerts.append(int(attr["point"]))
            # t is flipped: XMesh and Blender disagree on V-axis direction
            self.facevUVs.append( (float(attr["s"]), 1-float(attr["t"])) )
        elif name == "diffuse":
            self.rgbCol = [float(attr['red']), float(attr['green']), float(attr['blue'])]
            self.alpha = float(attr['alpha'])
        elif name == "ambient":
            # Blender only takes a scalar ambient factor: average the RGB
            self.amb = (float(attr['red']) + float(attr['green']) + float(attr['blue'])) / 3.0 * float(attr['alpha'])
        elif name == "specular":
            specIn = float(attr['alpha'])
            self.specCol = [specIn*float(attr['red']), specIn*float(attr['green']), specIn*float(attr['blue'])]
            self.spec = 0.01 # float(attr['alpha'])
        elif name == "emissive":
            # sorry, no emissive color, only emission index
            self.emit = (float(attr['red']) + float(attr['green']) + float(attr['blue'])) / 3.0 * float(attr['alpha'])

    def endElement(self, pname):
        """
        Serves to assemble gathered
        data from inner subelements
        """
        name = lower(pname)
        # these are handled directly
        if name == "tri" or name == "quad":
            # the faces are an array, each element an array of
            # vertex indexes as collected in self.verts
            # to get (x,y,z) tuples for the jth vertex of the ith face:
            # self.verts[self.faces[i][j]]
            self.faces.append(self.faceVerts)
            # similarly, the UV mapping coordinates for the same vertex
            # are expected to be retrievable as
            # self.uvs[self.faceuvs[i][j]]
            insertPos = len(self.uvs)
            self.faceuvs.append(range(insertPos, insertPos+len(self.facevUVs)))
            self.uvs.extend(self.facevUVs)
        elif name == "trifan":
            # yes, opengl handles triangle fans naturally, but not blender
            fanIdx = 2
            while fanIdx < len(self.faceVerts):
                # so we make triangles out of them instead
                self.faces.append( [self.faceVerts[0], self.faceVerts[fanIdx-1], self.faceVerts[fanIdx]] )
                insertPos = len(self.uvs)
                self.faceuvs.append(range(insertPos, insertPos+3))
                self.uvs.extend( [self.facevUVs[0], self.facevUVs[fanIdx-1], self.facevUVs[fanIdx]] )
                fanIdx += 1
def read(filename):
    """
    File-selector callback: runs the SAX parser over the chosen XMesh
    file, driving an XMeshHandler that builds the Blender mesh.
    """
    handler = XMeshHandler(filename)
    xml.sax.parse(filename, handler)
Blender.Window.FileSelector(read, "VegaStrike .XMesh") | vegastrike/objconv/blender_xmesh_import.py | __author__ = "Alex 'CubOfJudahsLion' Feterman"
__url__ = ("blender", "http://www.blender.org", "Author's homepage, http://geocities.com/cubofjudahslion")
__version__ = "0.1.2"
__bpydoc__ = """\
xmesh_import.py | Python Script for Blender3D | imports a VegaStrike .xmesh
Copyright (C)2005 Alex 'CubOfJudahsLion' Feterman
<p>This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
<p>This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
<p>You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
<p>Usage:<br>
Execute this script from the "File->Import" menu and choose a
Xmesh file to open.
<p>Notes:<br>
Generates UV mappings, but for the texture to be activated, go to
the texture buttons and
"""
import Blender
from Blender import Image, Texture, Material, Object, NMesh, Types, sys
import xml.sax
import meshtools
import os.path
from string import lower
locationDir = [] # registers the search of the results for images
def SetLocationDir(fileName, aDir, fileList):
    """
    Visitor for os.path.walk: appends the full path of fileName to the
    module-level locationDir list if the file exists inside aDir.
    (fileList is supplied by os.path.walk and is not used.)
    """
    global locationDir
    found = os.path.join(aDir, fileName)
    if not os.path.isfile(found):
        return
    locationDir.append(found)
def FindTexture(path, fileName):
    """
    Find a texture file, checking the mesh's own directory first and then
    walking the VegaStrike ../../textures tree.

    :param path: directory containing the mesh file
    :param fileName: texture file name to search for
    :return: full path to the texture, or None when not found
    """
    sourcePath = os.path.join(path, fileName)
    if os.path.isfile(sourcePath):
        # check if the file is actually there and report if so
        return sourcePath
    else:
        # otherwise check the directory hierarchy for the VS textures folder
        global locationDir
        # BUGFIX: empty the shared result list before searching; stale
        # entries from an earlier call would otherwise be returned instead
        # of the texture actually requested.
        del locationDir[:]
        searchBaseDir = os.path.normpath(os.path.join(path, '..', '..', 'textures'))
        os.path.walk(searchBaseDir, SetLocationDir, fileName)
        if len(locationDir) > 0:
            return locationDir[0]
        else:
            return None
class XMeshHandler(xml.sax.handler.ContentHandler):
    """
    SAX content handler that accumulates geometry, UV and material data
    from a VegaStrike XMesh file and, when the document ends, realizes
    it as a Blender mesh with textures and a material applied.
    """
    # Class-level placeholder; actual texture search results live in the
    # module-level locationDir list (see FindTexture).
    locationDir = None

    def __init__(self, filename):
        # find where the directory path ends
        # (both un*x and win accounted for)
        self.path, simpleFile = os.path.split(sys.expandpath(filename))
        self.objName = os.path.splitext(simpleFile)[0]
        # material values (to be checked later)
        self.faces = []     # per-face lists of indices into self.verts
        self.verts = []     # (x, y, z) vertex tuples
        self.uvs = []       # (u, v) tuples, addressed through self.faceuvs
        self.faceuvs = []   # per-face lists of indices into self.uvs
        # Chain-initialize every material/texture field to None; each one
        # is only filled in if the matching XML element/attribute appears.
        self.alpha =\
        self.rgbCol =\
        self.amb =\
        self.emit =\
        self.colorTexture =\
        self.specTexture =\
        self.spec =\
        self.specCol = None
        # finally, start chronometer
        sys.time()

    def startDocument(self):
        """
        Callback. Invoked when the parsing starts. Used to
        display notification of process initiation.
        """
        print "Loading file..."
        Blender.Window.DrawProgressBar(0.0, "Loading file...")

    def endDocument(self):
        """
        Invoked when mesh processing is done. Used for realizing
        the mesh from collected vertex/faces and texturizing info.
        """
        # report
        print "Finished loading file, constructing mesh..."
        Blender.Window.DrawProgressBar(0.9, "Building mesh...")
        # build object
        meshtools.create_mesh(self.verts, self.faces, self.objName, self.faceuvs, self.uvs)
        print "Done, object built"
        # load corresponding images and set texture
        Blender.Window.DrawProgressBar(0.95, "Loading/Applying Texture...")
        colorTex, specTex = None, None
        # convert images into textures
        if self.colorTexture:
            colTexFName = FindTexture(self.path, self.colorTexture)
            if colTexFName != None:
                colorImg = Image.Load(colTexFName)
                colorTex = Texture.New(self.objName + ".col.tx")
                colorTex.type = Texture.Types.IMAGE
                colorTex.image = colorImg
        if self.specTexture:
            specTexFName = FindTexture(self.path, self.specTexture)
            if specTexFName != None:
                specImg = Image.Load(specTexFName)
                specTex = Texture.New(self.objName + ".spe.tx")
                specTex.type = Texture.Types.IMAGE
                specTex.image = specImg
        # make material with them and all other previously collected data
        mat = Material.New(self.objName + ".mat")
        mat.mode |= Material.Modes.TEXFACE | Material.Modes.SHADOW | Material.Modes.TRACEABLE | Material.Modes.ZTRANSP
        mat.specTransp = 1.0
        # Only override Blender's material defaults for values the file
        # actually supplied (all fields start as None).
        if self.alpha : mat.alpha = self.alpha
        if self.rgbCol : mat.rgbCol = self.rgbCol
        if self.amb : mat.amb = self.amb
        if self.emit : mat.emit = self.emit
        if self.spec : mat.spec = self.spec
        if self.specCol : mat.specCol = self.specCol
        # Texture slot 0: color map; slot 1: specular map
        if colorTex:
            mat.setTexture(0, colorTex, Texture.TexCo.UV, Texture.MapTo.COL)
        if specTex:
            mat.setTexture(1, specTex, Texture.TexCo.UV, Texture.MapTo.SPEC)
        # apply to mesh
        obj = Object.Get(self.objName)
        mesh = obj.data
        # mesh.mode = NMesh.Modes.NOVNORMALSFLIP
        # uncomment the following if you want models automatically sub-surfaced
        """for currFace in mesh.faces:
            currFace.smooth = 1
        mesh.setSubDivLevels([1,2])
        mesh.setMode("SubSurf", "TwoSided")"""
        mesh.setMode("TwoSided")
        mesh.addMaterial(mat)
        mesh.update(1)
        # Done, notify user
        Blender.Window.DrawProgressBar(1.0, "Done.")

    def startElement(self, pname, attrMixed):
        """
        Receives pre-parsed data for every geometry/texture
        datum in the mesh.
        Like blender, wings3d and vegastrike are also opengl apps.
        the internal format described by the xml tags is similar to
        that of blender. see the xmesh format description and the opengl
        red/blue books for structure and mapping details.
        """
        # we translate everything to lowercase (element name and attributes)
        name = lower(pname)
        attr = {}
        for ik, iv in attrMixed.items():
            attr[lower(ik)] = iv
        # pre-parse attributes if available
        if name == "mesh":
            if "texture" in attr:
                self.colorTexture = attr["texture"]
                print "* color tex:", self.colorTexture
            if "texture1" in attr:
                self.specTexture = attr["texture1"]
                print "* spec tex:", self.specTexture
        elif name == "points":
            print "Reading vertex coordinates..."
            Blender.Window.DrawProgressBar(0.1, "Reading vertexes...")
        elif name == "location":
            self.verts.append( (float(attr["x"]), float(attr["y"]), float(attr["z"])) )
        elif name == "polygons":
            print "Reading faces..."
            Blender.Window.DrawProgressBar(0.25, "Reading faces...")
        elif name == "tri" or name == "quad" or name == "trifan":
            # start a new face: vertex indices and per-vertex UVs are
            # collected by the nested "vertex" elements below
            self.faceVerts = []
            self.facevUVs = []
        elif name == "vertex":
            self.faceVerts.append(int(attr["point"]))
            # t is flipped: XMesh and Blender disagree on V-axis direction
            self.facevUVs.append( (float(attr["s"]), 1-float(attr["t"])) )
        elif name == "diffuse":
            self.rgbCol = [float(attr['red']), float(attr['green']), float(attr['blue'])]
            self.alpha = float(attr['alpha'])
        elif name == "ambient":
            # Blender only takes a scalar ambient factor: average the RGB
            self.amb = (float(attr['red']) + float(attr['green']) + float(attr['blue'])) / 3.0 * float(attr['alpha'])
        elif name == "specular":
            specIn = float(attr['alpha'])
            self.specCol = [specIn*float(attr['red']), specIn*float(attr['green']), specIn*float(attr['blue'])]
            self.spec = 0.01 # float(attr['alpha'])
        elif name == "emissive":
            # sorry, no emissive color, only emission index
            self.emit = (float(attr['red']) + float(attr['green']) + float(attr['blue'])) / 3.0 * float(attr['alpha'])

    def endElement(self, pname):
        """
        Serves to assemble gathered
        data from inner subelements
        """
        name = lower(pname)
        # these are handled directly
        if name == "tri" or name == "quad":
            # the faces are an array, each element an array of
            # vertex indexes as collected in self.verts
            # to get (x,y,z) tuples for the jth vertex of the ith face:
            # self.verts[self.faces[i][j]]
            self.faces.append(self.faceVerts)
            # similarly, the UV mapping coordinates for the same vertex
            # are expected to be retrievable as
            # self.uvs[self.faceuvs[i][j]]
            insertPos = len(self.uvs)
            self.faceuvs.append(range(insertPos, insertPos+len(self.facevUVs)))
            self.uvs.extend(self.facevUVs)
        elif name == "trifan":
            # yes, opengl handles triangle fans naturally, but not blender
            fanIdx = 2
            while fanIdx < len(self.faceVerts):
                # so we make triangles out of them instead
                self.faces.append( [self.faceVerts[0], self.faceVerts[fanIdx-1], self.faceVerts[fanIdx]] )
                insertPos = len(self.uvs)
                self.faceuvs.append(range(insertPos, insertPos+3))
                self.uvs.extend( [self.facevUVs[0], self.facevUVs[fanIdx-1], self.facevUVs[fanIdx]] )
                fanIdx += 1
def read(filename):
    """
    Entry point invoked by the Blender file selector: parses the chosen
    XMesh file with SAX, using an XMeshHandler to construct the mesh.
    """
    xml.sax.parse(filename, XMeshHandler(filename))
Blender.Window.FileSelector(read, "VegaStrike .XMesh") | 0.343892 | 0.112113 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
from numba import njit
@njit(fastmath=True, cache=True)
def conditional_probability(k, i, r, a, p, d):
    """
    Conditional probability P(K|X) used when computing the a priori
    probabilities.

    :param k: The discretized variable.
    :param i: The value of the bin.
    :param r: The correlation parameter.
    :param a: The discretization cut-off parameter.
    :param p: The number of bins exponent.
    :param d: The constant-size interval divider.
    :return: The conditional probability P(K|X).
    """
    last_bin = 2 ** p - 1
    # Bin edges: the first and last bins extend to +/- infinity
    if i == 0:
        lo = -np.inf
        hi = -a + d
    elif i == last_bin:
        lo = -a + last_bin * d
        hi = np.inf
    else:
        lo = -a + i * d
        hi = -a + (i + 1) * d
    # Gaussian integral over [lo, hi], centered at k*r with variance 1-r^2
    scale = np.sqrt(2 * (1 - r ** 2))
    lower = (lo - k * r) / scale
    upper = (hi - k * r) / scale
    return 0.5 * (math.erf(upper) - math.erf(lower))
def q_ary_to_binary(m, q):
    """
    Converts a q-ary sequence into a binary sequence, q bits per symbol
    (most significant bit first).

    :param m: The q-ary sequence (integers in [0, 2**q)).
    :param q: The Galois field exponent (bits per symbol).
    :return: int8 numpy array of len(m) * q bits.
    """
    mA_bin = np.empty(len(m) * q, dtype=np.int8)  # Binary representation of Alice's q-ary message
    for i in range(len(m)):
        symbol = int(m[i])
        for j in range(q):
            # Extract bit j (MSB first) arithmetically instead of round-
            # tripping through np.binary_repr's string representation.
            mA_bin[i * q + j] = (symbol >> (q - 1 - j)) & 1
    return mA_bin
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
from numba import njit
@njit(fastmath=True, cache=True)
def conditional_probability(k, i, r, a, p, d):
    """
    Compute the conditional probability P(K|X) feeding the a priori
    probability calculation.

    :param k: The discretized variable.
    :param i: The value of the bin.
    :param r: The correlation parameter.
    :param a: The discretization cut-off parameter.
    :param p: The number of bins exponent.
    :param d: The constant-size interval divider.
    :return: The conditional probability P(K|X).
    """
    num_bins_minus_one = 2 ** p - 1
    # Determine the bin's interval; edge bins are half-open to infinity
    if i == 0:
        ak = -np.inf
        bk = -a + d
    elif i == num_bins_minus_one:
        ak = -a + num_bins_minus_one * d
        bk = np.inf
    else:
        ak = -a + i * d
        bk = -a + (i + 1) * d
    # Normalize both edges once with the shared denominator
    denom = np.sqrt(2 * (1 - r ** 2))
    lo_edge = (ak - k * r) / denom
    hi_edge = (bk - k * r) / denom
    # Gaussian probability mass between the two normalized edges
    return 0.5 * (math.erf(hi_edge) - math.erf(lo_edge))
def q_ary_to_binary(m, q):
    """
    Expand a q-ary symbol sequence into its binary representation,
    q bits per symbol, most significant bit first.

    :param m: The q-ary sequence (integers in [0, 2**q)).
    :param q: The Galois field exponent (bits per symbol).
    :return: int8 numpy array of len(m) * q bits.
    """
    mA_bin = np.empty(len(m) * q, dtype=np.int8)  # Binary representation of Alice's q-ary message
    for i in range(len(m)):
        symbol = int(m[i])
        for j in range(q):
            # Arithmetic bit extraction (MSB first) replaces the slower
            # np.binary_repr string formatting and per-character parsing.
            mA_bin[i * q + j] = (symbol >> (q - 1 - j)) & 1
    return mA_bin
import utils
from utils.unit_conversions import lin_to_db, db_to_lin, kft_to_km
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import seaborn as sns
import atm
import prop
import detector
def make_all_figures(close_figs=False):
    """
    Generate every figure for this chapter.

    :param close_figs: Boolean flag. If True, each figure is closed after
                       generation (useful for batch scripting); otherwise
                       the figures are shown. Default=False.
    :return: list of figure handles, or None when close_figs is True
    """
    # Colormap providing successive plot-line colors
    colors = plt.get_cmap("tab10")
    # Seed the random number generator so results are reproducible
    rng = np.random.default_rng(0)
    # Output directory for this chapter's figures
    prefix = utils.init_output_dir('chapter3')
    # Seaborn styling for prettier plots
    sns.set()
    # Generate the figures in order
    figs = [make_figure_1(prefix),
            make_figure_2(prefix),
            make_figure_3(prefix),
            make_figure_4(prefix),
            make_figure_7(prefix),
            make_figure_8(prefix),
            make_figure_9(prefix, rng, colors),
            make_figure_10(prefix, rng, colors)]
    if not close_figs:
        plt.show()
        return figs
    for fig in figs:
        plt.close(fig)
    return None
def make_figure_1(prefix=None):
    """
    Figure 1, Spectral Content

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    # Frequency of Signal
    freq0 = 1
    bandwidth = .4
    # Amplitudes
    noise_pwr = 1
    signal_amplitude = 5
    # Generate Frequency Content
    num_freq_bins = 201
    freq_vec = 2*freq0*np.linspace(start=-1, stop=1, num=num_freq_bins)
    noise_vec = noise_pwr*np.ones(shape=(num_freq_bins, ))
    # Triangular signal spectrum centered at +/- freq0
    signal_vec = np.fmax(0, signal_amplitude*(1-2*np.absolute(np.absolute(freq_vec)-freq0)/bandwidth))
    # Plot
    fig1 = plt.figure()
    plt.plot(freq_vec, noise_vec, label='Noise')
    plt.plot(freq_vec, signal_vec, label='Signal')
    plt.xlabel('$f$')
    plt.ylabel('$P(f)$')
    plt.legend(loc='upper right')
    # Annotate the bandwidth. The annotation text is passed positionally;
    # the `s=` keyword alias was deprecated and removed in matplotlib 3.3.
    plt.annotate('', xy=(-1.2, 5.1), xytext=(-0.8, 5.1),
                 arrowprops=dict(arrowstyle='<->', color='k'))
    plt.annotate('', xy=(1.2, 5.1), xytext=(0.8, 5.1),
                 arrowprops=dict(arrowstyle='<->', color='k'))
    plt.text(-1.1, 5.2, r'$B_F$')
    plt.text(.9, 5.2, r'$B_F$')
    # Change the x/y ticks
    plt.xticks([-1, 0, 1], [r'$-f_0$', 0, r'$f_0$'])
    plt.yticks([noise_pwr, signal_amplitude], [r'$N_0/2$', r'$S/2$'])
    plt.xlim([freq_vec[0], freq_vec[-1]])
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig1.svg')
        plt.savefig(prefix + 'fig1.png')
    return fig1
def make_figure_2(prefix=None):
    """
    Figure 2 - Plot with Spectrum

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    # Frequency of Signal
    freq0 = 1
    bandwidth = .4
    # Amplitudes
    noise_pwr = 1
    signal_amplitude = 5
    # Generate Frequency Content
    num_freq_bins = 201
    freq_vec = 2 * freq0 * np.linspace(start=-1, stop=1, num=num_freq_bins)
    noise_vec = noise_pwr * np.ones(shape=(num_freq_bins, ))
    signal_vec = np.fmax(0, signal_amplitude * (1 - 2 * np.absolute(np.absolute(freq_vec) - freq0) / bandwidth))
    # Filtered
    bandwidth_filtered = 2*bandwidth
    filter_mask = np.absolute(np.absolute(freq_vec) - freq0) <= bandwidth_filtered/2
    filter_vec = np.zeros_like(freq_vec)
    filter_vec[filter_mask] = 1.2*signal_amplitude  # Mark the pass-band slightly higher than the signal amplitude
    noise_filtered = np.copy(noise_vec)  # Copy and filter the noise
    noise_filtered[np.logical_not(filter_mask)] = 0
    # Plot
    fig2 = plt.figure()
    plt.plot(freq_vec, noise_vec, label='Noise')
    plt.plot(freq_vec, noise_filtered, label='Noise (filtered)')
    plt.plot(freq_vec, signal_vec, label='Signal')
    plt.plot(freq_vec, filter_vec, '--', label='Filter')
    plt.xlabel('$f$')
    plt.ylabel('$P(f)$')
    plt.legend(loc='lower right')
    # Annotate the bandwidth. The annotation text is passed positionally;
    # the `s=` keyword alias was deprecated and removed in matplotlib 3.3.
    plt.annotate('', xy=(-1.2, 5.1), xytext=(-0.8, 5.1),
                 arrowprops=dict(arrowstyle='<->', color='k'))
    plt.annotate('', xy=(1.2, 5.1), xytext=(0.8, 5.1),
                 arrowprops=dict(arrowstyle='<->', color='k'))
    plt.text(-1.1, 5.2, r'$B_F$')
    plt.text(.9, 5.2, r'$B_F$')
    # Change the x/y ticks
    plt.xticks([-1, 0, 1], [r'$-f_0$', 0, r'$f_0$'])
    plt.yticks([noise_pwr, signal_amplitude], [r'$N_0/2$', r'$S/2$'])
    plt.xlim([freq_vec[0], freq_vec[-1]])
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig2.svg')
        plt.savefig(prefix + 'fig2.png')
    return fig2
def make_figure_3(prefix=None):
    """
    Figure 3 - CW Detection PFA vs. Threshold

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    sample_counts = np.array([1, 10, 100])
    eta_db = np.arange(start=-10, step=.1, stop=30.1)
    eta_lin = db_to_lin(eta_db)
    # P_FA is the complementary CDF of a chi-squared variable with 2M
    # degrees of freedom; scipy calls the complementary CDF (1 - CDF)
    # the 'survival function'.  Broadcasting yields a (threshold, M) grid.
    prob_fa = stats.chi2.sf(x=eta_lin[:, np.newaxis], df=2*sample_counts[np.newaxis, :])
    # Plot one curve per sample count
    fig3 = plt.figure()
    for col, m in enumerate(sample_counts):
        plt.semilogy(eta_db, prob_fa[:, col], label='M = {}'.format(m))
    plt.legend(loc='lower left')
    plt.xlabel(r'$\eta [dB]$')
    plt.ylabel('$P_{FA}$')
    plt.ylim([1e-6, 1.1])
    plt.xlim([eta_db[0], eta_db[-1]])
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig3.svg')
        plt.savefig(prefix + 'fig3.png')
    return fig3
def make_figure_4(prefix=None):
    """
    Figure 4, PD vs. SNR for CW Detection

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    prob_fa = 1e-6
    # Sample counts as a row vector and the SNR axis as a column vector,
    # so the scipy calls broadcast to a (num_snr, num_m) grid
    num_samples = np.array([1, 10, 100, 1000])[np.newaxis, :]
    xi_db = np.arange(start=-20, step=.1, stop=20.1)[:, np.newaxis]
    xi_lin = db_to_lin(xi_db)
    # Detection threshold achieving the desired false-alarm rate
    eta = stats.chi2.ppf(q=1-prob_fa, df=2*num_samples)
    # Probability of detection from the non-central chi-squared CDF
    chi_lambda = 2*xi_lin  # Non-centrality parameter, lambda, or chi-squared RV
    prob_det = 1 - stats.ncx2.cdf(x=eta, df=2*num_samples, nc=num_samples*chi_lambda)
    # Plot one curve per sample count
    fig4 = plt.figure()
    for col, m in enumerate(num_samples[0, :]):
        plt.plot(xi_db, prob_det[:, col], label='M = {}'.format(m))
    plt.legend(loc='upper left')
    plt.xlabel(r'$\xi$ [dB]')
    plt.ylabel('$P_D$')
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig4.svg')
        plt.savefig(prefix + 'fig4.png')
    return fig4
def make_figure_7(prefix=None):
    """
    Figure 7, Atmospheric Loss Table

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    range_m = 1.0e3  # reference path length: 1 km
    freq_vec = np.arange(start=1.e9, step=50.e6, stop=100.e9+50.e6)
    # Reference Atmosphere
    # -- Sea Level, 10 kft, 20 kft, 30 kft, 40 kft
    alt_kft = np.array([0., 10., 20., 30., 40.])
    # T = [15, -4.8, -24.6, -44.4, -56.6];
    # P = [101325, 69680, 46560, 30090,18750];
    # g = [7.5,2.01,0.34,.05,.01];
    loss_atm = np.zeros(shape=(freq_vec.size, alt_kft.size))
    for col, alt in enumerate(alt_kft):
        # Standard-atmosphere parameters for this altitude band
        alt_m = kft_to_km(alt) * 1.0e3
        atmosphere = atm.reference.get_standard_atmosphere(alt_m)
        loss_atm[:, col] = atm.model.calc_atm_loss(freq_vec, gas_path_len_m=range_m, atmosphere=atmosphere)
    # Plot one attenuation curve per altitude
    fig7 = plt.figure()
    for col, alt in enumerate(alt_kft):
        plt.semilogy(freq_vec/1e9, loss_atm[:, col], label='Alt = {} kft'.format(alt))
    plt.legend(loc='upper left')
    plt.xlabel('Frequency [GHz]')
    plt.ylabel('Specific Attenuation [dB/km]')
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig7.svg')
        plt.savefig(prefix + 'fig7.png')
    return fig7
def make_figure_8(prefix=None):
    """
    Figures 8, FM Reception Power vs. Range

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    # Set up RF environment
    tx_height_m = 100
    rx_height_m = 2
    range_vec = np.arange(start=10.0e3, step=10.0e3, stop=510.0e3)
    freq_hz = 100e6
    # Propagation loss over the range axis (model selects free-space/two-ray)
    loss_prop = prop.model.get_path_loss(range_m=range_vec, freq_hz=freq_hz, tx_ht_m=tx_height_m,
                                         rx_ht_m=rx_height_m, include_atm_loss=False)
    # Noise power
    bandwidth = 2e6      # channel bandwidth [Hz]
    noise_figure = 5     # noise figure [dB]
    noise_pwr = lin_to_db(utils.constants.kT*bandwidth)+noise_figure
    # Signal power
    eirp = 47            # dBW
    rx_gain = 0          # receive antenna gain
    rx_loss = 0
    # Received power and minimum detectable signal
    signal_pwr = eirp-loss_prop+rx_gain-rx_loss
    snr_min = 3.65
    signal_pwr_min = noise_pwr+snr_min
    snr0 = eirp+rx_gain-rx_loss-noise_pwr  # snr with no propagation loss
    # Maximum detection range for a square-law detector
    range_max = detector.squareLaw.max_range(prob_fa=1e-6, prob_d=.5, num_samples=10, f0=freq_hz,
                                             ht=tx_height_m, hr=rx_height_m,
                                             snr0=snr0, include_atm_loss=False)
    print('Max Range: {} m'.format(range_max))
    fig8 = plt.figure()
    plt.plot(range_vec/1e3, signal_pwr, label='$P_R$')
    plt.plot(range_vec/1e3, signal_pwr_min*np.ones_like(range_vec), linestyle=':', label='MDS')
    plt.legend(loc='upper right')
    plt.xlabel('Range [km]')
    plt.ylabel('Received Power [dBW]')
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig8.svg')
        plt.savefig(prefix + 'fig8.png')
    return fig8
def make_figure_9(prefix=None, rng=None, colors=None):
    """
    Figures 9, Example 3.1 Monte Carlo Results

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :param rng: random number generator
    :param colors: colormap for plotting
    :return: figure handle
    """
    # Deferred import to avoid a circular dependency at module load
    from examples import chapter3
    fig9 = chapter3.example1(rng, colors)
    # Save figure in both vector and raster form
    if prefix is not None:
        for ext in ('svg', 'png'):
            plt.savefig('{}fig9.{}'.format(prefix, ext))
    return fig9
def make_figure_10(prefix=None, rng=None, colors=None):
    """
    Figures 10, Example 3.2 Monte Carlo results

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :param rng: random number generator
    :param colors: colormap for plotting
    :return: figure handle
    """
    # Deferred import to avoid a circular dependency at module load
    from examples import chapter3
    fig10 = chapter3.example2(rng, colors)
    # Save figure in both vector and raster form
    if prefix is not None:
        for ext in ('svg', 'png'):
            plt.savefig('{}fig10.{}'.format(prefix, ext))
    return fig10
from utils.unit_conversions import lin_to_db, db_to_lin, kft_to_km
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import seaborn as sns
import atm
import prop
import detector
def make_all_figures(close_figs=False):
    """
    Call all the figure generators for this chapter.

    :param close_figs: if True, close each figure after generation
                       (for batch scripting); otherwise show them.
                       Default=False.
    :return: list of figure handles, or None when close_figs is True
    """
    colors = plt.get_cmap("tab10")      # successive plot-line colors
    rng = np.random.default_rng(0)      # fixed seed for reproducibility
    prefix = utils.init_output_dir('chapter3')
    sns.set()                           # seaborn styling
    # Figures 1-4 and 7-8 only need the output prefix
    figs = [generator(prefix) for generator in
            (make_figure_1, make_figure_2, make_figure_3, make_figure_4,
             make_figure_7, make_figure_8)]
    # Figures 9-10 additionally need the rng and colormap
    figs.append(make_figure_9(prefix, rng, colors))
    figs.append(make_figure_10(prefix, rng, colors))
    if close_figs:
        for fig in figs:
            plt.close(fig)
        return None
    plt.show()
    return figs
def make_figure_1(prefix=None):
    """
    Figure 1, Spectral Content

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    # Frequency of Signal
    freq0 = 1
    bandwidth = .4
    # Amplitudes
    noise_pwr = 1
    signal_amplitude = 5
    # Generate Frequency Content
    num_freq_bins = 201
    freq_vec = 2*freq0*np.linspace(start=-1, stop=1, num=num_freq_bins)
    noise_vec = noise_pwr*np.ones(shape=(num_freq_bins, ))
    # Triangular signal spectrum centered at +/- freq0
    signal_vec = np.fmax(0, signal_amplitude*(1-2*np.absolute(np.absolute(freq_vec)-freq0)/bandwidth))
    # Plot
    fig1 = plt.figure()
    plt.plot(freq_vec, noise_vec, label='Noise')
    plt.plot(freq_vec, signal_vec, label='Signal')
    plt.xlabel('$f$')
    plt.ylabel('$P(f)$')
    plt.legend(loc='upper right')
    # Annotate the bandwidth. The annotation text is passed positionally;
    # the `s=` keyword alias was deprecated and removed in matplotlib 3.3.
    plt.annotate('', xy=(-1.2, 5.1), xytext=(-0.8, 5.1),
                 arrowprops=dict(arrowstyle='<->', color='k'))
    plt.annotate('', xy=(1.2, 5.1), xytext=(0.8, 5.1),
                 arrowprops=dict(arrowstyle='<->', color='k'))
    plt.text(-1.1, 5.2, r'$B_F$')
    plt.text(.9, 5.2, r'$B_F$')
    # Change the x/y ticks
    plt.xticks([-1, 0, 1], [r'$-f_0$', 0, r'$f_0$'])
    plt.yticks([noise_pwr, signal_amplitude], [r'$N_0/2$', r'$S/2$'])
    plt.xlim([freq_vec[0], freq_vec[-1]])
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig1.svg')
        plt.savefig(prefix + 'fig1.png')
    return fig1
def make_figure_2(prefix=None):
    """
    Figure 2 - Plot with Spectrum

    Ported from MATLAB Code, 23 March 2021

    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    # Frequency of Signal
    freq0 = 1
    bandwidth = .4
    # Amplitudes
    noise_pwr = 1
    signal_amplitude = 5
    # Generate Frequency Content
    num_freq_bins = 201
    freq_vec = 2 * freq0 * np.linspace(start=-1, stop=1, num=num_freq_bins)
    noise_vec = noise_pwr * np.ones(shape=(num_freq_bins, ))
    signal_vec = np.fmax(0, signal_amplitude * (1 - 2 * np.absolute(np.absolute(freq_vec) - freq0) / bandwidth))
    # Filtered
    bandwidth_filtered = 2*bandwidth
    filter_mask = np.absolute(np.absolute(freq_vec) - freq0) <= bandwidth_filtered/2
    filter_vec = np.zeros_like(freq_vec)
    filter_vec[filter_mask] = 1.2*signal_amplitude  # Mark the pass-band slightly higher than the signal amplitude
    noise_filtered = np.copy(noise_vec)  # Copy and filter the noise
    noise_filtered[np.logical_not(filter_mask)] = 0
    # Plot
    fig2 = plt.figure()
    plt.plot(freq_vec, noise_vec, label='Noise')
    plt.plot(freq_vec, noise_filtered, label='Noise (filtered)')
    plt.plot(freq_vec, signal_vec, label='Signal')
    plt.plot(freq_vec, filter_vec, '--', label='Filter')
    plt.xlabel('$f$')
    plt.ylabel('$P(f)$')
    plt.legend(loc='lower right')
    # Annotate the bandwidth. The annotation text is passed positionally;
    # the `s=` keyword alias was deprecated and removed in matplotlib 3.3.
    plt.annotate('', xy=(-1.2, 5.1), xytext=(-0.8, 5.1),
                 arrowprops=dict(arrowstyle='<->', color='k'))
    plt.annotate('', xy=(1.2, 5.1), xytext=(0.8, 5.1),
                 arrowprops=dict(arrowstyle='<->', color='k'))
    plt.text(-1.1, 5.2, r'$B_F$')
    plt.text(.9, 5.2, r'$B_F$')
    # Change the x/y ticks
    plt.xticks([-1, 0, 1], [r'$-f_0$', 0, r'$f_0$'])
    plt.yticks([noise_pwr, signal_amplitude], [r'$N_0/2$', r'$S/2$'])
    plt.xlim([freq_vec[0], freq_vec[-1]])
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig2.svg')
        plt.savefig(prefix + 'fig2.png')
    return fig2
def make_figure_3(prefix=None):
    """
    Figure 3 - CW Detection PFA vs. Threshold
    Ported from MATLAB Code
    :param prefix: output path prefix for the generated figure; not saved if None
    :return: figure handle
    """
    sample_counts = np.array([1, 10, 100])
    threshold_db = np.arange(start=-10, step=.1, stop=30.1)
    threshold_lin = db_to_lin(threshold_db)
    # P_FA is the complementary CDF ("survival function") of a chi-squared
    # variable with 2M degrees of freedom, evaluated at the threshold
    prob_fa = stats.chi2.sf(x=threshold_lin[:, np.newaxis],
                            df=2 * sample_counts[np.newaxis, :])
    fig3 = plt.figure()
    for col, num_samples in enumerate(sample_counts):
        plt.semilogy(threshold_db, prob_fa[:, col],
                     label='M = {}'.format(num_samples))
    plt.legend(loc='lower left')
    plt.xlabel(r'$\eta [dB]$')
    plt.ylabel('$P_{FA}$')
    plt.ylim([1e-6, 1.1])
    plt.xlim([threshold_db[0], threshold_db[-1]])
    if prefix is not None:
        for ext in ('svg', 'png'):
            plt.savefig('{}fig3.{}'.format(prefix, ext))
    return fig3
def make_figure_4(prefix=None):
    """
    Figure 4, PD vs. SNR for CW Detection
    Ported from MATLAB Code
    :param prefix: output path prefix for the generated figure; not saved if None
    :return: figure handle
    """
    prob_fa = 1e-6
    sample_counts = np.array([1, 10, 100, 1000])[np.newaxis, :]
    snr_db = np.arange(start=-20, step=.1, stop=20.1)[:, np.newaxis]
    snr_lin = db_to_lin(snr_db)
    # Threshold that yields the desired false-alarm probability
    eta = stats.chi2.ppf(q=1 - prob_fa, df=2 * sample_counts)
    # Non-centrality parameter (lambda) of the chi-squared statistic per sample
    chi_lambda = 2 * snr_lin
    prob_det = 1 - stats.ncx2.cdf(x=eta, df=2 * sample_counts,
                                  nc=sample_counts * chi_lambda)
    fig4 = plt.figure()
    for col, num_samples in enumerate(sample_counts[0, :]):
        plt.plot(snr_db, prob_det[:, col], label='M = {}'.format(num_samples))
    plt.legend(loc='upper left')
    plt.xlabel(r'$\xi$ [dB]')
    plt.ylabel('$P_D$')
    if prefix is not None:
        for ext in ('svg', 'png'):
            plt.savefig('{}fig4.{}'.format(prefix, ext))
    return fig4
def make_figure_7(prefix=None):
    """
    Figure 7, Atmospheric Loss Table
    Ported from MATLAB Code
    Sweeps 1-100 GHz and plots atmospheric attenuation over a 1 km gas path
    for several reference altitudes (sea level through 40 kft).
    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    range_m = 1.0e3  # set ref distance to 1 km
    freq_vec = np.arange(start=1.e9, step=50.e6, stop=100.e9+50.e6)
    # Reference Atmosphere
    # -- Sea Level, 10 kft, 20 kft, 30 kft, 40 kft
    alt_kft = np.array([0., 10., 20., 30., 40.])
    # Legacy MATLAB reference values (temperature/pressure/water vapor),
    # kept for documentation only:
    # T = [15, -4.8, -24.6, -44.4, -56.6];
    # P = [101325, 69680, 46560, 30090,18750];
    # g = [7.5,2.01,0.34,.05,.01];
    # rows: frequency bins, cols: altitudes
    loss_atm = np.zeros(shape=(np.size(freq_vec), np.size(alt_kft)))
    for idx_alt, this_alt in enumerate(alt_kft):
        # Create atmosphere for this altitude band
        this_alt_m = kft_to_km(this_alt) * 1.0e3  # kft -> km -> meters
        atmosphere = atm.reference.get_standard_atmosphere(this_alt_m)
        # NOTE(review): assumes calc_atm_loss returns dB of loss over the
        # 1 km gas path (i.e. dB/km here) -- confirm against atm.model.
        loss_atm[:, idx_alt] = atm.model.calc_atm_loss(freq_vec, gas_path_len_m=range_m, atmosphere=atmosphere)
    # Generate plot
    fig7 = plt.figure()
    for idx_alt, this_alt in enumerate(alt_kft):
        plt.semilogy(freq_vec/1e9, loss_atm[:, idx_alt], label='Alt = {} kft'.format(this_alt))
    plt.legend(loc='upper left')
    plt.xlabel('Frequency [GHz]')
    plt.ylabel('Specific Attenuation [dB/km]')
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig7.svg')
        plt.savefig(prefix + 'fig7.png')
    return fig7
def make_figure_8(prefix=None):
    """
    Figures 8, FM Reception Power vs. Range
    Ported from MATLAB Code
    Models an FM broadcast link (100 MHz, 47 dBW EIRP) and plots received
    power vs. range along with the minimum detectable signal (MDS) line.
    :param prefix: output directory to place generated figure
    :return: figure handle
    """
    # Figure 8 : SNR vs. range
    # Set up RF environment
    ht = 100  # transmitter height -- presumably meters; confirm with prop.model
    hr = 2    # receiver height -- presumably meters
    range_vec = np.arange(start=10.0e3, step=10.0e3, stop=510.0e3)
    f0 = 100e6
    # Compute Losses and Fresnel Zone
    # Lfspl = prop.model.get_free_space_path_loss(R=range_vec, f0=f0, include_atm_loss=False)
    # Ltworay = prop.model.get_tworay_path_loss(R=range_vec, f0=f0, ht=ht, hr=hr, includeAtmLoss=False)
    loss_prop = prop.model.get_path_loss(range_m=range_vec, freq_hz=f0, tx_ht_m=ht, rx_ht_m=hr, include_atm_loss=False)
    # Noise Power
    bandwidth = 2e6  # channel bandwidth [Hz]
    noise_figure = 5  # noise figure [dB]
    noise_pwr = lin_to_db(utils.constants.kT*bandwidth)+noise_figure
    # Signal Power
    eirp = 47  # dBW
    rx_gain = 0  # Receive antenna gain
    rx_loss = 0
    # Received Power and SNR (all terms in dB, so budget is additive)
    signal_pwr = eirp-loss_prop+rx_gain-rx_loss
    snr_min = 3.65  # required SNR [dB] -- NOTE(review): origin of 3.65 not shown here
    signal_pwr_min = noise_pwr+snr_min  # minimum detectable signal [dBW]
    snr0 = eirp+rx_gain-rx_loss-noise_pwr  # snr with no propagation loss
    range_max = detector.squareLaw.max_range(prob_fa=1e-6, prob_d=.5, num_samples=10, f0=f0, ht=ht, hr=hr,
                                             snr0=snr0, include_atm_loss=False)
    print('Max Range: {} m'.format(range_max))
    fig8 = plt.figure()
    plt.plot(range_vec/1e3, signal_pwr, label='$P_R$')
    plt.plot(range_vec/1e3, signal_pwr_min*np.ones_like(range_vec), linestyle=':', label='MDS')
    plt.legend(loc='upper right')
    plt.xlabel('Range [km]')
    plt.ylabel('Received Power [dBW]')
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig8.svg')
        plt.savefig(prefix + 'fig8.png')
    return fig8
def make_figure_9(prefix=None, rng=None, colors=None):
    """
    Figures 9, Example 3.1 Monte Carlo Results
    Ported from MATLAB Code
    :param prefix: output path prefix for the generated figure; not saved if None
    :param rng: random number generator
    :param colors: colormap for plotting
    :return: figure handle
    """
    from examples import chapter3

    # The figure itself is produced by the chapter 3 example code
    fig9 = chapter3.example1(rng, colors)
    if prefix is not None:
        for ext in ('svg', 'png'):
            plt.savefig('{}fig9.{}'.format(prefix, ext))
    return fig9
def make_figure_10(prefix=None, rng=None, colors=None):
    """
    Figures 10, Example 3.2 Monte Carlo results
    Ported from MATLAB Code
    :param prefix: output directory to place generated figure
    :param rng: random number generator
    :param colors: colormap for plotting
    :return: figure handle
    """
    # Figure 10, Monte Carlo Results
    from examples import chapter3
    fig10 = chapter3.example2(rng, colors)
    # Save figure
    if prefix is not None:
        plt.savefig(prefix + 'fig10.svg')
        plt.savefig(prefix + 'fig10.png')
    # Stray table residue ("| 0.84... | 0.56... |") removed from the return
    # line -- it broke the file's syntax.
    return fig10
"""Merge glyphs from several source fonts into one output font.

The merge plan is described by a JSON/YAML config (see --config); the
script copies each requested glyph from its source font into a new
UnicodeFull-encoded font and writes it to the path given by -o.
"""
import sys
from sys import stderr
import argparse
import yaml
import fontforge

parser = argparse.ArgumentParser(description='Merges glyphs from '
                                 'several fonts, as specified in config.')
parser.add_argument('-c', '--config', type=str, required=False,
                    help='Config file in json or yml format. If missed, then '
                         'loaded from stdin. '
                         'example: ../config.json')
parser.add_argument('-o', '--dst_font', type=str, required=True,
                    help='Output font')
args = parser.parse_args()

if args.config is not None:
    try:
        unparsed_config = open(args.config, 'r')
    except IOError as err:
        # Python 3: tuple-unpacking 'except' targets ("as (errno, strerror)")
        # are a syntax error; read the fields off the exception instead.
        stderr.write("Cannot open %s: %s\n" % (args.config, err.strerror))
        sys.exit(1)
else:
    unparsed_config = sys.stdin

try:
    # The YAML parser understands both JSON and YAML input.
    # safe_load avoids arbitrary object construction from the config and
    # also works with PyYAML >= 6, where plain load() requires a Loader.
    config = yaml.safe_load(unparsed_config)
except yaml.YAMLError as e:  # Python-3 'as' form (was "except E, e")
    config_file_name = '' if args.config is None else args.config
    if hasattr(e, 'problem_mark'):
        mark = e.problem_mark
        stderr.write("YAML parser error in config %s at line %d, col %d\n" %
                     (config_file_name, mark.line + 1, mark.column + 1))
    else:
        stderr.write("YAML parser error in config %s: %s\n" % (config_file_name, e))
    sys.exit(1)

# init new font
new_font = fontforge.font()
new_font.encoding = 'UnicodeFull'

# load font properties from config (items() replaces Python-2 iteritems())
for key, value in config['font'].items():
    setattr(new_font, key, value)

try:
    # read source fonts
    src_fonts = {}
    for name, path in config['src_fonts'].items():
        src_fonts[name] = fontforge.open(path)
except Exception:  # narrower than bare 'except:'; keeps Ctrl-C working
    stderr.write("Error: fontforge can't open source font from %s" % path)
    sys.exit(1)

# prepare config to view:
# [(from_code1, to_code1, src), (from_code2, to_code2, src), ...]
remap_config = [(glyph.get('from', glyph['code']),
                 glyph['code'], glyph['src'])
                for glyph in config['glyphs']]

for from_code, to_code, src in remap_config:
    try:
        src_fonts[src][from_code]
    except TypeError:
        stderr.write("Warning: no such glyph in the source font (code=0x%04x)\n" %
                     from_code)
        continue
    src_fonts[src].selection.select(("unicode",), from_code)
    src_fonts[src].copy()
    new_font.selection.select(("unicode",), to_code)
    new_font.paste()

try:
    new_font.generate(args.dst_font)
except Exception:
    stderr.write("Cannot write to file %s\n" % args.dst_font)
    sys.exit(1)
"""Merge glyphs from several source fonts into one output font.

The merge plan is described by a JSON/YAML config (see --config); the
script copies each requested glyph from its source font into a new
UnicodeFull-encoded font and writes it to the path given by -o.
"""
import sys
from sys import stderr
import argparse
import yaml
import fontforge

parser = argparse.ArgumentParser(description='Merges glyphs from '
                                 'several fonts, as specified in config.')
parser.add_argument('-c', '--config', type=str, required=False,
                    help='Config file in json or yml format. If missed, then '
                         'loaded from stdin. '
                         'example: ../config.json')
parser.add_argument('-o', '--dst_font', type=str, required=True,
                    help='Output font')
args = parser.parse_args()

if args.config is not None:
    try:
        unparsed_config = open(args.config, 'r')
    except IOError as err:
        # Python 3: tuple-unpacking 'except' targets ("as (errno, strerror)")
        # are a syntax error; read the fields off the exception instead.
        stderr.write("Cannot open %s: %s\n" % (args.config, err.strerror))
        sys.exit(1)
else:
    unparsed_config = sys.stdin

try:
    # The YAML parser understands both JSON and YAML input.
    # safe_load avoids arbitrary object construction from the config and
    # also works with PyYAML >= 6, where plain load() requires a Loader.
    config = yaml.safe_load(unparsed_config)
except yaml.YAMLError as e:  # Python-3 'as' form (was "except E, e")
    config_file_name = '' if args.config is None else args.config
    if hasattr(e, 'problem_mark'):
        mark = e.problem_mark
        stderr.write("YAML parser error in config %s at line %d, col %d\n" %
                     (config_file_name, mark.line + 1, mark.column + 1))
    else:
        stderr.write("YAML parser error in config %s: %s\n" % (config_file_name, e))
    sys.exit(1)

# init new font
new_font = fontforge.font()
new_font.encoding = 'UnicodeFull'

# load font properties from config (items() replaces Python-2 iteritems())
for key, value in config['font'].items():
    setattr(new_font, key, value)

try:
    # read source fonts
    src_fonts = {}
    for name, path in config['src_fonts'].items():
        src_fonts[name] = fontforge.open(path)
except Exception:  # narrower than bare 'except:'; keeps Ctrl-C working
    stderr.write("Error: fontforge can't open source font from %s" % path)
    sys.exit(1)

# prepare config to view:
# [(from_code1, to_code1, src), (from_code2, to_code2, src), ...]
remap_config = [(glyph.get('from', glyph['code']),
                 glyph['code'], glyph['src'])
                for glyph in config['glyphs']]

for from_code, to_code, src in remap_config:
    try:
        src_fonts[src][from_code]
    except TypeError:
        stderr.write("Warning: no such glyph in the source font (code=0x%04x)\n" %
                     from_code)
        continue
    src_fonts[src].selection.select(("unicode",), from_code)
    src_fonts[src].copy()
    new_font.selection.select(("unicode",), to_code)
    new_font.paste()

try:
    new_font.generate(args.dst_font)
except Exception:
    stderr.write("Cannot write to file %s\n" % args.dst_font)
    sys.exit(1)
import json
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
DEVICE_CLASS_SPEAKER,
SUPPORT_PLAY_MEDIA,
MediaPlayerDevice,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import CONTROLLER, COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
# Service-call data keys for the custom set_configuration_string service
CONF_FULLY_SETTING = "setting"
CONF_FULLY_SETTING_VALUE = "value"
SERVICE_SET_CONFIGURATION_STRING = "set_configuration_string"
# Schema: the service targets one or more entities and carries a Fully
# Kiosk setting name plus its new string value
SET_CONFIGURATION_STRING_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Required(CONF_FULLY_SETTING): cv.string,
        vol.Required(CONF_FULLY_SETTING_VALUE): cv.string,
    }
)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Fully Kiosk Browser media player."""
    # Shared coordinator/controller were stored by the integration setup
    coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
    controller = hass.data[DOMAIN][config_entry.entry_id][CONTROLLER]
    async_add_entities([FullyMediaPlayer(coordinator, controller)], False)
    async def set_configuration_string(call) -> None:
        """Call set string config handler."""
        # NOTE(review): async_handle_set_configuration_string_service is only
        # defined as a method on FullyMediaPlayer, not in this module scope --
        # invoking the service looks like it would raise NameError; verify
        # the intended dispatch to the targeted entities.
        await async_handle_set_configuration_string_service(call)
    hass.services.async_register(
        DOMAIN,
        SERVICE_SET_CONFIGURATION_STRING,
        set_configuration_string,
        schema=SET_CONFIGURATION_STRING_SCHEMA,
    )
class FullyMediaPlayer(MediaPlayerDevice):
    """Media player entity backed by the Fully Kiosk Browser REST API.

    (Dataset-table residue removed from the final line of this class; it
    broke the file's syntax.)
    """

    def __init__(self, coordinator, controller):
        """Initialize from the shared data coordinator and REST controller."""
        self._name = f"{coordinator.data['deviceName']} Media Player"
        self.coordinator = coordinator
        self.controller = controller
        self._unique_id = f"{coordinator.data['deviceID']}-mediaplayer"

    @property
    def name(self):
        """Return the entity display name."""
        return self._name

    @property
    def supported_features(self):
        """Only media playback (play_media -> playSound) is supported."""
        return SUPPORT_PLAY_MEDIA

    @property
    def device_info(self):
        """Return device-registry info taken from the coordinator data."""
        return {
            "identifiers": {(DOMAIN, self.coordinator.data["deviceID"])},
            "name": self.coordinator.data["deviceName"],
            "manufacturer": self.coordinator.data["deviceManufacturer"],
            "model": self.coordinator.data["deviceModel"],
            "sw_version": self.coordinator.data["appVersionName"],
        }

    @property
    def unique_id(self):
        """Return a stable unique ID derived from the device ID."""
        return self._unique_id

    def play_media(self, media_type, media_id, **kwargs):
        """Play a sound by URL/path via the Fully Kiosk REST API."""
        self.controller.playSound(media_id)

    async def async_added_to_hass(self):
        """Connect to dispatcher listening for entity data notifications."""
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )

    async def async_update(self):
        """Update Fully Kiosk Browser entity."""
        await self.coordinator.async_request_refresh()

    async def async_handle_set_configuration_string_service(self, call):
        """Handle configuration string call.

        NOTE(review): nothing in this module routes the registered service
        to this method -- verify the service wiring in async_setup_entry.
        """
        self.controller.setConfigurationString(
            call.data[CONF_FULLY_SETTING], call.data[CONF_FULLY_SETTING_VALUE]
        )
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
DEVICE_CLASS_SPEAKER,
SUPPORT_PLAY_MEDIA,
MediaPlayerDevice,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import CONTROLLER, COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
# Service-call data keys for the custom set_configuration_string service
CONF_FULLY_SETTING = "setting"
CONF_FULLY_SETTING_VALUE = "value"
SERVICE_SET_CONFIGURATION_STRING = "set_configuration_string"
# Schema: the service targets one or more entities and carries a Fully
# Kiosk setting name plus its new string value
SET_CONFIGURATION_STRING_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Required(CONF_FULLY_SETTING): cv.string,
        vol.Required(CONF_FULLY_SETTING_VALUE): cv.string,
    }
)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Fully Kiosk Browser media player."""
    # Shared coordinator/controller were stored by the integration setup
    coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
    controller = hass.data[DOMAIN][config_entry.entry_id][CONTROLLER]
    async_add_entities([FullyMediaPlayer(coordinator, controller)], False)
    async def set_configuration_string(call) -> None:
        """Call set string config handler."""
        # NOTE(review): async_handle_set_configuration_string_service is only
        # defined as a method on FullyMediaPlayer, not in this module scope --
        # invoking the service looks like it would raise NameError; verify
        # the intended dispatch to the targeted entities.
        await async_handle_set_configuration_string_service(call)
    hass.services.async_register(
        DOMAIN,
        SERVICE_SET_CONFIGURATION_STRING,
        set_configuration_string,
        schema=SET_CONFIGURATION_STRING_SCHEMA,
    )
class FullyMediaPlayer(MediaPlayerDevice):
    """Media player entity backed by the Fully Kiosk Browser REST API.

    (Dataset-table residue removed from the final line of this class; it
    broke the file's syntax.)
    """

    def __init__(self, coordinator, controller):
        """Initialize from the shared data coordinator and REST controller."""
        self._name = f"{coordinator.data['deviceName']} Media Player"
        self.coordinator = coordinator
        self.controller = controller
        self._unique_id = f"{coordinator.data['deviceID']}-mediaplayer"

    @property
    def name(self):
        """Return the entity display name."""
        return self._name

    @property
    def supported_features(self):
        """Only media playback (play_media -> playSound) is supported."""
        return SUPPORT_PLAY_MEDIA

    @property
    def device_info(self):
        """Return device-registry info taken from the coordinator data."""
        return {
            "identifiers": {(DOMAIN, self.coordinator.data["deviceID"])},
            "name": self.coordinator.data["deviceName"],
            "manufacturer": self.coordinator.data["deviceManufacturer"],
            "model": self.coordinator.data["deviceModel"],
            "sw_version": self.coordinator.data["appVersionName"],
        }

    @property
    def unique_id(self):
        """Return a stable unique ID derived from the device ID."""
        return self._unique_id

    def play_media(self, media_type, media_id, **kwargs):
        """Play a sound by URL/path via the Fully Kiosk REST API."""
        self.controller.playSound(media_id)

    async def async_added_to_hass(self):
        """Connect to dispatcher listening for entity data notifications."""
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )

    async def async_update(self):
        """Update Fully Kiosk Browser entity."""
        await self.coordinator.async_request_refresh()

    async def async_handle_set_configuration_string_service(self, call):
        """Handle configuration string call.

        NOTE(review): nothing in this module routes the registered service
        to this method -- verify the service wiring in async_setup_entry.
        """
        self.controller.setConfigurationString(
            call.data[CONF_FULLY_SETTING], call.data[CONF_FULLY_SETTING_VALUE]
        )
import cv2
from __init__ import Square, predictor, detector, vfps, size
from facefrontal import facefrontal, warp_mapping
import numpy as np
# Frontalized-face layout: the detector crop (detw x detw) sits centered in
# the frontalized image with a padw-pixel border on every side.
padw = 95
detw = 130
def getGaussianPyr(img, layers):
    """Return the Gaussian pyramid [G0, G1, ..., G_layers] of img.

    G0 is a float64 copy of img; each subsequent level is pyrDown of the
    previous one.
    """
    pyramid = [img.astype(np.float64)]
    for _ in range(layers):
        pyramid.append(cv2.pyrDown(pyramid[-1]))
    return pyramid
def getLaplacianPyr(Gaupyr):
    """Build the Laplacian pyramid [L0, ..., L_{n-2}] from a Gaussian pyramid.

    L_i = G_i - resize(pyrUp(G_{i+1})); the resize guards against odd
    dimensions where pyrUp's output differs from G_i by one pixel.
    """
    pyramid = []
    for lvl in range(len(Gaupyr) - 1):
        fine = Gaupyr[lvl]
        upsampled = cv2.pyrUp(Gaupyr[lvl + 1])
        upsampled = cv2.resize(upsampled, fine.shape[:2][::-1])
        pyramid.append(fine - upsampled)
    return pyramid
def reconstruct(G, Lappyr):
    """Collapse a Laplacian pyramid onto coarsest level G; return uint8 image."""
    for lap in reversed(Lappyr):
        G = cv2.pyrUp(G)
        G = cv2.resize(G, lap.shape[:2][::-1])
        G = G + lap
    return G.astype(np.uint8)
def pyramid_blend(img1, img2, mask_, layers=4):
    """Blend img1 and img2 with Laplacian-pyramid (multi-band) blending.

    :param img1: first image; contributes where the mask is high
    :param img2: second image (same shape); contributes where the mask is low
    :param mask_: 2-D weight map (any positive scale; normalized internally)
    :param layers: number of pyramid levels
    :return: blended uint8 image
    """
    assert(img1.shape == img2.shape and img1.shape[:2] == mask_.shape)
    mask = mask_ / np.max(mask_)  # 0 ~ 1
    # construct Gaussian pyramids of input images
    Gaupyr1 = getGaussianPyr(img1, layers+1)
    Gaupyr2 = getGaussianPyr(img2, layers+1)
    Gaupyrm = getGaussianPyr(mask, layers+1)
    # construct Laplacian pyramids of input images
    Lappyr1 = getLaplacianPyr(Gaupyr1)
    Lappyr2 = getLaplacianPyr(Gaupyr2)
    # blend pyramids in every layer: each Laplacian level is weighted by the
    # Gaussian of the mask at the same level (img1 by mask, img2 by 1-mask)
    Gaupyrm1 = Gaupyrm[:-1]
    Gaupyrm2 = [1-msk for msk in Gaupyrm1]
    BLappyr1 = [lap * msk[:, :, np.newaxis] for lap, msk in zip(Lappyr1, Gaupyrm1)]
    BLappyr2 = [lap * msk[:, :, np.newaxis] for lap, msk in zip(Lappyr2, Gaupyrm2)]
    BLappyr = [lap1 + lap2 for lap1, lap2 in zip(BLappyr1, BLappyr2)]
    # the coarsest level is blended directly from the Gaussian tops
    initG = Gaupyr1[-1] * Gaupyrm[-1][:, :, np.newaxis] + Gaupyr2[-1] * (1-Gaupyrm[-1])[:, :, np.newaxis]
    # collapse pyramids and form the blended image
    img = reconstruct(initG, BLappyr)
    return img
def getindices(ftl_face, sq, padw=padw, detw=detw):
    """Return (x, y) indices of frontal-face pixels inside the lower-face
    square, strictly between the nose line and the chin line.

    :param ftl_face: frontalized face image (square)
    :param sq: Square describing the lower-face region (relative coords)
    :param padw: border width around the detector crop
    :param detw: side length of the detector crop
    :return: (N, 2) integer array of (x, y) indices to blend
    """
    # get mask region using boundary, chin landmarks and nose landmarks
    # boundary region: left -> right, upper -> lower
    WH = ftl_face.shape[0]
    boundary = sq.align(detw)
    left, right, upper, lower = np.array(boundary) + padw
    indices = np.array([(x, y) for x in range(left, right) for y in range(upper, lower)])
    # get landmarks of frontalized face
    det = detector(ftl_face, 1)[0]
    shape = predictor(ftl_face, det)
    ldmk = np.asarray([(shape.part(n).x, shape.part(n).y,) for n in range(shape.num_parts)], np.float32)
    # Landmark slices 3:14 (jaw/chin) and 31:36 (lower nose) presumably
    # follow the dlib 68-point convention -- verify the predictor model.
    chin_xp, chin_fp = ldmk[ 3:14, 0], ldmk[ 3:14, 1]
    chin_line = np.interp(np.arange(WH), chin_xp, chin_fp)
    nose_xp, nose_fp = ldmk[31:36, 0], ldmk[31:36, 1]
    nose_line = np.interp(np.arange(WH), nose_xp, nose_fp)
    # filter the position which is out of chin line and nose line
    # (image y grows downward, so "below the nose" means y > nose_line)
    check = np.logical_and(indices[:, 1] < chin_line[indices[:, 0]],
                           indices[:, 1] > nose_line[indices[:, 0]])
    return indices[check.nonzero()]
def align2target(syntxtr, tar_shape, sq, padw=padw, detw=detw):
    """Embed the synthesized lower-face patch into a frame of tar_shape.

    Layout of the frontalized frame:
        |padw|    detw   |padw|
        |----|-----------|----|
        |            padw
        |    -----------
        |    | detector |
        |    |   crop   | detw
        |    -----------
        |            padw
    The patch is first placed into its square at the position given by sq,
    resized to the detector-crop size, then centered with a padw border.
    """
    rsize = sq.getrsize(syntxtr.shape)
    canvas = np.zeros((rsize, rsize, syntxtr.shape[2]), dtype=np.uint8)
    left, right, upper, lower = sq.align(rsize)
    canvas[upper:lower, left:right, :] = syntxtr
    patch = cv2.resize(canvas, (detw, detw))
    aligned = np.zeros(tar_shape, dtype=np.uint8)
    aligned[padw:padw + detw, padw:padw + detw, :] = patch
    return aligned
def recalc_pixel(pt, coords, pixels, thr=5, sigma=0.2):
    """Gaussian-weighted average of the pixels whose coordinates lie within
    thr of pt.

    :param pt: (2,) target coordinate
    :param coords: (N, 2) coordinates of known pixels
    :param pixels: (N, C) pixel values corresponding to coords
    :param thr: neighborhood radius in pixels
    :param sigma: Gaussian kernel width for the distance weighting
    :return: (C,) interpolated pixel value
    """
    dists = np.linalg.norm(coords - pt, ord=2, axis=1)
    # np.where() returns a tuple; take the index array itself so the advanced
    # indexing below yields (k, C) and the matmul a flat (C,) result.  (The
    # original indexed with the raw tuple, producing a (1, k, C) view and a
    # (1, C) result that only worked downstream via broadcasting.)
    near = np.where(dists <= thr)[0]
    weights = np.exp(-dists[near] ** 2 / (2 * sigma ** 2))
    weights /= np.sum(weights)  # normalize; NaN if no neighbor within thr
    return np.matmul(weights, pixels[near, :])
def warpback(face, tarfr, tarldmk, indices, projM, transM):
    """Warp frontal-face pixels back to the original pose and blend them
    into the target frame.

    :param face: frontalized (synthesized) face image
    :param tarfr: original target frame
    :param tarldmk: landmarks of the target frame
    :param indices: (N, 2) frontal-face pixel indices to transfer
    :param projM: projection matrix from frontalization
    :param transM: transformation matrix from frontalization
    :return: blended frame (uint8)
    """
    # get the pixels of given indices
    pixels = face[indices[:, 1], indices[:, 0], :]  # (N, 3)
    # get the to-be-recalculated region in the original frame
    # NOTE: warp_mapping returns a new `pixels` array aligned with `coords`,
    # intentionally shadowing the one gathered above
    warp_mask, region, coords, pixels = warp_mapping(indices, pixels, tarfr, tarldmk, projM, transM)
    # do recalculation for every pixel in the region
    tmpfr = np.zeros(tarfr.shape, dtype=np.uint8)
    for pt in region:
        tmpfr[pt[1], pt[0], :] = recalc_pixel(pt, coords, pixels)
    # fill pixels outside the warp mask by inpainting, then blend the warped
    # face into the original frame with pyramid blending
    tmpfr = cv2.inpaint(tmpfr, ~warp_mask, 10, cv2.INPAINT_TELEA)
    return pyramid_blend(tmpfr, tarfr, warp_mask)
def synthesize_frame(tarfr, syntxtr, sq):
    """Blend one synthesized lower-face texture into one target frame."""
    # Frontalize the target frame, keeping projection/transform matrices
    frontal, landmarks, projM, transM = facefrontal(tarfr, detector, predictor, detail=True)
    # Place the synthesized texture into the frontalized coordinate frame
    aligned = align2target(syntxtr, frontal.shape, sq)
    # Pixels of the frontal face that should be blended into the frame
    blend_idx = getindices(frontal, sq)
    # Warp back to the original pose and blend
    return warpback(aligned, tarfr, landmarks, blend_idx, projM, transM)
def composite(inp_path, tar_path, save_path, sq):
    """Synthesize every frame of a target video with per-frame textures.

    :param inp_path: .npy file with one synthesized texture per frame
    :param tar_path: path of the target (driving) video
    :param save_path: path of the output video
    :param sq: Square describing the lower-face region
    :return: save_path
    """
    syndata = np.load(inp_path)
    cap = cv2.VideoCapture(tar_path)
    writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'DIVX'), vfps, size)
    nfr = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    assert(syndata.shape[0] == nfr)
    try:
        for i in range(nfr):
            print('%s: %04d/%04d' % (save_path, i+1, nfr))
            ret, tarfr = cap.read()
            assert(ret)
            frame = synthesize_frame(tarfr, syndata[i], sq)
            writer.write(frame)
    finally:
        # Release the capture and finalize the output container; without
        # writer.release() the written video may be truncated/corrupt.
        cap.release()
        writer.release()
    print('%s: synthesis done.' % save_path)
    return save_path
def test1():
    """Blend two images left/right with 0..5 pyramid layers and stack the
    results into one tall image for visual comparison."""
    left = cv2.imread('tmp/left.png')
    right = cv2.imread('tmp/right.png')
    # pyramid_blend asserts mask.shape == img.shape[:2], so the mask must be
    # 2-D; the original passed an (H, W, 3) mask, tripping that assert.
    mask = np.zeros(left.shape[:2])
    mask[:, :mask.shape[1]//2] = 1  # keep `left` on the left half
    n = 6
    spec = np.zeros((left.shape[0]*n, left.shape[1], left.shape[2]))
    for layers in range(n):
        blend = pyramid_blend(left, right, mask, layers)
        spec[left.shape[0]*layers:left.shape[0]*(layers+1), :, :] = blend
    cv2.imwrite('reference/blend.png', spec)
def test2():
    """Visualize a saved warp region by painting it cyan on frame 0660."""
    tarfr = cv2.imread('tmp/0660.png')
    region = np.load('tmp/region.npy')
    # region rows are (x, y); image indexing is [y, x]
    tarfr[region[:, 1], region[:, 0], :] = (255, 255, 0)
    cv2.imwrite('tmp/regiontest.png', tarfr)
def test3():
    """End-to-end check: blend synthesized texture 100 into frame 0660."""
    tarfr = cv2.imread('tmp/0660.png')
    syntxtr = cv2.imread('tmp/syn100.png')
    # Lower-face square in relative coordinates (left, right, top, bottom)
    sq = Square(0.25, 0.75, 0.6, 1.0)
    outpfr = synthesize_frame(tarfr, syntxtr, sq)
    cv2.imwrite('tmp/i100t660.png', outpfr)
if __name__ == '__main__':
    # Dataset-table residue removed from this line; run the end-to-end check.
    test3()
import cv2
from __init__ import Square, predictor, detector, vfps, size
from facefrontal import facefrontal, warp_mapping
import numpy as np
# Frontalized-face layout: the detector crop (detw x detw) sits centered in
# the frontalized image with a padw-pixel border on every side.
padw = 95
detw = 130
def getGaussianPyr(img, layers):
    """Return the Gaussian pyramid [G0, G1, ..., G_layers] of img.

    G0 is a float64 copy of img; each subsequent level is pyrDown of the
    previous one.
    """
    pyramid = [img.astype(np.float64)]
    for _ in range(layers):
        pyramid.append(cv2.pyrDown(pyramid[-1]))
    return pyramid
def getLaplacianPyr(Gaupyr):
    """Build the Laplacian pyramid [L0, ..., L_{n-2}] from a Gaussian pyramid.

    L_i = G_i - resize(pyrUp(G_{i+1})); the resize guards against odd
    dimensions where pyrUp's output differs from G_i by one pixel.
    """
    pyramid = []
    for lvl in range(len(Gaupyr) - 1):
        fine = Gaupyr[lvl]
        upsampled = cv2.pyrUp(Gaupyr[lvl + 1])
        upsampled = cv2.resize(upsampled, fine.shape[:2][::-1])
        pyramid.append(fine - upsampled)
    return pyramid
def reconstruct(G, Lappyr):
    """Collapse a Laplacian pyramid onto coarsest level G; return uint8 image."""
    for lap in reversed(Lappyr):
        G = cv2.pyrUp(G)
        G = cv2.resize(G, lap.shape[:2][::-1])
        G = G + lap
    return G.astype(np.uint8)
def pyramid_blend(img1, img2, mask_, layers=4):
    """Blend img1 and img2 with Laplacian-pyramid (multi-band) blending.

    :param img1: first image; contributes where the mask is high
    :param img2: second image (same shape); contributes where the mask is low
    :param mask_: 2-D weight map (any positive scale; normalized internally)
    :param layers: number of pyramid levels
    :return: blended uint8 image
    """
    assert(img1.shape == img2.shape and img1.shape[:2] == mask_.shape)
    mask = mask_ / np.max(mask_)  # 0 ~ 1
    # construct Gaussian pyramids of input images
    Gaupyr1 = getGaussianPyr(img1, layers+1)
    Gaupyr2 = getGaussianPyr(img2, layers+1)
    Gaupyrm = getGaussianPyr(mask, layers+1)
    # construct Laplacian pyramids of input images
    Lappyr1 = getLaplacianPyr(Gaupyr1)
    Lappyr2 = getLaplacianPyr(Gaupyr2)
    # blend pyramids in every layer: each Laplacian level is weighted by the
    # Gaussian of the mask at the same level (img1 by mask, img2 by 1-mask)
    Gaupyrm1 = Gaupyrm[:-1]
    Gaupyrm2 = [1-msk for msk in Gaupyrm1]
    BLappyr1 = [lap * msk[:, :, np.newaxis] for lap, msk in zip(Lappyr1, Gaupyrm1)]
    BLappyr2 = [lap * msk[:, :, np.newaxis] for lap, msk in zip(Lappyr2, Gaupyrm2)]
    BLappyr = [lap1 + lap2 for lap1, lap2 in zip(BLappyr1, BLappyr2)]
    # the coarsest level is blended directly from the Gaussian tops
    initG = Gaupyr1[-1] * Gaupyrm[-1][:, :, np.newaxis] + Gaupyr2[-1] * (1-Gaupyrm[-1])[:, :, np.newaxis]
    # collapse pyramids and form the blended image
    img = reconstruct(initG, BLappyr)
    return img
def getindices(ftl_face, sq, padw=padw, detw=detw):
    """Return (x, y) indices of frontal-face pixels inside the lower-face
    square, strictly between the nose line and the chin line.

    :param ftl_face: frontalized face image (square)
    :param sq: Square describing the lower-face region (relative coords)
    :param padw: border width around the detector crop
    :param detw: side length of the detector crop
    :return: (N, 2) integer array of (x, y) indices to blend
    """
    # get mask region using boundary, chin landmarks and nose landmarks
    # boundary region: left -> right, upper -> lower
    WH = ftl_face.shape[0]
    boundary = sq.align(detw)
    left, right, upper, lower = np.array(boundary) + padw
    indices = np.array([(x, y) for x in range(left, right) for y in range(upper, lower)])
    # get landmarks of frontalized face
    det = detector(ftl_face, 1)[0]
    shape = predictor(ftl_face, det)
    ldmk = np.asarray([(shape.part(n).x, shape.part(n).y,) for n in range(shape.num_parts)], np.float32)
    # Landmark slices 3:14 (jaw/chin) and 31:36 (lower nose) presumably
    # follow the dlib 68-point convention -- verify the predictor model.
    chin_xp, chin_fp = ldmk[ 3:14, 0], ldmk[ 3:14, 1]
    chin_line = np.interp(np.arange(WH), chin_xp, chin_fp)
    nose_xp, nose_fp = ldmk[31:36, 0], ldmk[31:36, 1]
    nose_line = np.interp(np.arange(WH), nose_xp, nose_fp)
    # filter the position which is out of chin line and nose line
    # (image y grows downward, so "below the nose" means y > nose_line)
    check = np.logical_and(indices[:, 1] < chin_line[indices[:, 0]],
                           indices[:, 1] > nose_line[indices[:, 0]])
    return indices[check.nonzero()]
def align2target(syntxtr, tar_shape, sq, padw=padw, detw=detw):
    """Embed the synthesized lower-face patch into a frame of tar_shape.

    The patch is first placed into its square at the position given by sq,
    resized to the detector-crop size (detw x detw), then centered in the
    output with a padw border on every side.
    """
    rsize = sq.getrsize(syntxtr.shape)
    canvas = np.zeros((rsize, rsize, syntxtr.shape[2]), dtype=np.uint8)
    left, right, upper, lower = sq.align(rsize)
    canvas[upper:lower, left:right, :] = syntxtr
    patch = cv2.resize(canvas, (detw, detw))
    aligned = np.zeros(tar_shape, dtype=np.uint8)
    aligned[padw:padw + detw, padw:padw + detw, :] = patch
    return aligned
def recalc_pixel(pt, coords, pixels, thr=5, sigma=0.2):
    """Gaussian-weighted average of the pixels whose coordinates lie within
    thr of pt.

    :param pt: (2,) target coordinate
    :param coords: (N, 2) coordinates of known pixels
    :param pixels: (N, C) pixel values corresponding to coords
    :param thr: neighborhood radius in pixels
    :param sigma: Gaussian kernel width for the distance weighting
    :return: (C,) interpolated pixel value
    """
    dists = np.linalg.norm(coords - pt, ord=2, axis=1)
    # np.where() returns a tuple; take the index array itself so the advanced
    # indexing below yields (k, C) and the matmul a flat (C,) result.  (The
    # original indexed with the raw tuple, producing a (1, k, C) view and a
    # (1, C) result that only worked downstream via broadcasting.)
    near = np.where(dists <= thr)[0]
    weights = np.exp(-dists[near] ** 2 / (2 * sigma ** 2))
    weights /= np.sum(weights)  # normalize; NaN if no neighbor within thr
    return np.matmul(weights, pixels[near, :])
def warpback(face, tarfr, tarldmk, indices, projM, transM):
    """Warp frontal-face pixels back to the original pose and blend them
    into the target frame.

    :param face: frontalized (synthesized) face image
    :param tarfr: original target frame
    :param tarldmk: landmarks of the target frame
    :param indices: (N, 2) frontal-face pixel indices to transfer
    :param projM: projection matrix from frontalization
    :param transM: transformation matrix from frontalization
    :return: blended frame (uint8)
    """
    # get the pixels of given indices
    pixels = face[indices[:, 1], indices[:, 0], :]  # (N, 3)
    # get the to-be-recalculated region in the original frame
    # NOTE: warp_mapping returns a new `pixels` array aligned with `coords`,
    # intentionally shadowing the one gathered above
    warp_mask, region, coords, pixels = warp_mapping(indices, pixels, tarfr, tarldmk, projM, transM)
    # do recalculation for every pixel in the region
    tmpfr = np.zeros(tarfr.shape, dtype=np.uint8)
    for pt in region:
        tmpfr[pt[1], pt[0], :] = recalc_pixel(pt, coords, pixels)
    # fill pixels outside the warp mask by inpainting, then blend the warped
    # face into the original frame with pyramid blending
    tmpfr = cv2.inpaint(tmpfr, ~warp_mask, 10, cv2.INPAINT_TELEA)
    return pyramid_blend(tmpfr, tarfr, warp_mask)
def synthesize_frame(tarfr, syntxtr, sq):
    """Blend one synthesized lower-face texture into one target frame."""
    # Frontalize the target frame, keeping projection/transform matrices
    frontal, landmarks, projM, transM = facefrontal(tarfr, detector, predictor, detail=True)
    # Place the synthesized texture into the frontalized coordinate frame
    aligned = align2target(syntxtr, frontal.shape, sq)
    # Pixels of the frontal face that should be blended into the frame
    blend_idx = getindices(frontal, sq)
    # Warp back to the original pose and blend
    return warpback(aligned, tarfr, landmarks, blend_idx, projM, transM)
def composite(inp_path, tar_path, save_path, sq):
    """Synthesize every frame of a target video with per-frame textures.

    :param inp_path: .npy file with one synthesized texture per frame
    :param tar_path: path of the target (driving) video
    :param save_path: path of the output video
    :param sq: Square describing the lower-face region
    :return: save_path
    """
    syndata = np.load(inp_path)
    cap = cv2.VideoCapture(tar_path)
    writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'DIVX'), vfps, size)
    nfr = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    assert(syndata.shape[0] == nfr)
    try:
        for i in range(nfr):
            print('%s: %04d/%04d' % (save_path, i+1, nfr))
            ret, tarfr = cap.read()
            assert(ret)
            frame = synthesize_frame(tarfr, syndata[i], sq)
            writer.write(frame)
    finally:
        # Release the capture and finalize the output container; without
        # writer.release() the written video may be truncated/corrupt.
        cap.release()
        writer.release()
    print('%s: synthesis done.' % save_path)
    return save_path
def test1():
    """Blend two images left/right with 0..5 pyramid layers and stack the
    results into one tall image for visual comparison."""
    left = cv2.imread('tmp/left.png')
    right = cv2.imread('tmp/right.png')
    # pyramid_blend asserts mask.shape == img.shape[:2], so the mask must be
    # 2-D; the original passed an (H, W, 3) mask, tripping that assert.
    mask = np.zeros(left.shape[:2])
    mask[:, :mask.shape[1]//2] = 1  # keep `left` on the left half
    n = 6
    spec = np.zeros((left.shape[0]*n, left.shape[1], left.shape[2]))
    for layers in range(n):
        blend = pyramid_blend(left, right, mask, layers)
        spec[left.shape[0]*layers:left.shape[0]*(layers+1), :, :] = blend
    cv2.imwrite('reference/blend.png', spec)
def test2():
    """Visualize a saved warp region by painting it cyan on frame 0660."""
    tarfr = cv2.imread('tmp/0660.png')
    region = np.load('tmp/region.npy')
    # region rows are (x, y); image indexing is [y, x]
    tarfr[region[:, 1], region[:, 0], :] = (255, 255, 0)
    cv2.imwrite('tmp/regiontest.png', tarfr)
def test3():
    """End-to-end check: blend synthesized texture 100 into frame 0660."""
    tarfr = cv2.imread('tmp/0660.png')
    syntxtr = cv2.imread('tmp/syn100.png')
    # Lower-face square in relative coordinates (left, right, top, bottom)
    sq = Square(0.25, 0.75, 0.6, 1.0)
    outpfr = synthesize_frame(tarfr, syntxtr, sq)
    cv2.imwrite('tmp/i100t660.png', outpfr)
if __name__ == '__main__':
    # Dataset-table residue removed from this line; run the end-to-end check.
    test3()
import numpy as np
import torch
class ReplayBuffer(object):
    """Buffer to store environment transitions.

    Fixed-capacity ring buffer: once `capacity` transitions have been
    written, new entries overwrite the oldest ones.  (Dataset-table residue
    removed from the final line of this class; it broke the file's syntax.)
    """

    def __init__(self, obs_shape, t_obs_shape, action_shape, capacity, device):
        """Pre-allocate storage arrays.

        :param obs_shape: shape of the main observation
        :param t_obs_shape: shape of the auxiliary ("t") observation
        :param action_shape: shape of the action vector
        :param capacity: maximum number of stored transitions
        :param device: torch device sampled batches are moved to
        """
        self.capacity = capacity
        self.device = device
        # the proprioceptive obs is stored as float32, pixels obs as uint8
        obs_dtype = np.float32 if len(obs_shape) == 1 else np.uint8
        self.obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
        self.t_obses = np.empty((capacity, *t_obs_shape), dtype=obs_dtype)
        self.next_obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
        self.next_t_obses = np.empty((capacity, *t_obs_shape), dtype=obs_dtype)
        self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
        self.rewards = np.empty((capacity, 1), dtype=np.float32)
        self.not_dones = np.empty((capacity, 1), dtype=np.float32)
        self.not_dones_no_max = np.empty((capacity, 1), dtype=np.float32)
        self.idx = 0  # next slot to write
        self.last_save = 0  # bookkeeping for external checkpointing code
        self.full = False  # True once the ring has wrapped at least once

    def __len__(self):
        """Number of transitions currently stored."""
        return self.capacity if self.full else self.idx

    def add(
        self, obs, t_obs, action, reward, next_obs, next_t_obs, done, done_no_max,
    ):
        """Store one transition, advancing (and possibly wrapping) the ring."""
        np.copyto(self.obses[self.idx], obs)
        np.copyto(self.t_obses[self.idx], t_obs)
        np.copyto(self.actions[self.idx], action)
        np.copyto(self.rewards[self.idx], reward)
        np.copyto(self.next_obses[self.idx], next_obs)
        np.copyto(self.next_t_obses[self.idx], next_t_obs)
        # "not done" masks stored as floats for direct use in TD targets
        np.copyto(self.not_dones[self.idx], not done)
        np.copyto(self.not_dones_no_max[self.idx], not done_no_max)
        self.idx = (self.idx + 1) % self.capacity
        self.full = self.full or self.idx == 0

    def purge_frac(self, frac=0.5):
        """Shrink the buffer to a random (1 - frac) sample of itself.

        NOTE(review): indices are drawn with np.random.randint, i.e. WITH
        replacement, so the kept block may contain duplicates; use
        np.random.choice(..., replace=False) if a proper subset is
        intended -- confirm intent before changing.
        """
        to_keep = int((1.0 - frac) * self.__len__())
        idxs = np.random.randint(0, self.__len__(), size=to_keep)
        self.obses[:to_keep] = self.obses[idxs]
        self.t_obses[:to_keep] = self.t_obses[idxs]
        self.actions[:to_keep] = self.actions[idxs]
        self.rewards[:to_keep] = self.rewards[idxs]
        self.next_obses[:to_keep] = self.next_obses[idxs]
        self.next_t_obses[:to_keep] = self.next_t_obses[idxs]
        self.not_dones[:to_keep] = self.not_dones[idxs]
        self.not_dones_no_max[:to_keep] = self.not_dones_no_max[idxs]
        self.idx = to_keep
        self.full = False

    def sample(self, batch_size):
        """Uniformly sample a batch (with replacement) as device tensors.

        :return: tuple (obses, t_obses, actions, rewards, next_obses,
                 next_t_obses, not_dones, not_dones_no_max)
        """
        idxs = np.random.randint(0, self.__len__(), size=batch_size)
        obses = torch.from_numpy(self.obses[idxs]).to(self.device)
        t_obses = torch.from_numpy(self.t_obses[idxs]).to(self.device)
        actions = torch.from_numpy(self.actions[idxs]).to(self.device)
        rewards = torch.from_numpy(self.rewards[idxs]).to(self.device)
        next_obses = torch.from_numpy(self.next_obses[idxs]).to(self.device)
        next_t_obses = torch.from_numpy(self.next_t_obses[idxs]).to(self.device)
        not_dones = torch.from_numpy(self.not_dones[idxs]).to(self.device)
        not_dones_no_max = torch.from_numpy(self.not_dones_no_max[idxs]).to(self.device)
        return (
            obses,
            t_obses,
            actions,
            rewards,
            next_obses,
            next_t_obses,
            not_dones,
            not_dones_no_max,
        )
import torch
class ReplayBuffer(object):
    """Buffer to store environment transitions.

    Fixed-capacity ring buffer: once `capacity` transitions have been
    written, new entries overwrite the oldest ones.  (Dataset-table residue
    removed from the final line of this class; it broke the file's syntax.)
    """

    def __init__(self, obs_shape, t_obs_shape, action_shape, capacity, device):
        """Pre-allocate storage arrays.

        :param obs_shape: shape of the main observation
        :param t_obs_shape: shape of the auxiliary ("t") observation
        :param action_shape: shape of the action vector
        :param capacity: maximum number of stored transitions
        :param device: torch device sampled batches are moved to
        """
        self.capacity = capacity
        self.device = device
        # the proprioceptive obs is stored as float32, pixels obs as uint8
        obs_dtype = np.float32 if len(obs_shape) == 1 else np.uint8
        self.obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
        self.t_obses = np.empty((capacity, *t_obs_shape), dtype=obs_dtype)
        self.next_obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
        self.next_t_obses = np.empty((capacity, *t_obs_shape), dtype=obs_dtype)
        self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
        self.rewards = np.empty((capacity, 1), dtype=np.float32)
        self.not_dones = np.empty((capacity, 1), dtype=np.float32)
        self.not_dones_no_max = np.empty((capacity, 1), dtype=np.float32)
        self.idx = 0  # next slot to write
        self.last_save = 0  # bookkeeping for external checkpointing code
        self.full = False  # True once the ring has wrapped at least once

    def __len__(self):
        """Number of transitions currently stored."""
        return self.capacity if self.full else self.idx

    def add(
        self, obs, t_obs, action, reward, next_obs, next_t_obs, done, done_no_max,
    ):
        """Store one transition, advancing (and possibly wrapping) the ring."""
        np.copyto(self.obses[self.idx], obs)
        np.copyto(self.t_obses[self.idx], t_obs)
        np.copyto(self.actions[self.idx], action)
        np.copyto(self.rewards[self.idx], reward)
        np.copyto(self.next_obses[self.idx], next_obs)
        np.copyto(self.next_t_obses[self.idx], next_t_obs)
        # "not done" masks stored as floats for direct use in TD targets
        np.copyto(self.not_dones[self.idx], not done)
        np.copyto(self.not_dones_no_max[self.idx], not done_no_max)
        self.idx = (self.idx + 1) % self.capacity
        self.full = self.full or self.idx == 0

    def purge_frac(self, frac=0.5):
        """Shrink the buffer to a random (1 - frac) sample of itself.

        NOTE(review): indices are drawn with np.random.randint, i.e. WITH
        replacement, so the kept block may contain duplicates; use
        np.random.choice(..., replace=False) if a proper subset is
        intended -- confirm intent before changing.
        """
        to_keep = int((1.0 - frac) * self.__len__())
        idxs = np.random.randint(0, self.__len__(), size=to_keep)
        self.obses[:to_keep] = self.obses[idxs]
        self.t_obses[:to_keep] = self.t_obses[idxs]
        self.actions[:to_keep] = self.actions[idxs]
        self.rewards[:to_keep] = self.rewards[idxs]
        self.next_obses[:to_keep] = self.next_obses[idxs]
        self.next_t_obses[:to_keep] = self.next_t_obses[idxs]
        self.not_dones[:to_keep] = self.not_dones[idxs]
        self.not_dones_no_max[:to_keep] = self.not_dones_no_max[idxs]
        self.idx = to_keep
        self.full = False

    def sample(self, batch_size):
        """Uniformly sample a batch (with replacement) as device tensors.

        :return: tuple (obses, t_obses, actions, rewards, next_obses,
                 next_t_obses, not_dones, not_dones_no_max)
        """
        idxs = np.random.randint(0, self.__len__(), size=batch_size)
        obses = torch.from_numpy(self.obses[idxs]).to(self.device)
        t_obses = torch.from_numpy(self.t_obses[idxs]).to(self.device)
        actions = torch.from_numpy(self.actions[idxs]).to(self.device)
        rewards = torch.from_numpy(self.rewards[idxs]).to(self.device)
        next_obses = torch.from_numpy(self.next_obses[idxs]).to(self.device)
        next_t_obses = torch.from_numpy(self.next_t_obses[idxs]).to(self.device)
        not_dones = torch.from_numpy(self.not_dones[idxs]).to(self.device)
        not_dones_no_max = torch.from_numpy(self.not_dones_no_max[idxs]).to(self.device)
        return (
            obses,
            t_obses,
            actions,
            rewards,
            next_obses,
            next_t_obses,
            not_dones,
            not_dones_no_max,
        )
from ..api import Extension, helpers
from ..log import logger
from ..templates import isort_cfg, pre_commit_config
class PreCommit(Extension):
"""Generate pre-commit configuration file"""
def activate(self, actions):
"""Activate extension
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
return (
self.register(actions, self.add_files, after='define_structure') +
[self.instruct_user])
@staticmethod
def add_files(struct, opts):
"""Add .pre-commit-config.yaml file to structure
Since the default template uses isort, this function also provides an
initial version of .isort.cfg that can be extended by the user
(it contains some useful skips, e.g. tox and venv)
Args:
struct (dict): project representation as (possibly) nested
:obj:`dict`.
opts (dict): given options, see :obj:`create_project` for
an extensive list.
Returns:
struct, opts: updated project representation and options
"""
files = {
'.pre-commit-config.yaml': (
pre_commit_config(opts), helpers.NO_OVERWRITE
),
'.isort.cfg': (
isort_cfg(opts), helpers.NO_OVERWRITE
),
}
return helpers.merge(struct, {opts['project']: files}), opts
@staticmethod
def instruct_user(struct, opts):
logger.warning(
'\nA `.pre-commit-config.yaml` file was generated inside your '
'project but in order to make sure the hooks will run, please '
'don\'t forget to install the `pre-commit` package:\n\n'
' cd %s\n'
' # it is a good idea to create and activate a virtualenv here\n'
' pip install pre-commit\n'
' pre-commit install\n'
' # another good idea is update the hooks to the latest version\n'
' # pre-commit autoupdate\n\n'
'You might also consider including similar instructions in your '
'docs, to remind the contributors to do the same.\n',
opts['project'])
return struct, opts | .eggs/PyScaffold-3.1-py3.9.egg/pyscaffold/extensions/pre_commit.py | from ..api import Extension, helpers
from ..log import logger
from ..templates import isort_cfg, pre_commit_config
class PreCommit(Extension):
"""Generate pre-commit configuration file"""
def activate(self, actions):
"""Activate extension
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
return (
self.register(actions, self.add_files, after='define_structure') +
[self.instruct_user])
@staticmethod
def add_files(struct, opts):
"""Add .pre-commit-config.yaml file to structure
Since the default template uses isort, this function also provides an
initial version of .isort.cfg that can be extended by the user
(it contains some useful skips, e.g. tox and venv)
Args:
struct (dict): project representation as (possibly) nested
:obj:`dict`.
opts (dict): given options, see :obj:`create_project` for
an extensive list.
Returns:
struct, opts: updated project representation and options
"""
files = {
'.pre-commit-config.yaml': (
pre_commit_config(opts), helpers.NO_OVERWRITE
),
'.isort.cfg': (
isort_cfg(opts), helpers.NO_OVERWRITE
),
}
return helpers.merge(struct, {opts['project']: files}), opts
@staticmethod
def instruct_user(struct, opts):
logger.warning(
'\nA `.pre-commit-config.yaml` file was generated inside your '
'project but in order to make sure the hooks will run, please '
'don\'t forget to install the `pre-commit` package:\n\n'
' cd %s\n'
' # it is a good idea to create and activate a virtualenv here\n'
' pip install pre-commit\n'
' pre-commit install\n'
' # another good idea is update the hooks to the latest version\n'
' # pre-commit autoupdate\n\n'
'You might also consider including similar instructions in your '
'docs, to remind the contributors to do the same.\n',
opts['project'])
return struct, opts | 0.765769 | 0.13134 |
from autosense.autodiff.autotensor import autoTensor, Node
import torch
from autosense.neural.param import Weight, Initializer
import autosense.autodiff.functional as F
import torch.nn.init as torchInit
class Layer(object):
"""Abstract class that is inherited by all types of layers"""
def __call__(self):
raise NotImplementedError
class Linear(Layer):
def __init__(self, input_dim, output_dim,initializer=None,bias=True):
self.weight = Weight(shape=(input_dim,output_dim),initializer=initializer)
if bias:
self.bias = Weight(shape=(1,output_dim),initializer=initializer)
self.bias_present = bias
def __call__(self,inputs):
if self.bias_present:
return F.MatMul(inputs,self.weight) + self.bias
else:
return F.MatMul(inputs,self.weight)
class Linear2(Layer):
def __init__(self, input1_dim,input2_dim, output_dim,initializer=None,bias=True):
self.weight = Weight(shape=(input1_dim,output_dim),initializer=initializer)
self.weight2 = Weight(shape=(input2_dim,output_dim),initializer=initializer)
if bias:
self.bias = Weight(shape=(1,output_dim),initializer=initializer)
self.bias_present = bias
def __call__(self,inputs1,inputs2):
if self.bias_present:
return F.MatMul(inputs1,self.weight) + F.MatMul(inputs2,self.weight2) + self.bias
else:
return F.MatMul(inputs1,self.weight) + F.MatMul(inputs2,self.weight2)
class Conv2D(Layer):
def __init__(self,filter_shape,padding=0,stride=1,initializer=None):
"""
input – input tensor of shape (minibatch,in_channels,iH,iW) \n
weight – filters of shape (out_channels,in_channels,kH,kW) \n
bias – bias tensor of shape (out_channels). """
self.padding = padding
self.stride = stride
self.filter = Weight(filter_shape,initializer=initializer)
self.bias = Weight(shape = filter_shape[0])
def __call__(self,inputs):
return F.Conv2d(image_block=inputs,
filters=self.filter,
bias=self.bias,
padding=self.padding,
stride=self.stride
)
class Dropout(Layer):
def __init__(self,input_shape,keep_prob=0.8):
self.input_shape = input_shape
self.keep_prob = keep_prob
def __call__(self,inputs):
mask = torchInit.uniform_(torch.rand(self.input_shape)).type(inputs.value.type())
mask[mask < self.keep_prob] = 1
mask[mask != 1 ] = 0
return F.Dpout(inputs,mask) | autosense/neural/layers.py | from autosense.autodiff.autotensor import autoTensor, Node
import torch
from autosense.neural.param import Weight, Initializer
import autosense.autodiff.functional as F
import torch.nn.init as torchInit
class Layer(object):
"""Abstract class that is inherited by all types of layers"""
def __call__(self):
raise NotImplementedError
class Linear(Layer):
def __init__(self, input_dim, output_dim,initializer=None,bias=True):
self.weight = Weight(shape=(input_dim,output_dim),initializer=initializer)
if bias:
self.bias = Weight(shape=(1,output_dim),initializer=initializer)
self.bias_present = bias
def __call__(self,inputs):
if self.bias_present:
return F.MatMul(inputs,self.weight) + self.bias
else:
return F.MatMul(inputs,self.weight)
class Linear2(Layer):
def __init__(self, input1_dim,input2_dim, output_dim,initializer=None,bias=True):
self.weight = Weight(shape=(input1_dim,output_dim),initializer=initializer)
self.weight2 = Weight(shape=(input2_dim,output_dim),initializer=initializer)
if bias:
self.bias = Weight(shape=(1,output_dim),initializer=initializer)
self.bias_present = bias
def __call__(self,inputs1,inputs2):
if self.bias_present:
return F.MatMul(inputs1,self.weight) + F.MatMul(inputs2,self.weight2) + self.bias
else:
return F.MatMul(inputs1,self.weight) + F.MatMul(inputs2,self.weight2)
class Conv2D(Layer):
def __init__(self,filter_shape,padding=0,stride=1,initializer=None):
"""
input – input tensor of shape (minibatch,in_channels,iH,iW) \n
weight – filters of shape (out_channels,in_channels,kH,kW) \n
bias – bias tensor of shape (out_channels). """
self.padding = padding
self.stride = stride
self.filter = Weight(filter_shape,initializer=initializer)
self.bias = Weight(shape = filter_shape[0])
def __call__(self,inputs):
return F.Conv2d(image_block=inputs,
filters=self.filter,
bias=self.bias,
padding=self.padding,
stride=self.stride
)
class Dropout(Layer):
def __init__(self,input_shape,keep_prob=0.8):
self.input_shape = input_shape
self.keep_prob = keep_prob
def __call__(self,inputs):
mask = torchInit.uniform_(torch.rand(self.input_shape)).type(inputs.value.type())
mask[mask < self.keep_prob] = 1
mask[mask != 1 ] = 0
return F.Dpout(inputs,mask) | 0.890604 | 0.33292 |
import numpy as np
from scipy.special import lpmv, gamma, hyp1f1, legendre
from scipy.special.orthogonal import genlaguerre
from scipy.misc import factorial
import sh, spf
# default parameters values
_default_radial_order = spf._default_radial_order
_default_angular_rank = sh._default_rank
_default_zeta = spf._default_zeta
class ModifiedSphericalPolarFourier:
"""This class implements the modified SPF basis, for the reconstruction of
a continuous function.
Parameters
----------
radial_order : int
The radial truncation order of the mSPF basis.
angular_rank : int
The truncation rank of the angular part of the mSPF basis.
zeta : float
The scale parameter of the mSPF basis.
"""
def __init__(self, radial_order=_default_radial_order,
angular_rank=_default_angular_rank, zeta=_default_zeta):
self.radial_order = radial_order
self.angular_rank = angular_rank
self.zeta = zeta
self.coefficients = np.zeros((self.radial_order,
sh.dimension(self.angular_rank)))
def get_angular_rank(self):
return self._angular_rank
def set_angular_rank(self, value):
if value % 2 != 0:
raise ValueError("'angular_rank' only accepts even values.")
self._angular_rank = value
angular_rank = property(get_angular_rank, set_angular_rank)
def spherical_function(self, r, theta, phi):
"""The 3d function represented by the mSPF object.
Parameters
----------
r : array-like, shape (K, )
The radii of the points in q-space where to compute the spherical
function.
theta : array-like, shape (K, )
The polar angles of the points in q-space where to compute the
spherical function.
phi : array-like, shape (K, )
The azimuthal angles of the points in q-space where to compute the
spherical function.
Returns
-------
f : array-like, shape (K, )
The function computed at the points provided as input.
"""
result = 0.0
for n in range(self.radial_order - 1):
if np.abs(self.coefficients[n]).max() > 0.0:
sh_coefs = self.coefficients[n]
spherical_harm = sh.SphericalHarmonics(sh_coefs)
result += spherical_harm.angular_function(theta, phi) * \
radial_function(r, n, self.zeta)
return result
def matrix(r, theta, phi, radial_order=_default_radial_order,
angular_rank=_default_angular_rank, zeta=_default_zeta):
"""Returns the spherical polar Fourier observation matrix for a given set
of points represented by their spherical coordinates.
Parameters
----------
r : array-like, shape (K, )
The radii of the points in q-space where to compute the spherical
function.
theta : array-like, shape (K, )
The polar angles of the points in q-space where to compute the
spherical function.
phi : array-like, shape (K, )
The azimuthal angles of the points in q-space where to compute the
spherical function.
radial_order : int
The radial truncation order of the SPF basis.
angular_rank : int
The truncation rank of the angular part of the SPF basis.
zeta : float
The scale parameter of the mSPF basis.
Returns
-------
H : array-like, shape (K, R)
The observation matrix corresponding to the point set passed as input.
"""
K = r.shape[0]
H = np.zeros((K, radial_order - 1, sh.dimension(angular_rank)))
b_n_j = ModifiedSphericalPolarFourier(radial_order, angular_rank, zeta)
for n in range(H.shape[1]):
for j in range(H.shape[2]):
b_n_j.coefficients[:] = 0
b_n_j.coefficients[n, j] = 1.0
H[:, n, j] = b_n_j.spherical_function(r, theta, phi)
return H.reshape(K, dimension(radial_order, angular_rank))
def to_spf_matrix(radial_order=_default_radial_order,
angular_rank=_default_angular_rank, zeta=_default_zeta):
"Computes the transition matrix from modified SPF basis to SPF basis."
M = np.zeros((spf.dimension(radial_order, angular_rank),
dimension(radial_order, angular_rank)))
for i in range(M.shape[0]):
n_i = spf.index_n(i, radial_order, angular_rank)
l_i = spf.index_l(i, radial_order, angular_rank)
m_i = spf.index_m(i, radial_order, angular_rank)
kappa_ni = spf.kappa(zeta, n_i)
for j in range(M.shape[1]):
n_j = index_n(j, radial_order, angular_rank)
l_j = index_l(j, radial_order, angular_rank)
m_j = index_m(j, radial_order, angular_rank)
chi_nj = chi(zeta, n_j)
if (l_i == l_j and m_i == m_j):
if n_i <= n_j:
M[i, j] = 3 * chi_nj / (2 * kappa_ni)
else:
if n_i == n_j + 1:
M[i, j] = - (n_j + 1) * chi_nj / kappa_ni
return M
def dimension(radial_order, angular_rank):
"Returns the dimension of the truncated mSPF basis."
return (radial_order - 1) * sh.dimension(angular_rank)
index_i = spf.index_i
index_n = spf.index_n
index_l = spf.index_l
index_m = spf.index_m
def chi(zeta, n):
"Returns the normalization constant of the mSPF basis."
return np.sqrt(2 / zeta**1.5 * factorial(n) / gamma(n + 3.5))
def radial_function(r, n, zeta):
"Computes the radial part of the mSPF basis."
return genlaguerre(n, 2.5)(r**2 / zeta) * \
r**2 / zeta * np.exp(- r**2 / (2 * zeta)) * chi(zeta, n)
def Lambda(radial_order, angular_rank, zeta=_default_zeta):
"""The Laplace regularization is computed by matrix multiplication
(x-x0)^T Lambda (x-x0).
"""
max_degree = 2 * (radial_order + 1)
gammas = gamma(np.arange(max_degree) + 0.5)
dim = dimension(radial_order, angular_rank)
L = np.zeros((dim, dim))
dim_sh = sh.dimension(angular_rank)
for n1 in range(radial_order - 1):
chi1 = chi(zeta, n1)
for n2 in range(radial_order - 1):
chi2 = chi(zeta, n2)
for j1 in range(dim_sh):
l1 = sh.index_l(j1)
coeffs = __Tcoeffs(n1, n2, l1)
degree = coeffs.shape[0]
matrix_entry = chi1 * chi2 / (2 * np.sqrt(zeta)) * \
np.dot(coeffs, gammas[range(degree-1, -1, -1)])
for j2 in range(dim_sh):
l2 = sh.index_l(j2)
if j1 == j2:
L[n1 * dim_sh + j1, n2 * dim_sh + j2] = matrix_entry
return L
def v(radial_order, angular_rank, zeta=_default_zeta):
"The vector x0 for Laplace regularization is -Lambda^-1 v."
max_degree = 2 * (radial_order + 1)
gammas = gamma(np.arange(max_degree) + 0.5)
dim = dimension(radial_order, angular_rank)
v = np.zeros(dim)
dim_sh = sh.dimension(angular_rank)
for n in range(radial_order - 1):
chi1 = chi(zeta, n)
coeffs = __Tcoeffs(n, -1, 0)
degree = coeffs.shape[0]
v[n * dim_sh] = chi1 / (2 * np.sqrt(zeta)) \
* np.dot(coeffs, gammas[range(degree-1, -1, -1)])
return v
def __F_n(n):
"""F_n(q) = \chi_n \exp(-q^2 / 2\zeta) P_n(q)
and P_n(q) = q^2 / zeta * L_n^{5/2}(q^2 / zeta)"""
if n == -1:
return np.poly1d([1.0])
else:
a = np.poly1d([1, 0.0, 0.0])
return a * genlaguerre(n, 2.5)(a)
def __diffFn(p):
"""F_n'(q) = \chi_n \exp(-q^2 / 2\zeta) *
(-q / \zeta * P_n(q) + P_n'(q))"""
a = np.poly1d([-1, 0.0])
return a * p + p.deriv()
def __h_i_poly(n, l):
"""h_i(q) = \chi_n \exp(-q^2 / 2\zeta) * h_i_poly(q)"""
F0n = __F_n(n)
F1n = __diffFn(F0n)
F2n = __diffFn(F1n)
a = np.poly1d([1.0, 0.0])
b = (F0n / a)[0] # Polynomial euclidian division
return a * F2n + 2 * F1n - l * (l + 1) * b
def __Tcoeffs(ni, nj, l):
"""The entry (i, j) of laplace matrix is
$\chi_{n(i)}\chi_{n(j)}
\int_0^\infty \exp(-q^2/\zeta) T_{i,j}(q^2/\zeta)\,\mathrm{d}q$.
This function returns the coefficients of T."""
Tij = __h_i_poly(ni, l) * __h_i_poly(nj, l)
degree = Tij.coeffs.shape[0]
coeffs = Tij.coeffs[range(0, degree, 2)]
return coeffs | qspace/bases/mspf.py |
import numpy as np
from scipy.special import lpmv, gamma, hyp1f1, legendre
from scipy.special.orthogonal import genlaguerre
from scipy.misc import factorial
import sh, spf
# default parameters values
_default_radial_order = spf._default_radial_order
_default_angular_rank = sh._default_rank
_default_zeta = spf._default_zeta
class ModifiedSphericalPolarFourier:
"""This class implements the modified SPF basis, for the reconstruction of
a continuous function.
Parameters
----------
radial_order : int
The radial truncation order of the mSPF basis.
angular_rank : int
The truncation rank of the angular part of the mSPF basis.
zeta : float
The scale parameter of the mSPF basis.
"""
def __init__(self, radial_order=_default_radial_order,
angular_rank=_default_angular_rank, zeta=_default_zeta):
self.radial_order = radial_order
self.angular_rank = angular_rank
self.zeta = zeta
self.coefficients = np.zeros((self.radial_order,
sh.dimension(self.angular_rank)))
def get_angular_rank(self):
return self._angular_rank
def set_angular_rank(self, value):
if value % 2 != 0:
raise ValueError("'angular_rank' only accepts even values.")
self._angular_rank = value
angular_rank = property(get_angular_rank, set_angular_rank)
def spherical_function(self, r, theta, phi):
"""The 3d function represented by the mSPF object.
Parameters
----------
r : array-like, shape (K, )
The radii of the points in q-space where to compute the spherical
function.
theta : array-like, shape (K, )
The polar angles of the points in q-space where to compute the
spherical function.
phi : array-like, shape (K, )
The azimuthal angles of the points in q-space where to compute the
spherical function.
Returns
-------
f : array-like, shape (K, )
The function computed at the points provided as input.
"""
result = 0.0
for n in range(self.radial_order - 1):
if np.abs(self.coefficients[n]).max() > 0.0:
sh_coefs = self.coefficients[n]
spherical_harm = sh.SphericalHarmonics(sh_coefs)
result += spherical_harm.angular_function(theta, phi) * \
radial_function(r, n, self.zeta)
return result
def matrix(r, theta, phi, radial_order=_default_radial_order,
angular_rank=_default_angular_rank, zeta=_default_zeta):
"""Returns the spherical polar Fourier observation matrix for a given set
of points represented by their spherical coordinates.
Parameters
----------
r : array-like, shape (K, )
The radii of the points in q-space where to compute the spherical
function.
theta : array-like, shape (K, )
The polar angles of the points in q-space where to compute the
spherical function.
phi : array-like, shape (K, )
The azimuthal angles of the points in q-space where to compute the
spherical function.
radial_order : int
The radial truncation order of the SPF basis.
angular_rank : int
The truncation rank of the angular part of the SPF basis.
zeta : float
The scale parameter of the mSPF basis.
Returns
-------
H : array-like, shape (K, R)
The observation matrix corresponding to the point set passed as input.
"""
K = r.shape[0]
H = np.zeros((K, radial_order - 1, sh.dimension(angular_rank)))
b_n_j = ModifiedSphericalPolarFourier(radial_order, angular_rank, zeta)
for n in range(H.shape[1]):
for j in range(H.shape[2]):
b_n_j.coefficients[:] = 0
b_n_j.coefficients[n, j] = 1.0
H[:, n, j] = b_n_j.spherical_function(r, theta, phi)
return H.reshape(K, dimension(radial_order, angular_rank))
def to_spf_matrix(radial_order=_default_radial_order,
angular_rank=_default_angular_rank, zeta=_default_zeta):
"Computes the transition matrix from modified SPF basis to SPF basis."
M = np.zeros((spf.dimension(radial_order, angular_rank),
dimension(radial_order, angular_rank)))
for i in range(M.shape[0]):
n_i = spf.index_n(i, radial_order, angular_rank)
l_i = spf.index_l(i, radial_order, angular_rank)
m_i = spf.index_m(i, radial_order, angular_rank)
kappa_ni = spf.kappa(zeta, n_i)
for j in range(M.shape[1]):
n_j = index_n(j, radial_order, angular_rank)
l_j = index_l(j, radial_order, angular_rank)
m_j = index_m(j, radial_order, angular_rank)
chi_nj = chi(zeta, n_j)
if (l_i == l_j and m_i == m_j):
if n_i <= n_j:
M[i, j] = 3 * chi_nj / (2 * kappa_ni)
else:
if n_i == n_j + 1:
M[i, j] = - (n_j + 1) * chi_nj / kappa_ni
return M
def dimension(radial_order, angular_rank):
"Returns the dimension of the truncated mSPF basis."
return (radial_order - 1) * sh.dimension(angular_rank)
index_i = spf.index_i
index_n = spf.index_n
index_l = spf.index_l
index_m = spf.index_m
def chi(zeta, n):
"Returns the normalization constant of the mSPF basis."
return np.sqrt(2 / zeta**1.5 * factorial(n) / gamma(n + 3.5))
def radial_function(r, n, zeta):
"Computes the radial part of the mSPF basis."
return genlaguerre(n, 2.5)(r**2 / zeta) * \
r**2 / zeta * np.exp(- r**2 / (2 * zeta)) * chi(zeta, n)
def Lambda(radial_order, angular_rank, zeta=_default_zeta):
"""The Laplace regularization is computed by matrix multiplication
(x-x0)^T Lambda (x-x0).
"""
max_degree = 2 * (radial_order + 1)
gammas = gamma(np.arange(max_degree) + 0.5)
dim = dimension(radial_order, angular_rank)
L = np.zeros((dim, dim))
dim_sh = sh.dimension(angular_rank)
for n1 in range(radial_order - 1):
chi1 = chi(zeta, n1)
for n2 in range(radial_order - 1):
chi2 = chi(zeta, n2)
for j1 in range(dim_sh):
l1 = sh.index_l(j1)
coeffs = __Tcoeffs(n1, n2, l1)
degree = coeffs.shape[0]
matrix_entry = chi1 * chi2 / (2 * np.sqrt(zeta)) * \
np.dot(coeffs, gammas[range(degree-1, -1, -1)])
for j2 in range(dim_sh):
l2 = sh.index_l(j2)
if j1 == j2:
L[n1 * dim_sh + j1, n2 * dim_sh + j2] = matrix_entry
return L
def v(radial_order, angular_rank, zeta=_default_zeta):
"The vector x0 for Laplace regularization is -Lambda^-1 v."
max_degree = 2 * (radial_order + 1)
gammas = gamma(np.arange(max_degree) + 0.5)
dim = dimension(radial_order, angular_rank)
v = np.zeros(dim)
dim_sh = sh.dimension(angular_rank)
for n in range(radial_order - 1):
chi1 = chi(zeta, n)
coeffs = __Tcoeffs(n, -1, 0)
degree = coeffs.shape[0]
v[n * dim_sh] = chi1 / (2 * np.sqrt(zeta)) \
* np.dot(coeffs, gammas[range(degree-1, -1, -1)])
return v
def __F_n(n):
"""F_n(q) = \chi_n \exp(-q^2 / 2\zeta) P_n(q)
and P_n(q) = q^2 / zeta * L_n^{5/2}(q^2 / zeta)"""
if n == -1:
return np.poly1d([1.0])
else:
a = np.poly1d([1, 0.0, 0.0])
return a * genlaguerre(n, 2.5)(a)
def __diffFn(p):
"""F_n'(q) = \chi_n \exp(-q^2 / 2\zeta) *
(-q / \zeta * P_n(q) + P_n'(q))"""
a = np.poly1d([-1, 0.0])
return a * p + p.deriv()
def __h_i_poly(n, l):
"""h_i(q) = \chi_n \exp(-q^2 / 2\zeta) * h_i_poly(q)"""
F0n = __F_n(n)
F1n = __diffFn(F0n)
F2n = __diffFn(F1n)
a = np.poly1d([1.0, 0.0])
b = (F0n / a)[0] # Polynomial euclidian division
return a * F2n + 2 * F1n - l * (l + 1) * b
def __Tcoeffs(ni, nj, l):
"""The entry (i, j) of laplace matrix is
$\chi_{n(i)}\chi_{n(j)}
\int_0^\infty \exp(-q^2/\zeta) T_{i,j}(q^2/\zeta)\,\mathrm{d}q$.
This function returns the coefficients of T."""
Tij = __h_i_poly(ni, l) * __h_i_poly(nj, l)
degree = Tij.coeffs.shape[0]
coeffs = Tij.coeffs[range(0, degree, 2)]
return coeffs | 0.90198 | 0.571527 |
from selenium import webdriver
from time import sleep
class Filler(object):
def __init__(self, key_pairs, submit_element, url_list, testing_mode):
self.testing_mode = testing_mode
self.key_pairs = key_pairs
self.submit_element = submit_element
self.url_list = url_list
self.first_click = False
self.first_click_el = ""
self.popups = False
self.popup_el = ""
def fill(self):
if self.testing_mode:
options = webdriver.FirefoxOptions()
options.accept_insecure_certs = True
driver = webdriver.Firefox(firefox_options=options, )
else:
options = webdriver.ChromeOptions()
options.accept_insecure_certs = True
options.headless = True
options.add_argument('--no-sandbox')
options.add_argument('--window-size=1920,1080')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome(executable_path=r'/app/automator/chromedriver', chrome_options=options)
driver.set_window_size(1920, 1080)
driver.set_script_timeout(60)
driver.set_page_load_timeout(90)
print(self.key_pairs)
for url in self.url_list:
driver.get(url)
driver.get_cookies()
sleep(5)
if self.popups:
if isinstance(self.popup_el, list):
for popup in self.popup_el:
select = driver.find_element_by_xpath(popup)
select.click()
sleep(1)
driver.switch_to.default_content()
if self.first_click:
if isinstance(self.first_click_el, list):
for click in self.first_click_el:
driver.get_cookies()
sleep(5)
clicking_first = driver.find_element_by_xpath(click)
clicking_first.click()
else:
print(type(self.first_click_el))
driver.get_cookies()
sleep(5)
clicking_first = driver.find_element_by_xpath(self.first_click_el)
clicking_first.click()
for key in list(self.key_pairs.keys()):
element = driver.find_element_by_xpath(key)
element.send_keys(self.key_pairs[key])
sleep(0.3)
if isinstance(self.submit_element, list):
for el in self.submit_element:
submit = driver.find_element_by_xpath(el)
submit.click()
sleep(0.3)
sleep(5)
else:
submit = driver.find_element_by_xpath(self.submit_element)
submit.click()
sleep(5)
print("Submitted {} out of {} contests".format(str(self.url_list.index(url)+1), str(len(self.url_list)))) | automator/filler.py | from selenium import webdriver
from time import sleep
class Filler(object):
def __init__(self, key_pairs, submit_element, url_list, testing_mode):
self.testing_mode = testing_mode
self.key_pairs = key_pairs
self.submit_element = submit_element
self.url_list = url_list
self.first_click = False
self.first_click_el = ""
self.popups = False
self.popup_el = ""
def fill(self):
if self.testing_mode:
options = webdriver.FirefoxOptions()
options.accept_insecure_certs = True
driver = webdriver.Firefox(firefox_options=options, )
else:
options = webdriver.ChromeOptions()
options.accept_insecure_certs = True
options.headless = True
options.add_argument('--no-sandbox')
options.add_argument('--window-size=1920,1080')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome(executable_path=r'/app/automator/chromedriver', chrome_options=options)
driver.set_window_size(1920, 1080)
driver.set_script_timeout(60)
driver.set_page_load_timeout(90)
print(self.key_pairs)
for url in self.url_list:
driver.get(url)
driver.get_cookies()
sleep(5)
if self.popups:
if isinstance(self.popup_el, list):
for popup in self.popup_el:
select = driver.find_element_by_xpath(popup)
select.click()
sleep(1)
driver.switch_to.default_content()
if self.first_click:
if isinstance(self.first_click_el, list):
for click in self.first_click_el:
driver.get_cookies()
sleep(5)
clicking_first = driver.find_element_by_xpath(click)
clicking_first.click()
else:
print(type(self.first_click_el))
driver.get_cookies()
sleep(5)
clicking_first = driver.find_element_by_xpath(self.first_click_el)
clicking_first.click()
for key in list(self.key_pairs.keys()):
element = driver.find_element_by_xpath(key)
element.send_keys(self.key_pairs[key])
sleep(0.3)
if isinstance(self.submit_element, list):
for el in self.submit_element:
submit = driver.find_element_by_xpath(el)
submit.click()
sleep(0.3)
sleep(5)
else:
submit = driver.find_element_by_xpath(self.submit_element)
submit.click()
sleep(5)
print("Submitted {} out of {} contests".format(str(self.url_list.index(url)+1), str(len(self.url_list)))) | 0.212722 | 0.059921 |
import os
import sys
import fcntl
import errno
import subprocess
import typing
import threading
from . import utils, const, _pidlock
from .exceptions import UpdaterInvalidHookCommandError
def __run_command(command):
def _fthread(file):
while True:
line = file.readline()
if not line:
break
utils.report(line.decode(sys.getdefaultencoding()))
utils.report('Running command: ' + command)
process = subprocess.Popen(command, stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True)
tout = threading.Thread(target=_fthread, args=(process.stdout,))
terr = threading.Thread(target=_fthread, args=(process.stderr,))
tout.daemon = True
terr.daemon = True
tout.start()
terr.start()
exit_code = process.wait()
if exit_code != 0:
utils.report('Command failed with exit code: ' + str(exit_code))
def register(command: str):
"""Add given command (format is expected to be same as if you call
subprocess.run) to be executed when updater exits. Note that this hook is
executed no matter if updater passed or failed or even if it just requested
user's approval. In all of those cases when updater exits this hook is
executed.
"commands" has to be single line shell script.
"""
if '\n' in command:
raise UpdaterInvalidHookCommandError(
"Argument register can be only single line string.")
# Open file for writing and take exclusive lock
file = os.open(const.POSTRUN_HOOK_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
fcntl.lockf(file, fcntl.LOCK_EX)
# Check if we are working with existing file
invalid = False
try:
if os.fstat(file).st_ino != os.stat(const.POSTRUN_HOOK_FILE).st_ino:
invalid = True
except OSError as excp:
if excp.errno == errno.ENOENT:
invalid = True
raise
if invalid: # File was removed before we locked it
os.close(file)
register(command)
return
if not _pidlock.pid_locked(): # Check if updater is running
os.close(file)
# If there is no running instance then just run given command
__run_command(command)
return
# Append given arguments to file
# Note: This takes ownership of file and automatically closes it. (at least
# it seems that way)
with os.fdopen(file, 'w') as fhook:
fhook.write(command + '\n')
utils.report('Postrun hook registered: ' + command)
def register_list(commands: typing.Iterable[str]):
"""Same as register but it allows multiple commands to be registered at
once.
"""
if commands is not None:
for cmd in commands:
register(cmd)
def _run():
"""Run all registered commands.
"""
# Open file for reading and take exclusive lock
try:
file = os.open(const.POSTRUN_HOOK_FILE, os.O_RDWR)
except OSError as excp:
if excp.errno == errno.ENOENT:
return # No file means nothing to do
raise
fcntl.lockf(file, fcntl.LOCK_EX)
# Note: nobody except us should be able to remove this file (because we
# should hold pidlock) so we don't have to check if file we opened is still
# on FS.
with os.fdopen(file, 'r') as fhook:
for line in fhook.readlines():
__run_command(line)
os.remove(const.POSTRUN_HOOK_FILE) | svupdater/hook.py | import os
import sys
import fcntl
import errno
import subprocess
import typing
import threading
from . import utils, const, _pidlock
from .exceptions import UpdaterInvalidHookCommandError
def __run_command(command):
def _fthread(file):
while True:
line = file.readline()
if not line:
break
utils.report(line.decode(sys.getdefaultencoding()))
utils.report('Running command: ' + command)
process = subprocess.Popen(command, stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True)
tout = threading.Thread(target=_fthread, args=(process.stdout,))
terr = threading.Thread(target=_fthread, args=(process.stderr,))
tout.daemon = True
terr.daemon = True
tout.start()
terr.start()
exit_code = process.wait()
if exit_code != 0:
utils.report('Command failed with exit code: ' + str(exit_code))
def register(command: str):
    """Add given command (format is expected to be same as if you call
    subprocess.run) to be executed when updater exits. Note that this hook is
    executed no matter if updater passed or failed or even if it just requested
    user's approval. In all of those cases when updater exits this hook is
    executed.

    "command" has to be a single line shell script.

    Raises:
        UpdaterInvalidHookCommandError: if command spans multiple lines.
    """
    if '\n' in command:
        raise UpdaterInvalidHookCommandError(
            "Argument register can be only single line string.")
    # Open file for writing and take exclusive lock
    file = os.open(const.POSTRUN_HOOK_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
    fcntl.lockf(file, fcntl.LOCK_EX)
    # Check that the path still refers to the file we opened (it may have
    # been removed and recreated between open() and lockf()).
    invalid = False
    try:
        if os.fstat(file).st_ino != os.stat(const.POSTRUN_HOOK_FILE).st_ino:
            invalid = True
    except OSError as excp:
        # Bug fix: the original re-raised unconditionally, so the ENOENT
        # branch never reached the retry below and the descriptor leaked
        # on unexpected errors. Only propagate non-ENOENT failures.
        if excp.errno != errno.ENOENT:
            os.close(file)
            raise
        invalid = True
    if invalid:  # File was removed before we locked it
        os.close(file)
        register(command)
        return
    if not _pidlock.pid_locked():  # Check if updater is running
        os.close(file)
        # If there is no running instance then just run given command
        __run_command(command)
        return
    # Append given arguments to file
    # Note: fdopen takes ownership of the descriptor and closes it on exit
    with os.fdopen(file, 'w') as fhook:
        fhook.write(command + '\n')
    utils.report('Postrun hook registered: ' + command)
def register_list(commands: typing.Iterable[str]):
    """Register multiple postrun hook commands at once (see ``register``)."""
    if commands is None:
        return
    for cmd in commands:
        register(cmd)
def _run():
    """Execute every command queued in the postrun hook file, then delete it."""
    try:
        file = os.open(const.POSTRUN_HOOK_FILE, os.O_RDWR)
    except OSError as excp:
        if excp.errno == errno.ENOENT:
            # No file means nothing to do
            return
        raise
    # Take exclusive lock; we keep it (via the open descriptor) while the
    # queued commands run.
    fcntl.lockf(file, fcntl.LOCK_EX)
    # Note: nobody except us should be able to remove this file (because we
    # should hold pidlock) so we don't have to check if file we opened is
    # still on FS.
    with os.fdopen(file, 'r') as fhook:
        for queued in fhook.readlines():
            __run_command(queued)
    os.remove(const.POSTRUN_HOOK_FILE)
from django.conf import settings
from django.conf.urls import include, url
from django.views.decorators.cache import cache_page
from .feeds import ArticleFeed
from .views import SourceSearchView, HomepageView, SlackMessageView
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet
from haystack.views import search_view_factory
from source.articles.views import ArticleList, ArticleDetail
from source.utils.caching import ClearCache
STANDARD_CACHE_TIME = getattr(settings, 'CACHE_MIDDLEWARE_SECONDS', 60*15)
FEED_CACHE_TIME = getattr(settings, 'FEED_CACHE_SECONDS', 60*15)
# NOTE: every regex below is a raw string. '\w' inside a plain string
# literal is an invalid escape sequence (DeprecationWarning today, a
# SyntaxError in a future Python release).
BASE_URLS = [
    url(
        regex = r'^$',
        view = cache_page(STANDARD_CACHE_TIME)(HomepageView.as_view(template_name='homepage.html')),
        kwargs = {},
        name = 'homepage',
    ),
    url(r'^articles/', include('source.articles.urls')),
    url(r'^code/', include('source.code.urls')),
    url(r'^guides/', include('source.guides.urls')),
    url(r'^jobs/', include('source.jobs.urls')),
    url(r'^organizations/', include('source.people.urls.organizations')),
    url(r'^people/', include('source.people.urls.people')),
    url(
        regex = r'^search/$',
        view = search_view_factory(view_class=SourceSearchView, form_class=SearchForm, searchqueryset=SearchQuerySet().order_by('django_ct')),
        kwargs = {},
        name = 'haystack_search',
    ),
    url(
        regex = r'^clear-cache/$',
        view = ClearCache.as_view(),
        kwargs = {},
        name = 'clear_cache',
    ),
    url(
        regex = r'^send-to-slack/$',
        view = SlackMessageView.as_view(),
        kwargs = {},
        name = 'send_to_slack',
    ),
    url(
        regex = r'^rss/$',
        view = cache_page(FEED_CACHE_TIME)(ArticleFeed()),
        kwargs = {},
        name = 'homepage_feed',
    ),
    url(
        regex = r'^category/(?P<category>[-\w]+)/$',
        view = cache_page(STANDARD_CACHE_TIME)(ArticleList.as_view()),
        kwargs = {},
        name = 'article_list_by_category',
    ),
    url(
        regex = r'^category/(?P<category>[-\w]+)/rss/$',
        view = cache_page(FEED_CACHE_TIME)(ArticleFeed()),
        kwargs = {},
        name = 'article_list_by_category_feed',
    ),
    # The section/slug patterns are catch-alls and must stay after the
    # literal routes above so those match first.
    url(
        regex = r'^(?P<section>[-\w]+)/$',
        view = cache_page(STANDARD_CACHE_TIME)(ArticleList.as_view()),
        kwargs = {},
        name = 'article_list_by_section',
    ),
    url(
        regex = r'^(?P<section>[-\w]+)/rss/$',
        view = cache_page(FEED_CACHE_TIME)(ArticleFeed()),
        kwargs = {},
        name = 'article_list_by_section_feed',
    ),
    url(
        regex = r'^(?P<section>[-\w]+)/(?P<slug>[-\w]+)/$',
        view = cache_page(STANDARD_CACHE_TIME)(ArticleDetail.as_view()),
        kwargs = {},
        name = 'article_detail',
    ),
]
from django.conf.urls import include, url
from django.views.decorators.cache import cache_page
from .feeds import ArticleFeed
from .views import SourceSearchView, HomepageView, SlackMessageView
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet
from haystack.views import search_view_factory
from source.articles.views import ArticleList, ArticleDetail
from source.utils.caching import ClearCache
STANDARD_CACHE_TIME = getattr(settings, 'CACHE_MIDDLEWARE_SECONDS', 60*15)
FEED_CACHE_TIME = getattr(settings, 'FEED_CACHE_SECONDS', 60*15)
BASE_URLS = [
url(
regex = '^$',
view = cache_page(STANDARD_CACHE_TIME)(HomepageView.as_view(template_name='homepage.html')),
kwargs = {},
name = 'homepage',
),
url(r'^articles/', include('source.articles.urls')),
url(r'^code/', include('source.code.urls')),
url(r'^guides/', include('source.guides.urls')),
url(r'^jobs/', include('source.jobs.urls')),
url(r'^organizations/', include('source.people.urls.organizations')),
url(r'^people/', include('source.people.urls.people')),
url(
regex = '^search/$',
view = search_view_factory(view_class=SourceSearchView, form_class=SearchForm, searchqueryset=SearchQuerySet().order_by('django_ct')),
kwargs = {},
name = 'haystack_search',
),
url(
regex = '^clear-cache/$',
view = ClearCache.as_view(),
kwargs = {},
name = 'clear_cache',
),
url(
regex = '^send-to-slack/$',
view = SlackMessageView.as_view(),
kwargs = {},
name = 'send_to_slack',
),
url(
regex = '^rss/$',
view = cache_page(FEED_CACHE_TIME)(ArticleFeed()),
kwargs = {},
name = 'homepage_feed',
),
url(
regex = '^category/(?P<category>[-\w]+)/$',
view = cache_page(STANDARD_CACHE_TIME)(ArticleList.as_view()),
kwargs = {},
name = 'article_list_by_category',
),
url(
regex = '^category/(?P<category>[-\w]+)/rss/$',
view = cache_page(FEED_CACHE_TIME)(ArticleFeed()),
kwargs = {},
name = 'article_list_by_category_feed',
),
url(
regex = '^(?P<section>[-\w]+)/$',
view = cache_page(STANDARD_CACHE_TIME)(ArticleList.as_view()),
kwargs = {},
name = 'article_list_by_section',
),
url(
regex = '^(?P<section>[-\w]+)/rss/$',
view = cache_page(FEED_CACHE_TIME)(ArticleFeed()),
kwargs = {},
name = 'article_list_by_section_feed',
),
url(
regex = '^(?P<section>[-\w]+)/(?P<slug>[-\w]+)/$',
view = cache_page(STANDARD_CACHE_TIME)(ArticleDetail.as_view()),
kwargs = {},
name = 'article_detail',
),
] | 0.358241 | 0.11353 |
# See TRANSFORMATIONS.md file for details
import json
from pierky.p2es.errors import P2ESError
# Parse list of conditions c against data d.
# Returns: True | False (conditions matched / did not match).
# Raises exceptions: yes.
def parse_conditions_list(c, d):
    """Parse list of conditions *c* against data *d*.

    The first element may be the logical operator "AND" or "OR"; when it is
    missing the conditions default to "AND".

    Returns:
        bool: True when the conditions matched, False otherwise.
    Raises:
        P2ESError: on an empty list or an unknown logical operator.
    """
    if not c:
        raise P2ESError('Empty list')
    # Bug fix: `basestring` is Python-2 only (NameError on Python 3).
    if isinstance(c[0], str):
        if c[0] == 'AND':
            if len(c) > 2:
                for sub_c in c[1:]:
                    if not parse_conditions(sub_c, d):
                        return False
                return True
            else:
                # An "AND" group needs at least two operands.
                return False
        elif c[0] == 'OR':
            if len(c) > 2:
                for sub_c in c[1:]:
                    if parse_conditions(sub_c, d):
                        return True
                return False
            else:
                return True
        else:
            raise P2ESError(
                'Logical groups must begin with "AND" or "OR" '
                '("{}" found)'.format(c[0])
            )
    else:
        # default to "AND" if not specified
        for sub_c in c:
            if not parse_conditions(sub_c, d):
                return False
        return True
# Parse condition c against data d, using operator opfield.
# Returns: True | False (condition matched / did not match).
# Raises exceptions: yes.
def parse_conditions_dict(c, d, opfield):
    """Evaluate a single name/value condition dict *c* against data *d*.

    The key *opfield* selects the comparison operator (default "=").

    Returns:
        bool: True when the condition matched, False otherwise.
    Raises:
        P2ESError: on unknown operators or malformed condition dicts.
    """
    operators = {
        '=': lambda a, b: a == b,
        '>': lambda a, b: a > b,
        '>=': lambda a, b: a >= b,
        '<': lambda a, b: a < b,
        '<=': lambda a, b: a <= b,
        '!=': lambda a, b: a != b,
        'in': lambda a, b: a in b,
        'notin': lambda a, b: a not in b,
    }
    op = '='
    name = None
    value = None
    for key in c:
        if key == opfield:
            op = c[key]
            if op not in operators:
                raise P2ESError('Unexpected operator: "{}"'.format(op))
        elif name is None:
            name = key
            value = c[key]
        else:
            raise P2ESError('Only one name/value pair allowed')
    if op in ('in', 'notin') and not isinstance(value, list):
        raise P2ESError('The "{}" operator requires a list'.format(op))
    if name is None:
        raise P2ESError('Name/value pair expected')
    if name not in d:
        return False
    return operators[op](d[name], value)
# Parse conditions c against data d.
# Return: True | False (conditions matched / did not match).
# Raises exception: yes.
def parse_conditions(c, d, opfield='__op__'):
    """Parse conditions *c* (list group or single dict) against data *d*.

    Returns:
        bool: True when the conditions matched, False otherwise.
    Raises:
        P2ESError: when *c* is neither a list nor a dict, or is malformed.
    """
    if isinstance(c, list):
        return parse_conditions_list(c, d)
    if isinstance(c, dict):
        return parse_conditions_dict(c, d, opfield)
    raise P2ESError('Unexpected object type {} from {}'.format(
        type(c), str(c)
    ))
# Tests if a transformation syntax is valid.
# Returns: True | False.
# Raises exceptions: yes.
def test_transformation(tr):
    """Validate the syntax of a single transformation *tr*.

    Side effect: when an action carries "LookupTableFile", its JSON content
    is loaded into action["LookupTable"].

    Raises:
        P2ESError: when the transformation is malformed.
    """
    try:
        # Bug fix: the original formatted an undefined name
        # ("transformation"), so the detailed variant was never produced.
        tr_det = 'Transformations matrix ({})'.format(tr)
    except Exception:
        tr_det = 'Transformations matrix'

    if 'Conditions' not in tr:
        raise P2ESError('{}, "Conditions" is missing'.format(tr_det))

    if 'Actions' not in tr:
        raise P2ESError('{}, "Actions" is missing'.format(tr_det))

    try:
        parse_conditions(tr['Conditions'], {})
    except P2ESError as e:
        raise P2ESError('{}, invalid "Conditions": {}'.format(tr_det, str(e)))

    for action in tr['Actions']:
        if 'Type' not in action:
            raise P2ESError('{}, "Type" is missing'.format(tr_det))

        # Bug fix: build the per-action detail from the base string instead
        # of accumulating every previous action's type into tr_det.
        act_det = '{}, action type = {}'.format(tr_det, action['Type'])

        if action['Type'] not in ('AddField', 'AddFieldLookup', 'DelField'):
            raise P2ESError('{}, "Type" unknown'.format(act_det))

        if 'Name' not in action:
            raise P2ESError('{}, "Name" is missing'.format(act_det))

        if action['Type'] == 'AddField':
            if 'Value' not in action:
                raise P2ESError(
                    '{}, "Value" is missing for new field "{}"'.format(
                        act_det, action['Name']
                    )
                )

        if action['Type'] == 'AddFieldLookup':
            if 'LookupFieldName' not in action:
                raise P2ESError(
                    '{}, "LookupFieldName" is missing for '
                    'new field "{}"'.format(act_det, action['Name'])
                )
            if 'LookupTable' in action and 'LookupTableFile' in action:
                raise P2ESError(
                    '{}, only one from "LookupTable" and '
                    '"LookupTableFile" allowed'.format(act_det)
                )
            if 'LookupTable' not in action and 'LookupTableFile' not in action:
                raise P2ESError(
                    '{}, "LookupTable" and "LookupTableFile" missing '
                    'for new field "{}"'.format(act_det, action['Name'])
                )
            if 'LookupTableFile' in action:
                try:
                    with open(action['LookupTableFile'], "r") as f:
                        action['LookupTable'] = json.load(f)
                except Exception as e:
                    raise P2ESError(
                        '{}, error loading lookup table from {}: {}'.format(
                            act_det, action['LookupTableFile'], str(e)
                        )
                    )
if __name__ == '__main__':
    # Manual smoke test: evaluate a compound OR condition against a few
    # sample records and print which ones match.
    conditions = [
        "OR",
        [{"Name": "Bob"}, {"Age": 16, "__op__": ">="}],
        {"Name": "Tom"},
        [{"Name": "Lisa"}, {"Age": 20, "__op__": ">="}],
    ]

    people = [
        {"Name": "Bob", "Age": 15},
        {"Name": "Bob", "Age": 16},
        {"Name": "Ken", "Age": 14},
        {"Name": "Tom", "Age": 14},
        {"Name": "Tom", "Age": 20},
        {"Name": "Lisa", "Age": 15},
        {"Name": "Lisa", "Age": 22},
    ]

    print(conditions)
    for person in people:
        try:
            if parse_conditions(conditions, person):
                print("YES - %s" % person)
            else:
                print("--- - %s" % person)
        except P2ESError as e:
            print("ParseConditions error: %s" % str(e))
            raise
# See TRANSFORMATIONS.md file for details
import json
from pierky.p2es.errors import P2ESError
# Parse list of conditions c against data d.
# Returns: True | False (conditions matched / did not match).
# Raises exceptions: yes.
def parse_conditions_list(c, d):
if not c:
raise P2ESError('Empty list')
if isinstance(c[0], basestring):
if c[0] == 'AND':
if len(c) > 2:
for sub_c in c[1:]:
if not parse_conditions(sub_c, d):
return False
return True
else:
return False
elif c[0] == 'OR':
if len(c) > 2:
for sub_c in c[1:]:
if parse_conditions(sub_c, d):
return True
return False
else:
return True
else:
raise P2ESError(
'Logical groups must begin with "AND" or "OR" '
'("{}" found)'.format(c[0])
)
else:
# default to "AND" if not specified
for sub_c in c:
if not parse_conditions(sub_c, d):
return False
return True
# Parse condition c against data d, using operator opfield.
# Returns: True | False (condition matched / did not match).
# Raises exceptions: yes.
def parse_conditions_dict(c, d, opfield):
op = '='
n = None
v = None
for k in c:
if k == opfield:
op = c[k]
if not op in ('=', '>', '>=', '<', '<=', '!=', 'in', 'notin'):
raise P2ESError('Unexpected operator: "{}"'.format(op))
else:
if n is None:
n = k
v = c[k]
else:
raise P2ESError('Only one name/value pair allowed')
if op in ('in', 'notin') and not isinstance(v, list):
raise P2ESError('The "{}" operator requires a list'.format(op))
if n is None:
raise P2ESError('Name/value pair expected')
if n not in d:
return False
if op == '=':
return d[n] == v
elif op == '>':
return d[n] > v
elif op == '>=':
return d[n] >= v
elif op == '<':
return d[n] < v
elif op == '<=':
return d[n] <= v
elif op == '!=':
return d[n] != v
elif op == 'in':
return d[n] in v
elif op == 'notin':
return not d[n] in v
else:
raise P2ESError('Operator not implemented: "{}"'.format(op))
# Parse conditions c against data d.
# Return: True | False (conditions matched / did not match).
# Raises exception: yes.
def parse_conditions(c, d, opfield='__op__'):
if isinstance(c, list):
return parse_conditions_list(c, d)
elif isinstance(c, dict):
return parse_conditions_dict(c, d, opfield)
else:
raise P2ESError('Unexpected object type {} from {}'.format(
type(c), str(c)
))
# Tests if a transformation syntax is valid.
# Returns: True | False.
# Raises exceptions: yes.
def test_transformation(tr):
ret = True
try:
tr_det = 'Transformations matrix ({})'.format(transformation)
except:
tr_det = 'Transformations matrix'
if 'Conditions' not in tr:
raise P2ESError('{}, "Conditions" is missing'.format(tr_det))
if 'Actions' not in tr:
raise P2ESError('{}, "Actions" is missing'.format(tr_det))
try:
parse_conditions(tr['Conditions'], {})
except P2ESError as e:
raise P2ESError('{}, invalid "Conditions": {}'.format(tr_det, str(e)))
for action in tr['Actions']:
if 'Type' not in action:
raise P2ESError('{}, "Type" is missing'.format(tr_det))
tr_det += ', action type = {}'.format(action['Type'])
if action['Type'] not in ('AddField', 'AddFieldLookup', 'DelField'):
raise P2ESError('{}, "Type" unknown'.format(tr_det))
if 'Name' not in action:
raise P2ESError('{}, "Name" is missing'.format(tr_det))
if action['Type'] == 'AddField':
if 'Value' not in action:
raise P2ESError(
'{}, "Value" is missing for new field "{}"'.format(
tr_det, action['Name']
)
)
if action['Type'] == 'AddFieldLookup':
if 'LookupFieldName' not in action:
raise P2ESError(
'{}, "LookupFieldName" is missing for '
'new field "{}"'.format(tr_det, action['Name'])
)
if 'LookupTable' in action and 'LookupTableFile' in action:
raise P2ESError(
'{}, only one from "LookupTable" and '
'"LookupTableFile" allowed'.format(tr_det)
)
if 'LookupTable' not in action and 'LookupTableFile' not in action:
raise P2ESError(
'{}, "LookupTable" and "LookupTableFile" missing '
'for new field "{}"'.format(tr_det, action['Name'])
)
if 'LookupTableFile' in action:
try:
with open(action['LookupTableFile'], "r") as f:
action['LookupTable'] = json.load(f)
except Exception as e:
raise P2ESError(
'{}, error loading lookup table from {}: {}'.format(
tr_det, action['LookupTableFile'], str(e)
)
)
if __name__ == '__main__':
#Test conditions
#-------------------
#C = [ { "Name": "Bob" }, { "Age": 16, "__op__": ">=" } ]
#C = [ "OR", { "Name": "Bob" }, { "Name": "Tom" } ]
C = [ "OR",
[ { "Name": "Bob" }, { "Age": 16, "__op__": ">=" } ],
{ "Name": "Tom" },
[ { "Name": "Lisa" }, { "Age": 20, "__op__": ">=" } ]
]
#C = [ "Invalid" ]
Data = [
{ "Name": "Bob", "Age": 15 },
{ "Name": "Bob", "Age": 16 },
{ "Name": "Ken", "Age": 14 },
{ "Name": "Tom", "Age": 14 },
{ "Name": "Tom", "Age": 20 },
{ "Name": "Lisa", "Age": 15 },
{ "Name": "Lisa", "Age": 22 }
]
print(C)
for Person in Data:
try:
if parse_conditions(C, Person):
print( "YES - %s" % Person )
else:
print( "--- - %s" % Person )
except P2ESError as e:
print( "ParseConditions error: %s" % str(e) )
raise | 0.569613 | 0.320476 |
class WarthogError(Exception):
    """Base for all errors raised by the Warthog library.

    :param msg: human readable description of the failure
    """

    def __init__(self, msg):
        super(WarthogError, self).__init__()
        # Human readable description, also used as the string form.
        self.msg = msg

    def __str__(self):
        return self.msg
class WarthogConfigError(WarthogError):
    """Base for errors raised while parsing or loading configuration.

    Catch this to handle any configuration-related failure.
    """
class WarthogNoConfigFileError(WarthogConfigError):
    """No configuration file could be found.

    :param msg: human readable description of the failure
    :param locations_checked: optional iterable of paths that were searched
    """

    def __init__(self, msg, locations_checked=None):
        super(WarthogNoConfigFileError, self).__init__(msg)
        # Normalized to a list; empty when no locations were supplied.
        self.locations_checked = list(locations_checked) if locations_checked is not None else []

    def __str__(self):
        out = [self.msg]
        # Bug fix: the original tested `is not None`, which is always true
        # (the attribute is always a list), so an empty list still appended
        # a dangling "Locations checked:" clause. Test for non-emptiness.
        if self.locations_checked:
            out.append('Locations checked: ' + ', '.join(self.locations_checked))
        return '. '.join(out)
class WarthogMalformedConfigFileError(WarthogConfigError):
    """The configuration file is missing required sections or fields.

    :param msg: human readable description of the failure
    :param missing_section: name of the missing config section, if any
    :param missing_option: name of the missing config option, if any
    """

    def __init__(self, msg, missing_section=None, missing_option=None):
        super(WarthogMalformedConfigFileError, self).__init__(msg)
        self.missing_section = missing_section
        self.missing_option = missing_option

    def __str__(self):
        parts = [self.msg]
        for label, value in (('Missing-section', self.missing_section),
                             ('Missing-option', self.missing_option)):
            if value is not None:
                parts.append('{0}: {1}'.format(label, value))
        return '. '.join(parts)
class WarthogApiError(WarthogError):
    """Base for errors raised in the course of interacting with the load balancer.

    :param msg: human readable description of the failure
    :param api_msg: error message returned by the load balancer API, if any
    :param api_code: error code returned by the load balancer API, if any
    """

    def __init__(self, msg, api_msg=None, api_code=None):
        super(WarthogApiError, self).__init__(msg)
        self.api_msg = api_msg
        self.api_code = api_code

    def __str__(self):
        parts = [self.msg]
        if self.api_msg is not None:
            # A10 messages are inconsistent about trailing periods; strip
            # them so our own '. ' separator reads cleanly.
            parts.append('API-message: {0}'.format(self.api_msg.rstrip('.')))
        if self.api_code is not None:
            parts.append('API-code: {0}'.format(self.api_code))
        return '. '.join(parts)
class WarthogAuthFailureError(WarthogApiError):
    """The credentials for authentication are invalid.

    Inherits ``api_msg``/``api_code`` from :class:`WarthogApiError`.
    """


class WarthogInvalidSessionError(WarthogApiError):
    """The session ID or auth token used while performing some action is
    unrecognized.

    Inherits ``api_msg``/``api_code`` from :class:`WarthogApiError`.
    """
class WarthogNodeError(WarthogApiError):
    """Base for errors specific to operating on some individual node.

    :param msg: human readable description of the failure
    :param api_msg: error message returned by the load balancer API, if any
    :param api_code: error code returned by the load balancer API, if any
    :param server: the node the failed operation targeted, if known
    """

    def __init__(self, msg, api_msg=None, api_code=None, server=None):
        super(WarthogNodeError, self).__init__(msg, api_msg=api_msg, api_code=api_code)
        # Node the failed operation targeted; may be None.
        self.server = server
class WarthogNoSuchNodeError(WarthogNodeError):
    """The host being operated on is unrecognized.

    Inherits the ``server`` attribute from :class:`WarthogNodeError`.
    """


class WarthogPermissionError(WarthogNodeError):
    """The credentials lack required permissions to perform an operation.

    Inherits the ``server`` attribute from :class:`WarthogNodeError`.

    .. versionadded:: 1.999.0
    """


class WarthogNodeStatusError(WarthogNodeError):
    """There was some error while getting the status of a node.

    Inherits the ``server`` attribute from :class:`WarthogNodeError`.
    """
"""Base for all errors raised by the Warthog library."""
def __init__(self, msg):
super(WarthogError, self).__init__()
self.msg = msg
def __str__(self):
return self.msg
class WarthogConfigError(WarthogError):
"""Base for errors raised while parsing or loading configuration."""
class WarthogNoConfigFileError(WarthogConfigError):
"""No configuration file could be found."""
def __init__(self, msg, locations_checked=None):
super(WarthogNoConfigFileError, self).__init__(msg)
self.locations_checked = list(locations_checked) if locations_checked is not None else []
def __str__(self):
out = [self.msg]
if self.locations_checked is not None:
out.append('Locations checked: ' + ', '.join(self.locations_checked))
return '. '.join(out)
class WarthogMalformedConfigFileError(WarthogConfigError):
"""The configuration file is missing required sections or fields."""
def __init__(self, msg, missing_section=None, missing_option=None):
super(WarthogMalformedConfigFileError, self).__init__(msg)
self.missing_section = missing_section
self.missing_option = missing_option
def __str__(self):
out = [self.msg]
if self.missing_section is not None:
out.append('Missing-section: {0}'.format(self.missing_section))
if self.missing_option is not None:
out.append('Missing-option: {0}'.format(self.missing_option))
return '. '.join(out)
class WarthogApiError(WarthogError):
"""Base for errors raised in the course of interacting with the load balancer."""
def __init__(self, msg, api_msg=None, api_code=None):
super(WarthogApiError, self).__init__(msg)
self.api_msg = api_msg
self.api_code = api_code
def __str__(self):
out = [self.msg]
if self.api_msg is not None:
# Some error messages from the A10 end with a period, others don't
out.append('API-message: {0}'.format(self.api_msg.rstrip('.')))
if self.api_code is not None:
out.append('API-code: {0}'.format(self.api_code))
return '. '.join(out)
class WarthogAuthFailureError(WarthogApiError):
"""The credentials for authentication are invalid."""
class WarthogInvalidSessionError(WarthogApiError):
"""The session ID or auth token used while performing some action is unrecognized."""
class WarthogNodeError(WarthogApiError):
"""Base for errors specific to operating on some individual node."""
def __init__(self, msg, api_msg=None, api_code=None, server=None):
super(WarthogNodeError, self).__init__(msg, api_msg=api_msg, api_code=api_code)
self.server = server
class WarthogNoSuchNodeError(WarthogNodeError):
"""The host being operated on is unrecognized."""
class WarthogPermissionError(WarthogNodeError):
"""The credentials lack required permissions to perform an operation.
.. versionadded:: 1.999.0
"""
class WarthogNodeStatusError(WarthogNodeError):
"""There was some error while getting the status of a node.""" | 0.930553 | 0.064477 |
import logging
from util.bleu import Bleu # validating metrics: bleu (referenced open codes on Github)
class MetricWrapper:
    """Validation metrics wrapper.

    Converts model-output / target index arrays into word sequences using
    ``index2words`` and scores them with the configured metric (BLEU).
    """

    def __init__(self, index2words, start_symbol, end_symbol, pad_symbol, metric=None):
        '''
        Args:
            index2words: (dict) mapping from str(word id) to word.
            start_symbol: (str of int / int) id of the start symbol.
            end_symbol: (str of int / int) id of the end symbol.
            pad_symbol: (str of int / int) id of the pad symbol.
            metric: (class) only Bleu() supported; defaults to a fresh Bleu()
                per instance (fixes the shared mutable default ``Bleu()``
                that was evaluated once at import time).
        '''
        if metric is None:
            metric = Bleu()
        self.index2words = index2words
        self.metric = metric
        self.Ngram = metric.n
        self.start_symbol = self._coerce_symbol(start_symbol, 'start_symbol')
        self.end_symbol = self._coerce_symbol(end_symbol, 'end_symbol')
        self.pad_symbol = self._coerce_symbol(pad_symbol, 'pad_symbol')

    @staticmethod
    def _coerce_symbol(symbol, name):
        """Return *symbol* as an int; accepts ints or numeric strings.

        Raises:
            ValueError: when *symbol* is neither.
        """
        if isinstance(symbol, str) and symbol.isnumeric():
            return int(symbol)
        if isinstance(symbol, int):
            return symbol
        raise ValueError(f"Invalid {name}:{symbol}")

    def intarr2str(self, intarr):
        '''
        Convert an int array of sequences to word lists, cutting off the
        <START>, <END> and <PAD> symbols.

        Args:
            intarr: (ndarray) with int elements
        Returns:
            strlist: nested lists of words [["a","b","c"], ...]; an entry is
                None when a sequence decodes to no words.
        '''
        strlist = []
        for item in intarr:
            words = []
            for wordid in item:
                if wordid == self.start_symbol:  # TODO
                    # Reset to just the start token (original behavior).
                    words = [self.index2words[str(self.start_symbol)]]
                    continue
                if wordid == self.end_symbol or wordid == self.pad_symbol:
                    break
                words.append(self.index2words[str(wordid)])
            if len(words) < 1:
                strlist.append(None)
                logging.error(f"MetricWrapper: strlist_ is None.")
            else:
                strlist.append(words)
        return strlist

    def __call__(self, out, trg):
        """Score batch *out* against *trg*; both are (batch, seq) id tensors."""
        assert out.shape[0] == trg.shape[0], f"out:{out.shape[0]} trg:{trg.shape[0]}, dim-1 of out and trg must be the same."
        candidate_list = self.intarr2str(out.cpu().numpy())  # [["a","b","c"], ["g","h","j"], ]
        reference_list = self.intarr2str(trg.cpu().numpy())  # [["a","b","c"], ["g","h","j"], ]
        scores, _ = self.metric(candidates=candidate_list,
                                references=reference_list)  # scores: [[0.7, 0.6, 0.5, 0.2], [0.5, 0.5, 0.4, 0.1]]
        return scores
from util.bleu import Bleu # validating metrics: bleu (referenced open codes on Github)
class MetricWrapper:
" Validate Metrics wrapper. "
def __init__(self, index2words, start_symbol, end_symbol, pad_symbol, metric=Bleu()):
'''
Args:
index2words: (dict) e.g.
start_symbol: (str of int / int) should be the id of start symbol.
end_symbol: (str of int / int) should be the id of end symbol.
pad_symbol: (str of int / int) should be the id of pad symbol.
metric: (class) only support Bleu() now
'''
self.index2words = index2words
self.metric = metric
self.Ngram = metric.n
if isinstance(start_symbol, str) and start_symbol.isnumeric():
self.start_symbol = int(start_symbol)
elif isinstance(start_symbol, int):
self.start_symbol = start_symbol
else:
raise ValueError(f"Invalid start_symbol:{start_symbol}")
if isinstance(end_symbol, str) and end_symbol.isnumeric():
self.end_symbol = int(end_symbol)
elif isinstance(end_symbol, int):
self.end_symbol = end_symbol
else:
raise ValueError(f"Invalid end_symbol:{end_symbol}")
if isinstance(pad_symbol, str) and pad_symbol.isnumeric():
self.pad_symbol = int(pad_symbol)
elif isinstance(pad_symbol, int):
self.pad_symbol = pad_symbol
else:
raise ValueError(f"Invalid pad_symbol:{pad_symbol}")
def intarr2str(self, intarr):
'''
Add logics of processing the int array of sequences. Cut off the <START>, <END>, <PAD>.
Args:
intarr: (ndarray) with int elements
Returns:
strlist: (nested lists of words) [["a","b","c"], ["g","h","j"], ]
'''
strlist = []
for item in intarr:
strlist_ = []
for wordid in item:
if wordid == self.start_symbol: #TODO
strlist_ = [self.index2words[str(self.start_symbol)]]
continue
if wordid == self.end_symbol or wordid == self.pad_symbol:
break
strlist_.append(self.index2words[str(wordid)])
if len(strlist_)<1:
strlist.append(None)
logging.error(f"MetricWrapper: strlist_ is None.")
else:
strlist.append(strlist_)
return strlist
def __call__(self, out, trg):
assert out.shape[0] == trg.shape[0], f"out:{out.shape[0]} trg:{trg.shape[0]}, dim-1 of out and trg must be the same."
candidate_list = self.intarr2str(out.cpu().numpy()) # [["a","b","c"], ["g","h","j"], ]
reference_list = self.intarr2str(trg.cpu().numpy()) # [["a","b","c"], ["g","h","j"], ]
scores, _ = self.metric(candidates=candidate_list,
references=reference_list) # scores: [[0.7, 0.6, 0.5, 0.2], [0.5, 0.5, 0.4, 0.1]]
return scores | 0.489503 | 0.237852 |
from django.template.base import VariableDoesNotExist
EXCLUDE_EXCEPTIONS = [
VariableDoesNotExist,
]
# Lowercase only
EXCLUDE_PHRASES = [
'invalid http_host header',
]
def filter_exc_by_type(record):
    """Drop log records whose attached exception type is blacklisted.

    Returns False (filter out) when the record carries an exception whose
    type is in EXCLUDE_EXCEPTIONS, True otherwise.
    """
    exc_info = record.exc_info
    if not exc_info:
        return True
    return not isinstance(exc_info[1], tuple(EXCLUDE_EXCEPTIONS))
def filter_exc_by_phrase(record):
    """Drop log records containing a blacklisted phrase.

    Returns False (filter out) when any EXCLUDE_PHRASES entry occurs in the
    lowercased message, True otherwise.
    """
    # NOTE(review): assumes record.msg is a plain string; lazily-formatted
    # records are matched against the unformatted template — confirm that is
    # intended.
    message = record.msg.lower()
    return not any(phrase in message for phrase in EXCLUDE_PHRASES)
def _rotating_file_handler(level, filename, filters=None):
    """Build the shared RotatingFileHandler config (deduplicates the three
    file handlers that previously repeated this dict verbatim)."""
    handler = {
        'delay': True,
        'level': level,
        'class': 'logging.handlers.RotatingFileHandler',
        'maxBytes': 1000000,  # 1MB ~ 20k rows
        'backupCount': 5,
        'filename': filename,
        'formatter': 'verbose',
    }
    if filters is not None:
        handler['filters'] = filters
    return handler


def configure_logging(LOG_ROOT):
    """Return the dictConfig logging configuration rooted at *LOG_ROOT*.

    LOG_ROOT is joined with '/' so it is expected to be a pathlib.Path.
    """
    return {
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'verbose': {
                'format': '{levelname} | {asctime} | {module}: {message}',
                'style': '{',
            },
        },
        'filters': {
            'filter_exc_by_type': {
                '()': 'django.utils.log.CallbackFilter',
                'callback': filter_exc_by_type,
            },
            'filter_exc_by_phrase': {
                '()': 'django.utils.log.CallbackFilter',
                'callback': filter_exc_by_phrase,
            },
        },
        'handlers': {
            'debug_file': _rotating_file_handler(
                'DEBUG', LOG_ROOT / 'debug.log',
                filters=['filter_exc_by_type']),
            'main_file': _rotating_file_handler('INFO', LOG_ROOT / 'main.log'),
            'error_file': _rotating_file_handler('ERROR', LOG_ROOT / 'error.log'),
            'error_mail': {
                'level': 'ERROR',
                'class': 'django.utils.log.AdminEmailHandler',
                'formatter': 'verbose',
                'filters': ['filter_exc_by_phrase'],
            },
            'error_slack': {
                # Credentials are read directly from .env
                'level': 'ERROR',
                'class': 'webapp.settings.log.handlers.SlackHandler',
                'filters': ['filter_exc_by_phrase'],
            },
            'console': {
                'class': 'logging.StreamHandler',
                'level': 'INFO',
                'formatter': 'verbose',
            },
        },
        'loggers': {
            'django': {
                'handlers': [
                    'debug_file',
                    'main_file',
                    'error_file',
                    'error_mail',
                    'error_slack',
                    'console',
                ],
                'level': 'DEBUG',
                'propagate': True,
            },
            'django.utils.autoreload': {
                'level': 'WARNING',  # This logger is way too noisy on DEBUG
            },
        },
    }
from django.template.base import VariableDoesNotExist
EXCLUDE_EXCEPTIONS = [
VariableDoesNotExist,
]
# Lowercase only
EXCLUDE_PHRASES = [
'invalid http_host header',
]
def filter_exc_by_type(record):
"""Exclude blacklisted exception types."""
if record.exc_info:
exc = record.exc_info[1]
for excluded in EXCLUDE_EXCEPTIONS:
if isinstance(exc, excluded):
return False
return True
def filter_exc_by_phrase(record):
"""Exclude exceptions based on string content."""
for phrase in EXCLUDE_PHRASES:
if phrase in record.msg.lower():
return False
return True
def configure_logging(LOG_ROOT):
"""Return logging configuration."""
return {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '{levelname} | {asctime} | {module}: {message}',
'style': '{',
},
},
'filters': {
'filter_exc_by_type': {
'()': 'django.utils.log.CallbackFilter',
'callback': filter_exc_by_type,
},
'filter_exc_by_phrase': {
'()': 'django.utils.log.CallbackFilter',
'callback': filter_exc_by_phrase,
},
},
'handlers': {
'debug_file': {
'delay': True,
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 1000000, # 1MB ~ 20k rows
'backupCount': 5,
'filename': LOG_ROOT / 'debug.log',
'formatter': 'verbose',
'filters': ['filter_exc_by_type'],
},
'main_file': {
'delay': True,
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 1000000, # 1MB ~ 20k rows
'backupCount': 5,
'filename': LOG_ROOT / 'main.log',
'formatter': 'verbose',
},
'error_file': {
'delay': True,
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 1000000, # 1MB ~ 20k rows
'backupCount': 5,
'filename': LOG_ROOT / 'error.log',
'formatter': 'verbose',
},
'error_mail': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'formatter': 'verbose',
'filters': ['filter_exc_by_phrase'],
},
'error_slack': {
# Credentials are read directly from .env
'level': 'ERROR',
'class': 'webapp.settings.log.handlers.SlackHandler',
'filters': ['filter_exc_by_phrase'],
},
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'verbose',
},
},
'loggers': {
'django': {
'handlers': [
'debug_file',
'main_file',
'error_file',
'error_mail',
'error_slack',
'console'
],
'level': 'DEBUG',
'propagate': True,
},
'django.utils.autoreload': {
'level': 'WARNING', # This logger is way too noisy on DEBUG
}
},
} | 0.558086 | 0.099602 |
""" collect network device data via napalm and write it to influxdb """
import logging
from influxdb import InfluxDBClient
from napalm import get_network_driver
from .get_interfaces_counters import get_interfaces_counters
from .get_optics import get_optics
class NapalmInflux(object):
    """Poll a network device via NAPALM and write the results to InfluxDB.

    The instance owns a single InfluxDB client; every :meth:`run` call opens
    a fresh device connection, collects interface and optics counters, and
    writes both series to the database.
    """

    def __init__(self, host, port, user, passwd, db):
        """
        set up logger and influx connection

        :param host: influx hostname
        :param port: influx port
        :param user: influx user
        :param passwd: influx passwd
        :param db: influx db
        """
        self.log = logging.getLogger('NapalmInflux')
        # connect to influx
        self.influx = InfluxDBClient(host, port, user, passwd, db)

    def run(self, device_host, user, passwd, device_os, tags):
        """
        connect to provided device, fetch data and write to influx

        :param device_host: (str) device hostname
        :param user: (str) device user
        :param passwd: (str) device passwd
        :param device_os: (str) a supported napalm os
        :param tags: (dict or None) dict of tags as specified in config file
        :raises Exception: re-raised after being printed so the caller can
            decide how to handle a failed poll
        """
        if tags is None:
            tags = {}
        device = None
        try:
            print("polling device: " + device_host)
            # open device connection with napalm
            driver = get_network_driver(device_os)
            device = driver(device_host, user, passwd)
            device.open()
            # get interface counter data
            iface_counters = get_interfaces_counters(device_host, device, tags)
            print('    done polling device interfaces')
            # get optics data
            optics_counters = get_optics(device_host, device, tags)
            print('    done polling device optical levels')
            # write data to influx
            print('writing data to influx')
            self.influx.write_points(iface_counters)
            self.influx.write_points(optics_counters)
            # self.log.info('done polling device: %s', device_host)
            print('    done writing data to influx')
        except Exception as err:
            print(err)
            raise
        finally:
            # Fix: the NAPALM session was previously never closed, leaking
            # one SSH/API connection per poll on both success and error paths.
            if device is not None:
                try:
                    device.close()
                except Exception:
                    # best-effort cleanup; do not mask the original error
                    self.log.debug('failed to close connection to %s',
                                   device_host, exc_info=True)
import logging
from influxdb import InfluxDBClient
from napalm import get_network_driver
from .get_interfaces_counters import get_interfaces_counters
from .get_optics import get_optics
class NapalmInflux(object):
"""
the NapalmInflux class implements the influx as well as device
connection, it polls the device and writes the resulting data
to the db
"""
def __init__(self, host, port, user, passwd, db):
"""
set up logger and influx connection
:param host: influx hostname
:param port: influx port
:param user: influx user
:param passwd: influx passwd
:param db: influx db
"""
self.log = logging.getLogger('NapalmInflux')
# connect to influx
self.influx = InfluxDBClient(host, port, user, passwd, db)
def run(self, device_host, user, passwd, device_os, tags):
"""
connect to provided device, fetch data and write to influx
:param device_host: (str) device hostname
:param user: (str) device user
:param passwd: (str) device passwd
:param os: (str) a supported napalm os
:param tags: (dict) dict of tags as specified in config file
"""
if tags is None:
tags = {}
try:
print("polling device: " + device_host)
# open device connection with napalm
driver = get_network_driver(device_os)
device = driver(device_host, user, passwd)
device.open()
# get interface counter data
iface_counters = get_interfaces_counters(device_host, device, tags)
print(' done polling device interfaces')
# get optics data
optics_counters = get_optics(device_host, device, tags)
print(' done polling device optical levels')
# write data to influx
print('writing data to influx')
self.influx.write_points(iface_counters)
self.influx.write_points(optics_counters)
# self.log.info('done polling device: %s', device_host)
print(' done writing data to influx')
except Exception as err:
print(err)
raise | 0.538255 | 0.294373 |
from itertools import combinations
from fcapsy.decorators import metadata
from fcapsy import Concept, Context
@metadata(name='RiceSiffConcepts', short_name='RSConcepts')
def concept_subset(context: Context, similarity_measure) -> set:
    """
    Experimental implementation of
    Rice, <NAME>., and <NAME>. "Clusters, concepts, and pseudometrics."
    Electronic Notes in Theoretical Computer Science 40 (2001): 323-346.

    Repeatedly merges the closest pairs of worklist concepts (using
    1 - similarity of intents as the distance) until at most one remains.

    :param context: formal context providing the ``up``/``down`` derivations
    :param similarity_measure: callable mapping two intents to a similarity
    :return: set of discovered Concept instances
        (fix: annotated ``-> set`` — the function builds and returns a set;
        the previous ``-> list`` annotation was wrong)
    """
    init_intent = context.Attributes.supremum
    init_extent = context.down(init_intent)
    init_concept = Concept(init_extent, init_intent)
    atoms = context.Objects.supremum.atoms()
    # init worklist with all atoms
    worklist = {Concept.from_intent(
        context.up(extent), context) for extent in atoms}
    # init resulting concepts with init_concept and worklist
    concepts = set(worklist)
    concepts.add(init_concept)
    while len(worklist) > 1:
        # create all possible pairs of different concepts from worklist
        concept_combinations = tuple(combinations(worklist, 2))
        # calculate all distances (loop variable renamed from `concepts`,
        # which shadowed the result set above and was easy to misread)
        distances = [1 - similarity_measure(
            pair[0].intent, pair[1].intent) for pair in concept_combinations]
        # select minimal distance from all distances
        min_distance = min(distances)
        # get all possible pairs of concepts with minimal distance
        concept_pairs_min_distance = {concept_tuple for concept_tuple, distance in zip(
            concept_combinations, distances) if distance == min_distance}
        # flatten pairs and transform them to set
        concepts_from_pairs = {
            concept for concept_pair in concept_pairs_min_distance for concept in concept_pair}
        # calculate new concepts and add them to worklist and result concepts
        for concept_tuple in concept_pairs_min_distance:
            extent = concept_tuple[0].extent | concept_tuple[1].extent
            new_intent = context.up(extent)
            new_extent = context.down(new_intent)
            new_concept = Concept(new_extent, new_intent)
            worklist.add(new_concept)
            concepts.add(new_concept)
        # remove already processed concepts
        worklist = worklist.difference(concepts_from_pairs)
    return concepts
from fcapsy.decorators import metadata
from fcapsy import Concept, Context
@metadata(name='RiceSiffConcepts', short_name='RSConcepts')
def concept_subset(context: Context, similarity_measure) -> list:
"""
Experimental implementation of
Rice, <NAME>., and <NAME>. "Clusters, concepts, and pseudometrics."
Electronic Notes in Theoretical Computer Science 40 (2001): 323-346.
"""
init_intent = context.Attributes.supremum
init_extent = context.down(init_intent)
init_concept = Concept(init_extent, init_intent)
atoms = context.Objects.supremum.atoms()
# init worklist with all atoms
worklist = {Concept.from_intent(
context.up(extent), context) for extent in atoms}
# init resulting concepts with init_concept and worklist
concepts = set(worklist)
concepts.add(init_concept)
while len(worklist) > 1:
# create all possible pairs of different concepts from worklist
concept_combinations = tuple(combinations(worklist, 2))
# calculate all distances
distances = [1 - similarity_measure(
concepts[0].intent, concepts[1].intent) for concepts in concept_combinations]
# select minimal distance from all distances
min_distance = min(distances)
# get all possible pairs of concepts with minimal distance
concept_pairs_min_distance = {concept_tuple for concept_tuple, distance in zip(
concept_combinations, distances) if distance == min_distance}
# flatten pairs and transform them to set
concepts_from_pairs = {
concept for concept_pair in concept_pairs_min_distance for concept in concept_pair}
# calculate new concepts and add them to worklist and result concepts
for concept_tuple in concept_pairs_min_distance:
extent = concept_tuple[0].extent | concept_tuple[1].extent
new_intent = context.up(extent)
new_extent = context.down(new_intent)
new_concept = Concept(new_extent, new_intent)
worklist.add(new_concept)
concepts.add(new_concept)
# remove already processed concepts
worklist = worklist.difference(concepts_from_pairs)
return concepts | 0.770465 | 0.284999 |
import uuid
import django.contrib.postgres.indexes
import django.contrib.postgres.search
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Item and User tables plus their indexes and constraints."""

    dependencies = [
        ("backend", "0001"),
    ]

    operations = [
        migrations.CreateModel(
            name="Item",
            fields=[
                (
                    "id",
                    # Random UUID primary key instead of an auto-increment int.
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True, db_index=True)),
                ("updated", models.DateTimeField(auto_now=True, db_index=True)),
                ("name", models.TextField()),
                ("description", models.TextField()),
                (
                    "search_vector",
                    # Nullable full-text vector; GIN-indexed further below.
                    django.contrib.postgres.search.SearchVectorField(null=True),
                ),
            ],
        ),
        migrations.CreateModel(
            name="User",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True, db_index=True)),
                ("updated", models.DateTimeField(auto_now=True, db_index=True)),
                ("full_name", models.TextField()),
                ("email", models.TextField()),
                ("hashed_password", models.TextField()),
                (
                    "search_vector",
                    django.contrib.postgres.search.SearchVectorField(null=True),
                ),
            ],
        ),
        migrations.AddIndex(
            model_name="user",
            index=models.Index(fields=["email"], name="backend_use_email_db66b5_idx"),
        ),
        migrations.AddIndex(
            model_name="user",
            # GIN index so full-text search on search_vector stays fast.
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["search_vector"], name="backend_use_search__6cf6bf_gin"
            ),
        ),
        migrations.AddConstraint(
            model_name="user",
            constraint=models.UniqueConstraint(fields=("email",), name="unique_email"),
        ),
        migrations.AddField(
            model_name="item",
            name="owner",
            # Added after both models exist; items survive owner deletion.
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="backend.user",
            ),
        ),
        migrations.AddIndex(
            model_name="item",
            index=models.Index(
                fields=["name", "owner"], name="backend_ite_name_c0732e_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="item",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["search_vector"], name="backend_ite_search__4170f7_gin"
            ),
        ),
        migrations.AddConstraint(
            model_name="item",
            constraint=models.UniqueConstraint(
                fields=("name", "owner"), name="unique_owner_and_name"
            ),
        ),
] | {{ cookiecutter.project_slug }}/backend/migrations/0002.py | import uuid
import django.contrib.postgres.indexes
import django.contrib.postgres.search
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("backend", "0001"),
]
operations = [
migrations.CreateModel(
name="Item",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated", models.DateTimeField(auto_now=True, db_index=True)),
("name", models.TextField()),
("description", models.TextField()),
(
"search_vector",
django.contrib.postgres.search.SearchVectorField(null=True),
),
],
),
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("created", models.DateTimeField(auto_now_add=True, db_index=True)),
("updated", models.DateTimeField(auto_now=True, db_index=True)),
("full_name", models.TextField()),
("email", models.TextField()),
("hashed_password", models.TextField()),
(
"search_vector",
django.contrib.postgres.search.SearchVectorField(null=True),
),
],
),
migrations.AddIndex(
model_name="user",
index=models.Index(fields=["email"], name="backend_use_email_db66b5_idx"),
),
migrations.AddIndex(
model_name="user",
index=django.contrib.postgres.indexes.GinIndex(
fields=["search_vector"], name="backend_use_search__6cf6bf_gin"
),
),
migrations.AddConstraint(
model_name="user",
constraint=models.UniqueConstraint(fields=("email",), name="unique_email"),
),
migrations.AddField(
model_name="item",
name="owner",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="backend.user",
),
),
migrations.AddIndex(
model_name="item",
index=models.Index(
fields=["name", "owner"], name="backend_ite_name_c0732e_idx"
),
),
migrations.AddIndex(
model_name="item",
index=django.contrib.postgres.indexes.GinIndex(
fields=["search_vector"], name="backend_ite_search__4170f7_gin"
),
),
migrations.AddConstraint(
model_name="item",
constraint=models.UniqueConstraint(
fields=("name", "owner"), name="unique_owner_and_name"
),
),
] | 0.446012 | 0.170819 |
import os
from oscar.defaults import *
from oscar import OSCAR_MAIN_TEMPLATE_DIR
from oscar import get_core_apps
from decouple import config, Csv
import dj_database_url

# Django settings for the Oscar shop project.  Secrets and deploy-specific
# values come from the environment via python-decouple; the database can be
# overridden by DATABASE_URL at the bottom of this module.

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

SITE_ID = 1

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)

ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'frobshop.apps.FrobshopConfig',
    'django.contrib.flatpages',
    'paypal',
    'storages',
    'compressor',
    'widget_tweaks',
] + get_core_apps()

# NOTE(review): Django >= 1.10 reads MIDDLEWARE, not MIDDLEWARE_CLASSES;
# confirm the installed Django version before renaming this setting.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'oscar.apps.basket.middleware.BasketMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
]

ROOT_URLCONF = 'oscar_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
            OSCAR_MAIN_TEMPLATE_DIR
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.contrib.messages.context_processors.messages',
                'oscar.apps.search.context_processors.search_form',
                'oscar.apps.promotions.context_processors.promotions',
                'oscar.apps.checkout.context_processors.checkout',
                'oscar.apps.customer.notifications.context_processors.notifications',
                'oscar.core.context_processors.metadata',
            ],
        },
    },
]

WSGI_APPLICATION = 'oscar_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': config('DB_NAME'),
        'USER': config('DB_USER'),
        'PASSWORD': config('DB_PASSWORD'),
        'HOST': config('DB_HOST'),
        'PORT': config('DB_PORT'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTHENTICATION_BACKENDS = (
    'oscar.apps.customer.auth_backends.EmailBackend',
    'django.contrib.auth.backends.ModelBackend',
)

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
}

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'US/Pacific'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Fix: removed the dead STATIC_URL = '/static/' and STATIC_ROOT = 'static'
# assignments that were unconditionally overwritten below; the effective
# values (S3-backed) are unchanged.
AWS_STORAGE_BUCKET_NAME = config('BUCKET_NAME')
AWS_ACCESS_KEY_ID = config('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME

MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'

STATIC_ROOT = 'lostkawzlifestyle1/staticfiles/'
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)

# Allow DATABASE_URL (e.g. on Heroku) to override the default DB settings.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
from oscar.defaults import *
from oscar import OSCAR_MAIN_TEMPLATE_DIR
from oscar import get_core_apps
from decouple import config, Csv
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SITE_ID = 1
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'frobshop.apps.FrobshopConfig',
'django.contrib.flatpages',
'paypal',
'storages',
'compressor',
'widget_tweaks',
] + get_core_apps()
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
]
ROOT_URLCONF = 'oscar_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
OSCAR_MAIN_TEMPLATE_DIR
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.contrib.messages.context_processors.messages',
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.core.context_processors.metadata',
],
},
},
]
WSGI_APPLICATION = 'oscar_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': config('DB_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'US/Pacific'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
STATIC_URL = '/static/'
STATIC_ROOT = ('static')
AWS_STORAGE_BUCKET_NAME = config('BUCKET_NAME')
AWS_ACCESS_KEY_ID = config('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
STATIC_ROOT = 'lostkawzlifestyle1/staticfiles/'
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env) | 0.368974 | 0.063832 |
import argparse
import json
import os
import sys
import time
import requests
MAX_FAIL = 5
PAGESIZE = 2000
VERSION = 0.1
# Interval in seconds between successive requests
WAIT_PERIOD = 5
class OutputManager:
    """Tiny console reporter that emits messages only when verbosity is on."""

    def __init__(self, verbose=False):
        # Remember once whether messages should be shown at all.
        self.verbose = verbose

    def print(self, string):
        """Echo *string* to stdout, but only in verbose mode."""
        if not self.verbose:
            return
        print(string)
def store_data(filename, data, count):
    """Write *data* to *filename* wrapped in the NVD JSON feed envelope.

    :param filename: path of the JSON file to (over)write
    :param data: list of CVE item dicts
    :param count: number of CVEs, recorded in the envelope header
    """
    # Build the envelope first, then serialise it in one go.
    envelope = {
        "CVE_data_type": "CVE",
        "CVE_data_format": "MITRE",
        "CVE_data_version": "4.0",
        "CVE_data_numberOfCVEs": count,
        "CVE_Items": data,
    }
    with open(filename, "w") as file_handle:
        json.dump(envelope, file_handle)
def process_data(elements):
    """Print a one-line summary (ID, score, severity, vector, problem) per CVE.

    :param elements: iterable of NVD "CVE_Items" dicts
    """
    for cve_item in elements:
        cve = {
            "ID": cve_item["cve"]["CVE_data_meta"]["ID"],
            "description": cve_item["cve"]["description"]["description_data"][0][
                "value"
            ],
            "severity": "unknown",
            "score": "unknown",
            "CVSS_version": "unknown",
            "vector": "TBD",
            "problem": "unknown",
        }
        # Prefer CVSS v3 metrics; fall back to v2 when v3 is absent.
        if "baseMetricV3" in cve_item["impact"]:
            cvss = cve_item["impact"]["baseMetricV3"]["cvssV3"]
            cve["severity"] = cvss["baseSeverity"]
            cve["score"] = cvss["baseScore"]
            cve["vector"] = cvss["vectorString"]
            cve["CVSS_version"] = 3
        elif "baseMetricV2" in cve_item["impact"]:
            cve["severity"] = cve_item["impact"]["baseMetricV2"]["severity"]
            cve["score"] = cve_item["impact"]["baseMetricV2"]["cvssV2"]["baseScore"]
            cve["vector"] = cve_item["impact"]["baseMetricV2"]["cvssV2"]["vectorString"]
            cve["CVSS_version"] = 2
        if cve["vector"] != "TBD":
            try:
                descriptions = cve_item["cve"]["problemtype"]["problemtype_data"][0][
                    "description"
                ]
                if len(descriptions) > 0:
                    problem = ""
                    for data_item in descriptions:
                        # Fix: accumulate every problem type — the previous
                        # plain assignment kept only the last entry, defeating
                        # the ';' join and the trailing [:-1] trim.
                        problem += data_item["value"] + ";"
                    cve["problem"] = problem[:-1]
            except (KeyError, IndexError):
                # Malformed or absent problemtype data: leave "unknown".
                pass
        print(cve["ID"], cve["score"], cve["severity"], cve["vector"], cve["problem"])
def get_data(startdate, enddate, outdir, config):
    """Page through the NVD 1.0 REST API and store all retrieved CVE items.

    :param startdate: timestamp string; publication start (when *enddate* is
        set) or modification start (when *enddate* is "").
    :param enddate: publication-end timestamp, or "" to query by
        modification date instead.
    :param outdir: output path prefix (string-concatenated with the file
        name, so it should end with a path separator).
    :param config: [verbose, pagesize, interval, show_data] list.
    :return: number of CVE items retrieved.
    """
    nvd_feed = "https://services.nvd.nist.gov/rest/json/cves/1.0"
    default_filename = "NVD_Data_1.json"
    index = 0
    # Extract configuration parameters
    verbose = config[0]
    pagesize = config[1]
    interval = config[2]
    show_data = config[3]
    # pagesize = 2000
    items = 0
    finished = False
    query_count = 0
    fail_count = 0
    file_data = []
    om = OutputManager(verbose)
    om.print(f"Retrieve CVEs from {startdate}")
    if enddate != "":
        om.print(f"Retrieve CVEs to {enddate}")
    while not finished:
        if enddate != "":
            # Year/range mode: filter by publication date.
            query = {
                "resultsPerPage": pagesize,
                "startIndex": index,
                "pubStartDate": startdate,
                "pubEndDate": enddate,
            }
            filename = f"{outdir}NVD_data_{startdate[:4]}.json"
        else:
            # Incremental mode: filter by last-modification date.
            query = {
                "resultsPerPage": pagesize,
                "startIndex": index,
                "modStartDate": startdate,
            }
            filename = f"{outdir}{default_filename}"
        try:
            response = requests.get(nvd_feed, params=query)
            om.print(f"Response :{response.status_code}")
            query_count += 1
            if response.status_code == 200:
                j = response.json()
                total_results = j["totalResults"]
                om.print(f"Query {query_count}")
                om.print(f"\tTotal results {total_results}")
                om.print(f"\tStart index {j['startIndex']}")
                no_of_results = j["resultsPerPage"]
                om.print(f"\tNumber of results returned: {no_of_results}")
                # Now process data
                if show_data:
                    process_data(j['result']["CVE_Items"])
                # filename = f"{outdir}NVD_data_{query_count}.json"
                # store_data(filename, j['result']["CVE_Items"], no_of_results)
                items = items + no_of_results
                for item in j["result"]["CVE_Items"]:
                    file_data.append(item.copy())
                # Have we finished?
                if items < total_results:
                    index = index + pagesize
                    # Calculate number of requests remaining
                    count = int((total_results - items) / pagesize) + 1
                    om.print(f"Estimated remaining time {count * interval} seconds")
                    # And wait
                    # om.print(f"Pause for {interval} seconds")
                    time.sleep(interval)
                else:
                    # All pages collected: write a single combined file.
                    finished = True
                    store_data(filename, file_data, total_results)
                    # NOTE(review): f-string has no placeholder — probably
                    # intended f"Data saved in {filename}"; confirm intent.
                    om.print(f"Data saved in (unknown)")
            else:
                # Non-200 response: back off and retry up to MAX_FAIL times.
                fail_count += 1
                finished = fail_count == MAX_FAIL
                if not finished:
                    om.print(f"Pause for {interval} seconds")
                    time.sleep(interval)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; consider narrowing to requests.RequestException.
            print(f"Failed to connect to NVD webservice {nvd_feed}")
            fail_count += 1
            finished = fail_count == MAX_FAIL
            if not finished:
                om.print(f"Pause for {interval} seconds")
                time.sleep(interval)
    return items
# Main
# CLI entry point: parse options, derive the start/end dates, then download.
if __name__ == "__main__":
    desc = "Download NVD data and store data in JSON file"
    # Set all parser arguments here.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=desc
    )
    parser.add_argument(
        "-o", "--output", help="Output directory", dest="output_directory", default="./"
    )
    parser.add_argument(
        "-f",
        "--file",
        help="Name of file with time of last update",
        dest="update_file",
        default="",
    )
    parser.add_argument(
        "-d",
        "--date",
        help="Download all items modified from specified date (YYYY-MM-DD)",
        dest="start_date",
        default="",
    )
    parser.add_argument(
        "-a", "--all", help="Download all items", dest="all_items", action="store_true"
    )
    parser.add_argument(
        "-y",
        "--year",
        help="Download all items published for specified year (YYYY)",
        dest="year",
        default="",
    )
    parser.add_argument(
        "-t",
        "--time",
        help="Time (secs) between successive requests. Default "
        + str(WAIT_PERIOD)
        + " secs.",
        dest="interval",
        default=WAIT_PERIOD,
    )
    parser.add_argument(
        "-p",
        "--pagesize",
        help="Maximum number of items per request. Default "
        + str(PAGESIZE)
        + " items.",
        dest="page_size",
        default=PAGESIZE,
    )
    parser.add_argument(
        "-V", "--verbose", help="Verbose reporting", dest="verbose", action="store_true"
    )
    parser.add_argument(
        "-v",
        "--version",
        help="Show version information and exit",
        dest="version",
        action="store_true",
    )
    parser.add_argument(
        "-s", "--show", help="Output retrieved records to console", dest="show_data", action="store_true"
    )
    # Parse arguments in case they are provided.
    params = parser.parse_args()
    version = params.version
    update_file = params.update_file
    all_items = params.all_items
    year = params.year
    # start_date = params.start_date
    output_directory = params.output_directory
    request_interval = int(params.interval)
    page_size = int(params.page_size)
    end_date = ""
    # Validate parameters
    if version:
        print("Version", VERSION)
        sys.exit(0)
    # NOTE(review): range(20, 5000) excludes 5000 itself — confirm the
    # intended upper bound for the NVD page size.
    if page_size not in range(20, 5000):
        print(f"[ERROR] Specified request size ({page_size}) is out of range")
        sys.exit(-1)
    # Determine dates for record retrieval.
    # Precedence: --file mtime > --all > --year > --date; otherwise abort.
    if update_file != "":
        # Time from last time file was updated
        try:
            update_time = os.path.getmtime(update_file)
            start_date = time.strftime(
                "%Y-%m-%dT%H:%M:%S:000 UTC-00:00", time.gmtime(update_time)
            )
        except OSError:
            print("[ERROR] File '%s' does not exist or is inaccessible" % update_file)
            sys.exit(-1)
    elif all_items:
        # All files since start of 1999
        start_date = "1999-01-01T00:00:00:000 UTC-00:00"
    elif year != "":
        start_date = f"{year}-01-01T00:00:00:000 UTC-00:00"
        end_date = f"{year}-12-31T23:59:59:000 UTC-00:00"
    elif params.start_date != "":
        start_date = f"{params.start_date}T00:00:00:000 UTC-00:00"
        # print ("Date",start_date)
    else:
        print("[ERROR] Start date not specified")
        sys.exit(-1)
    print(
        "Number of records retrieved",
        get_data(start_date, end_date, output_directory, [params.verbose, page_size, request_interval, params.show_data]),
    )
# End | nvdget.py | import argparse
import json
import os
import sys
import time
import requests
MAX_FAIL = 5
PAGESIZE = 2000
VERSION = 0.1
# Interval in seconds between successive requests
WAIT_PERIOD = 5
class OutputManager:
def __init__(self, verbose=False):
self.verbose = verbose
def print(self, string):
if self.verbose:
print(string)
def store_data(filename, data, count):
with open(filename, "w") as file_handle:
cvedata = {
"CVE_data_type": "CVE",
"CVE_data_format": "MITRE",
"CVE_data_version": "4.0",
"CVE_data_numberOfCVEs": count,
"CVE_Items": data,
}
json.dump(cvedata, file_handle)
def process_data(elements):
for cve_item in elements:
# print(cve_item)
cve = {
"ID": cve_item["cve"]["CVE_data_meta"]["ID"],
"description": cve_item["cve"]["description"]["description_data"][0][
"value"
],
"severity": "unknown",
"score": "unknown",
"CVSS_version": "unknown",
"vector": "TBD",
"problem": "unknown",
}
if "baseMetricV3" in cve_item["impact"]:
cve["severity"] = cve_item["impact"]["baseMetricV3"]["cvssV3"][
"baseSeverity"
]
cve["score"] = cve_item["impact"]["baseMetricV3"]["cvssV3"]["baseScore"]
cve["vector"] = cve_item["impact"]["baseMetricV3"]["cvssV3"]["vectorString"]
cve["CVSS_version"] = 3
elif "baseMetricV2" in cve_item["impact"]:
cve["severity"] = cve_item["impact"]["baseMetricV2"]["severity"]
cve["score"] = cve_item["impact"]["baseMetricV2"]["cvssV2"]["baseScore"]
cve["vector"] = cve_item["impact"]["baseMetricV2"]["cvssV2"]["vectorString"]
cve["CVSS_version"] = 2
if cve["vector"] != "TBD":
try:
# cve["problem"] = cve_item["cve"]["problemtype"]["problemtype_data"][0]["description"][0]["value"]
check = cve_item["cve"]["problemtype"]["problemtype_data"][0][
"description"
]
if len(check) > 0:
problem = ""
for data_item in cve_item["cve"]["problemtype"]["problemtype_data"][0][
"description"
]:
# print (d["value"])
problem = data_item["value"] + ";"
cve["problem"] = problem[:-1]
except:
# print("Error with",cve_item["cve"]["CVE_data_meta"]["ID"] )
pass
print(cve["ID"], cve["score"], cve["severity"], cve["vector"], cve["problem"])
def get_data(startdate, enddate, outdir, config):
    """Page through the NVD 1.0 REST API and save all matching CVEs to disk.

    startdate -- pubStartDate/modStartDate string (NVD timestamp format)
    enddate   -- pubEndDate string, or "" to query by modification date
    outdir    -- output directory prefix; it is string-concatenated with the
                 filename, so it should end with a path separator
    config    -- list of [verbose, pagesize, interval, show_data]

    Returns the number of CVE items retrieved.  Gives up after MAX_FAIL
    consecutive failed requests.
    """
    nvd_feed = "https://services.nvd.nist.gov/rest/json/cves/1.0"
    default_filename = "NVD_Data_1.json"
    index = 0
    # Extract configuration parameters
    verbose = config[0]
    pagesize = config[1]
    interval = config[2]
    show_data = config[3]
    # pagesize = 2000
    items = 0
    finished = False
    query_count = 0
    fail_count = 0
    file_data = []
    om = OutputManager(verbose)
    om.print(f"Retrieve CVEs from {startdate}")
    if enddate != "":
        om.print(f"Retrieve CVEs to {enddate}")
    while not finished:
        # An end date means a published-date (yearly) query; otherwise we
        # fetch everything modified since startdate.
        if enddate != "":
            query = {
                "resultsPerPage": pagesize,
                "startIndex": index,
                "pubStartDate": startdate,
                "pubEndDate": enddate,
            }
            filename = f"{outdir}NVD_data_{startdate[:4]}.json"
        else:
            query = {
                "resultsPerPage": pagesize,
                "startIndex": index,
                "modStartDate": startdate,
            }
            filename = f"{outdir}{default_filename}"
        try:
            response = requests.get(nvd_feed, params=query)
            om.print(f"Response :{response.status_code}")
            query_count += 1
            if response.status_code == 200:
                j = response.json()
                total_results = j["totalResults"]
                om.print(f"Query {query_count}")
                om.print(f"\tTotal results {total_results}")
                om.print(f"\tStart index {j['startIndex']}")
                no_of_results = j["resultsPerPage"]
                om.print(f"\tNumber of results returned: {no_of_results}")
                # Now process data
                if show_data:
                    process_data(j['result']["CVE_Items"])
                # filename = f"{outdir}NVD_data_{query_count}.json"
                # store_data(filename, j['result']["CVE_Items"], no_of_results)
                items = items + no_of_results
                # Accumulate every page in memory; written out once at the end.
                for item in j["result"]["CVE_Items"]:
                    file_data.append(item.copy())
                # Have we finished?
                if items < total_results:
                    index = index + pagesize
                    # Calculate number of requests remaining
                    count = int((total_results - items) / pagesize) + 1
                    om.print(f"Estimated remaining time {count * interval} seconds")
                    # And wait
                    # om.print(f"Pause for {interval} seconds")
                    time.sleep(interval)
                else:
                    finished = True
                    store_data(filename, file_data, total_results)
                    # NOTE(review): placeholder message -- presumably meant to
                    # report {filename}; confirm and fix separately.
                    om.print(f"Data saved in (unknown)")
            else:
                # Non-200 response: retry until MAX_FAIL consecutive failures.
                # NOTE(review): fail_count is never reset on success.
                fail_count += 1
                finished = fail_count == MAX_FAIL
                if not finished:
                    om.print(f"Pause for {interval} seconds")
                    time.sleep(interval)
        except:
            # NOTE(review): bare except also hides JSON/KeyError bugs, not
            # just connection failures -- consider narrowing.
            print(f"Failed to connect to NVD webservice {nvd_feed}")
            fail_count += 1
            finished = fail_count == MAX_FAIL
            if not finished:
                om.print(f"Pause for {interval} seconds")
                time.sleep(interval)
    return items
# Main
if __name__ == "__main__":
    desc = "Download NVD data and store data in JSON file"
    # Set all parser arguments here.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=desc
    )
    parser.add_argument(
        "-o", "--output", help="Output directory", dest="output_directory", default="./"
    )
    parser.add_argument(
        "-f",
        "--file",
        help="Name of file with time of last update",
        dest="update_file",
        default="",
    )
    parser.add_argument(
        "-d",
        "--date",
        help="Download all items modified from specified date (YYYY-MM-DD)",
        dest="start_date",
        default="",
    )
    parser.add_argument(
        "-a", "--all", help="Download all items", dest="all_items", action="store_true"
    )
    parser.add_argument(
        "-y",
        "--year",
        help="Download all items published for specified year (YYYY)",
        dest="year",
        default="",
    )
    parser.add_argument(
        "-t",
        "--time",
        help="Time (secs) between successive requests. Default "
        + str(WAIT_PERIOD)
        + " secs.",
        dest="interval",
        default=WAIT_PERIOD,
    )
    parser.add_argument(
        "-p",
        "--pagesize",
        help="Maximum number of items per request. Default "
        + str(PAGESIZE)
        + " items.",
        dest="page_size",
        default=PAGESIZE,
    )
    parser.add_argument(
        "-V", "--verbose", help="Verbose reporting", dest="verbose", action="store_true"
    )
    parser.add_argument(
        "-v",
        "--version",
        help="Show version information and exit",
        dest="version",
        action="store_true",
    )
    parser.add_argument(
        "-s", "--show", help="Output retrieved records to console", dest="show_data", action="store_true"
    )
    # Parse arguments in case they are provided.
    params = parser.parse_args()
    version = params.version
    update_file = params.update_file
    all_items = params.all_items
    year = params.year
    # start_date = params.start_date
    output_directory = params.output_directory
    request_interval = int(params.interval)
    page_size = int(params.page_size)
    end_date = ""
    # Validate parameters
    if version:
        print("Version", VERSION)
        sys.exit(0)
    # NOTE(review): range() upper bound is exclusive, so a page size of
    # exactly 5000 is rejected -- confirm this matches the NVD limit.
    if page_size not in range(20, 5000):
        print(f"[ERROR] Specified request size ({page_size}) is out of range")
        sys.exit(-1)
    # Determine dates for record retrieval.  Precedence: --file, --all,
    # --year, --date; exit with an error if none was given.
    if update_file != "":
        # Time from last time file was updated
        try:
            update_time = os.path.getmtime(update_file)
            start_date = time.strftime(
                "%Y-%m-%dT%H:%M:%S:000 UTC-00:00", time.gmtime(update_time)
            )
        except OSError:
            print("[ERROR] File '%s' does not exist or is inaccessible" % update_file)
            sys.exit(-1)
    elif all_items:
        # All files since start of 1999
        start_date = "1999-01-01T00:00:00:000 UTC-00:00"
    elif year != "":
        # Whole calendar year, queried by publication date range.
        start_date = f"{year}-01-01T00:00:00:000 UTC-00:00"
        end_date = f"{year}-12-31T23:59:59:000 UTC-00:00"
    elif params.start_date != "":
        start_date = f"{params.start_date}T00:00:00:000 UTC-00:00"
        # print ("Date",start_date)
    else:
        print("[ERROR] Start date not specified")
        sys.exit(-1)
    print(
        "Number of records retrieved",
        get_data(start_date, end_date, output_directory, [params.verbose, page_size, request_interval, params.show_data]),
    )
    # End
from Crypto.Cipher import AES
from Crypto import Random
from ironic_neutron_plugin import config
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
import sqlalchemy as sa
from sqlalchemy import orm as sa_orm
import base64
# Module-level logger for this models module.
LOG = logging.getLogger(__name__)
def aes_encrypt(key, msg):
    """Encrypt *msg* with AES-CFB under *key*; return base64(iv + ciphertext).

    A fresh random IV is generated per call and prepended to the ciphertext
    so that aes_decrypt() can recover it.
    """
    iv = Random.new().read(AES.block_size)
    encrypted = AES.new(key, AES.MODE_CFB, iv).encrypt(msg)
    return base64.b64encode(iv + encrypted)
def aes_decrypt(key, msg):
    """Reverse aes_encrypt(): base64-decode, split off the IV, and decrypt."""
    raw = base64.b64decode(msg)
    iv, ciphertext = raw[:AES.block_size], raw[AES.block_size:]
    return AES.new(key, AES.MODE_CFB, iv).decrypt(ciphertext)
class EncryptedValue(sa.TypeDecorator):
    """String column type that transparently AES-encrypts values at rest."""

    impl = sa.String

    def process_bind_param(self, value, dialect):
        # Encrypt on the way into the database; None/empty pass through.
        if not value:
            return value
        return aes_encrypt(config.cfg.CONF.ironic.credential_secret, value)

    def process_result_value(self, value, dialect):
        # Decrypt on the way out of the database; None/empty pass through.
        if not value:
            return value
        return aes_decrypt(config.cfg.CONF.ironic.credential_secret, value)
class SwitchPort(model_base.BASEV2, models_v2.HasId):
    """Maps a device to a physical switch port."""

    __tablename__ = "switch_ports"

    # Owning switch; FK to Switch.id.
    switch_id = sa.Column(sa.String(255),
                          sa.ForeignKey("switches.id"),
                          nullable=False)
    # Interface name (eth0, some other meaningful identifier)
    name = sa.Column(sa.String(255), nullable=False)
    # Switchport identifier (Ethernet1/1, something your mech understands)
    port = sa.Column(sa.String(255), nullable=False)
    # Some kind of externally-identifiable id suitable for mapping multiple
    # ports to a single entity (ironic node_id)
    hardware_id = sa.Column(sa.String(255), nullable=True)
    # Extra
    mac_address = sa.Column(sa.String(255), nullable=True)

    def as_dict(self):
        """Return a JSON-serializable dict of this port's columns."""
        return {
            u"id": self.id,
            u"switch_id": self.switch_id,
            u"name": self.name,
            u"port": self.port,
            u"hardware_id": self.hardware_id,
            # extra
            u"mac_address": self.mac_address
        }

    @classmethod
    def make_dict(cls, d):
        """Build the same dict shape as as_dict() from a plain mapping *d*.

        Missing keys default to None.
        """
        return {
            u"id": d.get("id"),
            u"switch_id": d.get("switch_id"),
            u"name": d.get("name"),
            u"port": d.get("port"),
            u"hardware_id": d.get("hardware_id"),
            u"mac_address": d.get("mac_address")
        }
class Switch(model_base.BASEV2):
    """An external attachment point."""

    __tablename__ = "switches"

    id = sa.Column(sa.String(255), primary_key=True)
    description = sa.Column(sa.String(255))
    type = sa.Column(sa.String(255))
    # TODO(morgabra) move this out into a separate model
    host = sa.Column(sa.String(255))
    username = sa.Column(sa.String(255), nullable=True)
    # Stored AES-encrypted via the EncryptedValue column type.
    password = sa.Column(EncryptedValue(255), nullable=True)

    # Ports on this switch; eagerly loaded and deleted with the switch.
    ports = sa_orm.relationship(
        SwitchPort, lazy="joined", cascade="delete", backref="switch")

    def as_dict(self):
        """Return a JSON-serializable dict; the password is always masked."""
        return {
            u"id": self.id,
            u"description": self.description,
            u"host": self.host,
            u"username": self.username,
            u"password": "*****",
            u"type": self.type
        }
class PortExt(model_base.BASEV2):
    """Keep track of extra information about neutron ports.

    TODO(morgabra) This is not correct, but we need to stick
    this data somewhere.
    """

    __tablename__ = "port_ext"

    # TODO(morgabra) FK to the actual model and cascade
    port_id = sa.Column(sa.String(255), primary_key=True)
    # Ironic hardware id this neutron port belongs to, if any.
    hardware_id = sa.Column(sa.String(255), nullable=True)
    # NOTE(review): semantics of commit/trunked inferred from names only --
    # confirm against the plugin code that reads them.
    commit = sa.Column(sa.Boolean, nullable=False)
    trunked = sa.Column(sa.Boolean, nullable=True)

    def as_dict(self):
        """Return a JSON-serializable dict of this row."""
        return {
            u"port_id": self.port_id,
            u"commit": self.commit,
            u"trunked": self.trunked,
            u"hardware_id": self.hardware_id
        }
class SwitchPortBindingState(object):
    """Lifecycle states for a switch port binding.

    The WANT_* states mark transitions that have been requested but not
    yet applied.
    """

    INACTIVE = u"INACTIVE"
    WANT_ACTIVE = u"WANT_ACTIVE"
    ACTIVE = u"ACTIVE"
    WANT_INACTIVE = u"WANT_INACTIVE"
    ERROR = u"ERROR"

    @classmethod
    def as_dict(cls):
        """Return a name -> value mapping of every known state."""
        # Each state name equals its value, so the mapping is the identity.
        states = (cls.INACTIVE, cls.WANT_ACTIVE, cls.ACTIVE,
                  cls.WANT_INACTIVE, cls.ERROR)
        return {state: state for state in states}
class SwitchPortBinding(model_base.BASEV2):
    """Keep track of which neutron ports are bound to which
    physical switchports.
    """

    __tablename__ = "switch_port_bindings"

    # TODO(morgabra) FK to the actual model and cascade
    port_id = sa.Column(sa.String(255), primary_key=True)
    network_id = sa.Column(sa.String(255), primary_key=True)
    switch_port_id = sa.Column(
        sa.String(36),
        sa.ForeignKey("switch_ports.id"),
        primary_key=True)
    # Binding lifecycle state; see SwitchPortBindingState for values.
    state = sa.Column(sa.String(255),
                      default=SwitchPortBindingState.INACTIVE)

    def as_dict(self):
        """Return a JSON-serializable dict of this binding."""
        return {
            u"port_id": self.port_id,
            u"network_id": self.network_id,
            u"switch_port_id": self.switch_port_id,
            u"state": self.state
        }
from Crypto.Cipher import AES
from Crypto import Random
from ironic_neutron_plugin import config
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
import sqlalchemy as sa
from sqlalchemy import orm as sa_orm
import base64
# Module-level logger for this models module.
LOG = logging.getLogger(__name__)
def aes_encrypt(key, msg):
    """Encrypt *msg* with AES-CFB under *key*; return base64(iv + ciphertext).

    A fresh random IV is generated per call and prepended to the ciphertext
    so that aes_decrypt() can recover it.
    """
    iv = Random.new().read(AES.block_size)
    encrypted = AES.new(key, AES.MODE_CFB, iv).encrypt(msg)
    return base64.b64encode(iv + encrypted)
def aes_decrypt(key, msg):
    """Reverse aes_encrypt(): base64-decode, split off the IV, and decrypt."""
    raw = base64.b64decode(msg)
    iv, ciphertext = raw[:AES.block_size], raw[AES.block_size:]
    return AES.new(key, AES.MODE_CFB, iv).decrypt(ciphertext)
class EncryptedValue(sa.TypeDecorator):
    """String column type that transparently AES-encrypts values at rest."""

    impl = sa.String

    def process_bind_param(self, value, dialect):
        # Encrypt on the way into the database; None/empty pass through.
        if not value:
            return value
        return aes_encrypt(config.cfg.CONF.ironic.credential_secret, value)

    def process_result_value(self, value, dialect):
        # Decrypt on the way out of the database; None/empty pass through.
        if not value:
            return value
        return aes_decrypt(config.cfg.CONF.ironic.credential_secret, value)
class SwitchPort(model_base.BASEV2, models_v2.HasId):
    """Maps a device to a physical switch port."""

    __tablename__ = "switch_ports"

    # Owning switch; FK to Switch.id.
    switch_id = sa.Column(sa.String(255),
                          sa.ForeignKey("switches.id"),
                          nullable=False)
    # Interface name (eth0, some other meaningful identifier)
    name = sa.Column(sa.String(255), nullable=False)
    # Switchport identifier (Ethernet1/1, something your mech understands)
    port = sa.Column(sa.String(255), nullable=False)
    # Some kind of externally-identifiable id suitable for mapping multiple
    # ports to a single entity (ironic node_id)
    hardware_id = sa.Column(sa.String(255), nullable=True)
    # Extra
    mac_address = sa.Column(sa.String(255), nullable=True)

    def as_dict(self):
        """Return a JSON-serializable dict of this port's columns."""
        return {
            u"id": self.id,
            u"switch_id": self.switch_id,
            u"name": self.name,
            u"port": self.port,
            u"hardware_id": self.hardware_id,
            # extra
            u"mac_address": self.mac_address
        }

    @classmethod
    def make_dict(cls, d):
        """Build the same dict shape as as_dict() from a plain mapping *d*.

        Missing keys default to None.
        """
        return {
            u"id": d.get("id"),
            u"switch_id": d.get("switch_id"),
            u"name": d.get("name"),
            u"port": d.get("port"),
            u"hardware_id": d.get("hardware_id"),
            u"mac_address": d.get("mac_address")
        }
class Switch(model_base.BASEV2):
    """An external attachment point."""

    __tablename__ = "switches"

    id = sa.Column(sa.String(255), primary_key=True)
    description = sa.Column(sa.String(255))
    type = sa.Column(sa.String(255))
    # TODO(morgabra) move this out into a separate model
    host = sa.Column(sa.String(255))
    username = sa.Column(sa.String(255), nullable=True)
    # Stored AES-encrypted via the EncryptedValue column type.
    password = sa.Column(EncryptedValue(255), nullable=True)

    # Ports on this switch; eagerly loaded and deleted with the switch.
    ports = sa_orm.relationship(
        SwitchPort, lazy="joined", cascade="delete", backref="switch")

    def as_dict(self):
        """Return a JSON-serializable dict; the password is always masked."""
        return {
            u"id": self.id,
            u"description": self.description,
            u"host": self.host,
            u"username": self.username,
            u"password": "*****",
            u"type": self.type
        }
class PortExt(model_base.BASEV2):
    """Keep track of extra information about neutron ports.

    TODO(morgabra) This is not correct, but we need to stick
    this data somewhere.
    """

    __tablename__ = "port_ext"

    # TODO(morgabra) FK to the actual model and cascade
    port_id = sa.Column(sa.String(255), primary_key=True)
    # Ironic hardware id this neutron port belongs to, if any.
    hardware_id = sa.Column(sa.String(255), nullable=True)
    # NOTE(review): semantics of commit/trunked inferred from names only --
    # confirm against the plugin code that reads them.
    commit = sa.Column(sa.Boolean, nullable=False)
    trunked = sa.Column(sa.Boolean, nullable=True)

    def as_dict(self):
        """Return a JSON-serializable dict of this row."""
        return {
            u"port_id": self.port_id,
            u"commit": self.commit,
            u"trunked": self.trunked,
            u"hardware_id": self.hardware_id
        }
class SwitchPortBindingState(object):
    """Lifecycle states for a switch port binding.

    The WANT_* states mark transitions that have been requested but not
    yet applied.
    """

    INACTIVE = u"INACTIVE"
    WANT_ACTIVE = u"WANT_ACTIVE"
    ACTIVE = u"ACTIVE"
    WANT_INACTIVE = u"WANT_INACTIVE"
    ERROR = u"ERROR"

    @classmethod
    def as_dict(cls):
        """Return a name -> value mapping of every known state."""
        # Each state name equals its value, so the mapping is the identity.
        states = (cls.INACTIVE, cls.WANT_ACTIVE, cls.ACTIVE,
                  cls.WANT_INACTIVE, cls.ERROR)
        return {state: state for state in states}
class SwitchPortBinding(model_base.BASEV2):
    """Keep track of which neutron ports are bound to which
    physical switchports.
    """

    __tablename__ = "switch_port_bindings"

    # TODO(morgabra) FK to the actual model and cascade
    port_id = sa.Column(sa.String(255), primary_key=True)
    network_id = sa.Column(sa.String(255), primary_key=True)
    switch_port_id = sa.Column(
        sa.String(36),
        sa.ForeignKey("switch_ports.id"),
        primary_key=True)
    # Binding lifecycle state; see SwitchPortBindingState for values.
    state = sa.Column(sa.String(255),
                      default=SwitchPortBindingState.INACTIVE)

    def as_dict(self):
        """Return a JSON-serializable dict of this binding."""
        return {
            u"port_id": self.port_id,
            u"network_id": self.network_id,
            u"switch_port_id": self.switch_port_id,
            u"state": self.state
        }