| content (stringlengths 0–1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
import ops
import ops.cmd
import ops.env
import ops.cmd.safetychecks
VALID_OPTIONS = ['all', 'permanent', 'cached']
class PasswordDumpCommand(ops.cmd.DszCommand):
def __init__(self, plugin='passworddump', **optdict):
ops.cmd.DszCommand.__init__(self, plugin, **optdict)
def validateInput(self):
truecount = 0
for optkey in self.optdict:
optval = self.optdict[optkey]
if (type(optval) is not bool):
try:
optval = bool(optval)
self.optdict[optkey] = optval
except:
return False
if optval:
truecount += 1
if (truecount > 1):
return False
return True
def mySafetyCheck(self):
if (self.validateInput() and (ops.env.get('OPS_NOINJECT').upper() != 'TRUE')):
return (True, '')
else:
            return (False, 'Invalid options or OPS_NOINJECT is set to TRUE, you should probably not run passworddump')
ops.cmd.command_classes['passworddump'] = PasswordDumpCommand
ops.cmd.aliasoptions['passworddump'] = VALID_OPTIONS
ops.cmd.safetychecks.addSafetyHandler('passworddump', 'ops.cmd.passworddump.mySafetyCheck')
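# Hypothetical usage sketch (not part of the original module): building the command
# with one of VALID_OPTIONS and running its safety check. The `execute()` call and
# the surrounding dsz dispatch are assumptions about the framework, so this is left commented.
# cmd = PasswordDumpCommand(permanent=True)
# safe, reason = cmd.mySafetyCheck()
# if safe:
#     cmd.execute()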
|
nilq/baby-python
|
python
|
"""An App Template based on Bootstrap with a header, sidebar and main section"""
import pathlib
import awesome_panel.express as pnx
import panel as pn
from awesome_panel.express.assets import SCROLLBAR_PANEL_EXPRESS_CSS
BOOTSTRAP_DASHBOARD_CSS = pathlib.Path(__file__).parent / "bootstrap_dashboard.css"
BOOTSTRAP_DASHBOARD_TEMPLATE = pathlib.Path(__file__).parent / "bootstrap_dashboard.html"
HEADER_HEIGHT = 58
SIDEBAR_WIDTH = 200
# Hack to make dynamically adding plotly work:
# See https://github.com/holoviz/panel/issues/840
pn.extension("plotly")
class BootstrapDashboardTemplate(pn.Template):
"""A Basic App Template"""
def __init__(
self,
app_title: str = "App Name",
app_url="#",
):
pn.config.raw_css.append(BOOTSTRAP_DASHBOARD_CSS.read_text(encoding="utf8"))
pn.config.raw_css.append(SCROLLBAR_PANEL_EXPRESS_CSS.read_text(encoding="utf8"))
pnx.bootstrap.extend()
pnx.fontawesome.extend()
template = BOOTSTRAP_DASHBOARD_TEMPLATE.read_text(encoding="utf8")
app_title = pn.Row(
pn.pane.Markdown(
f"[{app_title}]({app_url})",
css_classes=["app-title"],
),
width=SIDEBAR_WIDTH,
sizing_mode="stretch_height",
)
header = pn.Row(
app_title,
pn.layout.HSpacer(),
sizing_mode="stretch_width",
height=HEADER_HEIGHT,
)
top_spacer = pn.layout.HSpacer(height=15)
self.header = header
self.sidebar = pn.Column(
top_spacer,
height_policy="max",
width=SIDEBAR_WIDTH,
)
self.main = pn.Column(
sizing_mode="stretch_width",
margin=(
25,
50,
25,
50,
),
)
items = {
"header": header,
"sidebar": self.sidebar,
"main": self.main,
}
super().__init__(
template=template,
items=items,
)
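# Hypothetical usage sketch (app name, URL and panes are illustrative): build the
# template, fill the sidebar and main area, then serve it with `panel serve`.
# template = BootstrapDashboardTemplate(app_title="Demo App", app_url="https://example.com")
# template.sidebar.append(pn.pane.Markdown("## Navigation"))
# template.main[:] = [pn.pane.Markdown("# Welcome"), pn.pane.Markdown("Main content goes here")]
# template.servable()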
|
nilq/baby-python
|
python
|
import codecs
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import itertools
def plot_confusion_matrix(cm,
target_names,
title='Confusion matrix',
prefix="",
cmap=None,
normalize=True,
save_dir="."
):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
    save_dir:     parent directory in which the image is saved
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
    Citation
---------
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
This function was modified slightly by the QUIPP development team.
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(10, 10))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, size=24)
cbar = plt.colorbar(fraction=0.03)
cbar.ax.tick_params(labelsize=24)
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, size=20, rotation=90)
plt.yticks(tick_marks, target_names, size=20)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
fontsize=22,
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
fontsize=22,
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label', size=28)
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass), size=28)
plt.ylim(len(cm)-0.5, -0.5)
figpath = f"{prefix}_{title}_confusion_matrix.png"
save_path = os.path.join(save_dir, figpath)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
plt.savefig(save_path, format="PNG", bbox_inches = "tight")
return os.path.abspath(save_path)
def plot_util_confusion_matrix(confusion_dict_path, method_names=None,
prefix="", normalize=False, save_dir="."):
dict_r = codecs.open(confusion_dict_path, 'r', encoding='utf-8').read()
confusion_dict = json.loads(dict_r)
if type(method_names) == str:
method_names = [method_names]
if method_names == None:
method_names = list(confusion_dict.keys())
plt_names = []
for method_name in method_names:
if method_name not in confusion_dict:
print(confusion_dict.keys())
raise ValueError(f"Method name: {method_name} is not in the dictionary.")
title = method_name
cm = np.array(confusion_dict[method_name]["conf_matrix"])
target_names = confusion_dict[method_name]["target_names"]
plt_name = plot_confusion_matrix(cm,
target_names=target_names,
normalize=normalize,
title=title,
prefix=prefix,
save_dir=save_dir
)
plt_names.append(plt_name)
return plt_names
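# Minimal usage sketch (labels, predictions and the output directory are illustrative;
# assumes scikit-learn is installed alongside the imports above).
if __name__ == "__main__":
    from sklearn.metrics import confusion_matrix
    y_true = [0, 1, 2, 2, 1, 0, 1, 2]
    y_pred = [0, 2, 2, 2, 1, 0, 0, 2]
    cm_example = confusion_matrix(y_true, y_pred)
    # Plot the proportions and save the figure under ./example_plots
    plot_confusion_matrix(cm_example,
                          target_names=["low", "medium", "high"],
                          normalize=True,
                          title="example",
                          prefix="demo",
                          save_dir="example_plots")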
|
nilq/baby-python
|
python
|
class DataFilePath:
def __init__(self):
self.dataDir = '../data/citydata/season_1/'
return
def getOrderDir_Train(self):
return self.dataDir + 'training_data/order_data/'
def getOrderDir_Test1(self):
return self.dataDir + 'test_set_1/order_data/'
def getTest1Dir(self):
return self.dataDir + 'test_set_1/'
def getTest2Dir(self):
return self.dataDir + 'test_set_2/'
def getTrainDir(self):
return self.dataDir + 'training_data/'
def getGapCsv_Train(self):
return self.getOrderDir_Train() + self.getGapFilename()
def getGapCsv_Test1(self):
return self.getOrderDir_Test1() + self.getGapFilename()
def getTestset1Readme(self):
return self.dataDir + 'test_set_1/read_me_1.txt'
def getTestset2Readme(self):
return self.dataDir + 'test_set_2/read_me_2.txt'
def getGapFilename(self):
return "temp/gap.csv"
def getGapPredictionFileName(self):
return 'gap_prediction.csv'
def getPrevGapFileName(self):
return "temp/prevgap.df.pickle"
def get_dir_name(self, data_dir):
return data_dir.split('/')[-2]
g_singletonDataFilePath = DataFilePath()
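# Hypothetical usage sketch (paths depend on the local season_1 data checkout):
# g_singletonDataFilePath.getGapCsv_Train()
#   -> '../data/citydata/season_1/training_data/order_data/temp/gap.csv'
# g_singletonDataFilePath.get_dir_name('../data/citydata/season_1/test_set_1/')
#   -> 'test_set_1'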
|
nilq/baby-python
|
python
|
from __future__ import print_function
import argparse
import logging
import os
import warnings
import torch.nn as nn
import torch.utils.data
from torch.utils.data import SubsetRandomSampler
from torch.utils.tensorboard import SummaryWriter
from Colorization import utils
from Multi_label_classification.dataset.dataset_big_earth_torch_mlc import BigEarthDatasetTorchMLC
from Multi_label_classification.job_config import set_params
from Multi_label_classification.metrics.metric import metrics_def
from Multi_label_classification.models.Ensemble import EnsembleModel
from Multi_label_classification.models.ResnetMLC import ResNetMLC
from Multi_label_classification.test import test
warnings.filterwarnings("ignore")
os.environ["OMP_NUM_THREADS"] = "1"
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
def main(args):
# enable cuda if available
args.cuda = args.cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
# READ JSON CONFIG FILE
assert os.path.isfile(args.json_config_file), "No json configuration file found at {}".format(args.json_config_file)
params = utils.Params(args.json_config_file)
    # adjust params according to the job id
params = set_params(params, args.id_optim)
# set the torch seed
torch.manual_seed(params.seed)
# initialize summary writer; every folder is saved inside runs
writer = SummaryWriter(params.path_nas + params.log_dir + '/runs/')
# create dir for log file
if not os.path.exists(params.path_nas + params.log_dir):
os.makedirs(params.path_nas + params.log_dir)
# save the json config file of the model
params.save(os.path.join(params.path_nas + params.log_dir, "params.json"))
# Set the logger
utils.set_logger(os.path.join(params.path_nas + params.log_dir, "log"))
# DATASET
# Torch version
big_earth = BigEarthDatasetTorchMLC(csv_path=params.dataset, random_seed=params.seed, bands_indices=params.bands,
img_size=params.img_size, n_samples=params.dataset_nsamples)
# Split
train_idx, val_idx, test_idx = big_earth.split_dataset(params.test_split, params.val_split)
test_sampler = SubsetRandomSampler(test_idx)
# define the loader
test_loader = torch.utils.data.DataLoader(big_earth, batch_size=params.batch_size,
sampler=test_sampler, num_workers=params.num_workers)
# MODELS definition for Ensemble
model_rgb = ResNetMLC(in_channels=3, out_cls=params.out_cls, resnet_version=params.resnet_version,
pretrained=0, colorization=0)
model_colorization = ResNetMLC(in_channels=9, out_cls=params.out_cls, resnet_version=params.resnet_version,
pretrained=0, colorization=1)
checkpoint = torch.load(args.rgb_checkpoint)
model_rgb.load_state_dict(checkpoint['state_dict'], strict=False)
checkpoint = torch.load(args.spectral_checkpoint)
model_colorization.load_state_dict(checkpoint['state_dict'], strict=False)
model = EnsembleModel(model_rgb=model_rgb, model_colorization=model_colorization, device=device)
# CUDA
model.to(device)
# loss for multilabel classification
loss_fn = nn.MultiLabelSoftMarginLoss()
# METRICS
metrics = metrics_def
logging.info("Starting final test with ensemble model...")
test(model=model, test_loader=test_loader, loss_fn=loss_fn,
device=device, metrics=metrics)
# CLOSE THE WRITER
writer.close()
if __name__ == '__main__':
# command line arguments
parser = argparse.ArgumentParser(description='multi_label_classification')
parser.add_argument('--cuda', action='store_true', default=True, help='enables CUDA training')
parser.add_argument('--json_config_file', default='Multi_label_classification/config/configuration.json', help='name of the json config file')
parser.add_argument('--id_optim', default=0, type=int, help='id_optim parameter')
parser.add_argument('--rgb_checkpoint', type=str, default=None, help='specify the rgb checkpoint path', required=True)
parser.add_argument('--spectral_checkpoint', type=str, default=None, help='specify the spectral checkpoint path', required=True)
# read the args
args = parser.parse_args()
main(args)
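# Hypothetical command-line usage sketch (the script name and checkpoint paths are
# illustrative; the JSON config path defaults to the one defined above):
#   python test_ensemble.py \
#       --rgb_checkpoint runs/rgb/model_best.pth.tar \
#       --spectral_checkpoint runs/spectral/model_best.pth.tar \
#       --id_optim 0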
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from django.http import Http404
from django.views.generic.edit import UpdateView
from django.views.generic import ListView, View
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.decorators import method_decorator
import logging
from ..models.projects import Project
from ..models.authors import Author
from ..forms import AuthorForm
from .permission_helpers import PermissionOnObjectViewMixin
# logger for this file
logger = logging.getLogger(__name__)
class AuthorListView(ListView):
"""A generic view of the authors in a list"""
paginate_by = 10
template_name = "code_doc/authors/author_list.html"
context_object_name = "authors"
model = Author
def detail_author(request, author_id):
try:
author = Author.objects.get(pk=author_id)
except Author.DoesNotExist:
raise Http404
project_list = Project.objects.filter(authors=author)
coauthor_list = (
Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id)
)
return render(
request,
"code_doc/authors/author_details.html",
{
"project_list": project_list,
"author": author,
"user": request.user,
"coauthor_list": coauthor_list,
},
)
class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView):
"""View for editing information about an Author
.. note:: in order to be able to edit an Author, the user should have the
'code_doc.author_edit' permission on the Author object.
"""
form_class = AuthorForm
model = Author
permissions_on_object = ("code_doc.author_edit",)
permissions_object_getter = "get_author_from_request"
template_name = "code_doc/authors/author_edit.html"
pk_url_kwarg = "author_id"
def get_author_from_request(self, request, *args, **kwargs):
# TODO check if needed
try:
return Author.objects.get(pk=kwargs["author_id"])
except Author.DoesNotExist:
logger.warning(
"[AuthorUpdateView] non existent Author with id %s", kwargs["author_id"]
)
return None
class MaintainerProfileView(View):
"""Manages the views associated to the maintainers"""
@method_decorator(login_required)
def get(self, request, maintainer_id):
try:
maintainer = User.objects.get(pk=maintainer_id)
except Project.DoesNotExist:
raise Http404
projects = Project.objects.filter(administrators=maintainer)
return render(
request,
"code_doc/maintainer_details.html",
{"projects": projects, "maintainer": maintainer},
)
@method_decorator(login_required)
def post(self, request):
pass
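# Hypothetical URLconf sketch (belongs in the app's urls.py; the route names and the
# `views.authors` module path are assumptions about the project layout):
# from django.urls import path
# from .views import authors
#
# urlpatterns = [
#     path("authors/", authors.AuthorListView.as_view(), name="author_list"),
#     path("authors/<int:author_id>/", authors.detail_author, name="author_detail"),
#     path("authors/<int:author_id>/edit/", authors.AuthorUpdateView.as_view(), name="author_edit"),
#     path("maintainers/<int:maintainer_id>/", authors.MaintainerProfileView.as_view(), name="maintainer_profile"),
# ]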
|
nilq/baby-python
|
python
|
from src.traces.traces import main as traces_main
import pandas as pd
def main():
db_path = '/Users/mossad/personal_projects/AL-public/src/crawler/crawled_kaggle.db'
traces_path = '/Users/mossad/personal_projects/AL-public/src/traces/extracted-traces.pkl'
clean_traces_path = '/Users/mossad/personal_projects/AL-public/src/traces/clean-traces.pkl'
# language = 'IPython Notebook'
language = 'Python'
traces_main(db_path, traces_path, language)
tr = pd.read_pickle(traces_path)
print()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2017 Luc Saffre
#
# License: BSD (see file COPYING for details)
"""Defines the :class:`Page` model.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from django.utils.translation import get_language
from lino.api import dd, rt
from etgen.html import E, tostring_pretty
from lino.core.renderer import add_user_language
from lino import mixins
from django.conf import settings
from .utils import render_node
#~ class PageType(dbutils.BabelNamed,mixins.PrintableType,outbox.MailableType):
#~ templates_group = 'pages/Page'
#~ class Meta:
#~ verbose_name = _("Page Type")
#~ verbose_name_plural = _("Page Types")
#~ remark = models.TextField(verbose_name=_("Remark"),blank=True)
#~ def __unicode__(self):
#~ return self.name
#~ class PageTypes(dd.Table):
#~ """
#~ Displays all rows of :class:`PageType`.
#~ """
#~ model = 'pages.PageType'
#~ column_names = 'name build_method template *'
#~ order_by = ["name"]
#~ detail_layout = """
#~ id name
#~ build_method template email_template attach_to_email
#~ remark:60x5
#~ pages.PagesByType
#~ """
class Page(mixins.Referrable, mixins.Hierarchical, mixins.Sequenced):
class Meta:
verbose_name = _("Node")
verbose_name_plural = _("Nodes")
title = dd.BabelCharField(_("Title"), max_length=200, blank=True)
body = dd.BabelTextField(_("Body"), blank=True, format='plain')
raw_html = models.BooleanField(_("raw html"), default=False)
def get_absolute_url(self, **kwargs):
if self.ref:
if self.ref != 'index':
return dd.plugins.pages.build_plain_url(
self.ref, **kwargs)
return dd.plugins.pages.build_plain_url(**kwargs)
def get_sidebar_caption(self):
if self.title:
return dd.babelattr(self, 'title')
if self.ref == 'index':
            return str(_('Home'))
if self.ref:
return self.ref
return str(self.id)
#~ if self.ref or self.parent:
#~ return self.ref
#~ return unicode(_('Home'))
def get_sidebar_item(self, request, other):
kw = dict()
add_user_language(kw, request)
url = self.get_absolute_url(**kw)
a = E.a(self.get_sidebar_caption(), href=url)
if self == other:
return E.li(a, **{'class':'active'})
return E.li(a)
def get_sidebar_html(self, request):
items = []
#~ loop over top-level nodes
for n in Page.objects.filter(parent__isnull=True).order_by('seqno'):
#~ items += [li for li in n.get_sidebar_items(request,self)]
items.append(n.get_sidebar_item(request, self))
if self.is_parented(n):
children = []
for ch in n.children.order_by('seqno'):
children.append(ch.get_sidebar_item(request, self))
if len(children):
items.append(E.ul(*children, **{'class':'nav nav-list'}))
e = E.ul(*items, **{'class':'nav nav-list'})
return tostring_pretty(e)
def get_sidebar_menu(self, request):
qs = Page.objects.filter(parent__isnull=True)
#~ qs = self.children.all()
        yield ('/', 'index', str(_('Home')))
#~ yield ('/downloads/', 'downloads', 'Downloads')
#~ yield ('/about', 'about', 'About')
#~ if qs is not None:
for obj in qs:
if obj.ref and obj.title:
yield ('/' + obj.ref, obj.ref, dd.babelattr(obj, 'title'))
#~ else:
#~ yield ('/','index',obj.title)
#~ class PageDetail(dd.FormLayout):
#~ main = """
#~ ref title type:25
#~ project id user:10 language:8 build_time
#~ left right
#~ """
#~ left = """
# ~ # abstract:60x5
#~ body:60x20
#~ """
#~ right="""
#~ outbox.MailsByController
#~ postings.PostingsByController
#~ """
class PageDetail(dd.DetailLayout):
main = """
ref parent seqno
title
body
"""
class Pages(dd.Table):
model = 'pages.Page'
detail_layout = PageDetail()
column_names = "ref title *"
#~ column_names = "ref language title user type project *"
order_by = ["ref"]
#~ class MyPages(ByUser,Pages):
#~ required = dict(user_groups='office')
#~ column_names = "modified title type project *"
#~ label = _("My pages")
#~ order_by = ["-modified"]
#~ class PagesByType(Pages):
#~ master_key = 'type'
#~ column_names = "title user *"
#~ order_by = ["-modified"]
#~ if settings.SITE.project_model:
#~ class PagesByProject(Pages):
#~ master_key = 'project'
#~ column_names = "type title user *"
#~ order_by = ["-modified"]
def create_page(**kw):
#~ logger.info("20121219 create_page(%r)",kw['ref'])
return Page(**kw)
def lookup(ref, *args, **kw):
return Page.get_by_ref(ref, *args, **kw)
def get_all_pages():
return Page.objects.all()
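# Hypothetical usage sketch (ref and title values are illustrative):
# home = create_page(ref='index', title='Home', body='Welcome')
# home.full_clean()
# home.save()
# lookup('index')  # -> the saved page, resolved via mixins.Referrable.get_by_ref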
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from annotator import annotation
from annotator import document
from h._compat import text_type
class Annotation(annotation.Annotation):
@property
def uri(self):
"""Return this annotation's URI or an empty string.
The uri is escaped and safe to be rendered.
The uri is a Markup object so it won't be double-escaped.
"""
uri_ = self.get("uri")
if uri_:
# Convert non-string URIs into strings.
# If the URI is already a unicode string this will do nothing.
# We're assuming that URI cannot be a byte string.
return text_type(uri_)
else:
return ""
@property
def parent(self):
"""
Return the thread parent of this annotation, if it exists.
"""
if 'references' not in self:
return None
if not isinstance(self['references'], list):
return None
if not self['references']:
return None
return Annotation.fetch(self['references'][-1])
@property
def target_links(self):
"""A list of the URLs to this annotation's targets."""
links = []
targets = self.get("target")
if isinstance(targets, list):
for target in targets:
if not isinstance(target, dict):
continue
source = target.get("source")
if source is None:
continue
links.append(source)
return links
@property
def document(self):
return self.get("document", {})
class Document(document.Document):
__analysis__ = {}
|
nilq/baby-python
|
python
|
import os
import string
from file2quiz import utils, reader
def convert_quiz(input_dir, output_dir, file_format, save_files=False, *args, **kwargs):
print(f'##############################################################')
print(f'### QUIZ CONVERTER')
print(f'##############################################################\n')
# Get files
files = utils.get_files(input_dir, extensions={'json'})
# Create quizzes folder
convert_dir = os.path.join(output_dir, f"quizzes/{file_format}")
    if save_files:
        utils.create_folder(convert_dir, empty_folder=True)
# Set format
FILE_FORMATS = {"text": "txt", "anki": "txt"}
file_format = str(file_format).lower().strip().replace('.', '') # parse formats
output_ext = FILE_FORMATS.get(file_format, None)
    # Fallback for unknown formats
    if output_ext is None:
        print(f'\t- [ERROR] No method to save "{file_format}" files (falling back to "txt")')
        file_format = "text"
        output_ext = FILE_FORMATS[file_format]
# Convert quizzes
fquizzes = []
total_questions = 0
total_answers = 0
for i, filename in enumerate(files, 1):
tail, basedir = utils.get_tail(filename)
fname, ext = utils.get_fname(filename)
print("")
print(f'==============================================================')
print(f'[INFO] ({i}/{len(files)}) Converting quiz to "{file_format}": "{tail}"')
print(f'==============================================================')
# Read file
quiz = reader.read_json(filename)
solutions = sum([1 for q_id, q in quiz.items() if q.get('correct_answer') is not None])
total_answers += solutions
total_questions += len(quiz)
try:
fquiz = _convert_quiz(quiz, file_format, *args, **kwargs)
except ValueError as e:
print(f'\t- [ERROR] {e}. Skipping quiz "{tail}"')
continue
# Add formatted quizzes
fquizzes.append((fquiz, filename))
# Show info
if len(fquiz.strip()) == 0:
print(f"\t- [WARNING] No quiz were found ({tail})")
print(f"\t- [INFO] Conversion done! {len(quiz)} questions were found; {solutions} with solutions. ({tail})")
# Save quizzes
if save_files:
print(f"\t- [INFO] Saving file... ({tail}.txt)")
reader.save_txt(fquiz, os.path.join(convert_dir, f"{fname}.{output_ext}"))
# Check result
if not fquizzes:
print("\t- [WARNING] No quiz was converted successfully")
print("")
print("--------------------------------------------------------------")
print("SUMMARY")
print("--------------------------------------------------------------")
print(f"- [INFO] Quizzes converted: {len(fquizzes)}")
print(f"- [INFO] Questions found: {total_questions} (with solutions: {total_answers})")
print("--------------------------------------------------------------\n\n")
return fquizzes
def _convert_quiz(quiz, file_format, *args, **kwargs):
# Select format
if file_format == "anki":
return quiz2anki(quiz)
else: # Fallback to txt
return quiz2txt(quiz, *args, **kwargs)
def pdf2image(filename, savepath, dpi=300, img_format="tiff", **kwargs):
# This requires: ImageMagick
cmd = f'convert -density {dpi} "{filename}" -depth 8 -strip -background white -alpha off "{savepath}/page-%0d.{img_format}"'
os.system(cmd)
def image2text(filename, savepath, lang="eng", dpi=300, psm=3, oem=3, **kwargs):
# This requires: Tesseract
# Tesseract needs the save path without the extensions
basedir, tail = os.path.split(savepath)
fname, ext = os.path.splitext(tail)
# Run command
#sub_cmds = 'tessedit_char_whitelist="0123456789 abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYñÑçÇáéíóúÁÉíÓÚüÜ()¿?,;.:/-\"\'ºª%-+Ø=<>*"'
cmd = f'tesseract "{filename}" "{basedir}/{fname}" -l {lang} --dpi {dpi} --psm {psm} --oem {oem} letters' #-c {sub_cmds}
os.system(cmd)
def quiz2anki(quiz, **kwargs):
text = ""
# Sort questions by key
keys = sorted(quiz.keys(), key=utils.tokenize)
for i, id_question in enumerate(keys):
question = quiz[id_question]
        # Check if there is a correct answer
if question.get('correct_answer') is None:
raise ValueError("No correct answer was given.")
# Format fields
fields = ["{}. {}".format(id_question, question['question']), str(int(question['correct_answer'])+1)] + question['answers']
text += "{}\n".format("\t".join(fields))
return text.strip()
def quiz2txt(quiz, show_answers, answer_table=False, **kwargs):
txt = ""
txt_answers = ""
# Sort questions by key
keys = sorted(quiz.keys(), key=utils.tokenize)
for i, id_question in enumerate(keys):
question = quiz[id_question]
# Format question
txt += "{}. {}\n".format(id_question, question['question'])
# Format answers
for j, ans in enumerate(question['answers']):
marker = ""
ans_id = string.ascii_lowercase[j].lower()
# Show correct answer?
if show_answers:
if j == question.get("correct_answer"): # correct answer
if answer_table:
txt_answers += f"{id_question} - {ans_id}\n"
else:
marker = "*"
txt += "{}{}) {}\n".format(marker, ans_id, ans)
txt += "\n"
# Add answer table at the end of the file if requested
if show_answers and answer_table:
txt += "\n\n\n=========\n\n\n" + txt_answers
return txt.strip()
def json2text(path, *args, **kwargs):
texts = []
files = utils.get_files(path, extensions="json")
for filename in files:
fname, extension = os.path.splitext(os.path.basename(filename))
# Load quiz and text
quiz = reader.read_json(filename)
quiz_txt = quiz2txt(quiz, *args, **kwargs)
texts.append((fname, quiz_txt))
return texts
def _pdf2word(filename, savepath, word_client=None):
try:
import win32com.client
import pywintypes
except ImportError as e:
raise ImportError("'pywin32' missing. You need to install it manually (only Windows): pip install pywin32")
# Create a Word client if there isn't any
if not word_client:
# Load word client
word_client = win32com.client.Dispatch("Word.Application")
word_client.visible = 0
try:
# Open word file
wb = word_client.Documents.Open(filename)
# File format for .docx
# https://docs.microsoft.com/en-us/office/vba/api/word.wdsaveformat
wb.SaveAs2(savepath, FileFormat=16)
wb.Close()
except pywintypes.com_error as e:
print(f"- [ERROR] There was an error converting the PDF file to DOCX. Skipping file. ({e})")
def pdf2word(input_dir, output_dir):
try:
import win32com.client
except ImportError as e:
raise ImportError("'pywin32' missing. You need to install it manually (only Windows): pip install pywin32")
# Get files
files = utils.get_files(input_dir, extensions={"pdf"})
# Create output dir
output_dir = os.path.join(output_dir, "docx-word")
utils.create_folder(output_dir, empty_folder=True)
# Load word client
word_client = win32com.client.Dispatch("Word.Application")
word_client.visible = 0
# Walk through files
for i, filename in enumerate(files, 1):
# Parse path
tail, basedir = utils.get_tail(filename)
fname, ext = utils.get_fname(filename)
print(f"#{i}. Converting *.pdf to *.docx ({tail})")
# Create save path
savepath = os.path.abspath(os.path.join(output_dir, f"{fname}.docx"))
# Convert pdf
_pdf2word(filename, savepath, word_client)
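# Hypothetical usage sketch (directories are illustrative; the input folder is expected
# to contain the JSON quizzes produced earlier in the file2quiz pipeline):
# convert_quiz(input_dir="outputs/quizzes/json", output_dir="outputs",
#              file_format="anki", save_files=True)
# convert_quiz(input_dir="outputs/quizzes/json", output_dir="outputs",
#              file_format="text", save_files=True,
#              show_answers=True, answer_table=True)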
|
nilq/baby-python
|
python
|
# Copyright (c) 2021 by xfangfang. All Rights Reserved.
#
# Using potplayer as DLNA media renderer
#
# Macast Metadata
# <macast.title>PotPlayer Renderer</macast.title>
# <macast.renderer>PotplayerRenderer</macast.renderer>
# <macast.platform>win32</macast.platform>
# <macast.version>0.4</macast.version>
# <macast.host_version>0.7</macast.host_version>
# <macast.author>xfangfang</macast.author>
# <macast.desc>PotPlayer support for Macast, this is a simple plugin that only supports play and stop.</macast.desc>
import subprocess
import threading
import time
import cherrypy
from macast import gui
from macast.renderer import Renderer
from macast.utils import SETTING_DIR
POTPLAYER_PATH = r'"C:\Program Files\DAUM\PotPlayer\PotPlayermini64.exe"'
subtitle = None
class PotplayerRenderer(Renderer):
def __init__(self):
super(PotplayerRenderer, self).__init__()
self.start_position = 0
self.position_thread_running = True
self.position_thread = threading.Thread(target=self.position_tick, daemon=True)
self.position_thread.start()
# a thread is started here to increase the playback position once per second
# to simulate that the media is playing.
def position_tick(self):
while self.position_thread_running:
time.sleep(1)
self.start_position += 1
sec = self.start_position
position = '%d:%02d:%02d' % (sec // 3600, (sec % 3600) // 60, sec % 60)
self.set_state_position(position)
def set_media_stop(self):
subprocess.Popen(['taskkill', '/f', '/im', 'PotPlayerMini64.exe']).communicate()
self.set_state_transport('STOPPED')
cherrypy.engine.publish('renderer_av_stop')
def start_player(self, url):
try:
if subtitle is None:
subprocess.call('{} "{}"'.format(POTPLAYER_PATH, url))
else:
subprocess.call('{} "{}" /sub="{}"'.format(POTPLAYER_PATH, url, subtitle))
except Exception as e:
print(e)
self.set_media_stop()
cherrypy.engine.publish('app_notify', "Error", str(e))
def set_media_url(self, url, start=0):
self.set_media_stop()
self.start_position = 0
threading.Thread(target=self.start_player, daemon=True, kwargs={'url': url}).start()
self.set_state_transport("PLAYING")
cherrypy.engine.publish('renderer_av_uri', url)
def stop(self):
super(PotplayerRenderer, self).stop()
self.set_media_stop()
print("PotPlayer stop")
def start(self):
super(PotplayerRenderer, self).start()
print("PotPlayer start")
if __name__ == '__main__':
gui(PotplayerRenderer())
# or using cli to disable taskbar menu
# cli(PotplayerRenderer())
else:
import os
if os.path.exists(SETTING_DIR):
subtitle = os.path.join(SETTING_DIR, r"macast.ass")
if not os.path.exists(subtitle):
subtitle = None
|
nilq/baby-python
|
python
|
import numpy as np
from metod_alg import objective_functions as mt_obj
from metod_alg import metod_algorithm_functions as mt_alg
def test_1():
"""
Test for mt_alg.forward_tracking() - check that when flag=True,
track is updated.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_forward = 1.1
forward_tol = 1000000000
step = 0.0001
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old > f_new)
track, flag = (mt_alg.forward_tracking(
point, step, f_old, f_new, grad,
const_forward, forward_tol, f,
func_args))
assert(flag == True)
assert(track[0][0] == 0)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_forward
if j < len(track) - 1:
assert(track[j][1] < track[j-1][1])
else:
assert(track[j][1] > track[j-1][1])
def test_2():
"""
Test for mt_alg.forward_tracking() - check for flag=False.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_forward = 1.1
forward_tol = 0.001
step = 0.0001
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old > f_new)
track, flag = (mt_alg.forward_tracking(
point, step, f_old, f_new, grad,
const_forward, forward_tol, f,
func_args))
assert(flag == False)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_forward
assert(track[j][1] < track[j-1][1])
def test_3():
"""Test for mt_alg.backward_tracking() - back_tol is met"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
back_tol = 0.4
step = 0.9
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old < f_new)
track = (mt_alg.backward_tracking
(point, step, f_old, f_new,
grad, const_back, back_tol,
f, func_args))
assert(track[0][0] == 0)
assert(track[0][1] == f_old)
assert(track[1][0] == step)
assert(track[1][1] == f_new)
def test_4():
"""Test for mt_alg.backward_tracking() - back tol is not met"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
back_tol = 0.000001
step = 1
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old < f_new)
track = (mt_alg.backward_tracking
(point, step, f_old, f_new,
grad, const_back, back_tol,
f, func_args))
assert(track[0][0] == 0)
assert(track[0][1] == f_old)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_back
if j < len(track) - 1:
assert(track[0][1] < track[j][1])
else:
assert(track[j][1] < track[0][1])
def test_5():
"""Checks computation in mt_alg.compute_coeffs()"""
track_y = np.array([100, 200, 50])
track_t = np.array([0, 1, 0.5])
opt_t = mt_alg.compute_coeffs(track_y, track_t)
OLS_polyfit = np.polyfit(track_t, track_y, 2)
check = -OLS_polyfit[1] / (2 * OLS_polyfit[0])
assert(np.all(np.round(check, 5) == np.round(opt_t, 5)))
def test_6():
"""
Test for mt_alg.combine_tracking() - check that correct step size is
returned when forward_tol is not met.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
const_forward = 1.1
forward_tol = 1000000000
back_tol = 0.000001
step = 0.0001
f_old = f(np.copy(point), *func_args)
grad = g(point, *func_args)
opt_t = (mt_alg.combine_tracking
(point, f_old, grad, step,
const_back, back_tol,
const_forward, forward_tol,
f, func_args))
assert(opt_t >= 0)
upd_point = point - opt_t * grad
assert(f(upd_point, *func_args) < f_old)
def test_7():
"""
Test for mt_alg.combine_tracking() - check that correct step size is
returned, when forward_tol is met.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
const_forward = 1.1
forward_tol = 0.001
back_tol = 0.000001
step = 0.0001
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
opt_t = (mt_alg.combine_tracking
(point, f_old, grad, step,
const_back, back_tol,
const_forward, forward_tol,
f, func_args))
assert(opt_t >= 0)
upd_point = point - opt_t * grad
assert(f(upd_point, *func_args) < f_old)
def test_8():
"""
Test for mt_alg.combine_tracking() - check that correct step size is
returned, when back_tol is met.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
const_forward = 1.1
back_tol = 0.4
forward_tol = 100000000
step = 0.9
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
opt_t = (mt_alg.combine_tracking
(point, f_old, grad, step,
const_back, back_tol,
const_forward, forward_tol,
f, func_args))
assert(opt_t == 0)
upd_point = point - opt_t * grad
assert(f(upd_point, *func_args) == f_old)
def test_9():
"""
Test for mt_alg.combine_tracking() - check that correct step size is
returned, when back_tol is not met.
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
point = np.random.uniform(0, 1, (d, ))
const_back = 0.9
const_forward = 1.1
back_tol = 0.000001
forward_tol = 100000000
step = 1
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
opt_t = (mt_alg.combine_tracking
(point, f_old, grad, step,
const_back, back_tol,
const_forward, forward_tol,
f, func_args))
assert(opt_t >= 0)
upd_point = point - opt_t * grad
assert(f(upd_point, *func_args) < f_old)
def test_10():
"""Test that mt_alg.arrange_track_y_t produces expected outputs"""
track = np.array([[0, 100],
[1, 80],
[2, 160],
[4, 40],
[8, 20],
[16, 90]])
track_method = 'Forward'
track_y, track_t = mt_alg.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([40, 20, 90])))
assert(np.all(track_t == np.array([4, 8, 16])))
def test_11():
"""Test that mt_alg.arrange_track_y_t produces expected outputs"""
track = np.array([[0, 100],
[1, 80],
[2, 70],
[4, 90]])
track_method = 'Forward'
track_y, track_t = mt_alg.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([80, 70, 90])))
assert(np.all(track_t == np.array([1, 2, 4])))
def test_12():
"""Test that mt_alg.arrange_track_y_t produces expected outputs"""
track = np.array([[0, 100],
[1, 120],
[0.5, 110],
[0.25, 90]])
track_method = 'Backward'
track_y, track_t = mt_alg.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 90, 110])))
assert(np.all(track_t == np.array([0, 0.25, 0.5])))
def test_13():
"""
Test for mt_alg.check_func_val_coeffs() when func_val < track_y[1].
"""
np.random.seed(90)
f = mt_obj.several_quad_function
g = mt_obj.several_quad_gradient
d = 10
P = 5
lambda_1 = 1
lambda_2 = 10
store_x0, matrix_combined = (mt_obj.function_parameters_several_quad
(P, d, lambda_1, lambda_2))
func_args = P, store_x0, matrix_combined
step = 0.00001
point = np.array([0.5525204, 0.8256308, 0.5034502, 0.68755988,
0.75954891, 0.64230399, 0.38500431, 0.0801039,
0.80748984, 0.81147401])
grad = g(point, *func_args)
f_old = f(np.copy(point), *func_args)
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old > f_new)
forward_tol = 100000000
const_forward = 1.1
track_method = 'Forward'
track, flag = (mt_alg.forward_tracking
(point, step, f_old, f_new, grad,
const_forward, forward_tol, f, func_args))
opt_t = mt_alg.check_func_val_coeffs(track, track_method, point, grad, f,
func_args)
assert(f(point - opt_t * grad, *func_args) < np.min(track[:, 1]))
def test_14():
"""
Test for mt_alg.check_func_val_coeffs() when func_val > track_y[1].
"""
np.random.seed(34272212)
f = mt_obj.sog_function
g = mt_obj.sog_gradient
d = 20
P = 10
lambda_1 = 1
lambda_2 = 4
sigma_sq = 0.8
store_x0, matrix_combined, store_c = (mt_obj.function_parameters_sog
(P, d, lambda_1, lambda_2))
func_args = P, sigma_sq, store_x0, matrix_combined, store_c
point = np.random.uniform(0,1,(d,))
f_old = f(point, *func_args)
grad = g(point, *func_args)
step = 0.1
const_forward = 1.5
forward_tol = 1000000000
f_new = f(np.copy(point) - step * grad, *func_args)
assert(f_old > f_new)
track_method = 'Forward'
track, flag = (mt_alg.forward_tracking
(point, step, f_old, f_new, grad,
const_forward, forward_tol, f, func_args))
opt_t = mt_alg.check_func_val_coeffs(track, track_method, point, grad, f,
func_args)
pos = np.argmin(track[:, 1])
step_length = track[pos][0]
assert(step_length == opt_t)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
import os
import sys
from .main import main as main_main
from .. import CondaError
from .._vendor.auxlib.ish import dals
log = getLogger(__name__)
def pip_installed_post_parse_hook(args, p):
if args.cmd not in ('init', 'info'):
raise CondaError(dals("""
Conda has not been initialized.
To enable full conda functionality, please run 'conda init'.
For additional information, see 'conda init --help'.
"""))
def main(*args, **kwargs):
os.environ[str('CONDA_PIP_UNINITIALIZED')] = str('true')
kwargs['post_parse_hook'] = pip_installed_post_parse_hook
return main_main(*args, **kwargs)
if __name__ == '__main__':
sys.exit(main())
|
nilq/baby-python
|
python
|
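# Count the trees ('#') hit while repeatedly moving 3 cells right and 1 row down
# across a grid that tiles horizontally (a classic toboggan-slope traversal puzzle).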
with open('input') as fp:
grid = [line[:-1] for line in fp]
grid_width = len(grid[-1])
x, y = (0, 0)
n_trees = 0
while y < len(grid):
if grid[y][x % grid_width] == '#':
n_trees += 1
x += 3
y += 1
print(n_trees, "trees")
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import sys, os
from subprocess import Popen, PIPE, check_output
import socket
from ..seqToolManager import seqToolManager, FeatureComputerException
from .windowHHblits import WindowHHblits
from utils import myMakeDir, tryToRemove #utils is at the root of the package
class HHBlitsManager(seqToolManager):
BAD_SCORE_CONSERVATION="-1048576"
def __init__(self, seqsManager, outPath, winSize,
hhBlitsCMD_template="%(hhBlitsBinPath)s/hhblits -i %(fastaInFname)s -n 4 -d %(hhblitsDB)s "+
"-oa3m %(aligsName)s -cpu %(psiBlastNThrs)d -ohhm %(profileNameRaw)s -o /dev/null"):
seqToolManager.__init__(self, seqsManager, outPath, winSize)
'''
@param seqsManager: ..manageSeqs.seqsManager.SeqsManager
@param outPath: str. path where hhblits results will be saved
@param winSize: int. The size of sliding window
'''
    # self.hhBlitsBinPath  inherited from ../../Config
    # self.hhBlitsDB       inherited from ../../Config
    # self.psiBlastNThrs   inherited from ../../Config
self.hhBlitsOut= myMakeDir(self.outPath,"hhBlits")
self.hhBlitsRaw= myMakeDir(self.hhBlitsOut,"rawHhBlits")
self.hhBlitsProc= myMakeDir(self.hhBlitsOut,"procHhBlits")
self.hhBlitsPathWindowed= myMakeDir(self.hhBlitsOut,"windowedHhBlits/wSize"+str(winSize))
self.hhBlitsCMD_template= hhBlitsCMD_template
def getFinalPath(self):
'''
returns path where final results (win hhBlits) are saved
@return self.hhBlitsPathWindowed: str
'''
return self.hhBlitsPathWindowed
def getFNames(self, prefixExtended):
'''
Returns a dict that contains the fnames that will be used by hhblits
@param prefixExtended. prefix for output fnames. They are formed as follows: prefix+chainType+chainId+b/u
@return Dict {"psiblast":(psiblastOutName, ), "pssm":(pssmOutNameRaw, pssmOutNameProc), "pssmWindow":(pssmWindowedOutName,)}
Processed pssm and pssmWindow are the ones that will be used for classification.
'''
hhBlitsAligName= os.path.join( self.hhBlitsRaw, prefixExtended+".a3m")
rawHhblits= os.path.join( self.hhBlitsRaw, prefixExtended+".ohhm")
procHhblits= os.path.join(self.hhBlitsProc, prefixExtended+".ohhm")
hhblitsWindowedOutName= os.path.join(self.hhBlitsPathWindowed, prefixExtended+".wsize"+str(self.winSize)+".ohhm")
fNames= { "hhBlitsAligName":(hhBlitsAligName,),"hhBlitsProfiles":(rawHhblits, procHhblits),
"hhBlitsProfilesWindow":(hhblitsWindowedOutName,)}
return fNames
def compute(self, prefixExtended):
'''
Computes hhblits for the sequence associated with prefixExtended as an unambiguous id and included in
self.seqsManager
@param prefixExtended: str. unambiguous id of the sequence that will be the prefix of output names. Must be included
in self.seqsManager
@return (profileNameProc, winProfileOutName)
profileNameProc: str
winProfileOutName: str
'''
prefix, chainType, chainId, __= prefixExtended.split("_")
seqStr, fastaInFname= self.seqsManager.getSeq(chainType, chainId)
fNames= self.getFNames( prefixExtended)
aligsName= fNames["hhBlitsAligName"][0]
profileNameRaw, profileNameProc= fNames["hhBlitsProfiles"]
winProfileOutName= fNames["hhBlitsProfilesWindow"][0]
if self.checkAlreayComputed(prefixExtended):
print("hhblits already computed for %s"%prefixExtended)
return aligsName, profileNameRaw, profileNameProc
# run hhblits
print("lauching hhblits over %s"%prefixExtended)
self.launchHhblits( fastaInFname, aligsName, profileNameRaw)
#Process psi-blast
self.processHhblits( seqStr, prefixExtended, profileNameRaw, profileNameProc)
#Compute windows
self.makeWindowedPSSMHhblits( profileNameProc, winProfileOutName)
return aligsName, profileNameProc, winProfileOutName
def launchHhblits(self, fastaInFname, aligsName, profileNameRaw):
'''
Launches hhblits command with fastaInFname as input file, aligsName as the output file that will
contain the aligments and profileNameRaw as the output file that will contain the profile.
@param fastaInFname: str. Path to fasta file where sequence is saved
@param aligsName: str. Path to results file where aligments will be saved
@param profileNameRaw: str. Path to results file where profile will be saved
'''
if os.path.isfile(profileNameRaw) and int(check_output('wc -l {}'.format(profileNameRaw), shell=True).split()[0])> 11:
print("hhblits raw files alredy computed")
return
hhBlitsBinPath= self.hhBlitsBinPath
hhblitsDB= self.hhBlitsDB
psiBlastNThrs= self.psiBlastNThrs if socket.gethostname()!="servet" else 1
hhblitsCMD = self.hhBlitsCMD_template%locals()
hhblitsCMD.replace("_*", "_\\*")
print(hhblitsCMD)
process= Popen( hhblitsCMD, shell=True, stdout=PIPE, stderr=PIPE)
processOut= process.communicate()
#Check for failure
if len(processOut[1])>0 and "Error" in processOut[1]: #No hits found will be dealt at processResults
print("Error computing hhblits. Caught stdin/stderr:\n",processOut[0],processOut[1])
raise FeatureComputerException("hhblits was not able to compute profile")
def processHhblits(self, seq, prefixExtended, profileNameRaw, profileNameProc):
'''
Reads hhblits profile output file and writes another one with tabulated format, headers and
some error checking.
@param: seq: str. Sequence of the chain
@param prefixExtended: str. unambiguous id of the sequence that will be the prefix of output names
@param profileNameRaw: str. Path to profiles results
@param profileNameProc: str. Path where formated results will be saved.
'''
try:
hhBlitsData = self.loadHhblits(profileNameRaw)
except IOError:
hhBlitsData= [ " ".join([HHBlitsManager.BAD_SCORE_CONSERVATION for i in range(31)]) for i in range(len(seq))]
prefix, chainType, chainId, __= prefixExtended.split("_")
try:
outFile= open(profileNameProc,"w")
outFile.write("chainId seqIndex structResId resName "+ "hhblits "*31+"\n")
for i, (hhBlitsArrayJoined,letter) in enumerate(zip(hhBlitsData,seq)):
structIndex= self.seqsManager.seqToStructIndex(chainType, chainId, i, asString= True)
if self.filterOutLabels and structIndex[-1].isalpha():
continue
outFile.write("%s %d %s %s "%(chainId, i, structIndex, letter)+ hhBlitsArrayJoined +"\n")
outFile.close()
except (KeyboardInterrupt, Exception):
print("Exception happend computing %s"%profileNameProc)
tryToRemove(profileNameProc)
raise
def loadHhblits(self, fname):
'''
Loads a hhblits profile file
@param fname: str. Path to hhblits profile file.
@return list of strings. ["row0_hhblits_values","row1_hhblits_values"...]
'''
scores=[]
begin=False
count=0
with open(fname) as f:
for line in f:
if line.startswith("#"):
begin=True
continue
if begin==True:
count+=1
if count==4:
break
for i,line in enumerate(f):
if line.startswith("//"):
break
lineArray= line.split()
nElems= len(lineArray)
if i%3==0:
lineArray= lineArray[2:]
if nElems!=23:
raise ValueError("Bad format in hhblits file %s"%fname)
scores.append(lineArray)
elif i%3==1:
scores[-1]+= lineArray
scores[-1]= " ".join([ elem if elem!="*" else "-1" for elem in scores[-1] ])
return scores
def makeWindowedPSSMHhblits(self, profileNameProc, winProfileOutName):
'''
Computes sliding windows for a given profileNameProc.
@param profileNameProc: str. Path to processed hhblits profile file
@param winProfileOutName: str. Path to windowed results.
'''
try:
WindowHHblits(self.winSize).compute(profileNameProc, winProfileOutName)
except (KeyboardInterrupt, Exception):
print("Exception happend computing %s"%winProfileOutName)
tryToRemove(winProfileOutName)
raise
def test():
fname="/home/rsanchez/Tesis/rriPredMethod/dependencies/bioinformaticTools/hh-Tools/seqExample.ohhm"
from computeFeatures.seqStep.manageSeqs.seqsManager import SeqsManager
seqManag= SeqsManager("rFname", "lFname", computedFeatsRootDir= ".")
hhblitsObj= HHBlitsManager( seqsManager= seqManag, outPath=".", winSize=11)
hhblitsObj.loadHhblits(fname)
if __name__=="__main__":
test()
|
nilq/baby-python
|
python
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
        # edge case: remove matching nodes at the head of the list first
if not head:
return head
while head.val == val:
head = head.next
if not head:
return head
prev, curr = head, head.next
while curr:
if curr.val == val:
prev.next = curr.next
else:
prev = prev.next
curr = curr.next
return head
if __name__ == "__main__":
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(2)
e = ListNode(5)
a.next = b
b.next = c
c.next = d
d.next = e
head = Solution().removeElements(a, 1)
while head:
print(head.val)
head = head.next
|
nilq/baby-python
|
python
|
# Export individual functions
# Copy
from .scopy import scopy
from .dcopy import dcopy
from .ccopy import ccopy
from .zcopy import zcopy
# Swap
from .sswap import sswap
from .dswap import dswap
from .cswap import cswap
from .zswap import zswap
# Scaling
from .sscal import sscal
from .dscal import dscal
from .cscal import cscal
from .csscal import csscal
from .zscal import zscal
from .zdscal import zdscal
# Scaling plus vector
from .saxpy import saxpy
from .daxpy import daxpy
from .caxpy import caxpy
from .zaxpy import zaxpy
# Absolute values of components
from .scabs1 import scabs1
from .dcabs1 import dcabs1
# Sum of absolute values
from .sasum import sasum
from .dasum import dasum
# Sum of absolute values of components
from .scasum import scasum
from .dzasum import dzasum
# Dot products
from .sdot import sdot
from .dsdot import dsdot
from .ddot import ddot
from .cdotu import cdotu
from .zdotu import zdotu
# Complex dot products
from .cdotc import cdotc
from .zdotc import zdotc
# Dot product plus scalar
from .sdsdot import sdsdot
# Euclidean norm
from .snrm2 import snrm2
from .dnrm2 import dnrm2
from .scnrm2 import scnrm2
from .dznrm2 import dznrm2
|
nilq/baby-python
|
python
|
import torch
import trtorch
precision = 'fp16'
ssd_model = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math=precision)
input_shapes = [1, 3, 300, 300]
model = ssd_model.eval().cuda()
scripted_model = torch.jit.script(model)
compile_settings = {
"input_shapes": [input_shapes],
"op_precision": torch.float16
}
trt_ts_module = trtorch.compile(scripted_model, compile_settings)
torch.jit.save(trt_ts_module, 'ssd.jit')
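# Hypothetical follow-up sketch: reload the compiled TorchScript module and run inference.
# The half-precision input matches the fp16 engine built above, but the exact input
# dtype/shape requirements depend on the exported model, so this is an assumption.
# trt_model = torch.jit.load('ssd.jit')
# dummy = torch.randn(input_shapes, device="cuda").half()
# detections = trt_model(dummy)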
|
nilq/baby-python
|
python
|
# Main file for the PwnedCheck distribution.
# This file retrieves the password, calls the module check_pwd,
# and check if the password has been breached by checking it with
# the database in the following website
# https://haveibeenpwned.com/Passwords
import sys
import logging
from getpass import getpass
from checkpwd.check_pwd import check_pwd
logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
log = logging.getLogger(__name__)
def splash():
"""Splash `pwnedcheck` logo and information."""
print(" _____________________________________________________ ")
print("| ____ _ _ ____ _ |")
print("| / ___| |__ ___ ___| | _| _ \__ ____| | |")
print("| | | | '_ \ / _ \/ __| |/ / |_) \ \ /\ / / _` | |")
print("| | |___| | | | __/ (__| <| __/ \ V V / (_| | |")
print("| \____|_| |_|\___|\___|_|\_\_| \_/\_/ \__,_| |")
print("| |")
print("| Author: Tanjona R. Rabemananjara |")
print("| URL: https://radonirinaunimi.github.io/pwnd-check/ |")
print("|_____________________________________________________|")
def main():
"""Function that fetchs the password given by the user from the command
line using `getpass`. The password is then checked on `HaveIBeenPwned`.
"""
splash()
print("\nEnter your password below.")
pwd = getpass()
try:
# Check the pwd and add the values to some variables
hashed_pwd, nb_match = check_pwd(pwd)
# Print the result
if nb_match:
print(f"The password occurs {nb_match} times (hash: {hashed_pwd})")
else:
print("Your password was not found")
except UnicodeError:
errormsg = sys.exc_info()[1]
log.warning(f"Your password could not be checked: {errormsg}")
|
nilq/baby-python
|
python
|
"""
================================
Symbolic Aggregate approXimation
================================
Binning continuous data into intervals can be seen as an approximation that
reduces noise and captures the trend of a time series. The Symbolic Aggregate
approXimation (SAX) algorithm bins continuous time series into intervals,
transforming independently each time series (a sequence of floats) into a
sequence of symbols, usually letters. This example illustrates the
transformation.
It is implemented as
:class:`pyts.approximation.SymbolicAggregateApproximation`.
"""
# Author: Johann Faouzi <johann.faouzi@gmail.com>
# License: BSD-3-Clause
import numpy as np
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
from scipy.stats import norm
from pyts.approximation import SymbolicAggregateApproximation
# Parameters
n_samples, n_timestamps = 100, 24
# Toy dataset
rng = np.random.RandomState(41)
X = rng.randn(n_samples, n_timestamps)
# SAX transformation
n_bins = 3
sax = SymbolicAggregateApproximation(n_bins=n_bins, strategy='normal')
X_sax = sax.fit_transform(X)
# Compute gaussian bins
bins = norm.ppf(np.linspace(0, 1, n_bins + 1)[1:-1])
# Show the results for the first time series
bottom_bool = np.r_[True, X_sax[0, 1:] > X_sax[0, :-1]]
plt.figure(figsize=(6, 4))
plt.plot(X[0], 'o--', label='Original')
for x, y, s, bottom in zip(range(n_timestamps), X[0], X_sax[0], bottom_bool):
va = 'bottom' if bottom else 'top'
plt.text(x, y, s, ha='center', va=va, fontsize=14, color='#ff7f0e')
plt.hlines(bins, 0, n_timestamps, color='g', linestyles='--', linewidth=0.5)
sax_legend = mlines.Line2D([], [], color='#ff7f0e', marker='*',
label='SAX - {0} bins'.format(n_bins))
first_legend = plt.legend(handles=[sax_legend], fontsize=8, loc=(0.76, 0.86))
ax = plt.gca().add_artist(first_legend)
plt.legend(loc=(0.81, 0.93), fontsize=8)
plt.xlabel('Time', fontsize=14)
plt.title('Symbolic Aggregate approXimation', fontsize=16)
plt.show()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-10 01:54
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cxp_v1', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Download',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(storage=django.core.files.storage.FileSystemStorage(location=b'/home/ubuntu/Django/sessionfiles/protected'), upload_to='download')),
],
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import pytest
from click.testing import CliRunner
from jak.app import main as jak
import jak.crypto_services as cs
@pytest.fixture
def runner():
return CliRunner()
def test_empty(runner):
result = runner.invoke(jak)
assert result.exit_code == 0
assert not result.exception
@pytest.mark.parametrize('version_flag', ['--version', '-v'])
def test_version(runner, version_flag):
result = runner.invoke(jak, [version_flag])
assert not result.exception
assert result.exit_code == 0
assert '(Troubled Toddler)' in result.output.strip()
@pytest.mark.parametrize('cmd, filepath', [
('encrypt', 'filethatdoesnotexist'),
('decrypt', 'filethatdoesnotexist2')])
def test_file_not_found(runner, cmd, filepath):
result = runner.invoke(jak, [cmd, filepath, '-k', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'])
assert 'find the file:' in result.output
def test_encrypt_smoke(runner, tmpdir):
"""This one has proven to be an absolute godsend for finding
weirdness, especially between python versions."""
plaintext_secret = tmpdir.join("secret.txt")
plaintext_secret.write('secret')
runner.invoke(jak,
['encrypt',
plaintext_secret.strpath,
'--key',
'f40ec5d3ef66166720b24b3f8716c2c31ffc6b45295ff72024a45d90e5fddb56'])
assert cs.ENCRYPTED_BY_HEADER in plaintext_secret.read()
def test_decrypt_smoke(runner, tmpdir, monkeypatch):
ciphertext_secret = tmpdir.join("secret.txt")
# This test was leaking backup files
# The cause was the decorator "attach_jwd" which would
    # force the filesystem back into the real world with os.getcwd().
    # My attempt at patching os.getcwd had unintended side effects so instead
    # I patched the helper function to force its return to be the file's location.
def mock_getjwd():
return ciphertext_secret.dirpath().strpath
import jak.helpers as jakh
monkeypatch.setattr(jakh, "get_jak_working_directory", mock_getjwd)
ciphertext_secret.write('''- - - Encrypted by jak - - -
SkFLLTAwMHM0jlOUIaTUeVwbfS459sfDJ1SUW9_3wFFcm2rCxTnLvy1N-Ndb
O7t2Vcol566PnyniPGn9IadqwWFNykZdaycRJG7aL8P4pZnb4gnJcp08OLwR
LiFC7wcITbo6l3Q7Lw==''')
runner.invoke(jak,
['decrypt',
ciphertext_secret.strpath,
'--key',
'f40ec5d3ef66166720b24b3f8716c2c31ffc6b45295ff72024a45d90e5fddb56'])
result = ciphertext_secret.read()
assert cs.ENCRYPTED_BY_HEADER not in result
assert result.strip('\n') == 'attack at dawn'
|
nilq/baby-python
|
python
|
try:
import matplotlib.pyplot as plt
import fuzzycorr.prepro as pp
from pathlib import Path
import numpy as np
import gdal
except ImportError:
    print('ModuleNotFoundError: Missing fundamental packages (required: matplotlib, fuzzycorr, pathlib, numpy, gdal).')
cur_dir = Path.cwd()
Path(cur_dir / "rasters").mkdir(exist_ok=True)
raster_meas = pp.PreProCategorization(str(cur_dir / 'rasters') + '/' + 'vali_meas_2013_res5_clipped.tif')
raster_sim = pp.PreProCategorization(str(cur_dir / 'rasters') + '/' + 'vali_hydro_FT_manual_2013_res5_clipped.tif')
n_classes = 12
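# Pad the computed class breaks with -inf/+inf below so that values outside the
# natural-break range still fall into the first/last class.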
nb_classes = np.insert(raster_meas.nb_classes(n_classes), 0, -np.inf, axis=0)
nb_classes[-1] = np.inf
raster_meas.categorize_raster(nb_classes, map_out=str(cur_dir / 'rasters') + '/' + 'vali_meas_class_nbreaks.tif', save_ascii=False)
raster_sim.categorize_raster(nb_classes, map_out=str(cur_dir / 'rasters') + '/' + 'vali_hydro_FT_manual_class_nbreaks.tif', save_ascii=False)
|
nilq/baby-python
|
python
|
"""Support for Vallox ventilation units."""
from __future__ import annotations
from dataclasses import dataclass, field
import ipaddress
import logging
from typing import Any, NamedTuple
from uuid import UUID
from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox
from vallox_websocket_api.exceptions import ValloxApiException
from vallox_websocket_api.vallox import get_uuid as calculate_uuid
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME, Platform
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType, StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
DEFAULT_FAN_SPEED_AWAY,
DEFAULT_FAN_SPEED_BOOST,
DEFAULT_FAN_SPEED_HOME,
DEFAULT_NAME,
DOMAIN,
METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
METRIC_KEY_PROFILE_FAN_SPEED_HOME,
STATE_SCAN_INTERVAL,
STR_TO_VALLOX_PROFILE_SETTABLE,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
PLATFORMS: list[str] = [
Platform.SENSOR,
Platform.FAN,
Platform.BINARY_SENSOR,
]
ATTR_PROFILE = "profile"
ATTR_PROFILE_FAN_SPEED = "fan_speed"
SERVICE_SCHEMA_SET_PROFILE = vol.Schema(
{
vol.Required(ATTR_PROFILE): vol.All(
cv.string, vol.In(STR_TO_VALLOX_PROFILE_SETTABLE)
)
}
)
SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED = vol.Schema(
{
vol.Required(ATTR_PROFILE_FAN_SPEED): vol.All(
vol.Coerce(int), vol.Clamp(min=0, max=100)
)
}
)
class ServiceMethodDetails(NamedTuple):
"""Details for SERVICE_TO_METHOD mapping."""
method: str
schema: vol.Schema
SERVICE_SET_PROFILE = "set_profile"
SERVICE_SET_PROFILE_FAN_SPEED_HOME = "set_profile_fan_speed_home"
SERVICE_SET_PROFILE_FAN_SPEED_AWAY = "set_profile_fan_speed_away"
SERVICE_SET_PROFILE_FAN_SPEED_BOOST = "set_profile_fan_speed_boost"
SERVICE_TO_METHOD = {
SERVICE_SET_PROFILE: ServiceMethodDetails(
method="async_set_profile",
schema=SERVICE_SCHEMA_SET_PROFILE,
),
SERVICE_SET_PROFILE_FAN_SPEED_HOME: ServiceMethodDetails(
method="async_set_profile_fan_speed_home",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
SERVICE_SET_PROFILE_FAN_SPEED_AWAY: ServiceMethodDetails(
method="async_set_profile_fan_speed_away",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
SERVICE_SET_PROFILE_FAN_SPEED_BOOST: ServiceMethodDetails(
method="async_set_profile_fan_speed_boost",
schema=SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
),
}
@dataclass
class ValloxState:
"""Describes the current state of the unit."""
metric_cache: dict[str, Any] = field(default_factory=dict)
profile: VALLOX_PROFILE = VALLOX_PROFILE.NONE
def get_metric(self, metric_key: str) -> StateType:
"""Return cached state value."""
if (value := self.metric_cache.get(metric_key)) is None:
return None
if not isinstance(value, (str, int, float)):
return None
return value
def get_uuid(self) -> UUID | None:
"""Return cached UUID value."""
uuid = calculate_uuid(self.metric_cache)
if not isinstance(uuid, UUID):
raise ValueError
return uuid
class ValloxDataUpdateCoordinator(DataUpdateCoordinator):
"""The DataUpdateCoordinator for Vallox."""
data: ValloxState
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the integration from configuration.yaml (DEPRECATED)."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the client and boot the platforms."""
host = entry.data[CONF_HOST]
name = entry.data[CONF_NAME]
client = Vallox(host)
async def async_update_data() -> ValloxState:
"""Fetch state update."""
_LOGGER.debug("Updating Vallox state cache")
try:
metric_cache = await client.fetch_metrics()
profile = await client.get_profile()
except (OSError, ValloxApiException) as err:
raise UpdateFailed("Error during state cache update") from err
return ValloxState(metric_cache, profile)
coordinator = ValloxDataUpdateCoordinator(
hass,
_LOGGER,
name=f"{name} DataUpdateCoordinator",
update_interval=STATE_SCAN_INTERVAL,
update_method=async_update_data,
)
await coordinator.async_config_entry_first_refresh()
service_handler = ValloxServiceHandler(client, coordinator)
for vallox_service, service_details in SERVICE_TO_METHOD.items():
hass.services.async_register(
DOMAIN,
vallox_service,
service_handler.async_handle,
schema=service_details.schema,
)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"client": client,
"coordinator": coordinator,
"name": name,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
if hass.data[DOMAIN]:
return unload_ok
for service in SERVICE_TO_METHOD:
hass.services.async_remove(DOMAIN, service)
return unload_ok
class ValloxServiceHandler:
"""Services implementation."""
def __init__(
self, client: Vallox, coordinator: DataUpdateCoordinator[ValloxState]
) -> None:
"""Initialize the proxy."""
self._client = client
self._coordinator = coordinator
async def async_set_profile(self, profile: str = "Home") -> bool:
"""Set the ventilation profile."""
_LOGGER.debug("Setting ventilation profile to: %s", profile)
_LOGGER.warning(
"Attention: The service 'vallox.set_profile' is superseded by the "
"'fan.set_preset_mode' service. It will be removed in the future, please migrate to "
"'fan.set_preset_mode' to prevent breakage"
)
try:
await self._client.set_profile(STR_TO_VALLOX_PROFILE_SETTABLE[profile])
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting ventilation profile: %s", err)
return False
async def async_set_profile_fan_speed_home(
self, fan_speed: int = DEFAULT_FAN_SPEED_HOME
) -> bool:
"""Set the fan speed in percent for the Home profile."""
_LOGGER.debug("Setting Home fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_HOME: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Home profile: %s", err)
return False
async def async_set_profile_fan_speed_away(
self, fan_speed: int = DEFAULT_FAN_SPEED_AWAY
) -> bool:
"""Set the fan speed in percent for the Away profile."""
_LOGGER.debug("Setting Away fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_AWAY: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Away profile: %s", err)
return False
async def async_set_profile_fan_speed_boost(
self, fan_speed: int = DEFAULT_FAN_SPEED_BOOST
) -> bool:
"""Set the fan speed in percent for the Boost profile."""
_LOGGER.debug("Setting Boost fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_BOOST: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Boost profile: %s", err)
return False
async def async_handle(self, call: ServiceCall) -> None:
"""Dispatch a service call."""
service_details = SERVICE_TO_METHOD.get(call.service)
params = call.data.copy()
if service_details is None:
return
if not hasattr(self, service_details.method):
_LOGGER.error("Service not implemented: %s", service_details.method)
return
result = await getattr(self, service_details.method)(**params)
# This state change affects other entities like sensors. Force an immediate update that can
# be observed by all parties involved.
if result:
await self._coordinator.async_request_refresh()
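# Example service call (an illustrative sketch, assuming DOMAIN resolves to "vallox"):
#   service: vallox.set_profile_fan_speed_home
#   data:
#     fan_speed: 60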
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from math import isnan
from typing import Any, List, Optional, TYPE_CHECKING, Union
from iceberg.exceptions import ValidationException
from .expression import (Expression,
Operation)
from .literals import (BaseLiteral,
Literals)
from .term import BoundTerm, UnboundTerm
from ..types import TypeID
if TYPE_CHECKING:
from iceberg.api import StructLike
class Predicate(Expression):
def __init__(self, op: Operation, term: Union[BoundTerm, UnboundTerm]):
if term is None:
raise ValueError("Term cannot be None")
self.op: Operation = op
self.term: Union[BoundTerm, UnboundTerm] = term
@property
def ref(self):
return self.term.ref
@property
def lit(self):
raise NotImplementedError("Not Implemented for base class")
def __eq__(self, other):
if id(self) == id(other):
return True
elif other is None or not isinstance(other, Predicate):
return False
return self.op == other.op and self.ref == other.ref and self.lit == other.lit
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "Predicate({},{},{})".format(self.op, self.ref, self.lit)
def __str__(self):
if self.op == Operation.IS_NULL:
return "is_null({})".format(self.ref)
elif self.op == Operation.NOT_NULL:
return "not_null({})".format(self.ref)
elif self.op == Operation.LT:
return "less_than({})".format(self.ref)
elif self.op == Operation.LT_EQ:
return "less_than_equal({})".format(self.ref)
elif self.op == Operation.GT:
return "greater_than({})".format(self.ref)
elif self.op == Operation.GT_EQ:
return "greater_than_equal({})".format(self.ref)
elif self.op == Operation.EQ:
return "equal({})".format(self.ref)
elif self.op == Operation.NOT_EQ:
return "not_equal({})".format(self.ref)
else:
return "invalid predicate: operation = {}".format(self.op)
class BoundPredicate(Predicate):
def __init__(self, op: Operation, term: BoundTerm, lit: BaseLiteral = None, literals: List[BaseLiteral] = None,
is_unary_predicate: bool = False, is_literal_predicate: bool = False,
is_set_predicate: bool = False):
self.is_unary_predicate = is_unary_predicate
self.is_literal_predicate = is_literal_predicate
self.is_set_predicate = is_set_predicate
super(BoundPredicate, self).__init__(op, term)
ValidationException.check(sum([is_unary_predicate, is_literal_predicate, is_set_predicate]) == 1,
"Only a single predicate type may be set: %s=%s, %s=%s, %s=%s",
("is_unary_predicate", is_unary_predicate,
"is_literal_predicate", is_literal_predicate,
"is_set_predicate", is_set_predicate))
self._literals: Optional[List[BaseLiteral]] = None
if self.is_unary_predicate:
ValidationException.check(lit is None, "Unary Predicates may not have a literal", ())
elif self.is_literal_predicate:
ValidationException.check(lit is not None, "Literal Predicates must have a literal set", ())
self._literals = [lit] # type: ignore
elif self.is_set_predicate:
ValidationException.check(literals is not None, "Set Predicates must have literals set", ())
self._literals = literals
else:
            raise ValueError(f"Unable to instantiate {op} -> (lit={lit}, literals={literals})")
@property
def lit(self) -> Optional[BaseLiteral]:
if self._literals is None or len(self._literals) == 0:
return None
return self._literals[0]
def eval(self, struct: StructLike) -> bool:
ValidationException.check(isinstance(self.term, BoundTerm), "Term must be bound to eval: %s", (self.term))
return self.test(self.term.eval(struct)) # type: ignore
def test(self, struct: StructLike = None, value: Any = None) -> bool:
ValidationException.check(struct is None or value is None, "Either struct or value must be none", ())
if struct is not None:
ValidationException.check(isinstance(self.term, BoundTerm), "Term must be bound to eval: %s", (self.term))
return self.test(value=self.term.eval(struct)) # type: ignore
else:
if self.is_unary_predicate:
return self.test_unary_predicate(value)
elif self.is_literal_predicate:
return self.test_literal_predicate(value)
else:
return self.test_set_predicate(value)
def test_unary_predicate(self, value: Any) -> bool:
if self.op == Operation.IS_NULL:
return value is None
elif self.op == Operation.NOT_NULL:
return value is not None
elif self.op == Operation.IS_NAN:
return isnan(value)
elif self.op == Operation.NOT_NAN:
return not isnan(value)
else:
raise ValueError(f"{self.op} is not a valid unary predicate")
def test_literal_predicate(self, value: Any) -> bool:
if self.lit is None:
raise ValidationException("Literal must not be none", ())
if self.op == Operation.LT:
return value < self.lit.value
elif self.op == Operation.LT_EQ:
return value <= self.lit.value
elif self.op == Operation.GT:
return value > self.lit.value
elif self.op == Operation.GT_EQ:
return value >= self.lit.value
elif self.op == Operation.EQ:
return value == self.lit.value
elif self.op == Operation.NOT_EQ:
return value != self.lit.value
else:
raise ValueError(f"{self.op} is not a valid literal predicate")
def test_set_predicate(self, value: Any) -> bool:
if self._literals is None:
raise ValidationException("Literals must not be none", ())
if self.op == Operation.IN:
return value in self._literals
elif self.op == Operation.NOT_IN:
return value not in self._literals
else:
raise ValueError(f"{self.op} is not a valid set predicate")
class UnboundPredicate(Predicate):
def __init__(self, op, term, value=None, lit=None, values=None, literals=None):
self._literals = None
num_set_args = sum([1 for x in [value, lit, values, literals] if x is not None])
if num_set_args > 1:
raise ValueError(f"Only one of value={value}, lit={lit}, values={values}, literals={literals} may be set")
super(UnboundPredicate, self).__init__(op, term)
if isinstance(value, BaseLiteral):
lit = value
value = None
if value is not None:
self._literals = [Literals.from_(value)]
elif lit is not None:
self._literals = [lit]
elif values is not None:
            self._literals = list(map(Literals.from_, values))
elif literals is not None:
self._literals = literals
@property
def literals(self):
return self._literals
@property
def lit(self):
if self.op in [Operation.IN, Operation.NOT_IN]:
raise ValueError(f"{self.op} predicate cannot return a literal")
return None if self.literals is None else self.literals[0]
def negate(self):
return UnboundPredicate(self.op.negate(), self.term, literals=self.literals)
def bind(self, struct, case_sensitive=True):
bound = self.term.bind(struct, case_sensitive=case_sensitive)
if self.literals is None:
return self.bind_unary_operation(bound)
elif self.op in [Operation.IN, Operation.NOT_IN]:
return self.bind_in_operation(bound)
return self.bind_literal_operation(bound)
def bind_unary_operation(self, bound_term: BoundTerm) -> BoundPredicate:
from .expressions import Expressions
if self.op == Operation.IS_NULL:
if bound_term.ref.field.is_required:
return Expressions.always_false()
return BoundPredicate(Operation.IS_NULL, bound_term, is_unary_predicate=True)
elif self.op == Operation.NOT_NULL:
if bound_term.ref.field.is_required:
return Expressions.always_true()
return BoundPredicate(Operation.NOT_NULL, bound_term, is_unary_predicate=True)
elif self.op in [Operation.IS_NAN, Operation.NOT_NAN]:
if not self.floating_type(bound_term.ref.type.type_id):
raise ValidationException(f"{self.op} cannot be used with a non-floating column", ())
return BoundPredicate(self.op, bound_term, is_unary_predicate=True)
raise ValidationException(f"Operation must be in [IS_NULL, NOT_NULL, IS_NAN, NOT_NAN] was:{self.op}", ())
def bind_in_operation(self, bound_term):
from .expressions import Expressions
def convert_literal(lit):
converted = lit.to(bound_term)
ValidationException.check(converted is not None,
"Invalid Value for conversion to type %s: %s (%s)",
(bound_term.type, lit, lit.__class__.__name__))
return converted
        # filter() returns a lazy iterator in Python 3, so materialize it before len()/set()
        converted_literals = list(filter(lambda x: x != Literals.above_max() and x != Literals.below_min(),
                                         [convert_literal(lit) for lit in self.literals]))
        if len(converted_literals) == 0:
            return Expressions.always_true() if self.op == Operation.NOT_IN else Expressions.always_false()
        literal_set = set(converted_literals)
        if len(literal_set) == 1:
            single_literal = next(iter(literal_set))
            if self.op == Operation.IN:
                return BoundPredicate(Operation.EQ, bound_term, lit=single_literal, is_literal_predicate=True)
            elif self.op == Operation.NOT_IN:
                return BoundPredicate(Operation.NOT_EQ, bound_term, lit=single_literal, is_literal_predicate=True)
else:
raise ValidationException("Operation must be in or not in", ())
return BoundPredicate(self.op, bound_term, literals=literal_set, is_set_predicate=True)
def bind_literal_operation(self, bound_term):
from .expressions import Expressions
lit = self.lit.to(bound_term.type)
ValidationException.check(lit is not None,
"Invalid Value for conversion to type %s: %s (%s)",
(bound_term.type, self.lit, self.lit.__class__.__name__))
if lit == Literals.above_max():
if self.op in [Operation.LT, Operation.LT_EQ, Operation.NOT_EQ]:
return Expressions.always_true()
elif self.op in [Operation.GT, Operation.GT_EQ, Operation.EQ]:
return Expressions.always_false()
elif lit == Literals.below_min():
if self.op in [Operation.LT, Operation.LT_EQ, Operation.NOT_EQ]:
return Expressions.always_false()
elif self.op in [Operation.GT, Operation.GT_EQ, Operation.EQ]:
return Expressions.always_true()
return BoundPredicate(self.op, bound_term, lit=lit, is_literal_predicate=True)
@staticmethod
def floating_type(type_id: TypeID) -> bool:
return type_id in [TypeID.FLOAT, TypeID.DOUBLE]
|
nilq/baby-python
|
python
|
import numpy as np # type: ignore
import pandas as pd # type: ignore
# imported ML models from scikit-learn
from sklearn.model_selection import (ShuffleSplit, StratifiedShuffleSplit, # type: ignore
TimeSeriesSplit, cross_val_score) # type: ignore
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # type: ignore
from sklearn.ensemble import (BaggingRegressor, ExtraTreesRegressor, # type: ignore
RandomForestClassifier, ExtraTreesClassifier, # type: ignore
AdaBoostRegressor, AdaBoostClassifier) # type: ignore
from sklearn.linear_model import LinearRegression, LogisticRegression, RidgeCV # type: ignore
from sklearn.svm import LinearSVC, SVR, LinearSVR # type: ignore
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier # type: ignore
# imported specialized tree models from scikit-garden
# from skgarden import RandomForestQuantileRegressor
# helper functions
from ..utils import print_static_rmse, print_dynamic_rmse
import pdb
def run_ensemble_model(X, Y, modeltype='Regression', scoring='', verbose=0):
"""
Quickly builds and runs multiple models for a clean data set(only numerics).
"""
seed = 99
if len(X) <= 100000 or X.shape[1] < 50:
NUMS = 50
FOLDS = 3
else:
NUMS = 20
FOLDS = 5
## create Voting models
estimators = []
if modeltype == 'Regression':
if scoring == '':
scoring = 'neg_mean_squared_error'
scv = ShuffleSplit(n_splits=FOLDS, random_state=seed)
model5 = LinearRegression()
results1 = cross_val_score(model5, X, Y, cv=scv, scoring=scoring)
estimators.append(('Linear Model', model5, np.sqrt(abs(results1.mean()))))
model6 = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(
min_samples_leaf=2, max_depth=1, random_state=seed),
n_estimators=NUMS, random_state=seed)
results2 = cross_val_score(model6, X, Y, cv=scv, scoring=scoring)
estimators.append(('Boosting', model6, np.sqrt(abs(results2.mean()))))
model7 = RidgeCV(alphas=np.logspace(-10, -1, 50), cv=scv)
results3 = cross_val_score(model7, X, Y, cv=scv, scoring=scoring)
estimators.append(('Linear Regularization', model7, np.sqrt(abs(results3.mean()))))
## Create an ensemble model ####
# estimators_list = [(tuples[0], tuples[1]) for tuples in estimators] # unused
ensemble = BaggingRegressor(DecisionTreeRegressor(random_state=seed),
n_estimators=NUMS, random_state=seed)
results4 = cross_val_score(ensemble, X, Y, cv=scv, scoring=scoring)
estimators.append(('Bagging', ensemble, np.sqrt(abs(results4.mean()))))
if verbose == 1:
print('\nLinear Model = %0.4f \nBoosting = %0.4f\nRegularization = %0.4f \nBagging = %0.4f' %(
np.sqrt(abs(results1.mean()))/Y.std(), np.sqrt(abs(results2.mean()))/Y.std(),
np.sqrt(abs(results3.mean()))/Y.std(), np.sqrt(abs(results4.mean()))/Y.std()))
besttype = sorted(estimators, key=lambda x: x[2], reverse=False)[0][0]
bestmodel = sorted(estimators, key=lambda x: x[2], reverse=False)[0][1]
bestscore = sorted(estimators, key=lambda x: x[2], reverse=False)[0][2]/Y.std()
if verbose == 1:
print(' Best Model = %s with %0.2f Normalized RMSE score\n' %(besttype,bestscore))
elif modeltype == 'TimeSeries' or modeltype =='Time Series' or modeltype == 'Time_Series':
#### This section is for Time Series Models only ####
if scoring == '':
scoring = 'neg_mean_squared_error'
tscv = TimeSeriesSplit(n_splits=FOLDS)
scoring = 'neg_mean_squared_error'
model5 = SVR(C=0.1, kernel='rbf', degree=2)
results1 = cross_val_score(model5, X, Y, cv=tscv, scoring=scoring)
estimators.append(('SVR', model5, np.sqrt(abs(results1.mean()))))
model6 = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(
min_samples_leaf=2, max_depth=1, random_state=seed),
n_estimators=NUMS, random_state=seed)
results2 = cross_val_score(model6, X, Y, cv=tscv, scoring=scoring)
        estimators.append(('Boosting', model6, np.sqrt(abs(results2.mean()))))
model7 = LinearSVR(random_state=seed)
results3 = cross_val_score(model7, X, Y, cv=tscv, scoring=scoring)
estimators.append(('LinearSVR', model7, np.sqrt(abs(results3.mean()))))
## Create an ensemble model ####
# estimators_list = [(tuples[0], tuples[1]) for tuples in estimators] # unused
ensemble = BaggingRegressor(DecisionTreeRegressor(random_state=seed),
n_estimators=NUMS, random_state=seed)
results4 = cross_val_score(ensemble, X, Y, cv=tscv, scoring=scoring)
estimators.append(('Bagging', ensemble, np.sqrt(abs(results4.mean()))))
print('Running multiple models...')
if verbose == 1:
print(' Instance Based = %0.4f \n Boosting = %0.4f\n Linear Model = %0.4f \n Bagging = %0.4f' %(
np.sqrt(abs(results1.mean()))/Y.std(), np.sqrt(abs(results2.mean()))/Y.std(),
np.sqrt(abs(results3.mean()))/Y.std(), np.sqrt(abs(results4.mean()))/Y.std()))
besttype = sorted(estimators, key=lambda x: x[2], reverse=False)[0][0]
bestmodel = sorted(estimators, key=lambda x: x[2], reverse=False)[0][1]
bestscore = sorted(estimators, key=lambda x: x[2], reverse=False)[0][2]/Y.std()
if verbose == 1:
print('Best Model = %s with %0.2f Normalized RMSE score\n' % (besttype, bestscore))
print('Model Results:')
else:
if scoring == '':
scoring = 'f1'
scv = StratifiedShuffleSplit(n_splits=FOLDS, random_state=seed)
model5 = LogisticRegression(random_state=seed)
results1 = cross_val_score(model5, X, Y, cv=scv, scoring=scoring)
estimators.append(('Logistic Regression', model5, abs(results1.mean())))
model6 = LinearDiscriminantAnalysis()
results2 = cross_val_score(model6, X, Y, cv=scv, scoring=scoring)
estimators.append(('Linear Discriminant', model6, abs(results2.mean())))
model7 = ExtraTreesClassifier(n_estimators=NUMS, min_samples_leaf=2, random_state=seed)
results3 = cross_val_score(model7, X, Y, cv=scv, scoring=scoring)
estimators.append(('Bagging', model7, abs(results3.mean())))
## Create an ensemble model ####
# estimators_list = [(tuples[0], tuples[1]) for tuples in estimators] # unused
ensemble = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(
random_state=seed, max_depth=1, min_samples_leaf=2),
n_estimators=NUMS, random_state=seed)
results4 = cross_val_score(ensemble, X, Y, cv=scv, scoring=scoring)
estimators.append(('Boosting', ensemble, abs(results4.mean())))
if verbose == 1:
print('\nLogistic Regression = %0.4f \nLinear Discriminant = %0.4f \nBagging = %0.4f \nBoosting = %0.4f' %
(abs(results1.mean()), abs(results2.mean()), abs(results3.mean()), abs(results4.mean())))
besttype = sorted(estimators, key=lambda x: x[2], reverse=True)[0][0]
bestmodel = sorted(estimators, key=lambda x: x[2], reverse=True)[0][1]
bestscore = sorted(estimators, key=lambda x: x[2], reverse=True)[0][2]
if verbose == 1:
print(' Best Model = %s with %0.2f %s score\n' % (besttype, bestscore, scoring))
return bestmodel, bestscore, besttype
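# Minimal usage sketch (illustrative only; X_demo/y_demo are hypothetical stand-ins):
#   from sklearn.datasets import make_regression
#   X_demo, y_demo = make_regression(n_samples=200, n_features=5, noise=0.1, random_state=99)
#   best_model, best_score, best_type = run_ensemble_model(
#       X_demo, pd.Series(y_demo), modeltype='Regression', verbose=1)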
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
__author__ = 'Florian Hase'
#=======================================================================
from DatabaseHandler.PickleWriters.db_writer import DB_Writer
|
nilq/baby-python
|
python
|
#
# @lc app=leetcode id=383 lang=python
#
# [383] Ransom Note
#
# https://leetcode.com/problems/ransom-note/description/
#
# algorithms
# Easy (49.29%)
# Total Accepted: 107.4K
# Total Submissions: 216.8K
# Testcase Example: '"a"\n"b"'
#
#
# Given an arbitrary ransom note string and another string containing letters
# from all the magazines, write a function that will return true if the ransom
# note can be constructed from the magazines ; otherwise, it will return
# false.
#
#
# Each letter in the magazine string can only be used once in your ransom
# note.
#
#
# Note:
# You may assume that both strings contain only lowercase letters.
#
#
#
# canConstruct("a", "b") -> false
# canConstruct("aa", "ab") -> false
# canConstruct("aa", "aab") -> true
#
#
#
class Solution(object):
def canConstruct(self, ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
magazine = list(magazine)
for i in ransomNote:
if i in magazine:
magazine.remove(i)
else:
return False
return True
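# A linear-time alternative sketch (not part of the original submission): counting
# letters with collections.Counter avoids the O(len(ransomNote) * len(magazine))
# cost of the repeated list.remove() scans above.
from collections import Counter
def can_construct_with_counter(ransomNote, magazine):
    need = Counter(ransomNote)
    have = Counter(magazine)
    return all(have[ch] >= cnt for ch, cnt in need.items())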
|
nilq/baby-python
|
python
|
"""
Guidelines from whitehouse.gov/openingamerica/
SYMPTOMS:
- Downward Trajectory of Flu-like illnesses
AND
- Downward Trajectory of Covid symptoms 14 day period
CASES:
- Downward Trajectory of documented cases within 14 day period
OR
- Downward Trajectory of positive tests within 14 days
(flat or increasing volume of tests)
HOSPITALS:
- Treat all patients WITHOUT crisis care
- Robust testing program in place including antibody testing
Data is collected daily at 9PM from Johns Hopkins University
- https://github.com/CSSEGISandData/COVID-19
- Data is assumed to be accurate. Confirmed cases include presumptive
positive cases
- A positive test counts as an active case
- Only data from the mainland 48 states, Alaska, Hawaii, and D.C is parsed and
calculated. Territories are not included in this calculation.
"""
import requests, time, io, pandas, numpy, json, smtplib, ssl, csv, schedule
from datetime import datetime, date, timedelta
from ftplib import FTP
import firebase_admin
from firebase_admin import credentials, firestore
from state_code import state_codes
from hospital_capacity import hospital_capacity
class DataCollect():
"""
Main data collection class.
Contains methods used to collect and aggregate data.
"""
def __init__(self):
self.data_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/{}.csv"
self.set_data()
print("Script is now running. Waiting to collect and upload data...")
self.upload_data_or_email()
schedule.every().day.at("00:01").do(self.set_data)
schedule.every().day.at("00:02").do(self.upload_data_or_email)
self.run_automated_tasks()
def set_data(self):
"""
Sets variables (to be run every night at midnight)
"""
self.data = self.get_data()
self.state_cases = self.get_cases_trend()
self.state_hospitals = self.get_hospital_capacity()
self.state_tests = self.get_test_trend()
def get_data(self):
"""
        Requests data from the Johns Hopkins University database from the most
recent day it can find, and then also from 14 days prior and every day
inbetween (in order to calculate general trajectory)
"""
today_date = date.today()
data = None
while True:
str__today_date = today_date.strftime("%m-%d-%Y")
print("Attempting to download data from %s..." % str__today_date)
request = requests.get(self.data_url.format(str__today_date))
#If we have data for today, use that
if request != None and request.status_code == 200:
data = pandas.read_csv(io.StringIO(request.content.decode('utf-8')))
data['date'] = str__today_date
self.most_recent_date = str__today_date
prior_date = today_date - timedelta(days=1)
#Get data from every day between 14 days ago and today
while prior_date != today_date - timedelta(days=15):
str__prior_date = prior_date.strftime("%m-%d-%Y")
prior_request = requests.get(self.data_url.format(str__prior_date))
print("Attempting to download data from %s..." % str__prior_date)
                    if prior_request is not None and prior_request.status_code == 200:
prior_data = pandas.read_csv(io.StringIO(prior_request.content.decode('utf-8')))
prior_data['date'] = str__prior_date
data = pandas.concat([data, prior_data], ignore_index=True)
else:
print("Couldn't find data for %s, skipping..." % str__prior_date)
prior_date = prior_date - timedelta(days=1)
break
else:
print("Couldn't find data for %s, attempting previous day..." % str__today_date)
today_date = today_date - timedelta(days=1)
#Convert all dates to datetime
data['date'] = pandas.to_datetime(data['date'])
#Convert dataframe to all lowercase
data.columns = data.columns.str.lower()
data = data.applymap(lambda s:s.lower() if type(s) == str else s)
#Reverse dataframe (so the latest day is at the top)
data = data.iloc[::-1]
print("Data retrieved!")
return data
def get_cases_trend(self):
"""
Calculates which states meet the guideline:
"Downward Trajectory of documented cases within 14 day period"
Grabs active cases for each state over the 14 day period and calculates
slope of those points.
If slope is negative, then it meets guideline.
If the slope if positive, then it does not meet guideline.
"""
state_cases = {}
for state in state_codes.keys():
state_data = self.data.loc[self.data['province_state'] == state]
df = pandas.DataFrame(state_data, columns=['active'])
df = df.reset_index(drop=True)
state_cases[state] = self.is_downward_trend(df, 'active')
print("States that meet active case guidelines: \n{}".format(json.dumps(state_cases, indent=2)))
return state_cases
def get_hospital_capacity(self):
"""
Calculates which states meet the guideline:
"Treat all patients WITHOUT crisis care"
Grabs the active cases for each state and calculates
if that number is <= the total hospital capacity of the state.
If the number is <= the capacity, then it meets the guideline.
If the number is > the capacity, then it does not meet the guideline.
"""
state_hospitals = {}
for state in state_codes.keys():
state_data = self.data.loc[self.data['province_state'] == state]
current_active = state_data.iloc[-1]
state_code = state_codes[state]
state_hospitals[state] = bool(current_active['active'] <= hospital_capacity[state_code])
print("States that meet hospital guidelines: \n{}".format(json.dumps(state_hospitals, indent=2)))
return state_hospitals
def get_test_trend(self):
"""
Calculates which states meet the guideline:
"Downward Trajectory of positive tests within 14 days
(flat or increasing volume of tests)"
Grabs the number of people tested and active cases per state and
calculates the slope of both lines.
If (active cases <= 0 && tests >= 0), then it meets the guideline.
If the above expression is not true, then it does not meet the guideline.
"""
state_tests = {}
for state in state_codes.keys():
state_data = self.data.loc[self.data['province_state'] == state]
df = pandas.DataFrame(state_data, columns=['people_tested', 'active'])
df = df.reset_index(drop=True)
state_tests[state] = (self.is_downward_trend(df, 'active') & self.is_upward_trend(df, 'people_tested'))
return state_tests
def get_case_info(self, state):
"""
Returns information about cases over the 14 day period for a given
state. That info is:
- Net change in new cases
- Cases at beginning of 14 days
- Cases at end of 14 days
- Number of people tested
"""
case_info = {}
state_data = self.data.loc[self.data['province_state'] == state.lower()]
df = pandas.DataFrame(state_data, columns=['active', 'confirmed', 'people_tested'])
df = df.reset_index(drop=True)
case_info["beginning"] = df.iloc[0]['active']
case_info["end"] = df.iloc[-1]['active']
case_info["net"] = df.iloc[-1]['active'] - df.iloc[0]['active']
case_info["total"] = df.iloc[-1]['confirmed']
case_info["total_tests"] = df.iloc[-1]['people_tested']
return case_info
def get_slope(self, data, column):
"""
Calculates the slope of a graph given a list of data
"""
slope_data = data.apply(lambda x: numpy.polyfit(data.index, x, 1)[0])
return slope_data[column]
def is_downward_trend(self, data, column):
"""
Determines if the trend is downward (slope is negative)
"""
slope = self.get_slope(data, column)
return True if slope <= 0 else False
def is_upward_trend(self, data, column):
"""
Determines if the trend is upward (slope is positive)
"""
slope = self.get_slope(data, column)
return True if slope >= 0 else False
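    # Illustrative behaviour (hypothetical data, not executed here): for a strictly
    # rising series such as [1, 2, 3, 4], numpy.polyfit gives a positive slope, so
    # is_upward_trend() returns True and is_downward_trend() returns False.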
def compile_data(self):
"""
Compiles all data into one coherent json file to be parsed by the
frontend
"""
data = { "info": {} }
for state in state_codes.keys():
case_info = self.get_case_info(state)
data["info"][state] = {}
data["info"][state]["state_code"] = state_codes[state]
data["info"][state]["total_hospital_capacity"] = hospital_capacity[state_codes[state]]
data["info"][state]["downward_cases"] = self.state_cases[state]
data["info"][state]["enough_hospital_capacity"] = self.state_hospitals[state]
data["info"][state]["beginning_cases"] = case_info["beginning"]
data["info"][state]["end_cases"] = case_info["end"]
data["info"][state]["net_case_change"] = case_info["net"]
data["info"][state]["total_cases"] = case_info["total"]
data["info"][state]["enough_tests"] = self.state_tests[state]
data["info"][state]["total_tests"] = case_info["total_tests"]
data["info"][state]["should_open"] = (self.state_cases[state] & self.state_hospitals[state] & self.state_tests[state])
data["info"][state]["most_recent_date"] = self.most_recent_date
print("Successfully parsed data into json file. Result:\n")
print(json.dumps(data, indent=2))
#Upload file to firebase
creds = credentials.Certificate('credentials.json')
firebase_admin.initialize_app(creds, { 'databaseURL': 'https://should-my-state-open.firebaseio.com' })
try:
database = firestore.client()
collection = database.collection('data')
data['createdAt'] = datetime.now()
collection.document().create(data)
except Exception as e:
print(f"An exception Occurred:\n {e}")
self.exception_message = e
return False
return True
def send_error_email(self):
"""
Sends an error email to johnnyleek2001@gmail.com using provided
credentials if something goes wrong
"""
email_user = ""
email_password = ""
with open('email_credentials', 'r') as email_credentials:
csv_reader = csv.reader(email_credentials, delimiter=',')
for row in csv_reader:
email_user = row[0]
email_password = row[1]
message = """\
Subject: WEB ERROR shouldmystateopen.com
WEB ERROR OCCURRED @ https://shouldmystateopen.com (FTP UPLOAD)
Date: {date} | Time: {time}
Exception message as follows:
{error}
""".format(date=date.today().strftime("%m-%d-%Y"),
time=datetime.now().strftime("%H:%M:%S"),
error=self.exception_message if self.exception_message else "No message provided."
)
ssl._create_default_https_context = ssl._create_unverified_context
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as email_server:
try:
email_server.login(email_user, email_password)
email_server.sendmail(email_user, "johnnyleek2001@gmail.com", message)
self.exception_message = None
except Exception as e:
print("Couldn't send email...\n{}".format(e))
finally:
email_server.quit()
def upload_data_or_email(self):
"""
        Attempts to upload the compiled data; sends an error email if the upload fails
"""
if not self.compile_data():
self.send_error_email()
def run_automated_tasks(self):
"""
Runs scheduled tasks
"""
while True:
schedule.run_pending()
time.sleep(1)
collect = DataCollect()
|
nilq/baby-python
|
python
|
from pymodm import connect, MongoModel, fields
from pymodm.base.fields import MongoBaseField
import pymongo
from datetime import datetime as dt
db_server = "mongodb+srv://AtlasUser:8dNHh2kXNijBjNuQ@cluster0.a532e"
db_server += ".mongodb.net/ECGServer?retryWrites=true&w=majority"
mongodb_server = connect(db_server)
class Patient(MongoModel):
# Medical Record Number
# Patient Name
# ECG Images as b64 string
# Heart Rate Data
# Datetime timestamps as strftime strings
# Medical Images as b64 string
MRN = fields.IntegerField(primary_key=True)
patient_name = fields.CharField()
ECG_trace = fields.ListField(fields.CharField())
heart_rate = fields.ListField(fields.IntegerField())
receipt_timestamps = fields.ListField(fields.CharField())
medical_image = fields.ListField(fields.CharField())
class PatientTest(MongoModel):
MRN = fields.IntegerField(primary_key=True)
patient_name = fields.CharField()
ECG_trace = fields.ListField(fields.CharField())
heart_rate = fields.ListField(fields.IntegerField())
receipt_timestamps = fields.ListField(fields.CharField())
medical_image = fields.ListField(fields.CharField())
def get_database():
"""Simply returns the mongodb_server object
Returns:
mongodb_server object
"""
return mongodb_server
def clean_database():
"""Deletes all contents of the Patient database
"""
Patient.objects.raw({}).delete()
# from PIL import Image
# # Testing Patient class & Database Connection
# import image_toolbox as tb
# x = PatientTest()
# x.MRN = 1
# x.patient_name = "Anuj Som"
# x.ECG_trace.append(tb.file_to_b64("images/test_image.png"))
# x.heart_rate.append(60)
# x.receipt_timestamps.append(dt.now().strftime("%Y-%m-%d %H:%M:%S"))
# x.save()
|
nilq/baby-python
|
python
|
from django.urls import path
from account.views import (
index, profile,
LOGIN, LOGOUT, REGISTER,
activate, change_password, update_profile, change_profile_pic
)
from account.api import (
check_username_existing, get_users
)
app_name = 'account'
urlpatterns = [
# API URLs
path('api/check_username/', check_username_existing, name='username_existing'),
path('api/get_users/', get_users, name='get_users'),
# View URLs
path('', index, name='home'),
path('profile/<int:pk>/', profile, name='profile'),
path('login/', LOGIN, name='login'),
path('logout/', LOGOUT, name='logout'),
path('register/', REGISTER, name='register'),
path('update-password/', change_password, name='update-password'),
path('update-profile/', update_profile, name='update-profile'),
    path('update-profile-pic/', change_profile_pic, name='update-profile-pic'),
# Functional URLs
path('activate/<uidb64>/<token>', activate, name='activate'),
]
|
nilq/baby-python
|
python
|
import msgpack_numpy
import os
import torch
from collections import defaultdict
from typing import List
import lmdb
import magnum as mn
import numpy as np
from torch.utils.data import Dataset
from tqdm import tqdm
import habitat
from habitat import logger
from habitat.datasets.utils import VocabDict
from habitat.tasks.pickplace.pickplace import RearrangementEpisode
class ObservationsDict(dict):
def pin_memory(self):
for k, v in self.items():
self[k] = v.pin_memory()
return self
def collate_fn(batch):
"""Each sample in batch: (
obs,
prev_actions,
oracle_actions,
inflec_weight,
)
"""
def _pad_helper(t, max_len, fill_val=0):
pad_amount = max_len - t.size(0)
if pad_amount == 0:
return t
pad = torch.full_like(t[0:1], fill_val).expand(pad_amount, *t.size()[1:])
return torch.cat([t, pad], dim=0)
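    # _pad_helper example: with max_len = 5, a tensor of shape (3, H, W) comes back
    # as (5, H, W), the two appended frames filled with fill_val.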
transposed = list(zip(*batch))
observations_batch = list(transposed[1])
next_actions_batch = list(transposed[2])
prev_actions_batch = list(transposed[3])
weights_batch = list(transposed[4])
B = len(prev_actions_batch)
new_observations_batch = defaultdict(list)
for sensor in observations_batch[0]:
for bid in range(B):
new_observations_batch[sensor].append(observations_batch[bid][sensor])
observations_batch = new_observations_batch
max_traj_len = max(ele.size(0) for ele in prev_actions_batch)
for bid in range(B):
for sensor in observations_batch:
observations_batch[sensor][bid] = _pad_helper(
observations_batch[sensor][bid], max_traj_len, fill_val=1.0
)
next_actions_batch[bid] = _pad_helper(next_actions_batch[bid], max_traj_len)
prev_actions_batch[bid] = _pad_helper(prev_actions_batch[bid], max_traj_len)
weights_batch[bid] = _pad_helper(weights_batch[bid], max_traj_len)
for sensor in observations_batch:
observations_batch[sensor] = torch.stack(observations_batch[sensor], dim=1)
next_actions_batch = torch.stack(next_actions_batch, dim=1)
prev_actions_batch = torch.stack(prev_actions_batch, dim=1)
weights_batch = torch.stack(weights_batch, dim=1)
not_done_masks = torch.ones_like(next_actions_batch, dtype=torch.float)
not_done_masks[0] = 0
observations_batch = ObservationsDict(observations_batch)
return (
observations_batch,
prev_actions_batch,
not_done_masks,
next_actions_batch,
weights_batch,
)
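# Typical hookup (a sketch; the trainer that builds the dataset and loader is
# assumed to live elsewhere in this repository):
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4,
#                                        collate_fn=collate_fn, pin_memory=True)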
class PickPlaceDataset(Dataset):
"""Pytorch dataset for object rearrangement task for each episode"""
def __init__(self, config, content_scenes=["*"], mode="train", use_iw=False, inflection_weight_coef=1.0):
"""
Args:
env (habitat.Env): Habitat environment
config: Config
mode: 'train'/'val'
"""
scene_split_name = "train"
if content_scenes[0] != "*":
scene_split_name = "_".join(content_scenes)
self.config = config.TASK_CONFIG
self.dataset_path = config.DATASET_PATH.format(split=mode, scene_split=scene_split_name)
self.config.defrost()
self.config.DATASET.CONTENT_SCENES = content_scenes
self.config.freeze()
self.resolution = [self.config.SIMULATOR.RGB_SENSOR.WIDTH, self.config.SIMULATOR.RGB_SENSOR.HEIGHT]
self.possible_actions = config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS
self.total_actions = 0
self.inflections = 0
self.inflection_weight_coef = inflection_weight_coef
if use_iw:
self.inflec_weight = torch.tensor([1.0, inflection_weight_coef])
else:
self.inflec_weight = torch.tensor([1.0, 1.0])
if not self.cache_exists():
"""
for each scene > load scene in memory > save frames for each
episode corresponding to that scene
"""
self.env = habitat.Env(config=self.config)
self.episodes = self.env._dataset.episodes
self.instruction_vocab = self.env._dataset.instruction_vocab
logger.info(
"Dataset cache not found. Saving rgb, seg, depth scene images"
)
logger.info(
"Number of {} episodes: {}".format(mode, len(self.episodes))
)
self.scene_ids = []
self.scene_episode_dict = {}
# dict for storing list of episodes for each scene
for episode in self.episodes:
if episode.scene_id not in self.scene_ids:
self.scene_ids.append(episode.scene_id)
self.scene_episode_dict[episode.scene_id] = [episode]
else:
self.scene_episode_dict[episode.scene_id].append(episode)
self.lmdb_env = lmdb.open(
self.dataset_path,
map_size=int(2e12),
writemap=True,
)
self.count = 0
for scene in tqdm(list(self.scene_episode_dict.keys())):
for episode in tqdm(self.scene_episode_dict[scene]):
self.load_scene(scene, episode)
state_index_queue = []
try:
# TODO: Consider alternative for shortest_paths
state_index_queue.extend(range(0, len(episode.reference_replay) - 1))
except AttributeError as e:
logger.error(e)
self.save_frames(state_index_queue, episode)
print("Inflection weight coef: {}, N: {}, nI: {}".format(self.total_actions / self.inflections, self.total_actions, self.inflections))
logger.info("Rearrangement database ready!")
self.env.close()
else:
logger.info("Dataset cache found.")
self.lmdb_env = lmdb.open(
self.dataset_path,
readonly=True,
lock=False,
)
self.dataset_length = int(self.lmdb_env.begin().stat()["entries"] / 4)
self.lmdb_env.close()
self.lmdb_env = None
def save_frames(
self, state_index_queue: List[int], episode: RearrangementEpisode
) -> None:
r"""
Writes rgb, seg, depth frames to LMDB.
"""
next_actions = []
prev_actions = []
observations = {
"rgb": [],
"depth": [],
"instruction": [],
}
reference_replay = episode.reference_replay
instruction = episode.instruction
print("Replay len: {}".format(len(reference_replay)))
for state_index in state_index_queue:
instruction_tokens = np.array(instruction.instruction_tokens)
state = reference_replay[state_index]
position = state.agent_state.position
rotation = state.agent_state.rotation
object_states = state.object_states
sensor_states = state.agent_state.sensor_data
observation = self.env.sim.get_observations_at(
position, rotation, sensor_states, object_states
)
next_state = reference_replay[state_index + 1]
next_action = self.possible_actions.index(next_state.action)
prev_state = reference_replay[state_index]
prev_action = self.possible_actions.index(prev_state.action)
observations["depth"].append(observation["depth"])
observations["rgb"].append(observation["rgb"])
observations["instruction"].append(instruction_tokens)
next_actions.append(next_action)
prev_actions.append(prev_action)
oracle_actions = np.array(next_actions)
inflection_weights = np.concatenate(([1], oracle_actions[1:] != oracle_actions[:-1]))
self.total_actions += inflection_weights.shape[0]
self.inflections += np.sum(inflection_weights)
inflection_weights = self.inflec_weight[torch.from_numpy(inflection_weights)].numpy()
sample_key = "{0:0=6d}".format(self.count)
with self.lmdb_env.begin(write=True) as txn:
txn.put((sample_key + "_obs").encode(), msgpack_numpy.packb(observations, use_bin_type=True))
txn.put((sample_key + "_next_action").encode(), np.array(next_actions).tobytes())
txn.put((sample_key + "_prev_action").encode(), np.array(prev_actions).tobytes())
txn.put((sample_key + "_weights").encode(), inflection_weights.tobytes())
self.count += 1
# images_to_video(images=obs_list, output_dir="demos", video_name="dummy_{}".format(self.count))
def cache_exists(self) -> bool:
if os.path.exists(self.dataset_path):
if os.listdir(self.dataset_path):
return True
else:
os.makedirs(self.dataset_path)
return False
def get_vocab_dict(self) -> VocabDict:
r"""Returns Instruction VocabDicts"""
return self.instruction_vocab
def load_scene(self, scene, episode) -> None:
self.config.defrost()
self.config.SIMULATOR.SCENE = scene
self.config.SIMULATOR.objects = episode.objects
self.config.freeze()
self.env.sim.reconfigure(self.config.SIMULATOR)
def __len__(self) -> int:
return self.dataset_length
def __getitem__(self, idx: int):
r"""Returns batches to trainer.
batch: (rgb, depth, seg)
"""
if self.lmdb_env is None:
self.lmdb_env = lmdb.open(
self.dataset_path,
map_size=int(2e12),
writemap=True,
)
self.lmdb_txn = self.lmdb_env.begin()
self.lmdb_cursor = self.lmdb_txn.cursor()
height, width = int(self.resolution[0]), int(self.resolution[1])
obs_idx = "{0:0=6d}_obs".format(idx)
observations_binary = self.lmdb_cursor.get(obs_idx.encode())
observations = msgpack_numpy.unpackb(observations_binary, raw=False)
for k, v in observations.items():
obs = np.array(observations[k])
observations[k] = torch.from_numpy(obs)
next_action_idx = "{0:0=6d}_next_action".format(idx)
next_action_binary = self.lmdb_cursor.get(next_action_idx.encode())
next_action = np.frombuffer(next_action_binary, dtype="int")
next_action = torch.from_numpy(np.copy(next_action))
prev_action_idx = "{0:0=6d}_prev_action".format(idx)
prev_action_binary = self.lmdb_cursor.get(prev_action_idx.encode())
prev_action = np.frombuffer(prev_action_binary, dtype="int")
prev_action = torch.from_numpy(np.copy(prev_action))
weight_idx = "{0:0=6d}_weights".format(idx)
weight_binary = self.lmdb_cursor.get(weight_idx.encode())
weight = np.frombuffer(weight_binary, dtype="float32")
weight = torch.from_numpy(np.copy(weight))
weight = torch.where(weight != 1.0, self.inflection_weight_coef, 1.0)
return idx, observations, next_action, prev_action, weight
|
nilq/baby-python
|
python
|
from visualization import plot_binary_grid # used to show results
from wrapper import multi_image # import segmenter for multiple images
if __name__ == "__main__":
# Apply segmenter to default test images.
print("Classifying images...")
masks = multi_image() # uses default test image
print("Complete.")
# Show results.
plot_binary_grid(masks)
|
nilq/baby-python
|
python
|
# from sqlalchemy package we can import Column, String, Integer, Date, Sequence
from sqlalchemy import Column, String, Integer, Date, Sequence
# exception classes referenced in the except blocks below
from sqlalchemy.exc import ArgumentError, UnboundExecutionError
# imports Config class from config module
from config import Config
# Exception handling using try/except around the FeatureRequestApp class definition.
try:
    # FeatureRequestApp is the class mapped to the 'FeatureRequestApp' table; the required table columns are declared as class attributes.
class FeatureRequestApp(Config.base):
"""Simple database model with required columns and table name."""
# A class using Declarative needs a __tablename__ attribute, and one Column which is a primary key
__tablename__ = 'FeatureRequestApp'
featureId = Column('featureId', Integer, Sequence('feature_id_seq'),unique=True,primary_key=True)
title = Column(String(250),unique=True)
description = Column(String(1000))
client = Column(String(100))
clientPriority = Column(Integer())
targetDate = Column(Date())
productArea = Column(String(100))
# __init__ is a special method in Python classes, it is the constructor method for a class
# __init__ is called when ever an object of the class is constructed.
def __init__(self, title, description, client, clientpriority, targetdate, productarea):
self.title = title
self.description = description
self.client = client
self.clientPriority = clientpriority
self.targetDate = targetdate
self.productArea = productarea
# The declarative_base() base class contains a MetaData object where newly defined Table objects are collected.
# This object is to be accessed for MetaData-specific operations.Such as, to issue CREATE statements for all tables.
Config.base.metadata.create_all(Config.db)
except ArgumentError as argexp:
print('Missing connection string or primary key', argexp)
except UnboundExecutionError as unexp:
print('SQL was attempted without a database connection to execute it on', unexp)
except IndexError as indexerror:
print('Missing Table Name', indexerror)
except TypeError as typeerror:
print('Check Params', typeerror)
except TimeoutError as timeout:
print('Connection TimedOut', timeout)
|
nilq/baby-python
|
python
|
from typing import List
import config
import datetime
from email.mime.text import MIMEText
from html.parser import HTMLParser
import email.utils as utils
import logging
import queue
import re
import sys
import threading
from time import strftime
import socket
import feedparser
import yaml
from imap_wrapper import ImapWrapper
class FilterError(IOError):
pass
class TranslationException(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def item_message_id(feed, item):
msgid = item.get('id', item.link)
if not msgid:
msgid = feed.Name + " / " + item.title + " AT " + item.get('date', 'No date')
msgid = msgid.replace(' ', '_')
msgid = re.sub('[^\x00-\x7f]', '_', msgid)
return msgid
def rss_item_to_email(item, feed):
# Cribbing things from StackOverflow is fun. :)
def strip_html(dat):
class TagStripper(HTMLParser):
def __init__(self):
super().__init__()
self.convert_charrefs = True
self.texts = []
def handle_data(self, t):
self.texts.append(t)
def result(self):
return ''.join(self.texts)
ts = TagStripper()
ts.feed(dat)
ts.close()
return ts.result()
try:
text = '<p>Item Link: <a href="%s">%s</a></p>' % (item.link, item.link)
if 'summary' in item:
text = text + "<br>" + item.summary
email = MIMEText(text, "html")
email['Subject'] = feed.format_subject(subject=strip_html(item.title))
email['From'] = item.get('author', '(Author Not Provided)')
email['Message-Id'] = item_message_id(feed, item)
if 'published' in item:
date = item.published
date_parts = item.published_parsed
elif 'updated' in item:
date = item.updated
date_parts = item.updated_parsed
elif 'created' in item:
date = item.created
date_parts = item.created_parsed
else:
date = None
date_parts = datetime.datetime.now().timetuple()
if date_parts is None:
date_parts = utils.parsedate(strip_html(date))
# RSS feeds may contain parsable dates that aren't allowed in email.
if not (date_parts is None):
date = strftime("%A, %b %d %Y %H:%M:%S %Z", date_parts)
email['Date'] = strip_html(date)
return email
except Exception as e:
raise TranslationException(item) from e
class FeedItem:
def __init__(self, feed, rss_item):
self.feed = feed
self.rss_item = rss_item
self.email = rss_item_to_email(rss_item, feed)
self.message_id = self.email['Message-Id']
class FeedConfig:
def __init__(self, dat, *parent_configs):
def _extract_setting(name):
for obj in [dat, *parent_configs]:
if name in obj:
return obj[name]
raise IndexError(f'Cannot find config value for {name}')
self.Name = dat['Name']
self.URL = dat['URL']
self.folder_template = _extract_setting('FolderTemplate')
self.subject_template = _extract_setting('SubjectTemplate')
def __repr__(self):
return ("{ Name: %s; URL: %s; Folder: %s; Subject: %s }" % (self.Name, self.URL, self.folder_template, self.subject_template))
def quoted_folder(self):
return self.folder_template.format(name=self.Name)
def format_subject(self, subject):
return self.subject_template.format(name=self.Name, subject=subject)
def fetch_feed_items(feed):
l = logging.getLogger(__name__)
l.info("Fetching feed %s", feed.URL)
content = feedparser.parse(feed.URL)
l.info("Done fetching feed %s", feed.URL)
if content.bozo:
l.warning("Feed %s had bozo set for '%s'", feed.URL, content.bozo_exception)
for item in content.entries:
yield FeedItem(feed, item)
def parse_configs(configs):
l = logging.getLogger(__name__)
feed_configs : List[FeedConfig] = []
app_config = {'FolderTemplate': config.feed_folder_template, 'SubjectTemplate': config.subject_template}
for dat in configs:
parent_config = app_config
l.debug("Config data: %s", dat)
for item in filter(lambda p: p != None, yaml.safe_load_all(dat)):
if 'Configuration' in item and 'Items' not in item:
l.debug("Config item: %s", dat)
parent_config = item['Configuration']
elif 'Configuration' in item and 'Items' in item:
parent = item['Configuration']
for feed in item['Items']:
feed_configs.append(FeedConfig(feed, parent, parent_config))
elif 'Items' in item:
for feed in item['Items']:
feed_configs.append(FeedConfig(feed, parent_config))
else:
feed_configs.append(FeedConfig(item, parent_config))
return feed_configs
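# Example of a config document parse_configs() accepts (inferred from the parsing
# logic above; the templates may instead come from a parent 'Configuration' block):
#   Configuration:
#     FolderTemplate: "RSS/{name}"
#     SubjectTemplate: "[{name}] {subject}"
#   Items:
#     - Name: Example Feed
#       URL: https://example.com/feed.xml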
class RssIMAP:
def __init__(self):
pass
def connect_imap(self, hostname, username, password, **kwargs):
self._W = ImapWrapper(hostname, username, password, **kwargs)
self._W.ensure_folder(config.config_mailbox)
def config_data_from_imap(self):
# Don't be lazy about this.
ret = []
for msg in self._W.fetch_messages(config.config_mailbox, 'SUBJECT', 'rss-imap', 'NOT', 'DELETED'):
if msg.is_multipart():
for part in msg.get_payload():
name = part.get_param('Name', '(none)')
if 'Folders' in name:
ret.append(part.get_payload(None, True).decode('UTF-8'))
elif name == '(none)' and part.get_content_type() == 'text/plain':
ret.append(part.get_payload(None, True).decode('UTF-8'))
else:
ret.append(msg.get_payload())
return ret
def get_feed_config_from_imap(self):
the_data = self.config_data_from_imap()
return parse_configs(the_data)
def filter_items(self, folder, items):
"""Filter a list of items to only those that do not exist on the server."""
try:
have_ids = self._W.check_folder_for_message_ids(folder, [item.message_id for item in items])
except:
l = logging.getLogger(__name__)
l.exception("Exception while checking existing items in %s", folder)
try:
have_ids = self._W.check_folder_for_message_ids(folder, [item.message_id for item in items])
except:
l.exception("Second exception while checking existing items in %s; skipping.", folder)
return []
want_items = []
for item in items:
if not (item.message_id.encode('utf-8') in have_ids):
want_items.append(item)
return want_items
def save_item_to_imap(self, item):
l = logging.getLogger(__name__)
l.info('New item "%s" for feed "%s", with message_id "%s"', item.email['Subject'], item.feed.Name, item.message_id)
self._W.append(item.feed.quoted_folder(), item.email)
def save_items_to_imap(self, items):
for item in items:
self.save_item_to_imap(item)
def disconnect(self):
self._W.logout()
if __name__ == '__main__':
config.configure_logging()
# The default is to just hang forever if one of
# the RSS feed servers isn't responding.
socket.setdefaulttimeout(10)
x = RssIMAP()
x.connect_imap(config.hostname, config.username, config.password)
feeds = x.get_feed_config_from_imap()
todo = queue.Queue()
producer_threads = []
def producer(feed):
l = logging.getLogger(__name__)
items = list(fetch_feed_items(feed))
if len(items) > 0:
todo.put((feed, items))
def consumer():
l = logging.getLogger(__name__)
while True:
(feed, items) = todo.get()
            if items is None:
break
l.info("Filtering %d items from feed %s", len(items), feed.URL)
filtered = x.filter_items(feed.quoted_folder(), items)
l.info("Done filtering feed %s", feed.URL)
            if len(filtered) == 0:
continue
x.save_items_to_imap(filtered)
l.info("Done saving %d new items from feed %s", len(filtered), feed.URL)
consumer_thread = threading.Thread(target=consumer, name="Consumer")
consumer_thread.start()
for feed in feeds:
thread = threading.Thread(target=producer, name=f"Fetch {feed.URL}", args=(feed,))
thread.start()
producer_threads.append(thread)
for producer in producer_threads:
producer.join()
todo.put((None, None))
consumer_thread.join()
x.disconnect()
|
nilq/baby-python
|
python
|
from flask.views import MethodView
class APIView(MethodView):
api_version = None
path = None
    @classmethod
    def get_path(cls):
        if cls.path:
            return cls.path
        elif cls.__name__.endswith('View'):
            return cls.__name__[:-4].lower()
        else:
            return cls.__name__
    @classmethod
    def get_rule(cls):
        if cls.api_version is None:
            raise RuntimeError("An API version is required")
        return '/v{}/{}'.format(cls.api_version, cls.get_path())
    @classmethod
    def add_rule_to_app(cls, app, prefix=None):
        rule = cls.get_rule()
        app.add_url_rule(
            (prefix or '') + rule,
            view_func=cls.as_view(rule.strip('/').replace('/', '_').lower())
        )
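# Illustrative usage sketch (the view, app and route below are invented, not part
# of this module): a subclass sets `api_version` (and optionally `path`), and
# registering it on a Flask app yields a versioned rule such as /v1/status.
if __name__ == "__main__":
    from flask import Flask
    class StatusView(APIView):
        api_version = 1
        def get(self):
            return 'ok'
    app = Flask(__name__)
    StatusView.add_rule_to_app(app)  # adds the URL rule '/v1/status'
    print(app.url_map)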
|
nilq/baby-python
|
python
|
'''
Tests for the application factory and its API routes.
'''
from app import create_app
from fastapi import FastAPI
from fastapi.testclient import TestClient
from pytest import fixture
@fixture
def client():
    '''FastAPI test client.'''
app = create_app()
return TestClient(app)
def test_create_app(client):
assert isinstance(create_app(), FastAPI)
def test_home_deve_retornar_200(client):
response = client.get('/')
assert response.status_code == 200
def test_home_deve_retornar_ola_regis(client):
    # response is the response object from the requests API.
response = client.get('/')
assert response.json() == {'message': 'Ola Regis'}
def test_pessoas_deve_retornar_200_quando_chamar_com_eduardo(client):
response = client.get('/pessoa/eduardo')
assert response.status_code == 200
def test_pessoas_deve_retornar_chamou_eduardo_quando_chamar_com_eduardo(client):
response = client.get('/pessoa/eduardo')
assert response.json() == {'message': 'Você chamou eduardo'}
# def test_busca_por_id_deve_retornar_404(client):
# response = client.get('/id/42')
# assert response.status_code == 404
def test_busca_por_id_1_deve_retornar_404(client):
response = client.get('/id/1')
assert response.status_code == 404
def test_busca_por_id_1_deve_retornar_nao_tem_1(client):
response = client.get('/id/1')
assert response.json() == {'detail': 'Não tem 1'}
def test_busca_por_id_2_deve_retornar_200(client):
response = client.get('/id/2')
assert response.status_code == 200
def test_busca_por_id_2_deve_retornar_regis(client):
response = client.get('/id/2')
assert response.json() == {'name': 'regis'}
def test_inserir_usuario_no_banco_deve_retornar_201(client):
user = {
'id': 1,
'nome': 'Regis',
'idade': 42,
'email': 'regis@email.com',
}
response = client.post('/inserir/', json=user)
assert response.status_code == 201
def test_inserir_entidade_não_processável_retorna_422(client):
user = {
'nome': 'Regis',
'email': 'regis@email.com',
}
response = client.post('/inserir/', json=user)
assert response.status_code == 422
def test_pessoas_deve_retornar_200(client):
response = client.get('/pessoas')
assert response.status_code == 200
def test_pessoas_deve_retornar_lista_de_pessoas(client):
response = client.get('/pessoas')
pessoas = [
{"id": 1, "nome": "Regis", "idade": 42, "email": "regis@email.com"}
]
assert response.json() == pessoas
def test_get_pessoas_deve_retornar_200(client):
response = client.get('/pessoas/1')
assert response.status_code == 200
def test_get_pessoas_deve_retornar_um_dict(client):
response = client.get('/pessoas/1')
pessoa = {"id": 1, "nome": "Regis", "idade": 42, "email": "regis@email.com"}
assert response.json() == pessoa
def test_pessoas_add_deve_retornar_201(client):
pessoa = {"id": 1, "nome": "Regis", "idade": 42, "email": "regis@email.com"}
response = client.post('/pessoas/add/', json=pessoa)
assert response.status_code == 201
|
nilq/baby-python
|
python
|
"""A module for evaluating policies."""
import os
import json
import pandas as pd
import matplotlib.pyplot as plt
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from lake_monster.environment.environment import LakeMonsterEnvironment
from lake_monster.environment.variations import MultiMonsterEnvironment, JumpingEnvironment
from lake_monster import configs
def evaluate_episode(policy, env_params):
"""Use naive while loop to evaluate policy in single episode."""
if 'n_monsters' in env_params:
env = MultiMonsterEnvironment
elif 'is_jumping' in env_params:
env = JumpingEnvironment
else:
env = LakeMonsterEnvironment
py_env = env(**env_params)
tf_env = TFPyEnvironment(py_env)
ts = tf_env.reset()
n_steps = 0
while not ts.is_last():
action = policy.action(ts)
ts = tf_env.step(action.action)
n_steps += 1
reward = ts.reward.numpy().item()
return reward, n_steps * py_env.step_size
def probe_policy_const_steps(policy, env_params):
"""Determine the maximum monster speed at which policy can succeed."""
highest_speed_with_success = 0.0
n_steps_at_success = 0
n_consecutive_fails = 0
current_monster_speed = 0.0
delta = 1.0
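    # Coarse-to-fine search: raise the monster speed by `delta` until the policy
    # fails three consecutive episodes, then fall back to the last successful
    # speed, halve `delta`, and repeat until `delta` drops below roughly 0.001.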
while True:
print('.', end='', flush=True)
current_monster_speed += delta
env_params['monster_speed'] = current_monster_speed
env_params['use_mini_rewards'] = False
reward, n_steps = evaluate_episode(policy, env_params)
reward = round(reward)
if reward not in [0, 1]:
raise ValueError(f'Strange reward. Reward encountered: {reward}')
if reward == 0:
n_consecutive_fails += 1
else:
highest_speed_with_success = current_monster_speed
n_steps_at_success = n_steps
n_consecutive_fails = 0
if n_consecutive_fails == 3:
if delta < 0.001 - 1e-6: # tolerance
print('')
return highest_speed_with_success, n_steps_at_success
delta *= 0.5
current_monster_speed = highest_speed_with_success
n_consecutive_fails = 0
def probe_policy(policy, env_params):
"""Call probe_policy at different step_sizes."""
current_step_size = env_params['step_size']
result = {'monster_speed': 0.0, 'step_size': 0.0, 'n_env_steps': 0}
for multiplier in [1/16, 1/8, 1/4, 1/2, 1]:
step_size = multiplier * current_step_size
env_params['step_size'] = step_size
monster_speed, n_env_steps = probe_policy_const_steps(policy, env_params)
if monster_speed > result['monster_speed']:
result['monster_speed'] = monster_speed
result['step_size'] = step_size
result['n_env_steps'] = n_env_steps
return result
def result_df():
"""Return DataFrame of monster speed data in results.json."""
with open(configs.RESULTS_PATH) as f:
data = json.load(f)
dfs = []
params = {}
for uid in data:
params[uid] = data[uid]['params']
results = data[uid]['results']
if results:
df = pd.DataFrame(results)
df = df.set_index('n_episode', drop=True)
df = df.drop(['step_size', 'n_env_steps'], axis=1)
df = df.rename(columns={'monster_speed': uid})
dfs.append(df)
return pd.concat(dfs, axis=1), params
def plot_results(policies=None):
"""Plot evaluation monter speeds over training."""
df, _ = result_df()
if policies:
df = df[policies]
df = df[df.index <= 600_000]
df = df.rolling(25).mean()
plt.figure(figsize=(12, 8))
df.plot(legend=False, ax=plt.gca())
plt.xlabel('episode number')
plt.ylabel('monster speed')
plt.title('Smoothed evaluation scores over training')
# plt.legend(loc='lower right', fontsize='xx-small')
plt.grid()
save_path = os.path.join(configs.ASSETS_DIR, 'results.png')
plt.savefig(save_path, dpi=300)
plt.show()
def print_strongest_policies():
"""Print a markdown table showing agent parameters and evaluation results."""
df, params = result_df()
shortened_names = {
'n_actions': 'n act',
'initial_step_size': 'init step',
'initial_monster_speed': 'init speed',
'timeout_factor': 'timeout',
'fc_layer_params': 'layers',
'dropout_layer_params': 'dropout',
'learning_rate': 'learn rate',
'epsilon_greedy': 'epsilon',
'n_step_update': 'update',
'use_categorical': 'categorical',
'use_step_schedule': 'schedule'
}
params_df = []
for p in df.columns:
# printing out high speed policies separately from markdown
episode = df[p].idxmax()
speed = df[p][episode]
if speed > 4.3:
print(speed, p + '-' + str(episode))
results = {}
results['max speed'] = round(speed, 3)
results['avg speed'] = round(df[p].mean(), 3)
for k, v in shortened_names.items():
results[v] = params[p][k]
if results['categorical']:
results['dropout'] = 'None'
if results['dropout'] is None: # getting None to appear in markdown
results['dropout'] = 'None'
params_df.append(results)
params_df = pd.DataFrame(params_df)
params_df = params_df.sort_values(by='max speed', axis=0, ascending=False)
print(params_df.to_markdown(index=False))
if __name__ == '__main__':
print_strongest_policies()
plot_results()
|
nilq/baby-python
|
python
|
import numpy as np
from numpy import exp,dot,full,cos,sin,real,imag,power,pi,log,sqrt,roll,linspace,arange,transpose,pad,complex128 as c128, float32 as f32, float64 as f64
from numba import njit,jit,complex128 as nbc128, void
import os
os.environ['NUMEXPR_MAX_THREADS'] = '16'
os.environ['NUMEXPR_NUM_THREADS'] = '8'
import numexpr as ne
from mesh import RectMesh3D,RectMesh2D
import optics
from misc import timeit, overlap, normalize,printProgressBar, overlap_nonu, norm_nonu,resize
### to do ###
## performance
# maybe adaptive z stepping
# get a better refinement criterion -- now weighting partially by 2nd deriv. still could use some work
# compute r = 1 and r=/=1 points separately? -- I TRIED IT -- WHY IS THIS SLOWER
# more efficient ways to store arrays with many repeated values -- some sort of sparse-like data structure?
# optimize tri_solve_vec : maybe try out dask (parallelize) -- WHY IS THIS ALSO SLOWER
#ignore shifting of IOR arrays in trimats calc?
## readability
# actually add doc strings
# combine some functions into "remesh" and "recompute" functions
# remove unused functions
# move all the eval strings somewhere else (together)
def genc(shape):
return np.empty(shape,dtype=c128,order='F')
def genf(shape):
    return np.empty(shape,dtype=f64,order='F')
@njit(void(nbc128[:,:],nbc128[:,:],nbc128[:,:],nbc128[:,:],nbc128[:,:],nbc128[:,:]))
def tri_solve_vec(a,b,c,r,g,u):
    '''Apply Thomas' method for simultaneously solving a set of tridiagonal systems. a, b, c, and r are matrices
    (N rows) where each column corresponds to a separate system'''
N = a.shape[0]
beta = b[0]
u[0] = r[0]/beta
for j in range(1,N):
g[j] = c[j-1]/beta
beta = b[j] - a[j]*g[j]
u[j] = (r[j] - a[j]*u[j-1])/beta
for j in range(N-1):
k = N-2-j
u[k] = u[k] - g[k+1]*u[k+1]
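# Illustrative sketch (not part of the original module): how tri_solve_vec is
# called on a batch of tridiagonal systems. The shapes and values are invented;
# a, b, c hold the sub/main/super diagonals, r the right-hand sides, g is
# scratch space and u receives the solutions, one system per column.
def _tri_solve_vec_example(N=8,M=4):
    a = np.zeros((N,M),dtype=c128)    # sub-diagonal, a[0] unused
    b = np.full((N,M),2.,dtype=c128)  # main diagonal
    c = np.zeros((N,M),dtype=c128)    # super-diagonal, c[-1] unused
    a[1:] = -1.
    c[:-1] = -1.
    r = np.ones((N,M),dtype=c128)     # right-hand sides
    g = np.empty((N,M),dtype=c128)    # scratch
    u = np.empty((N,M),dtype=c128)    # solution, filled in place
    tri_solve_vec(a,b,c,r,g,u)
    return u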
class Prop3D:
'''beam propagator. employs finite-differences beam propagation with PML as the boundary condition. works on an adaptive mesh'''
def __init__(self,wl0,mesh:RectMesh3D,optical_system:optics.OpticSys,n0):
xymesh = mesh.xy
self.wl0 = wl0
self.k0 = k0 = 2.*pi/wl0
self.k02 = k02 = k0*k0
self.mesh = mesh
self.n0 = n0
self.sig = sig = -2.j*k0*n0/mesh.dz
self.field = None
self.optical_system = optical_system
self.optical_system.set_sampling(xymesh)
self.nb2 = nb2 = optical_system.nb2
self.n02 = n02 = n0*n0
## things that will be set during computation
self.xgrid_cor_facs = [[]]*3
self.ygrid_cor_facs = [[]]*3
self.xgrid_cor_mask = []
self.ygrid_cor_mask = []
## precomputing some stuff ##
Rx,Tupx,Tdox,Ry,Tupy,Tdoy = self.calculate_PML_mats()
dx02 = mesh.xy.dx0**2
dy02 = mesh.xy.dy0**2
K = k02*(nb2-n02)
n02 = power(n0,2)
## coeff matrices of tridiagonal system, updated periodically
self._a0x = None
self._b0x = None
self._c0x = None
self._a0y = None
self._b0y = None
self._c0y = None
self.a0x_ = None
self.b0x_ = None
self.c0x_ = None
self.a0y_ = None
self.b0y_ = None
self.c0y_ = None
## same as above but in PML zone
self._apmlx = sig/12. - 0.5/dx02*Tdox - K/48.
self._bpmlx = 5./6.*sig + Rx/dx02 - 5./24. * K
self._cpmlx = sig/12. - 0.5/dx02*Tupx - K/48.
self.apmlx_ = sig/12. + 0.5/dx02*Tdox + K/48.
self.bpmlx_ = 5./6.*sig - Rx/dx02 + 5./24. * K
self.cpmlx_ = sig/12. + 0.5/dx02*Tupx + K/48.
self._apmly = sig/12. - 0.5/dy02*Tdoy - K/48.
self._bpmly = 5./6.*sig + Ry/dy02 - 5./24. * K
self._cpmly = sig/12. - 0.5/dy02*Tupy - K/48.
self.apmly_ = sig/12. + 0.5/dy02*Tdoy + K/48.
self.bpmly_ = 5./6.*sig - Ry/dy02 + 5./24. * K
self.cpmly_ = sig/12. + 0.5/dy02*Tupy + K/48.
self.half_dz = mesh.dz/2.
self.power = np.empty((mesh.zres,))
self.totalpower = np.empty((mesh.zres,))
def allocate_mats(self):
sx,sy = self.mesh.xy.xg.shape,self.mesh.xy.yg.T.shape
_trimatsx = (genc(sx),genc(sx),genc(sx))
_trimatsy = (genc(sy),genc(sy),genc(sy))
rmatx,rmaty = genc(sx),genc(sy)
gx = genc(sx)
gy = genc(sy)
fill = self.nb2*self.k02
IORsq__ = np.full(sx,fill,dtype=f64)
_IORsq_ = np.full(sx,fill,dtype=f64)
__IORsq = np.full(sx,fill,dtype=f64)
return _trimatsx,rmatx,gx,_trimatsy,rmaty,gy,IORsq__,_IORsq_,__IORsq
def check_z_inv(self):
return self.optical_system.z_invariant
def set_IORsq(self,out,z,xg=None,yg=None):
#premultiply by k02 so we don't have to keep doing it later
self.optical_system.set_IORsq(out,z,xg,yg,coeff=self.k02)
def calculate_PML_mats(self):
'''As per textbook <Beam Propagation Method for Design of Optical Waveguide Devices> ,
        calculate the matrices R, T_j+1, and T_j-1 in the PML zone. We assume that
        the PML's refractive index will be constant, equal to the background index.
'''
m = self.mesh
xy = m.xy
xverts = xy.pvert_xa
sdox = m.sigmax(xverts-xy.dx0)
sx = m.sigmax(xverts)
supx = m.sigmax(xverts+xy.dx0)
yverts = xy.pvert_ya
sdoy = m.sigmay(yverts-xy.dy0)
sy = m.sigmay(yverts)
supy = m.sigmay(yverts+xy.dy0)
Qdox = 1./(1.+1.j*sdox*self.nb2)
Qx = 1./(1.+1.j*sx*self.nb2)
Qupx = 1./(1.+1.j*supx*self.nb2)
Tupx = 0.5 * Qx * (Qx+Qupx)
Tdox = 0.5 * Qx * (Qx+Qdox)
Rx = 0.25 * Qx * (Qdox+2*Qx+Qupx)
Qdoy = 1./(1.+1.j*sdoy*self.nb2)
Qy = 1./(1.+1.j*sy*self.nb2)
Qupy = 1./(1.+1.j*supy*self.nb2)
Tupy= 0.5 * Qy * (Qy+Qupy)
Tdoy = 0.5 * Qy * (Qy+Qdoy)
Ry = 0.25 * Qy * (Qdoy+2*Qy+Qupy)
return (Rx,Tupx,Tdox,Ry,Tupy,Tdoy)
def update_grid_cor_facs(self,which='x'):
xy = self.mesh.xy
ix = xy.cvert_ix
if which=='x':
r = xy.rxa[ix]
self.xgrid_cor_imask = np.where(r[1:-1]!=1)[0]
else:
r = xy.rya[ix]
self.ygrid_cor_imask = np.where(r[1:-1]!=1)[0]
r2 = r*r
R1 = (r2 + r -1)/(6*r*(r+1))
R2 = (r2 + 3*r + 1)/(6*r)
R3 = (-r2 + r + 1)/(6*(r+1))
## alternative values from paper
#R1 = (3*r2 - 3*r + 1)/ (6*r*(r+1))
#R2 = (-r2 + 7*r - 1)/(6*r)
#R3 = (r2 - 3*r + 3)/(6*(r+1))
if which=='x':
self.xgrid_cor_facs[0] = R1
self.xgrid_cor_facs[1] = R2
self.xgrid_cor_facs[2] = R3
else:
self.ygrid_cor_facs[0] = R1
self.ygrid_cor_facs[1] = R2
self.ygrid_cor_facs[2] = R3
def precomp_trimats(self,which='x'):
ix = self.mesh.xy.cvert_ix
s = self.sig
nu0 = -self.k02*self.n02
eval1 = "s*r3 - 1/(r+1)/(d*d) - 0.25*r3*n"
eval2 = "s*r2 + 1/r/(d*d) - 0.25*r2*n"
eval3 = "s*r1 - 1/r/(r+1)/(d*d) - 0.25*r1*n"
if which == 'x':
R1,R2,R3 = self.xgrid_cor_facs
r = self.mesh.xy.rxa[ix]
dla = self.mesh.xy.dxa[ix]
self._a0x = ne.evaluate(eval1,local_dict={"s":s,"r3":R3[1:,None],"r":r[1:,None],"d":dla[1:,None],"n":nu0})
self._b0x = ne.evaluate(eval2,local_dict={"s":s,"r2":R2[:,None],"r":r[:,None],"d":dla[:,None],"n":nu0})
self._c0x = ne.evaluate(eval3,local_dict={"s":s,"r1":R1[:-1,None],"r":r[:-1,None],"d":dla[:-1,None],"n":nu0})
else:
R1,R2,R3 = self.ygrid_cor_facs
r = self.mesh.xy.rya[ix]
dla = self.mesh.xy.dya[ix]
self._a0y = ne.evaluate(eval1,local_dict={"s":s,"r3":R3[1:,None],"r":r[1:,None],"d":dla[1:,None],"n":nu0})
self._b0y = ne.evaluate(eval2,local_dict={"s":s,"r2":R2[:,None],"r":r[:,None],"d":dla[:,None],"n":nu0})
self._c0y = ne.evaluate(eval3,local_dict={"s":s,"r1":R1[:-1,None],"r":r[:-1,None],"d":dla[:-1,None],"n":nu0})
def _trimats(self,out,IORsq,which='x'):
''' calculate the tridiagonal matrices in the computational zone '''
ix = self.mesh.xy.cvert_ix
_IORsq = IORsq[ix]
if which == 'x':
R1,R2,R3 = self.xgrid_cor_facs
r = self.mesh.xy.rxa[ix]
dla = self.mesh.xy.dxa[ix]
a,b,c = self._a0x,self._b0x,self._c0x
else:
R1,R2,R3 = self.ygrid_cor_facs
r = self.mesh.xy.rya[ix]
dla = self.mesh.xy.dya[ix]
a,b,c = self._a0y,self._b0y,self._c0y
_a,_b,_c = out
s = self.sig
eval1 = "a - 0.25*r3*n"
eval2 = "b - 0.25*r2*n"
eval3 = "c - 0.25*r1*n"
ne.evaluate(eval1,local_dict={"a":a,"r3":R3[1:,None],"n":_IORsq[:-1]},out=_a[ix][1:])
ne.evaluate(eval2,local_dict={"b":b,"r2":R2[:,None],"n":_IORsq},out=_b[ix])
ne.evaluate(eval3,local_dict={"c":c,"r1":R1[:-1,None],"n":_IORsq[1:]},out=_c[ix][:-1])
_a[ix][0] = s*R3[0] - 1. / ((r[0]+1) * dla[0]*dla[0]) - 0.25*R3[0]*(_IORsq[0]-self.n02*self.k02)
_c[ix][-1] = s*R1[-1] - 1/r[-1]/(r[-1]+1)/(dla[-1]*dla[-1]) - 0.25*R1[-1]*(_IORsq[-1]-self.n02*self.k02)
def rmat_pmlcorrect(self,_rmat,u,which='x'):
if which == 'x':
apml,bpml,cpml = self.apmlx_,self.bpmlx_,self.cpmlx_
else:
apml,bpml,cpml = self.apmly_,self.bpmly_,self.cpmly_
pix = self.mesh.xy.pvert_ix
temp = np.empty_like(_rmat[pix])
temp[1:-1] = apml[1:-1,None]*u[pix-1][1:-1] + bpml[1:-1,None]*u[pix][1:-1] + cpml[1:-1,None]*u[pix+1][1:-1]
temp[0] = bpml[0]*u[0] + cpml[0]*u[1]
temp[-1] = apml[-1]*u[-2] + bpml[-1]*u[-1]
_rmat[pix] = temp
def rmat(self,_rmat,u,IORsq,which='x'):
ix = self.mesh.xy.cvert_ix
_IORsq = IORsq[ix]
s = self.sig
if which == 'x':
R1,R2,R3 = self.xgrid_cor_facs
dla = self.mesh.xy.dxa[ix]
r = self.mesh.xy.rxa[ix]
a,b,c = self.a0x_,self.b0x_,self.c0x_
else:
R1,R2,R3 = self.ygrid_cor_facs
dla = self.mesh.xy.dya[ix]
r = self.mesh.xy.rya[ix]
a,b,c = self.a0y_,self.b0y_,self.c0y_
N = self.n02*self.k02
m = np.s_[1:-1,None]
        _dict = {"a":a,"b":b,"c":c,"u1":u[ix][:-2],"u2":u[ix][1:-1],"u3":u[ix][2:],"n1":_IORsq[:-2],"n2":_IORsq[1:-1],"n3":_IORsq[2:],"r3":R3[m],"r2":R2[m],"r1":R1[m]}
_eval = "(a+0.25*r3*n1)*u1 + (b+0.25*r2*n2)*u2 + (c+0.25*r1*n3)*u3"
ne.evaluate(_eval,local_dict=_dict,out=_rmat[ix][1:-1])
_rmat[ix][0] = (s*R2[0] - 1/(r[0]*dla[0]**2 ) + 0.25*R2[0]*(_IORsq[0]-N))*u[0] + (s*R1[0] + 1/r[0]/(r[0]+1)/dla[0]**2 + 0.25*R1[0] * (_IORsq[1]-N) )*u[1]
_rmat[ix][-1] = (s*R3[-1] + 1. / ((r[-1]+1) * dla[-1]**2) + 0.25*R3[-1]*(_IORsq[-2]-N))*u[-2] + (s*R2[-1] - 1/(r[-1]*dla[-1]**2) + 0.25*R2[-1]*(_IORsq[-1]-N))*u[-1]
def rmat_precomp(self,which='x'):
ix = self.mesh.xy.cvert_ix
s = self.sig
n0 = -self.k02 * self.n02
m = np.s_[1:-1,None]
eval1="(s*r3+1/(r+1)/(d*d)+0.25*r3*n)"
eval2="(s*r2-1/r/(d*d)+0.25*r2*n)"
eval3="(s*r1+1/r/(r+1)/(d*d) + 0.25*r1*n)"
if which == 'x':
R1,R2,R3 = self.xgrid_cor_facs
r = self.mesh.xy.rxa[ix]
dla = self.mesh.xy.dxa[ix]
_dict = {"s":s,"r3":R3[m],"r":r[m],"d":dla[m],"n":n0,"r2":R2[m],"r1":R1[m]}
self.a0x_ = ne.evaluate(eval1,local_dict=_dict)
self.b0x_ = ne.evaluate(eval2,local_dict=_dict)
self.c0x_ = ne.evaluate(eval3,local_dict=_dict)
else:
R1,R2,R3 = self.ygrid_cor_facs
r = self.mesh.xy.rya[ix]
dla = self.mesh.xy.dya[ix]
_dict = {"s":s,"r3":R3[m],"r":r[m],"d":dla[m],"n":n0,"r2":R2[m],"r1":R1[m]}
self.a0y_ = ne.evaluate(eval1,local_dict=_dict)
self.b0y_ = ne.evaluate(eval2,local_dict=_dict)
self.c0y_ = ne.evaluate(eval3,local_dict=_dict)
def _pmlcorrect(self,_trimats,which='x'):
ix = self.mesh.xy.pvert_ix
_a,_b,_c = _trimats
if which=='x':
_a[ix] = self._apmlx[:,None]
_b[ix] = self._bpmlx[:,None]
_c[ix] = self._cpmlx[:,None]
else:
_a[ix] = self._apmly[:,None]
_b[ix] = self._bpmly[:,None]
_c[ix] = self._cpmly[:,None]
@timeit
def prop2end(self,_u,xyslice=None,zslice=None,u1_func=None,writeto=None,ref_val=5.e-6,remesh_every=20,dynamic_n0 = False,fplanewidth=0):
mesh = self.mesh
PML = mesh.PML
if not (xyslice is None and zslice is None):
za_keep = mesh.za[zslice]
if type(za_keep) == np.ndarray:
minz, maxz = za_keep[0],za_keep[-1]
shape = (len(za_keep),*mesh.xg[xyslice].shape)
else:
                raise NotImplementedError('zslice must select an array of z values')
self.field = np.zeros(shape,dtype=c128)
#pull xy mesh
xy = mesh.xy
dx,dy = xy.dx0,xy.dy0
if fplanewidth == 0:
xa_in = np.linspace(-mesh.xw/2,mesh.xw/2,xy.shape0_comp[0])
ya_in = np.linspace(-mesh.yw/2,mesh.yw/2,xy.shape0_comp[1])
else:
xa_in = np.linspace(-fplanewidth/2,fplanewidth/2,xy.shape0_comp[0])
ya_in = np.linspace(-fplanewidth/2,fplanewidth/2,xy.shape0_comp[1])
dx0 = xa_in[1]-xa_in[0]
dy0 = ya_in[1]-ya_in[0]
# u can either be a field or a function that generates a field.
# the latter option allows for coarse base grids to be used
# without being penalized by forcing the use of a low resolution
# launch field
if type(_u) is np.ndarray:
_power = overlap(_u,_u)
print('input power: ',_power)
# normalize the field, preserving the input power. accounts for grid resolution
normalize(_u,weight=dx0*dy0,normval=_power)
#resample the field onto the smaller xy mesh (in the smaller mesh's computation zone)
u0 = xy.resample_complex(_u,xa_in,ya_in,xy.xa[PML:-PML],xy.ya[PML:-PML])
_power2 = overlap(u0,u0,dx*dy)
#now we pad w/ zeros to extend it into the PML zone
u0 = np.pad(u0,((PML,PML),(PML,PML)))
#initial mesh refinement
xy.refine_base(u0,ref_val)
weights = xy.get_weights()
#now resample the field onto the smaller *non-uniform* xy mesh
u = xy.resample_complex(_u,xa_in,ya_in,xy.xa[PML:-PML],xy.ya[PML:-PML])
u = np.pad(u,((PML,PML),(PML,PML)))
#do another norm to correct for the slight power change you get when resampling. I measure 0.1% change for psflo. should check again
norm_nonu(u,weights,_power2)
elif callable(_u):
# must be of the form u(x,y)
u0 = _u(xy.xg,xy.yg)
_power = overlap(u0,u0)
print('input power: ',_power)
# normalize the field, preserving the input power. accounts for grid resolution
normalize(u0,weight=dx0*dy0,normval=_power)
# do an initial mesh refinement
xy.refine_base(u0,ref_val)
# compute the field on the nonuniform grid
u = norm_nonu(_u(xy.xg,xy.yg),xy.get_weights(),_power)
else:
raise Exception("unsupported type for argument u in prop2end()")
counter = 0
total_iters = self.mesh.zres
print("propagating field...")
__z = 0
z__ = 0
#step 0 setup
self.update_grid_cor_facs('x')
self.update_grid_cor_facs('y')
# initial array allocation
_trimatsx,rmatx,gx,_trimatsy,rmaty,gy,IORsq__,_IORsq_,__IORsq = self.allocate_mats()
self.precomp_trimats('x')
self.precomp_trimats('y')
self.rmat_precomp('x')
self.rmat_precomp('y')
self._pmlcorrect(_trimatsx,'x')
self._pmlcorrect(_trimatsy,'y')
#get the current IOR dist
self.set_IORsq(IORsq__,z__)
#plt.figure(frameon=False)
#plt.imshow(xy.get_base_field(IORsq__))
#plt.show()
print("initial shape: ",xy.shape)
for i in range(total_iters):
if i%20 == 0:
printProgressBar(i,total_iters-1)
u0 = xy.get_base_field(u)
u0c = np.conj(u0)
weights = xy.get_weights()
## Total power monitor ##
self.totalpower[i] = overlap_nonu(u,u,weights)
#print(self.totalpower[i])
## Other monitors ##
if u1_func is not None:
lp = norm_nonu(u1_func(xy.xg,xy.yg),weights)
self.power[i] = power(overlap_nonu(u,lp,weights),2)
_z_ = z__ + mesh.half_dz
__z = z__ + mesh.dz
if self.field is not None and (minz<=__z<=maxz):
ix0,ix1,ix2,ix3 = mesh.get_loc()
mid = int(u0.shape[1]/2)
self.field[counter][ix0:ix1+1] = u0[:,mid] ## FIX ##
counter+=1
#avoid remeshing on step 0
if (i+1)%remesh_every== 0:
## update the effective index
if dynamic_n0:
#update the effective index
base = xy.get_base_field(IORsq__)
self.n02 = xy.dx0*xy.dy0*np.real(np.sum(u0c*u0*base))/self.k02
oldxm,oldxM = xy.xm,xy.xM
oldym,oldyM = xy.ym,xy.yM
oldxw,oldyw = xy.xw,xy.yw
new_xw,new_yw = oldxw,oldyw
#expand the grid if necessary
if mesh.xwfunc is not None:
new_xw = mesh.xwfunc(__z)
if mesh.ywfunc is not None:
new_yw = mesh.ywfunc(__z)
new_xw, new_yw = xy.snapto(new_xw,new_yw)
xy.reinit(new_xw,new_yw) #set grid back to base res with new dims
if (xy.xw > oldxw or xy.yw > oldyw):
#now we need to pad u,u0 with zeros to make sure it matches the new space
xpad = int((xy.shape0[0]-u0.shape[0])/2)
ypad = int((xy.shape0[1]-u0.shape[1])/2)
u = np.pad(u,((xpad,xpad),(ypad,ypad)))
u0 = np.pad(u0,((xpad,xpad),(ypad,ypad)))
#pad coord arrays to do interpolation
xy.xa_last = np.hstack( ( np.linspace(xy.xm,oldxm-dx,xpad) , xy.xa_last , np.linspace(oldxM + dx, xy.xM,xpad) ) )
xy.ya_last = np.hstack( ( np.linspace(xy.ym,oldym-dy,ypad) , xy.ya_last , np.linspace(oldyM + dy, xy.yM,ypad) ) )
#subdivide into nonuniform grid
xy.refine_base(u0,ref_val)
#interp the field to the new grid
u = xy.resample_complex(u)
#give the grid to the optical sys obj so it can compute IORs
self.optical_system.set_sampling(xy)
#compute nonuniform grid correction factors R_i
self.update_grid_cor_facs('x')
self.update_grid_cor_facs('y')
# grid size has changed, so now we need to reallocate arrays for at least the next remesh_period iters
_trimatsx,rmatx,gx,_trimatsy,rmaty,gy,IORsq__,_IORsq_,__IORsq = self.allocate_mats()
#get the current IOR dist
self.set_IORsq(IORsq__,z__)
#precompute things that will be reused
self.precomp_trimats('x')
self.precomp_trimats('y')
self.rmat_precomp('x')
self.rmat_precomp('y')
self._pmlcorrect(_trimatsx,'x')
self._pmlcorrect(_trimatsy,'y')
self.set_IORsq(_IORsq_,_z_,)
self.set_IORsq(__IORsq,__z)
self.rmat(rmatx,u,IORsq__,'x')
self.rmat_pmlcorrect(rmatx,u,'x')
self._trimats(_trimatsx,_IORsq_,'x')
self._trimats(_trimatsy,__IORsq.T,'y')
tri_solve_vec(_trimatsx[0],_trimatsx[1],_trimatsx[2],rmatx,gx,u)
self.rmat(rmaty,u.T,_IORsq_.T,'y')
self.rmat_pmlcorrect(rmaty,u.T,'y')
tri_solve_vec(_trimatsy[0],_trimatsy[1],_trimatsy[2],rmaty,gy,u.T)
z__ = __z
if (i+2)%remesh_every != 0:
IORsq__[:,:] = __IORsq
print("final total power",self.totalpower[-1])
if writeto:
np.save(writeto,self.field)
return u,u0
@timeit
def prop2end_uniform(self,u,xyslice=None,zslice=None,u1_func=None,writeto=None,dynamic_n0 = False,fplanewidth=0):
mesh = self.mesh
PML = mesh.PML
if not (xyslice is None and zslice is None):
za_keep = mesh.za[zslice]
if type(za_keep) == np.ndarray:
minz, maxz = za_keep[0],za_keep[-1]
shape = (len(za_keep),*mesh.xg[xyslice].shape)
else:
                raise NotImplementedError('zslice must select an array of z values')
self.field = np.zeros(shape,dtype=c128)
if fplanewidth == 0:
xa_in = np.linspace(-mesh.xw/2,mesh.xw/2,u.shape[0])
ya_in = np.linspace(-mesh.yw/2,mesh.yw/2,u.shape[1])
else:
xa_in = np.linspace(-fplanewidth/2,fplanewidth/2,u.shape[0])
ya_in = np.linspace(-fplanewidth/2,fplanewidth/2,u.shape[1])
dx0 = xa_in[1]-xa_in[0]
dy0 = ya_in[1]-ya_in[0]
_power = overlap(u,u)
print('input power: ',_power)
# normalize the field, preserving the input power. accounts for grid resolution
normalize(u,weight=dx0*dy0,normval=_power)
__z = 0
#pull xy mesh
xy = mesh.xy
dx,dy = xy.dx0,xy.dy0
#resample the field onto the smaller xy mesh (in the smaller mesh's computation zone)
u0 = xy.resample_complex(u,xa_in,ya_in,xy.xa[PML:-PML],xy.ya[PML:-PML])
_power2 = overlap(u0,u0,dx*dy)
#now we pad w/ zeros to extend it into the PML zone
u0 = np.pad(u0,((PML,PML),(PML,PML)))
counter = 0
total_iters = self.mesh.zres
print("propagating field...")
z__ = 0
#step 0 setup
self.update_grid_cor_facs('x')
self.update_grid_cor_facs('y')
# initial array allocation
_trimatsx,rmatx,gx,_trimatsy,rmaty,gy,IORsq__,_IORsq_,__IORsq = self.allocate_mats()
self.precomp_trimats('x')
self.precomp_trimats('y')
self.rmat_precomp('x')
self.rmat_precomp('y')
self._pmlcorrect(_trimatsx,'x')
self._pmlcorrect(_trimatsy,'y')
#get the current IOR dist
self.set_IORsq(IORsq__,z__)
weights = xy.get_weights()
print("initial shape: ",xy.shape)
for i in range(total_iters):
if i%20 == 0:
printProgressBar(i,total_iters-1)
## Total power monitor ##
self.totalpower[i] = overlap_nonu(u0,u0,weights)
## Other monitors ##
if u1_func is not None:
lp = norm_nonu(u1_func(xy.xg,xy.yg),weights)
self.power[i] = power(overlap_nonu(u0,lp,weights),2)
_z_ = z__ + mesh.half_dz
__z = z__ + mesh.dz
if self.field is not None and (minz<=__z<=maxz):
ix0,ix1,ix2,ix3 = mesh.get_loc()
mid = int(u0.shape[1]/2)
self.field[counter][ix0:ix1+1] = u0[:,mid] ## FIX ##
counter+=1
self.set_IORsq(_IORsq_,_z_,)
self.set_IORsq(__IORsq,__z)
self.rmat(rmatx,u0,IORsq__,'x')
self.rmat_pmlcorrect(rmatx,u0,'x')
self._trimats(_trimatsx,_IORsq_,'x')
self._trimats(_trimatsy,__IORsq.T,'y')
tri_solve_vec(_trimatsx[0],_trimatsx[1],_trimatsx[2],rmatx,gx,u0)
self.rmat(rmaty,u0.T,_IORsq_.T,'y')
self.rmat_pmlcorrect(rmaty,u0.T,'y')
tri_solve_vec(_trimatsy[0],_trimatsy[1],_trimatsy[2],rmaty,gy,u0.T)
z__ = __z
IORsq__[:,:] = __IORsq
print("final total power",self.totalpower[-1])
if writeto:
np.save(writeto,self.field)
return u0
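# Illustrative usage sketch (the mesh, optical system and numbers below are
# placeholders; their construction is elided): build a propagator and push a
# launch field through the system with the adaptive-mesh solver.
#
#     prop = Prop3D(wl0=1.0,mesh=mesh,optical_system=system,n0=1.45)
#     u,u0 = prop.prop2end(launch_field,ref_val=5.e-6,remesh_every=20)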
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) 2014 Spotify AB.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import uuid
# Set up some basic logging.
logging.basicConfig(level = logging.INFO, format = "[%(asctime)s] [%(levelname)s] %(message)s")
try:
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError, RuntimeInconsistency
from kazoo.handlers.threading import TimeoutError
except ImportError:
logging.error("This script uses Kazoo Python libraries to work with Zookeeper")
logging.error("You can install them by typing 'sudo easy_install kazoo' in your console")
# Let the original exception propagate, because sometimes it's not working for different
# reasons, like package conflicts or whatever else (Python packaging is weird), so it is
# a good idea to let the user see the actual exception message.
raise
DESCRIPTION = """
Bootstraps a new Helios cluster.
Bootstrapping is done by populating Zookeeper with the basic data structures required
by Helios to function properly. The script cannot be used on a NONEMPTY ZooKeeper
cluster.
"""
def main():
parser = argparse.ArgumentParser(description = DESCRIPTION)
parser.add_argument("hosts", metavar = "<zookeeper-endpoint>", type = str,
nargs = "+", help = "Zookeeper node endpoints to connect to")
parser.add_argument("--timeout", dest = "timeout", action = "store", type = int,
default = 30, help = "Zookeeper connection timeout")
parser.add_argument("--force", dest = "force", action = "store_true",
help = "Bootstrap even when Zookeeper is not empty")
option = parser.parse_args()
logging.debug("Using %s as a Zookeeper connection string" % option.hosts)
client = KazooClient(hosts = ",".join(option.hosts))
try:
client.start(timeout = option.timeout)
except TimeoutError as e:
logging.error("Timed out while connecting to Zookeeper")
return 1
status = bootstrap(client, str(uuid.uuid4()), option.force)
# If the client is not stopped, it will hang forever maintaining the connection.
client.stop()
return status
def bootstrap(client, cluster_id, force):
nodes = [
"/config",
"/config/id",
"/config/id/%s" % cluster_id
]
transaction = client.transaction()
# Version is not important here. If any of these nodes exist, just stop doing anything and
# report the error to avoid messing things up.
[transaction.check(node, version = -1) for node in nodes]
# Operation results are either True if the given node exists or an exception of NoNodeError or
    # RuntimeInconsistency and RolledBackError types if the previous (1) or following (2) operation
    # has failed. We want all results to be NoNodeError or RuntimeInconsistency (which means the node
    # existence check wasn't performed because the node's parent is not there).
types = NoNodeError, RuntimeInconsistency
nodes_missing = [isinstance(result, types) for result in transaction.commit()]
if not force and not all(nodes_missing):
logging.error("Aborting, some nodes already exist: %s" %
", ".join(nodes[idx] for idx, missing in enumerate(nodes_missing) if not missing)
)
return 1
transaction = client.transaction()
# Filter the node list so that only the missing nodes are in it.
nodes = [node for idx, node in enumerate(nodes) if nodes_missing[idx]]
# TODO: Might be a good idea to set ACLs here so that these structural nodes are protected from
# accidental deletions, but allow children modifications.
[transaction.create(node) for node in nodes]
# Operation results are either a string representing the created path or an exception object we
# don't really care about.
nodes_created = [result == nodes[idx] for idx, result in enumerate(transaction.commit())]
if not all(nodes_created):
logging.error("Aborting, couldn't create some nodes: %s" %
", ".join(nodes[idx] for idx, created in enumerate(nodes_created) if not created)
)
return 1
logging.info("Cluster has been successfully bootstrapped, cluster id is: %s" % cluster_id)
if __name__ == "__main__":
exit(main())
|
nilq/baby-python
|
python
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.backends.frame import FrameBase, FrameNode
from .port import Port
from .vessel import gen_vessel_definition
from .matrix import gen_matrix
def gen_ecr_frame(port_num: int, vessel_num: int, stop_nums: tuple, snapshots_num: int):
"""Define and generate ecr frame
Args:
port_num (int): number of ports
vessel_num (int): number of vessels
stop_nums (tuple): past stops number and future stop number
"""
vessel_cls = gen_vessel_definition(stop_nums)
matrix_cls = gen_matrix(port_num, vessel_num)
class EcrFrame(FrameBase):
"""Our ecr frame that contains vessels, ports, and a general matrix"""
vessels = FrameNode(vessel_cls, vessel_num)
ports = FrameNode(Port, port_num)
matrix = FrameNode(matrix_cls, 1)
def __init__(self):
super().__init__(enable_snapshot=True, total_snapshot=snapshots_num)
return EcrFrame()
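# Illustrative sketch (the argument values are placeholders): build a frame for
# a scenario with 5 ports, 3 vessels, 4 past / 3 future stops and 100 snapshots.
#
#     frame = gen_ecr_frame(port_num=5, vessel_num=3, stop_nums=(4, 3), snapshots_num=100)
#
# frame.ports, frame.vessels and frame.matrix then hold the generated nodes.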
|
nilq/baby-python
|
python
|
"""Provide functionality to handle package settings."""
import logging
from enum import Enum
from os import makedirs
from os.path import isdir
from urllib.error import URLError
from urllib.parse import urlparse
from urllib.request import Request, urlopen
from dynaconf import Validator, settings
LOGGER = logging.getLogger('standardlog')
class SettingKeys(Enum):
"""Holds all required setting keys with description."""
OPENEO_VERSION = "OPENEO_VERSION"
"""The OpenEO version running - in the 'version url format'.
    This is used for version urls only! So depending on how you set up version urls the format can change. E.g. v1.0
"""
AIRFLOW_HOST = "AIRFLOW_HOST"
"""The complete url to the Apache Airflow webserver as a string.
If you are running the provided docker setup use: http://airflow-webserver:8080.
"""
AIRFLOW_OUTPUT = "AIRFLOW_OUTPUT"
"""The path on the Airflow worker where data output is written to.
This path does not need to exist in the jobs service! It is only needed to write a correct dag for a job as the
absolute paths of the output directories - on the airflow worker (!) - are also written in the dag.
If you are running inside docker this path has to be inside the corresponding airflow worker container. E.g.
/data_out
"""
AIRFLOW_DAGS = "AIRFLOW_DAGS"
"""The path a folder where all dag files will be stored.
If you are running in docker the path needs to be inside the container. E.g.: /usr/src/dags
"""
SYNC_DEL_DELAY = "SYNC_DEL_DELAY"
"""Delay after which to delete sync-jobs output.
    It must be at least as large as the timeouts of gunicorn and nginx. E.g. 300
"""
SYNC_RESULTS_FOLDER = "SYNC_RESULTS_FOLDER"
"""The path to the sync-results folder.
The content of this folder is also mounted to the gateway to simplify data transfer.
If you are running in docker the path needs to be inside the container. E.g.: /usr/src/sync-results
"""
WEKEO_STORAGE = "WEKEO_STORAGE"
"""The path where files downloaded via the WEkEO HDA API will be available
on the VM, where the processing engine (e.g. Airflow) executes jobs.
e.g. /usr/local/airflow/wekeo_storage
"""
# Connection to RabbitMQ
RABBIT_HOST = "RABBIT_HOST"
"""The host name of the RabbitMQ - e.g. `rabbitmq`.
If you are running in docker this is the hostname of the container!
"""
RABBIT_PORT = "RABBIT_PORT"
"""The port on which the RabbitMQ is running - e.g. `5672`.
If you are running in docker and the capabilities container is in the same network as the RabbitMQ this is the port
inside the docker network NOT the exposed one!
"""
RABBIT_USER = "RABBIT_USER"
"""The username to authenticate on the RabbitMQ - e.g. `rabbitmq`."""
RABBIT_PASSWORD = "RABBIT_PASSWORD" # noqa S105
"""The password to authenticate with the given user on the RabbitMQ."""
# Jobs Database
DB_USER = "DB_USER"
"""Database username for the jobs database."""
DB_PASSWORD = "DB_PASSWORD" # noqa S105 - not a hardcoded password only the parameter name!
"""Database user password for the jobs database matching the provided user name."""
DB_HOST = "DB_HOST"
"""Host where the jobs database is running."""
DB_PORT = "DB_PORT"
"""Port where the jobs database is running."""
DB_NAME = "DB_NAME"
"""Database name of the jobs database."""
# Additional
LOG_DIR = "LOG_DIR"
"""The path to the directory where log files should be saved.
If you are running in docker this is the path inside the docker container! E.g. `/usr/src/logs`
In case you want to persist the logs a volume or a local folder needs to be mounted into the specified location.
"""
class SettingValidationUtils:
"""Provides a set of utility functions to validated settings."""
def check_create_folder(self, folder_path: str) -> bool:
"""Create the given folder path if it does not exist, always returns True."""
if not isdir(folder_path):
makedirs(folder_path)
return True
def check_positive_int(self, value: int) -> bool:
"""Return a boolean whether a given value is a positive integer."""
return isinstance(value, int) and value > 0
def check_parse_url(self, url: str) -> bool:
"""Return a boolean whether the url could be parsed.
        This is useful when the service behind a url setting may not be reachable at the time of setting validation.
        Then this method at least validates that a syntactically valid url is provided. E.g. the gateway will most
        probably not be reachable when bringing up microservices.
"""
result = urlparse(url)
return all([result.scheme, result.netloc])
def check_url_is_reachable(self, url: str) -> bool:
"""Return a boolean whether a connection to a given url could be created."""
try:
if url.lower().startswith('http'):
req = Request(url)
with urlopen(req) as resp: # noqa
return resp.status == 200
else:
return False
except URLError:
return False
def initialise_settings() -> None:
"""Configure and validates settings.
This method is called when starting the microservice to ensure all configuration settings are properly provided.
Raises:
:class:`~dynaconf.validator.ValidationError`: A setting is not valid.
"""
not_doc = Validator("ENV_FOR_DYNACONF", is_not_in=["documentation"])
not_doc_unittest = Validator("ENV_FOR_DYNACONF", is_not_in=["documentation", "unittest"])
settings.configure(ENVVAR_PREFIX_FOR_DYNACONF="OEO")
utils = SettingValidationUtils()
settings.validators.register(
Validator(SettingKeys.OPENEO_VERSION.value, must_exist=True, when=not_doc),
Validator(SettingKeys.AIRFLOW_HOST.value, must_exist=True, condition=utils.check_parse_url,
when=(not_doc_unittest & not_doc)),
Validator(SettingKeys.AIRFLOW_OUTPUT.value, must_exist=True, when=not_doc),
Validator(SettingKeys.AIRFLOW_DAGS.value, must_exist=True, condition=utils.check_create_folder, when=not_doc),
Validator(SettingKeys.SYNC_DEL_DELAY.value, must_exist=True, is_type_of=int, condition=utils.check_positive_int,
when=not_doc),
Validator(SettingKeys.SYNC_RESULTS_FOLDER.value, must_exist=True, condition=utils.check_create_folder,
when=not_doc),
Validator(SettingKeys.WEKEO_STORAGE.value, default="", when=not_doc_unittest),
Validator(SettingKeys.RABBIT_HOST.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.RABBIT_PORT.value, must_exist=True, is_type_of=int, when=not_doc_unittest),
Validator(SettingKeys.RABBIT_USER.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.RABBIT_PASSWORD.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.DB_USER.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.DB_PASSWORD.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.DB_HOST.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.DB_PORT.value, must_exist=True, is_type_of=int, when=not_doc_unittest),
Validator(SettingKeys.DB_NAME.value, must_exist=True, when=not_doc_unittest),
Validator(SettingKeys.LOG_DIR.value, must_exist=True, condition=utils.check_create_folder,
when=not_doc_unittest),
)
settings.validators.validate()
LOGGER.info("Settings validated")
|
nilq/baby-python
|
python
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for experimental sonnet functions and classes.
This file contains functions and classes that are being tested until they're
either removed or promoted into the wider sonnet library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import weakref
import tensorflow as tf
from tensorflow.python.ops import variable_scope as variable_scope_ops
def reuse_vars(method):
"""Wraps an arbitrary method so it does variable sharing.
This decorator creates variables the first time it calls `method`, and reuses
them for subsequent calls. The object that calls `method` provides a
`tf.VariableScope`, either as a `variable_scope` attribute or as the return
value of an `_enter_variable_scope()` method.
The first time the wrapped method is invoked, it enters the caller's
`tf.VariableScope` with `reuse=False`. On all subsequent calls it enters the
same variable scope with `reuse=True`.
Variables are created in the context of the `tf.VariableScope` provided by the
caller object. Ops are created with an additional `tf.name_scope()`, which
adds a scope for the wrapped method name. For example:
```python
class MyModule(object):
def __init__(self, name):
with tf.variable_scope(name) as variable_scope:
self.variable_scope = variable_scope
@snt.experimental.reuse_vars
def add_x(self, tensor):
x = tf.get_variable("x", shape=tensor.get_shape())
return tensor + x
module = MyModule("my_module_name")
input_tensor = tf.zeros(shape=(5,))
# This creates the variable "my_module_name/x:0"
# and op "my_module_name/add_x/add:0"
output = module.add_x(input_tensor)
```
Args:
method: The method to wrap.
Returns:
The wrapped method.
"""
initialized_variable_scopes = weakref.WeakKeyDictionary()
# Ensure that the argument passed in is really a method by checking that the
# first positional argument to it is "self".
arg_spec = inspect.getargspec(method)
is_method = arg_spec.args and arg_spec.args[0] == "self"
if not is_method:
raise TypeError("reuse_vars can only be used with methods.")
@functools.wraps(method)
def wrapper(*args, **kwargs):
"""Calls `method` with a variable scope whose reuse flag is set correctly.
The first time the wrapper is called it creates a
`(tf.Graph, tf.VariableScope)` key and checks it for membership in
`initialized_variable_scopes`. The check is `False` if and only if this is
the first time the wrapper has been called with the key, otherwise the
check is `True`. The result of this check is used as the `reuse` flag for
entering the provided variable scope before calling `method`.
Here are two examples of how to use the reuse_vars decorator.
1. Decorate an arbitrary instance method with a `variable_scope` attribute:
```python
class Reusable(object):
def __init__(self, name):
with tf.variable_scope(name) as vs:
self.variable_scope = vs
@snt.experimental.reuse_vars
def add_a(self, input_tensor):
a = tf.get_variable("a", shape=input_tensor.get_shape())
return a + input_tensor
obj = Reusable("reusable")
x = tf.constant(5.0)
out1 = obj.add_a(x)
out2 = obj.add_a(x)
# out1 == out2
```
2. Decorating a snt.AbstractModule instance method:
```python
class ReusableModule(snt.AbstractModule):
@snt.experimental.reuse_vars
def add_a(self, input_tensor):
a = tf.get_variable("a", shape=input_tensor.get_shape())
return a + input_tensor
      # We don't need @snt.experimental.reuse_vars here because _build is
      # wrapped by `tf.make_template` inside `snt.AbstractModule`.
def _build(self, input_tensor):
b = tf.get_variable("b", shape=input_tensor.get_shape())
return b + self.add_a(input_tensor)
    obj = ReusableModule("reusable")
x = tf.constant(5.0)
out1 = obj(x)
out2 = obj(x)
# out1 == out2
```
Args:
*args: The positional arguments (Tensors) passed to the wrapped method.
**kwargs: The keyword arguments passed to the wrapped method.
Returns:
Output of the wrapped method.
Raises:
ValueError: If no variable scope is provided or if `method` is a method
and a variable_scope keyword argument is also provided.
"""
obj = args[0]
def default_context_manager(reuse=None):
variable_scope = obj.variable_scope
return tf.variable_scope(variable_scope, reuse=reuse)
variable_scope_context_manager = getattr(obj, "_enter_variable_scope",
default_context_manager)
graph = tf.get_default_graph()
if graph not in initialized_variable_scopes:
initialized_variable_scopes[graph] = set([])
initialized_variable_scopes_for_graph = initialized_variable_scopes[graph]
# Temporarily enter the variable scope to capture it
with variable_scope_context_manager() as tmp_variable_scope:
variable_scope = tmp_variable_scope
reuse = variable_scope.name in initialized_variable_scopes_for_graph
# Enter the pure variable scope with reuse correctly set
with variable_scope_ops._pure_variable_scope( # pylint:disable=protected-access
variable_scope, reuse=reuse) as pure_variable_scope:
# Force tf.name_scope to treat variable_scope.original_name_scope as
# an "absolute" scope name so we can re-enter it.
name_scope = variable_scope.original_name_scope
if name_scope[-1] != "/":
name_scope += "/"
with tf.name_scope(name_scope):
with tf.name_scope(method.__name__):
out_ops = method(*args, **kwargs)
initialized_variable_scopes_for_graph.add(pure_variable_scope.name)
return out_ops
return wrapper
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
(C) 2007-2019 1024jp
"""
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
_flags = (cv2.CALIB_ZERO_TANGENT_DIST |
cv2.CALIB_FIX_K3
)
class Undistorter:
def __init__(self, camera_matrix, dist_coeffs, rvecs, tvecs, image_size,
new_camera_matrix=None):
self.camera_matrix = camera_matrix
self.dist_coeffs = dist_coeffs
self.rvecs = rvecs
self.tvecs = tvecs
self.image_size = image_size
        if new_camera_matrix is not None:
self.new_camera_matrix = new_camera_matrix
else:
self.__get_new_camera_matrix()
@classmethod
def init(cls, image_points, dest_points, image_size):
dest_points = [(x, y, 0) for x, y, z in dest_points]
_, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
[np.float32([dest_points])],
[np.float32([image_points])],
image_size, None, None, flags=_flags)
return cls(camera_matrix, dist_coeffs, rvecs, tvecs, image_size)
@classmethod
def load(cls, f):
return pickle.load(f)
def save(self, f):
pickle.dump(self, f)
def calibrate_points(self, points):
dest = cv2.undistortPoints(np.array([points]), self.camera_matrix,
self.dist_coeffs,
P=self.new_camera_matrix)
return np.squeeze(dest)
def undistort_image(self, image):
return cv2.undistort(image, self.camera_matrix, self.dist_coeffs,
newCameraMatrix=self.new_camera_matrix)
def show_map(self):
interval = 200
size = self.image_size
w, h = np.meshgrid(range(0, size[0], interval),
range(0, size[1], interval))
points = np.vstack((w.flatten(), h.flatten())).T.astype('float32')
new_points = self.calibrate_points(points)
plt.scatter(points[:, 0], points[:, 1], 20, 'b', alpha=.5)
plt.scatter(new_points[:, 0], new_points[:, 1], 20, 'r', alpha=.5)
plt.axes().set_aspect('equal', 'datalim')
plt.show()
def __get_new_camera_matrix(self):
self.new_camera_matrix = cv2.getOptimalNewCameraMatrix(
self.camera_matrix, self.dist_coeffs, self.image_size, 0)[0]
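# Minimal usage sketch (the point lists, image size and file names are
# placeholders): calibrate from matched image/reference points, then undistort.
#
#     image_points = [...]  # observed pixel positions, (x, y) tuples
#     dest_points = [...]   # reference positions, (x, y, z) tuples
#     und = Undistorter.init(image_points, dest_points, (640, 480))
#     fixed = und.undistort_image(cv2.imread('distorted.jpg'))
#     und.show_map()        # visual check of the correction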
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
names = ["Amy", "Bob", "Cathy", "David", "Eric"]
for x in names:
print("Hello " + x)
|
nilq/baby-python
|
python
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from django.contrib.auth.models import AnonymousUser
from mock import Mock
from saleor.cart import decorators
from saleor.cart.models import Cart
from saleor.checkout.core import Checkout
from saleor.discount.models import Voucher
from saleor.product.models import Product, ProductVariant, Stock
from saleor.shipping.models import ShippingMethod
from saleor.userprofile.models import Address, User
@pytest.fixture
def cart(db): # pylint: disable=W0613
return Cart.objects.create()
@pytest.fixture
def customer_user(db): # pylint: disable=W0613
return User.objects.create_user('test@example.com', 'password')
@pytest.fixture
def request_cart(cart, monkeypatch):
monkeypatch.setattr(
decorators, 'get_cart_from_request',
lambda request, create=False: cart)
return cart
@pytest.fixture
def request_cart_with_item(product_in_stock, request_cart):
variant = product_in_stock.variants.get()
# Prepare some data
request_cart.add(variant)
return request_cart
@pytest.fixture()
def admin_user(db): # pylint: disable=W0613
"""A Django admin user.
"""
return User.objects.create_superuser('admin@example.com', 'password')
@pytest.fixture()
def admin_client(admin_user):
"""A Django test client logged in as an admin user."""
from django.test.client import Client
client = Client()
client.login(username=admin_user.email, password='password')
return client
@pytest.fixture()
def authorized_client(client, customer_user):
client.login(username=customer_user.email, password='password')
return client
@pytest.fixture
def billing_address(db): # pylint: disable=W0613
return Address.objects.create(
first_name='John', last_name='Doe',
company_name='Mirumee Software',
street_address_1='Tęczowa 7',
city='Wrocław',
postal_code='53-601',
country='PL')
@pytest.fixture
def shipping_method(db): # pylint: disable=W0613
shipping_method = ShippingMethod.objects.create(name='DHL')
shipping_method.price_per_country.create(price=10)
return shipping_method
@pytest.fixture
def product_in_stock(db): # pylint: disable=W0613
product = Product.objects.create(
name='Test product', price=10, weight=1)
variant = ProductVariant.objects.create(product=product, sku='123')
Stock.objects.create(
variant=variant, cost_price=1, quantity=5, quantity_allocated=5,
location='Warehouse 1')
Stock.objects.create(
variant=variant, cost_price=100, quantity=5, quantity_allocated=5,
location='Warehouse 2')
Stock.objects.create(
variant=variant, cost_price=10, quantity=5, quantity_allocated=0,
location='Warehouse 3')
return product
@pytest.fixture
def anonymous_checkout():
return Checkout(Mock(), AnonymousUser(), 'tracking_code')
@pytest.fixture
def voucher(db): # pylint: disable=W0613
return Voucher.objects.create(code='mirumee', discount_value=20)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import path,getenv
from glob import glob
import argparse
parser = argparse.ArgumentParser(description='make forest')
parser.add_argument('--region',metavar='region',type=str,default=None)
toProcess = parser.parse_args().region
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Tools.Load import *
import PandaCore.Tools.Functions # kinematics
import PandaAnalysis.Monotop.CombinedSelection as sel
Load('PandaAnalysisFlat','LimitTreeBuilder')
baseDir = getenv('PANDA_FLATDIR')
lumi = 36560
factory = root.LimitTreeBuilder()
if toProcess:
factory.SetOutFile(baseDir+'/limits/limitForest_%s.root'%toProcess)
else:
factory.SetOutFile(baseDir+'/limits/limitForest_all.root')
def dataCut(basecut,trigger):
return tAND(trigger,basecut)
treelist = []
def getTree(fpath):
global treelist
fIn = root.TFile(baseDir+fpath+'.root')
tIn = fIn.Get('events')
treelist.append(tIn)
return tIn,fIn
def enable(regionName):
if toProcess:
return (toProcess==regionName)
else:
return True
def shiftBtags(label,tree,varmap,cut,baseweight):
ps = []
for shift in ['BUp','BDown','MUp','MDown']:
for cent in ['sf_btag','sf_sjbtag']:
shiftedlabel = '_'
if 'sj' in cent:
shiftedlabel += 'sj'
if 'B' in shift:
shiftedlabel += 'btag'
else:
shiftedlabel += 'mistag'
if 'Up' in shift:
shiftedlabel += 'Up'
else:
shiftedlabel += 'Down'
weight = sel.weights[baseweight+'_'+cent+shift]%lumi
shiftedProcess = root.Process(label,tree,varmap,cut,weight)
shiftedProcess.syst = shiftedlabel
ps.append(shiftedProcess)
return ps
# input
tZll,fZll = getTree('ZJets')
tZvv,fZvv = getTree('ZtoNuNu')
tWlv,fWlv = getTree('WJets')
tPho,fPho = getTree('GJets')
tTTbar,fTT = getTree('TTbar')
tVV,fVV = getTree('Diboson')
tQCD,fQCD = getTree('QCD')
tST,fST = getTree('SingleTop')
tMET,fMET = getTree('MET')
tSingleEle,fSEle = getTree('SingleElectron')
tSinglePho,fSPho = getTree('SinglePhoton')
#tSig,fSig = getTree('monotop-nr-v3-1700-100_med-1700_dm-100') # this is just a sample point
tAllSig = {}; fAllSig = {}
if enable('signal'):
signalFiles = glob(baseDir+'/Vector*root')
for f in signalFiles:
fname = f.split('/')[-1].replace('.root','')
signame = fname
replacements = {
'Vector_MonoTop_NLO_Mphi-':'',
'_gSM-0p25_gDM-1p0_13TeV-madgraph':'',
'_Mchi-':'_',
}
for k,v in replacements.iteritems():
signame = signame.replace(k,v)
tAllSig[signame],fAllSig[signame] = getTree(fname)
factory.cd()
regions = {}
processes = {}
vms = {}
for region_type,met_type,phi_type in [('signal','pfmet','pfmetphi'),
('w','pfUWmag','pfUWphi'),
('z','pfUZmag','pfUZphi'),
('a','pfUAmag','pfUAphi')]:
vms[region_type] = root.VariableMap()
vms[region_type].AddVar('met',met_type)
# vms[region_type].AddVar('metphi',phi_type)
vms[region_type].AddVar('genBosonPt','genBosonPt')
# vms[region_type].AddVar('genBosonPhi','genBosonPhi')
for x in ['fj1Tau32','top_ecf_bdt']:
vms[region_type].AddVar(x,x)
# test region
if enable('test'):
regions['test'] = root.Region('test')
cut = sel.cuts['signal']
weight = sel.weights['signal']%lumi
processes['test'] = [
root.Process('Data',tMET,vms['signal'],dataCut(cut,sel.triggers['met']),'1'),
root.Process('Diboson',tVV,vms['signal'],cut,weight),
]
btag_shifts = []
for p in processes['test']:
if p.name=='Data':
continue
btag_shifts += shiftCSV(p.name,p.GetInput(),vms['signal'],cut,'signal')
processes['test'] += btag_shifts
for p in processes['test']:
regions['test'].AddProcess(p)
factory.AddRegion(regions['test'])
# signal region
if enable('signal'):
regions['signal'] = root.Region('signal')
cut = sel.cuts['signal']
weight = sel.weights['signal']%lumi
vm = vms['signal']
processes['signal'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
# root.Process('signal',tSig,vm,cut,weight),
]
for signame,tsig in tAllSig.iteritems():
processes['signal'].append( root.Process(signame,tsig,vm,cut,weight) )
btag_shifts = []
for p in processes['signal']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'signal')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'signal')
processes['signal'] += btag_shifts
for p in processes['signal']:
regions['signal'].AddProcess(p)
factory.AddRegion(regions['signal'])
#singlemuonw
if enable('singlemuonw'):
regions['singlemuonw'] = root.Region('singlemuonw')
cut = sel.cuts['singlemuonw']
weight = sel.weights['singlemuonw']%lumi
vm = vms['w']
processes['singlemuonw'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singlemuonw']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singlemuonw')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singlemuonw')
processes['singlemuonw'] += btag_shifts
for p in processes['singlemuonw']:
regions['singlemuonw'].AddProcess(p)
factory.AddRegion(regions['singlemuonw'])
#singleelectronw
if enable('singleelectronw'):
regions['singleelectronw'] = root.Region('singleelectronw')
cut = sel.cuts['singleelectronw']
weight = sel.weights['singleelectronw']%lumi
vm = vms['w']
processes['singleelectronw'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singleelectronw']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singleelectronw')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singleelectronw')
processes['singleelectronw'] += btag_shifts
for p in processes['singleelectronw']:
regions['singleelectronw'].AddProcess(p)
factory.AddRegion(regions['singleelectronw'])
#singlemuontop
if enable('singlemuontop'):
regions['singlemuontop'] = root.Region('singlemuontop')
cut = sel.cuts['singlemuontop']
weight = sel.weights['singlemuontop']%lumi
vm = vms['w']
processes['singlemuontop'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singlemuontop']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singlemuontop')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singlemuontop')
processes['singlemuontop'] += btag_shifts
for p in processes['singlemuontop']:
regions['singlemuontop'].AddProcess(p)
factory.AddRegion(regions['singlemuontop'])
#singleelectrontop
if enable('singleelectrontop'):
regions['singleelectrontop'] = root.Region('singleelectrontop')
cut = sel.cuts['singleelectrontop']
weight = sel.weights['singleelectrontop']%lumi
vm = vms['w']
processes['singleelectrontop'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
btag_shifts = []
for p in processes['singleelectrontop']:
if p.name=='Data':
continue
btag_shifts += shiftBtags(p.name,p.GetInput(),vm,cut,'singleelectrontop')
#btag_shifts += shiftCSV(p.name,p.GetInput(),vm,cut,'singleelectrontop')
processes['singleelectrontop'] += btag_shifts
for p in processes['singleelectrontop']:
regions['singleelectrontop'].AddProcess(p)
factory.AddRegion(regions['singleelectrontop'])
#dimuon
if enable('dimuon'):
regions['dimuon'] = root.Region('dimuon')
cut = sel.cuts['dimuon']
weight = sel.weights['dimuon']%lumi
vm = vms['z']
processes['dimuon'] = [
root.Process('Data',tMET,vm,dataCut(cut,sel.triggers['met']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
for p in processes['dimuon']:
regions['dimuon'].AddProcess(p)
factory.AddRegion(regions['dimuon'])
#dielectron
if enable('dielectron'):
regions['dielectron'] = root.Region('dielectron')
cut = sel.cuts['dielectron']
weight = sel.weights['dielectron']%lumi
vm = vms['z']
processes['dielectron'] = [
root.Process('Data',tSingleEle,vm,dataCut(cut,sel.triggers['ele']),'1'),
root.Process('Zvv',tZvv,vm,cut,weight),
root.Process('Zll',tZll,vm,cut,weight),
root.Process('Wlv',tWlv,vm,cut,weight),
root.Process('ttbar',tTTbar,vm,cut,weight),
root.Process('ST',tST,vm,cut,weight),
root.Process('Diboson',tVV,vm,cut,weight),
root.Process('QCD',tQCD,vm,cut,weight),
]
for p in processes['dielectron']:
regions['dielectron'].AddProcess(p)
factory.AddRegion(regions['dielectron'])
#photon
if enable('photon'):
regions['photon'] = root.Region('photon')
cut = sel.cuts['photon']
weight = sel.weights['photon']%lumi
vm = vms['a']
processes['photon'] = [
root.Process('Data',tSinglePho,vm,dataCut(cut,sel.triggers['pho']),'1'),
root.Process('Pho',tPho,vm,cut,weight),
root.Process('QCD',tSinglePho,vm,dataCut(cut,sel.triggers['pho']),'sf_phoPurity'),
]
for p in processes['photon']:
regions['photon'].AddProcess(p)
factory.AddRegion(regions['photon'])
PInfo('makeLimitForest','Starting '+str(toProcess))
factory.Run()
PInfo('makeLimitForest','Finishing '+str(toProcess))
factory.Output()
PInfo('makeLimitForest','Outputted '+str(toProcess))
|
nilq/baby-python
|
python
|
from tensorflow.keras import initializers
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
def build_subnet(output_filters, bias_initializer='zeros', name=None):
"""构建功能子网络.
Args:
output_filters: int,
功能子网络输出层的神经元数量.
bias_initializer: str or tf.keras.initializers.Initializer instance, default='zeros',
网络层偏置项初始化器.
name: (可选) str, default=None, 功能子网络名称.
Return:
tf.keras.models.Sequential, 功能子网络实例.
"""
model = Sequential(name=name)
    model.add(layers.InputLayer(input_shape=[None, None, 256]))  # inputs are the features of each FPN level.
    _kernel_initializer = initializers.RandomNormal(mean=0.0, stddev=0.01)  # Gaussian weight initialization.
for _ in range(4):
model.add(layers.Conv2D(filters=256,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
kernel_initializer=_kernel_initializer))
model.add(layers.ReLU())
model.add(layers.Conv2D(filters=output_filters,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
kernel_initializer=_kernel_initializer,
bias_initializer=bias_initializer))
return model
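# --- Hedged usage sketch (not part of the original module) ---
# Builds a classification head with a RetinaNet-style prior-probability bias
# initializer; num_classes, num_anchors and the prior value below are
# assumptions made only for this illustration.
if __name__ == '__main__':
    import numpy as np
    num_classes, num_anchors, prior = 80, 9, 0.01
    bias_init = initializers.Constant(-np.log((1.0 - prior) / prior))
    cls_head = build_subnet(num_classes * num_anchors,
                            bias_initializer=bias_init,
                            name='classification_subnet')
    cls_head.summary()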
|
nilq/baby-python
|
python
|
from .seld import *
|
nilq/baby-python
|
python
|
import numpy as np
class UnionFind:
def __init__(self, n):
self.n = n
self.parent = np.arange(n)
self.rank = np.zeros(n, dtype=np.int32)
self.csize = np.ones(n, dtype=np.int32)
def find(self, u):
v = u
while u != self.parent[u]:
u = self.parent[u]
while v != self.parent[v]:
t = self.parent[v]
self.parent[v] = u
v = t
return u
def union(self, u, v):
u = self.find(u)
v = self.find(v)
if u != v:
if self.rank[u] < self.rank[v]:
self.parent[u] = v
self.csize[v] += self.csize[u]
else:
self.parent[v] = u
self.csize[u] += self.csize[v]
if self.rank[u] == self.rank[v]:
self.rank[u] += 1
def is_same_set(self, u, v):
return self.find(u) == self.find(v)
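# Minimal usage sketch (illustrative, not part of the original module).
if __name__ == '__main__':
    uf = UnionFind(5)
    uf.union(0, 1)
    uf.union(1, 2)
    assert uf.is_same_set(0, 2)
    assert not uf.is_same_set(0, 3)
    print(uf.csize[uf.find(0)])  # size of the merged component: 3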
|
nilq/baby-python
|
python
|
import logging.config
from .Camera import Camera
import time
from io import BytesIO
from PIL import Image
from dateutil import zoneinfo
timezone = zoneinfo.get_zonefile_instance().get("Australia/Canberra")
try:
logging.config.fileConfig("/etc/eyepi/logging.ini")
except:
pass
try:
import picamera
import picamera.array
except Exception as e:
logging.error("Couldnt import picamera module, no picamera camera support: {}".format(str(e)))
pass
class PiCamera(Camera):
"""
Picamera extension to the Camera abstract class.
"""
@classmethod
def stream_thread(cls):
"""
Streaming thread member.
uses :func:`picamera.PiCamera.capture_continuous` to stream data from the rpi camera video port.
:func:`time.sleep` added to rate limit a little bit.
"""
import picamera
print("start thread")
try:
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (640, 480)
# camera.hflip = True
# camera.vflip = True
# let camera warm up
camera.start_preview()
time.sleep(2)
stream = BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls._frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
                    # if no client has requested a frame within the last
                    # second, stop the thread
time.sleep(0.01)
if time.time() - cls._last_access > 1:
break
except Exception as e:
print("Couldnt acquire camera")
print("Closing Thread")
cls._thread = None
def set_camera_settings(self, camera):
"""
        Sets the camera resolution to the maximum resolution.
        If the config provides camera/height and camera/width, attempts to set the resolution to that.
        If the config provides camera/iso, attempts to set the ISO to that.
        If the config provides camera/shutter_speed, attempts to set the shutter speed to that.
:param picamera.PiCamera camera: picamera camera instance to modify
"""
try:
camera.resolution = camera.MAX_RESOLUTION
if type(self.config) is dict:
if hasattr(self, "width") and hasattr(self, "height"):
camera.resolution = (int(self.width),
int(self.height))
if "width" in self.config and "height" in self.config:
camera.resolution = (int(self.config['width']),
int(self.config['height']))
camera.shutter_speed = getattr(self, "shutter_speed", camera.shutter_speed)
camera.iso = getattr(self, "iso", camera.iso)
else:
if self.config.has_option("camera", "width") and self.config.has_option("camera", "height"):
camera.resolution = (self.config.getint("camera", "width"),
self.config.getint("camera", "height"))
if self.config.has_option("camera", "shutter_speed"):
camera.shutter_speed = self.config.getfloat("camera", "shutter_speed")
if self.config.has_option("camera", "iso"):
camera.iso = self.config.getint("camera", "iso")
except Exception as e:
self.logger.error("error setting picamera settings: {}".format(str(e)))
def capture_image(self, filename: str = None):
"""
Captures image using the Raspberry Pi Camera Module, at either max resolution, or resolution
specified in the config file.
        Writes images to disk using :func:`encode_write_image`, so it should write out to all supported image formats
automatically.
:param filename: image filename without extension
:return: :func:`numpy.array` if filename not specified, otherwise list of files.
:rtype: numpy.array
"""
st = time.time()
try:
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as output:
time.sleep(2) # Camera warm-up time
self.set_camera_settings(camera)
time.sleep(0.2)
# self._image = numpy.empty((camera.resolution[1], camera.resolution[0], 3), dtype=numpy.uint8)
camera.capture(output, 'rgb')
# self._image = output.array
self._image = Image.fromarray(output.array)
# self._image = cv2.cvtColor(self._image, cv2.COLOR_BGR2RGB)
if filename:
filenames = self.encode_write_image(self._image, filename)
self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
return filenames
else:
self.logger.debug("Took {0:.2f}s to capture".format(time.time() - st))
except Exception as e:
self.logger.critical("EPIC FAIL, trying other method. {}".format(str(e)))
return None
return None
|
nilq/baby-python
|
python
|
# Build a simple login page
from tkinter import *
root = Tk()
root.title("登陆页面")
msg = "欢迎进入海绵宝宝系统"
sseGif = PhotoImage(file="../img/hmbb1.gif")
logo = Label(root,image=sseGif,text=msg, compound=BOTTOM)
logo.grid(row=0,column=0,columnspan=2,padx=10,pady=10)
accountL = Label(root,text="Account")
accountL.grid(row=1)
pwdL = Label(root,text="Password")
pwdL.grid(row=2)
accountE = Entry(root)
accountE.grid(row=1,column=1)
accountE.insert(0,"海绵宝宝") #在0位置插入默认文本框里的文字
pwdE = Entry(root,show="*")
pwdE.grid(row=2,column=1,pady=10)
pwdE.insert(0,"hmbb") #在0位置插入默认文本框里的文字
# LOGIN QUIT
def printInfo():
print("Account: %s\nPassword: %s" %(accountE.get(), pwdE.get()))
    accountE.delete(0,END) # clear the entry from position 0 to the end
    pwdE.delete(0,END) # clear the entry from position 0 to the end
loginbtn = Button(root,text="Login",command=printInfo)
loginbtn.grid(row=3,column=0,sticky=W,padx=10,pady=10)
quitbtn = Button(root,text="Quit",command=root.quit)
quitbtn.grid(row=3,column=1,sticky=W,padx=10,pady=10)
root.mainloop()
#86
|
nilq/baby-python
|
python
|
from setuptools import setup
setup(
name='YinPortfolioManagement',
author='Yiqiao Yin',
version='1.0.0',
description="This package uses Long Short-Term Memory (LSTM) to forecast a stock price that user enters.",
packages=['YinCapital_forecast']
)
|
nilq/baby-python
|
python
|
import analysis
#Create histogram
analysis.histogram()
#Create scatterplot
analysis.scatterplot("sepal_length","sepal_width")
analysis.scatterplot("petal_length","petal_width")
analysis.pair_plot()
#Create summary.txt
analysis.writeToAFile()
|
nilq/baby-python
|
python
|
import os
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
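# The cron trigger below fires scheduled_job once per day at 09:00 (the scheduler's local timezone by default).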
@sched.scheduled_job('cron', hour=9)
def scheduled_job():
os.system("python manage.py runbot")
sched.start()
|
nilq/baby-python
|
python
|
from django.db import models
from i18nfield.fields import I18nCharField
from pretalx.common.mixins import LogMixin
from pretalx.common.urls import EventUrls
class Track(LogMixin, models.Model):
event = models.ForeignKey(
to='event.Event', on_delete=models.PROTECT, related_name='tracks'
)
name = I18nCharField(max_length=200)
color = models.CharField(max_length=7)
class urls(EventUrls):
base = edit = '{self.event.cfp.urls.tracks}{self.pk}'
delete = '{base}delete'
def __str__(self) -> str:
return str(self.name)
|
nilq/baby-python
|
python
|
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy import Column, Integer, String, Boolean, BigInteger, Text, ForeignKey, Float
from sqlalchemy.orm import relationship
#from sqlalchemy_views import CreateView, DropView
# connection.execute("CREATE TABLE `jobs` (`clock` BIGINT(11),`jobid` VARCHAR(50),`type` TEXT,`username` TEXT,`status` INT,`data` TEXT,`error` TEXT,`reqid` TEXT, PRIMARY KEY(jobid))")
# connection.execute("CREATE TABLE `data_raw` (`clock` BIGINT(11), PRIMARY KEY(clock))")
# connection.execute("CREATE TABLE `data_finalized` (`clock` BIGINT(11), PRIMARY KEY(clock))")
# connection.execute("CREATE TABLE `data_processed` (`clock` BIGINT(11), PRIMARY KEY(clock))")
# connection.execute("CREATE TABLE `data_other` (`clock` BIGINT(11), PRIMARY KEY(clock))")
# connection.execute(text("INSERT INTO `daqbroker_settings`.`databases` VALUES(:dbname,'0')"),dbname=newdbname)
daqbroker_database = declarative_base()
#connection.execute("CREATE TABLE `instruments` ( `Name` text NOT NULL, `instid` int(11) NOT NULL, `active` int(11) NOT NULL, `description` text NOT NULL, `username` text NOT NULL, `email` text NOT NULL, `insttype` int(11) NOT NULL,`log` text, PRIMARY KEY (`instid`))")
class instruments(daqbroker_database):
__tablename__ = "instruments"
Name = Column(String(50))
instid = Column(Integer, primary_key=True)
active = Column(Boolean)
description = Column(Text)
username = Column(Text)
email = Column(Text)
insttype = Column(Integer)
log = Column(Text)
sources = relationship("instmeta", backref="meta", cascade="all, delete, delete-orphan", order_by="instmeta.metaid")
#connection.execute("CREATE TABLE `instmeta` (`clock` bigint(11) NOT NULL DEFAULT '0', `name` varchar(50) NOT NULL, `metaid` int(11) NOT NULL DEFAULT '0', `instid` int(11) NOT NULL DEFAULT '0', `type` int(11) DEFAULT '0', `node` varchar(50) NOT NULL, `remarks` text, `sentRequest` tinyint(1) DEFAULT '0', `lastAction` bigint(11) NOT NULL DEFAULT '0', `lasterrortime` bigint(11) NOT NULL DEFAULT '0', `lasterror` text, `lockSync` tinyint(1) DEFAULT '0', PRIMARY KEY (`instid`,`metaid`,`name`))")
class instmeta(daqbroker_database):
__tablename__ = "instmeta"
clock = Column(BigInteger)
name = Column(String(50))
metaid = Column(Integer, primary_key=True)
instrument_id = Column(Integer, ForeignKey('instruments.instid'))
type = Column(Integer)
node = Column(String(50))
remarks = Column(Text)
sentRequest = Column(Boolean)
lastAction = Column(BigInteger)
lasterrortime = Column(BigInteger)
lasterror = Column(Text)
lockSync = Column(Boolean)
channels = relationship("channels", backref="chann", cascade="all, delete, delete-orphan", order_by="channels.channelid")
parsing = relationship("parsing", backref="metaParse", cascade="all, delete, delete-orphan")
#connection.execute("CREATE TABLE `channels` (`Name` text NOT NULL, `channelid` int(11) NOT NULL, `channeltype` int(11) NOT NULL, `valuetype` int(11) NOT NULL DEFAULT '0', `units` text NOT NULL, `instid` int(11) NOT NULL, `description` text NOT NULL, `active` int(11) NOT NULL, `remarks` text NOT NULL, `metaid` INT, `lastclock` BIGINT(11) NOT NULL DEFAULT 0, `lastValue` text, `fileorder` int(11) DEFAULT 0,`alias` text NOT NULL,`firstClock` BIGINT(11) DEFAULT 10000000000000, PRIMARY KEY (channelid,metaid,instid))")
class channels(daqbroker_database):
__tablename__ = "channels"
Name = Column(Text)
channelid = Column(Integer, primary_key=True)
channeltype = Column(Integer)
valuetype = Column(Integer)
units = Column(Text)
description = Column(Text)
active = Column(Boolean)
remarks = Column(Text)
metaid = Column(Integer, ForeignKey('instmeta.metaid'))
lastclock = Column(BigInteger)
lastValue = Column(Text)
firstClock = Column(BigInteger)
fileorder = Column(Text)
alias = Column(Text)
#chann = relationship("instmeta", back_populates="channels")
#connection.execute("CREATE TABLE `parsing` (clock BIGINT(11),lastAction BIGINT(11), `metaid` INT(11) , `instid` INT(11), `type` INT(11), `locked` INT(11), `forcelock` BOOLEAN DEFAULT '0', `remarks` MEDIUMTEXT, PRIMARY KEY (metaid,instid))")
class parsing(daqbroker_database):
__tablename__ = "parsing"
clock = Column(BigInteger)
metaid = Column(Integer, ForeignKey('instmeta.metaid'), primary_key=True)
type = Column(Integer)
locked = Column(Boolean)
forcelock = Column(Boolean)
remarks = Column(Text)
#metaParse = relationship("instmeta", back_populates="parsing")
#connection.execute("CREATE TABLE `plots` ( `plotname` varchar(200) NOT NULL, `plotid` int(11) NOT NULL, `channelids` text NOT NULL, `plottype` int(11) NOT NULL, `adminPlot` int(11) NOT NULL, `active` int(11) NOT NULL, `remarks` text NOT NULL, PRIMARY KEY (`plotname`,`plotid`))")
class plots(daqbroker_database):
__tablename__ = "plots"
plotname = Column(String(200))
plotid = Column(Integer, primary_key=True)
channelids = Column(Text)
plottype = Column(Integer)
adminPlot = Column(Boolean)
active = Column(Boolean)
remarks = Column(Text)
#connection.execute("CREATE TABLE `plotcomments` (`clock` BIGINT(11),`plotid` INT,`channelid` INT,`comment` TEXT,`author` TEXT,`remarks` TEXT, PRIMARY KEY(clock,plotid,channelid))")
class plotcomments(daqbroker_database):
__tablename__ = "plotcomments"
clock = Column(BigInteger, primary_key=True)
plotid = Column(Integer, primary_key=True)
channelid = Column(Integer, primary_key=True)
comment = Column(Text)
author = Column(Text)
remarks = Column(Text)
#connection.execute("CREATE TABLE `layouts` (`Name` varchar(50) NOT NULL,`layoutid` int(11) NOT NULL,`plots` text NOT NULL,`format` text NOT NULL,PRIMARY KEY (`layoutid`) USING BTREE,UNIQUE KEY `Name` (`Name`))")
class layouts(daqbroker_database):
__tablename__ = "layouts"
Name = Column(String(200))
layoutid = Column(Integer, primary_key=True)
plots = Column(Text)
format = Column(Text)
plottype = Column(Integer)
adminPlot = Column(Boolean)
active = Column(Boolean)
remarks = Column(Text)
#connection.execute("CREATE TABLE `collections` (`Name` VARCHAR(30),`channels` TEXT,`remarks` TEXT, PRIMARY KEY(Name))")
class collections(daqbroker_database):
__tablename__ = "collections"
Name = Column(String(200), primary_key=True)
channels = Column(Text)
remarks = Column(Text)
#connection.execute("CREATE TABLE `runs` (`clock` BIGINT(11),`lastUpdate` BIGINT(11),`isLinked` INT(11),`linkRemarks` TEXT,`linkType` INT(11),`runlistRemarks` TEXT, PRIMARY KEY (clock))")
class runs(daqbroker_database):
__tablename__ = "runs"
clock = Column(BigInteger, primary_key=True)
lastUpdate = Column(BigInteger)
isLinked = Column(Boolean)
linkRemarks = Column(Text)
linkType = Column(Integer)
runlistRemarks = Column(Text)
#connection.execute("CREATE TABLE `runlist` (`start` BIGINT(11),`end` BIGINT(11),`run` VARCHAR(50),`summary` LONGTEXT,`comments` TEXT,`active` INT, PRIMARY KEY(run))")
class runlist(daqbroker_database):
__tablename__ = "runlist"
start = Column(BigInteger)
end = Column(BigInteger)
run = Column(String(20), primary_key=True)
lastUpdate = Column(BigInteger)
summary = Column(Text)
comments = Column(Text)
active = Column(Boolean)
#connection.execute("CREATE TABLE `subscribers` (`email` VARCHAR(100), PRIMARY KEY(email))")
class subscribers(daqbroker_database):
__tablename__ = "subscribers"
email = Column(String(200), primary_key=True)
class instTable(object):
def __init__(self, cols):
for key in cols:
setattr(self, key, cols[key])
def __repr__(self):
return "<instTable class>"
def createInstrumentTable(iname, cols, isNew):
attrDictData = {'__tablename__': iname + '_data', 'clock': Column(BigInteger, primary_key=True)} # For raw data
attrDictCustom = {
'__tablename__': iname + '_custom',
'clock': Column(
BigInteger,
primary_key=True)} # For custom (processed) data
for col in cols:
if col["type"] == 1:
attrDictData[col["name"]] = Column(Float)
if col["type"] == 2:
attrDictData[col["name"]] = Column(Text)
if col["type"] == 3:
attrDictCustom[col["name"]] = Column(Float)
#tableClassData = type (instTable, (attrDictData)
if not isNew:
tableClassData = type(iname + '_data', (instTable,), attrDictData)
tableClassCustom = type(iname + '_custom', (instTable,), attrDictCustom)
else:
tableClassData = type(iname + '_data', (daqbroker_database,), attrDictData)
tableClassCustom = type(iname + '_custom', (daqbroker_database,), attrDictCustom)
return (tableClassData,tableClassCustom)
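# Hedged usage sketch (comments only; the names below are illustrative, not part of this module):
#   data_cls, custom_cls = createInstrumentTable(
#       'thermometer',
#       [{'name': 'temperature', 'type': 1}, {'name': 'temperature_avg', 'type': 3}],
#       isNew=True)
#   daqbroker_database.metadata.create_all(engine)  # 'engine' is an existing SQLAlchemy engine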
def dropTable(tableName, engine, is_view):
tablesDrop = []
tablesDropKeys = []
for table in daqbroker_database.metadata.tables.keys():
if table == tableName:
tablesDrop.append(daqbroker_database.metadata.tables[table])
tablesDropKeys.append(table)
if is_view:
for table in tablesDrop:
engine.execute(DropView(table, if_exists=True))
else:
daqbroker_database.metadata.drop_all(engine, tables=tablesDrop)
for table in tablesDropKeys:
daqbroker_database.metadata.remove(daqbroker_database.metadata.tables[table])
def newMetaData():
global daqbroker_database
daqbroker_database.metadata.clear()
daqbroker_database = declarative_base()
instruments = type('instruments', (daqbroker_database,), dict(
__tablename__="instruments",
Name=Column(String(50)),
instid=Column(Integer, primary_key=True),
active=Column(Boolean),
description=Column(Text),
username=Column(Text),
email=Column(Text),
insttype=Column(Integer),
log=Column(Text),
sources=relationship("instmeta", backref="meta", cascade="all, delete, delete-orphan", order_by="instmeta.metaid"))
)
instmeta = type('instmeta', (daqbroker_database,), dict(
__tablename__="instmeta",
clock=Column(BigInteger),
name=Column(String(50)),
metaid=Column(Integer, primary_key=True),
instrument_id=Column(Integer, ForeignKey('instruments.instid')),
type=Column(Integer),
node=Column(String(50)),
remarks=Column(Text),
sentRequest=Column(Boolean),
lastAction=Column(BigInteger),
lasterrortime=Column(BigInteger),
lasterror=Column(Text),
lockSync=Column(Boolean),
channels=relationship("channels", backref="chann", cascade="all, delete, delete-orphan", order_by="channels.channelid"),
parsing=relationship("parsing", backref="metaParse", cascade="all, delete, delete-orphan"))
)
channels = type('channels', (daqbroker_database,), dict(
__tablename__="channels",
Name = Column(Text),
channelid = Column(Integer, primary_key=True),
channeltype = Column(Integer),
valuetype = Column(Integer),
units = Column(Text),
description = Column(Text),
active = Column(Boolean),
remarks = Column(Text),
metaid = Column(Integer, ForeignKey('instmeta.metaid')),
lastclock = Column(BigInteger),
lastValue = Column(Text),
firstClock = Column(BigInteger),
fileorder = Column(Text),
alias = Column(Text)
))
parsing = type('parsing', (daqbroker_database,), dict(
__tablename__="parsing",
clock = Column(BigInteger),
metaid = Column(Integer, ForeignKey('instmeta.metaid'), primary_key=True),
type = Column(Integer),
locked = Column(Boolean),
forcelock = Column(Boolean),
remarks = Column(Text)
))
plots = type('plots', (daqbroker_database,), dict(
__tablename__="plots",
plotname = Column(String(200)),
plotid = Column(Integer, primary_key=True),
channelids = Column(Text),
plottype = Column(Integer),
adminPlot = Column(Boolean),
active = Column(Boolean),
remarks = Column(Text)
))
plotcomments = type('plotcomments', (daqbroker_database,), dict(
__tablename__="plotcomments",
clock = Column(BigInteger, primary_key=True),
plotid = Column(Integer, primary_key=True),
channelid = Column(Integer, primary_key=True),
comment = Column(Text),
author = Column(Text),
remarks = Column(Text),
))
layouts = type('layouts', (daqbroker_database,), dict(
__tablename__="layouts",
Name = Column(String(200)),
layoutid = Column(Integer, primary_key=True),
plots = Column(Text),
format = Column(Text),
plottype = Column(Integer),
adminPlot = Column(Boolean),
active = Column(Boolean),
remarks = Column(Text)
))
runs = type('runs', (daqbroker_database,), dict(
__tablename__="runs",
clock = Column(BigInteger, primary_key=True),
lastUpdate = Column(BigInteger),
isLinked = Column(Boolean),
linkRemarks = Column(Text),
linkType = Column(Integer),
runlistRemarks = Column(Text)
))
runlist = type('runlist', (daqbroker_database,), dict(
__tablename__="runlist",
start = Column(BigInteger),
end = Column(BigInteger),
run = Column(String(20), primary_key=True),
lastUpdate = Column(BigInteger),
summary = Column(Text),
comments = Column(Text),
active = Column(Boolean)
))
collections = type('collections', (daqbroker_database,), dict(
__tablename__="collections",
Name = Column(String(200), primary_key=True),
channels = Column(Text),
remarks = Column(Text)
))
    subscribers = type('subscribers', (daqbroker_database,), dict(
__tablename__="subscribers",
email=Column(String(200), primary_key=True)
))
return daqbroker_database
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
import os
import sys
import time
import caffe
import numpy as np
from timer import Timer
from db_helper import DBHelper
import matplotlib.pyplot as plt
from caffe.proto import caffe_pb2
from google.protobuf import text_format
# Load LabelMap file.
def get_labelmap(labelmap_path):
labelmap_file = labelmap_path
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
return labelmap
# get labelnames
def get_labelname(labelmap, labels):
num_labels = len(labelmap.item)
labelnames = []
if type(labels) is not list:
labels = [labels]
for label in labels:
found = False
for i in xrange(0, num_labels):
if label == labelmap.item[i].label:
found = True
labelnames.append(labelmap.item[i].display_name)
break
assert found == True
return labelnames
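# e.g. get_labelname(labelmap, [1, 2]) might return ['Car', 'Pedestrian']; the actual
# display names depend on the labelmap prototxt, so this is illustrative only.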
# Preprocess the input data: Caffe works in BGR channel order while matplotlib uses RGB,
# and Caffe expects pixel values in [0, 255] while matplotlib uses [0, 1].
def input_process(net):
    # Define the transformer that preprocesses values for the 'data' blob
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))  # reorder dimensions from HWC to CHW
    transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the mean pixel value
    transformer.set_raw_scale('data', 255)  # rescale pixel values from [0, 1] to [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))  # swap channels from RGB to BGR
image_resize = 300
    net.blobs['data'].reshape(1, 3, image_resize, image_resize)  # batch size 1, 3 channels, 300x300 input
return transformer
# Run detection on a single image
def im_detect(transformer, labelmap, image_name, images_path, db):
i = 0
    while os.path.exists(images_path + image_name) is False:  # wait until the image exists; give up after ~10 seconds
if i > 100:
sys.exit()
time.sleep(0.1)
i += 1
image = caffe.io.load_image(images_path + image_name)
image_byte = os.path.getsize(images_path + image_name) / 1024.0
# Run the net and examine the top_k results
timer = Timer()
timer.tic()
transformed_image = transformer.preprocess('data', image)
    net.blobs['data'].data[...] = transformed_image  # copy the preprocessed image into the network's input blob
    # run the forward pass
detections = net.forward()['detection_out']
    # parse the detection output
det_label = detections[0, 0, :, 1]
det_conf = detections[0, 0, :, 2]
det_xmin = detections[0, 0, :, 3]
det_ymin = detections[0, 0, :, 4]
det_xmax = detections[0, 0, :, 5]
det_ymax = detections[0, 0, :, 6]
    # keep detections with confidence >= 0.6
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_labels = get_labelname(labelmap, top_label_indices)
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
timer.toc()
print 'Detection took {:.3f}s for {}'.format(timer.total_time, image_name)
image_name = image_name[:-4]
if db.search(image_name, 'ssd_detect_res') != 0:
        db.delete_data(image_name, 'ssd_detect_res')  # remove this image's existing rows from the database
if db.search(image_name, 'image_byte') != 0:
        db.delete_data(image_name, 'image_byte')  # remove this image's existing rows from the database
db.insert_data(image_name, image_byte)
    if top_conf.shape[0] == 0:  # no objects were detected in the image
if db.insert_data(image_name, 0, 1) == 0:
print 'Insert data without object failed!'
else:
        # record each detected object's class and bounding box
for i in xrange(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * image.shape[1]))
ymin = int(round(top_ymin[i] * image.shape[0]))
xmax = int(round(top_xmax[i] * image.shape[1]))
ymax = int(round(top_ymax[i] * image.shape[0]))
score = top_conf[i]
label_name = top_labels[i]
if i != top_conf.shape[0] - 1:
if db.insert_data(image_name, str(label_name), xmin, ymin, xmax, ymax, score) == 0:
print 'Insert data with object failed!'
else:
if db.insert_data(image_name, str(label_name), xmin, ymin, xmax, ymax, score, 1) == 0:
print 'Insert data with object failed!'
print 'class: ' + str(label_name) + ' ' + ' location: ' + str(xmin) + ' ' + str(ymin) + \
' ' + str(xmax) + ' ' + str(ymax) + ' possibility: ' + str(score)
return timer.total_time
if __name__ == '__main__':
# Make sure that caffe is on the python path:
caffe_root = '/home/beijing/opt/caffe/'
os.chdir(caffe_root)
sys.path.insert(0, 'python')
    caffe.set_device(1)  # select GPU device 1 when multiple GPUs are available
    caffe.set_mode_gpu()  # run Caffe in GPU mode
labelmap_path = 'data/KITTI/labelmap_kitti.prototxt'
labelmap = get_labelmap(labelmap_path)
# * Load the net in the test phase for inference, and configure input preprocessing.
model_def = 'models/VGGNet/KITTI3/SSD_300x300/deploy.prototxt'
model_weights = 'models/VGGNet/KITTI3/SSD_300x300/VGG_KITTI_SSD_300x300_iter_80000.caffemodel'
net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
transformer = input_process(net)
images_path = '/mnt/disk_a/beijing/DataSet/augsburg/'
im_names = []
for index in range(1000):
s = "%06d" % index
im_names.append(str(s) + '.png')
totaltime = 0
db = DBHelper()
db.get_conn()
db.create_database()
db.create_table()
for image_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
totaltime += im_detect(transformer, labelmap, image_name, images_path, db)
db.close_conn()
print 'totaltime = ' + str(totaltime) + ' for ' + str(len(im_names)) + ' images'
print 'averagetime = ' + str(totaltime / len(im_names))
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Script to delete files that are also present on Wikimedia Commons.
Do not run this script on Wikimedia Commons itself. It works based on
a given array of templates defined below.
Files are downloaded and compared. If the files match, it can be deleted on
the source wiki. If multiple versions of the file exist, the script will not
delete. If the SHA1 comparison is not equal, the script will not delete.
A sysop account on the local wiki is required if you want all features of
this script to work properly.
This script understands various command-line arguments:
-always run automatically, do not ask any questions. All files
that qualify for deletion are deleted. Reduced screen
output.
-replace replace links if the files are equal and the file names
differ
-replacealways replace links if the files are equal and the file names
differ without asking for confirmation
-replaceloose Do loose replacements. This will replace all occurrences
of the name of the image (and not just explicit image
syntax). This should work to catch all instances of the
file, including where it is used as a template parameter
or in galleries. However, it can also make more
mistakes.
-replaceonly Use this if you do not have a local sysop account, but do
wish to replace links from the NowCommons template.
-hash Use the hash to identify the images that are the same. It
doesn't work always, so the bot opens two tabs to let to
the user to check if the images are equal or not.
-- Example --
python pwb.py nowcommons -replaceonly -replaceloose -replacealways \
-replace -hash
-- Known issues --
Please fix these if you are capable and motivated:
- if a file marked nowcommons is not present on Wikimedia Commons, the bot
will exit.
"""
#
# (C) Wikipedian, 2006-2007
# (C) Siebrand Mazeland, 2007-2008
# (C) xqt, 2010-2014
# (C) Pywikibot team, 2006-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 166b7765189cec83e67c95450417db9aa553ab0f $'
#
import re
import sys
import webbrowser
import pywikibot
from pywikibot import i18n, Bot
from pywikibot import pagegenerators as pg
from pywikibot.tools.formatter import color_format
from scripts.image import ImageRobot as ImageBot
nowCommons = {
'_default': [
u'NowCommons'
],
'ar': [
u'الآن كومنز',
u'الآن كومونز',
],
'de': [
u'NowCommons',
u'NC',
u'NCT',
u'Nowcommons',
u'NowCommons/Mängel',
u'NowCommons-Überprüft',
],
'en': [
u'NowCommons',
u'Ncd',
],
'eo': [
u'Nun en komunejo',
u'NowCommons',
],
'fa': [
u'موجود در انبار',
u'NowCommons',
],
'fr': [
u'Image sur Commons',
u'DoublonCommons',
u'Déjà sur Commons',
u'Maintenant sur commons',
u'Désormais sur Commons',
u'NC',
u'NowCommons',
u'Nowcommons',
u'Sharedupload',
u'Sur Commons',
u'Sur Commons2',
],
'he': [
u'גם בוויקישיתוף'
],
'hu': [
u'Azonnali-commons',
u'NowCommons',
u'Nowcommons',
u'NC'
],
'ia': [
u'OraInCommons'
],
'it': [
u'NowCommons',
],
'ja': [
u'NowCommons',
],
'ko': [
u'NowCommons',
u'공용중복',
u'공용 중복',
u'Nowcommons',
],
'nds-nl': [
u'NoenCommons',
u'NowCommons',
],
'nl': [
u'NuCommons',
u'Nucommons',
u'NowCommons',
u'Nowcommons',
u'NCT',
u'Nct',
],
'ro': [
u'NowCommons'
],
'ru': [
u'NowCommons',
u'NCT',
u'Nowcommons',
u'Now Commons',
u'Db-commons',
u'Перенесено на Викисклад',
u'На Викискладе',
],
'zh': [
u'NowCommons',
u'Nowcommons',
u'NCT',
],
}
namespaceInTemplate = [
'en',
'ia',
'it',
'ja',
'ko',
'lt',
'ro',
'zh',
]
# Stemma and stub images are not to be deleted (and there are a lot of them) on it.wikipedia;
# if your project has images like that, put the words commonly used in their titles here to skip them
word_to_skip = {
'en': [],
'it': ['stemma', 'stub', 'hill40 '],
}
class NowCommonsDeleteBot(Bot):
"""Bot to delete migrated files."""
def __init__(self, **kwargs):
"""Constructor."""
self.availableOptions.update({
'replace': False,
'replacealways': False,
'replaceloose': False,
'replaceonly': False,
'use_hash': False,
})
super(NowCommonsDeleteBot, self).__init__(**kwargs)
self.site = pywikibot.Site()
if repr(self.site) == 'commons:commons':
sys.exit('Do not run this bot on Commons!')
def ncTemplates(self):
"""Return nowcommons templates."""
if self.site.lang in nowCommons:
return nowCommons[self.site.lang]
else:
return nowCommons['_default']
@property
def nc_templates(self):
"""A set of now commons template Page instances."""
if not hasattr(self, '_nc_templates'):
self._nc_templates = set(pywikibot.Page(self.site, title, ns=10)
for title in self.ncTemplates())
return self._nc_templates
def useHashGenerator(self):
"""Use hash generator."""
# https://toolserver.org/~multichill/nowcommons.php?language=it&page=2&filter=
lang = self.site.lang
num_page = 0
word_to_skip_translated = i18n.translate(self.site, word_to_skip)
images_processed = list()
while 1:
url = ('https://toolserver.org/~multichill/nowcommons.php?'
'language=%s&page=%s&filter=') % (lang, num_page)
HTML_text = self.site.getUrl(url, no_hostname=True)
reg = r'<[Aa] href="(?P<urllocal>.*?)">(?P<imagelocal>.*?)</[Aa]> +?</td><td>\n\s*?'
reg += r'<[Aa] href="(?P<urlcommons>http[s]?://commons.wikimedia.org/.*?)" \
>Image:(?P<imagecommons>.*?)</[Aa]> +?</td><td>'
regex = re.compile(reg, re.UNICODE)
found_something = False
change_page = True
for x in regex.finditer(HTML_text):
found_something = True
image_local = x.group('imagelocal')
image_commons = x.group('imagecommons')
if image_local in images_processed:
continue
change_page = False
images_processed.append(image_local)
# Skip images that have something in the title (useful for it.wiki)
image_to_skip = False
for word in word_to_skip_translated:
if word.lower() in image_local.lower():
image_to_skip = True
if image_to_skip:
continue
url_local = x.group('urllocal')
url_commons = x.group('urlcommons')
pywikibot.output(color_format(
'\n\n>>> {lightpurple}{0}{default} <<<',
image_local))
pywikibot.output(u'Local: %s\nCommons: %s\n'
% (url_local, url_commons))
webbrowser.open(url_local, 0, 1)
webbrowser.open(url_commons, 0, 1)
if image_local.split('Image:')[1] == image_commons:
choice = pywikibot.input_yn(
u'The local and the commons images have the same name, '
'continue?', default=False, automatic_quit=False)
else:
choice = pywikibot.input_yn(
u'Are the two images equal?',
default=False, automatic_quit=False)
if choice:
yield [image_local, image_commons]
else:
continue
            # The page is updated dynamically, so we may not need to change it
if change_page:
num_page += 1
            # If nothing was found, there are no more images; break.
if not found_something:
break
def getPageGenerator(self):
"""Generator method."""
if self.getOption('use_hash'):
gen = self.useHashGenerator()
else:
gens = [t.getReferences(follow_redirects=True, namespaces=[6],
onlyTemplateInclusion=True)
for t in self.nc_templates]
gen = pg.CombinedPageGenerator(gens)
gen = pg.DuplicateFilterPageGenerator(gen)
gen = pg.PreloadingGenerator(gen)
return gen
def findFilenameOnCommons(self, localImagePage):
"""Find filename on Commons."""
filenameOnCommons = None
for templateName, params in localImagePage.templatesWithParams():
if templateName in self.nc_templates:
if params == []:
filenameOnCommons = localImagePage.title(withNamespace=False)
elif self.site.lang in namespaceInTemplate:
skip = False
filenameOnCommons = None
for par in params:
val = par.split('=')
if len(val) == 1 and not skip:
filenameOnCommons = par[par.index(':') + 1:]
break
if val[0].strip() == '1':
filenameOnCommons = val[1].strip()[val[1].strip().index(':') + 1:]
break
skip = True
if not filenameOnCommons:
filenameOnCommons = localImagePage.title(withNamespace=False)
else:
val = params[0].split('=')
if len(val) == 1:
filenameOnCommons = params[0].strip()
else:
filenameOnCommons = val[1].strip()
return filenameOnCommons
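        # e.g. '{{NowCommons|Example.jpg}}' yields 'Example.jpg' here; on wikis listed in
        # namespaceInTemplate, '{{NowCommons|File:Example.jpg}}' is stripped to 'Example.jpg'.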
def run(self):
"""Run the bot."""
commons = pywikibot.Site('commons', 'commons')
comment = i18n.twtranslate(self.site, 'imagetransfer-nowcommons_notice')
for page in self.getPageGenerator():
if self.getOption('use_hash'):
                # The local page title includes the namespace; the commons image name does not
images_list = page # 0 -> local image, 1 -> commons image
page = pywikibot.Page(self.site, images_list[0])
else:
                # If use_hash is true, we have already printed this above, so skip it here
self.current_page = page
try:
localImagePage = pywikibot.FilePage(self.site, page.title())
if localImagePage.fileIsShared():
pywikibot.output(u'File is already on Commons.')
continue
sha1 = localImagePage.latest_file_info.sha1
if self.getOption('use_hash'):
filenameOnCommons = images_list[1]
else:
filenameOnCommons = self.findFilenameOnCommons(
localImagePage)
if not filenameOnCommons and not self.getOption('use_hash'):
pywikibot.output(u'NowCommons template not found.')
continue
commonsImagePage = pywikibot.FilePage(commons, 'Image:%s'
% filenameOnCommons)
if (localImagePage.title(withNamespace=False) ==
commonsImagePage.title(withNamespace=False) and
self.getOption('use_hash')):
pywikibot.output(
u'The local and the commons images have the same name')
if (localImagePage.title(withNamespace=False) !=
commonsImagePage.title(withNamespace=False)):
usingPages = list(localImagePage.usingPages())
if usingPages and usingPages != [localImagePage]:
pywikibot.output(color_format(
'"{lightred}{0}{default}" is still used in {1} pages.',
localImagePage.title(withNamespace=False),
len(usingPages)))
if self.getOption('replace') is True:
pywikibot.output(color_format(
'Replacing "{lightred}{0}{default}" by '
'"{lightgreen}{1}{default}\".',
localImagePage.title(withNamespace=False),
commonsImagePage.title(withNamespace=False)))
bot = ImageBot(
pg.FileLinksGenerator(localImagePage),
localImagePage.title(withNamespace=False),
commonsImagePage.title(withNamespace=False),
'', self.getOption('replacealways'),
self.getOption('replaceloose'))
bot.run()
# If the image is used with the urlname the
# previous function won't work
is_used = bool(list(pywikibot.FilePage(
self.site, page.title()).usingPages(total=1)))
if is_used and self.getOption('replaceloose'):
bot = ImageBot(
pg.FileLinksGenerator(
localImagePage),
localImagePage.title(
withNamespace=False, asUrl=True),
commonsImagePage.title(
withNamespace=False),
'', self.getOption('replacealways'),
self.getOption('replaceloose'))
bot.run()
# refresh because we want the updated list
usingPages = len(list(pywikibot.FilePage(
self.site, page.title()).usingPages()))
if usingPages > 0 and self.getOption('use_hash'):
                            # just wait for the user to press Enter
pywikibot.input(
                                u'There are still %s pages with this image, '
                                u'please confirm the manual removal from them.'
% usingPages)
else:
pywikibot.output(u'Please change them manually.')
continue
else:
pywikibot.output(color_format(
'No page is using "{lightgreen}{0}{default}" '
'anymore.',
localImagePage.title(withNamespace=False)))
commonsText = commonsImagePage.get()
if self.getOption('replaceonly') is False:
if sha1 == commonsImagePage.latest_file_info.sha1:
pywikibot.output(
u'The image is identical to the one on Commons.')
if (len(localImagePage.getFileVersionHistory()) > 1 and
not self.getOption('use_hash')):
pywikibot.output(
u"This image has a version history. Please \
delete it manually after making sure that the \
old versions are not worth keeping.""")
continue
if self.getOption('always') is False:
format_str = color_format(
'\n\n>>>> Description on {lightpurple}%s'
'{default} <<<<\n')
pywikibot.output(format_str % page.title())
pywikibot.output(localImagePage.get())
pywikibot.output(format_str %
commonsImagePage.title())
pywikibot.output(commonsText)
if pywikibot.input_yn(
u'Does the description on Commons contain '
'all required source and license\n'
'information?',
default=False, automatic_quit=False):
localImagePage.delete(
'%s [[:commons:Image:%s]]'
% (comment, filenameOnCommons), prompt=False)
else:
localImagePage.delete(
comment + ' [[:commons:Image:%s]]'
% filenameOnCommons, prompt=False)
else:
pywikibot.output(
u'The image is not identical to the one on Commons.')
except (pywikibot.NoPage, pywikibot.IsRedirectPage) as e:
pywikibot.output(u'%s' % e[0])
continue
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
for arg in pywikibot.handle_args(args):
if arg == '-replacealways':
options['replace'] = True
options['replacealways'] = True
elif arg == '-hash':
options['use_hash'] = True
elif arg == '-autonomous':
pywikibot.warning(u"The '-autonomous' argument is DEPRECATED,"
u" use '-always' instead.")
options['always'] = True
elif arg.startswith('-'):
if arg[1:] in ('always', 'replace', 'replaceloose', 'replaceonly'):
options[arg[1:]] = True
bot = NowCommonsDeleteBot(**options)
bot.run()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
#@+leo-ver=5-thin
#@+node:ville.20110403115003.10348: * @file valuespace.py
#@+<< docstring >>
#@+node:ville.20110403115003.10349: ** << docstring >>
'''Supports Leo scripting using per-Leo-outline namespaces.
Commands
========
.. note::
The first four commands are a light weight option for python calculations
within Leo bodies. The remainder are a more complex system for tree wide
computations.
This plugin supports the following commands:
vs-eval
-------
Execute the selected text, if any. Select next line of text.
Tries hard to capture the result of the last expression in the
selected text::
import datetime
today = datetime.date.today()
will capture the value of ``today`` even though the last line is a
statement, not an expression.
Stores results in ``c.vs['_last']`` for insertion
into body by ``vs-last`` or ``vs-last-pretty``.
Removes common indentation (``textwrap.dedent()``) before executing,
allowing execution of indented code.
``g``, ``c``, and ``p`` are available to executing code, assignments
are made in the ``c.vs`` namespace and persist for the life of ``c``.
vs-eval-replace
---------------
Execute the selected text, if any. Replace the selected text with the
result.
vs-eval-block
-------------
In the body, "# >>>" marks the end of a code block, and "# <<<" marks
the end of an output block. E.g.::
a = 2
# >>>
4
# <<<
b = 2.0*a
# >>>
4.0
# <<<
``vs-eval-block`` evaluates the current code block, either the code block
the cursor's in, or the code block preceding the output block the cursor's
in. Subsequent output blocks are marked "# >>> *" to show they may need
re-evaluation.
Note: you don't really need to type the "# >>>" and "# <<<" markers
because ``vs-eval-block`` will add them as needed. So just type the
first code block and run ``vs-eval-block``.
vs-last
-------
Insert the last result from ``vs-eval``. Inserted as a string,
so ``"1\n2\n3\n4"`` will cover four lines and insert no quotes,
for ``repr()`` style insertion use ``vs-last-pretty``.
vs-last-pretty
--------------
Insert the last result from ``vs-eval``. Formatted by
``pprint.pformat()``, so ``"1\n2\n3\n4"`` will appear as
'``"1\n2\n3\n4"``', see all ``vs-last``.
vs-create-tree
--------------
Creates a tree whose root node is named 'valuespace' containing one child node
for every entry in the namespace. The headline of each child is *@@r <key>*,
where *<key>* is one of the keys of the namespace. The body text of the child
node is the value for *<key>*.
vs-dump
-------
Prints key/value pairs of the namespace.
vs-reset
--------
Clears the namespace.
vs-update
---------
Scans the entire Leo outline twice, processing *@=*, *@a* and *@r* nodes.
Pass 1
++++++
Pass 1 evaluates all *@=* and *@a* nodes in the outline as follows:
*@=* (assignment) nodes should have headlines of the form::
@= <var>
Pass 1 evaluates the body text and assigns the result to *<var>*.
*@a* (anchor) nodes should have headlines of one of two forms::
@a
@a <var>
The first form evaluates the script in the **parent** node of the *@a* node.
Such **bare** @a nodes serve as markers that the parent contains code to be
executed.
The second form evaluates the body of the **parent** of the *@a* node and
assigns the result to *<var>*.
**Important**: Both forms of *@a* nodes support the following **@x convention**
when evaluating the parent's body text. Before evaluating the body text, pass1
scans the body text looking for *@x* lines. Such lines have two forms:
1. *@x <python statement>*
Pass 1 executes *<python statement>*.
2. The second form spans multiple lines of the body text::
@x {
python statements
@x }
Pass 1 executes all the python statements between the *@x {* and the *@x }*
3. Assign block of text to variable::
@x =<var> {
Some
Text
@x }
Pass 1 assigns the block of text to <var>. The type of value is SList,
a special subclass of standard 'list' that makes operating with string
lists convenient. Notably, you can do <var>.n to get the content as plain
string.
A special case of this is the "list append" notation::
@x =<var>+ {
Some
Text
@x }
This assumes that <var> is a list, and appends the content as SList to this
list. You will typically do '@x var = []' earlier in the document to make this
construct work.
<var> in all constructs above can be arbitrary expression that can be on left hand
side of assignment. E.g. you can use foo.bar, foo['bar'], foo().bar etc.
Pass 2
++++++
Pass 2 "renders" all *@r* nodes in the outline into body text. *@r* nodes should
have the form::
@r <expression>
Pass 2 evaluates *<expression>* and places the result in the body pane.
**TODO**: discuss SList expressions.
Evaluating expressions
======================
All expression are evaluated in a context that predefines Leo's *c*, *g* and *p*
vars. In addition, *g.vs* is a dictionary whose keys are *c.hash()* and whose
values are the namespaces for each commander. This allows communication between
different namespaces, while keeping namespaces generally separate.
'''
# SList docs: http://ipython.scipy.org/moin/Cookbook/StringListProcessing
#@-<< docstring >>
# By Ville M. Vainio and Terry N. Brown.
#@+<< imports >>
#@+node:ville.20110403115003.10351: ** << imports >>
import leo.core.leoGlobals as g
import leo.core.leoPlugins as leoPlugins
from leo.external.stringlist import SList
# Uses leoPlugins.TryNext.
import pprint
import os
import re
# import sys
# import types
import textwrap
import json
from io import BytesIO
try:
import yaml
except ImportError:
yaml = None
#@-<< imports >>
controllers = {}
# Keys are c.hash(), values are ValueSpaceControllers.
# pylint: disable=eval-used
# Eval is essential to this plugin.
#@+others
#@+node:ekr.20110408065137.14221: ** Module level
#@+node:ville.20110403115003.10353: *3* colorize_headlines_visitor
def colorize_headlines_visitor(c,p, item):
""" Changes @thin, @auto, @shadow to bold """
if p.h.startswith("!= "):
f = item.font(0)
f.setBold(True)
item.setFont(0,f)
raise leoPlugins.TryNext
#@+node:ville.20110403115003.10352: *3* init
def init ():
'''Return True if the plugin has loaded successfully.'''
# vs_reset(None)
global controllers
# g.vs = {} # A dictionary of dictionaries, one for each commander.
# create global valuaspace controller for ipython
g.visit_tree_item.add(colorize_headlines_visitor)
g.registerHandler('after-create-leo-frame',onCreate)
g.plugin_signon(__name__)
return True
#@+node:ekr.20110408065137.14222: *3* onCreate
def onCreate (tag,key):
global controllers
c = key.get('c')
if c:
h = c.hash()
vc = controllers.get(h)
if not vc:
controllers [h] = vc = ValueSpaceController(c)
#@+node:tbrown.20170516194332.1: *3* get_blocks
def get_blocks(c):
"""get_blocks - iterate code blocks
:return: (current, source, output)
:rtype: (bool, str, str)
"""
pos = c.frame.body.wrapper.getInsertPoint()
chrs = 0
lines = c.p.b.split('\n')
block = {'source': [], 'output': []}
reading = 'source'
seeking_current = True
# if the last non-blank line isn't the end of a possibly empty
# output block, make it one
if [i for i in lines if i.strip()][-1] != "# <<<":
lines.append("# <<<")
while lines:
line = lines.pop(0)
chrs += len(line)+1
if line.startswith("# >>>"):
reading = 'output'
continue
if line.startswith("# <<<"):
current = seeking_current and (chrs >= pos+1)
if current:
seeking_current = False
yield current, '\n'.join(block['source']), '\n'.join(block['output'])
block = {'source': [], 'output': []}
reading = 'source'
continue
block[reading].append(line)
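    # Illustrative trace (mirrors the "# >>>" / "# <<<" convention documented above): for a body of
    #   a = 2 / # >>> / 4 / # <<< / b = 2.0*a / # >>> / 4.0 / # <<<
    # this generator yields (current, 'a = 2', '4') and then (current, 'b = 2.0*a', '4.0'),
    # where 'current' is True only for the block containing the cursor.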
#@+node:ville.20110403115003.10355: ** Commands
#@+node:ville.20130127115643.3695: *3* get_vs
def get_vs(c):
'''deal with singleton "ipython" controller'''
if g.app.ipk:
vsc = controllers.get('ipython')
if not vsc:
controllers['ipython'] = vsc = ValueSpaceController(c = None,
ns = g.app.ipk.namespace)
vsc.set_c(c)
return vsc
return controllers[c.hash()]
#@+node:ville.20110407210441.5691: *3* vs-create-tree
@g.command('vs-create-tree')
def vs_create_tree(event):
"""Create tree from all variables."""
get_vs(event['c']).create_tree()
#@+node:ekr.20110408065137.14227: *3* vs-dump
@g.command('vs-dump')
def vs_dump(event):
"""Dump the valuespace for this commander."""
get_vs(event['c']).dump()
#@+node:ekr.20110408065137.14220: *3* vs-reset
@g.command('vs-reset')
def vs_reset(event):
# g.vs = types.ModuleType('vs')
# sys.modules['vs'] = g.vs
get_vs(event['c']).reset()
#@+node:ville.20110403115003.10356: *3* vs-update
@g.command('vs-update')
def vs_update(event):
get_vs(event['c']).update()
#@+node:tbrown.20130227164110.21222: *3* vs-eval
@g.command("vs-eval")
def vs_eval(event):
"""
Execute the selected text, if any. Select next line of text.
    Tries hard to capture the result of the last expression in the
selected text::
import datetime
today = datetime.date.today()
will capture the value of ``today`` even though the last line is a
statement, not an expression.
Stores results in ``c.vs['_last']`` for insertion
into body by ``vs-last`` or ``vs-last-pretty``.
Removes common indentation (``textwrap.dedent()``) before executing,
allowing execution of indented code.
``g``, ``c``, and ``p`` are available to executing code, assignments
are made in the ``c.vs`` namespace and persist for the life of ``c``.
"""
c = event['c']
w = c.frame.body.wrapper
txt = w.getSelectedText()
# select next line ready for next select/send cycle
# copied from .../plugins/leoscreen.py
b = w.getAllText()
i = w.getInsertPoint()
try:
j = b[i:].index('\n')+i+1
w.setSelectionRange(i,j)
except ValueError: # no more \n in text
w.setSelectionRange(i,i)
eval_text(c, txt)
def eval_text(c, txt):
if not txt:
return
vsc = get_vs(c)
cvs = vsc.d
txt = textwrap.dedent(txt)
blocks = re.split('\n(?=[^\\s])', txt)
leo_globals = {'c':c, 'p':c.p, 'g':g}
ans = None
dbg = False
redirects = c.config.getBool('valuespace_vs_eval_redirect')
if redirects:
old_stderr = g.stdErrIsRedirected()
old_stdout = g.stdOutIsRedirected()
if not old_stderr:
g.redirectStderr()
if not old_stdout:
g.redirectStdout()
try:
# execute all but the last 'block'
if dbg: print('all but last')
# exec '\n'.join(blocks[:-1]) in leo_globals, c.vs
exec('\n'.join(blocks[:-1]), leo_globals, cvs) # Compatible with Python 3.x.
all_done = False
except SyntaxError:
# splitting of the last block caused syntax error
try:
# is the whole thing a single expression?
if dbg: print('one expression')
ans = eval(txt, leo_globals, cvs)
except SyntaxError:
if dbg: print('statement block')
# exec txt in leo_globals, c.vs
exec(txt, leo_globals, cvs) # Compatible with Python 3.x.
all_done = True # either way, the last block is used now
if not all_done: # last block still needs using
try:
if dbg: print('final expression')
ans = eval(blocks[-1], leo_globals, cvs)
except SyntaxError:
ans = None
if dbg: print('final statement')
# exec blocks[-1] in leo_globals, c.vs
exec(blocks[-1], leo_globals, cvs) # Compatible with Python 3.x.
if redirects:
if not old_stderr:
g.restoreStderr()
if not old_stdout:
g.restoreStdout()
if ans is None: # see if last block was a simple "var =" assignment
key = blocks[-1].split('=', 1)[0].strip()
if key in cvs:
ans = cvs[key]
if ans is None: # see if whole text was a simple /multi-line/ "var =" assignment
key = blocks[0].split('=', 1)[0].strip()
if key in cvs:
ans = cvs[key]
cvs['_last'] = ans
if ans is not None:
# annoying to echo 'None' to the log during line by line execution
txt = str(ans)
lines = txt.split('\n')
if len(lines) > 10:
txt = '\n'.join(lines[:5]+['<snip>']+lines[-5:])
if len(txt) > 500:
txt = txt[:500] + ' <truncated>'
g.es(txt)
return ans
#@+node:tbrown.20170516202419.1: *3* vs-eval-block
@g.command("vs-eval-block")
def vs_eval_block(event):
c = event['c']
pos = 0
lines = []
current_seen = False
for current, source, output in get_blocks(c):
lines.append(source)
lines.append("# >>>" + (" *" if current_seen else ""))
if current:
old_log = c.frame.log.logCtrl.getAllText()
eval_text(c, source)
new_log = c.frame.log.logCtrl.getAllText()[len(old_log):]
lines.append(new_log.strip())
# lines.append(str(get_vs(c).d.get('_last')))
pos = len('\n'.join(lines))+7
current_seen = True
else:
lines.append(output)
lines.append("# <<<")
c.p.b = '\n'.join(lines) + '\n'
c.frame.body.wrapper.setInsertPoint(pos)
c.redraw()
c.bodyWantsFocusNow()
#@+node:tbnorth.20171222141907.1: *3* vs-eval-replace
@g.command("vs-eval-replace")
def vs_eval_replace(event):
"""Execute the selected text, if any. Replace it with the result."""
c = event['c']
w = c.frame.body.wrapper
txt = w.getSelectedText()
eval_text(c, txt)
result = pprint.pformat(get_vs(c).d.get('_last'))
i, j = w.getSelectionRange()
new_text = c.p.b[:i]+result+c.p.b[j:]
bunch = c.undoer.beforeChangeNodeContents(c.p)
w.setAllText(new_text)
c.p.b = new_text
w.setInsertPoint(i+len(result))
c.undoer.afterChangeNodeContents(c.p, 'Insert result', bunch)
c.setChanged()
#@+node:tbrown.20130227164110.21223: *3* vs-last
@g.command("vs-last")
def vs_last(event, text=None):
"""
Insert the last result from ``vs-eval``.
Inserted as a string, so ``"1\n2\n3\n4"`` will cover four lines and
insert no quotes, for ``repr()`` style insertion use ``vs-last-pretty``.
"""
c = event['c']
if text is None:
text = str(get_vs(c).d.get('_last'))
editor = c.frame.body.wrapper
insert_point = editor.getInsertPoint()
editor.insert(insert_point, text+'\n')
editor.setInsertPoint(insert_point+len(text)+1)
c.setChanged(True)
#@+node:tbrown.20130227164110.21224: *3* vs-last-pretty
@g.command("vs-last-pretty")
def vs_last_pretty(event):
"""
Insert the last result from ``vs-eval``.
Formatted by ``pprint.pformat()``, so ``"1\n2\n3\n4"`` will appear as
'``"1\n2\n3\n4"``', see all ``vs-last``.
"""
c = event['c']
vs_last(event, text=pprint.pformat(get_vs(c).d.get('_last')))
#@+node:ekr.20110408065137.14219: ** class ValueSpaceController
class ValueSpaceController(object):
'''A class supporting per-commander evaluation spaces
containing @a, @r and @= nodes.
'''
#@+others
#@+node:ekr.20110408065137.14223: *3* ctor
def __init__ (self,c = None, ns = None ):
# g.trace('(ValueSpaceController)',c)
self.c = c
if ns is None:
self.d = {}
else:
self.d = ns
self.reset()
self.trace = False
self.verbose = False
if c:
# important this come after self.reset()
c.keyHandler.autoCompleter.namespaces.append(self.d)
# changed g.vs.__dict__ to self.d
# Not strictly necessary, but allows cross-commander communication.
#g.vs [c.hash()] = self.d
#@+node:ekr.20110408065137.14224: *3* create_tree
def create_tree (self):
'''The vs-create-tree command.'''
c = self.c ; p = c.p ; tag = 'valuespace'
# Create a 'valuespace' node if p's headline is not 'valuespace'.
if p.h == tag:
r = p
else:
r = p.insertAsLastChild()
r.h = tag
# Create a child of r for all items of self.d
for k,v in self.d.items():
if not k.startswith('__'):
child = r.insertAsLastChild()
child.h = '@@r ' + k
self.render_value(child,v) # Create child.b from child.h
c.bodyWantsFocus()
c.redraw()
#@+node:ekr.20110408065137.14228: *3* dump
def dump (self):
c,d = self.c,self.d
exclude = (
'__builtins__',
# 'c','g','p',
)
print('Valuespace for %s...' % c.shortFileName())
keys = list(d.keys())
keys = [z for z in keys if z not in exclude]
keys.sort()
max_s = 5
for key in keys:
max_s = max(max_s,len(key))
for key in keys:
val = d.get(key)
pad = max(0,max_s-len(key))*' '
print('%s%s = %s' % (pad,key,val))
c.bodyWantsFocus()
#@+node:ekr.20110408065137.14225: *3* reset
def reset (self):
'''The vs-reset command.'''
# do not allow resetting the dict if using ipython
if not g.app.ipk:
self.d = {}
self.c.vs = self.d
self.init_ns(self.d)
#@+node:ville.20110409221110.5755: *3* init_ns
def init_ns(self,ns):
""" Add 'builtin' methods to namespace """
def slist(body):
""" Return body as SList (string list) """
return SList(body.split("\n"))
ns['slist'] = slist
# xxx todo perhaps add more?
#@+node:ville.20130127122722.3696: *3* set_c
def set_c(self,c):
""" reconfigure vsc for new c
Needed by ipython integration
"""
self.c = c
#@+node:ekr.20110408065137.14226: *3* update & helpers
def update (self):
'''The vs-update command.'''
# names are reversed, xxx TODO fix later
self.render_phase() # Pass 1
self.update_vs() # Pass 2
self.c.bodyWantsFocus()
#@+node:ekr.20110407174428.5781: *4* render_phase (pass 1) & helpers
def render_phase(self):
'''Update p's tree (or the entire tree) as follows:
- Evaluate all @= nodes and assign them to variables
- Evaluate the body of the *parent* nodes for all @a nodes.
- Read in @vsi nodes and assign to variables
'''
c = self.c
self.d['c'] = c # g.vs.c = c
self.d['g'] = g # g.vs.g = g
for p in c.all_unique_positions():
h = p.h.strip()
if h.startswith('@= '):
if self.trace and self.verbose: g.trace('pass1',p.h)
self.d['p'] = p.copy() # g.vs.p = p.copy()
var = h[3:].strip()
self.let_body(var,self.untangle(p))
elif h.startswith("@vsi "):
fname = h[5:]
bname, ext = os.path.splitext(fname)
g.es("@vsi " + bname +" " + ext)
if ext.lower() == '.json':
pth = c.getNodePath(p)
fn = os.path.join(pth, fname)
g.es("vsi read from " + fn)
if os.path.isfile(fn):
cont = open(fn).read()
val = json.loads(cont)
self.let(bname, val)
self.render_value(p, cont)
elif h == '@a' or h.startswith('@a '):
if self.trace and self.verbose: g.trace('pass1',p.h)
tail = h[2:].strip()
parent = p.parent()
if tail:
self.let_body(tail,self.untangle(parent))
try:
self.parse_body(parent)
except Exception:
g.es_exception()
g.es("Error parsing " + parent.h)
# g.trace(self.d)
#@+node:ekr.20110407174428.5777: *5* let & let_body
def let(self,var,val):
'''Enter var into self.d with the given value.
Both var and val must be strings.'''
if self.trace:
print("Let [%s] = [%s]" % (var,val))
self.d ['__vstemp'] = val
if var.endswith('+'):
rvar = var.rstrip('+')
# .. obj = eval(rvar,self.d)
exec("%s.append(__vstemp)" % rvar,self.d)
else:
exec(var + " = __vstemp",self.d)
del self.d ['__vstemp']
def let_cl(self, var, body):
""" handle @cl node """
# g.trace()
lend = body.find('\n')
firstline = body[0:lend]
rest = firstline[4:].strip()
print("rest",rest)
try:
translator = eval(rest, self.d)
except Exception:
g.es_exception()
g.es("Can't instantiate @cl xlator: " + rest)
translated = translator(body[lend+1:])
self.let(var, translated)
def let_body(self,var,val):
if var.endswith(".yaml"):
if yaml:
#print "set to yaml", `val`
sio = BytesIO(val)
try:
d = yaml.load(sio)
except Exception:
g.es_exception()
g.es("yaml error for: " + var)
return
parts = os.path.splitext(var)
self.let(parts[0], d)
else:
g.es("did not import yaml")
return
if val.startswith('@cl '):
self.let_cl(var, val)
return
self.let(var,val)
#@+node:ekr.20110407174428.5780: *5* parse_body & helpers
def parse_body(self,p):
body = self.untangle(p) # body is the script in p's body.
# print("Body")
# print(body)
if self.trace and self.verbose: g.trace('pass1',p.h,'\n',body)
self.d ['p'] = p.copy()
backop = []
segs = re.finditer('^(@x (.*))$',body,re.MULTILINE)
for mo in segs:
op = mo.group(2).strip()
# print("Oper",op)
if op.startswith('='):
# print("Assign", op)
backop = ('=', op.rstrip('{').lstrip('='), mo.end(1))
elif op == '{':
backop = ('runblock', mo.end(1))
elif op == '}':
bo = backop[0]
# print("backop",bo)
if bo == '=':
self.let_body(backop[1].strip(), body[backop[2] : mo.start(1)])
elif bo == 'runblock':
self.runblock(body[backop[1] : mo.start(1)])
else:
self.runblock(op)
#@+node:ekr.20110407174428.5779: *6* runblock
def runblock(self,block):
if self.trace and self.verbose:
g.trace('pass1',block)
exec(block,self.d)
#@+node:ekr.20110407174428.5778: *6* untangle (getScript)
def untangle(self,p):
return g.getScript(self.c,p,
useSelectedText=False,
useSentinels=False)
#@+node:ekr.20110407174428.5782: *4* update_vs (pass 2) & helper
def update_vs(self):
'''
Evaluate @r <expr> nodes, putting the result in their body text.
Output @vso nodes, based on file extension
'''
c = self.c
for p in c.all_unique_positions():
h = p.h.strip()
if h.startswith('@r '):
if self.trace and self.verbose: g.trace('pass2:',p.h)
expr = h[3:].strip()
try:
result = eval(expr,self.d)
except Exception:
g.es_exception()
g.es("Failed to render " + h)
continue
if self.trace: print("Eval:",expr,"result:",repr(result))
self.render_value(p,result)
if h.startswith("@vso "):
expr = h[5:].strip()
bname, ext = os.path.splitext(expr)
try:
result = eval(bname,self.d)
except Exception:
g.es_exception()
g.es("@vso failed: " + h)
continue
if ext.lower() == '.json':
cnt = json.dumps(result, indent = 2)
pth = os.path.join(c.getNodePath(p), expr)
self.render_value(p, cnt)
g.es("Writing @vso: " + pth)
open(pth, "w").write(cnt)
else:
g.es_error("Unknown vso extension (should be .json, ...): " + ext)
#@+node:ekr.20110407174428.5784: *5* render_value
def render_value(self,p,value):
'''Put the rendered value in p's body pane.'''
if isinstance(value, SList):
p.b = value.n
elif g.isString(value): # Works with Python 3.x.
p.b = value
else:
p.b = pprint.pformat(value)
#@+node:ekr.20110407174428.5783: *3* test
def test(self):
self.update()
# test()
#@-others
#@-others
#@-leo
|
nilq/baby-python
|
python
|
#Let's do some experiments to understand why to use numpy and what we can do with it.
#import numpy with alias np
import numpy as np
import sys
# one dimensional array
a=np.array([1,2,3])
print(a)
# two dimensional array
a= np.array([(1,2,3),(4,5,6)])
print(a)
# Why is numpy better than a list?
# 1) Less memory
# 2) Faster
# 3) More convenient
r = range(1000)
print(sys.getsizeof(0)*len(r))
d = np.arange(1000)
print(d.size *d.itemsize)
# So the list of Python ints takes roughly 28*1000 bytes in total, whereas the numpy array takes itemsize*1000 bytes (4*1000 or 8*1000 depending on dtype).
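# Illustrative speed comparison (timings vary by machine): numpy's vectorised
# add avoids the Python-level loop, which is why it is usually much faster
# than adding two lists element by element.
import time
size = 1000000
l1 = list(range(size))
l2 = list(range(size))
a1 = np.arange(size)
a2 = np.arange(size)
start = time.time()
list_sum = [x + y for x, y in zip(l1, l2)]
list_time = time.time() - start
start = time.time()
numpy_sum = a1 + a2
numpy_time = time.time() - start
print("list add:", list_time, "s, numpy add:", numpy_time, "s")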
|
nilq/baby-python
|
python
|
if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable
import dezero.functions as F
# im2col
x1 = np.random.rand(1, 3, 7, 7)
col1 = F.im2col(x1, kernel_size=5, stride=1, pad=0, to_matrix=True)
print(col1.shape) # (9, 75)
x2 = np.random.rand(10, 3, 7, 7)  # 10 samples of data
kernel_size = (5, 5)
stride = (1, 1)
pad = (0, 0)
col2 = F.im2col(x2, kernel_size, stride, pad, to_matrix=True)
print(col2.shape) # (90, 75)
# conv2d
N, C, H, W = 1, 5, 15, 15
OC, (KH, KW) = 8, (3, 3)
x = Variable(np.random.randn(N, C, H, W))
W = np.random.randn(OC, C, KH, KW)
y = F.conv2d_simple(x, W, b=None, stride=1, pad=1)
y.backward()
print(y.shape) # (1, 8, 15, 15)
print(x.grad.shape) # (1, 5, 15, 15)
|
nilq/baby-python
|
python
|
import sys
import os
from os.path import join as opj
workspace = os.environ["WORKSPACE"]
sys.path.append(
opj(workspace, 'code/GeDML/src')
)
import argparse
import logging
logging.getLogger().setLevel(logging.INFO)
import torch.distributed as dist
from gedml.launcher.runners.distributed_runner import DistributedRunner
def subprocess_start():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--world_size", type=int, default=1)
parser.add_argument("--rank", type=int, default=0)
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--dataset", type=str, default='ImageNet')
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(opt.gpu)
opt.gpu = 0
opt.link_path = os.path.join(workspace, "code/Experiments/GeDML/demo/moco/link.yaml")
runner = DistributedRunner(
opt
)
runner.run()
if __name__ == '__main__':
subprocess_start()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class Company(models.Model):
_inherit = 'res.company'
hr_presence_control_email_amount = fields.Integer(string="# emails to send")
hr_presence_control_ip_list = fields.Char(string="Valid IP addresses")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
_ConfigureState_
Populate the states tables with all known states, and set the max retries for
each state. Default to one retry.
Create the CouchDB and associated views if needed.
"""
from WMCore.Database.CMSCouch import CouchServer
from WMCore.DataStructs.WMObject import WMObject
class ConfigureState(WMObject):
def configure(self):
server = CouchServer(self.config.JobStateMachine.couchurl)
dbname = 'JSM/JobHistory'
if dbname not in server.listDatabases():
server.createDatabase(dbname)
|
nilq/baby-python
|
python
|
import numpy as np
from numba import float32, jit
from numba.np.ufunc import Vectorize
from numba.core.errors import TypingError
from ..support import TestCase
import unittest
dtype = np.float32
a = np.arange(80, dtype=dtype).reshape(8, 10)
b = a.copy()
c = a.copy(order='F')
d = np.arange(16 * 20, dtype=dtype).reshape(16, 20)[::2, ::2]
def add(a, b):
return a + b
def add_multiple_args(a, b, c, d):
return a + b + c + d
def gufunc_add(a, b):
result = 0.0
for i in range(a.shape[0]):
result += a[i] * b[i]
return result
def ufunc_reduce(ufunc, arg):
for i in range(arg.ndim):
arg = ufunc.reduce(arg)
return arg
vectorizers = [
Vectorize,
# ParallelVectorize,
# StreamVectorize,
# CudaVectorize,
# GUFuncVectorize,
]
class TestUFuncs(TestCase):
def _test_ufunc_attributes(self, cls, a, b, *args):
"Test ufunc attributes"
vectorizer = cls(add, *args)
vectorizer.add(float32(float32, float32))
ufunc = vectorizer.build_ufunc()
info = (cls, a.ndim)
self.assertPreciseEqual(ufunc(a, b), a + b, msg=info)
self.assertPreciseEqual(ufunc_reduce(ufunc, a), np.sum(a), msg=info)
self.assertPreciseEqual(ufunc.accumulate(a), np.add.accumulate(a),
msg=info)
self.assertPreciseEqual(ufunc.outer(a, b), np.add.outer(a, b), msg=info)
def _test_broadcasting(self, cls, a, b, c, d):
"Test multiple args"
vectorizer = cls(add_multiple_args)
vectorizer.add(float32(float32, float32, float32, float32))
ufunc = vectorizer.build_ufunc()
info = (cls, a.shape)
self.assertPreciseEqual(ufunc(a, b, c, d), a + b + c + d, msg=info)
def test_ufunc_attributes(self):
for v in vectorizers: # 1D
self._test_ufunc_attributes(v, a[0], b[0])
for v in vectorizers: # 2D
self._test_ufunc_attributes(v, a, b)
for v in vectorizers: # 3D
self._test_ufunc_attributes(v, a[:, np.newaxis, :],
b[np.newaxis, :, :])
def test_broadcasting(self):
for v in vectorizers: # 1D
self._test_broadcasting(v, a[0], b[0], c[0], d[0])
for v in vectorizers: # 2D
self._test_broadcasting(v, a, b, c, d)
for v in vectorizers: # 3D
self._test_broadcasting(v, a[:, np.newaxis, :], b[np.newaxis, :, :],
c[:, np.newaxis, :], d[np.newaxis, :, :])
def test_implicit_broadcasting(self):
for v in vectorizers:
vectorizer = v(add)
vectorizer.add(float32(float32, float32))
ufunc = vectorizer.build_ufunc()
broadcasting_b = b[np.newaxis, :, np.newaxis, np.newaxis, :]
self.assertPreciseEqual(ufunc(a, broadcasting_b),
a + broadcasting_b)
def test_ufunc_exception_on_write_to_readonly(self):
z = np.ones(10)
z.flags.writeable = False # flip write bit
tests = []
expect = "ufunc 'sin' called with an explicit output that is read-only"
tests.append((jit(nopython=True), TypingError, expect))
tests.append((jit(nopython=False), ValueError,
"output array is read-only"))
for dec, exc, msg in tests:
def test(x):
a = np.ones(x.shape, x.dtype) # do not copy RO attribute from x
np.sin(a, x)
with self.assertRaises(exc) as raises:
dec(test)(z)
self.assertIn(msg, str(raises.exception))
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import json
import numpy as np
def pack(request):
data = request.get_json()
furniture = data['furniture']
orientations = []
'''
Test TEST TTEESSTT
'''
##################################################################
####### Preliminary Functions
##################################################################
def order(x,vals):
'''
idea is to apply this to the pieces, with different
vectors for vals depending on the ordering rule
(probably start with non-increasing volume)
'''
x = [i for _,i in sorted(zip(vals,x), reverse = True)]
return x
''' Permutations of indices for dimensions '''
def re_order(dim, OR):
'''
dim stores original dimensions, OR is a permutation
'''
D = dim
new_dim = []
for i in range(3):
new_dim.append(D[OR[i]])
return new_dim
def Feas(Dims, EP, Bin_Size, OR, Curr_items, Curr_EP):
'''
Returns True if the orientation OR of a piece of dimension
Dims = HxWxD is feasible in a bin with leftmost corner at EP
Bin_Size = 1x3 dimensions of bin
Dims = 1x3
EP = 1x3 -- coordinates of the chosen spot
OR = 1x3 a permutation of [0,1,2]
For all items in Curr_items placed at Curr_Ep
have to make sure that EP[0] + d[OR[0]] doesn't
poke through... item[j][0] -- item[j][0] + Curr_Ep[j][0]
'''
BS = Bin_Size
D = re_order(Dims,OR)
CI = Curr_items
CE = Curr_EP
check = True
for i in range(3):
# Bin limits
if D[i] + EP[i] > BS[i]:
check = False
for j in range(len(CI)):
# checking intersections with other items
####################################################
#### DOUBLE CHECK THIS FOR CORRECTNESS!!!!
####################################################
for k in range(3):
a = (k + 1)%3
b = (k + 2)%3
if overlap(D,EP,CI[j],CE[j],k,a,b):
check = False
return check
def overlap(d1,c1, d2,c2, k,x, y):
'''
returns True if two 3-d boxes with dimensions d1 d2
and lower left corners c1, c2 overlap on the xy plane AND k dim...
'''
ov = True
if c1[x] >= c2[x] + d2[x]:
ov = False
if c2[x] >= c1[x] + d1[x]:
ov = False
if c1[y] >= c2[y] + d2[y]:
ov = False
if c2[y] >= c1[y] + d1[y]:
ov = False
if c1[k] >= c2[k] + d2[k]:
ov = False
if c2[k] >= c1[k] + d1[k]:
ov = False
return ov
'''
Compute Merit function for given placement of a piece
'''
def Merit_Res(Dims, OR, EP, Rs, Bin_Size):
'''
not gonna bother checking feasibility...
assume that this calc comes AFTER feasibility check...
--Maybe weight the dimensions differently to
make the different orientations different?
'''
D = Dims
BS = Bin_Size
'''
this does NOT take account of the orientation
so the orientation is basically just for feasibility...
'''
# The "extra" EP[0] + Dims[0] is supposed to penalize "high" positions...
return sum(Rs) - sum(Dims) + EP[0] + Dims[0]
#### Work with people to determine best/better merit functions.
#### CODE UP THE BOUNDING BOX ONES TOO!! THESE SEEM LIKELY
#### CANDIDATES FOR US...
def Merit_WD(Dims, OR, EP, curr_items, curr_eps):
'''
Selects position that minimizes the bounding
box in the WxD dimension
curr_items = items in crate
curr_eps = position of items
EP = candidate position
OR = candidate orientation
'''
Dim = re_order(Dims,OR)
CI = curr_items
CE = curr_eps
'''
start out with the box bounds as the new guy
'''
W = EP[1] + Dim[1]
D = EP[2] + Dim[2]
for i in range(len(CI)):
if CE[i][1] + CI[i][1] > W:
W = CE[i][1] + CI[i][1]
if CE[i][2] + CI[i][2] > D:
D = CE[i][2] + CI[i][2]
#Penalizes Height
val = W*D + (EP[0] + Dim[0]) * W
return(val)
'''
Update Extreme point list
'''
def proj(d1,e1,d2,e2, ep_dir, proj_dir):
'''
d1, e1 -- dim of new piece, placed at point e1
d2, e2 -- cycle these through the other pieces
ep_dir is the coordinate "pushed out" by the piece dimension in
the candidate extreme point
proj_dir is the one to shrink... (number 0,1,2 corresponding to x, y, z)
These are NEVER the same...
'''
e = ep_dir
pd = proj_dir
# remaining dimension???
od = 3-e - pd
eps = 0.0
check = True
if d2[pd] + e2[pd] > e1[pd] - eps:
#i.e. piece is further from axis in projection direction
check = False
if e2[e] > e1[e] + d1[e] - eps:
#i.e. piece too far
check = False
if e2[e] + d2[e] < e1[e] + d1[e] + eps:
# i.e. piece not far enough
check = False
if e2[od] > e1[od] - eps:
#i.e. piece too far
check = False
if e2[od] + d2[od] < e1[od] + eps:
# i.e. piece not far enough
check = False
return check
def Update_EP(Dims, EP, Curr_EPs, Curr_Items):
'''
Dims = 1x3 HxWxD of current piece placed
(in orienation OR* decided by Feas and Merit...)
EP = 1x3 coordinates of lower left corner of current piece
Curr_EPs = list of current extreme points where Curr_Items
are located
Curr_Items = list of dimensions of current items
idea is you take current EP and push it out in the
three dimensions of the current piece, then project
each of these towards the two other axes...
e.g. [ep[0],ep[1] + Dims[1], ep[2]] projected in
x and z directions...
- Six possible new ones (possibly duplicated...)
- each of the three
New_Eps[0], [1] are x_y and x_z projections of (ep[0]+dim[0],ep[1],ep[2])
by shrinking the y and z coordinates, respectively...
'''
D = Dims
CI = Curr_Items
CE = Curr_EPs
New_Eps = [[EP[0]+D[0],EP[1],EP[2]],[EP[0]+D[0],EP[1],EP[2]],
[EP[0],EP[1]+D[1],EP[2]],[EP[0],EP[1]+D[1],EP[2]],
[EP[0],EP[1],EP[2]+D[2]],[EP[0],EP[1],EP[2]+D[2]]]
Max_bounds = -1*np.ones(6)
for i in range(len(CI)):
# x_y -- New_Eps[0] shrinking y coordinate
if proj(D, EP, CI[i], CE[i],0,1) and CE[i][1] + CI[i][1] > Max_bounds[0]:
New_Eps[0] = [EP[0] + D[0], CE[i][1] + CI[i][1],EP[2]]
Max_bounds[0] = CE[i][1] + CI[i][1]
#x_z -- New_Eps[1] shrinking z coordinate
if proj(D, EP, CI[i], CE[i],0,2) and CE[i][2] + CI[i][2] > Max_bounds[1]:
New_Eps[1] = [EP[0] + D[0], EP[1], CE[i][2] + CI[i][2]]
Max_bounds[1] = CE[i][2] + CI[i][2]
# y_x -- New_Eps[2] shrinking x coordinate
if proj(D, EP, CI[i], CE[i],1,0) and CE[i][0] + CI[i][0] > Max_bounds[2]:
New_Eps[2] = [CE[i][0] + CI[i][0], EP[1] + D[1],EP[2]]
Max_bounds[2] = CE[i][0] + CI[i][0]
#y_z -- New_Eps[3] shrinking z coordinate
if proj(D, EP, CI[i], CE[i],1,2) and CE[i][2] + CI[i][2] > Max_bounds[3]:
New_Eps[3] = [EP[0], EP[1]+D[1], CE[i][2] + CI[i][2]]
Max_bounds[3] = CE[i][2] + CI[i][2]
# z_x -- New_Eps[4] shrinking x coordinate
if proj(D, EP, CI[i], CE[i],2,0) and CE[i][0] + CI[i][0] > Max_bounds[4]:
New_Eps[4] = [CE[i][0] + CI[i][0], EP[1],EP[2] + D[2]]
Max_bounds[4] = CE[i][0] + CI[i][0]
# z_y -- New_Eps[5] shrinking y coordinate
if proj(D, EP, CI[i], CE[i],2,1) and CE[i][1] + CI[i][1] > Max_bounds[5]:
New_Eps[5] = [EP[0], CE[i][1] + CI[i][1],EP[2] + D[2]]
Max_bounds[5] = CE[i][1] + CI[i][1]
# remove duplicates
New_Eps = np.unique(New_Eps, axis = 0)
return New_Eps
def Init_RS(NE, Bin_Dims):
'''
Input is a list of new EPs
Initializes the residual space in each axis
This may be updated by the Update_RS function'''
BD = Bin_Dims
RS = []
for i in range(len(NE)):
RS_i = [BD[0] - NE[i][0], BD[1] - NE[i][1],BD[2] - NE[i][2]]
RS.append(RS_i)
return RS
def Update_RS(Dims, EP, All_EPs, RS_list):
'''
This updates the EXISTING RS's to account for
the new item in the Bin.
DOES NOT update the initialized RS to account for
the other items already in the bin -- would have to
include the current items to do that...
Dims = **re-ordered** dimensions of the newly added piece
EP = extreme point PLACEMENT location of the new piece
-- this guy is no longer in the list...
-- the initial res of the
All_Eps = list of all other extreme points
RS_list = current residuals list (each entry a 3-tuple)
'''
EPL = All_EPs
D = Dims
RL = RS_list
for i in range(len(EPL)):
if EPL[i][0] >= EP[0] and EPL[i][0] < EP[0] + D[0]:
if EPL[i][1] <= EP[1] and EPL[i][2] >= EP[2] and EPL[i][2] < EP[2] + D[2]:
RL[i][1] = min([RL[i][1], EP[1] - EPL[i][1]])
if EPL[i][2] <= EP[2] and EPL[i][1] >= EP[1] and EPL[i][1] < EP[1] + D[1]:
RL[i][2] = min([RL[i][2], EP[2] - EPL[i][2]])
if EPL[i][1] >= EP[1] and EPL[i][1] < EP[1] + D[1]:
if EPL[i][0] <= EP[0] and EPL[i][2] >= EP[2] and EPL[i][2] < EP[2] + D[2]:
RL[i][0] = min([RL[i][0], EP[0] - EPL[i][0]])
return RL
##################################################################
####### INPUT STAGE
##################################################################
# Maximum box dimensions
# need to make sure that dimensions are big enough to handle each piece...
H_box = 40
W_box = 60
D_box = 48
#e_ST is the "allowed overhang" of the nesting in this case...is this a thing??
e_ST = 2
# dims are H x W x D
# pieces are (dimensions, label, nesting_dimensions -- default will be [0,0,0])
Pieces = furniture
for i in range(len(Pieces)):
Pieces[i].append(i)
# i.e. the simone table can't have long-way as height...
Or_Ex = {'Simone Table': [2,3]}
# could also fix orientations using something like...
# Fix_Or = {'label': fixed_orientation , etc.}
# probably also add something to specify that certain pieces
# have to go on the bottom, can't be stacked, etc...
#stack_ex = {'Simone Table': 1, 'Harper Shelf':0}
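# NOTE: ``Ors``, ``rotXY``, ``rotYZ`` and ``rotXZ`` are used below but are not
# defined in this snippet, so assumed/illustrative definitions are given here.
# ``Ors`` enumerates the six axis permutations (orientations) of an HxWxD box,
# matching the "permutation of [0,1,2]" convention described in Feas().
Ors = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]
# The rotation lookups map an orientation index to the rotation flags expected
# by the downstream viewer; the zeros below are placeholders and should be
# replaced with the real mapping.
rotXY = [0, 0, 0, 0, 0, 0]
rotYZ = [0, 0, 0, 0, 0, 0]
rotXZ = [0, 0, 0, 0, 0, 0]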
##################################################################
####### NESTING PACKING STAGE
##################################################################
Nest_possible = []
pieces_to_pack = []
for i in range(len(Pieces)):
pieces_to_pack.append(Pieces[i])
for i in range(len(Pieces)):
if Pieces[i][2][0] != 0:
Nest_possible.append(Pieces[i])
##### For now just ordered by volume
Nest_space_ordering = [Nest_possible[i][2][0] * Nest_possible[i][2][1] * Nest_possible[i][2][2]
for i in range(len(Nest_possible))]
Pack_piece_ordering = [pieces_to_pack[i][0][0] * pieces_to_pack[i][0][1] *pieces_to_pack[i][0][2]
for i in range(len(pieces_to_pack))]
Nest_possible = order(Nest_possible, Nest_space_ordering)
pieces_to_pack = order(pieces_to_pack, Pack_piece_ordering)
for j in range(len(Nest_possible)):
'''
try packing, and remove pieces from "pieces to pack"
if they are packed...
'''
Nestings = []
Bin_size = Nest_possible[j][2]
#initialize extreme point list
EPL = np.array([[0,0,0]])
Curr_items = []
Curr_EP = []
RS_list = [[Bin_size[0],Bin_size[1],Bin_size[2]]]
ptp_j = []
for i in range(len(pieces_to_pack)):
ptp_j.append(pieces_to_pack[i])
ptp_j.remove(Nest_possible[j])
for p in range(len(ptp_j)):
'''
try packing - for each successful pack add the phrase
" label packed in label at EP in orientation ___" to some list to be
printed at the end...
'''
Dims = ptp_j[p][0]
best_merit = 2 * H_box * W_box * D_box
e_cand = None
o_cand = None
for e in range(len(EPL)):
for o in range(len(Ors)):
''' Skip if an orientation exception '''
if ptp_j[p][1] in Or_Ex and o in Or_Ex[ptp_j[p][1]]:
continue
if Feas(Dims, EPL[e], Bin_size, Ors[o], Curr_items, Curr_EP) and Merit_Res(Dims, Ors[o], EPL[e], RS_list[e], Bin_size) < best_merit:
best_merit = Merit_Res(Dims, Ors[o], EPL[e], RS_list[e], Bin_size)
e_cand = e
o_cand = o
if e_cand is None:
continue
else:
Dims = re_order(Dims, Ors[o_cand])
NE = Update_EP(Dims, EPL[e_cand], Curr_EP, Curr_items)
### Again had the original dimensions in here...
Curr_items.append(Dims)
Curr_EP.append(EPL[e_cand])
L = len(Curr_EP)
RS_list.remove(RS_list[e_cand])
EPL = np.delete(EPL,e_cand,axis = 0)
for i in range(len(NE)):
EPL = np.append(EPL,[NE[i]], axis = 0)
# Sort the EPs by lowest z, y, x respectively...
# might want to change this, depending on how things go...
for i in range(3):
### Probably Change this to be like the sorting further down...
EPL = EPL[EPL[:,2-i].argsort(kind='mergesort')]
N_RS = Init_RS(NE, Bin_size)
for i in range(len(N_RS)):
RS_list.append(N_RS[i])
RS_list = Update_RS(Dims, Curr_EP[L-1], EPL, RS_list)
Result = f'{ptp_j[p][1]}, orientation HxWxD = {Dims}, bottom left at {Curr_EP[L-1]} in {Nest_possible[j][1]}.'
Nestings.append(Result)
pieces_to_pack.remove(ptp_j[p])
for i in range(len(Nestings)):
print(Nestings[i])
##################################################################
####### Full Packing Stage
##################################################################
#### pieces_to_pack is THE SAME as from the nesting stage...
#### with all the nested pieces removed (whole nested ensemble
#### treated as one...)
#### Instantiate first Crate with first EP at [0,0,0]...
# can be different for each one... in principle...
Bin_size = [H_box,W_box,D_box]
# List of open EP's in open Crates
Cr = [[[0,0,0]]]
## when create a new crate, give it one of the size bounds
## from Crate_Dims and initialize the Crate_RS_Lists with these
## Stores Residuals for each EP in each Crate (ORDERING HAS TO BE THE SAME)
Cr_RS = [[Bin_size]]
# Stores a list of the dimensions of items currently in each crate
Cr_Item=[[]]
# Stores a list of the EPs where the current items
# were placed -- need this to compute intersections
Cr_EPs =[[]]
ptp = pieces_to_pack
## List of the locations and orientations of packed pieces
Packings = []
for p in range(len(ptp)):
'''
try the piece in EACH existing crate, pick best spot
according to the merit function.
If NO possible packing THEN Crates.append([[0,0,0]]) and
pack it in this one...
For bounding box merit function, maybe also start a new
crate if the BEST Merit value is too bad...
'''
# update this with the crate it's packed in...
packed_in = None
Dims = ptp[p][0]
Best_Merit = 2 * H_box * W_box * D_box
e_cand = None
o_cand = None
for c in range(len(Cr)):
EPL = Cr[c]
Curr_Items = Cr_Item[c]
Curr_EP = Cr_EPs[c]
RS_List = Cr_RS[c]
Ordered_RS = []
Ordered_EPL = []
for e in range(len(EPL)):
if EPL[e][0] > 0:
# no stacking
continue
for o in range(len(Ors)):
''' Skip if an orientation exception '''
if ptp[p][1] in Or_Ex and o in Or_Ex[ptp[p][1]]:
continue
#if Feas(Dims, EPL[e], Bin_size, Ors[o], Curr_Items, Curr_EP) and Merit_Res(Dims, Ors[o], EPL[e], RS_List[e], Bin_size) < Best_Merit:
if Feas(Dims, EPL[e], Bin_size, Ors[o], Curr_Items, Curr_EP) and Merit_WD(Dims, Ors[o], EPL[e], Curr_Items, Curr_EP) < Best_Merit:
#Best_Merit = Merit_Res(Dims, Ors[o], EPL[e], RS_List[e], Bin_size)
Best_Merit = Merit_WD(Dims, Ors[o], EPL[e], Curr_Items, Curr_EP)
e_cand = e
o_cand = o
packed_in = c
if packed_in is not None:
k = packed_in
EPL = Cr[k]
Curr_Items = Cr_Item[k]
#Curr_EP = Cr_EPs[k]
RS_List = Cr_RS[k]
Dims = re_order(Dims, Ors[o_cand])
NE = Update_EP(Dims, EPL[e_cand], Curr_EP, Curr_Items)
## before had this appending the ORIGINAL orientation
Cr_Item[k].append(Dims)
Cr_EPs[k].append(EPL[e_cand])
L = len(Cr_EPs[k])
del EPL[e_cand]
for i in range(len(NE)):
EPL.append(NE[i])
# Sort the EPs by lowest z, y, x respectively...
# might want to change this, depending on how things go...
for i in range(3):
# the [2-i] means it sorts the 0 index last -- i.e. really ordered
# by smallest height... wherever height is in the list...
order_i = [np.argsort(EPL,0)[r][2-i] for r in range(len(EPL))]
#### Seems to be ok to do this in place like this...
EPL = [EPL[order_i[j]] for j in range(len(order_i))]
#print('EPL:',EPL)
#print('RSList:', RS_List)
'''
WILL NEED TO CHANGE THIS so that it returns the format that Kyle wants
need to make a dictionary mapping the orientation chosen in the loop
to the relevant orientation in the "XY 90 degree" language...
'''
Result = [ptp[p][3],{'name': ptp[p][1], 'rotationXY': rotXY[o_cand], 'rotationYZ': rotYZ[o_cand], 'rotationXZ': rotXZ[o_cand],'bottomLeftX':Cr_EPs[k][L-1][1] + Bin_size[1]*packed_in, 'bottomLeftY': Cr_EPs[k][L-1][2], 'bottomLeftZ': Cr_EPs[k][L-1][0], 'crate': packed_in}]
#orientation HxWxD = {Dims}, bottom left at {Cr_EPs[k][L-1]} in Crate {packed_in}.
Packings.append(Result)
Cr[k] = EPL
#Cr_Item[k] = Curr_Items
#Cr_EPs[k] = Curr_EP
Cr_RS[k] = RS_List
if packed_in is None:
Cr.append([[0,0,0]])
Cr_RS.append([Bin_size])
Cr_Item.append([])
Cr_EPs.append([])
c = len(Cr)-1
packed_in = c
EPL = Cr[c]
Curr_Items = Cr_Item[c]
Curr_EP = Cr_EPs[c]
RS_List = Cr_RS[c]
e_cand = 0
o_cand = None
for o in range(len(Ors)):
''' Skip if an orientation exception '''
if ptp[p][1] in Or_Ex and o in Or_Ex[ptp[p][1]]:
continue
#if Feas(Dims, EPL[e_cand], Bin_size, Ors[o], Curr_Items, Curr_EP) and Merit_Res(Dims, Ors[o], EPL[e_cand], RS_List[e_cand], Bin_size) < Best_Merit:
if Feas(Dims, EPL[e_cand], Bin_size, Ors[o], Curr_Items, Curr_EP) and Merit_WD(Dims, Ors[o], EPL[e_cand], Curr_Items, Curr_EP) < Best_Merit:
#Best_Merit = Merit_Res(Dims, Ors[o], EPL[e_cand], RS_List[e_cand], Bin_size)
Best_Merit = Merit_WD(Dims, Ors[o], EPL[e_cand], Curr_Items, Curr_EP)
o_cand = o
Dims = re_order(Dims, Ors[o_cand])
NE = Update_EP(Dims, EPL[e_cand], Curr_EP, Curr_Items)
## same thing, was adding the ORIGNINAL Orientation before...
Curr_Items.append(Dims)
Curr_EP.append(EPL[e_cand])
L = len(Curr_EP)
del EPL[e_cand]
for i in range(len(NE)):
EPL.append(NE[i])
# Sort the EPs by lowest height, width, and depth respectively...
# might want to change this, depending on how things go...
for i in range(3):
order_i = [np.argsort(EPL,0)[r][2-i] for r in range(len(EPL))]
RS_List = [RS_List[order_i[j]] for j in range(len(order_i))]
EPL = [EPL[order_i[j]] for j in range(len(order_i))]
Result = [ptp[p][3],{'name': ptp[p][1], 'rotationXY': rotXY[o_cand], 'rotationYZ': rotYZ[o_cand], 'rotationXZ': rotXZ[o_cand],'bottomLeftX': Curr_EP[L-1][1]+Bin_size[1]*packed_in, 'bottomLeftY': Curr_EP[L-1][2], 'bottomLeftZ': Curr_EP[L-1][0], 'crate': packed_in}]
Packings.append(Result)
Cr[c] = EPL
Cr_Item[c] = Curr_Items
Cr_EPs[c] = Curr_EP
################################################################################
######## Generate dimensions of crates
################################################################################
'''
X - width
Y - Depth
Z - Height
(Z,X,Y)
'''
Crate_dims = []
for i in range(len(Cr_Item)):
H_dim = max([Cr_Item[i][j][0] + Cr_EPs[i][j][0] for j in range(len(Cr_Item[i]))])
W_dim = max([Cr_Item[i][j][1] + Cr_EPs[i][j][1] for j in range(len(Cr_Item[i]))])
D_dim = max([Cr_Item[i][j][2] + Cr_EPs[i][j][2] for j in range(len(Cr_Item[i]))])
Crate_dims.append([H_dim, W_dim, D_dim])
for i in range(len(Pieces)):
for j in range(len(Packings)):
if Packings[j][0] == i:
orientations.append(Packings[j][1])
print('orientations', orientations)
print('orientations = packings', Packings)
return {'pieces': orientations}
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Dependencies: python3-pandas python3-plotly
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.colors
df = pd.read_csv(".perf-out/all.csv")
fig = make_subplots(
rows=2, cols=2,
horizontal_spacing = 0.1,
vertical_spacing = 0.1,
subplot_titles=(
"Requests per second",
"Latency: 90%ile", "Latency: 99%ile", "Latency: 99.9%ile"),
)
fig.update_yaxes(row=1, col=1, rangemode="tozero")
fig.update_yaxes(row=1, col=2, title_text="milliseconds",
rangemode="tozero")
fig.update_yaxes(row=2, col=1, title_text="milliseconds",
rangemode="tozero")
fig.update_yaxes(row=2, col=2, title_text="milliseconds",
rangemode="tozero")
fig.update_layout(legend_orientation="h", hovermode="x")
colors = plotly.colors.DEFAULT_PLOTLY_COLORS
for i, s in enumerate(set(df.server.values)):
dfs = df[df.server == s]
color = colors[i]
fig.add_trace(
go.Scatter(
x=dfs["size"],
y=dfs.reqps,
mode='lines+markers',
line=dict(color=color),
showlegend=True,
name=s),
row=1, col=1)
for (row, col), k in [
((1, 2), "lat90"),
((2, 1), "lat99"),
((2, 2), "lat99.9")]:
fig.add_trace(
go.Scatter(
x=dfs["size"],
y=dfs[k]/1000, # convert us -> ms
mode='lines+markers',
line=dict(color=color),
showlegend=False,
name=s),
row=row, col=col)
fig.write_html('.perf-out/results.html', auto_open=False)
|
nilq/baby-python
|
python
|
import json, re, pymongo, os
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
from unidecode import unidecode
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from datetime import date, datetime
from scrapy import settings
format="%d/%m/%Y"
class NoticiasPipeline:
def process_item(self, item, spider):
return item
class DropFaultDataPipeline:
def process_item(self, item, spider):
if item['data']:
return item
else:
raise DropItem("Missing data in %s" % item)
class DropFaultCorpoPipeline:
def process_item(self, item, spider):
if item['corpo']:
return item
else:
raise DropItem("Missing corpo in %s" % item)
class DropNotCovid19:
def process_item(self, item, spider):
if (re.search("covid", item['corpo']) or re.search("vacina", item['corpo']) or re.search("doses", item['corpo']) or re.search("cloroquina", item['corpo']) or re.search("cpi", item['corpo']) ):
return item
else:
raise DropItem("Is not notice about Covid-19")
class LowerPipeline:
def process_item(self, item, spider):
item['corpo'] = item['corpo'].lower()
return item
class TagsSpecialsCorpoPipeline:
def process_item(self,item,spider):
str1 = item['corpo']
str1 = unidecode(str1)
str1 = re.sub('["\'\-,;%\[\]\{\}.*:@#?!&$\(\)/|]', ' ', str1)
item['corpo'] = str1
return item
class RemoveStopwordsPipeline:
def process_item(self,item,spider):
text = item['corpo']
stop_words = set(stopwords.words('portuguese'))
word_tokens = word_tokenize(text)
filtered_text = [w for w in word_tokens if w not in stop_words]
item['corpo'] = filtered_text
return item
class ProcessedCorpoPipeline:
def process_item(self,item,spider):
processed_corpo = DataUtility.pre_processing(item['corpo'])
item['corpo'] = processed_corpo
return item
class MongoDBPipeline:
collection_name = 'notices_collection'
#collection_name = 'teste'
def __init__(self, mongo_uri, mongo_db):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
@classmethod
def from_crawler(cls, crawler):
return cls(
mongo_uri=crawler.settings.get('MONGO_URI'),
mongo_db=crawler.settings.get('MONGO_DATABASE', 'crawler')
)
def open_spider(self,spider):
#self.client = pymongo.MongoClient(self.mongo_uri)
self.client = pymongo.MongoClient("your_url_database")
self.db = self.client[self.mongo_db]
def close_spider(self,spider):
self.client.close()
def process_item(self,item,spider):
link = item['link']
title = item['title']
data = item['data']
corpo = item['corpo']
data = datetime.strptime(data,format).date()
self.db[self.collection_name].insert_one({
'link': link,
'title': title,
'data': datetime(data.year, data.month, data.day),
'corpo': corpo
})
return item
|
nilq/baby-python
|
python
|
from math import *
from scipy.integrate import quad
from scipy.integrate import dblquad
from scipy import integrate
from scipy import special
from numpy import median
from numpy import linspace
import numpy as np
from copy import deepcopy
from scipy.stats import fisk
"Defining the parameters for the tests"
alpha = 0.05
delta = 0.002
k = int(log(2 * int(log(1 / delta)) * (1 / delta)))
beta = 0.001
eta = 0.001
max_iterations = 15
max_gradient_descents = 15
sample_size = 2000
optimal_w = [2, 3]
initial_w = [4, 5]
initial_v = 5
mu = [0, 0]
covariance = [[10, 0], [0, 10]]
X = np.random.multivariate_normal(mu, covariance, sample_size).T
X1 = X[:, 0:int(sample_size / 2)]
X2 = X[:, int(sample_size / 2):sample_size]
max_trials = 1000
min_sample = 15
max_sample = 125
samples = 20
prediction = linspace(min_sample, max_sample, samples).astype(int)
|
nilq/baby-python
|
python
|
from rest_framework import permissions
class AnonymousPermission(permissions.BasePermission):
"""
Non Anonymous Users.
"""
message = "You are already registered. Please logout and try again."
def has_permission(self, request, view):
print(not request.user.is_authenticated)
return not request.user.is_authenticated
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Object level permissions that only allow owner of object to edit it.
"""
message = "You cannot edit this object, since you are not the owner of this object."
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.owner == request.user
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
from operator import itemgetter
from copy import deepcopy
from pydruid.utils import aggregators
from pydruid.utils import filters
class TestAggregators:
def test_aggregators(self):
aggs = [('longsum', 'longSum'), ('doublesum', 'doubleSum'),
('min', 'min'), ('max', 'max'), ('count', 'count'),
('hyperunique', 'hyperUnique')]
aggs_funcs = [(getattr(aggregators, agg_name), agg_type)
for agg_name, agg_type in aggs]
for f, agg_type in aggs_funcs:
assert f('metric') == {'type': agg_type, 'fieldName': 'metric'}
def test_filtered_aggregator(self):
filter_ = filters.Filter(dimension='dim', value='val')
aggs = [aggregators.count('metric1'),
aggregators.longsum('metric2'),
aggregators.doublesum('metric3'),
aggregators.min('metric4'),
aggregators.max('metric5'),
aggregators.hyperunique('metric6'),
aggregators.cardinality('dim1'),
aggregators.cardinality(['dim1', 'dim2'], by_row=True)]
for agg in aggs:
expected = {
'type': 'filtered',
'filter': {
'type': 'selector',
'dimension': 'dim',
'value': 'val'
},
'aggregator': agg
}
actual = aggregators.filtered(filter_, agg)
assert actual == expected
def test_build_aggregators(self):
agg_input = {
'agg1': aggregators.count('metric1'),
'agg2': aggregators.longsum('metric2'),
'agg3': aggregators.doublesum('metric3'),
'agg4': aggregators.min('metric4'),
'agg5': aggregators.max('metric5'),
'agg6': aggregators.hyperunique('metric6'),
'agg7': aggregators.cardinality('dim1'),
'agg8': aggregators.cardinality(['dim1', 'dim2'], by_row=True)
}
built_agg = aggregators.build_aggregators(agg_input)
expected = [
{'name': 'agg1', 'type': 'count', 'fieldName': 'metric1'},
{'name': 'agg2', 'type': 'longSum', 'fieldName': 'metric2'},
{'name': 'agg3', 'type': 'doubleSum', 'fieldName': 'metric3'},
{'name': 'agg4', 'type': 'min', 'fieldName': 'metric4'},
{'name': 'agg5', 'type': 'max', 'fieldName': 'metric5'},
{'name': 'agg6', 'type': 'hyperUnique', 'fieldName': 'metric6'},
{'name': 'agg7', 'type': 'cardinality', 'fieldNames': ['dim1'], 'byRow': False},
{'name': 'agg8', 'type': 'cardinality', 'fieldNames': ['dim1', 'dim2'], 'byRow': True},
]
assert (sorted(built_agg, key=itemgetter('name')) ==
sorted(expected, key=itemgetter('name')))
def test_build_filtered_aggregator(self):
filter_ = filters.Filter(dimension='dim', value='val')
agg_input = {
'agg1': aggregators.filtered(filter_,
aggregators.count('metric1')),
'agg2': aggregators.filtered(filter_,
aggregators.longsum('metric2')),
'agg3': aggregators.filtered(filter_,
aggregators.doublesum('metric3')),
'agg4': aggregators.filtered(filter_,
aggregators.min('metric4')),
'agg5': aggregators.filtered(filter_,
aggregators.max('metric5')),
'agg6': aggregators.filtered(filter_,
aggregators.hyperunique('metric6')),
'agg7': aggregators.filtered(filter_,
aggregators.cardinality('dim1')),
'agg8': aggregators.filtered(filter_,
aggregators.cardinality(['dim1', 'dim2'], by_row=True)),
}
base = {
'type': 'filtered',
'filter': {
'type': 'selector',
'dimension': 'dim',
'value': 'val'
}
}
aggs = [
{'name': 'agg1', 'type': 'count', 'fieldName': 'metric1'},
{'name': 'agg2', 'type': 'longSum', 'fieldName': 'metric2'},
{'name': 'agg3', 'type': 'doubleSum', 'fieldName': 'metric3'},
{'name': 'agg4', 'type': 'min', 'fieldName': 'metric4'},
{'name': 'agg5', 'type': 'max', 'fieldName': 'metric5'},
{'name': 'agg6', 'type': 'hyperUnique', 'fieldName': 'metric6'},
{'name': 'agg7', 'type': 'cardinality', 'fieldNames': ['dim1'], 'byRow': False},
{'name': 'agg8', 'type': 'cardinality', 'fieldNames': ['dim1', 'dim2'], 'byRow': True},
]
expected = []
for agg in aggs:
exp = deepcopy(base)
exp.update({'aggregator': agg})
expected.append(exp)
built_agg = aggregators.build_aggregators(agg_input)
actual = sorted(built_agg, key=lambda k: itemgetter('name')(
itemgetter('aggregator')(k)))
expected = sorted(expected, key=lambda k: itemgetter('name')(
itemgetter('aggregator')(k)))
assert expected == actual
|
nilq/baby-python
|
python
|
import os
from django.templatetags.static import static
from django.utils.html import format_html
from django.utils.module_loading import import_string
from wagtail.core import hooks
from wagtail_icons.settings import BASE_PATH, SETS
@hooks.register("insert_global_admin_css")
def global_admin_css():
stylesheets = []
for iconset_path in SETS:
iconset_class = import_string(iconset_path)
iconset_instance = iconset_class()
stylesheets += iconset_instance.get_css_files()
html = "".join(
'<link rel="stylesheet" href="%s">'
% static(os.path.join(BASE_PATH, stylesheet))
for stylesheet in stylesheets
)
return format_html(html)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Author: Ting'''
import logging
import traceback
from argparse import ArgumentParser
from datetime import date
from pprint import pprint
from subprocess import call
import re
from collections import defaultdict
from os.path import join, abspath, dirname, isfile
import csv
import xlrd
# import matplotlib.pyplot as plt
import psycopg2
log = logging.getLogger(name=__file__)
class DataIO:
today_str = str(date.today()).replace("-", "")
file_dir = dirname(abspath(__file__))
main_dir = file_dir.split('git')[0]
def __init__(self, config=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.parse_args = self._parse_args()
if config is None:
self.file_path = input('Please enter the location of your file: ')
return
self.config = config
try:
self.file_path_list = config.get('file_path')
except Exception:
log.error('Error! No path found.')
return
for file_path in self.file_path_list:
try:
parse_file(file_path)
except Exception:
log.error('Error! Unknown file type, please select only Excel, CSV, or Postgres Login.')
def _parse_args(self):
parser = ArgumentParser()
parser.add_argument('--filename', required=True, help="Filename.")
parser.add_argument('--file-type', required=True, help="File Type.")
# parser.add_argument('--debug', required=False, action='store_true', help="Log in debug mode.")
# parser.add_argument('--querysrch', required=False, action='store_true', nargs='+', help="Initiate search of the site entered.")
# if both querysrch and tags are there, use parser.parse_args('args'.split())
args, trash = parser.parse_known_args()
return args
def parse_file(file_path):
file_type = file_path.split('.')[1]
if file_type == 'csv':
return True # read_csv(file_type)
elif 'xls' in file_type:
return True # read_xlsx(file_path)
elif 'txt' in file_type:
return True # read_xlsx(file_path)
elif file_type is None and 'dbname' in file_path:
return True # read_db(file_path)
else:
return None
def read_csv(filename, sheet_names='Turnout Rates'):
file_path = join(main_dir, filename)
with open(file_path) as csv_handle:
data_set = list(csv.reader(csv_handle, delimiter=','))
return data_set
def save_xlsx_to_csv(filename, xlsx_sh, save_to_csv=True):
filename = filename.split('.')[0]
sh = xlsx_sh
if save_to_csv is True:
csv_file = open(filename + '.csv', 'w', newline='')
wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
for rownum in range(sh.nrows):
wr.writerow(sh.row_values(rownum))
csv_file.close()
def read_xlsx(filename, sheet_names='Turnout Rates'):
# xlsx is expected to have xls in the string
file_path = join(main_dir, filename)
wb = xlrd.open_workbook(file_path)
sh = wb.sheet_by_name(sheet_names)
save_xlsx_to_csv(filename, sh, save_to_csv=False)
# Organize data
data_class = {}
data_subclass = {}
data_set = {}
for rownum in range(sh.nrows):
if rownum > 1:
colnum = 0
data = {}
for ds in sh.row_values(rownum):
data[data_subclass[colnum]] = ds
colnum += 1
data_set[data['State']] = data
elif rownum == 1:
data_subclass = sh.row_values(1)
data_subclass[0] = 'State'
data_subclass = tuple(data_subclass)
elif rownum == 0:
for dc in sh.row_values(0):
data_class[dc] = {}
return data_set
def log_data_to_psql(args_dict, db_name='test_db'):
'''Placeholder for a customized log_data_psql function.'''
db_name = 'dbname=' + db_name
with psycopg2.connect(db_name).cursor() as cur:
args_str = ','.join(cur.mogrify("(%s,%s,%s,%s,%s,%s,%s,%s,%s)", x) for x in args_dict)
cur.execute("INSERT INTO table VALUES " + args_str)
def read_db(query, db_name='test_db'):
db_name = 'dbname=' + db_name
if 'SELECT' not in query.upper():
return
with psycopg2.connect(db_name).cursor() as cur:
cur.execute(query)
return cur.fetchall()
def read_text_file(filename):
key_phrase = ''
expression = ''
if isfile(filename) is True:
with open(filename, 'rb') as file_content:
file_content_readlines = file_content.readlines()
# Read the last line
for line in reversed(file_content_readlines):
if key_phrase in line:
# Do something fancy AF
break
else:
# Do something else fancy AF
break
# Base on that fancy thing, choose what to do
if True:
log.info('Reading text file')
for row in file_content_readlines:
# Search for expression
if expression in row:
matching_expression = re.search('(?<=@)\w+', row).group(0)
if matching_expression in expression_dict:
# Do something fancy
keys = [k for k in table_dict.keys()]
content = [tuple(str(v) for k, v in table_dict.iteritems())]
if __name__ == '__main__':
# parsed_args = _parse_args()
filename = '2016 November General Election.xlsx'
data_set = read_xlsx(filename)
print(data_set)
# if parsed_args.filename:
# if parsed_args.file_type == 'txt':
# read_text_file(parsed_args.filename)
# if parsed_args.file_type == 'tbl':
# read_table(parsed_args.filename)
# if parsed_args.file_type == 'db':
# read_db(parsed_args.filename)
### Reference:
# http://stackoverflow.com/questions/20105118/convert-xlsx-to-csv-correctly-using-python
# http://stackoverflow.com/questions/1038160/data-structure-for-maintaining-tabular-data-in-memory
|
nilq/baby-python
|
python
|
#
# wcsmod -- module wrapper for WCS calculations.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
We are fortunate to have several possible choices for a python WCS package
compatible with Ginga: astlib, kapteyn, starlink and astropy.
kapteyn and astropy wrap Mark Calabretta's "WCSLIB", astLib wraps
Jessica Mink's "wcstools", and I'm not sure what starlink uses (their own?).
Note that astlib requires pyfits (or astropy) in order to create a WCS
object from a FITS header.
To force the use of one, do:
.. code-block:: python
from ginga.util import wcsmod
wcsmod.use('kapteyn')
before you load any images. Otherwise Ginga will try to pick one for
you.
Note that you can register custom WCS types using:
.. code-block:: python
from ginga.util.wcsmod.common import register_wcs
register_wcs('mywcs', MyWCSClass, list_of_coord_types)
Look at the implemented WCS wrappers for details.
"""
import sys
import os.path
import glob
from ginga.misc.ModuleManager import my_import
from . import common
# Module variables that get configured at module load time
# or when use() is called
wcs_configured = False
WCS = None
"""Alias to the chosen WCS system."""
# Holds names of coordinate types
coord_types = []
display_types = ['sexagesimal', 'degrees']
# try to load them in this order until we find one that works.
# If none can be loaded, we default to the BareBones dummy WCS
wcs_try_order = ('astropy', 'astropy_ape14', 'kapteyn', 'starlink', 'astlib',
'barebones')
wcs_home = os.path.split(sys.modules[__name__].__file__)[0]
def use(wcspkg, raise_err=True):
"""Choose WCS package."""
global coord_types, wcs_configured, WCS
if wcspkg not in common.custom_wcs:
# Try to dynamically load WCS
modname = 'wcs_%s' % (wcspkg)
path = os.path.join(wcs_home, '%s.py' % (modname))
try:
my_import(modname, path)
except ImportError as e:
if raise_err:
raise e
return False
if wcspkg in common.custom_wcs:
bnch = common.custom_wcs[wcspkg]
WCS = bnch.wrapper_class
coord_types = bnch.coord_types
wcs_configured = True
return True
return False
# configure at least one WCS wrapper
if not wcs_configured:
# Try some preconfigured names
for name in wcs_try_order:
if use(name, raise_err=False):
break
if not wcs_configured:
wcs_path = os.path.join(wcs_home, 'wcs_*.py')
# look up WCS wrappers we have in this directory
for path in glob.glob(wcs_path):
dirname, filename = os.path.split(path)
modname, ext = os.path.splitext(filename)
modname = modname[4:] # strip off "wcs_"
if use(modname, raise_err=False):
break
def get_wcs_wrappers():
return list(common.custom_wcs.keys())
def get_wcs_class(name):
"""Get a WCS class corresponding to the registered name.
Will raise a KeyError if a class of the given name does not exist.
"""
return common.custom_wcs[name]
# END
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Configure logging
#
import logging
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true", help="Show debugging output.")
args = parser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=log_level, format='[%(levelname)8s] %(name)-15s : %(message)s')
#
# The following demonstrates random access
# operations on sequences.
#
#
# Generate a sequence
#
import os
print()
print("Running: \"python3 ./write_from_files.py > /dev/null\"")
os.system("python3 ./write_from_files.py > /dev/null")
print()
print()
#
# Read single items in linear mode
#
import itypes
from itypes import Sequence
seq = Sequence("out_write_from_files/data.gridseq").read()
device = "numpy"
# Access by linear index
frame = seq.frames()[0]
# NOTE that you can also access single ids as in read_linear_entry.py
struct = frame.torch_struct("hwc", device)
print('Read frame:')
print(struct)
print()
# Access by scene and frame name
frame = seq.data["Scene-001"]["0000000001"]
# NOTE that you can also access single ids as in read_linear_entry.py
struct = frame.torch_struct("hwc", device)
print('Read frame:')
print(struct)
print()
|
nilq/baby-python
|
python
|
from multiprocessing import Manager, Process
def to_add(d, k, v):
d[k] = v
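# Note: a plain dict would not be shared between processes -- each child gets
# its own copy. Manager().dict() (used below) returns a proxy backed by a
# manager server process, so updates made in to_add are visible to the parent.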
if __name__ == "__main__":
process_dict = Manager().dict()
p1 = Process(target=to_add, args=(process_dict, 'name', 'li'))
p2 = Process(target=to_add, args=(process_dict, 'age', 13))
p1.start()
p2.start()
p1.join()
p2.join()
print(process_dict)
|
nilq/baby-python
|
python
|
''' Create words from an existing wordlist '''
import random
import re
class WordBuilder(object):
''' uses an existing corpus to create new phonemically consistent words '''
def __init__(self, initial='>', terminal='<', chunk_size=2):
#indicators for start and ends of words - set if necessary to avoid collision
self.initial = initial
self.terminal = terminal
self.chunk_size = chunk_size
self.links = {
self.initial: []
}
self.average_word_length = 0
self.shortest = None
def ingest(self, corpus_file):
''' load and parse a pre-formatted and cleaned text file. Garbage in, garbage out '''
corpus = open(corpus_file)
total_letters = 0
total_words = 0
shortest_word = 100
for word in corpus.readlines():
# clean word
word = word.strip()
word = re.sub(r'[\',\.\"]', '', word)
total_letters += len(word)
total_words += 1
shortest_word = len(word) if len(word) < shortest_word else shortest_word
# iterate through n letter groups, where 1 <= n <= 3
n = self.chunk_size
start = 0
# >: C, Cys: t, yst: i
self.links[self.initial].append(word[0:n])
for position in range(n, len(word)):
start = position - n if position - n >= 0 else 0
base = word[start:position]
if not base in self.links:
self.links[base] = []
self.links[base].append(word[position])
if not word[-n:] in self.links:
self.links[word[-n:]] = []
self.links[word[-n:]].append(self.terminal)
self.average_word_length = total_letters / total_words
self.shortest = shortest_word
def get_word(self, word=None):
''' creates a new word '''
word = self.initial if word is None else word
if not self.terminal in word:
if len(word) > self.average_word_length and \
self.terminal in self.links[word[-self.chunk_size:]] \
and random.randint(0, 1):
addon = self.terminal
else:
options = self.links[word[-self.chunk_size:]]
addon = random.choice(options)
word = word + addon
return self.get_word(word)
return word[1:-1]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 01 20:28:46 2016
@author: Anna
Directory structure: assumes galaxy directory is the working directory, input_data/ should contain pars, phot and fake files
This code generates a CMD plot in the same directory where the program, CMDscript.py, is
Use optional -phot, -fake flags to specify their location, or change the default setting in parse_options()
Assumes the default values of the column numbers for the phot data to be 2, 4, 9 and 11; specify the values in the bash script or edit the default values
Give any name for .png file after -fake=... The file gets saved as [filename].png
Syntax: python CMDscript.py [path to working directory, ending in /] -phot=[path from working directory to phot file] -fake=...
e.g. python CMDscript.py /work/04316/kmcquinn/wrangler/shield/galaxies/agc223254/ -phot=phot/a223254_tilted_ellipse.phot3 2 4 9 11 -fake=input_data/fake AGC223254
The output files (.png files) are stored in the same directory as this program's
"""
import matplotlib
matplotlib.use("Agg")
# Comment this line if not running on tacc
import matplotlib.pyplot as plt
plt.ion()
import numpy as np
import pylab as pl
import sys
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator
import argparse
#def parse_options():
# Creates argument parser
parser = argparse.ArgumentParser(description='Process input parameters.')
# Defines required arguments used on the command line
parser.add_argument('galaxydir',action='store',help='path to galaxy directory (note: this is the whole path ending in the galaxy directory)')
parser.add_argument('-phot',dest='phot',action='store',help="location of phot file from galaxy directory",default='input_data/phot')
parser.add_argument('col1',action='store',help="column number for V",default='2', type=int)
parser.add_argument('col2',action='store',help="column number for Verr",default='4', type = int)
parser.add_argument('col3',action='store',help="column number for I",default='9',type = int)
parser.add_argument('col4',action='store',help="column number for Ierr",default='11',type=int)
parser.add_argument('-fake',dest='fake',action='store',help="location of fake file from galaxy directory",default='input_data/fake')
parser.add_argument('gname',action = 'store', help="names the galaxy", type=str)
# Parses through the arguments and saves them within the keyword args
args = parser.parse_args()
#plot_title = sys.argv[1]
#redfilter = sys.argv[2]
#bluefilter = sys.argv[3]
#plot_title = "AGC238890"
# Parses through command line arguments
#args = parse_options()
galdir = args.galaxydir
#col_1 = args.col1
#col_2 = args.col2
#col_3 = args.col3
#col_4 = args.col4
# Defines the location of phot, and fake
phot = galdir + args.phot
fake = galdir + args.fake
#print filtername(pars)
gal_name = args.gname
plot_title= gal_name
redfilter = "F814W"
bluefilter = "F606W"
print(str(galdir))
#Real Data
"""Parsers the path to phot file"""
dat = np.genfromtxt(phot)
#dat = np.genfromtxt(args.phot+'')
#remove all NaNs
dat = dat[~np.isnan(dat).any(axis=1)]
"""Parsers the column numbers for fake"""
#V = np.array(dat[:,2])
V= np.array(dat[:,args.col1])
#Verr = np.array(dat[:,4])
Verr = np.array(dat[:,args.col2])
#I = np.array(dat[:,9])
I = np.array(dat[:,args.col3])
#Ierr = np.array(dat[:,11])
Ierr = np.array(dat[:,args.col4])
VmI = V-I
VmIerr = (((Verr)**2)+(((Ierr)**2)))**.5
print(VmIerr)
#fake error stuff
fdat = np.loadtxt(fake)
#fdat = np.loadtxt(args.fake)
fdat = np.asarray([d for d in fdat if not 99.999 in d])
fVerr = np.array(fdat[:,2])
fIerr = np.array(fdat[:,3])
fI = np.array(fdat[:,1])
fV = np.array(fdat[:,0])
fVmIerr = (fVerr**2 + fIerr**2)**0.5
print(fVmIerr)
#Here I am finding the max and min values of the data
#We will use this to automate the figsize thing
maxV = np.amax(V) + .3
print(max(V))
maxI = np.amax(I) + .3
minV = np.amin(V) + 1.3
minI = np.amin(I) + 1.3
meanVmI = np.mean(VmI)
maxVmI = (meanVmI) + 5*np.std(VmI)
minVmI = (meanVmI) - 4*np.std(VmI)
Ierrup = np.around(maxI - .5)
Verrup = np.around(maxV - .5)
Ierrlow = np.around(minI + .5)
Verrlow = np.around(minV + .5)
errx = maxVmI -.3
print(maxVmI)
print(errx)
#maxVmI = np.amax(VmI) + .2
#minVmI = np.amin(VmI) - .2
#print minI
#print min(I)
#Making plain scatter plot
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.scatter(VmI, I, color = 'black', s = 2)
ax.set_ylim(maxI, minI)
ax.set_xlim(minVmI, maxVmI)
ax.set_xlabel(bluefilter + '-' + redfilter + ' (mag)')
ax.set_ylabel(redfilter + ' (mag)')
fig.text(.18, .85, 'Number of stars: '+str(len(V)))
majorLocator = MultipleLocator(1)
majorFormatter = FormatStrFormatter('%d')
minorLocator = AutoMinorLocator(10)
ax.yaxis.set_major_locator(majorLocator)
ax.yaxis.set_major_formatter(majorFormatter)
ax.yaxis.set_minor_locator(minorLocator)
plt.suptitle(plot_title)
#ax.set_title(plot_title)
#adding error to the plot
#c = 3
c = errx
#location of the line
errlist = []
ylist = []
Verrlist = []
VmIerrlist = []
#Ilist = range(18, 27)
Ilist = range(int(Ierrlow), int(Ierrup))
for a in Ilist:
Iw = np.where(I > a)
Iwr = np.where(I[Iw] < a+1)
fIw = np.where(fI > a)
fIwr = np.where(fI[fIw] < a+1)
Ierravg = ((np.mean(Ierr[Iwr]))**2 + (np.mean(abs(fIerr[fIwr]))**2))**.5
errlist.append(Ierravg)
ylist.append(a+.5)
VmIerravg = (np.mean(VmIerr[Iwr])**2 + np.mean(abs(fVmIerr[fIwr]))**2)**.5
VmIerrlist.append(VmIerravg)
xlist = c*np.ones_like(ylist)
plt.errorbar(xlist, ylist, xerr = VmIerrlist, yerr= errlist, fmt = '.', capsize=0)
#adding in the contour script
def multidigitize(VmI,I,binsVmI,binsV):
dVmI = np.digitize(VmI.flat, binsVmI)
dI = np.digitize(I.flat, binsV)
return dVmI,dI
def linlogspace(xmin,xmax,n):
return np.logspace(np.log10(xmin),np.log10(xmax),n)
#here's the contour actual values
def adaptive_param_plot(VmI,I,
bins=3,
threshold=2,
marker='.',
marker_color=None,
ncontours=5,
fill=False,
mesh=False,
contourspacing=linlogspace,
mesh_alpha=0.5,
norm=None,
axis=None,
cmap=None,
**kwargs):
if axis is None:
axis = pl.gca()
axis.set_ylim(28, 18)
ok = np.isfinite(VmI)*np.isfinite(I)
if hasattr(bins, 'ndim') and bins.ndim == 2:
nbinsVmI, nbinsI = bins.shape[0]-1, bins.shape[1]-1
else:
try:
nbinsVmI = nbinsI = len(bins)-1
except TypeError:
nbinsVmI = nbinsI = bins
H, bVmI, bI = np.histogram2d(VmI[ok], I[ok], bins = bins)
dVmI, dI = multidigitize(VmI[ok], I[ok], bVmI, bI)
plottable = np.ones([nbinsVmI+2, nbinsI+2], dtype = 'bool')
plottable_hist = plottable[1:-1, 1:-1]
assert H.shape == plottable_hist.shape
plottable_hist[H > threshold] = False
H[plottable_hist] = 0
toplot = plottable[dVmI, dI]
cVmI = (bVmI[1:]+bVmI[:-1])/2
cI = (bI[1:]+bI[:-1])/2
levels = contourspacing(threshold-0.5, H.max(), ncontours)
if cmap is None:
cmap = plt.cm.get_cmap()
cmap.set_under((0,0,0,0))
cmap.set_bad((0,0,0,0))
if fill:
con = axis.contourf(cVmI, cI, H.T, levels= levels, norm = norm, cmap = cmap, **kwargs)
else:
con = axis.contour(cVmI, cI, H.T,levels=levels,norm=norm,cmap=cmap,**kwargs)
if mesh:
mesh = axis.pcolormesh(bVmI, bI, H.T, **kwargs)
mesh.set_alpha(mesh_alpha)
#Is there a way to add lines w the contour levels?
if 'linestyle' in kwargs:
kwargs.pop('linestyle')
#if i wanted to plot the scatter from this script intstead, but I can't make it look as nice
# axis.plot(VmI[ok][toplot],
# I[ok][toplot],
# linestyle='none',
# marker=marker,
# markerfacecolor=marker_color,
# markeredgecolor=marker_color,
# **kwargs)
return cVmI, cI, H, VmI[ok][toplot], I[ok][toplot]
adaptive_param_plot(VmI, I, bins = 100, fill = True, ncontours = 7, threshold = 2, axis = ax)
#SECOND PLOT
#Making plain scatter plot
#fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 2)
ax1.scatter(VmI, V, color = 'black', s = 2)
ax1.set_ylim(maxV, minV)
ax1.set_xlim(minVmI, maxVmI)
#ax.set_ylim(29, 18)
#ax.set_xlim(-.98, 3.48)
ax1.set_xlabel(bluefilter + '-' + redfilter + ' (mag)')
ax1.set_ylabel(bluefilter + ' (mag)')
ax1.yaxis.set_label_coords(-0.12, 0.5)
#fig.text(.6, .85, 'Number of stars: '+str(len(V)))
majorLocator = MultipleLocator(1)
majorFormatter = FormatStrFormatter('%d')
minorLocator = AutoMinorLocator(10)
ax1.yaxis.set_major_locator(majorLocator)
ax1.yaxis.set_major_formatter(majorFormatter)
ax1.yaxis.set_minor_locator(minorLocator)
ax.get_shared_y_axes().join(ax, ax1)
ax.set_yticklabels([])
ax1.autoscale()
#ax.set_title(plot_title)
#adding error to the plot
#c = 3
c = errx
#location of the line
errlist = []
ylist = []
Verrlist = []
VmIerrlist = []
#Ilist = range(18, 28)
Ilist = range(int(Verrlow), int(Verrup))
for a in Ilist:
Iw = np.where(I > a)
Iwr = np.where(I[Iw] < a+1)
fIw = np.where(fI > a)
fIwr = np.where(fI[fIw] < a+1)
Ierravg = ((np.mean(Ierr[Iwr]))**2 + (np.mean(abs(fIerr[fIwr])))**2)**.5
errlist.append(Ierravg)
ylist.append(a+.5)
VmIerravg = (np.mean(VmIerr[Iwr])**2 + np.mean(fVmIerr[fIwr])**2)**.5
VmIerrlist.append(VmIerravg)
xlist = c*np.ones_like(ylist)
plt.errorbar(xlist, ylist, xerr = VmIerrlist, yerr= errlist, fmt = '.', capsize = 0)
#adding in the contour script
def multidigitize(VmI,V,binsVmI,binsV):
dVmI = np.digitize(VmI.flat, binsVmI)
dV = np.digitize(V.flat, binsV)
return dVmI,dV
def linlogspace(xmin,xmax,n):
return np.logspace(np.log10(xmin),np.log10(xmax),n)
#here's the contour actual values
def adaptive_param_plot(VmI,V,
bins=3,
threshold=2,
marker='.',
marker_color=None,
ncontours=5,
fill=False,
mesh=False,
contourspacing=linlogspace,
mesh_alpha=0.5,
norm=None,
axis=None,
cmap=None,
**kwargs):
if axis is None:
axis = pl.gca()
axis.set_ylim(28, 18)
ok = np.isfinite(VmI)*np.isfinite(V)
if hasattr(bins, 'ndim') and bins.ndim == 2:
nbinsVmI, nbinsV = bins.shape[0]-1, bins.shape[1]-1
else:
try:
nbinsVmI = nbinsV = len(bins)-1
except TypeError:
nbinsVmI = nbinsV = bins
H, bVmI, bV = np.histogram2d(VmI[ok], V[ok], bins = bins)
dVmI, dV = multidigitize(VmI[ok], V[ok], bVmI, bV)
plottable = np.ones([nbinsVmI+2, nbinsV+2], dtype = 'bool')
plottable_hist = plottable[1:-1, 1:-1]
assert H.shape == plottable_hist.shape
plottable_hist[H > threshold] = False
H[plottable_hist] = 0
toplot = plottable[dVmI, dV]
cVmI = (bVmI[1:]+bVmI[:-1])/2
cV = (bV[1:]+bV[:-1])/2
levels = contourspacing(threshold-0.5, H.max(), ncontours)
if cmap is None:
cmap = plt.cm.get_cmap()
cmap.set_under((0,0,0,0))
cmap.set_bad((0,0,0,0))
if fill:
con = axis.contourf(cVmI, cV, H.T, levels= levels, norm = norm, cmap = cmap, **kwargs)
else:
con = axis.contour(cVmI, cV, H.T,levels=levels,norm=norm,cmap=cmap,**kwargs)
if mesh:
mesh = axis.pcolormesh(bVmI, bV, H.T, **kwargs)
mesh.set_alpha(mesh_alpha)
#Is there a way to add lines w the contour levels?
if 'linestyle' in kwargs:
kwargs.pop('linestyle')
    return cVmI, cV, H, VmI[ok][toplot], V[ok][toplot]
adaptive_param_plot(VmI, V, bins = 100, fill = True, ncontours = 7, threshold = 2, axis = ax1)
#plt.savefig('CMD.png')
plt.savefig(gal_name+'.png')
|
nilq/baby-python
|
python
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show fqdn --all`."""
from sqlalchemy.orm import contains_eager
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.formats.list import StringList
from aquilon.aqdb.model import DnsDomain, DnsEnvironment, Fqdn
class CommandShowFqdnAll(BrokerCommand):
def render(self, session, dns_environment, **arguments):
self.deprecated_command("The show_fqdn command is deprecated. Please "
"use search_dns instead.", **arguments)
dbdns_env = DnsEnvironment.get_unique_or_default(session,
dns_environment)
q = session.query(Fqdn)
q = q.filter_by(dns_environment=dbdns_env)
q = q.join(DnsDomain)
q = q.options(contains_eager("dns_domain"))
q = q.order_by(DnsDomain.name, Fqdn.name)
return StringList(q.all())
|
nilq/baby-python
|
python
|
"""Request handler base classes
"""
from decorator import decorator
from pyramid.httpexceptions import HTTPForbidden
from dd_app.django_codec import DjangoSessionCodec
from dd_app.messaging.mixins import MsgMixin
class DDHandler(object):
"""Base view handler object
"""
def __init__(self, request, *args, **kwargs):
self.request = request
@property
def mongo(self):
        # Note: double-underscore attributes are name-mangled inside the class,
        # so the original hasattr(self, '__mongo') check never matched and the
        # connector was looked up on every access; single underscores fix that.
        if not hasattr(self, '_mongo'):
            self._mongo = self.settings['mongodb.connector']
        return self._mongo
    @property
    def redis(self):
        if not hasattr(self, '_redis'):
            self._redis = self.settings['redis.connector']
        return self._redis
@property
def settings(self):
return self.request.registry.settings
@property
def cookies(self):
return self.request.cookies
@property
def debug_charge_accel(self):
return int(self.settings.get('dd_app.debug_charge_accel', 1))
@property
def powerup_types(self):
return ('ad', 'teammember', 'upgrade')
class DjangoSessionMixin(object):
"""Mixin implementing authentication agains django sessions"""
def _get_redis_key(self, key):
return "%s%s" % (self.settings['session.prefix'], key)
@property
def session_codec(self):
if not hasattr(self, '_session_codec'):
self._session_codec = DjangoSessionCodec(self.settings)
return self._session_codec
def get_session_cookie(self):
if hasattr(self, '_token'):
return self._token
return self.cookies.get(self.settings['session.cookie_id'], None)
def get_redis_session(self, key):
self._raw_session = self.redis.get().get(self._get_redis_key(key))
result = self._raw_session
return result
def _get_session_data(self):
key = self.get_session_cookie()
if key is None:
return {} # no session cookie
session_data = self.get_redis_session(key)
if session_data is None:
return {} # no session data for key
session_dec, auth_uid = self.session_codec.decode(session_data)
return session_dec
@property
def session_data(self):
if not hasattr(self, '_django_session'):
self._django_session = self._get_session_data()
return self._django_session
@property
def session_language(self):
return self.session_data.get('django_language', 'en')
@property
def auth_uid(self):
return self.session_data.get('_auth_user_id', None)
def check_user(self):
if self.auth_uid is not None:
return self.mongo.get_user_by_auth_uid(self.auth_uid, {'_id': 1}) is not None
return False
def get_user_info(self):
if self.auth_uid is not None:
return self.mongo.get_user_by_auth_uid(self.auth_uid)
@property
def userdata(self):
if not hasattr(self, '_userdata'):
self._userdata = self.get_user_info()
return self._userdata
@property
def game_query_base(self):
oid = self.userdata['_id']
query_base = {'user.$id': oid}
version = self.userdata.get('game_version', None)
if version is not None:
query_base.update({'version': version})
return query_base
def _delete_session(self):
del self._django_session
del self._raw_session
if hasattr(self, '_delkey'):
self.redis.get().delete(self._get_redis_key(self._delkey))
del self._delkey
def _delete_cookie(self):
def del_cookie_callback(request, response):
response.delete_cookie(self.settings['session.cookie_id'])
self.request.add_response_callback(del_cookie_callback)
def _logout(self):
self._delkey = self.get_session_cookie()
self._delete_cookie()
self._delete_session()
def get_game_version(self, auth_uid):
if not hasattr(self, '_game_version'):
data = self.mongo.get_game_version(auth_uid)
if data is None:
self._game_version = None
else:
self._game_version = data.get('game_version', None)
return self._game_version
class BaseHandler(DDHandler, DjangoSessionMixin, MsgMixin):
def _get_uid(self):
# For MsgMixin compatibility
return self.auth_uid
# decorator preserving the argspec,
# see https://micheles.googlecode.com/hg/decorator/documentation.html
@decorator
def dd_protected(f, obj, token, *args, **kwargs):
obj._token = token
if obj.auth_uid is None:
raise HTTPForbidden('unauthorized')
return f(obj, token, *args, **kwargs)
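# Illustrative only: the handler below is a hypothetical sketch, not part of this
# application; it shows the intended use of dd_protected on a BaseHandler, whose
# protected methods take the session token as their first argument after self.
class ExampleProtectedHandler(BaseHandler):
    """Minimal sketch of a token-protected view handler."""
    @dd_protected
    def game_state(self, token):
        # Reaching this point means auth_uid was resolved from the django session.
        return {'auth_uid': self.auth_uid}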
|
nilq/baby-python
|
python
|
"""Module with view functions that serve each uri."""
from datetime import datetime
from learning_journal.models.mymodel import Journal
from learning_journal.security import is_authenticated
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.security import NO_PERMISSION_REQUIRED, forget, remember
from pyramid.view import view_config
@view_config(route_name='home', renderer='learning_journal:templates/index.jinja2', permission='view')
def list_view(request):
"""Pass response to send to index.html page with all entries."""
entries = request.dbsession.query(Journal).all()
entries = [entry.to_dict() for entry in entries]
return {
'entries': entries
}
@view_config(route_name='detail', renderer='learning_journal:templates/detail.jinja2', permission='view')
def detail_view(request):
"""Pass response to send to detail page for individual entries."""
target_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journal).get(target_id)
if not entry:
raise HTTPNotFound
if request.method == 'GET':
return {
'entry': entry.to_dict()
}
if request.method == "POST":
return HTTPFound(request.route_url('edit', id=entry.id))
@view_config(route_name='create', renderer='learning_journal:templates/new.jinja2', permission='secret')
def create_view(request):
"""Pass response to send to new page."""
if request.method == 'GET':
return{
'textarea': 'New Entry'
}
if request.method == 'POST':
new_entry = Journal(
title=request.POST['title'],
text=request.POST['text'],
created=datetime.now()
)
request.dbsession.add(new_entry)
return HTTPFound(request.route_url('home'))
@view_config(route_name='edit', renderer='learning_journal:templates/edit.jinja2', permission='secret')
def update_view(request):
"""Pass response to send to edit page."""
target_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journal).get(target_id)
if not entry:
raise HTTPNotFound
if request.method == 'GET':
return {
'entry': entry.to_dict()
}
if request.method == 'POST' and request.POST:
entry.title = request.POST['title']
entry.text = request.POST['body']
entry.created = datetime.now()
request.dbsession.add(entry)
request.dbsession.flush()
return HTTPFound(request.route_url('detail', id=entry.id))
@view_config(route_name='delete', permission='secret')
def delete_view(request):
"""Delete a specific entry."""
target_id = int(request.matchdict['id'])
entry = request.dbsession.query(Journal).get(target_id)
if entry:
request.dbsession.delete(entry)
return HTTPFound(request.route_url('home'))
raise HTTPNotFound
@view_config(
route_name='login', renderer="learning_journal:templates/login.jinja2", permission=NO_PERMISSION_REQUIRED
)
def login(request):
"""Login view config to authenticate username/password."""
if request.authenticated_userid:
return HTTPFound(request.route_url('home'))
if request.method == "GET":
return {}
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
if is_authenticated(username, password):
headers = remember(request, username)
return HTTPFound(request.route_url('home'), headers=headers)
return {
'error': 'Invalid username/password combination.'
}
@view_config(route_name='logout', permission=NO_PERMISSION_REQUIRED)
def logout(request):
"""Logout view config to redirect to home view."""
headers = forget(request)
return HTTPFound(request.route_url('home'), headers=headers)
|
nilq/baby-python
|
python
|
import os
import torch
import pickle
import numpy as np
from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
roberta = RobertaModel.from_pretrained('roberta-base')
path = '../wikidata5m_alias'
if not os.path.exists('../wikidata5m_alias_emb'):
os.makedirs('../wikidata5m_alias_emb')
with open('../read_ent_vocab.bin', 'rb') as fin:
ent_vocab = pickle.load(fin)
with open('../read_rel_vocab.bin', 'rb') as fin:
rel_vocab = pickle.load(fin)
print(len(ent_vocab))
print(len(rel_vocab))
aliases = {}
with open(os.path.join(path, 'wikidata5m_entity.txt'), 'r', encoding='utf-8') as fin:
for line in fin:
segs = line.strip().split('\t')
entity = segs[0]
alias = segs[1:]
aliases[entity] = alias
print(len(aliases))
miss = 0
entity_embeddings = []
for k, v in ent_vocab.items():
if k in aliases:
alias = aliases[k][0]
tokens = tokenizer.encode(' '+alias, add_special_tokens=False)
embedding = roberta.embeddings.word_embeddings(torch.tensor(tokens).view(1,-1)).squeeze(0).mean(dim=0)
else:
miss += 1
embedding = torch.randn(768) / 10
entity_embeddings.append(embedding)
assert len(ent_vocab) == len(entity_embeddings)
entity_embeddings = torch.stack(entity_embeddings, dim=0)
print(miss * 1.0 / len(ent_vocab))
print(entity_embeddings.shape)
np.save('../wikidata5m_alias_emb/entities.npy', entity_embeddings.detach().numpy())
del entity_embeddings
rel_aliases = {}
with open(os.path.join(path, 'wikidata5m_relation.txt'), 'r', encoding='utf-8') as fin:
for line in fin:
segs = line.strip().split('\t')
relation = segs[0]
alias = segs[1:]
rel_aliases[relation] = alias
miss = 0
relation_embeddings = []
for k, v in rel_vocab.items():
if k in rel_aliases:
alias = rel_aliases[k][0]
tokens = tokenizer.encode(' '+alias, add_special_tokens=False)
embedding = roberta.embeddings.word_embeddings(torch.tensor(tokens).view(1,-1)).squeeze(0).mean(dim=0)
else:
miss += 1
embedding = torch.randn(768) / 10
relation_embeddings.append(embedding)
assert len(rel_vocab) == len(relation_embeddings)
relation_embeddings = torch.stack(relation_embeddings, dim=0)
print(relation_embeddings.shape)
print(miss * 1.0 / len(rel_vocab))
np.save('../wikidata5m_alias_emb/relations.npy', relation_embeddings.detach().numpy())
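# Reload sketch (assumes the files above were written successfully): the saved
# matrices can seed an embedding layer downstream, e.g.
# ent_emb = torch.from_numpy(np.load('../wikidata5m_alias_emb/entities.npy'))
# ent_layer = torch.nn.Embedding.from_pretrained(ent_emb, freeze=False)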
|
nilq/baby-python
|
python
|
from array import array
import numpy as np
import os
import cv2
import argparse
from tensorflow import lite as tflite
from matplotlib import pyplot as plt
import time
import torch
from torchvision import transforms
from omegaconf import OmegaConf
import torch.nn.functional as F
import tensorflow as tf
from torch.optim import Adam
from torch.utils.data import DataLoader
import torchvision
import pytorch_lightning as pl
from PIL import Image
from aei_net import AEINet
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, default="../config/train.yaml",
help="path of configuration yaml file"),
parser.add_argument("--model_path", type=str, default="../ONNX/",
help="path of onnx extra data folder"),
parser.add_argument("--checkpoint_path", type=str, default="../chkpt/30.ckpt",
help="path of aei-net pre-trained file"),
parser.add_argument("--images_folder", type=str, default="../data/faceshifter-datasets-preprocessed/train/",
help="path of preprocessed source face image"),
parser.add_argument("--gpu_num", type=int, default=0,
help="number of gpu"),
parser.add_argument("--num_images", type=int, default=50,
help="number of images used to convert the model")
args = parser.parse_args()
def optizeADD_w_optim_MLE(argument):
device = torch.device(f"cuda:{argument.gpu_num}" if torch.cuda.is_available() else 'cpu')
#set experimental memory growth
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(
physical_devices[0], True
)
#load model for converter
converter = tf.lite.TFLiteConverter.from_saved_model(argument.model_path + "ADD_gen")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
#load model for data preparation
hp = OmegaConf.load(argument.config)
model = AEINet.load_from_checkpoint(argument.checkpoint_path, hp=hp)
model.eval()
model.freeze()
model.to(device)
interpreter = tflite.Interpreter(args.model_path+ "MultiLevelEncoder_gen_Lite_optimized.tflite", num_threads=12)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
#setup for data preparation
def representative_dataset_gen():
for i in range(argument.num_images):
#choose a picture
source_img_path = os.path.join(argument.images_folder, f"{i:08}.png")
source_img = transforms.ToTensor()(Image.open(source_img_path)).unsqueeze(0).to(device)
#prepare the image for the model
z_id = model.Z(F.interpolate(source_img, size=112, mode='bilinear'))
z_id = F.normalize(z_id)
z_id = z_id.detach()
#choose target image
target_img_number = (i+argument.num_images)
target_img_path = os.path.join(argument.images_folder, f"{target_img_number:08}.png")
img = cv2.imread(target_img_path)
img = cv2.resize(img, (256, 256))
img = img.astype(np.float32)
img = img/255.0
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
interpreter.set_tensor(input_details[0]['index'], img)
interpreter.invoke()
feature_map = [interpreter.get_tensor(output_details[1]['index']), interpreter.get_tensor(output_details[0]['index']), interpreter.get_tensor(output_details[3]['index']),
interpreter.get_tensor(output_details[5]['index']), interpreter.get_tensor(output_details[6]['index']), interpreter.get_tensor(output_details[4]['index']),
interpreter.get_tensor(output_details[7]['index']), interpreter.get_tensor(output_details[2]['index'])]
#converting to cpu and numpy and prepraring with dictionary signature
yield {"input.5": z_id.cpu().numpy(),
"input.119": feature_map[5],
"input.145": feature_map[6],
"input.171": feature_map[7],
"input.27": feature_map[1],
"input.47": feature_map[2],
"input.67": feature_map[3],
"input.7": feature_map[0],
"input.93": feature_map[4]}
#converter setup
converter.representative_dataset = representative_dataset_gen
#converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
#converter.inference_input_type = tf.float32
#converter.inference_output_type = tf.float32
#convert the model
tflite_quant_model = converter.convert()
#save the model
with open(args.model_path + "ADD_gen_Lite_optimized.tflite", 'wb') as f:
f.write(tflite_quant_model)
optizeADD_w_optim_MLE(args)
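# Inference sketch (hedged): tensor ordering in the converted graph is model-
# specific, so the zeroth-index lookups and `example_input` below are
# illustrative assumptions only.
# interp = tf.lite.Interpreter(args.model_path + "ADD_gen_Lite_optimized.tflite")
# interp.allocate_tensors()
# in_details, out_details = interp.get_input_details(), interp.get_output_details()
# interp.set_tensor(in_details[0]['index'], example_input)
# interp.invoke()
# result = interp.get_tensor(out_details[0]['index'])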
|
nilq/baby-python
|
python
|
import os
# Bot Setup
os.environ["prefix"] = "g!"
os.environ["token"] = "NzM5Mjc0NjkxMDg4MjIwMzEw.XyYFNQ.Mb6wJSHhpj9LP5bqO2Hb0w8NBQM"
os.environ["botlog"] = "603894328212848651"
os.environ["debugEnabled"] = "False"
# Database Setup
os.environ["db_type"] = "MySQL" # Either MySQL or Flat (or just leave empty for Flat SQLite)
os.environ["db_user"] = "USERNAME" # Defaults to root if empty
os.environ["db_pword"] = "PASSWORD" # Defaults to none if empty
os.environ["db_host"] = "HOST" # Defaults to localhost if empty
os.environ["db_port"] = "PORT" # Defaults to 3306
# Bot list API tokens
os.environ["top.gg_token"] = ""
os.environ["discord.bots.gg_token"] = ""
os.environ["discordbotlist.com_token"] = ""
os.environ["bots.ondiscord.xyz_token"] = ""
os.environ["botsfordiscord.com_token"] = ""
|
nilq/baby-python
|
python
|
###
# Adapted from Avalanche LvisDataset
# https://github.com/ContinualAI/avalanche/tree/detection/avalanche/benchmarks/datasets/lvis
#
# Released under the MIT license, see:
# https://github.com/ContinualAI/avalanche/blob/master/LICENSE
###
from pathlib import Path
from typing import List, Sequence, Union
from PIL import Image
from torch.utils.data import Dataset
from torchvision.datasets.folder import default_loader
from devkit_tools.challenge_constants import DEFAULT_CHALLENGE_TRAIN_JSON, \
DEFAULT_CHALLENGE_TEST_JSON
from ego_objects import EgoObjects, EgoObjectsAnnotation, \
EgoObjectsImage
import torch
class ChallengeDetectionDataset(Dataset):
"""
The sample dataset. For internal use by challenge organizers only.
"""
def __init__(
self,
root: Union[str, Path],
*,
train=True,
transform=None,
loader=default_loader,
ego_api=None,
img_ids: List[int] = None,
bbox_format: str = 'ltwh',
categories_id_mapping: List[int] = None
):
"""
Instantiates the sample dataset.
:param root: The path to the images and annotation file.
:param transform: The transformation to apply.
:param loader: The image loader. Defaults to PIL Image open.
:param ego_api: An EgoObjects object. If not provided, annotations
will be loaded from the json file found in the root. Defaults to
None.
:param img_ids: A list of image ids to use. If not None, only those
images (a subset of the original dataset) will be used. Defaults
to None.
:param bbox_format: The bounding box format. Defaults to "ltwh"
(Left, Top, Width, Height).
:param categories_id_mapping: If set, it must define a mapping from
the to-be-used-id to the real category id so that:
real_cat_id = categories_id_mapping[mapped_id].
"""
self.root: Path = Path(root)
self.train = train
self.transform = transform
self.loader = loader
self.bbox_crop = True
self.img_ids = img_ids
self.bbox_format = bbox_format
self.categories_id_mapping = categories_id_mapping
self.ego_api = ego_api
must_load_api = self.ego_api is None
must_load_img_ids = self.img_ids is None
# Load metadata
if must_load_api:
if self.train:
ann_json_path = str(self.root / DEFAULT_CHALLENGE_TRAIN_JSON)
else:
ann_json_path = str(self.root / DEFAULT_CHALLENGE_TEST_JSON)
self.ego_api = EgoObjects(ann_json_path)
if must_load_img_ids:
self.img_ids = list(sorted(self.ego_api.get_img_ids()))
self.targets = EgoObjectsDetectionTargets(
self.ego_api, self.img_ids,
categories_id_mapping=categories_id_mapping)
# Try loading an image
if len(self.img_ids) > 0:
img_id = self.img_ids[0]
img_dict = self.ego_api.load_imgs(ids=[img_id])[0]
assert self._load_img(img_dict) is not None
def __getitem__(self, index):
"""
Loads an instance given its index.
:param index: The index of the instance to retrieve.
:return: a (sample, target) tuple where the target is a
torchvision-style annotation for object detection
https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
"""
img_id = self.img_ids[index]
img_dict: EgoObjectsImage = self.ego_api.load_imgs(ids=[img_id])[0]
annotation_dicts = self.targets[index]
# Transform from EgoObjects dictionary to torchvision-style target
num_objs = len(annotation_dicts)
boxes = []
labels = []
areas = []
for i in range(num_objs):
xmin = annotation_dicts[i]['bbox'][0]
ymin = annotation_dicts[i]['bbox'][1]
if self.bbox_format == 'ltrb':
# Left, Top, Right, Bottom
xmax = annotation_dicts[i]['bbox'][2]
ymax = annotation_dicts[i]['bbox'][3]
boxw = xmax - xmin
boxh = ymax - ymin
else:
# Left, Top, Width, Height
boxw = annotation_dicts[i]['bbox'][2]
boxh = annotation_dicts[i]['bbox'][3]
xmax = boxw + xmin
ymax = boxh + ymin
boxes.append([xmin, ymin, xmax, ymax])
labels.append(annotation_dicts[i]['category_id'])
areas.append(boxw * boxh)
if len(boxes) > 0:
boxes = torch.as_tensor(boxes, dtype=torch.float32)
else:
boxes = torch.empty((0, 4), dtype=torch.float32)
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([img_id])
areas = torch.as_tensor(areas, dtype=torch.float32)
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
target = dict()
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
target["area"] = areas
target["iscrowd"] = iscrowd
img = self._load_img(img_dict)
if self.transform is not None:
img, target = self.transform(img, target)
return img, target
def __len__(self):
return len(self.img_ids)
def _load_img(self, img_dict):
img_url = img_dict['url']
splitted_url = img_url.split('/')
img_path = 'images/' + splitted_url[-1]
img_path_alt = 'cltest/' + splitted_url[-1]
final_path = self.root / img_path # <root>/images/<img_id>.jpg
if not final_path.exists():
final_path = self.root / img_path_alt
return Image.open(str(final_path)).convert("RGB")
class EgoObjectsDetectionTargets(Sequence[List[EgoObjectsAnnotation]]):
def __init__(
self,
ego_api: EgoObjects,
img_ids: List[int] = None,
categories_id_mapping: List[int] = None):
super(EgoObjectsDetectionTargets, self).__init__()
self.ego_api = ego_api
if categories_id_mapping is not None:
self.reversed_mapping = dict()
for mapped_id, real_id in enumerate(categories_id_mapping):
self.reversed_mapping[real_id] = mapped_id
else:
self.reversed_mapping = None
if img_ids is None:
img_ids = list(sorted(ego_api.get_img_ids()))
self.img_ids = img_ids
def __len__(self):
return len(self.img_ids)
def __getitem__(self, index):
img_id = self.img_ids[index]
annotation_ids = self.ego_api.get_ann_ids(img_ids=[img_id])
annotation_dicts: List[EgoObjectsAnnotation] = \
self.ego_api.load_anns(annotation_ids)
if self.reversed_mapping is None:
return annotation_dicts
mapped_anns: List[EgoObjectsAnnotation] = []
for ann_dict in annotation_dicts:
ann_dict: EgoObjectsAnnotation = dict(ann_dict)
ann_dict['category_id'] = \
self.reversed_mapping[ann_dict['category_id']]
mapped_anns.append(ann_dict)
return mapped_anns
__all__ = [
'ChallengeDetectionDataset'
]
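# Usage sketch (illustrative): the root path is a placeholder and the collate_fn
# follows the usual torchvision detection convention of batching (img, target)
# tuples; neither is prescribed by this module.
# from torch.utils.data import DataLoader
# dataset = ChallengeDetectionDataset('/path/to/challenge_root', train=True)
# loader = DataLoader(dataset, batch_size=2, shuffle=True,
#                     collate_fn=lambda batch: tuple(zip(*batch)))
# images, targets = next(iter(loader))  # each target holds 'boxes', 'labels', ...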
|
nilq/baby-python
|
python
|
import os
import cv2
import numpy as np
from constants import DATA_DIR
MYPY = False
if MYPY:
from typing import Tuple, Union
Pair = Tuple[int, int]
def scale(image, scale_percent):
height, width = image.shape[:2]
return cv2.resize(image, (int(width * scale_percent), int(height * scale_percent)))
def show(image, scale_percent=None):
if scale_percent is not None:
image = scale(image, scale_percent)
cv2.namedWindow("test")
cv2.imshow("test", image)
return cv2.waitKey()
def get_drawn_contours(im, contours, draw_to_existing_image=False):
if draw_to_existing_image:
if len(im.shape) < 3 or im.shape[2] == 1:
orig = im
im = np.empty((im.shape[0], im.shape[1], 3))
im[:, :, 0] = orig
im[:, :, 1] = orig
im[:, :, 2] = orig
else:
im = im.copy()
else:
im = np.empty((im.shape[0], im.shape[1], 3))
im[:, :] = [0, 0, 0]
cv2.drawContours(im, contours, -1, (0, 255, 0), 1)
return im
def get_center_for_contour(contour):
x, y, w, h = cv2.boundingRect(contour)
return x + w / 2, y + h / 2
def order_points(pts):
# Handle the common case of pts being a contour
if pts.shape == (4, 1, 2):
pts = pts.reshape((4, 2))
# initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype="float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
def four_point_transform(image, pts, margin_percent=0):
# obtain a consistent order of the points and unpack them
# individually
rect = order_points(pts)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordinates or the top-right and top-left x-coordinates
width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
max_width = max(int(width_a), int(width_b))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
max_height = max(int(height_a), int(height_b))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
margin_width = max_width * margin_percent / 100
margin_height = max_width * margin_percent / 100
dst = np.array([
[margin_width, margin_height],
[margin_width + max_width, margin_height],
[margin_width + max_width, margin_height + max_height],
[margin_width, margin_height + max_height],
], dtype="float32")
# compute the perspective transform matrix and then apply it
perspective_transform = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, perspective_transform,
(2 * margin_width + max_width, 2 * margin_height + max_height))
# return the warped image
return warped
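# Usage sketch (illustrative): the file name and corner coordinates below are
# made-up values that only demonstrate the expected (4, 2) point layout.
# card = cv2.imread('card.jpg')
# corners = np.array([[52, 40], [410, 55], [398, 300], [45, 290]], dtype="float32")
# top_down = four_point_transform(card, corners)  # perspective-corrected crop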
def inflate_classifier(classifier_root_dir):
vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = \
get_classifier_directories(classifier_root_dir)
with open(vocab_path, "rb") as f:
vocab = np.load(f)
# FLANN parameters
flann_index_kdtree = 0
index_params = dict(algorithm=flann_index_kdtree, trees=5)
search_params = dict(checks=50) # or pass empty dictionary
matcher = cv2.FlannBasedMatcher(index_params, search_params)
detector = cv2.SIFT()
extractor = cv2.DescriptorExtractor_create("SIFT")
bow_de = cv2.BOWImgDescriptorExtractor(extractor, matcher)
bow_de.setVocabulary(vocab)
svm = cv2.SVM()
svm.load(os.path.join(svm_data_dir, "svm_data.dat"))
def classifier(image):
keypoints = detector.detect(image)
descriptor = bow_de.compute(image, keypoints)
return svm.predict(descriptor)
return classifier
def get_classifier_directories(root_dir):
vocab_path = os.path.join(root_dir, "vocab.npy")
unlabelled_dir = os.path.join(root_dir, "unlabelled")
labelled_dir = os.path.join(root_dir, "labelled")
features_dir = os.path.join(root_dir, "features")
svm_data_dir = os.path.join(root_dir, "svm_data")
dirs = (
unlabelled_dir,
labelled_dir,
features_dir,
svm_data_dir
)
for directory in dirs:
if not os.path.exists(directory):
os.makedirs(directory)
return (vocab_path,) + dirs
def ls(path, limit=None, name_filter=None):
i = 0
for name in os.listdir(path):
if name == ".DS_Store":
continue
if name_filter is not None and name_filter not in name:
continue
i += 1
if limit is not None and i > limit:
break
yield os.path.join(path, name)
def apply_offset_to_locations(locations, offset):
x_offset, y_offset = offset
return [(location[0] + x_offset, location[1] + y_offset) for location in locations]
def apply_offset_to_single_location(location, offset):
x_offset, y_offset = offset
return location[0] + x_offset, location[1] + y_offset
def _apply_sensitivity_to_255(orig_100):
low = max(0, orig_100 - 10)
high = min(100, orig_100 + 10)
return int((255 * low) / 100.0), int((255 * high) / 100.0)
def extract_color_2(im, hue_360, saturation_100, value_100):
saturation_255 = _apply_sensitivity_to_255(saturation_100)
value_255 = _apply_sensitivity_to_255(value_100)
return extract_color(im, hue_360/2, saturation_255, value_255)
def extract_color(im, hue, saturation, value):
# type: (np.ndarray, Union[int, Pair], Pair, Pair) -> np.ndarray
if isinstance(hue, int):
sensitivity = 10
hue = (hue - sensitivity, hue + sensitivity)
# Handle hue's near the boundary
split_hue_pairs = None
if hue[0] < 0:
split_hue_pairs = ((hue[0] % 180, 180), (0, hue[1]))
elif hue[1] > 180:
split_hue_pairs = ((hue[0], 180), (0, hue[1] % 180))
if split_hue_pairs is not None:
a_hue, b_hue = split_hue_pairs
return extract_color(im, a_hue, saturation, value) + \
extract_color(im, b_hue, saturation, value)
lower_bound = np.array([hue[0], saturation[0], value[0]])
upper_bound = np.array([hue[1], saturation[1], value[1]])
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
mono = cv2.inRange(hsv, lower_bound, upper_bound)
return mono
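# Usage sketch (illustrative values): hues close to the 0/180 wrap-around, such
# as red, are handled by the split above, so a call like this is safe:
# red_mask = extract_color_2(im, 350, 80, 90)  # hue in degrees, sat/val in percent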
def _handle_points(a, b, most_horizontal, most_vertical):
line = (a, b)
most_horizontal.append(line)
most_horizontal[:] = sorted(most_horizontal, key=aspect_ratio, reverse=True)[:2]
most_vertical.append(line)
most_vertical[:] = sorted(most_vertical, key=aspect_ratio)[:2]
def get_corners_from_cornerless_rect(contour):
most_horizontal = []
most_vertical = []
prev_point = None
for (point,) in contour:
if prev_point is None:
prev_point = point
continue
_handle_points(prev_point, point, most_horizontal, most_vertical)
prev_point = point
# Make sure to consider the line between the first and last points.
_handle_points(contour[0][0], prev_point, most_horizontal, most_vertical)
top, bottom = sorted(most_horizontal, key=lambda (j, k): (j[1] + k[1]) / 2)
left, right = sorted(most_vertical, key=lambda (j, k): (j[0] + k[0]) / 2)
tl = find_intersection(left, top)
tr = find_intersection(top, right)
br = find_intersection(right, bottom)
bl = find_intersection(bottom, left)
points = np.array((tl, tr, br, bl))
return points
def aspect_ratio(line):
(x1, y1), (x2, y2) = line
denominator = float(abs(y2 - y1))
if denominator == 0:
return float("inf")
return float(abs(x2 - x1)) / denominator
def find_intersection(line_a, line_b):
# Math'ed the shit out of this
# https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_two_points_on_each_line
(x1, y1), (x2, y2) = line_a
(x3, y3), (x4, y4) = line_b
intersect_x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / \
((x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4))
intersect_y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / \
((x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4))
return intersect_x, intersect_y
def point_closest_to(points, x, y):
def dist(point):
x_offset = point[0][0] - x
y_offset = point[0][1] - y
sqrt = np.math.sqrt(x_offset ** 2 + y_offset ** 2)
return sqrt
return sorted(points, key=dist)[0]
def contour_bounding_box_for_contour(contour):
x, y, w, h = cv2.boundingRect(contour)
contour = np.array([
[x, y],
[x + w, y],
[x + w, y + h],
[x, y + h],
]).reshape((4, 1, 2))
return contour
def get_dimens(im):
h, w = im.shape[:2]
return w, h
def get_contours(im, close_and_open=True):
im = im.copy()
if close_and_open:
structuring_element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))
structuring_element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
im = cv2.morphologyEx(im, cv2.MORPH_CLOSE, structuring_element1)
im = cv2.morphologyEx(im, cv2.MORPH_OPEN, structuring_element2)
return cv2.findContours(im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
def get_width(im):
return get_dimens(im)[0]
def get_height(im):
return get_dimens(im)[1]
def ls_debug(start_num=None, end_num=None, explicit_options=None):
assert (start_num is None) == (end_num is None) and \
(start_num is None) != (explicit_options is None), \
"Should specify either start and end or explicit"
if start_num is not None:
explicit_options = range(start_num, end_num + 1)
for path in ls(DATA_DIR + "module_specific_data/debug/"):
name, _ = os.path.splitext(os.path.basename(path))
try:
num = int(name)
except ValueError:
continue
if num in explicit_options:
yield path
def get_subset(im, x_percents, y_percents, margin_percent=0):
w, h = get_dimens(im)
left, right, top, bottom = [
int((percent * full)/100.0) for percent, full in zip(x_percents + y_percents, (w, w, h, h))
]
if margin_percent != 0:
half_w = (right - left) / 2.0
half_h = (bottom - top) / 2.0
pos_dist_to_center_pairs = (
(left, half_w),
(right, -half_w),
(top, half_h),
(bottom, -half_h),
)
left, right, top, bottom = [
int(pos + dist_to_center - (dist_to_center * (100 + margin_percent) / 100.0))
for pos, dist_to_center in pos_dist_to_center_pairs
]
return im[top:bottom, left:right]
def rotate_image_180(image):
image = cv2.flip(image, 0)
image = cv2.flip(image, 1)
return image
def rotate_image_clockwise(image):
image = cv2.transpose(image)
image = cv2.flip(image, 1)
return image
def rotate_image_counter_clockwise(image):
image = cv2.transpose(image)
image = cv2.flip(image, 0)
return image
def extract_threshold(im, threshold):
return cv2.threshold(im, threshold, 255, 0)[1]
|
nilq/baby-python
|
python
|
from django.contrib import admin
# Register your models here.
from .models import SiteConfiguration, SingleOrder, Order, UniqueFeature, Size, Color
admin.site.register(Size)
admin.site.register(Color)
admin.site.register(SiteConfiguration)
admin.site.register(UniqueFeature)
|
nilq/baby-python
|
python
|
import pygame
from pygame import draw
from .player import Player, Enemy
from .utils import Director, TileSet
class lvl1(Director):
def __init__(self) -> None:
super().__init__(self)
self.tile = TileSet("assets/tileset.png", 10, 28)
self.tile.gen_map(self.alto, self.ancho, 52)
self.plr = Player()
self.enm = Enemy()
def update(self):
self.plr.update()
self.enm.update()
if pygame.sprite.collide_rect(self.plr, self.enm):
if self.plr.pos in [1, 3]:
if self.plr.rect.top <= self.enm.rect.bottom and self.enm.rect.bottom < self.plr.rect.bottom:
self.plr.rect.top = self.enm.rect.bottom
elif self.plr.rect.bottom >= self.enm.rect.top and self.enm.rect.top > self.plr.rect.top:
self.plr.rect.bottom = self.enm.rect.top
if self.plr.pos in [0, 2]:
if self.plr.rect.left <= self.enm.rect.right and self.enm.rect.right < self.plr.rect.right:
self.plr.rect.left = self.enm.rect.right
elif self.plr.rect.right >= self.enm.rect.left and self.enm.rect.left > self.plr.rect.left:
self.plr.rect.right = self.enm.rect.left
def draw(self, screen):
screen.fill((0, 0, 0))
self.tile.draw(self.screen,self.tile.tilemap)
self.plr.draw(self.screen)
self.enm.draw(self.screen)
|
nilq/baby-python
|
python
|
from django.urls import path, include
from .views import *
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('titles', TitleViewSet, basename='titles')
router.register('categories', CategoryViewSet, basename='categories')
router.register('genres', GenreViewSet, basename='genres')
urlpatterns = [
path('', include(router.urls)),
]
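# For reference (standard DRF behaviour): DefaultRouter adds list/detail routes
# per registered viewset, e.g. 'titles/' (name 'titles-list') and 'titles/<pk>/'
# (name 'titles-detail'), plus a browsable API root at this module's mount point.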
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-09 10:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_prices.models
class Migration(migrations.Migration):
dependencies = [
('order', '0016_order_language_code'),
]
operations = [
migrations.AlterField(
model_name='deliverygroup',
name='shipping_price',
field=django_prices.models.PriceField(currency='KES', decimal_places=4, default=0, editable=False, max_digits=12, verbose_name='shipping price'),
),
migrations.AlterField(
model_name='order',
name='billing_address',
field=models.ForeignKey(default=1, editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='userprofile.Address', verbose_name='billing address'),
),
migrations.AlterField(
model_name='order',
name='discount_amount',
field=django_prices.models.PriceField(blank=True, currency='KES', decimal_places=2, max_digits=12, null=True, verbose_name='discount amount'),
),
migrations.AlterField(
model_name='order',
name='token',
field=models.CharField(max_length=36, null=True, verbose_name='token'),
),
migrations.AlterField(
model_name='order',
name='total_net',
field=django_prices.models.PriceField(blank=True, currency='KES', decimal_places=2, max_digits=12, null=True, verbose_name='total net'),
),
migrations.AlterField(
model_name='order',
name='total_tax',
field=django_prices.models.PriceField(blank=True, currency='KES', decimal_places=2, max_digits=12, null=True, verbose_name='total tax'),
),
migrations.AlterField(
model_name='ordernote',
name='content',
field=models.CharField(max_length=250, verbose_name='content'),
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
##
# @file goldman_equation.py
# @brief Contain a function that calculates the equilibrium potential using the Goldman equation
# @author Gabriel H Riqueti
# @email gabrielhriqueti@gmail.com
# @date 22/04/2021
#
from biomedical_signal_processing import FARADAYS_CONSTANT as F
from biomedical_signal_processing import GAS_CONSTANT as R
import numpy as np
def goldman_equation(temperature, mono_cations_in, mono_cations_out, mono_cations_perm, mono_anions_in, mono_anions_out, mono_anions_perm,):
"""
    Calculate the resting membrane potential using the Goldman (GHK) equation
    Parameters
    ----------
    temperature : float
        Temperature (Kelvin)
    mono_cations_in : array_like of positive floats
        Concentrations of the monovalent cations inside the cell
    mono_cations_out : array_like of positive floats
        Concentrations of the monovalent cations outside the cell
    mono_cations_perm : array_like of positive floats
        Relative membrane permeabilities of the monovalent cations
    mono_anions_in : array_like of positive floats
        Concentrations of the monovalent anions inside the cell
    mono_anions_out : array_like of positive floats
        Concentrations of the monovalent anions outside the cell
    mono_anions_perm : array_like of positive floats
        Relative membrane permeabilities of the monovalent anions
    Returns
    -------
    e_r : float
        Resting membrane potential (Volts)
"""
if (mono_cations_in <= 0).any() or (mono_cations_out <= 0).any() or (mono_anions_in <= 0).any() or (mono_anions_out <= 0).any():
raise ValueError('The ionic concentrations must have positive values')
if temperature < 0:
raise ValueError('temperature must have non-negative values')
if (np.sum(mono_cations_in * mono_cations_perm) + np.sum(mono_anions_out * mono_anions_perm) == 0
and (np.sum(mono_cations_out * mono_cations_perm) + np.sum(mono_anions_in * mono_anions_perm)) == 0):
return 0 * (R * temperature / F) * (
(np.sum(mono_cations_out * mono_cations_perm) + np.sum(mono_anions_in * mono_anions_perm)) *
(np.sum(mono_cations_in * mono_cations_perm) + np.sum(mono_anions_out * mono_anions_perm))
)
return (R * temperature / F) * np.log(
(np.sum(mono_cations_out * mono_cations_perm) + np.sum(mono_anions_in * mono_anions_perm)) /
(np.sum(mono_cations_in * mono_cations_perm) + np.sum(mono_anions_out * mono_anions_perm))
)
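if __name__ == '__main__':
    # Minimal worked example with illustrative, textbook-style squid-axon values
    # (not taken from this project): K+ and Na+ as the monovalent cations, Cl-
    # as the monovalent anion, relative permeabilities roughly 1 : 0.04 : 0.45.
    T = 310.0                                # Kelvin (about 37 degrees C)
    cations_in = np.array([400.0, 50.0])     # [K+]_in, [Na+]_in (mM)
    cations_out = np.array([20.0, 440.0])    # [K+]_out, [Na+]_out (mM)
    cations_perm = np.array([1.0, 0.04])
    anions_in = np.array([52.0])             # [Cl-]_in (mM)
    anions_out = np.array([560.0])           # [Cl-]_out (mM)
    anions_perm = np.array([0.45])
    v_rest = goldman_equation(T, cations_in, cations_out, cations_perm,
                              anions_in, anions_out, anions_perm)
    print('Resting potential: %.1f mV' % (v_rest * 1000))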
|
nilq/baby-python
|
python
|
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
import textwrap
from ansible import constants as C
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from units.mock.path import mock_unfrackpath_noop
from ansible.inventory.manager import InventoryManager, split_host_pattern
from units.mock.loader import DictDataLoader
class TestInventory(unittest.TestCase):
patterns = {
'a': ['a'],
'a, b': ['a', 'b'],
'a , b': ['a', 'b'],
' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
'9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
'9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo'],
'foo[1:2]': ['foo[1:2]'],
'a::b': ['a::b'],
'a:b': ['a', 'b'],
' a : b ': ['a', 'b'],
'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
}
pattern_lists = [
[['a'], ['a']],
[['a', 'b'], ['a', 'b']],
[['a, b'], ['a', 'b']],
[['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo']]
]
# pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
# a,b are the bounds of the subscript; x..z are the results of the subscript
# when applied to string.ascii_letters.
subscripts = {
'a': [('a', None), list(string.ascii_letters)],
'a[0]': [('a', (0, None)), ['a']],
'a[1]': [('a', (1, None)), ['b']],
'a[2:3]': [('a', (2, 3)), ['c', 'd']],
'a[-1]': [('a', (-1, None)), ['Z']],
'a[-2]': [('a', (-2, None)), ['Y']],
'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
}
ranges_to_expand = {
'a[1:2]': ['a1', 'a2'],
'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'],
'a[a:b]': ['aa', 'ab'],
'a[a:i:3]': ['aa', 'ad', 'ag'],
'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'],
'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'],
'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'],
}
def setUp(self):
fake_loader = DictDataLoader({})
self.i = InventoryManager(loader=fake_loader, sources=[None])
def test_split_patterns(self):
for p in self.patterns:
r = self.patterns[p]
self.assertEqual(r, split_host_pattern(p))
for p, r in self.pattern_lists:
self.assertEqual(r, split_host_pattern(p))
def test_ranges(self):
for s in self.subscripts:
r = self.subscripts[s]
self.assertEqual(r[0], self.i._split_subscript(s))
self.assertEqual(
r[1],
self.i._apply_subscript(
list(string.ascii_letters),
r[0][1]
)
)
class TestInventoryPlugins(unittest.TestCase):
def test_empty_inventory(self):
inventory = self._get_inventory('')
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
self.assertFalse(inventory.groups['all'].get_hosts())
self.assertFalse(inventory.groups['ungrouped'].get_hosts())
def test_ini(self):
self._test_default_groups("""
host1
host2
host3
[servers]
host3
host4
host5
""")
def test_ini_explicit_ungrouped(self):
self._test_default_groups("""
[ungrouped]
host1
host2
host3
[servers]
host3
host4
host5
""")
def test_ini_variables_stringify(self):
values = ['string', 'no', 'No', 'false', 'FALSE', [], False, 0]
inventory_content = "host1 "
inventory_content += ' '.join(['var%s=%s' % (i, to_text(x)) for i, x in enumerate(values)])
inventory = self._get_inventory(inventory_content)
variables = inventory.get_host('host1').vars
for i in range(len(values)):
if isinstance(values[i], string_types):
self.assertIsInstance(variables['var%s' % i], string_types)
else:
self.assertIsInstance(variables['var%s' % i], type(values[i]))
@mock.patch('ansible.inventory.manager.unfrackpath', mock_unfrackpath_noop)
@mock.patch('os.path.exists', lambda x: True)
@mock.patch('os.access', lambda x, y: True)
def test_yaml_inventory(self, filename="test.yaml"):
inventory_content = {filename: textwrap.dedent("""\
---
all:
hosts:
test1:
test2:
""")}
C.INVENTORY_ENABLED = ['yaml']
fake_loader = DictDataLoader(inventory_content)
im = InventoryManager(loader=fake_loader, sources=filename)
self.assertTrue(im._inventory.hosts)
self.assertIn('test1', im._inventory.hosts)
self.assertIn('test2', im._inventory.hosts)
self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['all'].hosts)
self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['all'].hosts)
self.assertEqual(len(im._inventory.groups['all'].hosts), 2)
self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['ungrouped'].hosts)
self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['ungrouped'].hosts)
self.assertEqual(len(im._inventory.groups['ungrouped'].hosts), 2)
def _get_inventory(self, inventory_content):
fake_loader = DictDataLoader({__file__: inventory_content})
return InventoryManager(loader=fake_loader, sources=[__file__])
def _test_default_groups(self, inventory_content):
inventory = self._get_inventory(inventory_content)
self.assertIn('all', inventory.groups)
self.assertIn('ungrouped', inventory.groups)
all_hosts = set(host.name for host in inventory.groups['all'].get_hosts())
self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts)
ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts())
self.assertEqual(set(['host1', 'host2']), ungrouped_hosts)
servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts())
self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)
|
nilq/baby-python
|
python
|
''' Given a binary string str of length N, the task is to find the maximum count of substrings str can be divided into such that all the substrings are balanced i.e. they have equal number of 0s and 1s. If it is not possible to split str satisfying the conditions then print -1.
Example:
Input: str = “0100110101”
Output: 4
The required substrings are “01”, “0011”, “01” and “01”.
Approach:
Initialize count = 0 and traverse the string character by character and keep track of the number of 0s and 1s so far, whenever the count of 0s and 1s become equal increment the count. If the count of 0s and 1s in the original string is not equal then print -1 else print the value of count after the traversal of the complete string.'''
s = str(input())
count_sub = 0
count_0 = 0
count_1 = 0
for ch in s:
    if ch == '0':
        count_0 += 1
    else:
        count_1 += 1
    if count_0 == count_1:
        count_sub += 1
# As described above: if the whole string is not balanced, no valid split exists.
print(count_sub if count_0 == count_1 else -1)
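# Dry run of the example above, "0100110101": the counters tie after reading
# characters 2, 6, 8 and 10, giving the four balanced pieces "01", "0011", "01", "01".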
|
nilq/baby-python
|
python
|
from Cocoa import *
gNumDaysInMonth = ( 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 )
def isLeap(year):
return (((year % 4) == 0 and ((year % 100) != 0)) or (year % 400) == 0)
class CalendarMatrix (NSMatrix):
lastMonthButton = objc.IBOutlet()
monthName = objc.IBOutlet()
nextMonthButton = objc.IBOutlet()
__slots__ = ('_selectedDay', '_startOffset')
def initWithFrame_(self, frameRect):
self._selectedDay = None
self._startOffset = 0
cell = NSButtonCell.alloc().initTextCell_("")
now = NSCalendarDate.date()
cell.setShowsStateBy_(NSOnOffButton)
self.initWithFrame_mode_prototype_numberOfRows_numberOfColumns_(
frameRect, NSRadioModeMatrix, cell, 5, 7)
count = 0
for i in range(6):
for j in range(7):
val = self.cellAtRow_column_(i, j)
if val:
val.setTag_(count)
count += 1
self._selectedDay = NSCalendarDate.dateWithYear_month_day_hour_minute_second_timeZone_(
now.yearOfCommonEra(),
now.monthOfYear(),
now.dayOfMonth(),
0,
0,
0,
NSTimeZone.localTimeZone())
return self
@objc.IBAction
def choseDay_(self, sender):
prevSelDate = self.selectedDay()
selDay = self.selectedCell().tag() - self._startOffset + 1
selDate = NSCalendarDate.dateWithYear_month_day_hour_minute_second_timeZone_(
prevSelDate.yearOfCommonEra(),
prevSelDate.monthOfYear(),
selDay,
0,
0,
0,
NSTimeZone.localTimeZone())
self.setSelectedDay_(selDate)
self.highlightTodayIfVisible()
if self.delegate().respondsToSelector_('calendarMatrix:didChangeToDate:'):
self.delegate().calendarMatrix_didChangeToDate_(
self, selDate)
@objc.IBAction
def monthChanged_(self, sender):
thisDate = self.selectedDay()
currentYear = thisDate.yearOfCommonEra()
currentMonth = thisDate.monthOfYear()
if sender is self.nextMonthButton:
if currentMonth == 12:
currentMonth = 1
currentYear += 1
else:
currentMonth += 1
else:
if currentMonth == 1:
currentMonth = 12
currentYear -= 1
else:
currentMonth -= 1
self.setSelectedDay_(NSCalendarDate.dateWithYear_month_day_hour_minute_second_timeZone_(currentYear, currentMonth, 1, 0, 0, 0, NSTimeZone.localTimeZone()))
self.refreshCalendar()
self.choseDay_(self)
def setSelectedDay_(self, newDay):
self._selectedDay = newDay
def selectedDay(self):
return self._selectedDay
def refreshCalendar(self):
selDate = self.selectedDay()
currentMonth = selDate.monthOfYear()
currentYear = selDate.yearOfCommonEra()
firstOfMonth = NSCalendarDate.dateWithYear_month_day_hour_minute_second_timeZone_(
currentYear,
currentMonth,
1,
0,
0,
0,
NSTimeZone.localTimeZone())
self.monthName.setStringValue_(
firstOfMonth.descriptionWithCalendarFormat_("%B %Y"))
daysInMonth = gNumDaysInMonth[currentMonth]
if (currentMonth == 2) and isLeap(currentYear):
daysInMonth += 1
self._startOffset = firstOfMonth.dayOfWeek()
dayLabel = 1
for i in range(42):
cell = self.cellWithTag_(i)
if cell is None:
continue
if i < self._startOffset or i >= (daysInMonth + self._startOffset):
# blank out unused cells in the matrix
cell.setBordered_(False)
cell.setEnabled_(False)
cell.setTitle_("")
cell.setCellAttribute_to_(NSCellHighlighted, False)
else:
# Fill in valid days in the matrix
cell.setBordered_(True)
cell.setEnabled_(True)
cell.setFont_(NSFont.systemFontOfSize_(12))
cell.setTitle_(str(dayLabel))
dayLabel += 1
cell.setCellAttribute_to_(NSCellHighlighted, False)
self.selectCellWithTag_(selDate.dayOfMonth() + self._startOffset - 1)
self.highlightTodayIfVisible()
def highlightTodayIfVisible(self):
now = NSCalendarDate.date()
selDate = self.selectedDay()
if (selDate.yearOfCommonEra() == now.yearOfCommonEra()
and selDate.monthOfYear() == now.monthOfYear()
and selDate.dayOfMonth() == now.dayOfMonth()):
aCell = self.cellWithTag_(
now.dayOfMonth() + self._startOffset - 1)
aCell.setHighlightsBy_(NSMomentaryChangeButton)
aCell.setCellAttribute_to_(NSCellHighlighted, True)
def awakeFromNib(self):
self.setTarget_(self)
self.setAction_('choseDay:')
self.setAutosizesCells_(True)
self.refreshCalendar()
self.choseDay_(self)
|
nilq/baby-python
|
python
|
import warnings
from collections import OrderedDict
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter
from mindware.components.feature_engineering.transformations import _bal_balancer, _preprocessor, _rescaler, \
_image_preprocessor, _text_preprocessor, _bal_addons, _imb_balancer, _gen_addons, _res_addons, _sel_addons, \
EmptyTransformer
from mindware.components.utils.class_loader import get_combined_fe_candidtates
from mindware.components.utils.constants import CLS_TASKS
from mindware.components.feature_engineering import TRANS_CANDIDATES
builtin_stage = ['balancer', 'preprocessor', 'rescaler']
stage_list = ['balancer', 'preprocessor', 'rescaler']
thirdparty_candidates_dict = OrderedDict()
def set_stage(udf_stage_list, stage_candidates_dict):
    '''
    :param udf_stage_list: List, a list of stage names, e.g. ['my_stage', 'selector'].
    :param stage_candidates_dict: Dict mapping each user-defined stage name to the list
        of operators available in that stage. Each operator must be a Transformer.
    :return: None
    '''
global stage_list
stage_list = udf_stage_list
print("Current Stage: %s" % ', '.join(stage_list))
for stage in udf_stage_list:
if stage in builtin_stage:
print("Built-in stage '%s' found!" % stage)
else:
print("User-defined stage '%s' found!" % stage)
if stage not in stage_candidates_dict:
raise ValueError("Expected stage name '%s' in stage_candidates_dict." % stage)
if len(stage_candidates_dict[stage]) == 0:
warnings.warn("Candidate list for stage '%s' is empty! EmptyTransformer will be used instead!" % stage)
stage_candidates_dict[stage] = [EmptyTransformer]
thirdparty_candidates_dict[stage] = {candidate.__name__: candidate for candidate in
stage_candidates_dict[stage]}
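# Illustrative usage sketch (not part of the original module): register a custom
# 'my_selector' stage whose only candidate is the built-in EmptyTransformer; real
# pipelines would pass user-defined Transformer subclasses instead.
# set_stage(['balancer', 'preprocessor', 'my_selector', 'rescaler'],
#           {'my_selector': [EmptyTransformer]})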
def get_task_hyperparameter_space(task_type, include_preprocessors=None,
include_text=False, include_image=False, if_imbal=False,
optimizer='smac'):
"""
Fetch the underlying hyperparameter space for feature engineering.
Pipeline Space:
1. balancer: weight_balancer,
data_balancer.
2. scaler: normalizer, scaler, quantile.
3. preprocessor
:return: hyper space.
"""
if task_type in CLS_TASKS:
trans_types = TRANS_CANDIDATES['classification'].copy()
else:
trans_types = TRANS_CANDIDATES['regression'].copy()
_preprocessor_candidates, trans_types = get_combined_fe_candidtates(_preprocessor, _gen_addons, trans_types)
_preprocessor_candidates, trans_types = get_combined_fe_candidtates(_preprocessor_candidates, _sel_addons,
trans_types)
_rescaler_candidates, trans_types = get_combined_fe_candidtates(_rescaler, _res_addons, trans_types)
if not if_imbal:
_balancer_candadates, trans_types = get_combined_fe_candidtates(_bal_balancer, _bal_addons, trans_types)
else:
_balancer_candadates, trans_types = get_combined_fe_candidtates(_imb_balancer, _bal_addons, trans_types)
# TODO: Avoid transformations, which would take too long
# feature_learning = ["kitchen_sinks", "kernel_pca", "nystroem_sampler"]
# if task_type in CLS_TASKS:
# classifier_set = ["adaboost", "decision_tree", "extra_trees",
# "gradient_boosting", "k_nearest_neighbors",
# "libsvm_svc", "random_forest", "gaussian_nb",
# "decision_tree", "lightgbm"]
#
# if estimator_id in classifier_set:
# for tran_id in [12, 13, 15]:
# if tran_id in trans_types:
# trans_types.remove(tran_id)
preprocessor = dict()
if include_preprocessors:
for key in include_preprocessors:
if key not in _preprocessor_candidates:
raise ValueError(
"Preprocessor %s not in built-in preprocessors! Only the following preprocessors are supported: %s." % (
key, ','.join(_preprocessor_candidates.keys())))
preprocessor[key] = _preprocessor_candidates[key]
trans_types.append(_preprocessor_candidates[key].type)
else:
preprocessor = _preprocessor_candidates
configs = dict()
if include_image:
image_preprocessor_dict = _get_configuration_space(_image_preprocessor, optimizer=optimizer)
configs['image_preprocessor'] = image_preprocessor_dict
if include_text:
text_preprocessor_dict = _get_configuration_space(_text_preprocessor, optimizer=optimizer)
configs['text_preprocessor'] = text_preprocessor_dict
for stage in stage_list:
if stage == 'preprocessor':
stage_dict = _get_configuration_space(preprocessor, trans_types, optimizer=optimizer)
elif stage == 'rescaler':
stage_dict = _get_configuration_space(_rescaler_candidates, trans_types, optimizer=optimizer)
elif stage == 'balancer':
if task_type in CLS_TASKS:
stage_dict = _get_configuration_space(_balancer_candadates, optimizer=optimizer)
else:
stage_dict = None
else:
# Third party stage
trans_types.extend([candidate.type for _, candidate in thirdparty_candidates_dict[stage].items()])
stage_dict = _get_configuration_space(thirdparty_candidates_dict[stage], trans_types, optimizer=optimizer)
configs[stage] = stage_dict
cs = _build_hierachical_configspace(configs, optimizer=optimizer)
return cs
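# Illustrative usage sketch (not part of the original module): fetch the feature
# engineering space for a classification task. The task-type argument must be one of
# the constants collected in CLS_TASKS; the exact constant depends on the mindware version.
# cs = get_task_hyperparameter_space(CLS_TASKS[0], optimizer='smac')
# print(len(cs.get_hyperparameters()))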
def _get_configuration_space(builtin_transformers, trans_type=None, optimizer='smac'):
config_dict = dict()
for tran_key in builtin_transformers:
tran = builtin_transformers[tran_key]
tran_id = tran.type
if trans_type is None or tran_id in trans_type:
try:
sub_configuration_space = builtin_transformers[tran_key].get_hyperparameter_search_space(
optimizer=optimizer)
config_dict[tran_key] = sub_configuration_space
except:
if optimizer == 'smac':
config_dict[tran_key] = ConfigurationSpace()
elif optimizer == 'tpe':
config_dict[tran_key] = {}
return config_dict
def _add_hierachical_configspace(cs, config, parent_name):
config_cand = list(config.keys())
config_option = CategoricalHyperparameter(parent_name, config_cand,
default_value=config_cand[0])
cs.add_hyperparameter(config_option)
for config_item in config_cand:
sub_configuration_space = config[config_item]
parent_hyperparameter = {'parent': config_option,
'value': config_item}
cs.add_configuration_space(config_item, sub_configuration_space,
parent_hyperparameter=parent_hyperparameter)
def _build_hierachical_configspace(configs, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
for config_key in configs:
if configs[config_key] is not None:
_add_hierachical_configspace(cs, configs[config_key], config_key)
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {}
def dict2hi(dictionary):
hi_list = list()
for key in dictionary:
hi_list.append((key, dictionary[key]))
return hi_list
for config_key in configs:
if configs[config_key] is not None:
space[config_key] = hp.choice(config_key, dict2hi(configs[config_key]))
return space
|
nilq/baby-python
|
python
|
# Copyright 2020 by Christophe Lambin
# All rights reserved.
import logging
from prometheus_client import start_http_server
from libpimon.version import version
from libpimon.configuration import print_configuration
from libpimon.cpu import CPUTempProbe, CPUFreqProbe
from libpimon.gpio import GPIOProbe
from libpimon.openvpn import OpenVPNProbe, OpenVPNStatusProbe
from libpimon.mediacentre import TransmissionProbe, MonitorProbe
from pimetrics.scheduler import Scheduler
def initialise(config):
scheduler = Scheduler()
# Probes
if config.monitor_cpu:
try:
scheduler.register(
CPUFreqProbe(config.freq_filename),
5
)
scheduler.register(
CPUTempProbe(config.temp_filename, 1000),
5
)
except FileNotFoundError as err:
logging.warning(f'Could not add CPU monitor(s): {err}')
if config.monitor_fan:
try:
scheduler.register(
GPIOProbe(config.monitor_fan_pin),
5
)
except RuntimeError:
logging.warning('Could not add Fan monitor. Possibly /dev/gpiomem isn\'t accessible?')
if config.monitor_vpn:
try:
scheduler.register(
OpenVPNProbe(config.monitor_vpn_client_status),
5
)
except FileNotFoundError as err:
logging.warning(f'Could not add OpenVPN monitor: {err}')
if config.monitor_vpn_proxies:
scheduler.register(
OpenVPNStatusProbe(config.monitor_vpn_proxies),
60
)
else:
logging.warning('No VPN Proxies defined. VPN status monitoring is disabled')
if config.monitor_mediaserver:
if config.monitor_mediaserver_transmission:
scheduler.register(
TransmissionProbe(config.monitor_mediaserver_transmission),
5
)
if config.monitor_mediaserver_sonarr:
scheduler.register(
MonitorProbe(
config.monitor_mediaserver_sonarr, MonitorProbe.App.sonarr,
config.monitor_mediaserver_sonarr_apikey),
60
)
if config.monitor_mediaserver_radarr:
scheduler.register(
MonitorProbe(
config.monitor_mediaserver_radarr, MonitorProbe.App.radarr,
config.monitor_mediaserver_radarr_apikey),
60
)
return scheduler
def pimon(config):
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG if config.debug else logging.INFO)
logging.info(f'Starting pimon v{version}')
logging.info(f'Configuration: {print_configuration(config)}')
start_http_server(config.port)
scheduler = initialise(config)
if config.once:
scheduler.run(once=True)
else:
while True:
scheduler.run(duration=config.interval)
return 0
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import opcodes as OperandDef
from ...serialize import BoolField, KeyField
from ..utils import to_numpy
from ..core import TensorOrder
from .core import TensorHasInput
class TensorFromDataFrame(TensorHasInput):
""" represent tensor from DataFrame """
_op_type_ = OperandDef.TENSOR_FROM_DATAFRAME
_input = KeyField('_input')
_extract_multi_index = BoolField('extract_multi_index')
def __init__(self, extract_multi_index=False, **kw):
super().__init__(_extract_multi_index=extract_multi_index, **kw)
@classmethod
def execute(cls, ctx, op: 'TensorFromDataFrame'):
df = ctx[op.inputs[0].key]
if op._extract_multi_index:
df = df.to_frame()
ctx[op.outputs[0].key] = to_numpy(df).astype(op.dtype, order='F')
@classmethod
def tile(cls, op: 'TensorFromDataFrame'):
output = op.outputs[0]
out_chunks = []
for c in op.input.chunks:
shape = (c.shape[0], output.shape[1]) if op._extract_multi_index else c.shape
index = (c.index[0], 0) if op._extract_multi_index else c.index
out_chunk = op.copy().reset_key().new_chunk(
[c], shape=shape, index=index, order=output.order)
out_chunks.append(out_chunk)
new_op = op.copy()
nsplits = (op.input.nsplits[0], (output.shape[1],)) if op._extract_multi_index else op.input.nsplits
return new_op.new_tensors(op.inputs, output.shape, order=output.order,
chunks=out_chunks, nsplits=nsplits)
def __call__(self, a, order=None):
from ...dataframe.core import INDEX_TYPE, IndexValue
if self._extract_multi_index and isinstance(a, INDEX_TYPE) \
and isinstance(a.index_value.value, IndexValue.MultiIndex):
order = a.order if order is None else order
return self.new_tensor([a], (a.shape[0], len(a.index_value.value.names)), order=order)
else:
self._extract_multi_index = False
return super().__call__(a, order=order)
def from_dataframe(in_df, dtype=None):
from ...dataframe.utils import build_empty_df
if dtype is None:
empty_pdf = build_empty_df(in_df.dtypes)
dtype = to_numpy(empty_pdf).dtype
op = TensorFromDataFrame(dtype=dtype, gpu=in_df.op.gpu)
return op(in_df, order=TensorOrder.F_ORDER) # return tensor with F-order always
def from_series(in_series, dtype=None):
op = TensorFromDataFrame(dtype=dtype or in_series.dtype, gpu=in_series.op.gpu)
return op(in_series, order=TensorOrder.F_ORDER) # return tensor with F-order always
def from_index(in_index, dtype=None, extract_multi_index=False):
op = TensorFromDataFrame(dtype=dtype or in_index.dtype, gpu=in_index.op.gpu,
extract_multi_index=extract_multi_index)
return op(in_index, order=TensorOrder.F_ORDER) # return tensor with F-order always
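# Illustrative usage sketch (not part of the original module): convert a mars DataFrame
# into a Fortran-ordered tensor. Assumes the public mars.dataframe API of the version
# this module targets.
# import pandas as pd
# import mars.dataframe as md
# df = md.DataFrame(pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]}))
# t = from_dataframe(df)   # lazy tensor with shape (2, 2), F-order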
|
nilq/baby-python
|
python
|
# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones
# modified from mmclassification timm_backbone.py
try:
import timm
except ImportError:
timm = None
from mmcv.cnn.bricks.registry import NORM_LAYERS
from openmixup.utils import get_root_logger
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
def print_timm_feature_info(feature_info):
"""Print feature_info of timm backbone to help development and debug.
Args:
feature_info (list[dict] | timm.models.features.FeatureInfo | None):
feature_info of timm backbone.
"""
logger = get_root_logger()
if feature_info is None:
logger.warning('This backbone does not have feature_info')
elif isinstance(feature_info, list):
for feat_idx, each_info in enumerate(feature_info):
logger.info(f'backbone feature_info[{feat_idx}]: {each_info}')
else:
try:
logger.info(f'backbone out_indices: {feature_info.out_indices}')
logger.info(f'backbone out_channels: {feature_info.channels()}')
logger.info(f'backbone out_strides: {feature_info.reduction()}')
except AttributeError:
logger.warning('Unexpected format of backbone feature_info')
@BACKBONES.register_module()
class TIMMBackbone(BaseBackbone):
"""Wrapper to use backbones from timm library.
More details can be found in
`timm <https://github.com/rwightman/pytorch-image-models>`_.
See especially the document for `feature extraction
<https://rwightman.github.io/pytorch-image-models/feature_extraction/>`_.
Args:
model_name (str): Name of timm model to instantiate.
in_channels (int): Number of input image channels. Defaults to 3.
num_classes (int): Number of classes for classification head (used when
features_only is False). Default to 1000.
features_only (bool): Whether to extract feature pyramid (multi-scale
feature maps from the deepest layer at each stride) by using timm
supported `forward_features()`. Defaults to False.
pretrained (bool): Whether to load pretrained weights.
Defaults to False.
checkpoint_path (str): Path of checkpoint to load at the last of
``timm.create_model``. Defaults to empty string, which means
not loading.
init_cfg (dict or list[dict], optional): Initialization config dict of
OpenMMLab projects (removed!). Defaults to None.
**kwargs: Other timm & model specific arguments.
"""
def __init__(self,
model_name,
num_classes=1000,
in_channels=3,
features_only=False,
pretrained=False,
checkpoint_path='',
**kwargs):
if timm is None:
raise RuntimeError(
'Failed to import timm. Please run "pip install timm". '
'"pip install dataclasses" may also be needed for Python 3.6.')
if not isinstance(pretrained, bool):
raise TypeError('pretrained must be bool, not str for model path')
super(TIMMBackbone, self).__init__()
if 'norm_layer' in kwargs:
kwargs['norm_layer'] = NORM_LAYERS.get(kwargs['norm_layer'])
self.timm_model = timm.create_model(
model_name=model_name,
pretrained=pretrained,
in_chans=in_channels,
checkpoint_path=checkpoint_path,
num_classes=0 if features_only else num_classes,
**kwargs)
self.features_only = features_only
# reset classifier
if hasattr(self.timm_model, 'reset_classifier'):
self.timm_model.reset_classifier(0, '')
# Hack to use pretrained weights from timm
if pretrained or checkpoint_path:
self._is_init = True
feature_info = getattr(self.timm_model, 'feature_info', None)
print_timm_feature_info(feature_info)
def forward(self, x):
if self.features_only:
features = self.timm_model.forward_features(x)
else:
features = self.timm_model(x)
if isinstance(features, (list, tuple)):
features = list(features)
else:
features = [features]
return features
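# Illustrative usage sketch (not part of the original module): wrap a timm ResNet-18
# and run a dummy batch; 'resnet18' is a standard timm model name, and the result is a
# list containing a single feature tensor because features_only defaults to False.
# import torch
# backbone = TIMMBackbone(model_name='resnet18', pretrained=False)
# feats = backbone(torch.randn(1, 3, 224, 224))
# print([tuple(f.shape) for f in feats])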
|
nilq/baby-python
|
python
|
import torch
import os
import pickle
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
class TrainUtil():
def __init__(self, checkpoint_path='checkpoints', version='mcts_nas_net_v1'):
self.checkpoint_path = checkpoint_path
self.version = version
return
def save_model_fixed(self, epoch, fixed_cnn, fixed_cnn_optmizer, save_best=False, **kwargs):
filename = os.path.join(self.checkpoint_path, self.version) + '.pth'
# Torch Save State Dict
state = {
'epoch': epoch+1,
'shared_cnn': fixed_cnn.state_dict(),
'shared_cnn_optmizer': fixed_cnn_optmizer.state_dict(),
}
for key, value in kwargs.items():
state[key] = value
torch.save(state, filename)
filename = os.path.join(self.checkpoint_path, self.version) + '_best.pth'
if save_best:
torch.save(state, filename)
return
def load_model_fixed(self, fixed_cnn, fixed_cnn_optmizer, **kwargs):
filename = os.path.join(self.checkpoint_path, self.version) + '.pth'
# Load Torch State Dict
checkpoints = torch.load(filename)
        # Keys must match those written by save_model_fixed ('shared_cnn', 'shared_cnn_optmizer').
        fixed_cnn.load_state_dict(checkpoints['shared_cnn'])
        fixed_cnn_optmizer.load_state_dict(checkpoints['shared_cnn_optmizer'])
print(filename + " Loaded!")
return checkpoints
def save_model(self,
mcts,
shared_cnn,
shared_cnn_optmizer,
shared_cnn_schduler,
estimator,
estimator_optmizer,
epoch,
**kwargs):
mcts_filename = os.path.join(self.checkpoint_path, self.version) + '_mcts' + '.pkl'
filename = os.path.join(self.checkpoint_path, self.version) + '.pth'
# Torch Save State Dict
state = {
'epoch': epoch+1,
'shared_cnn': shared_cnn.state_dict(),
'shared_cnn_optmizer': shared_cnn_optmizer.state_dict(),
'shared_cnn_schduler': shared_cnn_schduler.state_dict(),
'estimator': estimator.state_dict(),
'estimator_optmizer': estimator_optmizer.state_dict()
}
for key, value in kwargs.items():
state[key] = value
torch.save(state, filename)
print(filename + " saved!")
# Save MCTS to pickle
rolloutPolicy, searchPolicy = mcts.rollout, mcts.searchPolicy
mcts.rollout, mcts.searchPolicy = None, None
with open(mcts_filename, 'wb') as handle:
pickle.dump(mcts, handle, protocol=pickle.HIGHEST_PROTOCOL)
print(mcts_filename + " Saved!")
mcts.rollout, mcts.searchPolicy = rolloutPolicy, searchPolicy
return
def load_model(self,
shared_cnn,
shared_cnn_optmizer,
shared_cnn_schduler,
estimator,
estimator_optmizer,
**kwargs):
filename = os.path.join(self.checkpoint_path, self.version) + '.pth'
mcts_filename = os.path.join(self.checkpoint_path, self.version) + '_mcts' + '.pkl'
# Load Torch State Dict
checkpoints = torch.load(filename)
shared_cnn.load_state_dict(checkpoints['shared_cnn'])
shared_cnn_optmizer.load_state_dict(checkpoints['shared_cnn_optmizer'])
shared_cnn_schduler.load_state_dict(checkpoints['shared_cnn_schduler'])
shared_cnn_schduler.optimizer = shared_cnn_optmizer
estimator.load_state_dict(checkpoints['estimator'])
estimator_optmizer.load_state_dict(checkpoints['estimator_optmizer'])
print(filename + " Loaded!")
# Load MCTS
with open(mcts_filename, 'rb') as handle:
mcts = pickle.load(handle)
print(mcts_filename + " Loaded!")
return checkpoints, mcts
|
nilq/baby-python
|
python
|