text stringlengths 957 885k |
|---|
<gh_stars>100-1000
import click
import json, cPickle
import requests, zipfile
import os, glob
from music21 import analysis, converter, corpus, meter
from music21.note import Note
from constants import *
@click.group()
def datasets():
    """CLI command group collecting all dataset-construction commands."""
@click.command()
@click.option('--keep-fermatas', type=bool, default=True)
@click.option('--subset', type=bool, default=False)
@click.option('--mono', type=bool, default=False, help='Extract only monophonic Soprano part')
@click.option('--parts_to_mask', '-m', multiple=True, type=str)
def prepare(keep_fermatas, subset, mono, parts_to_mask=()):
    """
    Prepares polyphonic scores using a chord tuple representation.

    Each score is transformed into a sequence of tuples with a constant
    timestep of (1/`FRAMES_PER_CROTCHET`) crotchets between consecutive chords.
    Each encoded chord has the following format:
        Notes : List[(
            Midi: Int,
            Tied : Bool (true if note is continuation of previous note)
        )]

    Writes one `.txt` (plain-text) and one `.utf` (vocabulary-encoded) file
    per chorale into `SCRATCH_DIR`.
    """
    txt_to_utf, utf_to_txt = build_vocabulary()
    # don't add to `utf_to_txt` because samples should never contain BLANK_MASK
    txt_to_utf[BLANK_MASK_TXT] = BLANK_MASK_UTF
    it = iter_standardized_chorales()
    if subset:
        it = [next(it) for _ in range(5)]
    for score in it:
        bwv_id = score.metadata.title
        print('Processing BWV {0}'.format(bwv_id))
        # remove all except 'Soprano' part if --mono;
        # iterate over a snapshot so removal doesn't disturb the iteration
        if mono:
            for part in list(score.parts):
                if part.id != 'Soprano':
                    score.remove(part)
        #key = score.analyze('key') # TODO: filter to only majors for task?
        encoded_score = encode_score(score, keep_fermatas=keep_fermatas, parts_to_mask=parts_to_mask)
        encoded_score_txt = to_text(encoded_score)
        # build an output name that records every preprocessing choice
        fname = 'BWV-{0}'.format(bwv_id)
        if mono:
            fname += '-mono'
        if parts_to_mask:
            fname += '-mask-{0}'.format('-'.join(parts_to_mask))
        else:
            fname += '-nomask'
        if keep_fermatas:
            fname += '-fermatas'
        else:
            fname += '-nofermatas'
        out_path = SCRATCH_DIR + '/{0}'.format(fname)
        print('Writing {0}'.format(out_path))
        with open(out_path + '.txt', 'w') as fd:
            fd.write('\n'.join(encoded_score_txt))
        with open(out_path + '.utf', 'w') as fd:
            fd.write(to_utf(txt_to_utf, encoded_score_txt))
@click.command()
@click.argument('files', nargs=-1, required=True)
@click.option('-o', '--output', type=click.File('wb'), default=SCRATCH_DIR + '/concat_corpus.txt')
def concatenate_corpus(files, output):
    """Concatenates individual files together into single corpus.

    Newlines are stripped so each score's encoding runs together.
    Try `bachbot concatenate_corpus scratch/*.utf`.
    """
    print('Writing concatenated corpus to {0}'.format(output.name))
    for fp in files:
        with open(fp, 'rb') as fd:
            # drop newlines; same result as the old ''.join(filter(...)) but clearer
            output.write(fd.read().replace('\n', ''))
@click.command()
@click.option('--utf-to-txt-json', type=click.File('rb'), default=SCRATCH_DIR + '/utf_to_txt.json')
@click.argument('in-file', type=click.File('rb'))
@click.argument('out-file', type=click.File('wb'))
def encode_text(utf_to_txt_json, in_file, out_file):
    """Re-encodes a plain-text score into UTF symbols using the stored vocabulary."""
    utf_to_txt = json.load(utf_to_txt_json)
    # invert the stored mapping to go plain-text token -> UTF symbol
    txt_to_utf = dict((txt, utf) for utf, txt in utf_to_txt.items())
    out_file.write(to_utf(txt_to_utf, in_file))
def standardize_key(score):
    """Converts into the key of C major or A minor.

    Adapted from https://gist.github.com/aldous-rey/68c6c43450517aa47474

    Raises:
        ValueError: if the analyzed key mode is neither 'major' nor 'minor'
            (previously this surfaced as an opaque NameError on `halfSteps`).
    """
    # semitone offsets to reach C (major) / A (minor) from each tonic
    majors = {"A-": 4, "A": 3, "B-": 2, "B": 1, "C": 0, "C#": -1, "D-": -1,
              "D": -2, "E-": -3, "E": -4, "F": -5, "F#": 6, "G-": 6, "G": 5}
    minors = {"A-": 1, "A": 0, "B-": -1, "B": -2, "C": -3, "C#": -4, "D-": -4,
              "D": -5, "E-": 6, "E": 5, "F": 4, "F#": 3, "G-": 3, "G": 2}
    # transpose score
    key = score.analyze('key')
    if key.mode == "major":
        halfSteps = majors[key.tonic.name]
    elif key.mode == "minor":
        halfSteps = minors[key.tonic.name]
    else:
        raise ValueError('Unexpected key mode: {0}'.format(key.mode))
    tScore = score.transpose(halfSteps)
    # transpose key signature: music21 transposes notes but not signatures
    for ks in tScore.flat.getKeySignatures():
        ks.transpose(halfSteps, inPlace=True)
    return tScore
def extract_SATB(score):
    """
    Extracts the Soprano, Alto, Tenor, and Bass parts from a piece. The returned score is guaranteed
    to have parts with names 'Soprano', 'Alto', 'Tenor', and 'Bass'.

    This method mutates its argument.
    """
    # alternate spellings of each voice name seen in the corpus
    ids = dict()
    ids['Soprano'] = {
        'Soprano',
        'S.',
        'Soprano 1',  # NOTE: soprano1 or soprano2?
        'Soprano\rOboe 1\rViolin1'}
    ids['Alto'] = {'Alto', 'A.'}
    ids['Tenor'] = {'Tenor', 'T.'}
    ids['Bass'] = {'Bass', 'B.'}
    id_to_name = {id: name for name in ids for id in ids[name]}
    # iterate over a snapshot: removing parts while iterating `score.parts`
    # directly can skip elements (bug in the original)
    for part in list(score.parts):
        if part.id in id_to_name:
            part.id = id_to_name[part.id]
        else:
            score.remove(part)
    return score
def build_vocabulary():
    """Loads the UTF<->text vocabulary from SCRATCH_DIR, creating it if absent.

    Returns:
        (txt_to_utf, utf_to_txt): mutually inverse dicts mapping plain-text
        tokens to single UTF symbols and back.
    """
    vocab_path = SCRATCH_DIR + '/utf_to_txt.json'
    if os.path.exists(vocab_path):
        with open(vocab_path, 'r') as f:
            utf_to_txt = json.load(f)
        txt_to_utf = {v: k for k, v in utf_to_txt.items()}
    else:
        # all MIDI notes and tie/notie, plus the structural symbols
        vocabulary = set(str((midi, tie)) for tie in [True, False] for midi in range(128))
        vocabulary.update([CHORD_BOUNDARY_DELIM, FERMATA_SYM])
        # sort before enumerating so codepoint assignment is deterministic
        # across runs (set iteration order is not)
        txt_to_utf = {txt: unichr(i) for i, txt in enumerate(sorted(vocabulary))}
        txt_to_utf['START'] = START_DELIM
        txt_to_utf['END'] = END_DELIM
        utf_to_txt = {utf: txt for txt, utf in txt_to_utf.items()}
        # save vocabulary
        with open(vocab_path, 'w') as fd:
            print('Writing vocabulary to ' + vocab_path)
            json.dump(utf_to_txt, fd)
    return txt_to_utf, utf_to_txt
def iter_standardized_chorales():
    """Yields 4/4 Bach chorales standardized to Cmaj/Amin with SATB parts extracted."""
    chorales = corpus.chorales.Iterator(numberingSystem='bwv', returnType='stream')
    for score in chorales:
        # restrict to common-time chorales only
        if score.getTimeSignatures()[0].ratioString != '4/4':
            continue
        yield extract_SATB(standardize_key(score))
@click.command()
@click.argument('in_path', type=click.Path(exists=True))
@click.argument('outfile', type=click.File('w'))
def prepare_harm_input(in_path, outfile):
    """Prepares and encodes a musicXML file containing a monophonic melody line as a Soprano voice to harmonize."""
    txt_to_utf, utf_to_txt = build_vocabulary()
    # don't add to `utf_to_txt` because samples should never contain BLANK_MASK
    txt_to_utf[BLANK_MASK_TXT] = BLANK_MASK_UTF

    melody = converter.parseFile(in_path)
    encoded_score = []
    for note in melody.flat.notesAndRests:
        n_frames = int(note.quarterLength * FRAMES_PER_CROTCHET)
        if note.isRest:
            encoded_score.extend(n_frames * [[]])
            continue
        has_fermata = any(e.isClassOrSubclass(('Fermata',)) for e in note.expressions)
        # a note counts as tied unless it begins a new tie
        has_tie = note.tie is not None and note.tie.type != 'start'
        # Soprano pitch followed by three masked (to-be-harmonized) voices
        encoded_chord = [(note.pitch.midi, has_tie)] + [BLANK_MASK_TXT for _ in range(3)]
        encoded_score.append((has_fermata, encoded_chord))
        # continuation frames repeat the chord with every pitch tied
        held = [tok if tok == BLANK_MASK_TXT else (tok[0], True) for tok in encoded_chord]
        encoded_score.extend((n_frames - 1) * [(has_fermata, held)])
    outfile.write(to_utf(txt_to_utf, to_text(encoded_score)))
def encode_score(score, keep_fermatas=True, parts_to_mask=()):
    """
    Encodes a music21 score into a List of chords, where each chord is represented with
    a (Fermata :: Bool, List[(Note :: Integer, Tie :: Bool)]).

    If `keep_fermatas` is False, all `has_fermata`s will be False.
    All tokens from parts in `parts_to_mask` will have output tokens `BLANK_MASK_TXT`.

    Time is discretized such that each crotchet occupies `FRAMES_PER_CROTCHET` frames.
    """
    encoded_score = []
    # aggregate parts, remove markup; part-id groups let us identify masked parts
    chords = (score
              .quantize((FRAMES_PER_CROTCHET,))
              .chordify(addPartIdAsGroup=bool(parts_to_mask))
              .flat
              .notesAndRests)
    for chord in chords:
        n_frames = int(chord.quarterLength * FRAMES_PER_CROTCHET)
        # expand chord/rest s.t. constant timestep between frames
        if chord.isRest:
            encoded_score.extend(n_frames * [[]])
            continue
        has_fermata = keep_fermatas and any(
            e.isClassOrSubclass(('Fermata',)) for e in chord.expressions)
        encoded_chord = []
        # TODO: sorts Soprano, Bass, Alto, Tenor without breaking ties
        # c = chord.sortAscending()
        # sorted_notes = [c[-1], c[0]] + c[1:-1]
        # for note in sorted_notes:
        for note in chord:
            if parts_to_mask and note.pitch.groups[0] in parts_to_mask:
                encoded_chord.append(BLANK_MASK_TXT)
            else:
                has_tie = note.tie is not None and note.tie.type != 'start'
                encoded_chord.append((note.pitch.midi, has_tie))
        encoded_score.append((has_fermata, encoded_chord))
        # repeat pitches to expand chord into multiple frames;
        # all repeated frames when expanding a chord should be tied
        held = [tok if tok == BLANK_MASK_TXT else (tok[0], True) for tok in encoded_chord]
        encoded_score.extend((n_frames - 1) * [(has_fermata, held)])
    return encoded_score
def to_utf(txt_to_utf, score_txt):
    """
    Converts a text-encoded score into UTF encoding (appending start/end delimiters).

    Throws `KeyError` when out-of-vocabulary token is encountered
    """
    symbols = [txt_to_utf[token.strip()] for token in score_txt]
    return START_DELIM + ''.join(symbols) + END_DELIM
def to_text(encoded_score):
    """Converts a Python encoded score into plain-text."""
    out = []
    for i, chord_pair in enumerate(encoded_score):
        if i > 0:
            # chord boundary delimiter between consecutive frames
            out.append(CHORD_BOUNDARY_DELIM)
        if not chord_pair:
            continue
        is_fermata, chord = chord_pair
        if is_fermata:
            out.append(FERMATA_SYM)
        out.extend(str(note) for note in chord)
    return out
# Register all subcommands on the `datasets` group. A plain loop (rather than
# `map`) evaluates eagerly under both Python 2 and Python 3, where `map` is lazy
# and the side effects would never run.
for _command in [
        prepare,
        prepare_harm_input,
        encode_text,
        concatenate_corpus,
]:
    datasets.add_command(_command)
|
<reponame>mail2nsrajesh/oslo.service
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for eventlet backdoor
"""
import errno
import os
import socket
import eventlet
import mock
from oslo_service import eventlet_backdoor
from oslo_service.tests import base
class BackdoorSocketPathTest(base.ServiceBaseTestCase):
    """Tests for the eventlet backdoor bound to a unix-domain socket path.

    `eventlet.listen`/`eventlet.spawn` are mocked so no real socket is
    created; decorator order means the innermost patch (`listen`) is the
    first mock argument.
    """

    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_path(self, listen_mock, spawn_mock):
        # Happy path: the configured socket path is returned unchanged.
        self.config(backdoor_socket="/tmp/my_special_socket")
        listen_mock.side_effect = mock.Mock()
        path = eventlet_backdoor.initialize_if_enabled(self.conf)
        self.assertEqual("/tmp/my_special_socket", path)

    @mock.patch.object(os, 'unlink')
    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_path_already_exists(self, listen_mock,
                                          spawn_mock, unlink_mock):
        # Stale socket file: first listen fails EADDRINUSE, the path is
        # unlinked, and the retry succeeds.
        self.config(backdoor_socket="/tmp/my_special_socket")
        sock = mock.Mock()
        listen_mock.side_effect = [socket.error(errno.EADDRINUSE, ''), sock]
        path = eventlet_backdoor.initialize_if_enabled(self.conf)
        self.assertEqual("/tmp/my_special_socket", path)
        unlink_mock.assert_called_with("/tmp/my_special_socket")

    @mock.patch.object(os, 'unlink')
    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_path_already_exists_and_gone(self, listen_mock,
                                                   spawn_mock, unlink_mock):
        # The stale file vanishes before unlink (ENOENT); the race is
        # tolerated and initialization still succeeds.
        self.config(backdoor_socket="/tmp/my_special_socket")
        sock = mock.Mock()
        listen_mock.side_effect = [socket.error(errno.EADDRINUSE, ''), sock]
        unlink_mock.side_effect = OSError(errno.ENOENT, '')
        path = eventlet_backdoor.initialize_if_enabled(self.conf)
        self.assertEqual("/tmp/my_special_socket", path)
        unlink_mock.assert_called_with("/tmp/my_special_socket")

    @mock.patch.object(os, 'unlink')
    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_path_already_exists_and_not_gone(self, listen_mock,
                                                       spawn_mock,
                                                       unlink_mock):
        # unlink fails with a real error (EPERM): it must propagate.
        self.config(backdoor_socket="/tmp/my_special_socket")
        listen_mock.side_effect = socket.error(errno.EADDRINUSE, '')
        unlink_mock.side_effect = OSError(errno.EPERM, '')
        self.assertRaises(OSError, eventlet_backdoor.initialize_if_enabled,
                          self.conf)

    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_path_no_perms(self, listen_mock, spawn_mock):
        # Non-EADDRINUSE socket errors (here EPERM) must propagate.
        self.config(backdoor_socket="/tmp/my_special_socket")
        listen_mock.side_effect = socket.error(errno.EPERM, '')
        self.assertRaises(socket.error,
                          eventlet_backdoor.initialize_if_enabled,
                          self.conf)
class BackdoorPortTest(base.ServiceBaseTestCase):
    """Tests for the eventlet backdoor bound to a TCP port or port range."""

    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_port(self, listen_mock, spawn_mock):
        # Single fixed port: the bound port number is returned.
        self.config(backdoor_port=1234)
        sock = mock.Mock()
        sock.getsockname.return_value = ('127.0.0.1', 1234)
        listen_mock.return_value = sock
        port = eventlet_backdoor.initialize_if_enabled(self.conf)
        self.assertEqual(1234, port)

    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_port_inuse(self, listen_mock, spawn_mock):
        # A single busy port has no fallback: the error propagates.
        self.config(backdoor_port=2345)
        listen_mock.side_effect = socket.error(errno.EADDRINUSE, '')
        self.assertRaises(socket.error,
                          eventlet_backdoor.initialize_if_enabled, self.conf)

    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_port_range(self, listen_mock, spawn_mock):
        # 'low:high' range: the first free port (here the lowest) is used.
        self.config(backdoor_port='8800:8899')
        sock = mock.Mock()
        sock.getsockname.return_value = ('127.0.0.1', 8800)
        listen_mock.return_value = sock
        port = eventlet_backdoor.initialize_if_enabled(self.conf)
        self.assertEqual(8800, port)

    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_port_range_one_inuse(self, listen_mock, spawn_mock):
        # First port busy: the next port in the range is tried and bound.
        self.config(backdoor_port='8800:8900')
        sock = mock.Mock()
        sock.getsockname.return_value = ('127.0.0.1', 8801)
        listen_mock.side_effect = [socket.error(errno.EADDRINUSE, ''), sock]
        port = eventlet_backdoor.initialize_if_enabled(self.conf)
        self.assertEqual(8801, port)

    @mock.patch.object(eventlet, 'spawn')
    @mock.patch.object(eventlet, 'listen')
    def test_backdoor_port_range_all_inuse(self, listen_mock, spawn_mock):
        # Every port in the range busy: the last error propagates.
        self.config(backdoor_port='8800:8899')
        side_effects = []
        for i in range(8800, 8900):
            side_effects.append(socket.error(errno.EADDRINUSE, ''))
        listen_mock.side_effect = side_effects
        self.assertRaises(socket.error,
                          eventlet_backdoor.initialize_if_enabled, self.conf)

    def test_backdoor_port_reverse_range(self):
        # low > high is a configuration error.
        self.config(backdoor_port='8888:7777')
        self.assertRaises(eventlet_backdoor.EventletBackdoorConfigValueError,
                          eventlet_backdoor.initialize_if_enabled, self.conf)

    def test_backdoor_port_bad(self):
        # Non-numeric port is a configuration error.
        self.config(backdoor_port='abc')
        self.assertRaises(eventlet_backdoor.EventletBackdoorConfigValueError,
                          eventlet_backdoor.initialize_if_enabled, self.conf)
|
<filename>dbe/issues/views.py
from pprint import pprint
from django.http import HttpResponse
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.admin.views.decorators import staff_member_required
from django.forms import forms
from django.core.mail import send_mail
from shared.utils import *
from issues.models import *
from issues.forms import *
from mcbv.edit_custom import UpdateView, FormSetView
from mcbv.list_custom import DetailListCreateView
@staff_member_required
def update_issue(request, pk, mode=None, action=None):
    """AJAX view, toggle Done on/off, set progress or delete an issue."""
    issue = Issue.obj.get(pk=pk)
    if mode == "delete":
        issue.delete()
        return redir("admin:issues_issue_changelist")
    # non-delete modes set an attribute: `progress` takes an int, others a bool
    value = int(action) if mode == "progress" else (action == "on")
    setattr(issue, mode, value)
    issue.save()
    return HttpResponse('')
@staff_member_required
def delete_comment(request, pk):
    """Deletes a single comment, then bounces back to the referring page."""
    comment = IssueComment.obj.get(pk=pk)
    comment.delete()
    return redir(referer(request))
class UpdateIssue(UpdateView):
    # Model/form wiring for the MCBV UpdateView.
    form_model = Issue
    modelform_class = IssueForm
    # notification template slots: (issue name, server root, url, comment body)
    msg_tpl = "Issue '%s' was updated <%s%s>\n\n%s"
    template_name = "issue_form.html"

    def modelform_valid(self, modelform):
        """ If form was changed, send a notification email to the (new) issue owner.

        Note: at the start of the function, FK relationships are already updated in `self.object`.
        """
        if modelform.has_changed() and self.modelform_object.owner:
            notify_owner(self.request, self.modelform_object, "Issue Updated", self.msg_tpl)
        return super(UpdateIssue, self).modelform_valid(modelform)
class UpdateComment(UpdateView):
    """Edit an existing comment; returns to its parent issue on success."""
    form_model = IssueComment
    modelform_class = CommentForm
    template_name = "issues/comment_form.html"

    def get_success_url(self):
        parent_issue = self.modelform_object.issue
        return parent_issue.get_absolute_url()
class ViewIssue(DetailListCreateView):
    """View issue, comments and new comment form."""
    # Detail view of `Issue` listing its related `IssueComment`s (via the
    # `comments` related name); new comments are created with `CommentForm`
    # and their `issue` FK is filled in by the framework (`fk_attr`).
    detail_model = Issue
    list_model = IssueComment
    modelform_class = CommentForm
    related_name = "comments"
    fk_attr = "issue"
    # notification template slots: (issue name, server root, url, comment body)
    msg_tpl = "Comment was added to the Issue '%s' <%s%s>\n\n%s"
    template_name = "issue.html"

    def modelform_valid(self, modelform):
        """Send notification email to the issue owner."""
        # Save first via super(), then stamp the comment with its author.
        # NOTE(review): assumes `modelform_object` is populated by the super
        # call — confirm against mcbv.edit_custom.
        resp = super(ViewIssue, self).modelform_valid(modelform)
        obj = self.modelform_object
        obj.update(creator=self.user)
        notify_owner(self.request, obj.issue, "New Comment", self.msg_tpl, comment_body=obj.body)
        return resp
class AddIssues(FormSetView):
    """Create new issues (a formset with `extra` blank issue forms)."""
    formset_model = Issue
    formset_form_class = IssueForm
    success_url = reverse_lazy("admin:issues_issue_changelist")
    msg_tpl = "New Issue '%s' was created <%s%s>\n\n%s"
    extra = 2
    template_name = "add_issues.html"

    def process_form(self, form):
        # persist first so `form.instance` has a pk for URL building
        form.save()
        notify_owner(self.request, form.instance, "New Issue", self.msg_tpl)
def notify_owner(request, obj, title, msg_tpl, comment_body=''):
    """Emails the owner of `obj` (if any) about a change described by `msg_tpl`.

    `msg_tpl` must contain four %s slots: object name, server root, object
    URL and (optional) comment body. No-op when `obj` has no owner.
    """
    if not obj.owner:
        # nothing to notify; skip building the message and URL entirely
        return
    # NOTE(review): raises KeyError if the Origin header is absent — confirm
    # all callers are AJAX requests that send it.
    serv_root = request.META["HTTP_ORIGIN"]
    url = reverse2("issue", dpk=obj.pk)
    msg = msg_tpl % (obj.name, serv_root, url, comment_body)
    send_mail(title, msg, "IssuesApp", [obj.owner.email], fail_silently=False)
|
from src.evaluation.gnn_evaluation_module import eval_gnn
from src.models.gat_models import MonoGAT#, BiGAT, TriGAT
from src.models.rgcn_models import MonoRGCN, RGCN2
from src.models.appnp_model import MonoAPPNPModel
from src.models.multi_layered_model import MonoModel#, BiModel, TriModel
from torch_geometric.nn import GCNConv, SAGEConv, GATConv, RGCNConv, SGConv, APPNP, ClusterGCNConv
from src.data.data_loader import GraphDataset
import warnings
import pandas as pd
import os
import argparse
import numpy as np
import pickle
import torch
from src.evaluation.network_split import NetworkSplitShchur
from src.data.create_modified_configuration_model import generate_modified_conf_model
from torch_geometric.utils import from_networkx, to_networkx
from community import best_partition
import networkx as nx
def parse_args():
    """Parses command-line arguments for the GNN evaluation script.

    Returns:
        argparse.Namespace with all evaluation hyper-parameters.
    """
    parser = argparse.ArgumentParser(description="Test accuracy for GCN/SAGE/GAT/RGCN/SGC/APPNP")
    parser.add_argument('--size', type=int, default=96,
                        help='Channel size. Default is 96.')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='Learning rate. Default is 0.01.')
    parser.add_argument('--wd', type=float, default=0.01,
                        help='Regularization weight. Default is 0.01.')
    parser.add_argument('--dropout', type=float, default=0.8,
                        help='Dropout probability. Default is 0.8.')
    # NOTE(review): argparse `type=bool` is a known pitfall — any non-empty
    # string (including "False") parses as True. Kept as-is for CLI
    # compatibility; consider action='store_true' in a breaking release.
    parser.add_argument('--conf', type=bool, default=False,
                        help='Is configuration model evaluation. Default is False.')
    parser.add_argument('--shifting', type=bool, default=False,
                        help='Is shifting evaluation. Default is False.')
    parser.add_argument('--sbm', type=bool, default=False,
                        help='Is SBM evaluation. Default is False.')
    parser.add_argument('--sbm_label', type=bool, default=False,
                        help='Is SBM_label evaluation. Default is False.')
    parser.add_argument('--flipped', type=bool, default=False,
                        help='Evaluating with flipped edges? Default is False.')
    parser.add_argument('--removed_hubs', type=bool, default=False,
                        help='Evaluating with removed hubs? Default is False.')
    parser.add_argument('--added_2hop_edges', type=bool, default=False,
                        help='Evaluating with added 2-hop edges? Default is False.')
    parser.add_argument('--label_sbm', type=bool, default=False,
                        help='Evaluating with SBMs created from labels? Default is False.')
    parser.add_argument('--heads', type=int, default=4,
                        help='Attention heads. Default is 4.')
    parser.add_argument('--attention_dropout', type=float, default=0.4,
                        help='Attention dropout for GAT. Default is 0.4.')
    parser.add_argument('--dataset', default="cora",
                        help='Dataset name. Default is cora.')
    parser.add_argument('--model', default="gcn",
                        help='Model name. Default is GCN.')
    parser.add_argument('--splits', type=int, default=100,
                        help='Number of random train/validation/test splits. Default is 100.')
    parser.add_argument('--runs', type=int, default=20,
                        help='Number of random initializations of the model. Default is 20.')
    parser.add_argument('--conf_inits', type=int, default=10,
                        help='Number of configuration model runs. Default is 10.')
    parser.add_argument('--sbm_inits', type=int, default=10,
                        help='Number of SBM runs. Default is 10.')
    parser.add_argument('--directionality', default='undirected',
                        help='Directionality: undirected/directed/reversed. Default is undirected.')
    parser.add_argument('--train_examples', type=int, default=20,
                        help='Number of training examples per class. Default is 20.')
    parser.add_argument('--val_examples', type=int, default=30,
                        help='Number of validation examples per class. Default is 30.')
    args = parser.parse_args()
    return args
# Maps CLI model names to their torch_geometric convolution layer
# (or the custom RGCN2 model class for 'rgcn2').
name2conv = {'gcn': GCNConv, 'sage': SAGEConv, 'gat': GATConv, 'rgcn': RGCNConv, 'rgcn2':RGCN2, 'sgc':SGConv, 'appnp':APPNP, 'cgcn':ClusterGCNConv}
def eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout,
                   runs, splits, train_examples, val_examples, models=None, isDirected=False):
    """Evaluates GAT architectures; directed graphs only support MonoGAT.

    `models=None` means the default [MonoGAT] (avoids a mutable default arg).
    """
    if models is None or isDirected:
        models = [MonoGAT]
    return eval_gnn(dataset, dataset_name, GATConv, channel_size, dropout, lr, wd,
                    heads=heads, attention_dropout=attention_dropout,
                    models=models, num_runs=runs, num_splits=splits, test_score=True,
                    train_examples=train_examples, val_examples=val_examples)
def eval_archs_gcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd,
                   runs, splits, train_examples, val_examples, models=None, isDirected=False):
    """Evaluates GCN-style architectures with the given convolution layer.

    `models=None` means the default [MonoModel] (avoids a mutable default arg).
    """
    if models is None or isDirected:
        models = [MonoModel]
    # heads/attention_dropout are dummy values — unused by non-GAT models
    return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd,
                    heads=1, attention_dropout=0.3,
                    models=models, num_runs=runs, num_splits=splits, test_score=True,
                    train_examples=train_examples, val_examples=val_examples)
def eval_archs_appnp(dataset, dataset_name, conv, channel_size, dropout, lr, wd,
                     runs, splits, train_examples, val_examples, models=None):
    """Evaluates APPNP; `models=None` means the default [MonoAPPNPModel]."""
    if models is None:
        models = [MonoAPPNPModel]
    # heads/attention_dropout are dummy values — unused by APPNP
    return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd,
                    heads=1, attention_dropout=0.3,
                    models=models, num_runs=runs, num_splits=splits, test_score=True,
                    train_examples=train_examples, val_examples=val_examples)
def eval_archs_rgcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd,
                    runs, splits, train_examples, val_examples, models=None):
    """Evaluates RGCN variants; `models=None` means the default [MonoRGCN]."""
    if models is None:
        models = [MonoRGCN]
    # heads/attention_dropout are dummy values — unused by RGCN
    return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd,
                    heads=1, attention_dropout=0.3,
                    models=models, num_runs=runs, num_splits=splits, test_score=True,
                    train_examples=train_examples, val_examples=val_examples)
def eval(model, dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout,
         runs, splits, train_examples, val_examples, isDirected):
    """Dispatches evaluation to the architecture-specific helper for `model`.

    NOTE(review): this function shadows the builtin `eval`; renaming would
    break existing callers, so the name is kept.
    """
    common = dict(splits=splits, runs=runs,
                  train_examples=train_examples, val_examples=val_examples)
    if model == 'gat':
        return eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd,
                              heads, attention_dropout, isDirected=isDirected, **common)
    if model in ('rgcn', 'rgcn2'):
        return eval_archs_rgcn(dataset, dataset_name, name2conv[model],
                               channel_size, dropout, lr, wd, **common)
    if model == 'appnp':
        return eval_archs_appnp(dataset, dataset_name, name2conv[model],
                                channel_size, dropout, lr, wd, **common)
    return eval_archs_gcn(dataset, dataset_name, name2conv[model],
                          channel_size, dropout, lr, wd, isDirected=isDirected, **common)
def eval_original(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                  splits, runs, train_examples, val_examples):
    """Evaluates `model` on the unmodified citation graph for `dataset_name`."""
    is_directed = directionality != 'undirected'
    is_reversed = directionality == 'reversed'
    suffix = ('_' + directionality) if is_directed else ''
    dataset = GraphDataset(f'data/tmp/{dataset_name}{suffix}', dataset_name,
                           f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
                           f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                           directed=is_directed, reverse=is_reversed)[0]
    return eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size,
                lr=lr, splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                attention_dropout=attention_dropout, train_examples=train_examples,
                val_examples=val_examples, isDirected=is_directed)
def eval_shuffled_features(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                           splits, runs, train_examples, val_examples):
    """Evaluates `model` after randomly permuting the node-feature rows."""
    is_directed = directionality != 'undirected'
    is_reversed = directionality == 'reversed'
    suffix = ('_' + directionality) if is_directed else ''
    dataset = GraphDataset(f'data/tmp/{dataset_name}{suffix}', dataset_name,
                           f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
                           f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                           directed=is_directed, reverse=is_reversed)[0]
    # shuffle which node gets which feature vector (features themselves intact)
    dataset.x = dataset.x[torch.randperm(dataset.x.size(0))]
    return eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size,
                lr=lr, splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                attention_dropout=attention_dropout, train_examples=train_examples,
                val_examples=val_examples, isDirected=is_directed)
def eval_random_features(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                         splits, runs, train_examples, val_examples):
    """Evaluates `model` with node features replaced by random binary vectors."""
    is_directed = directionality != 'undirected'
    is_reversed = directionality == 'reversed'
    suffix = ('_' + directionality) if is_directed else ''
    dataset = GraphDataset(f'data/tmp/{dataset_name}{suffix}', dataset_name,
                           f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
                           f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                           directed=is_directed, reverse=is_reversed)[0]
    # replace all features with i.i.d. {0,1} noise of the same shape
    dataset.x = torch.randint(0, 2, dataset.x.shape, dtype=torch.float)
    return eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size,
                lr=lr, splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                attention_dropout=attention_dropout, train_examples=train_examples,
                val_examples=val_examples, isDirected=is_directed)
def eval_cm_communities(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                        splits, runs, train_examples, val_examples, inits):
    """Evaluates `model` on `inits` pre-generated community-preserving
    configuration-model rewirings of the citation graph; returns the
    concatenated results with a 'graph' column identifying the rewiring.
    """
    is_directed = directionality != 'undirected'
    is_reversed = directionality == 'reversed'
    results = pd.DataFrame()
    for i in range(inits):
        dataset = GraphDataset(
            f'data/tmp/{dataset_name}-cm_communities-{i}', dataset_name,
            f'data/graphs/cm_communities/{dataset_name}/{dataset_name}_cm_communities_{i}.cites',
            f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
            directed=is_directed, reverse=is_reversed)[0]
        df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size,
                      lr=lr, splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=is_directed)
        df_cur['graph'] = i
        results = pd.concat([results, df_cur])
    return results
def eval_random(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                splits, runs, train_examples, val_examples, random_inits):
    """Evaluates `model` on `random_inits` pre-generated random graphs;
    returns concatenated results tagged with a 'random_num' column.
    """
    is_directed = directionality != 'undirected'
    is_reversed = directionality == 'reversed'
    suffix = ('_' + directionality) if is_directed else ''
    results = pd.DataFrame()
    for i in range(random_inits):
        dataset = GraphDataset(
            f'data/tmp/{dataset_name}{suffix}-random{i}', dataset_name,
            f'data/graphs/random/{dataset_name}/{dataset_name}_{i}.cites',
            f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
            directed=is_directed, reverse=is_reversed)[0]
        df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size,
                      lr=lr, splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=is_directed)
        df_cur['random_num'] = i
        results = pd.concat([results, df_cur])
    return results
def eval_erdos(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
               splits, runs, train_examples, val_examples, erdos_inits):
    """Evaluates `model` on `erdos_inits` pre-generated Erdos-Renyi graphs;
    returns concatenated results tagged with an 'erdos_num' column.
    """
    is_directed = directionality != 'undirected'
    is_reversed = directionality == 'reversed'
    suffix = ('_' + directionality) if is_directed else ''
    results = pd.DataFrame()
    for i in range(erdos_inits):
        dataset = GraphDataset(
            f'data/tmp/{dataset_name}{suffix}-erdos{i}', dataset_name,
            f'data/graphs/erdos/{dataset_name}/{dataset_name}_{i}.cites',
            f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
            directed=is_directed, reverse=is_reversed)[0]
        df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size,
                      lr=lr, splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=is_directed)
        df_cur['erdos_num'] = i
        results = pd.concat([results, df_cur])
    return results
def eval_injected_edges(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                        splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
    """Evaluates `model` on graphs with `e` injected edges for each e in
    `num_edges` and each of `inits` independent injections.

    Returns concatenated results tagged with 'injected_edges' and 'init_num'
    columns. (Dead debugging code and the unused `last_edge` variable from
    the original were removed.)
    """
    is_directed = directionality != 'undirected'
    is_reversed = directionality == 'reversed'
    suffix = ('_' + directionality) if is_directed else ''
    df_val = pd.DataFrame()
    for e in num_edges:
        for i in range(inits):
            dataset = GraphDataset(
                f'data/tmp/{dataset_name}{suffix}-injected_{e}_{i}_{hubs_experiment}', dataset_name,
                f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
                f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                directed=is_directed, reverse=is_reversed)[0]
            df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size,
                          lr=lr, splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                          attention_dropout=attention_dropout, train_examples=train_examples,
                          val_examples=val_examples, isDirected=is_directed)
            df_cur['init_num'] = i
            df_cur['injected_edges'] = e
            df_val = pd.concat([df_val, df_cur])
    return df_val
def eval_injected_edges_degree_cat(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                                   splits, runs, train_examples, val_examples, inits, num_edges, percentile):
    """Evaluate `model` on graphs with edges injected into degree-percentile bands.

    For every initialization and every `percentile`-wide degree band
    [frm, to), loads the matching pre-generated graph and evaluates the
    model on it.

    Returns a DataFrame stacking the per-run results, tagged with
    `init_num`, `injected_edges`, `from`, and `to` columns.
    """
    # Removed: dead `last_edge` local (never read).
    isDirected = (directionality != 'undirected')
    isReversed = (directionality == 'reversed')
    df_val = pd.DataFrame()
    e = num_edges
    hubs_experiment = 'global_edges'
    for i in range(inits):
        for frm in range(0, 100, percentile):
            to = frm + percentile
            dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
                                   f'data/graphs/injected_edges_degree_cat/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}_{frm}_to_{to}.cites',
                                   f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                                   directed=isDirected, reverse=isReversed)[0]
            df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
                          dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
                          train_examples=train_examples, val_examples=val_examples, isDirected=isDirected)
            df_cur['init_num'] = i
            df_cur['injected_edges'] = e
            df_cur['from'] = frm
            df_cur['to'] = to
            df_val = pd.concat([df_val, df_cur])
    return df_val
def eval_injected_edges_constant_nodes(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                                       splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
    """Evaluate `model` on graphs where a fixed fraction of nodes receives injected edges.

    Iterates over degree-percentile bands, initializations, and per-node edge
    counts; each combination loads a pre-generated graph variant.

    Returns a DataFrame stacking per-run results, tagged with `init_num`,
    `edges_per_node`, `control_ratio`, `from`, and `to` columns.
    """
    # Removed: dead `last_edge` local (never read).
    isDirected = (directionality != 'undirected')
    isReversed = (directionality == 'reversed')
    df_val = pd.DataFrame()
    hubs_experiment = 'global_edges'
    for frm in range(0, 100, percentile):
        for i in range(inits):
            for e in edges_per_node:
                to = frm + percentile
                dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio}nodes_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
                                       f'data/graphs/injected_edges_constant_nodes/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_{frm}_to_{to}.cites',
                                       f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                                       directed=isDirected, reverse=isReversed)[0]
                df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
                              dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
                              train_examples=train_examples, val_examples=val_examples, isDirected=isDirected)
                df_cur['init_num'] = i
                df_cur['edges_per_node'] = e
                df_cur['control_ratio'] = control_ratio
                df_cur['from'] = frm
                df_cur['to'] = to
                df_val = pd.concat([df_val, df_cur])
    return df_val
def eval_injected_edges_attack_target(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                                      splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
    """Evaluate `model` on graphs with edges injected from attacker to target degree bands.

    Iterates over all (attacker band, target band, init, edges-per-node)
    combinations and evaluates the model on each pre-generated variant.

    Returns a DataFrame stacking per-run results, tagged with `init_num`,
    `edges_per_node`, `control_ratio`, and the attacker/target band bounds.
    """
    # Removed: dead `last_edge` local (never read).
    isDirected = (directionality != 'undirected')
    isReversed = (directionality == 'reversed')
    df_val = pd.DataFrame()
    hubs_experiment = 'global_edges'
    for atkfrm in range(0, 100, percentile):
        for tgtfrm in range(0, 100, percentile):
            for i in range(inits):
                for e in edges_per_node:
                    atkto = atkfrm + percentile
                    tgtto = tgtfrm + percentile
                    dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio:.3f}nodes_{i}_{hubs_experiment}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}', dataset_name,
                                           f'data/graphs/injected_edges_attack_target/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}.cites',
                                           f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                                           directed=isDirected, reverse=isReversed)[0]
                    df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
                                  dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
                                  train_examples=train_examples, val_examples=val_examples, isDirected=isDirected)
                    df_cur['init_num'] = i
                    df_cur['edges_per_node'] = e
                    df_cur['control_ratio'] = control_ratio
                    df_cur['atkfrm'] = atkfrm
                    df_cur['atkto'] = atkto
                    df_cur['tgtfrm'] = tgtfrm
                    df_cur['tgtto'] = tgtto
                    df_val = pd.concat([df_val, df_cur])
    return df_val
def eval_injected_edges_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                            splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
    """Evaluate `model` on SBM graphs with additionally injected edges.

    Mirrors `eval_injected_edges` but reads from the
    `injected_edges_sbm` graph directory.

    Returns a DataFrame stacking per-run results, tagged with `init_num`
    and `injected_edges` columns.
    """
    # Removed: dead `last_edge` local (never read).
    isDirected = (directionality != 'undirected')
    isReversed = (directionality == 'reversed')
    df_val = pd.DataFrame()
    for e in num_edges:
        for i in range(inits):
            dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_sbm_{e}_{i}_{hubs_experiment}', dataset_name,
                                   f'data/graphs/injected_edges_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
                                   f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                                   directed=isDirected, reverse=isReversed)[0]
            df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
                          dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
                          train_examples=train_examples, val_examples=val_examples, isDirected=isDirected)
            df_cur['init_num'] = i
            df_cur['injected_edges'] = e
            df_val = pd.concat([df_val, df_cur])
    return df_val
def eval_label_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                   splits, runs, train_examples, val_examples, hubs_experiment):
    """Evaluate `model` on the label-SBM rewiring of the dataset graph."""
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-label_sbm_{hubs_experiment}', dataset_name,
                         f'data/graphs/label_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}.cites',
                         f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                         directed=directed, reverse=reverse_edges)[0]
    return eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                attention_dropout=attention_dropout, train_examples=train_examples,
                val_examples=val_examples, isDirected=directed)
def eval_conf(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
              splits, runs, train_examples, val_examples, conf_inits):
    """Evaluate `model` on configuration-model rewirings of the dataset graph.

    One evaluation per initialization; results are stacked and tagged with
    a `confmodel_num` column.
    """
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    frames = [pd.DataFrame()]
    for init in range(conf_inits):
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-confmodel{init}', dataset_name,
                             f'data/graphs/confmodel/{dataset_name}/{dataset_name}_confmodel_{init}.cites',
                             f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['confmodel_num'] = init
        frames.append(result)
    return pd.concat(frames)
def eval_shifting(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                  splits, runs, train_examples, val_examples, shifting_inits):
    """Evaluate `model` on the pre-generated 'shifting' graph variants.

    Iterates over change type ('C'/'L'), direction (increase/decrease),
    ratio, and initialization; missing graph files are reported and skipped.
    Results are stacked and tagged with `graph_num`, `inc`, `change`, `r`.
    """
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    frames = [pd.DataFrame()]
    output_prefix = f'data/graphs/shifting/{dataset_name}/{dataset_name}_shifting'
    output_suffix = '.cites'
    for change in 'CL':
        for inc in [True, False]:
            direction_tag = "inc" if inc else "dec"
            for r in [0.16, 0.32, 0.64]:  # earlier runs used [0.02, 0.04, 0.08]
                for init in range(shifting_inits):
                    graph_path = f'{output_prefix}_{change}_{direction_tag}_r{r:.2f}_{init}{output_suffix}'
                    if not os.path.exists(graph_path):
                        print(f'File not found: {graph_path}')
                        continue
                    graph = GraphDataset(f'data/tmp/{dataset_name}_shifting_{change}_{direction_tag}_r{r:.2f}_{init}{output_suffix}',
                                         dataset_name, graph_path,
                                         f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                                         directed=directed, reverse=reverse_edges)[0]
                    result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                                  splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                                  attention_dropout=attention_dropout, train_examples=train_examples,
                                  val_examples=val_examples, isDirected=directed)
                    result['graph_num'] = init
                    result['inc'] = inc
                    result['change'] = change
                    result['r'] = r
                    frames.append(result)
    return pd.concat(frames)
def eval_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
             splits, runs, train_examples, val_examples, sbm_inits):
    """Evaluate `model` on SBM-resampled variants of the dataset graph.

    One evaluation per initialization; results are stacked and tagged with
    an `sbm_num` column.
    """
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    content_path = f'data/graphs/processed/{dataset_name}/{dataset_name}.content'
    frames = [pd.DataFrame()]
    for init in range(sbm_inits):
        print(content_path)
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-sbm{init}', dataset_name,
                             f'data/graphs/sbm/{dataset_name}/{dataset_name}_sbm_{init}.cites',
                             content_path,
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['sbm_num'] = init
        frames.append(result)
    return pd.concat(frames)
def eval_sbm_label(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                   splits, runs, train_examples, val_examples, sbm_inits):
    """Evaluate `model` on label-driven SBM variants of the dataset graph.

    One evaluation per initialization; results are stacked and tagged with
    an `sbm_num` column.
    """
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    content_path = f'data/graphs/processed/{dataset_name}/{dataset_name}.content'
    frames = [pd.DataFrame()]
    for init in range(sbm_inits):
        print(content_path)
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-sbm_label{init}', dataset_name,
                             f'data/graphs/sbm_label/{dataset_name}/{dataset_name}_sbm_{init}.cites',
                             content_path,
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['sbm_num'] = init
        frames.append(result)
    return pd.concat(frames)
def eval_modcm(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
               splits, runs, train_examples, val_examples, modcm_inits):
    """Evaluate `model` on modularity-preserving configuration-model graphs.

    One evaluation per initialization; results are stacked and tagged with
    a `modcm_num` column.
    """
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    content_path = f'data/graphs/processed/{dataset_name}/{dataset_name}.content'
    frames = [pd.DataFrame()]
    for init in range(modcm_inits):
        print(content_path)
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-modcm{init}', dataset_name,
                             f'data/graphs/modcm/{dataset_name}/{dataset_name}_modcm_{init}.cites',
                             content_path,
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['modcm_num'] = init
        frames.append(result)
    return pd.concat(frames)
def eval_modsbm(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                splits, runs, train_examples, val_examples, modsbm_inits):
    """Evaluate `model` on modularity-preserving SBM variants of the graph.

    One evaluation per initialization; results are stacked and tagged with
    a `modsbm_num` column.
    """
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    content_path = f'data/graphs/processed/{dataset_name}/{dataset_name}.content'
    frames = [pd.DataFrame()]
    for init in range(modsbm_inits):
        print(content_path)
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-modsbm{init}', dataset_name,
                             f'data/graphs/modsbm/{dataset_name}/{dataset_name}_modsbm_{init}.cites',
                             content_path,
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['modsbm_num'] = init
        frames.append(result)
    return pd.concat(frames)
def eval_reglabel(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                  splits, runs, train_examples, val_examples, reglabel_inits):
    """Evaluate `model` on the 'reglabel' graph variants of the dataset.

    One evaluation per initialization; results are stacked and tagged with
    a `reglabel_num` column.
    """
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    content_path = f'data/graphs/processed/{dataset_name}/{dataset_name}.content'
    frames = [pd.DataFrame()]
    for init in range(reglabel_inits):
        print(content_path)
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-reglabel{init}', dataset_name,
                             f'data/graphs/reglabel/{dataset_name}/{dataset_name}_reglabel_{init}.cites',
                             content_path,
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['reglabel_num'] = init
        frames.append(result)
    return pd.concat(frames)
################## Synthetic part #####################################
def load_communities(path):
    """Load the pickled node -> community mapping stored at `path`."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def load_labels(path):
    """Read a whitespace-separated .content file and map node id -> label.

    The node id is the first column and the label the last column of each
    line; the feature columns in between are ignored.

    Fixes: removed a duplicated `label = {}` initialization, and blank
    lines no longer raise IndexError.
    """
    label = {}
    with open(path, 'r') as handle:
        for line in handle:
            s = line.strip().split()
            if not s:
                continue  # tolerate blank/whitespace-only lines
            label[s[0]] = s[-1]
    return label
def agg(x):
    """Count the distinct values in `x` (pivot-table aggregation helper)."""
    distinct_values = x.unique()
    return len(distinct_values)
def calc_uncertainty(df_community,dataset_name,labeled=False,seed=0):
    """Normalized information gain of the community partition about node labels.

    Computes IG/H(label), where IG = H(label) - H(label | community), from the
    node/community/label table `df_community`.  Returns a value in [0, 1]
    (1 means communities fully determine labels).

    If `labeled` is True, restricts to the rows marked in the
    f'labeled{seed}' column (i.e. the training nodes of that split).
    NOTE(review): for 'cora' this mutates `df_community.label` in place
    (collapses labels to their uppercase initials) — callers see the change.
    """
    if dataset_name == 'cora':
        # Collapse cora label strings to their uppercase characters.
        df_community.label = df_community.label.apply(lambda x : ''.join([c for c in x if c.isupper()]))
    if labeled:
        # Keep only nodes flagged as labeled for this split seed.
        df_community = df_community[df_community[f'labeled{seed}']]
    communities = df_community.community.unique()
    labels = df_community.label.unique()
    # Joint distribution P(community, label), estimated by distinct-node counts.
    mtx = df_community.pivot_table(index='community', columns='label',values='node',aggfunc=agg).fillna(0) / len(df_community)
    def Pmarg(c):
        # Marginal P(community == c).
        return len(df_community[df_community.community == c]) / len(df_community)
    def Pcond(l,c):
        # Conditional P(label == l | community == c).
        return mtx.loc[c,l]/Pmarg(c)
    # H = H(label | community), accumulated community by community.
    H = 0
    for c in communities:
        h = 0
        for l in labels:
            if Pcond(l,c) == 0:
                continue  # 0 * log(1/0) is taken as 0
            h += Pcond(l,c) * np.log2(1./Pcond(l,c))
        H += h * Pmarg(c)
    def Pl(l):
        # Marginal P(label == l).
        return len(df_community[df_community.label == l]) / len(df_community)
    # Hl = H(label), the unconditional label entropy.
    Hl = 0
    for l in labels:
        if Pl(l) == 0:
            continue
        Hl += Pl(l) * np.log2(1./Pl(l))
    IG = Hl-H
    return IG/Hl
def eval_sbm_swap(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
             splits, runs, train_examples, val_examples, sbm_inits, is_sbm):
    """Evaluate `model` while progressively swapping node features/labels.

    Loads either an SBM variant (`is_sbm`) or the original graph, then swaps
    the features and labels of random node pairs in `step`-percent increments,
    re-evaluating after each increment.  Each result row is tagged with the
    swap `ratio` and the label/community `uncertainty` measures.
    """
    step = 10  # percent of candidate pairs swapped per increment
    isDirected = (directionality != 'undirected')
    isReversed = (directionality == 'reversed')
    df_val = pd.DataFrame()
    # Only one pass over the original graph; `sbm_inits` passes for SBM.
    for i in range(sbm_inits if is_sbm else 1):
        print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
        if is_sbm:
            dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm{i}-', dataset_name,
                               f'data/graphs/sbm/{dataset_name}/{dataset_name}_sbm_{i}.cites',
                               f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                               directed=isDirected, reverse=isReversed)
        else:
            dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-', dataset_name,
                                   f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
                                   f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                                   directed=isDirected, reverse=isReversed)
        data = dataset[0]
        # Louvain community id per node, and ground-truth label per node.
        community = load_communities(f'data/community_id_dicts/{dataset_name}/{dataset_name}_louvain.pickle')
        mapping = data.node_name_mapping
        label = load_labels(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
        df_community = pd.DataFrame({'dataset':dataset_name, 'node':node, 'community':community[node], 'label':label[node]} for node in community)
        df_community['node_id'] = df_community.node.apply(lambda x:mapping[x])
        # Mark, per split seed, which nodes are in that split's training mask.
        for seed in range(splits):
            split = NetworkSplitShchur(dataset, train_examples_per_class=train_examples,early_examples_per_class=0,
                                   val_examples_per_class=val_examples, split_seed=seed)
            df_community[f'labeled{seed}'] = df_community.node_id.apply(lambda x: (split.train_mask[x]).numpy())
        n = len(data.y)
        # select nodes at random
        shuffled = np.arange(n)
        np.random.shuffle(shuffled)
        # First half pairs with second half; each pair is swapped at most once.
        row = shuffled[:int(n/2)]
        col = shuffled[int(n/2):int(n/2)*2]
        assert(len(row) == len(col))
        # Baseline evaluation before any swapping (ratio = 0).
        df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
                      dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
                      train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
        if is_sbm:
            df_cur['sbm_num'] = i
        df_cur['ratio'] = 0
        df_cur['uncertainty'] = calc_uncertainty(df_community, dataset_name)
        ulc = [calc_uncertainty(df_community, dataset_name, True, seed) for seed in range(splits)]
        df_cur['uncertainty_known'] = [ulc]
        print(df_cur)
        df_val = pd.concat([df_val, df_cur])
        # Swap one `step`-percent slice of pairs per iteration, cumulatively.
        for ratio in range(0,100,step):
            frm = int(ratio/100 * len(row))
            to = int((ratio+step)/100 * len(row))
            U = row[frm:to]
            V = col[frm:to]
            for u,v in zip(U,V):
                # Swap features and labels of nodes u and v in place;
                # clone() is needed so `tmp` survives the overwrite.
                tmp = data.x[v].detach().clone()
                data.x[v] = dataset[0].x[u]
                data.x[u] = tmp
                tmp = data.y[v].detach().clone()
                data.y[v] = dataset[0].y[u]
                data.y[u] = tmp
                # Keep the community table consistent with the swap.
                tmp = df_community.loc[df_community.node_id == v, 'community'].values[0]
                df_community.loc[df_community.node_id == v, 'community'] = df_community.loc[df_community.node_id == u, 'community'].values[0]
                df_community.loc[df_community.node_id == u, 'community'] = tmp
            df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
                          dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
                          train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
            if is_sbm:
                df_cur['sbm_num'] = i
            df_cur['ratio'] = ratio+step
            df_cur['uncertainty'] = calc_uncertainty(df_community, dataset_name)
            ulc = [calc_uncertainty(df_community, dataset_name, True, seed) for seed in range(splits)]
            df_cur['uncertainty_known'] = [ulc]
            print(df_cur)
            df_val = pd.concat([df_val, df_cur])
    return df_val
################## END: Synthetic part #####################################
def eval_flipped(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                 splits, runs, train_examples, val_examples, percentages=range(10,51,10)):
    """Evaluate `model` on graphs with a percentage of edges flipped.

    Results are stacked and tagged with a `percentage` column.
    """
    print(percentages)
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    content_path = f'data/graphs/processed/{dataset_name}/{dataset_name}.content'
    frames = [pd.DataFrame()]
    for pct in percentages:
        print(content_path)
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-flipped{pct}', dataset_name,
                             f'data/graphs/flip_edges/{dataset_name}/{dataset_name}_{pct}.cites',
                             content_path,
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['percentage'] = pct
        frames.append(result)
    return pd.concat(frames)
def eval_removed_hubs(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                      splits, runs, train_examples, val_examples, percentages=[1,2,4,8]):
    """Evaluate `model` on graphs with the top-percentage hub nodes removed.

    Results are stacked and tagged with a `percentage` column.
    """
    print(percentages)
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    content_path = f'data/graphs/processed/{dataset_name}/{dataset_name}.content'
    frames = [pd.DataFrame()]
    for pct in percentages:
        print(content_path)
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-removed-hubs{pct}', dataset_name,
                             f'data/graphs/removed_hubs/{dataset_name}/{dataset_name}_{pct:02}.cites',
                             content_path,
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['percentage'] = pct
        frames.append(result)
    return pd.concat(frames)
def eval_added_2hop_edges(model, dataset_name, directionality, size, dropout, lr, wd, heads, attention_dropout,
                          splits, runs, train_examples, val_examples, percentages=[1,2,4,8,16,32,64,128,256,512]):
    """Evaluate `model` on graphs augmented with 2-hop shortcut edges.

    Percentages whose graph file does not exist are skipped silently.
    Results are stacked and tagged with a `percentage` column.
    """
    print(percentages)
    directed = directionality != 'undirected'
    reverse_edges = directionality == 'reversed'
    suffix = ('_' + directionality) if directed else ''
    content_path = f'data/graphs/processed/{dataset_name}/{dataset_name}.content'
    frames = [pd.DataFrame()]
    for pct in percentages:
        print(content_path)
        network_path = f'data/graphs/added_2hop_edges/{dataset_name}/{dataset_name}_{pct:02}.cites'
        if not os.path.exists(network_path):
            continue
        graph = GraphDataset(f'data/tmp/{dataset_name}{suffix}-added-2hops{pct}', dataset_name,
                             network_path,
                             content_path,
                             directed=directed, reverse=reverse_edges)[0]
        result = eval(model=model, dataset=graph, dataset_name=dataset_name, channel_size=size, lr=lr,
                      splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=directed)
        result['percentage'] = pct
        frames.append(result)
    return pd.concat(frames)
if __name__ == '__main__':
    warnings.filterwarnings('ignore')
    args = parse_args()
    if args.directionality not in {'undirected', 'reversed', 'directed'}:
        print("--directionality must be in {'undirected','reversed','directed'}")
        exit(1)
    isDirected = (args.directionality != 'undirected')
    isReversed = (args.directionality == 'reversed')
    # TODO find a better way to create names
    val_out = f'reports/results/test_acc/{args.model}_{args.dataset}{"_conf" if args.conf else ""}' \
              f'{"_sbm" if args.sbm else ""}{("_" + args.directionality) if isDirected else ""}.csv'
    # Append to an existing results file when present so repeated runs accumulate.
    if os.path.exists(val_out):
        df_val = pd.read_csv(val_out)
    else:
        df_val = pd.DataFrame(
            columns='conv arch ch dropout lr wd heads attention_dropout splits inits val_accs val_avg val_std'
                    ' test_accs test_avg test_std stopped elapsed'.split())
    # Dispatch exactly one experiment per invocation.
    # BUGFIX: `args.shifting` used to start a *new* `if` chain, so a --conf run
    # always fell through to the trailing `else` and its eval_conf results were
    # overwritten by eval_original; `elif` keeps the --conf results.
    if args.conf:
        df_cur = eval_conf(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                           args.heads, args.attention_dropout,
                           args.splits, args.runs, args.train_examples, args.val_examples, args.conf_inits)
    elif args.shifting:
        df_cur = eval_shifting(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                               args.heads, args.attention_dropout,
                               args.splits, args.runs, args.train_examples, args.val_examples, args.shifting_inits)
    elif args.sbm:
        df_cur = eval_sbm(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                          args.heads, args.attention_dropout,
                          args.splits, args.runs, args.train_examples, args.val_examples, args.sbm_inits)
    elif args.sbm_label:
        df_cur = eval_sbm_label(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                                args.heads, args.attention_dropout,
                                args.splits, args.runs, args.train_examples, args.val_examples, args.sbm_inits)
    elif args.flipped:
        df_cur = eval_flipped(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                              args.heads, args.attention_dropout,
                              args.splits, args.runs, args.train_examples, args.val_examples)
    elif args.removed_hubs:
        df_cur = eval_removed_hubs(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                                   args.heads, args.attention_dropout,
                                   args.splits, args.runs, args.train_examples, args.val_examples)
    elif args.added_2hop_edges:
        df_cur = eval_added_2hop_edges(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                                       args.heads, args.attention_dropout,
                                       args.splits, args.runs, args.train_examples, args.val_examples)
    elif args.label_sbm:
        df_cur = eval_label_sbm(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                                args.heads, args.attention_dropout,
                                args.splits, args.runs, args.train_examples, args.val_examples)
    elif args.injected_edges:
        df_cur = eval_injected_edges(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                                     args.heads, args.attention_dropout,
                                     args.splits, args.runs, args.train_examples, args.val_examples, 5, range(1000,5001,1000), args.hubs_experiment)
    elif args.injected_edges_degree_cat:
        df_cur = eval_injected_edges_degree_cat(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                                                args.heads, args.attention_dropout,
                                                args.splits, args.runs, args.train_examples, args.val_examples, 5, 500, 5)
    elif args.injected_edges_sbm:
        df_cur = eval_injected_edges_sbm(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                                         args.heads, args.attention_dropout,
                                         args.splits, args.runs, args.train_examples, args.val_examples, 5, range(100,2001,100), args.hubs_experiment)
    else:
        df_cur = eval_original(args.model, args.dataset, args.directionality, args.size, args.dropout, args.lr, args.wd,
                               args.heads, args.attention_dropout,
                               args.splits, args.runs, args.train_examples, args.val_examples)
    df_val = pd.concat([df_val, df_cur])
    df_val.to_csv(val_out, index=False)
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import config
import utils
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import plot_utils
import numpy as np
import pandas as pd
import scipy.spatial.distance as ssd
import scipy.cluster.hierarchy as sch
import scipy.stats as spst
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
def collapse_to_median(df, meta):
    """Collapse rows of `df` to per-group medians, grouped by `meta` labels.

    Returns a DataFrame indexed by the unique labels of `meta` (in order of
    first appearance); each row holds the column-wise medians of the rows of
    `df` belonging to that label.
    """
    medians = pd.DataFrame(index=meta.unique(), columns=df.columns, dtype=float)
    for group_label in medians.index:
        member_index = meta.index[meta == group_label]
        medians.loc[group_label] = df.loc[member_index].median()
    return medians
def filter_to_high_var(data, columns, nkeep):
    '''Return indices of the top `nkeep` highest-variance columns of `data`.

    `nkeep` may be an absolute count (> 1), a fraction in (0, 1] of the
    column count, or None to keep every column.  `columns` is accepted for
    interface compatibility but unused.

    Fixes: the `nkeep is None` path used to return `(data, columns)`, which
    broke callers that use the result as `df.iloc[:, keep_cols]`; it now
    returns an all-column index array like the main path.
    '''
    if nkeep is None:
        return np.arange(data.shape[1])  # keep every column
    if nkeep <= 1:
        nkeep = int(data.shape[1] * nkeep)  # interpret as a fraction
    var = np.var(data, axis=0)
    assert var.size == data.shape[1]
    keep_cols = np.argsort(var)[-nkeep:]
    return keep_cols
def heatmap_dists_with_dendro(data, norm=False, labels=None, metric='euclidean', method='ward'):
    """Plot a pairwise-distance heatmap of the rows of `data`, ordered by a
    hierarchical clustering whose dendrogram is drawn above the heatmap.

    With `norm=True` the distance matrix is z-scored, negated (so similar
    pairs appear bright), and shown on a fixed [-4, 4] cubehelix scale.
    Draws on the current matplotlib figure state; returns None.
    """
    fig = plt.figure(figsize=(7 * 1.30, 7 * 1.25))
    # Grid: dendrogram row on top, heatmap + colorbar below.
    gs = gridspec.GridSpec(ncols=3, nrows=2, height_ratios=[.25, 1], width_ratios=[.25, 1, .05], hspace=0)
    dend_top_ax = fig.add_subplot(gs[0,1])
    hmap_ax = fig.add_subplot(gs[1,1])
    cbar_ax = fig.add_subplot(gs[1,2])
    dend_top_ax.set_axis_off()
    if labels is None:
        # Fall back to the DataFrame index when available.
        try:
            labels = data.index
        except AttributeError:
            pass
    n = data.shape[0]
    assert labels is None or len(labels) == n
    dists = ssd.pdist(data, metric=metric)
    linkage = sch.linkage(dists, metric=metric, method=method)
    dendro = sch.dendrogram(linkage, ax=dend_top_ax, color_threshold=0, above_threshold_color='black')
    # Reorder rows/columns to match the dendrogram leaf order.
    order = dendro['leaves']
    sq_form_dists = ssd.squareform(dists)[order][:, order]
    assert sq_form_dists.shape == (n,n)
    if norm:
        sq_form_dists = spst.zscore(sq_form_dists, axis=None)
        sq_form_dists *= -1  # flip sign so small distances map to high values
        cmap = plt.get_cmap('cubehelix')
        vmin = -4
        vmax = 4
    else:
        cmap = plt.get_cmap()  # matplotlib's current default colormap
        vmin = None
        vmax = None
    hmap = hmap_ax.imshow(sq_form_dists, aspect='auto', cmap=cmap, vmin=vmin, vmax=vmax)
    hmap_ax.set_xticks(np.arange(n))
    hmap_ax.set_yticks(np.arange(n))
    if labels is not None:
        hmap_ax.set_xticklabels(labels[order], rotation=90)
        hmap_ax.set_yticklabels(labels[order])
    cb = plt.colorbar(hmap, cax=cbar_ax)
    return
def heatmap_dists(data, norm=False, labels=None, metric='euclidean', method='ward'):
    """Plot a pairwise-distance heatmap of the rows of `data`, ordered by
    hierarchical clustering (no dendrogram drawn).

    Returns (fig, (ax, cax)) for further customization.
    NOTE(review): the `norm` parameter is accepted but never used here,
    unlike in `heatmap_dists_with_dendro` — confirm whether that is intended.
    """
    fig, (ax, cax) = plt.subplots(ncols=2,figsize=(7 * 1.05 ,7),
                                  gridspec_kw={"width_ratios":[1, 0.05]})
    if labels is None:
        # Fall back to the DataFrame index when available.
        try:
            labels = data.index
        except AttributeError:
            pass
    n = data.shape[0]
    assert labels is None or len(labels) == n
    dists = ssd.pdist(data, metric=metric)
    linkage = sch.linkage(dists, metric=metric, method=method)
    # Clustering is used only for its leaf ordering.
    dendro = sch.dendrogram(linkage, no_plot=True)
    order = dendro['leaves']
    sq_form_dists = ssd.squareform(dists)[order][:, order]
    assert sq_form_dists.shape == (n,n)
    hmap = ax.imshow(sq_form_dists, aspect='auto')
    ax.set_xticks(np.arange(n))
    ax.set_yticks(np.arange(n))
    if labels is not None:
        ax.set_xticklabels(labels[order], rotation=90)
        ax.set_yticklabels(labels[order])
    cb = plt.colorbar(hmap, cax=cax)
    return fig, (ax, cax)
# Tasks (not referenced in this part of the script — presumably toggles for
# which analyses to run; confirm against the rest of the module)
CNC = True
mRNA = False
# Filtering
MAX_EVENTS = 5000  # keep at most this many highest-variance columns (see filter_to_high_var)
DEBUG = False  # unused in this view
NORM = True  # z-score the heatmap and prefix output names with 'Normalized'
if __name__ == '__main__':
    # BUGFIX: the AltSplice branch used Python-2 print statements
    # (`print desc`), which are SyntaxErrors under Python 3; the
    # parenthesized form behaves identically on both versions.
    path_list = list()
    outdir_list = list()
    desc_list = list()
    # Add Expression
    if True:
        path = os.path.join(config.embed_dir, 'expression', 'data.tsv')
        outdir = os.path.join(config.plot_dir, 'expression', 'heatmaps')
        if not os.path.exists(outdir): os.makedirs(outdir)
        desc = 'Expression'
        if NORM: desc = 'Normalized ' + desc
        try:
            df = utils.load_large_df(path.replace('.tsv', ''))
        except IOError:
            df = pd.read_csv(path, sep='\t', index_col=0)
        # Clip extreme values at the per-column 99th percentile before filtering.
        df.iloc[:] = np.minimum(df.values, np.percentile(df.values, 99, axis=0))
        keep_cols = filter_to_high_var(df.values, df.columns, MAX_EVENTS)
        df = df.iloc[:, keep_cols]
        metadata_df = utils.load_metadata_df(config.metadata_path, df.index)
        medians = collapse_to_median(df, metadata_df['cnc'])
        heatmap_dists_with_dendro(medians, norm=NORM)
        outpath = os.path.join(outdir, desc.lower().replace(' ', '_') + '_rep_dists_heatmap.png')
        plot_utils.save(outpath, do_pdf=True)
    # Add AltSplice (disabled)
    if False:
        altsplice_event_list = ['alt_3prime', 'alt_5prime', 'intron_retention', 'exon_skip']
        for event in altsplice_event_list:
            path = os.path.join(config.embed_dir, 'altsplice', event, 'data.tsv')
            outdir = os.path.join(config.plot_dir, 'altsplice', event, 'heatmap')
            if not os.path.exists(outdir): os.makedirs(outdir)
            desc = 'AltSplice %s' % event.title()
            if NORM: desc = 'Normalized ' + desc
            print(desc)
            print("Loading %s" % path)
            try:
                df = utils.load_large_df(path.replace('.tsv', ''))
            except IOError:
                df = pd.read_csv(path, sep='\t', index_col=0)
            keep_cols = filter_to_high_var(df.values, df.columns, MAX_EVENTS)
            df = df.iloc[:, keep_cols]
            metadata_df = utils.load_metadata_df(config.metadata_path, df.index)
            medians = collapse_to_median(df, metadata_df['cnc'])
            heatmap_dists_with_dendro(medians, metric='cosine', norm=NORM)
            outpath = os.path.join(outdir, desc.lower().replace(' ', '_') + '_rep_dists_heatmap_TEST.png')
            plot_utils.save(outpath, do_pdf=True)
|
import json
import re
import pprint
import os
import argparse
import pandas as pd
import random
import numpy as np
from tqdm import tqdm
# Two-word question heads reserved for the SQuAD test split
# (presumably held-out heads — confirm with the consuming code).
SQUAD_TEST_HEADS = ['where were', 'what political', 'what religion', 'why did', 'what type', 'what language', 'who had', 'what percentage', 'what can', 'how much']
def strip(sent):
    """Trim surrounding spaces, then peel trailing '.', '?', '!', '"' in that
    fixed order (so e.g. '?!' loses only the '!', matching the original chain).
    """
    cleaned = sent.strip(" ")
    for trailing in ('.', '?', '!', '"'):
        cleaned = cleaned.rstrip(trailing)
    return cleaned
# Two-word phrases never counted as question heads.
blacklist = ["of the", "is a", "is the", "did the"]
# Regex patterns for question head words -> occurrence counts
# (incremented by find_top_q_head).
wh = {
    "(what|what's)": 0,
    "(who|who's)": 0,
    "where": 0,
    "when": 0,
    "which": 0,
    "whose": 0,
    "whom": 0,
    "how": 0,
    "why": 0,
    "(can|could|may|might|should)": 0,
    "(is|are|were|was)": 0,
    "(will|would)": 0,
    "(do|does|did)": 0,
    "(has|have|had)": 0,
    "(name|identify|describe|define)": 0
}
# Two-word head -> list of matching examples (filled by find_top_q_head).
wh2 = {}
# Three-word heads; currently only referenced from commented-out code.
wh3 = {}
# Live view of wh's pattern keys (Python 3 dict view, not a snapshot).
keys = wh.keys()
# True iff `s` contains only ASCII characters (byte length == char length).
isascii = lambda s: len(s) == len(s.encode())
def find_match(query, keys):
    """Return the first regex in `keys` that matches `query` in full, else None."""
    return next(
        (pattern for pattern in keys if re.search('^' + pattern + '$', query)),
        None,
    )
def dict_add(entry, example, dict):
    """Record *example* under the head *entry* in *dict*, unless blacklisted."""
    if entry in blacklist:
        return
    # setdefault creates the bucket on first sight of the head.
    dict.setdefault(entry, []).append(example)
def find_top_q_head(examples, topn):
    """Tally two-word question heads over *examples* and return the *topn*
    most frequent ones.

    Side effects: increments the module-level per-question-word counters in
    ``wh`` and appends examples to ``wh2`` (head -> examples).

    :param examples: iterable of dicts with a "question" key
    :param topn: number of (head, examples) pairs to return
    :returns: list of (head, list-of-examples) pairs, most frequent first
    """
    for example in examples:
        question_text = strip(example["question"])
        # Simple whitespace tokenization with per-token punctuation stripping.
        t = [strip(item.lower()) for item in question_text.split(" ")]
        # First pass: look for a question word among the first three tokens.
        flag = False
        for i in range(3):
            if i >= len(t):
                break
            key = find_match(t[i], keys)
            if key:
                wh[key] += 1
                try:
                    # "in which" / "by whom" heads include the preceding token.
                    if key == "which" and "in which" in question_text:
                        st2 = " ".join(t[i - 1:i + 1])
                    elif key == "whom" and "by whom" in question_text:
                        st2 = " ".join(t[i - 1:i + 1])
                    else:
                        st2 = " ".join(t[i:i + 2])
                    dict_add(st2, example, wh2)
                except Exception as e:
                    print(e.args)
                flag = True
                break
        if not flag:
            # Fallback: scan the tokens from the end of the question.
            for i in range(len(t)):
                idx = len(t) - i - 1
                key = find_match(t[idx], keys)
                if key:
                    wh[key] += 1
                    flag = True
                    try:
                        # BUG FIX: slice at the token position `idx`; the original
                        # sliced with the reverse-loop counter `i`, so the recorded
                        # head was taken from the wrong end of the question.
                        if key == "which" and "in which" in question_text:
                            st2 = " ".join(t[idx - 1:idx + 1])
                        elif key == "whom" and "by whom" in question_text:
                            st2 = " ".join(t[idx - 1:idx + 1])
                        else:
                            st2 = " ".join(t[idx:idx + 2])
                        dict_add(st2, example, wh2)
                    except Exception as e:
                        print(e.args)
                    break
    # Rank heads by how many examples they collected.
    sorted_x = sorted(wh2.items(), key=lambda kv: len(kv[1]), reverse=True)
    print('#Question Head:', len(sorted_x))
    return sorted_x[:topn]
def get_questions(examples, head, num):
    """Collect up to *num* shuffled examples whose question contains *head*.

    Only examples with a non-empty, pure-ASCII answer and an ASCII document are
    kept.  Note: *examples* is shuffled in place, and entries are expected to
    expose `question_text` / `orig_answer_text` / `doc_tokens` attributes
    (SquadExample-style objects, not the dicts built by read_nq_examples) --
    TODO confirm against callers.
    """
    random.shuffle(examples)
    selected = []
    for example in examples:
        if head not in example.question_text.lower():
            continue
        if len(example.orig_answer_text) == 0:
            continue
        if not (isascii(example.orig_answer_text) and
                isascii(" ".join(example.doc_tokens))):
            continue
        selected.append(example)
        if len(selected) == num:
            break
    if len(selected) != num:
        print(head)
        print(selected)
    return selected
def read_nq_examples(input_file):
    """Read an NQ-style jsonl file (first line is a header) into a flat list
    of {'context', 'question', 'answer'} dicts, one entry per QA pair.
    """
    with open(input_file, 'r') as fin:
        rows = [json.loads(raw.strip()) for raw in fin.readlines()[1:]]
    total = 0
    examples = []
    for para in tqdm(rows):
        paragraph_context = para["context"]
        for qa in para["qas"]:
            total += 1
            examples.append({
                'context': paragraph_context,
                'question': qa["question"],
                'answer': [{'text': qa["answers"][0]}],
            })
    print(examples[:5])
    return examples
def down_sample_and_split(heads, n_per_head):
    """Sample exactly *n_per_head* examples from every sufficiently large head,
    then keep only the heads designated as the test split.

    Note: *heads* (a list of (head, examples) pairs) is shuffled in place.
    """
    random.shuffle(heads)
    sampled = {
        name: random.sample(members, n_per_head)
        for name, members in heads
        if len(members) >= n_per_head
    }
    print(sampled.keys())
    return {name: sampled[name] for name in SQUAD_TEST_HEADS if name in sampled}
def main():
    """Parse CLI arguments, mine frequent question heads from the input file,
    and dump the down-sampled test split to disk.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--in_file", default='../data/newsqa/NewsQA.jsonl', type=str, required=False)
    parser.add_argument("--out_dir", default='../data/newsqa/', type=str, required=False,
                        help="Output directory")
    parser.add_argument("--out_train", default='zs_train.json', type=str, required=False)
    parser.add_argument("--out_dev", default='zs_dev.json', type=str, required=False)
    parser.add_argument("--out_test", default='zs_test.json', type=str, required=False)
    parser.add_argument('--seed', type=int, default=55, help="random seed")
    opt = vars(parser.parse_args())
    # Seed before any shuffling/sampling so the split is reproducible.
    random.seed(opt['seed'])
    examples = read_nq_examples(opt['in_file'])
    top_heads = find_top_q_head(examples, topn=300)
    test = down_sample_and_split(top_heads, n_per_head=64)
    print('Test heads: {}'.format(test.keys()))
    with open(os.path.join(opt['out_dir'], opt['out_test']), 'w') as fout:
        json.dump(test, fout)
if __name__ == "__main__":
main() |
<filename>tests/test_forward.py
import os
import sys
import numpy as np
import torch
sys.path.insert(0, os.path.abspath('../retina'))
def _demo_mm_inputs(
input_shape=(1, 3, 300, 300), num_items=None, num_classes=10):
"""
Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
num_classes (int):
number of different labels a box might have
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
} for _ in range(N)]
gt_bboxes = []
gt_labels = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
mm_inputs = {
'imgs': torch.FloatTensor(imgs),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
}
return mm_inputs
def test_retina_forward():
    """Smoke-test the RetinaNet forward pass + loss on CPU, and on GPU when
    one is available."""
    from retina.utils import Config
    from retina.model import build_detector
    from retina.criterion import Criteria
    # Build model and criterion from the test config.
    cfg = Config.fromfile(os.path.join(os.path.abspath('configs'), 'test.py'))
    model = build_detector(cfg['model'])
    criterion = Criteria(
        cls_loss_cfg=cfg['criterion']['cls_loss'],
        reg_loss_cfg=cfg['criterion']['reg_loss'],
        num_classes=cfg['num_classes']
    )
    # Synthesize a demo batch.
    mm_inputs = _demo_mm_inputs((3, 3, 224, 224))
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    gt_bboxes = mm_inputs['gt_bboxes']
    gt_labels = mm_inputs['gt_labels']

    def _forward_and_loss(images, bboxes, labels):
        # Forward pass followed by loss computation; reads `model`,
        # `criterion` and `img_metas` from the enclosing scope.
        preds_results, targets_results = model(
            images,
            img_metas,
            False,
            gt_bboxes=bboxes,
            gt_labels=labels,
        )
        reg_losses, cls_losses = criterion(preds_results, targets_results)
        print(reg_losses, cls_losses)

    # CPU forward
    _forward_and_loss(imgs, gt_bboxes, gt_labels)
    # CUDA forward, when a GPU is present
    if torch.cuda.is_available():
        model = model.cuda()
        _forward_and_loss(imgs.cuda(),
                          [b.cuda() for b in gt_bboxes],
                          [g.cuda() for g in gt_labels])
# Allow invoking this test module directly as a script.
if __name__ == '__main__':
    test_retina_forward()
|
"""
Unit tests for memory networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from texar.tf.modules.memory.memory_network import MemNetRNNLike
from texar.tf import context
# pylint: disable=no-member, too-many-locals, too-many-instance-attributes
# pylint: disable=too-many-arguments, protected-access
class MemNetRNNLikeTest(tf.test.TestCase):
    """Tests :class:`~texar.tf.modules.memory.memory_network.MemNetRNNLike`.
    """

    def _test_memory_dim(self, combine_mode='add', soft_memory=False,
                         soft_query=False, use_B=False):
        """Tests :attr:`memory_dim` in the :attr:`combine_mode` and soft
        options.
        """
        print('testing: combine_mode={}, soft_memory={}, soft_query={}, '
              'use_B={}'.format(combine_mode, soft_memory, soft_query, use_B))
        n_hops = 3
        # With 'add' (or the default), entry and temporal embeddings are
        # summed, so all three dims must agree; with 'concat' they add up.
        if combine_mode == 'add' or combine_mode is None:
            memory_dim = 19
            embedding_dim = memory_dim
            temporal_embedding_dim = memory_dim
        elif combine_mode == 'concat':
            embedding_dim = 19
            temporal_embedding_dim = 17
            memory_dim = embedding_dim + temporal_embedding_dim
        else:
            raise ValueError(
                "combine_mode = {} is not recognized".format(combine_mode))
        relu_dim = 13
        memory_size = 7
        raw_memory_dim = 11
        batch_size = 2
        # Shared embedder hparams reused for the A, B and C embeddings.
        embed_hparams = {
            "embedding": {
                "dim": embedding_dim,
            },
            "temporal_embedding": {
                "dim": temporal_embedding_dim,
            },
            "combine_mode": combine_mode,
        }
        memnet_hparams = {
            "n_hops": n_hops,
            "relu_dim": relu_dim,
            "memory_size": memory_size,
            "A": embed_hparams,
            "C": embed_hparams,
            "B": embed_hparams,
            "use_B": use_B,
        }
        memnet = MemNetRNNLike(raw_memory_dim=raw_memory_dim,
                               hparams=memnet_hparams)
        kwargs = {}
        # Memory may be given as soft (probability) vectors or as token ids.
        if soft_memory:
            kwargs['soft_memory'] = tf.random_uniform(
                [batch_size, memory_size, raw_memory_dim])
        else:
            kwargs['memory'] = tf.tile(tf.expand_dims(
                tf.range(memory_size, dtype=tf.int32), 0), [batch_size, 1])
        # With use_B the query is embedded by B (ids or soft vectors);
        # otherwise the query must already live in the memory space.
        if use_B:
            if soft_query:
                kwargs['soft_query'] = tf.random_uniform(
                    [batch_size, raw_memory_dim])
            else:
                kwargs['query'] = tf.random_uniform(
                    [batch_size], maxval=raw_memory_dim, dtype=tf.int32)
        else:
            kwargs['query'] = tf.random_uniform([batch_size, memory_dim])
        logits = memnet(**kwargs)
        self.assertEqual(memnet.memory_dim, memory_dim)
        self.assertEqual(logits.shape[0], batch_size)
        self.assertEqual(logits.shape[1], raw_memory_dim)

    def test_memory_dim(self):
        """Tests :attr:`memory_dim` in different :attr:`combine_mode` and
        different soft options.
        """
        # Exercise every valid combination; soft_query only applies with use_B.
        for combine_mode in ['add', 'concat']:
            for soft_memory in [False, True]:
                for use_B in [False, True]:
                    for soft_query in ([False, True] if use_B else [False]):
                        self._test_memory_dim(combine_mode, soft_memory,
                                              soft_query, use_B)
# Run the TensorFlow test runner when executed directly.
if __name__ == "__main__":
    tf.test.main()
|
#!/usr/local/bin/python3
# encoding: utf-8
'''
webcamloop -- get image from cam and ftp it, loop
see configuration file
@author: <EMAIL>
@copyright: 2020 werner.fuerst
@license: CC0
@contact: <EMAIL>
@deffield updated: Updated
'''
__all__ = []
__version__ = 0.1
__date__ = '2020-02-21'
__updated__ = '2020-03-07'
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
import configparser
import sys
import logging
import time
from helpers import tools
# create logger; the logger itself accepts everything (DEBUG) and the
# attached handlers decide what actually gets emitted.
logger = logging.getLogger('webcam')
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
# logger.addHandler(fh)
logger.addHandler(ch)
# Script switches consulted by the __main__ guard: DEBUG injects -h/-v into
# argv, TESTRUN runs the module doctests before main().
DEBUG = 0
TESTRUN = 0
def main(argv=None):  # IGNORE:C0111
    '''Command line options.

    Entry point: parse CLI options, read the configuration file, then loop
    forever fetching an image from the webcam, annotating it and uploading
    it via FTP.  Returns 0 on Ctrl-C.
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    # program_name = os.path.basename(sys.argv[0])
    # Assemble the version/description strings shown by argparse.
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    # Second line of the module docstring serves as the short description.
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by wf on %s.
Copyright 2020 wf. All rights reserved.
Free Software
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    # helpers.tools.test1()
    logger.info('starting')
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument("-v", "--verbose", dest="verbose",
                            action="count",
                            help="set verbosity level "
                            "[default: %(default)s]")
        parser.add_argument('-V', '--version', action='version',
                            version=program_version_message)
        parser.add_argument("-c", "--config", dest="configfile",
                            help="config file. [default: "
                            "%(default)s]", metavar="FILE")
        # Process arguments
        args = parser.parse_args()
        verbose = args.verbose
        configfile = args.configfile
        if verbose is not None and verbose > 0:
            print("Verbose mode on")
        # Read settings; SectionProxy.get() falls back to the given default
        # when the option is absent from the [DEFAULT] section.
        config = configparser.ConfigParser()
        config.read(configfile)
        local_config = config['DEFAULT']
        image_file = local_config.get('image_loop', 'loop.jpeg')
        image_file_annotated = local_config.get('image_loop_annotated',
                                                'loop_annotated.jpeg')
        # Main loop: grab -> annotate -> upload, then poll the semaphore
        # (up to 74 * 4 s) before the next capture.
        while True:
            logger.info("get image from cam")
            if not tools.get_image_from_webcam(config['WEBCAM'],
                                               image_file):
                # NOTE(review): the message says "giving up" but the loop
                # retries after 4 s -- confirm which behavior is intended.
                logger.critical(
                    "giving up, did not get an image from cam")
                time.sleep(4)
                continue
            logger.info("crop and annotate image %s %s",
                        image_file, image_file_annotated)
            tools.annotate_image(image_file, image_file_annotated,
                                 do_crop=True)
            logger.info("send image to Webpage")
            tools.send_image_to_webpage(
                config['FTPLOOP'],
                image_file_annotated)
            time.sleep(4)
            # Wait between captures, but break out early when the semaphore
            # signals that someone is watching.
            for cnt in range(74):
                logger.info("in loop %s", cnt)
                if tools.is_there_anybody_out_there(
                        config['SEMAPHORE']):
                    break
                time.sleep(4)
    except KeyboardInterrupt:
        # handle keyboard interrupt #
        return 0
    # except Exception as ex_all:
    #     if DEBUG or TESTRUN:
    #         raise ex_all
    #     indent = len(program_name) * " "
    #     sys.stderr.write(program_name + ": " + repr(ex_all) + "\n")
    #     sys.stderr.write(indent + " for help use --help")
    #     return 2
# Script entry point: optionally inject debug flags / run doctests first.
if __name__ == "__main__":
    if DEBUG:
        sys.argv.append("-h")
        sys.argv.append("-v")
    if TESTRUN:
        import doctest
        doctest.testmod()
    sys.exit(main())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os.path
import logging
import datetime
import markdown
from dateutil.parser import *
from dateutil.relativedelta import *
import trialcondition
import trialmedication
import trialallergy
import triallab
import trialmutation
import clinicaltrials.jsondocument.jsondocument as jsondocument
import smartclient.fhirclient.models.condition as condition
import smartclient.fhirclient.models.medicationprescription as medicationprescription
import smartclient.fhirclient.models.allergyintolerance as allergyintolerance
import smartclient.fhirclient.models.observation as observation
class TrialPatient(jsondocument.JSONDocument):
    """ A representation for a patient.

    Properties:
    - full_name: string
    - gender: string, "female" or "male"
    - birthday: ISO-8601 date string
    - deathdate: ISO-8601 date string
    - age_years: int
    - age_string: string
    - city: string
    - region: string
    - country: string
    - location = city, region: string
    - conditions: [TrialCondition]
    - medications: [TrialMedication]
    - allergies: [TrialAllergy]
    - labs: [TrialLab]
    - trial_info: [TrialPatientInfo] (loaded from db on init)
    - cached: when the patient data was last cached
    """

    def __init__(self, ident, json=None):
        # NOTE(review): the None checks below assume unknown attributes read
        # as None via jsondocument.JSONDocument -- confirm against that class.
        super().__init__(ident, "patient", json)
        if self.gender is None:
            self.gender = "female"
        if self.country is None:
            self.country = "United States"
        if self.location is None:
            self.update_location()
        # Per-trial annotations are stored as separate documents in the db.
        self.trial_info = TrialPatientInfo.find({'type': 'trial-patient-info', 'patient_id': ident})

    def update_with(self, json):
        """Apply *json* to the receiver, then coerce any raw dicts in the
        conditions/medications/allergies/labs arrays into their model types
        (missing arrays become empty lists).
        """
        super().update_with(json)
        #print('===> ', json)
        if self.conditions is not None:
            cond = []
            for c in self.conditions:
                if isinstance(c, trialcondition.TrialCondition):
                    cond.append(c)
                else:
                    cond.append(trialcondition.TrialCondition(c))
            self.conditions = cond
        else:
            self.conditions = []
        if self.medications is not None:
            meds = []
            for m in self.medications:
                if isinstance(m, trialmedication.TrialMedication):
                    meds.append(m)
                else:
                    meds.append(trialmedication.TrialMedication(m))
            self.medications = meds
        else:
            self.medications = []
        if self.allergies is not None:
            allergs = []
            for a in self.allergies:
                if isinstance(a, trialallergy.TrialAllergy):
                    allergs.append(a)
                else:
                    allergs.append(trialallergy.TrialAllergy(a))
            self.allergies = allergs
        else:
            self.allergies = []
        if self.labs is not None:
            lbs = []
            for l in self.labs:
                if isinstance(l, triallab.TrialLab):
                    lbs.append(l)
                else:
                    lbs.append(triallab.TrialLab(l))
            self.labs = lbs
        else:
            self.labs = []

    def __setattr__(self, name, value):
        """ Overridden to perform some value generation after setting certain
        properties.
        """
        super().__setattr__(name, value)
        # Derived values: age follows the birthday, location follows the
        # address components.
        if 'birthday' == name:
            self.update_age_years()
        if 'country' == name or 'city' == name or 'region' == name:
            self.update_location()

    def as_json(self):
        """Serialize for storage, dropping the non-persistable handles."""
        js_dict = super().as_json()
        if 'trial_info' in js_dict:
            del js_dict['trial_info']
        if 'fhir' in js_dict:
            del js_dict['fhir']
        return js_dict

    def for_api(self, stripped=False):
        """Serialize for the API; *stripped* omits the bulky detail arrays."""
        js_dict = super().for_api()
        if stripped:
            #if 'conditions' in js_dict:
            #	del js_dict['conditions']
            if 'medications' in js_dict:
                del js_dict['medications']
            if 'allergies' in js_dict:
                del js_dict['allergies']
            if 'labs' in js_dict:
                del js_dict['labs']
        if 'cached' in js_dict:
            del js_dict['cached']
        if 'fhir' in js_dict:
            del js_dict['fhir']
        return js_dict

    def process_observations(self, observations):
        """ Given a list of FHIR observations, determines which are mutations
        and which are lab values, filling the receiver's ivars accordingly.
        """
        if observations is None:
            return
        if self.labs is None:
            self.labs = []
        for obs in observations:
            if triallab.TrialLab.is_lab(obs):
                self.labs.append(triallab.TrialLab.from_fhir(obs))
            elif trialmutation.TrialMutation.is_mutation(obs):
                mut = trialmutation.TrialMutation.from_fhir(obs)
                # this is a mutation, find corresponding observation
                if self.conditions is not None:
                    found = False
                    for cond in self.conditions:
                        # The mutation's reference path ends in the condition id.
                        if mut.reference and cond.id == os.path.basename(mut.reference):
                            if cond.mutations is None:
                                cond.mutations = []
                            cond.mutations.append(mut)
                            found = True
                            break
                    if not found:
                        logging.warning("Found a mutation but not the matching condition in patient {}"
                            .format(self.id))
                else:
                    logging.warning("Found a mutation but patient {} has no conditions"
                        .format(self.id))
        # Normalize "no labs" back to None rather than an empty list.
        self.labs = self.labs if len(self.labs) > 0 else None

    @classmethod
    def load_from_fhir(cls, client):
        """ Instantiates a TrialPatient with data from a FHIR Patient resource,
        retrieved from a SMART client (fhirclient) instance.

        :param client: A handle to a `fhirclient` instance
        :returns: A TrialPatient instance, or None on error
        """
        fpat = client.patient if client is not None else None
        if fpat is None:
            return None
        patient = cls(fpat.id)
        patient.fhir = fpat
        patient.full_name = client.human_name(fpat.name[0] if fpat.name and len(fpat.name) > 0 else None)
        patient.gender = fpat.gender
        patient.birthday = fpat.birthDate.isostring
        # Prefer the 'home' address; otherwise fall back to the first one.
        if fpat.address is not None and len(fpat.address) > 0:
            address = fpat.address[0]
            for addr in fpat.address:
                if 'home' == addr.use:
                    address = addr
                    break
            patient.city = address.city
            patient.region = address.state
            patient.country = address.country
        # retrieve problem list
        cond_search = condition.Condition.where(struct={'subject': fpat.id})
        patient.conditions = [trialcondition.TrialCondition.from_fhir(c) for c in cond_search.perform_resources(fpat._server)]
        # retrieve observations: labs and mutations
        obs_search = observation.Observation.where(struct={'subject': fpat.id})
        observations = obs_search.perform_resources(fpat._server)
        patient.process_observations(observations)
        # retrieve meds
        med_search = medicationprescription.MedicationPrescription.where(struct={'subject': fpat.id})
        patient.medications = [trialmedication.TrialMedication.from_fhir(m) for m in med_search.perform_resources(fpat._server)]
        # retrieve allergies
        allerg_search = allergyintolerance.AllergyIntolerance.where(struct={'subject': fpat.id})
        patient.allergies = [trialallergy.TrialAllergy.from_fhir(a) for a in allerg_search.perform_resources(fpat._server)]
        return patient


    # MARK: Trial Info

    def info_for_trial(self, trial_id):
        """Return the receiver's TrialPatientInfo for *trial_id*, or None."""
        if self.trial_info is not None:
            for trialinfo in self.trial_info:
                if trialinfo.trial_id == trial_id:
                    return trialinfo
        return None


    # MARK: Birthday & Age

    def age_delta(self):
        """Return a relativedelta from birthday to now (or to deathdate),
        or None when the birthday is missing or unparseable."""
        if self.birthday:
            try:
                # `parse` comes from the dateutil.parser wildcard import.
                birth = parse(self.birthday)
            except Exception as e:
                logging.error("Failed to parse birthday \"{}\": {}".format(self.birthday, e))
                return None
            now = datetime.datetime.now()
            if self.deathdate:
                try:
                    now = parse(self.deathdate)
                except Exception as e:
                    logging.error("Failed to parse deathdate \"{}\": {}".format(self.deathdate, e))
            return relativedelta(now, birth)
        return None

    @property
    def age_years(self):
        # Lazily computed and cached directly in __dict__ (bypassing
        # __setattr__ recursion through the property setter below).
        if self.__dict__.get('age_years') is None:
            self.update_age_years()
        return self.__dict__.get('age_years')

    @age_years.setter
    def age_years(self, years):
        self.__dict__['age_years'] = years

    def update_age_years(self):
        """Recompute the cached age in whole years from the birthday."""
        delta = self.age_delta()
        self.age_years = delta.years if delta is not None else None

    @property
    def age_string(self):
        """Human-readable age, adding months for patients under 3 years."""
        delta = self.age_delta()
        if delta is not None:
            if 1 == delta.years:
                years = "{} year".format(delta.years)
            else:
                years = "{} years".format(delta.years)
            if delta.years < 3:
                if 1 == delta.months:
                    return "{} {} month".format(years, delta.months)
                return "{} {} months".format(years, delta.months)
            return years
        return ''


    # MARK: Portrait

    def load_photo(self):
        """ Retrieves a FHIR Patient's first photo and returns a tuple with
        content-type and data.
        """
        fpat = self.fhir if self.fhir is not None else None
        if fpat is None:
            logging.warning("Patient instance lost its handle to the FHIR Patient instance, cannot retrieve photo")
            return None, None
        if fpat.photo is not None:
            photo_data = None
            # Use the first URL-backed photo; inline base-64 is unsupported.
            for photo in fpat.photo:
                if photo.url is not None:
                    photo_data = fpat._server.request_data(photo.url)
                    break
                elif photo.data is not None:
                    logging.info("Base-64 encoded photo data is not yet supported")
            if photo_data is not None:
                return photo.contentType, photo_data
        return None, None


    # MARK: Location

    def update_location(self):
        """Recompute `location` as "city, region" from the parts present."""
        parts = []
        if self.city:
            parts.append(self.city)
        if self.region:
            parts.append(self.region)
        # setattr (not self.location = ...) keeps intent explicit despite the
        # __setattr__ override; None when neither part is known.
        setattr(self, 'location', ', '.join(parts) if len(parts) > 0 else None)
class TrialPatientInfo(jsondocument.JSONDocument):
    """ Information linking a patient and a trial, stored by app users.
    """

    def __init__(self, trial_id=None, patient_id=None, json=None):
        """Create or rehydrate a trial/patient link document.

        :param trial_id: trial identifier; falls back to json['trial_id']
        :param patient_id: patient identifier; falls back to json['patient_id']
        :param json: optional stored document to rehydrate from
        :raises Exception: if either id is still missing after the fallbacks
        """
        if json is not None:
            if trial_id is None:
                trial_id = json.get('trial_id')
            if patient_id is None:
                patient_id = json.get('patient_id')
        if not trial_id or not patient_id:
            raise Exception("Need both a trial- and patient-id, have trial: {}, patient: {}"
                .format(trial_id, patient_id))
        # Deterministic document id: the same pair always maps to one record.
        ident = '{}-{}'.format(trial_id, patient_id)
        super().__init__(ident, 'trial-patient-info', json)
        self.trial_id = trial_id
        self.patient_id = patient_id

    def for_api(self):
        """Return the API representation; notes are also rendered to HTML."""
        js = {
            'trial_id': self.trial_id,
            'patient_id': self.patient_id,
        }
        if self.suggested:
            js['suggested'] = True
        if self.notes:
            js['notes'] = {
                'raw': self.notes,
                'html': markdown.markdown(self.notes),
            }
        return js

    def update_from_api(self, json):
        """Update the document from client-supplied JSON and persist it.

        'suggested' is accepted as a bool, an int, or the strings 'true'/'1';
        anything else counts as False.
        """
        d = {}
        if 'suggested' in json:
            # BUG FIX: the original called int() on the value unconditionally,
            # so non-numeric strings such as 'false' raised ValueError.
            val = json['suggested']
            try:
                d['suggested'] = ('true' == val) or (1 == int(val))
            except (TypeError, ValueError):
                d['suggested'] = False
        if 'notes' in json:
            d['notes'] = json['notes']
        self.update_with(d)
        self.store()
|
<reponame>zouguojian/pollutant-prediction
# -- coding: utf-8 --
import tensorflow as tf
import numpy as np
import argparse
from spatial_temporal_model.hyparameter import parameter
import pandas as pd
class DataIterator():
    """Produces aligned (power-plant, pollutant) sliding-window samples for
    training or testing, and batches them through tf.data in `next_batch`.
    """

    def __init__(self,
                 site_id=0,
                 site_num=41,
                 pollutant_id=2,
                 is_training=True,
                 time_size=48,
                 prediction_size=24,
                 data_divide=0.9,
                 window_step=1,
                 normalize=False):
        '''
        :param site_id: index of the target (ozone) site
        :param site_num: number of monitoring sites recorded per time step
        :param pollutant_id: column offset of the target pollutant
        :param is_training: while is_training is True, the model is in training state
        :param time_size: time series length of the input window
        :param prediction_size: the length of the prediction horizon
        :param data_divide: training/test split ratio
        :param window_step: sliding-window step (training only)
        :param normalize: min-max normalize the features when True
        '''
        self.min_value = 0.000000000001  # epsilon guarding against zero division
        self.site_id = site_id  # ozone ID
        self.site_num = site_num
        self.pollutant_id = pollutant_id
        self.time_size = time_size  # time series length of input
        self.prediction_size = prediction_size  # the length of prediction
        self.is_training = is_training  # true or false
        self.data_divide = data_divide  # the divide between in training set and test set ratio
        self.window_step = window_step  # windows step
        # Load the power-plant data and the pollutant concentrations of one site.
        # NOTE(review): absolute user-specific paths; consider parameterizing them.
        self.data_s = self.get_source_data('/Users/guojianzou/pollutant-prediction/data/new_data/train_s.csv').values
        self.data_p = self.get_source_data('/Users/guojianzou/pollutant-prediction/data/new_data/train_p.csv').values
        # Per-feature extrema over the data sets, used for min-max scaling.
        self.max_s, self.min_s = self.get_max_min(self.data_s[:, 2:])
        self.max_p, self.min_p = self.get_max_min(self.data_p[:, 3:])
        self.normalize = normalize
        if self.normalize:
            self.normalization(self.data_s, self.min_s, self.max_s, 2)  # normalization
            self.normalization(self.data_p, self.min_p, self.max_p, 3)  # normalization
        # Leading portion of the data used as the training set ...
        self.train_s = self.data_s[0: int(self.data_s.shape[0] // self.site_num * self.data_divide) * self.site_num]
        self.train_p = self.data_p[0: int(self.data_p.shape[0] * self.data_divide)]
        # ... and the remainder as the test set.
        self.test_s = self.data_s[int(self.data_s.shape[0] // self.site_num * self.data_divide) * self.site_num:]
        self.test_p = self.data_p[int(self.data_p.shape[0] * self.data_divide):]

    def get_source_data(self, file_path):
        '''
        Read *file_path* as a CSV DataFrame; returns None when reading fails.
        '''
        data = None
        try:
            data = pd.read_csv(file_path, encoding='utf-8')
        except IOError:
            print("Error: do not to find or failed to read the file")
        else:
            print("successful to read the data")
        return data

    def get_max_min(self, data):
        '''
        :return: (max_list, min_list) -- per-column extrema of *data*
        '''
        min_list = []
        max_list = []
        for i in range(data.shape[1]):
            min_list.append(min(data[:, i]))
            max_list.append(max(data[:, i]))
        print('the max feature list is :', max_list)
        print('the min feature list is :', min_list)
        return max_list, min_list

    def normalization(self, data, min, max, index_start):
        '''
        Min-max normalize the columns of *data* in place, starting at *index_start*.

        BUG FIX: the epsilon meant to guard against a zero denominator was
        previously added to `min` inside the subtraction, yielding
        ``max - min - eps`` (a *negative* denominator when max == min);
        it now widens the denominator instead: ``max - min + eps``.
        '''
        for i in range(data.shape[1] - index_start):
            data[:, i + index_start] = (data[:, i + index_start] - np.array(min[i])) / \
                                       (np.array(max[i]) - np.array(min[i]) + self.min_value)

    def generator(self):
        '''
        Yield one sample per step as the tuple:
          (source window [time_size * site_num, F_s],
           pollutant window [time_size, F_p],
           columns 0/1/2 of the pollutant data over window + horizon,
           label [prediction_size])
        '''
        if self.is_training:
            data_s = self.train_s
            data_p = self.train_p
        else:
            data_s = self.test_s
            data_p = self.test_p
        low1, low2 = 0, 0
        high1, high2 = data_s.shape[0], data_p.shape[0]
        while (low2 + self.time_size + self.prediction_size) <= high2:
            label = data_p[low2 + self.time_size: low2 + self.time_size + self.prediction_size, 3 + self.pollutant_id]
            yield (data_s[low1:low1 + self.time_size * self.site_num, 2:],
                   data_p[low2:low2 + self.time_size, 3:],
                   data_p[low2:low2 + self.time_size + self.prediction_size, 0],
                   data_p[low2:low2 + self.time_size + self.prediction_size, 1],
                   data_p[low2:low2 + self.time_size + self.prediction_size, 2],
                   label)
            # Training slides by window_step; testing jumps by the horizon so
            # test windows do not overlap.
            if self.is_training:
                low1 += self.window_step * self.site_num
                low2 += self.window_step
            else:
                low1 += self.prediction_size * self.site_num
                low2 += self.prediction_size

    def next_batch(self, batch_size=32, epochs=1, is_training=True):
        '''
        Build the tf.data pipeline over `generator` and return the iterator op.

        :param batch_size: samples per batch
        :param epochs: number of passes over the data
        :param is_training: shuffle only when training
        :return: the `get_next()` op of a one-shot iterator
        '''
        self.is_training = is_training
        dataset = tf.data.Dataset.from_generator(self.generator, output_types=(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32))
        if self.is_training:
            dataset = dataset.shuffle(buffer_size=int(self.train_p.shape[0] - self.time_size - self.prediction_size) // self.window_step)
        dataset = dataset.repeat(count=epochs)
        dataset = dataset.batch(batch_size=batch_size)
        iterator = dataset.make_one_shot_iterator()
        return iterator.get_next()
# Quick smoke test: build the iterator and pull two test batches.
if __name__ == '__main__':
    para = parameter(argparse.ArgumentParser())
    para = para.get_para()
    data_iter = DataIterator(site_id=0, site_num=41, data_divide=0.8, pollutant_id=2, normalize=True,
                             time_size=48, prediction_size=24, window_step=para.step)
    batch_op = data_iter.next_batch(32, 1, is_training=False)
    with tf.Session() as sess:
        for i in range(2):
            # BUG FIX: the generator yields six tensors per sample, so all six
            # must be unpacked (the original `x, x1, y = sess.run(...)` raised
            # "not enough values to unpack").
            x_s, x_p, col0, col1, col2, y = sess.run(batch_op)
            print(x_s.shape)
            print(x_p.shape)
            print(y.shape)
from implementation.hospitalViejo import Hospital
import numpy as np
from implementation.optimizer.AllocationOptimizerHeuristica import AllocationOptimizer
#from implementation.optimizer.AllocationOptimizerGoalProgramming3 import AllocationOptimizer
#from implementation.optimizer.AllocationOptimizerNonGoal import AllocationOptimizer
from collections import deque
from agent_model.model import Model
# from optimizer.AllocationOptimizerCplexDocPlex import AllocationOptimizer
from sklearn.metrics import mean_squared_error
import timeit
import math
import pandas as pd
from implementation.inventory import FIFOInventory
from statistics import mean
import json
from scipy import stats
class VMI(Model):
    # The states of this class are the inventory levels held for each
    # remaining shelf-life bucket; the possible actions range from 0 to MAX_A.
    def __init__(self, hospitals, max_A, shelf_life, train_runs, initial_state=None, exp_cost=None, stockout_cost=None):
        """Vendor-managed-inventory environment over one DC and several hospitals.

        :param hospitals: number of hospitals served by the distribution center
        :param max_A: maximum shipment size; the action space is max_A * 11
            (shipment size combined with a production decile)
        :param shelf_life: number of shelf-life age buckets per inventory
        :param train_runs: years logged under "train"; later years go to "validate"
        :param initial_state: starting state vector.
            NOTE(review): len(initial_state) is taken below, so the None
            default would raise TypeError -- confirm callers always pass one.
        :param exp_cost: unit cost of an expired bag
        :param stockout_cost: unit cost of a stockout
        """
        super(VMI, self).__init__(initial_state, max_A * 11, len(initial_state))
        self.year_day = 0
        self.year = 0
        self.day = 1
        self.train_runs = train_runs
        self.shelf_life = shelf_life
        self.exp_cost = exp_cost
        self.stockout_cost = stockout_cost
        # Hospitals carry a higher expiry cost (1.5x) than the DC.
        self.hospitals = [Hospital([0] * shelf_life, 1.5 * exp_cost, stockout_cost) for _ in range(hospitals)]
        # [Hospital([0] * shelf_life, None, exp_cost*1.5, stockout_cost*1.5)] * hospitals
        self.demands_and_donors = pd.read_csv(r'implementation/run_parameters.csv')
        # print(self.demands_and_donors)
        # Last three observed demands per hospital, kept for forecasting.
        self.demand_registry = [deque(maxlen=3) for _ in range(hospitals)]
        self.log = {"train": {}, "validate": {}}
        self.solve_memory = {}
    def model_logic(self, state, action):
        """Advance the simulation by one day.

        Decodes *action* into a shipment size and a production decile,
        allocates shipped units to hospitals via the heuristic optimizer,
        charges hospital stockout/expiry costs plus DC expiry costs, logs the
        day, and returns the (state, action, next_state, reward, done)
        transition.  Reward is negated total cost; done is always False.
        """
        # demands = [5, 10, 15, 20]
        # print(state[:self.shelf_life])
        # demand_data =self.get_demand(state[5])# self.demands_and_donors.iloc[self.year_day]
        # donors = demand_data["donors"]
        donors = 100
        # self.get_donors(state[5])
        demands = self.get_demand(state[5])
        # self.get_demand(state[5])
        # donors = self.get_donors()
        # A = min(action,sum(state[:self.shelf_life]))
        # Action encoding: shipment size A plus the fraction of donors to process.
        A = action // 11
        prep_donors = int((((action % 11) * 10) / 100.0) * donors)
        # print(action, A, prep_donors ,sum(demands))
        # Take A units FIFO from the DC age buckets (oldest first).
        A_i = [0] * self.shelf_life
        for i, val in enumerate(A_i):
            if i == 0:
                A_i[i] = min(A, state[i])
            else:
                A_i[i] = min(A - sum(A_i[0:i]), state[i])
        II = []
        for i in self.hospitals:
            II.append(i.inventory)
        # print(II , state[self.shelf_life:])
        # demand_forecast = [round(mean(x)) for x in self.demand_registry] if len(
        #     self.demand_registry[0]) >= 3 else self.get_average_demand(state[5])
        demand_forecast = self.get_average_demand(state[5])
        # self.forecast_acc_mse+=mean_squared_error(demands,demand_forecast)
        # print(self.forecast_acc_mse)
        # json_model = json.dumps({"II": II, "A": A_i, "demands": demand_forecast})
        # if json_model in self.solve_memory:
        #     rep, used_model = self.solve_memory[json_model], False
        # else:
        #
        #     opt = AllocationOptimizer(II, A_i, demand_forecast, self.exp_cost, self.stockout_cost, self.shelf_life,
        #                               len(self.hospitals))
        #     rep, used_model = opt.allocate()
        #     self.solve_memory[json_model]=rep
        # Allocate the shipped units across hospitals against the forecast.
        opt = AllocationOptimizer(II, A_i, demand_forecast, self.exp_cost, self.stockout_cost, self.shelf_life,
                                  len(self.hospitals))
        rep, used_model = opt.allocate()
        # Record today's realized demands for future forecasting.
        for idx, i in enumerate(self.demand_registry):
            i.append(demands[idx])
        # opt = AllocationOptimizer(II, A_i, demands, self.exp_cost, self.stockout_cost, self.shelf_life, len(self.hospitals))
        # print("Day ",self.year_day, rep)
        # print(rep)
        # Supply each hospital and accumulate per-hospital costs.
        reward = 0
        rewards = []
        stockouts = []
        expireds = []
        for hosp in range(len(self.hospitals)):
            r, st, exp = self.hospitals[hosp].supply(rep[hosp], demands[hosp])
            rewards.append(-r)
            stockouts.append(st)
            expireds.append(exp)
            reward += r
        next_state, dc_exp = self.update_inventory_bloodbank(state, prep_donors, A,sum([sum(i.inventory) for i in self.hospitals]))
        # print(donors)
        # print(next_state)
        reward += dc_exp * self.exp_cost
        # reward=0
        # print(reward)
        year = self.year
        # Log the day under "train" for the first train_runs years, then "validate".
        if year < self.train_runs:
            data = {"rewards": rewards, "stockouts": stockouts, "expirees": expireds, "allocation": rep,
                    "shipment_size": A, "production_level": (((action % 11) * 10) / 100.0),
                    "inventory": state[:self.shelf_life], "donors": donors, "reward": reward, "demands": demands,
                    'DC_expirees': dc_exp, 'II': II, 'Used_LP_Model': used_model}
            if year in self.log["train"]:
                self.log["train"][year].append(data)
            else:
                self.log["train"][year] = []
                self.log["train"][year].append(data)
        else:
            data = {"rewards": rewards, "stockouts": stockouts, "expirees": expireds, "allocation": rep,
                    "shipment_size": A, "production_level": (((action % 11) * 10) / 100.0),
                    "inventory": state[:self.shelf_life], "donors": donors, "reward": reward, "demands": demands,
                    'DC_expirees': dc_exp, 'II': II, 'Used_LP_Model': used_model}
            if year in self.log["validate"]:
                self.log["validate"][year].append(data)
            else:
                self.log["validate"][year] = []
                self.log["validate"][year].append(data)
        self.year_day += 1
        # print(reward)
        # Costs were accumulated as positives; the agent maximizes reward.
        reward *= -1
        return state, action, next_state, reward, False
def valid_actions(self, state):
t_inv = sum(state[:self.shelf_life])
# a_max = min(t_inv, self.action_dim)
# v_act = [*range(a_max)]
v_act2 = {x for x in range(1100) if (x // 11) <= t_inv}
# print(t_inv,v_act2)
return v_act2
def reset_model(self):
# print("Solutions buffer:",len(self.solve_memory))
self.forecast_acc_mse = 0
self.year_day = 0
self.year += 1
self.hospitals = [Hospital([0] * self.shelf_life, self.exp_cost * 1.5, self.stockout_cost) for _ in
range(len(self.hospitals))]
for i in self.demand_registry:
i.clear()
    def update_inventory_bloodbank(self, state, donors, delivered, hospital_new_inv):
        """Age the distribution-center inventory by one day.

        Units in the oldest slot (``state[0]``) are counted as expired,
        ``delivered`` units are removed oldest-first, and today's ``donors``
        enter at the freshest slot.  Returns ``(next_state, dc_exp)`` where
        ``dc_exp`` is the number of units expired at the DC today.

        NOTE(review): the loop below hard-codes slot indices 0-4 and reads
        ``state[5]`` for the day-of-week counter, so it appears to assume
        ``shelf_life == 5`` — confirm before changing ``shelf_life``.
        NOTE(review): the offsets use ``sum(state[:i])``, which includes the
        expiring slot ``state[0]`` when crediting served deliveries — verify
        deliveries are really allowed to come from expiring stock.
        """
        # inv = FIFOInventory(state[:self.shelf_life])
        # stk = inv.pop(delivered)
        # dc_exp = inv.age()
        # supply = [0] * self.shelf_life
        # supply[-1] = donors
        # inv.push(supply)
        #
        # if stk > 0:
        #     raise Exception("Malfunction : DC should never incur in stockouts. ")
        state_aux = [0] * (self.shelf_life)
        # Everything at age 0 today expires before tomorrow.
        dc_exp = state[0]
        for i in range(self.shelf_life):
            if (i == 0):
                state_aux[i] = max(0, state[i + 1] - delivered)
            elif 0 < i < 4:
                state_aux[i] = max(0, state[i + 1] - max(0, delivered - sum(state[:i])))
            elif (i == 4):
                # Freshest slot is refilled from today's donors.
                state_aux[i] = max(0, donors - max(0, delivered - sum(state[:i])))
        # Append the day-of-week counter (1..7) and the hospitals' total inventory.
        state_aux += [(state[5] % 7) + 1]
        state_aux += [hospital_new_inv]
        # state_aux=inv.inventory
        # state_aux+=[(state[5] % 7) + 1]
        # state_aux+=[hospital_new_inv]
        return state_aux, dc_exp
def arima_forecast(self):
import pmdarima as pm
forecast = [round(pm.auto_arima(self.demand_registry[i],
start_p=1,
start_q=1,
test="adf",
seasonal=True,
trace=False).predict(n_periods=1, return_conf_int=False)[0]) for i in
range(len(self.hospitals))]
return forecast
def get_donors(self, day):
if day == 1:
don = np.random.triangular(50, 90, 120)
elif day == 2:
don = np.random.triangular(50, 90, 120)
elif day == 3:
don = np.random.triangular(50, 90, 120)
elif day == 4:
don = np.random.triangular(50, 90, 120)
elif day == 5:
don = np.random.triangular(50, 90, 120)
elif day == 6:
don = np.random.triangular(50, 90, 120)
else:
don = np.random.triangular(50, 90, 120)
don = math.floor(don)
# don=100
return don
def get_average_demand(self, day):
if day == 1:
d1 = 2.6 * 6.1 * 0.5
d2 = 2.6 * 6.1 * 0.3
d3 = 2.6 * 6.1 * 0.2
d4 = 2.6 * 6.1 * 0.1
elif day == 2:
d1 = 4.9 * 9.2 * 0.5
d2 = 4.9 * 9.2 * 0.3
d3 = 4.9 * 9.2 * 0.2
d4 = 4.9 * 9.2 * 0.1
elif day == 3:
d1 = 6.9 * 8.2 * 0.5
d2 = 6.9 * 8.2 * 0.3
d3 = 6.9 * 8.2 * 0.2
d4 = 6.9 * 8.2 * 0.1
elif day == 4:
d1 = 4.7 * 9.3 * 0.5
d2 = 4.7 * 9.3 * 0.3
d3 = 4.7 * 9.3 * 0.2
d4 = 4.7 * 9.3 * 0.1
elif day == 5:
d1 = 5.7 * 8.0 * 0.5
d2 = 5.7 * 8.0 * 0.3
d3 = 5.7 * 8.0 * 0.2
d4 = 5.7 * 8.0 * 0.1
elif day == 6:
d1 = 4.8 * 8.7 * 0.5
d2 = 4.8 * 8.7 * 0.3
d3 = 4.8 * 8.7 * 0.2
d4 = 4.8 * 8.7 * 0.1
else:
d1 = 1.7 * 3.2 * 0.5
d2 = 1.7 * 3.2 * 0.3
d3 = 1.7 * 3.2 * 0.2
d4 = 1.7 * 3.2 * 0.1
d1 = self.checkDemand(d1)
d2 = self.checkDemand(d2)
d3 = self.checkDemand(d3)
d4 = self.checkDemand(d4)
return [d1, d2, d3, d4]
def get_demand(self, day):
# VENTA DIRECTA UNION TEMPORAL
if day == 1:
d1 = np.random.gamma(2.6, 6.1) * 0.5
d2 = np.random.gamma(2.6, 6.1) * 0.3
d3 = np.random.gamma(2.6, 6.1) * 0.2
d4 = np.random.gamma(2.6, 6.1) * 0.1
elif day == 2:
d1 = np.random.gamma(4.9, 9.2) * 0.5
d2 = np.random.gamma(4.9, 9.2) * 0.3
d3 = np.random.gamma(4.9, 9.2) * 0.2
d4 = np.random.gamma(4.9, 9.2) * 0.1
elif day == 3:
d1 = np.random.gamma(6.9, 8.2) * 0.5
d2 = np.random.gamma(6.9, 8.2) * 0.3
d3 = np.random.gamma(6.9, 8.2) * 0.2
d4 = np.random.gamma(6.9, 8.2) * 0.1
elif day == 4:
d1 = np.random.gamma(4.7, 9.3) * 0.5
d2 = np.random.gamma(4.7, 9.3) * 0.3
d3 = np.random.gamma(4.7, 9.3) * 0.2
d4 = np.random.gamma(4.7, 9.3) * 0.1
elif day == 5:
d1 = np.random.gamma(5.7, 8.0) * 0.5
d2 = np.random.gamma(5.7, 8.0) * 0.3
d3 = np.random.gamma(5.7, 8.0) * 0.2
d4 = np.random.gamma(5.7, 8.0) * 0.1
elif day == 6:
d1 = np.random.gamma(4.8, 8.7) * 0.5
d2 = np.random.gamma(4.8, 8.7) * 0.3
d3 = np.random.gamma(4.8, 8.7) * 0.2
d4 = np.random.gamma(4.8, 8.7) * 0.1
else:
d1 = np.random.gamma(1.7, 3.2) * 0.5
d2 = np.random.gamma(1.7, 3.2) * 0.3
d3 = np.random.gamma(1.7, 3.2) * 0.2
d4 = np.random.gamma(1.7, 3.2) * 0.1
d1 = self.checkDemand(d1)
d2 = self.checkDemand(d2)
d3 = self.checkDemand(d3)
d4 = self.checkDemand(d4)
demands = [d1, d2, d3, d4]
# demands=[10,15,8,11]
return demands
def checkDemand(self, a):
a = math.floor(a)
if (a == 0):
a = 1
return a
# agent=QAgent(model,0.99,0.1)
# agent.run(365, 1000)
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import astropy.units as u
import numpy as np
from astropy.io import fits
from warnings import warn
try:
from radio_beam import Beam, NoBeamException
RADIO_BEAM_INSTALL = True
except ImportError:
RADIO_BEAM_INSTALL = False
def find_beam_properties(hdr):
    '''
    Try to read beam properties from a header. Uses radio_beam when installed.

    Parameters
    ----------
    hdr : `~astropy.io.fits.Header`
        FITS header.

    Returns
    -------
    bmaj : `~astropy.units.Quantity` or None
        Major axis of the beam in degrees.
    bmin : `~astropy.units.Quantity` or None
        Minor axis of the beam in degrees. Falls back to `bmaj` when it
        cannot be read from the header.
    bpa : `~astropy.units.Quantity` or None
        Position angle of the major axis; defaults to 0 deg when it cannot
        be read from the header.
    '''

    if RADIO_BEAM_INSTALL:
        # Let radio_beam do the parsing; all three values come back as
        # (None, None, None) when no beam is present.
        try:
            beam = Beam.from_fits_header(hdr)
            return beam.major.to(u.deg), beam.minor.to(u.deg), beam.pa.to(u.deg)
        except NoBeamException:
            return None, None, None

    # Manual fallback: read the raw keywords ourselves.
    if not isinstance(hdr, fits.Header):
        raise TypeError("Header is not a FITS header.")

    if "BMAJ" in hdr:
        bmaj = hdr["BMAJ"] * u.deg
    else:
        warn("Cannot find 'BMAJ' in the header. Try installing"
             " the `radio_beam` package for loading header"
             " information.")
        bmaj = None

    if "BMIN" in hdr:
        bmin = hdr["BMIN"] * u.deg
    else:
        warn("Cannot find 'BMIN' in the header. Assuming circular beam.")
        bmin = bmaj

    if "BPA" in hdr:
        bpa = hdr["BPA"] * u.deg
    else:
        warn("Cannot find 'BPA' in the header. Assuming PA of 0.")
        bpa = 0 * u.deg

    return bmaj, bmin, bpa
class BaseInfoMixin(object):
    """
    Mixin providing read-only access to common celestial metadata.
    """

    @property
    def image(self):
        '''
        The underlying image.
        '''
        return self._image

    @property
    def header(self):
        '''
        The FITS header associated with the image.
        '''
        return self._header

    @property
    def wcs(self):
        '''
        The world-coordinate-system object.
        '''
        return self._wcs

    @property
    def beamwidth(self):
        '''
        Major axis of the beam.
        '''
        return self._beamwidth

    @property
    def _has_beam(self):
        # A beam is considered available whenever a beam width has been set.
        return hasattr(self, '_beamwidth')
class UnitConverter(object):
    """
    Handle pixel, angular, and physical spatial unit conversions. Requires
    pixels to be square. Conversions are not aware of any axis misalignment.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`, optional
        WCS of the image; only the celestial axes are used.
    distance : `~astropy.units.Quantity`, optional
        Scalar distance to the object; required for physical conversions.
    """
    def __init__(self, wcs=None, distance=None):

        if wcs is not None:
            if not wcs.is_celestial:
                self._wcs = wcs.celestial
            else:
                self._wcs = wcs

            # Angular pixel scale from the first axis; pixels are required
            # to be square, so a single value suffices.
            self._ang_size = np.abs(self._wcs.wcs.cdelt[0]) * \
                u.Unit(self._wcs.wcs.cunit[0])
            self._ang_size = self._ang_size.to(u.deg)

        if distance is not None:
            self.distance = distance

    @property
    def ang_size(self):
        '''
        Angular size of one pixel.
        '''
        return self._ang_size

    @property
    def angular_equiv(self):
        '''
        Pixel <-> degree equivalency for `~astropy.units.Quantity.to`.
        '''
        return [(u.pix, u.deg, lambda x: x * float(self.ang_size.value),
                lambda x: x / float(self.ang_size.value))]

    @property
    def distance(self):
        '''
        Distance to the object.

        Raises
        ------
        AttributeError
            If no distance has been set.
        '''
        if not hasattr(self, "_distance"):
            # BUG FIX: message was the double negative
            # "No distance has not been given."
            raise AttributeError("No distance has been given.")

        return self._distance

    @distance.setter
    def distance(self, value):
        '''
        Value must be a quantity with a valid distance unit. Will keep the
        units given.
        '''

        if not isinstance(value, u.Quantity):
            # BUG FIX: message read "must an astropy Quantity".
            raise TypeError("Value for distance must be an astropy Quantity.")

        if not value.unit.is_equivalent(u.pc):
            raise u.UnitConversionError("Given unit ({}) is not a valid unit"
                                        " of distance.".format(value.unit))

        if not value.isscalar:
            raise TypeError("Distance must be a scalar quantity.")

        self._distance = value

    @property
    def physical_size(self):
        '''
        Physical size of one pixel. Requires `distance` to be set.
        '''
        if not hasattr(self, "_distance"):
            raise AttributeError("No distance has been given.")

        return (self.ang_size *
                self.distance).to(self.distance.unit,
                                  equivalencies=u.dimensionless_angles())

    @property
    def physical_equiv(self):
        '''
        Pixel <-> physical-length equivalency. Requires `distance` to be set.
        '''
        if not hasattr(self, "_distance"):
            raise AttributeError("No distance has been given.")

        return [(u.pix, self.distance.unit,
                 lambda x: x * float(self.physical_size.value),
                 lambda x: x / float(self.physical_size.value))]

    def to_pixel(self, value):
        '''
        Convert from angular or physical scales to pixels.
        '''

        if not isinstance(value, u.Quantity):
            raise TypeError("value must be an astropy Quantity object.")

        # Pixel quantities pass through unchanged; angular conversions only
        # need the WCS, physical ones additionally need the distance.
        if value.unit.is_equivalent(u.pix):
            return value
        elif value.unit.is_equivalent(u.deg):
            return value.to(u.pix, equivalencies=self.angular_equiv)
        elif value.unit.is_equivalent(u.pc):
            return value.to(u.pix, equivalencies=self.physical_equiv)
        else:
            raise u.UnitConversionError("value has units of {}. It must have "
                                        "an angular or physical unit."
                                        .format(value.unit))

    def to_pixel_area(self, value):
        '''
        Convert an area quantity to a pixel area.

        `value` should have an area-equivalent unit.
        '''
        return self.to_pixel(np.sqrt(value))**2

    def to_angular(self, value, unit=u.deg):
        '''
        Convert a pixel quantity to an angular unit (degrees by default).
        '''
        return value.to(unit, equivalencies=self.angular_equiv)

    def to_physical(self, value, unit=u.pc):
        '''
        Convert a pixel quantity to a physical unit. Requires `distance`.
        '''
        if not hasattr(self, "_distance"):
            raise AttributeError("No distance has been given.")

        return value.to(unit, equivalencies=self.physical_equiv)

    def from_pixel(self, pixel_value, unit):
        '''
        Convert a value in pixel units to the given unit.
        '''

        if isinstance(unit, u.Quantity):
            unit = unit.unit

        if unit.is_equivalent(u.pix):
            return pixel_value
        elif unit.is_equivalent(u.deg):
            return self.to_angular(pixel_value, unit)
        elif unit.is_equivalent(u.pc):
            return self.to_physical(pixel_value, unit)
        else:
            raise u.UnitConversionError("unit must be an angular or physical"
                                        " unit.")
def data_unit_check(value, unit):
    '''
    Check that a value has a unit equivalent to the given unit. If no unit is
    attached, add the given unit to the value.
    '''
    # Bare numbers simply acquire the requested unit.
    if not hasattr(value, 'unit'):
        return value * unit

    if value.unit.is_equivalent(unit):
        return value.to(unit)

    raise u.UnitConversionError("The given value does not have "
                                "equivalent units.")
|
<reponame>infosecsecurity/OSPTF<filename>RVS/TerminalEmulator.py
# Copyright (c) 2012-2015 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import array
import unicodedata
class TerminalEmulator:
    # Bit flags for a cell's rendition word.  The low byte holds style
    # bits; judging from graphic_rendition, bits 16-23 hold the foreground
    # colour and bits 24-31 the background colour (or a 256-colour palette
    # index when the corresponding *_256 flag is set).
    RENDITION_BOLD = 0x001
    RENDITION_DIM = 0x002
    RENDITION_UNDERLINE = 0x008
    RENDITION_INVERSE = 0x040
    RENDITION_HIDDEN = 0x080
    RENDITION_FOREGROUND_256 = 0x100
    RENDITION_BACKGROUND_256 = 0x200
    RENDITION_WRITTEN_CHAR = 0x800  # Only set when character was written normally, used to detect line wrap on copy/paste
    def __init__(self, rows, cols):
        """Create an emulator with a rows x cols character grid.

        Maintains two screen buffers (normal and alternate), per-cell
        rendition words, a scrollback history, cursor/scroll-region state,
        a UTF-8 decoding buffer, and dispatch tables for control characters
        and CSI escape sequences.
        """
        self.rows = rows
        self.cols = cols
        # Initialize screen arrays
        self.screen = []
        self.rendition = []
        self.other_screen = []
        self.other_rendition = []
        self.alt_screen = False
        self.dirty = set()
        # Template blank line: one unicode space / zero rendition per column.
        self.default_screen_line = array.array('u')
        self.default_rendition_line = array.array('I')
        for i in xrange(0, cols):
            self.default_screen_line.append(u' ')
            self.default_rendition_line.append(0)
        for i in xrange(0, rows):
            self.screen.append(array.array('u', self.default_screen_line))
            self.rendition.append(array.array('I', self.default_rendition_line))
            self.other_screen.append(array.array('u', self.default_screen_line))
            self.other_rendition.append(array.array('I', self.default_rendition_line))
        # Scrollback history for lines pushed off the top of the normal buffer.
        self.history_screen = []
        self.history_rendition = []
        self.active_rendition = 0
        # Cursor and scroll-region state.
        self.cursor_row = 0
        self.cursor_col = 0
        self.cursor_visible = True
        self.tab_width = 8
        self.scroll_top = 0
        self.scroll_bottom = self.rows - 1
        self.saved_cursor_row = 0
        self.saved_cursor_col = 0
        self.saved_normal_cursor_row = 0
        self.saved_normal_cursor_col = 0
        self.saved_alt_cursor_row = 0
        self.saved_alt_cursor_col = 0
        # Parser state.
        self.escape_mode = False
        self.window_title_mode = False
        self.ignored_window_title = False
        self.line_draw = False
        # Partial multi-byte UTF-8 sequence awaiting continuation bytes.
        self.utf8_buffer = ""
        self.utf8_len = 0
        self.unprocessed_input = u""
        self.application_cursor_keys = False
        self.insert_mode = False
        # Host hooks: screen update, window title change, terminal responses.
        self.update_callback = None
        self.title_callback = None
        self.response_callback = None
        # Dispatch table for C0 control characters.
        self.special_chars = {
            u'\x07': self.bell,
            u'\x08': self.backspace,
            u'\x09': self.horizontal_tab,
            u'\x0a': self.line_feed,
            u'\x0b': self.line_feed,
            u'\x0c': self.line_feed,
            u'\x0d': self.carriage_return
        }
        # Dispatch table for CSI final characters (prefixed with ?, >, or !
        # for private/secondary variants by process_escape).
        self.escape_sequences = {
            u'@': self.insert_chars,
            u'A': self.cursor_up,
            u'B': self.cursor_down,
            u'C': self.cursor_right,
            u'D': self.cursor_left,
            u'E': self.cursor_next_line,
            u'F': self.cursor_prev_line,
            u'G': self.set_cursor_col,
            u'`': self.set_cursor_col,
            u'd': self.set_cursor_row,
            u'H': self.move_cursor,
            u'f': self.move_cursor,
            u'I': self.cursor_right_tab,
            u'J': self.erase_screen,
            u'?J': self.erase_screen,
            u'K': self.erase_line,
            u'?K': self.erase_line,
            u'r': self.scroll_region,
            u'L': self.insert_lines,
            u'P': self.delete_chars,
            u'M': self.delete_lines,
            u'S': self.scroll_up_lines,
            u'T': self.scroll_down_lines,
            u'X': self.erase_chars,
            u'Z': self.cursor_left_tab,
            u'm': self.graphic_rendition,
            u'h': self.set_option,
            u'l': self.clear_option,
            u'?h': self.set_private_option,
            u'?l': self.clear_private_option,
            u'c': self.device_attr,
            u'>c': self.device_secondary_attr,
            u'n': self.device_status,
            u'?n': self.device_status,
            u'!p': self.soft_reset
        }
        # Escape intermediates that introduce charset selection sequences.
        self.charset_escapes = [u' ', u'#', u'%', u'(', u')', u'*', u'+']
        # DEC special-graphics characters to Unicode box-drawing glyphs.
        self.line_draw_map = {
            u'j': unicode('\xe2\x94\x98', 'utf8'),
            u'k': unicode('\xe2\x94\x90', 'utf8'),
            u'l': unicode('\xe2\x94\x8c', 'utf8'),
            u'm': unicode('\xe2\x94\x94', 'utf8'),
            u'n': unicode('\xe2\x94\xbc', 'utf8'),
            u'q': unicode('\xe2\x94\x80', 'utf8'),
            u't': unicode('\xe2\x94\x9c', 'utf8'),
            u'u': unicode('\xe2\x94\xa4', 'utf8'),
            u'v': unicode('\xe2\x94\xb4', 'utf8'),
            u'w': unicode('\xe2\x94\xac', 'utf8'),
            u'x': unicode('\xe2\x94\x82', 'utf8')
        }
def invalidate(self):
for i in xrange(0, self.rows):
self.dirty.add(i)
    def resize(self, rows, cols):
        """Resize both screen buffers to rows x cols.

        When shrinking vertically, lines are dropped from the bottom if the
        normal-buffer cursor is in the kept region, otherwise from the top
        (those go into the scrollback history).  The scroll region is reset
        to the full screen and all cursors are clamped into bounds.
        """
        if rows > self.rows:
            # Adding rows
            for i in xrange(self.rows, rows):
                self.screen.append(array.array('u', self.default_screen_line))
                self.rendition.append(array.array('I', self.default_rendition_line))
                self.other_screen.append(array.array('u', self.default_screen_line))
                self.other_rendition.append(array.array('I', self.default_rendition_line))
        elif rows < self.rows:
            if self.alt_screen:
                # Alternate screen buffer is active
                normal_cursor_row = self.saved_normal_cursor_row
                if normal_cursor_row < rows:
                    # Cursor is at top, remove lines from bottom
                    self.other_screen = self.other_screen[:rows]
                    self.other_rendition = self.other_rendition[:rows]
                else:
                    # Cursor is at bottom, remove lines from top, and place them in the
                    # history buffer
                    for i in xrange(0, (normal_cursor_row + 1) - rows):
                        screen_line = self.other_screen.pop(0)
                        rendition_line = self.other_rendition.pop(0)
                        self.history_screen.append(screen_line)
                        self.history_rendition.append(rendition_line)
                    self.other_screen = self.other_screen[:rows]
                    self.other_rendition = self.other_rendition[:rows]
                self.screen = self.screen[:rows]
                self.rendition = self.rendition[:rows]
            else:
                # Normal screen buffer is active
                normal_cursor_row = self.cursor_row
                if normal_cursor_row < rows:
                    # Cursor is at top, remove lines from bottom
                    self.screen = self.screen[:rows]
                    self.rendition = self.rendition[:rows]
                else:
                    # Cursor is at bottom, remove lines from top, and place them in the
                    # history buffer
                    for i in xrange(0, (normal_cursor_row + 1) - rows):
                        screen_line = self.screen.pop(0)
                        rendition_line = self.rendition.pop(0)
                        self.history_screen.append(screen_line)
                        self.history_rendition.append(rendition_line)
                    self.screen = self.screen[:rows]
                    self.rendition = self.rendition[:rows]
                self.other_screen = self.other_screen[:rows]
                self.other_rendition = self.other_rendition[:rows]
        if cols > self.cols:
            # Adding columns
            for i in xrange(0, rows):
                for j in xrange(self.cols, cols):
                    self.screen[i].append(u' ')
                    self.rendition[i].append(0)
                    self.other_screen[i].append(u' ')
                    self.other_rendition[i].append(0)
            # Keep the blank-line templates the same width as the screen.
            for j in xrange(self.cols, cols):
                self.default_screen_line.append(u' ')
                self.default_rendition_line.append(0)
        elif cols < self.cols:
            # Removing columns
            for i in xrange(0, rows):
                self.screen[i] = self.screen[i][0:cols]
                self.rendition[i] = self.rendition[i][0:cols]
                self.other_screen[i] = self.other_screen[i][0:cols]
                self.other_rendition[i] = self.other_rendition[i][0:cols]
            self.default_screen_line = self.default_screen_line[0:cols]
            self.default_rendition_line = self.default_rendition_line[0:cols]
        self.rows = rows
        self.cols = cols
        # Resizing always resets the scroll region to the whole screen.
        self.scroll_top = 0
        self.scroll_bottom = self.rows - 1
        # Ensure cursors are within bounds
        if self.cursor_col > cols:
            self.cursor_col = cols
        if self.cursor_row >= rows:
            self.cursor_row = rows - 1
        if self.saved_cursor_col > cols:
            self.saved_cursor_col = cols
        if self.saved_cursor_row >= rows:
            self.saved_cursor_row = rows - 1
        if self.saved_normal_cursor_col > cols:
            self.saved_normal_cursor_col = cols
        if self.saved_normal_cursor_row >= rows:
            self.saved_normal_cursor_row = rows - 1
        if self.saved_alt_cursor_col > cols:
            self.saved_alt_cursor_col = cols
        if self.saved_alt_cursor_row >= rows:
            self.saved_alt_cursor_row = rows - 1
        self.invalidate()
        if self.update_callback:
            self.update_callback()
def response(self, data):
if self.response_callback:
self.response_callback(data)
def bell(self):
# I'm not going to annoy people here
pass
def backspace(self):
if self.cursor_col > 0:
self.cursor_col -= 1
def horizontal_tab(self):
self.cursor_col += self.tab_width - (self.cursor_col % self.tab_width)
if self.cursor_col > self.cols:
self.cursor_col = self.cols
    def scroll_up(self):
        """Scroll the scroll region up one line.

        The line at scroll_top is removed; when the full screen of the
        normal buffer is scrolling it is preserved in the history, otherwise
        its storage is recycled.  A blank line (in the active rendition, if
        any) is inserted at scroll_bottom.
        """
        top_screen = self.screen.pop(self.scroll_top)
        top_rendition = self.rendition.pop(self.scroll_top)
        # Only update history if windowing isn't being used and the normal screen buffer is active
        if (self.scroll_top == 0) and (self.scroll_bottom == (self.rows - 1)) and (not self.alt_screen):
            self.history_screen.append(top_screen)
            self.history_rendition.append(top_rendition)
            top_screen = array.array('u', self.default_screen_line)
            top_rendition = array.array('I', self.default_rendition_line)
        else:
            # Reuse the popped arrays as the new blank bottom line.
            top_screen[0:self.cols] = self.default_screen_line
            top_rendition[0:self.cols] = self.default_rendition_line
        if self.active_rendition != 0:
            for i in xrange(0, self.cols):
                top_rendition[i] = self.active_rendition
        self.screen.insert(self.scroll_bottom, top_screen)
        self.rendition.insert(self.scroll_bottom, top_rendition)
        self.invalidate()
def line_feed(self):
if self.cursor_row >= self.scroll_bottom:
self.scroll_up()
else:
self.cursor_row += 1
def reverse_line_feed(self):
if self.cursor_row <= self.scroll_top:
self.insert_lines([1])
else:
self.cursor_row -= 1
    def newline(self):
        """Line feed followed by an implicit carriage return."""
        self.line_feed()
        self.cursor_col = 0
    def carriage_return(self):
        """CR - return the cursor to the first column."""
        self.cursor_col = 0
    def escape(self):
        """ESC received - start collecting an escape sequence."""
        self.escape_mode = True
def write_char(self, ch):
if self.cursor_col >= self.cols:
self.newline()
if self.line_draw and (ch in self.line_draw_map):
ch = self.line_draw_map[ch]
if self.insert_mode:
self.insert_chars([1])
# Write character at cursor location
self.screen[self.cursor_row][self.cursor_col] = ch
self.rendition[self.cursor_row][self.cursor_col] = self.active_rendition | TerminalEmulator.RENDITION_WRITTEN_CHAR
self.dirty.add(self.cursor_row)
self.cursor_col += 1
def erase_rect(self, top_row, left_col, bot_row, right_col):
for row in xrange(top_row, bot_row):
if row < 0:
continue
if row >= self.rows:
break
for col in xrange(left_col, right_col):
if col < 0:
continue
if col >= self.cols:
break
self.screen[row][col] = u' '
self.rendition[row][col] = self.active_rendition
self.dirty.add(row)
def cursor_up(self, params):
count = params[0]
if count == 0:
count = 1
self.cursor_row -= count
if self.cursor_row < 0:
self.cursor_row = 0
def cursor_down(self, params):
count = params[0]
if count == 0:
count = 1
self.cursor_row += count
if self.cursor_row >= self.rows:
self.cursor_row = self.rows - 1
def cursor_right(self, params):
count = params[0]
if count == 0:
count = 1
self.cursor_col += count
if self.cursor_col >= self.cols:
self.cursor_col = self.cols
def cursor_left(self, params):
count = params[0]
if count == 0:
count = 1
self.cursor_col -= count
if self.cursor_col < 0:
self.cursor_col = 0
def cursor_next_line(self, params):
count = params[0]
if count == 0:
count = 1
self.cursor_col = 0
self.cursor_row += count
if self.cursor_row >= self.rows:
self.cursor_row = self.rows - 1
def cursor_prev_line(self, params):
count = params[0]
if count == 0:
count = 1
self.cursor_col = 0
self.cursor_row -= count
if self.cursor_row < 0:
self.cursor_row = 0
def set_cursor_col(self, params):
self.cursor_col = params[0] - 1
if self.cursor_col < 0:
self.cursor_col = 0
if self.cursor_col > self.cols:
self.cursor_col = self.cols
def set_cursor_row(self, params):
self.cursor_row = params[0] - 1
if self.cursor_row < 0:
self.cursor_row = 0
if self.cursor_row >= self.rows:
self.cursor_row = self.rows - 1
def move_cursor(self, params):
self.cursor_row = params[0] - 1
if len(params) < 2:
self.cursor_col = 0
else:
self.cursor_col = params[1] - 1
if self.cursor_col < 0:
self.cursor_col = 0
if self.cursor_col > self.cols:
self.cursor_col = self.cols
if self.cursor_row < 0:
self.cursor_row = 0
if self.cursor_row >= self.rows:
self.cursor_row = self.rows - 1
def cursor_left_tab(self, params):
count = params[0]
if count == 0:
count = 1
if count > self.cols:
count = self.cols
for i in xrange(0, count):
if (self.cursor_col % self.tab_width) == 0:
self.cursor_col -= self.tab_width
else:
self.cursor_col -= self.cursor_col % self.tab_width
if self.cursor_col < 0:
self.cursor_col = 0
def cursor_right_tab(self, params):
count = params[0]
if count == 0:
count = 1
if count > self.cols:
count = self.cols
for i in xrange(0, count):
self.cursor_col += self.tab_width - (self.cursor_col % self.tab_width)
if self.cursor_col > self.cols:
self.cursor_col = self.cols
def erase_screen(self, params):
if (len(params) == 0) or (params[0] == 0):
self.erase_rect(self.cursor_row, self.cursor_col, self.cursor_row + 1, self.cols)
self.erase_rect(self.cursor_row + 1, 0, self.rows, self.cols)
elif params[0] == 1:
self.erase_rect(0, 0, self.cursor_row, self.cols)
self.erase_rect(self.cursor_row, 0, self.cursor_row + 1, self.cursor_col + 1)
elif params[0] == 2:
self.erase_rect(0, 0, self.rows, self.cols)
self.cursor_row = 0
self.cursor_col = 0
def erase_line(self, params):
if (len(params) == 0) or (params[0] == 0):
self.erase_rect(self.cursor_row, self.cursor_col, self.cursor_row + 1, self.cols)
elif params[0] == 1:
self.erase_rect(self.cursor_row, 0, self.cursor_row + 1, self.cursor_col + 1)
elif params[0] == 2:
self.erase_rect(self.cursor_row, 0, self.cursor_row + 1, self.cols)
def scroll_region(self, params):
if len(params) < 2:
return
self.scroll_top = params[0] - 1
self.scroll_bottom = params[1] - 1
if self.scroll_top < 0:
self.scroll_top = 0
if self.scroll_top >= self.rows:
self.scroll_top = self.rows - 1
if self.scroll_bottom < 0:
self.scroll_bottom = 0
if self.scroll_bottom >= self.rows:
self.scroll_bottom = self.rows - 1
def insert_lines(self, params):
count = params[0]
if count == 0:
count = 1
if count == 0:
return
if (self.cursor_row < self.scroll_top) or (self.cursor_row > self.scroll_bottom):
return
if count > ((self.scroll_bottom + 1) - self.cursor_row):
count = (self.scroll_bottom + 1) - self.cursor_row
erased_screen = []
erased_rendition = []
for i in xrange(0, count):
erased_screen.append(self.screen.pop((self.scroll_bottom + 1) - count))
erased_rendition.append(self.rendition.pop((self.scroll_bottom + 1) - count))
for j in xrange(0, self.cols):
erased_screen[i][j] = u' '
erased_rendition[i][j] = self.active_rendition
for i in xrange(0, count):
self.screen.insert(self.cursor_row, erased_screen[i])
self.rendition.insert(self.cursor_row, erased_rendition[i])
self.invalidate()
def delete_lines(self, params):
count = params[0]
if count == 0:
count = 1
if (self.cursor_row < self.scroll_top) or (self.cursor_row > self.scroll_bottom):
return
if count == 0:
return
if count > ((self.scroll_bottom + 1) - self.cursor_row):
count = (self.scroll_bottom + 1) - self.cursor_row
erased_screen = []
erased_rendition = []
for i in xrange(0, count):
erased_screen.append(self.screen.pop(self.cursor_row))
erased_rendition.append(self.rendition.pop(self.cursor_row))
for j in xrange(0, self.cols):
erased_screen[i][j] = u' '
erased_rendition[i][j] = self.active_rendition
for i in xrange(0, count):
self.screen.insert((self.scroll_bottom + 1) - count, erased_screen[i])
self.rendition.insert((self.scroll_bottom + 1) - count, erased_rendition[i])
self.invalidate()
def scroll_up_lines(self, params):
count = params[0]
if count == 0:
count = 1
if count == 0:
return
if count > ((self.scroll_bottom + 1) - self.scroll_top):
count = (self.scroll_bottom + 1) - self.scroll_top
erased_screen = []
erased_rendition = []
for i in xrange(0, count):
erased_screen.append(self.screen.pop(self.scroll_top))
erased_rendition.append(self.rendition.pop(self.scroll_top))
for j in xrange(0, self.cols):
erased_screen[i][j] = u' '
erased_rendition[i][j] = self.active_rendition
for i in xrange(0, count):
self.screen.insert((self.scroll_bottom + 1) - count, erased_screen[i])
self.rendition.insert((self.scroll_bottom + 1) - count, erased_rendition[i])
self.invalidate()
def scroll_down_lines(self, params):
count = params[0]
if count == 0:
count = 1
if count == 0:
return
if count > ((self.scroll_bottom + 1) - self.scroll_top):
count = (self.scroll_bottom + 1) - self.scroll_top
erased_screen = []
erased_rendition = []
for i in xrange(0, count):
erased_screen.append(self.screen.pop((self.scroll_bottom + 1) - count))
erased_rendition.append(self.rendition.pop((self.scroll_bottom + 1) - count))
for j in xrange(0, self.cols):
erased_screen[i][j] = u' '
erased_rendition[i][j] = self.active_rendition
for i in xrange(0, count):
self.screen.insert(self.scroll_top, erased_screen[i])
self.rendition.insert(self.scroll_top, erased_rendition[i])
self.invalidate()
def insert_chars(self, params):
count = params[0]
if count == 0:
count = 1
if count > (self.cols - self.cursor_col):
count = self.cols - self.cursor_col
for i in xrange(self.cols - 1, self.cursor_col + count - 1, -1):
self.screen[self.cursor_row][i] = self.screen[self.cursor_row][i - count]
self.rendition[self.cursor_row][i] = self.rendition[self.cursor_row][i - count]
self.erase_rect(self.cursor_row, self.cursor_col, self.cursor_row + 1, self.cursor_col + count)
self.dirty.add(self.cursor_row)
def delete_chars(self, params):
count = params[0]
if count == 0:
count = 1
if count > (self.cols - self.cursor_col):
count = self.cols - self.cursor_col
for i in xrange(self.cursor_col, self.cols - count):
self.screen[self.cursor_row][i] = self.screen[self.cursor_row][i + count]
self.rendition[self.cursor_row][i] = self.rendition[self.cursor_row][i + count]
self.erase_rect(self.cursor_row, self.cols - count, self.cursor_row + 1, self.cols)
self.dirty.add(self.cursor_row)
def erase_chars(self, params):
count = params[0]
if count == 0:
count = 1
self.erase_rect(self.cursor_row, self.cursor_col, self.cursor_row + 1, self.cursor_col + count)
def graphic_rendition(self, params):
i = 0
while i < len(params):
val = params[i]
if val == 0:
# Default rendition
self.active_rendition = 0
elif (val >= 1) and (val <= 9):
# Set style
self.active_rendition &= ~0xff
self.active_rendition |= 1 << (val - 1)
elif (val >= 21) and (val <= 29):
# Clear style
self.active_rendition &= ~(1 << (val - 21))
elif (val >= 30) and (val <= 37):
# Normal foreground color
self.active_rendition &= ~(0x00ff0000 | TerminalEmulator.RENDITION_FOREGROUND_256)
self.active_rendition |= (val - 29) << 16
elif val == 38:
if ((i + 2) < len(params)) and (params[i + 1] == 5):
# 256-color foreground
self.active_rendition &= ~0x00ff0000
self.active_rendition |= TerminalEmulator.RENDITION_FOREGROUND_256
self.active_rendition |= (params[i + 2] & 0xff) << 16
i += 2
elif val == 39:
# Default foreground color
self.active_rendition &= ~(0x00ff0000 | TerminalEmulator.RENDITION_FOREGROUND_256)
elif (val >= 40) and (val <= 47):
# Normal background color
self.active_rendition &= ~(0xff000000 | TerminalEmulator.RENDITION_BACKGROUND_256)
self.active_rendition |= (val - 39) << 24
elif val == 48:
if ((i + 2) < len(params)) and (params[i + 1] == 5):
# 256-color background
self.active_rendition &= ~0xff000000
self.active_rendition |= TerminalEmulator.RENDITION_BACKGROUND_256
self.active_rendition |= (params[i + 2] & 0xff) << 24
i += 2
elif val == 49:
# Default background color
self.active_rendition &= ~(0xff000000 | TerminalEmulator.RENDITION_BACKGROUND_256)
elif (val >= 90) and (val <= 97):
# High intensity foreground color
self.active_rendition &= ~(0x00ff0000 | TerminalEmulator.RENDITION_FOREGROUND_256)
self.active_rendition |= (val - 81) << 16
elif (val >= 100) and (val <= 107):
# High intensity background color
self.active_rendition &= ~(0xff000000 | TerminalEmulator.RENDITION_BACKGROUND_256)
self.active_rendition |= (val - 91) << 16
else:
print "Unsupported graphic rendition %d" % val
i += 1
def set_option(self, params):
for option in params:
if option == 4: # Insert mode
self.insert_mode = True
def clear_option(self, params):
for option in params:
if option == 4: # Insert mode
self.insert_mode = False
def set_private_option(self, params):
for option in params:
if option == 1: # Cursor key setting
self.application_cursor_keys = True
if option == 25: # Cursor visibility
self.cursor_visible = True
if ((option == 47) or (option == 1049)) and (not self.alt_screen): # Alternate screen buffer
self.screen, self.other_screen = self.other_screen, self.screen
self.rendition, self.other_rendition = self.other_rendition, self.rendition
self.saved_normal_cursor_row = self.cursor_row
self.saved_normal_cursor_col = self.cursor_col
self.cursor_row = self.saved_alt_cursor_row
self.cursor_col = self.saved_alt_cursor_col
self.alt_screen = True
self.invalidate()
def clear_private_option(self, params):
for option in params:
if option == 1: # Cursor key setting
self.application_cursor_keys = False
if option == 25: # Cursor visibility
self.cursor_visible = False
if ((option == 47) or (option == 1049)) and (self.alt_screen): # Alternate screen buffer
self.screen, self.other_screen = self.other_screen, self.screen
self.rendition, self.other_rendition = self.other_rendition, self.rendition
self.saved_alt_cursor_row = self.cursor_row
self.saved_alt_cursor_col = self.cursor_col
self.cursor_row = self.saved_normal_cursor_row
self.cursor_col = self.saved_normal_cursor_col
self.alt_screen = False
self.invalidate()
    def device_attr(self, params):
        """CSI c - primary device attributes query; reply with a fixed capability string."""
        self.response("\033[?1;2c")
    def device_secondary_attr(self, params):
        """CSI > c - secondary device attributes query; reply with a fixed version string."""
        self.response("\033[>0;1;0c")
def device_status(self, params):
if params[0] == 5:
self.response("\033[0n") # OK
elif params[0] == 6:
self.response("\033[%d;%dR" % (self.cursor_row + 1, self.cursor_col + 1))
def soft_reset(self, params):
self.active_rendition = 0
self.cursor_visible = True
self.tab_width = 8
self.scroll_top = 0
self.scroll_bottom = self.rows - 1
self.line_draw = False
def parse_params(self, params):
if len(params) == 0:
result = []
else:
try:
result = [int(i) for i in params.split(u';')]
except ValueError:
print "Invalid parameters '%s'" % params
return []
return result
def process_escape(self, sequence):
    """Interpret one complete escape sequence (everything after ESC).

    Single-character sequences are handled inline; CSI sequences
    (starting with '[') are split into a mode character plus numeric
    parameters and dispatched through self.escape_sequences.
    """
    if (sequence == u'=') or (sequence == u'>'):
        # Numpad handling, just ignore it
        return
    if sequence == u'c':
        # Terminal reset
        self.active_rendition = 0
        self.erase_rect(0, 0, self.rows, self.cols)
        self.cursor_row = 0
        self.cursor_col = 0
        self.saved_cursor_row = 0
        self.saved_cursor_col = 0
        self.invalidate()
        return
    if sequence == u'7':
        # Save cursor
        self.saved_cursor_row = self.cursor_row
        self.saved_cursor_col = self.cursor_col
        return
    if sequence == u'8':
        # Restore cursor
        self.cursor_row = self.saved_cursor_row
        self.cursor_col = self.saved_cursor_col
        return
    if sequence == u'D':
        self.line_feed()
        return
    if sequence == u'E':
        self.newline()
        return
    if sequence == u'M':
        self.reverse_line_feed()
        return
    if sequence[0] != u'[':
        print "Unhandled escape sequence '%s'" % sequence
        return
    # CSI sequence: last character selects the operation, the middle is the
    # parameter string.  Private-mode prefixes (?, >, !) are folded into the
    # mode key so 'CSI ? 25 h' dispatches as mode '?h'.
    params = sequence[1:-1]
    mode = sequence[-1]
    if (len(params) > 0) and (params[0] == u'?'):
        mode = u'?' + mode
        params = params[1:]
    if (len(params) > 0) and (params[0] == u'>'):
        mode = u'>' + mode
        params = params[1:]
    if (len(params) > 0) and (params[0] == u'!'):
        mode = u'!' + mode
        params = params[1:]
    params = self.parse_params(params)
    if len(params) == 0:
        # Missing parameters default to a single 0.
        params = [0]
    if mode in self.escape_sequences:
        self.escape_sequences[mode](params)
    else:
        print "Unhandled escape sequence '%s'" % sequence
def start_window_title(self, sequence):
    """Begin an OSC window-title update (ESC ] <n> ; title BEL).

    Decides from the numeric parameter whether the upcoming text sets the
    window title (0 or 2, or no parameter) or only the icon name (ignored).
    """
    params = self.parse_params(sequence[1:-1])
    if (len(params) == 0) or (params[0] == 0) or (params[0] == 2):
        # Setting window name
        self.ignored_window_title = False
    else:
        # Setting icon name, just ignore
        self.ignored_window_title = True
def process(self, data):
    """Feed raw bytes from the host into the terminal state machine.

    Performs incremental UTF-8 decoding, merges combining characters into
    the previously written cell, and routes decoded characters to the
    window-title collector, special-character handlers, escape-sequence
    parser, or plain character output.
    """
    for raw_ch in data:
        # --- Incremental UTF-8 decode: accumulate multi-byte sequences ---
        if self.utf8_len == 0:
            if ord(raw_ch) < 128:
                ch = unicode(raw_ch)
            elif ord(raw_ch) < 0xc0:
                # Unexpected continuation character
                ch = unichr(ord(raw_ch))
            elif ord(raw_ch) < 0xe0:
                self.utf8_buffer = raw_ch
                self.utf8_len = 1
            elif ord(raw_ch) < 0xf0:
                self.utf8_buffer = raw_ch
                self.utf8_len = 2
            elif ord(raw_ch) < 0xf8:
                self.utf8_buffer = raw_ch
                self.utf8_len = 3
            elif ord(raw_ch) < 0xfc:
                self.utf8_buffer = raw_ch
                self.utf8_len = 4
            elif ord(raw_ch) < 0xfe:
                self.utf8_buffer = raw_ch
                self.utf8_len = 5
            else:
                # Invalid first byte
                ch = unichr(ord(raw_ch))
        else:
            if (ord(raw_ch) & 0xc0) != 0x80:
                # Invalid continuation character
                ch = unichr(ord(raw_ch))
                self.utf8_len = 0
            else:
                self.utf8_buffer += raw_ch
                self.utf8_len -= 1
                if self.utf8_len == 0:
                    ch = unicode(self.utf8_buffer, 'utf8', 'replace')
        if self.utf8_len > 0:
            # Sequence not complete yet; wait for more bytes.
            continue
        # Check for combining characters
        try:
            if (unicodedata.combining(ch) != 0) and (self.cursor_col > 0):
                # Combining character, so combine it with the previously written character
                last_ch = self.screen[self.cursor_row][self.cursor_col - 1]
                combined = unicodedata.normalize("NFC", last_ch + ch)
                if len(combined) == 1:
                    # Successful combine, write out new character
                    self.screen[self.cursor_row][self.cursor_col - 1] = combined
                    self.dirty.add(self.cursor_row)
                    continue
        except TypeError:
            # Invalid character
            ch = u' '
        # --- Dispatch the decoded character ---
        if self.window_title_mode:
            if ch == u'\007': # Bell character ends window title
                if self.title_callback and not self.ignored_window_title:
                    self.title_callback(self.unprocessed_input)
                self.unprocessed_input = u""
                self.window_title_mode = False
            else:
                self.unprocessed_input += ch
        elif ch in self.special_chars:
            self.special_chars[ch]()
        elif self.escape_mode:
            self.unprocessed_input += ch
            if len(self.unprocessed_input) == 1:
                if (ch != u'[') and (ch != u']') and (ch not in self.charset_escapes):
                    # Special type of escape sequence, no parameters
                    self.process_escape(self.unprocessed_input)
                    self.unprocessed_input = u""
                    self.escape_mode = False
            elif (len(self.unprocessed_input) == 2) and (self.unprocessed_input[0] in self.charset_escapes):
                if self.unprocessed_input == "(0":
                    # Select line drawing character set
                    self.line_draw = True
                else:
                    # Other character set escape, just use UTF8
                    self.line_draw = False
                self.unprocessed_input = u""
                self.escape_mode = False
            elif (ch >= u'@') and (ch <= u'~'):
                # Ending character found, process sequence
                self.process_escape(self.unprocessed_input)
                self.unprocessed_input = u""
                self.escape_mode = False
            else:
                # Parameter character, add to pending string
                if self.unprocessed_input.startswith(u']') and (ch == u';'):
                    # Setting window title, wait for bell character to finish
                    self.start_window_title(self.unprocessed_input)
                    self.unprocessed_input = u""
                    self.escape_mode = False
                    self.window_title_mode = True
        elif ch == u'\033':
            self.escape()
        else:
            self.write_char(ch)
    # Notify the UI once per chunk, not per character.
    if self.update_callback:
        self.update_callback()
def get_dirty_lines(self):
    """Return the set of row indices modified since the last call, resetting
    the dirty set in the same step."""
    pending, self.dirty = self.dirty, set()
    return pending
|
import hashlib
import mock
import pytest
from rest_framework import status
from rest_framework.reverse import reverse
from apps.mood_groups.models import UserMoodGroup, MoodGroup
from apps.moods.models import UserMood, Mood
from apps.users.models import User
from tests.request_helper import pytest_request
MOOD_FIELDS_LIST = ['id', 'status', 'simple_summary']
# Oauth2 인증 Mock 처리 ( TODO: Oauth2.0 도 테스트 될 수 있게 로직 추가해야함 )
@pytest.fixture(scope='function')
def mock_is_authenticated():
    """Bypass OAuth2 by patching DRF's IsAuthenticatedOrReadOnly permission.

    TODO: also cover the real OAuth2.0 flow in tests.
    """
    with mock.patch('rest_framework.permissions.IsAuthenticatedOrReadOnly') as patched:
        yield patched
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_group_create(rf, client, user_context, mock_is_authenticated):
    """Creating a mood group via POST returns HTTP 201."""
    creator = user_context.init.create_user()
    payload = {
        'title': '5boon',
        'summary': '5boon 팀원들과의 기분 공유',
    }
    response = pytest_request(
        rf,
        method='post',
        url=reverse(viewname="mood_groups:group-list"),
        user=creator,
        data=payload,
    )
    assert response.status_code == status.HTTP_201_CREATED
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_my_group_list(rf, client, user_context, mock_is_authenticated):
    """Listing the groups I belong to returns HTTP 200."""
    member = user_context.init.create_user()
    user_context.init.create_groups(
        user=member,
        title='5boon',
        summary='5boon 팀원들과의 기분 공유'
    )
    response = pytest_request(
        rf,
        method='get',
        url=reverse(viewname="mood_groups:my_group-list"),
        user=member,
    )
    assert response.status_code == status.HTTP_200_OK
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_my_group_delete(rf, client, user_context, mock_is_authenticated):
    """
    Leaving a group: deleting my membership removes only the membership,
    while the group itself survives because another member remains.
    """
    user = user_context.init.create_user()
    # Create a group owned by `user`.
    mood_group, user_mood_group = user_context.init.create_groups(
        user=user,
        title='5boon',
        summary='5boon 팀원들과의 기분 공유'
    )
    guest = User.objects.create(
        username='test_guest',
        name='test_guest',
        password='<PASSWORD>'
    )
    # Create a mood for `user`.
    mood = Mood.objects.create(status=Mood.GOOD, simple_summary='test')
    UserMood.objects.create(user=user, mood=mood)
    # Create moods for the guest, one outside and one inside the group.
    guest_mood = Mood.objects.create(status=Mood.BAD, simple_summary='guest mood summary')
    UserMood.objects.create(user=guest, mood=guest_mood)
    UserMood.objects.create(
        user=guest,
        mood=guest_mood,
        mood_group=mood_group
    )
    # Add the guest to the group so the group is not emptied by the delete.
    UserMoodGroup.objects.create(
        user=guest,
        mood_group=mood_group,
    )
    url = reverse(
        viewname="mood_groups:my_group-detail",
        kwargs={"pk": user_mood_group.id}
    )
    response = pytest_request(rf,
                              method='delete',
                              url=url,
                              user=user)
    assert response.status_code == status.HTTP_204_NO_CONTENT
    # The group still exists (guest remains a member) ...
    assert MoodGroup.objects.filter(id=user_mood_group.mood_group.id).exists()
    # ... but my membership record is gone.
    assert not UserMoodGroup.objects.filter(id=user_mood_group.id).exists()
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_my_group_delete_and_no_member_group(rf, client, user_context, mock_is_authenticated):
    """Leaving a group as its last member also deletes the group itself."""
    member = user_context.init.create_user()
    _, membership = user_context.init.create_groups(
        user=member,
        title='5boon',
        summary='5boon 팀원들과의 기분 공유'
    )
    detail_url = reverse(
        viewname="mood_groups:my_group-detail",
        kwargs={
            'pk': membership.id
        }
    )
    response = pytest_request(
        rf,
        method='delete',
        url=detail_url,
        user=member,
    )
    assert response.status_code == status.HTTP_204_NO_CONTENT
    assert not MoodGroup.objects.filter(id=membership.mood_group.id).exists()
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_my_group_list_detail(rf, client, user_context, mock_is_authenticated):
    """
    Group detail view: a member can GET the detail of their membership,
    including the moods of other group members.
    """
    user = user_context.init.create_user()
    # Create a group owned by `user`.
    mood_group, user_mood_group = user_context.init.create_groups(
        user=user,
        title='5boon',
        summary='5boon 팀원들과의 기분 공유'
    )
    guest = User.objects.create(
        username='test_guest',
        name='test_guest',
        password='<PASSWORD>'
    )
    # Create a mood for `user`.
    mood = Mood.objects.create(status=Mood.GOOD, simple_summary='test')
    UserMood.objects.create(user=user, mood=mood)
    # Create moods for the guest, one outside and one inside the group.
    guest_mood = Mood.objects.create(status=Mood.BAD, simple_summary='guest mood summary')
    UserMood.objects.create(user=guest, mood=guest_mood)
    UserMood.objects.create(
        user=guest,
        mood=guest_mood,
        mood_group=mood_group
    )
    # Add the guest to the group.
    UserMoodGroup.objects.create(
        user=guest,
        mood_group=mood_group,
    )
    url = reverse(
        viewname="mood_groups:my_group-detail",
        kwargs={"pk": user_mood_group.id}
    )
    response = pytest_request(rf,
                              method='get',
                              url=url,
                              user=user)
    assert response.status_code == status.HTTP_200_OK
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_invitation_join(rf, client, user_context, mock_is_authenticated):
    """Joining a group through its invitation code returns HTTP 201.

    The invitation code is the SHA-256 digest of the group title.
    """
    host = user_context.init.create_user()
    group_title = '5boon'
    user_context.init.create_groups(
        user=host,
        title=group_title,
        summary='5boon 팀원들과의 기분 공유'
    )
    # The invited user is not yet a member of any group.
    invitee = User.objects.create(
        username='test_guest',
        name='test_guest',
        password='<PASSWORD>'
    )
    response = pytest_request(
        rf,
        method='post',
        url=reverse(viewname="mood_groups:invitation-list"),
        user=invitee,
        data={'code': hashlib.sha256(group_title.encode()).hexdigest()},
    )
    assert response.status_code == status.HTTP_201_CREATED
|
import configurations
from eval import Evaluate
from utils import Vectorizer, headline2abstractdataset, load_embeddings
from seq2seq.fb_seq2seq import FbSeq2seq
from seq2seq.EncoderRNN import EncoderRNN
from seq2seq.DecoderRNNFB import DecoderRNNFB
from seq2seq.ContextEncoder import ContextEncoder
import torch
import torch.nn as nn
import os
import json
class ModelManager:
    """Builds and owns the seq2seq model plus its datasets for one experiment.

    Reads the experiment configuration selected by ``args.conf``, seeds the
    RNGs, loads the training/validation datasets, and assembles the
    encoder/decoder/context modules into an FbSeq2seq model.
    """
    def __init__(self, args):
        self.args = args
        # Model Configuration to execute.
        self.config = configurations.init(args.dataset)[args.conf]
        if args.local_rank == 0:
            print("Config is", args.conf)
        # Model's checkpoint filename.
        v = vars(self.args)
        v['save'] = "models/" + self.config.experiment_name + '.pkl'
        # Set the random seed manually for reproducibility.
        self.seed()
        # Evaluation API for calculating the BLEU, METEOR and ROUGE scores
        self.validation_eval = Evaluate()
        # Training and Validation datasets
        self.training_abstracts, self.validation_abstracts = self.load_datasets()
        # THE model!
        self.model = self.initialize_model()
    def seed(self):
        """Seed torch (and CUDA when used) from args.seed for reproducibility."""
        args = self.args
        torch.manual_seed(args.seed)
        if torch.cuda.is_available():
            if not args.cuda:
                print("WARNING: You have a CUDA device, so you should probably run with --cuda")
            else:
                torch.cuda.manual_seed(args.seed)
    def load_datasets(self):
        """Load the training and validation datasets with a shared vectorizer.

        Returns a (training_abstracts, validation_abstracts) tuple.  The same
        Vectorizer instance is reused so both datasets share one vocabulary.
        """
        config = self.config
        args = self.args
        cwd = os.getcwd()
        vectorizer = Vectorizer(min_frequency=config.min_freq)
        # Paths in the config are relative to the current working directory.
        data_path = cwd + config.relative_data_path
        training_abstracts = headline2abstractdataset(data_path, vectorizer, args.cuda, max_len=1000,
                                                     use_topics=config.use_topics, use_structure_info=config.use_labels)
        validation_data_path = cwd + config.relative_dev_path
        validation_abstracts = headline2abstractdataset(validation_data_path, vectorizer, args.cuda, max_len=1000,
                                                       use_topics=config.use_topics,
                                                       use_structure_info=config.use_labels)
        if args.local_rank == 0:
            print("number of training examples: %d" % len(training_abstracts), flush=True)
        return training_abstracts, validation_abstracts
    def initialize_model(self):
        """Assemble the FbSeq2seq model: embeddings, optional context encoder,
        title/abstract encoders, and the feedback decoder.

        Returns the constructed (untrained) model.
        """
        config = self.config
        args = self.args
        training_abstracts = self.training_abstracts
        context_encoder = None
        vocab_size = len(training_abstracts.vectorizer.word2idx)
        embedding = nn.Embedding(vocab_size, config.emsize, padding_idx=0)
        if config.pretrained:
            embedding = load_embeddings(embedding, training_abstracts.vectorizer.word2idx, config.pretrained, config.emsize)
        if config.use_topics or config.use_labels:
            context_encoder = ContextEncoder(config.context_dim, len(training_abstracts.vectorizer.context_vectorizer), config.emsize)
        # Encoder input widths grow with the context features that are enabled.
        title_encoder_rnn_dim = config.emsize + (config.use_topics * training_abstracts.max_context_length) * config.context_dim
        abstract_encoder_rnn_dim = config.emsize + (
                config.use_labels + config.use_topics * training_abstracts.max_context_length) * config.context_dim
        # Vocabulary ids for the structural tokens the decoder needs to track.
        structure_labels = {"introduction": training_abstracts.vectorizer.context_vectorizer["introduction"],
                            "body": training_abstracts.vectorizer.context_vectorizer["body"],
                            "conclusion": training_abstracts.vectorizer.context_vectorizer["conclusion"],
                            "full_stop": training_abstracts.vectorizer.word2idx["."],
                            "question_mark": training_abstracts.vectorizer.word2idx["?"]}
        encoder_title = EncoderRNN(vocab_size, embedding, training_abstracts.head_len, title_encoder_rnn_dim,
                                   abstract_encoder_rnn_dim, input_dropout_p=config.input_dropout_p, output_dropout_p=config.output_dropout_p,
                                   n_layers=config.nlayers, bidirectional=config.bidirectional,
                                   rnn_cell=config.cell)
        encoder = EncoderRNN(vocab_size, embedding, training_abstracts.abs_len, abstract_encoder_rnn_dim,
                             abstract_encoder_rnn_dim, input_dropout_p=config.input_dropout_p, output_dropout_p=config.output_dropout_p,
                             variable_lengths=False, n_layers=config.nlayers,
                             bidirectional=config.bidirectional, rnn_cell=config.cell)
        decoder = DecoderRNNFB(vocab_size, embedding, training_abstracts.abs_len, abstract_encoder_rnn_dim, sos_id=2, eos_id=1,
                               n_layers=config.nlayers, rnn_cell=config.cell, bidirectional=config.bidirectional,
                               input_dropout_p=config.input_dropout_p, dropout_p=config.dropout_p,
                               output_dropout_p=config.output_dropout_p, labels=structure_labels,
                               use_labels=config.use_labels, context_model=context_encoder, use_cuda=args.cuda,
                               use_intra_attention=config.use_intra_attention,
                               intra_attention_window_size=config.window_size_attention)
        model = FbSeq2seq(encoder_title, encoder, context_encoder, decoder)
        # Parameter count: matrices contribute rows*cols, vectors their length.
        total_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in model.parameters())
        if args.local_rank == 0:
            print("Configuration is as follows", json.dumps({"training data path": config.relative_data_path,
                                                             "validation data path": config.relative_dev_path,
                                                             "training batch size": config.batch_size,
                                                             "word embedding dim": config.emsize,
                                                             "context embedding dim": config.context_dim,
                                                             "validation batch size": config.validation_batch_size,
                                                             "input dropout": config.input_dropout_p,
                                                             "dropout": config.dropout_p,
                                                             "output dropout": config.output_dropout_p,
                                                             "data parallel": config.data_parallel,
                                                             "distributed data parallel": config.distributed_data_parallel,
                                                             "log_interval": config.log_interval,
                                                             "print_running_loss": config.print_running_loss,
                                                             "learning rate": config.lr,
                                                             "pre-trained embeddings location": config.pretrained,
                                                             "use topics": config.use_topics,
                                                             "use labels": config.use_labels,
                                                             "use intra-attention": config.use_intra_attention,
                                                             "intra-attention window size": config.window_size_attention,
                                                             "experiment name": config.experiment_name},
                                                            sort_keys=True, indent=4, separators=(',', ': ')), flush=True)
            print('Model total parameters:', total_params, flush=True)
        return model
    def get_model(self):
        """Return the constructed model."""
        return self.model
    def get_training_data(self):
        """Return the training dataset."""
        return self.training_abstracts
    def get_validation_data(self):
        """Return the validation dataset."""
        return self.validation_abstracts
    def get_config(self):
        """Return the active experiment configuration."""
        return self.config
    def get_eval_object(self):
        """Return the Evaluate instance used for BLEU/METEOR/ROUGE scoring."""
        return self.validation_eval
<reponame>SanketSinha10/Datalake-Pipeline<filename>src/datawarehousing/change_data_capture.py
import hashlib
from pyspark.sql import SparkSession, DataFrame, Window
from pyspark.sql.functions import col, row_number
from utils.Utilities import is_null_or_empty
def append_audit_attributes_to_xml(file, file_contents, xml_closing_tag):
    """Inject audit elements into an XML document just before its closing tag.

    Adds a ``<hashcode>`` element (MD5 of the original contents) and an
    ``<xml_file_name>`` element, then restores the closing tag.

    NOTE(review): str.replace substitutes *every* occurrence of the closing
    tag; callers are assumed to pass the document's root tag, which appears
    exactly once — confirm against the callers.
    """
    hash_val = hashlib.md5(file_contents.encode('utf-8')).hexdigest()
    audit_suffix = (
        f'<hashcode>{hash_val}</hashcode>'
        f'<xml_file_name>{file}</xml_file_name>'
        f'</{xml_closing_tag}>'
    )
    return file_contents.replace(f'</{xml_closing_tag}>', audit_suffix)
def add_row_number_to_dataframe(dataframe: DataFrame, primary_keys, order_by_keys, eliminate_duplicate_records=False,
                                drop_row_number_column=False):
    """Rank rows within each primary-key partition as a 'row_num' column.

    Rows are numbered per partition in descending order of *order_by_keys*.
    With ``eliminate_duplicate_records`` only row_num == 1 is kept, and with
    ``drop_row_number_column`` additionally the helper column is removed
    (the column is only dropped when duplicates are also eliminated).
    """
    partition_cols = [col(c) for c in primary_keys]
    ordering_cols = [col(c).desc() for c in order_by_keys]
    window = Window.partitionBy(*partition_cols).orderBy(*ordering_cols)
    ranked = dataframe.withColumn(colName='row_num', col=row_number().over(window=window).alias('row_num'))
    if eliminate_duplicate_records:
        ranked = ranked.filter('row_num = 1')
        if drop_row_number_column:
            ranked = ranked.drop('row_num')
    return ranked
def add_audit_columns(_df: DataFrame) -> DataFrame:
    """Append audit columns to a DataFrame: the source file name
    (spark_file_name) and the load timestamp (spark_timestamp)."""
    import datetime
    # One timestamp per call, shared by every row in this batch.
    ts = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    df: DataFrame = _df
    # Backtick-quote existing column names so special characters survive selectExpr.
    sel_cols = list(map(lambda x: str(f'`{x}`'), df.schema.names))
    # Last path component of the file each row was read from.
    sel_cols.append(f"reverse(split(input_file_name(), '/'))[0] AS spark_file_name")
    sel_cols.append(f"CAST('{ts}' AS TIMESTAMP) AS spark_timestamp")
    print(sel_cols)
    df: DataFrame = df.selectExpr(sel_cols)
    return df
def identify_new_records(spark: SparkSession, old_dataframe: DataFrame, new_dataframe: DataFrame,
                         primary_keys=None, order_by_keys=None) -> DataFrame:
    """Change-data-capture: return rows from *new_dataframe* that are new or
    changed relative to a deduplicated *old_dataframe*.

    Returns None when both frames are empty, the whole new frame on an
    initial load, otherwise the union of new-primary-key rows and rows whose
    ``hashcode`` differs.

    NOTE(review): with empty primary keys the function still falls through to
    the join below with an empty join condition — presumably callers always
    supply primary keys when both frames are non-empty; confirm.
    """
    # Fixed: the defaults were mutable objects ([], ['current_timestamp']),
    # which is the classic shared-mutable-default pitfall.
    primary_keys = [] if primary_keys is None else primary_keys
    order_by_keys = ['current_timestamp'] if order_by_keys is None else order_by_keys
    old_df = "old_df"
    new_df = "new_df"
    if is_null_or_empty(primary_keys):
        print("WARNING - Empty primary keys given: Assuming all fields in the table for Deduplication")
        # NOTE(review): 'SELECT *FROM' relies on Spark tokenising '*' and
        # 'FROM' separately — kept as-is to avoid any behavior change.
        dedup_query = f"SELECT *FROM (SELECT t1.*, row_number() over (order by {','.join(order_by_keys)} desc) as row_num FROM {old_df} t1) WHERE row_num = 1"
    elif is_null_or_empty(old_dataframe) and is_null_or_empty(
            new_dataframe) and new_dataframe.count() <= 0 and old_dataframe.count() <= 0:
        print("Empty Dataframes")
        return None
    elif not is_null_or_empty(new_dataframe) and new_dataframe.count() > 0 and (
            is_null_or_empty(old_dataframe) or old_dataframe.count() <= 0):
        print("Assuming initial load CDC not required")
        return new_dataframe
    else:
        print(f"Before CDC Staging count = {old_dataframe.count()}")
        dedup_query = f"SELECT *FROM (SELECT t1.*, row_number() over (partition by {','.join(primary_keys)} order by {','.join(order_by_keys)} desc) as row_num FROM {old_df} t1) WHERE row_num = 1"
    old_dataframe.createOrReplaceTempView(old_df)
    new_dataframe.createOrReplaceTempView(new_df)
    # Replace the old view with its deduplicated version.
    spark.sql(dedup_query).createOrReplaceTempView(old_df)
    join_condition = list(map(lambda x: str(f'{old_df}.{x} = {new_df}.{x}'), primary_keys))
    exclude_condition = list(map(lambda x: str(f'{old_df}.{x} IS NULL'), primary_keys))
    # New rows: primary keys present only in the new frame (anti-join).
    new_pks_query = f"SELECT {new_df}.* FROM {new_df} LEFT JOIN {old_df} ON {' AND '.join(join_condition)} WHERE {' AND '.join(exclude_condition)}"
    # Updated rows: same primary key but a different content hash.
    updates_query = f"SELECT {new_df}.* FROM {new_df} INNER JOIN {old_df} ON {' AND '.join(join_condition)} WHERE {new_df}.hashcode <> {old_df}.hashcode"
    print(f"Fetch only New PK records query = {new_pks_query}")
    print(f"Fetch updated records query = {updates_query}")
    new_pk_records_df: DataFrame = spark.sql(new_pks_query).dropDuplicates()
    updates_df: DataFrame = spark.sql(updates_query).dropDuplicates()
    return new_pk_records_df.union(updates_df)
|
<filename>bands_inspect/_cli.py
# -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: <NAME> <<EMAIL>>
"""
Defines the bands-inspect CLI.
"""
import click
import numpy as np
import matplotlib.pyplot as plt
from . import io
from . import plot
from .compare import difference as _diff
from .compare import align as _align
@click.group()
def cli():
    # Top-level command group for the bands-inspect CLI; subcommands are
    # registered below via @cli.command().  (No docstring on purpose: click
    # would surface it as user-visible help text.)
    pass
@cli.command()
@click.argument(
    'eigenval_files', nargs=2, type=click.Path(exists=True, dir_okay=False)
)
@click.option('--energy-window', nargs=2, type=float, required=False)
def difference(eigenval_files, energy_window):
    """
    Calculate the difference between two bandstructures.
    """
    # Load both eigenvalue files.
    ev1, ev2 = [io.load(filename) for filename in eigenval_files]
    kwargs = {}
    if energy_window:
        # Restrict the comparison to eigenvalues inside the given window.
        kwargs['weight_eigenval'] = _diff.energy_window(*energy_window)
    click.echo(_diff.calculate(ev1, ev2, **kwargs))
@cli.command()
@click.option(
    '-i',
    '--input-files',
    nargs=2,
    type=click.Path(exists=True, dir_okay=False),
    default=['eigenvals1.hdf5', 'eigenvals2.hdf5']
)
@click.option(
    '-o',
    '--output-files',
    nargs=2,
    type=click.Path(exists=False, dir_okay=False),
    default=['eigenvals1_shifted.hdf5', 'eigenvals2_shifted.hdf5']
)
@click.option('--energy-window', nargs=2, type=float, required=False)
def align(input_files, output_files, energy_window):
    """
    Align two bandstructures.
    """
    ev1, ev2 = [io.load(filename) for filename in input_files]
    kwargs = {}
    if energy_window:
        # Weight only eigenvalues inside the given energy window.
        kwargs['weight_eigenval'] = _diff.energy_window(*energy_window)
    res = _align.calculate(
        ev1, ev2, symmetric_eigenval_weights=False, **kwargs
    )
    # Write the shifted eigenvalues back out and report shift / residual.
    io.save(res.eigenvals1_shifted, output_files[0])
    io.save(res.eigenvals2_shifted, output_files[1])
    click.echo('Shift: {: }'.format(res.shift))
    click.echo('Difference: {: }'.format(res.difference))
@cli.command()
@click.option(
    '--input',
    '-i',
    type=click.Path(exists=True, dir_okay=False),
    default='eigenval.hdf5',
    help='File containing the input eigenvalues (in HDF5 format).'
)
@click.option('--output', '-o', type=click.Path(dir_okay=False))
@click.argument('slice_idx', nargs=-1, type=int)
def slice_bands(input, output, slice_idx):  # pylint: disable=redefined-builtin
    """
    Modify a bandstructure by selecting/re-arranging specific bands.
    """
    # Load, slice to the requested band indices, and write the result.
    sliced = io.load(input).slice_bands(slice_idx)
    io.save(sliced, output)
@cli.command()
@click.option(
    '--output',
    '-o',
    type=click.Path(exists=False, dir_okay=False),
    help='Output file for the plot.',
    default='plot.pdf'
)
@click.argument(
    'eigenvals_files',
    nargs=-1,
    type=click.Path(dir_okay=False),
    required=True
)
def plot_bands(eigenvals_files, output):
    """
    Plot one or more bandstructures which share the same set of k-points.
    """
    loaded = [io.load(filename) for filename in eigenvals_files]
    # All files must be defined on the same explicit k-point set.
    reference_kpoints = loaded[0].kpoints.kpoints_explicit
    for ev in loaded:
        if not np.allclose(reference_kpoints, ev.kpoints.kpoints_explicit):
            raise ValueError('K-points do not match!')
    _, axis = plt.subplots()
    # One matplotlib color-cycle entry per bandstructure.
    for idx, ev in enumerate(loaded):
        plot.eigenvals(
            ev,
            ax=axis,
            plot_options={
                'color': 'C{}'.format(idx),
                'lw': 0.8
            }
        )
    plt.savefig(output, bbox_inches='tight')
|
"""
File: darc_evaluator.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/Drayer34
Description: Evaluator used in the context of the DARC (Data Anonymization and Re-identification
Competition).
"""
import os
import logging
import pandas as pd
import redis
try:
from darc_core.metrics import Metrics, utility_metric
from darc_core.preprocessing import round1_preprocessing, round2_preprocessing, read_tar
from darc_core.utils import check_format_f_file, check_format_trans_file
from config import Config as config
except ImportError:
from .darc_core.metrics import Metrics, utility_metric
from .darc_core.preprocessing import round1_preprocessing, round2_preprocessing, read_tar
from .darc_core.utils import check_format_f_file, check_format_trans_file
from .config import Config as config
class RedisConnection():
    """Wrapper around the Redis database used to store team submission scores.

    The database itself is hosted by the competition organizer.
    """
    def __init__(self, host, port, password):
        """Initialization method.

        :host: address of the Redis database host.
        :port: port of the host.
        :password: password of the database.
        """
        self._host = host
        self._port = port
        self._password = password
        self._redis_co = self._connect_to_bdd()
    def _connect_to_bdd(self):
        """Connect to the remote Redis database.

        :returns: a redis.Redis client instance.
        """
        redis_co = redis.Redis(\
            host=self._host,
            port=self._port,
            password=self._password)
        return redis_co
    def get_redis_connection(self):
        """Return the underlying Redis client.
        """
        return self._redis_co
    def get_nb_try_reid(self, team_name, attempt_attacked):
        """Return how many re-identification attempts a team has made against
        one opponent submission file.

        :team_name: the name of the attacking team.
        :attempt_attacked: the file (1, 2, or 3) of the opponent team attacked.
        :return: number of attempts made by team A against team B's file.
        """
        # Return the number of attempts or 0 if there is no value at redis_get address.
        redis_get = "{}_vs_file_{}".format(team_name, attempt_attacked)
        return int(self._redis_co.get(redis_get) or 0)
    def set_nb_try_reid(self, nb_tries, team_name, attempt_attacked):
        """Set how many re-identification attempts a team has made against one
        opponent submission file.

        :nb_tries: the number of attempts to store.
        :team_name: the name of the attacking team.
        :attempt_attacked: the file (1, 2, or 3) of the opponent team attacked.
        """
        redis_set = "{}_vs_file_{}".format(team_name, attempt_attacked)
        self._redis_co.set(redis_set, nb_tries)
    def set_value(self, value, adress):
        """Store *value* at key *adress* in the Redis database.

        :value: the value to set.
        :adress: the key under which to store it.
        """
        return self._redis_co.set(adress, value)
    def get_value(self, adress):
        """Fetch the value stored at key *adress* in the Redis database.

        :adress: the key to read.
        """
        return self._redis_co.get(adress)
class DarcEvaluator():
    """
    Evaluate submission files of users in the context of the DARC competition.
    This is a fork from aicrowd_evaluator https://github.com/AIcrowd/AIcrowd-example-evaluator
    """
    def __init__(self, answer_file_path, round=1,
                 redis_host='127.0.0.1', redis_port=6379, redis_password=False,
                 round2_storage=None
                 ):
        """
        `round` : Holds the round for which the evaluation is being done.
        can be 1, 2...upto the number of rounds the challenge has.
        Different rounds will mostly have different ground truth files.
        """
        self.answer_file_path = answer_file_path
        self.round = round
        # Redis connection is created lazily in _evaluate().
        self.redis_co = ""
        self.redis_host = redis_host
        self.redis_port = redis_port
        # BUGFIX: this line was a redacted "<PASSWORD>" placeholder (a syntax
        # error) and the constructor argument was never stored.
        self.redis_password = redis_password
        self.round2_storage = round2_storage
    def _evaluate(self, client_payload, _context=None):
        """
        Evaluate one submission and return its score object.

        `client_payload` will be a dict with (atleast) the following keys :
          - submission_file_path : local file path of the submitted file
          - aicrowd_submission_id : A unique id representing the submission
          - aicrowd_participant_id : A unique id for participant/team submitting (if enabled)

        `_context` is unused; its default was a mutable dict and is now None.
        """
        # Initialize redis_co
        self.redis_co = RedisConnection(self.redis_host, self.redis_port, self.redis_password)
        # Initialize directory variable
        submission_file_path = client_payload["submission_file_path"]
        # Accept both the legacy "crowdai_*" and the newer "aicrowd_*" keys.
        try:
            aicrowd_submission_uid = client_payload["crowdai_participant_id"]
            aicrowd_submission_id = client_payload["crowdai_submission_id"]
        except Exception:
            aicrowd_submission_uid = client_payload["aicrowd_participant_id"]
            aicrowd_submission_id = client_payload["aicrowd_submission_id"]
        ## ROUND 1: score an anonymized transaction file.
        if self.round == 1:
            # Read database from files
            ground_truth, submission = round1_preprocessing(
                self.answer_file_path, submission_file_path
            )
            # Check the format of the Anonymized Transaction file
            check_format_trans_file(ground_truth, submission)
            # Determine all the scores for a anonymization transaction file
            metric = Metrics(ground_truth, submission)
            scores = metric.scores()
            # TODO: This should be done only for the inter round submission (i.e. the final ones) <27-06-19, antoine> #
            # save score submission_reid for round2
            # NOTE(review): scores[6:12] here vs scores[6:13] below — looks
            # intentional (s7 excluded from the stored reid score) but confirm.
            self.redis_co.set_value(f"{aicrowd_submission_id}", max(scores[6:12]))
            _result_object = {
                "score" : (max(scores[0:6]) + max(scores[6:13]))/2,
                "score_secondary": max(scores[0:6]),
                "meta" : {
                    "e1":scores[0],
                    "e2":scores[1],
                    "e3":scores[2],
                    "e4":scores[3],
                    "e5":scores[4],
                    "e6":scores[5],
                    "s1":scores[6],
                    "s2":scores[7],
                    "s3":scores[8],
                    "s4":scores[9],
                    "s5":scores[10],
                    "s6":scores[11],
                    "s7":scores[12]
                }
            }
            return _result_object
        # ROUND 2: score a re-identification attack on another submission.
        elif self.round == 2:
            #Read tar file
            submission_file_path, aicrowd_submission_id_attacked = read_tar(
                submission_file_path
            )
            # Recover ground_truth
            ground_truth = round1_preprocessing(self.answer_file_path)
            # Read submitted files and ground truth
            submission = round2_preprocessing(submission_file_path)
            # Recover the attacked anonymized file from round-2 storage.
            try:
                at_origin = pd.read_csv(f"{self.round2_storage}/{aicrowd_submission_id_attacked}.csv")
            except FileNotFoundError:
                raise Exception("There is no team with submission number {}".format(
                    aicrowd_submission_id_attacked
                ))
            # Check if they've attacked them 10 times already
            nb_atcks = self.redis_co.get_nb_try_reid(
                aicrowd_submission_uid, aicrowd_submission_id_attacked
            )
            if nb_atcks >= 10:
                raise Exception("You've reach your 10 attempts on this file.")
            # Compute score for round 2
            check_format_f_file(submission)
            metrics = Metrics(ground_truth, at_origin)
            reidentification_score = metrics.compare_f_files(submission)
            # Increment by 1 the number of attempts
            self.redis_co.set_nb_try_reid(
                nb_atcks+1, aicrowd_submission_uid, aicrowd_submission_id_attacked
            )
            # Update the attacked submission's best re-identification score.
            previous_score = float(self.redis_co.get_value(aicrowd_submission_id_attacked) or 0)
            attack_success = False
            attck_sc = float(self.redis_co.get_value(f"{aicrowd_submission_id}_attck_sc") or 0)
            if previous_score < reidentification_score:
                self.redis_co.set_value(reidentification_score, aicrowd_submission_id_attacked)
                attack_success = True
                diff_sc = reidentification_score - previous_score
                attck_sc = attck_sc + diff_sc
            nb_atcks_success = int(
                self.redis_co.get_value(f"{aicrowd_submission_id}_attck_succ") or 0
            )
            if attack_success:
                nb_atcks_success += 1
                self.redis_co.set_value(nb_atcks_success, f"{aicrowd_submission_id}_attck_succ")
            # Compute the attack score as the mean of all attacks
            if nb_atcks_success > 0:
                attck_sc /= nb_atcks_success
            else:
                attck_sc = 0
            # Return object
            _result_object = {
                "score": reidentification_score,
                "score_secondary": attck_sc
            }
            # Remove submission_file extracted
            os.remove(submission_file_path)
            return _result_object
        # Unknown round number.
        return None
def main():
    """Run a local smoke test of both evaluation rounds against the
    configured submission files and Redis instance."""
    # Log file used when the Docker container is launched; best-effort only.
    try:
        logging.basicConfig(filename='/test/darc.log', level=logging.DEBUG)
    except Exception:
        pass
    answer_file_path = config.GROUND_TRUTH
    _client_payload = {}
    # Setting name of submitting team
    _client_payload["aicrowd_participant_id"] = "a"
    logging.info("TESTING: Round 1")
    # Ground truth path for round 1
    _client_payload["submission_file_path"] = config.R1_SUBMISSION_FILE
    # Setting the submission id
    _client_payload["aicrowd_submission_id"] = 2
    # Reading info for stockage server
    RHOST = config.REDIS_HOST
    RPORT = config.REDIS_PORT
    RPASSWORD = config.REDIS_PASSWORD
    if not RHOST:
        raise Exception("Please provide the Redis Host and other credentials, by providing the following environment variables : REDIS_HOST, REDIS_PORT, REDIS_PASSWORD")
    _context = {}
    # Instantiate an evaluator for round 1.
    # BUGFIX: redis_password was a redacted "<PASSWORD>" placeholder (a
    # syntax error); it must be the RPASSWORD credential read above.
    aicrowd_evaluator = DarcEvaluator(
        answer_file_path, round=1,
        redis_host=RHOST, redis_port=RPORT, redis_password=RPASSWORD
    )
    # Evaluate
    result = aicrowd_evaluator._evaluate(
        _client_payload, _context
    )
    logging.info(f"Scores : {result}")
    logging.info("TESTING : Round 2")
    # Submission file for round 2
    _client_payload["submission_file_path"] = config.R2_SUBMISSION_FILE
    # Instantiate an evaluator for round 2 (same BUGFIX as above).
    aicrowd_evaluator = DarcEvaluator(
        answer_file_path, round=2,
        redis_host=RHOST, redis_port=RPORT, redis_password=RPASSWORD,
        round2_storage=config.ROUND2_STORAGE
    )
    #Evaluate
    result = aicrowd_evaluator._evaluate(
        _client_payload, _context
    )
    logging.info(f"Scores : {result}")
# Script entry point: run the local smoke test of both evaluation rounds.
if __name__ == "__main__":
    main()
|
<reponame>caleb15/mocurly
"""Classes used to simulate recurly resources and endpoints
Each endpoint class will define the CRUD interface into the resource.
"""
from datetime import datetime
import recurly
import six
import random
import string
import dateutil.relativedelta
import dateutil.parser
from dateutil.tz import tzutc
from .utils import current_time
from .errors import TRANSACTION_ERRORS, ResponseError
from .utils import details_route, serialize, serialize_list
from .backend import accounts_backend, billing_info_backend, transactions_backend, invoices_backend, subscriptions_backend, plans_backend, plan_add_ons_backend, adjustments_backend, coupons_backend, coupon_redemptions_backend
class BaseRecurlyEndpoint(object):
    """Baseclass for simulating resource endpoints.

    Provides basic CRUD functionality given a resource XML template and an
    object store backend. Subclasses are expected to define `base_uri`,
    `backend`, `object_type`, `object_type_plural`, and `template`.
    """
    # Attribute name used as the primary key in the backend store.
    pk_attr = 'uuid'
    # Serialization formats accepted by `serialize`.
    XML = 0
    RAW = 1

    def hydrate_foreign_keys(self, obj):
        """Hydrates all foreign key objects from Id strings into actual objects.

        The base implementation has no foreign keys, so `obj` is returned
        untouched; subclasses override as needed.
        """
        return obj

    def get_object_uri(self, obj):
        """Returns the URI to access the given object resource."""
        cls = self.__class__
        return recurly.base_uri() + cls.base_uri + '/' + obj[cls.pk_attr]

    def uris(self, obj):
        """Returns a dictionary of all URIs related to the object, including foreign keys."""
        obj = self.hydrate_foreign_keys(obj)
        uri_out = {}
        uri_out['object_uri'] = self.get_object_uri(obj)
        return uri_out

    def serialize(self, obj, format=XML):
        """Serialize the object into the provided format, using the resource
        template.

        Currently only supports XML (for XML representation of the resource.
        This is what recurly expects) and RAW (a dictionary representation of
        the resource).
        """
        if format == BaseRecurlyEndpoint.RAW:
            return obj
        cls = self.__class__
        # isinstance (not type(...) == list) so list subclasses serialize too.
        if isinstance(obj, list):
            for o in obj:
                o['uris'] = self.uris(o)
            return serialize_list(cls.template, cls.object_type_plural, cls.object_type, obj)
        else:
            obj['uris'] = self.uris(obj)
            return serialize(cls.template, cls.object_type, obj)

    def list(self, format=XML):
        """Endpoint to list all resources stored in the backend."""
        cls = self.__class__
        out = cls.backend.list_objects()
        return self.serialize(out, format=format)

    def create(self, create_info, format=XML):
        """Endpoint to create a new instance of the resource into the backend.

        If the caller supplied the subclass's primary key, it is reused as
        the uuid; otherwise a random id is generated.
        """
        cls = self.__class__
        if cls.pk_attr in create_info:
            create_info['uuid'] = create_info[cls.pk_attr]
        else:
            create_info['uuid'] = self.generate_id()
        new_obj = cls.backend.add_object(create_info['uuid'], create_info)
        return self.serialize(new_obj, format=format)

    def retrieve(self, pk, format=XML):
        """Endpoint to retrieve an existing resource from the backend.

        Raises a 404 if the requested object does not exist.
        """
        cls = self.__class__
        if not cls.backend.has_object(pk):
            raise ResponseError(404, '')
        out = cls.backend.get_object(pk)
        return self.serialize(out, format=format)

    def update(self, pk, update_info, format=XML):
        """Endpoint to update an existing resource from the backend.

        Raises a 404 if the requested object does not exist.
        """
        cls = self.__class__
        if not cls.backend.has_object(pk):
            raise ResponseError(404, '')
        out = cls.backend.update_object(pk, update_info)
        return self.serialize(out, format=format)

    def delete(self, pk):
        """Endpoint to delete an existing resource from the backend.

        Raises a 404 if the requested object does not exist.
        """
        cls = self.__class__
        if not cls.backend.has_object(pk):
            raise ResponseError(404, '')
        cls.backend.delete_object(pk)
        return ''

    def generate_id(self):
        """Generates a random 32-character ID usable as a UUID or recurly ID."""
        return ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(32))
class AccountsEndpoint(BaseRecurlyEndpoint):
    """Endpoint for account resources.

    Also manages the nested billing-info and coupon-redemption resources,
    since the Recurly API only provides access to those through the account
    endpoint.
    """
    base_uri = 'accounts'
    pk_attr = 'account_code'
    backend = accounts_backend
    object_type = 'account'
    object_type_plural = 'accounts'
    template = 'account.xml'

    def uris(self, obj):
        """Returns the URIs of the account and its nested resources."""
        uri_out = super(AccountsEndpoint, self).uris(obj)
        uri_out['adjustments_uri'] = uri_out['object_uri'] + '/adjustments'
        # billing_info_uri is only advertised when billing info actually exists
        if billing_info_backend.has_object(obj[AccountsEndpoint.pk_attr]):
            uri_out['billing_info_uri'] = uri_out['object_uri'] + '/billing_info'
        uri_out['invoices_uri'] = uri_out['object_uri'] + '/invoices'
        uri_out['redemption_uri'] = uri_out['object_uri'] + '/redemptions'
        uri_out['subscriptions_uri'] = uri_out['object_uri'] + '/subscriptions'
        uri_out['transactions_uri'] = uri_out['object_uri'] + '/transactions'
        return uri_out

    def create(self, create_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to create an account.

        Nested billing info, if provided, is moved into its own backend,
        keyed by the account code.
        """
        if 'billing_info' in create_info:
            billing_info = create_info['billing_info']
            billing_info['account'] = create_info['account_code']
            billing_info_backend.add_object(create_info[AccountsEndpoint.pk_attr], billing_info)
            del create_info['billing_info']
        create_info['hosted_login_token'] = self.generate_id()
        create_info['created_at'] = current_time().isoformat()
        return super(AccountsEndpoint, self).create(create_info, format=format)

    def update(self, pk, update_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to update an account; upserts nested billing info."""
        if 'billing_info' in update_info:
            updated_billing_info = update_info['billing_info']
            if billing_info_backend.has_object(pk):
                billing_info_backend.update_object(pk, updated_billing_info)
            else:
                updated_billing_info['account'] = pk
                billing_info_backend.add_object(pk, updated_billing_info)
            del update_info['billing_info']
        return super(AccountsEndpoint, self).update(pk, update_info, format=format)

    def delete(self, pk):
        """Endpoint to close an account.

        Like recurly, the account is marked closed rather than removed, but
        its billing info is deleted outright.
        """
        AccountsEndpoint.backend.update_object(pk, {'state': 'closed'})
        billing_info_backend.delete_object(pk)
        return ''

    # Support for nested resources
    # BillingInfo and CouponRedemption are managed by this endpoint, as
    # opposed to having their own since Recurly API only provides access to these
    # resources through the Account endpoint.
    def billing_info_uris(self, obj):
        """Returns the URIs related to an account's billing info."""
        uri_out = {}
        uri_out['account_uri'] = recurly.base_uri() + AccountsEndpoint.base_uri + '/' + obj['account']
        uri_out['object_uri'] = uri_out['account_uri'] + '/billing_info'
        return uri_out

    def serialize_billing_info(self, obj, format=BaseRecurlyEndpoint.XML):
        """Serializes billing info in the requested format (XML or RAW)."""
        if format == BaseRecurlyEndpoint.RAW:
            return obj
        obj['uris'] = self.billing_info_uris(obj)
        return serialize('billing_info.xml', 'billing_info', obj)

    @details_route('GET', 'billing_info')
    def get_billing_info(self, pk, format=BaseRecurlyEndpoint.XML):
        """Endpoint to retrieve an account's billing info."""
        out = billing_info_backend.get_object(pk)
        return self.serialize_billing_info(out, format=format)

    @details_route('PUT', 'billing_info')
    def update_billing_info(self, pk, update_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to upsert an account's billing info."""
        if billing_info_backend.has_object(pk):
            out = billing_info_backend.update_object(pk, update_info)
        else:
            # BUG FIX: link the new billing info to the account's code (pk),
            # not the literal attribute name stored in self.pk_attr. This now
            # matches the billing-info branch of AccountsEndpoint.update.
            update_info['account'] = pk
            out = billing_info_backend.add_object(pk, update_info)
        return self.serialize_billing_info(out, format=format)

    @details_route('DELETE', 'billing_info')
    def delete_billing_info(self, pk):
        """Endpoint to delete an account's billing info."""
        billing_info_backend.delete_object(pk)
        return ''

    @details_route('GET', 'transactions', is_list=True)
    def get_transactions_list(self, pk, filters=None, format=BaseRecurlyEndpoint.XML):
        """Endpoint to list all transactions belonging to the account."""
        out = TransactionsEndpoint.backend.list_objects(lambda transaction: transaction['account'] == pk)
        return transactions_endpoint.serialize(out, format=format)

    @details_route('GET', 'invoices', is_list=True)
    def get_invoices_list(self, pk, filters=None, format=BaseRecurlyEndpoint.XML):
        """Endpoint to list all invoices belonging to the account."""
        out = InvoicesEndpoint.backend.list_objects(lambda invoice: invoice['account'] == pk)
        return invoices_endpoint.serialize(out, format=format)

    @details_route('GET', 'subscriptions', is_list=True)
    def get_subscriptions_list(self, pk, filters=None, format=BaseRecurlyEndpoint.XML):
        """Endpoint to list the account's subscriptions, with optional filters.

        The special state filter value 'live' expands to all non-expired
        states, mirroring recurly's behavior.
        """
        def filter_subscriptions(subscription):
            if filters:
                if 'state' in filters and filters['state'][0] == 'live':
                    filters['state'] = ['active', 'canceled', 'future', 'in_trial']
                cond = all(subscription[k] in v for k, v in filters.items())
            else:
                cond = True
            return subscription['account'] == pk and cond
        out = SubscriptionsEndpoint.backend.list_objects(filter_subscriptions)
        return subscriptions_endpoint.serialize(out, format=format)

    @details_route('GET', 'redemptions$', is_list=True)
    def get_coupon_redemptions(self, account_code, filters=None, format=BaseRecurlyEndpoint.XML):
        """Endpoint to list the account's coupon redemptions."""
        account_coupon_redemptions = coupon_redemptions_backend.list_objects(lambda redemption: redemption['account_code'] == account_code)
        return coupons_endpoint.serialize_coupon_redemption(account_coupon_redemptions, format=format)

    @details_route('DELETE', 'redemptions/([^/ ]+)')
    def delete_coupon_redemption(self, account_code, redemption_uuid, format=BaseRecurlyEndpoint.XML):
        """Endpoint to remove a coupon redemption from the account.

        Raises a 404 if the account has no redemption with the given uuid.
        """
        account_coupon_redemptions = coupon_redemptions_backend.list_objects(
                lambda redemption: \
                    (redemption['account_code'] == account_code and
                        coupons_endpoint.generate_coupon_redemption_uuid(redemption['coupon'], redemption['account_code']) == redemption_uuid))
        if not account_coupon_redemptions:
            raise ResponseError(404, '')
        coupon_redemptions_backend.delete_object(redemption_uuid)
        return ''
class TransactionsEndpoint(BaseRecurlyEndpoint):
    """Endpoint for transaction resources.

    Keeps a per-account registry of forced transaction failures so tests can
    simulate declined cards. Creating a transaction also creates a backing
    invoice with a single charge line item, mirroring recurly's behavior.
    """
    base_uri = 'transactions'
    backend = transactions_backend
    object_type = 'transaction'
    object_type_plural = 'transactions'
    template = 'transaction.xml'

    def __init__(self):
        # Maps account_code -> error code forced on that account's transactions.
        self.registered_errors = {}
        return super(TransactionsEndpoint, self).__init__()

    def clear_state(self):
        """Clears all registered errors
        """
        self.registered_errors = {}

    def register_transaction_failure(self, account_code, error_code):
        """Registers an error_code to associate with the given account for all
        transactions made by the account
        """
        self.registered_errors[account_code] = error_code

    def hydrate_foreign_keys(self, obj):
        """Hydrates the transaction's account and invoice references into full objects."""
        if isinstance(obj['account'], six.string_types):
            # hydrate account
            obj['account'] = AccountsEndpoint.backend.get_object(obj['account'])
        if 'invoice' in obj and isinstance(obj['invoice'], six.string_types):
            # hydrate invoice
            obj['invoice'] = InvoicesEndpoint.backend.get_object(obj['invoice'])
        return obj

    def uris(self, obj):
        """Returns the URIs of the transaction and its related resources."""
        uri_out = super(TransactionsEndpoint, self).uris(obj)
        obj['account']['uris'] = accounts_endpoint.uris(obj['account'])
        uri_out['account_uri'] = obj['account']['uris']['object_uri']
        if 'invoice' in obj:
            # To avoid infinite recursion
            uri_out['invoice_uri'] = invoices_endpoint.get_object_uri(obj['invoice'])
        if 'subscription' in obj:
            pseudo_subscription_object = {}
            pseudo_subscription_object[SubscriptionsEndpoint.pk_attr] = obj['subscription']
            uri_out['subscription_uri'] = subscriptions_endpoint.get_object_uri(pseudo_subscription_object)
        if 'original_transaction' in obj:
            uri_out['original_transaction_uri'] = transactions_endpoint.get_object_uri(obj['original_transaction'])
        return uri_out

    def create(self, create_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to create a transaction.

        Like recurly, creates an invoice (with a single charge line item)
        that is associated with the transaction. Raises a 422 ResponseError
        if a failure was registered for the account via
        register_transaction_failure.
        """
        account_code = create_info['account'][AccountsEndpoint.pk_attr]
        assert AccountsEndpoint.backend.has_object(account_code)
        create_info['account'] = account_code
        create_info['uuid'] = self.generate_id() # generate id now for invoice
        create_info['tax_in_cents'] = 0 # unsupported
        create_info['action'] = 'purchase'
        create_info['status'] = 'success'
        create_info['test'] = True
        create_info['voidable'] = True
        create_info['refundable'] = True
        create_info['created_at'] = current_time().isoformat()
        create_info['payment_method'] = 'credit_card'
        if 'description' not in create_info:
            create_info['description'] = ''
        # Check to see if we need to throw an error for card failure
        if create_info['account'] in self.registered_errors:
            # update the new transaction with error info
            create_info['voidable'] = False
            create_info['refundable'] = False
            create_info['status'] = 'declined'
            error_code = self.registered_errors[create_info['account']]
            transaction_error = TRANSACTION_ERRORS[error_code]
            create_info['transaction_error'] = transaction_error
            # The declined transaction is still persisted; its XML is embedded
            # in the 422 error body alongside the transaction_error XML.
            transaction_xml = super(TransactionsEndpoint, self).create(create_info, format)
            error_xml = serialize('transaction_error.xml', 'transaction_error', transaction_error)
            if create_info.get('subscription', False):
                # A declined enrollment transaction rolls back the pending subscription.
                subscriptions_backend.delete_object(create_info['subscription'])
            raise ResponseError(422, '<errors>{0}{1}</errors>'.format(error_xml, transaction_xml))
        # Every new transaction creates a new invoice
        new_invoice = {'account': account_code,
                'uuid': self.generate_id(),
                'state': 'collected',
                'invoice_number': InvoicesEndpoint.generate_invoice_number(),
                'subtotal_in_cents': int(create_info['amount_in_cents']),
                'currency': create_info['currency'],
                'created_at': create_info['created_at'],
                'net_terms': 0,
                'collection_method': 'automatic',
                # unsupported
                'tax_type': 'usst',
                'tax_rate': 0}
        new_invoice['tax_in_cents'] = new_invoice['subtotal_in_cents'] * new_invoice['tax_rate']
        new_invoice['total_in_cents'] = new_invoice['subtotal_in_cents'] + new_invoice['tax_in_cents']
        new_invoice['transactions'] = [create_info['uuid']]
        InvoicesEndpoint.backend.add_object(new_invoice['invoice_number'], new_invoice)
        new_invoice_id = new_invoice[InvoicesEndpoint.pk_attr]
        # Every transaction should have a line item as well
        transaction_charge_line_item = {'account_code': new_invoice['account'],
                'currency': new_invoice['currency'],
                'unit_amount_in_cents': int(new_invoice['total_in_cents']),
                'description': create_info['description'],
                'quantity': 1,
                'invoice': new_invoice_id}
        if 'subscription' in create_info:
            # Line item period mirrors the subscription's current billing period.
            subscription = subscriptions_backend.get_object(create_info['subscription'])
            transaction_charge_line_item['start_date'] = dateutil.parser.parse(subscription['current_period_started_at'])
            transaction_charge_line_item['end_date'] = dateutil.parser.parse(subscription['current_period_ends_at'])
        transaction_charge_line_item = adjustments_endpoint.create(transaction_charge_line_item, format=BaseRecurlyEndpoint.RAW)
        InvoicesEndpoint.backend.update_object(new_invoice_id, {'line_items': [transaction_charge_line_item]})
        create_info['invoice'] = new_invoice_id
        return super(TransactionsEndpoint, self).create(create_info, format)

    def delete(self, pk, amount_in_cents=None):
        """As of Nov. 2014, DELETE on transactions is no longer implemented
        """
        raise ResponseError(404, '')
class AdjustmentsEndpoint(BaseRecurlyEndpoint):
    """Endpoint for adjustment (invoice line item) resources."""
    base_uri = 'adjustments'
    backend = adjustments_backend
    object_type = 'adjustment'
    object_type_plural = 'adjustments'
    template = 'adjustment.xml'
    defaults = {'state': 'active',
                'quantity': 1,
                'origin': 'credit',
                'product_code': 'basic',
                'discount_in_cents': 0,
                # unsupported
                'tax_exempt': False}

    def uris(self, obj):
        """Returns the URIs of the adjustment and its account/invoice."""
        uri_out = super(AdjustmentsEndpoint, self).uris(obj)
        uri_out['account_uri'] = accounts_endpoint.get_object_uri(
                {AccountsEndpoint.pk_attr: obj['account_code']})
        uri_out['invoice_uri'] = invoices_endpoint.get_object_uri(
                {InvoicesEndpoint.pk_attr: obj['invoice']})
        return uri_out

    def create(self, create_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to create an adjustment.

        Fills in class defaults and derives type, tax and totals from the
        unit amount (negative amounts are credits, non-negative are charges).
        """
        create_info['created_at'] = current_time().isoformat()
        unit_amount = int(create_info['unit_amount_in_cents'])
        create_info['type'] = 'charge' if unit_amount >= 0 else 'credit'
        create_info['tax_in_cents'] = 0  # tax calculation is unsupported
        create_info['total_in_cents'] = unit_amount + int(create_info['tax_in_cents'])
        merged = dict(AdjustmentsEndpoint.defaults)
        merged.update(create_info)
        merged['total_in_cents'] = merged['total_in_cents'] - merged['discount_in_cents']
        return super(AdjustmentsEndpoint, self).create(merged, format)
class InvoicesEndpoint(BaseRecurlyEndpoint):
    """Endpoint for invoice resources.

    Besides the basic CRUD operations, supports refunding an invoice either
    by a flat amount or by individual line items.
    """
    base_uri = 'invoices'
    backend = invoices_backend
    object_type = 'invoice'
    object_type_plural = 'invoices'
    pk_attr = 'invoice_number'
    template = 'invoice.xml'

    def hydrate_foreign_keys(self, obj):
        """Hydrates the invoice's account, transactions and line items."""
        if isinstance(obj['account'], six.string_types):
            # hydrate account
            obj['account'] = AccountsEndpoint.backend.get_object(obj['account'])
        if 'transactions' in obj:
            obj['transactions'] = [TransactionsEndpoint.backend.get_object(transaction_id) if isinstance(transaction_id, six.string_types) else transaction_id for transaction_id in obj['transactions']]
            for transaction in obj['transactions']:
                transaction['invoice'] = obj
                transaction['uris'] = transactions_endpoint.uris(transaction)
        if 'line_items' in obj:
            obj['line_items'] = [AdjustmentsEndpoint.backend.get_object(adjustment_id) if isinstance(adjustment_id, six.string_types) else adjustment_id for adjustment_id in obj['line_items']]
            for adjustment in obj['line_items']:
                adjustment['uris'] = adjustments_endpoint.uris(adjustment)
        return obj

    def uris(self, obj):
        """Returns the URIs of the invoice and its related resources."""
        uri_out = super(InvoicesEndpoint, self).uris(obj)
        uri_out['account_uri'] = accounts_endpoint.get_object_uri(obj['account'])
        if 'subscription' in obj:
            uri_out['subscription_uri'] = subscriptions_endpoint.get_object_uri({'uuid': obj['subscription']})
        if 'original_invoice' in obj:
            faux_inv = {}
            faux_inv[InvoicesEndpoint.pk_attr] = obj['original_invoice']
            uri_out['original_invoice_uri'] = self.get_object_uri(faux_inv)
        return uri_out

    @details_route('POST', 'refund')
    def refund_invoice(self, pk, refund_info, format=BaseRecurlyEndpoint.XML):
        """Refunds the invoice.

        There are two ways this can happen:
        - Refund individual line items on the invoice
        - Refund a specific amount
        The outcome will:
        - Create a new invoice with adjustments that cancel out the original
          invoice (invoice_number = pk)
        - Updates the state of associated objects
        """
        invoice = InvoicesEndpoint.backend.get_object(pk)
        if 'amount_in_cents' in refund_info:
            return self._refund_amount(invoice, int(refund_info['amount_in_cents']))
        else:
            # Hack to get around the singleton hydration of XML
            if isinstance(refund_info['line_items'], dict):
                refund_info['line_items'] = [refund_info['line_items']]
            return self._refund_line_items(invoice, refund_info)

    def _refund_amount(self, invoice, amount_in_cents):
        """Refunds a specific amount for the invoice."""
        # Create a new transaction that tracks the refund
        refund_transaction_info = {
            'account': accounts_endpoint.backend.get_object(invoice['account']),
            'amount_in_cents': -amount_in_cents,
            'currency': 'USD',
            'description': 'Refund for Invoice #{}'.format(invoice['invoice_number'])
        }
        new_transaction = transactions_endpoint.create(refund_transaction_info, format=BaseRecurlyEndpoint.RAW)
        # Update transaction to mimic refund transaction
        opts = {
            'action': 'refund',
            'refundable': False,
            'original_transaction': TransactionsEndpoint.backend.get_object(invoice['transactions'][0])
        }
        if 'subscription' in invoice:
            opts['subscription'] = invoice['subscription']
        TransactionsEndpoint.backend.update_object(new_transaction['uuid'], opts)
        # Update adjustments to mimic refund invoice
        new_transaction = TransactionsEndpoint.backend.get_object(new_transaction['uuid'])
        new_invoice = InvoicesEndpoint.backend.get_object(new_transaction['invoice'])
        adjustments = new_invoice['line_items']
        new_adjustments = []
        for adjustment in adjustments:
            new_adjustments.append(AdjustmentsEndpoint.backend.update_object(adjustment['uuid'], {'quantity': -adjustment['quantity']}))
        new_invoice = InvoicesEndpoint.backend.update_object(new_invoice['invoice_number'], {'line_items': new_adjustments, 'original_invoice': invoice[InvoicesEndpoint.pk_attr]})
        return self.serialize(new_invoice)

    def _refund_line_items(self, invoice, refund_info):
        """Refund individual line items on the invoice."""
        # Create the refund line items
        refund_line_items = self._create_refund_line_items_for(refund_info['line_items'])
        # Calculate amount to refund
        amount_to_refund = -sum(map(lambda line_item: line_item['unit_amount_in_cents'], refund_line_items))
        # New invoice tracking refund
        new_invoice = self._create_refund_invoice_for(invoice, amount_to_refund)
        new_invoice_id = new_invoice[InvoicesEndpoint.pk_attr]
        # Relate the objects.
        # BUG FIX: materialize with list() -- under Python 3 `map` returns a
        # lazy one-shot iterator, so storing it as the invoice's line_items
        # would leave the backend with an exhausted/unevaluated value.
        refund_line_items = list(map(lambda line_item: AdjustmentsEndpoint.backend.update_object(line_item[AdjustmentsEndpoint.pk_attr], {'invoice': new_invoice[InvoicesEndpoint.pk_attr]}), refund_line_items))
        new_invoice = InvoicesEndpoint.backend.update_object(new_invoice_id, {'line_items': refund_line_items})
        # Update transactions (list() for the same Python 3 reason)
        transactions = list(map(lambda t_pk: TransactionsEndpoint.backend.get_object(t_pk), invoice['transactions']))
        transactions_to_add = self._update_or_create_refund_transactions_for(transactions, new_invoice)
        new_invoice = InvoicesEndpoint.backend.update_object(new_invoice_id, {'transactions': transactions_to_add})
        return self.serialize(new_invoice)

    def _create_refund_line_items_for(self, line_items):
        """Creates refund line items for the given line items.

        Returns any new line items that were created.
        """
        refund_line_items = []
        for line_item in line_items:
            quantity_to_refund = line_item['adjustment']['quantity']
            # TODO: add logic for prorate
            line_item = AdjustmentsEndpoint.backend.get_object(line_item['adjustment']['uuid'])
            assert int(quantity_to_refund) <= int(line_item['quantity'])
            charge_refund_line_item = {'account_code': line_item['account_code'],
                    'currency': line_item['currency'],
                    'unit_amount_in_cents': -int(line_item['unit_amount_in_cents']),
                    'description': 'Refund for {}'.format(line_item['description']),
                    'quantity': -int(quantity_to_refund)}
            charge_refund_line_item = adjustments_endpoint.create(charge_refund_line_item, format=BaseRecurlyEndpoint.RAW)
            refund_line_items.append(charge_refund_line_item)
        return refund_line_items

    def _create_refund_invoice_for(self, invoice, amount_to_refund):
        """Creates the refund invoice for the given invoice."""
        # New invoice tracking refund
        new_invoice = {'account': invoice['account'],
                'uuid': self.generate_id(),
                'state': 'collected',
                'invoice_number': InvoicesEndpoint.generate_invoice_number(),
                'subtotal_in_cents': -int(amount_to_refund),
                'currency': invoice['currency'],
                'created_at': current_time().isoformat(),
                'net_terms': 0,
                'collection_method': 'automatic',
                'original_invoice': invoice[InvoicesEndpoint.pk_attr],
                # unsupported
                'tax_type': 'usst',
                'tax_rate': 0}
        new_invoice['tax_in_cents'] = new_invoice['subtotal_in_cents'] * new_invoice['tax_rate']
        new_invoice['total_in_cents'] = new_invoice['subtotal_in_cents'] + new_invoice['tax_in_cents']
        new_invoice = InvoicesEndpoint.backend.add_object(new_invoice[InvoicesEndpoint.pk_attr], new_invoice)
        return new_invoice

    def _update_or_create_refund_transactions_for(self, transactions, new_invoice):
        """
        Updates existing transactions to be void (if voidable), or creates a
        new transaction to track refund.
        """
        # Update state of any associated objects
        # If invoice is with transaction, then void/refund the transaction
        transactions_to_add = []
        for transaction in transactions:
            if transaction['voidable']:
                TransactionsEndpoint.backend.update_object(transaction['uuid'], {
                    'status': 'void',
                    'voidable': False,
                    'refundable': False  # TODO: only for full refunds
                })
                transactions_to_add.append(transaction['uuid'])
            else:
                new_transaction = {
                    'uuid': transactions_endpoint.generate_id(),
                    'action': 'refund',
                    'status': 'success',
                    'test': True,
                    'voidable': True,
                    'refundable': False,
                    'created_at': current_time().isoformat(),
                    'type': 'credit_card',
                    'account': new_invoice['account'],
                    'currency': new_invoice['currency'],
                    'amount_in_cents': int(new_invoice['total_in_cents']),
                    'invoice': new_invoice[InvoicesEndpoint.pk_attr],
                    # unsupported
                    'tax_in_cents': 0
                }
                TransactionsEndpoint.backend.add_object(new_transaction['uuid'], new_transaction)
                transactions_to_add.append(new_transaction['uuid'])
                # The original transaction can no longer be refunded.
                TransactionsEndpoint.backend.update_object(transaction['uuid'], {'refundable': False})
        return transactions_to_add

    @staticmethod
    def generate_invoice_number():
        """Returns the next invoice number (sequential, starting at 1000)."""
        if InvoicesEndpoint.backend.empty():
            return '1000'
        return str(max(int(invoice['invoice_number']) for invoice in InvoicesEndpoint.backend.list_objects()) + 1)
class CouponsEndpoint(BaseRecurlyEndpoint):
    """Endpoint for coupon resources; also manages coupon redemptions."""
    base_uri = 'coupons'
    backend = coupons_backend
    object_type = 'coupon'
    object_type_plural = 'coupons'
    pk_attr = 'coupon_code'
    template = 'coupon.xml'
    defaults = {'state': 'redeemable',
                'applies_to_all_plans': True,
                'single_use': False}

    def uris(self, obj):
        """Returns the URIs of the coupon and its redemption endpoints."""
        uri_out = super(CouponsEndpoint, self).uris(obj)
        uri_out['redemptions_uri'] = uri_out['object_uri'] + '/redemptions'
        uri_out['redeem_uri'] = uri_out['object_uri'] + '/redeem'
        return uri_out

    def create(self, create_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to create a coupon, filling in class defaults."""
        defaults = CouponsEndpoint.defaults.copy()
        defaults.update(create_info)
        return super(CouponsEndpoint, self).create(defaults, format)

    def generate_coupon_redemption_uuid(self, coupon_code, account_code):
        """Builds the deterministic redemption id '<coupon>__<account>'."""
        return '__'.join([coupon_code, account_code])

    def hydrate_coupon_redemption_foreign_keys(self, obj):
        """Hydrates the redemption's coupon reference into a full object."""
        if isinstance(obj['coupon'], six.string_types):
            obj['coupon'] = CouponsEndpoint.backend.get_object(obj['coupon'])
        return obj

    def coupon_redemption_uris(self, obj):
        """Returns the URIs related to a coupon redemption."""
        uuid = self.generate_coupon_redemption_uuid(obj['coupon']['coupon_code'], obj['account_code'])
        uri_out = {}
        uri_out['coupon_uri'] = coupons_endpoint.get_object_uri(obj['coupon'])
        pseudo_account_object = {}
        pseudo_account_object[AccountsEndpoint.pk_attr] = obj['account_code']
        uri_out['account_uri'] = accounts_endpoint.get_object_uri(pseudo_account_object)
        uri_out['object_uri'] = uri_out['account_uri'] + '/redemptions/' + uuid
        return uri_out

    def serialize_coupon_redemption(self, obj, format=BaseRecurlyEndpoint.XML):
        """Serializes one redemption or a list of them (XML or RAW)."""
        if isinstance(obj, list):
            obj = [self.hydrate_coupon_redemption_foreign_keys(o) for o in obj]
            for o in obj:
                o['uris'] = self.coupon_redemption_uris(o)
        else:
            obj = self.hydrate_coupon_redemption_foreign_keys(obj)
            obj['uris'] = self.coupon_redemption_uris(obj)
        if format == BaseRecurlyEndpoint.RAW:
            return obj
        elif isinstance(obj, list):
            return serialize_list('redemption.xml', 'redemptions', 'redemption', obj)
        else:
            return serialize('redemption.xml', 'redemption', obj)

    @details_route('GET', 'redemptions', is_list=True)
    def get_coupon_redemptions(self, pk, filters=None, format=BaseRecurlyEndpoint.XML):
        """Endpoint to list all redemptions of the coupon."""
        obj_list = coupon_redemptions_backend.list_objects(lambda redemption: redemption['coupon'] == pk)
        return self.serialize_coupon_redemption(obj_list, format=format)

    @details_route('POST', 'redeem')
    def redeem_coupon(self, pk, redeem_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to redeem the coupon for an account."""
        assert CouponsEndpoint.backend.has_object(pk), pk
        redeem_info['coupon'] = pk
        redeem_info['created_at'] = current_time().isoformat()
        redemption_uuid = self.generate_coupon_redemption_uuid(pk, redeem_info['account_code'])
        new_redemption = coupon_redemptions_backend.add_object(redemption_uuid, redeem_info)
        return self.serialize_coupon_redemption(new_redemption, format=format)

    def determine_coupon_discount(self, coupon, charge):
        """Returns the discount (in cents) the coupon applies to `charge`.

        Percent discounts are truncated toward zero by int().
        """
        # Renamed from `type` to avoid shadowing the builtin.
        discount_type = coupon['discount_type']
        if discount_type == 'percent':
            return int(charge * float(coupon['discount_percent']) / 100)
        else:
            return int(coupon['discount_in_cents'])
class PlansEndpoint(BaseRecurlyEndpoint):
    """Endpoint for plan resources; also manages nested plan add-ons."""
    base_uri = 'plans'
    backend = plans_backend
    pk_attr = 'plan_code'
    object_type = 'plan'
    object_type_plural = 'plans'
    template = 'plan.xml'
    defaults = {'plan_interval_unit': 'months',
                'plan_interval_length': 1,
                'trial_interval_unit': 'months',
                'trial_interval_length': 0,
                'display_quantity': False,
                # unsupported
                'tax_exempt': False}
    add_on_defaults = {'default_quantity': 1,
                       'display_quantity_on_hosted_page': False}

    def uris(self, obj):
        """Returns the URIs of the plan and its add-ons listing."""
        uri_out = super(PlansEndpoint, self).uris(obj)
        uri_out['add_ons_uri'] = uri_out['object_uri'] + '/add_ons'
        return uri_out

    def create(self, create_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to create a plan, filling in class defaults."""
        create_info['created_at'] = current_time().isoformat()
        defaults = PlansEndpoint.defaults.copy()
        defaults.update(create_info)
        return super(PlansEndpoint, self).create(defaults, format)

    def generate_plan_add_on_uuid(self, plan_code, add_on_code):
        """Builds the deterministic add-on id '<plan>__<add_on>'."""
        return '__'.join([plan_code, add_on_code])

    def plan_add_on_uris(self, obj):
        """Returns the URIs related to a plan add-on."""
        uri_out = {}
        pseudo_plan_object = {}
        pseudo_plan_object[PlansEndpoint.pk_attr] = obj['plan']
        uri_out['plan_uri'] = plans_endpoint.get_object_uri(pseudo_plan_object)
        uri_out['object_uri'] = uri_out['plan_uri'] + '/add_ons/' + obj['add_on_code']
        return uri_out

    def serialize_plan_add_on(self, obj, format=BaseRecurlyEndpoint.XML):
        """Serializes one add-on or a list of them (XML or RAW)."""
        if format == BaseRecurlyEndpoint.RAW:
            return obj
        # isinstance (not type(...) == list) so list subclasses serialize too.
        if isinstance(obj, list):
            for o in obj:
                o['uris'] = self.plan_add_on_uris(o)
            return serialize_list('add_on.xml', 'add_ons', 'add_on', obj)
        else:
            obj['uris'] = self.plan_add_on_uris(obj)
            return serialize('add_on.xml', 'add_on', obj)

    @details_route('GET', 'add_ons', is_list=True)
    def get_add_on_list(self, pk, filters=None, format=BaseRecurlyEndpoint.XML):
        """Endpoint to list all add-ons belonging to the plan."""
        out = plan_add_ons_backend.list_objects(lambda add_on: add_on['plan'] == pk)
        return self.serialize_plan_add_on(out, format=format)

    @details_route('POST', 'add_ons')
    def create_add_on(self, pk, create_info, format=BaseRecurlyEndpoint.XML):
        """Endpoint to create an add-on under the plan.

        The accounting code falls back to the add-on code when not provided.
        """
        assert PlansEndpoint.backend.has_object(pk)
        create_info['plan'] = pk
        create_info['created_at'] = current_time().isoformat()
        if 'accounting_code' not in create_info:
            create_info['accounting_code'] = create_info['add_on_code']
        return self.serialize_plan_add_on(plan_add_ons_backend.add_object(self.generate_plan_add_on_uuid(pk, create_info['add_on_code']), create_info), format=format)
class SubscriptionsEndpoint(BaseRecurlyEndpoint):
base_uri = 'subscriptions'
backend = subscriptions_backend
object_type = 'subscription'
object_type_plural = 'subscriptions'
template = 'subscription.xml'
defaults = {'quantity': 1, 'collection_method': 'automatic'}
def _calculate_timedelta(self, units, length):
    """Builds a relativedelta of `length` `units` (e.g. 3 'months')."""
    return dateutil.relativedelta.relativedelta(**{units: int(length)})
def _parse_isoformat(self, isoformat):
    """Parses an ISO-8601 datetime string into a datetime object."""
    return dateutil.parser.parse(isoformat)
def hydrate_foreign_keys(self, obj):
    """Hydrates the subscription's plan and add-on references into full objects."""
    if 'plan' not in obj:
        obj['plan'] = PlansEndpoint.backend.get_object(obj['plan_code'])
    if 'subscription_add_ons' in obj:
        def hydrate_add_ons(add_on):
            # Add-ons may be stored as add_on_code strings; resolve them to
            # full plan add-on objects and pick the price for this currency.
            if isinstance(add_on, six.string_types):
                add_on = plan_add_ons_backend.get_object(plans_endpoint.generate_plan_add_on_uuid(obj['plan_code'], add_on))
                add_on['unit_amount_in_cents'] = add_on['unit_amount_in_cents'][obj['currency']]
            return add_on
        # BUG FIX: materialize with list() -- under Python 3 `map` returns a
        # lazy one-shot iterator, so storing it on the object would make the
        # add-ons disappear after a single traversal (e.g. serialization).
        obj['subscription_add_ons'] = list(map(hydrate_add_ons, obj['subscription_add_ons']))
    return obj
def uris(self, obj):
    """Returns the URIs of the subscription, including its action endpoints."""
    uri_out = super(SubscriptionsEndpoint, self).uris(obj)
    uri_out['account_uri'] = accounts_endpoint.get_object_uri(
            {AccountsEndpoint.pk_attr: obj['account']})
    if 'invoice' in obj:
        uri_out['invoice_uri'] = invoices_endpoint.get_object_uri(
                {InvoicesEndpoint.pk_attr: obj['invoice']})
    uri_out['plan_uri'] = plans_endpoint.get_object_uri(obj['plan'])
    # Action endpoints hang directly off the subscription resource.
    for action in ('cancel', 'reactivate', 'terminate'):
        uri_out[action + '_uri'] = uri_out['object_uri'] + '/' + action
    return uri_out
def create(self, create_info, format=BaseRecurlyEndpoint.XML):
# Like recurly, this will create a new invoice and transaction that
# goes with the new subscription enrollment
account_code = create_info['account'][AccountsEndpoint.pk_attr]
if not AccountsEndpoint.backend.has_object(account_code):
accounts_endpoint.create(create_info['account'])
else:
accounts_endpoint.update(account_code, create_info['account'])
create_info['account'] = account_code
assert plans_backend.has_object(create_info['plan_code'])
plan = plans_backend.get_object(create_info['plan_code'])
now = current_time()
# Trial dates need to be calculated
if 'trial_ends_at' in create_info:
create_info['trial_started_at'] = now.isoformat()
elif plan['trial_interval_length'] > 0:
create_info['trial_started_at'] = now.isoformat()
create_info['trial_ends_at'] = (now + self._calculate_timedelta(plan['trial_interval_unit'], plan['trial_interval_length'])).isoformat()
# Plan start and end date needs to be calculated
if 'starts_at' in create_info:
# A custom start date is specified
create_info['activated_at'] = create_info['starts_at']
# TODO: confirm recurly sets current_period_started_at for future subs
create_info['current_period_started_at'] = create_info['starts_at']
elif 'trial_started_at' in create_info:
create_info['activated_at'] = self._parse_isoformat(create_info['trial_ends_at'])
create_info['current_period_started_at'] = create_info['trial_started_at']
create_info['current_period_ends_at'] = create_info['trial_ends_at']
else:
create_info['activated_at'] = now.isoformat()
create_info['current_period_started_at'] = now.isoformat()
started_at = self._parse_isoformat(create_info['current_period_started_at'])
if now >= started_at:
# Plan already started
if 'first_renewal_date' in create_info:
create_info['current_period_ends_at'] = self._parse_isoformat(create_info['first_renewal_date'])
else:
create_info['current_period_ends_at'] = (started_at + self._calculate_timedelta(plan['plan_interval_unit'], plan['plan_interval_length'])).isoformat()
# Tax calculated based on plan info
# UNSUPPORTED
create_info['tax_in_cents'] = 0
create_info['tax_type'] = 'usst'
create_info['tax_rate'] = 0
# Subscription states
if 'current_period_ends_at' not in create_info:
create_info['state'] = 'future'
else:
create_info['state'] = 'active'
# If there are addons, make sure they exist in the system
if 'subscription_add_ons' in create_info:
add_ons = create_info['subscription_add_ons']
if isinstance(add_ons, dict):
add_ons = add_ons.values()
for add_on in add_ons:
add_on_uuid = plans_endpoint.generate_plan_add_on_uuid(create_info['plan_code'], add_on['add_on_code'])
assert plan_add_ons_backend.has_object(add_on_uuid)
create_info['subscription_add_ons'] = [add_on['add_on_code'] for add_on in add_ons]
defaults = SubscriptionsEndpoint.defaults.copy()
defaults['unit_amount_in_cents'] = plan['unit_amount_in_cents'][create_info['currency']]
defaults.update(create_info)
# TODO: support bulk
new_sub = super(SubscriptionsEndpoint, self).create(defaults, format=BaseRecurlyEndpoint.RAW)
self.hydrate_foreign_keys(new_sub)
if defaults['state'] == 'active':
# if trial_ends_at is set but is not in the future, the trial has ended
if 'trial_started_at' in defaults and \
('trial_ends_at' not in defaults or self._parse_isoformat(defaults['trial_ends_at']) >= datetime.now(tzutc())):
# create a transaction and invoice for the trial
new_transaction = {}
new_transaction['account'] = {}
new_transaction['account'][AccountsEndpoint.pk_attr] = new_sub['account']
new_transaction['amount_in_cents'] = 0
new_transaction['currency'] = new_sub['currency']
new_transaction['subscription'] = new_sub[SubscriptionsEndpoint.pk_attr]
new_transaction = transactions_endpoint.create(new_transaction, format=BaseRecurlyEndpoint.RAW)
new_invoice_id = new_transaction['invoice']
InvoicesEndpoint.backend.update_object(new_invoice_id, {'subscription': new_sub[SubscriptionsEndpoint.pk_attr]})
new_sub = SubscriptionsEndpoint.backend.update_object(defaults['uuid'], {'invoice': new_invoice_id})
else:
# Setup charges first, to calculate total charge to put on the
# invoice and transaction
total = 0
adjustment_infos = []
plan_charge_line_item = {
'account_code': new_sub['account'],
'currency': new_sub['currency'],
'unit_amount_in_cents': int(new_sub['unit_amount_in_cents']),
'description': new_sub['plan']['name'],
'quantity': new_sub['quantity'],
'start_date': self._parse_isoformat(new_sub['current_period_started_at']),
'end_date': self._parse_isoformat(new_sub['current_period_ends_at'])
}
total += plan_charge_line_item['unit_amount_in_cents']
adjustment_infos.append(plan_charge_line_item)
if 'subscription_add_ons' in new_sub:
for add_on in new_sub['subscription_add_ons']:
plan_charge_line_item = {
'account_code': new_sub['account'],
'currency': new_sub['currency'],
'unit_amount_in_cents': int(add_on['unit_amount_in_cents']),
'description': add_on['name'],
'quantity': new_sub['quantity'],
}
total += plan_charge_line_item['unit_amount_in_cents']
adjustment_infos.append(plan_charge_line_item)
# now calculate discounts
coupon_redemptions = accounts_endpoint.get_coupon_redemptions(
new_sub['account'], format=BaseRecurlyEndpoint.RAW)
if coupon_redemptions:
total -= self._apply_coupons(coupon_redemptions, adjustment_infos)
# create a transaction if the subscription is started
new_transaction = {}
new_transaction['account'] = {}
new_transaction['account'][AccountsEndpoint.pk_attr] = new_sub['account']
new_transaction['amount_in_cents'] = total
new_transaction['currency'] = new_sub['currency']
new_transaction['subscription'] = new_sub[SubscriptionsEndpoint.pk_attr]
new_transaction = transactions_endpoint.create(new_transaction, format=BaseRecurlyEndpoint.RAW)
new_invoice_id = new_transaction['invoice']
# Now create accumulated new adjustments for the sub to track line items
adjustments = []
for plan_charge_line_item in adjustment_infos:
plan_charge_line_item['invoice'] = new_invoice_id
plan_charge_line_item = adjustments_endpoint.create(plan_charge_line_item, format=BaseRecurlyEndpoint.RAW)
adjustments.append(plan_charge_line_item[AdjustmentsEndpoint.pk_attr])
InvoicesEndpoint.backend.update_object(new_invoice_id, {'subscription': new_sub[SubscriptionsEndpoint.pk_attr], 'line_items': adjustments})
new_sub = SubscriptionsEndpoint.backend.update_object(defaults['uuid'], {'invoice': new_invoice_id})
return self.serialize(new_sub, format=format)
@details_route('PUT', 'terminate')
def terminate_subscription(self, pk, terminate_info, format=format):
subscription = SubscriptionsEndpoint.backend.get_object(pk)
# assume base transaction exists
transaction = TransactionsEndpoint.backend.list_objects(lambda trans: trans.get('subscription', None) == subscription[SubscriptionsEndpoint.pk_attr])[0]
invoice_number = transaction['invoice']
invoice = InvoicesEndpoint.backend.get_object(invoice_number)
start = self._parse_isoformat(subscription['current_period_started_at'])
end = self._parse_isoformat(subscription['current_period_ends_at'])
now = current_time()
refund_type = terminate_info['refund'][0]
if refund_type == 'partial':
if now > end:
now = end
days_left = (end - now).days
total_days = (end - start).days
refund_amount = int((float(days_left) / total_days) * transaction['amount_in_cents'])
invoice_number = transaction['invoice']
invoices_endpoint.refund_invoice(invoice_number, {'amount_in_cents': refund_amount})
elif refund_type == 'full':
adjustments_to_refund = []
for line_item in invoice['line_items']:
adjustments_to_refund.append({
'adjustment': AdjustmentsEndpoint.backend.get_object(line_item)
})
invoices_endpoint.refund_invoice(invoice_number, {'line_items': adjustments_to_refund})
return self.serialize(SubscriptionsEndpoint.backend.update_object(pk, {
'state': 'expired',
'expires_at': now.isoformat(),
'current_period_ends_at': now.isoformat()
}), format=format)
@details_route('PUT', 'cancel')
def cancel_subscription(self, pk, cancel_info, format=format):
subscription = SubscriptionsEndpoint.backend.get_object(pk)
return self.serialize(SubscriptionsEndpoint.backend.update_object(pk, {
'state': 'canceled',
'expires_at': subscription['current_period_ends_at'],
'canceled_at': current_time().isoformat()
}), format=format)
@details_route('PUT', 'reactivate')
def reactivate_subscription(self, pk, reactivate_info, format=format):
subscription = SubscriptionsEndpoint.backend.get_object(pk)
if not subscription['state'] == 'canceled':
raise ResponseError(400, '')
return self.serialize(SubscriptionsEndpoint.backend.update_object(pk, {
'state': 'active',
'expires_at': None,
'canceled_at': None
}), format=format)
def _apply_coupons(self, coupon_redemptions, adjustment_infos):
total_discounts = 0
for redemption in coupon_redemptions:
for plan_charge_line_item in adjustment_infos:
discount = coupons_endpoint.determine_coupon_discount(
redemption['coupon'],
plan_charge_line_item['unit_amount_in_cents'])
if 'discount_in_cents' in plan_charge_line_item:
plan_charge_line_item['discount_in_cents'] += discount
else:
plan_charge_line_item['discount_in_cents'] = discount
total_discounts += discount
return total_discounts
# Module-level singleton endpoints, one per mocked Recurly resource type,
# instantiated once at import time.
accounts_endpoint = AccountsEndpoint()
adjustments_endpoint = AdjustmentsEndpoint()
transactions_endpoint = TransactionsEndpoint()
coupons_endpoint = CouponsEndpoint()
invoices_endpoint = InvoicesEndpoint()
plans_endpoint = PlansEndpoint()
subscriptions_endpoint = SubscriptionsEndpoint()
# Registry of every endpoint, used for routing and state management.
endpoints = [accounts_endpoint,
             adjustments_endpoint,
             transactions_endpoint,
             coupons_endpoint,
             invoices_endpoint,
             plans_endpoint,
             subscriptions_endpoint]
def clear_endpoints():
    """Clear state off of all endpoints. This ensures that no residual state
    carries over between mocurly contexts.
    """
    # Previously only transactions_endpoint was cleared, contradicting the
    # docstring.  Iterate the full registry; the hasattr() guard keeps this
    # safe for endpoints that carry no clearable state.
    for endpoint in endpoints:
        if hasattr(endpoint, 'clear_state'):
            endpoint.clear_state()
|
<filename>ShowVerticalMetrics.glyphsReporter/Contents/Resources/plugin.py
# encoding: utf-8
from __future__ import division, print_function, unicode_literals
###########################################################################################################
#
#
# Reporter Plugin
#
# Read the docs:
# https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/Reporter
#
#
###########################################################################################################
import objc
from GlyphsApp import *
from GlyphsApp.plugins import *
class ShowVerticalMetrics(ReporterPlugin):
    """Glyphs reporter plugin that draws the master's OS/2 / hhea vertical
    metrics as dashed lines (with labels) behind the edited layer."""
    # Cached names of the glyphs with the lowest/highest outlines, filled
    # lazily by updateExtremeLayersForMaster().
    lowestGlyphName = None
    tallestGlyphName = None
    @objc.python_method
    def settings(self):
        """Register the localized menu name and the metric keys to draw."""
        self.menuName = Glyphs.localize({
            'en': u'Vertical Metrics',
            'de': u'Vertikalmaße',
            'es': u'métricas verticales',
            'fr': u'mesures verticales',
        })
        # Master custom-parameter names that are looked up and drawn.
        self.verticalMetrics = (
            "hheaAscender",
            "hheaDescender",
            "typoAscender",
            "typoDescender",
            "winAscent",
            "winDescent",
            # "hheaLineGap",
            # "typoLineGap",
        )
    @objc.python_method
    def background(self, layer):
        """Draw the dashed metric lines (and optional labels) behind ``layer``."""
        # define color:
        defaultColor = NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.4, 0.8, 0.4, 1 )
        # Optional user override: comma-separated r,g,b,a in the defaults.
        if Glyphs.defaults["com.mekkablue.ShowVerticalMetrics.color"]:
            rgba = [
                defaultColor.redComponent(),
                defaultColor.greenComponent(),
                defaultColor.blueComponent(),
                defaultColor.alphaComponent(),
            ]
            colorpref = Glyphs.defaults["com.mekkablue.ShowVerticalMetrics.color"].split(",")
            for i in range( min( 4, len(colorpref) ) ):
                try:
                    colorvalue = float( colorpref[i].strip() )
                    # values > 1.0 are treated as percentages
                    if colorvalue > 1.0:
                        colorvalue /= 100.0
                    # NOTE(review): % 1.0 wraps an exact 1.0 (or "100") to
                    # 0.0 -- confirm this is intended rather than a clamp.
                    rgba[i] = colorvalue % 1.0
                except:
                    print("\nWarning: could not convert '%s' into %s value." % (colorpref[i], ("red","green","blue","alpha")[i]))
                    print("com.mekkablue.ShowVerticalMetrics.color takes comma-separated numbers between 0.0 and 1.0 (or 0 and 100).")
            defaultColor = NSColor.colorWithRed_green_blue_alpha_( rgba[0], rgba[1], rgba[2], rgba[3] )
        defaultColor.set()
        # draw vertical metrics:
        thisMaster = layer.associatedFontMaster()
        # Heights already drawn, used to flip the label to the other side of
        # the line when two metrics coincide.
        heightsAlreadyUsed = []
        # query current view settings:
        zoomFactor = self.getScale()
        xPosition = self.controller.viewPort.origin.x - self.controller.selectedLayerOrigin.x
        shiftToWindowBorder = xPosition / zoomFactor
        if thisMaster:
            for thisMetric in self.verticalMetrics:
                height = thisMaster.customParameters[thisMetric]
                if height:
                    # winDescent is stored positive but drawn below baseline
                    if thisMetric == "winDescent":
                        height *= -1
                    alignment = "bottomright"
                    if height in heightsAlreadyUsed:
                        alignment = "topright"
                        if "win" in thisMetric:
                            alignment = "bottomleft"
                    else:
                        heightsAlreadyUsed.append(height)
                    # dashed horizontal line across the whole canvas
                    line = NSBezierPath.bezierPath()
                    line.moveToPoint_( NSPoint(-50000, height) )
                    line.lineToPoint_( NSPoint(+50000, height) )
                    line.setLineWidth_( 1.0/zoomFactor )
                    line.setLineDash_count_phase_( [1.0/zoomFactor, 3.0/zoomFactor], 2, 3.5/zoomFactor )
                    line.stroke()
                    # draw metric names:
                    if zoomFactor >= 0.07: # only display names when zoomed in enough
                        self.drawTextAtPoint(
                            " "+thisMetric+" ",
                            NSPoint(
                                (xPosition+80)/zoomFactor,
                                height+2/zoomFactor if "bottom" in alignment else height,
                            ),
                            fontColor=defaultColor,
                            align=alignment
                        )
            # draw tallest and lowest glyphs:
            # NOTE(review): branch is hard-disabled with `if False:`; the
            # commented defaults lookup suggests it was meant to be optional.
            if False: #Glyphs.defaults["com.mekkablue.ShowVerticalMetrics.displayExtremeGlyphs"]:
                extremeBezierPaths = self.extremeLayerBezierPathsForFont( thisMaster.font() )
                if extremeBezierPaths:
                    # shift to the left side
                    try:
                        lsbShift = extremeBezierPaths.bounds().origin.x/zoomFactor
                    except:
                        lsbShift = 0
                    shift = NSAffineTransform.transform()
                    shift.translateXBy_yBy_(shiftToWindowBorder-lsbShift,0)
                    extremeBezierPaths.transformUsingAffineTransform_(shift)
                    # draw outline:
                    NSColor.colorWithRed_green_blue_alpha_(1.0, 0.1, 0.3, 0.2).set()
                    if zoomFactor >= 0.07:
                        extremeBezierPaths.setLineWidth_( 1.0/zoomFactor )
                        extremeBezierPaths.stroke()
                    else:
                        extremeBezierPaths.fill()
                else:
                    pass
                    # print("No extreme paths drawn.") # DEBUG
    @objc.python_method
    def extremeLayerBezierPathsForFont(self, thisFont):
        """Return one NSBezierPath combining the outlines of the tallest and
        lowest layers in ``thisFont`` (recomputing the cached names if stale)."""
        if not self.tallestGlyphName or not self.lowestGlyphName:
            self.updateExtremeLayersForFont(thisFont)
        tallestGlyph = thisFont.glyphs[self.tallestGlyphName]
        lowestGlyph = thisFont.glyphs[self.lowestGlyphName]
        tallestLayer = None
        lowestLayer = None
        if not tallestGlyph or not lowestGlyph:
            # cached names no longer exist in this font; refresh the cache
            self.updateExtremeLayersForFont(thisFont)
        else:
            # pick the most extreme layer of each cached glyph
            for tallLayer in tallestGlyph.layers:
                if tallestLayer is None:
                    tallestLayer = tallLayer
                elif tallLayer.bounds.origin.y+tallLayer.bounds.size.height > tallestLayer.bounds.origin.y+tallestLayer.bounds.size.height:
                    tallestLayer = tallLayer
            for lowLayer in lowestGlyph.layers:
                if lowestLayer is None:
                    lowestLayer = lowLayer
                elif lowLayer.bounds.origin.y < lowestLayer.bounds.origin.y:
                    lowestLayer = lowLayer
        extremeBeziers = NSBezierPath.bezierPath()
        for extremeLayer in (lowestLayer, tallestLayer):
            if extremeLayer:
                extremeBezier = extremeLayer.completeBezierPath
                if extremeBezier:
                    extremeBeziers.appendBezierPath_(extremeBezier)
                else:
                    pass
                    # print("Cannot get bezierPath for %s." % repr(extremeLayer)) # DEBUG
            else:
                pass
                # print("Extreme Layer empty.") # DEBUG
        return extremeBeziers
    @objc.python_method
    def updateExtremeLayersForFont(self, thisFont):
        """Scan every master of ``thisFont`` for the extreme glyph names."""
        for thisMaster in thisFont.masters:
            self.updateExtremeLayersForMaster(thisMaster)
    @objc.python_method
    def updateExtremeLayersForMaster(self, thisMaster):
        """Record the names of the lowest/tallest exporting glyphs in
        ``thisMaster``'s layers on the plugin instance.

        NOTE(review): ``lowest``/``highest`` restart at 0 per call, so a name
        cached from a previous master can be displaced by a less extreme
        glyph of this master -- confirm that is acceptable.
        """
        thisFont = thisMaster.font()
        mID = thisMaster.id
        lowest, highest = 0, 0
        for thisGlyph in thisFont.glyphs:
            if thisGlyph.export:
                thisLayer = thisGlyph.layers[mID]
                theseBounds = thisLayer.bounds
                if (not self.lowestGlyphName) or theseBounds.origin.y < lowest:
                    self.lowestGlyphName = thisGlyph.name
                    lowest = theseBounds.origin.y
                if (not self.tallestGlyphName) or (theseBounds.origin.y+theseBounds.size.height) > highest:
                    self.tallestGlyphName = thisGlyph.name
                    highest = (theseBounds.origin.y+theseBounds.size.height)
    @objc.python_method
    def __file__(self):
        """Please leave this method unchanged"""
        return __file__
|
import random
import shutil
import cv2
import numpy as np
import os
from subprocess import call, STDOUT
global count
def frame_extraction(video):
    """Split ``video`` into per-frame PNGs under ``./tmp``.

    Frames are written as ``./tmp/<index>.png`` starting at index 0.

    :param video: path to an input video readable by OpenCV.
    :return: the number of frames written (new; previously returned None).
    """
    temp_folder = "./tmp"
    if not os.path.exists(temp_folder):
        os.makedirs(temp_folder)
    vidcap = cv2.VideoCapture(video)
    try:
        count = 0
        while True:
            success, image = vidcap.read()
            if not success:
                break
            cv2.imwrite(os.path.join(temp_folder, "{:d}.png".format(count)), image)
            count += 1
    finally:
        # Release the capture handle even on error (was leaked before).
        vidcap.release()
    return count
def encode_image(root="./tmp/"):
    """Hide ``theme.png`` inside the extracted frames under ``root``.

    Per colour channel, the output byte is the carrier frame's high nibble
    followed by the secret image's high nibble, so the frame keeps its 4
    MSBs and the secret rides in the low 4 bits.
    """
    img2 = cv2.imread("theme.png")  # secret image to embed
    # NOTE(review): the loop bound is the secret image's *height*
    # (len(img2) == img2.shape[0]) but ``k`` indexes frame files -- this
    # presumably assumes at least that many frames exist; confirm.
    for k in range(0, len(img2)):
        f_name = "{}{}.png".format(root, k)
        img1 = cv2.imread(f_name)  # carrier frame k
        for i in range(img2.shape[0]):
            for j in range(img2.shape[1]):
                for l in range(3):
                    # 8-bit binary strings of carrier and secret values
                    v1 = format(img1[i][j][l], '08b')
                    v2 = format(img2[i][j][l], '08b')
                    # splice: carrier MSB nibble + secret MSB nibble
                    v3 = v1[:4] + v2[:4]
                    img1[i][j][l] = int(v3, 2)
        cv2.imwrite(f_name, img1)
        # NOTE(review): 2 placeholders but 3 arguments -- ``k`` is silently
        # dropped, and ``img2[i]`` uses the leftover inner-loop index.
        print("frame img1 {} img2{}".format(img1, img2[i], k))
def decode_image(video):
    """Recover the carrier frame and the hidden theme image from ``video``.

    Re-extracts frames, then splits each pixel byte into its high nibble
    (carrier estimate) and low nibble (secret estimate), padding the lost
    low bits with random 0/1 digits.  Writes ``last_image_decode.png`` and
    ``theme_image_decode.png``, then removes ``./tmp``.
    """
    frame_extraction(video)
    root = "./tmp/"
    sample = cv2.imread("./tmp/0.png")
    width = sample.shape[0]
    height = sample.shape[1]
    # NOTE(review): the "logo" buffer reuses the frame's dimensions, not the
    # theme image's -- assumes both are the same size; confirm.
    logowidth = sample.shape[0]
    logoheight = sample.shape[1]
    image_decode = np.zeros((width, height, 3), np.uint8)
    logo_decode = np.zeros((logowidth, logoheight, 3), np.uint8)
    for k in range(len(logo_decode)):
        f_name = "{}{}.png".format(root, k)
        img = cv2.imread(f_name)
        for i in range(width):
            for j in range(height):
                for l in range(3):
                    v1 = format(img[i][j][l], '08b')
                    # NOTE(review): format() never returns None -- dead guard.
                    if v1 is None:
                        break
                    # chr(0/1 + 48) is a random '0'/'1' digit used as filler
                    v2 = v1[:4] + chr(random.randint(0, 1) + 48) * 4
                    v3 = v1[4:] + chr(random.randint(0, 1) + 48) * 4
                    image_decode[i][j][l] = int(v2, 2)
                    logo_decode[i][j][l] = int(v3, 2)
    cv2.imwrite('last_image_decode.png', image_decode)
    cv2.imwrite('theme_image_decode.png', logo_decode)
    # NOTE(review): ``img``, ``image_decode[i]`` and ``k`` are loop leftovers.
    print("frame img {} decrypt {}, k {}".format(img, image_decode[i], k))
    clean_tmp()
def clean_tmp(path="./tmp"):
    """Delete the temporary frame directory, if it exists.

    :param path: directory to remove; silently a no-op when absent.
    """
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
    print("tmp files are deleted")
def main():
    """Drive the encode pipeline: extract frames, hide theme.png in them,
    then re-mux video and original audio with ffmpeg."""
    f_name = input("Enter the name of video with its format")
    frame_extraction(f_name)
    # One devnull handle, closed deterministically (three were leaked before).
    with open(os.devnull, "w") as devnull:
        # extract the audio track so it can be restored after re-encoding
        call(["ffmpeg", "-i", f_name, "-q:a", "0", "-map", "a", "tmp/audio.mp3", "-y"],
             stdout=devnull, stderr=STDOUT)
        encode_image()
        # reassemble the stego frames with the lossless png codec
        call(["ffmpeg", "-i", "tmp/%d.png", "-vcodec", "png", "tmp/video.mp4", "-y"],
             stdout=devnull, stderr=STDOUT)
        # mux the original audio back in without re-encoding
        call(["ffmpeg", "-i", "tmp/video.mp4", "-i", "tmp/audio.mp3", "-codec", "copy", "video.mov", "-y"],
             stdout=devnull, stderr=STDOUT)
    clean_tmp()
if __name__ == "__main__":
    # Interactive menu: '1' hides theme.png in a chosen video, '2' recovers
    # the hidden image; any other input exits the loop.
    while True:
        print("1.Hide a message in video 2.Reveal the secret from video")
        print("press any key to exit")
        choice = input()
        if choice == '1':
            main()
        elif choice == '2':
            decode_image(input("Enter name of video with its format"))
        else:
            break
|
"""Zhang Gradient Projection Debiasing Baseline Model."""
from __future__ import annotations
from typing import NamedTuple, cast
import ethicml as em
from kit import implements
from kit.torch import CrossEntropyLoss, TrainingMode
import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.utilities.types import EPOCH_OUTPUT
import torch
from torch import Tensor, nn
from torch.optim.optimizer import Optimizer
from conduit.data.structures import TernarySample
from conduit.models.base import CdtModel
from conduit.models.utils import aggregate_over_epoch, prediction, prefix_keys
from conduit.types import LRScheduler, Stage
__all__ = ["GPD"]
def compute_proj_grads(*, model: nn.Module, loss_p: Tensor, loss_a: Tensor, alpha: float) -> None:
    """Write adversarial-gradient-projected gradients into ``param.grad``.

    For each parameter, the prediction gradient has its component along the
    adversarial gradient removed, and ``alpha`` times the adversarial
    gradient is additionally subtracted (Zhang et al.).

    :param model: Model whose parameters the gradients are to be computed w.r.t.
    :param loss_p: Prediction loss.
    :param loss_a: Adversarial loss.
    :param alpha: Pre-factor for adversarial loss.
    """
    params = tuple(model.parameters())
    pred_grads = torch.autograd.grad(loss_p, params, retain_graph=True)
    adv_grads = torch.autograd.grad(loss_a, params, retain_graph=True)
    for param, g_pred, g_adv in zip(params, pred_grads, adv_grads):
        # projection of g_pred onto g_adv; eps-clamped denominator for safety
        denom = torch.sum(g_adv * g_adv).clamp(min=torch.finfo(g_adv.dtype).eps)
        projection = g_adv * torch.sum(g_pred * g_adv) / denom
        param.grad = g_pred - projection - alpha * g_adv
def compute_grad(*, model: nn.Module, loss: Tensor) -> None:
    """Write the gradients of ``loss`` directly into each ``param.grad``.

    :param model: Model whose parameters the gradients are to be computed w.r.t.
    :param loss: Adversarial loss.
    """
    params = tuple(model.parameters())
    for param, grad in zip(params, torch.autograd.grad(loss, params, retain_graph=True)):
        param.grad = grad
class ModelOut(NamedTuple):
    """Paired outputs of GPD.forward: adversary and classifier logits."""
    s: Tensor  # sensitive-attribute logits from the adversary head
    y: Tensor  # target logits from the classifier head
class GPD(CdtModel):
    """Zhang Mitigating Unwanted Biases.

    Encoder/classifier/adversary architecture trained with manual
    optimization: the encoder receives the classification gradient with its
    component along the adversarial gradient projected out.
    """
    def __init__(
        self,
        *,
        adv: nn.Module,
        enc: nn.Module,
        clf: nn.Module,
        lr: float = 3.0e-4,
        weight_decay: float = 0.0,
        lr_initial_restart: int = 10,
        lr_restart_mult: int = 2,
        lr_sched_interval: TrainingMode = TrainingMode.epoch,
        lr_sched_freq: int = 1,
    ) -> None:
        """Store the three sub-networks and optimizer/scheduler settings.

        :param adv: adversary head predicting the sensitive attribute ``s``.
        :param enc: shared encoder producing the embedding.
        :param clf: classifier head predicting the target ``y``.
        """
        super().__init__(
            lr=lr,
            weight_decay=weight_decay,
            lr_initial_restart=lr_initial_restart,
            lr_restart_mult=lr_restart_mult,
            lr_sched_interval=lr_sched_interval,
            lr_sched_freq=lr_sched_freq,
        )
        self.adv = adv
        self.enc = enc
        self.clf = clf
        self._loss_adv_fn = CrossEntropyLoss()
        self._loss_clf_fn = CrossEntropyLoss()
        # Gradients are written by hand in training_step, so Lightning's
        # automatic optimization must be disabled.
        self.automatic_optimization = False  # Mark for manual optimization
    @implements(CdtModel)
    @torch.no_grad()
    def inference_step(self, batch: TernarySample, *, stage: Stage) -> dict[str, Tensor]:
        """Run one no-grad eval step; log losses and return per-batch outputs."""
        assert isinstance(batch.x, Tensor)
        model_out = self.forward(batch.x)
        loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)
        logging_dict = {
            "loss": loss.item(),
            "loss_adv": loss_adv.item(),
            "loss_clf": loss_clf.item(),
        }
        logging_dict = prefix_keys(dict_=logging_dict, prefix=str(stage), sep="/")
        self.log_dict(logging_dict)
        return {
            "targets": batch.y.view(-1),
            "subgroup_inf": batch.s.view(-1),
            "logits_y": model_out.y,
        }
    @implements(CdtModel)
    def inference_epoch_end(self, outputs: EPOCH_OUTPUT, stage: Stage) -> dict[str, float]:
        """Aggregate the epoch's outputs and compute fairness/accuracy metrics."""
        targets_all = aggregate_over_epoch(outputs=outputs, metric="targets")
        subgroup_inf_all = aggregate_over_epoch(outputs=outputs, metric="subgroup_inf")
        logits_y_all = aggregate_over_epoch(outputs=outputs, metric="logits_y")
        preds_y_all = prediction(logits_y_all)
        # ethicml requires an x column, but the metrics below only use s/y,
        # so random values stand in for the features.
        dt = em.DataTuple(
            x=pd.DataFrame(
                torch.rand_like(subgroup_inf_all).detach().cpu().numpy(),
                columns=["x0"],
            ),
            s=pd.DataFrame(subgroup_inf_all.detach().cpu().numpy(), columns=["s"]),
            y=pd.DataFrame(targets_all.detach().cpu().numpy(), columns=["y"]),
        )
        return em.run_metrics(
            predictions=em.Prediction(hard=pd.Series(preds_y_all.detach().cpu().numpy())),
            actual=dt,
            metrics=[em.Accuracy(), em.RenyiCorrelation(), em.Yanovich()],
            per_sens_metrics=[em.Accuracy(), em.ProbPos(), em.TPR()],
        )
    def _get_losses(
        self, model_out: ModelOut, *, batch: TernarySample
    ) -> tuple[Tensor, Tensor, Tensor]:
        """Return (adversarial loss, classification loss, their sum)."""
        loss_adv = self._loss_adv_fn(model_out.s, target=batch.s)
        loss_clf = self._loss_clf_fn(model_out.y, target=batch.y)
        return loss_adv, loss_clf, loss_adv + loss_clf
    @implements(pl.LightningModule)
    def training_step(self, batch: TernarySample, batch_idx: int) -> None:
        """One manual-optimization step with projected encoder gradients."""
        assert isinstance(batch.x, Tensor)
        opt = cast(Optimizer, self.optimizers())
        opt.zero_grad()
        model_out: ModelOut = self.forward(batch.x)
        loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)
        logging_dict = {
            "adv_loss": loss_adv.item(),
            "clf_loss": loss_clf.item(),
            "loss": loss.item(),
        }
        logging_dict = prefix_keys(dict_=logging_dict, prefix="train", sep="/")
        self.log_dict(logging_dict)
        # Encoder: classification gradient with the adversarial component
        # projected out; heads: plain gradients of their own losses.
        compute_proj_grads(model=self.enc, loss_p=loss_clf, loss_a=loss_adv, alpha=1.0)
        compute_grad(model=self.adv, loss=loss_adv)
        compute_grad(model=self.clf, loss=loss_clf)
        opt.step()
        # Manual LR scheduling (automatic optimization is off).
        if (self.lr_sched_interval is TrainingMode.step) and (
            self.global_step % self.lr_sched_freq == 0
        ):
            sch = cast(LRScheduler, self.lr_schedulers())
            sch.step()
        if (self.lr_sched_interval is TrainingMode.epoch) and self.trainer.is_last_batch:
            sch = cast(LRScheduler, self.lr_schedulers())
            sch.step()
    @implements(nn.Module)
    def forward(self, x: Tensor) -> ModelOut:
        """Encode ``x`` and return classifier and adversary logits."""
        embedding = self.enc(x)
        y_pred = self.clf(embedding)
        s_pred = self.adv(embedding)
        return ModelOut(y=y_pred, s=s_pred)
|
import radical.utils
import saga
import os
import random
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestercore.work_spec import WorkSpec as ws
# setup base logger
baseLogger = core_utils.setup_logger('saga_submitter')
# SAGA submitter
class SAGASubmitter (PluginBase):
    """Harvester submitter plugin that launches workers on an HPC batch
    system through a SAGA (saga-python) job service."""
    # constructor
    # constructor define job service with particular adaptor (can be extended to support remote execution)
    def __init__(self, **kwarg):
        PluginBase.__init__(self, **kwarg)
        tmpLog = self.make_logger(baseLogger, method_name='__init__')
        tmpLog.info("[{0}] SAGA adaptor will be used".format(self.adaptor))
    def workers_list(self):
        """Return ``[(job, state), ...]`` for every job the adaptor knows."""
        job_service = saga.job.Service(self.adaptor)
        workers = []
        for j in job_service.jobs():
            # BUG FIX: was `self.job_service.get_job(j)`; no such attribute
            # exists -- the service is the local variable created above, and
            # dereferencing self.job_service raised AttributeError.
            worker = job_service.get_job(j)
            workers.append((worker, worker.state))
        job_service.close()
        return workers
    def _get_executable(self, list_of_pandajobs):
        '''
        Prepare command line to launch payload.
        TODO: In general will migrate to specific worker maker

        :param list_of_pandajobs - list of job objects, which should be used:
        :return: string to execution which will be launched
        '''
        executable_arr = ['module load python']
        for pj in list_of_pandajobs:
            executable_arr.append('aprun -d 16 -n 1 ' + pj.jobParams['transformation']
                                  + ' ' + pj.jobParams['jobPars'])
        return executable_arr
    def _state_change_cb(self, src_obj, fire_on, value):
        """SAGA callback: mirror a job state change onto the worker spec and
        its status file; returns True to keep the callback registered."""
        tmpLog = self.make_logger(baseLogger, method_name='_state_change_cb')
        #self._workSpec.status = self.status_translator(value)
        self._workSpec.set_status(self.status_translator(value))
        self._workSpec.force_update('status')
        try:
            tmpLog.debug("Created time: {}".format(src_obj.created))
            tmpLog.debug('src obj: {}'.format(src_obj))
        except:
            tmpLog.debug('FAILED')
        tmpLog.info('Worker with BatchID={0} workerID={2} change state to: {1}'.format(self._workSpec.batchID,
                                                                                       self._workSpec.status,
                                                                                       self._workSpec.workerID))
        # for compatibility with dummy monitor; `with` guarantees the handle
        # is closed even if the write fails
        with open(os.path.join(self._workSpec.accessPoint, 'status.txt'), 'w') as f:
            f.write(self._workSpec.status)
        return True
    def _execute(self, work_spec):
        """Build a SAGA job description for ``work_spec`` and submit it.

        :return: 0 on success, -1 on a SAGA exception (worker marked failed).
        """
        tmpLog = self.make_logger(baseLogger, method_name='_execute')
        job_service = saga.job.Service(self.adaptor)
        #sagadateformat_str = 'Tue Nov  7 11:31:10 2017'
        #sagadateformat_str = '%a %b %d %H:%M:%S %Y'
        try:
            os.chdir(work_spec.accessPoint)
            tmpLog.info("Walltime: {0} sec. {1} min.".format(work_spec.maxWalltime, work_spec.maxWalltime / 60))
            tmpLog.info("Cores: {0}".format(work_spec.nCore))
            tmpLog.debug("Worker directory: {0}".format(work_spec.accessPoint))
            jd = saga.job.Description()
            if self.projectname:
                jd.project = self.projectname
            # launching job at HPC
            jd.wall_time_limit = work_spec.maxWalltime / 60  # minutes
            if work_spec.workParams in (None, "NULL"):
                # no template configured: build the command list per panda job
                jd.executable = "\n".join(self._get_executable(work_spec.jobspec_list))
            else:
                tmpLog.debug("Work params (executable templatae): \n{0}".format(work_spec.workParams))
                exe_str = work_spec.workParams
                exe_str = exe_str.format(work_dir=work_spec.accessPoint)
                jd.executable = exe_str
                # jd.executable = work_spec.workParams.format(work_dir=work_spec.accessPoint)
            tmpLog.debug("Command to be launched: \n{0}".format(jd.executable))
            jd.total_cpu_count = work_spec.nCore  # one node with 16 cores for one job
            jd.queue = self.localqueue
            jd.working_directory = work_spec.accessPoint  # working directory of task
            # random suffix keeps stdout/stderr of resubmissions apart
            uq_prefix = '{0:07}'.format(random.randint(0, 10000000))
            jd.output = os.path.join(work_spec.accessPoint, 'MPI_pilot_stdout_{0}'.format(uq_prefix))
            jd.error = os.path.join(work_spec.accessPoint, 'MPI_pilot_stderr_{0}'.format(uq_prefix))
            work_spec.set_log_file('stdout', jd.output)
            work_spec.set_log_file('stderr', jd.error)
            # Create a new job from the job description. The initial state of
            # the job is 'New'.
            task = job_service.create_job(jd)
            self._workSpec = work_spec
            task.run()
            work_spec.batchID = task.id.split('-')[1][1:-1]  # SAGA have own representation, but real batch id easy to extract
            tmpLog.info("Worker ID={0} with BatchID={1} submitted".format(work_spec.workerID, work_spec.batchID))
            tmpLog.debug("SAGA status: {0}".format(task.state))
            # for compatibility with dummy monitor
            with open(os.path.join(work_spec.accessPoint, 'status.txt'), 'w') as f:
                f.write(self.status_translator(task.state))
            job_service.close()
            return 0
        except saga.SagaException as ex:
            # Catch all saga exceptions
            tmpLog.error("An exception occurred: (%s) %s " % (ex.type, (str(ex))))
            # Trace back the exception. That can be helpful for debugging.
            tmpLog.error("\n*** Backtrace:\n %s" % ex.traceback)
            work_spec.status = work_spec.ST_failed
            return -1
    @staticmethod
    def status_translator(saga_status):
        """Map a SAGA job state onto the harvester WorkSpec state.

        NOTE(review): unmapped states (e.g. NEW, SUSPENDED) fall through and
        return None -- confirm callers tolerate that.
        """
        if saga_status == saga.job.PENDING:
            return ws.ST_submitted
        if saga_status == saga.job.RUNNING:
            return ws.ST_running
        if saga_status == saga.job.DONE:
            return ws.ST_finished
        if saga_status == saga.job.FAILED:
            return ws.ST_failed
        if saga_status == saga.job.CANCELED:
            return ws.ST_cancelled
    # submit workers
    def submit_workers(self, work_specs):
        """Submit each work spec; return ``[(ok, error_message), ...]``
        aligned with the input list."""
        tmpLog = self.make_logger(baseLogger, method_name='submit_workers')
        tmpLog.debug('start nWorkers={0}'.format(len(work_specs)))
        retList = []
        for workSpec in work_specs:
            res = self._execute(workSpec)
            if res == 0:
                retList.append((True, ''))
            else:
                retList.append((False, 'Failed to submit worker. Check logs'))
        tmpLog.debug('done')
        return retList
|
# -*- coding: utf-8 -*-
import os
import time
from datetime import datetime
import logging
from scipy.sparse import lil_matrix
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
from microgridRLsimulator.agent.agent import Agent
import pickle
import numpy as np
from copy import deepcopy
from microgridRLsimulator.simulate.forecaster import Forecaster
from microgridRLsimulator.simulate.gridaction import GridAction
from microgridRLsimulator.utils import time_string_for_storing_results, decode_GridState
from microgridRLsimulator.agent.OptimizationAgent import OptimizationAgent
logger = logging.getLogger(__name__)
def plot_training_progress(y_train, y_pred_train, y_test, y_pred_test):
    """Plot original vs. predicted outputs, one figure per split and one
    subplot per output dimension, then show both figures."""
    for title, actual, predicted in (("Train", y_train, y_pred_train),
                                     ("Test", y_test, y_pred_test)):
        n_outputs = len(actual[0])
        fig, axes = plt.subplots(n_outputs, 1, sharex=True)
        fig.suptitle(title)
        for dim in range(n_outputs):
            axes[dim].plot(actual[:, dim], label="Original")
            axes[dim].plot(predicted[:, dim], label="Prediction")
            axes[dim].legend()
    plt.show()
class SLAgent(Agent):
    def __init__(self, env, control_horizon_data, simulation_horizon, path_to_stored_experience, path_to_store_models,
                 features, test_size, shuffle, use_forecasts, models_dict, expert_iterations, scale_outputs=False, n_test_episodes=1):
        """Supervised-learning agent trained to imitate an optimization expert.

        :param env: microgrid RL environment.
        :param control_horizon_data: expert controller horizon (steps).
        :param simulation_horizon: simulation horizon for the expert.
        :param path_to_stored_experience: directory with recorded experience.
        :param path_to_store_models: base directory for trained models.
        :param features: grid-state features used as model inputs.
        :param test_size: fraction held out for evaluation per fit.
        :param shuffle: whether to shuffle the train/test split.
        :param use_forecasts: append consumption/PV forecasts to the inputs.
        :param models_dict: name -> config of sklearn regressors to train.
        :param expert_iterations: DAgger-style data-aggregation iterations.
        :param scale_outputs: MinMax-scale the outputs before fitting.
        :param n_test_episodes: episodes to roll out in simulate_agent().
        """
        super().__init__(env)
        self.control_horizon = control_horizon_data
        self.simulation_horizon = simulation_horizon
        self.path_to_stored_experience = path_to_stored_experience
        # Each run gets its own timestamped model folder.
        self.path_to_store_models = path_to_store_models + time_string_for_storing_results("experiment",
                                                                                           env.simulator.case)
        if not os.path.isdir(self.path_to_store_models):
            os.makedirs(self.path_to_store_models)
        self.features = features
        self.use_forecasts = use_forecasts
        self.test_size = test_size
        self.shuffle = shuffle
        self.grid = self.env.simulator.grid
        # Filled lazily by reset_agent() / load_data().
        self.forecaster = None
        self.sl_models = None
        self.create_models_from_dict(models_dict)
        self.inputs = None
        self.outputs = None
        self.states = None
        self.forecasts = None
        self.actions = None
        # Expert policy used to relabel states during data aggregation.
        self.expert = OptimizationAgent(self.env, self.control_horizon, self.simulation_horizon)
        self.scale_outputs = scale_outputs
        self.expert_iterations = expert_iterations
        self.n_test_episodes = n_test_episodes
    @staticmethod
    def name():
        """Return the agent-type identifier used in results and logs."""
        return "SL"
    def reset_agent(self):
        """Rebuild the forecaster and grid reference for a fresh episode."""
        self.forecaster = Forecaster(simulator=self.env.simulator, control_horizon=self.control_horizon)
        self.grid = self.env.simulator.grid
def train_agent(self):
# Load the datasets
self.load_data()
# Prepare the data
self.process_data()
## Supervised learning model to tune.
for _ in range(self.expert_iterations):
new_states = []
expert_actions = []
for name, model in self.sl_models.items():
x_train, x_test, y_train, y_test = train_test_split(self.inputs, self.outputs,
test_size=self.test_size,
shuffle=self.shuffle)
model.fit(x_train, y_train)
y_pred_train = model.predict(x_train)
y_pred_test = model.predict(x_test)
logger.info("Model: %s Train set error: %d, Test set error: %d" % (
name, mean_squared_error(y_train, y_pred_train),
mean_squared_error(y_test, y_pred_test)))
plot_training_progress(y_train, y_pred_train, y_test, y_pred_test)
model.fit(self.inputs, self.outputs)
new_states_model, expert_actions_model = self.augment_training_agent(model)
new_states += new_states_model
expert_actions += expert_actions_model
self.add_new_data_in_experience(new_states, expert_actions)
for name, model in self.sl_models.items():
model.fit(self.inputs, self.outputs)
self.store_model(name, model)
def simulate_agent(self, agent_options=None):
for name, model in self.sl_models.items():
actions = []
for i in range(1, self.n_test_episodes + 1):
state = self.env.reset()
self.reset_agent()
cumulative_reward = 0.0
done = False
while not done:
state_decoded = decode_GridState(self.env.simulator.grid_states[-1], self.features)
if self.forecasts:
self.forecaster.exact_forecast(self.env.simulator.env_step)
consumption_forecast, pv_forecast = self.forecaster.get_forecast()
final_state = np.concatenate((np.array(state_decoded),
np.array(consumption_forecast), np.array(pv_forecast)))
else:
final_state = np.array(state_decoded)
state_shape = np.shape(final_state)[0]
model_output = model.predict(final_state.reshape(-1, state_shape))[0]
action = self.list_to_GridAction(list(model_output))
actions.append(model_output)
next_state, reward, done, info = self.env.step(state=state, action=action)
cumulative_reward += reward
state = deepcopy(next_state)
print('Finished %s simulation for model %s and the reward is: %d.' % (self.env.purpose, name,
cumulative_reward))
self.env.simulator.store_and_plot(
folder="results/" + time_string_for_storing_results(self.name() + "_" + self.env.purpose + "_from_" + self.env.simulator.start_date.strftime("%m-%d-%Y") + "_to_" + self.env.simulator.end_date.strftime("%m-%d-%Y"),
self.env.simulator.case) + "_" + str(i), agent_options=agent_options)
# plt.figure()
# actions_array = np.array(actions).T
# for a in actions_array:
# plt.plot(a)
# plt.show()
def augment_training_agent(self, model):
state = self.env.reset()
self.reset_agent()
self.expert.reset_agent()
cumulative_reward = 0.0
done = False
new_states = []
expert_actions = []
while not done:
self.expert._create_model(self.env.simulator.env_step)
state_decoded = decode_GridState(self.env.simulator.grid_states[-1], self.features)
expert_action = self.expert.get_optimal_action()[0].to_list()
if self.forecasts:
self.forecaster.exact_forecast(self.env.simulator.env_step)
consumption_forecast, pv_forecast = self.forecaster.get_forecast()
final_state = np.concatenate((np.array(state_decoded),
np.array(consumption_forecast), np.array(pv_forecast)))
else:
final_state = np.array(state_decoded)
new_states.append(final_state)
expert_actions.append(expert_action)
state_shape = np.shape(final_state)[0]
model_output = model.predict(final_state.reshape(-1, state_shape))[0]
action = self.list_to_GridAction(list(model_output))
next_state, reward, done, info = self.env.step(state=state, action=action)
cumulative_reward += reward
state = deepcopy(next_state)
logger.info(' Collected reward is: %d.' % (cumulative_reward))
return new_states, expert_actions
def add_new_data_in_experience(self, new_states, expert_actions):
self.inputs = np.concatenate((self.inputs, np.array(new_states)), axis=0)
if self.scale_outputs:
self.outputs = np.concatenate((self.outputs, self.scaler.transform(np.array(expert_actions))), axis=0)
else:
self.outputs = np.concatenate((self.outputs, np.array(expert_actions)), axis=0)
def list_to_GridAction(self, l):
charge = []
discharge = []
generation = {g.name: 0. for g in self.grid.generators if g.steerable}
for b in self.grid.storages:
charge.append(l[0])
l.pop(0)
for b in self.grid.storages:
discharge.append(l[0])
l.pop(0)
for g in self.grid.generators:
if g.steerable:
generation[g.name] = l[0] if l[0] >= 0.5 * g.min_stable_generation * g.capacity else 0.
l.pop(0)
assert (not l)
return GridAction(generation, charge, discharge)
def load_data(self):
with open(self.path_to_stored_experience + "/" + self.env.simulator.case + "_optimization_experience_" + str(
self.control_horizon) + ".p", "rb") as fp:
self.states, self.forecasts, self.actions = pickle.load(fp)
def process_data(self):
list_X = []
for state in self.states:
values = decode_GridState(state, self.features)
list_X.append(values)
if self.use_forecasts:
forecasts_list = []
for forecast in self.forecasts:
forecasts_list.append(np.concatenate(forecast))
self.inputs = np.concatenate((np.array(list_X), np.array(forecasts_list)),
axis=1) # [consumption, soc1, soc2, ..., PV production, date]
else:
self.inputs = np.array(list_X)
if self.scale_outputs:
self.scaler = MinMaxScaler()
max_generators = [g.capacity for g in self.grid.generators if g.steerable]
max_storages_charge = [b.capacity for b in self.grid.storages]
max_storages_discharge = [b.capacity for b in self.grid.storages]
self.scaler.fit([np.zeros(len(self.actions[0])),
np.array(max_storages_charge + max_storages_discharge + max_generators)])
self.outputs = self.scaler.transform(np.array(self.actions))
# [charge1, charge2 ,..., discharge1, discharge2, ..., genset1, genset2, ...]
else:
self.outputs = np.array(self.actions)
def create_models_from_dict(self, model_dict):
self.sl_models = dict()
for name, model in model_dict.items():
self.sl_models[name] = eval(model)
def store_model(self, model_name, model):
with open(self.path_to_store_models + "/" + model_name + "_" + str(self.control_horizon) + ".p", "wb") as f:
pickle.dump(model, f)
# Module-level hook: presumably looked up by the simulator framework to
# instantiate this agent -- confirm against the agent loader.
agent_type = SLAgent
|
################################################################################
# populate_obs_instrument_GB_occ.py
#
# Routines to populate fields specific to ground-based instruments.
################################################################################
import numpy as np
import julian
import pdsfile
from config_data import *
import import_util
from populate_obs_mission_groundbased_occ import *
from populate_util import *
################################################################################
# THESE NEED TO BE IMPLEMENTED FOR EVERY INSTRUMENT
################################################################################
### OBS_GENERAL TABLE ###
def _GB_file_spec_helper(**kwargs):
metadata = kwargs['metadata']
index_row = metadata.get('index_row', None)
if index_row is None:
return None
# Format: "/DATA/ESO1M/ES1_EPD.LBL"
file_spec = index_row['FILE_SPECIFICATION_NAME']
volume_id = kwargs['volume_id']
return volume_id + file_spec
def populate_obs_general_GB_opus_id_OCC(**kwargs):
    """Derive the OPUS ID from the file specification.

    Falls back to the file spec's basename (and logs a non-repeating error)
    when pdsfile cannot produce an OPUS ID.
    """
    file_spec = _GB_file_spec_helper(**kwargs)
    pds_file = pdsfile.PdsFile.from_filespec(file_spec)
    # Was a bare `except:` -- that also swallowed SystemExit/KeyboardInterrupt.
    # Narrow to Exception; the fallback path below still handles failures.
    try:
        opus_id = pds_file.opus_id
    except Exception:
        opus_id = None
    if not opus_id:
        import_util.log_nonrepeating_error(
            f'Unable to create OPUS_ID for FILE_SPEC "{file_spec}"')
        return file_spec.split('/')[-1]
    return opus_id
def populate_obs_general_GB_ring_obs_id_OCC(**kwargs):
    # Legacy ring_obs_id is not defined for ground-based occultations.
    return None

def populate_obs_general_GB_inst_host_id_OCC(**kwargs):
    # Generic "GB" (ground-based) pseudo instrument host.
    return 'GB'

def populate_obs_general_GB_data_type_OCC(**kwargs):
    return 'OCC'

def populate_obs_general_GB_time1_OCC(**kwargs):
    # Start time straight from the index table.
    return populate_time1_from_index(**kwargs)

def populate_obs_general_GB_time2_OCC(**kwargs):
    # Stop time straight from the index table.
    return populate_time2_from_index(**kwargs)

def populate_obs_general_GB_target_name_OCC(**kwargs):
    return helper_groundbased_target_name(**kwargs)

def populate_obs_general_GB_observation_duration_OCC(**kwargs):
    # Duration derived from the start/stop times by the shared helper.
    return populate_observation_duration_from_time(**kwargs)

def populate_obs_general_GB_quantity_OCC(**kwargs):
    # Occultations measure optical depth.
    return 'OPDEPTH'

def populate_obs_general_GB_observation_type_OCC(**kwargs):
    return 'OCC'

def populate_obs_pds_GB_note_OCC(**kwargs):
    return None

def populate_obs_general_GB_primary_file_spec_OCC(**kwargs):
    return _GB_file_spec_helper(**kwargs)

def populate_obs_pds_GB_primary_file_spec_OCC(**kwargs):
    return _GB_file_spec_helper(**kwargs)

def populate_obs_pds_GB_product_creation_time_OCC(**kwargs):
    return populate_product_creation_time_from_index(**kwargs)

# Format: "ESO1M-SR-APPH-4-OCC-V1.0"
def populate_obs_pds_GB_data_set_id_OCC(**kwargs):
    return populate_data_set_id_from_index_label(**kwargs)

# Format: "ES1_EGRESS"
def populate_obs_pds_GB_product_id_OCC(**kwargs):
    return populate_product_id_from_index(**kwargs)
# The shared helper returns (ra1, ra2, dec1, dec2); each populator selects one
# element.  NOTE(review): the helper is re-invoked for every field -- presumably
# cheap or cached upstream; verify before optimizing.
def populate_obs_general_GB_right_asc1_OCC(**kwargs):
    return populate_occ_ra_dec_helper_index_label(**kwargs)[0]

def populate_obs_general_GB_right_asc2_OCC(**kwargs):
    return populate_occ_ra_dec_helper_index_label(**kwargs)[1]

def populate_obs_general_GB_declination1_OCC(**kwargs):
    return populate_occ_ra_dec_helper_index_label(**kwargs)[2]

def populate_obs_general_GB_declination2_OCC(**kwargs):
    return populate_occ_ra_dec_helper_index_label(**kwargs)[3]
### OBS_TYPE_IMAGE TABLE ###

# Occultation profiles are not images, so every image-related field is null.
def populate_obs_type_image_GB_image_type_id_OCC(**kwargs):
    return None

def populate_obs_type_image_GB_duration_OCC(**kwargs):
    return None

def populate_obs_type_image_GB_levels_OCC(**kwargs):
    return None

def populate_obs_type_image_GB_lesser_pixel_size_OCC(**kwargs):
    return None

def populate_obs_type_image_GB_greater_pixel_size_OCC(**kwargs):
    return None
### OBS_WAVELENGTH TABLE ###

# Single-wavelength observations: wavelength1 == wavelength2.
def populate_obs_wavelength_GB_wavelength1_OCC(**kwargs):
    metadata = kwargs['metadata']
    index_label = metadata['index_label']
    wl = index_label['WAVELENGTH']  # microns
    return wl

def populate_obs_wavelength_GB_wavelength2_OCC(**kwargs):
    metadata = kwargs['metadata']
    index_label = metadata['index_label']
    wl = index_label['WAVELENGTH']  # microns
    return wl

def populate_obs_wavelength_GB_wave_res1_OCC(**kwargs):
    return None  # Not available

def populate_obs_wavelength_GB_wave_res2_OCC(**kwargs):
    return None  # Not available
def populate_obs_wavelength_GB_wave_no1_OCC(**kwargs):
    """Wavenumber (cm^-1) computed from WAVELENGTH (microns): 10000 / wl."""
    wavelength_um = kwargs['metadata']['index_label']['WAVELENGTH']
    return 10000 / wavelength_um

def populate_obs_wavelength_GB_wave_no2_OCC(**kwargs):
    """Wavenumber (cm^-1) computed from WAVELENGTH (microns): 10000 / wl."""
    wavelength_um = kwargs['metadata']['index_label']['WAVELENGTH']
    return 10000 / wavelength_um
def populate_obs_wavelength_GB_wave_no_res1_OCC(**kwargs):
    return None  # Not available

def populate_obs_wavelength_GB_wave_no_res2_OCC(**kwargs):
    return None  # Not available

def populate_obs_wavelength_GB_spec_flag_OCC(**kwargs):
    # Not a spectral observation.
    return 'N'

def populate_obs_wavelength_GB_spec_size_OCC(**kwargs):
    return None

def populate_obs_wavelength_GB_polarization_type_OCC(**kwargs):
    return 'NONE'
### OBS_OCCULTATION TABLE ###
def populate_obs_occultation_GB_occ_type_OCC(**kwargs):
    """These ground-based occultations are all stellar ('STE')."""
    return 'STE'

def populate_obs_occultation_GB_occ_dir_OCC(**kwargs):
    """Map the index row's OCCULTATION_DIRECTION to its one-letter code.

    Returns 'I', 'E', or 'B'; logs a non-repeating error and returns None
    for an unrecognized direction.
    """
    occ_dir = kwargs['metadata']['index_row']['OCCULTATION_DIRECTION']
    direction_codes = {'INGRESS': 'I', 'EGRESS': 'E', 'BOTH': 'B'}
    try:
        return direction_codes[occ_dir]
    except KeyError:
        import_util.log_nonrepeating_error(
            f'Unknown OCCULTATION_DIRECTION "{occ_dir}"')
        return None
def populate_obs_occultation_GB_body_occ_flag_OCC(**kwargs):
    # Pass through PLANETARY_OCCULTATION_FLAG from the supplemental index.
    metadata = kwargs['metadata']
    supp_index_row = metadata['supp_index_row']
    body_occ_flag = supp_index_row['PLANETARY_OCCULTATION_FLAG']
    return body_occ_flag

def populate_obs_occultation_GB_optical_depth_min_OCC(**kwargs):
    return None  # Not available

def populate_obs_occultation_GB_optical_depth_max_OCC(**kwargs):
    return None  # Not available

def populate_obs_occultation_GB_temporal_sampling_OCC(**kwargs):
    return None  # Not available

def populate_obs_occultation_GB_quality_score_OCC(**kwargs):
    # (import value, display value) pair.
    return ("UNASSIGNED", "Unassigned")
def populate_obs_occultation_GB_wl_band_OCC(**kwargs):
    """Classify the observation wavelength (microns) into a band:
    IR above 0.7, VIS in (0.4, 0.7], UV at or below 0.4."""
    wl = kwargs['metadata']['index_label']['WAVELENGTH']  # microns
    if wl > 0.7:
        return 'IR'
    return 'VIS' if wl > 0.4 else 'UV'
def populate_obs_occultation_GB_source_OCC(**kwargs):
    # Occulted star: (name, display name) pair, or None when the star
    # cannot be resolved by the shared helper.
    target_name, target_name_info = populate_star_name_helper_index_label(
        **kwargs)
    if target_name_info is None:
        return None
    return target_name, target_name_info[2]

def populate_obs_occultation_GB_host_OCC(**kwargs):
    metadata = kwargs['metadata']
    # NOTE(review): reads the 'supp_index_label' key into a variable named
    # supp_index_row -- confirm which metadata key is actually intended.
    supp_index_row = metadata['supp_index_label']
    insthost = supp_index_row['INSTRUMENT_HOST_NAME']
    return (insthost, insthost)
### OBS_RING_GEOMETRY TABLE ###
def populate_obs_ring_geometry_GB_ring_radius1_OCC(**kwargs):
    metadata = kwargs['metadata']
    supp_index_row = metadata['supp_index_row']
    radius1 = import_util.safe_column(supp_index_row, 'MINIMUM_RING_RADIUS')
    return radius1

def populate_obs_ring_geometry_GB_ring_radius2_OCC(**kwargs):
    metadata = kwargs['metadata']
    supp_index_row = metadata['supp_index_row']
    radius2 = import_util.safe_column(supp_index_row, 'MAXIMUM_RING_RADIUS')
    return radius2

def _radial_resolution_helper(**kwargs):
    # Shared by all four resolution fields below.
    metadata = kwargs['metadata']
    supp_index_row = metadata['supp_index_row']
    res = import_util.safe_column(supp_index_row, 'RADIAL_RESOLUTION')
    return res

def populate_obs_ring_geometry_GB_resolution1_OCC(**kwargs):
    return _radial_resolution_helper(**kwargs)

def populate_obs_ring_geometry_GB_resolution2_OCC(**kwargs):
    return _radial_resolution_helper(**kwargs)

def populate_obs_ring_geometry_GB_proj_resolution1_OCC(**kwargs):
    return _radial_resolution_helper(**kwargs)

def populate_obs_ring_geometry_GB_proj_resolution2_OCC(**kwargs):
    return _radial_resolution_helper(**kwargs)

# For a stellar occultation the star is directly behind the rings as seen
# from the observer, so the phase angle is 180 degrees by construction.
def populate_obs_ring_geometry_GB_phase1_OCC(**kwargs):
    return 180.

def populate_obs_ring_geometry_GB_phase2_OCC(**kwargs):
    return 180.

def _incidence_helper(**kwargs):
    # Shared by the four incidence fields below.
    metadata = kwargs['metadata']
    supp_index_row = metadata['supp_index_row']
    inc = import_util.safe_column(supp_index_row, 'INCIDENCE_ANGLE')
    return inc

def populate_obs_ring_geometry_GB_incidence1_OCC(**kwargs):
    return _incidence_helper(**kwargs)

def populate_obs_ring_geometry_GB_incidence2_OCC(**kwargs):
    return _incidence_helper(**kwargs)

def populate_obs_ring_geometry_GB_center_phase1_OCC(**kwargs):
    return 180.

def populate_obs_ring_geometry_GB_center_phase2_OCC(**kwargs):
    return 180.

def populate_obs_ring_geometry_GB_center_incidence1_OCC(**kwargs):
    return _incidence_helper(**kwargs)

def populate_obs_ring_geometry_GB_center_incidence2_OCC(**kwargs):
    return _incidence_helper(**kwargs)
def populate_obs_ring_geometry_GB_ring_intercept_time1_OCC(**kwargs):
    return populate_time1_from_index(column='RING_EVENT_START', **kwargs)

def populate_obs_ring_geometry_GB_ring_intercept_time2_OCC(**kwargs):
    # NOTE(review): uses populate_time1_from_index with an explicit STOP
    # column rather than populate_time2_from_index -- presumably the helper
    # simply parses the named column; confirm.
    return populate_time1_from_index(column='RING_EVENT_STOP', **kwargs)
################################################################################
# THESE NEED TO BE IMPLEMENTED FOR EVERY GROUND-BASED INSTRUMENT
################################################################################
################################################################################
# THESE ARE SPECIFIC TO OBS_INSTRUMENT_GB
################################################################################
|
"""
Backend for django cache
"""
import socket
from functools import wraps
from django.core.cache import InvalidCacheBackendError
from django.core.cache.backends.memcached import PyLibMCCache
from .cluster_utils import get_cluster_info
def invalidate_cache_after_error(f):
    """
    Decorator for cache-backend methods: if the wrapped call raises, drop
    the cached cluster node list (so the next call re-discovers the
    cluster) and re-raise the original exception.
    """
    @wraps(f)
    def guarded(self, *call_args, **call_kwargs):
        try:
            return f(self, *call_args, **call_kwargs)
        except Exception:
            # The node list may be stale after a failure; flush it first.
            self.clear_cluster_nodes_cache()
            raise
    return guarded
class ElastiCache(PyLibMCCache):
    """
    Django cache backend for Amazon ElastiCache (memcached) with
    auto-discovery: the configured Configuration Endpoint is queried for
    the full cluster node list, and pylibmc is used in binary mode.
    """
    def __init__(self, server, params):
        self.update_params(params)
        super(ElastiCache, self).__init__(server, params)
        # Auto discovery requires exactly one server: the Configuration
        # Endpoint, given as "IP:port".
        if len(self._servers) > 1:
            raise InvalidCacheBackendError(
                'ElastiCache should be configured with only one server '
                '(Configuration Endpoint)')
        if len(self._servers[0].split(':')) != 2:
            raise InvalidCacheBackendError(
                'Server configuration should be in format IP:port')
        # Forwarded to get_cluster_info(); controls whether discovery
        # failures are tolerated.
        self._ignore_cluster_errors = self._options.get(
            'IGNORE_CLUSTER_ERRORS', False)

    def update_params(self, params):
        """
        Update connection params (in place) to maximize performance:
        force binary mode and set pylibmc 'behaviors' defaults.
        """
        if not params.get('BINARY', True):
            # NOTE(review): this raises the Warning class (aborting backend
            # construction) rather than calling warnings.warn -- confirm
            # that hard failure is intended.
            raise Warning('To increase performance please use ElastiCache'
                          ' in binary mode')
        else:
            params['BINARY'] = True  # patch params, set binary mode
        if 'OPTIONS' not in params:
            # set special 'behaviors' pylibmc attributes
            params['OPTIONS'] = {
                'tcp_nodelay': True,
                'ketama': True
            }

    def clear_cluster_nodes_cache(self):
        """Clear the internal cache holding the list of cluster nodes."""
        if hasattr(self, '_cluster_nodes_cache'):
            del self._cluster_nodes_cache

    def get_cluster_nodes(self):
        """
        Return the list of all nodes in the cluster, discovering them via
        the Configuration Endpoint on first use and caching the result.
        """
        if not hasattr(self, '_cluster_nodes_cache'):
            server, port = self._servers[0].split(':')
            try:
                self._cluster_nodes_cache = (
                    get_cluster_info(server, port,
                                     self._ignore_cluster_errors)['nodes'])
            except (socket.gaierror, socket.timeout) as err:
                raise Exception('Cannot connect to cluster {0} ({1})'.format(
                    self._servers[0], err
                ))
        return self._cluster_nodes_cache

    @property
    def _cache(self):
        # PylibMC uses cache options as the 'behaviors' attribute.
        # It also needs to use threadlocals, because some versions of
        # PylibMC don't play well with the GIL.
        # instance to store cached version of client:
        # in Django 1.7 use self
        # in Django < 1.7 use thread local
        container = getattr(self, '_local', self)
        client = getattr(container, '_client', None)
        if client:
            return client
        # Build the client against the discovered node list, not the
        # Configuration Endpoint itself.
        client = self._lib.Client(self.get_cluster_nodes())
        if self._options:
            # In Django 1.11, all behaviors are shifted into a behaviors dict.
            # Attempt to get from there, and fall back to old behavior if the
            # behaviors key does not exist.
            client.behaviors = self._options.get('behaviors', self._options)
        container._client = client
        return client

    # Each public cache operation flushes the node-list cache on error so a
    # topology change is picked up on the next call.
    @invalidate_cache_after_error
    def get(self, *args, **kwargs):
        return super(ElastiCache, self).get(*args, **kwargs)

    @invalidate_cache_after_error
    def get_many(self, *args, **kwargs):
        return super(ElastiCache, self).get_many(*args, **kwargs)

    @invalidate_cache_after_error
    def set(self, *args, **kwargs):
        return super(ElastiCache, self).set(*args, **kwargs)

    @invalidate_cache_after_error
    def set_many(self, *args, **kwargs):
        return super(ElastiCache, self).set_many(*args, **kwargs)

    @invalidate_cache_after_error
    def delete(self, *args, **kwargs):
        return super(ElastiCache, self).delete(*args, **kwargs)
|
#!/usr/bin/env python3
# blinkt/display-led-test.py 3.374.570 2019-01-23T22:16:37.530539-06:00 (CST) https://github.com/BradleyA/pi-display uadmin six-rpi3b.cptx86.com 3.373
# blinkt/display-led-test.py update with --> production standard 1-5 include Copyright notice close #67
# blinkt/display-led-test.py 3.373.569 2019-01-23T21:51:47.651894-06:00 (CST) https://github.com/BradleyA/pi-display uadmin six-rpi3b.cptx86.com 3.372-6-g8d92fb5
# second pass to add production standards
# blinkt/display-led-test.py 3.372.562 2019-01-23T21:36:59.212435-06:00 (CST) https://github.com/BradleyA/pi-display uadmin six-rpi3b.cptx86.com 3.371
# first pass to add production standards
# blinkt/display-led-test.py 3.371.561 2019-01-23T21:13:33.503303-06:00 (CST) https://github.com/BradleyA/pi-display uadmin six-rpi3b.cptx86.com 3.370
# adjust timimg
# blinkt/display-led-test.py 3.369.559 2019-01-23T20:42:21.093603-06:00 (CST) https://github.com/BradleyA/pi-display uadmin six-rpi3b.cptx86.com 3.368
# added led color test during start
# blinkt/display-led-test.py 3.271.430 2019-01-03T14:49:30.532723-06:00 (CST) https://github.com/BradleyA/pi-display.git uadmin six-rpi3b.cptx86.com 3.270
# rename scrollphat.test.py to display-scrollphat-test.py
# larson-1.py 3.175.317 2018-09-29_21:47:03_CDT https://github.com/BradleyA/pi-display uadmin six-rpi3b.cptx86.com 3.174
# add green after red
# larson-1.py 3.174.316 2018-09-29_21:36:57_CDT https://github.com/BradleyA/pi-display uadmin six-rpi3b.cptx86.com 3.173
# include larson-1.py
### display-led-test.py - from larson.py
# Copyright (c) 2019 <NAME>
# License is in the online DOCUMENTATION, DOCUMENTATION URL defined below.
###
# production standard 5
import sys
import datetime
import time
import os
import math
import colorsys
from blinkt import set_clear_on_exit, set_pixel, show, set_brightness
# Order of precedence: environment variable (export DEBUG=1), default code
DEBUG = int(os.getenv("DEBUG", 0))  # Set DEBUG, 0 = debug off, 1 = debug on, 'unset DEBUG' to unset environment variable (bash)
###
class color:
    # ANSI escape sequences used to emphasise parts of console messages.
    BOLD = '\033[1m'
    END = '\033[0m'
###
# Locale string, e.g. "en_US.UTF-8"; selects which help translation to show.
LANGUAGE = os.getenv("LANG")

def display_help():
    """Print usage, description (localized where available), environment
    variables, and documentation URL, then return."""
    # NOTE(review): the help text contains typos ("fron crontab") -- they are
    # user-visible strings, left unchanged here.
    print("\n{} - Test leds".format(__file__))
    print("\nUSAGE\n   {}".format(__file__))
    print("   {} [--help | -help | help | -h | h | -?]".format(__file__))
    print("   {} [--version | -version | -v]".format(__file__))
    print("\nDESCRIPTION")
    # Displaying help DESCRIPTION in English en_US.UTF-8
    print("This script tests the leds during system boot.  The script can be run on the")
    print("command line.  It is designed to be run fron crontab during system boot.  It")
    print("is configured using, crontab -e")
    # Displaying help DESCRIPTION in French
    if (LANGUAGE == "fr_CA.UTF-8") or (LANGUAGE == "fr_FR.UTF-8") or (LANGUAGE == "fr_CH.UTF-8"):
        print("\n--> {}".format(LANGUAGE))
        print("<votre aide va ici>")
        print("Souhaitez-vous traduire la section description?")
    elif (LANGUAGE != "en_US.UTF-8"):
        print("{}{} {} {}[{}] {} {} {} {}:{} {}[INFO]{}  {} is not supported, Would you like to help translate the description section?".format(color.END, get_date_stamp(), LOCALHOST, __file__, os.getpid(), SCRIPT_VERSION, get_line_no(), USER, UID, GID, color.BOLD, color.END, LANGUAGE))
    print("\nEnvironment Variables")
    print("If using the bash shell, enter; 'export DEBUG=1' on the command line to set")
    print("the DEBUG environment variable to '1' (0 = debug off, 1 = debug on).  Use the")
    print("command, 'unset DEBUG' to remove the exported information from the DEBUG")
    print("environment variable.  You are on your own defining environment variables if")
    print("you are using other shells.")
    print("   DEBUG    (default '0')")
    print("\nDOCUMENTATION\n   https://github.com/BradleyA/pi-display/tree/master/blinkt")
    return
# Line number function
from inspect import currentframe
def get_line_no():
    """Return the source line number of the caller (used in log messages)."""
    return currentframe().f_back.f_lineno
# Date and time function ISO 8601
def get_date_stamp():
    """Return the current local time as an ISO 8601 string with a trailing
    " (TZ)" timezone-name suffix."""
    # Offset accounts for daylight saving time when currently in effect.
    offset_seconds = time.altzone if time.localtime().tm_isdst else time.timezone
    local_tz = datetime.timezone(offset=datetime.timedelta(seconds=-offset_seconds))
    return datetime.datetime.now().replace(tzinfo=local_tz).isoformat() + time.strftime(" (%Z)")
# Fully qualified domain name
from socket import getfqdn
# FQDN hostname
# FQDN hostname
LOCALHOST = getfqdn()

# Version: lines 1-2 of this very file encode "name version ..." in a comment;
# parse them back out for logging.
with open(__file__) as f:
    f.readline()
    line2 = f.readline()
    line2 = line2.split()
    SCRIPT_NAME = line2[1]
    SCRIPT_VERSION = line2[2]
    f.close()  # redundant: the with-statement already closes the file

# Set user variables
if "LOGNAME" in os.environ: LOGNAME = os.getenv("LOGNAME")  # Added three lines because USER is not defined in crobtab jobs
if "USER" in os.environ: USER = os.getenv("USER")
else: USER = LOGNAME
# NOTE(review): if neither USER nor LOGNAME is set, the else branch raises
# NameError on LOGNAME -- confirm that environment is impossible here.
#
UID = os.getuid()
GID = os.getgid()
if DEBUG == 1: print("{}{} {} {}[{}] {} {} {} {}:{} {}[DEBUG]{}  Setting USER to support crobtab...".format(color.END, get_date_stamp(), LOCALHOST, __file__, os.getpid(), SCRIPT_VERSION, get_line_no(), USER, UID, GID, color.BOLD, color.END))
# Default help and version arguments: only a single optional argument is
# recognized; anything else falls through to the LED test.
no_arguments = int(len(sys.argv))
if no_arguments == 2:
    # Default help output
    if sys.argv[1] == '--help' or sys.argv[1] == '-help' or sys.argv[1] == 'help' or sys.argv[1] == '-h' or sys.argv[1] == 'h' or sys.argv[1] == '-?':
        display_help()
        sys.exit()
    # Default version output
    if sys.argv[1] == '--version' or sys.argv[1] == '-version' or sys.argv[1] == 'version' or sys.argv[1] == '-v':
        print("{} {}".format(SCRIPT_NAME, SCRIPT_VERSION))
        sys.exit()
# Begin script INFO
print("{}{} {} {}[{}] {} {} {} {}:{} {}[INFO]{}  Started...".format(color.END, get_date_stamp(), LOCALHOST, __file__, os.getpid(), SCRIPT_VERSION, get_line_no(), USER, UID, GID, color.BOLD, color.END))
# DEBUG example
from platform import python_version
#
if DEBUG == 1: print("{}{} {} {}[{}] {} {} {} {}:{} {}[DEBUG]{}  Version of python >{}<".format(color.END, get_date_stamp(), LOCALHOST, __file__, os.getpid(), SCRIPT_VERSION, get_line_no(), USER, UID, GID, color.BOLD, color.END, python_version()))
###
# LED test: sweep red, then green (Larson-scanner style), then a rainbow.
set_clear_on_exit()
# Brightness profile for the moving "eye"; the 8 visible LEDs read a
# sliding window of this list.
reds = [0, 0, 0, 0, 0, 16, 64, 255, 64, 16, 0, 0, 0, 0, 0]
start_time = time.time()
# Red
if DEBUG == 1: print("{}{} {} {}[{}] {} {} {} {}:{} {}[DEBUG]{}  Test Red led".format(color.END, get_date_stamp(), LOCALHOST, __file__, os.getpid(), SCRIPT_VERSION, get_line_no(), USER, UID, GID, color.BOLD, color.END))
for count in range(83):
    # Bounce the window back and forth across the strip over time.
    delta = (time.time() - start_time) * 16
    offset = int(abs((delta % 16) - 8))
    for i in range(8):
        set_pixel(i, reds[offset + i], 0, 0)
    show()
    time.sleep(0.1)
# Green
if DEBUG == 1: print("{}{} {} {}[{}] {} {} {} {}:{} {}[DEBUG]{}  Test Green led".format(color.END, get_date_stamp(), LOCALHOST, __file__, os.getpid(), SCRIPT_VERSION, get_line_no(), USER, UID, GID, color.BOLD, color.END))
for count in range(37):
    delta = (time.time() - start_time) * 16
    offset = int(abs((delta % 16) - 8))
    for i in range(8):
        set_pixel(i, 0, reds[offset + i], 0)
    show()
    time.sleep(0.1)
# Rainbow
if DEBUG == 1: print("{}{} {} {}[{}] {} {} {} {}:{} {}[DEBUG]{}  Test Rainbow led".format(color.END, get_date_stamp(), LOCALHOST, __file__, os.getpid(), SCRIPT_VERSION, get_line_no(), USER, UID, GID, color.BOLD, color.END))
set_brightness(0.1)
spacing = 360.0 / 16.0
hue = 0
x = 0
j = 1
#
for j in range(2300):
    # Hue advances with wall-clock time; each pixel is offset along the hue wheel.
    hue = int(time.time() * 100) % 360
    for x in range(8):
        offset = x * spacing
        h = ((hue + offset) % 360) / 360.0
        r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)]
        set_pixel(x, r, g, b)
    show()
    time.sleep(0.001)
# Done
print("{}{} {} {}[{}] {} {} {} {}:{} {}[INFO]{}  Operation finished.".format(color.END, get_date_stamp(), LOCALHOST, __file__, os.getpid(), SCRIPT_VERSION, get_line_no(), USER, UID, GID, color.BOLD, color.END))
###
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals, print_function)
"""
==============================
PyOrganism Regulatory Elements
==============================
:Authors:
<NAME>
:Date:
2012-06-08
:Copyright:
Copyright(c) 2012 Jacobs University of Bremen. All rights reserved.
:File:
elements.py
"""
__all__ = ["Gene", "Product", "Regulator", "TranscriptionFactor", "SigmaFactor",
"NucleoidAssociatedProtein", "Promoter", "TranscriptionUnit", "Operon",
"Conformation", "clear_memory"]
import sys
import logging
from .. import miscellaneous as misc
from ..base import UniqueBase
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(misc.NullHandler())
class Gene(UniqueBase):
    """A gene with genomic position/sequence attributes and links to its
    product, regulatory product, transcription units, and operons."""

    def __init__(self, unique_id="", name="", bnumber="", synonyms=None,
                 position_start=None, position_end=None, strand=None, sequence=None,
                 gc_content=None, product=None, regulatory_product=None, **kw_args):
        super(Gene, self).__init__(unique_id=unique_id, **kw_args)
        self.name = name
        self.bnumber = bnumber  # Blattner "b-number" identifier
        self.synonyms = misc.convert(synonyms, set, set())
        self.position_start = misc.convert(position_start, int)
        self.position_end = misc.convert(position_end, int)
        self.position = (self.position_start, self.position_end)
        self.strand = strand
        self.sequence = sequence
        self.gc_content = misc.convert(gc_content, float)
        self.product = product
        self.regulatory_product = regulatory_product
        # Populated externally as the network is assembled.
        self.transcription_units = set()
        self.operons = set()

    def __contains__(self, name):
        """True if *name* matches the unique id, name, bnumber, or any synonym."""
        if name == self.unique_id:
            return True
        elif name == self.name:
            return True
        # need substring test for bnumber for entries with additional info
        # NOTE(review): despite the comment above, this is an exact-equality
        # test, not a substring test -- confirm which is intended.
        elif name == self.bnumber:
            return True
        elif self.synonyms and any(name == syn for syn in self.synonyms if syn):
            return True
        else:
            return False

    def get_transcription_units(self):
        return self.transcription_units

    def get_operons(self):
        return self.operons

    def print_info(self, stream=sys.stdout):
        """Write a short human-readable summary of this gene to *stream*."""
        print("ECK12:", self.unique_id, file=stream)
        print("name:", self.name, file=stream)
        print("bnumber:", self.bnumber, file=stream)
        print("synonyms:", self.synonyms, file=stream)
        print("position:", self.position, file=stream)
class Product(UniqueBase):
    """A gene product (protein) with physical properties and a link back to
    the gene(s) it is coded from."""

    def __init__(self, unique_id="", name="", molecular_weight=None,
                 isoelectric_point=None, synonyms=None, go=None, coded_from=None,
                 **kw_args):
        super(Product, self).__init__(unique_id=unique_id, **kw_args)
        self.name = name
        self.molecular_weight = misc.convert(molecular_weight, float)
        self.isoelectric_point = misc.convert(isoelectric_point, float)
        self.synonyms = misc.convert(synonyms, set, set())
        self.go = go  # Gene Ontology identifier
        # NOTE(review): stored as-is (not converted to a set as in Regulator),
        # but iterated over below -- callers presumably pass an iterable.
        self.coded_from = coded_from

    def __contains__(self, name):
        """True if *name* matches the unique id, name, a synonym, or the GO id."""
        if name == self.unique_id:
            return True
        elif name == self.name:
            return True
        elif self.synonyms and any(name == syn for syn in self.synonyms if syn):
            return True
        elif name == self.go:
            return True
        else:
            return False

    def get_transcription_units(self):
        # Union of the transcription units of all coding genes.
        return set(tu for gene in self.coded_from for tu in gene.transcription_units)

    def get_operons(self):
        # Union of the operons of all coding genes.
        return set(op for gene in self.coded_from for op in gene.operons)

    def print_info(self, stream=sys.stdout):
        """Write a short human-readable summary of this product to *stream*."""
        print("ECK12:", self.unique_id, file=stream)
        print("name:", self.name, file=stream)
        print("synonyms:", self.synonyms, file=stream)
        print(self.go, file=stream)
class Regulator(UniqueBase):
    """A regulatory element (base class for transcription factors, sigma
    factors, and nucleoid-associated proteins) with links to the genes it is
    coded and made from."""

    def __init__(self, unique_id="", name="", synonyms=None, go=None,
                 coded_from=None, made_from=None, **kw_args):
        super(Regulator, self).__init__(unique_id=unique_id, **kw_args)
        self.name = name
        self.synonyms = misc.convert(synonyms, set, set())
        self.go = go  # Gene Ontology identifier
        self.coded_from = misc.convert(coded_from, set, set())
        self.made_from = misc.convert(made_from, set, set())

    def __contains__(self, name):
        """True if *name* matches the unique id, name, a synonym, or the GO id."""
        if name == self.unique_id:
            return True
        elif name == self.name:
            return True
        # Idiom fix: `x is not None` instead of `not x is None` (PEP 8);
        # behavior is unchanged.
        elif self.synonyms is not None and any(name == syn for syn in
                                               self.synonyms if syn):
            return True
        elif name == self.go:
            return True
        else:
            return False

    def get_transcription_units(self):
        # Union of the transcription units of all coding genes.
        return set(tu for gene in self.coded_from for tu in gene.transcription_units)

    def get_operons(self):
        # Union of the operons of all coding genes.
        return set(op for gene in self.coded_from for op in gene.operons)

    def print_info(self, stream=sys.stdout):
        """Write a short human-readable summary of this regulator to *stream*."""
        print("ECK12:", self.unique_id, file=stream)
        print("name:", self.name, file=stream)
        print("synonyms:", self.synonyms, file=stream)
        print(self.go, file=stream)
class TranscriptionFactor(Regulator):
    """Regulator subtype that additionally tracks its set of conformations."""
    def __init__(self, unique_id="", conformations=None, **kw_args):
        super(TranscriptionFactor, self).__init__(unique_id=unique_id, **kw_args)
        self.conformations = misc.convert(conformations, set, set())

class SigmaFactor(Regulator):
    """Regulator subtype; adds no extra state beyond Regulator."""
    def __init__(self, unique_id="", **kw_args):
        super(SigmaFactor, self).__init__(unique_id=unique_id, **kw_args)

class NucleoidAssociatedProtein(Regulator):
    """Regulator subtype; adds no extra state beyond Regulator."""
    def __init__(self, unique_id="", **kw_args):
        super(NucleoidAssociatedProtein, self).__init__(unique_id=unique_id,
                                                        **kw_args)
class Conformation(UniqueBase):
    """A particular conformation of a transcription factor (its state,
    type, interaction, and apo/holo form)."""
    def __init__(self, unique_id="", name="", tf=None, state=None,
                 conformation_type=None, interaction=None, apo_holo=None, **kw_args):
        super(Conformation, self).__init__(unique_id=unique_id, **kw_args)
        self.name = name
        self.t_factor = tf          # owning TranscriptionFactor
        self.final_state = state
        self.type = conformation_type
        self.interaction = interaction
        self.apo_holo = apo_holo    # apo (without ligand) vs. holo (with ligand)
class Promoter(UniqueBase):
    """A promoter with strand, +1 transcription-start position, sequence,
    and the sigma factor(s) that recognize it."""
    def __init__(self, unique_id="", name="", strand=None, pos_1=None,
                 sequence=None, sigma_factor=None, note=None, **kw_args):
        super(Promoter, self).__init__(unique_id=unique_id,
                                       **kw_args)
        self.name = name
        self.strand = strand
        self.pos_1 = misc.convert(pos_1, int)
        # Stored as a list despite the singular parameter name: a promoter
        # may be recognized by more than one sigma factor.
        self.sigma_factor = misc.convert(sigma_factor, list, list())
        self.sequence = sequence
        self.note = note

    def print_info(self, stream=sys.stdout):
        """Write a short human-readable summary of this promoter to *stream*."""
        print("ECK12:", self.unique_id, file=stream)
        print("name:", self.name, file=stream)
class TranscriptionUnit(UniqueBase):
    """A promoter-driven unit of transcription comprising a list of genes."""

    def __init__(self, unique_id="", name="", promoter=None, operon=None,
            genes=None, **kw_args):
        super(TranscriptionUnit, self).__init__(unique_id=unique_id, **kw_args)
        self.name = name
        self.promoter = promoter
        self.operon = operon
        # genes is normalised to a list (empty when None)
        self.genes = misc.convert(genes, list, list())

    def __len__(self):
        """Number of genes in this transcription unit."""
        return len(self.genes)

    def print_info(self, stream=sys.stdout):
        """Write a short human-readable summary to `stream`."""
        print("ECK12:", self.unique_id, file=stream)
        print("name:", self.name, file=stream)
        gene_names = (gene.name if gene.name else "?" for gene in self.genes)
        print("Genes:", ", ".join(gene_names), file=stream)
class Operon(UniqueBase):
    """An operon: a strand-located cluster of genes with its promoters."""

    def __init__(self, unique_id="", name="", strand=None, promoters=None, genes=None,
            gene_position_start=None, gene_position_end=None,
            regulation_position_start=None, regulation_position_end=None, **kw_args):
        super(Operon, self).__init__(unique_id=unique_id, **kw_args)
        self.name = name
        self.strand = strand
        # Genomic coordinates are normalised to int (None passes through).
        self.gene_position_start = misc.convert(gene_position_start, int)
        self.gene_position_end = misc.convert(gene_position_end, int)
        self.regulation_position_start = misc.convert(regulation_position_start, int)
        self.regulation_position_end = misc.convert(regulation_position_end, int)
        # Container attributes default to empty collections.
        self.promoters = misc.convert(promoters, set, set())
        self.genes = misc.convert(genes, list, list())

    def __len__(self):
        """Number of genes in the operon."""
        return len(self.genes)

    def print_info(self, stream=sys.stdout):
        """Write a short human-readable summary to `stream`."""
        print("ECK12:", self.unique_id, file=stream)
        print("name:", self.name, file=stream)
        gene_names = (gene.name if gene.name else "?" for gene in self.genes)
        print("Genes:", ", ".join(gene_names), file=stream)
def clear_memory():
    """Drop the cached instances of every RegulonDB entity class."""
    entity_classes = (Gene, Product, Regulator, TranscriptionFactor,
                      SigmaFactor, NucleoidAssociatedProtein, Conformation,
                      Promoter, TranscriptionUnit, Operon)
    for klass in entity_classes:
        klass.clear()
# ---- end of file ----
# filename: kcs/steering/__main__.py
"""Calculate the "steering" table
Calculates the ranges for a given dataset, where it matches the CMIP
distribution for yearly temperature change for a given scenario(s).
For example, if the scenario is 90% (percentile), 2050, it will yield
a year-range for the given input data, where the average of that
year-range matches the 90% value in 2050 of the CMIP data. It will
also produce a scale factor to indicate how far off it is, which
should be close to 1. (This value may be significant, since the
year-range is rounded to years, or one may have to go beyond the range
of input data, if e.g. the requested range is large.)
Example usage:
$ python -m kcs.steering tas_change.csv @extra-tas-global-averaged.list \
--scenario G 2050 10 --scenario W 2050 90 \
--scenario G 2085 10 --scenario W 2085 90 \
--rolling-mean 10 --outfile steering.csv
"""
import sys
import argparse
import logging
import pathlib
import itertools
import pandas as pd
import iris
from ..config import read_config, default_config
from ..utils.argparse import parser as kcs_parser
from ..utils.logging import setup as setup_logging
from ..utils.atlist import atlist
from ..utils.attributes import get as get_attrs
from .core import calc
# Module-level logger for the steering subpackage.
logger = logging.getLogger('steering')  # pylint: disable=invalid-name
def read_data(paths, info_from=('attributes', 'filename'),
              attributes=None, filename_pattern=None):
    """Load one Iris cube per path and pair it with dataset attributes.

    Attributes are extracted from cube metadata and/or the filename
    (per `info_from`); returns the structure produced by `get_attrs`.
    """
    loaded = []
    for path in paths:
        loaded.append(iris.load_cube(str(path)))
    # Build the cube/attribute table in one step.
    return get_attrs(loaded, paths, info_from=info_from,
                     attributes=attributes, filename_pattern=filename_pattern)
def parse_args():
    """Parse command-line arguments for the steering-table calculation.

    Also configures logging, reads the configuration file, and normalises
    the parsed namespace: file names become pathlib.Path objects, each
    repeated --scenario triple becomes a dict, and a missing reference
    period falls back to the configured control period.

    Returns:
        argparse.Namespace with extra `paths` and `scenarios` attributes.

    Raises:
        ValueError: if --rounding is given but not positive.
    """
    parser = argparse.ArgumentParser(parents=[kcs_parser],
                                     conflict_handler='resolve')
    parser.add_argument('csv', help="CSV file with distribution percentiles.")
    parser.add_argument('files', nargs='+', help="model of interest datasets")
    parser.add_argument('--scenario', required=True, nargs=3, action='append',
                        help="Specify a scenario. Takes three arguments: name, "
                        "epoch and percentile. This option is required, and can "
                        "be used multiple times. Examples would '--scenario G 2050 10', "
                        "'--scenario H 2085 90'.")
    parser.add_argument('--outfile', help="Output CSV file to write the steering "
                        "table to. If not given, write to standard output.")
    parser.add_argument('--timespan', type=int, default=30,
                        help="Timespan around epoch(s) given in the scenario(s), "
                        "in years. Default is 30 years.")
    parser.add_argument('--rolling-mean', default=0, type=int,
                        help="Apply a rolling mean to the percentile distribution "
                        "before computing the scenario temperature increase match. "
                        "Takes one argument, the window size (in years).")
    parser.add_argument('--rounding', type=float,
                        help="Round the matched temperature increase to a multiple "
                        "of this value, which should be a positive floating point "
                        "value. Default is not to round")
    parser.add_argument('--reference-period', nargs=2, type=int,
                        help="Reference period (to normalize our model of interestdata): "
                        "start and end year. Years are inclusive (i.e., Jan 1 of 'start' "
                        "up to and including Dec 31 of 'end').")
    args = parser.parse_args()
    setup_logging(args.verbosity)
    read_config(args.config)
    # Default the reference period to the configured control period.
    if not args.reference_period:
        args.reference_period = default_config['data']['extra']['control_period']
    args.paths = [pathlib.Path(filename) for filename in args.files]
    # Each scenario triple becomes {'name': ..., 'epoch': ..., 'percentile': ...}.
    args.scenarios = [dict(zip(('name', 'epoch', 'percentile'), scenario))
                      for scenario in args.scenario]
    if args.rounding is not None:
        if args.rounding <= 0:
            raise ValueError('--rounding should be a positive number')
    return args
def main():
    """Entry point: compute and emit the steering table.

    Reads the CMIP percentile-distribution CSV and the model-of-interest
    datasets, matches each requested scenario via `calc`, and writes the
    resulting table to --outfile (CSV) or standard output.
    """
    args = parse_args()
    logger.debug("%s", " ".join(sys.argv))
    logger.debug("Args: %s", args)
    # Expand any @-list files into the individual paths they reference.
    paths = list(itertools.chain.from_iterable(atlist(path) for path in args.paths))
    dataset = read_data(paths)
    percentiles = pd.read_csv(args.csv, index_col=0)
    percentiles.index = pd.to_datetime(percentiles.index)
    steering = calc(dataset, percentiles, args.scenarios, timespan=args.timespan,
                    rolling_mean=args.rolling_mean, rounding=args.rounding,
                    reference_period=args.reference_period)
    steering = pd.DataFrame(steering)
    if args.outfile:
        steering.to_csv(args.outfile, index=False)
    else:
        print(steering)
    logger.info("Done processing: steering table = %s", steering)


if __name__ == '__main__':
    main()
|
import find_mxnet
import argparse,logging,os
import mxnet as mx
from symbol_resnet import resnet
import socket
import getpass
def multi_factor_scheduler(begin_epoch, epoch_size, step=(60, 75, 90), factor=0.1):
    """Build an MXNet MultiFactorScheduler relative to a resume epoch.

    Args:
        begin_epoch: epoch training (re)starts from; milestones at or
            before it are dropped.
        epoch_size: number of batches per epoch.
        step: epoch milestones at which the learning rate is scaled
            (tuple default replaces the original mutable-list default).
        factor: multiplicative LR decay applied at each milestone.

    Returns:
        mx.lr_scheduler.MultiFactorScheduler, or None when every milestone
        lies at or before `begin_epoch`.
    """
    # Convert remaining epoch milestones to iteration counts relative to
    # the resume point.
    step_ = [epoch_size * (x - begin_epoch) for x in step if x - begin_epoch > 0]
    return mx.lr_scheduler.MultiFactorScheduler(step=step_, factor=factor) if step_ else None
def main():
    """Train a ResNet (v2) on cifar10 or imagenet with MXNet.

    Relies on the module-level `args` namespace (parsed in __main__):
    builds the network symbol for the requested depth/dataset, sets up
    logging, partitions the training data according to the kv-store type,
    creates the record iterators, and runs FeedForward.fit with optional
    checkpointing.

    NOTE(review): this script appears to target Python 2 — several `/`
    divisions below would yield floats under Python 3; confirm.
    """
    # ---- build the network symbol for the requested dataset/depth ----
    if args.data_type == "cifar10":
        args.aug_level = 1
        args.num_classes = 10
        # depth should be one of 110, 164, 1001,...,which is should fit (args.depth-2)%9 == 0
        if((args.depth-2)%9 == 0 and args.depth >= 164):
            per_unit = [(args.depth-2)/9]
            filter_list = [16, 64, 128, 256]
            bottle_neck = True
        elif((args.depth-2)%6 == 0 and args.depth < 164):
            per_unit = [(args.depth-2)/6]
            filter_list = [16, 16, 32, 64]
            bottle_neck = False
        else:
            raise ValueError("no experiments done on detph {}, you can do it youself".format(args.depth))
        units = per_unit*3
        symbol = resnet(units=units, num_stage=3, filter_list=filter_list, num_class=args.num_classes,
                        data_type="cifar10", bottle_neck = bottle_neck, bn_mom=args.bn_mom, workspace=args.workspace,
                        memonger=args.memonger)
    elif args.data_type == "imagenet":
        args.num_classes = 1000
        # Standard ResNet stage configurations per depth.
        if args.depth == 18:
            units = [2, 2, 2, 2]
        elif args.depth == 34:
            units = [3, 4, 6, 3]
        elif args.depth == 50:
            units = [3, 4, 6, 3]
        elif args.depth == 101:
            units = [3, 4, 23, 3]
        elif args.depth == 152:
            units = [3, 8, 36, 3]
        elif args.depth == 200:
            units = [3, 24, 36, 3]
        elif args.depth == 269:
            units = [3, 30, 48, 8]
        else:
            raise ValueError("no experiments done on detph {}, you can do it youself".format(args.depth))
        # Bottleneck blocks and wide filters only for depth >= 50.
        symbol = resnet(units=units, num_stage=4, filter_list=[64, 256, 512, 1024, 2048] if args.depth >=50
                        else [64, 64, 128, 256, 512], num_class=args.num_classes, data_type="imagenet", bottle_neck = True
                        if args.depth >= 50 else False, bn_mom=args.bn_mom, workspace=args.workspace,
                        memonger=args.memonger)
    else:
        raise ValueError("do not support {} yet".format(args.data_type))
    kv = mx.kvstore.create(args.kv_store)
    devs = mx.cpu() if args.gpus is None else [mx.gpu(int(i)) for i in args.gpus.split(',')]
    # logging
    head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
    if 'log_file' in args and args.log_file is not None:
        log_file = args.log_file
        log_dir = args.log_dir
        log_file_full_name = os.path.join(log_dir, log_file)
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = logging.getLogger()
        handler = logging.FileHandler(log_file_full_name)
        formatter = logging.Formatter(head)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.info('start with arguments %s', args)
    else:
        logging.basicConfig(level=logging.DEBUG, format=head)
        logging.info('start with arguments %s', args)
    # Short tag for the kv-store synchronisation mode, used in model paths.
    kv_store_type = ""
    if args.kv_store == "dist_sync":
        kv_store_type = "bsp"
    elif args.kv_store == "dist_async":
        kv_store_type = "asp"
    elif args.kv_store == "dist_gsync":
        kv_store_type = "gsp"
    elif args.kv_store == "dist_ssync":
        kv_store_type = "ssp"
    begin_epoch = args.model_load_epoch if args.model_load_epoch else 0
    user = getpass.getuser()
    # Checkpoints are stored per user/dataset/depth/sync-mode.
    if not os.path.exists("/home/{}/mxnet_model/model/{}/resnet{}/{}".format(user, args.data_type, args.depth, kv_store_type)):
        os.makedirs("/home/{}/mxnet_model/model/{}/resnet{}/{}".format(user, args.data_type, args.depth, kv_store_type))
    model_prefix = "/home/{}/mxnet_model/model/{}/resnet{}/{}/{}-{}-resnet{}-{}".format(user, args.data_type, args.depth, kv_store_type, kv_store_type, args.data_type, args.depth, kv.rank)
    checkpoint = None if not args.savemodel else mx.callback.do_checkpoint(model_prefix)
    arg_params = None
    aux_params = None
    if args.retrain:
        _, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.model_load_epoch)
    if args.memonger:
        import memonger
        symbol = memonger.search_plan(symbol, data=(args.batch_size, 3, 32, 32) if args.data_type=="cifar10"
                                      else (args.batch_size, 3, 224, 224))
    # ---- data partitioning across workers ----
    splits = 1
    part = 0
    val_splits = kv.num_workers
    val_part = kv.rank
    '''yegeyan 2016.10.6'''
    if args.kv_store == "dist_sync" or args.kv_store == "dist_async" or args.kv_store == "dist_ssync":
    #if args.kv_store == "dist_sync":
        splits = kv.num_workers
        part = kv.rank
    if args.kv_store == "dist_gsync":
        if args.data_allocator == 1:
            # Per-cluster manual data ranges; note begin/end are stored in
            # (part, splits) respectively for the gsync allocator.
            if args.hostname == "gpu-cluster-1":
                part = args.cluster1_begin
                splits = args.cluster1_end
            elif args.hostname == "gpu-cluster-2":
                part = args.cluster2_begin
                splits = args.cluster2_end
            elif args.hostname == "gpu-cluster-3":
                part = args.cluster3_begin
                splits = args.cluster3_end
            elif args.hostname == "gpu-cluster-4":
                part = args.cluster4_begin
                splits = args.cluster4_end
            else:
                part = args.cluster5_begin
                splits = args.cluster5_end
            args.data_proportion = splits - part
        else:
            splits = kv.num_workers
            part = kv.rank
    # yegeyan 2017.1.15
    epoch_size = args.num_examples / args.batch_size
    model_args={}
    if args.kv_store == 'dist_sync' or args.kv_store == 'dist_async' or args.kv_store == 'dist_ssync':
    #if args.kv_store == 'dist_sync':
        epoch_size /= kv.num_workers
        model_args['epoch_size'] = epoch_size
    '''yegeyan 2016.12.13'''
    if args.kv_store == 'dist_gsync':
        if args.data_allocator == 1:
            epoch_size *= args.data_proportion
            model_args['epoch_size'] = epoch_size
        else:
            epoch_size /= kv.num_workers
            model_args['epoch_size'] = epoch_size
    if 'lr_factor' in args and args.lr_factor < 1:
        # NOTE(review): 'batch_num' is not defined in this function — this
        # branch would raise NameError if taken; confirm intended variable.
        model_args['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
            step=max(int(batch_num * args.lr_factor_epoch), 1), # yegeyan 2016.12.13
            factor=args.lr_factor)
    if 'clip_gradient' in args and args.clip_gradient is not None:
        model_args['clip_gradient'] = args.clip_gradient
    eval_metrics = ['accuracy']
    ## TopKAccuracy only allows top_k > 1
    for top_k in [5, 10, 20]:
        eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=top_k))
    # yegeyan 2017.1.4
    val_eval_metrics = ['accuracy']
    ## TopKAccuracy only allows top_k > 1
    for top_k in [5, 10, 20]:
        val_eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=top_k))
    # ---- training data iterator (augmentation depends on aug_level) ----
    train = mx.io.ImageRecordIter(
        path_imgrec         = os.path.join(args.data_dir, "train.rec") if args.data_type == 'cifar10' else
                              os.path.join(args.data_dir, "train_480.rec") if args.aug_level == 1
                              else os.path.join(args.data_dir, "train_480.rec"),
        label_width         = 1,
        data_name           = 'data',
        label_name          = 'softmax_label',
        data_shape          = (3, 32, 32) if args.data_type=="cifar10" else (3, 224, 224),
        batch_size          = args.batch_size,
        pad                 = 4 if args.data_type == "cifar10" else 0,
        fill_value          = 127,  # only used when pad is valid
        rand_crop           = True,
        max_random_scale    = 1.0,  # 480 with imagnet, 32 with cifar10
        min_random_scale    = 1.0 if args.data_type == "cifar10" else 1.0 if args.aug_level == 1 else 0.533,  # 256.0/480.0
        max_aspect_ratio    = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 0.25,
        random_h            = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 36,  # 0.4*90
        random_s            = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50,  # 0.4*127
        random_l            = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50,  # 0.4*127
        max_rotate_angle    = 0 if args.aug_level <= 2 else 10,
        max_shear_ratio     = 0 if args.aug_level <= 2 else 0.1,
        rand_mirror         = True,
        shuffle             = True,
        preprocess_threads  = 4,
        num_parts           = splits,
        part_index          = part)
    # ---- validation data iterator (no augmentation) ----
    val = mx.io.ImageRecordIter(
        path_imgrec         = os.path.join(args.data_dir, "test.rec") if args.data_type == 'cifar10' else
                              os.path.join(args.data_dir, "val_480.rec"),
        label_width         = 1,
        data_name           = 'data',
        label_name          = 'softmax_label',
        batch_size          = args.batch_size,
        data_shape          = (3, 32, 32) if args.data_type=="cifar10" else (3, 224, 224),
        rand_crop           = False,
        rand_mirror         = False,
        preprocess_threads  = 4,
        num_parts           = val_splits,
        part_index          = val_part)
    model = mx.model.FeedForward(
        ctx                 = devs,
        symbol              = symbol,
        arg_params          = arg_params,
        aux_params          = aux_params,
        num_epoch           = args.num_epochs,
        begin_epoch         = begin_epoch,
        learning_rate       = args.lr,
        momentum            = args.mom,
        wd                  = args.wd,
        #optimizer          = 'nag',
        optimizer           = 'sgd',
        initializer         = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
        lr_scheduler        = multi_factor_scheduler(begin_epoch, epoch_size, step=[220, 260, 280], factor=0.1)
                              if args.data_type=='cifar10' else
                              multi_factor_scheduler(begin_epoch, epoch_size, step=[30, 60, 90], factor=0.1),
        **model_args
        )
    model.fit(
        X                  = train,
        eval_data          = val,
        eval_metric        = eval_metrics,
        val_eval_metric    = val_eval_metrics,
        kvstore            = kv,
        batch_end_callback = mx.callback.Speedometer(args.batch_size, 50),
        epoch_end_callback = checkpoint,
        hostname           = socket.gethostbyname_ex(socket.gethostname())[0],
        dataset            = args.data_type,
        staleness          = args.staleness,
        network_name       = "resnet_" + str(args.depth),
        lr                 = args.lr) #yegeyan 2017.5.15
    # logging.info("top-1 and top-5 acc is {}".format(model.score(X = val,
    #               eval_metric = ['acc', mx.metric.create('top_k_accuracy', top_k = 5)])))
if __name__ == "__main__":
    # Command-line interface: training hyper-parameters plus per-cluster
    # data-range options used by the dist_gsync allocator.
    parser = argparse.ArgumentParser(description="command for training resnet-v2")
    parser.add_argument('--gpus', type=str, default='0', help='the gpus will be used, e.g "0,1,2,3"')
    parser.add_argument('--data-dir', type=str, default='./data/imagenet/', help='the input data directory')
    parser.add_argument('--data-type', type=str, default='imagenet', help='the dataset type')
    parser.add_argument('--list-dir', type=str, default='./',
                        help='the directory which contain the training list file')
    parser.add_argument('--lr', type=float, default=0.1, help='initialization learning reate')
    parser.add_argument('--mom', type=float, default=0.9, help='momentum for sgd')
    parser.add_argument('--bn-mom', type=float, default=0.9, help='momentum for batch normlization')
    parser.add_argument('--wd', type=float, default=0.0001, help='weight decay for sgd')
    parser.add_argument('--batch-size', type=int, default=256, help='the batch size')
    parser.add_argument('--workspace', type=int, default=512, help='memory space size(MB) used in convolution, if xpu '
                        ' memory is oom, then you can try smaller vale, such as --workspace 256')
    parser.add_argument('--depth', type=int, default=50, help='the depth of resnet')
    parser.add_argument('--num-classes', type=int, default=1000, help='the class number of your task')
    parser.add_argument('--aug-level', type=int, default=2, choices=[1, 2, 3],
                        help='level 1: use only random crop and random mirror\n'
                             'level 2: add scale/aspect/hsv augmentation based on level 1\n'
                             'level 3: add rotation/shear augmentation based on level 2')
    parser.add_argument('--num-examples', type=int, default=1281167, help='the number of training examples')
    parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
    parser.add_argument('--model-load-epoch', type=int, default=0,
                        help='load the model on an epoch using the model-load-prefix')
    parser.add_argument('--frequent', type=int, default=50, help='frequency of logging')
    parser.add_argument('--memonger', action='store_true', default=False,
                        help='true means using memonger to save momory, https://github.com/dmlc/mxnet-memonger')
    parser.add_argument('--retrain', action='store_true', default=False, help='true means continue training')
    parser.add_argument('--log-file', type=str,
                        help='the name of log file')
    parser.add_argument('--log-dir', type=str, default="output",
                        help='directory of the log file')
    parser.add_argument('--num-epochs', type=int, default=1,
                        help='the number of training epochs')
    parser.add_argument('--hostname', type=str, default="gpu-cluster-1",
                        help='the hostname of this worker')
    # Manual data-range options, one begin/end pair per cluster (dist_gsync).
    parser.add_argument('--cluster1-begin', type=float, default=0,
                        help='the begin of data in cluster1')
    parser.add_argument('--cluster1-end', type=float, default=0,
                        help='the end of data in cluster1')
    parser.add_argument('--cluster2-begin', type=float, default=0,
                        help='the begin of data in cluster2')
    parser.add_argument('--cluster2-end', type=float, default=0,
                        help='the end of data in cluster2')
    parser.add_argument('--cluster3-begin', type=float, default=0,
                        help='the begin of data in cluster3')
    parser.add_argument('--cluster3-end', type=float, default=0,
                        help='the end of data in cluster3')
    parser.add_argument('--cluster4-begin', type=float, default=0,
                        help='the begin of data in cluster4')
    parser.add_argument('--cluster4-end', type=float, default=0,
                        help='the end of data in cluster4')
    parser.add_argument('--cluster5-begin', type=float, default=0,
                        help='the begin of data in cluster5')
    parser.add_argument('--cluster5-end', type=float, default=0,
                        help='the end of data in cluster5')
    parser.add_argument('--data_proportion', type=float, default=0,
                        help='the data proportion')
    parser.add_argument('--staleness', type=int, default=0,
                        help='the staleness of dist_ssync')
    parser.add_argument('--savemodel', action='store_true', default=False,
                        help='true means save model')
    parser.add_argument('--data-allocator', type=int, default=0,
                        help='whether to use data allocator by group')
    args = parser.parse_args()
    main()
# ---- end of file ----
# repo: rummanwaqar/rcommander-core (stars: 1-10)
import re
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from nodebox.gui.qt.ValueLadder import ValueLadder
from nodebox.util.PyFontify import fontify
# Matches a run of spaces/tabs at the current position.
whiteRE = re.compile(r"[ \t]+")
# Matches optional leading whitespace followed by '#'; group 1 is the '#'.
commentRE = re.compile(r"[ \t]*(#)")
def findWhitespace(s, pos=0):
    """Return the index just past any run of spaces/tabs starting at `pos`."""
    match = whiteRE.match(s, pos)
    return pos if match is None else match.end()
# Template for a quoted string with backslash escapes; the placeholder "q"
# is substituted with the actual quote character below.
stringPat = r"q[^\\q\n]*(\\[\000-\377][^\\q\n]*)*q"
# Single-quoted string | double-quoted string | comment to end of line.
stringOrCommentPat = stringPat.replace("q", "'") + "|" + stringPat.replace('q', '"') + "|#.*"
stringOrCommentRE = re.compile(stringOrCommentPat)
def removeStringsAndComments(s):
    """Return `s` with comments stripped and string literals X-ed out.

    String contents are replaced by runs of "X" of the same length so
    that column positions are preserved; comments are dropped entirely.
    (Python 2 code: the final join coerces parts via unicode().)
    """
    items = []
    while 1:
        try:
            m = stringOrCommentRE.search(s)
        except TypeError:
            # search() raises TypeError on non-string input; treat as no match.
            m = None
        if m:
            start = m.start()
            end = m.end()
            items.append(s[:start])
            if s[start] != "#":
                items.append("X" * (end - start)) # X-out strings
            s = s[end:]
        else:
            items.append(s)
            break
    return "".join([unicode(el) for el in items])
# Global editor settings (font + syntax colours); populated by loadConfig().
Config = {}
def loadConfig():
    """Populate the global Config dict from QSettings.

    Chooses a platform-appropriate default monospace font, then picks the
    syntax-colour palette depending on whether the application palette has
    a dark or a light window background.
    """
    if sys.platform == "darwin":
        defaultFontfamily = "Monaco"
        defaultFontsize = 11
    elif sys.platform == "win32":
        defaultFontfamily = "Courier New"
        defaultFontsize = 9
    else:
        defaultFontfamily = "Bitstream Vera Sans Mono"
        defaultFontsize = 9
    global Config
    settings = QSettings()
    font = settings.value("font", QVariant(QFont(defaultFontfamily, defaultFontsize)))
    font.convert(QVariant.Font)
    Config["font"] = font.toPyObject()
    # (name, colour) pairs for each token class, per background brightness.
    colorsLightBackground = (
        ("normal", "#000000"),
        ("keyword", "#0000FF"),
        ("identifier", "#FF0000"),
        ("string", "#FF00FF"),
        ("comment", "#808080"))
    colorsDarkBackground = (
        ("normal", "#FFFFFF"),
        ("keyword", "#00FFFF"),
        ("identifier", "#AAFF00"),
        ("string", "#FF55FF"),
        ("comment", "#808080"))
    # valueF() < 0.5 means the window background is dark.
    if QApplication.instance().palette().color(QPalette.Window).valueF() < 0.5:
        colors = colorsDarkBackground
    else:
        colors = colorsLightBackground
    for name, color in colors:
        Config["%sfontcolor" % name] = settings.value(
            "%sfontcolor" % name, QVariant(color)).toString()
def saveConfig():
    """Persist every entry of the global Config dict via QSettings."""
    settings = QSettings()
    for key in Config:
        settings.setValue(key, QVariant(Config[key]))
class PythonHighlighter(QSyntaxHighlighter):
    """Qt syntax highlighter for Python source, driven by PyFontify.

    Fonts and colours come from the module-level Config dict (loaded on
    demand); multi-line triple-quoted strings are tracked via block state.
    """

    # Shared name -> QTextCharFormat map, filled by initializeFormats().
    Formats = {}

    def __init__(self, parent=None):
        super(PythonHighlighter, self).__init__(parent)
        if len(Config) == 0:
            loadConfig()
        self.initializeFormats()
        if isinstance(parent, QTextEdit):
            font = PythonHighlighter.Formats["normal"].font()
            if sys.platform == "darwin":
                # Small fixed-pitch fonts render better unantialiased on OS X.
                if QFontInfo(font).fixedPitch() and font.pointSize() <= 10:
                    font.setStyleStrategy(QFont.NoAntialias)
            parent.setFont(font)
        # Regexes used to detect (partial) triple-quoted strings.
        self.stringRe = QRegExp(r"""(:?"["]".*"["]"|'''.*''')""")
        self.stringRe.setMinimal(True)
        self.tripleSingleRe = QRegExp(r"""'''(?!")""")
        self.tripleDoubleRe = QRegExp(r'''"""(?!')''')

    @staticmethod
    def initializeFormats():
        """(Re)build the shared Formats map from the Config font/colours."""
        baseFormat = QTextCharFormat()
        font = Config["font"]
        baseFormat.setFont(font)
        for name in ("normal", "keyword", "identifier", "string", "comment"):
            format = QTextCharFormat(baseFormat)
            format.setForeground(QColor(Config["%sfontcolor" % name]))
            PythonHighlighter.Formats[name] = format

    def highlightMultiline(self, text):
        """Track triple-quoted strings that span blocks via block state."""
        NORMAL, TRIPLESINGLE, TRIPLEDOUBLE = range(3)
        self.setCurrentBlockState(NORMAL)
        # A complete triple-quoted string on one line needs no state tracking.
        if text.indexOf(self.stringRe) != -1:
            return
        # This is fooled by triple quotes inside single quoted strings
        for i, state in ((text.indexOf(self.tripleSingleRe),
                          TRIPLESINGLE),
                         (text.indexOf(self.tripleDoubleRe),
                          TRIPLEDOUBLE)):
            if self.previousBlockState() == state:
                if i == -1:
                    # Still inside the string: keep the state for the next block.
                    i = text.length()
                    self.setCurrentBlockState(state)
                self.setFormat(0, i + 3,
                               PythonHighlighter.Formats["string"])
            elif i > -1:
                self.setCurrentBlockState(state)
                self.setFormat(i, text.length(),
                               PythonHighlighter.Formats["string"])

    def highlightBlock(self, text):
        """Apply per-token formats via PyFontify, then multi-line strings."""
        self.setFormat(0, text.length(), PythonHighlighter.Formats["normal"])
        for tag, start, end, sublist in fontify(unicode(text), 0):
            self.setFormat(start, end-start, PythonHighlighter.Formats[tag])
        self.highlightMultiline(text)
class PyDETextView(QTextEdit):
def __init__(self, parent=None):
QTextEdit.__init__(self, parent)
self.usesTabs = 0
self.indentSize = 4
self.valueLadder = None
PythonHighlighter(self)
def setFont(self, font):
QTextEdit.setFont(self, font)
self.emit(SIGNAL("fontChanged()"))
def string(self):
return unicode(self.toPlainText())
def insertText_(self, text):
cursor = self.textCursor()
cursor.insertText(text)
def selectedRange(self):
cursor = self.textCursor()
start = location = cursor.selectionStart()
end = cursor.selectionEnd()
length = end - start
return (location, length)
def setSelectedRange_(self, rng):
cursor = QTextCursor(self.textCursor())
cursor.clearSelection()
cursor.setPosition(rng[0], QTextCursor.MoveAnchor)
cursor.movePosition(QTextCursor.NextCharacter, QTextCursor.KeepAnchor, rng[1])
self.setTextCursor(cursor)
def hideValueLadder(self):
if self.valueLadder is not None:
self.valueLadder.hide()
if self.valueLadder.dirty:
pass
self.valueLadder = None
def mouseReleaseEvent(self, event):
self.hideValueLadder()
QTextEdit.mouseReleaseEvent(self, event)
def mouseMoveEvent(self, event):
if self.valueLadder is not None:
self.valueLadder.mouseDragged_(event)
else:
QTextEdit.mouseMoveEvent(self, event)
def mousePressEvent(self, event):
if int(QApplication.keyboardModifiers()) & Qt.ControlModifier > 0:
screenPoint = viewPoint = event.pos()
c = self.cursorForPosition(screenPoint).position()
txt = self.string()
try:
if txt[c] in "1234567890.":
# Find full number
begin = c
end = c
try:
while txt[begin-1] in "1234567890.":
begin-=1
except IndexError:
pass
try:
while txt[end+1] in "1234567890.":
end+=1
except IndexError:
pass
end+=1
self.valueLadder = ValueLadder(self, eval(txt[begin:end]), (begin,end), screenPoint, viewPoint)
except IndexError:
pass
else:
QTextEdit.mousePressEvent(self, event)
def getLinesForRange(self, rng):
userCursor = self.textCursor()
cursor = QTextCursor(userCursor)
cursor.setPosition(rng[0], QTextCursor.MoveAnchor)
cursor.movePosition(QTextCursor.StartOfLine, QTextCursor.MoveAnchor)
start = location = cursor.position()
cursor.setPosition(rng[0]+rng[1], QTextCursor.KeepAnchor)
cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
end = cursor.position()
length = end - start
return cursor.selectedText(), (location, length)
def jumpToLine_(self):
from nodebox.gui.qt.AskString import AskString
AskString("Jump to line number:", self._jumpToLineCallback,
parentWindow=self.window())
def _jumpToLineCallback(self, value):
if value is None:
return # user cancelled
try:
lineNo = int(value.strip())
except ValueError:
pass
else:
self.jumpToLine(lineNo)
def jumpToLine(self, lineNo):
try:
lines = self.string().splitlines()
lineNo = min(max(0, lineNo - 1), len(lines))
length_of_prevs = sum([len(line)+1 for line in lines[:lineNo]])
curlen = len(lines[lineNo])
rng = (length_of_prevs, curlen)
self.setSelectedRange_(rng)
except Exception, e:
from nodebox.gui.qt.util import errorAlert
etype, value, tb = sys.exc_info()
errorAlert(None, "(%s: %s)" % (etype, e))
def getIndent(self):
if self.usesTabs:
return "\t"
else:
return self.indentSize * " "
def keyPressEvent(self, event):
if event.key() == Qt.Key_Backspace:
self._delete(event, False)
elif event.key() == Qt.Key_Delete:
self._delete(event, True)
elif event.key() == Qt.Key_Tab:
if not self.tabChangesFocus():
self.insertTab_(event)
elif event.key() in (Qt.Key_Enter, Qt.Key_Return):
self.insertNewline_(event)
elif event.key() == Qt.Key_V and int(QApplication.keyboardModifiers()) & Qt.ControlModifier > 0:
self.paste()
else:
QTextEdit.keyPressEvent(self, event)
def keyReleaseEvent(self, event):
QTextEdit.keyReleaseEvent(self, event)
self.repaint()
def _iterLinesBackwards(self, end, maxChars=8192):
begin = max(0, end - maxChars)
if end > 0:
prevChar = self.string()[end - 1]
if prevChar == "\n":
end += 1
lines, linesRng = self.getLinesForRange((begin, end - begin))
lines = lines[:end - linesRng[0]]
lines = unicode(lines)
linesRng = (linesRng[0], len(lines))
lines = lines.splitlines(True)
lines.reverse()
for line in lines:
nChars = len(line)
yield line, (end - nChars, nChars)
end -= nChars
assert end == linesRng[0]
def _findMatchingParen(self, index, paren):
openToCloseMap = {"(": ")", "[": "]", "{": "}"}
if paren:
stack = [paren]
else:
stack = []
line, lineRng, pos = None, None, None
for line, lineRng in self._iterLinesBackwards(index):
line = removeStringsAndComments(line)
pos = None
for i in range(len(line)-1, -1, -1):
c = line[i]
if c in ")]}":
stack.append(c)
elif c in "([{":
if not stack:
if not paren:
pos = i
break
elif stack[-1] != openToCloseMap[c]:
# mismatch
stack = []
break
else:
stack.pop()
if paren and not stack:
pos = i
break
if not stack:
break
return line, lineRng, pos
def insertNewline_(self, event):
selRng = self.selectedRange()
QTextEdit.keyPressEvent(self, event)
line, lineRng, pos = self._findMatchingParen(selRng[0], None)
if line is None:
return
leadingSpace = ""
if pos is None:
m = whiteRE.match(line)
if m is not None:
leadingSpace = m.group()
else:
leadingSpace = re.sub(r"[^\t]", " ", line[:pos + 1])
line, lineRng = self.getLinesForRange((selRng[0], 0))
line = removeStringsAndComments(line).strip()
if line and line[-1] == ":":
leadingSpace += self.getIndent()
if leadingSpace:
self.insertText_(leadingSpace)
def insertTab_(self, event):
if self.usesTabs:
QTextEdit.keyPressEvent(self, event)
return
self.insertText_("")
selRng = self.selectedRange()
assert selRng[1] == 0
lines, linesRng = self.getLinesForRange(selRng)
lines = unicode(lines)
sel = selRng[0] - linesRng[0]
whiteEnd = findWhitespace(lines, sel)
nSpaces = self.indentSize - (whiteEnd % self.indentSize)
self.insertText_(nSpaces * " ")
sel += nSpaces
whiteEnd += nSpaces
sel = min(whiteEnd, sel + (sel % self.indentSize))
self.setSelectedRange_((sel + linesRng[0], 0))
def paste(self):
app = QApplication.instance()
self.insertText_(app.clipboard().mimeData().text())
def delete(self):
cursor = self.textCursor()
cursor.removeSelectedText()
def _delete(self, event, isForward):
selRng = self.selectedRange()
if self.usesTabs or selRng[1]:
QTextEdit.keyPressEvent(self, event)
return
lines, linesRng = self.getLinesForRange(selRng)
lines = unicode(lines)
sel = selRng[0] - linesRng[0]
whiteEnd = findWhitespace(lines, sel)
whiteBegin = sel
while whiteBegin and lines[whiteBegin-1] == " ":
whiteBegin -= 1
if not isForward:
white = whiteBegin
else:
white = whiteEnd
if white == sel or (whiteEnd - whiteBegin) <= 1:
QTextEdit.keyPressEvent(self, event)
return
nSpaces = whiteEnd % self.indentSize
if nSpaces == 0:
nSpaces = self.indentSize
offset = sel % self.indentSize
if not isForward and offset == 0:
offset = nSpaces
delBegin = sel - offset
delEnd = delBegin + nSpaces
delBegin = max(delBegin, whiteBegin)
delEnd = min(delEnd, whiteEnd)
self.setSelectedRange_((linesRng[0] + delBegin, delEnd - delBegin))
self.insertText_("")
def indent(self):
def indentFilter(lines):
indent = self.getIndent()
indentedLines = []
for line in lines:
if line.strip():
indentedLines.append(indent + line)
else:
indentedLines.append(line)
[indent + line for line in lines[:-1]]
return indentedLines
self._filterLines(indentFilter)
def dedent(self):
def dedentFilter(lines):
indent = self.getIndent()
dedentedLines = []
indentSize = len(indent)
for line in lines:
if line.startswith(indent):
line = line[indentSize:]
dedentedLines.append(line)
return dedentedLines
self._filterLines(dedentFilter)
def comment(self):
def commentFilter(lines):
commentedLines = []
indent = self.getIndent()
pos = 100
for line in lines:
if not line.strip():
continue
pos = min(pos, findWhitespace(line))
for line in lines:
if line.strip():
commentedLines.append(line[:pos] + "#" + line[pos:])
else:
commentedLines.append(line)
return commentedLines
self._filterLines(commentFilter)
def uncomment(self):
def uncommentFilter(lines):
commentedLines = []
commentMatch = commentRE.match
for line in lines:
m = commentMatch(line)
if m is not None:
pos = m.start(1)
line = line[:pos] + line[pos+1:]
commentedLines.append(line)
return commentedLines
self._filterLines(uncommentFilter)
def _filterLines(self, filterFunc):
selRng = self.selectedRange()
lines, linesRng = self.getLinesForRange(selRng)
lines = unicode(lines)
filteredLines = filterFunc(lines.splitlines(True))
filteredLines = "".join(filteredLines)
if lines == filteredLines:
return
self.setSelectedRange_(linesRng)
self.insertText_(filteredLines)
cursor = self.textCursor()
newSelRng = (linesRng[0], len(filteredLines))
self.setSelectedRange_(newSelRng)
def performFindPanelAction(self):
sender = self.sender()
if sender is not None:
tag = str(sender.property("tag").toString())
from nodebox.gui.qt.findreplace import FindReplaceController
self.emit(SIGNAL("performFindPanelAction(int)"), getattr(FindReplaceController, tag))
    def jumpToSelection(self):
        # No-op placeholder; presumably meant to scroll the view to the
        # current selection -- TODO confirm intended behavior.
        pass
|
"""
@author: <NAME>, The University of Sheffield, <EMAIL>
References
----------
<NAME>., <NAME>., <NAME>. and <NAME>., 2020. Side Information Dependence as a
Regulariser for Analyzing Human Brain Conditions across Cognitive Experiments.
In Proceedings of the 34th AAAI Conference on Artificial Intelligence (AAAI 2020).
"""
import numpy as np
from numpy.linalg import multi_dot
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.preprocessing import LabelBinarizer
# import cvxpy as cvx
# from cvxpy.error import SolverError
from ..utils.multiclass import score2pred
from ..utils import lap_norm, base_init
from .base import SSLFramework
class SIDeRSVM(SSLFramework):
    """Side Information Dependence Regularised Support Vector Machine.

    Semi-supervised SVM that penalises statistical dependence between the
    decision function and side information (co-variates), with an optional
    manifold regularisation term.

    Parameters
    ----------
    C : float, optional
        param for importance of slack variable, by default 1.0
    kernel : str, optional
        'rbf' | 'linear' | 'poly', by default 'linear'
    lambda_ : float, optional
        param for side information dependence regularisation, by default 1.0
    mu : float, optional
        param for manifold regularisation, by default 0.0
    k_neighbour : int, optional
        number of nearest neighbours for each sample in manifold
        regularisation, by default 3
    manifold_metric : str, optional
        The distance metric used to calculate the k-Neighbors for each
        sample point. The DistanceMetric class gives a list of available
        metrics. By default 'cosine'.
    knn_mode : str, optional
        {'connectivity', 'distance'}, by default 'distance'. Type of
        returned matrix: 'connectivity' will return the connectivity
        matrix with ones and zeros, and 'distance' will return the
        distances between neighbors according to the given metric.
    solver : str, optional
        quadratic programming solver, [cvxopt, osqp], by default 'osqp'
    **kwargs
        extra parameters forwarded to the kernel function
    """

    def __init__(self, C=1.0, kernel='linear', lambda_=1.0, mu=0.0, k_neighbour=3,
                 manifold_metric='cosine', knn_mode='distance', solver='osqp', **kwargs):
        self.kwargs = kwargs
        self.kernel = kernel
        self.lambda_ = lambda_
        self.mu = mu
        self.C = C
        self.solver = solver
        self.manifold_metric = manifold_metric
        self.k_neighbour = k_neighbour
        self.knn_mode = knn_mode
        # Labels are encoded as +1/-1 for the SVM dual.
        self._lb = LabelBinarizer(pos_label=1, neg_label=-1)

    def fit(self, X, y, co_variates=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like
            Input data, shape (n_samples, n_features)
        y : array-like
            Label, shape (nl_samples, ) where nl_samples <= n_samples
        co_variates : array-like, optional
            Domain co-variate matrix for input data,
            shape (n_samples, n_co-variates). If None, the side-information
            term vanishes (zero kernel).

        Returns
        -------
        self
        """
        ker_x, unit_mat, ctr_mat, n = base_init(X, kernel=self.kernel, **self.kwargs)
        # Bug fix: the default co_variates=None previously crashed in
        # np.dot(None, ...).  Fall back to a zero kernel, as SIDeRLS does,
        # which disables the side-information dependence term.
        if isinstance(co_variates, np.ndarray):
            ker_c = np.dot(co_variates, co_variates.T)
        else:
            ker_c = np.zeros((n, n))
        y_ = self._lb.fit_transform(y)
        Q_ = unit_mat.copy()
        if self.mu != 0:
            lap_mat = lap_norm(X, n_neighbour=self.k_neighbour,
                               metric=self.manifold_metric, mode=self.knn_mode)
            Q_ += np.dot(self.lambda_ / np.square(n - 1) *
                         multi_dot([ctr_mat, ker_c, ctr_mat])
                         + self.mu / np.square(n) * lap_mat, ker_x)
        else:
            Q_ += self.lambda_ * multi_dot([ctr_mat, ker_c, ctr_mat, ker_x]) / np.square(n - 1)
        # Solve the semi-supervised SVM dual problem.
        self.coef_, self.support_ = self._solve_semi_dual(ker_x, y_, Q_, self.C, self.solver)
        self.X = X
        self.y = y
        return self

    def decision_function(self, X):
        """Evaluate the decision function for the samples in X.

        Parameters
        ----------
        X : array-like
            Input data, shape (n_samples, n_features)

        Returns
        -------
        array-like
            decision scores, shape (n_samples,) for binary classification,
            (n_samples, n_class) for multi-class cases
        """
        ker_x = pairwise_kernels(X, self.X, metric=self.kernel,
                                 filter_params=True, **self.kwargs)
        # No intercept term is modelled.
        return np.dot(ker_x, self.coef_)

    def predict(self, X):
        """Perform classification on samples in X.

        Parameters
        ----------
        X : array-like
            Input data, shape (n_samples, n_features)

        Returns
        -------
        array-like
            predicted labels, shape (n_samples,)
        """
        dec = self.decision_function(X)
        if self._lb.y_type_ == 'binary':
            y_pred_ = np.sign(dec).reshape(-1, 1)
        else:
            y_pred_ = score2pred(dec)
        return self._lb.inverse_transform(y_pred_)

    def fit_predict(self, X, y, co_variates=None):
        """Fit on (X, y, co_variates), then predict labels for X.

        Parameters
        ----------
        X : array-like
            Input data, shape (n_samples, n_features)
        y : array-like
            Label, shape (nl_samples, ) where nl_samples <= n_samples
        co_variates : array-like, optional
            Domain co-variate matrix, shape (n_samples, n_co-variates)

        Returns
        -------
        array-like
            predicted labels, shape (n_samples,)
        """
        self.fit(X, y, co_variates)
        return self.predict(X)
class SIDeRLS(SSLFramework):
    """Side Information Dependence Regularised Least Square.

    Parameters
    ----------
    sigma_ : float, optional
        param for model complexity (l2 norm), by default 1.0
    lambda_ : float, optional
        param for side information dependence regularisation, by default 1.0
    mu : float, optional
        param for manifold regularisation, by default 0.0
    kernel : str, optional
        kernel name passed to the kernel function, by default 'linear'
    k : int, optional
        number of nearest neighbours for each sample in manifold
        regularisation, by default 3
    knn_mode : str, optional
        {'connectivity', 'distance'}, by default 'distance'. Type of
        returned matrix: 'connectivity' will return the connectivity
        matrix with ones and zeros, and 'distance' will return the
        distances between neighbors according to the given metric.
    manifold_metric : str, optional
        The distance metric used to calculate the k-Neighbors for each
        sample point. The DistanceMetric class gives a list of available
        metrics. By default 'cosine'.
    class_weight : optional
        stored but not used by this implementation -- TODO confirm
    **kwargs:
        kernel param
    """

    def __init__(self, sigma_=1.0, lambda_=1.0, mu=0.0, kernel='linear',
                 k=3, knn_mode='distance', manifold_metric='cosine',
                 class_weight=None, **kwargs):
        self.kernel = kernel
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self.mu = mu
        self.manifold_metric = manifold_metric
        self.k = k
        self.knn_mode = knn_mode
        self.class_weight = class_weight
        # Labels are encoded as +1/-1.
        self._lb = LabelBinarizer(pos_label=1, neg_label=-1)
        self.kwargs = kwargs

    def fit(self, X, y, co_variates=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like
            Input data, shape (n_samples, n_features)
        y : array-like
            Label, shape (nl_samples, ) where nl_samples <= n_samples
        co_variates : array-like, optional
            Domain co-variate matrix for input data,
            shape (n_samples, n_co-variates). If None, the side-information
            term vanishes (zero kernel).

        Returns
        -------
        self
        """
        nl = y.shape[0]
        ker_x, unit_mat, ctr_mat, n = base_init(X, kernel=self.kernel, **self.kwargs)
        # isinstance() is the idiomatic, subclass-safe type check
        # (was `type(co_variates) == np.ndarray`).
        if isinstance(co_variates, np.ndarray):
            ker_c = np.dot(co_variates, co_variates.T)
        else:
            ker_c = np.zeros((n, n))
        # J selects the labelled block of samples (first nl rows/cols).
        J = np.zeros((n, n))
        J[:nl, :nl] = np.eye(nl)
        side_term = self.lambda_ / np.square(n - 1) * multi_dot([ctr_mat, ker_c, ctr_mat])
        if self.mu != 0:
            lap_mat = lap_norm(X, n_neighbour=self.k, mode=self.knn_mode,
                               metric=self.manifold_metric)
            Q_ = self.sigma_ * unit_mat + np.dot(
                J + side_term + self.mu / np.square(n) * lap_mat, ker_x)
        else:
            Q_ = self.sigma_ * unit_mat + np.dot(J + side_term, ker_x)
        y_ = self._lb.fit_transform(y)
        self.coef_ = self._solve_semi_ls(Q_, y_)
        self.X = X
        self.y = y
        return self

    def decision_function(self, X):
        """Evaluate the decision function for the samples in X.

        Parameters
        ----------
        X : array-like
            Input data, shape (n_samples, n_features)

        Returns
        -------
        array-like
            decision scores, shape (n_samples,) for binary classification,
            (n_samples, n_class) for multi-class cases
        """
        ker_x = pairwise_kernels(X, self.X, metric=self.kernel,
                                 filter_params=True, **self.kwargs)
        # No intercept term is modelled.
        return np.dot(ker_x, self.coef_)

    def predict(self, X):
        """Perform classification on samples in X.

        Parameters
        ----------
        X : array-like
            Input data, shape (n_samples, n_features)

        Returns
        -------
        array-like
            predicted labels, shape (n_samples,)
        """
        dec = self.decision_function(X)
        if self._lb.y_type_ == 'binary':
            y_pred_ = np.sign(dec).reshape(-1, 1)
        else:
            y_pred_ = score2pred(dec)
        return self._lb.inverse_transform(y_pred_)

    def fit_predict(self, X, y, co_variates=None):
        """Fit the model on the given training data, then classify X.

        Parameters
        ----------
        X : array-like
            Input data, shape (n_samples, n_features)
        y : array-like
            Label, shape (nl_samples, ) where nl_samples <= n_samples
        co_variates : array-like, optional
            Domain co-variate matrix, shape (n_samples, n_co-variates)

        Returns
        -------
        array-like
            predicted labels, shape (n_samples,)
        """
        self.fit(X, y, co_variates)
        return self.predict(X)
|
#
# Created by <NAME> on 29/December/2019
#
import time
import math
import requests
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET
# Raw string so Windows backslashes are not parsed as escape sequences
# (sequences like "\P" raise a SyntaxWarning on modern Python).
driver_path = r"D:\Program Files\PycharmProjects\Trendyol-Scraper-Crawler\chromedriver.exe"
# All scraped hrefs are relative and get joined onto this base URL.
baseUrl = "https://www.trendyol.com/"
def getAllMainCategories():
    """Scrape the Trendyol landing page and write every main-category URL
    to main_categories.txt, one absolute URL per line."""
    r = requests.get(baseUrl)
    soup = BeautifulSoup(r.content, "lxml")
    categories = soup.find_all("a", attrs={"class": "category-header"})
    # Context manager guarantees the file is closed even if a write fails.
    with open("main_categories.txt", "w+") as mainCategoryFile:
        for category in categories:
            print(category.get("href"))
            mainCategoryFile.write(baseUrl + category.get("href") + "\n")
def getSubCategories():
    """Scrape the Trendyol landing page and write every sub-category URL
    to subcategories.txt, one absolute URL per line."""
    r = requests.get(baseUrl)
    soup = BeautifulSoup(r.content, "lxml")
    categories = soup.find_all("a", attrs={"class": "sub-category-header"})
    # Context manager guarantees the file is closed even if a write fails.
    with open("subcategories.txt", "w+") as subCategoryFile:
        for category in categories:
            print(category.get("href"))
            subCategoryFile.write(baseUrl + category.get("href") + "\n")
def getProductLinks(categoryUrl):
    """Open a category page in Chrome, scroll to the bottom to trigger the
    lazy-loaded product grid, then write every product-card link to
    '<last URL segment>.txt', one URL per line.
    """
    browser = webdriver.Chrome(executable_path=driver_path)
    try:
        browser.get(categoryUrl)
        time.sleep(1)
        fileName = categoryUrl.split("/")[-1]
        elem = browser.find_element_by_tag_name("body")
        # The product count is the third-from-last token of the page
        # description text -- assumes Trendyol's markup; TODO confirm.
        description = browser.find_element_by_class_name("dscrptn").text.split(" ")
        product_count = int(description[-3])
        # 24 products per page; several PAGE_DOWNs are needed per page to
        # drive the infinite scroll.
        page_count = math.ceil(product_count / 24)
        no_of_pagedowns = page_count * 6
        while no_of_pagedowns:
            elem.send_keys(Keys.PAGE_DOWN)
            time.sleep(0.2)
            no_of_pagedowns -= 1
        links = browser.find_elements_by_class_name("p-card-chldrn-cntnr")
        print(len(links))
        # Bug fix: the output file was previously opened and never closed;
        # the context manager closes it deterministically.
        with open(fileName + ".txt", "w+") as linkFile:
            for link in links:
                linkFile.write(link.get_attribute("href") + "\n")
    finally:
        # Close the browser even if scraping raised.
        browser.close()
def getProductInformation(productFileName):
    """Fetch every product URL listed in `productFileName` (one per line),
    scrape name/price/image/specs from each product page, and write the
    collected data as XML to '<basename>.xml'.

    The CSS class names and spec layout are tied to Trendyol's markup --
    TODO confirm they still match the live site.
    """
    # Root element is named after the input file's basename.
    root = ET.Element(productFileName.split(".")[0])
    count = 0
    with open(productFileName) as productFile:
        lines = productFile.readlines()
        for url in lines:
            count += 1
            print("\n" + count.__str__())
            # One <product> element per URL.
            xml_counter = ET.SubElement(root, "product")
            print(url)
            r = requests.get(url.strip())
            soup = BeautifulSoup(r.content, "html.parser")
            price = soup.find("span", attrs={"class": "prc-slg"}).text
            # Page titles look like "<product name> | ..." -- keep the name.
            name = soup.title.text.split(" | ")[0]
            description = soup.find("div", attrs={"class": "pr-in-dt-cn"})
            spec_list = description.find_all("ul")
            # Second <span> of the first <ul>, skipping its first <li> --
            # assumes the site's spec-table layout; TODO confirm.
            spec_spans = spec_list[0].find_all("span")[1]
            specs = spec_spans.find_all("li")[1:]
            image = soup.find("img", attrs={"class": "ph-gl-img"}).get("src")
            xml_name = ET.SubElement(xml_counter, "Name")
            xml_name.text = name.strip()
            xml_price = ET.SubElement(xml_counter, "Price")
            xml_price.text = price.strip()
            xml_image = ET.SubElement(xml_counter, "Image")
            xml_image.text = image.strip()
            xml_desc = ET.SubElement(xml_counter, "Description")
            for spec in specs:
                spectext = spec.text
                # NOTE(review): the guard tests " : " but the split uses
                # ":" -- verify this mismatch is intentional.
                if " : " in spectext:
                    specname = spectext.split(":")[0]
                    specvalue = spectext.split(":")[1]
                    # print(specname + " = " + specvalue)
                    # Spec name becomes the XML tag, so spaces/parens must go.
                    xml_spec = ET.SubElement(xml_desc, specname.strip().replace(" ", "_").replace("(", "").replace(")", ""))
                    xml_spec.text = specvalue.strip()
            r.close()
            # Echo this product's XML (minus the declaration line) for
            # progress feedback.
            s = ET.tostring(xml_counter, encoding='utf8')
            s = s.decode("utf8")
            s = s.split("\n")[1]
            print(s)
            # browser.close()
    s = ET.tostring(root, encoding='utf8')
    s = s.decode("utf8")
    with open(productFileName.split(".")[0] + ".xml", "w+", encoding="utf-8") as xml_file:
        xml_file.write(s)
getProductInformation("oyun-ve-oyun-konsollari.txt")
|
<filename>localstack/services/dynamodbstreams/dynamodbstreams_api.py
import json
import uuid
import hashlib
from flask import Flask, jsonify, request, make_response
from localstack.services import generic_proxy
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_str, to_bytes
APP_NAME = 'ddb_streams_api'
app = Flask(APP_NAME)
# Registry of emulated streams, keyed by the owning table's ARN.
DDB_STREAMS = {}
# Backing Kinesis streams are named '<prefix><table name>'.
DDB_KINESIS_STREAM_NAME_PREFIX = '__ddb_stream_'
# API actions arrive as '<prefix>.<Operation>' in the x-amz-target header.
ACTION_HEADER_PREFIX = 'DynamoDBStreams_20120810'
# Monotonic counter used to stamp records that lack a SequenceNumber.
SEQUENCE_NUMBER_COUNTER = 1
def add_dynamodb_stream(table_name, view_type='NEW_AND_OLD_IMAGES', enabled=True):
    """Register a DynamoDB stream for `table_name`, backed by a Kinesis stream."""
    if enabled:
        # The backing Kinesis stream is only created for enabled streams.
        aws_stack.create_kinesis_stream(get_kinesis_stream_name(table_name))
    entry = {
        'StreamArn': aws_stack.dynamodb_stream_arn(table_name=table_name),
        'TableName': table_name,
        'StreamLabel': 'TODO',
        'StreamStatus': 'ENABLED',
        'KeySchema': [],
        'Shards': [],
    }
    DDB_STREAMS[aws_stack.dynamodb_table_arn(table_name)] = entry
def forward_events(records):
    """Forward DynamoDB change records into their backing Kinesis streams,
    stamping a sequence number on records that lack one."""
    global SEQUENCE_NUMBER_COUNTER
    kinesis = aws_stack.connect_to_service('kinesis')
    for record in records:
        ddb_part = record['dynamodb']
        if 'SequenceNumber' not in ddb_part:
            ddb_part['SequenceNumber'] = str(SEQUENCE_NUMBER_COUNTER)
            SEQUENCE_NUMBER_COUNTER += 1
        stream = DDB_STREAMS.get(record['eventSourceARN'])
        if not stream:
            # No stream registered for this table; drop the record.
            continue
        target = get_kinesis_stream_name(table_name_from_stream_arn(stream['StreamArn']))
        kinesis.put_record(StreamName=target, Data=json.dumps(record), PartitionKey='TODO')
@app.route('/', methods=['POST'])
def post_request():
    """Single endpoint dispatching on the x-amz-target header.

    Emulates the DynamoDB Streams operations (ListStreams, DescribeStream,
    GetShardIterator, GetRecords) by delegating to the backing Kinesis
    streams; unknown actions log a warning and return an empty result.
    """
    action = request.headers.get('x-amz-target')
    data = json.loads(to_str(request.data))
    result = {}
    kinesis = aws_stack.connect_to_service('kinesis')
    if action == '%s.ListStreams' % ACTION_HEADER_PREFIX:
        result = {
            'Streams': list(DDB_STREAMS.values()),
            'LastEvaluatedStreamArn': 'TODO'
        }
    elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX:
        for stream in DDB_STREAMS.values():
            if stream['StreamArn'] == data['StreamArn']:
                result = {
                    'StreamDescription': stream
                }
                # get stream details
                dynamodb = aws_stack.connect_to_service('dynamodb')
                table_name = table_name_from_stream_arn(stream['StreamArn'])
                stream_name = get_kinesis_stream_name(table_name)
                stream_details = kinesis.describe_stream(StreamName=stream_name)
                table_details = dynamodb.describe_table(TableName=table_name)
                stream['KeySchema'] = table_details['Table']['KeySchema']
                # Replace Kinesis ShardIDs with ones that mimic actual
                # DynamoDBStream ShardIDs.
                stream_shards = stream_details['StreamDescription']['Shards']
                for shard in stream_shards:
                    # NOTE(review): shard_id()'s first parameter is named
                    # stream_arn but a stream *name* is passed here; the ids
                    # are namespaced by name as a result -- confirm intended.
                    shard['ShardId'] = shard_id(stream_name, shard['ShardId'])
                stream['Shards'] = stream_shards
                break
        if not result:
            return error_response('Requested resource not found', error_type='ResourceNotFoundException')
    elif action == '%s.GetShardIterator' % ACTION_HEADER_PREFIX:
        # forward request to Kinesis API
        stream_name = stream_name_from_stream_arn(data['StreamArn'])
        stream_shard_id = kinesis_shard_id(data['ShardId'])
        result = kinesis.get_shard_iterator(StreamName=stream_name,
            ShardId=stream_shard_id, ShardIteratorType=data['ShardIteratorType'])
    elif action == '%s.GetRecords' % ACTION_HEADER_PREFIX:
        kinesis_records = kinesis.get_records(**data)
        result = {'Records': [], 'NextShardIterator': kinesis_records.get('NextShardIterator')}
        for record in kinesis_records['Records']:
            # Records were stored as JSON strings by forward_events().
            result['Records'].append(json.loads(to_str(record['Data'])))
    else:
        print('WARNING: Unknown operation "%s"' % action)
    return jsonify(result)
# -----------------
# HELPER FUNCTIONS
# -----------------
def error_response(message=None, error_type=None, code=400):
    """Build a DynamoDB-style JSON error response with the given HTTP code."""
    message = message or 'Unknown error'
    error_type = error_type or 'UnknownError'
    # Fully qualify bare error type names the way the real service does.
    if 'com.amazonaws.dynamodb' not in error_type:
        error_type = 'com.amazonaws.dynamodb.v20120810#%s' % error_type
    body = {'message': message, '__type': error_type}
    return make_response(jsonify(body), code)
def get_kinesis_stream_name(table_name):
    """Return the name of the Kinesis stream backing the given table."""
    return '%s%s' % (DDB_KINESIS_STREAM_NAME_PREFIX, table_name)
def table_name_from_stream_arn(stream_arn):
    """Extract the table name from a DynamoDB stream ARN."""
    table_part = stream_arn.split(':table/')[1]
    return table_part.split('/')[0]
def stream_name_from_stream_arn(stream_arn):
    """Map a DynamoDB stream ARN to its backing Kinesis stream name."""
    return get_kinesis_stream_name(table_name_from_stream_arn(stream_arn))
def random_id(stream_arn, kinesis_shard_id):
    """Derive a stable hex id for a shard, namespaced by the stream ARN."""
    digest = hashlib.sha1(to_bytes(stream_arn)).digest()
    namespace = uuid.UUID(bytes=digest[:16])
    return uuid.uuid5(namespace, kinesis_shard_id).hex
def shard_id(stream_arn, kinesis_shard_id):
    """Build a DynamoDB-style shard id by suffixing the Kinesis shard id."""
    suffix = random_id(stream_arn, kinesis_shard_id)
    return '{}-{}'.format(kinesis_shard_id, suffix)
def kinesis_shard_id(dynamodbstream_shard_id):
    """Strip the trailing suffix appended by shard_id(), recovering the
    original Kinesis shard id."""
    head = dynamodbstream_shard_id.rsplit('-', 1)[0]
    return head
def serve(port, quiet=True):
    """Serve the Flask app behind localstack's generic proxy on `port`."""
    generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
|
import os
from collections import defaultdict
# Mapping from dictionary file code to the language's full name.  Codes with
# a suffix (e.g. 'FR_lexique') are alternative dictionaries for the same
# language.
full_names = {
    'AR': 'Arabic',
    'BG': 'Bulgarian',
    'CH': 'Mandarin',
    'CH_char': 'Mandarin',
    'EN': 'English',
    'WU': 'Cantonese',
    'CR': 'Croatian',
    'CZ': 'Czech',
    'FR': 'French',
    'FR_lexique': 'French',
    'FR_prosodylab': 'French',
    'GE': 'German',
    'GE_prosodylab': 'German',
    'HA': 'Hausa',
    'JA': 'Japanese',
    'KO': 'Korean',
    'KO_jamo': 'Korean',
    'RU': 'Russian',
    'PO': 'Portuguese',
    'PL': 'Polish',
    'SP': 'Spanish',
    'SA': 'Swahili',
    'SW': 'Swedish',
    'TA': 'Tamil',
    'TH': 'Thai',
    'TU': 'Turkish',
    'VN': 'Vietnamese',
    'UA': 'Ukrainian',
}
# Source dictionaries live under root_dir; cleaned copies go to output_dir.
root_dir = r'E:\Data\dictionaries\raw'
output_dir = r'E:\Data\dictionaries\cleaned'
os.makedirs(output_dir, exist_ok=True)
def load_file(path):
    """Load a pronunciation dictionary, filtering out 'weird' entries.

    Relies on the module-level globals ``weird_char_set`` and
    ``weird_phone_set`` being set for the language being processed.

    Each line is "word<TAB>pronunciation" (or space-separated).  Entries
    whose pronunciation contains a weird phone are dropped entirely;
    entries whose word contains a weird character or ends with '-' are
    set aside and not returned.

    Returns a dict mapping lowercase word -> set of pronunciation tuples.
    """
    grapheme_set = set()
    phone_set = set()
    regular = defaultdict(set)
    weird = defaultdict(set)
    with open(path, 'r', encoding='utf8') as f:
        for line in f:
            line = line.strip()
            if '\t' in line:
                word, pron = line.split('\t')
            else:
                word, pron = line.split(' ', maxsplit=1)
            word = word.lower()
            pron = tuple(x for x in pron.split(' ') if x)
            # Drop the entry completely if any weird phone appears.
            if any(p in pron for p in weird_phone_set):
                continue
            is_weird = any(c in word for c in weird_char_set) or word.endswith('-')
            if is_weird:
                weird[word].add(pron)
            else:
                # Removed the per-entry debug print that spammed stdout for
                # every accepted word.
                regular[word].add(pron)
                grapheme_set.update(word)
                phone_set.update(pron)
    # Summary diagnostics.  Bug fix: the old `print(regular['zwei'])` debug
    # line inserted a spurious empty 'zwei' entry into the defaultdict,
    # which then leaked into the saved dictionary.
    print(len(weird))
    print(len(regular))
    print('GRAPH', sorted(grapheme_set))
    print('PHONE', phone_set)
    return regular
def save_dictionary(word_dict, path):
    """Write word -> pronunciations as tab-separated UTF-8 lines, one line
    per (word, pronunciation) pair."""
    with open(path, 'w', encoding='utf8') as out:
        for word, pronunciations in word_dict.items():
            for pron in pronunciations:
                out.write('{}\t{}\n'.format(word, ' '.join(pron)))
# Digits 0-9 count as "weird" characters for most languages.
_DIGITS = [str(x) for x in range(10)]

# Per-dictionary cleaning configuration: code -> (weird characters, weird
# phones).  Transcribed from the old per-code if/elif chain; the dead
# duplicate 'CR' branch collapsed into a single entry.  'TA' never had a
# branch and so has no entry.
CLEAN_CONFIGS = {
    'AR': (['<', '>', '.', '1', '2', '4', '5', '6', '8', '9', '0'], ['+hGH']),
    'BG': (['a', 'b', 'd', 'e', 'g', 'h', 'i', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's'], ['+hGH']),
    'CH': ([], ['+hGH']),
    'CH_char': ([], ['+hGH']),
    'CR': (["'"] + _DIGITS, ['+hGH']),
    'CZ': ([')', '_', '2'], ['+hGH']),
    'EN': ([], ['+hGH']),
    'GE': (['=', '%', '*', '<', ':', '$', '_', '!', '.', '~'] + _DIGITS, ['+hGH']),
    'GE_prosodylab': (['#'], ['+hGH']),
    'FR': ([], []),
    'FR_lexique': (['.'], []),
    'FR_prosodylab': (['.'], []),
    'HA': ([], ['+hGH']),
    'JA': (['9', '〇', '×', 'A', 'F', 'N', 'T', '・', '%', '&', '(', '+', '0', '1', '2', '3', '4', '5',
            '7', '8', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
            'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y'], []),
    'KO': (['e', 'i', 'n', 'o', 's'] + _DIGITS, ['+hGH']),
    'KO_jamo': (['e', 'i', 'n', 'o', 's'] + _DIGITS, ['+hGH']),
    'PL': ([] + _DIGITS, ['+hGH']),
    'PO': (['�', '$', '&', '+', '.', '/', ':', '<', '>', '_', '`', '}'] + _DIGITS, ['+hGH']),
    'RU': (['c', 'v'] + _DIGITS, ['+hGH']),
    'SA': ([] + _DIGITS, ['+hGH']),
    'SP': (['<', '>', '^'] + _DIGITS, ['+hGH']),
    'SW': (['&', '>'] + _DIGITS, ['+hGH']),
    'TH': ([',', '.', '<', '>', '"', '-'] + _DIGITS, ['+hGH']),
    'TU': (['%', '&', '+', '.', ';', '=', '̇'] + _DIGITS, ['+hGH']),
    'UA': (['a', 'b', 'e', 'f', 'g', 'i', 'k', 'l', 'n', 'o', 'p', 'r', 's', 'u', 'w', '–'] + _DIGITS, ['+hGH']),
    'VN': (['.'] + _DIGITS, ['+hGH']),
    'WU': (['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
            'S', 'T', 'U', 'V', 'X', 'a', 'c', 'd', 'e', 'i', 'k', 'n', 'p', 'r', 's', 't', 'w', 'x'] + _DIGITS, ['+hGH']),
}

# In the original if/elif chain every branch except 'AR' started with
# `continue`, i.e. only Arabic was actually processed.  Preserve that by
# whitelisting the enabled codes; add codes here to re-enable them.
ENABLED_CODES = {'AR'}

for code in full_names:
    if code not in ENABLED_CODES or code not in CLEAN_CONFIGS:
        continue
    # load_file() reads these module-level globals.
    weird_char_set, weird_phone_set = CLEAN_CONFIGS[code]
    dict_path = os.path.join(root_dir, '{}_dictionary.txt'.format(code))
    cleaned = load_file(dict_path)
    cleaned_path = os.path.join(output_dir, '{}_cleaned.txt'.format(code))
    save_dictionary(cleaned, cleaned_path)
#==============================================================================
#
# Program: ParaView
# Module: extract_selection.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
r"""
This module is used by vtkPythonExtractSelection filter.
"""
import paraview
from paraview.vtk import dataset_adapter
from numpy import *
from paraview.vtk.algorithms import *
def __vtk_in1d(a, b):
    """Element-wise membership test of `a` in `b`, returned as a boolean mask."""
    try:
        mask = in1d(a, b)
    except (NameError, ValueError):
        # in1d was only introduced in numpy 1.4.0, and it may also fail on
        # VTK data arrays; fall back to a pure-Python membership scan.
        mask = array([item in b for item in a])
    return mask

contains = __vtk_in1d
#class _array(object):
# """used to wrap numpy array to add support for == operator
# that compares two array to generate a mask using numpy.in1d() method"""
# def __init__(self, array):
# self.array = array
#
# def __eq__(self, other):
# if type(other) == ndarray or type(other) == list:
# return in1d(self.array, other)
# return self.array == other
def PassBlock(self, iterCD, selection_node):
    """Return False when the iterator's current block fails the selection's
    block criteria (AMR index/level or composite index); True otherwise."""
    props = selection_node.GetProperties()
    if iterCD.IsA("vtkHierarchicalBoxDataIterator"):
        index_key = selection_node.HIERARCHICAL_INDEX()
        if props.Has(index_key) and iterCD.GetCurrentIndex() != props.Get(index_key):
            return False
        level_key = selection_node.HIERARCHICAL_LEVEL()
        if props.Has(level_key) and iterCD.GetCurrentLevel() != props.Get(level_key):
            return False
    elif iterCD.IsA("vtkCompositeDataIterator"):
        flat_key = selection_node.COMPOSITE_INDEX()
        if props.Has(flat_key) and iterCD.GetCurrentFlatIndex() != props.Get(flat_key):
            return False
    return True
def ExtractElements(self, inputDS, selection, mask):
    """Extract the elements of inputDS selected by `mask`.

    `mask` may be None (nothing selected), a bool (all-or-nothing), or a
    per-element array, which is converted to a VTK int8 array and passed to
    the underlying ExtractElements overload.
    """
    if mask is None:
        # nothing was selected
        return None
    elif type(mask) == bool:
        if mask:
            # FIXME: We need to add the "vtkOriginalIds" array.
            return inputDS
        else:
            # nothing was extracted.
            return None
    else:
        # mask must be an array. Process it.
        mask_array = dataset_adapter.numpyTovtkDataArray(int8(mask), "_mask_array")
        retVal = self.ExtractElements(inputDS, selection, mask_array)
        if retVal:
            # Release the extra VTK-side reference before handing the
            # result back.
            retVal.UnRegister(None)
            return retVal
        return None
class CompositeDataArrayIterable(object):
    """An iterable that traverses, in leaf order, every data array with a
    given name in the leaves of a composite dataset.

    dataSet   -- a vtkCompositeDataSet whose leaves are scanned
    arrayName -- name of the array to collect from each leaf
    assoc     -- attribute association (as passed to GetAttributes)
    """
    def __init__(self, dataSet, arrayName, assoc):
        if not dataSet.IsA("vtkCompositeDataSet"):
            raise TypeError("Input DataSet is not a vtkCompositeDataSet!")
        self.Arrays = []
        iterCD = dataSet.NewIterator()
        while not iterCD.IsDoneWithTraversal():
            dataObj = iterCD.GetCurrentDataObject()
            pyDataObj = dataset_adapter.WrapDataObject(dataObj)
            dsa = dataset_adapter.DataSetAttributes(
                dataObj.GetAttributes(assoc), pyDataObj, assoc)
            # Leaves that don't carry the array are simply skipped.
            if arrayName in dsa.keys():
                self.Arrays.append(dsa[arrayName])
            iterCD.GoToNextItem()
        # Release the VTK-side reference held on the iterator.
        iterCD.UnRegister(None)
        del iterCD
    def __len__(self):
        # Total element count across all collected leaf arrays.
        result = 0
        for array in self.Arrays:
            result += len(array)
        return result
    def __iter__(self):
        # Chain the per-leaf arrays into one flat iteration.
        import itertools
        return itertools.chain.from_iterable(self.Arrays)
def ExecData(self, inputDS, selection, compositeDataSet = None):
    """Evaluate the selection's query string on one non-composite dataset.

    inputDS is a non-composite data object. If it is a leaf of a composite
    data set, pass the entire data set as compositeDataSet (this enables the
    ``<name>_composite`` variables in the query).

    Returns whatever ExtractElements() produces: the extracted dataset, the
    input itself, or None."""
    selection_node = selection.GetNode(0)
    array_association = 1
    # convert from vtkSelectionNode's field type to vtkDataObject's field association
    if(selection_node.GetFieldType() == 0):
        array_association = 1
    elif(selection_node.GetFieldType() == 1):
        array_association = 0
    # wrap the data objects. makes them easier to use.
    do = dataset_adapter.WrapDataObject(inputDS)
    dsa = dataset_adapter.DataSetAttributes(
        inputDS.GetAttributes(array_association),
        do, array_association)
    # Global operations like global_max, etc require that all processes have
    # all array names available on all processors.
    # Sync all of the array names if using multiple processes.
    # Use empty array by default, then override them with the data from this
    # node.
    new_locals = {}
    if vtkProcessModule.GetProcessModule().GetNumberOfLocalPartitions() > 1:
        from mpi4py import MPI
        allArrayNames = set([paraview.make_name_valid(name) for name in dsa.keys()])
        # Gather every rank's names so each rank defines the full union.
        arrayNames = MPI.COMM_WORLD.allgather(list(allArrayNames))
        for rankNames in arrayNames:
            for arrayName in rankNames:
                allArrayNames.add(arrayName)
        for arrayName in allArrayNames:
            new_locals[arrayName] = dataset_adapter.VTKArray([])
    # define global variables for all the arrays.
    for arrayname in dsa.keys():
        name = paraview.make_name_valid(arrayname)
        array = dsa[arrayname]
        if compositeDataSet:
            # Also expose the array across all leaves of the composite
            # dataset as "<name>_composite" for whole-tree reductions.
            compIter = CompositeDataArrayIterable(
                compositeDataSet, arrayname, array_association)
            new_locals[name + "_composite"] = compIter
            array.composite_iterator = compIter
        new_locals[name] = array
    # Convenience aliases for the dataset itself plus the element-id array.
    new_locals["cell"] = do
    new_locals["dataset"] = do
    new_locals["input"] = do
    new_locals["element"] = do
    new_locals["id"] = arange(inputDS.GetNumberOfElements(
        array_association))
    # evaluate the query expression. The expression should return a mask which
    # is either an array or a boolean value.
    # NOTE(review): eval() executes arbitrary code from the query string --
    # queries must come from trusted sources only.
    mask = None
    if len(selection_node.GetQueryString()) > 0:
        try:
            mask = eval(selection_node.GetQueryString(), globals(), new_locals)
        except NameError:
            # Query referenced an array this dataset does not have; treat it
            # as "nothing selected" rather than failing the whole filter.
            pass
    # extract the elements from the input dataset using the mask.
    extracted_ds = ExtractElements(self, inputDS, selection, mask)
    del mask
    del new_locals
    del do
    del dsa
    return extracted_ds
def Exec(self, inputDO, selection, outputDO):
    """Entry point: evaluate ``selection`` on ``inputDO`` and copy the
    extracted elements into ``outputDO``. Composite datasets are walked leaf
    by leaf; leaves filtered out by the block criteria are skipped."""
    selection_node = selection.GetNode(0)
    if not inputDO.IsA("vtkCompositeDataSet"):
        # Plain dataset: a single ExecData() call does all the work.
        extracted = ExecData(self, inputDO, selection)
        if extracted:
            outputDO.ShallowCopy(extracted)
            del extracted
        return True
    # Composite dataset: mirror the input tree structure, then call
    # ExecData() only for the leaves that pass the block-criteria, if any.
    outputDO.CopyStructure(inputDO)
    leafIter = inputDO.NewIterator()
    leafIter.UnRegister(None)
    while not leafIter.IsDoneWithTraversal():
        if PassBlock(self, leafIter, selection_node):
            extracted = ExecData(self, leafIter.GetCurrentDataObject(), selection, inputDO)
            outputDO.SetDataSet(leafIter, extracted)
            del extracted
        leafIter.GoToNextItem()
    del leafIter
    return True
# Repository: mkazin/SubnauticaMap (GitHub stars: 0)
import unittest
from mongoengine import connect
from controller.user_data import UserDataController
from model.map_data import Marker
from model.player_data import PlayerData
class UserDataControllerTestCase(unittest.TestCase):
    """Exercises UserDataController marker operations against an in-memory
    (mongomock) database. One player document is created once per class and
    its map_data is wiped before every test."""

    # Shared mongomock connection and the single player document reused by
    # every test method.
    db = None
    player_singleton = None

    @classmethod
    def setUpClass(cls):
        cls.db = connect('testdb', host="mongomock://localhost", port=27017)
        cls.db.drop_database('testdb')
        cls._create_player()

    @classmethod
    def tearDownClass(cls):
        cls.db.drop_database('testdb')
        cls.db.close()

    def setUp(self):
        # Start every test with the shared player and an empty marker list.
        self.player = UserDataControllerTestCase.player_singleton
        self.player.map_data = []
        self.player.save()

    def test_create_new_player(self):
        new_player = UserDataController.create_new_player(
            player_id='player id',
            name='name',
            email='email',
            profile_pic='picture',
            email_verified=False,
        )
        self.assertIsNotNone(new_player)
        # A freshly created player starts out with one default marker.
        self.assertEqual(1, len(new_player.map_data))

    def test_update_marker_type_when_marker_existed(self):
        old_type_name = 'Old Type Name'
        self._create_marker(bearing=100, distance=456, depth=123, x=5, y=5,
                            name='Test Marker', marker_type_name=old_type_name,
                            color='Old Color')
        UserDataController.update_marker_type(
            self.player, old_type_name, 'New Type Name', 'New Color')
        self._force_db_map_data_reload()
        self.assertEqual(1, len(self.player.map_data))
        updated = self.player.map_data[0]
        self.assertEqual('New Type Name', updated.marker_type_name)
        self.assertEqual('New Color', updated.color)

    def test_update_marker_type_when_multiple_markers_exist(self):
        old_type_name = 'Old Type Name'
        for marker_name in ('First Marker', 'Second Marker'):
            self._create_marker(bearing=100, distance=456, depth=123, x=5, y=5,
                                name=marker_name, marker_type_name=old_type_name,
                                color='Old Color')
        UserDataController.update_marker_type(
            self.player, old_type_name, 'New Type Name', 'New Color')
        self._force_db_map_data_reload()
        # Both markers of the old type should have been rewritten.
        self.assertEqual(2, len(self.player.map_data))
        for marker in self.player.map_data:
            self.assertEqual('New Type Name', marker.marker_type_name)
            self.assertEqual('New Color', marker.color)

    def test_update_marker_type_when_other_marker_types_should_be_ignored(self):
        untouched_type = 'Very different from the old type name'
        self._create_marker(bearing=100, distance=456, depth=123, x=5, y=5,
                            name='Test Marker',
                            marker_type_name=untouched_type,
                            color='A Completely Different Color')
        UserDataController.update_marker_type(
            self.player, 'Old Type Name', 'New Type Name', 'New Color')
        # Force a reload from the database, should find our one marker
        self._force_db_map_data_reload()
        self.assertEqual(1, len(self.player.map_data))
        self.assertEqual(untouched_type, self.player.map_data[0].marker_type_name)

    def _create_marker(self, bearing, distance, depth, x, y, name, marker_type_name, color):
        # Append a new Marker to the player's map and persist it.
        marker = Marker(bearing=bearing, distance=distance, depth=depth, x=x, y=y,
                        name=name, marker_type_name=marker_type_name, color=color)
        if not hasattr(self.player, 'map_data'):
            self.player.map_data = []
        self.player.map_data.append(marker)
        self.player.save(cascade=True)

    def test_find_existing_markers_of_type_name_happy_path(self):
        wanted_type = 'Old Type Name'
        for marker_name, type_name in (('First Marker', wanted_type),
                                       ('Second Marker', 'Different marker type'),
                                       ('Third Marker', wanted_type)):
            self._create_marker(bearing=100, distance=456, depth=123, x=5, y=5,
                                name=marker_name, marker_type_name=type_name,
                                color='Color')
        found = UserDataController.find_existing_markers_of_type_name(self.player, wanted_type)
        self.assertEqual(2, len(found))
        self.assertSetEqual({'First Marker', 'Third Marker'},
                            {marker.name for marker in found})

    def test_find_existing_markers_of_type_name_when_missing(self):
        self._create_marker(bearing=100, distance=456, depth=123, x=5, y=5,
                            name='Second Marker',
                            marker_type_name='Different marker type',
                            color='Color')
        found = UserDataController.find_existing_markers_of_type_name(self.player, 'Type Name')
        self.assertEqual(0, len(found))

    def test_find_existing_marker_with_name_happy_path(self):
        wanted_name = 'Hello Marker'
        self._create_marker(bearing=100, distance=456, depth=123, x=5, y=5,
                            name=wanted_name,
                            marker_type_name='Marker Type',
                            color='Marker Color')
        found = UserDataController.find_existing_marker_with_name(self.player, wanted_name)
        self.assertEqual(wanted_name, found.name)

    def test_find_existing_marker_with_name_when_missing(self):
        self.assertEqual(0, len(self.player.map_data))
        found = UserDataController.find_existing_marker_with_name(self.player, 'No such marker')
        self.assertIsNone(found)

    @staticmethod
    def _create_player():
        # Build the shared player document used by every test.
        UserDataControllerTestCase.player_singleton = UserDataController.create_new_player(
            player_id='test_id',
            name='Test Player',
            email='<EMAIL>',
            profile_pic='/static/test_user.svg',
            email_verified=True,
        )

    def _force_db_map_data_reload(self):
        # Blank the in-memory copy, prove it is empty, then pull the
        # persisted markers back from the database.
        self.player.map_data = []
        self.assertEqual(0, len(self.player.map_data))
        self.player.reload()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
# Filename: util/test/chpl_launchcmd.py
#!/usr/bin/env python
"""Run Chapel test (execution only) inside pbs or slurm batch job.
The job name is set from the environment variable CHPL_LAUNCHCMD_NAME_PREFIX
(defaulting to Chpl) and the name of the program being executing. For example,
running `chpl_launchcmd.py ./hello` would use the name Chpl-hello.
The high level overview of what this does:
* Detect slurm or flavor of qsub (either PBSPro or moab).
* If none, raises error.
* Parses number locales and wall time from the test command args so they can
be sent to qsub/slurm.
* Rebuilds the test command.
* Launches the job by passing the test command on stdin to qsub (batch
mode). Slurm jobs just run the chapel executable, setting
CHPL_LAUNCHER_USE_SBATCH=true. Stdout/stderr are directed to a temporary
file designated by the script.
* Polls qstat/squeue with the given job id every second until the status is
complete.
* Prints the contents of the temp files with stdout/stderr from the job to
stdout/stderr.
* Cleans up the temp file and exits.
"""
from __future__ import unicode_literals, with_statement
import argparse
import contextlib
import datetime
import logging
import os
import os.path
import re
import select
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import xml.etree.ElementTree
# Add the chplenv dir to the python path.
chplenv_dir = os.path.join(os.path.dirname(__file__), '..', 'chplenv')
sys.path.insert(0, os.path.abspath(chplenv_dir))
import chpl_cpu
__all__ = ('main')
def main():
    """Entry point: build the job runner appropriate for this system, run the
    test through the batch scheduler, and relay its stdout/stderr."""
    runner = AbstractJob.init_from_environment()
    out_text, err_text = runner.run()
    sys.stdout.write(out_text)
    sys.stderr.write(err_text)
class AbstractJob(object):
    """Abstract job runner implementation.

    Concrete subclasses (slurm, moab, PBSPro flavors) override the class
    attributes below and implement submit_job()/status().
    """

    # These class attributes should always be None on the AbstractJob
    # class. They *should only* be defined on and accessed from a sub class.

    # submit_bin is the program used to submit jobs (i.e. qsub).
    submit_bin = None

    # status_bin is the program used to query the status of jobs (i.e. qstat,
    # squeue)
    status_bin = None

    # argument name to use when specifying specific nodes (i.e. hostlist,
    # mppnodes)
    hostlist_resource = None

    # argument name for specifying number of nodes (i.e. nodes, mppwidth)
    num_nodes_resource = None

    # argument name for specifying number of cpus (i.e. mppdepth)
    num_cpus_resource = None

    # argument name for specifying number of processing elements per node (i.e.
    # mppnppn)
    processing_elems_per_node_resource = None

    # redirect_output decides whether we redirect output directly to the output
    # files or whether we let the launcher and queueing system do it.
    redirect_output = None
def __init__(self, test_command, reservation_args):
"""Initialize new job runner.
:type test_command: list
:arg test_command: command to run in qsub
:type reservation_args: argparse.Namespace
:arg reservation_args: reservation arguments parsed from cli
"""
self.test_command = test_command
self.num_locales = reservation_args.numLocales
self.walltime = reservation_args.walltime
self.hostlist = reservation_args.hostlist
logging.debug('Created instance of: {0}'.format(self))
def __repr__(self):
"""Return string representation of this instance."""
cls_name = str(type(self))
attrs = ', '.join(map(lambda x: '{0}={1}'.format(x, getattr(self, x, None)),
['test_command', 'num_locales', 'walltime', 'hostlist']))
return '{0}({1})'.format(cls_name, attrs)
def full_test_command(self, output_file, error_file):
"""Returns instance's test_command prefixed with command to change to
testing_dir. This is required to support both PBSPro and moab flavors
of PBS. Whereas moab provides a -d argument when calling qsub, both
support the $PBS_O_WORKDIR argument. Optionally, this can redirect
stdout/stderr directly to the output files to avoid using a spool file.
:type output_file: str
:arg output_file: stdout output file location
:type error_file: str
:arg error_file: stderr output file location
:rtype: list
:returns: command to run in qsub with changedir call and redirection
"""
full_test_command = ['cd', '$PBS_O_WORKDIR', '&&']
# If the first argument of the test command is a file (it should
# always be the executable), then add a "test -f ./execname" call
# before running the command. This works around some potential nfs
# configuration issues that can happen when running from lustre
# mounted over nfs.
if os.path.exists(self.test_command[0]):
logging.debug('Adding "test -f {0}" to launcher command.'.format(
self.test_command[0]))
full_test_command += ['test', '-f', self.test_command[0], '&&']
full_test_command.extend(self.test_command)
if self.redirect_output:
full_test_command.extend(['>{0} 2>{1}'.format(output_file, error_file)])
return full_test_command
@property
def num_cpus(self):
"""Returns the number of cpus that qsub should reserve. PBSPro requires
the cpu reservation be given to both qsub, and aprun.
If cnselect is not callable, raise RuntimeError.
:rtype: int
:returns: Number of cpus to reserve, or -1 if there was no cnselect output
"""
try:
n_cpus = os.environ.get('CHPL_LAUNCHCMD_NUM_CPUS')
if n_cpus is not None:
return n_cpus
logging.debug('Checking for number of cpus to reserve.')
cnselect_proc = subprocess.Popen(
['cnselect', '-Lnumcores'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
logging.debug('Communicating with cnselect process.')
stdout, stderr = cnselect_proc.communicate()
except OSError as ex:
raise RuntimeError(ex)
first_line = stdout.split('\n')[0]
if first_line:
return int(first_line)
else:
msg = 'cnselect -Lnumcores had no output.'
logging.error(msg)
raise ValueError(msg)
@property
def job_name(self):
"""Returns job name string from test command and CHPL_LAUNCHCMD_NAME_PREFIX
env var.
:rtype: str
:returns: job name
"""
prefix = os.environ.get('CHPL_LAUNCHCMD_NAME_PREFIX', 'Chpl')
logging.debug('Job name prefix is: {0}'.format(prefix))
cmd_basename = os.path.basename(self.test_command[0])
logging.debug('Test command basname: {0}'.format(cmd_basename))
job_name = '{0}-{1}'.format(prefix, cmd_basename)
logging.info('Job name is: {0}'.format(job_name))
return job_name
    @property
    def select_suffix(self):
        """Returns suffix for select expression based instance attributes.

        The base implementation requests nothing extra; subclasses may append
        additional constraints to the node-count resource request.

        :rtype: str
        :returns: select expression suffix, or empty string
        """
        return ''
    @property
    def knl(self):
        """Returns True when testing KNL (Xeon Phi).

        :rtype: bool
        :returns: True when testing KNL
        """
        # chpl_cpu comes from the chplenv directory prepended to sys.path at
        # the top of this module.
        return chpl_cpu.get('target').cpu == 'mic-knl'
def _qsub_command_base(self, output_file, error_file):
"""Returns base qsub command, without any resource listing.
:type output_file: str
:arg output_file: stdout output file location
:type error_file: str
:arg error_file: stderr output file location
:rtype: list
:returns: qsub command as list of strings
"""
submit_command = [self.submit_bin, '-V', '-N', self.job_name]
if not self.redirect_output:
submit_command.extend(['-o', output_file, '-e', error_file])
else:
# even when redirecting output, PBS errors are sent to the error
# stream, so make sure we can find errors if they occur
submit_command.extend(['-j', 'oe', '-o', '{0}.more'.format(error_file)])
if self.walltime is not None:
submit_command.append('-l')
submit_command.append('walltime={0}'.format(self.walltime))
return submit_command
def _qsub_command(self, output_file, error_file):
"""Returns qsub command list. This implementation is the default that works for
standard mpp* options. Subclasses can implement versions that meet their needs.
:type output_file: str
:arg output_file: stdout output file location
:type error_file: str
:arg error_file: stderr output file location
:rtype: list
:returns: qsub command as list of strings
"""
submit_command = self._qsub_command_base(output_file, error_file)
if self.num_locales >= 0:
submit_command.append('-l')
submit_command.append('{0}={1}{2}'.format(
self.num_nodes_resource, self.num_locales, self.select_suffix))
if self.hostlist is not None:
submit_command.append('-l')
submit_command.append('{0}={1}'.format(
self.hostlist_resource, self.hostlist))
if self.num_cpus_resource is not None:
submit_command.append('-l')
submit_command.append('{0}={1}'.format(
self.num_cpus_resource, self.num_cpus))
if self.processing_elems_per_node_resource is not None:
submit_command.append('-l')
submit_command.append('{0}={1}'.format(
self.processing_elems_per_node_resource, 1))
more_l = os.environ.get('CHPL_LAUNCHCMD_QSUB_MORE_L')
if more_l:
submit_command.append('{0}'.format(more_l))
logging.debug('qsub command: {0}'.format(submit_command))
return submit_command
def run(self):
"""Run batch job in subprocess and wait for job to complete. When finished,
returns output as string.
:rtype: str
:returns: stdout/stderr from job
"""
with _temp_dir() as working_dir:
output_file = os.path.join(working_dir, 'test_output.log')
error_file = os.path.join(working_dir, 'test_error_output.log')
input_file = os.path.join(working_dir, 'test_input')
testing_dir = os.getcwd()
job_id = self.submit_job(testing_dir, output_file, error_file, input_file)
logging.info('Test has been queued (job id: {0}). Waiting for output...'.format(job_id))
# TODO: The while condition here should look for jobs that become held,
# are in the queue too long, or ??? and do something
# intelligent. For example, if the job is in the queue longer
# than the walltime, it should probably be deleted (qdel
# <job_id>) and a timeout should be reported. Here are all the
# pbs (torque) job statuses:
#
# C - Job is completed after having run/
# E - Job is exiting after having run.
# H - Job is held.
# Q - job is queued, eligible to run or routed.
# R - job is running.
# T - job is being moved to new location.
# W - job is waiting for its execution time
# (-a option) to be reached.
# S - (Unicos only) job is suspend.
#
# (thomasvandoren, 2014-04-09)
def job_status(job_id, output_file):
"""Returns the status of the job specified by job_id
The status is determined by calling status(job_id). If that
call is successful the result is returned. The exact code
returned is up to status(job_id) but it must support 'C' for
complete, 'Q' for queued/waiting to run, and 'R' for running
status(job_id) can raise a ValueError, which can indicate that
the job has completed *and* been dequeued. If the output file
exists and the job has been dequeued, it is safe to assume it
completed. Otherwise we raise the error
"""
try:
job_status = self.status(job_id)
return job_status
except ValueError as ex:
# ValueError may indicate that the job completed and was
# dequeued before we last checked the status. If the output
# file exists, assume success. Otherwise re raise error
# message.
if os.path.exists(output_file):
return 'C'
raise
exec_start_time = time.time()
alreadyRunning = False
status = job_status(job_id, output_file)
while status != 'C':
if not alreadyRunning and status == 'R':
alreadyRunning = True
exec_start_time = time.time()
time.sleep(.5)
status = job_status(job_id, output_file)
exec_time = time.time() - exec_start_time
# Note that this time isn't very accurate as we don't get the exact
# start or end time, however this does give a better estimate than
# timing the whole binary for cases where the time in the queue is
# large. It tends to be a second or two larger than real exec time
exec_time_file = os.environ.get('CHPL_LAUNCHCMD_EXEC_TIME_FILE')
if exec_time_file != None:
with open(exec_time_file, 'w') as fp:
fp.write('{0:3f}'.format(exec_time))
logging.debug('{0} reports job {1} as complete.'.format(
self.status_bin, job_id))
if not os.path.exists(output_file):
logging.error('Output file from job does not exist at: {0}'.format(
output_file))
raise ValueError('[Error: output file from job (id: {0}) does not exist at: {1}]'.format(
job_id, output_file))
# try removing the file stdin was copied to, might not exist
logging.debug('removing stdin file.')
try:
os.unlink(input_file)
except OSError:
pass
logging.debug('Reading output file.')
with open(output_file, 'r') as fp:
output = fp.read()
logging.debug('Reading error file.')
with open(error_file, 'r') as fp:
error = fp.read()
try:
with open('{0}.more'.format(error_file), 'r') as fp:
error += fp.read()
except:
pass
logging.info('The test finished with output of length {0}.'.format(len(output)))
return (output, error)
    def submit_job(self, testing_dir, output_file, error_file, input_file):
        """Submit a new job using ``testing_dir`` as the working dir,
        ``output_file`` as the location for stdout, and ``error_file`` as the
        location for stderr. Returns the job id on success. AbstractJob does
        not implement this method. It is the responsibility of the sub class.

        :type testing_dir: str
        :arg testing_dir: working directory for running test

        :type output_file: str
        :arg output_file: stdout log filename

        :type error_file: str
        :arg error_file: stderr log filename

        :type input_file: str
        :arg input_file: filename the job's stdin is copied to

        :rtype: str
        :returns: job id
        """
        raise NotImplementedError('submit_job class method is implemented by sub classes.')
@classmethod
def _detect_job_flavor(cls):
"""Returns appropriate class based on the detected version of pbs or slurm in
the environment.
If neither srun or qsub is not callable, raise RuntimeError.
If MOABHOMEDIR is set in the environment, assume moab and return
MoabJob type.
Otherwise, if qsub is callable assume PBSPro, and return PbsProJob
type.
If srun is callable, assume slurm, and return SlurmJob.
:rtype: type
:returns: SlurmJob, MoabJob, or PbsProJob depending on environment
"""
qsub_callable = False
qsub_version = ''
srun_callable = False
srun_version = ''
def get_output(cmd):
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
logging.debug('Communicating with job process.')
stdout, stderr = proc.communicate()
return stdout
# Detect if qsub is callable, and capture version output.
try:
qsub_version = get_output(['qsub', '--version'])
qsub_callable = True
except OSError:
pass
# Detect if srun is callable, and capture version output.
try:
srun_version = get_output(['srun', '--version'])
srun_callable = True
except OSError:
pass
# Favor slurm, since Cray version of slurm comes with qsub command
# that is wrapper around slurm apis.
if srun_callable:
return SlurmJob
elif qsub_callable and 'MOABHOMEDIR' in os.environ:
return MoabJob
elif qsub_callable and 'CHPL_PBSPRO_USE_MPP' in os.environ:
return MppPbsProJob
elif qsub_callable:
return PbsProJob
else: # not (qsub_callable or srun_callable)
raise RuntimeError('Could not find PBS or SLURM on system.')
    def _launch_qsub(self, testing_dir, output_file, error_file):
        """Launch job using qsub and return job id. Raises RuntimeError if
        self.submit_bin is anything but qsub.

        The test command is sent to qsub on stdin (batch mode) rather than as
        a script file.

        :type testing_dir: str
        :arg testing_dir: working directory for running test

        :type output_file: str
        :arg output_file: stdout log filename

        :type error_file: str
        :arg error_file: stderr log filename

        :rtype: str
        :returns: job id
        """
        if self.submit_bin != 'qsub':
            raise RuntimeError('_launch_qsub called for non-pbs job type!')
        logging.info(
            'Starting {0} job "{1}" on {2} nodes with walltime {3} '
            'and output file: {4}'.format(
                self.submit_bin, self.job_name, self.num_locales,
                self.walltime, output_file))
        logging.debug('Opening {0} subprocess.'.format(self.submit_bin))
        submit_proc = subprocess.Popen(
            self._qsub_command(output_file, error_file),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=testing_dir,
            env=os.environ.copy()
        )
        test_command_str = ' '.join(self.full_test_command(output_file, error_file))
        logging.debug('Communicating with {0} subprocess. Sending test command on stdin: {1}'.format(
            self.submit_bin, test_command_str))
        # NOTE(review): communicate(input=<str>) assumes text-mode pipes
        # (Python 2 era); Python 3 would require bytes or text mode -- confirm
        # the interpreter this runs under.
        stdout, stderr = submit_proc.communicate(input=test_command_str)
        logging.debug('{0} process returned with status {1}, stdout: {2} stderr: {3}'.format(
            self.submit_bin, submit_proc.returncode, stdout, stderr))
        if submit_proc.returncode != 0:
            msg = '{0} failed with exit code {1} and output: {2}'.format(
                self.submit_bin, submit_proc.returncode, stdout)
            logging.error(msg)
            raise ValueError(msg)
        # qsub prints the new job id as its only output.
        job_id = stdout.strip()
        return job_id
    @classmethod
    def init_from_environment(cls):
        """Factory to initialize new job runner instance based on version of
        pbs available and command line arguments.

        :rtype: AbstractJob
        :returns: subclass of AbstractJob based on environment
        """
        args, unparsed_args = cls._parse_args()
        # Configure logging first so the verbose flag applies to the rest of
        # the startup messages.
        cls._setup_logging(args.verbose)
        logging.info('Num locales is: {0}'.format(args.numLocales))
        logging.info('Walltime is set to: {0}'.format(args.walltime))
        test_command = cls._get_test_command(args, unparsed_args)
        logging.debug('Test command is: {0}'.format(' '.join(test_command)))
        if not test_command:
            logging.error('No test command provided.')
            raise ValueError('No test command found.')
        job_flavor = cls._detect_job_flavor()
        logging.info('Detected job flavor: {0}'.format(job_flavor.__name__))
        # Instantiate the detected subclass with the rebuilt test command.
        return job_flavor(test_command, args)
    @classmethod
    def status(cls, job_id):
        """Query job stat using ``status_bin``. AbstractJob does not implement
        this method. It is the responsibility of the sub class.

        :type job_id: str
        :arg job_id: job id

        :rtype: str
        :returns: job status (must include 'C', 'Q', and 'R' codes)
        """
        raise NotImplementedError('status class method is implemented by sub classes.')
@classmethod
def _cli_walltime(cls, walltime_str):
"""Returns walltime_str if it can be parsed by one of the known walltime
formats. Raises ValueError if walltime_str does not match a known format.
:type walltime_str: str
:arg walltime_str: walltime string from command line
:rtype: str
:returns: valid walltime string from command line
"""
try:
seconds = int(walltime_str)
logging.debug('Parsed walltime as integer seconds: {0}'.format(seconds))
return walltime_str
except ValueError:
pass
try:
seconds = float(walltime_str)
logging.debug('Parsed walltime as float seconds: {0}'.format(seconds))
return walltime_str
except ValueError:
pass
# http://www.csc.fi/english/pages/louhi_guide/batch_jobs/commands/qsub
known_formats = [
'%M:%S',
'%H:%M:%S',
'%M:%S.%f',
'%H:%M:%S.%f',
]
for fmt in known_formats:
try:
walltime = datetime.datetime.strptime(walltime_str, fmt)
logging.debug('Parsed walltime as datetime with format {0}: {1}'.format(
fmt, walltime))
return walltime_str
except ValueError:
pass
raise ValueError('Did not recognize walltime: {0}'.format(walltime_str))
@classmethod
def _get_test_command(cls, args, unparsed_args):
"""Returns test command by folding numLocales args into unparsed command line
args.
:type args: argparse.Namespace
:arg args: Namespace from parsing original args
:type unparsed_args: list
:arg unparsed_args: list of unparsed command line args that make up test command
:rtype: list
:returns: command to be tested in qsub
"""
logging.debug('Rebuilding test command from parsed args: {0} and '
'unparsed args: {1}'.format(args, unparsed_args))
if args.numLocales >= 0:
unparsed_args.append('-nl')
unparsed_args.append(str(args.numLocales))
logging.debug('Rebuild test command: {0}'.format(unparsed_args))
return unparsed_args
@classmethod
def _parse_args(cls):
"""Parse and return command line arguments. Returns tuple of Namespace with
parsed args and unparsed args.
"""
class OurFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=OurFormatter)
parser.add_argument('--CHPL_LAUNCHCMD_DEBUG', action='store_true', dest='verbose',
default=('CHPL_LAUNCHCMD_DEBUG' in os.environ),
help=('Verbose output. Setting CHPL_LAUNCHCMD_DEBUG '
'in environment also enables verbose output.'))
parser.add_argument('-nl', '--numLocales', type=int, default=-1,
help='Number locales.')
parser.add_argument('--n', help='Placeholder')
parser.add_argument('--walltime', type=cls._cli_walltime,
help='Timeout as walltime for qsub.')
parser.add_argument('--CHPL_LAUNCHCMD_HOSTLIST', dest='hostlist',
help=('Optional hostlist specification for reserving '
'specific nodes. Can also be set with env var '
'CHPL_LAUNCHCMD_HOSTLIST'))
args, unparsed_args = parser.parse_known_args()
# Allow hostlist to be set in environment variable CHPL_LAUNCHCMD_HOSTLIST.
if args.hostlist is None:
args.hostlist = os.environ.get('CHPL_LAUNCHCMD_HOSTLIST') or None
# It is bad form to use a two character argument with only a single
# dash. Unfortunately, we support it. And unfortunately, python argparse
# thinks --n is the same thing. So, we pull out --n above so we can put it
# back in the unparsed args here.
if args.n:
logging.debug('Found a --n arg. Putting it back in the unparsed args.')
unparsed_args.append('--n={0}'.format(args.n))
return args, unparsed_args
    @classmethod
    def _qstat(cls, job_id, args=None):
        """Call qstat and return output from stdout.

        Raises ValueError if exit code is non-zero.

        :type job_id: str
        :arg job_id: pbs job id

        :type args: list
        :arg args: additional arguments to pass qstat

        :rtype: str
        :returns: qsub job status
        """
        if args is None:
            args = []
        qstat_command = ['qstat'] + args + [job_id]
        logging.debug('qstat command to run: {0}'.format(qstat_command))
        logging.debug('Opening qstat subprocess.')
        # stderr is folded into stdout so error text shows up in the
        # ValueError message below.
        qstat_proc = subprocess.Popen(
            qstat_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=os.environ.copy()
        )
        logging.debug('Communicating with qstat subprocess.')
        stdout, stderr = qstat_proc.communicate()
        logging.debug('qstat process returned with status {0}, stdout: {1}, and stderr: {2}'.format(
            qstat_proc.returncode, stdout, stderr))
        if qstat_proc.returncode != 0:
            # Callers (e.g. run()'s job_status helper) rely on this ValueError
            # to detect jobs that completed and were dequeued.
            raise ValueError('Non-zero exit code {0} from qstat: "{1}"'.format(
                qstat_proc.returncode, stdout))
        else:
            return stdout
@classmethod
def _setup_logging(cls, verbose=False):
"""Setup logging to console.
:type verbose: bool
:arg verbose: if True, set log level to DEBUG
"""
# logging module configures default handlers when logging.debug/info/etc
# are called. In order for our basicConfig call to work, we need to get rid
# of those. This is generally a bad practice unless we are absolutely sure
# we are the top level script and we won't break other logging. That's
# probably true here.
#
# See note here: https://docs.python.org/2/library/logging.html#logging.log
logging.root.handlers = []
if verbose:
log_level = logging.DEBUG
else:
log_level = logging.WARN
logging.basicConfig(
level=log_level, format='[%(module)s] %(asctime)s [%(levelname)s] %(msg)s')
logging.debug('Verbose logging enabled.')
class MoabJob(AbstractJob):
    """Moab implementation of pbs job runner."""

    submit_bin = 'qsub'
    status_bin = 'qstat'
    hostlist_resource = 'hostlist'
    num_nodes_resource = 'nodes'
    num_cpus_resource = None
    redirect_output = True

    @classmethod
    def status(cls, job_id):
        """Query job status using qstat.

        Moab's ``qstat -x`` emits XML; the job state is pulled from the
        Job/job_state element.

        :type job_id: str
        :arg job_id: pbs job id

        :rtype: str
        :returns: qsub job status
        """
        output = cls._qstat(job_id, args=['-x'])
        try:
            document = xml.etree.ElementTree.fromstring(output)
            return document.find('Job').find('job_state').text
        except AttributeError as ex:
            # find() returned None somewhere along the chain.
            logging.exception('Could not find job_state element in xml output: {0}'.format(ex))
            logging.error('XML output: {0}'.format(output))
            raise
        except Exception as ex:
            logging.exception('Failed to parse qstat output: {0}'.format(ex))
            logging.error('XML output: {0}'.format(output))
            raise

    def submit_job(self, testing_dir, output_file, error_file, input_file):
        """Launch job using qsub and return job id.

        :type testing_dir: str
        :arg testing_dir: working directory for running test

        :type output_file: str
        :arg output_file: stdout log filename

        :type error_file: str
        :arg error_file: stderr log filename

        :rtype: str
        :returns: job id
        """
        # The generic qsub launcher in AbstractJob does all the work.
        return self._launch_qsub(testing_dir, output_file, error_file)
class PbsProJob(AbstractJob):
    """PBSPro implementation of pbs job runner."""

    submit_bin = 'qsub'
    status_bin = 'qstat'
    hostlist_resource = 'mppnodes'
    num_nodes_resource = 'mppwidth'
    num_cpus_resource = 'ncpus'
    redirect_output = True

    @property
    def job_name(self):
        """Takes the job_name from the super class, AbstractJob, and returns
        the last 15 characters. PBSPro limits job name to 15 characters.

        :rtype: str
        :returns: pbs job name
        """
        super_name = super(PbsProJob, self).job_name
        job_name = super_name[-15:]
        logging.info('PBSPro job name is: {0}'.format(job_name))
        return job_name

    @property
    def select_suffix(self):
        """Returns suffix for select expression based instance attributes.

        :rtype: str
        :returns: select expression suffix, or empty string
        """
        return ''

    @classmethod
    def status(cls, job_id):
        """Query job status using qstat.

        Assumes ``qstat <job_id>`` output is of the form:

        ::

            Job id            Name             User             Time Use S Queue
            ---------------- ---------------- ---------------- -------- - -----
            1889416.sdb      lj               tvandoren        00:00:03 R workq

        :type job_id: str
        :arg job_id: pbs job id

        :rtype: str
        :returns: qsub job status (the single character from the S column)
        """
        output = cls._qstat(job_id)
        lines = output.splitlines()
        if len(lines) != 3:
            logging.error('Unexpected number of lines in qstat output: {0}'.format(output))
            # Report the line count; previously this mistakenly interpolated
            # len(output) (the character count) into the message.
            raise ValueError('Expected 3 lines of qstat output, not {0}.'.format(len(lines)))
        header_line = lines[0]
        job_line = lines[-1]

        # Use regex to find position of status. Then extract the one character
        # status from the job line.
        pattern = re.compile(r'\sS\s')
        match = pattern.search(header_line)
        if match is not None:
            # The S column header is the character after the matched space.
            status_char = match.start() + 1
            return job_line[status_char]
        else:
            logging.error('Could not find S column in header line of qstat output.')
            raise ValueError('Could not find {0} pattern in header line: {1}'.format(
                pattern.pattern, header_line))

    def _qsub_command(self, output_file, error_file):
        """Returns qsub command list using select/place syntax for resource
        lists (as opposed to the deprecated and often disabled mpp* options).

        :type output_file: str
        :arg output_file: stdout output file location

        :type error_file: str
        :arg error_file: stderr output file location

        :rtype: list
        :returns: qsub command as list of strings
        """
        submit_command = self._qsub_command_base(output_file, error_file)
        select_stmt = None

        # Always use place=scatter to get 1 PE per node (mostly). Equivalent
        # to mppnppn=1.
        select_pattern = 'place=scatter,select={0}'

        # When comm=none sub_test/start_test passes -nl -1 (i.e. num locales
        # is -1). For the tests to work, reserve one node and the regular
        # ncpus (this does not happen by default).
        num_locales = self.num_locales
        if num_locales == -1:
            num_locales = 1

        if self.hostlist is not None:
            # This relies on the caller to use the correct select syntax.
            select_stmt = select_pattern.format(self.hostlist)
            select_stmt = select_stmt.replace('<num_locales>', str(num_locales))
        elif num_locales > 0:
            select_stmt = select_pattern.format(num_locales)
            # Do not set ncpus for knl.
            if self.num_cpus_resource is not None and not self.knl:
                select_stmt += ':{0}={1}'.format(
                    self.num_cpus_resource, self.num_cpus)

        if select_stmt is not None:
            select_stmt += self.select_suffix
            submit_command += ['-l', select_stmt]

        logging.debug('qsub command: {0}'.format(submit_command))
        return submit_command

    def submit_job(self, testing_dir, output_file, error_file, input_file):
        """Launch job using qsub and return job id.

        :type testing_dir: str
        :arg testing_dir: working directory for running test

        :type output_file: str
        :arg output_file: stdout log filename

        :type error_file: str
        :arg error_file: stderr log filename

        :rtype: str
        :returns: job id
        """
        return self._launch_qsub(testing_dir, output_file, error_file)
class MppPbsProJob(PbsProJob):
    """PBSPro implementation of pbs job runner that uses the mpp* options."""
    submit_bin = 'qsub'
    status_bin = 'qstat'
    hostlist_resource = 'mppnodes'
    num_nodes_resource = 'mppwidth'
    # mppdepth can be disabled on some installs; setting the
    # CHPL_PBSPRO_NO_MPPDEPTH environment variable (presence only, value
    # ignored) opts out of requesting it.
    num_cpus_resource = 'mppdepth' if 'CHPL_PBSPRO_NO_MPPDEPTH' not in os.environ else None
    processing_elems_per_node_resource = 'mppnppn'
    redirect_output = False

    def _qsub_command(self, output_file, error_file):
        # Bypass PbsProJob._qsub_command (select/place syntax) and use the
        # AbstractJob implementation, which emits the legacy mpp* resource
        # options instead.
        return AbstractJob._qsub_command(self, output_file, error_file)
class SlurmJob(AbstractJob):
    """SLURM implementation of abstract job runner."""

    submit_bin = None
    status_bin = 'squeue'
    hostlist_resource = 'nodelist'
    num_nodes_resource = None
    num_cpus_resource = None

    @classmethod
    def status(cls, job_id):
        """Query job status using squeue.

        :type job_id: str
        :arg job_id: squeue job id

        :rtype: str
        :returns: squeue job status normalized to 'C' (complete, whether
            successful or not), 'Q' (queued), or 'R' (running)
        """
        squeue_command = [
            'squeue',
            '--noheader',
            '--format', '%A %T',  # "<job_id> <status>"
            '--states', 'all',
            '--job', job_id,
        ]
        logging.debug('squeue command to run: {0}'.format(squeue_command))

        logging.debug('Opening squeue subprocess.')
        squeue_proc = subprocess.Popen(
            squeue_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=os.environ.copy()
        )
        logging.debug('Communicating with squeue subprocess.')
        stdout, stderr = squeue_proc.communicate()
        logging.debug('squeue process returned with status {0}, stdout: {1}, stderr: {2}'.format(
            squeue_proc.returncode, stdout, stderr))

        if squeue_proc.returncode != 0:
            raise ValueError('Non-zero exit code {0} from squeue: "{1}"'.format(
                squeue_proc.returncode, stdout))

        # Callers only distinguish complete/queued/running, so every terminal
        # state (successful or not) maps to 'C'.
        failure_statuses = ['CANCELLED', 'FAILED', 'TIMEOUT',
                            'BOOT_FAIL', 'NODE_FAIL', 'PREEMPTED']
        queued_statuses = ['CONFIGURING', 'PENDING']

        # Expected output shape (from --format '%A %T'): "<job_id> <status>".
        status_parts = stdout.split(' ')
        if len(status_parts) == 2:
            status = status_parts[1].strip()
            logging.info('Status for job {0} is: {1}'.format(job_id, status))
            if status == 'COMPLETED':
                logging.info('Job finished with status: {0}'.format(status))
                return 'C'
            elif status in failure_statuses:
                logging.info('Job finished with status: {0}'.format(status))
                return 'C'
            elif status in queued_statuses:
                return 'Q'
            else:
                return 'R'  # running
        else:
            raise ValueError('Could not parse output from squeue: {0}'.format(stdout))

    def submit_job(self, testing_dir, output_file, error_file, input_file):
        """Launch job using executable. Set CHPL_LAUNCHER_USE_SBATCH=true in
        environment to avoid using expect script. The executable will create a
        sbatch script and submit it. Parse and return the job id after job is
        submitted.

        :type testing_dir: str
        :arg testing_dir: working directory for running test

        :type output_file: str
        :arg output_file: stdout log filename

        :type error_file: str
        :arg error_file: stderr log filename

        :rtype: str
        :returns: job id
        """
        env = os.environ.copy()
        env['CHPL_LAUNCHER_USE_SBATCH'] = 'true'
        env['CHPL_LAUNCHER_SLURM_OUTPUT_FILENAME'] = output_file
        env['CHPL_LAUNCHER_SLURM_ERROR_FILENAME'] = error_file

        # If data is waiting on our stdin, capture it to input_file and tell
        # the launcher to feed it to the job.
        if select.select([sys.stdin, ], [], [], 0.0)[0]:
            with open(input_file, 'w') as fp:
                fp.write(sys.stdin.read())
            env['SLURM_STDINMODE'] = input_file

        # We could use stdout buffering for other configurations too, but I
        # don't think there's any need. Currently, single locale perf testing
        # is the only config that has any tests that produce a lot of output
        if os.getenv('CHPL_TEST_PERF') is not None and self.num_locales <= 1:
            env['CHPL_LAUNCHER_SLURM_BUFFER_STDOUT'] = 'true'

        cmd = self.test_command[:]

        # Add --nodelist into the command line
        if self.hostlist is not None:
            cmd.append('--{0}={1}'.format(
                self.hostlist_resource, self.hostlist))

        # Add --walltime back into the command line.
        if self.walltime is not None:
            cmd.append('--walltime')
            cmd.append(self.walltime)

        logging.debug('Command to submit job: {0}'.format(cmd))
        logging.debug('Opening job subprocess')
        submit_proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=testing_dir,
            env=env
        )
        logging.debug('Communicating with job subprocess')
        stdout, stderr = submit_proc.communicate()
        logging.debug('Job process returned with status {0}, stdout: {1}, stderr: {2}'.format(
            submit_proc.returncode, stdout, stderr))

        if submit_proc.returncode != 0:
            msg = 'Job submission ({0}) failed with exit code {1} and output: {2}'.format(
                cmd, submit_proc.returncode, stdout)
            logging.error(msg)
            raise ValueError(msg)

        # Output is: Submitted batch job 106001
        id_parts = stdout.split(' ')
        if len(id_parts) < 4:
            raise ValueError('Could not parse output from sbatch submission: {0}'.format(stdout))
        else:
            job_id = id_parts[3].strip()
            return job_id
@contextlib.contextmanager
def _temp_dir(dir_prefix='chapel-test-tmp'):
    """Context manager that creates a temporary directory in the current working
    directory with name of dir_prefix. When the manager exits, the directory is
    deleted.

    :type dir_prefix: str
    :arg dir_prefix: temp dir name prefix
    """
    # Create the directory *before* entering the try block: if mkdtemp (or
    # getcwd) raises, there is nothing to clean up, and the old code's
    # finally clause would have raised a NameError on the unbound tmp_dir,
    # masking the original error.
    cwd = os.getcwd()
    logging.debug('Creating temporary working directory in: {0}'.format(cwd))
    tmp_dir = tempfile.mkdtemp(prefix=dir_prefix, dir=cwd)
    try:
        logging.debug('Yielding temporary directory context manager.')
        yield tmp_dir
    finally:
        logging.debug('Deleting temporary working directory at: {0}'.format(tmp_dir))
        shutil.rmtree(tmp_dir)
# Script entry point: only run main() when executed directly, not on import.
if __name__ == '__main__':
    main()
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program definition for a distributed layout based on a builder."""
import dataclasses
import itertools
import logging
from typing import Callable, Dict, Optional
from acme import core
from acme import environment_loop
from acme import specs
from acme.jax import inference_server
from acme.jax import networks as networks_lib
from acme.jax import savers
from acme.jax import types
from acme.jax import utils
from acme.jax import variable_utils
from acme.jax.experiments import config
from acme.jax import snapshotter
from acme.utils import counting
from acme.utils import lp_utils
import jax
import launchpad as lp
import reverb
# Integer identifier of an actor process within the distributed program.
ActorId = int

# Given the agent networks and the environment spec, builds a mapping from
# snapshot name to a function that turns a variable source into a
# snapshottable model.
SnapshotModelFactory = Callable[
    [config.AgentNetwork, specs.EnvironmentSpec],
    Dict[str, Callable[[core.VariableSource], types.ModelToSnapshot]]]
@dataclasses.dataclass
class CheckpointingConfig:
    """Configuration options for checkpointing.

    Attributes:
      max_to_keep: Maximum number of checkpoints to keep. Does not apply to replay
        checkpointing.
      directory: Where to store the checkpoints.
      add_uid: Whether or not to add a unique identifier, see
        `paths.get_unique_id()` for how it is generated.
      replay_checkpointing_time_delta_minutes: How frequently to write replay
        checkpoints; defaults to None, which disables periodic checkpointing.
        Warning! These are written asynchronously so as not to interrupt other
        replay duties, however this does pose a risk of OOM since items that
        would otherwise be removed are temporarily kept alive for checkpointing
        purposes.
        Note: Since replay buffers tend to be quite large O(100GiB), writing can
        take up to 10 minutes so keep that in mind when setting this frequency.
    """
    # Keep only the most recent checkpoint by default.
    max_to_keep: int = 1
    directory: str = '~/acme'
    add_uid: bool = True
    # None disables periodic replay checkpointing entirely.
    replay_checkpointing_time_delta_minutes: Optional[int] = None
def make_distributed_experiment(
    experiment: config.Config,
    num_actors: int,
    *,
    num_learner_nodes: int = 1,
    num_actors_per_node: int = 1,
    # TODO(kamyar) remove device_prefetch
    device_prefetch: bool = True,
    prefetch_size: int = 1,
    multithreading_colocate_learner_and_reverb: bool = False,
    checkpointing_config: Optional[CheckpointingConfig] = None,
    make_snapshot_models: Optional[SnapshotModelFactory] = None,
    inference_server_config: Optional[
        inference_server.InferenceServerConfig] = None,
    name: str = 'agent',
    program: Optional[lp.Program] = None):
    """Builds distributed agent based on a builder.

    Assembles a launchpad program with replay, counter, learner(s),
    actors, optional inference server, evaluators, and an optional model
    saver node, all wired together through handles.

    Args:
      experiment: experiment configuration (environment/network/policy
        factories, builder, seed, ...).
      num_actors: total number of actor processes to create.
      num_learner_nodes: number of learner nodes; >1 requires the learner to
        do the appropriate pmap/pmean on loss/gradients itself.
      num_actors_per_node: number of actors colocated (multithreaded) per node.
      device_prefetch: if True, prefetched batches are placed on device.
      prefetch_size: dataset prefetch buffer size; <= 1 disables prefetching.
      multithreading_colocate_learner_and_reverb: run the learner and replay
        in the same process.
      checkpointing_config: checkpointing options; defaults to
        CheckpointingConfig() when None.
      make_snapshot_models: optional factory of snapshottable models; when
        given (together with checkpointing), adds a model_saver node.
      inference_server_config: when set, actors query a central inference
        server instead of evaluating the policy locally.
      name: launchpad program name (used only when `program` is None).
      program: optionally extend an existing lp.Program instead of creating
        a new one.

    Returns:
      The assembled lp.Program.
    """
    if prefetch_size < 0:
        raise ValueError(f'Prefetch size={prefetch_size} should be non negative')

    if multithreading_colocate_learner_and_reverb and num_learner_nodes > 1:
        raise ValueError(
            'Replay and learner colocation is not yet supported when the learner is'
            ' spread across multiple nodes (num_learner_nodes > 1). Please contact'
            ' Acme devs if this is a feature you want. Got:'
            '\tmultithreading_colocate_learner_and_reverb='
            f'{multithreading_colocate_learner_and_reverb}'
            f'\tnum_learner_nodes={num_learner_nodes}.')

    if checkpointing_config is None:
        checkpointing_config = CheckpointingConfig()

    def build_replay():
        """The replay storage."""
        # The spec is only needed to size the tables; build a throwaway
        # environment with a dummy seed if no spec was provided.
        dummy_seed = 1
        spec = (
            experiment.environment_spec or
            specs.make_environment_spec(experiment.environment_factory(dummy_seed)))
        return experiment.builder.make_replay_tables(spec)

    def build_model_saver(variable_source: core.VariableSource):
        # Node that periodically snapshots the models produced by
        # make_snapshot_models, pulling variables from variable_source.
        environment = experiment.environment_factory(0)
        spec = specs.make_environment_spec(environment)
        networks = experiment.network_factory(spec)
        models = make_snapshot_models(networks, spec)
        # TODO(raveman): Decouple checkpointing and snahpshotting configs.
        return snapshotter.JAXSnapshotter(
            variable_source=variable_source,
            models=models,
            path=checkpointing_config.directory,
            add_uid=checkpointing_config.add_uid)

    def build_counter():
        # Central step counter, checkpointed every 5 minutes.
        return savers.CheckpointingRunner(
            counting.Counter(),
            key='counter',
            subdirectory='counter',
            time_delta_minutes=5,
            directory=checkpointing_config.directory,
            add_uid=checkpointing_config.add_uid,
            max_to_keep=checkpointing_config.max_to_keep)

    def build_learner(
        random_key: networks_lib.PRNGKey,
        replay: reverb.Client,
        counter: Optional[counting.Counter] = None,
        primary_learner: Optional[core.Learner] = None,
    ):
        """The Learning part of the agent."""

        iterator = experiment.builder.make_dataset_iterator(replay)

        dummy_seed = 1
        spec = (
            experiment.environment_spec or
            specs.make_environment_spec(experiment.environment_factory(dummy_seed)))

        # Creates the networks to optimize (online) and target networks.
        networks = experiment.network_factory(spec)

        if prefetch_size > 1:
            if device_prefetch:
                # For backwards compatibility.
                device = jax.devices()[0]
                iterable = utils.device_put(
                    iterable=iterator, device=device, split_fn=None)
                iterator = utils.prefetch(iterable=iterable, buffer_size=prefetch_size)
            else:
                iterator = utils.prefetch(iterable=iterator, buffer_size=prefetch_size)
        else:
            logging.info('Not prefetching the iterator.')

        logger = experiment.logger_factory('learner', 'learner_steps', 0)
        counter = counting.Counter(counter, 'learner')
        learner = experiment.builder.make_learner(random_key, networks, iterator,
                                                  logger, replay, counter)

        if primary_learner is None:
            # Only the primary learner checkpoints its state.
            learner = savers.CheckpointingRunner(
                learner,
                key='learner',
                subdirectory='learner',
                time_delta_minutes=5,
                directory=checkpointing_config.directory,
                add_uid=checkpointing_config.add_uid,
                max_to_keep=checkpointing_config.max_to_keep)
        else:
            learner.restore(primary_learner.save())
            # NOTE: This initially synchronizes secondary learner states with the
            # primary one. Further synchronization should be handled by the learner
            # properly doing a pmap/pmean on the loss/gradients, respectively.

        return learner

    def build_actor(
        random_key: networks_lib.PRNGKey,
        replay: reverb.Client,
        variable_source: core.VariableSource,
        counter: counting.Counter,
        actor_id: ActorId,
        inference_client: Optional[inference_server.InferenceServer] = None
    ) -> environment_loop.EnvironmentLoop:
        """The actor process."""
        adder = experiment.builder.make_adder(replay)

        environment_key, actor_key = jax.random.split(random_key)
        # Create environment and policy core.

        # Environments normally require uint32 as a seed.
        environment = experiment.environment_factory(
            utils.sample_uint32(environment_key))

        if not inference_client:
            # Local inference: the actor builds and evaluates its own policy.
            networks = experiment.network_factory(
                specs.make_environment_spec(environment))
            policy_network = experiment.policy_network_factory(networks)
        else:
            # Remote inference: variables live on the inference server, so the
            # actor only needs a no-op reference variable source.
            variable_source = variable_utils.ReferenceVariableSource()
            policy_network = inference_client

        actor = experiment.builder.make_actor(actor_key, policy_network, adder,
                                              variable_source)

        # Create logger and counter.
        counter = counting.Counter(counter, 'actor')
        logger = experiment.logger_factory('actor', 'actor_steps', actor_id)

        # Create the loop to connect environment and agent.
        return environment_loop.EnvironmentLoop(
            environment, actor, counter, logger, observers=experiment.observers)

    if not program:
        program = lp.Program(name=name)

    key = jax.random.PRNGKey(experiment.seed)

    replay_node = lp.ReverbNode(
        build_replay,
        checkpoint_time_delta_minutes=(
            checkpointing_config.replay_checkpointing_time_delta_minutes))
    replay = replay_node.create_handle()

    counter = program.add_node(lp.CourierNode(build_counter), label='counter')

    if experiment.max_number_of_steps is not None:
        # StepsLimiter terminates the program once the step budget is reached.
        program.add_node(
            lp.CourierNode(lp_utils.StepsLimiter, counter,
                           experiment.max_number_of_steps),
            label='counter')

    learner_key, key = jax.random.split(key)
    learner_node = lp.CourierNode(build_learner, learner_key, replay, counter)
    learner = learner_node.create_handle()
    variable_sources = [learner]

    if multithreading_colocate_learner_and_reverb:
        program.add_node(lp.MultiThreadingColocation([learner_node, replay_node]),
                         label='learner')
    else:
        program.add_node(replay_node, label='replay')

        with program.group('learner'):
            program.add_node(learner_node)

            # Maybe create secondary learners, necessary when using multi-host
            # accelerators.
            # Warning! If you set num_learner_nodes > 1, make sure the learner class
            # does the appropriate pmap/pmean operations on the loss/gradients,
            # respectively.
            for _ in range(1, num_learner_nodes):
                learner_key, key = jax.random.split(key)
                variable_sources.append(
                    program.add_node(
                        lp.CourierNode(
                            build_learner, learner_key, replay,
                            primary_learner=learner)))
                # NOTE: Secondary learners are used to load-balance get_variables calls,
                # which is why they get added to the list of available variable sources.
                # NOTE: Only the primary learner checkpoints.
                # NOTE: Do not pass the counter to the secondary learners to avoid
                # double counting of learner steps.

    inference_server_node = None
    if inference_server_config:

        def build_inference_server(random_key: networks_lib.PRNGKey,
                                   variable_source: core.VariableSource):
            """Creates an inference server node to be connected to by the actors."""
            # Environments normally require uint32 as a seed.
            # NOTE(review): unlike build_actor, the raw PRNGKey is passed straight
            # to environment_factory here (no utils.sample_uint32) -- confirm
            # this is intended.
            environment = experiment.environment_factory(random_key)
            networks = experiment.network_factory(
                specs.make_environment_spec(environment))
            policy_network = experiment.policy_network_factory(networks)

            if not inference_server_config.batch_size:
                # Inference batch size computation:
                # - In case of 1 inference device it is efficient to use
                #   `batch size == num_envs / 2`, so that inference can execute
                #   in parallel with a subset of environments' steps (it also addresses
                #   the problem of some environments running slower etc.)
                # - In case of multiple inference devices, we just divide the above
                #   batch size.
                # - Batch size can't obviously be smaller than 1.
                inference_server_config.batch_size = max(
                    1, num_actors // (2 * len(jax.local_devices())))

            if not inference_server_config.update_period:
                inference_server_config.update_period = (
                    1000 * num_actors // inference_server_config.batch_size)

            return inference_server.InferenceServer(
                config=inference_server_config,
                handler=(policy_network
                         if callable(policy_network) else vars(policy_network)),
                variable_source=variable_source,
                devices=jax.local_devices())

        with program.group('inference_server'):
            inference_server_key, key = jax.random.split(key)
            inference_server_node = program.add_node(
                lp.CourierNode(
                    build_inference_server,
                    inference_server_key,
                    learner,
                    courier_kwargs={'thread_pool_size': num_actors}))

    with program.group('actor'):
        # Create all actor threads.
        *actor_keys, key = jax.random.split(key, num_actors + 1)
        # Round-robin actors over the available variable sources (primary and
        # secondary learners) to load-balance get_variables calls.
        variable_sources = itertools.cycle(variable_sources)
        actor_nodes = [
            lp.CourierNode(build_actor, akey, replay, vsource, counter, aid,
                           inference_server_node)
            for aid, (akey, vsource) in enumerate(zip(actor_keys, variable_sources))
        ]

        # Create (maybe colocated) actor nodes.
        if num_actors_per_node == 1:
            for actor_node in actor_nodes:
                program.add_node(actor_node)
        else:
            for i in range(0, num_actors, num_actors_per_node):
                program.add_node(
                    lp.MultiThreadingColocation(
                        actor_nodes[i:i + num_actors_per_node]))

    def make_actor(random_key: networks_lib.PRNGKey,
                   policy_network: config.PolicyNetwork,
                   variable_source: core.VariableSource) -> core.Actor:
        # Evaluators build their own (non-adding) actors via this factory.
        return experiment.builder.make_actor(
            random_key, policy_network, variable_source=variable_source)

    for evaluator in experiment.get_evaluator_factories():
        evaluator_key, key = jax.random.split(key)
        program.add_node(
            lp.CourierNode(evaluator, evaluator_key, learner, counter, make_actor),
            label='evaluator')

    if make_snapshot_models and checkpointing_config:
        program.add_node(lp.CourierNode(build_model_saver, learner),
                         label='model_saver')

    return program
|
<reponame>chen1i/fedlearner
import datetime
import logging
import os
# Index types for which mappings and templates are defined below.
INDEX_TYPE = ('metrics', 'data_join', 'raw_data')
# YYYY-MM-DD'T'hh:mm:ss.SSSSSSZ
_es_datetime_format = 'strict_date_optional_time'
# WARNING: MAPPINGS BELOW ARE COMPATIBILITY MEASURES AND SHOULD NOT BE MODIFIED.
# Elasticsearch mapping for raw-data indices.
RAW_DATA_MAPPINGS = {
    "dynamic": True,
    # Map any dynamically-added string field to keyword (not analyzed text)
    # to reduce index size and allow exact-match filtering.
    "dynamic_templates": [
        {
            "strings": {
                "match_mapping_type": "string",
                "mapping": {
                    "type": "keyword"
                }
            }
        }
    ],
    "properties": {
        "partition": {
            "type": "short"
        },
        "application_id": {
            "ignore_above": 128,
            "type": "keyword"
        },
        "event_time": {
            "format": _es_datetime_format,
            "type": "date"
        }
    }
}
# Elasticsearch mapping for data-join indices.
DATA_JOIN_MAPPINGS = {
    "dynamic": True,
    # for dynamically adding string fields, use keyword to reduce space
    "dynamic_templates": [
        {
            "strings": {
                "match_mapping_type": "string",
                "mapping": {
                    "type": "keyword"
                }
            }
        }
    ],
    "properties": {
        "partition": {
            "type": "short"
        },
        "joined": {
            "type": "byte"
        },
        "label": {
            "ignore_above": 8,
            "type": "keyword"
        },
        "type": {
            "ignore_above": 32,
            "type": "keyword"
        },
        "has_click_id": {
            "type": "boolean"
        },
        "has_example_id": {
            "type": "boolean"
        },
        "application_id": {
            "ignore_above": 128,
            "type": "keyword"
        },
        "process_time": {
            "format": _es_datetime_format,
            "type": "date"
        },
        "event_time": {
            "format": _es_datetime_format,
            "type": "date"
        }
    }
}
# Elasticsearch mapping for metrics indices.
METRICS_MAPPINGS = {
    "dynamic": True,
    # Map dynamically-added string fields to keyword to reduce space.
    "dynamic_templates": [
        {
            "strings": {
                "match_mapping_type": "string",
                "mapping": {
                    "type": "keyword"
                }
            }
        }
    ],
    "properties": {
        "name": {
            "type": "keyword"
        },
        "value": {
            "type": "float"
        },
        "date_time": {
            "format": _es_datetime_format,
            "type": "date"
        },
        "tags": {
            "properties": {
                "partition": {
                    "type": "short"
                },
                "application_id": {
                    "ignore_above": 128,
                    "type": "keyword"
                },
                "data_source_name": {
                    "ignore_above": 128,
                    "type": "keyword"
                },
                "joiner_name": {
                    "ignore_above": 32,
                    "type": "keyword"
                },
                "role": {
                    "ignore_above": 16,
                    "type": "keyword"
                },
                "event_time": {
                    "type": "date",
                    "format": _es_datetime_format
                }
            }
        }
    }
}
# Index type -> concrete index name (metrics uses a versioned name).
INDEX_NAME = {'metrics': 'metrics_v2',
              'raw_data': 'raw_data',
              'data_join': 'data_join'}
# Index type -> mapping definition used by get_es_template().
INDEX_MAP = {'metrics': METRICS_MAPPINGS,
             'raw_data': RAW_DATA_MAPPINGS,
             'data_join': DATA_JOIN_MAPPINGS}
# Runtime configuration, overridable via environment variables. The env
# overrides are coerced to the same numeric types as the defaults so that
# downstream numeric comparisons behave the same whether or not the
# variables are set (os.environ.get returns str when set).
CONFIGS = {
    'data_join_metrics_sample_rate':
        float(os.environ.get('DATA_JOIN_METRICS_SAMPLE_RATE', 0.3)),
    'raw_data_metrics_sample_rate':
        float(os.environ.get('RAW_DATA_METRICS_SAMPLE_RATE', 0.01)),
    'es_batch_size': int(os.environ.get('ES_BATCH_SIZE', 1000)),
    'timezone': datetime.timezone(datetime.timedelta(hours=8))  # UTC+8
}
def get_es_template(index_type, es_version):
    """Build the ES index template dict for *index_type* and ES major version."""
    index_name = INDEX_NAME[index_type]
    index_settings = {
        "index": {
            "codec": "best_compression",
            "routing": {
                "allocation": {
                    "total_shards_per_node": "1"
                }
            },
            "refresh_interval": "60s",
            "number_of_shards": "2",
            "number_of_replicas": "1",
        }
    }
    mappings = INDEX_MAP[index_type]
    if es_version == 6:
        # ES 6.x still requires mappings to be nested under a doc type.
        mappings = {'_doc': mappings}
    return {
        "index_patterns": ["{}-*".format(index_name), index_name],
        "settings": index_settings,
        "mappings": mappings,
    }
def convert_to_iso_format(value):
    """
    Args:
        value: datetime object | bytes | str | int | float.
            Value to be converted. Expected to be a numeric in the format of
            yyyymmdd or yyyymmddhhnnss, or a datetime object.

    Returns: str.
        Try to convert a datetime str or numeric to iso format datetime str.
        1. Try to convert based on the length of str.
        2. Try to convert assuming it is a timestamp.
        3. If it does not match any pattern, return iso format of timestamp=0.
        Timezone will be set to UTC+8 if unset.
    """
    assert isinstance(value, (datetime.datetime, bytes, str, int, float))
    if isinstance(value, datetime.datetime):
        # Already a datetime; just make it timezone-aware if needed.
        if value.tzinfo is None:
            value = value.replace(tzinfo=CONFIGS['timezone'])
        return value.isoformat(timespec='microseconds')
    # Normalize bytes and numerics to str so length-based parsing applies.
    if isinstance(value, bytes):
        value = value.decode()
    elif isinstance(value, (int, float)):
        value = str(value)
    # first try to parse datetime from value
    try:
        if len(value) == 8:
            date_time = datetime.datetime.strptime(value, '%Y%m%d')
            return date_time.replace(tzinfo=CONFIGS['timezone']) \
                .isoformat(timespec='microseconds')
        if len(value) == 14:
            date_time = datetime.datetime.strptime(value, '%Y%m%d%H%M%S')
            return date_time.replace(tzinfo=CONFIGS['timezone']) \
                .isoformat(timespec='microseconds')
    except ValueError:  # Not fitting any of above patterns
        pass
    # then try to convert assuming it is a timestamp
    # not in the same `try` block b/c the length of some strings might be equal
    # to 14 but it is not a datetime format string
    try:
        date_time = datetime.datetime.fromtimestamp(float(value),
                                                    tz=CONFIGS['timezone'])
    except (ValueError, OverflowError, OSError):
        # float() raises ValueError on non-numeric strings; fromtimestamp can
        # additionally raise OverflowError/OSError for out-of-range
        # timestamps. In every case fall back to the epoch.
        logging.warning('Unable to parse time %s to iso format, '
                        'defaults to 0.', value)
        date_time = datetime.datetime.fromtimestamp(0,
                                                    tz=CONFIGS['timezone'])
    return date_time.isoformat(timespec='microseconds')
|
<gh_stars>0
"""
Algolia - Slack slash command (+ django webhook) Integration
1. slack에서 /algolia [keyword]를 검색하면 algolia index에서 해당 값이 포함된
검색 결과를 리턴해줌.
(2. slack app내의 "Interactivity & Shortcuts"를 사용해서 django webhook endpoint에
trending keyword를 등록해주는 workflow 완성 - 코드는 backend 서버에 포함)
"""
import os
import requests
from algoliasearch.search_client import SearchClient
from flask import Flask, abort, request
from zappa.asynchronous import task
app = Flask(__name__)
# Search-only Algolia client; credentials are read from the environment.
client = SearchClient.create(
    os.getenv("ALGOLIA_APPLICATION_ID"), os.getenv("ALGOLIA_SEARCH_API_KEY")
)
def is_request_valid(request):
    """Return True iff the Slack request carries the expected token and team id."""
    token_ok = request.form["token"] == os.getenv("SLACK_VERIFICATION_TOKEN")
    team_ok = request.form["team_id"] == os.getenv("SLACK_TEAM_ID")
    return token_ok and team_ok
def get_links(query):
    """Return a (google_url, youtube_url) pair of search links for *query*."""
    q = query.strip().replace(" ", "+")
    google_link = f"https://www.google.com/search?q={q}&oq={q}"
    youtube_link = f"https://www.youtube.com/results?search_query={q}&oq={q}"
    return (google_link, youtube_link)
def format_block_for(title, text, image_url):
    """Build a one-element list containing a Slack "section" block.

    The block shows *text* followed by Google/YouTube search links for
    *title*, with *image_url* as an image accessory.
    """
    google_link, youtube_link = get_links(title)
    section = {
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": text + f"<{google_link}|Google> | <{youtube_link}|YouTube>\n",
        },
        "accessory": {
            "type": "image",
            "image_url": image_url,
            "alt_text": title,
        },
    }
    return [section]
def format_attachments_for(query, video_count, product_count):
    """Build the interactive Slack attachment asking whether to register *query*."""
    google_link, youtube_link = get_links(query)
    summary = (
        f"*Video*: `{video_count}` found\n*Product*: `{product_count}` found\n"
        f"<{google_link}|Google it> | <{youtube_link}|YouTube>\n"
    )
    yes_button = {
        "name": "register",
        "text": "Yes",
        "style": "primary",
        "type": "button",
        "value": query,
        "confirm": {
            "title": "Are you sure?",
            "text": "This will make this keyword go public on production",
            "ok_text": "Yes",
            "dismiss_text": "No",
        },
    }
    no_button = {"name": "game", "text": "No", "type": "button", "value": "no"}
    return [
        {
            "title": "Register this keyword on server?",
            "text": summary,
            "fallback": "You are unable to register keyword on server",
            "callback_id": "wopr_game",
            "color": "#3AA3E3",
            "attachment_type": "default",
            "actions": [yes_button, no_button],
        }
    ]
@task
def get_algolia_result(response_url, query):
    """Search the Algolia product and video indices for *query* and post the
    formatted result back to Slack via *response_url*.

    Decorated with zappa's @task so it runs asynchronously, letting the
    slash-command handler return within Slack's 3 second deadline.
    """
    # Single round trip for both indices: results[0] holds product hits,
    # results[1] holds video hits.
    res = client.multiple_queries(
        [
            {"indexName": os.getenv("ALGOLIA_PRODUCT_INDEX_NAME"), "query": query},
            {"indexName": os.getenv("ALGOLIA_VIDEO_INDEX_NAME"), "query": query},
        ]
    )

    # Build a block for the top video hit, if any.
    if video_count := res["results"][1]["nbHits"]:
        video = res["results"][1]["hits"][0]
        v_block = format_block_for(
            video["title"],
            f"*Title*: {video['title']}\n*Channel*: {video['yt_channel_name']}\n",
            video["video_thumbnail_url"],
        )
    else:
        v_block = []

    # Build a block for the top product hit, if any.
    if product_count := res["results"][0]["nbHits"]:
        product = res["results"][0]["hits"][0]
        p_block = format_block_for(
            product["title"],
            f"*Title*: {product['title']}\n*Brand*: {product['brand']}\n",
            product["primary_image"],
        )
    else:
        p_block = []

    # format messages
    blocks = (
        p_block
        + v_block
        + [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Searched on Algolia Indices with keyword: *`{query}`*",
                },
            }
        ]
    )
    attachments = format_attachments_for(query, video_count, product_count)
    data = {"response_type": "in_channel", "attachments": attachments, "blocks": blocks}
    # Slack delayed response: POST the payload to the command's response_url.
    requests.post(response_url, json=data)
@app.route("/trend-register", methods=["POST"])
def trend_register():
    """Slack slash-command endpoint: validate the request, then kick off an
    asynchronous Algolia search that replies via response_url."""
    query = request.form.get("text")
    # Reject empty queries and requests that fail token/team verification.
    # (abort() raises, so each failed check stops the handler here.)
    if not query or not is_request_valid(request):
        abort(400)
    # "in channel" slash commands must receive a response in 3 seconds, so
    # the search runs asynchronously and posts its result later.
    get_algolia_result(request.form["response_url"], query)
    # An empty 204 keeps us well under the 3 second deadline.
    return ("", 204)
|
<reponame>dendisuhubdy/kaggle-rsna<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 21 18:01:28 2019
Models
"""
from torch import nn
import pytorch_retinanet.dataloader
import pytorch_retinanet.model
import pytorch_retinanet.model_incresv2
import pytorch_retinanet.model_nasnet_mobile
import pytorch_retinanet.model_pnasnet
import pytorch_retinanet.model_resnet
import pytorch_retinanet.model_se_resnext
import pytorch_retinanet.model_xception
from config import IMG_SIZE
class ModelInfo:
    """
    Initialises main model parameters
    """

    # NOTE(review): `factory` is annotated nn.Module but every registered value
    # is a constructor (e.g. pytorch_retinanet.model_resnet.resnet101), i.e. a
    # callable that *builds* a module -- confirm and consider Callable[..., nn.Module].
    def __init__(self, factory: nn.Module, args: dict, batch_size: int, dataset_args: dict, use_sgd: bool = False, img_size: int = IMG_SIZE):
        """
        Args:
            factory : base model architecture constructor
            args : a dictionary with model arguments, passed to the factory
            batch_size : batch size used in training
            dataset_args : a dictionary with dataset arguments
                (e.g. augmentation_level)
            use_sgd : if True, train with SGD (presumably instead of the
                default optimizer -- confirm against the training script)
            img_size : image size to use in training
        """
        self.factory = factory
        self.args = args
        self.batch_size = batch_size
        self.dataset_args = dataset_args
        self.img_size = img_size
        self.use_sgd = use_sgd
# dictionary of models with parameters
# Registry of training configurations keyed by an experiment name that
# encodes architecture / dropout / image size / augmentation choices.
# Each value is a ModelInfo consumed by the training code.
MODELS = {
    "resnet101_512": ModelInfo(
        factory=pytorch_retinanet.model_resnet.resnet101,
        args=dict(num_classes=1, pretrained=True),
        img_size=512,
        batch_size=6,
        dataset_args=dict(augmentation_level=20),
    ),
    "resnet152_512": ModelInfo(
        factory=pytorch_retinanet.model_resnet.resnet152,
        args=dict(num_classes=1, pretrained=True),
        img_size=512,
        batch_size=4,
        dataset_args=dict(augmentation_level=20),
    ),
    "se_resnext101_512": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained="imagenet"),
        img_size=512,
        batch_size=3,
        dataset_args=dict(),
    ),
    "se_resnext101_dr_512": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained="imagenet", dropout=0.5),
        img_size=512,
        batch_size=4,
        dataset_args=dict(augmentation_level=20),
    ),
    "se_resnext101_dr0.75_512": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained="imagenet", dropout=0.75),
        img_size=512,
        batch_size=6,
        dataset_args=dict(augmentation_level=20),
    ),
    'se_resnext101_dr0.75_512_aug1': ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained='imagenet', dropout=0.75),
        img_size=512,
        batch_size=6,
        dataset_args=dict(augmentation_level=1)
    ),
    'se_resnext101_dr0.75_512_aug10': ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained='imagenet', dropout=0.75),
        img_size=512,
        batch_size=6,
        dataset_args=dict(augmentation_level=10)
    ),
    # NOTE(review): identical parameters to 'se_resnext101_dr0.75_512_aug10'
    # (same factory/args/augmentation_level=10) — possibly a copy-paste;
    # confirm whether a different augmentation level was intended.
    'se_resnext101_dr0.75_512_basic_rotations': ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained='imagenet', dropout=0.75),
        img_size=512,
        batch_size=6,
        dataset_args=dict(augmentation_level=10)
    ),
    'se_resnext101_dr0.75_512_aug21': ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained='imagenet', dropout=0.75),
        img_size=512,
        batch_size=6,
        dataset_args=dict(augmentation_level=21)
    ),
    "se_resnext101_dr_512_without_pretrained": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained=False, dropout=0.5),
        img_size=512,
        batch_size=4,
        dataset_args=dict(augmentation_level=20),
    ),
    "se_resnext101_512_bs12": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained="imagenet"),
        img_size=512,
        batch_size=12,
        dataset_args=dict(),
    ),
    "se_resnext101_512_bs12_aug20": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained="imagenet"),
        img_size=512,
        batch_size=12,
        dataset_args=dict(augmentation_level=20),
    ),
    # The only configuration training with SGD instead of the default optimiser.
    "se_resnext101_512_sgd": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained="imagenet", dropout=0.5),
        img_size=512,
        batch_size=4,
        use_sgd=True,
        dataset_args=dict(augmentation_level=15),
    ),
    "se_resnext101_256": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext101,
        args=dict(num_classes=1, pretrained="imagenet"),
        img_size=256,
        batch_size=12,
        dataset_args=dict(),
    ),
    "resnet34_256": ModelInfo(
        factory=pytorch_retinanet.model_resnet.resnet34,
        args=dict(num_classes=1, pretrained=True),
        img_size=256,
        batch_size=32,
        dataset_args=dict(),
    ),
    "pnas_512": ModelInfo(
        factory=pytorch_retinanet.model_pnasnet.pnasnet5large,
        args=dict(num_classes=1, pretrained=True),
        img_size=512,
        batch_size=4,
        dataset_args=dict(),
    ),
    "pnas_512_dr": ModelInfo(
        factory=pytorch_retinanet.model_pnasnet.pnasnet5large,
        args=dict(num_classes=1, pretrained=True, dropout=0.5),
        img_size=512,
        batch_size=2,
        dataset_args=dict(augmentation_level=20),
    ),
    # NOTE(review): name says bs12 but batch_size is 8 — confirm intent.
    "pnas_512_bs12": ModelInfo(
        factory=pytorch_retinanet.model_pnasnet.pnasnet5large,
        args=dict(num_classes=1, pretrained=True),
        img_size=512,
        batch_size=8,
        dataset_args=dict(),
    ),
    "pnas_256_aug20": ModelInfo(
        factory=pytorch_retinanet.model_pnasnet.pnasnet5large,
        args=dict(num_classes=1, pretrained=True),
        img_size=256,
        batch_size=8,
        dataset_args=dict(augmentation_level=20),
    ),
    "inc_resnet_v2_512": ModelInfo(
        factory=pytorch_retinanet.model_incresv2.inceptionresnetv2,
        args=dict(num_classes=1, pretrained=True),
        img_size=512,
        batch_size=4,
        dataset_args=dict(augmentation_level=20),
    ),
    "inc_resnet_v2_512_dr": ModelInfo(
        factory=pytorch_retinanet.model_incresv2.inceptionresnetv2,
        args=dict(num_classes=1, pretrained=True, dropout_cls=0.6, dropout_global_cls=0.6),
        img_size=512,
        batch_size=4,
        dataset_args=dict(augmentation_level=20),
    ),
    "inc_resnet_v2_256": ModelInfo(
        factory=pytorch_retinanet.model_incresv2.inceptionresnetv2,
        args=dict(num_classes=1, pretrained=True),
        img_size=256,
        batch_size=16,
        dataset_args=dict(augmentation_level=20),
    ),
    "resnet50_512": ModelInfo(
        factory=pytorch_retinanet.model_resnet.resnet50,
        args=dict(num_classes=1, pretrained=True, dropout_cls=0.5, dropout_global_cls=0.5),
        img_size=512,
        batch_size=12,
        dataset_args=dict(augmentation_level=15),
    ),
    "se_resnext50_512": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext50,
        args=dict(num_classes=1, pretrained="imagenet", dropout=0.5),
        img_size=512,
        batch_size=8,
        dataset_args=dict(augmentation_level=20),
    ),
    "se_resnext50_512_dr0.8": ModelInfo(
        factory=pytorch_retinanet.model_se_resnext.se_resnext50,
        args=dict(num_classes=1, pretrained="imagenet", dropout=0.8),
        img_size=512,
        batch_size=8,
        dataset_args=dict(augmentation_level=20),
    ),
    "nasnet_mobile_512": ModelInfo(
        factory=pytorch_retinanet.model_nasnet_mobile.nasnet_mobile_model,
        args=dict(num_classes=1, pretrained=True, dropout_cls=0.5, dropout_global_cls=0.5, use_l2_features=True),
        img_size=512,
        batch_size=8,
        dataset_args=dict(augmentation_level=20),
    ),
    "xception_512_dr": ModelInfo(
        factory=pytorch_retinanet.model_xception.xception_model,
        args=dict(num_classes=1, pretrained=True, dropout_cls=0.6, dropout_global_cls=0.6),
        img_size=512,
        batch_size=6,
        dataset_args=dict(augmentation_level=20),
    ),
}
|
<gh_stars>1-10
from __future__ import absolute_import
from collections import namedtuple
from fnmatch import fnmatch
from parsimonious.grammar import Grammar, NodeVisitor
from parsimonious.exceptions import ParseError # noqa
from sentry.utils.safe import get_path
# Public API of this module.
__all__ = ('parse_rules', 'dump_schema', 'load_schema')
# Schema version emitted by dump_schema and required by load_schema.
VERSION = 1
# Grammar is defined in EBNF syntax.
# Parses Ownership files: one rule per line, each an optional
# "url:"/"path:" matcher prefix, a pattern, then one or more owners
# (teams are prefixed with "#"). Comments start with "#" at line start.
ownership_grammar = Grammar(r"""
ownership = line+
line = _ (comment / rule / empty) newline?
rule = _ matcher owners
matcher = _ matcher_tag identifier
matcher_tag = (matcher_type sep)?
matcher_type = "url" / "path"
owners = _ owner+
owner = _ team_prefix identifier
team_prefix = "#"?
comment = ~r"#[^\r\n]*"
# TODO: make more specific
identifier = ~r"\S+"
sep = ":"
space = " "
empty = ""
newline = ~r"[\r\n]"
_ = space*
""")
class Rule(namedtuple('Rule', 'matcher owners')):
    """
    One line of an Ownership file: a Matcher plus the Owners it assigns.
    """
    def dump(self):
        """Serialize to a plain dict (inverse of :meth:`load`)."""
        serialized_owners = [owner.dump() for owner in self.owners]
        return {'matcher': self.matcher.dump(), 'owners': serialized_owners}

    @classmethod
    def load(cls, data):
        """Rebuild a Rule from the dict produced by :meth:`dump`."""
        matcher = Matcher.load(data['matcher'])
        owners = [Owner.load(entry) for entry in data['owners']]
        return cls(matcher, owners)

    def test(self, data):
        """Delegate matching of *data* (an event) to this rule's matcher."""
        return self.matcher.test(data)
class Matcher(namedtuple('Matcher', 'type pattern')):
    """
    A type:pattern pairing for use in comparing with an Event.
    type is either `path` or `url` at this point.
    TODO(mattrobenolt): pattern needs to be parsed into a regex
    Examples:
        url:example.com
        path:src/*
        src/*
    """
    def dump(self):
        """Serialize to a plain dict (inverse of :meth:`load`)."""
        return {'type': self.type, 'pattern': self.pattern}

    @classmethod
    def load(cls, data):
        """Rebuild a Matcher from the dict produced by :meth:`dump`."""
        return cls(data['type'], data['pattern'])

    def test(self, data):
        """Dispatch to ``test_url`` or ``test_path`` based on ``self.type``."""
        handler = getattr(self, 'test_%s' % self.type)
        return handler(data)

    def test_url(self, data):
        """Glob-match the event's request URL; False when it is absent."""
        try:
            url = data['request']['url']
        except KeyError:
            return False
        return fnmatch(url, self.pattern)

    def test_path(self, data):
        """True when any stack frame's filename (or abs_path) glob-matches."""
        # fnmatch keeps its own internal cache, so there isn't any
        # optimization to gain here from pre-translating/compiling the
        # pattern into a regex ourselves.
        return any(
            fnmatch(candidate, self.pattern)
            for candidate in (
                frame.get('filename') or frame.get('abs_path')
                for frame in _iter_frames(data)
            )
            if candidate
        )
class Owner(namedtuple('Owner', 'type identifier')):
    """
    A User or Team who owns a Rule.
    type is either `user` or `team`.
    Examples:
        <EMAIL>
        #team
    """
    def dump(self):
        """Serialize to a plain dict (inverse of :meth:`load`)."""
        return dict(type=self.type, identifier=self.identifier)

    @classmethod
    def load(cls, data):
        """Rebuild an Owner from the dict produced by :meth:`dump`."""
        return cls(data['type'], data['identifier'])
class OwnershipVisitor(NodeVisitor):
    """Folds a parsimonious parse tree of an Ownership file into Rule objects."""

    def visit_comment(self, *args):
        # Comments contribute nothing to the rule tree.
        return None

    def visit_empty(self, *args):
        # Neither do blank lines.
        return None

    def visit_ownership(self, node, children):
        # Drop the Nones produced by comments/empty lines.
        return filter(None, children)

    def visit_line(self, node, children):
        _, content, _ = children
        parsed = content[0]
        # Only rules survive; comments and empties visited to a falsy value.
        if parsed:
            return parsed
        return None

    def visit_rule(self, node, children):
        _, matcher, owners = children
        return Rule(matcher, owners)

    def visit_matcher(self, node, children):
        _, tag, identifier = children
        return Matcher(tag, identifier)

    def visit_matcher_tag(self, node, children):
        # The "url:"/"path:" prefix is optional; default to "path".
        if not children:
            return 'path'
        tag, = children
        matcher_type, _sep = tag
        return matcher_type[0].text

    def visit_owners(self, node, children):
        _, owners = children
        return owners

    def visit_owner(self, node, children):
        _, is_team, pattern = children
        kind = 'team' if is_team else 'user'
        if kind == 'user':
            # User emails are case insensitive, so coerce them to
            # lowercase, so they can be de-duped, etc.
            pattern = pattern.lower()
        return Owner(kind, pattern)

    def visit_team_prefix(self, node, children):
        return bool(children)

    def visit_identifier(self, node, children):
        return node.text

    def generic_visit(self, node, children):
        return children or node
def _iter_frames(data):
    """Yield every stacktrace frame in an event: first from the top-level
    stacktrace, then from each exception value's stacktrace."""
    try:
        for frame in get_path(data, 'stacktrace', 'frames', filter=True) or ():
            yield frame
    except KeyError:
        pass
    try:
        exception_values = get_path(data, 'exception', 'values', filter=True) or ()
    except KeyError:
        return
    for value in exception_values:
        try:
            for frame in get_path(value, 'stacktrace', 'frames', filter=True) or ():
                yield frame
        except KeyError:
            continue
def parse_rules(data):
    """Parse raw Ownership-file text into a list of Rule objects."""
    parse_tree = ownership_grammar.parse(data)
    return OwnershipVisitor().visit(parse_tree)
def dump_schema(rules):
    """Serialize a Rule tree into a versioned, JSON-compatible schema."""
    return {'$version': VERSION, 'rules': [rule.dump() for rule in rules]}
def load_schema(schema):
    """Inverse of dump_schema; raises RuntimeError on a version mismatch."""
    if schema['$version'] != VERSION:
        raise RuntimeError('Invalid schema $version: %r' % schema['$version'])
    return [Rule.load(entry) for entry in schema['rules']]
|
import datetime
from werkzeug.exceptions import BadRequest
from server.models import db, Assignment, Group, GroupAction, User
from tests import OkTestCase
class TestGroup(OkTestCase):
    """Exercises Group invite/accept/decline/remove flows and their audit log.

    Relies on OkTestCase.setup_course() providing self.course, self.assignment
    and self.user1..userN fixtures (defined outside this file).
    """
    def setUp(self):
        super(TestGroup, self).setUp()
        self.setup_course()
    def test_invite(self):
        # Inviter becomes 'active'; invitees stay 'pending' until they accept.
        Group.invite(self.user1, self.user2, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        assert group.has_status(self.user1, 'active')
        assert group.has_status(self.user2, 'pending')
        assert group.size() == 2
        Group.invite(self.user1, self.user3, self.assignment)
        assert group.has_status(self.user1, 'active')
        assert group.has_status(self.user2, 'pending')
        assert group.has_status(self.user3, 'pending')
        assert group.size() == 3
    def test_invite_not_enrolled(self):
        # Users not enrolled in the course may neither invite nor be invited.
        not_enrolled = User(email='<EMAIL>')
        db.session.add(not_enrolled)
        self.assertRaises(BadRequest, Group.invite, self.user1, not_enrolled, self.assignment)
        self.assertRaises(BadRequest, Group.invite, not_enrolled, self.user1, self.assignment)
    def test_invite_in_group(self):
        # Any invite involving someone already in (or pending for) a group fails.
        Group.invite(self.user1, self.user2, self.assignment)
        self.assertRaises(BadRequest, Group.invite, self.user1, self.user1, self.assignment)
        self.assertRaises(BadRequest, Group.invite, self.user1, self.user2, self.assignment)
        self.assertRaises(BadRequest, Group.invite, self.user2, self.user1, self.assignment)
        self.assertRaises(BadRequest, Group.invite, self.user2, self.user2, self.assignment)
        self.assertRaises(BadRequest, Group.invite, self.user2, self.user3, self.assignment)
        self.assertRaises(BadRequest, Group.invite, self.user3, self.user1, self.assignment)
        self.assertRaises(BadRequest, Group.invite, self.user3, self.user2, self.assignment)
        self.assertRaises(BadRequest, Group.invite, self.user3, self.user3, self.assignment)
    def test_invite_full(self):
        # Inviting beyond the assignment's max group size (4 here) fails.
        Group.invite(self.user1, self.user2, self.assignment)
        Group.invite(self.user1, self.user3, self.assignment)
        Group.invite(self.user1, self.user4, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.accept(self.user2)
        assert group.size() == 4
        self.assertRaises(BadRequest, Group.invite, self.user1, self.user5, self.assignment)
    def test_invite_individual(self):
        # max_group_size=1 means no invitations are possible at all.
        individual_assignment = Assignment(
            name='cal/cs61a/sp16/lab00',
            course=self.course,
            display_name='Lab 0',
            due_date=datetime.datetime.now(),
            lock_date=datetime.datetime.now() + datetime.timedelta(days=1),
            max_group_size=1)
        db.session.add(individual_assignment)
        self.assertRaises(BadRequest, Group.invite, self.user1, self.user2, individual_assignment)
    def test_locked(self):
        # After the lock date every mutation (invite/accept/decline/remove) fails.
        Group.invite(self.user1, self.user2, self.assignment)
        Group.invite(self.user1, self.user3, self.assignment)
        Group.invite(self.user1, self.user4, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.accept(self.user2)
        self.assignment.lock_date = datetime.datetime.now() - datetime.timedelta(days=1)
        db.session.commit()
        self.assertRaises(BadRequest, Group.invite, self.user1, self.user2, self.assignment)
        self.assertRaises(BadRequest, group.accept, self.user3)
        self.assertRaises(BadRequest, group.decline, self.user3)
        self.assertRaises(BadRequest, group.remove, self.user1, self.user2)
    def test_accept(self):
        Group.invite(self.user1, self.user2, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.accept(self.user2)
        assert group.has_status(self.user1, 'active')
        assert group.has_status(self.user2, 'active')
        assert group.size() == 2
    def test_accept_not_pending(self):
        # Only 'pending' members may accept; actives and outsiders cannot.
        Group.invite(self.user1, self.user2, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.accept(self.user2)
        self.assertRaises(BadRequest, group.accept, self.user2)
        self.assertRaises(BadRequest, group.accept, self.user3)
    def test_decline(self):
        # Declining removes only the decliner; the rest of the group survives.
        Group.invite(self.user1, self.user2, self.assignment)
        Group.invite(self.user1, self.user3, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.accept(self.user2)
        group.decline(self.user3)
        assert group.has_status(self.user1, 'active')
        assert group.has_status(self.user2, 'active')
        assert Group.lookup(self.user3, self.assignment) is None
        assert group.size() == 2
    def test_decline_degenerate(self):
        # A two-person group dissolves entirely when the invitee declines.
        Group.invite(self.user1, self.user2, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.decline(self.user2)
        assert Group.lookup(self.user1, self.assignment) is None
        assert Group.lookup(self.user2, self.assignment) is None
    def test_decline_not_pending(self):
        Group.invite(self.user1, self.user2, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        self.assertRaises(BadRequest, group.decline, self.user3)
    def test_remove(self):
        Group.invite(self.user1, self.user2, self.assignment)
        Group.invite(self.user1, self.user3, self.assignment)
        Group.invite(self.user1, self.user4, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.accept(self.user2)
        group.remove(self.user1, self.user2)
        assert group.has_status(self.user1, 'active')
        assert Group.lookup(self.user2, self.assignment) is None
        assert group.has_status(self.user3, 'pending')
        assert group.size() == 3
        group.remove(self.user1, self.user3)
        assert group.has_status(self.user1, 'active')
        assert Group.lookup(self.user3, self.assignment) is None
        assert group.size() == 2
    def test_remove_self(self):
        # Removing yourself leaves the remaining members' group intact.
        Group.invite(self.user1, self.user2, self.assignment)
        Group.invite(self.user1, self.user3, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.accept(self.user2)
        group.accept(self.user3)
        group.remove(self.user1, self.user1)
        assert Group.lookup(self.user1, self.assignment) is None
        assert group.has_status(self.user2, 'active')
        assert group.has_status(self.user3, 'active')
    def test_remove_degenerate(self):
        # Self-removal from a two-person group dissolves it entirely.
        Group.invite(self.user1, self.user2, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.remove(self.user1, self.user1)
        assert Group.lookup(self.user1, self.assignment) is None
        assert Group.lookup(self.user2, self.assignment) is None
    def test_remove_not_in_group(self):
        Group.invite(self.user1, self.user2, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        group.accept(self.user2)
        self.assertRaises(BadRequest, group.remove, self.user2, self.user3)
        self.assertRaises(BadRequest, group.remove, self.user3, self.user2)
    def test_log(self):
        # Each mutation writes a GroupAction row with before/after snapshots;
        # `state` tracks the expected snapshot as the scenario progresses.
        def latest_action():
            return GroupAction.query.order_by(GroupAction.id.desc()).first()
        Group.invite(self.user1, self.user2, self.assignment)
        group = Group.lookup(self.user1, self.assignment)
        state = {
            'id': group.id,
            'assignment_id': group.assignment_id,
            'members': []
        }
        action = latest_action()
        assert action.action_type == 'invite'
        assert action.user_id == self.user1.id
        assert action.target_id == self.user2.id
        assert action.group_before == state
        state['members'].append({
            'user_id': self.user1.id,
            'status': 'active'
        })
        state['members'].append({
            'user_id': self.user2.id,
            'status': 'pending'
        })
        assert action.group_after == state
        group.accept(self.user2)
        action = latest_action()
        assert action.action_type == 'accept'
        assert action.user_id == self.user2.id
        assert action.target_id == self.user2.id
        assert action.group_before == state
        state['members'][1]['status'] = 'active'
        assert action.group_after == state
        Group.invite(self.user1, self.user3, self.assignment)
        action = latest_action()
        assert action.action_type == 'invite'
        assert action.user_id == self.user1.id
        assert action.target_id == self.user3.id
        assert action.group_before == state
        state['members'].append({
            'user_id': self.user3.id,
            'status': 'pending'
        })
        assert action.group_after == state
        group.decline(self.user3)
        action = latest_action()
        assert action.action_type == 'decline'
        assert action.user_id == self.user3.id
        assert action.target_id == self.user3.id
        assert action.group_before == state
        state['members'].pop(2)
        assert action.group_after == state
        group.remove(self.user2, self.user1)
        action = latest_action()
        assert action.action_type == 'remove'
        assert action.user_id == self.user2.id
        assert action.target_id == self.user1.id
        assert action.group_before == state
        state['members'] = []
        assert action.group_after == state
|
#!/opt/homebrew/bin/python3.9
##
## Icebreaker and IceSugar RSMB5 project - RV32I for Lattice iCE40
## With complete open-source toolchain flow using:
## -> yosys
## -> icarus verilog
## -> icestorm project
##
## Tests are written in several languages
## -> Systemverilog Pure Testbench (Vivado)
## -> UVM testbench (Vivado)
## -> PyUvm (Icarus)
## -> Formal either using SVA and PSL (Vivado) or cuncurrent assertions with Yosys
##
## Copyright (c) 2021 <NAME> (<EMAIL>)
##
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
## CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
## TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
## SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
############################################################################
#### import main packages
############################################################################
import json as j
import pandas as pd
import sys
import template as temp
from string import Template
############################################################################
############################################################################
#### Classes and functions
############################################################################
# Aggregation-type strings as they appear in the exported JSON 'type' field.
regfile_type = "regfile"
memory_type = "memory"
# Use to open the JSON file and get the dictionary back
def parse_json(path: str = "./output_all/reg.json") -> dict:
    """Load the register-description JSON export and return it as a dict.

    Args:
        path: location of the JSON file. Defaults to the exporter's
            standard output location, preserving the old behaviour.

    Returns:
        The decoded top-level JSON object.
    """
    # `with` closes the file on every exit path; the old explicit
    # f.close() inside the with-block was redundant and has been removed.
    with open(path, "r") as f:
        return j.load(f)
def gen_lists_and_csv(data):
    """Walk the register-description dict and emit three artifacts:
    ./output_all/reg.csv (type/name/address table),
    ./output_all/reg_param.svh (SystemVerilog params/defines/masks/reset values),
    ./output_all/reg_python_const.py (the same constants for Python).

    NOTE(review): all output files are opened with mode 'x', so every run
    fails with FileExistsError if the outputs already exist — presumably
    intentional (never overwrite generated files); confirm.
    """
    name = []
    t_reg = []
    address = []
    sub_data = data['children']
    sw_rd_mask = []
    hw_rd_mask = []
    sw_wr_mask = []
    hw_wr_mask = []
    reset_p = []
    res = {}
    res2 = {}
    # NOTE(review): these globals are only ever assigned inside the loop
    # below; if the first child is neither a regfile nor a memory they are
    # read before assignment (NameError) — confirm input always starts
    # with one of those types.
    global is_regfile
    global is_memory
    for reg in sub_data:
        # Check the register aggregation type
        if reg['type'] == regfile_type:
            is_regfile = True
            is_memory = False
        elif reg['type'] == memory_type:
            is_regfile = False
            is_memory = True
        # according to the result we create the parameters
        t_reg.append(reg['type'])
        ## check if Memory so that we can print the start and end
        if ((not is_regfile) & is_memory):
            address.append(reg['memory_adress_start'])
            name.append("memory_adress_start")
        else:
            address.append(reg['absolute_adress'])
            name.append(reg['inst_name'])
        ## Look Inside for children
        for x in reg['children']:
            t_reg.append(x['type'])
            name.append(x['inst_name'])
            if ((not is_memory) & is_regfile):
                ## Get the masks
                sw_rd_mask.append(x['sw_read_mask'])
                hw_rd_mask.append(x['hw_read_mask'])
                sw_wr_mask.append(x['sw_write_mask'])
                hw_wr_mask.append(x['hw_write_mask'])
                reset_p.append(x['global_reset_value'])
                # Fields carry no address of their own; only registers do.
                # NOTE(review): names are appended for every child but
                # addresses only conditionally, so the zip() below can pair
                # names with shifted addresses — verify against real input.
                if (x['type'] != "field"):
                    address.append(x['address_offset'])
        if ((not is_regfile) & is_memory):
            t_reg.append(memory_type)
            name.append("memory_adress_end")
            address.append(reg['memory_adress_end'])
    ## Generate the final dicationary
    res = dict(zip(name, address))
    res2 = dict(zip(name, t_reg))
    rest_dict = dict(zip(name, reset_p))
    hwwr_dict = dict(zip(name, hw_wr_mask))
    hwrd_dict = dict(zip(name, hw_rd_mask))
    swwr_dict = dict(zip(name, sw_wr_mask))
    swrd_dict = dict(zip(name, sw_rd_mask))
    # CSV summary of everything that was collected.
    df = pd.DataFrame(data={"TYPE": t_reg, "NAME": name, "ADDRESS": address})
    with open ('./output_all/reg.csv', 'x') as f:
        df.to_csv("./output_all/reg.csv", sep=',',index=False)
        f.close()
    # Templates: SV parameter, SV `define, and Python constant (one line each).
    t = Template(temp.param_template+'\n')
    d = Template(temp.define_template+'\n')
    p = Template(temp.python_const_template+'\n')
    with open('./output_all/reg_param.svh', 'x') as f:
        ## Fristly write the header
        f.write(temp.header)
        ## Start with Params
        for x in res.keys():
            # '0x...' literals are rewritten as SystemVerilog 32'h... ones.
            if res2[x] == regfile_type:
                a=t.substitute({'name' : "{}_{}".format(res2[x],x), 'value' : res[x].replace('0x',"32'h")})
            elif res2[x] == memory_type:
                a=t.substitute({'name' : "{}".format(x), 'value' : res[x].replace('0x',"32'h")})
            else:
                a=t.substitute({'name' : "register_{}".format(x), 'value' : res[x].replace('0x',"32'h")})
            f.write(a)
        ## Start with Defines
        for x in res.keys():
            if res2[x] == regfile_type:
                b=d.substitute({'name' : "{}_{}".format(res2[x],x), 'value' : res[x].replace('0x',"32'h")})
            elif res2[x] == memory_type:
                b=d.substitute({'name' : "{}".format(x), 'value' : res[x].replace('0x',"32'h")})
            else:
                b=d.substitute({'name' : "register_{}".format(x), 'value' : res[x].replace('0x',"32'h")})
            f.write(b)
        ## Start for the Mask
        for x in hwwr_dict.keys():
            b=d.substitute({'name' : "mask_hwwr_{}".format(x), 'value' : hwwr_dict[x].replace('0x',"32'h")})
            f.write(b)
        for x in hwrd_dict.keys():
            b=d.substitute({'name' : "mask_hwrd_{}".format(x), 'value' : hwrd_dict[x].replace('0x',"32'h")})
            f.write(b)
        for x in swwr_dict.keys():
            b=d.substitute({'name' : "mask_swwr_{}".format(x), 'value' : swwr_dict[x].replace('0x',"32'h")})
            f.write(b)
        for x in swrd_dict.keys():
            b=d.substitute({'name' : "mask_swrd_{}".format(x), 'value' : swrd_dict[x].replace('0x',"32'h")})
            f.write(b)
        ## Start for Resert
        for x in rest_dict.keys():
            b=d.substitute({'name' : "{}_POR_VALUE".format(x), 'value' : rest_dict[x].replace('0x',"32'h")})
            f.write(b)
        f.close()
    with open('./output_all/reg_python_const.py', 'x') as f:
        ## Fristly write the header
        f.write(temp.header_python)
        # Same constants again, but keeping Python '0x' literals unchanged.
        for x in res.keys():
            if res2[x] == regfile_type:
                c=p.substitute({'name' : "{}_{}".format(res2[x],x), 'value' : res[x]})
            elif res2[x] == memory_type:
                c=p.substitute({'name' : "{}".format(x), 'value' : res[x]})
            else:
                c=p.substitute({'name' : "register_{}".format(x), 'value' : res[x]})
            f.write(c)
        ## Start for the Mask
        for x in hwwr_dict.keys():
            c=p.substitute({'name' : "mask_hwwr_{}".format(x), 'value' : hwwr_dict[x]})
            f.write(c)
        for x in hwrd_dict.keys():
            c=p.substitute({'name' : "mask_hwrd_{}".format(x), 'value' : hwrd_dict[x]})
            f.write(c)
        for x in swwr_dict.keys():
            c=p.substitute({'name' : "mask_swwr_{}".format(x), 'value' : swwr_dict[x]})
            f.write(c)
        for x in swrd_dict.keys():
            c=p.substitute({'name' : "mask_swrd_{}".format(x), 'value' : swrd_dict[x]})
            f.write(c)
        ## Start for Resert
        for x in rest_dict.keys():
            c=p.substitute({'name' : "{}_POR_VALUE".format(x), 'value' : rest_dict[x]})
            f.write(c)
        f.close()
def main():
    """Entry point: parse the exported register JSON and generate all outputs."""
    register_data = parse_json()
    gen_lists_and_csv(register_data)


if __name__ == '__main__':
    main()
|
<filename>setup.py
#!/usr/bin/env python
from distutils.command.build import build
from distutils.spawn import find_executable
import sys
import os
import subprocess
import errno
from version import get_version
# ASCII-art project banner.
# NOTE(review): defined but never printed in the visible code — confirm use.
KPROCESSOR = r"""
 _ _____
| | | __ \
| | _| |__) | __ ___ ___ ___ ___ ___ ___ _ __
| |/ / ___/ '__/ _ \ / __/ _ \/ __/ __|/ _ \| '__|
| <| | | | | (_) | (_| __/\__ \__ \ (_) | |
|_|\_\_| |_| \___/ \___\___||___/___/\___/|_|
"""
# Hard requirement: the SWIG_OPTS below use -py3 and f-strings are used later.
if sys.version_info[:2] < (3, 6):
    raise RuntimeError("Python version >=3.6")
# Remove a stale distutils manifest so the file list is regenerated.
if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')
# Prefer setuptools; fall back to plain distutils when it is unavailable.
try:
    from setuptools import setup, Extension
except ImportError:
    from distutils.core import setup, Extension
# README becomes the long_description; tolerate a missing file.
try:
    with open('README.md') as f:
        readme = f.read()
    except IOError:
    readme = ''
except IOError:
    readme = ''
if os.path.islink("KP_BUILD"):
    os.unlink("KP_BUILD")
# Point KP_BUILD at the CMake build tree when the static library exists;
# the library/link paths below are expressed relative to this symlink.
if os.path.exists("build/libkProcessor.a"):
    os.symlink("build", "KP_BUILD")
def check_exist(dirs):
    """Verify that every path in *dirs* is an existing directory.

    Prints one error line per missing directory to stderr, then raises a
    single FileNotFoundError naming all of them; returns None when every
    directory exists.
    """
    missing = [directory for directory in dirs if not os.path.isdir(directory)]
    for directory in missing:
        print(f"[ERROR] | DIR: {directory} does not exist.", file=sys.stderr)
    if missing:
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), ",".join(missing))
# SWIG interface file that generates the Python wrapper.
SOURCES = [
    'swig_interfaces/kProcessor.i',
]
# The wrapper cannot be generated without the swig executable on PATH.
if not find_executable('swig'):
    sys.exit("Error: Building this module requires 'swig' to be installed")
# Header search paths (project + vendored third-party libraries).
INCLUDES = [
    'ThirdParty/ntCard/include',
    'include/kProcessor',
    'ThirdParty/MQF/include',
    'ThirdParty/sdsl-lite/include',
    'ThirdParty/kmerDecoder/include',
    'ThirdParty/kmerDecoder/lib/parallel-hashmap',
    'ThirdParty/kmerDecoder/lib/kseq/include',
]
check_exist(INCLUDES)
LINK_ARGS = [
    "-fopenmp",
    "-lgomp",
    "-lbz2",
    "-lz",
    "-ldl",
]
# All static-library paths are relative to the KP_BUILD symlink set up above.
kp_build_dir = "KP_BUILD"
LIBRARIES_DIRS = [
    f"{kp_build_dir}",
    f"{kp_build_dir}/ThirdParty/MQF/src",
    "ThirdParty/ntCard",
    f"{kp_build_dir}/ThirdParty/sdsl-lite/lib",
    f"{kp_build_dir}/ThirdParty/kmerDecoder",
    f"{kp_build_dir}/ThirdParty/MQF/ThirdParty/stxxl/lib",
]
check_exist(LIBRARIES_DIRS)
RUNTIME_LIBRARIES_DIRS = [
    #Placeholder
]
# Libraries linked into the extension, matching LIBRARIES_DIRS above.
LIBRARIES = [
    'kProcessor',
    'sdsl',
    'MQF',
    'ntcard',
    'kmerDecoder',
    'stxxl_debug',
]
# Flags passed to swig: C++ mode, Python-3 wrapper, docstrings from doxygen.
SWIG_OPTS = [
    '-c++',
    '-py3',
    '-outdir',
    '.',
    '-Isrc',
    '-doxygen',
]
class CustomBuild(build):
    """Build command with ``build_ext`` promoted ahead of ``build_py``.

    NOTE(review): the reorder presumably ensures the SWIG-generated
    ``kProcessor.py`` wrapper exists before ``build_py`` collects pure
    modules — confirm against the SWIG/distutils documentation.
    """
    sub_commands = [
        ('build_ext', build.has_ext_modules),
        ('build_py', build.has_pure_modules),
        ('build_clib', build.has_c_libraries),
        ('build_scripts', build.has_scripts),
    ]
# The native extension: SWIG interface + static libs built by CMake.
kProcessor_module = Extension('_kProcessor',
                              # runtime_library_dirs=RUNTIME_LIBRARIES_DIRS,
                              library_dirs=LIBRARIES_DIRS,
                              libraries=LIBRARIES,
                              sources=SOURCES,
                              include_dirs=INCLUDES,
                              # includes=BLIGHT_HEADERS,
                              extra_link_args=LINK_ARGS,
                              extra_compile_args=["-O3", "-Ofast", "-std=c++17", "-fPIC"],
                              swig_opts=SWIG_OPTS,
                              )
classifiers = [
    "License :: OSI Approved :: Apache Software License",
    'Development Status :: 3 - Alpha',
    "Operating System :: POSIX :: Linux",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
]
setup(name='kProcessor',
      version=get_version(),
      author="<NAME>, <NAME>, <NAME>",
      author_email='<EMAIL>, <EMAIL>, <EMAIL>',
      description="""kProcessor Python interface""",
      ext_modules=[kProcessor_module],
      py_modules=['kProcessor'],
      url='https://github.com/dib-lab/kProcessor',
      python_requires='>=3.6',
      cmdclass={'build': CustomBuild},
      license='BSD 3-Clause',
      long_description_content_type='text/markdown',
      long_description=readme,
      classifiers=classifiers,
      include_package_data=True,
      project_urls={
          'Bug Reports': 'https://github.com/dib-lab/kProcessor/issues',
          'Source': 'https://github.com/dib-lab/kProcessor',
      },
      )
# Tear the KP_BUILD symlink back down after the build completes.
if os.path.exists("build/libkProcessor.a") and os.path.islink("KP_BUILD"):
    os.unlink("KP_BUILD")
|
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from FlowPlayer import Player, Configuration, ControlsPlugin, ContentPlugin, Clip
from pyjamas import log
class FlowPlayerExample:
    """Pyjamas example wiring a FlowPlayer flash movie into the page.

    Builds a player with custom controls and two content overlays, attaches
    itself as listener for player, plugin and clip events, and reacts to
    those events by fading/animating the overlays.
    """
    def onModuleLoad(self):
        """Entry point: build the panel, create the player, attach to the page."""
        self.panel = VerticalPanel()
        self.player = self.getPlayer()
        # Add the Player to the Panel
        self.panel.add(self.player)
        # Attach the panel to the page's root panel.
        RootPanel().add(self.panel)
    def getPlayer(self):
        """
        Create a player
        """
        # Url to the flowplayer flashmovie
        url = 'swf/flowplayer-3.1.4.swf'
        # Create the initial configuration
        config = Configuration()
        # Add a Content
        plugin = self.getContentTop()
        config.addPlugin(plugin)
        # Customize Controls, if controls not added,
        # default controls will be used
        plugin = self.getControls()
        config.addPlugin(plugin)
        # Add the Common-Clip to configuration
        common_clip = Clip()
        common_clip.setAttr('autoBuffering', True)
        common_clip.setAttr('autoPlay', False)
        config.setCommonClip(common_clip)
        # Set a playlist
        playlist = self.getPlaylist()
        config.setPlaylist(playlist)
        # Create the Player Object with the initial configuration
        #log.writebr('Loading Player')
        player = Player(url, config)
        # Add Listener to the player
        player.addListener(self)
        return player
    def getPlaylist(self):
        """
        Create a playlist
        """
        playlist = []
        playlist.append(Clip('movies/movie1.flv'))
        playlist.append(Clip('movies/movie2.flv'))
        playlist.append(Clip('movies/movie3.flv'))
        playlist.append(Clip('movies/movie4.flv'))
        # Add Listener to the Clips
        for clip in playlist:
            clip.addListener(self)
        return playlist
    def getControls(self):
        """
        Create and configure the Controls Plugin
        """
        controls = ControlsPlugin()
        controls.setAttr('height', 20)
        controls.setAttr('timeColor', '#5b80b2')
        controls.setAttr('durationColor', '#000000')
        controls.setAttr('timeBgColor', '#DBDBDB')
        controls.setAttr('volumeSliderColor', '#DBDBDB')
        controls.setAttr('sliderColor', '#000000')
        controls.setAttr('bufferColor', '#DBDBDB')
        controls.setAttr('progressColor', '#bbbbbb')
        controls.setAttr('backgroundColor', '#FFFFFF')
        controls.setAttr('playlist', True)
        return controls
    def getContentTop(self):
        """
        Create and configure a content plugin
        """
        content = ContentPlugin(url='swf/flowplayer.content.swf', name='contentTop')
        content.setAttr('top', 0)
        content.setAttr('left', 0)
        content.setAttr('borderRadius', 15)
        content.setAttr('borderColor', '#222222')
        content.setAttr('width', '100%')
        content.setAttr('height', 60)
        content.setAttr('backgroundColor', '#112233')
        content.setAttr('backgroundGradient', 'low')
        content.setAttr('opacity', 0.9)
        content.addListener(self)
        return content
    def getContentBottom(self):
        """
        Create and configure another content plugin

        Starts tiny (1x1); it is resized via animate() in onLoadPlugin.
        """
        content = ContentPlugin(url='swf/flowplayer.content.swf', name='contentBottom')
        content.setAttr('bottom', 20)
        content.setAttr('left', 0)
        content.setAttr('borderRadius', 15)
        content.setAttr('borderColor', '#222222')
        content.setAttr('width', 1)
        content.setAttr('height', 1)
        content.setAttr('backgroundColor', '#112233')
        content.setAttr('backgroundGradient', 'low')
        content.setAttr('opacity', 0.9)
        content.addListener(self)
        return content
    # Player events
    def onLoadPlayer(self):
        """
        This is a Player Event
        Fired if the Player is loaded
        """
        #log.writebr('Player loaded')
        # Load a Content-Plugin at runtime into the player
        content = self.getContentBottom()
        self.player.loadPlugin(content)
    def onLoadPlugin(self, name):
        """
        This is a Player Event
        Fired if a plugin is loaded

        `name` is the plugin's configured name string.
        """
        #log.writebr('Plugin %s loaded' % name)
        if name == 'contentBottom':
            # Animate the content on bottom, if it is loaded
            content = self.player.getPlugin('contentBottom')
            props = {'width': 80, 'bottom': 40, 'left': 40, 'height': 30}
            content.animate(props)
            content.setHtml('Click me')
    def onClipAdd(self, clip, index):
        """
        This is a Player Event
        Fired if a clip is added to playlist
        """
        #log.writebr('Clip %s on index %s added' % (clip.url, index))
        pass
    def onPlaylistReplace(self, clips):
        """
        This is a Player Event
        Fired if the playlist is replaced
        """
        #log.writebr('Playlist is replaced')
        pass
    def onError(self, args):
        """
        This is a Player Event
        Fired on an error
        """
        log.writebr('Error: %s' % str(args))
    # Plugin events
    def onClickPlugin(self, plugin):
        """
        This is a Plugin Event
        Fired if a plugin is clicked
        """
        #log.writebr('Plugin %s clicked' % plugin.name)
        plugin = self.player.getPlugin(plugin.name)
        if plugin.name == 'contentTop':
            # Fade out the top content and start playing
            plugin.fadeOut()
            self.player.play()
        if plugin.name == 'contentBottom':
            # Fade out the bottom content
            plugin.fadeOut()
            # Add one more clip at runtime to the playlist
            #clip = Clip('movies/movie5.flv')
            #clip.addListener(self)
            #log.writebr('Add Clip')
            #self.player.addClip(clip, 3)
    def onAnimatePlugin(self, plugin):
        """
        This is a Plugin Event
        Fired if a plugin is animated
        """
        #log.writebr('Plugin %s animated' % plugin.name)
        pass
    # Clip events
    def onResume(self, clip):
        """
        This is a Clip Event
        Fired if the player is resumed
        """
        #log.writebr('Clip %s resumed' % clip.url)
        # Get the contentTop plugin, and fade it out
        plugin = self.player.getPlugin('contentTop')
        plugin.fadeOut()
    def onPause(self, clip):
        """
        This is a Clip Event
        Fired if the player is paused
        """
        #log.writebr('Clip %s paused' % clip.url)
        # Get the contentTop plugin, set some Text
        # and fade it in
        plugin = self.player.getPlugin('contentTop')
        plugin.setHtml('<b>%s</b>' % clip.url)
        plugin.append('<br>More Text')
        plugin.fadeIn()
if __name__ == '__main__':
    # Build the example and kick off module loading directly.
    FlowPlayerExample().onModuleLoad()
|
<filename>butcher/rubygems.py
"""Manage rubygems for butcher's internal use.
Much like the rest of Butcher, this is fairly gross.
"""
import os
import subprocess
from twitter.common import app
from twitter.common import log
from butcher import error
# Command-line flags registered with the twitter.common app framework;
# values are read back in RubyGems.setup_function().
app.add_option(
    '--gemdir', dest='gem_basedir', default='/var/lib/butcher',
    help='Path to our gems directory.')
app.add_option(
    '--gem_source', dest='gem_source', default='http://rubygems.org',
    help='Rubygems source repository.')
# TODO: Fallback gem_basedir for non-root installs?
#   That is, most of the time butcher will come from a .deb that includes or
#   depends on the requisite gems. If it isn't, butcher should still be able to
#   download and install what it needs in a user's homedir or elsewhere.
#   Perhaps there should be a system /etc/butcherrc that can set things like
#   gem_basedir in the case of distribution packages, and the default in this
#   file could revert to being inside of the butcher work directory?
class RubyGems(app.Module):
    """App module that wraps the system `gem` command for butcher's use."""

    def __init__(self):
        app.Module.__init__(self, label=__name__,
                            description='Rubygems wrapper.',
                            dependencies='butcher')
        # Populated by setup_function() once options are parsed.
        self.gem_basedir = None
        self.gem_source = None

    def gem_bindir(self):
        """Return the bin/ directory under the gem base directory."""
        return os.path.join(self.gem_basedir, 'bin')

    def setup_function(self):
        """Resolve gem options, export GEM_HOME/GEM_PATH, sanity-check `gem`."""
        if not app.get_options().gem_basedir:
            # No explicit --gemdir: default to <butcher_basedir>/gems.
            default_dir = os.path.join(app.get_options().butcher_basedir,
                                       'gems')
            app.set_option('gem_basedir', default_dir)
        options = app.get_options()
        self.gem_basedir = options.gem_basedir
        self.gem_source = options.gem_source
        os.environ['GEM_HOME'] = self.gem_basedir
        os.environ['GEM_PATH'] = self.gem_basedir
        try:
            subprocess.check_output(['gem', '--version'])
        except (OSError, subprocess.CalledProcessError):
            raise error.ButcherError('gem does not appear to be installed.')
app.register_module(RubyGems())
def install_gem(gemname, version=None, conservative=True, ri=False, rdoc=False,
                development=False, format_executable=False, force=False,
                gem_source=None):
    """Install a ruby gem by shelling out to `gem install`.

    Raises error.ButcherError if the gem command fails or cannot be run.
    """
    cmdline = ['gem', 'install']
    if conservative:
        cmdline.append('--conservative')
    # ri/rdoc always get an explicit on/off flag.
    cmdline.append('--ri' if ri else '--no-ri')
    cmdline.append('--rdoc' if rdoc else '--no-rdoc')
    for enabled, flag in ((development, '--development'),
                          (format_executable, '--format-executable'),
                          (force, '--force')):
        if enabled:
            cmdline.append(flag)
    if version:
        cmdline.extend(['--version', version])
    # Only install from the configured source.
    cmdline.extend(['--clear-sources',
                    '--source', gem_source or RubyGems().gem_source])
    cmdline.append(gemname)
    msg = 'Installing ruby gem: %s' % gemname
    if version:
        msg += ' Version requested: %s' % version
    log.debug(msg)
    try:
        subprocess.check_output(cmdline, shell=False)
    except (OSError, subprocess.CalledProcessError) as err:
        # NOTE(review): OSError has no .output attribute; this formatting
        # would itself fail when `gem` is missing -- behaviour kept as-is.
        raise error.ButcherError(
            'Gem install failed. Error was: %s. Output: %s' % (
                err, err.output))
def is_installed(gemname, version=None):
    """Check if a gem is installed.

    Returns True if `gem list -i` reports the gem (exit status 0), False if
    it reports absence (exit status 1). Raises error.ButcherError for any
    other failure, including `gem` itself being missing.
    """
    cmdline = ['gem', 'list', '-i', gemname]
    if version:
        cmdline.extend(['-v', version])
    try:
        subprocess.check_output(cmdline, shell=False)
        return True
    except subprocess.CalledProcessError as err:
        # Exit status 1 is gem's "not installed" answer, not an error.
        if err.returncode == 1:
            return False
        raise error.ButcherError(
            'Failure running gem. Error was: %s. Output: %s' % (
                err, err.output))
    except OSError as err:
        # Bug fix: the original caught OSError in the same clause and then
        # read err.returncode/err.output, which OSError does not have,
        # raising AttributeError instead of a useful error.
        raise error.ButcherError('Failure running gem. Error was: %s' % err)
def gem_bindir():
    """Module-level convenience wrapper for RubyGems().gem_bindir()."""
    gems = RubyGems()
    return gems.gem_bindir()
|
<reponame>coderzh/pywasm3
#!/usr/bin/env python3
import os, struct, time
import multiprocessing as mp
import wasm3
import numpy
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "true"  # silence pygame's import banner
# Set to 44100 for better quality, or 11025 for faster computation
sample_rate = 22050
duration = 164000  # total play time, compared against wasm "currentTimeMillis"
buffersize = 128*4  # bytes per channel fetched per call (128 float32 samples)
prebuffer = 1024  # KiB to synthesize before playback starts (see progress calc)
def draw(c):
    """Print *c* without a newline and flush immediately (progress output)."""
    print(c, end='', flush=True)
def player(q):
    """Playback process: pull PCM chunks from queue *q* and play via pygame.

    Runs until a non-buffer value (the None sentinel posted by the producer)
    arrives, which makes Sound(buffer=...) raise TypeError, or until the
    process is interrupted.
    """
    import pygame
    # 16-bit signed (size=-16) stereo mixer at the shared sample rate.
    pygame.mixer.pre_init(frequency=sample_rate, size=-16, channels=2)
    pygame.init()
    channel = pygame.mixer.Channel(0)
    try:
        while True:
            chunk = pygame.mixer.Sound(buffer=q.get())
            # "|" = playback kept up (a chunk is still queued), "." = it wasn't.
            draw("|" if channel.get_queue() else ".")
            # Wait until the channel can accept the next queued chunk.
            while channel.get_queue() is not None:
                time.sleep(0.01)
            channel.queue(chunk)
    except (TypeError, BrokenPipeError, KeyboardInterrupt, SystemExit):
        pygame.quit()
if __name__ == '__main__':
    print("WebAssembly Music by <NAME> - from the executable music competition at Revision demoparty 2021")
    print("Source: https://petersalomonsen.com/webassemblymusic/livecodev2/?gist=d71387112368a2692dc1d84c0ab5b1d2")
    print("Synthesized: https://soundcloud.com/psalomo/webassembly-music-entry-for-the-revision-2021-executable-music-competition")
    print()
    # Spawn the playback process; synthesized PCM is handed over via a queue.
    q = mp.Queue()
    p = mp.Process(target=player, args=(q,))
    p.start()
    scriptpath = os.path.dirname(os.path.realpath(__file__))
    wasm_fn = os.path.join(scriptpath, f"./wasm/music.wasm")
    # Prepare Wasm3 engine
    env = wasm3.Environment()
    rt = env.new_runtime(2*1024)  # runtime with a 2*1024 stack -- TODO confirm units
    with open(wasm_fn, "rb") as f:
        mod = env.parse_module(f.read())
    rt.load(mod)
    mod.set_global("SAMPLERATE", sample_rate)
    wasm_play = rt.find_function("playEventsAndFillSampleBuffer")
    # Left/right sample buffers sit back-to-back in wasm linear memory.
    samplebufferL = mod.get_global("samplebuffer")
    samplebufferR = samplebufferL + buffersize
    def fetch_data():
        """Run one synth step and return interleaved 16-bit stereo PCM bytes."""
        global buff
        wasm_play()
        # get data
        mem = rt.get_memory(0)
        data_l = mem[samplebufferL : samplebufferL + buffersize]
        data_r = mem[samplebufferR : samplebufferR + buffersize]
        # decode
        data_l = numpy.frombuffer(data_l, dtype=numpy.float32)
        data_r = numpy.frombuffer(data_r, dtype=numpy.float32)
        data = numpy.dstack((data_l, data_r))  # interleave L and R samples
        return (data.clip(-1,1) * 32767).astype(numpy.int16).tobytes()
    try:
        # Phase 1: synthesize `prebuffer` KiB before starting playback.
        buff = b''
        progress = 0
        while progress < 100:
            buff += fetch_data()
            progress = int(100*len(buff)/(prebuffer*1024))
            if not progress % 5:
                draw(f"\rPre-buffering... {progress}%")
        q.put(buff)
        draw("\n")
        # Phase 2: stream 64 KiB chunks until the track's time runs out.
        buff = b''
        t = 0
        while t < duration:
            t = mod.get_global("currentTimeMillis")
            #draw(f"\rT: {t/1000:.3f}s")
            buff += fetch_data()
            if len(buff) >= 64*1024:
                #draw("+")
                q.put(buff)
                buff = b''
            time.sleep(0.01)
        q.put(buff) # play the leftover
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        q.put(None)  # shutdown sentinel understood by player()
        q.close()
        p.join()
        print()
        print("Finished")
|
<reponame>kallif003/Sistema-Delivery
def sql_info_pedido(*args):
    """Load every detail of one order and populate the order-info window.

    args: (data, cursor, telaInfoPedido, QtWidgets)
        data           -- order date, used to filter the per-item tables
        cursor         -- open DB-API cursor (format paramstyle, e.g. MySQL)
        telaInfoPedido -- Qt form holding the widgets to fill
        QtWidgets      -- Qt widgets module (for QTableWidgetItem)

    Reads the order header (customer, status, payment) plus every item
    category, flattens them into one list and shows it in tableWidget_2.
    Any failure is swallowed and only the exception class is printed
    (original behaviour, preserved).
    """
    try:
        data = args[0]
        cursor = args[1]
        telaInfoPedido = args[2]
        QtWidgets = args[3]
        id2 = int(telaInfoPedido.codigo.text())
        listaPedido = []

        # Header queries. Parameterized (%s + values tuple) instead of the
        # original Python %-interpolation, to avoid SQL injection and match
        # the style already used by the per-item queries below.
        cursor.execute(
            "select id, telefone, nome, cep, endereco, numero, bairro, "
            "referencia, complemento, taxaEntrega, valorTotal "
            "from gerenciarPedido where id = %s", (id2,))
        cliente = cursor.fetchall()
        cursor.execute(
            "select motoboy, motivo from status_pedido where id_pedido = %s",
            (id2,))
        status = cursor.fetchall()
        cursor.execute(
            "select cartao , voucher, dinheiro, troco, desconto, pix, "
            "observacao from pagamento where id_pagamento = %s", (id2,))
        pagamento = cursor.fetchall()

        # Fill the header widgets (label texts kept byte-for-byte).
        telaInfoPedido.telefone.setText('Tel:' + ' ' + str(cliente[0][1]))
        telaInfoPedido.nome.setText('Nome:' + ' ' + str(cliente[0][2]))
        telaInfoPedido.cep.setText('Cep:' + str(cliente[0][3]))
        telaInfoPedido.endereco.setText('End:' + ' ' + str(cliente[0][4]))
        telaInfoPedido.numero.setText('Numero:' + ' ' + str(cliente[0][5]))
        telaInfoPedido.bairro.setText('Bairro:' + ' ' + str(cliente[0][6]))
        telaInfoPedido.ref.setText('Ref:' + ' ' + str(cliente[0][7]))
        telaInfoPedido.complemento.setText('Compl:' + ' ' + str(cliente[0][8]))
        telaInfoPedido.taxa.setText('Taxa:' + ' ' + str(cliente[0][9]))
        telaInfoPedido.valorTotal.setText('Total:' + ' ' + str(cliente[0][10]))
        telaInfoPedido.motoboy.setText('Motoboy:' + ' ' + str(status[0][0]))
        telaInfoPedido.cartao.setText('Cartao:' + ' ' + str(pagamento[0][0]))
        telaInfoPedido.voucher.setText('Voucher:' + ' ' + str(pagamento[0][1]))
        telaInfoPedido.dinheiro.setText('Dinheiro:' + ' ' + str(pagamento[0][2]))
        telaInfoPedido.troco.setText('Troco:' + ' ' + str(pagamento[0][3]))
        telaInfoPedido.desconto.setText('Desc:' + ' ' + str(pagamento[0][4]))
        telaInfoPedido.pix.setText('Pix:' + ' ' + str(pagamento[0][5]))
        telaInfoPedido.label.setText(str(status[0][1]))
        # NOTE(review): this immediately overwrites the motivo text set on the
        # previous line -- likely one of the two should target another label.
        # Kept as-is to preserve behaviour.
        telaInfoPedido.label.setText(str(pagamento[0][6]))

        def _itens(tabela, coluna_id, com_id=True):
            """Fetch one item category for this order id and date."""
            campos = 'tamanho, parte, sabor, valorProduto, quantidade, subtotal'
            if com_id:
                campos = 'id, ' + campos
            cursor.execute(
                'select ' + campos + ' from ' + tabela +
                ' where ' + coluna_id + ' = %s and dataa = %s',
                (id2, data))
            return cursor.fetchall()

        # Pizza fractions (rows carry the pizza id as first column).
        inteiro = _itens('per_inteiro', 'id_int')
        metade1 = _itens('per_met1', 'id_met')
        metade2 = _itens('per_met2', 'id_met2')
        terco1 = _itens('per_terco1', 'id_terco')
        terco2 = _itens('per_terco2', 'id_terco2')
        terco3 = _itens('per_terco3', 'id_terco3')
        quarto1 = _itens('per_quarto1', 'id_Qt')
        quarto2 = _itens('per_quarto2', 'id_Qt2')
        quarto3 = _itens('per_quarto3', 'id_Qt3')
        quarto4 = _itens('per_quarto4', 'id_Qt4')
        # Extras; these two have distinct column layouts, kept explicit.
        cursor.execute(
            "select id_pizza, tamanho, vazio1, adicional, valor, "
            "vazio2, vazio3 from per_semAdc where id_semAdc = %s and dataa = %s",
            (id2, data))
        adc = cursor.fetchall()
        cursor.execute(
            "select id_pizza, tamanho, vazio1, adicional, "
            "vazio2, vazio3, valor from per_adc where id_adc = %s and dataa = %s",
            (id2, data))
        adc2 = cursor.fetchall()
        # Other products (no id column in the row).
        esfiha = _itens('per_esfihas', 'id_esfiha', com_id=False)
        lata = _itens('per_lata', 'id_lata', com_id=False)
        s600 = _itens('per_s600', 'id_600', com_id=False)
        umLitro = _itens('per_1L', 'id_1L', com_id=False)
        umLmeio = _itens('per_1Lmeio', 'id_1meio', com_id=False)
        doisLitros = _itens('per_2l', 'id_2L', com_id=False)
        doisLmeio = _itens('per_2Lmeio', 'id_2meio', com_id=False)
        outros = _itens('per_outros', 'id_outros', com_id=False)

        def _adicionais(id_pizza):
            """Append this pizza's extras, per_adc rows before per_semAdc."""
            for ad in adc2:
                if ad[0] == id_pizza:
                    listaPedido.append(ad)
            for ad in adc:
                if ad[0] == id_pizza:
                    listaPedido.append(ad)

        # Flatten everything into listaPedido, each pizza followed by its
        # extras (matched on the first fraction's id, as in the original).
        for item in inteiro:
            listaPedido.append(item)
            _adicionais(item[0])
        if metade1 and metade2:
            for m1, m2 in zip(metade1, metade2):
                listaPedido.extend((m1, m2))
                _adicionais(m1[0])
        if terco1 and terco2 and terco3:
            for t1, t2, t3 in zip(terco1, terco2, terco3):
                listaPedido.extend((t1, t2, t3))
                _adicionais(t1[0])
        if quarto1 and quarto2 and quarto3 and quarto4:
            for q1, q2, q3, q4 in zip(quarto1, quarto2, quarto3, quarto4):
                listaPedido.extend((q1, q2, q3, q4))
                _adicionais(q1[0])
        for categoria in (esfiha, lata, s600, umLitro, umLmeio,
                          doisLitros, doisLmeio, outros):
            listaPedido.extend(categoria)

        # Render the flattened order into the 6-column table.
        telaInfoPedido.tableWidget_2.setRowCount(len(listaPedido))
        telaInfoPedido.tableWidget_2.setColumnCount(6)
        for linha, registro in enumerate(listaPedido):
            for coluna in range(6):
                telaInfoPedido.tableWidget_2.setItem(
                    linha, coluna,
                    QtWidgets.QTableWidgetItem(str(registro[coluna])))
    except Exception as erro:
        # Original behaviour: swallow everything, print only the class.
        print(erro.__class__)
|
<reponame>VirtualL/home-assistant<filename>homeassistant/components/snmp/device_tracker.py
"""Support for fetching WiFi associations through SNMP."""
import binascii
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST
REQUIREMENTS = ['pysnmp==4.4.8']
_LOGGER = logging.getLogger(__name__)
# Platform configuration keys.
CONF_AUTHKEY = 'authkey'  # SNMPv3 authentication key
CONF_BASEOID = 'baseoid'  # OID table that yields the associated MAC addresses
CONF_COMMUNITY = 'community'  # community string (or v3 user when keys given)
CONF_PRIVKEY = 'privkey'  # SNMPv3 privacy (encryption) key
DEFAULT_COMMUNITY = 'public'
# authkey/privkey are vol.Inclusive: both or neither must be provided.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_BASEOID): cv.string,
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
    vol.Inclusive(CONF_AUTHKEY, 'keys'): cv.string,
    vol.Inclusive(CONF_PRIVKEY, 'keys'): cv.string,
})
def get_scanner(hass, config):
    """Validate the configuration and return an SNMP scanner."""
    scanner = SnmpScanner(config[DOMAIN])
    if not scanner.success_init:
        return None
    return scanner
class SnmpScanner(DeviceScanner):
    """Queries any SNMP capable Access Point for connected devices."""
    def __init__(self, config):
        """Initialize the scanner and probe the device once."""
        from pysnmp.entity.rfc3413.oneliner import cmdgen
        from pysnmp.entity import config as cfg
        self.snmp = cmdgen.CommandGenerator()
        # Standard SNMP UDP port 161.
        self.host = cmdgen.UdpTransportTarget((config[CONF_HOST], 161))
        if CONF_AUTHKEY not in config or CONF_PRIVKEY not in config:
            # No v3 keys configured: plain community-based access.
            self.auth = cmdgen.CommunityData(config[CONF_COMMUNITY])
        else:
            # SNMPv3 with SHA auth and AES-128 privacy.
            self.auth = cmdgen.UsmUserData(
                config[CONF_COMMUNITY],
                config[CONF_AUTHKEY],
                config[CONF_PRIVKEY],
                authProtocol=cfg.usmHMACSHAAuthProtocol,
                privProtocol=cfg.usmAesCfb128Protocol
            )
        self.baseoid = cmdgen.MibVariable(config[CONF_BASEOID])
        self.last_results = []
        # Test the router is accessible
        data = self.get_snmp_data()
        self.success_init = data is not None
    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return [client['mac'] for client in self.last_results
                if client.get('mac')]
    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        # We have no names
        return None
    def _update_info(self):
        """Ensure the information from the device is up to date.
        Return boolean if scanning successful.
        """
        if not self.success_init:
            return False
        data = self.get_snmp_data()
        if not data:
            return False
        self.last_results = data
        return True
    def get_snmp_data(self):
        """Fetch MAC addresses from access point via SNMP.

        Returns a list of {'mac': 'aa:bb:...'} dicts, or None on SNMP error.
        """
        devices = []
        # Walk the configured base OID.
        errindication, errstatus, errindex, restable = self.snmp.nextCmd(
            self.auth, self.host, self.baseoid)
        if errindication:
            _LOGGER.error("SNMPLIB error: %s", errindication)
            return
        if errstatus:
            _LOGGER.error("SNMP error: %s at %s", errstatus.prettyPrint(),
                          errindex and restable[int(errindex) - 1][0] or '?')
            return
        for resrow in restable:
            for _, val in resrow:
                try:
                    mac = binascii.hexlify(val.asOctets()).decode('utf-8')
                except AttributeError:
                    # Value is not an octet string; skip it.
                    continue
                _LOGGER.debug("Found MAC %s", mac)
                # Reformat "aabbcc..." as "aa:bb:cc:...".
                mac = ':'.join([mac[i:i+2] for i in range(0, len(mac), 2)])
                devices.append({'mac': mac})
        return devices
|
class Instruccion:
    '''Abstract base class for every SQL instruction / AST node.'''
class CrearBD(Instruccion) :
    '''
    Represents CREATE DATABASE: replace flag, existence check,
    database name, owner and mode.
    '''
    def __init__(self,reemplazar,verificacion,nombre, propietario, modo) :
        self.reemplazar = reemplazar
        self.verificacion = verificacion
        self.nombre = nombre
        self.propietario = propietario
        self.modo = modo
class CrearTabla(Instruccion) :
    '''
    Represents CREATE TABLE.

    Receives the table name, the parent table (inheritance) and the list
    of column definitions.
    '''
    def __init__(self, nombre, padre, columnas=None) :
        self.nombre = nombre
        # Fresh list per instance: a mutable default argument ([]) would be
        # shared by every instance built without an explicit `columnas`.
        self.columnas = [] if columnas is None else columnas
        self.padre = padre
class CrearType(Instruccion) :
    '''
    Represents CREATE TYPE: type name and its list of allowed values.
    '''
    def __init__(self, nombre, valores=None) :
        self.nombre = nombre
        # Avoid the shared mutable default-argument pitfall.
        self.valores = [] if valores is None else valores
class EliminarTabla(Instruccion) :
    '''
    Represents DROP TABLE: IF EXISTS flag and the table name.
    '''
    def __init__(self, existencia, nombre) :
        self.nombre = nombre
        self.existencia = existencia
class EliminarDB(Instruccion) :
    '''
    Represents DROP DATABASE: IF EXISTS flag and the database name.
    '''
    def __init__(self, existencia, nombre) :
        self.nombre = nombre
        self.existencia = existencia
class columnaTabla(Instruccion) :
    '''
    Represents one column definition of a table: id, type, value,
    timezone flag and attribute list.
    '''
    def __init__(self, id, tipo, valor,zonahoraria, atributos=None) :
        self.id = id
        self.tipo = tipo
        self.valor = valor
        self.zonahoraria = zonahoraria
        # Avoid the shared mutable default-argument pitfall.
        self.atributos = [] if atributos is None else atributos
class llaveTabla(Instruccion) :
    '''
    Represents a table key, primary or foreign.
    tipo: True = primary, False = foreign.
    '''
    def __init__(self, tipo,referencia,columnas=None,columnasRef=None) :
        self.tipo = tipo
        self.referencia = referencia
        # Fresh lists per instance: mutable default arguments would be
        # shared between every key built without explicit lists.
        self.columnas = [] if columnas is None else columnas
        self.columnasRef = [] if columnasRef is None else columnasRef
class atributoColumna(Instruccion) :
    '''
    Represents the attributes of a column:
    DEFAULT, CONSTRAINT, NULL, UNIQUE, PRIMARY and CHECK.
    '''
    def __init__(self, default,constraint,null,unique,primary,check) :
        self.default = default
        self.constraint = constraint
        self.null = null
        self.unique = unique
        self.primary = primary
        self.check = check
class Insertar(Instruccion):
    '''
    Represents INSERT: table name, target columns and values to insert.
    '''
    def __init__(self, nombre, columnas, valores=None) :
        self.nombre = nombre
        self.columnas = columnas
        # Avoid the shared mutable default-argument pitfall.
        self.valores = [] if valores is None else valores
class Actualizar(Instruccion):
    '''
    Represents UPDATE: table name, WHERE condition and values to set.
    '''
    def __init__(self, nombre, condicion, valores=None) :
        self.nombre = nombre
        self.condicion = condicion
        # Avoid the shared mutable default-argument pitfall.
        self.valores = [] if valores is None else valores
class columna_actualizar(Instruccion):
    '''
    Represents one column to update: name and new value.
    '''
    def __init__(self, nombre, valor) :
        self.nombre = nombre
        self.valor = valor
class Eliminar(Instruccion):
    '''
    Represents DELETE from a table: table name and WHERE condition.
    '''
    def __init__(self, nombre, condicion):
        self.nombre = nombre
        self.condicion = condicion
class DBElegida(Instruccion):
    '''
    Represents the currently selected (USE) database.
    '''
    def __init__(self,nombre):
        self.nombre = nombre
class MostrarDB(Instruccion):
    '''
    Represents SHOW DATABASES (no payload).
    '''
class MostrarTB(Instruccion):
    '''
    Represents SHOW TABLES of a database (no payload).
    '''
class Limite_Select(Instruccion):
    '''
    Represents the LIMIT/OFFSET wrapper around a SELECT.
    '''
    def __init__(self, select, limit, offset):
        self.select=select
        self.limit=limit
        self.offset=offset
class SELECT(Instruccion):
    '''
    Represents a SELECT statement: quantity (e.g. DISTINCT/ALL),
    selected parameters, body and function aliases.
    '''
    def __init__(self, cantidad, parametros, cuerpo, funcion_alias):
        self.cantidad=cantidad
        # The original attribute name had a typo ("cantida"); it is kept as
        # an alias so any existing readers of that name keep working.
        self.cantida=cantidad
        self.parametros=parametros
        self.cuerpo=cuerpo
        self.funcion_alias=funcion_alias
class Funcion_Alias(Instruccion):
    '''
    Represents a function together with its alias.
    '''
    def __init__(self, nombre, alias):
        self.nombre=nombre
        self.alias=alias
class CUERPO_SELECT(Instruccion):
    '''
    Represents the body of a SELECT:
    FROM, JOIN, WHERE, GROUP BY, HAVING and ORDER BY parts.
    '''
    def __init__(self, b_from, b_join, b_where, b_group, b_having, b_order):
        self.b_from=b_from
        self.b_join=b_join
        self.b_where=b_where
        self.b_group=b_group
        self.b_having=b_having
        self.b_order=b_order
class Orden_Atributo(Instruccion):
    '''
    Represents the ordering of an attribute: name, direction and range.
    '''
    def __init__(self, nombre, direccion, rango):
        self.nombre=nombre
        self.direccion=direccion
        self.rango=rango
class SubQuery(Instruccion):
    '''
    Represents a subquery and its comparison against the main query.
    '''
    def __init__(self, condicion, subquery, alias):
        self.condicion=condicion
        self.subquery=subquery
        self.alias=alias
class Valor_From(Instruccion):
    '''
    Represents the content of a FROM clause: name or subquery, plus alias.
    '''
    def __init__(self, nombre, subquery, alias):
        self.nombre=nombre
        self.subquery=subquery
        self.alias=alias
class SubQuery_IN(Instruccion):
    '''
    Marks an IN / NOT IN subquery: expression and kind.
    '''
    def __init__(self, exp, tipo):
        self.exp=exp
        self.tipo=tipo
class Valor_Select(Instruccion):
    '''
    Represents one selected value: name, kind, alias and
    function/expression payload.
    '''
    def __init__(self, nombre, tipo, alias, fun_exp):
        self.nombre=nombre
        self.tipo=tipo
        self.alias=alias
        self.fun_exp=fun_exp
class Condicion_WHEN_THEN(Instruccion):
    '''
    Represents one WHEN <exp> THEN <resultado> arm of a CASE.
    '''
    def __init__(self, exp, resultado):
        self.exp=exp
        self.resultado=resultado
class Case(Instruccion):
    '''
    Represents a CASE expression: WHEN/THEN arms, ELSE branch and alias.
    '''
    def __init__(self, condicion, sino, alias):
        self.condicion=condicion
        self.sino=sino
        self.alias=alias
# --- ALTER statement nodes -------------------------------------------------
class ALTERDBO(Instruccion):
    '''
    ALTER DATABASE <Id> <TipoCon> = <valor>
    '''
    def __init__(self, Id, TipoCon,valor):
        self.Id = Id
        self.TipoCon = TipoCon
        self.valor = valor
class ALTERTBO(Instruccion):
    '''
    ALTER TABLE: table id plus the body (sub-instruction) to apply.
    '''
    def __init__(self, Id,cuerpo):
        self.Id = Id
        self.cuerpo = cuerpo
class ALTERTBO_RENAME(Instruccion):
    '''
    ALTER TABLE ... RENAME: source id, target id and the rename operation.
    '''
    def __init__(self, Id1,Id2,operacion):
        self.Id1 = Id1
        self.Id2 = Id2
        self.operacion = operacion
class ALTERTBO_ALTER_PROPIEDADES(Instruccion):
    '''
    Property bundle for ALTER TABLE ... ALTER (five generic slots).
    '''
    def __init__(self, prop1,prop2,prop3,prop4,prop5):
        self.prop1 = prop1
        self.prop2 = prop2
        self.prop3 = prop3
        self.prop4 = prop4
        self.prop5 = prop5
class ALTERTBO_ALTER(Instruccion):
    '''
    ALTER TABLE ... ALTER: instruction kind, target id and extra payload.
    '''
    def __init__(self, instruccion,id,extra):
        self.instruccion = instruccion
        self.id = id
        self.extra = extra
class ALTERTBO_DROP(Instruccion):
    '''
    ALTER TABLE ... DROP: instruction kind and target id.
    '''
    def __init__(self, instruccion,id):
        self.instruccion = instruccion
        self.id = id
class ALTERTBO_ADD(Instruccion):
    '''
    ALTER TABLE ... ADD: id, type, type value, instruction and extras.
    '''
    def __init__(self, id,tipo,valortipo,instruccion,extra):
        self.id = id
        self.tipo = tipo
        self.valortipo = valortipo
        self.instruccion = instruccion
        self.extra = extra
class ALTERTBO_ADD_EXTRAS(Instruccion):
    '''
    Extra payload for ALTER TABLE ... ADD.
    '''
    def __init__(self, instruccion,contenido, id , contenido2):
        self.instruccion = instruccion
        self.contenido = contenido
        self.id = id
        self.contenido2 = contenido2
# added 18/12/2020
class ALTERTBO_ALTER_SERIE(Instruccion):
    '''
    Series payload for ALTER TABLE (list of values).
    '''
    def __init__(self, listaval):
        self.listaval = listaval
|
<reponame>biyiklioglu/container-service-extension
# container-service-extension
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
CSE client tests to test validity and functionality of `vcd cse` CLI commands.
Tests these following commands:
$ vcd cse version
$ vcd cse system info
$ vcd cse template list
$ vcd cse ovdc enable ...
$ vcd cse ovdc disable ...
NOTE:
- These tests will install CSE on vCD if CSE is not installed already.
- Edit 'base_config.yaml' for your own vCD instance.
- Testers MUST have a cluster admin user in the org with the same credentials
as system administrator (system administrators cannot deploy clusters).
- Clusters are deleted on test failure, unless 'teardown_clusters'=false in
'base_config.yaml'.
- This test module typically takes ~20 minutes to finish per template.
TODO() by priority
- test pks broker
- test that node rollback works correctly (node rollback is not implemented
yet due to a vcd-side bug, where a partially powered-on VM cannot be force
deleted)
- tests/fixtures to test command accessibility for various
users/roles (vcd_cluster_admin() fixture should be replaced with
a minimum rights user fixture)
- test `vcd cse cluster config test_cluster --save` option (currently does
not work)
- test nfs functionality
- test accessing cluster via kubectl (may be unnecessary)
"""
import collections
import os
import re
import subprocess
import time
import pytest
from pyvcloud.vcd.client import VcdApiVersionObj
from pyvcloud.vcd.vcd_api_version import VCDApiVersion
from system_tests_v2.pytest_logger import PYTEST_LOGGER
from vcd_cli.vcd import vcd
import yaml
from container_service_extension.rde.utils import get_runtime_rde_version_by_vcd_api_version # noqa: E501
from container_service_extension.server.cli.server_cli import cli
import container_service_extension.system_test_framework.environment as env
import container_service_extension.system_test_framework.utils as testutils
# Parameter bundles used to parametrize the client tests below; namedtuple
# field lists are space-separated strings.
OVDC_ENABLE_TEST_PARAM = collections.namedtuple("OvdcEnableParam", "user password org_name ovdc_name disable_before_test expect_failure")  # noqa: E501
OVDC_DISABLE_TEST_PARAM = collections.namedtuple("OvdcDisableParam", "user password org_name ovdc_name enable_before_test expect_failure")  # noqa: E501
SYSTEM_TOGGLE_TEST_PARAM = collections.namedtuple("SystemToggleTestParam", "user password cluster_name worker_count nfs_count rollback sizing_class storage_profile ovdc_network template_name template_revision expect_failure")  # noqa: E501
CLUSTER_APPLY_TEST_PARAM = collections.namedtuple("ClusterApplyTestParam", "user password cluster_name worker_count nfs_count rollback cpu memory sizing_class storage_profile ovdc_network template_name template_revision expected_phase retain_cluster exit_code should_vapp_exist should_rde_exist required_rde_version expect_failure")  # noqa: E501
CLUSTER_DELETE_TEST_PARAM = collections.namedtuple("ClusterDeleteTestParam", "user password cluster_name org ovdc expect_failure")  # noqa: E501
CLUSTER_UPGRADE_TEST_PARAM = collections.namedtuple("ClusterUpgradeTestParam", "user password cluster_name worker_count nfs_count rollback sizing_class storage_profile ovdc_network upgrade_path expect_failure")  # noqa: E501
# Defaults applied to deployed cluster VMs when the test does not override.
DEFAULT_CPU_COUNT = 2
DEFAULT_MEMORY_MB = 2048
@pytest.fixture(scope='module', autouse=True)
def cse_server():
    """Fixture to ensure that CSE is installed and running before client tests.

    This fixture executes automatically for this module's setup and teardown.
    Setup tasks:
    - Run `cse upgrade` (if CSE is registered as an MQTT extension) or
      `cse install` to ensure CSE registration and missing templates exist
    - Publish the native right bundle and assign rights to test roles
    - Run the CSE server as a subprocess
    Teardown tasks:
    - Stop the CSE server subprocess
    """
    if env.IS_CSE_SERVER_RUNNING:
        # CSE server is already running
        yield
        return
    env.setup_active_config(logger=PYTEST_LOGGER)
    # Pick the server CLI verb based on whether CSE is already registered
    # as an MQTT extension: upgrade in place vs. fresh install.
    if env.is_cse_registered_as_mqtt_ext(logger=PYTEST_LOGGER):
        cmd = [
            'upgrade',
            '--config', env.ACTIVE_CONFIG_FILEPATH,
            '--skip-config-decryption'
        ]
    else:
        cmd = [
            'install',
            '--config', env.ACTIVE_CONFIG_FILEPATH,
            '--skip-config-decryption'
        ]
    # input='y' auto-confirms the CLI's interactive prompt
    result = env.CLI_RUNNER.invoke(cli, cmd, input='y', catch_exceptions=False)
    assert result.exit_code == 0,\
        testutils.format_command_info('cse', cmd, result.exit_code,
                                      result.output)
    # assign native right bundle to test org
    env.publish_right_bundle_to_deployment_org(logger=PYTEST_LOGGER)
    # assign rights to cluster admin role
    env.assign_native_rights(env.CLUSTER_ADMIN_ROLE_NAME,
                             ["cse:nativeCluster: Full Access",
                              "cse:nativeCluster: Modify",
                              "cse:nativeCluster: View"],
                             logger=PYTEST_LOGGER)
    # assign rights to cluster author role
    env.assign_native_rights(env.CLUSTER_AUTHOR_ROLE_NAME,
                             ["cse:nativeCluster: Modify",
                              "cse:nativeCluster: View"],
                             logger=PYTEST_LOGGER)
    # Create missing templates
    PYTEST_LOGGER.debug("Creating missing templates")
    for template_config in env.TEMPLATE_DEFINITIONS:
        cmd = f"template install {template_config['name']} " \
              f"{template_config['revision']} " \
              f"--config {env.ACTIVE_CONFIG_FILEPATH} " \
              f"--ssh-key {env.SSH_KEY_FILEPATH} " \
              f"--skip-config-decryption"
        result = env.CLI_RUNNER.invoke(
            cli, cmd.split(), catch_exceptions=False)
        assert result.exit_code == 0,\
            testutils.format_command_info('cse', cmd, result.exit_code,
                                          result.output)
        PYTEST_LOGGER.debug("Successfully installed template "
                            f"{template_config['name']} at "
                            f"revision {template_config['revision']}")
    # start cse server as subprocess
    cmd = f"cse run -c {env.ACTIVE_CONFIG_FILEPATH} --skip-config-decryption"
    if os.name == 'nt':
        # Windows: shell=True so `cse` resolves through the shell
        p = subprocess.Popen(cmd, shell=True)
    else:
        p = subprocess.Popen(cmd.split(),
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.STDOUT)
    time.sleep(env.WAIT_INTERVAL * 3)  # server takes a little while to set up
    # NOTE(review): p.stdout is None here (DEVNULL/inherited), so this log
    # line records no server output — confirm intent.
    PYTEST_LOGGER.debug(p.stdout)
    PYTEST_LOGGER.debug("Successfully started the CSE server.")
    yield
    # terminate cse server subprocess
    try:
        # check if the subprocess is running or not
        if p and p.poll() is None:
            if os.name == 'nt':
                # taskkill /t kills the whole process tree spawned via shell
                subprocess.Popen(f"taskkill /f /pid {p.pid} /t")
            else:
                p.terminate()
        PYTEST_LOGGER.debug("Killed CSE server")
    except OSError as e:
        PYTEST_LOGGER.warning(f"Failed to kill CSE server {e}")
@pytest.fixture
def vcd_sys_admin():
    """Log in to vcd-cli as system administrator for the test duration.

    Usage: add the parameter 'vcd_sys_admin' to the test function.
    Credentials come from 'system_tests/base_config.yaml'.
    Do not combine with the other vcd_role fixtures, as only one user can
    be logged in at a time.
    """
    base_config = testutils.yaml_to_dict(env.BASE_CONFIG_FILEPATH)
    login_cmd = f"login {base_config['vcd']['host']} system " \
                f"{base_config['vcd']['username']} -iwp {base_config['vcd']['password']} " \
                f"-V {env.VCD_API_VERSION_TO_USE}"
    # ovdc context may be nondeterministic when there's multiple ovdcs,
    # so pin org and vdc explicitly after login.
    setup_commands = (
        login_cmd,
        f"org use {env.TEST_ORG}",
        f"vdc use {env.TEST_VDC}",
    )
    for command in setup_commands:
        outcome = env.CLI_RUNNER.invoke(vcd, command.split(),
                                        catch_exceptions=False)
        assert outcome.exit_code == 0, \
            testutils.format_command_info('vcd', command, outcome.exit_code,
                                          outcome.output)
    PYTEST_LOGGER.debug("Logged in as sys admin")
    yield
    env.CLI_RUNNER.invoke(vcd, ['logout'])
    PYTEST_LOGGER.debug("Logged out as sys admin")
@pytest.fixture
def vcd_cluster_admin():
    """Log in to vcd-cli as the cluster admin user for the test duration.

    Usage: add the parameter 'vcd_cluster_admin' to the test function.
    Credentials come from 'system_test_framework/environment.py'.
    Do not combine with the other vcd_role fixtures, as only one user can
    be logged in at a time.
    """
    base_config = testutils.yaml_to_dict(env.BASE_CONFIG_FILEPATH)
    login_cmd = f"login {base_config['vcd']['host']} {env.TEST_ORG} " \
                f"{env.CLUSTER_ADMIN_NAME} -iwp {env.CLUSTER_ADMIN_PASSWORD} " \
                f"-V {env.VCD_API_VERSION_TO_USE}"
    # ovdc context may be nondeterministic when there's multiple ovdcs,
    # so pin the vdc explicitly after login.
    for command in (login_cmd, f"vdc use {env.TEST_VDC}"):
        outcome = env.CLI_RUNNER.invoke(vcd, command.split(),
                                        catch_exceptions=False)
        assert outcome.exit_code == 0, \
            testutils.format_command_info('vcd', command, outcome.exit_code,
                                          outcome.output)
    PYTEST_LOGGER.debug(f"Logged in as {env.CLUSTER_ADMIN_NAME}")
    yield
    env.CLI_RUNNER.invoke(vcd, ['logout'])
    PYTEST_LOGGER.debug(f"Logged out as {env.CLUSTER_ADMIN_NAME}")
@pytest.fixture
def vcd_cluster_author():
    """Fixture to ensure that we are logged in to vcd-cli as cluster author.

    Usage: add the parameter 'vcd_cluster_author' to the test function.
    User will have the credentials specified in
    'system_test_framework/environment.py'
    Do not use this fixture with the other vcd_role fixtures, as only one
    user can be logged in at a time.
    """
    config = testutils.yaml_to_dict(env.BASE_CONFIG_FILEPATH)
    cmd = f"login {config['vcd']['host']} {env.TEST_ORG} " \
          f"{env.CLUSTER_AUTHOR_NAME} -iwp {env.CLUSTER_AUTHOR_PASSWORD} " \
          f"-V {env.VCD_API_VERSION_TO_USE}"
    result = env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    assert result.exit_code == 0, \
        testutils.format_command_info('vcd', cmd, result.exit_code,
                                      result.output)
    # ovdc context may be nondeterministic when there's multiple ovdcs
    cmd = f"vdc use {env.TEST_VDC}"
    result = env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    assert result.exit_code == 0, \
        testutils.format_command_info('vcd', cmd, result.exit_code,
                                      result.output)
    PYTEST_LOGGER.debug(f"Logged in as {env.CLUSTER_AUTHOR_NAME}")
    yield
    env.CLI_RUNNER.invoke(vcd, ['logout'])
    PYTEST_LOGGER.debug(f"Logged out as {env.CLUSTER_AUTHOR_NAME}")
def cleanup_cluster_artifacts():
    """Remove leftover cluster artifacts (vApps and RDEs) for all test users.

    For each known per-user test cluster name prefix, deletes every matching
    vApp in the test vdc and every matching RDE, in the same order as before:
    sys admin, cluster admin, cluster author.
    """
    cluster_name_prefixes = (
        env.SYS_ADMIN_TEST_CLUSTER_NAME,
        env.CLUSTER_ADMIN_TEST_CLUSTER_NAME,
        env.CLUSTER_AUTHOR_TEST_CLUSTER_NAME,
    )
    for prefix in cluster_name_prefixes:
        env.delete_all_vapps_with_prefix(
            prefix,
            vdc_href=env.TEST_VDC_HREF,
            logger=PYTEST_LOGGER)
        env.delete_all_rde_with_prefix(
            prefix,
            logger=PYTEST_LOGGER)
@pytest.fixture
def delete_test_clusters():
    """Ensure the test clusters do not exist before (and optionally after) tests.

    Usage: add the parameter 'delete_test_cluster' to the test function.
    Setup tasks:
    - Delete leftover test cluster vApps/RDEs
    Teardown tasks (only if config key 'teardown_clusters'=True):
    - Delete test cluster vApps/RDEs again
    """
    cleanup_cluster_artifacts()
    yield
    if not env.TEARDOWN_CLUSTERS:
        return
    cleanup_cluster_artifacts()
def test_0010_vcd_cse_version():
    """Test vcd cse version command."""
    cmd = "cse version"
    result = env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    for message in (f"Executing command: {cmd}",
                    f"Exit code: {result.exit_code}",
                    f"Output: {result.output}"):
        PYTEST_LOGGER.debug(message)
    assert result.exit_code == 0,\
        testutils.format_command_info('vcd', cmd, result.exit_code,
                                      result.output)
def test_0020_vcd_cse_system_info(vcd_cluster_admin):
    """Test vcd cse system info command."""
    cmd = "cse system info"
    result = env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    for message in (f"Executing command: {cmd}",
                    f"Exit code: {result.exit_code}",
                    f"Output: {result.output}"):
        PYTEST_LOGGER.debug(message)
    assert result.exit_code == 0,\
        testutils.format_command_info('vcd', cmd, result.exit_code,
                                      result.output)
    assert "version" in result.output
@pytest.fixture
def ovdc_enable_test_case(request):
    """Fixture backing test_0020_vcd_ovdc_enable.

    Yields the OVDC_ENABLE_TEST_PARAM for the current parametrized case.
    Setup: login as the case's user (sys admin credentials come from the
    base config rather than the param) and, if requested, force-disable the
    ovdc so a fresh enable can be attempted.
    Teardown: force-disable the ovdc and log out.
    """
    test_case: OVDC_ENABLE_TEST_PARAM = request.param
    # login user
    config = testutils.yaml_to_dict(env.BASE_CONFIG_FILEPATH)
    user = test_case.user
    # Was a redacted placeholder; the param carries the user's password.
    pwd = test_case.password
    org_name = test_case.org_name
    if test_case.user == env.SYS_ADMIN_NAME:
        # sys admin credentials live in the base config, not the test param
        user = config['vcd']['username']
        pwd = config['vcd']['password']
        org_name = 'system'
    cmd = f"login {config['vcd']['host']} {org_name} " \
          f"{user} -iwp {pwd} " \
          f"-V {env.VCD_API_VERSION_TO_USE}"
    result = env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    PYTEST_LOGGER.debug(f"Executing command: {cmd}")
    PYTEST_LOGGER.debug(f"Exit code: {result.exit_code}")
    PYTEST_LOGGER.debug(f"Output: {result.output}")
    PYTEST_LOGGER.debug(f"Logged in as {test_case.user}")
    assert result.exit_code == 0, \
        testutils.format_command_info('vcd', cmd, result.exit_code,
                                      result.output)
    if test_case.disable_before_test:
        # disable ovdc before test
        cmd = f"cse ovdc disable --native --org {test_case.org_name} {test_case.ovdc_name} --force"  # noqa: E501
        env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=True)
    yield test_case
    # disable ovdc after test
    cmd = f"cse ovdc disable --native --org {test_case.org_name} {test_case.ovdc_name} --force"  # noqa: E501
    env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=True)
    # logout
    env.CLI_RUNNER.invoke(vcd, ['logout'])
    PYTEST_LOGGER.debug(f"Logged out as {test_case.user}")
@pytest.mark.parametrize("ovdc_enable_test_case",
                         [OVDC_ENABLE_TEST_PARAM(user=env.SYS_ADMIN_NAME, password="", org_name=env.TEST_ORG, ovdc_name=env.TEST_VDC, disable_before_test=True, expect_failure=False),  # noqa: E501
                          # Restored from a redacted placeholder; matches the
                          # cluster-admin row's env.<ROLE>_PASSWORD pattern.
                          OVDC_ENABLE_TEST_PARAM(user=env.CLUSTER_AUTHOR_NAME, password=env.CLUSTER_AUTHOR_PASSWORD, org_name=env.TEST_ORG, ovdc_name=env.TEST_VDC, disable_before_test=True, expect_failure=True),  # noqa: E501
                          OVDC_ENABLE_TEST_PARAM(user=env.CLUSTER_ADMIN_NAME, password=env.CLUSTER_ADMIN_PASSWORD, org_name=env.TEST_ORG, ovdc_name=env.TEST_VDC, disable_before_test=True, expect_failure=True),  # noqa: E501
                          # Following test should fail because
                          # ovdc will be already enabled for native
                          OVDC_ENABLE_TEST_PARAM(user=env.SYS_ADMIN_NAME, password="", org_name=env.TEST_ORG, ovdc_name=env.TEST_VDC, disable_before_test=False, expect_failure=True)],  # noqa: E501
                         indirect=['ovdc_enable_test_case'])
def test_0020_vcd_ovdc_enable(ovdc_enable_test_case: OVDC_ENABLE_TEST_PARAM):
    """Test ovdc enable operation.

    Disabling the test ovdc is necessary to avoid errors if there are clusters
    left over from previous test execution.
    commands:
    $ vcd cse ovdc enable -n -o TEST_ORG TEST_VDC
    """
    cmd = f"cse ovdc enable {ovdc_enable_test_case.ovdc_name} --native --org {ovdc_enable_test_case.org_name}"  # noqa: E501
    result = env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=True)
    PYTEST_LOGGER.debug(f"Executing command: {cmd}")
    PYTEST_LOGGER.debug(f"Exit code: {result.exit_code}")
    PYTEST_LOGGER.debug(f"Output: {result.output}")
    # Cases flagged expect_failure are allowed to exit non-zero.
    assert result.exit_code == 0 or ovdc_enable_test_case.expect_failure, \
        testutils.format_command_info('vcd', cmd, result.exit_code,
                                      result.output)
@pytest.fixture
def ovdc_disable_test_case(request):
    """Fixture for ovdc disable tests.

    Yields the OVDC_DISABLE_TEST_PARAM for the current parametrized case.
    Setup: login as the case's user (sys admin credentials come from the
    base config rather than the param) and, if requested, enable the ovdc
    so the test can exercise disabling it.
    Teardown: log out.
    """
    test_case: OVDC_DISABLE_TEST_PARAM = request.param
    # login user
    config = testutils.yaml_to_dict(env.BASE_CONFIG_FILEPATH)
    user = test_case.user
    # Was a redacted placeholder; the param carries the user's password.
    pwd = test_case.password
    org_name = test_case.org_name
    if test_case.user == env.SYS_ADMIN_NAME:
        # sys admin credentials live in the base config, not the test param
        user = config['vcd']['username']
        pwd = config['vcd']['password']
        org_name = 'system'
    cmd = f"login {config['vcd']['host']} {org_name} " \
          f"{user} -iwp {pwd} " \
          f"-V {env.VCD_API_VERSION_TO_USE}"
    env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    PYTEST_LOGGER.debug(f"Logged in as {test_case.user}")
    if test_case.enable_before_test:
        # enable ovdc before test (comment fixed: this runs `enable`)
        cmd = f"cse ovdc enable --native --org {test_case.org_name} {test_case.ovdc_name}"  # noqa: E501
        env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=True)
    yield test_case
    # logout
    env.CLI_RUNNER.invoke(vcd, ['logout'])
    PYTEST_LOGGER.debug(f"Logged out as {test_case.user}")
def create_apply_spec(apply_spec_param):
    """Create an apply specification file at env.APPLY_SPEC_PATH.

    Runs `cse cluster apply --sample --native` to generate a sample spec
    file, then patches it in place with the supplied parameters via
    testutils.modify_cluster_apply_spec.

    :param dict apply_spec_param: values to splice into the sample spec,
        typically: worker count, nfs count, rollback, template name,
        template revision, network, sizing class, cpu, memory,
        storage profile and cluster name.

    :return: None. (The spec is written to env.APPLY_SPEC_PATH as a side
        effect; nothing is returned to the caller.)
    """
    # run cse sample to generate apply specification
    cmd = f"cse cluster apply --sample --native -o {env.APPLY_SPEC_PATH}"
    result = env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    assert result.exit_code == 0, \
        testutils.format_command_info('vcd', cmd, result.exit_code,
                                      result.output)
    PYTEST_LOGGER.debug(f"Using params {apply_spec_param} to create "
                        "apply specification")
    testutils.modify_cluster_apply_spec(env.APPLY_SPEC_PATH, apply_spec_param)
@pytest.fixture
def system_toggle_test_case(request):
    """Fixture backing test_0030: sys admin login plus staged apply spec.

    Yields the SYSTEM_TOGGLE_TEST_PARAM for the current parametrized case.
    Setup: login as sys admin, remove leftover cluster artifacts, and write
    a cluster-apply spec file built from the case's parameters.
    Teardown: remove artifacts again and log out.
    """
    param: SYSTEM_TOGGLE_TEST_PARAM = request.param
    # login as sysadmin
    config = testutils.yaml_to_dict(env.BASE_CONFIG_FILEPATH)
    user = config['vcd']['username']
    pwd = config['vcd']['password']
    org_name = 'system'
    cmd = f"login {config['vcd']['host']} {org_name} " \
          f"{user} -iwp {pwd} " \
          f"-V {env.VCD_API_VERSION_TO_USE}"
    result = env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    PYTEST_LOGGER.debug(f"Executing command: {cmd}")
    PYTEST_LOGGER.debug(f"Exit code: {result.exit_code}")
    PYTEST_LOGGER.debug(f"Output: {result.output}")
    PYTEST_LOGGER.debug(f"Logged in as {user}")
    cleanup_cluster_artifacts()
    # create apply specification
    spec_params = {
        'worker_count': param.worker_count,
        'nfs_count': param.nfs_count,
        'rollback': param.rollback,
        'template_name': param.template_name,
        'template_revision': param.template_revision,
        'network': param.ovdc_network,
        'sizing_class': param.sizing_class,
        'storage_profile': param.storage_profile,
        'cluster_name': param.cluster_name
    }
    create_apply_spec(spec_params)
    yield param
    cleanup_cluster_artifacts()
    # logout
    env.CLI_RUNNER.invoke(vcd, ['logout'])
    PYTEST_LOGGER.debug(f"Logged out as {user}")
def _follow_apply_output(expect_failure=False):
    """Build a validator that follows the task spawned by `cluster apply`.

    The returned callable extracts the `task wait` command from line 2 of
    the apply output, runs it, and returns True when the outcome matches
    the expectation (success, or failure when expect_failure is set).
    """
    def validator(output, test_runner_username):
        follow_cmd = output.split('\n')[1]
        follow_args = follow_cmd.split()[1:]
        # follow cluster apply output
        outcome = env.CLI_RUNNER.invoke(
            vcd, follow_args, catch_exceptions=True)
        PYTEST_LOGGER.debug(f"Executing command: {follow_cmd}")
        PYTEST_LOGGER.debug(f"User: {test_runner_username}")
        PYTEST_LOGGER.debug(f"Exit code: {outcome.exit_code}")
        PYTEST_LOGGER.debug(f"Output: {outcome.output}")
        failed = "result: error" in outcome.output or outcome.exit_code != 0
        if not failed:
            return True
        if expect_failure:
            PYTEST_LOGGER.debug(f"{follow_cmd} failed as expected. "
                                f"Exit code {outcome.exit_code}. "
                                f"Output: {outcome.output}")
            return True
        PYTEST_LOGGER.debug(f"Unexpected failure when executing "
                            f"'{follow_cmd}'. "
                            f"Exit code {outcome.exit_code}. "
                            f"Output: {outcome.output}")
        return False
    return validator
def _follow_delete_output(expect_failure=False):
    """Build a validator that follows the task spawned by `cluster delete`.

    The returned callable extracts the `task wait` command from line 3 of
    the delete output, runs it, and returns True when the outcome matches
    the expectation (success, or failure when expect_failure is set).
    """
    def validator(output, test_runner_username):
        follow_cmd = output.split('\n')[2]
        follow_args = follow_cmd.split()[1:]
        # follow cluster delete output
        outcome = env.CLI_RUNNER.invoke(
            vcd, follow_args, catch_exceptions=True)
        PYTEST_LOGGER.debug(f"Executing command: {follow_cmd}")
        PYTEST_LOGGER.debug(f"User: {test_runner_username}")
        PYTEST_LOGGER.debug(f"Exit code: {outcome.exit_code}")
        PYTEST_LOGGER.debug(f"Output: {outcome.output}")
        if outcome.exit_code == 0:
            return True
        if expect_failure:
            PYTEST_LOGGER.debug(f"{follow_cmd} failed as expected. "
                                f"Exit code {outcome.exit_code}. "
                                f"Output: {outcome.output}")
            return True
        PYTEST_LOGGER.debug(f"Unexpected failure when executing "
                            f"'{follow_cmd}'. "
                            f"Exit code {outcome.exit_code}. "
                            f"Output: {outcome.output}")
        return False
    return validator
@pytest.mark.parametrize(
    "system_toggle_test_case",
    [
        SYSTEM_TOGGLE_TEST_PARAM(
            user=env.SYS_ADMIN_NAME,
            # Restored from a redacted placeholder; other sys-admin rows in
            # this module pass "" because real credentials come from the
            # base config at login time.
            password="",
            cluster_name=f"{env.SYS_ADMIN_TEST_CLUSTER_NAME}-s1",
            worker_count=0, nfs_count=0, rollback=True,
            sizing_class=None, storage_profile=None,
            ovdc_network="Invalid_network",
            template_name=env.TEMPLATE_DEFINITIONS[0]['name'],
            template_revision=env.TEMPLATE_DEFINITIONS[0]['revision'],
            expect_failure=False)
    ],
    indirect=['system_toggle_test_case']
)
def test_0030_vcd_cse_system_toggle(system_toggle_test_case: SYSTEM_TOGGLE_TEST_PARAM):  # noqa: E501
    """Test `vcd cse system ...` commands.

    Test that on disabling CSE, cluster deployments are no longer
    allowed, and on enabling CSE, cluster deployments are allowed again.
    These commands are combined into 1 test function because only sys admin
    can modify the state of CSE server, org admin/tenant can test cluster
    deployment to ensure that CSE is disabled/enabled. Also, this avoids
    cases such as running the system disable test, and then running the
    cluster operations test, which would fail due to CSE server being
    disabled).
    """
    cmd_list = [
        # disable CSE, then verify apply is rejected (exit code 2)
        testutils.CMD_BINDER(cmd="cse system disable",
                             exit_code=0,
                             validate_output_func=None,
                             test_user=system_toggle_test_case.user),
        testutils.CMD_BINDER(cmd=f"cse cluster apply {env.APPLY_SPEC_PATH}",
                             exit_code=2,
                             validate_output_func=None,
                             test_user=system_toggle_test_case.user),
        # re-enable CSE, then verify apply is accepted again; the staged
        # spec uses an invalid network, so the spawned task is expected to
        # fail -- only acceptance matters here.
        testutils.CMD_BINDER(cmd="cse system enable",
                             exit_code=0,
                             validate_output_func=None,
                             test_user=system_toggle_test_case.user),
        testutils.CMD_BINDER(cmd=f"cse cluster apply {env.APPLY_SPEC_PATH} ",
                             exit_code=0,
                             validate_output_func=_follow_apply_output(expect_failure=True),  # noqa: E501
                             test_user=system_toggle_test_case.user)
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
def _get_cluster_phase(cluster_name, test_runner_username, org_name=None, vdc_name=None):  # noqa: E501
    """Return the phase string (e.g. 'CREATE:SUCCEEDED') of a cluster's RDE.

    Logs in as *test_runner_username*, runs `cse cluster info` and parses
    the `phase:` line from its YAML output.

    :param str cluster_name: name of the cluster to inspect.
    :param str test_runner_username: user whose login command to use.
    :param str org_name: org to query; defaults to env.TEST_ORG.
    :param str vdc_name: vdc to query; defaults to env.TEST_VDC.
    :raises Exception: if the cluster is not found or no phase is present.
    """
    if not org_name and not vdc_name:
        org_name = env.TEST_ORG
        vdc_name = env.TEST_VDC
    cmd_list = [
        testutils.CMD_BINDER(
            cmd=env.USERNAME_TO_LOGIN_CMD[test_runner_username],
            exit_code=0,
            validate_output_func=None,
            test_user=test_runner_username),
        testutils.CMD_BINDER(
            cmd=f"cse cluster info {cluster_name} -o {org_name} -v {vdc_name}",
            exit_code=0,
            validate_output_func=None,
            test_user=test_runner_username),
    ]
    result = testutils.execute_commands(cmd_list=cmd_list, logger=PYTEST_LOGGER)[-1]  # noqa: E501
    if result.exit_code != 0:
        # fixed: message was missing the f-string prefix
        raise Exception(f"Cluster {cluster_name} not found.")
    match = re.search(r'phase: (\w+:\w+)', result.output)
    if match is None:
        # guard: previously this raised a bare TypeError via match[1]
        raise Exception(f"Unable to determine phase of cluster {cluster_name}.")  # noqa: E501
    return match[1]
def _generate_cluster_apply_tests(test_users=None):
    """Generate cluster apply test cases.

    :param list test_users: the list of users for which the test cases
        should be generated. If not supplied, the tests will be generated
        for all the users (System admin, Cluster admin and Cluster author).

    The functions which use this method to generate test cases should have
    test_user_name and create_apply_spec as fixture parameters.

    :return: list of CLUSTER_APPLY_TEST_PARAM test cases, twelve per
        (user, template) combination.
    """
    if not test_users:
        # test for all the users
        test_users = [
            env.SYS_ADMIN_NAME,
            env.CLUSTER_ADMIN_NAME,
            env.CLUSTER_AUTHOR_NAME
        ]

    test_cases = []
    for user in test_users:
        for template in env.TEMPLATE_DEFINITIONS:
            def _case(**overrides):
                """Build one CLUSTER_APPLY_TEST_PARAM over shared defaults.

                Defaults describe a failing create attempt that leaves no
                artifacts; each case overrides only the fields it cares
                about.
                """
                # NOTE(review): the per-user password literal was redacted
                # from this file. The cluster_apply_param fixture logs in
                # via env.USERNAME_TO_LOGIN_CMD, so the field is unused by
                # the tests; confirm against version control if it is ever
                # needed.
                fields = dict(
                    user=user,
                    password="",
                    cluster_name=None,
                    worker_count=0,
                    nfs_count=0,
                    rollback=False,
                    cpu=None,
                    memory=None,
                    sizing_class=None,
                    storage_profile=None,
                    ovdc_network=None,
                    template_name=template['name'],
                    template_revision=template['revision'],
                    expected_phase="CREATE:FAILED",
                    retain_cluster=False,
                    exit_code=0,
                    should_vapp_exist=False,
                    should_rde_exist=False,
                    required_rde_version=['1.0.0', '2.0.0'],
                    expect_failure=True
                )
                fields.update(overrides)
                return CLUSTER_APPLY_TEST_PARAM(**fields)

            base_name = env.USERNAME_TO_CLUSTER_NAME[user]
            test_cases.extend([
                # Invalid sizing policy -> rollback, nothing left behind
                _case(cluster_name=f"{base_name}-case1",
                      rollback=True,
                      sizing_class="Invalid_value"),
                # Invalid storage profile -> rollback, nothing left behind
                _case(cluster_name=f"{base_name}-case2",
                      rollback=True,
                      storage_profile="Invalid_value"),
                # Invalid network -> rollback, nothing left behind
                _case(cluster_name=f"{base_name}-case3",
                      rollback=True,
                      ovdc_network="Invalid_value"),
                # Invalid network without rollback -> RDE remains but
                # creation of vapp will fail
                _case(cluster_name=f"{base_name}-case4",
                      ovdc_network="Invalid_value",
                      should_rde_exist=True),
                # cpu/memory and sizing class provided together ->
                # client-side validation failure (exit code 2)
                _case(cluster_name=f"{base_name}-case5",
                      cpu=DEFAULT_CPU_COUNT,
                      memory=DEFAULT_MEMORY_MB,
                      sizing_class=env.SIZING_CLASS_NAME,
                      exit_code=2,
                      required_rde_version=['2.0.0']),
                # cluster created with cpu/memory and no sizing class
                _case(cluster_name=f"{base_name}-case6",
                      cpu=DEFAULT_CPU_COUNT,
                      memory=DEFAULT_MEMORY_MB,
                      expected_phase="CREATE:SUCCEEDED",
                      retain_cluster=True,
                      should_rde_exist=True,
                      should_vapp_exist=True,
                      required_rde_version=['2.0.0'],
                      expect_failure=False),
                # Resize a cluster created using cpu/memory with sizing
                # class -> validation failure
                _case(cluster_name=f"{base_name}-case6",
                      worker_count=1,
                      sizing_class=env.SIZING_CLASS_NAME,
                      expected_phase="CREATE:SUCCEEDED",
                      retain_cluster=True,
                      exit_code=2,
                      should_rde_exist=True,
                      should_vapp_exist=True,
                      required_rde_version=['2.0.0']),
                # Resize a cluster created using cpu/memory using
                # cpu/memory
                _case(cluster_name=f"{base_name}-case6",
                      worker_count=1,
                      cpu=DEFAULT_CPU_COUNT,
                      memory=DEFAULT_MEMORY_MB,
                      expected_phase="UPDATE:SUCCEEDED",
                      should_rde_exist=True,
                      should_vapp_exist=True,
                      required_rde_version=['2.0.0'],
                      expect_failure=False),
                # Create cluster using sizing policy
                _case(cluster_name=base_name,
                      sizing_class=env.SIZING_CLASS_NAME,
                      expected_phase="CREATE:SUCCEEDED",
                      retain_cluster=True,
                      should_rde_exist=True,
                      should_vapp_exist=True,
                      expect_failure=False),
                # Resize cluster created with sizing class using cpu/mem
                # -> validation failure
                _case(cluster_name=base_name,
                      worker_count=1,
                      nfs_count=1,
                      cpu=DEFAULT_CPU_COUNT,
                      memory=DEFAULT_MEMORY_MB,
                      expected_phase='CREATE:SUCCEEDED',
                      retain_cluster=True,
                      exit_code=2,
                      should_rde_exist=True,
                      should_vapp_exist=True,
                      required_rde_version=['2.0.0']),
                # Resize up a valid deployment
                _case(cluster_name=base_name,
                      worker_count=1,
                      sizing_class=env.SIZING_CLASS_NAME,
                      expected_phase='UPDATE:SUCCEEDED',
                      retain_cluster=True,
                      should_rde_exist=True,
                      should_vapp_exist=True,
                      expect_failure=False),
                # Resize down a valid deployment
                _case(cluster_name=base_name,
                      sizing_class=env.SIZING_CLASS_NAME,
                      expected_phase='UPDATE:SUCCEEDED',
                      retain_cluster=True,
                      should_rde_exist=True,
                      should_vapp_exist=True,
                      expect_failure=False)
            ])
    return test_cases
@pytest.fixture
def cluster_apply_param(request):
    """Fixture backing test_0040: login, stage apply spec, enable ovdc.

    Yields the CLUSTER_APPLY_TEST_PARAM for the current parametrized case.
    Teardown deletes the case's RDE and vApp unless retain_cluster is set,
    then logs out.
    """
    param: CLUSTER_APPLY_TEST_PARAM = request.param
    # login as the user
    env.CLI_RUNNER.invoke(
        vcd,
        env.USERNAME_TO_LOGIN_CMD[param.user].split(),
        catch_exceptions=False)
    PYTEST_LOGGER.debug(f"Logged in as {param.user}")
    PYTEST_LOGGER.debug(f"Parameters used: {param}")
    # create apply specification
    spec_params = {
        'worker_count': param.worker_count,
        'nfs_count': param.nfs_count,
        'rollback': param.rollback,
        'template_name': param.template_name,
        'template_revision': param.template_revision,
        'network': param.ovdc_network,
        'sizing_class': param.sizing_class,
        'cpu': param.cpu,
        'memory': param.memory,
        'storage_profile': param.storage_profile,
        'cluster_name': param.cluster_name
    }
    create_apply_spec(spec_params)
    # enable ovdc for cluster creation
    enable_cmd = f"cse ovdc enable --native --org {env.TEST_ORG} {env.TEST_VDC}"
    env.CLI_RUNNER.invoke(vcd, enable_cmd.split(), catch_exceptions=True)
    yield param
    if not param.retain_cluster:
        env.delete_rde(param.cluster_name)
        env.delete_vapp(param.cluster_name, vdc_href=env.TEST_VDC_HREF)
        PYTEST_LOGGER.debug(f"Deleting cluster after test {param.cluster_name}")  # noqa: E501
    # logout
    env.CLI_RUNNER.invoke(vcd, ['logout'])
    PYTEST_LOGGER.debug(f"Logged out as {param.user}")
@pytest.mark.parametrize('cluster_apply_param', _generate_cluster_apply_tests(), indirect=['cluster_apply_param'])  # noqa: E501
def test_0040_vcd_cse_cluster_apply(cluster_apply_param: CLUSTER_APPLY_TEST_PARAM):  # noqa: E501
    """Test 'vcd cse cluster apply ...' command for various cse users.

    Test cluster creation from different persona's- sys_admin, org_admin
    and k8_author. Created clusters will remain in the system for further
    command tests - list, resize and delete.
    After running apply, verifies RDE/vApp existence, the RDE phase, and
    (for successful updates) the resulting worker node count.
    """
    print(f"Running cluster create operation for {cluster_apply_param.user}")
    rde_version = get_runtime_rde_version_by_vcd_api_version(
        env.VCD_API_VERSION_TO_USE)
    if rde_version not in cluster_apply_param.required_rde_version:
        # Do not execute the test if not relevant to the RDE version used
        print(f"Skipping the test as it is not relevant to CSE configured with RDE version {rde_version}")  # noqa: E501
        return
    exit_code = cluster_apply_param.exit_code
    expect_failure = cluster_apply_param.expect_failure
    # apply the staged spec; the validator follows the spawned task
    cmd_list = [
        testutils.CMD_BINDER(cmd=f"cse cluster apply {env.APPLY_SPEC_PATH} ",
                             exit_code=exit_code,
                             validate_output_func=_follow_apply_output(expect_failure=expect_failure),  # noqa: E501
                             test_user=cluster_apply_param.user)
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
    created_cluster_name = cluster_apply_param.cluster_name
    # verify RDE existence and phase match the case's expectations
    if cluster_apply_param.should_rde_exist:
        assert env.rde_exists(created_cluster_name), \
            f"Expected RDE to be present for cluster {created_cluster_name}"
        assert \
            _get_cluster_phase(created_cluster_name, cluster_apply_param.user) == cluster_apply_param.expected_phase, \
            f"Expected RDE phase to be {cluster_apply_param.expected_phase}"  # noqa: E501
    else:
        assert not env.rde_exists(created_cluster_name), \
            f"Expected RDE to not exist for cluster {created_cluster_name}"
    # verify vApp existence matches the case's expectations
    if cluster_apply_param.should_vapp_exist:
        assert env.vapp_exists(created_cluster_name, vdc_href=env.TEST_VDC_HREF), \
            f"Expected VApp to be present for cluster {created_cluster_name}"  # noqa: E501
    else:
        assert not env.vapp_exists(created_cluster_name, vdc_href=env.TEST_VDC_HREF), \
            f"Expected VApp to not be present for cluster {created_cluster_name}"  # noqa: E501
    # for successful resizes, confirm the worker node count via cluster info
    if "UPDATE" in cluster_apply_param.expected_phase:
        if "SUCCEEDED" in cluster_apply_param.expected_phase:
            cmd_list = [
                testutils.CMD_BINDER(cmd=f"cse cluster info {created_cluster_name}",  # noqa
                                     exit_code=0,
                                     validate_output_func=testutils.generate_validate_node_count_func(  # noqa: E501
                                         cluster_name=created_cluster_name,
                                         expected_nodes=cluster_apply_param.worker_count,  # noqa: E501
                                         rde_version=get_runtime_rde_version_by_vcd_api_version(env.VCD_API_VERSION_TO_USE),  # noqa: E501
                                         logger=PYTEST_LOGGER),  # noqa: E501
                                     test_user=cluster_apply_param.user)
            ]
            testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
    # logout user
    env.CLI_RUNNER.invoke(vcd, env.USER_LOGOUT_CMD, catch_exceptions=False)
@pytest.mark.parametrize('test_runner_username', [env.SYS_ADMIN_NAME,
                                                  env.CLUSTER_ADMIN_NAME,
                                                  env.CLUSTER_AUTHOR_NAME
                                                  ])
def test_0060_vcd_cse_cluster_list(test_runner_username):
    """Verify `vcd cse cluster list` succeeds for each test persona."""
    commands = (
        env.USERNAME_TO_LOGIN_CMD[test_runner_username],
        "cse cluster list",
        env.USER_LOGOUT_CMD,
    )
    cmd_list = [
        testutils.CMD_BINDER(cmd=command,
                             exit_code=0,
                             validate_output_func=None,
                             test_user=test_runner_username)
        for command in commands
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
@pytest.mark.parametrize('test_runner_username', [env.SYS_ADMIN_NAME,
                                                  env.CLUSTER_AUTHOR_NAME,
                                                  env.CLUSTER_ADMIN_NAME
                                                  ])
def test_0070_vcd_cse_cluster_info(test_runner_username):
    """Verify `vcd cse cluster info` emits valid YAML for each persona."""
    login_step = testutils.CMD_BINDER(
        cmd=env.USERNAME_TO_LOGIN_CMD[test_runner_username],
        exit_code=0,
        validate_output_func=None,
        test_user=test_runner_username)
    info_step = testutils.CMD_BINDER(
        cmd=f"cse cluster info {env.USERNAME_TO_CLUSTER_NAME[test_runner_username]}",  # noqa: E501
        exit_code=0,
        validate_output_func=testutils.validate_yaml_output(),
        test_user=test_runner_username)
    logout_step = testutils.CMD_BINDER(
        cmd=env.USER_LOGOUT_CMD,
        exit_code=0,
        validate_output_func=None,
        test_user=test_runner_username)
    testutils.execute_commands([login_step, info_step, logout_step],
                               logger=PYTEST_LOGGER)
@pytest.mark.parametrize('test_runner_username', [env.SYS_ADMIN_NAME,
                                                  env.CLUSTER_AUTHOR_NAME,
                                                  env.CLUSTER_ADMIN_NAME
                                                  ])
def test_0080_vcd_cse_cluster_config(test_runner_username):
    """Run `vcd cse cluster config` on the user's own cluster.

    The config output must be valid YAML; login/logout frame the command.
    """
    # Failing for the first call with err:
    # Error: Expecting value: line 1 column 1 (char 0)
    cluster_name = env.USERNAME_TO_CLUSTER_NAME[test_runner_username]
    login_step = testutils.CMD_BINDER(
        cmd=env.USERNAME_TO_LOGIN_CMD[test_runner_username],
        exit_code=0,
        validate_output_func=None,
        test_user=test_runner_username)
    config_step = testutils.CMD_BINDER(
        cmd=f"cse cluster config {cluster_name}",
        exit_code=0,
        validate_output_func=testutils.validate_yaml_output(),
        test_user=test_runner_username)
    logout_step = testutils.CMD_BINDER(
        cmd=env.USER_LOGOUT_CMD,
        exit_code=0,
        validate_output_func=None,
        test_user=test_runner_username)
    testutils.execute_commands([login_step, config_step, logout_step],
                               logger=PYTEST_LOGGER)
def get_nfs_node_for_2_0_0_cluster(cluster_dict):
    """Return the name of the first NFS node in an RDE 2.0.0 cluster dict."""
    nfs_nodes = cluster_dict['status']['nodes']['nfs']
    return nfs_nodes[0]['name']
def get_nfs_node_for_1_0_0_cluster(cluster_dict):
    """Return the name of the first NFS node in an RDE 1.0.0 cluster dict."""
    nfs_nodes = cluster_dict['status']['nodes']['nfs']
    return nfs_nodes[0]['name']
def get_nfs_node(cluster_info):
    """Extract the first NFS node name from a YAML `cluster info` output.

    RDE 2.0.0 payloads carry an 'apiVersion' key; older payloads do not,
    so the key's presence selects the extraction helper.
    """
    cluster_dict = yaml.safe_load(cluster_info)
    extractor = (get_nfs_node_for_2_0_0_cluster
                 if 'apiVersion' in cluster_dict
                 else get_nfs_node_for_1_0_0_cluster)
    return extractor(cluster_dict)
def validate_if_node_not_present(node_name):
    """Build a validator that passes when *node_name* is absent from output.

    The returned callable matches the ``validate_output_func`` signature
    used by ``testutils.CMD_BINDER``.
    """
    def _node_absent(output, test_runner_username):
        return node_name not in output
    return _node_absent
@pytest.fixture
def cluster_delete_nfs_param(request):
    """Fixture for the delete-nfs test: create a cluster with one NFS node.

    Logs in as the parametrized user, writes a cluster-apply spec with a
    single NFS node and no workers, applies it, then yields the username.
    NOTE(review): there is no teardown after the yield — the created
    cluster is expected to be cleaned up by the test or a later fixture;
    confirm this is intentional.
    """
    username: str = request.param
    # login as the user
    login_cmd = env.USERNAME_TO_LOGIN_CMD[username]
    env.CLI_RUNNER.invoke(vcd, login_cmd.split(), catch_exceptions=False)
    PYTEST_LOGGER.debug(f"Logged in as {username}")
    # create apply specification: zero workers, exactly one NFS node so the
    # delete-nfs test has a node to remove
    spec_params = {
        'worker_count': 0,
        'nfs_count': 1,
        'rollback': False,
        'template_name': env.TEMPLATE_DEFINITIONS[0]['name'],
        'template_revision': env.TEMPLATE_DEFINITIONS[0]['revision'],
        'network': None,
        'sizing_class': env.SIZING_CLASS_NAME,
        'cpu': None,
        'memory': None,
        'storage_profile': None,
        'cluster_name': env.USERNAME_TO_CLUSTER_NAME[username]
    }
    create_apply_spec(spec_params)
    # create nfs node via `cluster apply`; the apply is expected to succeed
    exit_code = 0
    expect_failure = False
    cmd = f"cse cluster apply {env.APPLY_SPEC_PATH}"
    cmd_list = [
        testutils.CMD_BINDER(cmd=cmd,
                             exit_code=exit_code,
                             validate_output_func=_follow_apply_output(expect_failure=expect_failure), # noqa: E501
                             test_user=username)
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
    yield username
@pytest.mark.skip(reason='Test disabled')
@pytest.mark.parametrize('cluster_delete_nfs_param',
                         [env.SYS_ADMIN_NAME,
                          env.CLUSTER_AUTHOR_NAME,
                          env.CLUSTER_ADMIN_NAME],
                         indirect=['cluster_delete_nfs_param'])
def test_0050_vcd_cse_delete_nfs(cluster_delete_nfs_param):
    """Test delete nfs node command.

    Flow: log in and switch to the test org/VDC, look up the cluster's NFS
    node name from `cluster info`, delete it with `cluster delete-nfs`,
    poll the RDE phase until UPDATE:SUCCEEDED, then verify the node no
    longer appears in `cluster info` output.
    """
    test_runner_username = cluster_delete_nfs_param
    cluster_name = env.USERNAME_TO_CLUSTER_NAME[test_runner_username]
    # log in and target the test org/VDC
    cmd_list = [
        testutils.CMD_BINDER(cmd=env.USERNAME_TO_LOGIN_CMD[test_runner_username], # noqa: E501
                             exit_code=0,
                             validate_output_func=None,
                             test_user=test_runner_username),
        testutils.CMD_BINDER(cmd=f"org use {env.TEST_ORG}",
                             exit_code=0,
                             validate_output_func=None,
                             test_user=test_runner_username),
        testutils.CMD_BINDER(cmd=f"vdc use {env.TEST_VDC}",
                             exit_code=0,
                             validate_output_func=None,
                             test_user=test_runner_username)
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
    time.sleep(30)  # Timeout to wait for RDE update to complete
    # fetch cluster info to discover the NFS node's generated name
    cmd_list = [
        testutils.CMD_BINDER(cmd=f"cse cluster info {env.USERNAME_TO_CLUSTER_NAME[test_runner_username]}", # noqa: E501
                             exit_code=0,
                             validate_output_func=None,
                             test_user=test_runner_username)
    ]
    cmd_results = testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
    nfs_node = get_nfs_node(cmd_results[0].output)
    # delete the NFS node and follow the task output
    cmd_list = [
        testutils.CMD_BINDER(cmd=f"cse cluster delete-nfs {cluster_name} {nfs_node}", # noqa: E501
                             exit_code=0,
                             validate_output_func=_follow_delete_output(expect_failure=False), # noqa: E501
                             test_user=test_runner_username)
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
    number_of_attempts = 10
    # wait until the status changes to UPDATE:SUCCEEDED for 5 minutes
    # (10 attempts with 30s wait each)
    # NOTE(review): the loop sleeps once more even after the phase check
    # succeeds — harmless but slow; confirm before tightening.
    status = ""
    while status != "UPDATE:SUCCEEDED" and number_of_attempts > 0:
        status = _get_cluster_phase(env.USERNAME_TO_CLUSTER_NAME[test_runner_username], test_runner_username)  # noqa: E501
        number_of_attempts -= 1
        time.sleep(30)
    assert status == "UPDATE:SUCCEEDED", "Cluster didn't reach UPDATE:SUCCEEDED phase"  # noqa: E501
    # verify the deleted node no longer shows up in cluster info
    cmd_list = [
        testutils.CMD_BINDER(cmd=f"cse cluster info {cluster_name}",
                             exit_code=0,
                             validate_output_func=validate_if_node_not_present(nfs_node),  # noqa: E501
                             test_user=test_runner_username)
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
# Passwords mirror the credentials used by the OVDC_DISABLE_TEST_PARAM cases
# in this module: the system administrator authenticates without an explicit
# password, the other users use their env-configured passwords.
@pytest.mark.parametrize("cluster_delete_param",
                         [
                             CLUSTER_DELETE_TEST_PARAM(
                                 user=env.CLUSTER_ADMIN_NAME,
                                 password=env.CLUSTER_ADMIN_PASSWORD,
                                 cluster_name=f"{env.SYS_ADMIN_TEST_CLUSTER_NAME}",  # noqa: E501
                                 org=env.TEST_ORG,
                                 ovdc=env.TEST_VDC,
                                 expect_failure=True),
                             CLUSTER_DELETE_TEST_PARAM(
                                 user=env.CLUSTER_AUTHOR_NAME,
                                 password=env.CLUSTER_AUTHOR_PASSWORD,
                                 cluster_name=f"{env.CLUSTER_ADMIN_TEST_CLUSTER_NAME}",  # noqa: E501
                                 org=env.TEST_ORG,
                                 ovdc=env.TEST_VDC,
                                 expect_failure=True),
                             CLUSTER_DELETE_TEST_PARAM(
                                 user=env.SYS_ADMIN_NAME,
                                 password="",
                                 cluster_name=f"{env.SYS_ADMIN_TEST_CLUSTER_NAME}",  # noqa: E501
                                 org=env.TEST_ORG,
                                 ovdc=env.TEST_VDC,
                                 expect_failure=False),
                             CLUSTER_DELETE_TEST_PARAM(
                                 user=env.CLUSTER_ADMIN_NAME,
                                 password=env.CLUSTER_ADMIN_PASSWORD,
                                 cluster_name=f"{env.CLUSTER_ADMIN_TEST_CLUSTER_NAME}",  # noqa: E501
                                 org=env.TEST_ORG,
                                 ovdc=env.TEST_VDC,
                                 expect_failure=False),
                             # TODO change back to cluster admin deleting
                             # cluster author's cluster
                             CLUSTER_DELETE_TEST_PARAM(
                                 user=env.SYS_ADMIN_NAME,
                                 password="",
                                 cluster_name=f"{env.CLUSTER_AUTHOR_TEST_CLUSTER_NAME}",  # noqa: E501
                                 org=env.TEST_ORG,
                                 ovdc=env.TEST_VDC,
                                 expect_failure=False),
                         ])
def test_0090_vcd_cse_cluster_delete(cluster_delete_param: CLUSTER_DELETE_TEST_PARAM):  # noqa: E501
    """Test 'vcd cse cluster delete ...' command for various cse users.

    Cluster delete operation on the above create clusters operations-
    cluster Author can only delete self created clusters.
    cluster admin can delete all cluster in the organization.

    When deletion is expected to succeed, also assert that the backing
    vApp is gone.
    """
    cmd_list = [
        testutils.CMD_BINDER(cmd=env.USERNAME_TO_LOGIN_CMD[cluster_delete_param.user],  # noqa: E501
                             exit_code=0,
                             validate_output_func=None,
                             test_user=cluster_delete_param.user),
        # All steps must run as the parametrized user; the original code
        # hard-coded CLUSTER_ADMIN here and CLUSTER_AUTHOR on logout.
        testutils.CMD_BINDER(cmd=f"org use {cluster_delete_param.org}",
                             exit_code=0,
                             validate_output_func=None,
                             test_user=cluster_delete_param.user),
        testutils.CMD_BINDER(cmd=f"cse cluster delete {cluster_delete_param.cluster_name}",  # noqa: E501
                             exit_code=2 if cluster_delete_param.expect_failure else 0,  # noqa: E501
                             validate_output_func=_follow_delete_output(expect_failure=cluster_delete_param.expect_failure),  # noqa: E501
                             test_user=cluster_delete_param.user),
        testutils.CMD_BINDER(cmd=env.USER_LOGOUT_CMD,
                             exit_code=0,
                             validate_output_func=None,
                             test_user=cluster_delete_param.user),
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
    if not cluster_delete_param.expect_failure:
        assert not env.vapp_exists(
            cluster_delete_param.cluster_name,
            vdc_href=env.TEST_VDC_HREF,
            logger=PYTEST_LOGGER), \
            f"Cluster {cluster_delete_param.cluster_name} exists when it should not"  # noqa: E501
def generate_cluster_upgrade_tests(test_users=None):
    """Generate test cases for upgrade test.

    Test format:
        user, template_upgrade_path, should_expect_failure

    :param test_users: usernames to generate cases for; defaults to the
        cluster admin only.
    :return: list of CLUSTER_UPGRADE_TEST_PARAM, one per (user, upgrade
        path) combination.
    """
    if not test_users:
        # test for all the users
        test_users = [
            env.CLUSTER_ADMIN_NAME
        ]
    # Passwords mirror the credentials used by the other parametrized tests
    # in this module; the system administrator logs in without an explicit
    # password.
    user_passwords = {
        env.SYS_ADMIN_NAME: "",
        env.CLUSTER_ADMIN_NAME: env.CLUSTER_ADMIN_PASSWORD,
        env.CLUSTER_AUTHOR_NAME: env.CLUSTER_AUTHOR_PASSWORD,
    }
    test_cases = []
    for user in test_users:
        for upgrade_path in env.TEMPLATE_UPGRADE_PATH_LIST:
            test_cases.append(
                CLUSTER_UPGRADE_TEST_PARAM(
                    user=user,
                    password=user_passwords.get(user, ""),
                    cluster_name=f"{env.USERNAME_TO_CLUSTER_NAME[user]}-upg",
                    worker_count=1,
                    nfs_count=0,
                    rollback=True,
                    sizing_class=None,
                    storage_profile=None,
                    ovdc_network=env.TEST_NETWORK,
                    expect_failure=False,
                    upgrade_path=upgrade_path
                )
            )
    return test_cases
@pytest.fixture
def cluster_upgrade_param(request):
    """Fixture for the upgrade test: create the cluster to be upgraded.

    Removes any leftover vApp/RDE with the same name, logs in as the
    parametrized user, creates a cluster from the FIRST template in the
    param's upgrade path, asserts the create succeeded, then yields the
    param. Teardown deletes the RDE and vApp and logs the user out.
    """
    param: CLUSTER_UPGRADE_TEST_PARAM = request.param
    # cleanup clusters left over from a previous (failed) run
    env.delete_vapp(param.cluster_name, vdc_href=env.TEST_VDC_HREF)
    env.delete_rde(param.cluster_name)
    # login as the user
    login_cmd = env.USERNAME_TO_LOGIN_CMD[param.user]
    env.CLI_RUNNER.invoke(vcd, login_cmd.split(), catch_exceptions=False)
    PYTEST_LOGGER.debug(f"Logged in as {param.user}")
    # create initial cluster from the first entry of the upgrade path; the
    # test itself upgrades through the remaining entries
    initial_cluster_template_name = param.upgrade_path[0]['name']
    initial_cluster_template_revision = param.upgrade_path[0]['revision']
    spec_params = {
        'worker_count': param.worker_count,
        'nfs_count': param.nfs_count,
        'rollback': param.rollback,
        'template_name': initial_cluster_template_name,
        'template_revision': initial_cluster_template_revision,
        'network': param.ovdc_network,
        'sizing_class': param.sizing_class,
        'storage_profile': param.storage_profile,
        'cluster_name': param.cluster_name
    }
    create_apply_spec(spec_params)
    # enable ovdc for cluster creation
    cmd = f"cse ovdc enable --native --org {env.TEST_ORG} {env.TEST_VDC}"
    env.CLI_RUNNER.invoke(vcd, cmd.split(), catch_exceptions=False)
    # create initial cluster
    cmd_list = [
        testutils.CMD_BINDER(
            cmd=f"cse cluster apply {env.APPLY_SPEC_PATH} ",
            exit_code=0,
            validate_output_func=_follow_apply_output(expect_failure=False), # noqa: E501
            test_user=param.user)
    ]
    testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
    assert _get_cluster_phase(param.cluster_name, param.user) == 'CREATE:SUCCEEDED', \
        "Expected RDE phase to be 'CREATE:SUCCEEDED'"  # noqa: E501
    PYTEST_LOGGER.debug(f"Created cluster {param.cluster_name}")
    yield param
    # teardown: remove the (possibly upgraded) cluster and log out
    env.delete_rde(param.cluster_name)
    env.delete_vapp(param.cluster_name, vdc_href=env.TEST_VDC_HREF)
    env.CLI_RUNNER.invoke(vcd, ['logout'])
    PYTEST_LOGGER.debug(f"Logged out as {param.user}")
@pytest.mark.skipif(not env.TEST_CLUSTER_UPGRADES,
                    reason="Configuration specifies 'test_cluster_upgrades' as false")  # noqa: E501
@pytest.mark.parametrize('cluster_upgrade_param', generate_cluster_upgrade_tests(test_users=[env.SYS_ADMIN_NAME]), indirect=["cluster_upgrade_param"])  # noqa: E501
def test_0100_cluster_upgrade(cluster_upgrade_param: CLUSTER_UPGRADE_TEST_PARAM):  # noqa: E501
    """Upgrade a cluster through every template in its upgrade path.

    The initial cluster is created by the 'cluster_upgrade_param' fixture
    from the first template in `cluster_upgrade_param.upgrade_path`; this
    test then upgrades it step by step through the remaining templates,
    asserting the RDE reaches UPGRADE:SUCCEEDED after each step.
    """
    upgrade_path = cluster_upgrade_param.upgrade_path
    spec = {
        'worker_count': cluster_upgrade_param.worker_count,
        'nfs_count': cluster_upgrade_param.nfs_count,
        'rollback': cluster_upgrade_param.rollback,
        'network': cluster_upgrade_param.ovdc_network,
        'sizing_class': cluster_upgrade_param.sizing_class,
        'storage_profile': cluster_upgrade_param.storage_profile,
        'cluster_name': cluster_upgrade_param.cluster_name
    }
    for template in upgrade_path[1:]:
        PYTEST_LOGGER.debug(f"Upgrading cluster to {template['name']} {template['revision']}")  # noqa: E501
        spec['template_name'] = template['name']
        spec['template_revision'] = template['revision']
        # Upgrade the cluster: pre-36 API versions use the dedicated
        # `cluster upgrade` command, newer ones go through `cluster apply`.
        if VCDApiVersion(env.VCD_API_VERSION_TO_USE) < \
                VcdApiVersionObj.VERSION_36.value:
            cmd_list = [
                testutils.CMD_BINDER(
                    cmd=f"cse cluster upgrade {spec['cluster_name']} {spec['template_name']} {spec['template_revision']}",  # noqa: E501
                    # Fixed: the original referenced the undefined name
                    # 'cluster_apply_param' and inverted the condition;
                    # a non-zero exit code is expected only on failure
                    # (matching the other tests in this module).
                    exit_code=1 if cluster_upgrade_param.expect_failure else 0,  # noqa: E501
                    validate_output_func=None,
                    test_user=cluster_upgrade_param.user)]
        else:
            create_apply_spec(spec)
            cmd_list = [
                testutils.CMD_BINDER(
                    cmd=f"cse cluster apply {env.APPLY_SPEC_PATH} ",
                    exit_code=0,
                    validate_output_func=_follow_apply_output(expect_failure=cluster_upgrade_param.expect_failure),  # noqa: E501
                    test_user=cluster_upgrade_param.user)
            ]
        testutils.execute_commands(cmd_list, logger=PYTEST_LOGGER)
        assert _get_cluster_phase(spec['cluster_name'], cluster_upgrade_param.user) == 'UPGRADE:SUCCEEDED', \
            "Expected RDE phase to be 'UPGRADE:SUCCEEDED'"  # noqa: E501
@pytest.mark.parametrize("ovdc_disable_test_case",
                         [OVDC_DISABLE_TEST_PARAM(user=env.SYS_ADMIN_NAME, password="", org_name=env.TEST_ORG, ovdc_name=env.TEST_VDC, enable_before_test=True, expect_failure=False),  # noqa: E501
                          OVDC_DISABLE_TEST_PARAM(user=env.CLUSTER_ADMIN_NAME, password=env.CLUSTER_ADMIN_PASSWORD, org_name=env.TEST_ORG, ovdc_name=env.TEST_VDC, enable_before_test=True, expect_failure=True),  # noqa: E501
                          OVDC_DISABLE_TEST_PARAM(user=env.CLUSTER_AUTHOR_NAME, password=env.CLUSTER_AUTHOR_PASSWORD, org_name=env.TEST_ORG, ovdc_name=env.TEST_VDC, enable_before_test=True, expect_failure=True),  # noqa: E501
                          OVDC_DISABLE_TEST_PARAM(user=env.SYS_ADMIN_NAME, password="", org_name=env.TEST_ORG, ovdc_name=env.TEST_VDC, enable_before_test=False, expect_failure=True)],  # noqa: E501
                         indirect=['ovdc_disable_test_case'])
def test_0100_vcd_ovdc_disable(ovdc_disable_test_case: OVDC_DISABLE_TEST_PARAM):  # noqa: E501
    """Test ovdc disable operation.

    commands:
    $ vcd cse ovdc disable -n -o TEST_ORG TEST_VDC
    """
    disable_cmd = f"cse ovdc disable {env.TEST_VDC} -n -o {env.TEST_ORG}"
    result = env.CLI_RUNNER.invoke(
        vcd, disable_cmd.split(), catch_exceptions=False)
    PYTEST_LOGGER.debug(f"Executing command: {disable_cmd}")
    PYTEST_LOGGER.debug(f"Exit code: {result.exit_code}")
    PYTEST_LOGGER.debug(f"Output: {result.output}")
    # A non-zero exit code is acceptable only when this case expects failure.
    command_succeeded = result.exit_code == 0
    assert command_succeeded or ovdc_disable_test_case.expect_failure, \
        testutils.format_command_info('vcd', disable_cmd, result.exit_code,
                                      result.output)
def test_9999_vcd_cse_system_stop(vcd_sys_admin):
    """Test `vcd cse system stop -y`.

    This test shuts down CSE service, so '9999' ensures it runs last.
    """
    def _run_and_check(command, cli_input=None):
        # Invoke the CLI, log the outcome and fail on a non-zero exit code.
        result = env.CLI_RUNNER.invoke(vcd, command.split(), input=cli_input,
                                       catch_exceptions=False)
        PYTEST_LOGGER.debug(f"Executing command: {command}")
        PYTEST_LOGGER.debug(f"Exit code: {result.exit_code}")
        PYTEST_LOGGER.debug(f"Output: {result.output}")
        assert result.exit_code == 0, \
            testutils.format_command_info('vcd', command, result.exit_code,
                                          result.output)

    # must disable CSE before attempting to stop
    _run_and_check('cse system disable')
    # 'y' answers the confirmation prompt of `cse system stop`
    _run_and_check('cse system stop', cli_input='y')
|
import numpy as np
import glob
import time
import cv2
import os
import signal
import argparse
import matplotlib as mpl
# For plotting without a screen
mpl.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Pool
from vehicle_detector import VehicleDetector
from multiprocessing import Pool
from utils import draw_bboxes, draw_windows
from tqdm import tqdm
def process_img(vd, img_file, out_dir='output_images', process_pool=None):
    """Detect vehicles in one image and save three diagnostic figures.

    Figures written to *out_dir* (prefixed with the image's base name):
    per-scale window search, heatmap comparison, and labeled detections.

    :param vd: VehicleDetector instance used for detection.
    :param img_file: path to the input image (read with OpenCV, BGR order).
    :param out_dir: destination folder for the generated figures.
    :param process_pool: optional multiprocessing pool forwarded to the
        detector.
    :return: detection time in seconds (plotting time excluded).
    """
    img = cv2.imread(img_file)

    t1 = time.time()
    bounding_boxes, heatmap, windows = vd.detect_vehicles(img, process_pool=process_pool)
    t2 = time.time()

    plt.figure(figsize=(20, 15))

    # One subplot per scale plus one for the original image and one for the
    # combined view; int() is required because plt.subplot rejects floats
    # (np.ceil returns a float).
    rows = int(np.ceil((len(windows) + 2) / 2))

    all_bboxes = []
    i = 1
    for scale, cells_per_step, bboxes in windows:
        i += 1
        plt.subplot(rows, 2, i)
        w_tot = len(bboxes)
        # Positive: confidence at/above threshold; rejected: some confidence
        # but below threshold.
        w_pos = sum(1 for bbox in bboxes if bbox[1] >= vd.min_confidence)
        w_rej = sum(1 for bbox in bboxes if 0 < bbox[1] < vd.min_confidence)
        box_text = 'Scale: {}, Cells per Step: {}, Windows (Total/Positive/Rejected): {}/{}/{}'.format(scale, cells_per_step, w_tot, w_pos, w_rej)
        plt.title(box_text)
        box_img = draw_windows(np.copy(img), bboxes, min_confidence=vd.min_confidence, lines_thick=(2, 3, 2))
        plt.imshow(cv2.cvtColor(box_img, cv2.COLOR_BGR2RGB))
        all_bboxes.extend(bboxes)

    plt.subplot(rows, 2, 1)
    plt.title('Original Image')
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

    box_img = draw_windows(np.copy(img), all_bboxes, min_confidence=vd.min_confidence, lines_thick=(2, 3, 2))
    plt.subplot(rows, 2, i + 1)
    w_tot = len(all_bboxes)
    w_pos = sum(1 for bbox in all_bboxes if bbox[1] >= vd.min_confidence)
    w_rej = sum(1 for bbox in all_bboxes if 0 < bbox[1] < vd.min_confidence)
    box_text = 'Combined - Min Confidence: {}, Windows (Total/Positive/Rejected): {}/{}/{}'.format(vd.min_confidence, w_tot, w_pos, w_rej)
    plt.title(box_text)
    plt.imshow(cv2.cvtColor(box_img, cv2.COLOR_BGR2RGB))

    plt.tight_layout()
    img_prefix = os.path.split(img_file)[-1].split('.')[0]
    plt.savefig(os.path.join(out_dir, img_prefix + '_window_search.jpg'))
    # Close each figure after saving; matplotlib otherwise keeps every
    # figure alive, leaking memory when processing many images.
    plt.close()

    plt.figure(figsize=(20, 10))
    plt.subplot(221)
    plt.title('Original Image')
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.subplot(222)
    plt.title(box_text)
    plt.imshow(cv2.cvtColor(box_img, cv2.COLOR_BGR2RGB))
    plt.subplot(223)
    plt.title('Heatmap')
    # Unthresholded heatmap for comparison with the thresholded one below.
    heatmap_o = vd._heatmap(img, windows, 0)
    plt.imshow(heatmap_o, cmap='hot')
    plt.subplot(224)
    heatmap_text = 'Heatmap - Min confidence: {}, Threshold: {}'.format(vd.min_confidence, vd.heat_threshold)
    plt.title(heatmap_text)
    plt.imshow(heatmap, cmap='hot')
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, img_prefix + '_heatmap.jpg'))
    plt.close()

    plt.figure(figsize=(20, 10))
    plt.subplot(221)
    plt.title('Original Image')
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.subplot(222)
    plt.title(box_text)
    plt.imshow(cv2.cvtColor(box_img, cv2.COLOR_BGR2RGB))
    plt.subplot(223)
    plt.title(heatmap_text)
    plt.imshow(heatmap, cmap='hot')
    labeled_img = draw_bboxes(np.copy(img), bounding_boxes, (250, 150, 55), 2, fill=True)
    plt.subplot(224)
    plt.title('Labeled Image')
    plt.imshow(cv2.cvtColor(labeled_img, cv2.COLOR_BGR2RGB))
    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, img_prefix + '_labeled.jpg'))
    plt.close()

    return t2 - t1
def worker_init():
    """Make pool workers ignore SIGINT so CTRL+C is handled by the parent.

    Without this, CTRL+C would be delivered to every worker process and
    the pool could not be terminated cleanly from the main process.
    """
    ignore_handler = signal.SIG_IGN
    signal.signal(signal.SIGINT, ignore_handler)
if __name__ == '__main__':
    # Command-line entry point: run vehicle detection over a folder of
    # images and save diagnostic figures for each one.
    parser = argparse.ArgumentParser(description='Image Processing')
    parser.add_argument(
        '--dir',
        type=str,
        default='test_images',
        help='Images folder'
    )
    parser.add_argument(
        '--out_dir',
        type=str,
        default='output_images',
        help='Destination folder'
    )
    parser.add_argument(
        '--model_file',
        type=str,
        default=os.path.join('models', 'model.p'),
        help='Model file'  # was a copy-paste of the --dir help text
    )
    parser.add_argument(
        '--min_confidence',
        type=float,
        default=0.3,
        help='Min prediction confidence for bounding boxes'
    )
    parser.add_argument(
        '--threshold',
        type=float,
        default=5,
        help='Heatmap threshold'
    )
    parser.add_argument('--disable-parallel', dest='parallel', action='store_false', help='Disable parallel processing (may decrease feature extraction speed)')
    parser.set_defaults(parallel=True)

    args = parser.parse_args()

    # Collect every image with a supported extension from the input folder.
    formats = ['jpg', 'png']
    imgs = []
    for ext in formats:
        imgs.extend(glob.glob(os.path.join(args.dir, '*.' + ext)))

    vd = VehicleDetector(model_file=args.model_file,
                         min_confidence=args.min_confidence,
                         heat_threshold=args.threshold,
                         smooth_frames=0)

    pool_size = os.cpu_count()
    if args.parallel is False or pool_size < 2:
        process_pool = None
    else:
        # Workers ignore SIGINT (see worker_init) so CTRL+C only reaches
        # the parent process.
        process_pool = Pool(pool_size, initializer=worker_init)

    print('Using {} cores'.format(1 if process_pool is None else pool_size))

    try:
        t = 0
        for img_file in tqdm(imgs, unit=' images', desc='Image Processing'):
            t += process_img(vd, img_file, out_dir=args.out_dir, process_pool=process_pool)
        # Guard against an empty input folder (ZeroDivisionError otherwise).
        if imgs:
            print('Total/Average time: {:.3f}/{:.3f} s'.format(t, t / len(imgs)))
        else:
            print('No images found in {}'.format(args.dir))
    except Exception:
        if process_pool is not None:
            process_pool.terminate()
        raise
    else:
        if process_pool is not None:
            # Shut the pool down cleanly on success instead of leaving the
            # workers to be reaped at interpreter exit.
            process_pool.close()
            process_pool.join()
|
<gh_stars>0
from models import Organisation
from datetime import datetime
import csv
# Input CSV with the raw exported records.
filepath = r'E:\db44.csv'
# Destination CSV for the normalized records.
filepath_write = r'E:\db5_new.csv'
arr1 = []  # NOTE(review): appears unused in this module - confirm before removing
arr2 = []  # NOTE(review): appears unused in this module - confirm before removing
max_coloumn = 22  # NOTE(review): appears unused here (original spelling kept)
dictionary = {}  # NOTE(review): appears unused in this module
class Record:
    """One row of the source CSV, split into named columns.

    The constructor maps positional CSV cells onto named attributes;
    ``str()`` renders the record back to a semicolon-separated line in
    the (slightly reordered) output column order.
    """

    # CSV column order as it appears in the input file.
    _COLUMNS = (
        'reg_number', 'reg_date', 'org_net', 'org_name', 'org_city',
        'org_state', 'org_inn', 'org_street', 'org_street_n', 'org_phone',
        'org_contact', 'org_vpn', 'keys_number', 'keys_date',
        'keys_device_name', 'keys_device_id', 'distr_name', 'dist_number',
        'distr_date', 'distr_ammount', 'advance_info',
    )

    # Output order differs from the input: state/city are swapped, and
    # inn/contact/phone are rearranged.
    _OUTPUT_ORDER = (
        'reg_number', 'reg_date', 'org_net', 'org_name', 'org_state',
        'org_city', 'org_street', 'org_street_n', 'org_inn', 'org_contact',
        'org_phone', 'org_vpn', 'keys_number', 'keys_date',
        'keys_device_name', 'keys_device_id', 'distr_name', 'dist_number',
        'distr_date', 'distr_ammount', 'advance_info',
    )

    def __init__(self, arr):
        for attr_name, cell in zip(self._COLUMNS, arr):
            setattr(self, attr_name, cell)

    def __str__(self):
        return ';'.join(str(getattr(self, attr_name))
                        for attr_name in self._OUTPUT_ORDER)
class Application:
    """Loads the raw CSV, normalizing rows by carrying values forward.

    Rows in the source file omit repeated values; ``add_record`` fills a
    blank group of columns from the previous record.
    """
    def __init__(self):
        # Normalized Record instances in file order.
        self.records = []
        # NOTE(review): create_dict() reads the module-level `filepath`,
        # not this attribute - confirm which path is authoritative.
        self.filepath = r'E:\db.csv'
        # NOTE(review): this assigns a throwaway local, not an attribute;
        # write_to_csv() uses the module-level `filepath_write` instead.
        filepath_write = r'E:\db_new.csv'
    def add_record(self, rec):
        """Append *rec*, filling blank column groups from the last record.

        Rows where registration number, INN and VPN are all empty are
        treated as fully blank and skipped.
        """
        if rec.reg_number + rec.org_inn + rec.org_vpn == '':
            return
        # NOTE(review): bare except - intended to catch IndexError when the
        # list is still empty; on the first row the record borrows from
        # itself, which only strips its own fields.
        try:
            last_rec = self.records[-1]
        except:
            last_rec = rec
        # Carry forward the registration group when blank.
        if rec.reg_number == '':
            rec.reg_number = last_rec.reg_number.strip()
            rec.reg_date = last_rec.reg_date .strip()
        if rec.org_net == '':
            rec.org_net = last_rec.org_net.strip()
        # Carry forward the whole organisation group when INN is blank.
        if rec.org_inn == '':
            rec.org_name = last_rec.org_name.strip()
            rec.org_state = last_rec.org_state.strip()
            rec.org_city = last_rec.org_city.strip()
            rec.org_street = last_rec.org_street.strip()
            rec.org_street_n = last_rec.org_street_n.strip()
            rec.org_inn = last_rec.org_inn.strip()
            rec.org_contact = last_rec.org_contact.strip()
            rec.org_phone = last_rec.org_phone.strip()
        # Carry forward the distributor group when blank.
        if rec.distr_name == '':
            rec.distr_name = last_rec.distr_name.strip()
            rec.dist_number = last_rec.dist_number.strip()
            rec.distr_date = last_rec.distr_date.strip()
            rec.distr_ammount = last_rec.distr_ammount.strip()
        # Keys group is carried forward only for USB devices.
        if ((rec.keys_device_name == '')
            and (last_rec.keys_device_name.lower() == 'usb')):
            rec.keys_device_name = last_rec.keys_device_name.strip()
            rec.keys_device_id = last_rec.keys_device_id.strip()
            rec.keys_number = last_rec.keys_number.strip()
            rec.keys_date = last_rec.keys_date.strip()
        # NOTE(review): list.append should not raise here; the bare except
        # looks like leftover debugging - confirm before removing.
        try:
            self.records.append(rec)
        except:
            print(rec.org_inn)
    def create_dict(self):
        """Read the module-level `filepath` CSV and normalize every row.

        Each physical row holds one semicolon-separated string in its
        first cell, hence the manual split.
        """
        with open(filepath, encoding='utf-8') as file:
            reader = csv.reader(file)
            for row in reader:
                cur_arr = row[0].split(';')
                self.add_record(Record(cur_arr))
        return
    def write_to_csv(self):
        """Write the normalized records to the module-level `filepath_write`.

        NOTE(review): csv.writer.writerows expects iterables of cells, but
        Record objects are not iterable - verify this method actually works
        (str(record) already produces a semicolon-joined line).
        """
        with open(filepath_write, 'wt') as file:
            writer = csv.writer(file, delimiter=';')
            writer.writerows(self.records)
        return
def add_organisation(app):
    """Create an Organisation row for every record in *app* (idempotent).

    Records whose INN already exists in the Organisation table are
    skipped. Registration dates that do not parse as DD.MM.YYYY fall back
    to 2000-01-01, mirroring the fallback used elsewhere in this module.
    """
    for org in app.records:
        try:
            reg_date_convert = datetime.strptime(
                org.reg_date, '%d.%m.%Y'
            )
        except ValueError:
            reg_date_convert = datetime(2000, 1, 1)
        try:
            reg_number_convert = int(org.reg_number)
        except ValueError:
            # Carried-forward CSV cells may be empty or non-numeric; the
            # original unguarded int() crashed on such rows.
            reg_number_convert = 0
        if not Organisation.objects.filter(
            inn=org.org_inn,
        ).exists():
            Organisation.objects.create(
                reg_number=reg_number_convert,
                reg_date=reg_date_convert,
                inn=org.org_inn,
                full_name=org.org_name,
                short_name=org.org_name,
                town=org.org_city,
                # Record has no 'org_address' attribute (the original line
                # raised AttributeError); compose the address from the
                # street fields instead.
                address=f'{org.org_street}, {org.org_street_n}'.strip(', '),
                phone=org.org_phone,
                employee=org.org_contact,
                comment=org.advance_info,
            )
"""
def org_add(app):
for org in app.records:
if not Distributor.objects.filter(
org_name=org.distr_name
).exists():
Distributor.objects.create(
org_name=org.distr_name,
)
if not Device.objects.filter(
type=org.keys_device_name.lower()).exists():
Device.objects.create(
type=org.keys_device_name.lower(),
)
if not License.objects.filter(
n_license=org.dist_number,
).exists():
distributor = Distributor.objects.filter(
org_name=org.distr_name
)[0]
try:
lic_date_convert = datetime.strptime(
org.distr_date, '%d.%m.%Y'
)
except ValueError:
lic_date_convert = datetime(2000, 1, 1)
try:
ammount_convert = int(
org.distr_ammount.split(' ')[0]
)
except ValueError:
ammount_convert = 0
License.objects.create(
n_license=org.dist_number,
lic_date=lic_date_convert,
distrib_org=distributor,
ammount=ammount_convert
)
try:
keys_date_convert = datetime.strptime(
org.keys_date, '%d.%m.%Y'
)
except ValueError:
keys_date_convert = datetime(2000, 1, 1)
try:
key_number = int(org.keys_number)
except ValueError:
key_number = 0
#if not Event.objects.filter(
# keys_number=key_number,
# vpn_number=org_rec.org_vpn,
#).exists():
organisation = Organisation.objects.filter(
org_inn=org.org_inn
)[0]
device = Device.objects.filter(
type=org.keys_device_name.lower()
)[0]
Vpn.objects.create(
organisation=organisation,
keys_number=key_number,
keys_date=keys_date_convert,
device_name=device,
device_id=org.keys_device_id,
license=License.objects.filter(
n_license=org.dist_number
)[0],
comment=org.advance_info,
vpn_number=org.org_vpn,
)
#dbtable = create_dict()
#org_add(dbtable)
#reglament_add(dbtable)
"""
if __name__ == '__main__':
    # Load and normalize the CSV records; the Django import step stays
    # disabled because it needs a configured Django environment.
    app = Application()
    app.create_dict()
    #add_organisation(app)
<filename>old/gridsearchXGboostR.py<gh_stars>10-100
# The process of performing random search with cross validation is:
# 1. Set up a grid of hyperparameters to evaluate
# 2. Randomly sample a combination of hyperparameters
# 3. Create a model with the selected combination
# 4. Evaluate the model using cross validation
# 5. Decide which hyperparameters worked the best
#https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html
#https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
#https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
#https://xgboost.readthedocs.io/en/latest/parameter.html
#--------------------------------------------------------------------------
# Name : gridsearchGradientBoosting.py
# Version : 1.0
# Author : E.Taskesen
# Contact : <EMAIL>
# Date : Dec. 2018
#--------------------------------------------------------------------------
#
'''
NOTE:
IF you see something like this: training data did not have the following fields: f73, f40, f66, f147, f62, f39, f2, f83, f127, f84, f54, f97, f114, f102, f49, f7, f8, f56, f23, f107, f138, f28, f71, f152, f80, f57, f46, f58, f139, f121, f140, f20, f45, f113, f5, f60, f135, f101, f68, f76, f65, f41, f99, f131, f109, f117, f13, f100, f128, f52, f15, f50, f95, f124, f19, f12, f43, f137, f33, f22, f32, f72, f142, f151, f74, f90, f48, f122, f133, f26, f79, f94, f18, f10, f51, f0, f53, f92, f29, f115, f143, f14, f116, f47, f69, f82, f34, f89, f35, f6, f132, f16, f118, f31, f96, f59, f75, f1, f110, f61, f108, f25, f21, f11, f17, f85, f150, f3, f98, f24, f77, f103, f112, f91, f144, f70, f86, f119, f55, f130, f106, f44, f36, f64, f67, f4, f145, f37, f126, f88, f93, f104, f81, f149, f27, f136, f146, f30, f38, f42, f141, f134, f120, f105, f129, f9, f148, f87, f125, f123, f111, f78, f63
Then, it may be caused by the incompatibility of sklearn's CalibratedClassifierCV and pandas.DataFrame
Or your data has 0 in it!
Just replace the last element with a very small number, like so:
X=X.replace(0,0.0000001)
https://github.com/dmlc/xgboost/issues/2334
'''
#%% Libraries
import xgboost
#from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import RandomizedSearchCV
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
import time
from sklearn.model_selection import train_test_split
#%% Gridsearch for GradientBoostingRegressor
def gridsearchXGboostR(X, y, cv=10, n_iter=20, n_jobs=1, verbose=True):
    """Randomized hyperparameter search for an XGBoost regressor.

    :param X: feature matrix (pandas DataFrame or numpy array).
    :param y: target vector (pandas Series or numpy array).
    :param cv: number of cross-validation folds.
    :param n_iter: number of random parameter combinations to sample.
    :param n_jobs: number of parallel jobs (clamped to >= 1).
    :param verbose: when truthy, print timing and a ranking report.

    :return: tuple (best_model, fitted RandomizedSearchCV object). With
        refit=True, best_model was re-fit on the whole data set.
    """
    if verbose == True: verbose = 2  # RandomizedSearchCV wants an int level
    n_jobs = np.maximum(n_jobs, 1)

    # Candidate values for the random search.
    n_estimators = [100, 250, 300, 500]
    gamma = [0, 0.25, 0.5, 1.0]
    subsample = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    # Maximum depth of each tree
    max_depth = [2, 3, 4, 5, 10, 15]
    # NOTE: 'silent' is deprecated in newer xgboost (use 'verbosity').
    silent = [False]
    # Fixed: the original list contained "0,3" (two values 0 and 3)
    # instead of the intended 0.3.
    learning_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
    colsample_bylevel = [0.4, 0.6, 0.8, 1.0]
    colsample_bytree = [0.4, 0.6, 0.8, 1.0]
    reg_lambda = [0.1, 1.0, 5.0, 10.0, 50.0, 100.0]
    # Control the balance of positive and negative weights, useful for unbalanced classes.
    scale_pos_weight = [1]
    # min_child_weight and num_round were deliberately excluded from the
    # grid in the original code.

    hyperparameter_grid = {
        'n_estimators': n_estimators,
        'gamma': gamma,
        'subsample': subsample,
        'max_depth': max_depth,
        'silent': silent,
        'learning_rate': learning_rate,
        'colsample_bylevel': colsample_bylevel,
        'colsample_bytree': colsample_bytree,
        'reg_lambda': reg_lambda,
        'scale_pos_weight': scale_pos_weight,
    }

    # Create the model to use for hyperparameter tuning
    model = xgboost.XGBRegressor()

    # Set up the random search with cross validation
    random_cv = RandomizedSearchCV(model,
                                   hyperparameter_grid,
                                   cv=cv,
                                   n_iter=n_iter,
                                   n_jobs=n_jobs,
                                   verbose=verbose,
                                   scoring='neg_mean_absolute_error',  # or neg_mean_squared_error
                                   return_train_score=False,
                                   refit=True,  # Refit an estimator using the best found parameters on the whole dataset.
                                   )

    # Convert pandas containers to plain float arrays; DataFrame.as_matrix
    # was removed in pandas 1.0, to_numpy is its replacement.
    if 'pandas' in str(type(X)):
        X = X.to_numpy(dtype=float)
    if 'pandas' in str(type(y)):
        y = y.to_numpy(dtype=float)

    search_time_start = time.time()
    random_cv.fit(X, y)

    # Show some results:
    if verbose:
        print("Randomized search time:", time.time() - search_time_start)
        report(random_cv.cv_results_)

    # Find the best combination of settings
    model = random_cv.best_estimator_
    return (model, random_cv)
#%% Report best scores
def report(results, n_top=5):
    """Print the *n_top* best-ranked candidates from a cv_results_ mapping.

    For each rank 1..n_top, prints every candidate holding that rank with
    its mean/std validation score and parameter set.
    """
    for rank in range(1, n_top + 1):
        for candidate in np.flatnonzero(results['rank_test_score'] == rank):
            print("Model with rank: {0}".format(rank))
            print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                results['mean_test_score'][candidate],
                results['std_test_score'][candidate]))
            print("Parameters: {0}".format(results['params'][candidate]))
            print("")
#%% END |
"""
Copyright 2015-2019, Institute for Systems Biology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from builtins import str
from builtins import map
from past.builtins import basestring

import collections
import collections.abc
import datetime
import json
import logging
import re
import sys
import time

import requests
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
# from django.core.mail import send_mail
from django.db.models import Count
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.template.loader import get_template
from django.urls import reverse
from django.utils import formats
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from oauth2client.client import GoogleCredentials

from accounts.models import GoogleProject, UserOptInStatus
from accounts.sa_utils import get_nih_user_details
from cohorts.metadata_helpers import get_sample_metadata
from cohorts.models import Cohort, Cohort_Perms
from google_helpers.bigquery.bq_support import BigQuerySupport
from google_helpers.bigquery.feedback_support import BigQueryFeedbackSupport
from google_helpers.bigquery.service import get_bigquery_service
from google_helpers.directory_service import get_directory_resource
from google_helpers.stackdriver import StackDriverLogger
# from notebooks.notebook_vm import check_vm_stat
from projects.models import Attribute, DataVersion, DataSource, Program
from sharing.service import send_email_message
from solr_helpers import query_solr_and_format_result, build_solr_query, build_solr_facets
from visualizations.models import SavedViz
from workbooks.models import Workbook
debug = settings.DEBUG
logger = logging.getLogger('main_logger')
OPEN_ACL_GOOGLE_GROUP = settings.OPEN_ACL_GOOGLE_GROUP
BQ_ATTEMPT_MAX = 10
WEBAPP_LOGIN_LOG_NAME = settings.WEBAPP_LOGIN_LOG_NAME
BQ_ECOSYS_BUCKET = settings.BQ_ECOSYS_STATIC_URL
CITATIONS_BUCKET = settings.CITATIONS_STATIC_URL
IDP = settings.IDP
def _needs_redirect(request):
appspot_host = '^.*{}\.appspot\.com.*$'.format(settings.GCLOUD_PROJECT_ID.lower())
return re.search(appspot_host, request.META.get('HTTP_HOST', '')) and not re.search(appspot_host, settings.BASE_URL)
def convert(data):
# if debug: print >> sys.stderr,'Called '+sys._getframe().f_code.co_name
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(list(map(convert, iter(list(data.items())))))
elif isinstance(data, collections.Iterable):
return type(data)(list(map(convert, data)))
else:
return data
def _decode_list(data):
# if debug: print >> sys.stderr,'Called '+sys._getframe().f_code.co_name
rv = []
for item in data:
if isinstance(item, str):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
# if debug: print >> sys.stderr,'Called '+sys._getframe().f_code.co_name
rv = {}
for key, value in list(data.items()):
if isinstance(key, str):
key = key.encode('utf-8')
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
@never_cache
def landing_page(request):
mitelman_url = settings.MITELMAN_URL
logger.info("[STATUS] Received landing page view request at {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return render(request, 'isb_cgc/landing.html', {'mitelman_url': mitelman_url })
# Redirect all requests for the old landing page location to isb-cgc.org
def domain_redirect(request):
try:
return redirect(settings.BASE_URL) if _needs_redirect(request) else landing_page(request)
except Exception as e:
logger.error("[ERROR] While handling domain redirect:")
logger.exception(e)
return landing_page(request)
'''
Displays the privacy policy
'''
@never_cache
def privacy_policy(request):
return render(request, 'isb_cgc/privacy.html', {'request': request, })
'''
Returns css_test page used to test css for general ui elements
'''
def css_test(request):
# if debug: print >> sys.stderr,'Called '+sys._getframe().f_code.co_name
return render(request, 'isb_cgc/css_test.html', {'request': request})
'''
Returns page that has user details
'''
@login_required
def user_detail_login(request):
user_id = request.user.id
return user_detail(request, user_id)
@login_required
def user_detail(request, user_id):
if debug: logger.debug('Called ' + sys._getframe().f_code.co_name)
if int(request.user.id) == int(user_id):
user = User.objects.get(id=user_id)
social_account = SocialAccount.objects.get(user_id=user_id, provider='google')
user_status_obj = UserOptInStatus.objects.filter(user=user).first()
if user_status_obj and user_status_obj.opt_in_status == UserOptInStatus.YES:
user_opt_in_status = "Opted-In"
elif user_status_obj and user_status_obj.opt_in_status == UserOptInStatus.NO:
user_opt_in_status = "Opted-Out"
else:
user_opt_in_status = "N/A"
user_details = {
'date_joined': user.date_joined,
'email': user.email,
'extra_data': social_account.extra_data,
'first_name': user.first_name,
'id': user.id,
'last_login': user.last_login,
'last_name': user.last_name,
'user_opt_in_status': user_opt_in_status
}
user_details['gcp_list'] = len(GoogleProject.objects.filter(user=user, active=1))
forced_logout = 'dcfForcedLogout' in request.session
nih_details = get_nih_user_details(user_id, forced_logout)
for key in list(nih_details.keys()):
user_details[key] = nih_details[key]
return render(request, 'isb_cgc/user_detail.html',
{'request': request,
'idp': IDP,
'user': user,
'user_details': user_details
})
else:
return render(request, '403.html')
@login_required
def bucket_object_list(request):
if debug: logger.debug('Called ' + sys._getframe().f_code.co_name)
credentials = GoogleCredentials.get_application_default()
service = discovery.build('storage', 'v1', credentials=credentials, cache_discovery=False)
req = service.objects().list(bucket='isb-cgc-dev')
resp = req.execute()
object_list = None
if 'items' in resp:
object_list = json.dumps(resp['items'])
return HttpResponse(object_list)
# Extended login view so we can track user logins
def extended_login_view(request):
redirect_to = 'dashboard'
if request.COOKIES and request.COOKIES.get('login_from', '') == 'new_cohort':
redirect_to = 'cohort'
try:
# Write log entry
st_logger = StackDriverLogger.build_from_django_settings()
log_name = WEBAPP_LOGIN_LOG_NAME
user = User.objects.get(id=request.user.id)
st_logger.write_text_log_entry(
log_name,
"[WEBAPP LOGIN] User {} logged in to the web application at {}".format(user.email,
datetime.datetime.utcnow())
)
# If user logs in for the second time, or user has not completed the survey, opt-in status changes to NOT_SEEN
user_opt_in_stat_obj = UserOptInStatus.objects.filter(user=user).first()
if user_opt_in_stat_obj:
if user_opt_in_stat_obj.opt_in_status == UserOptInStatus.NEW or \
user_opt_in_stat_obj.opt_in_status == UserOptInStatus.SKIP_ONCE:
user_opt_in_stat_obj.opt_in_status = UserOptInStatus.NOT_SEEN
user_opt_in_stat_obj.save()
elif user_opt_in_stat_obj.opt_in_status == UserOptInStatus.SEEN:
user_opt_in_stat_obj.opt_in_status = UserOptInStatus.SKIP_ONCE
user_opt_in_stat_obj.save()
except Exception as e:
logger.exception(e)
return redirect(reverse(redirect_to))
# Callback for recording the user's agreement to the warning popup
def warn_page(request):
request.session['seenWarning'] = True
return JsonResponse({'warning_status': 'SEEN'}, status=200)
'''
DEPRECATED - Returns Results from text search
'''
@login_required
def search_cohorts_viz(request):
if debug: logger.debug('Called ' + sys._getframe().f_code.co_name)
q = request.GET.get('q', None)
result_obj = {
'q': q
}
if q:
cohort_results = Cohort.objects.search(q)
list = []
for cohort in cohort_results:
list.append({
'id': cohort.id,
'name': cohort.name,
'last_date_saved': formats.date_format(cohort.last_date_saved, 'DATETIME_FORMAT'),
'owner': cohort.get_owner().email,
'samples': len(cohort.samples_set.all())
})
result_obj['cohorts'] = list
list = []
viz_results = SavedViz.objects.search(q)
for viz in viz_results:
list.append({
'id': viz.id,
'name': viz.name,
'last_date_saved': formats.date_format(viz.last_date_saved, 'DATETIME_FORMAT'),
'plots': len(viz.plot_set.all()),
'owner': viz.get_owner().email
})
result_obj['visualizations'] = list
return HttpResponse(json.dumps(result_obj), status=200)
# get_image_data which allows for URI arguments, falls through to get_image_data(request, slide_barcode)
def get_image_data_args(request):
file_uuid = None
if request.GET:
file_uuid = request.GET.get('file_uuid', None)
elif request.POST:
file_uuid = request.POST.get('file_uuid', None)
if file_uuid:
file_uuid = (None if re.compile(r'[^A-Za-z0-9\-]').search(file_uuid) else file_uuid)
return get_image_data(request, file_uuid)
# Given a slide_barcode, returns image metadata in JSON format
def get_image_data(request, file_uuid):
status = 200
result = {}
if not file_uuid:
status = 503
result = {
'message': "There was an error while processing this request: a valid file UUID was not supplied."
}
else:
try:
img_data_query = """
SELECT slide_barcode, level_0__width AS width, level_0__height AS height, mpp_x, mpp_y, file_gcs_url, sample_barcode, case_barcode, file_gdc_id
FROM [isb-cgc:metadata.TCGA_slide_images]
WHERE file_gdc_id = '{}';
"""
query_results = BigQuerySupport.execute_query_and_fetch_results(img_data_query.format(file_uuid))
if query_results and len(query_results) > 0:
result = {
'slide-barcode': query_results[0]['f'][0]['v'],
'Width': query_results[0]['f'][1]['v'],
'Height': query_results[0]['f'][2]['v'],
'MPP-X': query_results[0]['f'][3]['v'],
'MPP-Y': query_results[0]['f'][4]['v'],
'FileLocation': query_results[0]['f'][5]['v'],
'TissueID': query_results[0]['f'][0]['v'],
'sample-barcode': query_results[0]['f'][6]['v'],
'case-barcode': query_results[0]['f'][7]['v'],
'file-uuid': query_results[0]['f'][8]['v'],
'img-type': ('Diagnostic Image' if query_results[0]['f'][0]['v'].split("-")[-1].startswith(
"DX") else 'Tissue Slide Image' if query_results[0]['f'][0]['v'].split("-")[-1].startswith(
"TS") else "N/A")
}
sample_metadata = get_sample_metadata(result['sample-barcode'])
result['disease-code'] = sample_metadata['disease_code']
result['project'] = sample_metadata['project']
else:
result = {
'msg': 'File UUID {} was not found.'.format(file_uuid)
}
except Exception as e:
logger.error("[ERROR] While attempting to retrieve image data for {}:".format(file_uuid))
logger.exception(e)
status = '503'
result = {
'message': "There was an error while processing this request."
}
return JsonResponse(result, status=status)
def get_tbl_preview(request, proj_id, dataset_id, table_id):
status = 200
MAX_ROW = 8
if not proj_id or not dataset_id or not table_id:
status = 503
result = {
'message': "There was an error while processing this request: one or more required parameters (project id, dataset_id or table_id) were not supplied."
}
else:
try:
bq_service = get_bigquery_service()
dataset = bq_service.datasets().get(projectId=proj_id, datasetId=dataset_id).execute()
is_public = False
for access_entry in dataset['access']:
# print(access_entry)
if access_entry.get('role') == 'READER' and access_entry.get('specialGroup') == 'allAuthenticatedUsers':
is_public = True
break
if is_public:
tbl_data=bq_service.tables().get(projectId=proj_id, datasetId=dataset_id, tableId=table_id).execute()
if tbl_data.get('type') == 'VIEW' and tbl_data.get('view') and tbl_data.get('view').get('query'):
view_query_template = '''#standardSql
{query_stmt}
LIMIT {max}'''
view_query = view_query_template.format(query_stmt=tbl_data['view']['query'], max=MAX_ROW)
response = bq_service.jobs().query(
projectId=settings.BIGQUERY_PROJECT_ID,
body={ 'query': view_query }).execute()
else:
response = bq_service.tabledata().list(projectId=proj_id, datasetId=dataset_id, tableId=table_id,
maxResults=MAX_ROW).execute()
if response and int(response['totalRows']) > 0:
result = {
'rows': response['rows']
}
else:
status = 200
result = {
'msg': 'No record has been found for table { proj_id }{ dataset_id }{ table_id }.'.format(
proj_id=proj_id,
dataset_id=dataset_id,
table_id=table_id)
}
else:
status = 401
result = {
'message': "Preview is not available for this table/view."
}
except Exception as e:
if type(e) is HttpError and e.resp.status == 403:
logger.error(
"[ERROR] Access to preview table [{ proj_id }.{ dataset_id }.{ table_id }] was denied.".format(
proj_id=proj_id,
dataset_id=dataset_id,
table_id=table_id))
result = {
'message': "Your access to preview this table [{ proj_id }.{ dataset_id }.{ table_id }] was denied.".format(
proj_id=proj_id,
dataset_id=dataset_id,
table_id=table_id)
}
status = 403
else:
logger.error(
"[ERROR] While attempting to retrieve preview data for { proj_id }.{ dataset_id }.{ table_id } table.".format(
proj_id=proj_id,
dataset_id=dataset_id,
table_id=table_id))
logger.exception(e)
status = '503'
result = {
'message': "There was an error while processing this request."
}
return JsonResponse(result, status=status)
def dicom(request, study_uid=None):
template = 'isb_cgc/dicom.html'
context = {
'study_uid': study_uid,
'dicom_viewer': settings.DICOM_VIEWER
}
return render(request, template, context)
@login_required
def test_solr_data(request):
status=200
try:
start = time.time()
filters = json.loads(request.GET.get('filters', '{}'))
versions = json.loads(request.GET.get('versions', '[]'))
source_type = request.GET.get('source', DataSource.SOLR)
versions = DataVersion.objects.filter(data_type__in=versions) if len(versions) else DataVersion.objects.filter(
active=True)
programs = Program.objects.filter(active=1,is_public=1,owner=User.objects.get(is_superuser=1,is_active=1,is_staff=1))
if len(filters):
programs = programs.filter(id__in=[int(x) for x in filters.keys()])
results = {}
for prog in programs:
results[prog.id] = {}
prog_versions = prog.dataversion_set.filter(id__in=versions)
sources = prog.get_data_sources(source_type=source_type).filter(version__in=prog_versions)
prog_filters = filters.get(str(prog.id), None)
attrs = sources.get_source_attrs(for_ui=True)
for source in sources:
solr_query = build_solr_query(prog_filters, with_tags_for_ex=True) if prog_filters else None
solr_facets = build_solr_facets(attrs['sources'][source.id]['attrs'], filter_tags=solr_query['filter_tags'] if solr_query else None, unique='case_barcode')
query_set = []
if solr_query:
for attr in solr_query['queries']:
attr_name = re.sub("(_btw|_lt|_lte|_gt|_gte)", "", attr)
# If an attribute is not in this program's attribute listing, then it's ignored
if attr_name in attrs['list']:
# If the attribute is from this source, just add the query
if attr_name in attrs['sources'][source.id]['list']:
query_set.append(solr_query['queries'][attr])
# If it's in another source for this program, we need to join on that source
else:
for ds in sources:
if ds.id != source.id and attr_name in attrs['sources'][ds.id]['list']:
query_set.append(("{!join %s}" % "from={} fromIndex={} to={}".format(
ds.shared_id_col, ds.name, source.shared_id_col
)) + solr_query['queries'][attr])
else:
logger.warning("[WARNING] Attribute {} not found in program {}".format(attr_name,prog.name))
solr_result = query_solr_and_format_result({
'collection': source.name,
'facets': solr_facets,
'fqs': query_set
})
results[prog.id][source.name] = solr_result
stop = time.time()
results['elapsed_time'] = "{}s".format(str(stop-start))
except Exception as e:
logger.error("[ERROR] While trying to fetch Solr metadata:")
logger.exception(e)
results = {'msg': 'Encountered an error'}
status=500
return JsonResponse({'result': results}, status=status)
def camic(request, file_uuid=None):
if debug: logger.debug('Called ' + sys._getframe().f_code.co_name)
context = {}
if not file_uuid:
messages.error("Error while attempting to display this pathology image: a file UUID was not provided.")
return redirect(reverse('cohort_list'))
images = [{'file_uuid': file_uuid, 'thumb': '', 'type': ''}]
template = 'isb_cgc/camic_single.html'
context['files'] = images
context['camic_viewer'] = settings.CAMIC_VIEWER
context['img_thumb_url'] = settings.IMG_THUMBS_URL
return render(request, template, context)
@login_required
def igv(request):
if debug: logger.debug('Called ' + sys._getframe().f_code.co_name)
req = request.GET or request.POST
build = req.get('build','hg38')
checked_list = json.loads(req.get('checked_list','{}'))
readgroupset_list = []
bam_list = []
# This is a POST request with all the information we already need
if len(checked_list):
for item in checked_list['gcs_bam']:
bam_item = checked_list['gcs_bam'][item]
id_barcode = item.split(',')
bam_list.append({
'sample_barcode': id_barcode[1], 'gcs_path': id_barcode[0], 'build': build, 'program': bam_item['program']
})
# This is a single GET request, we need to get the full file info from Solr first
else:
sources = DataSource.objects.filter(source_type=DataSource.SOLR, version=DataVersion.objects.get(data_type=DataVersion.FILE_DATA, active=True, build=build))
gdc_ids = list(set(req.get('gdc_ids','').split(',')))
if not len(gdc_ids):
messages.error(request,"A list of GDC file UUIDs was not provided. Please indicate the files you wish to view.")
else:
if len(gdc_ids) > settings.MAX_FILES_IGV:
messages.warning(request,"The maximum number of files which can be viewed in IGV at one time is {}.".format(settings.MAX_FILES_IGV) +
" Only the first {} will be displayed.".format(settings.MAX_FILES_IGV))
gdc_ids = gdc_ids[:settings.MAX_FILES_IGV]
for source in sources:
result = query_solr_and_format_result(
{
'collection': source.name,
'fields': ['sample_barcode','file_gdc_id','file_name_key','index_file_name_key', 'program_name', 'access'],
'query_string': 'file_gdc_id:("{}") AND data_format:("BAM")'.format('" "'.join(gdc_ids)),
'counts_only': False
}
)
if 'docs' not in result or not len(result['docs']):
messages.error(request,"IGV compatible files corresponding to the following UUIDs were not found: {}.".format(" ".join(gdc_ids))
+ "Note that the default build is HG38; to view HG19 files, you must indicate the build as HG19: &build=hg19")
saw_controlled = False
for doc in result['docs']:
if doc['access'] == 'controlled':
saw_controlled = True
bam_list.append({
'sample_barcode': doc['sample_barcode'],
'gcs_path': "{};{}".format(doc['file_name_key'],doc['index_file_name_key']),
'build': build,
'program': doc['program_name']
})
if saw_controlled:
messages.info(request,"Some of the requested files require approved access to controlled data - if you receive a 403 error, double-check your current login status with DCF.")
context = {
'readgroupset_list': readgroupset_list,
'bam_list': bam_list,
'base_url': settings.BASE_URL,
'service_account': settings.OAUTH2_CLIENT_ID,
'build': build,
}
return render(request, 'isb_cgc/igv.html', context)
def path_report(request, report_file=None):
if debug: logger.debug('Called ' + sys._getframe().f_code.co_name)
context = {}
try:
if not path_report:
messages.error(
"Error while attempting to display this pathology report: a report file name was not provided.")
return redirect(reverse('cohort_list'))
uri = "https://nci-crdc.datacommons.io/user/data/download/{}?protocol=gs".format(report_file)
response = requests.get(uri)
if response.status_code != 200:
logger.warning("[WARNING] From IndexD: {}".format(response.text))
raise Exception("Received a status code of {} from IndexD.".format(str(response.status_code)))
anon_signed_uri = response.json()['url']
template = 'isb_cgc/path-pdf.html'
context['path_report_file'] = anon_signed_uri
except Exception as e:
logger.error("[ERROR] While trying to load Pathology report:")
logger.exception(e)
logger.error("Attempted URI: {}".format(uri))
return render(request, '500.html')
return render(request, template, context)
# Because the match for vm_ is always done regardless of its presense in the URL
# we must always provide an argument slot for it
#
def health_check(request, match):
return HttpResponse('')
def help_page(request):
return render(request, 'isb_cgc/help.html')
def about_page(request):
return render(request, 'isb_cgc/about.html')
def citations_page(request):
citations_file_name = 'mendeley_papers.json'
citations_file_path = CITATIONS_BUCKET + citations_file_name
citations = requests.get(citations_file_path).json()
return render(request, 'isb_cgc/citations.html', citations)
def vid_tutorials_page(request):
return render(request, 'isb_cgc/video_tutorials.html')
def how_to_discover_page(request):
return render(request, 'how_to_discover_page.html')
def contact_us(request):
return render(request, 'isb_cgc/contact_us.html')
def bq_meta_search(request, table_id=""):
bq_filter_file_name = 'bq_meta_filters.json'
bq_filter_file_path = BQ_ECOSYS_BUCKET + bq_filter_file_name
bq_filters = requests.get(bq_filter_file_path).json()
bq_filters['selected_table_full_id'] = table_id
return render(request, 'isb_cgc/bq_meta_search.html', bq_filters)
def bq_meta_data(request):
bq_meta_data_file_name = 'bq_meta_data.json'
bq_meta_data_file_path = BQ_ECOSYS_BUCKET + bq_meta_data_file_name
bq_meta_data = requests.get(bq_meta_data_file_path).json()
bq_useful_join_file_name = 'bq_useful_join.json'
bq_useful_join_file_path = BQ_ECOSYS_BUCKET + bq_useful_join_file_name
bq_useful_join = requests.get(bq_useful_join_file_path).json()
for bq_meta_data_row in bq_meta_data:
useful_joins = []
row_id = bq_meta_data_row['id']
for join in bq_useful_join:
if join['id'] == row_id:
useful_joins = join['joins']
break
bq_meta_data_row['usefulJoins'] = useful_joins
return JsonResponse(bq_meta_data, safe=False)
def programmatic_access_page(request):
return render(request, 'isb_cgc/programmatic_access.html')
def workflow_page(request):
return render(request, 'isb_cgc/workflow.html')
@login_required
def dashboard_page(request):
context = {}
display_count = 6
try:
# Cohort List
isb_superuser = User.objects.get(is_staff=True, is_superuser=True, is_active=True)
public_cohorts = Cohort_Perms.objects.filter(user=isb_superuser, perm=Cohort_Perms.OWNER).values_list('cohort',
flat=True)
cohort_perms = Cohort_Perms.objects.select_related('cohort').filter(user=request.user, cohort__active=True).exclude(
cohort__id__in=public_cohorts)
cohorts_count = cohort_perms.count()
cohorts = Cohort.objects.filter(id__in=cohort_perms.values_list('cohort__id',flat=True), active=True).order_by('-last_date_saved')[:display_count]
# Program List
ownedPrograms = request.user.program_set.filter(active=True)
sharedPrograms = Program.objects.filter(shared__matched_user=request.user, shared__active=True, active=True)
programs = ownedPrograms | sharedPrograms
programs_count = programs.distinct().count()
programs = programs.distinct().order_by('-last_date_saved')[:display_count]
# Workbook List
userWorkbooks = request.user.workbook_set.filter(active=True)
sharedWorkbooks = Workbook.objects.filter(shared__matched_user=request.user, shared__active=True, active=True)
workbooks = userWorkbooks | sharedWorkbooks
workbooks_count = workbooks.distinct().count()
workbooks = workbooks.distinct().order_by('-last_date_saved')[:display_count]
# # Notebook VM Instance
# user_instances = request.user.instance_set.filter(active=True)
# user = User.objects.get(id=request.user.id)
# gcp_list = GoogleProject.objects.filter(user=user, active=1)
# vm_username = request.user.email.split('@')[0]
# client_ip = get_ip_address_from_request(request)
# logger.debug('client_ip: '+client_ip)
# client_ip_range = ', '.join([client_ip])
#
# if user_instances:
# user_vm = user_instances[0]
# machine_name = user_vm.name
# project_id = user_vm.gcp.project_id
# zone = user_vm.zone
# result = check_vm_stat(project_id, zone, machine_name)
# status = result['status']
# else:
# # default values to fill in fields in form
# project_id = ''
# # remove special characters
# machine_header = re.sub(r'[^A-Za-z0-9]+', '', vm_username.lower())
# machine_name = '{}-jupyter-vm'.format(machine_header)
# zone = 'us-central1-c'
# status = 'NOT FOUND'
#
# notebook_vm = {
# 'user': vm_username,
# 'project_id': project_id,
# 'name': machine_name,
# 'zone': zone,
# 'client_ip_range': client_ip_range,
# 'status': status
# }
# Gene & miRNA Favorites
genefaves = request.user.genefavorite_set.filter(active=True).order_by('-last_date_saved')[:display_count]
genefaves_count = request.user.genefavorite_set.filter(active=True).count()
# Variable Favorites
varfaves = request.user.variablefavorite_set.filter(active=True).order_by('-last_date_saved')[:display_count]
varfaves_count = request.user.variablefavorite_set.filter(active=True).count()
context = {
'request': request,
'cohorts': cohorts,
'cohorts_count': cohorts_count,
'programs': programs,
'programs_count': programs_count,
'workbooks': workbooks,
'workbooks_count': workbooks_count,
'genefaves': genefaves,
'genefaves_count': genefaves_count,
'varfaves': varfaves,
'varfaves_count': varfaves_count,
# 'optinstatus': opt_in_status
# 'notebook_vm': notebook_vm,
# 'gcp_list': gcp_list,
}
except Exception as e:
logger.error("[ERROR] While prepping dashboard:")
logger.exception(e)
messages.error(request, "Encountered an error while building the dashboard - please contact the administrator.")
return render(request, 'isb_cgc/dashboard.html', context)
@login_required
def opt_in_check_show(request):
try:
obj, created = UserOptInStatus.objects.get_or_create(user=request.user)
result = (obj.opt_in_status == UserOptInStatus.NOT_SEEN)
except Exception as e:
result = False
return JsonResponse({
'result': result
})
@login_required
def opt_in_update(request):
# If user logs in for the second time, opt-in status changes to NOT_SEEN
error_msg = ''
opt_in_selection = ''
redirect_url = ''
if request.POST:
opt_in_selection = request.POST.get('opt-in-selection')
try:
user_opt_in_stat_obj = UserOptInStatus.objects.filter(user=request.user).first()
feedback_form_link = request.build_absolute_uri(reverse('opt_in_form_reg_user'))
if user_opt_in_stat_obj:
user_opt_in_stat_obj.opt_in_status = UserOptInStatus.SEEN
user_opt_in_stat_obj.save()
if opt_in_selection == 'yes' or opt_in_selection == 'no':
feedback_form_link_template = feedback_form_link + '?opt_in_selection={opt_in_selection}'
feedback_form_link_params = feedback_form_link_template.format(opt_in_selection=opt_in_selection)
redirect_url = feedback_form_link_params
except Exception as e:
error_msg = '[Error] There has been an error while updating your subscription status.'
logger.exception(e)
logger.error(error_msg)
return JsonResponse({
'redirect-url': redirect_url,
'error_msg': error_msg
})
def send_feedback_form(user_email, firstName, lastName, formLink):
status = None
message = None
try:
email_template = get_template('sharing/email_opt_in_form.html')
ctx = {
'firstName': firstName,
'lastName': lastName,
'formLink': formLink
}
message_data = {
'from': settings.NOTIFICATION_EMAIL_FROM_ADDRESS,
'to': user_email,
'subject': 'Join the ISB-CGC community!',
'text':
('Dear {firstName} {lastName},\n\n' +
'ISB-CGC is funded by the National Cancer Institute (NCI) to provide cloud-based tools and data to the cancer research community.\n' +
'Your feedback is important to the NCI and us.\n' +
'Please help us by filling out this form:\n' +
'{formLink}\n' +
'Thank you.\n\n' +
'ISB-CGC team').format(firstName=firstName, lastName=lastName, formLink=formLink),
'html': email_template.render(ctx)
}
send_email_message(message_data)
except Exception as e:
status = 'error'
message = '[Error] There has been an error while trying to send an email to {}.'.format(user_email)
return {
'status': status,
'message': message
}
@login_required
def form_reg_user(request):
return opt_in_form(request);
def opt_in_form(request):
template = 'isb_cgc/opt_in_form.html'
opt_in_status = 'opt-in'
if request.user.is_authenticated:
user = request.user
first_name = user.first_name
last_name = user.last_name
email = user.email
opt_in_status_obj = UserOptInStatus.objects.filter(user=user).first()
if opt_in_status_obj and opt_in_status_obj.opt_in_status == UserOptInStatus.NO:
opt_in_status = 'opt-out'
selection = request.GET.get('opt_in_selection') if request.GET.get('opt_in_selection') else ''
if selection == 'yes':
opt_in_status = 'opt-in'
elif selection == 'no':
opt_in_status = 'opt-out'
else:
email = request.GET.get('email') if request.GET.get('email') else ''
first_name = request.GET.get('first_name') if request.GET.get('first_name') else ''
last_name = request.GET.get('last_name') if request.GET.get('last_name') else ''
form = {'first_name': first_name,
'last_name': last_name,
'email': email,
'opt_in_status': opt_in_status
}
return render(request, template, form)
@csrf_protect
def opt_in_form_submitted(request):
msg = ''
error_msg = ''
template = 'isb_cgc/opt_in_form_submitted.html'
# get values and update optin status
first_name= request.POST.get('first-name')
last_name= request.POST.get('last-name')
email= request.POST.get('email')
affiliation= request.POST.get('affiliation')
feedback= request.POST.get('feedback')
subscribed = (request.POST.get('subscribed') == 'opt-in')
try:
users = User.objects.filter(email__iexact=email)
if len(users) > 0:
user = users.first()
user_opt_in_stat_obj = UserOptInStatus.objects.filter(user=user).first()
if user_opt_in_stat_obj:
user_opt_in_stat_obj.opt_in_status = UserOptInStatus.YES if subscribed else UserOptInStatus.NO
user_opt_in_stat_obj.save()
# record to store in bq table
feedback_row = {
"email": email,
"first_name": first_name,
"last_name": last_name,
"affiliation": affiliation,
"subscribed": subscribed,
"feedback": feedback,
"submitted_time": datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
}
BigQueryFeedbackSupport.add_rows_to_table([feedback_row])
# send a notification to <EMAIL> about the entry
if settings.IS_UAT:
logger.info("[STATUS] UAT: sent email for feedback")
else:
# send a notification to <EMAIL> about the entry
send_feedback_notification(feedback_row)
msg = 'We thank you for your time and suggestions.'
else:
error_msg = 'We were not able to find a user with the given email. Please check with us again later.'
logger.error(error_msg)
except Exception as e:
error_msg = 'We were not able to submit your feedback due to some errors. Please check with us again later.'
logger.exception(e)
logger.error(error_msg)
message = {
'msg': msg,
'error_msg': error_msg
}
return render(request, template, message)
def send_feedback_notification(feedback_dict):
try:
message_data = {
'from': settings.NOTIFICATION_EMAIL_FROM_ADDRESS,
'to': settings.NOTIFICATION_EMAIL_TO_ADDRESS,
'subject': '[ISB-CGC] A user feedback has been submitted.',
'text':
('We have just received a user feedback from ISB-CGC WebApp at {timestamp} (UTC).\n\n' +
'Here is what has been received:\n\n---------------------------------------\n' +
'First Name: {firstName}\n' +
'Last Name: {lastName}\n' +
'E-mail: {email}\n' +
'Affiliation: {affiliation}\n' +
'Subscribed: {subscribed}\n' +
'Feedback: {feedback}\n\n---------------------------------------\n' +
'Thank you.\n\n' +
'ISB-CGC team').format(
timestamp=feedback_dict['submitted_time'],
firstName=feedback_dict['first_name'],
lastName=feedback_dict['last_name'],
email=feedback_dict['email'],
affiliation=feedback_dict['affiliation'],
subscribed=('Yes' if feedback_dict['subscribed'] else 'No'),
feedback=feedback_dict['feedback'])}
send_email_message(message_data)
except Exception as e:
logger.error('[Error] Error has occured while sending out feedback notifications to {}.'.format(settings.NOTIFICATION_EMAIL_TO_ADDRESS))
logger.exception(e) |
# tests/test_project.py -- from the awslabs/aws-crt-builder repository
import os
import unittest
import unittest.mock as mock
from builder.core.project import Project
from builder.core.spec import BuildSpec
from builder.actions.script import Script
import builder.core.api # force API to load and expose the virtual module
here = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(here, 'data')
# base config -- copy for tests
_test_proj_config = {
'name': 'test-proj',
'search_dirs': [test_data_dir],
'path': here,
}
def _collect_steps(step):
"""
collect the list of steps
"""
def _collect_steps_impl(out, curr):
if isinstance(curr, list):
for s in curr:
_collect_steps_impl(out, s)
elif isinstance(curr, Script):
out.append(str(curr))
_collect_steps_impl(out, curr.commands)
else:
out.append(str(curr))
stack = []
_collect_steps_impl(stack, step)
return stack
def _fuzzy_find_step(step_stack, step, name):
"""
attempt to find a step name or value that either matches name or contains name as a fragment
:return: tuple(step, stack idx) | None
"""
for i in range(len(step_stack)):
s = step_stack[i]
if s == name or name in s:
return s, i
return None
def _step_exists(step, name):
"""
test if the step [name] exists in the set of [step]s
"""
step_stack = _collect_steps(step)
return _fuzzy_find_step(step_stack, step, name) is not None
def _dump_step(step):
    """Debug helper: pretty-print the flattened steps of *step*."""
    from pprint import pprint
    pprint(_collect_steps(step), width=240)
class TestProject(unittest.TestCase):
    """Tests for step generation in builder.core.project.Project."""

    def setUp(self):
        # Clear the project registry so state from one test cannot leak into another.
        Project._projects.clear()

    def _format_step(self, step):
        """Render the flattened steps of *step*, one per line."""
        step_stack = _collect_steps(step)
        return "\n".join(step_stack)

    def _assert_step_contains(self, step, name):
        """Fail unless *name* (or a step containing it) appears in *step*."""
        if not _step_exists(step, name):
            steps = self._format_step(step)
            self.fail(f"{name} not contained in stack:\n{steps}")

    def _assert_step_not_contains(self, step, name):
        """Fail if *name* (or a step containing it) appears in *step*."""
        if _step_exists(step, name):
            steps = self._format_step(step)
            self.fail(f"unexpected step {name} found in stack:\n{steps}")

    def _assert_step_contains_all(self, step, names, ordered=True):
        """Fail unless every entry of *names* is present; when *ordered*,
        also require them to appear in the given relative order."""
        for name in names:
            self._assert_step_contains(step, name)
        if ordered:
            stack = _collect_steps(step)
            steps = [_fuzzy_find_step(stack, step, name) for name in names]
            step_indices = [t[1] for t in steps]
            # Indices must be non-decreasing for the steps to be in order.
            steps_in_order = all(step_indices[i] <= step_indices[i+1] for i in range(len(step_indices) - 1))
            formatted_steps = self._format_step(step)
            self.assertTrue(
                steps_in_order, f"steps exist but not in order expected:\nexpected:{names}\nfound:\n{formatted_steps}")

    def test_build_defaults(self):
        """cmake build step should be default when not specified and toolchain exists"""
        p = Project(**_test_proj_config.copy())
        mock_env = mock.Mock(name='MockEnv')
        steps = p.build(mock_env)
        self._assert_step_contains(steps, 'cmake build')

    def test_override_build_steps(self):
        """explicit build steps take precedence over the cmake default"""
        config = _test_proj_config.copy()
        config['build_steps'] = ['foo']
        p = Project(**config)
        mock_env = mock.Mock(name='MockEnv')
        steps = p.build(mock_env)
        self._assert_step_contains(steps, 'foo')

    def test_upstream_builds_first(self):
        """upstream dependencies should be built first"""
        config = _test_proj_config.copy()
        config['upstream'] = [
            {'name': 'lib-1'}
        ]
        p = Project(**config)
        mock_env = mock.Mock(name='MockEnv', config=config)
        steps = p.pre_build(mock_env)
        self._assert_step_contains_all(steps, ['build dependencies', 'build lib-1'])

    def test_default_test_step(self):
        """a test step should be generated by default when not cross-compiling"""
        config = _test_proj_config.copy()
        p = Project(**config)
        m_toolchain = mock.Mock(name='mock toolchain', cross_compile=False)
        mock_env = mock.Mock(name='MockEnv', config=config,
                             toolchain=m_toolchain)
        steps = p.test(mock_env)
        self._assert_step_contains(steps, 'test')

    def test_downstream_tests_build_by_default(self):
        """downstream tests should build by default"""
        config = _test_proj_config.copy()
        config['downstream'] = [
            {
                'name': 'lib-1'
            }
        ]
        p = Project(**config)
        mock_env = mock.Mock(name='MockEnv', config=config)
        steps = p.build_consumers(mock_env)
        self._assert_step_contains_all(steps, ['test lib-1'])

    def test_downstream_tests_do_not_build(self):
        """downstream tests should not be built if requested"""
        config = _test_proj_config.copy()
        config['downstream'] = [
            {
                'name': 'lib-1',
                'run_tests': False
            }
        ]
        p = Project(**config)
        mock_env = mock.Mock(name='MockEnv', config=config)
        steps = p.build_consumers(mock_env)
        self._assert_step_not_contains(steps, 'test lib-1')

    def test_downstream_post_build_runs_before_tests(self):
        """downstream post_build_steps should run before tests"""
        config = _test_proj_config.copy()
        config['downstream'] = [
            {
                'name': 'lib-1'
            }
        ]
        p = Project(**config)
        mock_env = mock.Mock(name='MockEnv', config=config)
        steps = p.build_consumers(mock_env)
        self._assert_step_contains_all(steps, ['post build lib-1', 'test lib-1'])

    def test_explicit_upstream_branch(self):
        """upstream with specific revision should override the detected branch"""
        config = _test_proj_config.copy()
        config['upstream'] = [
            {
                'name': 'lib-1',
                'revision': 'explicit-branch'
            }
        ]
        p = Project(**config)
        spec = mock.Mock(name='MockBuildSpec', spec=BuildSpec, target='linux')
        deps = p.get_dependencies(spec)
        self.assertEqual('explicit-branch', deps[0].revision)

    def test_upstream_targets_filtered_for_spec(self):
        """upstream with specific targets should only be applied if target matches current spec"""
        config = _test_proj_config.copy()
        config['upstream'] = [
            {
                'name': 'lib-1',
                'targets': ['linux']
            }
        ]
        p = Project(**config)
        # Spec targets macos, upstream is linux-only -> must be filtered out.
        spec = mock.Mock(name='MockBuildSpec', spec=BuildSpec, target='macos')
        dependencies = p.get_dependencies(spec)
        self.assertEqual(0, len(dependencies), "dependencies should have filtered upstream with specific target")

    def test_project_source_dir_replaced(self):
        """project specific dependency variables should be replaced"""
        config = _test_proj_config.copy()
        config['upstream'] = [
            {
                'name': 'lib-1'
            }
        ]
        p = Project(**config)
        m_spec = mock.Mock(name='MockBuildSpec', spec=BuildSpec, target='macos')
        dependencies = p.get_dependencies(m_spec)
        m_env = mock.Mock(name='MockEnv', config=config)
        steps = dependencies[0].post_build(m_env)
        # {source_dir}-style variables must expand to the dependency's checkout path.
        self._assert_step_contains(steps, "{}/gradlew postBuildTask".format(os.path.join(test_data_dir, "lib-1")))
|
<reponame>tungstenfabric/tf-charms
#!/usr/bin/env python3
import json
import sys
import yaml
from charmhelpers.core.hookenv import (
Hooks,
UnregisteredHookError,
config,
log,
is_leader,
leader_get,
leader_set,
relation_id,
relation_get,
relation_ids,
related_units,
status_set,
relation_set,
local_unit,
remote_unit,
open_port,
close_port,
)
import contrail_analytics_utils as utils
import common_utils
hooks = Hooks()  # registry of hook handlers; dispatched by main()
config = config()  # charm config singleton (intentionally shadows the imported helper)
@hooks.hook("install.real")
def install():
    """Install hook: prepare the host, install the container engine, set status."""
    status_set("maintenance", "Installing...")
    # TODO: try to remove this call
    common_utils.fix_hostname()
    common_utils.container_engine().install()
    utils.update_charm_status()
    # NOTE: do not open port until haproxy can fail
    # https://bugs.launchpad.net/charm-haproxy/+bug/1792939
    # open_port(8081, "TCP")
@hooks.hook("config-changed")
def config_changed():
    """config-changed hook: validate immutable options, propagate address
    changes to relations, refresh containers and charm status."""
    utils.update_nrpe_config()
    # Charm doesn't support changing container runtime (check for empty value after upgrade).
    if config.changed("container_runtime") and config.previous("container_runtime"):
        raise Exception("Configuration parameter container_runtime couldn't be changed")
    if config.changed("control-network"):
        _update_cluster()
        # Leader re-publishes IP lists if this unit's address moved networks.
        if is_leader() and _address_changed(local_unit(), common_utils.get_ip()):
            _update_analytics()
            _update_analyticsdb()
    common_utils.container_engine().config_changed()
    utils.pull_images()
    utils.update_charm_status()
    _notify_proxy_services()
    # leave it as latest - in case of exception in previous steps
    # config.changed doesn't work sometimes (when we saved config in this hook before)
    if config.get("saved-image-tag") != config["image-tag"]:
        utils.update_ziu("image-tag")
        config["saved-image-tag"] = config["image-tag"]
        config.save()
def _value_changed(rel_data, rel_key, cfg_key):
    """Mirror relation key *rel_key* from *rel_data* into config key *cfg_key*.

    A value of None on the relation clears the config key; an absent key is
    left untouched.
    """
    if rel_key not in rel_data:
        # data is absent in relation. it means that remote charm doesn't
        # send it due to lack of information
        return
    value = rel_data[rel_key]
    if value is not None and value != config.get(cfg_key):
        config[cfg_key] = value
    elif value is None and config.get(cfg_key) is not None:
        config.pop(cfg_key, None)
def _update_analytics(rid=None):
    """Publish the analytics IP list on contrail-analytics relation(s).

    With *rid* set, only that relation is updated; otherwise all of them.
    """
    rids = [rid] if rid else relation_ids("contrail-analytics")
    if not rids:
        return
    cluster_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    # Advertise an empty list until the cluster reaches its minimum size.
    ip_list = '[]'
    if len(cluster_info) >= config.get("min-cluster-size"):
        ip_list = json.dumps(list(cluster_info.values()))
    settings = {"analytics_ips": ip_list}
    for rid in rids:
        relation_set(relation_id=rid, relation_settings=settings)
@hooks.hook("contrail-analytics-relation-joined")
def contrail_analytics_joined():
    """New contrail-analytics relation: publish current analytics IPs to it."""
    _update_analytics(rid=relation_id())
@hooks.hook("contrail-analytics-relation-changed")
def contrail_analytics_changed():
    """Absorb controller-published settings into local config and re-render."""
    data = relation_get()
    _value_changed(data, "auth-mode", "auth_mode")
    _value_changed(data, "auth-info", "auth_info")
    _value_changed(data, "orchestrator-info", "orchestrator_info")
    _value_changed(data, "rabbitmq_hosts", "rabbitmq_hosts")
    _value_changed(data, "maintenance", "maintenance")
    _value_changed(data, "controller_ips", "controller_ips")
    _value_changed(data, "controller_data_ips", "controller_data_ips")
    config.save()
    # TODO: handle changing of all values
    # TODO: set error if orchestrator is changing and container was started
    utils.update_ziu("analytics-changed")
    utils.update_charm_status()
    _notify_proxy_services()
@hooks.hook("contrail-analytics-relation-departed")
def contrail_analytics_departed():
    """Drop controller-provided config once the last related unit is gone."""
    units = [unit for rid in relation_ids("contrail-analytics")
             for unit in related_units(rid)]
    if not units:
        keys = ["auth_info", "auth_mode", "orchestrator_info", "rabbitmq_hosts",
                "controller_ips", "controller_data_ips"]
        for key in keys:
            config.pop(key, None)
    utils.update_charm_status()
    _notify_proxy_services()
def _update_analyticsdb(rid=None):
    """Publish analytics IPs and unit type on contrail-analyticsdb relation(s).

    With *rid* set, only that relation is updated; otherwise all of them.
    """
    rids = [rid] if rid else relation_ids("contrail-analyticsdb")
    if not rids:
        return
    cluster_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    # Advertise an empty list until the cluster reaches its minimum size.
    ip_list = '[]'
    if len(cluster_info) >= config.get("min-cluster-size"):
        ip_list = json.dumps(list(cluster_info.values()))
    settings = {
        "unit-type": "analytics",
        "analytics_ips": ip_list
    }
    for rid in rids:
        relation_set(relation_id=rid, relation_settings=settings)
@hooks.hook("contrail-analyticsdb-relation-joined")
def contrail_analyticsdb_joined():
    """New contrail-analyticsdb relation: publish current data to it."""
    _update_analyticsdb(rid=relation_id())
@hooks.hook("contrail-analyticsdb-relation-changed")
def contrail_analyticsdb_changed():
    """Absorb analyticsdb IPs published by the remote side."""
    data = relation_get()
    _value_changed(data, "analyticsdb_ips", "analyticsdb_ips")
    config.save()
    utils.update_ziu("analyticsdb-changed")
    utils.update_charm_status()
@hooks.hook("contrail-analyticsdb-relation-departed")
def contrail_analyticsdb_departed():
    """Forget analyticsdb IPs once the last related unit is gone."""
    units = [unit for rid in relation_ids("contrail-analyticsdb")
             for unit in related_units(rid)]
    if not units:
        config.pop("analyticsdb_ips", None)
    utils.update_charm_status()
def _update_cluster(rid=None):
    """Advertise this unit's address on the analytics-cluster peer relation(s)."""
    rids = [rid] if rid else relation_ids("analytics-cluster")
    if not rids:
        return
    settings = {"unit-address": common_utils.get_ip()}
    for rid in rids:
        relation_set(relation_id=rid, relation_settings=settings)
@hooks.hook("analytics-cluster-relation-joined")
def analytics_cluster_joined():
    """New peer joined: send this unit's address to it."""
    _update_cluster(rid=relation_id())
@hooks.hook("analytics-cluster-relation-changed")
def analytics_cluster_changed():
    """Peer data changed: the leader records the peer's address and
    re-publishes IP lists if it moved."""
    data = relation_get()
    log("Peer relation changed with {}: {}".format(
        remote_unit(), data))
    ip = data.get("unit-address")
    if not ip:
        log("There is no unit-address in the relation")
    elif is_leader():
        unit = remote_unit()
        if _address_changed(unit, ip):
            _update_analytics()
            _update_analyticsdb()
    utils.update_charm_status()
    utils.update_ziu("cluster-changed")
@hooks.hook("analytics-cluster-relation-departed")
def analytics_cluster_departed():
    """Peer left: the leader removes it from cluster_info and re-publishes."""
    if not is_leader():
        return
    unit = remote_unit()
    cluster_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    cluster_info.pop(unit, None)
    log("Unit {} departed. Cluster info: {}".format(unit, str(cluster_info)))
    settings = {"cluster_info": json.dumps(cluster_info)}
    leader_set(settings=settings)
    _update_analytics()
    _update_analyticsdb()
    utils.update_charm_status()
def _update_tls(rid=None):
    """Send this unit's TLS settings on the tls-certificates relation(s)."""
    rids = [rid] if rid else relation_ids("tls-certificates")
    if not rids:
        return
    config['tls_present'] = True
    settings = common_utils.get_tls_settings(common_utils.get_ip())
    for rid in rids:
        relation_set(relation_id=rid, relation_settings=settings)
@hooks.hook('tls-certificates-relation-joined')
def tls_certificates_relation_joined():
    """Request certificates, identifying ourselves explicitly."""
    # in cross-model relations we have to provide own name to be sure that we'll find it in response
    relation_set(unit_name=local_unit().replace('/', '_'))
    _update_tls(rid=relation_id())
@hooks.hook('tls-certificates-relation-changed')
def tls_certificates_relation_changed():
    """Apply newly issued certificates when they actually changed."""
    # it can be fired several times without server's cert
    if common_utils.tls_changed(utils.MODULE, relation_get()):
        _notify_proxy_services()
        utils.update_nrpe_config()
        utils.update_charm_status()
@hooks.hook('tls-certificates-relation-departed')
def tls_certificates_relation_departed():
    """TLS relation gone: drop certificates and reconfigure services."""
    config['tls_present'] = False
    common_utils.tls_changed(utils.MODULE, None)
    _notify_proxy_services()
    utils.update_nrpe_config()
    utils.update_charm_status()
def _address_changed(unit, ip):
    """Record *unit*'s *ip* in the leader's cluster_info.

    Returns True if the stored address changed (caller should then
    re-publish IP lists), False if it was already current.
    """
    cluster_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if unit in cluster_info and ip == cluster_info[unit]:
        return False
    cluster_info[unit] = ip
    log("Cluster info: {}".format(str(cluster_info)))
    settings = {"cluster_info": json.dumps(cluster_info)}
    leader_set(settings=settings)
    return True
@hooks.hook("update-status")
def update_status():
    """Periodic status hook: advance ZIU state machine and refresh status."""
    utils.update_ziu("update-status")
    utils.update_charm_status()
@hooks.hook("upgrade-charm")
def upgrade_charm():
    """Re-publish peer/relation data after a charm upgrade; the leader
    seeds cluster_info if it was never stored."""
    _update_cluster()
    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    if is_leader() and not saved_info:
        current_info = utils.get_cluster_info("unit-address", common_utils.get_ip())
        log("Cluster info: {}".format(str(current_info)))
        settings = {"cluster_info": json.dumps(current_info)}
        leader_set(settings=settings)
        _update_analytics()
        _update_analyticsdb()
    _notify_proxy_services()
    # to update config flags and certs params if any was changed
    _update_tls()
    utils.update_charm_status()
def _notify_proxy_services(rid=None):
    """Publish haproxy service definitions on the http-services relation(s).

    When a VIP is configured the local port is closed (haproxy fronts it);
    otherwise the port is opened directly and an empty service list is sent.
    """
    rids = [rid] if rid else relation_ids("http-services")
    if not rids:
        return
    vip = config.get("vip")
    common_utils.configure_ports(close_port if vip else open_port, ["8081"])
    data = list() if not vip else common_utils.http_services(
        "contrail-analytics-api", str(vip), 8081)
    settings = {"services": yaml.dump(data)}
    for rid in rids:
        relation_set(relation_id=rid, relation_settings=settings)
@hooks.hook("http-services-relation-joined")
def http_services_joined():
    """haproxy related: a VIP is mandatory for this relation to make sense."""
    vip = config.get("vip")
    if not vip:
        raise Exception("VIP must be set for allow relation to haproxy")
    _notify_proxy_services(rid=relation_id())
@hooks.hook('nrpe-external-master-relation-changed')
def nrpe_external_master_relation_changed():
    """Refresh NRPE (Nagios) check configuration."""
    utils.update_nrpe_config()
@hooks.hook("stop")
def stop():
    """Stop hook: shut down analytics services and clean up created files."""
    utils.stop_analytics()
    utils.remove_created_files()
@hooks.hook("leader-settings-changed")
def leader_settings_changed():
    """Leader data changed: re-publish IP lists and refresh status."""
    _update_analytics()
    _update_analyticsdb()
    utils.update_charm_status()
@hooks.hook("leader-elected")
def leader_elected():
    """This unit became leader: seed cluster_info if absent, then publish."""
    current_info = utils.get_cluster_info("unit-address", common_utils.get_ip())
    saved_info = common_utils.json_loads(leader_get("cluster_info"), dict())
    log("Cluster current info: {}".format(str(current_info)))
    log("Cluster saved info: {}".format(str(saved_info)))
    if not saved_info:
        log("Cluster info: {}".format(str(current_info)))
        settings = {
            "cluster_info": json.dumps(current_info)
        }
        leader_set(settings=settings)
    _update_analytics()
    _update_analyticsdb()
    utils.update_charm_status()
def main():
    """Dispatch the invoked hook; unknown hooks are logged and skipped."""
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log("Unknown hook {} - skipping.".format(e))


if __name__ == "__main__":
    main()
|
<reponame>aprilnovak/openmc
from collections.abc import Mapping
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
import scipy.stats
from openmc.data.reaction import REACTION_NAME
from . import _dll, Nuclide
from .core import _FortranObjectWithID
from .error import _error_handler, AllocationError, InvalidIDError
from .filter import _get_filter
__all__ = ['Tally', 'tallies', 'global_tallies', 'num_realizations']
# Tally functions
# ctypes prototypes for the OpenMC C API: argtypes/restype declare each
# function's signature, errcheck routes non-zero return codes through the
# shared _error_handler (which raises the matching Python exception).
_dll.openmc_extend_tallies.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]
_dll.openmc_extend_tallies.restype = c_int
_dll.openmc_extend_tallies.errcheck = _error_handler
_dll.openmc_get_tally_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_tally_index.restype = c_int
_dll.openmc_get_tally_index.errcheck = _error_handler
_dll.openmc_global_tallies.argtypes = [POINTER(POINTER(c_double))]
_dll.openmc_global_tallies.restype = c_int
_dll.openmc_global_tallies.errcheck = _error_handler
_dll.openmc_tally_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_tally_get_id.restype = c_int
_dll.openmc_tally_get_id.errcheck = _error_handler
_dll.openmc_tally_get_filters.argtypes = [
    c_int32, POINTER(POINTER(c_int32)), POINTER(c_int)]
_dll.openmc_tally_get_filters.restype = c_int
_dll.openmc_tally_get_filters.errcheck = _error_handler
_dll.openmc_tally_get_n_realizations.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_tally_get_n_realizations.restype = c_int
_dll.openmc_tally_get_n_realizations.errcheck = _error_handler
_dll.openmc_tally_get_nuclides.argtypes = [
    c_int32, POINTER(POINTER(c_int)), POINTER(c_int)]
_dll.openmc_tally_get_nuclides.restype = c_int
_dll.openmc_tally_get_nuclides.errcheck = _error_handler
_dll.openmc_tally_get_scores.argtypes = [
    c_int32, POINTER(POINTER(c_int)), POINTER(c_int)]
_dll.openmc_tally_get_scores.restype = c_int
_dll.openmc_tally_get_scores.errcheck = _error_handler
_dll.openmc_tally_results.argtypes = [
    c_int32, POINTER(POINTER(c_double)), POINTER(c_int*3)]
_dll.openmc_tally_results.restype = c_int
_dll.openmc_tally_results.errcheck = _error_handler
_dll.openmc_tally_set_filters.argtypes = [c_int32, c_int, POINTER(c_int32)]
_dll.openmc_tally_set_filters.restype = c_int
_dll.openmc_tally_set_filters.errcheck = _error_handler
_dll.openmc_tally_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_tally_set_id.restype = c_int
_dll.openmc_tally_set_id.errcheck = _error_handler
_dll.openmc_tally_set_nuclides.argtypes = [c_int32, c_int, POINTER(c_char_p)]
_dll.openmc_tally_set_nuclides.restype = c_int
_dll.openmc_tally_set_nuclides.errcheck = _error_handler
_dll.openmc_tally_set_scores.argtypes = [c_int32, c_int, POINTER(c_char_p)]
_dll.openmc_tally_set_scores.restype = c_int
_dll.openmc_tally_set_scores.errcheck = _error_handler
_dll.openmc_tally_set_type.argtypes = [c_int32, c_char_p]
_dll.openmc_tally_set_type.restype = c_int
_dll.openmc_tally_set_type.errcheck = _error_handler

# Mapping of negative integer score codes (as used by the C API) to their
# human-readable score names; positive codes are reaction MTs (REACTION_NAME).
_SCORES = {
    -1: 'flux', -2: 'total', -3: 'scatter', -4: 'nu-scatter',
    -9: 'absorption', -10: 'fission', -11: 'nu-fission', -12: 'kappa-fission',
    -13: 'current', -18: 'events', -19: 'delayed-nu-fission',
    -20: 'prompt-nu-fission', -21: 'inverse-velocity', -22: 'fission-q-prompt',
    -23: 'fission-q-recoverable', -24: 'decay-rate'
}
def global_tallies():
    """Mean and standard deviation of the mean for each global tally.

    Returns
    -------
    list of tuple
        For each global tally, a tuple of (mean, standard deviation)

    """
    ptr = POINTER(c_double)()
    _dll.openmc_global_tallies(ptr)
    # View the C buffer as 4 global tallies x 3 values; per the usage below,
    # column 1 holds the sum and column 2 the sum of squares.
    array = as_array(ptr, (4, 3))

    # Get sum, sum-of-squares, and number of realizations
    sum_ = array[:, 1]
    sum_sq = array[:, 2]
    n = num_realizations()

    # Determine mean
    if n > 0:
        mean = sum_ / n
    else:
        mean = sum_.copy()

    # Determine standard deviation; bins with zero mean keep an infinite
    # std. dev. as a sentinel for "no statistics available".
    nonzero = np.abs(mean) > 0
    stdev = np.empty_like(mean)
    stdev.fill(np.inf)
    if n > 1:
        stdev[nonzero] = np.sqrt((sum_sq[nonzero]/n - mean[nonzero]**2)/(n - 1))
    return list(zip(mean, stdev))
def num_realizations():
    """Number of realizations of global tallies."""
    # Read the library's global 'n_realizations' variable directly.
    return c_int32.in_dll(_dll, 'n_realizations').value
class Tally(_FortranObjectWithID):
    """Tally stored internally.

    This class exposes a tally that is stored internally in the OpenMC
    library. To obtain a view of a tally with a given ID, use the
    :data:`openmc.capi.tallies` mapping.

    Parameters
    ----------
    uid : int or None
        Unique ID of the tally
    new : bool
        When `index` is None, this argument controls whether a new object is
        created or a view of an existing object is returned.
    index : int or None
        Index in the `tallies` array.

    Attributes
    ----------
    id : int
        ID of the tally
    filters : list
        List of tally filters
    mean : numpy.ndarray
        An array containing the sample mean for each bin
    nuclides : list of str
        List of nuclides to score results for
    num_realizations : int
        Number of realizations
    results : numpy.ndarray
        Array of tally results
    std_dev : numpy.ndarray
        An array containing the sample standard deviation for each bin

    """
    # Cache of wrapper instances keyed by C-side index; weak refs so
    # wrappers can be collected when no longer referenced.
    __instances = WeakValueDictionary()

    def __new__(cls, uid=None, new=True, index=None):
        mapping = tallies
        if index is None:
            if new:
                # Determine ID to assign
                if uid is None:
                    uid = max(mapping, default=0) + 1
                else:
                    if uid in mapping:
                        raise AllocationError('A tally with ID={} has already '
                                              'been allocated.'.format(uid))
                # Grow the C-side tally array by one and note the new index.
                index = c_int32()
                _dll.openmc_extend_tallies(1, index, None)
                _dll.openmc_tally_set_type(index, b'generic')
                index = index.value
            else:
                index = mapping[uid]._index
        if index not in cls.__instances:
            # First wrapper for this index: create and cache it.
            instance = super().__new__(cls)
            instance._index = index
            if uid is not None:
                instance.id = uid
            cls.__instances[index] = instance
        return cls.__instances[index]

    @property
    def id(self):
        tally_id = c_int32()
        _dll.openmc_tally_get_id(self._index, tally_id)
        return tally_id.value

    @id.setter
    def id(self, tally_id):
        _dll.openmc_tally_set_id(self._index, tally_id)

    @property
    def filters(self):
        filt_idx = POINTER(c_int32)()
        n = c_int()
        _dll.openmc_tally_get_filters(self._index, filt_idx, n)
        # Wrap each C-side filter index in its Python filter object.
        return [_get_filter(filt_idx[i]) for i in range(n.value)]

    @filters.setter
    def filters(self, filters):
        # Get filter indices as int32_t[]
        n = len(filters)
        indices = (c_int32*n)(*(f._index for f in filters))
        _dll.openmc_tally_set_filters(self._index, n, indices)

    @property
    def mean(self):
        n = self.num_realizations
        sum_ = self.results[:, :, 1]
        if n > 0:
            return sum_ / n
        else:
            return sum_.copy()

    @property
    def nuclides(self):
        nucs = POINTER(c_int)()
        n = c_int()
        _dll.openmc_tally_get_nuclides(self._index, nucs, n)
        # Non-positive entries denote the 'total' pseudo-nuclide.
        return [Nuclide(nucs[i]).name if nucs[i] > 0 else 'total'
                for i in range(n.value)]

    @nuclides.setter
    def nuclides(self, nuclides):
        nucs = (c_char_p * len(nuclides))()
        nucs[:] = [x.encode() for x in nuclides]
        _dll.openmc_tally_set_nuclides(self._index, len(nuclides), nucs)

    @property
    def num_realizations(self):
        n = c_int32()
        _dll.openmc_tally_get_n_realizations(self._index, n)
        return n.value

    @property
    def results(self):
        data = POINTER(c_double)()
        shape = (c_int*3)()
        _dll.openmc_tally_results(self._index, data, shape)
        # C reports Fortran-order dimensions; reverse them for numpy.
        return as_array(data, tuple(shape[::-1]))

    @property
    def scores(self):
        scores_as_int = POINTER(c_int)()
        n = c_int()
        try:
            _dll.openmc_tally_get_scores(self._index, scores_as_int, n)
        except AllocationError:
            return []
        else:
            # Translate integer codes: negatives via _SCORES, positives are
            # reaction MT numbers; unknown codes fall back to the raw number.
            scores = []
            for i in range(n.value):
                if scores_as_int[i] in _SCORES:
                    scores.append(_SCORES[scores_as_int[i]])
                elif scores_as_int[i] in REACTION_NAME:
                    scores.append(REACTION_NAME[scores_as_int[i]])
                else:
                    scores.append(str(scores_as_int[i]))
            return scores

    @scores.setter
    def scores(self, scores):
        scores_ = (c_char_p * len(scores))()
        scores_[:] = [x.encode() for x in scores]
        _dll.openmc_tally_set_scores(self._index, len(scores), scores_)

    @property
    def std_dev(self):
        results = self.results
        # Infinity marks bins without enough statistics.
        std_dev = np.empty(results.shape[:2])
        std_dev.fill(np.inf)

        n = self.num_realizations
        if n > 1:
            # Get sum and sum-of-squares from results
            sum_ = results[:, :, 1]
            sum_sq = results[:, :, 2]

            # Determine non-zero entries
            mean = sum_ / n
            nonzero = np.abs(mean) > 0

            # Calculate sample standard deviation of the mean
            std_dev[nonzero] = np.sqrt(
                (sum_sq[nonzero]/n - mean[nonzero]**2)/(n - 1))

        return std_dev

    def ci_width(self, alpha=0.05):
        """Confidence interval half-width based on a Student t distribution

        Parameters
        ----------
        alpha : float
            Significance level (one minus the confidence level!)

        Returns
        -------
        float
            Half-width of a two-sided (1 - :math:`alpha`) confidence interval

        """
        half_width = self.std_dev.copy()
        n = self.num_realizations
        if n > 1:
            half_width *= scipy.stats.t.ppf(1 - alpha/2, n - 1)
        return half_width
class _TallyMapping(Mapping):
    """Read-only dict-like view over all tallies, keyed by tally ID."""

    def __getitem__(self, key):
        index = c_int32()
        try:
            _dll.openmc_get_tally_index(key, index)
        except (AllocationError, InvalidIDError) as e:
            # __contains__ expects a KeyError to work correctly
            raise KeyError(str(e))
        return Tally(index=index.value)

    def __iter__(self):
        # C-side tally indices are 1-based.
        for i in range(len(self)):
            yield Tally(index=i + 1).id

    def __len__(self):
        return c_int32.in_dll(_dll, 'n_tallies').value

    def __repr__(self):
        return repr(dict(self))

# Module-level singleton used by Tally.__new__ and by client code.
tallies = _TallyMapping()
|
#!/usr/bin/env python
import asyncio
import sys
request_1 = """Content-Length: 4642
{
"jsonrpc": "2.0",
"id": 0,
"method": "initialize",
"params": {
"processId": 3406,
"rootPath": "/home/eckhart/Entwicklung/DHParser/examples/json",
"rootUri": "file:///home/eckhart/Entwicklung/DHParser/examples/json",
"capabilities": {
"workspace": {
"applyEdit": true,
"workspaceEdit": {
"documentChanges": true
},
"didChangeConfiguration": {
"dynamicRegistration": true
},
"didChangeWatchedFiles": {
"dynamicRegistration": true
},
"symbol": {
"dynamicRegistration": true,
"symbolKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26
]
}
},
"executeCommand": {
"dynamicRegistration": true
},
"configuration": true,
"workspaceFolders": true
},
"textDocument": {
"publishDiagnostics": {
"relatedInformation": true
},
"synchronization": {
"dynamicRegistration": true,
"willSave": true,
"willSaveWaitUntil": true,
"didSave": true
},
"completion": {
"dynamicRegistration": true,
"contextSupport": true,
"completionItem": {
"snippetSupport": true,
"commitCharactersSupport": true,
"documentationFormat": [
"markdown",
"plaintext"
],
"deprecatedSupport": true
},
"completionItemKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25
]
}
},
"hover": {
"dynamicRegistration": true,
"contentFormat": [
"markdown",
"plaintext"
]
},
"signatureHelp": {
"dynamicRegistration": true,
"signatureInformation": {
"documentationFormat": [
"markdown",
"plaintext"
]
}
},
"definition": {
"dynamicRegistration": true
},
"references": {
"dynamicRegistration": true
},
"documentHighlight": {
"dynamicRegistration": true
},
"documentSymbol": {
"dynamicRegistration": true,
"symbolKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26
]
}
},
"codeAction": {
"dynamicRegistration": true
},
"codeLens": {
"dynamicRegistration": true
},
"formatting": {
"dynamicRegistration": true
},
"rangeFormatting": {
"dynamicRegistration": true
},
"onTypeFormatting": {
"dynamicRegistration": true
},
"rename": {
"dynamicRegistration": true
},
"documentLink": {
"dynamicRegistration": true
},
"typeDefinition": {
"dynamicRegistration": true
},
"implementation": {
"dynamicRegistration": true
},
"colorProvider": {
"dynamicRegistration": true
}
}
},
"trace": "verbose",
"workspaceFolders": [
{
"uri": "file:///home/eckhart/Entwicklung/DHParser/examples/json",
"name": "json"
}
]
}
}
"""
request_2 = """Content-Length: 60
{"jsonrpc":"2.0","id":0,"method":"initialized","params":{}}
"""
async def initialization(host='127.0.0.1', port=8888):
    """Send an LSP 'initialize' then 'initialized' request to a language
    server at *host*:*port* and print the first response."""
    try:
        reader, writer = await asyncio.open_connection(host, port)
        print('request_1')
        writer.write(request_1.encode())
        response = (await reader.read(8192)).decode()
        print(response)
        print('request_2')
        writer.write(request_2.encode())
        print('r_2 sent')
        # response = (await reader.read(8192)).decode()
        # print('r_2 response received')
        # print(response)
        writer.close()
        # StreamWriter.wait_closed() only exists on Python >= 3.7
        if sys.version_info >= (3, 7):
            await writer.wait_closed()
    except ConnectionRefusedError:
        print("Could not connect to server %s on port %i" % (host, port))
if __name__ == '__main__':
    # asyncio.run itself requires Python >= 3.7
    asyncio.run(initialization())
|
<reponame>cbwang2016/PKUAutoElective2<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: main.py
# modified: 2019-09-11
import os
import time
from optparse import OptionParser
from multiprocessing import Process, Manager, Queue
from autoelective import __version__, __date__
from autoelective.config import AutoElectiveConfig
from autoelective.parser import load_course_csv
from autoelective.logger import ConsoleLogger
from autoelective.loop import main as run_main_loop
from autoelective.monitor import main as run_monitor
from autoelective.const import SIGNAL_KILL_ALL_PROCESSES
from autoelective._internal import userInfo as _userInfo # ugly !
def task_run_loop(userInfo):
    """Run the main elective loop in a daemon child process and block until
    it signals that all processes should be killed."""
    config = AutoElectiveConfig()  # create singleton first
    cout = ConsoleLogger("main")
    signals = Queue()
    p = Process(target=run_main_loop, name="Main", args=(signals, userInfo))
    p.daemon = True
    p.start()
    while True:
        try:
            signal = signals.get()  # block process
        except KeyboardInterrupt as e:
            cout.info("Process %s is killed" % os.getpid())
            return
        time.sleep(0.1)  # brief pause before acting on the signal
        if signal == SIGNAL_KILL_ALL_PROCESSES:
            if p.is_alive():
                p.terminate()
            cout.info("Process %s is killed" % p.name)
            break
def task_run_loop_with_monitor(userInfo):
    """Run the main loop plus the monitor process, sharing state through a
    multiprocessing.Manager, and block until the kill-all signal arrives."""
    config = AutoElectiveConfig()  # create singleton first
    cout = ConsoleLogger("main")
    signals = Queue()
    with Manager() as manager:
        # shared objects
        goals = manager.list(load_course_csv())
        ignored = manager.list()
        status = manager.dict()
        status["main_loop"] = 0
        status["login_loop"] = 0
        status["error_count"] = 0
        status["errors"] = manager.dict()
        args = (signals, userInfo, goals, ignored, status)
        pList = [
            Process(target=run_main_loop, name="Main", args=args),
            Process(target=run_monitor, name="Monitor", args=args),
        ]
        for p in pList:
            p.daemon = True
            p.start()
        while True:
            try:
                signal = signals.get()  # block process
            except KeyboardInterrupt as e:
                cout.info("Process %s is killed" % os.getpid())
                return
            time.sleep(0.1)  # brief pause before acting on the signal
            if signal == SIGNAL_KILL_ALL_PROCESSES:
                for p in pList:
                    if p.is_alive():
                        p.terminate()
                    cout.info("Process %s is killed" % p.name)
                break
def main():
    """Parse command-line options, assemble userInfo, and run the chosen task."""
    parser = OptionParser(
        description='PKU Auto-Elective Tool v%s (%s)' % (__version__, __date__),
        version=__version__,
    )

    # MARK: custom input files

    parser.add_option(
        '--config',
        dest='CONFIG_INI',
        metavar="FILE",
        help='custom config file encoded with utf8',
    )
    parser.add_option(
        '--course-csv-utf8',
        dest='COURSE_UTF8_CSV',
        metavar="FILE",
        help='custom course.csv file encoded with utf8',
    )
    parser.add_option(
        '--course-csv-gbk',
        dest='COURSE_GBK_CSV',
        metavar="FILE",
        help='custom course.csv file encoded with gbk',
    )

    # MARK: boolean (flag) options

    parser.add_option(
        '--with-monitor',
        dest='with_monitor',
        action='store_true',
        default=False,
        help='run the monitor process simultaneously',
    )

    options, args = parser.parse_args()
    run_task = task_run_loop

    # MARK: setup userInfo

    userInfo = {}
    if options.CONFIG_INI is not None:
        userInfo["CONFIG_INI"] = options.CONFIG_INI
    if options.COURSE_UTF8_CSV is not None:
        userInfo["COURSE_UTF8_CSV"] = options.COURSE_UTF8_CSV
    if options.COURSE_GBK_CSV is not None:
        userInfo["COURSE_GBK_CSV"] = options.COURSE_GBK_CSV

    # MARK: handle boolean (flag) options

    if options.with_monitor:
        run_task = task_run_loop_with_monitor

    _userInfo.update(userInfo)  # setup userInfo first
    run_task(userInfo)


if __name__ == '__main__':
    main()
|
<reponame>mt-upc/SHAS
import argparse
from pathlib import Path
import yaml
def create_xml_content(
    segmentation: list[dict],
    lang_text: list[str],
    split: str,
    src_lang: str,
    tgt_lang: str,
    is_src: bool,
) -> list[str]:
    """
    Build the line-by-line content of an mteval XML file.

    Args:
        segmentation (list): content of the yaml file
        lang_text (list): content of the transcription or translation txt file
        split (str): the split name
        src_lang (str): source language id
        tgt_lang (str): target language id
        is_src (bool): whether lang_text is transcriptions
    Returns:
        xml_content (list): one string per output XML line
    """
    xml_content = []
    xml_content.append('<?xml version="1.0" encoding="UTF-8"?>')
    xml_content.append("<mteval>")
    # srcset wraps transcriptions, refset wraps reference translations
    if is_src:
        xml_content.append(f'<srcset setid="{split}" srclang="{src_lang}">')
    else:
        xml_content.append(
            f'<refset setid="{split}" srclang="{src_lang}" trglang="{tgt_lang}" refid="ref">'
        )
    prev_talk_id = -1
    for sgm, txt in zip(segmentation, lang_text):
        talk_id = sgm["wav"].split(".wav")[0]
        if prev_talk_id != talk_id:
            # New talk: close the previous <doc> (if any) and open a new one.
            if prev_talk_id != -1:
                xml_content.append("</doc>")
            # add content (some does not matter, but is added to replicate the required format)
            xml_content.append(f'<doc docid="{talk_id}" genre="lectures">')
            xml_content.append("<keywords>does, not, matter</keywords>")
            xml_content.append("<speaker><NAME></speaker>")
            xml_content.append(f"<talkid>{talk_id}</talkid>")
            xml_content.append("<description>Blah blah blah.</description>")
            xml_content.append("<title>Title</title>")
            seg_id = 0
            prev_talk_id = talk_id
        seg_id += 1
        xml_content.append(f'<seg id="{seg_id}">{txt}</seg>')
    # Bug fix: only close a <doc> if one was opened (empty input previously
    # produced a stray "</doc>").
    if prev_talk_id != -1:
        xml_content.append("</doc>")
    if is_src:
        xml_content.append("</srcset>")
    else:
        xml_content.append("</refset>")
    # Bug fix: the closing root tag was emitted as "</mteval" (missing '>'),
    # which produced malformed XML.
    xml_content.append("</mteval>")
    return xml_content
def original_segmentation_to_xml(
    path_to_yaml: str, path_to_src_txt: str, path_to_tgt_txt: str, path_to_output: str
):
    """
    Given a segmentation yaml, and the transcriptions/translations files
    creates two xml files (one for source and one for target)
    that can be used by the mwerSegmenter tool to align the
    translations of a segmentation with the references.
    Only the source xml is written when source and target language coincide.
    """
    split = Path(path_to_yaml).stem
    # .suffix keeps the leading dot (e.g. ".en"), which is reused in the filenames
    src_lang = Path(path_to_src_txt).suffix
    tgt_lang = Path(path_to_tgt_txt).suffix
    path_to_output = Path(path_to_output)
    with open(path_to_yaml, "r") as yaml_file:
        segmentation = yaml.load(yaml_file, Loader=yaml.BaseLoader)
    with open(path_to_src_txt, "r") as src_lang_file:
        src_lang_text = src_lang_file.read().splitlines()
    # remove examples with empty source or target
    src_lang_text_, tgt_lang_text_ = [], []
    if src_lang != tgt_lang:
        with open(path_to_tgt_txt, "r") as tgt_lang_file:
            tgt_lang_text = tgt_lang_file.read().splitlines()
        for src, tgt in zip(src_lang_text, tgt_lang_text):
            if src and tgt:
                src_lang_text_.append(src)
                tgt_lang_text_.append(tgt)
    else:
        # bug fix: previously tgt_lang_text was read in the filtering loop without
        # ever being defined when the two languages are identical (NameError)
        src_lang_text_ = [src for src in src_lang_text if src]
    src_xml_content = create_xml_content(
        segmentation, src_lang_text_, split, src_lang, tgt_lang, True
    )
    src_path = path_to_output / f"{split}{src_lang}.xml"
    with open(src_path, "w", encoding="UTF-8") as xml_file:
        for line in src_xml_content:
            xml_file.write(line + "\n")
    print(f"Saved source transcriptions at {src_path}")
    if src_lang != tgt_lang:
        tgt_xml_content = create_xml_content(
            segmentation, tgt_lang_text_, split, src_lang, tgt_lang, False
        )
        tgt_path = path_to_output / f"{split}{tgt_lang}.xml"
        with open(tgt_path, "w", encoding="UTF-8") as xml_file:
            for line in tgt_xml_content:
                xml_file.write(line + "\n")
        print(f"Saved target translations at {tgt_path}")
if __name__ == "__main__":
    # Command-line interface: all four path arguments are mandatory.
    parser = argparse.ArgumentParser()
    cli_options = [
        ("--path_to_yaml", "-y", "absolute path to the yaml of the segmentation"),
        ("--path_to_src_txt", "-s", "absolute path to the text file of the transcriptions"),
        ("--path_to_tgt_txt", "-t", "absolute path to the text file of the translations"),
        ("--path_to_output", "-o", "absolute path to the directory of the output xmls"),
    ]
    for long_name, short_name, help_text in cli_options:
        parser.add_argument(long_name, short_name, required=True, type=str, help=help_text)
    args = parser.parse_args()
    original_segmentation_to_xml(
        args.path_to_yaml,
        args.path_to_src_txt,
        args.path_to_tgt_txt,
        args.path_to_output,
    )
|
#!/usr/bin/env python
u"""
ecco_monthly_harmonics.py
Written by <NAME> (10/2021)
Reads monthly ECCO ocean bottom pressure anomalies and converts to
spherical harmonic coefficients
INPUTS:
ECCO Near Real-Time models
kf080i: Kalman filter analysis
https://ecco.jpl.nasa.gov/drive/files/NearRealTime/KalmanFilter/
dr080i: RTS smoother analysis
https://ecco.jpl.nasa.gov/drive/files/NearRealTime/Smoother/
ECCO2 Cube92 models
Cube92
ECCO version 4 models
V4r3: Version 4, Revision 3
V4r4: Version 4, Revision 4
COMMAND LINE OPTIONS:
-D X, --directory X: Working data directory
-Y X, --year X: Years to run
-l X, --lmax X: maximum spherical harmonic degree
-m X, --mmax X: maximum spherical harmonic order
-n X, --love X: Load Love numbers dataset
0: Han and Wahr (1995) values from PREM
1: Gegout (2005) values from PREM
2: Wang et al. (2012) values from PREM
--reference X: Reference frame for load love numbers
CF: Center of Surface Figure (default)
CM: Center of Mass of Earth System
CE: Center of Mass of Solid Earth
-F X, --format X: Input and output data format
ascii
netcdf
HDF5
-V, --verbose: Output information for each output file
-M X, --mode X: Permission mode of directories and files
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
dateutil: powerful extensions to datetime
https://dateutil.readthedocs.io/en/stable/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
h5py: Pythonic interface to the HDF5 binary data format.
https://www.h5py.org/
PROGRAM DEPENDENCIES:
plm_holmes.py: computes fully-normalized associated Legendre polynomials
read_love_numbers.py: reads Load Love Numbers from Han and Wahr (1995)
ref_ellipsoid.py: calculate reference parameters for common ellipsoids
norm_gravity.py: calculates the normal gravity for locations on an ellipsoid
gen_pressure_stokes.py: converts a pressure field into spherical harmonics
harmonics.py: spherical harmonic data class for processing GRACE/GRACE-FO
destripe_harmonics.py: calculates the decorrelation (destriping) filter
and filters the GRACE/GRACE-FO coefficients for striping errors
ncdf_read_stokes.py: reads spherical harmonic netcdf files
ncdf_stokes.py: writes output spherical harmonic data to netcdf
hdf5_read_stokes.py: reads spherical harmonic HDF5 files
hdf5_stokes.py: writes output spherical harmonic data to HDF5
spatial.py: spatial data class for reading, writing and processing data
ncdf_read.py: reads input spatial data from netCDF4 files
hdf5_read.py: reads input spatial data from HDF5 files
ncdf_write.py: writes output spatial data to netCDF4
hdf5_write.py: writes output spatial data to HDF5
time.py: utilities for calculating time operations
utilities.py: download and management utilities for files
UPDATE HISTORY:
Updated 10/2021: using python logging for handling verbose output
use output harmonic file wrapper routine to write to file
Updated 09/2021: use GRACE/GRACE-FO month to calendar month converters
Updated 07/2021: can use input files to define command line arguments
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 03/2021: automatically update years to run based on current time
Updated 02/2021: separate inputs to gen_pressure_stokes
Updated 01/2021: added Cube92 choice to input model types
outputs from gen_pressure_stokes are now harmonics objects
Updated 12/2020: use argparse to set command line parameters
using spatial and harmonics modules for read/write operations
added more love number options. using utilities from time module
Updated 10/2019: changing Y/N flags to True/False
Updated 06/2019: recommending kf080i for the Kalman filtered solution
Updated 10/2018: separated gen_pressure_stokes into separate function
Updated 07/2018: output index and date files in separate loop for all files
Updated 03/2018: use realistic geometry from bathymetry and local gravity
simplified love number extrapolation if LMAX is greater than 696
Updated 01/2018: using getopt to set parameters
Updated 08/2017: convert from geodetic coordinates to geocentric
Updated 08/2016: fixed find_new_files function with previous updates
Updated 06/2016: can use dr080g model, using __future__ print option
Updated 05/2016: complete rewrite of program
Written 05/2013
"""
from __future__ import print_function
import os
import re
import logging
import netCDF4
import argparse
import numpy as np
import gravity_toolkit.time
import gravity_toolkit.spatial
import gravity_toolkit.harmonics
import gravity_toolkit.utilities as utilities
from gravity_toolkit.plm_holmes import plm_holmes
from gravity_toolkit.read_love_numbers import read_love_numbers
from model_harmonics.gen_pressure_stokes import gen_pressure_stokes
from geoid_toolkit.ref_ellipsoid import ref_ellipsoid
from geoid_toolkit.norm_gravity import norm_gravity
#-- PURPOSE: convert monthly ECCO OBP data to spherical harmonics
def ecco_monthly_harmonics(ddir, MODEL, YEARS, LMAX=0, MMAX=None,
    LOVE_NUMBERS=0, REFERENCE=None, DATAFORM=None, VERBOSE=False,
    MODE=0o775):
    """
    Reads monthly ECCO ocean bottom pressure (OBP) anomaly grids from
    ddir/ECCO_<MODEL>_AveRmvd_OBP, converts each monthly file to
    spherical harmonic coefficients with gen_pressure_stokes, writes one
    harmonic file per month, and finally writes a date file and an index
    file listing all harmonic files found in the output directory.

    Arguments
    ---------
    ddir: working data directory
    MODEL: ECCO model to run (kf080i, dr080i, Cube92, V4r3 or V4r4)
    YEARS: years of model outputs to run (None matches any year)

    Keyword arguments
    -----------------
    LMAX: maximum spherical harmonic degree
    MMAX: maximum spherical harmonic order (default: LMAX)
    LOVE_NUMBERS: Load Love numbers dataset (0, 1 or 2)
    REFERENCE: reference frame for load Love numbers (CF, CM or CE)
    DATAFORM: input and output data format (ascii, netCDF4 or HDF5)
    VERBOSE: print information via logging at INFO level
    MODE: permission mode of output directories and files
    """
    #-- create logger for verbosity level
    loglevel = logging.INFO if VERBOSE else logging.CRITICAL
    logging.basicConfig(level=loglevel)
    #-- input and output subdirectory
    input_sub = 'ECCO_{0}_AveRmvd_OBP'.format(MODEL)
    output_sub = 'ECCO_{0}_AveRmvd_OBP_CLM_L{1:d}'.format(MODEL,LMAX)
    #-- upper bound of spherical harmonic orders (default = LMAX)
    MMAX = np.copy(LMAX) if not MMAX else MMAX
    #-- output string for both LMAX == MMAX and LMAX != MMAX cases
    order_str = 'M{0:d}'.format(MMAX) if (MMAX != LMAX) else ''
    #-- output file format
    output_file_format = 'ECCO_{0}_AveRmvd_OBP_CLM_L{1:d}{2}_{3:03d}.{4}'
    #-- Creating subdirectory if it doesn't exist
    if (not os.access(os.path.join(ddir,output_sub), os.F_OK)):
        os.makedirs(os.path.join(ddir,output_sub),MODE)
    #-- input/output data file format
    suffix = dict(ascii='txt', netCDF4='nc', HDF5='H5')
    #-- parameters for each model
    if MODEL in ('kf080i','dr080i'):
        #-- grid step size
        dlon,dlat = (1.0,1.0)
        #-- grid extent
        LAT_MAX = 78.5
        extent = [0.5,359.5,-LAT_MAX,LAT_MAX]
        input_depth_file = os.path.join(ddir,'depth.nc')
        input_geoid_file = os.path.join(ddir,'egm_2008.nc')
        #-- indices to read
        indices = np.arange(1,2*LAT_MAX+2).astype(np.int64)
    elif MODEL in ('Cube92',):
        #-- grid step size
        dlon,dlat = (0.25,0.25)
        #-- grid extent
        extent = [0.125,359.875,-89.875,89.875]
        input_depth_file = os.path.join(ddir,'DEPTH.2020.1440x720.nc')
        input_geoid_file = os.path.join(ddir,'EGM_2008.1440x720.nc')
        #-- indices to read (all)
        indices = Ellipsis
    elif MODEL in ('V4r3','V4r4'):
        #-- grid step size
        dlon,dlat = (0.5,0.5)
        #-- grid extent
        extent = [-179.75,179.75,-89.75,89.75]
        input_depth_file = os.path.join(ddir,'DEPTH.2020.720x360.nc')
        input_geoid_file = os.path.join(ddir,'EGM_2008.720x360.nc')
        #-- indices to read (all)
        indices = Ellipsis
    #-- input grid dimensions
    glon = np.arange(extent[0],extent[1]+dlon,dlon)
    glat = np.arange(extent[2],extent[3]+dlat,dlat)
    #-- create mesh grids of datasets
    gridlon,gridlat = np.meshgrid(glon,glat)
    #-- read geoid and depth to calculate bathymetry
    depth = ncdf_depth(input_depth_file, indices=indices)
    geoid_undulation,gridstep = ncdf_geoid(input_geoid_file, indices=indices)
    bathymetry = geoid_undulation - depth
    #-- Earth Parameters
    ellipsoid_params = ref_ellipsoid('WGS84')
    #-- semimajor axis of ellipsoid [m]
    a_axis = ellipsoid_params['a']
    #-- first numerical eccentricity
    ecc1 = ellipsoid_params['ecc1']
    #-- convert from geodetic latitude to geocentric latitude
    #-- geodetic latitude in radians
    latitude_geodetic_rad = np.pi*gridlat/180.0
    #-- prime vertical radius of curvature
    N = a_axis/np.sqrt(1.0 - ecc1**2.*np.sin(latitude_geodetic_rad)**2.)
    #-- calculate X, Y and Z from geodetic latitude and longitude
    X = (N+bathymetry)*np.cos(latitude_geodetic_rad)*np.cos(np.pi*gridlon/180.0)
    Y = (N+bathymetry)*np.cos(latitude_geodetic_rad)*np.sin(np.pi*gridlon/180.0)
    Z = (N * (1.0 - ecc1**2.0) + bathymetry) * np.sin(latitude_geodetic_rad)
    R = np.sqrt(X**2.0 + Y**2.0 + Z**2.0)
    #-- calculate geocentric latitude and convert to degrees
    latitude_geocentric = 180.0*np.arctan(Z / np.sqrt(X**2.0 + Y**2.0))/np.pi
    #-- colatitude in radians
    theta = (90.0 - latitude_geocentric)*np.pi/180.0
    #-- calculate normal gravity at latitudes and bathymetry
    gamma_h,dgamma_dh = norm_gravity(latitude_geocentric,bathymetry,'WGS84')
    #-- read load love numbers
    LOVE = load_love_numbers(LMAX,LOVE_NUMBERS=LOVE_NUMBERS,REFERENCE=REFERENCE)
    #-- calculate Legendre polynomials
    PLM,dPLM = plm_holmes(LMAX,np.cos(theta[:,0]))
    #-- regular expression pattern to find files and extract dates
    regex_years = r'\d+' if (YEARS is None) else '|'.join(map(str,YEARS))
    args = (MODEL, regex_years, suffix[DATAFORM])
    rx = re.compile(r'ECCO_{0}_AveRmvd_OBP_({1})_(\d+).{2}$'.format(*args))
    #-- find input ECCO OBP files
    FILES = [fi for fi in os.listdir(os.path.join(ddir,input_sub)) if rx.match(fi)]
    #-- for each input file
    for f in sorted(FILES):
        #-- extract dates from file
        year,month = np.array(rx.findall(f).pop(), dtype=np.int64)
        #-- read input data file
        if (DATAFORM == 'ascii'):
            obp_data = gravity_toolkit.spatial(spacing=[dlon,dlat],
                nlat=150,nlon=360,extent=extent).from_ascii(
                os.path.join(ddir,input_sub,f))
        elif (DATAFORM == 'netCDF4'):
            obp_data = gravity_toolkit.spatial().from_netCDF4(
                os.path.join(ddir,input_sub,f))
        elif (DATAFORM == 'HDF5'):
            obp_data = gravity_toolkit.spatial().from_HDF5(
                os.path.join(ddir,input_sub,f))
        #-- replace fill value points with 0
        obp_data.replace_invalid(0.0)
        #-- calculate spherical harmonics from pressure/gravity ratio
        obp_Ylms = gen_pressure_stokes(obp_data.data, gamma_h, R,
            glon, latitude_geocentric[:,0], LMAX=LMAX, MMAX=MMAX,
            PLM=PLM, LOVE=LOVE)
        obp_Ylms.time = np.copy(obp_data.time)
        obp_Ylms.month = gravity_toolkit.time.calendar_to_grace(year,month)
        #-- output spherical harmonic data file
        args = (MODEL, LMAX, order_str, obp_Ylms.month, suffix[DATAFORM])
        FILE = output_file_format.format(*args)
        obp_Ylms.to_file(os.path.join(ddir,output_sub,FILE),format=DATAFORM)
        #-- change the permissions mode of the output file to MODE
        os.chmod(os.path.join(ddir,output_sub,FILE),MODE)
    #-- Output date ascii file
    output_date_file = 'ECCO_{0}_OBP_DATES.txt'.format(MODEL)
    fid1 = open(os.path.join(ddir,output_sub,output_date_file), 'w')
    #-- date file header information
    print('{0:8} {1:^6} {2:^5}'.format('Mid-date','GRACE','Month'), file=fid1)
    #-- index file listing all output spherical harmonic files
    output_index_file = 'index.txt'
    fid2 = open(os.path.join(ddir,output_sub,output_index_file),'w')
    #-- find all available output files
    args = (MODEL, LMAX, suffix[DATAFORM])
    #-- NOTE(review): this pattern omits the M{order} string inserted into the
    #-- output filenames when MMAX != LMAX, so those files would not be indexed
    #-- here -- confirm whether that case is intended to be supported
    output_regex=r'ECCO_{0}_AveRmvd_OBP_CLM_L{1:d}_([-]?\d+).{2}'.format(*args)
    #-- find all output ECCO OBP harmonic files (not just ones created in run)
    output_files = [fi for fi in os.listdir(os.path.join(ddir,output_sub))
        if re.match(output_regex,fi)]
    for fi in sorted(output_files):
        #-- extract GRACE month
        grace_month, = np.array(re.findall(output_regex,fi),dtype=np.int64)
        YY,MM = gravity_toolkit.time.grace_to_calendar(grace_month)
        tdec, = gravity_toolkit.time.convert_calendar_decimal(YY, MM)
        #-- full path to output file
        full_output_file = os.path.join(ddir,output_sub,fi)
        #-- print date, GRACE month and calendar month to date file
        fid1.write('{0:11.6f} {1:03d} {2:02.0f}\n'.format(tdec,grace_month,MM))
        #-- print output file to index
        print(full_output_file.replace(os.path.expanduser('~'),'~'), file=fid2)
    #-- close the date and index files
    fid1.close()
    fid2.close()
    #-- set the permissions level of the output date and index files to MODE
    os.chmod(os.path.join(ddir,output_sub,output_date_file), MODE)
    os.chmod(os.path.join(ddir,output_sub,output_index_file), MODE)
#-- PURPOSE: read ECCO2 depth file
#-- ftp://mit.ecco-group.org/ecco_for_las/grid_fields/
def ncdf_depth(FILENAME, indices=Ellipsis):
    """Read the ocean depth grid from a netCDF4 file, zeroing fill values"""
    fileID = netCDF4.Dataset(FILENAME, 'r')
    try:
        variable = fileID.variables['depth']
        invalid = variable._FillValue
        bathy = np.array(variable[indices,:])
    finally:
        fileID.close()
    #-- replace masked points with zero depth
    bathy[bathy == invalid] = 0.0
    return bathy
#-- PURPOSE: read geoid height netCDF4 files from read_gfz_geoid_grids.py
def ncdf_geoid(FILENAME, indices=Ellipsis):
    """Read geoid undulation and grid spacing from a netCDF4 file"""
    with netCDF4.Dataset(FILENAME, 'r') as fileID:
        undulation = np.array(fileID.variables['geoid'][indices,:])
        #-- comma-separated grid spacing stored as a file attribute
        spacing = np.squeeze([float(s) for s in fileID.gridstep.split(',')])
    return (undulation, spacing)
#-- PURPOSE: read load love numbers for the range of spherical harmonic degrees
def load_love_numbers(LMAX, LOVE_NUMBERS=0, REFERENCE='CF'):
    """
    Reads PREM load Love numbers up to spherical harmonic degree LMAX and
    applies isomorphic parameters for the chosen reference frame

    Arguments
    ---------
    LMAX: maximum spherical harmonic degree

    Keyword arguments
    -----------------
    LOVE_NUMBERS: Load Love numbers dataset
        0: Han and Wahr (1995) values from PREM
        1: Gegout (2005) values from PREM
        2: Wang et al. (2012) values from PREM
    REFERENCE: Reference frame for calculating degree 1 love numbers
        CF: Center of Surface Figure (default)
        CM: Center of Mass of Earth System
        CE: Center of Mass of Solid Earth

    Returns
    -------
    hl: Love number of Vertical Displacement
    kl: Love number of Gravitational Potential
    ll: Love number of Horizontal Displacement
    """
    #-- data file path, header length and column order for each dataset
    #-- 0: Han and Wahr (1995), https://doi.org/10.1111/j.1365-246X.1995.tb01819.x
    #-- 1: Gegout (2005), http://gemini.gsfc.nasa.gov/aplo/
    #-- 2: Wang et al. (2012), https://doi.org/10.1016/j.cageo.2012.06.022
    dataset_parameters = {
        0: (['data','love_numbers'], 2, ['l','hl','kl','ll']),
        1: (['data','Load_Love2_CE.dat'], 3, ['l','hl','ll','kl']),
        2: (['data','PREM-LLNs-truncated.dat'], 1, ['l','hl','ll','kl','nl','nk']),
    }
    file_path, header, columns = dataset_parameters[LOVE_NUMBERS]
    love_numbers_file = utilities.get_data_path(file_path)
    #-- LMAX of load love numbers from Han and Wahr (1995) is 696.
    #-- from Wahr (2007) linearly interpolating kl works
    #-- however, as we are linearly extrapolating out, do not make
    #-- LMAX too much larger than 696
    #-- read arrays of kl, hl, and ll Love Numbers
    hl,kl,ll = read_love_numbers(love_numbers_file, LMAX=LMAX, HEADER=header,
        COLUMNS=columns, REFERENCE=REFERENCE, FORMAT='tuple')
    #-- return a tuple of load love numbers
    return (hl,kl,ll)
#-- Main program that calls ecco_monthly_harmonics()
def main():
    """Parses command-line arguments and runs ecco_monthly_harmonics()
    once for each requested ECCO model"""
    #-- Read the system arguments listed after the program
    parser = argparse.ArgumentParser(
        description="""Reads monthly ECCO ocean bottom pressure
            anomalies and converts to spherical harmonic coefficients
            """,
        fromfile_prefix_chars="@"
    )
    parser.convert_arg_line_to_args = utilities.convert_arg_line_to_args
    #-- command line parameters
    parser.add_argument('model',
        metavar='MODEL', type=str, nargs='+',
        default=['kf080i','dr080i'],
        choices=['kf080i','dr080i','Cube92','V4r3','V4r4'],
        help='ECCO Model')
    #-- working data directory
    parser.add_argument('--directory','-D',
        type=lambda p: os.path.abspath(os.path.expanduser(p)),
        default=os.getcwd(),
        help='Working data directory')
    #-- years to run
    #-- NOTE(review): relies on gravity_toolkit.time re-exporting the datetime
    #-- module -- confirm against that module's contents
    now = gravity_toolkit.time.datetime.datetime.now()
    parser.add_argument('--year','-Y',
        type=int, nargs='+', default=range(2000,now.year+1),
        help='Years of model outputs to run')
    #-- maximum spherical harmonic degree and order
    parser.add_argument('--lmax','-l',
        type=int, default=60,
        help='Maximum spherical harmonic degree')
    parser.add_argument('--mmax','-m',
        type=int, default=None,
        help='Maximum spherical harmonic order')
    #-- different treatments of the load Love numbers
    #-- 0: Han and Wahr (1995) values from PREM
    #-- 1: Gegout (2005) values from PREM
    #-- 2: Wang et al. (2012) values from PREM
    parser.add_argument('--love','-n',
        type=int, default=0, choices=[0,1,2],
        help='Treatment of the Load Love numbers')
    #-- option for setting reference frame for gravitational load love number
    #-- reference frame options (CF, CM, CE)
    parser.add_argument('--reference',
        type=str.upper, default='CF', choices=['CF','CM','CE'],
        help='Reference frame for load Love numbers')
    #-- input and output data format (ascii, netCDF4, HDF5)
    parser.add_argument('--format','-F',
        type=str, default='netCDF4', choices=['ascii','netCDF4','HDF5'],
        help='Input and output data format')
    #-- print information about each input and output file
    parser.add_argument('--verbose','-V',
        default=False, action='store_true',
        help='Verbose output of run')
    #-- permissions mode of the local directories and files (number in octal)
    parser.add_argument('--mode','-M',
        type=lambda x: int(x,base=8), default=0o775,
        help='Permission mode of directories and files')
    args,_ = parser.parse_known_args()
    #-- for each ECCO model
    for MODEL in args.model:
        #-- run program
        ecco_monthly_harmonics(args.directory, MODEL, args.year,
            LMAX=args.lmax, MMAX=args.mmax, LOVE_NUMBERS=args.love,
            REFERENCE=args.reference, DATAFORM=args.format,
            VERBOSE=args.verbose, MODE=args.mode)
#-- run main program when executed as a script
if __name__ == '__main__':
    main()
|
<reponame>hpcaitech/ColossalAI-Examples
import math
import torch
from colossalai import nn as col_nn
from colossalai.registry import LAYERS, MODELS
from torch import dtype, nn
# Public API: one factory function per MLP-Mixer variant.
__all__ = [
    'mixer_s32',
    'mixer_s16',
    'mixer_b32',
    'mixer_b16',
    'mixer_l32',
    'mixer_l16',
    'mixer_h14',
]
# Weight/bias initializer presets applied to every col_nn.Linear layer,
# selected by the `init_method` constructor argument of the model classes.
# 'torch' uses kaiming-uniform weights with xavier-uniform biases;
# 'jax' uses xavier-uniform weights with small-normal biases
# (presumably to mirror the original JAX/Flax Mixer init -- confirm).
_init_rules = dict(
    torch=dict(
        transformer=dict(
            weight_initializer=col_nn.init.kaiming_uniform_(a=math.sqrt(5)),
            bias_initializer=col_nn.init.xavier_uniform_(a=1, scale=1),
        ),
    ),
    jax=dict(
        transformer=dict(
            weight_initializer=col_nn.init.xavier_uniform_(),
            bias_initializer=col_nn.init.normal_(std=1e-6),
        ),
    ),
)
@LAYERS.register_module
class MlpBlock(nn.Module):
    """Two-layer feed-forward block (hidden_dim -> mlp_dim -> hidden_dim)
    with a GELU activation.

    Bug fix: the Dropout layer was constructed in __init__ but never
    applied in forward; it is now applied after the activation and after
    the second linear layer. With the default dropout=0. the computed
    output is unchanged.
    """

    def __init__(self,
                 hidden_dim: int,
                 mlp_dim: int,
                 dropout: float = 0.,
                 dtype: dtype = None,
                 bias: bool = True,
                 init_method: str = 'torch'):
        super(MlpBlock, self).__init__()
        self.Linear = col_nn.Linear(hidden_dim,
                                    mlp_dim,
                                    dtype=dtype,
                                    bias=bias,
                                    **_init_rules[init_method]['transformer'])
        self.dropout = col_nn.Dropout(dropout)
        self.GELU = nn.GELU()
        self.Linear1 = col_nn.Linear(mlp_dim,
                                     hidden_dim,
                                     dtype=dtype,
                                     bias=bias,
                                     **_init_rules[init_method]['transformer'])

    def forward(self, x):
        x = self.Linear(x)
        x = self.GELU(x)
        # dropout after activation and after projection (identity when p=0)
        x = self.dropout(x)
        x = self.Linear1(x)
        x = self.dropout(x)
        return x
@LAYERS.register_module
class MixerBlock(nn.Module):
    """One MLP-Mixer block: token-mixing MLP followed by channel-mixing MLP,
    each preceded by LayerNorm and wrapped in a residual connection."""

    def __init__(self,
                 num_tokens: int,
                 hidden_dim: int,
                 tokens_mlp_dim: int,
                 channels_mlp_dim: int,
                 layernorm_epsilon: float = 1e-6,
                 dtype: dtype = None):
        super(MixerBlock, self).__init__()
        self.ln_token = col_nn.LayerNorm(normalized_shape=hidden_dim, eps=layernorm_epsilon, dtype=dtype)
        # NOTE(review): dtype (and dropout) are not forwarded to the MlpBlocks,
        # unlike the LayerNorms above/below -- confirm whether intended
        self.token_mix = MlpBlock(num_tokens, tokens_mlp_dim)
        self.ln_channel = col_nn.LayerNorm(normalized_shape=hidden_dim, eps=layernorm_epsilon, dtype=dtype)
        self.channel_mix = MlpBlock(hidden_dim, channels_mlp_dim)

    def forward(self, x):
        # token mixing operates across the token axis, hence the transposes
        out = self.ln_token(x).transpose(1, 2)
        x = x + self.token_mix(out).transpose(1, 2)
        # channel mixing operates on the hidden (channel) axis
        out = self.ln_channel(x)
        x = x + self.channel_mix(out)
        return x
@LAYERS.register_module
class MlpMixer(nn.Module):
    """MLP-Mixer backbone: conv patch embedding, a stack of MixerBlocks,
    final LayerNorm, global average pooling over tokens, and a linear
    classification head."""

    def __init__(self,
                 num_classes: int,
                 num_blocks: int,
                 patch_size: int,
                 hidden_dim: int,
                 tokens_mlp_dim: int,
                 channels_mlp_dim: int,
                 image_size=224,
                 layernorm_epsilon: float = 1e-6,
                 dtype: dtype = None,
                 bias: bool = True,
                 init_method: str = 'torch'):
        super(MlpMixer, self).__init__()
        # number of patches per image (assumes image_size divisible by patch_size)
        num_tokens = (image_size // patch_size)**2
        # non-overlapping patch embedding via strided convolution
        self.patch_emb = nn.Conv2d(3, hidden_dim, kernel_size=patch_size, stride=patch_size, bias=False)
        self.mlp = nn.Sequential(*[MixerBlock(num_tokens, hidden_dim, tokens_mlp_dim, channels_mlp_dim,layernorm_epsilon,dtype) for _ in range(num_blocks)])
        self.ln = col_nn.LayerNorm(normalized_shape=hidden_dim, eps=layernorm_epsilon, dtype=dtype)
        self.fc =col_nn.Linear(hidden_dim,
                               num_classes,
                               dtype=dtype,
                               bias=bias,
                               **_init_rules[init_method]['transformer'])

    def forward(self, x):
        x = self.patch_emb(x)
        # (B, C, H, W) -> (B, tokens, channels)
        x = x.flatten(2).transpose(1, 2)
        x = self.mlp(x)
        x = self.ln(x)
        # global average pooling over the token axis
        x = x.mean(dim=1)
        x = self.fc(x)
        return x
@MODELS.register_module
def mixer_s32(num_classes=1000, image_size=224, patch_size=32, **kwargs):
    """Mixer-S/32: 8 blocks, hidden 512, token-MLP 256, channel-MLP 2048."""
    return MlpMixer(num_classes=num_classes, num_blocks=8, patch_size=patch_size,
                    hidden_dim=512, tokens_mlp_dim=256, channels_mlp_dim=2048,
                    image_size=image_size, **kwargs)
@MODELS.register_module
def mixer_s16(num_classes=1000, image_size=224, patch_size=16, **kwargs):
    """Mixer-S/16: 8 blocks, hidden 512, token-MLP 256, channel-MLP 2048.

    Bug fix: a stray positional argument (16) shifted every size parameter
    one slot right, so image_size landed in channels_mlp_dim and image_size
    silently became 2048; also the default patch_size is corrected from 32
    to 16 to match the model name.
    """
    return MlpMixer(num_classes, 8, patch_size, 512, 256, 2048, image_size, **kwargs)
@MODELS.register_module
def mixer_b32(num_classes=1000, image_size=224, patch_size=32, **kwargs):
    """Mixer-B/32: 12 blocks, hidden 768, token-MLP 384, channel-MLP 3072.

    Bug fix: a stray positional argument (32) shifted every size parameter
    one slot right; image_size is now passed in its proper position.
    """
    return MlpMixer(num_classes, 12, patch_size, 768, 384, 3072, image_size, **kwargs)
@MODELS.register_module
def mixer_b16(num_classes=1000, image_size=224, patch_size=16, **kwargs):
    """Mixer-B/16: 12 blocks, hidden 768, token-MLP 384, channel-MLP 3072.

    Bug fix: a stray positional argument (16) shifted every size parameter
    one slot right; also the default patch_size is corrected from 32 to 16
    to match the model name.
    """
    return MlpMixer(num_classes, 12, patch_size, 768, 384, 3072, image_size, **kwargs)
@MODELS.register_module
def mixer_l32(num_classes=1000, image_size=224, patch_size=32, **kwargs):
    """Mixer-L/32: 24 blocks, hidden 1024, token-MLP 512, channel-MLP 4096.

    Bug fix: a stray positional argument (32) shifted every size parameter
    one slot right; image_size is now passed in its proper position.
    """
    return MlpMixer(num_classes, 24, patch_size, 1024, 512, 4096, image_size, **kwargs)
@MODELS.register_module
def mixer_l16(num_classes=1000, image_size=224, patch_size=16, **kwargs):
    """Mixer-L/16: 24 blocks, hidden 1024, token-MLP 512, channel-MLP 4096.

    Bug fix: a stray positional argument (16) shifted every size parameter
    one slot right; also the default patch_size is corrected from 32 to 16
    to match the model name.
    """
    return MlpMixer(num_classes, 24, patch_size, 1024, 512, 4096, image_size, **kwargs)
@MODELS.register_module
def mixer_h14(num_classes=1000, image_size=224, patch_size=14, **kwargs):
    """Mixer-H/14: 32 blocks, hidden 1280, token-MLP 640, channel-MLP 5120.

    Bug fix: a stray positional argument (14) shifted every size parameter
    one slot right; also the default patch_size is corrected from 32 to 14
    to match the model name.
    """
    return MlpMixer(num_classes, 32, patch_size, 1280, 640, 5120, image_size, **kwargs)
|
<reponame>DarthReca/AML-Project
import argparse
from typing import Tuple
import torch
from optimizer_helper import get_optim_and_scheduler
from torch import nn
from torch.utils.data import DataLoader
from resnet import ResNet, Classifier
import os
#### Implement Step1
def _do_epoch(
    args: argparse.Namespace,
    feature_extractor: ResNet,
    rot_cls: Classifier,
    obj_cls: Classifier,
    source_loader: DataLoader,
    optimizer: torch.optim.Optimizer,
    device: torch.device,
) -> Tuple[float, float, float, float]:
    """
    Runs one training epoch of joint object-recognition and
    rotation-recognition on the source domain.

    Parameters
    ----------
    args : argparse.Namespace
        Namespace with various args; only ``weight_RotTask_step1`` is read
        here (weight of the rotation loss in the total loss).
    feature_extractor : ResNet
        Feature extractor for images.
    rot_cls : Classifier
        Classifier head for the rotation task; it receives the
        concatenation of original and rotated features.
    obj_cls : Classifier
        Classifier head for object recognition.
    source_loader : DataLoader
        DataLoader of the source domain, yielding
        (data, class_label, rotated_data, rotated_label) tuples.
    optimizer : torch.optim.Optimizer
        Our gradient optimizer.
    device : torch.device
        Where to put tensors.

    Returns
    -------
    class_loss
        Object-recognition loss of the LAST batch only (a tensor, despite
        the ``float`` annotation).
    class_acc : float
        Object-recognition accuracy over the whole epoch.
    rot_loss
        Rotation-recognition loss of the LAST batch only.
    rot_acc : float
        Rotation-recognition accuracy over the whole epoch.
    """
    criterion = nn.CrossEntropyLoss()
    feature_extractor.train()
    obj_cls.train()
    rot_cls.train()
    correct_classes = 0
    correct_rotations = 0
    for (data, class_label, rotated_data, rotated_label) in source_loader:
        data, class_label, rotated_data, rotated_label = (
            data.to(device),
            class_label.to(device),
            rotated_data.to(device),
            rotated_label.to(device),
        )
        optimizer.zero_grad()
        # Extract features
        original_features = feature_extractor(data)
        rotated_features = feature_extractor(rotated_data)
        # Pass features to classifiers
        class_scores = obj_cls(original_features)
        # Here we have to concatenate tensors as suggested by the architecture
        rotation_scores = rot_cls(torch.cat([original_features, rotated_features], 1))
        # Now we can check the losses
        class_loss = criterion(class_scores, class_label)
        rot_loss = criterion(rotation_scores, rotated_label)
        # total loss: object loss plus weighted rotation (pretext) loss
        loss = class_loss + args.weight_RotTask_step1 * rot_loss
        loss.backward()
        optimizer.step()
        # Find which is the index that corresponds to the highest "probability"
        class_prediction = torch.argmax(class_scores, dim=1)
        rotation_prediction = torch.argmax(rotation_scores, dim=1)
        # Update counters
        correct_classes += torch.sum(class_prediction == class_label).item()
        correct_rotations += torch.sum(rotation_prediction == rotated_label).item()
    acc_cls = correct_classes / len(source_loader.dataset)
    acc_rot = correct_rotations / len(source_loader.dataset)
    # NOTE(review): the returned losses are those of the final batch only,
    # not epoch averages
    return class_loss, acc_cls, rot_loss, acc_rot
def step1(
    args: argparse.Namespace,
    feature_extractor: ResNet,
    rot_cls: Classifier,
    obj_cls: Classifier,
    source_loader: DataLoader,
    device: torch.device,
) -> None:
    """
    Trains the feature extractor and both classifier heads on the source
    domain for ``args.epochs_step1`` epochs, checkpointing every 10 epochs
    into the ``weights`` directory.

    Bug fix: ``scheduler.step()`` was previously called once AFTER the
    epoch loop, so the learning-rate schedule never advanced during
    training; it is now stepped once per epoch.
    """
    optimizer, scheduler = get_optim_and_scheduler(
        feature_extractor,
        rot_cls,
        obj_cls,
        args.epochs_step1,
        args.learning_rate,
        args.train_all,
    )
    for epoch in range(args.epochs_step1):
        print("Epoch: ", epoch)
        class_loss, acc_cls, rot_loss, acc_rot = _do_epoch(
            args, feature_extractor, rot_cls, obj_cls, source_loader, optimizer, device
        )
        print(
            "Class Loss %.4f, Class Accuracy %.4f,Rot Loss %.4f, Rot Accuracy %.4f"
            % (class_loss, acc_cls, rot_loss, acc_rot)
        )
        # checkpoint the three networks every 10 epochs
        if epoch % 10 == 0:
            if not os.path.isdir("weights"):
                os.mkdir("weights")
            torch.save(
                feature_extractor.state_dict(), f"weights/feature_extractor_{epoch}.pth"
            )
            torch.save(obj_cls.state_dict(), f"weights/object_classifier_{epoch}.pth")
            torch.save(rot_cls.state_dict(), f"weights/rotation_classifier_{epoch}.pth")
        # advance the LR schedule once per epoch (bug fix: was outside the loop)
        scheduler.step()
|
<filename>data/external/repositories_2to3/196684/Kaggle_xBle-master/model_library.py
import pandas as pd
import numpy as np
import pickle as pickle
import os
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVR, SVC
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge
from sklearn import neighbors
import xgboost as xgb
from hyperopt import fmin, tpe, hp, Trials, STATUS_OK, STATUS_FAIL
from hyperopt.mongoexp import MongoTrials
from utils import *
from param import config
import time
import sys
import multiprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD, Adadelta, Adagrad
global trials_counter
def keras_model():
    """Build a 33-20-10-1 sigmoid MLP regressor with dropout, compiled with
    SGD + momentum and MSE loss (legacy Keras 0.x layer API)."""
    optimizer = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    net = Sequential()
    net.add(Dense(33, 20, init='uniform', activation='sigmoid'))
    net.add(Dropout(0.5))
    net.add(Dense(20, 10, init='uniform', activation='sigmoid'))
    net.add(Dropout(0.5))
    net.add(Dense(10, 1, init='uniform', activation='linear'))
    net.compile(loss='mean_squared_error', optimizer=optimizer)
    return net
from nolearn.lasagne import NeuralNet
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.layers import DropoutLayer
from lasagne.updates import adagrad, nesterov_momentum
from lasagne.nonlinearities import softmax, sigmoid
from lasagne.objectives import categorical_crossentropy, binary_crossentropy
def lasagne_model(num_features, num_classes):
    """Build a 1024-512-256 dense nolearn/Lasagne network with 0.4 dropout
    between layers, sigmoid output, trained as a regressor with Nesterov
    momentum."""
    architecture = [
        ('input', InputLayer),
        ('dense0', DenseLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ]
    settings = dict(
        input_shape=(None, num_features),
        dense0_num_units=1024,
        dropout0_p=0.4,
        dense1_num_units=512,
        dropout1_p=0.4,
        dense2_num_units=256,
        dropout2_p=0.4,
        output_num_units=num_classes,
        output_nonlinearity=sigmoid,
        regression=True,
        update=nesterov_momentum,
        update_momentum=0.9,
        update_learning_rate=0.004,
        eval_size=0.2,
        verbose=0,
        max_epochs=30,
    )
    return NeuralNet(layers=architecture, **settings)
def train_model(path, x_train, y_train, x_test, y_test, feat, param_best_dic):
    """Fit every tuned model on one feature set and cache its test predictions.

    For each model name in ``config.model_list`` whose tuned parameters are
    present in ``param_best_dic`` (keyed ``"<feat>_<model>"``), train on the
    train split, predict the test split, and pickle the prediction vector to
    ``<path>/<feat>_<model>.pred.pkl``.  A model is skipped when its output
    file already exists, unless it is listed in ``config.update_model``.

    :param path: output directory for the ``*.pred.pkl`` files.
    :param x_train: training features.
    :param y_train: training labels.
    :param x_test: test features.
    :param y_test: test labels (only consumed by the xgboost watchlists).
    :param feat: feature-set name, used in file names and parameter keys.
    :param param_best_dic: mapping ``"<feat>_<model>"`` -> best parameters.
    """
    # Keep only the models that have tuned parameters for this feature set.
    model_list = []
    for model in config.model_list:
        if "%s_%s"%(feat, model) in param_best_dic:
            model_list.append(model)
    ######
    # approach different model
    # Deep Learning Model
    if model_list.count('dnn') > 0:
        model_type = 'dnn'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        # Retrain when explicitly requested or when no cached prediction exists.
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]  # loaded but unused in this branch
            # NOTE(review): deep_model is not defined in this file — presumably
            # provided via ``from utils import *``; confirm (keras_model above
            # looks like the intended builder).
            model = deep_model()
            model.fit(x_train, y_train, nb_epoch=2, batch_size=16)
            pred_val = model.predict( x_test, batch_size=16 )
            # flatten the (n, 1) network output to a 1-D vector
            pred_val = pred_val.reshape( pred_val.shape[0] )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # Nearest Neighbors
    if model_list.count('knn') > 0:
        model_type = 'knn'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            n_neighbors = model_param['n_neighbors']
            weights = model_param['weights']
            model = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights)
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # linear regression
    if model_list.count('linear') > 0:
        model_type = 'linear'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]  # unused: plain OLS has no tuned params
            model = LinearRegression()
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # logistic regression
    if model_list.count('logistic') > 0:
        model_type = 'logistic'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]  # unused in this branch
            model = LogisticRegression()
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # SVM regression
    if model_list.count('svr') > 0:
        model_type = 'svr'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            model = SVR(C=model_param['C'], epsilon=model_param['epsilon'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # random forest regression
    if model_list.count('rf') > 0:
        model_type = 'rf'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            model = RandomForestRegressor(n_estimators=model_param['n_estimators'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print('Done!')
    # extra tree regression
    if model_list.count('extratree') > 0:
        model_type = 'extratree'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            model = ExtraTreesRegressor(n_estimators=model_param['n_estimators'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # GBRT regression
    if model_list.count('gbf') > 0:
        model_type = 'gbf'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            model = GradientBoostingRegressor(n_estimators=model_param['n_estimators'])
            # GradientBoosting needs a dense array; densify sparse input first.
            if type(x_train) != np.ndarray:
                model.fit( x_train.toarray(), y_train )
                pred_val = model.predict( x_test.toarray() )
            else:
                model.fit( x_train, y_train )
                pred_val = model.predict( x_test )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # xgboost tree
    if model_list.count('xgb_tree') > 0:
        model_type = 'xgb_tree'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            params = model_param
            num_rounds = model_param['num_rounds']
            #create a train and validation dmatrices
            xgtrain = xgb.DMatrix(x_train, label=y_train)
            xgval = xgb.DMatrix(x_test, label=y_test)
            #train using early stopping and predict
            watchlist = [(xgtrain, "train"),(xgval, "val")]
            #model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=100, feval=gini_metric)
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=model_param['early_stopping_rounds'])
            # predict with the best early-stopped iteration only
            pred_val = model.predict( xgval, ntree_limit=model.best_iteration )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # xgboost rank pairwise
    if model_list.count('xgb_rank') > 0:
        model_type = 'xgb_rank'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            params = model_param
            num_rounds = model_param['num_rounds']
            #create a train and validation dmatrices
            xgtrain = xgb.DMatrix(x_train, label=y_train)
            xgval = xgb.DMatrix(x_test, label=y_test)
            #train using early stopping and predict
            watchlist = [(xgtrain, "train"),(xgval, "val")]
            #model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=100, feval=gini_metric)
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=model_param['early_stopping_rounds'])
            pred_val = model.predict( xgval, ntree_limit=model.best_iteration )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    # xgboost linear
    if model_list.count('xgb_linear') > 0:
        model_type = 'xgb_linear'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s training..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            params = model_param
            num_rounds = model_param['num_rounds']
            #create a train and validation dmatrices
            xgtrain = xgb.DMatrix(x_train, label=y_train)
            xgval = xgb.DMatrix(x_test, label=y_test)
            #train using early stopping and predict
            watchlist = [(xgtrain, "train"),(xgval, "val")]
            #model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=100, feval=gini_metric)
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=model_param['early_stopping_rounds'])
            # NOTE: no ntree_limit here — linear boosters have no trees.
            pred_val = model.predict( xgval )
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
    if model_list.count('xgb_art') > 0:
        model_type = 'xgb_art'
        pred_file = "%s/%s_%s.pred.pkl" %(path, feat, model_type)
        if config.update_model.count(model_type) > 0 or os.path.exists(pred_file) is False:
            print("%s trainning..." % model_type)
            model_param = param_best_dic["%s_%s" %(feat, model_type)]
            params = model_param
            num_rounds = model_param['num_rounds']
            # Hold out the first `valid_size` fraction of the train split for
            # early-stopping validation.
            offset = int(model_param['valid_size'] * y_train.shape[0]) + 1 # just for 4000
            #if type(x_train) != np.ndarray:
            #    x_train = x_train.toarray()
            #    x_test = x_test.toarray()
            xgtrain = xgb.DMatrix(x_train[offset:, :], label=y_train[offset:])
            xgval = xgb.DMatrix(x_train[:offset, :], label=y_train[:offset])
            watchlist = [(xgtrain, "train"), (xgval, "val")]
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds = model_param['early_stopping_rounds'])
            xgtest = xgb.DMatrix(x_test)
            pred_val1 = model.predict(xgtest, ntree_limit=model.best_iteration)
            # reverse train, and log label
            # Second run: reversed row order and log-transformed labels, then
            # blend the two prediction vectors with fixed weights.
            x_train_tmp = x_train[::-1, :]
            y_train_tmp = np.log(y_train[::-1])
            xgtrain = xgb.DMatrix(x_train_tmp[offset:, :], label=y_train_tmp[offset:])
            xgval = xgb.DMatrix(x_train_tmp[:offset, :], label=y_train_tmp[:offset])
            watchlist = [(xgtrain, "train"), (xgval, "val")]
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds = model_param['early_stopping_rounds'])
            pred_val2 = model.predict(xgtest, ntree_limit=model.best_iteration)
            pred_val = pred_val1*1.5 + pred_val2*8.5
            with open(pred_file, 'wb') as f:
                pickle.dump(pred_val, f, -1)
            print("Done!")
######
def one_model():
    """Debug helper: evaluate a single hard-coded model over the CV folds.

    Uses the fixed feature list ``['label']`` and model type ``'extratree'``
    with the default parameter space from ``config.param_spaces`` (loading of
    previously tuned best parameters is commented out).  Prints the fold
    score for each (iter, fold) cell.
    """
    # load feat names
    #feat_names = config.feat_names
    feat_names = ['label']
    model_type = "extratree"
    model_param = config.param_spaces[model_type]
    ## load best params for each model (feat, model)
    #with open("%s/model_best_params" %config.data_folder) as f:
    #    param_best_dic = pickle.load(f)
    ## supply the extra parameter from config.param_spaces
    #for feat in config.feat_names:
    #    for model in config.model_list:
    #        if param_best_dic.has_key("%s_%s"%(feat, model)):
    #            param_space = config.param_spaces[model]
    #            for key in param_space.keys():
    #                if param_best_dic["%s_%s"%(feat, model)].has_key(key) is False:
    #                    param_best_dic["%s_%s"%(feat, model)][key] = param_space[key]
    #print param_best_dic
    # load feat, cross validation
    for iter in range(config.kiter):
        for fold in range(config.kfold):
            for feat in feat_names:
                print("Gen pred for (iter%d, fold%d, %s) cross validation" %(iter, fold, feat))
                with open("%s/iter%d/fold%d/train.%s.feat.pkl" %(config.data_folder, iter, fold, feat), 'rb') as f:
                    [x_train, y_train] = pickle.load(f)
                with open("%s/iter%d/fold%d/valid.%s.feat.pkl" %(config.data_folder, iter, fold, feat), 'rb') as f:
                    [x_test, y_test] = pickle.load(f)
                path = "%s/iter%d/fold%d" %(config.data_folder, iter, fold)
                #train_model(path, x_train, y_train, x_val, y_val, feat, param_best_dic)
                pred_val = hyperopt_library(model_type, model_param, x_train, y_train, x_test, y_test)
                print("ml score is %f" %ml_score(y_test, pred_val))
                # only the first feature name is evaluated per fold
                break
    ## load feat, train/test
    #for feat in feat_names:
    #    print "Gen pred for (%s) all test data" %(feat)
    #    with open("%s/all/train.%s.feat.pkl" %(config.data_folder, feat), 'rb') as f:
    #        [x_train, y_train] = pickle.load(f)
    #    with open("%s/all/test.%s.feat.pkl" %(config.data_folder, feat), 'rb') as f:
    #        [x_test, y_test] = pickle.load(f)
    #    path = "%s/all" %(config.data_folder)
    #    train_model(path, x_train, y_train, x_test, y_test, feat, param_best_dic)
def hyperopt_wrapper(param, model_type, feat):
    """Hyperopt objective: run one CV evaluation and return the negated mean.

    Increments the global ``trials_counter`` so each trial's cached
    prediction files get a unique ``@N`` suffix.  Hyperopt minimises, hence
    the sign flip on the (higher-is-better) cross-validation mean score.
    """
    global trials_counter
    trials_counter += 1
    mean_score, _std = hyperopt_obj(param, model_type, feat, trials_counter)
    return -mean_score
def hyperopt_obj(model_param, model_type, feat, trials_counter):
    """Evaluate one hyper-parameter setting by full cross-validation.

    Trains ``model_type`` with ``model_param`` on every (iter, fold) cell —
    sequentially for xgboost (which manages its own threads) or when
    ``config.nthread == 1``, otherwise in parallel via ``ModelProcess``
    workers.  Fold predictions are pickled to
    ``<fold dir>/<feat>_<model_type>@<trials_counter>.pred.pkl``; a final
    model is also trained on the full train set and its test predictions
    cached under ``<data_folder>/all``.

    :return: ``(mean, std)`` of the per-fold ``ml_score`` values.
    """
    ######
    gini_cv = np.zeros((config.kiter, config.kfold), dtype=float)
    if config.nthread == 1 or model_type.count('xgb') > 0: # single process
        if model_type.count('xgb') > 0:
            # let xgboost use all cores instead of forking worker processes
            model_param['nthread'] = config.max_core
        print(model_param)
        for iter in range(config.kiter):
            for fold in range(config.kfold):
                # load data
                path = "%s/iter%d/fold%d" %(config.data_folder, iter, fold)
                with open("%s/train.%s.feat.pkl" %(path, feat), 'rb') as f:
                    [x_train, y_train] = pickle.load(f)
                with open("%s/valid.%s.feat.pkl" %(path, feat), 'rb') as f:
                    [x_test, y_test] = pickle.load(f)
                pred_val = hyperopt_library(model_type, model_param, x_train, y_train, x_test, y_test)
                # save the pred for cross validation
                pred_file = "%s/%s_%s@%d.pred.pkl" %(path, feat, model_type, trials_counter)
                with open(pred_file, 'wb') as f:
                    pickle.dump(pred_val, f, -1)
                print("Cross Validation %d_%d, score %f" %(iter, fold, ml_score(y_test, pred_val)))
                #if model_type == 'logistic':
                #    y_test = y_test / np.linalg.norm(y_test)
                gini_cv[iter, fold] = ml_score(y_test, pred_val)
    else: # multiprocess
        # One worker process per (iter, fold); scores collect in a
        # manager-backed shared list.
        manager = multiprocessing.Manager()
        gini_cv = manager.list()
        lock = multiprocessing.Lock()
        mp_list = []
        for iter in range(config.kiter):
            for fold in range(config.kfold):
                mp = ModelProcess(lock, iter, fold, feat, model_type, model_param, gini_cv)
                mp_list.append(mp)
        for mp in mp_list:
            mp.start()
        for mp in mp_list:
            mp.join()
    gini_cv_mean = np.mean(gini_cv)
    gini_cv_std = np.std(gini_cv)
    print("Mean %f, Std %f" % (gini_cv_mean, gini_cv_std))
    # save the pred for train/test
    # load data
    path = "%s/all" %(config.data_folder)
    with open("%s/train.%s.feat.pkl" %(path, feat), 'rb') as f:
        [x_train, y_train] = pickle.load(f)
    f.close()  # redundant: the with-block already closed the file
    with open("%s/test.%s.feat.pkl" %(path, feat), 'rb') as f:
        [x_test, y_test] = pickle.load(f)
    f.close()  # redundant
    if model_type.count('xgb') > 0:
        model_param['nthread'] = config.max_core
    pred_val = hyperopt_library(model_type, model_param, x_train, y_train, x_test, y_test, "all")
    # save the pred for train/test
    pred_file = "%s/%s_%s@%d.pred.pkl" %(path, feat, model_type, trials_counter)
    with open(pred_file, 'wb') as f:
        pickle.dump(pred_val, f, -1)
    f.close()  # redundant
    return gini_cv_mean, gini_cv_std
######
## preprocessing the feature data
# 1. standardization
# 2. normalization
# 3. binarization
# 4. encoding categorical feature
# 5. imputation of missing values
##
def preprocess_data(x_train, x_test):
    """Apply log(1+x) compression, then standardisation, to both splits.

    Features are truncated to int before the log transform (original
    behaviour, deliberately preserved), and the scaler is fitted on the
    training split only so the test split does not leak into the statistics.

    :return: ``(x_train, x_test)`` as transformed numpy arrays.
    """
    # NOTE: astype(int) discards any fractional part before log(x+1) —
    # presumably these are count features; kept as-is to preserve behaviour.
    x_train = np.log(np.array(x_train).astype(int) + 1)
    x_test = np.log(np.array(x_test).astype(int) + 1)
    # Zero-mean / unit-variance scaling, fitted on the train split only.
    scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
    scaler.fit(x_train)
    return scaler.transform(x_train), scaler.transform(x_test)
def hyperopt_library(model_type, model_param, x_train, y_train, x_test, y_test, type="valid"):
    """Train one model identified by ``model_type`` and predict on ``x_test``.

    Dispatches over all supported model families (keras/lasagne nets, sklearn
    regressors/classifiers, several xgboost flavours).  xgboost (except rank/
    count) and lasagne inputs are log+standardise preprocessed first.

    :param model_type: model identifier, e.g. 'knn', 'rf', 'xgb_tree', ...
    :param model_param: hyper-parameter dict for the chosen model.
    :param x_train: training features.
    :param y_train: training labels.
    :param x_test: features to predict.
    :param y_test: labels of x_test; used for xgboost validation watchlists
        and for sizing the zero fallback.
    :param type: "valid" when y_test is usable for early stopping, "all"
        when predicting unlabeled test data.  (Shadows the ``type`` builtin;
        the name is kept for keyword-caller compatibility — see the
        isinstance fix in the xgb_art branch below.)
    :return: 1-D prediction vector; on any training error, a list of zeros
        of ``len(y_test)`` (best-effort fallback, error printed).
    """
    try:
        # preprocess data for xgb model
        if (model_type.count('xgb') > 0 and model_type != 'xgb_rank' and model_type != 'xgb_count') or model_type == 'lasagne':
            x_train, x_test = preprocess_data(x_train, x_test)
        # training
        if model_type == 'keras':
            print("%s training..." % model_type)
            model = keras_model()
            model.fit(x_train, y_train, nb_epoch=2, batch_size=16)
            pred_val = model.predict( x_test, batch_size=16 )
            pred_val = pred_val.reshape( pred_val.shape[0] )
            return pred_val
        if model_type == 'lasagne':
            print("%s training..." % model_type)
            x_train = np.array(x_train).astype(np.float32)
            x_test = np.array(x_test).astype(np.float32)
            num_features = x_train.shape[1]
            num_classes = 1
            model = lasagne_model(num_features, num_classes)
            model.fit(x_train, y_train)
            pred_val = model.predict(x_test)
            pred_val = np.array(pred_val).reshape(len(pred_val),)
            return pred_val
        # Nearest Neighbors, regression
        # FIX: the original tested model_type.count('knn') > 0, which also
        # matches 'knnC' and made the classifier branch unreachable; use
        # exact equality like the other sklearn branches.
        if model_type == 'knn':
            print("%s training..." % model_type)
            n_neighbors = model_param['n_neighbors']
            weights = model_param['weights']
            model = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights)
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # Nearest Neighbors, classifier
        if model_type == 'knnC':
            print("%s training..." % model_type)
            n_neighbors = model_param['n_neighbors']
            weights = model_param['weights']
            model = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # linear regression
        if model_type == 'linear':
            print("%s training..." % model_type)
            model = LinearRegression()
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # logistic regression
        if model_type == 'logistic':
            print("%s training..." % model_type)
            model = LogisticRegression()
            #y_train = y_train / np.linalg.norm(y_train)
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # kernal ridge regression
        if model_type == 'ridge':
            print("%s training..." % model_type)
            model = Ridge(alpha=model_param['alpha'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # lasso regression
        if model_type == 'lasso':
            print("%s training..." % model_type)
            # NOTE(review): this branch fits Ridge, not Lasso — presumably a
            # copy-paste leftover; confirm whether sklearn's Lasso was intended
            # (kept as-is because Lasso may not be imported in this module).
            model = Ridge(alpha=model_param['alpha'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # SVM regression
        if model_type == 'svr':
            print("%s training..." % model_type)
            model = SVR(kernel=model_param['kernel'], C=model_param['C'], epsilon=model_param['epsilon'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # SVM classification
        if model_type == 'svc':
            print("%s training..." % model_type)
            # FIX: sklearn's SVC has no `epsilon` parameter (that belongs to
            # SVR); passing it raised TypeError and this branch always fell
            # through to the zero-prediction fallback.
            model = SVC(C=model_param['C'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # random forest regression
        if model_type == 'rf':
            print("%s training..." % model_type)
            model = RandomForestRegressor(n_estimators=model_param['n_estimators'], n_jobs=-1)
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # random forest classification
        if model_type == 'rfC':
            print("%s training..." % model_type)
            model = RandomForestClassifier(n_estimators=model_param['n_estimators'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # extra tree regression
        if model_type == 'extratree':
            print("%s training..." % model_type)
            model = ExtraTreesRegressor(n_estimators=model_param['n_estimators'], max_features=model_param['max_features'], max_depth=model_param['max_depth'], n_jobs=-1, verbose=1, oob_score=True, bootstrap=True)
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # extra tree classification
        if model_type == 'extratreeC':
            print("%s training..." % model_type)
            model = ExtraTreesClassifier(n_estimators=model_param['n_estimators'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # GBRT regression
        if model_type == 'gbf':
            print("%s training..." % model_type)
            model = GradientBoostingRegressor(n_estimators=model_param['n_estimators'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # GBRT classification
        if model_type == 'gbfC':
            print("%s training..." % model_type)
            model = GradientBoostingClassifier(n_estimators=model_param['n_estimators'], subsample=model_param['subsample'], max_depth=model_param['max_depth'])
            model.fit( x_train, y_train )
            pred_val = model.predict( x_test )
            return pred_val
        # xgboost
        if model_type.count('xgb_binary') > 0 or model_type.count('xgb_log') > 0 or model_type.count('xgb_auc') > 0:
            print("%s training..." % model_type)
            params = model_param
            num_rounds = model_param['num_rounds']
            #create a train and validation dmatrices
            xgtrain = xgb.DMatrix(x_train, label=y_train)
            xgval = xgb.DMatrix(x_test)
            # FIX: the original had a stray `return pred_val` here, before
            # pred_val existed — the NameError was swallowed by the broad
            # except below, so these model types always returned zeros.
            #train using early stopping and predict
            watchlist = [(xgtrain, "train")]
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=120)
            pred_val = model.predict( xgval )
            return pred_val
        if model_type == 'xgb_rank' or model_type == 'xgb_count':
            print("%s training..." % model_type)
            params = model_param
            num_rounds = model_param['num_rounds']
            #create a train and validation dmatrices
            xgtrain = xgb.DMatrix(x_train, label=y_train)
            xgval = xgb.DMatrix(x_test)
            #train using early stopping and predict
            watchlist = [(xgtrain, "train")]
            #model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=100, feval=gini_metric)
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=120)
            pred_val = model.predict( xgval, ntree_limit=model.best_iteration )
            return pred_val
        if model_type.count('xgb_linear') > 0:
            print("%s training..." % model_type)
            params = model_param
            num_rounds = model_param['num_rounds']
            #create a train and validation dmatrices
            xgtrain = xgb.DMatrix(x_train, label=y_train)
            if type == "all":
                # no labels for the held-out test set: watch train only
                xgval = xgb.DMatrix(x_test)
                watchlist = [(xgtrain, "train")]
            else:
                xgval = xgb.DMatrix(x_test, label=y_test)
                watchlist = [(xgtrain, "train"), (xgval, "val")]
            #model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=100, feval=gini_metric)
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=model_param['early_stopping_rounds'])
            pred_val = model.predict( xgval )
            return pred_val
        if model_type.count('xgb_multi') > 0:
            print("%s training..." % model_type)
            params = model_param
            num_rounds = model_param['num_rounds']
            # labels shifted to a 0-based range for multiclass training
            xgtrain = xgb.DMatrix(x_train, label=(y_train - 1))
            xgval = xgb.DMatrix(x_test)
            watchlist = [(xgtrain, "train")]
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=120)
            pred_val = model.predict( xgval )
            return pred_val
        if model_type.count('xgb_tree_auc') > 0 or model_type.count('xgb_tree_log') > 0 or model_type.count('xgb_fix') > 0 or model_type.count('xgb_fix_log') > 0:
            print("%s training..." % model_type)
            params = model_param
            num_rounds = model_param['num_rounds']
            xgtrain = xgb.DMatrix(x_train, label=y_train)
            if type=="all":
                xgval = xgb.DMatrix(x_test)
                watchlist = [(xgtrain, "train")]
            else:
                xgval = xgb.DMatrix(x_test, label=y_test)
                watchlist = [(xgtrain, "train"), (xgval, "val")]
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds=model_param['early_stopping_rounds'])
            pred_val = model.predict( xgval, ntree_limit=model.best_iteration )
            return pred_val
        if model_type.count('xgb_art') > 0:
            print("%s trainning..." % model_type)
            params = model_param
            num_rounds = model_param['num_rounds']
            # hold out the first `valid_size` fraction for early stopping
            offset = int(model_param['valid_size'] * y_train.shape[0]) + 1
            # FIX: the original wrote `type(x_train) != np.ndarray`, but the
            # builtin `type` is shadowed by the string parameter above, so the
            # call raised TypeError and xgb_art always returned zeros.
            if not isinstance(x_train, np.ndarray):
                x_train = x_train.toarray()
                x_test = x_test.toarray()
            xgtrain = xgb.DMatrix(x_train[offset:, :], label=y_train[offset:])
            xgval = xgb.DMatrix(x_train[:offset, :], label=y_train[:offset])
            watchlist = [(xgtrain, "train"), (xgval, "val")]
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds = model_param['early_stopping_rounds'])
            xgtest = xgb.DMatrix(x_test)
            pred_val1 = model.predict(xgtest, ntree_limit=model.best_iteration)
            # reverse train, and log label
            x_train_tmp = x_train[::-1, :]
            y_train_tmp = np.log(y_train[::-1])
            xgtrain = xgb.DMatrix(x_train_tmp[offset:, :], label=y_train_tmp[offset:])
            xgval = xgb.DMatrix(x_train_tmp[:offset, :], label=y_train_tmp[:offset])
            watchlist = [(xgtrain, "train"), (xgval, "val")]
            model = xgb.train(params, xgtrain, num_rounds, watchlist, early_stopping_rounds = model_param['early_stopping_rounds'])
            pred_val2 = model.predict(xgtest, ntree_limit=model.best_iteration)
            # fixed-weight blend of the plain and reversed/log runs
            pred_val = pred_val1*1.5 + pred_val2*8.5
            return pred_val
    except Exception as err:
        # Best-effort boundary: log the failure and fall back to zeros so a
        # single bad configuration does not abort the whole hyperopt search.
        print(err)
        print("Function error.")
        pred_val = [0] * len(y_test)
        return pred_val
class ModelProcess(multiprocessing.Process):
    """Worker process that evaluates one (iter, fold) CV cell for a model.

    Loads the fold's cached train/valid features, trains and predicts via
    ``hyperopt_library``, pickles the predictions, and appends the fold
    score to the shared ``gini_cv`` list (a ``multiprocessing.Manager``
    list proxy, so appends from multiple processes are safe).
    """

    def __init__(self, lock, iter, fold, feat, model_type, model_param, gini_cv):
        multiprocessing.Process.__init__(self)
        self.lock = lock                # currently unused; kept for interface compatibility
        self.iter = iter                # CV iteration index
        self.fold = fold                # CV fold index
        self.feat = feat                # feature-set name
        self.model_type = model_type
        self.model_param = model_param
        self.gini_cv = gini_cv          # shared result list (manager proxy)

    def run(self):
        """Train/predict this fold and record its score."""
        path = "%s/iter%d/fold%d" %(config.data_folder, self.iter, self.fold)
        # The context managers close the files; the explicit f.close()
        # calls in the original were redundant and have been dropped.
        with open("%s/train.%s.feat.pkl" %(path, self.feat), 'rb') as f:
            [x_train, y_train] = pickle.load(f)
        with open("%s/valid.%s.feat.pkl" %(path, self.feat), 'rb') as f:
            [x_test, y_test] = pickle.load(f)
        pred_val = hyperopt_library(self.model_type, self.model_param, x_train, y_train, x_test, y_test)
        # save the pred for cross validation (unique file per trial)
        pred_file = "%s/%s_%s@%d.pred.pkl" %(path, self.feat, self.model_type, trials_counter)
        with open(pred_file, 'wb') as f:
            pickle.dump(pred_val, f, -1)
        #if self.model_type == 'logistic':
        #    y_test = y_test / np.linalg.norm(y_test)
        self.gini_cv.append( ml_score(y_test, pred_val) )
def hyperopt_main():
    """Run a hyperopt TPE search for every (feature set, model) pair.

    For each pair not already present in ``param_best_dic`` (always empty
    here — best-parameter persistence is commented out in the original),
    searches the model's space from ``config.param_spaces`` for
    ``config.hyper_max_evals`` evaluations and prints the best parameters.
    """
    global trials_counter
    param_best_dic = {}
    for feat in config.feat_names:
        for model in config.model_list:
            model_key = "%s_%s"%(feat, model)
            if model_key not in param_best_dic:
                print("Training model %s_%s ......" %(feat, model))
                # Reset the trial-id base so cached prediction files from
                # this search get distinct @N suffixes.
                trials_counter = 5
                search_space = config.param_spaces[model]
                best_params = fmin(
                    lambda p: hyperopt_wrapper(p, model, feat),
                    search_space,
                    algo=tpe.suggest,
                    trials=Trials(),
                    max_evals=config.hyper_max_evals,
                )
                print(best_params)
##
# use outer model,
# such as C++, R, Java
import subprocess
import pylibfm
from scipy import sparse
def outer_model():
    """Run external (non-Python) models over the CV folds via subprocess.

    Intended to drive both an RGF model and libFM, collecting per-fold
    libFM scores and printing their mean.  See the NOTE below: as written,
    only libFM actually executes.
    """
    rgf_cv = []   # never populated — see NOTE below
    fm_cv = []
    feat = 'label'
    for iter in range(config.kiter):
        for fold in range(config.kfold):
            path = '%s/iter%d/fold%d'%(config.data_folder, iter, fold)
            # rgf model
            # NOTE(review): this rgf command string is immediately
            # overwritten by the libfm command on the next line before
            # subprocess.call runs, so rgf never executes and libfm is run
            # twice per fold.  Confirm whether rgf was deliberately disabled.
            cmd = 'perl outer/call_exe.pl ./outer/rgf train_predict %s/train_predict'%path
            cmd = './outer/libfm -task r -dim "1,1,8" -iter 100 -method sgd -learn_rate 0.01 -regular "0,0,0.01" -train %s/fm.train -test %s/fm.test -out %s/%s_fm.pred'%(path, path, path, feat)
            print(cmd)
            subprocess.call(cmd, shell=True)
            # fm model
            cmd = './outer/libfm -task r -dim "1,1,8" -iter 100 -method sgd -learn_rate 0.01 -regular "0,0,0.01" -train %s/fm.train -test %s/fm.test -out %s/%s_fm.pred'%(path, path, path, feat)
            print(cmd)
            #cmd = './outer/libfm -task r -dim "1,1,2" -train %s/fm.train -test %s/fm.test -out %s/%s_fm.pred'%(path, path, path, feat)
            subprocess.call(cmd, shell=True)
            # score libFM's written predictions against the cached truth
            y_pred = np.loadtxt('%s/%s_fm.pred'%(path, feat))
            with open('%s/valid.true.pkl'%path, 'rb') as f:
                y_true = pickle.load(f)
            fm_cv.append(ml_score(y_true, y_pred))
            print("AUC is ", ml_score(y_true, y_pred))
            with open('%s/%s.fm.pred.pkl'%(path,feat), 'wb') as f:
                pickle.dump(y_pred, f, -1)
            ###############################
            #with open("%s/train.%s.feat.pkl" %(path, feat), 'rb') as f:
            #    [x_train, y_train] = pickle.load(f)
            #with open("%s/valid.%s.feat.pkl" %(path, feat), 'rb') as f:
            #    [x_test, y_test] = pickle.load(f)
            #x_train = np.array(x_train).astype(np.double)
            #x_test = np.array(x_test).astype(np.double)
            #fm = pylibfm.FM()
            #fm.fit(sparse.csr_matrix(x_train), y_train)
            #y_pred = fm.predict(sparse.csr_matrix(x_test))
            #print "AUC is ", ml_score(y_test, y_pred)
            #break
    print("libFM AUC is ", np.mean(fm_cv))
if __name__ == '__main__':
    # Entry point: dispatch on the command-line flag to apply models to
    # features and build the model library.
    start_time = time.time()
    # write your code here
    # apply different model on different feature, generate model library
    print("Code start at %s" %time.ctime())
    flag = sys.argv[1]
    if flag == "train":
        ## generate pred by best params
        one_model()
    if flag == "hyperopt":
        ## hyper parameter search
        hyperopt_main()
    if flag == "outer":
        outer_model()
    end_time = time.time()
    # FIX: time.time() is already in seconds; the original divided the
    # difference by 1000 and under-reported the elapsed time 1000x.
    print("cost time %f" %(end_time - start_time))
|
# examples/example_CascadedEmbeddings.py
# Example: training CascadedEmbeddings — three pretrained trunks whose
# per-trunk embedders are concatenated into one cascaded embedding space,
# each section trained with its own loss (and miners on sections 2 and 3).
# The testing module requires faiss
# So if you don't have that, then this import will break
from pytorch_metric_learning import losses, miners, samplers, trainers, testers
import pytorch_metric_learning.utils.logging_presets as logging_presets
import numpy as np
from torchvision import datasets, models, transforms
import torch
import logging
from utils_for_examples import MLP, Identity, ListOfModels
logging.getLogger().setLevel(logging.INFO)
import pytorch_metric_learning
logging.info("VERSION %s"%pytorch_metric_learning.__version__)
##############################
########## Training ##########
##############################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# In this example, we'll take multiple trunks and embedders,
# and encapsulate them into a single trunk and embedder model.
# Note that CascadedEmbeddings does not necessarily require a complicated setup like this.
# CascadedEmbeddings just assumes that the output of your embedder
# should be partitioned into different sections, as specified by the init argument
# "embedding_sizes".
trunk1 = models.shufflenet_v2_x0_5(pretrained=True)
trunk2 = models.shufflenet_v2_x1_0(pretrained=True)
trunk3 = models.resnet18(pretrained=True)
all_trunks = [trunk1, trunk2, trunk3]
trunk_output_sizes = []
# Record each trunk's feature width, then strip its classification head so
# the trunk emits raw features.
for T in all_trunks:
    trunk_output_sizes.append(T.fc.in_features)
    T.fc = Identity()
trunk = ListOfModels(all_trunks)
trunk = torch.nn.DataParallel(trunk.to(device))
# Set the embedders. Each embedder takes a corresponding trunk model output, and outputs 64-dim embeddings.
all_embedders = []
for s in trunk_output_sizes:
    all_embedders.append(MLP([s, 64]))
# The output of embedder will be of size 64*3.
embedder = ListOfModels(all_embedders, input_sizes=trunk_output_sizes)
embedder = torch.nn.DataParallel(embedder.to(device))
# Set optimizers
trunk_optimizer = torch.optim.Adam(trunk.parameters(), lr=0.00001, weight_decay=0.00005)
embedder_optimizer = torch.optim.Adam(embedder.parameters(), lr=0.00001, weight_decay=0.00005)
# Set the image transforms
train_transform = transforms.Compose([transforms.Resize(256),
                                      transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=227),
                                      transforms.RandomHorizontalFlip(0.5),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
val_transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(227),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Set the datasets
train_dataset = datasets.CIFAR100(root="CIFAR100_Dataset", train=True, transform=train_transform, download=True)
val_dataset = datasets.CIFAR100(root="CIFAR100_Dataset", train=False, transform=val_transform, download=True)
# Set the loss functions. loss0 will be applied to the first embedder, loss1 to the second embedder etc.
loss0 = losses.TripletMarginLoss(margin=0.01)
loss1 = losses.MultiSimilarityLoss(alpha=0.1, beta=40, base=0.5)
loss2 = losses.ArcFaceLoss(margin=30, num_classes=100, embedding_size=64).to(device)
# Set the mining functions. In this example we'll apply mining to the 2nd and 3rd cascaded outputs.
miner1 = miners.MultiSimilarityMiner(epsilon=0.1)
miner2 = miners.HDCMiner(filter_percentage=0.25)
# Set the dataloader sampler
sampler = samplers.MPerClassSampler(train_dataset.targets, m=4)
# Set other training parameters
batch_size = 32
num_epochs = 2
iterations_per_epoch = 100
# Package the above stuff into dictionaries.
# NOTE: this dict rebinds the name `models`, shadowing the torchvision
# `models` module imported above; safe here because all torchvision uses
# occur earlier in the script.
models = {"trunk": trunk, "embedder": embedder}
optimizers = {"trunk_optimizer": trunk_optimizer, "embedder_optimizer": embedder_optimizer}
loss_funcs = {"metric_loss_0": loss0, "metric_loss_1": loss1, "metric_loss_2": loss2}
mining_funcs = {"post_gradient_miner_1": miner1, "post_gradient_miner_2": miner2}
record_keeper, _, _ = logging_presets.get_record_keeper("example_logs", "example_tensorboard")
hooks = logging_presets.get_hook_container(record_keeper)
dataset_dict = {"val": val_dataset}
model_folder = "example_saved_models"
# Create the tester
tester = testers.GlobalEmbeddingSpaceTester(end_of_testing_hook=hooks.end_of_testing_hook)
end_of_epoch_hook = hooks.end_of_epoch_hook(tester, dataset_dict, model_folder)
trainer = trainers.CascadedEmbeddings(models=models,
                                      optimizers=optimizers,
                                      batch_size=batch_size,
                                      loss_funcs=loss_funcs,
                                      mining_funcs=mining_funcs,
                                      iterations_per_epoch=iterations_per_epoch,
                                      dataset=train_dataset,
                                      sampler=sampler,
                                      end_of_iteration_hook=hooks.end_of_iteration_hook,
                                      end_of_epoch_hook=end_of_epoch_hook,
                                      embedding_sizes=[64, 64, 64])
trainer.train(num_epochs=num_epochs)
|
from .. import util
import duo_client.admin
from .base import TestAdmin
class TestAdmins(TestAdmin):
    """Tests for the admin-listing and admin password-management endpoints.

    ``self.client_list`` (provided by the ``TestAdmin`` base class) appears
    to capture requests rather than send them; each test asserts on the
    recorded method, URI and decoded query/body parameters.
    """

    # Uses underlying paging
    def test_get_admins(self):
        response = self.client_list.get_admins()
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['100'],
                'offset': ['0'],
            })

    def test_get_admins_with_limit(self):
        response = self.client_list.get_admins(limit='20')
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['20'],
                'offset': ['0'],
            })

    def test_get_admins_with_limit_offset(self):
        response = self.client_list.get_admins(limit='20', offset='2')
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['20'],
                'offset': ['2'],
            })

    def test_get_admins_with_offset(self):
        # An offset supplied without a limit is expected to be discarded and
        # the default paging parameters used instead (as asserted below).
        response = self.client_list.get_admins(offset=9001)
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['100'],
                'offset': ['0'],
            })

    def test_get_admins_iterator(self):
        response = self.client_list.get_admins_iterator()
        response = next(response)
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['100'],
                'offset': ['0'],
            })

    def test_get_external_password_mgmt_statuses(self):
        response = self.client_list.get_external_password_mgmt_statuses()
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins/password_mgmt')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['100'],
                'offset': ['0'],
            })

    def test_get_external_password_mgmt_statuses_with_limit(self):
        response = self.client_list.get_external_password_mgmt_statuses(
            limit='20')
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins/password_mgmt')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['20'],
                'offset': ['0'],
            })

    # BUG FIX: method was misspelled "..._statusesg_with_limit_offset".
    def test_get_external_password_mgmt_statuses_with_limit_offset(self):
        response = self.client_list.get_external_password_mgmt_statuses(
            limit='20', offset='2')
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins/password_mgmt')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['20'],
                'offset': ['2'],
            })

    def test_get_external_password_mgmt_statuses_with_offset(self):
        # Offset without limit: defaults are expected (see test above).
        response = self.client_list.get_external_password_mgmt_statuses(
            offset=9001)
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins/password_mgmt')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
                'limit': ['100'],
                'offset': ['0'],
            })

    def test_get_external_password_mgmt_status_for_admin(self):
        response = self.client_list.get_external_password_mgmt_status_for_admin(
            'DFAKEADMINID')
        response = response[0]
        self.assertEqual(response['method'], 'GET')
        (uri, args) = response['uri'].split('?')
        self.assertEqual(uri, '/admin/v1/admins/DFAKEADMINID/password_mgmt')
        self.assertEqual(
            util.params_to_dict(args),
            {
                'account_id': [self.client.account_id],
            })

    def test_update_admin_password_mgmt_status(self):
        response = self.client_list.update_admin_password_mgmt_status(
            'DFAKEADMINID', has_external_password_mgmt='False')
        response = response[0]
        self.assertEqual(response['method'], 'POST')
        self.assertEqual(response['uri'], '/admin/v1/admins/DFAKEADMINID/password_mgmt')
        self.assertEqual(
            util.params_to_dict(response['body']),
            {
                'account_id': [self.client.account_id],
                'has_external_password_mgmt': ['False']
            })

    def test_update_admin_password_mgmt_status_set_password(self):
        response = self.client_list.update_admin_password_mgmt_status(
            'DFAKEADMINID', has_external_password_mgmt='True', password='<PASSWORD>')
        response = response[0]
        self.assertEqual(response['method'], 'POST')
        self.assertEqual(response['uri'], '/admin/v1/admins/DFAKEADMINID/password_mgmt')
        self.assertEqual(
            util.params_to_dict(response['body']),
            {
                'account_id': [self.client.account_id],
                'has_external_password_mgmt': ['True'],
                'password': ['<PASSWORD>']
            })
|
<gh_stars>0
import sys
import tempfile
import hashlib
import os
from shutil import rmtree, copy2
from bioconda_utils.recipe import Recipe as bioconda_utils_Recipe
from birg.filesystem import Filesystem
from birg.buildscript import BuildScript
from birg.recipe import Recipe
from birg.utils import (
copytree,
calculate_md5_checksum,
download_and_unpack_source,
)
def create_recipe(bioconda_recipe_path, recipe_path, strategy):
    """Build a birg ``Recipe`` from a bioconda-style meta.yaml.

    The recipe directory is temporarily copied into the bioconda checkout
    under ``recipes/<md5-of-path>`` so that ``bioconda_utils`` can parse it
    (presumably it expects recipes to live under ``recipes/`` -- confirm),
    then the parsed fields are transferred onto a birg ``Recipe``.

    Args:
        bioconda_recipe_path: path to the bioconda-recipes checkout.
        recipe_path: path to the recipe directory to convert.
        strategy: build strategy name ("cmake", "autoconf", "python2", ...).

    Returns:
        A populated ``Recipe`` with its build number incremented.

    Exits the process when meta.yaml has no ``source/url``.
    """
    # Load meta.yaml file and instantiate Recipe object
    temp_folder_name = hashlib.md5(recipe_path.encode("utf-8")).hexdigest()
    recipes_pkg_path = os.path.join(bioconda_recipe_path, "recipes", temp_folder_name)
    try:
        os.mkdir(recipes_pkg_path)
        copytree(recipe_path, recipes_pkg_path)
        bioconda_recipe = bioconda_utils_Recipe.from_file(
            bioconda_recipe_path, recipes_pkg_path
        )
    finally:
        # Always remove the temporary copy, even when parsing fails.
        rmtree(recipes_pkg_path)
    name = bioconda_recipe.get("package/name")
    version = bioconda_recipe.get("package/version")
    recipe = Recipe(name, version, recipe_path, strategy)
    # Parse values from file to Recipe object
    try:
        recipe.add_source_url(bioconda_recipe.get("source/url"))
    except KeyError:
        sys.exit(
            "No source url was found in the given meta.yaml file, please add a source url"
        )
    recipe.add_build_number(bioconda_recipe.get("build/number", "0"))
    # Prefer the sha256 checksum; fall back to md5, computing it from the
    # source tarball when meta.yaml does not provide one.
    try:
        recipe.add_checksum_sha256(bioconda_recipe.get("source/sha256"))
    except KeyError:
        recipe.add_checksum_md5(
            bioconda_recipe.get(
                "source/md5", calculate_md5_checksum(bioconda_recipe.get("source/url"))
            )
        )
    build_requirements = bioconda_recipe.get("requirements/build", [])
    for requirement in build_requirements:
        recipe.add_requirement(requirement, "build")
    host_requirements = bioconda_recipe.get("requirements/host", [])
    for requirement in host_requirements:
        recipe.add_requirement(requirement, "host", host_only=True)
    run_requirements = bioconda_recipe.get("requirements/run", [])
    for requirement in run_requirements:
        recipe.add_requirement(requirement, "run")
    # The remaining meta.yaml sections are optional: a KeyError from .get()
    # simply means the section is absent.
    try:
        recipe.add_test_commands(bioconda_recipe.get("test/commands"))
    except KeyError:
        pass
    try:
        recipe.add_test_files_with_list(bioconda_recipe.get("test/files"))
    except KeyError:
        pass
    try:
        recipe.add_patches_with_list(bioconda_recipe.get("source/patches"), recipe_path)
    except KeyError:
        pass
    # Conda will not accept the compiler dependency given by bioconda
    try:
        build_requirements = recipe.recipe_dict["requirements"]["build"]
        if "compiler_c" in build_requirements:
            recipe.recipe_dict["requirements"]["build"].remove("compiler_c")
            recipe.recipe_dict["requirements"]["build"].append("{{compiler('c')}}")
        if "compiler_cxx" in build_requirements:
            recipe.recipe_dict["requirements"]["build"].remove("compiler_cxx")
            recipe.recipe_dict["requirements"]["build"].append("{{compiler('cxx')}}")
    except KeyError:
        # No build requirements at all: seed the defaults for the strategy.
        if strategy == "cmake":
            recipe.add_requirement("{{ compiler('c') }}", "build")
            recipe.add_requirement("cmake", "build")
            recipe.add_requirement("make", "build")
        elif strategy == "autoconf":
            recipe.add_requirement("make", "build")
            recipe.add_requirement("autoconf", "build")
            recipe.add_requirement("automake", "build")
            recipe.add_requirement("{{ compiler('c') }}", "build")
    # Python strategies need an interpreter in the host environment unless
    # meta.yaml already requested one.
    if strategy.startswith("python"):
        try:
            host_environment = recipe.recipe_dict["requirements"]["host"]
            if not any(map(lambda req: req.startswith("python"), host_environment)):
                if strategy == "python2":
                    recipe.add_requirement("python =2.7", "host")
                else:
                    recipe.add_requirement("python >=3", "host")
        except KeyError:
            # No host requirements section at all.
            if strategy == "python2":
                recipe.add_requirement("python =2.7", "host")
            else:
                recipe.add_requirement("python >=3", "host")
    try:
        recipe.script = bioconda_recipe.get("build/script")
    except KeyError:
        pass
    try:
        recipe.add_command_imports(bioconda_recipe.get("test/imports"))
    except KeyError:
        pass
    try:
        recipe.add_entry_point(bioconda_recipe.get("build/entry_points"))
    except KeyError:
        pass
    try:
        recipe.add_noarch(bioconda_recipe.get("build/noarch"))
    except KeyError:
        pass
    try:
        recipe.add_test_requires(bioconda_recipe.get("test/requires"))
    except KeyError:
        pass
    recipe.increment_build_number()
    return recipe
def create_build_script(recipe, args, filesystem):
    """Return a BuildScript for *recipe*.

    When the recipe carries an inline build script it is used directly;
    otherwise the script lines are read from the recipe directory's
    ``build.sh``.
    """
    if recipe.script is not None:
        return BuildScript(recipe.name, args.recipe_path, args.strategy, filesystem, recipe.script)
    build_script = BuildScript(recipe.name, args.recipe_path, args.strategy, filesystem)
    with open(os.path.join(recipe.path, "build.sh"), "r") as fp:
        build_script._lines = fp.readlines()
    return build_script
def preprocess(args, bioconda_recipe_path):
    """Turn a bioconda recipe into ([recipe], [build_script]).

    The source tarball is downloaded and unpacked into a temporary
    directory so a Filesystem view of the source tree can be built.
    """
    recipe = create_recipe(bioconda_recipe_path, args.recipe_path, args.strategy)
    with tempfile.TemporaryDirectory() as tmpdir:
        download_and_unpack_source(recipe.url, tmpdir)
        unpack_root = os.path.join(tmpdir, "source")
        first_entry = os.listdir(unpack_root)[0]
        filesystem = Filesystem(os.path.join(unpack_root, first_entry))
        build_script = create_build_script(recipe, args, filesystem)
    return ([recipe], [build_script])
|
<gh_stars>10-100
"""
Set of functions used to parse and transform email headers.
"""
from __future__ import unicode_literals
import datetime
import email
import chardet
import six
from django.utils import timezone
from django.utils.html import escape
from django.utils.formats import date_format
from modoboa.lib.email_utils import EmailAddress
from modoboa.lib.signals import get_request
# Public parsing helpers exported by this module.
__all__ = [
    'parse_from', 'parse_to', 'parse_message_id', 'parse_date',
    'parse_reply_to', 'parse_cc', 'parse_subject'
]
# date and time formats for email list
# according to https://en.wikipedia.org/wiki/Date_format_by_country
# and https://en.wikipedia.org/wiki/Date_and_time_representation_by_country
# Maps language code -> Django date_format() patterns; parse_date() uses
# SHORT for messages less than a week old and LONG otherwise.
DATETIME_FORMATS = {
    "cs": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "de": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "en": {'SHORT': 'l, P', 'LONG': 'N j, Y P'},
    "es": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "fr": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "it": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "ja_JP": {'SHORT': 'l, P', 'LONG': 'N j, Y P'},
    "nl": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "pl_PL": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "pt_PT": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "pt_BR": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "ru": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
    "sv": {'SHORT': 'l, H:i', 'LONG': 'd. N Y H:i'},
}
def to_unicode(value):
    """Best-effort conversion of ``value`` to unicode text.

    Already-decoded text (or ``None``) is returned untouched.  Byte strings
    are first tried as UTF-8; failing that the charset is guessed with
    chardet.  When no usable guess exists the raw value is returned rather
    than raising.
    """
    if value is None or isinstance(value, six.text_type):
        return value
    try:
        return value.decode("utf-8")
    except UnicodeDecodeError:
        pass
    try:
        res = chardet.detect(value)
    except UnicodeDecodeError:
        return value
    encoding = res["encoding"]
    # BUG FIX: chardet may report encoding=None for undetectable input;
    # decode(None) would raise TypeError.  ASCII input is returned as-is
    # (unchanged historical behaviour).
    if not encoding or encoding == "ascii":
        return value
    return value.decode(encoding)
def parse_address(value, **kwargs):
    """Parse an email address into display HTML (or raw text).

    With ``raw=True`` the full address is returned as plain unicode.
    Otherwise a ``<span>`` is produced; when a display name is present the
    address goes into the (escaped, quoted) ``title`` attribute.
    """
    addr = EmailAddress(value)
    if kwargs.get("raw"):
        return to_unicode(addr.fulladdress)
    if addr.name:
        # BUG FIX: the title attribute was unquoted and the address was not
        # escaped, producing malformed (and potentially unsafe) HTML.
        return u'<span title="{}">{}</span>'.format(
            escape(to_unicode(addr.address)), escape(to_unicode(addr.name)))
    return u"<span>{}</span>".format(escape(to_unicode(addr.address)))
def parse_address_list(values, **kwargs):
    """Parse a comma-separated list of email addresses."""
    return [parse_address(chunk, **kwargs) for chunk in values.split(",")]
def parse_from(value, **kwargs):
    """Parse a From: header into a single-element list."""
    sender = parse_address(value, **kwargs)
    return [sender]
def parse_to(value, **kwargs):
    """Parse a To: header (comma-separated address list)."""
    recipients = parse_address_list(value, **kwargs)
    return recipients
def parse_cc(value, **kwargs):
    """Parse a Cc: header (comma-separated address list)."""
    carbon_copies = parse_address_list(value, **kwargs)
    return carbon_copies
def parse_reply_to(value, **kwargs):
    """Parse a Reply-To: header (comma-separated address list)."""
    reply_targets = parse_address_list(value, **kwargs)
    return reply_targets
def parse_date(value, **kwargs):
    """Parse a Date: header into a localized, human-readable string.

    Returns ``value`` unchanged when it cannot be parsed.  Messages older
    than a week use the LONG format, recent ones the SHORT format, picked
    from DATETIME_FORMATS by the requesting user's language.
    """
    parsed = email.utils.parsedate_tz(value)
    if not parsed:
        return value
    ndate = datetime.datetime.fromtimestamp(email.utils.mktime_tz(parsed))
    # NOTE(review): fromtimestamp() always yields a *naive* datetime, so the
    # former `if ndate.tzinfo is not None:` branch was dead code (and would
    # have crashed on `fromtimestamp(ndate)` had it ever run); it has been
    # removed.  Timezone-aware display is still a TODO.
    current_language = get_request().user.language
    if datetime.datetime.now() - ndate > datetime.timedelta(7):  # older than 7 days
        fmt = "LONG"
    else:
        fmt = "SHORT"
    return date_format(
        ndate,
        DATETIME_FORMATS.get(current_language, DATETIME_FORMATS.get("en"))[fmt]
    )
def parse_message_id(value, **kwargs):
    """Parse a Message-ID: header, stripping surrounding newlines."""
    cleaned = value.strip("\n")
    return cleaned
def parse_subject(value, **kwargs):
    """Parse a Subject: header, decoding RFC 2047 encoded words."""
    from modoboa.lib import u2u_decode
    try:
        decoded = u2u_decode.u2u_decode(value)
    except UnicodeDecodeError:
        # Fall back to the raw header when decoding is impossible.
        decoded = value
    return to_unicode(decoded)
|
<filename>ds_project/yolov2/losses.py
import math
import torch
import torch.nn as nn
from utils import bbox_ious
'''
Code was taken from https://github.com/uvipen/Yolo-v2-pytorch
'''
class YoloLoss(nn.modules.loss._Loss):
    """YOLOv2 loss: weighted sum of coordinate, objectness and class terms.

    The network output is read per anchor as (x, y, w, h, objectness,
    class scores).  Each ground-truth box is assigned to the best-matching
    anchor in its grid cell; all other predictions are pushed towards
    "no object" unless their IoU with some ground truth exceeds `thresh`.
    """

    def __init__(self, num_classes, anchors, device, reduction=32, coord_scale=1.0, noobject_scale=1.0,
                 object_scale=5.0, class_scale=1.0, thresh=0.6):
        """
        Args:
            num_classes: number of object classes.
            anchors: list of anchor boxes, each (w, h) or (x, y, w, h).
            device: torch device intermediate tensors are moved to.
            reduction: network stride (input pixels per output grid cell).
            coord_scale, noobject_scale, object_scale, class_scale: weights
                for the individual loss terms.
            thresh: IoU above which a prediction is not penalised as
                "no object".
        """
        super(YoloLoss, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = len(anchors)
        self.anchor_step = len(anchors[0])
        self.anchors = torch.Tensor(anchors)
        self.reduction = reduction
        self.coord_scale = coord_scale
        self.noobject_scale = noobject_scale
        self.object_scale = object_scale
        self.class_scale = class_scale
        self.thresh = thresh
        self.device = device

    def forward(self, output, target):
        """Compute (total, coord, conf, cls) losses for one batch."""
        batch_size = output.data.size(0)
        height = output.data.size(2)
        width = output.data.size(3)

        # Get x,y,w,h,conf,cls
        output = output.view(batch_size, self.num_anchors, -1, height * width)
        coord = torch.zeros_like(output[:, :, :4, :])
        coord[:, :, :2, :] = output[:, :, :2, :].sigmoid()  # x, y offsets in [0, 1)
        coord[:, :, 2:4, :] = output[:, :, 2:4, :]  # raw w, h (exponentiated below)
        conf = output[:, :, 4, :].sigmoid()
        cls = output[:, :, 5:, :].contiguous().view(batch_size * self.num_anchors, self.num_classes,
                                                    height * width).transpose(1, 2).contiguous().view(-1,
                                                                                                      self.num_classes)

        # Create prediction boxes (in grid-cell units).
        pred_boxes = torch.FloatTensor(batch_size * self.num_anchors * height * width, 4)
        # FIX: torch.range() is deprecated; torch.arange(n) yields the same
        # 0..n-1 sequence (dtype pinned to float to match the old output).
        lin_x = torch.arange(width, dtype=torch.float).repeat(height, 1).view(height * width)
        lin_y = torch.arange(height, dtype=torch.float).repeat(width, 1).t().contiguous().view(height * width)
        anchor_w = self.anchors[:, 0].contiguous().view(self.num_anchors, 1)
        anchor_h = self.anchors[:, 1].contiguous().view(self.num_anchors, 1)
        pred_boxes = pred_boxes.to(self.device)
        lin_x = lin_x.to(self.device)
        lin_y = lin_y.to(self.device)
        anchor_w = anchor_w.to(self.device)
        anchor_h = anchor_h.to(self.device)

        pred_boxes[:, 0] = (coord[:, :, 0].detach() + lin_x).view(-1)
        pred_boxes[:, 1] = (coord[:, :, 1].detach() + lin_y).view(-1)
        pred_boxes[:, 2] = (coord[:, :, 2].detach().exp() * anchor_w).view(-1)
        pred_boxes[:, 3] = (coord[:, :, 3].detach().exp() * anchor_h).view(-1)
        pred_boxes = pred_boxes.cpu()

        # Get target values
        coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls = self.build_targets(pred_boxes, target, height, width)
        coord_mask = coord_mask.expand_as(tcoord)
        tcls = tcls[cls_mask].view(-1).long()
        cls_mask = cls_mask.view(-1, 1).repeat(1, self.num_classes)
        tcoord = tcoord.to(self.device)
        tconf = tconf.to(self.device)
        coord_mask = coord_mask.to(self.device)
        conf_mask = conf_mask.to(self.device)
        tcls = tcls.to(self.device)
        cls_mask = cls_mask.to(self.device)

        conf_mask = conf_mask.sqrt()
        cls = cls[cls_mask].view(-1, self.num_classes)

        # Compute losses.  reduction='sum' replaces the deprecated
        # size_average=False (identical behaviour).
        mse = nn.MSELoss(reduction='sum')
        ce = nn.CrossEntropyLoss(reduction='sum')
        self.loss_coord = self.coord_scale * mse(coord * coord_mask, tcoord * coord_mask) / batch_size
        self.loss_conf = mse(conf * conf_mask, tconf * conf_mask) / batch_size
        self.loss_cls = self.class_scale * 2 * ce(cls, tcls) / batch_size
        self.loss_tot = self.loss_coord + self.loss_conf + self.loss_cls

        return self.loss_tot, self.loss_coord, self.loss_conf, self.loss_cls

    def build_targets(self, pred_boxes, ground_truth, height, width):
        """Build per-cell masks and regression/classification targets.

        Returns (coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls),
        each indexed [batch][anchor][...][cell].
        """
        batch_size = len(ground_truth)

        conf_mask = torch.ones(batch_size, self.num_anchors, height * width, requires_grad=False) * self.noobject_scale
        coord_mask = torch.zeros(batch_size, self.num_anchors, 1, height * width, requires_grad=False)
        # FIX: .bool() replaces the deprecated .byte() for mask indexing.
        cls_mask = torch.zeros(batch_size, self.num_anchors, height * width, requires_grad=False).bool()
        tcoord = torch.zeros(batch_size, self.num_anchors, 4, height * width, requires_grad=False)
        tconf = torch.zeros(batch_size, self.num_anchors, height * width, requires_grad=False)
        tcls = torch.zeros(batch_size, self.num_anchors, height * width, requires_grad=False)

        for b in range(batch_size):
            if len(ground_truth[b]) == 0:
                continue
            # Build up tensors
            cur_pred_boxes = pred_boxes[
                b * (self.num_anchors * height * width):(b + 1) * (self.num_anchors * height * width)]
            if self.anchor_step == 4:
                anchors = self.anchors.clone()
                anchors[:, :2] = 0
            else:
                anchors = torch.cat([torch.zeros_like(self.anchors), self.anchors], 1)
            # Ground-truth boxes in grid units, annotations are
            # (x, y, w, h, class) in input pixels with (x, y) the top-left.
            gt = torch.zeros(len(ground_truth[b]), 4)
            for i, anno in enumerate(ground_truth[b]):
                gt[i, 0] = (anno[0] + anno[2] / 2) / self.reduction
                gt[i, 1] = (anno[1] + anno[3] / 2) / self.reduction
                gt[i, 2] = anno[2] / self.reduction
                gt[i, 3] = anno[3] / self.reduction

            # Set confidence mask of matching detections to 0
            iou_gt_pred = bbox_ious(gt, cur_pred_boxes)
            mask = (iou_gt_pred > self.thresh).sum(0) >= 1
            conf_mask[b][mask.view_as(conf_mask[b])] = 0

            # Find best anchor for each ground truth
            gt_wh = gt.clone()
            gt_wh[:, :2] = 0
            iou_gt_anchors = bbox_ious(gt_wh, anchors)
            _, best_anchors = iou_gt_anchors.max(1)

            # Set masks and target values for each ground truth
            for i, anno in enumerate(ground_truth[b]):
                gi = min(width - 1, max(0, int(gt[i, 0])))
                gj = min(height - 1, max(0, int(gt[i, 1])))
                best_n = best_anchors[i]
                iou = iou_gt_pred[i][best_n * height * width + gj * width + gi]
                coord_mask[b][best_n][0][gj * width + gi] = 1
                cls_mask[b][best_n][gj * width + gi] = 1
                conf_mask[b][best_n][gj * width + gi] = self.object_scale
                tcoord[b][best_n][0][gj * width + gi] = gt[i, 0] - gi
                tcoord[b][best_n][1][gj * width + gi] = gt[i, 1] - gj
                tcoord[b][best_n][2][gj * width + gi] = math.log(max(gt[i, 2], 1.0) / self.anchors[best_n, 0])
                tcoord[b][best_n][3][gj * width + gi] = math.log(max(gt[i, 3], 1.0) / self.anchors[best_n, 1])
                tconf[b][best_n][gj * width + gi] = iou
                tcls[b][best_n][gj * width + gi] = int(anno[4])

        return coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls
#!/bin/python3
from binance.client import Client
from datetime import datetime, timezone
import os
# load in settings,
# load in pot
# get 4hour data from binance
# See if any buy/sells need to be placed
# execute transactions
# save transactions to separate file
# save pot file
## Pot concept :- When saving pot, if a sell is being placed ...update pot total with new pot total. If buying, leave pot total alone
# api info
# Binance credentials are taken from the environment (never hard-coded).
api_key = os.environ["api_key"]
api_secret = os.environ["api_secret"]
# filenames
transactions = "invest/transactions.txt"
settings = "invest/settings.txt"
pot = "invest/pot.txt"
# When True, debug output is printed throughout the module.
testing = True
# Quote currency every traded pair is priced against.
base_coin = "USDT"
coins =[]
# Running pot total; read/updated via the pot file (loadPot/savePot).
pot_total = 0
#load settings information from Read Only file
def loadCoins(settings):
    """Read the settings file and return a list of coin dicts.

    Each valid line has the form ``Coin:<ticker>:<weighting>``.  On
    failure a one-element sentinel list (["IOError"] or
    ["General Error"]) is returned instead.
    """
    try:
        with open(settings) as handle:
            lines = handle.readlines()
    except IOError:
        print("Failed to load in file for coin data")
        return ["IOError"]
    except Exception as e:
        print("General exception triggered: ", e)
        return ["General Error"]
    coins_array = []
    for entry in lines:
        # Only well-formed "Coin:<ticker>:<weighting>" lines count.
        if "Coin" not in entry or len(entry.split(":")) != 3 or not entry.startswith("C"):
            continue
        _, ticker, weighting = entry.split(":")
        coins_array.append({
            "ticker": ticker.strip('\n'),
            "weighting": weighting.strip('\n'),
            "active": None,
            "purchase_price": None,
        })
    if testing: print(coins_array)
    return coins_array
def loadPot(coins_array, pot_file):
    """Merge saved pot state from ``pot_file`` into ``coins_array``.

    File format (written by savePot): a ``Total:<pot>`` line plus one
    ``<ticker>:<active>:<price>:<MA7>:<MA21>:<coins purchased>`` line per
    coin; '#' lines are comments.  Returns the updated ``coins_array`` or
    a one-element sentinel list on failure.
    """
    # BUG FIX: pot_total was assigned as a *local*, silently discarding the
    # value read from the file; it is module-level state used by savePot().
    global pot_total
    content = ""
    try:
        with open(pot_file) as p:
            content = p.readlines()
    except IOError:
        print("Failed to load in file for coin data")
        return ["IOError"]
    # BUG FIX: was `except Exception (e):` -- legal-looking but evaluated as
    # a call `Exception(e)` with an undefined `e`, raising NameError the
    # moment any non-IOError exception occurred.
    except Exception as e:
        print("General exception triggered: ", e)
        return ["General Error"]
    if content != "":
        for line in content:
            if not line.startswith("#"):
                if line.startswith("Total"):
                    # BUG FIX: strip("/n") was a typo for strip("\n").
                    pot_total = int(line.split(":")[1].strip("\n"))
                else:
                    coinInfo = line.split(":")
                    for coin in coins_array:
                        if coin["ticker"] == coinInfo[0]:
                            coin["active"] = coinInfo[1]
                            coin["purchase_price"] = int(coinInfo[2].strip("\n"))
                            coin["MA7"] = float(coinInfo[3])
                            coin["MA21"] = float(coinInfo[4])
                            coin["coins_purchased"] = float(coinInfo[5])
    if testing: print(pot_total)
    if testing: print(coins_array)
    return coins_array
def checkBinance(ticker, current_time, api_key, api_secret, base_coin):
    """Fetch 4-hour klines for ``ticker``+``base_coin`` from Binance.

    Returns the raw kline list from the API, or None when the call fails
    for any reason (best-effort: the caller treats None as "no data").
    """
    api_data = None
    try:
        client = Client(api_key, api_secret)
        api_data = client.get_historical_klines(ticker+base_coin, Client.KLINE_INTERVAL_4HOUR, current_time)
        if testing: print(api_data)
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; keep the best-effort behaviour for real errors only.
    except Exception:
        if testing: print("API failed to respond")
        api_data = None  # reset any modifications the try statement made
    return api_data
# Calculate the moving averages over the close price (kline index 4).
# input: api_data (list of klines), coin_name (string, for error messages)
# output: tuple (MA7, MA21); (0, 0) for empty/None input and
# (None, None) when the data cannot be processed.
def getMovingAverages(api_data, coin_name):
    shortCalc = 7
    longCalc = 21
    maShort = 0
    maLong = 0
    if api_data:
        try:
            # Only the newest `longCalc` candles contribute; note the sums
            # are divided by the full window size even when fewer candles
            # are available (unchanged historical behaviour).
            recent = [float(candle[4]) for candle in api_data[-longCalc:]]
            maLong = sum(recent) / longCalc
            maShort = sum(recent[-shortCalc:]) / shortCalc
        except Exception as e:
            print("Failed to calculate moving averages for: "+coin_name)
            print(e)
            maShort = None
            maLong = None
    return maShort, maLong
def makeTradeDecision(ma7, ma21):
    """Return True when the short MA sits below the long MA.

    NOTE(review): the caller names the result ``ma7_above_ma21`` while this
    returns ma7 < ma21 -- one of the two looks misnamed; verify intent.
    """
    return ma21 > ma7
def executeTrade(coin, tradeType, coin_price, pot_total):
    """Apply a simulated buy/sell to ``coin``; return (coin, pot_total).

    Buying stakes ``pot_total * weighting`` at ``coin_price`` without
    touching the pot; selling closes the position and folds the
    profit/loss back into the pot.  Unknown trade types fall through and
    return None (unchanged historical behaviour).
    """
    if tradeType == "buy":
        stake = pot_total * float(coin["weighting"])
        coin["active"] = "Active"
        coin["purchase_price"] = coin_price
        coin["coins_purchased"] = stake / coin_price
        return coin, pot_total
    if tradeType == "sell":
        proceeds = float(coin_price) * float(coin["coins_purchased"])
        stake = float(pot_total) * float(coin["weighting"])
        pot_total += proceeds - stake
        coin["active"] = "None"
        coin["purchase_price"] = 0
        coin["coins_purchased"] = 0
        return coin, pot_total
def recordTransaction(transaction_file, date, coin, transaction_type):
    """Append one ' , '-separated transaction line to the log file."""
    fields = (str(date), coin["ticker"], transaction_type,
              str(coin["purchase_price"]), str(coin["coins_purchased"]))
    with open(transaction_file, "a") as log:
        log.write(" , ".join(fields) + "\n")
    return True
# Replace with database queries later
def savePot(coins_array, pot):
    """Rewrite the pot file with the pot total and per-coin state.

    The line format must match what loadPot() parses:
    ``<ticker>:<Active|None>:<purchase price>:<MA7>:<MA21>:<coins purchased>``
    """
    with open(pot, "w") as p:
        p.write("### Current Pot - this file edited automatically ###\n")
        p.write("### <Coin Ticker>:Active/None:<coin purchase price>:MA7:MA21:<Coins Purchased>\n")
        p.write("Total:"+str(pot_total)+"\n")
        for coin in coins_array:
            # BUG FIX: the ":" separator between MA21 and the coin count was
            # missing, so loadPot()'s 6-field split (coinInfo[5]) broke.
            p.write(coin["ticker"]+":"+coin["active"]+":"+str(coin["purchase_price"])+":"+str(coin["MA7"])+":"+str(coin["MA21"])+":"+str(coin["coins_purchased"])+"\n")
def entry():
    """Daily driver: refresh coin data, place simulated trades, save state.

    NOTE(review): several call-site fixes below are inferred from the
    surrounding functions' signatures and comments -- verify against a run.
    """
    global pot_total  # read for buys, updated by sells
    current_time = datetime.now(timezone.utc).strftime("%Y/%m/%d")  # UTC date YYYY/MM/DD
    coins = loadCoins(settings)
    # BUG FIX: `coins == ["IOError"] or ["General Error"]` was always truthy
    # (the `or` bound to the bare second list), so entry() always exited.
    if coins in (["IOError"], ["General Error"]):
        exit()
    coins = loadPot(coins, pot)
    for coin in coins:
        api_data = checkBinance(coin["ticker"], current_time, api_key, api_secret, base_coin)
        ma7, ma21 = getMovingAverages(api_data, coin["ticker"])
        if ma7 and ma21:
            ma7_above_ma21 = makeTradeDecision(ma7, ma21)
            prev_ma7_above_ma21 = makeTradeDecision(coin["MA7"], coin["MA21"])
            # BUG FIX: executeTrade was handed `api_data[4]` -- a whole kline
            # row, not a price.  Use the latest candle's close instead.
            latest_close = float(api_data[-1][4])
            if ma7_above_ma21 and not prev_ma7_above_ma21:
                # crossover and not already in a trade ... buy
                if testing: print("Buy Trade recommended for coin: "+ coin["ticker"])
                # BUG FIX: the active-state checks were inverted relative to
                # the comments, executeTrade was called without its required
                # pot_total argument, and its (coin, pot) return was not
                # unpacked.  "active" is the string "Active"/"None".
                if coin["active"] != "Active":
                    coin, pot_total = executeTrade(coin, "buy", latest_close, pot_total)
            elif prev_ma7_above_ma21 and not ma7_above_ma21:
                # crossover down and in a trade ... sell
                if testing: print("Sell Trade recommended for coin: "+ coin["ticker"])
                if coin["active"] == "Active":
                    coin, pot_total = executeTrade(coin, "sell", latest_close, pot_total)
            coin["MA7"] = ma7
            coin["MA21"] = ma21
    savePot(coins, pot)  # savePot returns None; don't clobber `coins`
    return True
|
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intern.resource.boss.resource import *
from intern.service.boss.httperrorlist import HTTPErrorList
from requests import HTTPError
from intern.service.boss import BaseVersion
from intern.service.boss.v1 import BOSS_API_VERSION
class MetadataService_1(BaseVersion):
    """v1 implementation of the Boss metadata service: CRUD operations on
    key-value metadata attached to Boss resources."""

    def __init__(self):
        BaseVersion.__init__(self)

    @property
    def version(self):
        """Return the API Version for this implementation
        """
        return BOSS_API_VERSION

    def list(self, resource, url_prefix, auth, session, send_opts):
        """List metadata keys associated with the given resource.

        Args:
            resource (intern.resource.boss.BossResource): List keys associated with this resource.
            url_prefix (string): Protocol + host such as https://api.theboss.io
            auth (string): Token to send in the request header.
            session (requests.Session): HTTP session to use for request.
            send_opts (dictionary): Additional arguments to pass to session.send().

        Returns:
            (list): List of key names.

        Raises:
            requests.HTTPError on failure.
        """
        req = self.get_metadata_request(
            resource, 'GET', 'application/json', url_prefix, auth)
        prep = session.prepare_request(req)
        resp = session.send(prep, **send_opts)
        if resp.status_code == 200:
            keys_dict = resp.json()
            return keys_dict['keys']

        err = ('List failed on {}, got HTTP response: ({}) - {}'.format(
            resource.name, resp.status_code, resp.text))
        raise HTTPError(err, request=req, response=resp)

    def create(self, resource, keys_vals, url_prefix, auth, session, send_opts):
        """Create the given key-value pairs for the given resource.

        Will attempt to create all key-value pairs even if a failure is encountered.

        Args:
            resource (intern.resource.boss.BossResource): Create keys associated with this resource.
            keys_vals (dictionary): The metadata to associate with the resource.
            url_prefix (string): Protocol + host such as https://api.theboss.io
            auth (string): Token to send in the request header.
            session (requests.Session): HTTP session to use for request.
            send_opts (dictionary): Additional arguments to pass to session.send().

        Raises:
            HTTPErrorList on failure.
        """
        success = True
        exc = HTTPErrorList('At least one key-value create failed.')

        for key, value in keys_vals.items():
            req = self.get_metadata_request(
                resource, 'POST', 'application/json', url_prefix, auth,
                key, value)
            prep = session.prepare_request(req)
            resp = session.send(prep, **send_opts)
            if resp.status_code == 201:
                continue
            err = (
                'Create failed for {}: {}:{}, got HTTP response: ({}) - {}'
                .format(resource.name, key, value, resp.status_code, resp.text))
            exc.http_errors.append(HTTPError(err, request=req, response=resp))
            success = False

        if not success:
            raise exc

    def get(self, resource, keys, url_prefix, auth, session, send_opts):
        """Get metadata key-value pairs associated with the given resource.

        Args:
            resource (intern.resource.boss.BossResource): Get key-value pairs associated with this resource.
            keys (list): Keys to retrieve.
            url_prefix (string): Protocol + host such as https://api.theboss.io
            auth (string): Token to send in the request header.
            session (requests.Session): HTTP session to use for request.
            send_opts (dictionary): Additional arguments to pass to session.send().

        Returns:
            (dictionary): The requested metadata for the given resource.

        Raises:
            HTTPErrorList on failure.
        """
        resDict = {}
        success = True
        # BUG FIX: message previously said "update failed" (copy-paste).
        exc = HTTPErrorList('At least one key-value get failed.')

        for key in keys:
            req = self.get_metadata_request(
                resource, 'GET', 'application/json', url_prefix, auth, key)
            prep = session.prepare_request(req)
            resp = session.send(prep, **send_opts)
            if resp.status_code == 200:
                resDict[key] = resp.json()['value']
            else:
                err = ('Get failed on {}, got HTTP response: ({}) - {}'.format(
                    resource.name, resp.status_code, resp.text))
                exc.http_errors.append(HTTPError(err, request=req, response=resp))
                success = False

        if not success:
            raise exc

        return resDict

    def update(self, resource, keys_vals, url_prefix, auth, session, send_opts):
        """Update the given key-value pairs for the given resource.

        Keys must already exist before they may be updated.  Will attempt to
        update all key-value pairs even if a failure is encountered.

        Args:
            resource (intern.resource.boss.BossResource): Update values associated with this resource.
            keys_vals (dictionary): The metadata to update for the resource.
            url_prefix (string): Protocol + host such as https://api.theboss.io
            auth (string): Token to send in the request header.
            session (requests.Session): HTTP session to use for request.
            send_opts (dictionary): Additional arguments to pass to session.send().

        Raises:
            HTTPErrorList on failure.
        """
        success = True
        exc = HTTPErrorList('At least one key-value update failed.')

        for key, value in keys_vals.items():
            req = self.get_metadata_request(
                resource, 'PUT', 'application/json', url_prefix, auth,
                key, value)
            prep = session.prepare_request(req)
            resp = session.send(prep, **send_opts)
            if resp.status_code == 200:
                continue
            err = (
                'Update failed for {}: {}:{}, got HTTP response: ({}) - {}'
                .format(resource.name, key, value, resp.status_code, resp.text))
            exc.http_errors.append(HTTPError(err, request=req, response=resp))
            success = False

        if not success:
            raise exc

    def delete(self, resource, keys, url_prefix, auth, session, send_opts):
        """Delete metadata key-value pairs associated with the given resource.

        Will attempt to delete all given key-value pairs even if a failure
        occurs.

        Args:
            resource (intern.resource.boss.BossResource): Delete key-value pairs associated with this resource.
            keys (list): Keys to delete.
            url_prefix (string): Protocol + host such as https://api.theboss.io
            auth (string): Token to send in the request header.
            session (requests.Session): HTTP session to use for request.
            send_opts (dictionary): Additional arguments to pass to session.send().

        Raises:
            HTTPErrorList on failure.
        """
        success = True
        # BUG FIX: message previously said "update failed" (copy-paste).
        exc = HTTPErrorList('At least one key-value delete failed.')

        for key in keys:
            req = self.get_metadata_request(
                resource, 'DELETE', 'application/json', url_prefix, auth, key)
            prep = session.prepare_request(req)
            resp = session.send(prep, **send_opts)
            if resp.status_code == 204:
                continue
            err = (
                'Delete failed for {}: {}, got HTTP response: ({}) - {}'
                .format(resource.name, key, resp.status_code, resp.text))
            exc.http_errors.append(HTTPError(err, request=req, response=resp))
            success = False

        if not success:
            raise exc
|
import pytest
import torch
from torch.autograd import gradcheck
from torch.testing import assert_allclose
import kornia.geometry.epipolar as epi
import test_common as utils
class TestEssentialFromFundamental:
    """Tests for epi.essential_from_fundamental."""

    def test_smoke(self, device, dtype):
        # Arbitrary 3x3 inputs must yield a 3x3 essential matrix.
        fundamental = torch.rand(1, 3, 3, device=device, dtype=dtype)
        intrinsics1 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        intrinsics2 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        essential = epi.essential_from_fundamental(fundamental, intrinsics1, intrinsics2)
        assert essential.shape == (1, 3, 3)

    @pytest.mark.parametrize("batch_size", [1, 2, 4, 7])
    def test_shape(self, batch_size, device, dtype):
        B: int = batch_size
        fundamental = torch.rand(B, 3, 3, device=device, dtype=dtype)
        intrinsics1 = torch.rand(B, 3, 3, device=device, dtype=dtype)
        intrinsics2 = torch.rand(1, 3, 3, device=device, dtype=dtype)  # check broadcasting
        essential = epi.essential_from_fundamental(fundamental, intrinsics1, intrinsics2)
        assert essential.shape == (B, 3, 3)

    @pytest.mark.xfail(reason="TODO: fix #685")
    def test_from_to_fundamental(self, device, dtype):
        # E -> F round trip should recover the original fundamental matrix.
        fundamental = torch.rand(1, 3, 3, device=device, dtype=dtype)
        intrinsics1 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        intrinsics2 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        essential = epi.essential_from_fundamental(fundamental, intrinsics1, intrinsics2)
        recovered = epi.fundamental_from_essential(essential, intrinsics1, intrinsics2)
        assert_allclose(fundamental, recovered, atol=1e-4, rtol=1e-4)

    def test_shape_large(self, device, dtype):
        fundamental = torch.rand(1, 2, 3, 3, device=device, dtype=dtype)
        intrinsics1 = torch.rand(1, 2, 3, 3, device=device, dtype=dtype)
        intrinsics2 = torch.rand(1, 1, 3, 3, device=device, dtype=dtype)  # check broadcasting
        essential = epi.essential_from_fundamental(fundamental, intrinsics1, intrinsics2)
        assert essential.shape == (1, 2, 3, 3)

    def test_from_fundamental(self, device, dtype):
        # Round trip on a consistent synthetic scene; compare normalized
        # matrices since F is only defined up to scale.
        scene = utils.generate_two_view_random_scene(device, dtype)
        essential = epi.essential_from_fundamental(scene['F'], scene['K1'], scene['K2'])
        recovered = epi.fundamental_from_essential(essential, scene['K1'], scene['K2'])
        assert_allclose(epi.normalize_transformation(scene['F']),
                        epi.normalize_transformation(recovered))

    def test_gradcheck(self, device):
        fundamental = torch.rand(1, 3, 3, device=device, dtype=torch.float64, requires_grad=True)
        intrinsics1 = torch.rand(1, 3, 3, device=device, dtype=torch.float64)
        intrinsics2 = torch.rand(1, 3, 3, device=device, dtype=torch.float64)
        assert gradcheck(epi.essential_from_fundamental,
                         (fundamental, intrinsics1, intrinsics2,), raise_exception=True)
class TestRelativeCameraMotion:
    """Tests for `epi.relative_camera_motion(R1, t1, R2, t2) -> (R, t)`."""
    def test_smoke(self, device, dtype):
        R1 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        t1 = torch.rand(1, 3, 1, device=device, dtype=dtype)
        R2 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        t2 = torch.rand(1, 3, 1, device=device, dtype=dtype)
        R, t = epi.relative_camera_motion(R1, t1, R2, t2)
        assert R.shape == (1, 3, 3)
        assert t.shape == (1, 3, 1)
    @pytest.mark.parametrize("batch_size", [1, 3, 5, 8, ])
    def test_shape(self, batch_size, device, dtype):
        B: int = batch_size
        R1 = torch.rand(B, 3, 3, device=device, dtype=dtype)
        t1 = torch.rand(B, 3, 1, device=device, dtype=dtype)
        R2 = torch.rand(1, 3, 3, device=device, dtype=dtype)  # check broadcasting
        t2 = torch.rand(B, 3, 1, device=device, dtype=dtype)
        R, t = epi.relative_camera_motion(R1, t1, R2, t2)
        assert R.shape == (B, 3, 3)
        assert t.shape == (B, 3, 1)
    def test_translation(self, device, dtype):
        # Identity rotations, camera 1 offset along x: the relative motion
        # is identity rotation and the negated translation.
        R1 = torch.tensor([[
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)
        t1 = torch.tensor([[[10.], [0.], [0.]]]).type_as(R1)
        R2 = epi.eye_like(3, R1)
        t2 = epi.vec_like(3, t1)
        R_expected = R1.clone()
        t_expected = -t1
        R, t = epi.relative_camera_motion(R1, t1, R2, t2)
        assert_allclose(R_expected, R)
        assert_allclose(t_expected, t)
    def test_rotate_z(self, device, dtype):
        R1 = torch.tensor([[
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)
        # Fixed: a proper 90-degree rotation about the z-axis. The previous
        # matrix ([[0,0,0],[0,0,0],[0,0,1]]) was rank-1 and not a rotation,
        # so the test exercised a degenerate input.
        R2 = torch.tensor([[
            [0., -1., 0.],
            [1., 0., 0.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)
        t1 = epi.vec_like(3, R1)
        t2 = epi.vec_like(3, R2)
        # With R1 = identity and zero translations the relative motion
        # reduces to (R2, zero vector), as in the original expectations.
        R_expected = R2.clone()
        t_expected = t1
        R, t = epi.relative_camera_motion(R1, t1, R2, t2)
        assert_allclose(R_expected, R)
        assert_allclose(t_expected, t)
    def test_gradcheck(self, device):
        # float64 inputs for numerically reliable gradient checking.
        R1 = torch.rand(1, 3, 3, device=device, dtype=torch.float64, requires_grad=True)
        R2 = torch.rand(1, 3, 3, device=device, dtype=torch.float64)
        t1 = torch.rand(1, 3, 1, device=device, dtype=torch.float64)
        t2 = torch.rand(1, 3, 1, device=device, dtype=torch.float64)
        assert gradcheck(epi.relative_camera_motion,
                         (R1, t1, R2, t2,), raise_exception=True)
class TestEssentalFromRt:
    """Tests for `epi.essential_from_Rt(R1, t1, R2, t2)`.

    NOTE(review): the class name misspells "Essential"; left unchanged so
    existing test ids and external references keep working.
    """
    def test_smoke(self, device, dtype):
        R1 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        t1 = torch.rand(1, 3, 1, device=device, dtype=dtype)
        R2 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        t2 = torch.rand(1, 3, 1, device=device, dtype=dtype)
        E_mat = epi.essential_from_Rt(R1, t1, R2, t2)
        assert E_mat.shape == (1, 3, 3)
    @pytest.mark.parametrize("batch_size", [1, 3, 5, 8, ])
    def test_shape(self, batch_size, device, dtype):
        B: int = batch_size
        R1 = torch.rand(B, 3, 3, device=device, dtype=dtype)
        t1 = torch.rand(B, 3, 1, device=device, dtype=dtype)
        R2 = torch.rand(1, 3, 3, device=device, dtype=dtype)  # check broadcasting
        t2 = torch.rand(B, 3, 1, device=device, dtype=dtype)
        E_mat = epi.essential_from_Rt(R1, t1, R2, t2)
        assert E_mat.shape == (B, 3, 3)
    @pytest.mark.xfail(reason="TODO: fix #685")
    def test_from_fundamental_Rt(self, device, dtype):
        # E computed from poses should match E computed from F (up to the
        # normalization applied below); currently xfail, see issue #685.
        scene = utils.generate_two_view_random_scene(device, dtype)
        E_from_Rt = epi.essential_from_Rt(
            scene['R1'], scene['t1'], scene['R2'], scene['t2'])
        E_from_F = epi.essential_from_fundamental(
            scene['F'], scene['K1'], scene['K2'])
        E_from_Rt_norm = epi.normalize_transformation(E_from_Rt)
        E_from_F_norm = epi.normalize_transformation(E_from_F)
        # TODO: occasionally failed with error > 0.04
        assert_allclose(E_from_Rt_norm, E_from_F_norm, rtol=1e-3, atol=1e-3)
    def test_gradcheck(self, device):
        # float64 inputs for numerically reliable gradient checking.
        R1 = torch.rand(1, 3, 3, device=device, dtype=torch.float64, requires_grad=True)
        R2 = torch.rand(1, 3, 3, device=device, dtype=torch.float64)
        t1 = torch.rand(1, 3, 1, device=device, dtype=torch.float64)
        t2 = torch.rand(1, 3, 1, device=device, dtype=torch.float64)
        assert gradcheck(epi.essential_from_Rt,
                         (R1, t1, R2, t2,), raise_exception=True)
class TestDecomposeEssentialMatrix:
    """Tests for `epi.decompose_essential_matrix(E) -> (R1, R2, t)`."""
    def test_smoke(self, device, dtype):
        E_mat = torch.rand(1, 3, 3, device=device, dtype=dtype)
        R1, R2, t = epi.decompose_essential_matrix(E_mat)
        assert R1.shape == (1, 3, 3)
        assert R2.shape == (1, 3, 3)
        assert t.shape == (1, 3, 1)
    @pytest.mark.parametrize("batch_shape", [
        (1, 3, 3), (2, 3, 3), (2, 1, 3, 3), (3, 2, 1, 3, 3),
    ])
    def test_shape(self, batch_shape, device, dtype):
        # Arbitrary leading batch dims must pass through; t drops the last
        # matrix column dimension to 1.
        E_mat = torch.rand(batch_shape, device=device, dtype=dtype)
        R1, R2, t = epi.decompose_essential_matrix(E_mat)
        assert R1.shape == batch_shape
        assert R2.shape == batch_shape
        assert t.shape == batch_shape[:-1] + (1,)
    def test_gradcheck(self, device):
        E_mat = torch.rand(1, 3, 3, device=device, dtype=torch.float64, requires_grad=True)
        # gradcheck needs a single-output callable, so check each of the
        # three outputs separately.
        def eval_rot1(input):
            return epi.decompose_essential_matrix(input)[0]
        def eval_rot2(input):
            return epi.decompose_essential_matrix(input)[1]
        def eval_vec(input):
            return epi.decompose_essential_matrix(input)[2]
        assert gradcheck(eval_rot1, (E_mat,), raise_exception=True)
        assert gradcheck(eval_rot2, (E_mat,), raise_exception=True)
        assert gradcheck(eval_vec, (E_mat,), raise_exception=True)
class TestMotionFromEssential:
    """Tests for `epi.motion_from_essential(E)`, which returns the four
    candidate (R, t) motions stacked on a new dimension of size 4."""
    def test_smoke(self, device, dtype):
        E_mat = torch.rand(1, 3, 3, device=device, dtype=dtype)
        Rs, Ts = epi.motion_from_essential(E_mat)
        assert Rs.shape == (1, 4, 3, 3)
        assert Ts.shape == (1, 4, 3, 1)
    @pytest.mark.parametrize("batch_shape", [
        (1, 3, 3), (2, 3, 3), (2, 1, 3, 3), (3, 2, 1, 3, 3),
    ])
    def test_shape(self, batch_shape, device, dtype):
        E_mat = torch.rand(batch_shape, device=device, dtype=dtype)
        Rs, Ts = epi.motion_from_essential(E_mat)
        # The candidate dimension (4) replaces the trailing matrix dims.
        assert Rs.shape == batch_shape[:-2] + (4, 3, 3)
        assert Ts.shape == batch_shape[:-2] + (4, 3, 1)
    def test_two_view(self, device, dtype):
        # The true relative motion must appear among the four candidates.
        scene = utils.generate_two_view_random_scene(device, dtype)
        R1, t1 = scene['R1'], scene['t1']
        R2, t2 = scene['R2'], scene['t2']
        E_mat = epi.essential_from_Rt(R1, t1, R2, t2)
        R, t = epi.relative_camera_motion(R1, t1, R2, t2)
        t = torch.nn.functional.normalize(t, dim=1)
        Rs, ts = epi.motion_from_essential(E_mat)
        rot_error = (Rs - R).abs().sum((-2, -1))
        vec_error = (ts - t).abs().sum((-1))
        rtol: float = 1e-4
        assert (rot_error < rtol).any() & (vec_error < rtol).any()
    def test_gradcheck(self, device):
        E_mat = torch.rand(1, 3, 3, device=device, dtype=torch.float64, requires_grad=True)
        # gradcheck needs a single-output callable per output.
        def eval_rot(input):
            return epi.motion_from_essential(input)[0]
        def eval_vec(input):
            return epi.motion_from_essential(input)[1]
        assert gradcheck(eval_rot, (E_mat,), raise_exception=True)
        assert gradcheck(eval_vec, (E_mat,), raise_exception=True)
class TestMotionFromEssentialChooseSolution:
    """Tests for `epi.motion_from_essential_choose_solution`, which picks the
    single (R, t) candidate supported by point correspondences and also
    returns the triangulated points X."""
    def test_smoke(self, device, dtype):
        E_mat = torch.rand(1, 3, 3, device=device, dtype=dtype)
        K1 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        K2 = torch.rand(1, 3, 3, device=device, dtype=dtype)
        x1 = torch.rand(1, 1, 2, device=device, dtype=dtype)
        x2 = torch.rand(1, 1, 2, device=device, dtype=dtype)
        R, t, X = epi.motion_from_essential_choose_solution(E_mat, K1, K2, x1, x2)
        assert R.shape == (1, 3, 3)
        assert t.shape == (1, 3, 1)
        assert X.shape == (1, 1, 3)
    @pytest.mark.parametrize("batch_size, num_points", [
        (1, 3), (2, 3), (2, 8), (3, 2),
    ])
    def test_shape(self, batch_size, num_points, device, dtype):
        B, N = batch_size, num_points
        E_mat = torch.rand(B, 3, 3, device=device, dtype=dtype)
        K1 = torch.rand(B, 3, 3, device=device, dtype=dtype)
        K2 = torch.rand(1, 3, 3, device=device, dtype=dtype)  # check for broadcasting
        x1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        x2 = torch.rand(B, 1, 2, device=device, dtype=dtype)  # check for broadcasting
        R, t, X = epi.motion_from_essential_choose_solution(E_mat, K1, K2, x1, x2)
        assert R.shape == (B, 3, 3)
        assert t.shape == (B, 3, 1)
        assert X.shape == (B, N, 3)
    def test_masking(self, device, dtype):
        # Masking out the first/last point must be equivalent to slicing
        # those points away before the call.
        E_mat = torch.rand(2, 3, 3, device=device, dtype=dtype)
        K1 = torch.rand(2, 3, 3, device=device, dtype=dtype)
        K2 = torch.rand(2, 3, 3, device=device, dtype=dtype)
        x1 = torch.rand(2, 10, 2, device=device, dtype=dtype)
        x2 = torch.rand(2, 10, 2, device=device, dtype=dtype)
        R, t, X = epi.motion_from_essential_choose_solution(E_mat, K1, K2, x1[:, 1:-1, :], x2[:, 1:-1, :])
        mask = torch.zeros(2, 10, dtype=torch.bool, device=device)
        mask[:, 1:-1] = True
        Rm, tm, Xm = epi.motion_from_essential_choose_solution(E_mat, K1, K2, x1, x2, mask=mask)
        assert_allclose(R, Rm)
        assert_allclose(t, tm)
        assert_allclose(X, Xm[:, 1:-1, :])
    @pytest.mark.parametrize("num_points", [10, 15, 20])
    def test_unbatched(self, num_points, device, dtype):
        # Same contract without a batch dimension on any input.
        N = num_points
        E_mat = torch.rand(3, 3, device=device, dtype=dtype)
        K1 = torch.rand(3, 3, device=device, dtype=dtype)
        K2 = torch.rand(3, 3, device=device, dtype=dtype)
        x1 = torch.rand(N, 2, device=device, dtype=dtype)
        x2 = torch.rand(N, 2, device=device, dtype=dtype)
        R, t, X = epi.motion_from_essential_choose_solution(E_mat, K1, K2, x1[1:-1, :], x2[1:-1, :])
        assert R.shape == (3, 3)
        assert t.shape == (3, 1)
        assert X.shape == (N - 2, 3)
        mask = torch.zeros(N, dtype=torch.bool, device=device)
        mask[1:-1] = True
        Rm, tm, Xm = epi.motion_from_essential_choose_solution(E_mat, K1, K2, x1, x2, mask=mask)
        assert_allclose(R, Rm)
        assert_allclose(t, tm)
        assert_allclose(X, Xm[1:-1, :])
    def test_two_view(self, device, dtype):
        # On a consistent scene the chosen solution must match the true
        # relative motion (t compared after normalization to unit length).
        scene = utils.generate_two_view_random_scene(device, dtype)
        E_mat = epi.essential_from_Rt(
            scene['R1'], scene['t1'], scene['R2'], scene['t2'])
        R, t = epi.relative_camera_motion(
            scene['R1'], scene['t1'], scene['R2'], scene['t2'])
        t = torch.nn.functional.normalize(t, dim=1)
        R_hat, t_hat, X_hat = epi.motion_from_essential_choose_solution(
            E_mat, scene['K1'], scene['K2'], scene['x1'], scene['x2'])
        assert_allclose(t, t_hat)
        assert_allclose(R, R_hat, rtol=1e-4, atol=1e-4)
    def test_gradcheck(self, device):
        E_mat = torch.rand(1, 3, 3, device=device, dtype=torch.float64, requires_grad=True)
        K1 = torch.rand(1, 3, 3, device=device, dtype=torch.float64)
        K2 = torch.rand(1, 3, 3, device=device, dtype=torch.float64)
        x1 = torch.rand(1, 2, 2, device=device, dtype=torch.float64)
        x2 = torch.rand(1, 2, 2, device=device, dtype=torch.float64)
        assert gradcheck(epi.motion_from_essential_choose_solution,
                         (E_mat, K1, K2, x1, x2,), raise_exception=True)
|
import unittest
import rpy3.rinterface as rinterface
import rpy3.rlike.container as rlc
# The embedded R engine must be initialised once before any other
# rinterface call in this module.
rinterface.initr()
class SexpClosureTestCase(unittest.TestCase):
    """Tests for calling R closures (functions) through rinterface.

    NOTE(review): Python 2 only -- uses `long()` and the deprecated
    `assertEquals` alias.
    """
    def setUp(self):
        # Silence R console output for the duration of each test.
        self.console = rinterface.get_writeconsole()
        def noconsole(x):
            pass
        rinterface.set_writeconsole(noconsole)
    def tearDown(self):
        # Restore the original console writer.
        rinterface.set_writeconsole(self.console)
    def testNew(self):
        # A plain Python string is not a valid closure source object.
        x = "a"
        self.assertRaises(ValueError, rinterface.SexpClosure, x)
    def testTypeof(self):
        sexp = rinterface.globalenv.get("plot")
        self.assertEquals(sexp.typeof, rinterface.CLOSXP)
    def testRError(self):
        # sum() over a character vector raises an R-side error.
        sum = rinterface.baseenv["sum"]
        letters = rinterface.baseenv["letters"]
        self.assertRaises(rinterface.RRuntimeError, sum, letters)
    def testClosureenv(self):
        # `y` is a free variable of the closure; defining it in the closure
        # environment changes which element the function selects.
        parse = rinterface.baseenv["parse"]
        exp = parse(text = rinterface.SexpVector(["function(x) { x[y] }", ],
                                                 rinterface.STRSXP))
        fun = rinterface.baseenv["eval"](exp)
        vec = rinterface.baseenv["letters"]
        self.assertRaises(rinterface.RRuntimeError, fun, vec)
        fun.closureenv()["y"] = rinterface.SexpVector([1, ],
                                                      rinterface.INTSXP)
        self.assertEquals('a', fun(vec)[0])
        fun.closureenv()["y"] = rinterface.SexpVector([2, ],
                                                      rinterface.INTSXP)
        self.assertEquals('b', fun(vec)[0])
    def testCallS4SetClass(self):
        # R's package "methods" can perform uncommon operations
        r_setClass = rinterface.globalenv.get('setClass')
        r_representation = rinterface.globalenv.get('representation')
        attrnumeric = rinterface.SexpVector(["numeric", ],
                                            rinterface.STRSXP)
        classname = rinterface.SexpVector(['Track', ], rinterface.STRSXP)
        classrepr = r_representation(x = attrnumeric,
                                     y = attrnumeric)
        r_setClass(classname,
                   classrepr)
    def testRcallOrdDict(self):
        # A None key becomes an unnamed argument in the R call, hence the
        # empty string in the expected names below.
        ad = rlc.OrdDict((('a', rinterface.SexpVector([2, ],
                                                      rinterface.INTSXP)),
                          ('b', rinterface.SexpVector([1, ],
                                                      rinterface.INTSXP)),
                          (None, rinterface.SexpVector([5, ],
                                                       rinterface.INTSXP)),
                          ('c', rinterface.SexpVector([0, ],
                                                      rinterface.INTSXP))))
        mylist = rinterface.baseenv['list'].rcall(ad.items(),
                                                  rinterface.globalenv)
        names = [x for x in mylist.do_slot("names")]
        for i in range(4):
            self.assertEquals(('a', 'b', '', 'c')[i], names[i])
    def testRcallOrdDictEnv(self):
        # The same parsed expression evaluates differently depending on the
        # environment passed to rcall.
        def parse(x):
            rparse = rinterface.baseenv.get('parse')
            res = rparse(text = rinterface.StrSexpVector((x,)))
            return res
        ad = rlc.OrdDict( ((None, parse('sum(x)')),) )
        env_a = rinterface.baseenv['new.env']()
        env_a['x'] = rinterface.IntSexpVector([1,2,3])
        sum_a = rinterface.baseenv['eval'].rcall(ad.items(),
                                                 env_a)
        self.assertEquals(6, sum_a[0])
        env_b = rinterface.baseenv['new.env']()
        env_b['x'] = rinterface.IntSexpVector([4,5,6])
        sum_b = rinterface.baseenv['eval'].rcall(ad.items(),
                                                 env_b)
        self.assertEquals(15, sum_b[0])
    def testErrorInCall(self):
        mylist = rinterface.baseenv['list']
        self.assertRaises(ValueError, mylist, 'foo')
    def testMissingArg(self):
        # rinterface.MissingArg maps to R's missing-argument sentinel.
        parse = rinterface.baseenv["parse"]
        exp = parse(text=rinterface.SexpVector(["function(x) { missing(x) }"],
                                               rinterface.STRSXP))
        fun = rinterface.baseenv["eval"](exp)
        nonmissing = rinterface.SexpVector([0, ], rinterface.INTSXP)
        missing = rinterface.MissingArg
        self.assertEquals(False, fun(nonmissing)[0])
        self.assertEquals(True, fun(missing)[0])
    def testScalarConvertInteger(self):
        self.assertEquals('integer',
                          rinterface.baseenv["typeof"](1)[0])
    def testScalarConvertLong(self):
        # Python 2 `long` also converts to an R integer.
        self.assertEquals('integer',
                          rinterface.baseenv["typeof"](long(1))[0])
    def testScalarConvertDouble(self):
        self.assertEquals('double',
                          rinterface.baseenv["typeof"](1.0)[0])
    def testScalarConvertBoolean(self):
        self.assertEquals('logical',
                          rinterface.baseenv["typeof"](True)[0])
def suite():
    """Build the unittest suite containing all SexpClosure test cases."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(SexpClosureTestCase)
if __name__ == '__main__':
    # Run the suite with verbose, per-test output.
    tr = unittest.TextTestRunner(verbosity = 2)
    tr.run(suite())
|
from flask_pymongo import PyMongo
# Fixed: `flash` was imported twice in the original import list.
from flask import Flask, flash, render_template, redirect, request, url_for, \
    session, Markup
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
import os
from os import path
# Local development: env.py sets MONGO_URI/IP/PORT; it is absent in production.
if path.exists("env.py"):
    import env
app = Flask(__name__)
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
# NOTE(review): a fresh random secret per process invalidates every session on
# restart and breaks multi-worker deployments; prefer reading a fixed value
# from the environment (e.g. os.environ.get("SECRET_KEY")).
app.config['SECRET_KEY'] = os.urandom(32)
mongo = PyMongo(app)
# Taking the books and comments table and display the data on home page
@app.route('/')
def index():
    """Render the home page with every stored book and comment."""
    all_books = [book for book in mongo.db.bookInfo.find()]
    all_comments = [comment for comment in mongo.db.comments.find()]
    return render_template('index.html', books=all_books,
                           comments=all_comments)
# Register Page
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "POST":
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
# If the username already exist a flash message will notify the user
if existing_user:
flash("Username already taken")
return redirect(url_for("register"))
# Take user's username and password and save in database
register = {
"username": request.form.get("username").lower(),
"password": generate_password_hash(request.form.get("password"))
}
mongo.db.users.insert_one(register)
# Save user's details into the session cookie
session["user"] = request.form.get("username").lower()
flash("Registration sucessfull")
return redirect(url_for("index", username=session["user"]))
return render_template("register.html")
# Login Page
@app.route("/login", methods=['GET', 'POST'])
def login():
if request.method == "POST":
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
# Check if the password matches with the password
# from the database for that user
if check_password_hash(
existing_user["password"], request.form.get("password")):
session["user"] = request.form.get("username").lower()
flash("Nice to see you, {}!".format(
request.form.get("username")))
return redirect(url_for(
"index", username=session["user"]))
else:
# If the password is not valid a flash message
# will inform the user of invalid credentials
flash("Incorrect credentials.")
return redirect(url_for("login"))
else:
# If the username is not registered a flash message will inform the
# user of invalid credentials
flash("Incorrect credentials")
return redirect(url_for("login"))
return render_template('login.html')
# User Profile Page
@app.route("/profile/<username>", methods=['GET', 'POST'])
def profile(username):
# Get the session user's username from the database
username = mongo.db.users.find_one(
{"username": session["user"]})["username"]
# Display user's username on the page
if session["user"]:
return render_template("profile.html", username=username)
return redirect(url_for("profile"))
# Logout
@app.route("/logout")
def logout():
# Remove user from current session cookie
flash("You have been logged out. See you soon!")
session.pop("user")
return redirect(url_for("login"))
# Delete profile
@app.route("/delete-profile/<user_id>", methods=["GET", "POST"])
def delete_profile(user_id):
# Take the session user username and removes from database
mongo.db.users.remove({"username": session["user"]})
# Clears the cache after the username has been deleted
session.clear()
flash("Your profile has been deleted.")
return redirect(url_for("index"))
# Add comment to the books
@app.route("/add-comment/<book_id>", methods=["GET", "POST"])
def add_comment(book_id):
# Get the id of the book for which we want to comment
book = mongo.db.bookInfo.find_one({"_id": ObjectId(book_id)})
if request.method == "POST":
# New comment is saved in the correct format for the comments table
new_comment = {
"title": book["title"],
"comment": request.form.get("comment"),
"username": session["user"]
}
# New comment is added to the comments table
mongo.db.comments.insert_one(new_comment)
flash("Comment added")
return redirect(url_for("index"))
return render_template("add-comment.html", book=book)
# Delete comment
@app.route("/delete-comment/<comment_id>", methods=["GET", "POST"])
def delete_comment(comment_id):
# Remove the comment by using the comment id
mongo.db.comments.remove({"_id": ObjectId(comment_id)})
flash("Comment deleted")
return redirect(url_for("index"))
# Update comment
@app.route("/update-comment/<comment_id>", methods=["GET", "POST"])
def update_comment(comment_id):
comments = mongo.db.comments.find_one({"_id": ObjectId(comment_id)})
if request.method == "POST":
# The comment found by id is updated with the new comment
mongo.db.comments.update({'_id': ObjectId(comment_id)}, {
"title": comments["title"],
"comment": request.form.get("comment"),
"username": session["user"]
})
flash("Comment updated")
return redirect(url_for("index"))
return render_template("update-comment.html", comments=comments)
if __name__ == '__main__':
    # Host and port come from the environment (set by env.py locally).
    app.run(host=os.environ.get('IP'),
            port=int(os.environ.get('PORT')),
            debug=False)
|
<filename>src/fattoush/driver/sauce.py
# (c) 2014 Mind Candy Ltd. All Rights Reserved.
# Licensed under the MIT License; you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://opensource.org/licenses/MIT.
"""
Interface between Sauce Labs and lettuce driver
"""
from abc import ABCMeta, abstractmethod
import base64
import hmac
import json
import urllib2
class SauceInterface(object):
    """Abstract contract for talking to Sauce Labs (or a local stand-in)."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def set_session_configuration(self, **conf):
        # Push job configuration key-values (e.g. passed=False) for the session.
        pass
    @abstractmethod
    def fail_session(self):
        # Mark the current Sauce job as failed.
        pass
    @abstractmethod
    def add_file_to_storage(self, binary_string, server_path,
                            overwrite=False):
        # Upload a file to Sauce temporary storage and return its
        # sauce-storage identifier.
        return "sauce:storage:NAME"
class AbstractSauceBase(SauceInterface):
    """Shared plumbing for Sauce interfaces: credentials, auth token,
    public job URL, and the REST helpers built on the abstract request()."""
    __metaclass__ = ABCMeta
    # Template for the session line CI plugins scan for in build output.
    session_text_template = "SauceOnDemandSessionID={0} job-name={1}"
    public_url_template = "https://saucelabs.com/jobs/{0}?auth={1}"
    # Placeholder credentials; subclasses (see Sauce) override from config.
    user = "saucelabs_username"
    key = "saucelabs_api_key"
    default_headers = {}
    def __init__(self, config, browser):
        """
        :type config: fattoush.config.FattoushConfig
        :type browser: fattoush.driver.driver.Driver
        """
        # Bind copy of class defaults to instance
        self.default_headers = self.default_headers.copy()
        self.wd_hub = config.command_executor
        self.session_name = config.name
        self.browser = browser
        self.job_id = browser.session_id
        self.combined_key = "{0}:{1}".format(self.user, self.key)
        # HMAC of the job id keyed with "user:key" authorises the public job
        # URL (Python 2 hmac accepts str keys; digest defaults to MD5).
        self.token = hmac.new(self.combined_key,
                              self.job_id).hexdigest()
        self.public_url = self.public_url_template.format(self.job_id,
                                                          self.token)
    @abstractmethod
    def request(self, endpoint, method='GET', body=None,
                extra_headers=None):
        # Perform one REST call against Sauce; implemented by subclasses.
        return "Response"
    def set_session_configuration(self, **conf):
        # PUT the job configuration to the Sauce REST API.
        body_content = json.dumps(conf)
        endpoint = '/rest/v1/{0}/jobs/{1}'.format(self.user,
                                                  self.job_id)
        self.request(endpoint, 'PUT', body_content)
    def fail_session(self):
        # A failed session is just a configuration update with passed=False.
        self.set_session_configuration(passed=False)
    def add_file_to_storage(self, binary_string, server_path,
                            overwrite=False):
        # ('false', 'true')[overwrite] renders the bool as a lowercase string.
        endpoint = '/rest/v1/storage/%s/%s?overwrite=%s' % (
            self.user.strip('/'),
            server_path.strip('/'),
            ('false', 'true')[overwrite])
        ret = self.request(endpoint=endpoint, method='POST',
                           body=binary_string)
        # Returns both the sauce-storage path and the raw response.
        return "sauce-storage:%s" % server_path, ret
    @property
    def scenario_details(self):
        """Identifiers useful when logging/reporting a scenario."""
        return {
            "user": self.user,
            "job_id": self.job_id,
            "session_name": self.session_name,
            "key": self.combined_key,
            "token": self.token,
            "public_url": self.public_url,
        }
class Local(AbstractSauceBase):
    """
    Local Interface for testing
    """
    # The pass-through __init__ of the original added nothing over the base
    # class constructor and has been dropped.
    def request(self, endpoint, method='GET', body=None,
                extra_headers=None):
        # Instead of talking to Sauce Labs, describe what would be sent.
        message = ("Would [{0}]{1} with body of {2} (extra-headers={3})"
                   .format(method, endpoint, body, extra_headers))
        print (message)
class Sauce(AbstractSauceBase):
    """
    Connect to real Saucelabs infrastructure
    """
    def __init__(self, config, browser):
        # Real credentials from the run configuration replace the class-level
        # placeholders before the base __init__ derives the combined key and
        # auth token from them.
        self.user = config.server["user"]
        self.key = config.server["key"]
        self._url_ = "http://saucelabs.com/{0}"
        super(Sauce, self).__init__(config, browser)
        self.session_text = self.session_text_template.format(
            self.job_id, self.session_name)
        # Python 2: base64.encodestring appends a trailing newline, which
        # [:-1] strips before the header is built.
        self.b64_key = base64.encodestring(self.combined_key)[:-1]
        self.default_headers["Authorization"] = "Basic {0}".format(
            self.b64_key)
        # Python 2 print statements: emit the session line for CI plugins
        # and the public job URL for humans.
        print self.session_text
        print self.public_url
    def _url(self, endpoint):
        # Join an endpoint path onto the Sauce base URL.
        return self._url_.format(endpoint.lstrip('/'))
    def _headers(self, extra_headers):
        # Merge per-request headers over the instance defaults.
        headers = self.default_headers.copy()
        if extra_headers is not None:
            headers.update(extra_headers)
        return headers
    def request(self, endpoint, method='GET', body=None,
                extra_headers=None):
        """Perform an authenticated HTTP request against Sauce Labs."""
        request = urllib2.Request(url=self._url(endpoint),
                                  data=body,
                                  headers=self._headers(extra_headers))
        # urllib2 only issues GET/POST natively; overriding get_method lets
        # PUT and DELETE through as well.
        request.get_method = lambda: method
        return urllib2.urlopen(request)
|
<reponame>HARDROCO/WHO-MAIL-ME<filename>src/WMM_visual.py
# ##### Coding: utf-8 ####
# See README.md for information and usage about the project
# Copyright (c) 2020 HARDROCO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT A short and simple permissive license with conditions
# only requiring preservation of copyright and license notices.
# Licensed works, modifications, and larger works may be distributed
# under different terms and without source code.
# See the MIT license in license.txt file or in
# https://choosealicense.com/licenses/mit/
#
# =======================================================================
# import libraries
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import mysql.connector as sql
import sys
print('Starting program...')
print('loading lybraries --> OK')
# SQL CONNECTION
# NOTE(review): connection settings are hard-coded placeholders; consider
# reading host/user/password from the environment instead of editing source.
conn = sql.connect(
    host="your host",
    user="your user",
    password="<PASSWORD>",
    database="mail_mbox"
)
cur = conn.cursor() # buffered = True # emergency case to bypass error in cursor, but it s better to limit 1 fetchone command
print('Connecting to SQL --> OK\n')
print('''>>> ¡WARNING! This program run under the MIT License, please read about it
in the license file attached to the project.. <<<''')
# HEADER PROGRAM
print('''\n==================================================\n
WHO MAIL ME - Vizualization program
Created by HARDROCO\n
==================================================''')
print('''This program will plot graphics from the mail's Database
created with the gathering program. The graphics will be
created in the next order:\n
1. Top Mail received from each Organization in your mail box
2. Mail received each the year by a specific Organization
3. Mail received each the month by a specific Organization
4. Mail received each the day by a specific Organization\n''')
# CONTINUE OR QUIT THE PROGRAM
print('Do you want to start the program?\n')
user_op = (input('Select Enter to continue or N to quit :')).upper()
option_n = 'N'
if user_op == option_n:
    print('Program finished, Good bye!')
    sys.exit()
else:
    print('Running program...\n')
# -------------------------------------------------------------------
print('''---GRAPHIC 1---\n
1. Top Mail received from each Organization in your mail box\n''')
# DATAFRAME 1
comand = "SELECT counts_id, Org, count FROM Counts ORDER BY count DESC"
df = pd.read_sql(comand, conn)
#print('df1 created --> ok\n')
# TOP ORG
print('>>> Choose how many organizations you want to chart:\n')
# data frame lenght and control vizulization
first_value = 1
last_value = len(df['counts_id'])
top_value = int(
    input(f'Enter a number ({first_value} - {last_value}): '))
df_top_show = df.head(top_value)
#print('control variables --> OK')
def graph_barplot_mbox(g_x, g_y, g_title, x_lb, y_lb, dis, pltt):
    """Draw a seaborn bar plot with styled title/labels and value labels.

    Args:
        g_x, g_y: x categories and y values for the bars.
        g_title: plot title.
        x_lb, y_lb: axis labels.
        dis: vertical offset of the value label above each bar.
        pltt: seaborn palette name.
    Blocks on plt.show() until the window is closed.
    """
    print('loading graph...')
    sns.set(style="darkgrid")
    plt.figure(figsize=(15, 12))
    ax = sns.barplot(g_x, g_y, palette=pltt)
    # Slanted tick labels so long organization names stay readable.
    plt.xticks(
        rotation=45,
        horizontalalignment='right',
        fontweight='light',
        fontsize='11'
    )
    plt.title(g_title,
              fontweight='bold',
              fontsize='x-large',
              color='orange'
              )
    plt.subplots_adjust(top=0.7)
    plt.xlabel(x_lb, fontweight='bold', color='orange',
               fontsize='17', horizontalalignment='center')
    plt.ylabel(y_lb, fontweight='bold', color='orange',
               fontsize='17', horizontalalignment='center')
    # data on bars
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2.,
                height + dis,
                '{:1.0f}'.format(height),
                ha="center")
    return plt.show()
# COUNTPLOT GRAPH
def graph_countplot_mbox(g_x, g_title, x_lb, y_lb, dis, pltt):
    """Draw a seaborn count plot with styled title/labels and value labels.

    Args:
        g_x: series whose value counts are plotted.
        g_title: plot title.
        x_lb, y_lb: axis labels.
        dis: vertical offset of the value label above each bar.
        pltt: seaborn palette name.
    Blocks on plt.show() until the window is closed.
    """
    print('loading graph...')
    sns.set(style="darkgrid")
    plt.figure(figsize=(15, 12))
    ax = sns.countplot(g_x, palette=pltt)
    plt.xticks(
        horizontalalignment='right',
        fontweight='light',
        fontsize='11'
    )
    plt.title(g_title,
              fontweight='bold',
              fontsize='x-large',
              color='orange'
              )
    plt.subplots_adjust(top=0.7)
    plt.xlabel(x_lb, fontweight='bold', color='orange',
               fontsize='17', horizontalalignment='center')
    plt.ylabel(y_lb, fontweight='bold', color='orange',
               fontsize='17', horizontalalignment='center')
    # data on bars
    for p in ax.patches:
        height = p.get_height()
        ax.text(p.get_x()+p.get_width()/2.,
                height + dis,
                # Fixed for consistency with graph_barplot_mbox: format the
                # count as an integer ("5") instead of a raw float ("5.0").
                '{:1.0f}'.format(height),
                ha="center")
    return plt.show()
#print('functions load --> OK ')
# GRAPH 1 - FULL HISTORY
print(f'\nTop {top_value} Organizations:\n\n', df_top_show, '\n')
# graph axis
grap_x, grap_y = df_top_show['Org'], df_top_show['count']
grap_title = f'''Top {top_value} Organizations' emails received
Org vs Count'''
# graphic parameter
x_label = 'Organization'
y_label = 'Count'
dis_lbl = 7
palette = "deep"
graph_barplot_mbox(grap_x, grap_y, grap_title,
                   x_label, y_label, dis_lbl, palette)
# HEADER GRAPH 2
print(f'''\n---GRAPHIC 2---\n
2. Mail received each year by a specific Organization\n''')
print(f'''>>> Choose an Org's ID from the "top {top_value} Org" showed
before or choose a number from the range below to show the
mails received from it.
Range ( {first_value} - {last_value} )\n ''')
# VIZUALIZATION DATA
mail_org = int(input(f'Org ID: '))
org_obj = df[df['counts_id'] == mail_org]['Org']
org_clean = org_obj.iloc[0]
print(f'Org Selected --> {org_clean}\n')
# DATAFRAME 2
# Parameterised query: the org id is passed via params, not interpolated.
comand_2 = '''SELECT Counts.Org,mails.day, mails.month, mails.year
FROM counts JOIN mails ON Counts.counts_id = mails.counts_id
WHERE mails.counts_id = %s ORDER BY year DESC;'''
df_2 = pd.read_sql(comand_2, conn, params=(str(mail_org),))
#print('Dataframe 2 created --> OK')
# DATES - GRAPH 2
column_y_m = ['year', 'month']
group_y_m = df_2.groupby(column_y_m)
column_y = 'year'
group_y = df_2.groupby(column_y)
#print('DataFrame grouped --> OK\n')
# GRAPH 2 - YEAR
print(f'''\n{org_clean} YEAR GRAPH\n''')
# graph axis
grap_x_y = df_2['year']
# graphic parameter
grap_title_y = f'''Mail received by year from {org_clean}
Year vs Count'''
x_label_y = 'Year'
y_label_y = 'Count'
dis_lbl_y = 7
palette_y = 'BrBG'
total_count_y = df_2['Org'].count()
graph_countplot_mbox(grap_x_y, grap_title_y, x_label_y,
                     y_label_y, dis_lbl_y, palette_y)
print(
    f'\nTotal mail received by Year from {org_clean} = ', total_count_y, '\n')
# HEADER GRAPH 3
print(f'''---GRAPHIC 3---\n
3. Mail received each month from {org_clean}\n''')
print(f'''>>> Choose a especific date to see the mails
received from {org_clean}\n
Years available:''')
# DATES - GRAPH 3
# show years
yea_cl = list()
for name_group, group in group_y:
    yea_cl.append(name_group)
print(yea_cl)
año = int(input('Year to consult: '))
year_g = group_y.get_group(año)
print(f'''\nMonths available in {año}:''')
# show months
mon_cl = list()
for mon in year_g['month']:
    if mon in mon_cl:
        continue
    else:
        mon_cl.append(mon)
print(mon_cl)
month = int(input('Month to consult: '))
month_g = group_y_m.get_group((año, month))
# GRAPH 3 - MONTH
print(f'''\n{org_clean} YEAR {año} - MONTH GRAPH\n''')
# graphic axis
grap_x_m = year_g['month']
# graphic parameter
grap_title_m = f'''Mail received by month from {org_clean} - {año}
month vs Count'''
x_label_m = 'month'
y_label_m = 'Count'
dis_lbl_m = 0.5
palette_m = 'BuPu'
total_count_m = year_g['month'].count()
graph_countplot_mbox(grap_x_m, grap_title_m, x_label_m,
                     y_label_m, dis_lbl_m, palette_m)
print(
    f'\nTotal Mail received by month for {org_clean} = ', total_count_m, '\n')
# HEADER GRAPH 4
print(f'''---GRAPHIC 4---\n
4. Mail received each day from {org_clean}\n''')
# GRAPH 4 - day
print(f'''{org_clean} YEAR {año} - MONTH {month} - DAY GRAPH\n''')
# graphic axis
grap_x_d = month_g['day']
# graphic parameter
grap_title_d = f'''Mail received by Day from {org_clean} - {año}/{month}
Day vs Count'''
x_label_d = 'Day'
y_label_d = 'Count'
dis_lbl_d = 0.02
palette_d = 'CMRmap'
total_count_d = month_g['day'].count()
graph_countplot_mbox(grap_x_d, grap_title_d, x_label_d,
                     y_label_d, dis_lbl_d, palette_d)
print(f'\nTotal Mail received by day from {org_clean} = ', total_count_d, '\n')
# NOTE(review): only the cursor is closed; consider conn.close() as well.
cur.close()
# END PROGRAM
print('Program finished.')
print(f'''\n==================================================\n
WHO MAIL ME - Vizualization program
Created by HARDROCO\n
==================================================''')
|
from datetime import datetime
from re import sub
from decimal import Decimal
from json_handler import JsonHandler
import writers
# League metadata loaded once at import time from the project's JSON store.
jh = JsonHandler()
LEAGUES_DATA = jh.load_leagues()
# Short alias for the datetime class; the local `datetime` function defined
# below shadows the imported name, so module code uses `dt` instead.
dt = datetime
def league_id_to_league_name(league_id):
    """Return the full league name for *league_id*, or '' if it is unknown.

    Each entry in LEAGUES_DATA maps an abbreviation to [ids, name]; the id
    list is the first value and the display name the second.
    """
    for entry in LEAGUES_DATA:
        values = list(entry.values())
        ids, name = values[0], values[1]
        if int(league_id) in ids:
            return name
    return ''
def league_id_to_league_abbreviation(league_id):
    """Return the league abbreviation for *league_id*, or '' if unknown."""
    for entry in LEAGUES_DATA:
        abbreviation = next(iter(entry.keys()))
        ids = next(iter(entry.values()))
        if int(league_id) in ids:
            return abbreviation
    return ''
def format_date(date_format):
    """Prefix each component of *date_format* with '%' (strptime style).

    E.g. 'd-m-Y' becomes '%d-%m-%Y'. The separator is detected in priority
    order '-', '/', '.', falling back to a space.
    """
    for candidate in ('-', '/', '.'):
        if candidate in date_format:
            splitter = candidate
            break
    else:
        splitter = ' '
    return splitter.join('%' + part for part in date_format.split(splitter))
def datetime(datetime_str, date_format):
    """Convert an API datetime string to the user's local display format.

    The year is omitted when it is the current year. BUGFIX: the original
    only rstripped '-', so '/', '.' or space separated formats kept a
    dangling trailing separator after the year was removed.
    """
    parsed = dt.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
    if parsed.year == dt.now().year:
        # Drop the year directive and any separator it leaves dangling.
        date_format = date_format.replace("%Y", '').replace("%y", '').rstrip("-/. ")
    return dt.strftime(parsed, date_format + ' %H:%M')
def date(date_str, date_format):
    """Convert an API date string to the user's local display format.

    The year is omitted when it is the current year. BUGFIX: as with
    datetime() above the original only rstripped '-', leaving dangling
    '/', '.' or space separators behind the removed year.
    """
    parsed = dt.strptime(date_str, '%Y-%m-%d')
    if parsed.year == dt.now().year:
        date_format = date_format.replace("%Y", '').replace("%y", '').rstrip("-/. ")
    return dt.strftime(parsed, date_format)
def time(time_str):
    """Convert an API 'HH:MM:SS' time string to 'HH:MM'."""
    parsed = dt.strptime(time_str, '%H:%M:%S')
    return parsed.strftime('%H:%M')
def prediction_to_msg(prediction):
    """Translate a '1'/'X'/'2' match prediction code into readable text."""
    if prediction == '1':
        return 'win for the home-team'
    if prediction.upper() == 'X':
        return 'draw'
    # Anything else (normally '2') is an away win.
    return 'win for the away-team'
def player_name(name):
    """Return the surname part of *name*.

    None maps to ''; non-string values are returned unchanged; a single
    word is returned as-is; otherwise everything after the first space.
    """
    if name is None:
        return ''
    try:
        pieces = name.split(' ', 1)
    except AttributeError:
        # Not a string-like value: hand it back untouched.
        return name
    return pieces[0] if len(pieces) == 1 else pieces[1]
def team_id_to_team_name(team_id, home_team):
    """Return True when *team_id* equals the stringified *home_team* id."""
    return str(home_team) == team_id
def goal_type_to_prefix(goal_type):
    """Map a goal type to its scoreboard suffix.

    Returns '' for a regular goal, ' P' for a penalty and ' OG' for an
    own goal. BUGFIX: the original if/elif chain fell through to an
    implicit None for unrecognized types, which would break any caller
    concatenating the result; unknown types now also yield ''.
    """
    prefixes = {"goal": '', "penalty": ' P', "own-goal": ' OG'}
    return prefixes.get(goal_type, '')
def events_to_pretty_goals(events, home_goals, away_goals):
    """Render goal events via the appropriate Stdout writer.

    Uses the clean-sheet variant when exactly one side scored, and returns
    [] for a goalless draw. NOTE(review): falls through to an implicit
    None if either count were negative — assumes non-negative goal counts.
    """
    # no home or away-goals scored (0-0)
    if home_goals == 0 and away_goals == 0:
        return []
    # home scored and away didn't (x-0)
    if home_goals > 0 and away_goals == 0:
        return writers.Stdout.get_pretty_goals_clean_sheet("home", events)
    # home didn't score and away did (0-x)
    if home_goals == 0 and away_goals > 0:
        return writers.Stdout.get_pretty_goals_clean_sheet("away", events)
    # both teams scored at least once
    if home_goals > 0 and away_goals > 0:
        return writers.Stdout.get_pretty_goals(events)
def float_to_currency(f):
    """Round *f* to two decimals and return it as a Decimal.

    The value is first rendered with thousands separators, which are then
    stripped (anything that is not a digit or '.') before parsing.
    """
    formatted = '{:,.2f}'.format(float(f))
    return Decimal(sub(r'[^\d.]', '', formatted))
|
#!/usr/bin/env python3
# Copyright (c) 2018-2021 The MobileCoin Foundation
"""
The purpose of this strategy is to test client behavior with a mobilecoind wallet.
Example setup and usage:
```
python3 test_client.py --key-dir ../../../target/sample_data/master/keys/
```
"""
import argparse
import concurrent.futures
import grpc
import mobilecoind_api_pb2
import mobilecoind_api_pb2_grpc
import os
import time
from accounts import *
from google.protobuf.empty_pb2 import Empty
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the mobilecoind wallet test.

    Returns:
        The parsed argument namespace. (BUGFIX: the original annotation
        claimed ArgumentParser, but parse_args() yields a Namespace;
        also fixed the 'clearn' typo in the --max-seconds help text.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mobilecoind-host",
                        default="localhost",
                        type=str,
                        help="Mobilecoind host")
    parser.add_argument("--mobilecoind-port",
                        default="4444",
                        type=str,
                        help="Mobilecoind port")
    parser.add_argument("--key-dir",
                        type=str,
                        help="Path to directory of account_keys")
    parser.add_argument("--max-seconds",
                        type=int,
                        default=40,
                        help="Number of seconds to wait for a tx to clear")
    return parser.parse_args()
def run_test(stub, amount, monitor_id, dest, max_seconds):
    """Send `amount` from `monitor_id` to `dest` and poll until it clears.

    Args:
        stub: mobilecoind gRPC stub.
        amount: value to transfer (smallest denomination).
        monitor_id: sender's monitor id.
        dest: destination account record (uses .public_address and .monitor_id).
        max_seconds: upper bound on acceptable clearing time.

    Returns:
        The stats object produced by poll().

    Raises:
        AssertionError: if the tx does not clear in time or does not succeed.
    """
    resp = stub.GetBalance(
        mobilecoind_api_pb2.GetBalanceRequest(monitor_id=monitor_id))
    starting_balance = resp.balance
    print("Starting balance prior to transfer:", starting_balance)
    tx_stats = {}
    sync_start = time.time()
    # Make sure both monitors have caught up with the ledger before sending.
    wait_for_accounts_sync(stub, [monitor_id, dest.monitor_id], 3)
    print("Time to sync:", time.time() - sync_start)
    tx_resp = stub.SendPayment(
        mobilecoind_api_pb2.SendPaymentRequest(
            sender_monitor_id=monitor_id,
            sender_subaddress=0,
            outlay_list=[
                mobilecoind_api_pb2.Outlay(
                    value=amount,
                    receiver=dest.public_address,
                )
            ],
            fee=0,  # presumably 0 = let mobilecoind choose — TODO confirm
            tombstone=0,  # presumably 0 = let mobilecoind choose — TODO confirm
        ))
    tx_stats[0] = {
        'start': time.time(),
        'time_delta': None,  # expected to be filled in by poll()
        'tombstone': tx_resp.sender_tx_receipt.tombstone,
        'block_delta': None,
        'status': TransferStatus.pending,
        'receipt': tx_resp,
    }
    stats = poll(monitor_id, tx_stats, stub)
    # FIXME: Move max seconds check inside polling
    # NOTE(review): if poll() leaves time_delta as None this raises TypeError
    # rather than AssertionError — assumes poll always sets it on completion.
    assert tx_stats[0]['time_delta'] < max_seconds, "Did not clear in time"
    assert tx_stats[0]['status'] == TransferStatus.success, "Transfer did not succeed"
    return stats
if __name__ == '__main__':
    args = parse_args()
    print(args)

    stub = connect(args.mobilecoind_host, args.mobilecoind_port)
    # Register a monitor for every key file found in --key-dir.
    accounts = [
        load_key_and_register("{}/{}".format(args.key_dir, k), stub)
        for k in sorted(
            filter(lambda x: x.endswith(".json"), os.listdir(args.key_dir)))
    ]
    monitor_ids = [a.monitor_id for a in accounts]

    # Go through each account and have all their friends transact to them
    for i, account_data in enumerate(accounts):
        wait_for_accounts_sync(stub, monitor_ids, 3)
        # Get starting balance
        resp = stub.GetBalance(
            mobilecoind_api_pb2.GetBalanceRequest(monitor_id=account_data.monitor_id))
        balance = resp.balance
        print("Starting balance for account", i, ":", resp)

        # Note: due to the transaction fee, we can't assume we have enough funds
        # to divide equally among all our friends, so add an extra factor.
        amount = 10  # int(balance / (len(accounts)*10))

        # Create a pool of transfers to all other accounts
        print("Transferring", amount, "each to", len(accounts), "accounts")

        # FIXME: no reason it can't also send to itself
        src_accounts = {a.monitor_id for a in accounts}
        src_accounts.remove(account_data.monitor_id)
        # BUGFIX: use a distinct loop variable — the original reused `i`,
        # clobbering the outer account index for all subsequent printouts.
        for j, src in enumerate(src_accounts):
            stats = run_test(stub, amount, src, account_data, args.max_seconds)
            print("Test", j, "succeeded:", stats)
    print("All transfers successful")
|
"""Pytorch dataset object that loads MNIST dataset as bags."""
import numpy as np
import torch
import torch.utils.data as data_utils
from torchvision import datasets, transforms
class MnistBags(data_utils.Dataset):
    """MNIST bags for multiple-instance learning.

    Each bag is a random collection of MNIST images; a bag is positive
    when it contains at least one image of `target_number`.

    Args:
        target_number: digit whose presence makes a bag positive.
        mean_bag_length: mean of the normal bag-size distribution.
        var_bag_length: spread of the bag-size distribution.
        num_bag: number of bags to generate.
        seed: RNG seed so bag composition is reproducible.
        train: sample from the train split if True, else the test split.
    """

    def __init__(self, target_number=9, mean_bag_length=10, var_bag_length=2, num_bag=250, seed=1, train=True):
        self.target_number = target_number
        self.mean_bag_length = mean_bag_length
        self.var_bag_length = var_bag_length
        self.num_bag = num_bag
        self.train = train

        self.r = np.random.RandomState(seed)

        # Full MNIST split sizes; each split is loaded as one big batch below.
        self.num_in_train = 60000
        self.num_in_test = 10000

        if self.train:
            self.train_bags_list, self.train_labels_list = self._create_bags()
        else:
            self.test_bags_list, self.test_labels_list = self._create_bags()

    def _create_bags(self):
        """Load the chosen MNIST split and sample `num_bag` random bags."""
        # The two loader branches only differed in the split flag and batch
        # size, so they are collapsed into one.
        split_size = self.num_in_train if self.train else self.num_in_test
        loader = data_utils.DataLoader(datasets.MNIST('../datasets',
                                                      train=self.train,
                                                      download=True,
                                                      transform=transforms.Compose([
                                                          transforms.ToTensor(),
                                                          transforms.Normalize((0.1307,), (0.3081,))])),
                                       batch_size=split_size,
                                       shuffle=False)

        # batch_size equals the split size, so this single pass grabs all data.
        for (batch_data, batch_labels) in loader:
            all_imgs = batch_data
            all_labels = batch_labels

        bags_list = []
        labels_list = []

        for _ in range(self.num_bag):
            # BUGFIX: np.int was removed in NumPy 1.24; a plain int() cast of
            # the scalar draw is equivalent (same RNG stream, same value).
            bag_length = int(self.r.normal(self.mean_bag_length, self.var_bag_length))
            if bag_length < 1:
                bag_length = 1

            indices = torch.LongTensor(self.r.randint(0, split_size, bag_length))

            labels_in_bag = all_labels[indices]
            # Per-instance binary labels: is the instance >= target digit?
            labels_in_bag = labels_in_bag >= self.target_number

            bags_list.append(all_imgs[indices])
            labels_list.append(labels_in_bag)

        return bags_list, labels_list

    def __len__(self):
        """Number of bags in the active split."""
        if self.train:
            return len(self.train_labels_list)
        else:
            return len(self.test_labels_list)

    def __getitem__(self, index):
        """Return (bag, [bag_label, per-instance labels]) for `index`."""
        if self.train:
            bag = self.train_bags_list[index]
            # max() over instance labels == "at least one positive instance".
            label = [max(self.train_labels_list[index]), self.train_labels_list[index]]
        else:
            bag = self.test_bags_list[index]
            label = [max(self.test_labels_list[index]), self.test_labels_list[index]]
        return bag, label
if __name__ == "__main__":
train_loader = data_utils.DataLoader(MnistBags(target_number=9,
mean_bag_length=10,
var_bag_length=2,
num_bag=100,
seed=1,
train=True),
batch_size=1,
shuffle=True)
test_loader = data_utils.DataLoader(MnistBags(target_number=9,
mean_bag_length=10,
var_bag_length=2,
num_bag=100,
seed=1,
train=False),
batch_size=1,
shuffle=False)
len_bag_list_train = []
mnist_bags_train = 0
for batch_idx, (bag, label) in enumerate(train_loader):
len_bag_list_train.append(int(bag.squeeze(0).size()[0]))
mnist_bags_train += label[0].numpy()[0]
print('Number positive train bags: {}/{}\n'
'Number of instances per bag, mean: {}, max: {}, min {}\n'.format(
mnist_bags_train, len(train_loader),
np.mean(len_bag_list_train), np.min(len_bag_list_train), np.max(len_bag_list_train)))
len_bag_list_test = []
mnist_bags_test = 0
for batch_idx, (bag, label) in enumerate(test_loader):
len_bag_list_test.append(int(bag.squeeze(0).size()[0]))
mnist_bags_test += label[0].numpy()[0]
print('Number positive test bags: {}/{}\n'
'Number of instances per bag, mean: {}, max: {}, min {}\n'.format(
mnist_bags_test, len(test_loader),
np.mean(len_bag_list_test), np.min(len_bag_list_test), np.max(len_bag_list_test))) |
<filename>reports/tests.py
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Reports: test suites
"""
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from treeio.core.models import User, Group, Perspective, ModuleSetting
from treeio.reports.models import Report, Chart
class ReportsModelsTest(TestCase):
    "Reports Models Tests"

    # MODERNIZED: assertEquals/assertNotEquals are deprecated aliases that
    # were removed in Python 3.12 — use assertEqual/assertNotEqual.

    def test_model_report(self):
        "Test Report Model"
        obj = Report(name='test')
        obj.save()
        self.assertEqual('test', obj.name)
        self.assertNotEqual(obj.id, None)
        obj.delete()

    def test_model_chart(self):
        "Test Chart Model"
        report = Report(name='test')
        report.save()
        obj = Chart(name='test', report=report)
        obj.save()
        self.assertEqual('test', obj.name)
        self.assertNotEqual(obj.id, None)
        obj.delete()
class ReportsViewsTest(TestCase):
    "Reports functional tests for views"

    # Credentials shared by every login in this suite.
    username = "test"
    password = "password"
    prepared = False

    # BUGFIX: several login posts contained the scrubbed placeholder
    # '<PASSWORD>' (a syntax error); restored to self.password, matching
    # the intact tests (e.g. test_chart_add). Also assertEquals -> assertEqual
    # (the alias was removed in Python 3.12).

    def setUp(self):
        "Initial Setup"
        if not self.prepared:
            self.group, created = Group.objects.get_or_create(name='test')
            duser, created = DjangoUser.objects.get_or_create(
                username=self.username)
            duser.set_password(self.password)
            duser.save()
            self.user, created = User.objects.get_or_create(user=duser)
            self.user.save()
            perspective, created = Perspective.objects.get_or_create(
                name='default')
            perspective.set_default_user()
            perspective.save()
            ModuleSetting.set('default_perspective', perspective.id)
            self.report = Report(name='test')
            self.report.set_default_user()
            self.report.save()
            self.chart = Chart(name='test_chart', report=self.report)
            self.chart.set_default_user()
            self.chart.save()
            self.client = Client()
            self.prepared = True

    ######################################
    # Testing views when user is logged in
    ######################################
    def test_reports_login(self):
        "Testing /reports/"
        response = self.client.post('/accounts/login',
                                    {'username': self.username, 'password': self.password})
        self.assertRedirects(response, '/')
        response = self.client.get(reverse('reports'))
        self.assertEqual(response.status_code, 200)

    def test_index_login(self):
        "Testing /reports/index/"
        response = self.client.post('/accounts/login',
                                    {'username': self.username, 'password': self.password})
        self.assertRedirects(response, '/')
        response = self.client.get(reverse('reports_index'))
        self.assertEqual(response.status_code, 200)

    def test_index_owned(self):
        "Testing /reports/owned/"
        response = self.client.post('/accounts/login',
                                    {'username': self.username, 'password': self.password})
        self.assertRedirects(response, '/')
        response = self.client.get(reverse('reports_index'))
        self.assertEqual(response.status_code, 200)

    # Charts
    def test_chart_add(self):
        "Testing /reports/chart/add/"
        response = self.client.post('/accounts/login',
                                    {'username': self.username, 'password': self.password})
        self.assertRedirects(response, '/')
        response = self.client.get(reverse('reports_chart_add'))
        self.assertEqual(response.status_code, 200)

    def test_chart_delete_login(self):
        "Testing /reports/chart/delete/<chart_id>"
        response = self.client.post('/accounts/login',
                                    {'username': self.username, 'password': self.password})
        self.assertRedirects(response, '/')
        response = self.client.get(
            reverse('reports_chart_delete', args=[self.chart.id]))
        self.assertEqual(response.status_code, 200)

    # Reports
    def test_report_add(self):
        "Testing /reports/report/add/"
        response = self.client.post('/accounts/login',
                                    {'username': self.username, 'password': self.password})
        self.assertRedirects(response, '/')
        response = self.client.get(reverse('reports_report_add'))
        self.assertEqual(response.status_code, 200)

    def test_report_delete_login(self):
        "Testing /reports/report/delete/<report_id>"
        response = self.client.post('/accounts/login',
                                    {'username': self.username, 'password': self.password})
        self.assertRedirects(response, '/')
        response = self.client.get(
            reverse('reports_report_delete', args=[self.report.id]))
        self.assertEqual(response.status_code, 200)

    ######################################
    # Testing views when user is not logged in
    ######################################
    def test_reports_out(self):
        "Testing /reports/"
        response = self.client.get(reverse('reports'))
        self.assertRedirects(response, reverse('user_login'))

    def test_index_out(self):
        "Testing /reports/index/"
        response = self.client.get(reverse('reports_index'))
        self.assertRedirects(response, reverse('user_login'))

    def test_index_owned_out(self):
        "Testing /reports/owned/"
        response = self.client.get(reverse('reports_index'))
        self.assertRedirects(response, reverse('user_login'))

    # Charts
    def test_chart_add_out(self):
        "Testing /reports/chart/add/"
        response = self.client.get(reverse('reports_chart_add'))
        self.assertRedirects(response, reverse('user_login'))

    def test_chart_add_typed_out(self):
        "Testing /reports/chart/add/<report_id>"
        response = self.client.get(
            reverse('reports_chart_add', args=[self.report.id]))
        self.assertRedirects(response, reverse('user_login'))

    def test_chart_edit_out(self):
        "Testing /reports/chart/edit/<chart_id>"
        response = self.client.get(
            reverse('reports_chart_edit', args=[self.chart.id]))
        self.assertRedirects(response, reverse('user_login'))

    def test_chart_delete_out(self):
        "Testing /reports/chart/delete/<chart_id>"
        response = self.client.get(
            reverse('reports_chart_delete', args=[self.chart.id]))
        self.assertRedirects(response, reverse('user_login'))

    # Reports
    def test_report_add_out(self):
        "Testing /reports/report/add/"
        response = self.client.get(reverse('reports_report_add'))
        self.assertRedirects(response, reverse('user_login'))

    def test_report_view_out(self):
        "Testing /reports/report/view/<report_id>"
        response = self.client.get(
            reverse('reports_report_view', args=[self.report.id]))
        self.assertRedirects(response, reverse('user_login'))

    def test_report_edit_out(self):
        "Testing /reports/report/edit/<report_id>"
        response = self.client.get(
            reverse('reports_report_edit', args=[self.report.id]))
        self.assertRedirects(response, reverse('user_login'))

    def test_report_delete_out(self):
        "Testing /reports/report/delete/<report_id>"
        response = self.client.get(
            reverse('reports_report_delete', args=[self.report.id]))
        self.assertRedirects(response, reverse('user_login'))
|
<reponame>pulumi/pulumi-aws-native<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetAccessPointResult',
'AwaitableGetAccessPointResult',
'get_access_point',
'get_access_point_output',
]
@pulumi.output_type
class GetAccessPointResult:
    # NOTE: generated by the Pulumi SDK Generator — behavioral changes belong
    # in the generator template, not here.
    def __init__(__self__, alias=None, arn=None, network_origin=None, policy=None, policy_status=None):
        # Validate each argument's wire type, then store it via pulumi.set so
        # the @pulumi.getter properties below can retrieve it.
        if alias and not isinstance(alias, str):
            raise TypeError("Expected argument 'alias' to be a str")
        pulumi.set(__self__, "alias", alias)
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if network_origin and not isinstance(network_origin, str):
            raise TypeError("Expected argument 'network_origin' to be a str")
        pulumi.set(__self__, "network_origin", network_origin)
        if policy and not isinstance(policy, dict):
            raise TypeError("Expected argument 'policy' to be a dict")
        pulumi.set(__self__, "policy", policy)
        if policy_status and not isinstance(policy_status, dict):
            raise TypeError("Expected argument 'policy_status' to be a dict")
        pulumi.set(__self__, "policy_status", policy_status)

    @property
    @pulumi.getter
    def alias(self) -> Optional[str]:
        """
        The alias of this Access Point. This alias can be used for compatibility purposes with other AWS services and third-party applications.
        """
        return pulumi.get(self, "alias")

    @property
    @pulumi.getter
    def arn(self) -> Optional[str]:
        """
        The Amazon Resource Name (ARN) of the specified accesspoint.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter(name="networkOrigin")
    def network_origin(self) -> Optional['AccessPointNetworkOrigin']:
        """
        Indicates whether this Access Point allows access from the public Internet. If VpcConfiguration is specified for this Access Point, then NetworkOrigin is VPC, and the Access Point doesn't allow access from the public Internet. Otherwise, NetworkOrigin is Internet, and the Access Point allows access from the public Internet, subject to the Access Point and bucket access policies.
        """
        return pulumi.get(self, "network_origin")

    @property
    @pulumi.getter
    def policy(self) -> Optional[Any]:
        """
        The Access Point Policy you want to apply to this access point.
        """
        return pulumi.get(self, "policy")

    @property
    @pulumi.getter(name="policyStatus")
    def policy_status(self) -> Optional['outputs.PolicyStatusProperties']:
        # No upstream description for this generated property.
        return pulumi.get(self, "policy_status")
class AwaitableGetAccessPointResult(GetAccessPointResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator so the
        # result can be awaited; it resolves immediately to a plain
        # GetAccessPointResult copy of this object's fields.
        if False:
            yield self
        return GetAccessPointResult(
            alias=self.alias,
            arn=self.arn,
            network_origin=self.network_origin,
            policy=self.policy,
            policy_status=self.policy_status)
def get_access_point(name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccessPointResult:
    """
    The AWS::S3::AccessPoint resource is an Amazon S3 resource type that you can use to access buckets.


    :param str name: The name you want to assign to this Access Point. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the access point name.
    """
    __args__ = dict()
    __args__['name'] = name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller didn't pin one.
        opts.version = _utilities.get_version()
    # Invoke the CloudFormation-native provider; .value holds the raw result.
    __ret__ = pulumi.runtime.invoke('aws-native:s3:getAccessPoint', __args__, opts=opts, typ=GetAccessPointResult).value

    return AwaitableGetAccessPointResult(
        alias=__ret__.alias,
        arn=__ret__.arn,
        network_origin=__ret__.network_origin,
        policy=__ret__.policy,
        policy_status=__ret__.policy_status)
@_utilities.lift_output_func(get_access_point)
def get_access_point_output(name: Optional[pulumi.Input[str]] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccessPointResult]:
    """
    The AWS::S3::AccessPoint resource is an Amazon S3 resource type that you can use to access buckets.


    :param str name: The name you want to assign to this Access Point. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the access point name.
    """
    # Body intentionally empty: lift_output_func wraps get_access_point to
    # accept/return pulumi Outputs instead of plain values.
    ...
|
<filename>src/day13.py
from typing import NamedTuple, Set, List
import pytest
class Point(NamedTuple):
    """An (x, y) dot on the transparent paper."""

    x: int
    y: int

    @classmethod
    def from_string(cls, input_string: str) -> "Point":
        """Parse a 'x,y' line (surrounding whitespace is tolerated)."""
        raw_x, raw_y = input_string.strip().split(",")
        return cls(x=int(raw_x), y=int(raw_y))
# Table-driven cases: empty input, single points, a multi-line list, and an
# input whose fold section must be ignored by the point parser.
@pytest.mark.parametrize(
    "input_string, expected",
    [
        ("", set()),
        ("1, 2", {Point(1, 2)}),
        ("86,99", {Point(86, 99)}),
        ("1,2\n3,4\n5,6\n", {Point(1, 2), Point(3, 4), Point(5, 6)}),
        (
            "1,2\n3,4\n5,6\n\nfold along x=42\nfold along y=47",
            {Point(1, 2), Point(3, 4), Point(5, 6)},
        ),
    ],
)
def test_parse_input_points(input_string, expected):
    assert parse_input_points(input_string) == expected
def parse_input_points(input_string) -> Set[Point]:
    """Parse the dot coordinates (first paragraph of the puzzle input)."""
    if not input_string:
        return set()
    point_block = input_string.strip().split("\n\n")[0]
    return {Point.from_string(line) for line in point_block.split("\n")}
class Fold(NamedTuple):
    """A single fold instruction: the axis to fold on and its coordinate."""

    dimension: str
    coordinate: int

    @classmethod
    def from_string(cls, input_string: str) -> "Fold":
        # Skip the 11-character "fold along " prefix, then split on '='.
        axis, value = input_string.strip()[11:].split("=")
        return cls(dimension=axis, coordinate=int(value))
# Cases: empty input, one fold, and two folds whose order must be preserved.
@pytest.mark.parametrize(
    "input_string, expected",
    [
        ("", []),
        ("1,2\n\nfold along x=42", [Fold("x", 42)]),
        ("1,2\n\nfold along x=42\nfold along y=47", [Fold("x", 42), Fold("y", 47)]),
    ],
)
def test_parse_input_folds(input_string, expected):
    assert parse_input_folds(input_string) == expected
def parse_input_folds(input_string: str) -> List[Fold]:
    """Parse the fold instructions (second paragraph of the puzzle input)."""
    if not input_string:
        return []
    fold_block = input_string.strip().split("\n\n")[1]
    return [Fold.from_string(line) for line in fold_block.split("\n")]
# Cases cover: empty set, points untouched by the fold, points mapping onto
# existing points (set shrinks), and mirrored points for both x and y folds.
@pytest.mark.parametrize(
    "points, a_fold, expected",
    [
        (set(), Fold("x", 3), set()),
        ({Point(0, 0)}, Fold("x", 3), {Point(0, 0)}),
        (
            {Point(0, 0), Point(1, 1), Point(2, 2)},
            Fold("x", 3),
            {Point(0, 0), Point(1, 1), Point(2, 2)},
        ),
        ({Point(0, 0), Point(2, 0)}, Fold("x", 1), {Point(0, 0)}),
        ({Point(0, 0), Point(3, 0)}, Fold("x", 2), {Point(0, 0), Point(1, 0)}),
        (
            {Point(1, 0), Point(4, 0), Point(5, 1), Point(6, 2)},
            Fold("x", 3),
            {Point(1, 0), Point(2, 0), Point(1, 1), Point(0, 2)},
        ),
        (
            {Point(0, 1), Point(0, 4), Point(1, 5), Point(2, 6)},
            Fold("y", 3),
            {Point(0, 1), Point(0, 2), Point(1, 1), Point(2, 0)},
        ),
    ],
)
def test_fold(points, a_fold, expected):
    assert fold(points=points, a_fold=a_fold) == expected
def fold(points: Set[Point], a_fold: Fold) -> Set[Point]:
    """Apply one fold instruction and return the resulting point set."""
    if a_fold.dimension == "x":
        return fold_on_x(a_fold, points)
    if a_fold.dimension == "y":
        return fold_on_y(a_fold, points)
    raise Exception(f"Cannot fold on {a_fold.dimension}")
def fold_on_x(a_fold, points):
    """Fold the paper leftward along the vertical line x == coordinate.

    Points left of the line stay; points right of it are mirrored to
    x' = 2*coordinate - x. Points exactly on the line vanish.
    """
    axis = a_fold.coordinate
    result = {p for p in points if p.x < axis}
    result |= {Point(x=2 * axis - p.x, y=p.y) for p in points if p.x > axis}
    return result
def fold_on_y(a_fold, points):
    """Fold the paper upward along the horizontal line y == coordinate.

    Points above the line stay; points below it are mirrored to
    y' = 2*coordinate - y. Points exactly on the line vanish.
    """
    axis = a_fold.coordinate
    result = {p for p in points if p.y < axis}
    result |= {Point(x=p.x, y=2 * axis - p.y) for p in points if p.y > axis}
    return result
@pytest.fixture
def aoc_input_text() -> str:
    """AoC 2021 day 13 example input: 18 dots, then two fold instructions.

    BUGFIX: restored the blank line between the dots and the folds — the
    parsers split on "\\n\\n", so without it parse_input_folds raises
    IndexError and test_do_all_folds cannot run.
    """
    return """6,10
0,14
9,10
0,3
10,4
4,11
6,0
6,12
4,1
0,13
10,12
3,4
3,0
8,4
1,10
2,14
8,10
9,0

fold along y=7
fold along x=5"""
# Expected dots after both example folds: a 5x5 hollow square (the example's
# final picture from the puzzle statement).
@pytest.fixture
def aoc_result() -> Set[Point]:
    return {
        Point(0, 0),
        Point(1, 0),
        Point(2, 0),
        Point(3, 0),
        Point(4, 0),
        Point(0, 1),
        Point(0, 2),
        Point(0, 3),
        Point(0, 4),
        Point(4, 1),
        Point(4, 2),
        Point(4, 3),
        Point(4, 4),
        Point(1, 4),
        Point(2, 4),
        Point(3, 4),
    }
# End-to-end check on the worked example: parsing plus both folds.
def test_do_all_folds(aoc_input_text, aoc_result):
    assert do_all_folds(aoc_input_text) == aoc_result
def do_all_folds(input_string: str) -> Set[Point]:
    """Parse the input and apply every fold instruction in order."""
    points = parse_input_points(input_string)
    for instruction in parse_input_folds(input_string):
        points = fold(points=points, a_fold=instruction)
    return points
# Rendering cases: empty set, single dot, horizontal pair, vertical pair,
# and a diagonal that forces '.' padding.
@pytest.mark.parametrize(
    "points, expected",
    [
        (set(), ""),
        ({Point(0, 0)}, "#"),
        ({Point(0, 0), Point(0, 1)}, "##"),
        ({Point(0, 0), Point(1, 0)}, "#\n#"),
        ({Point(0, 0), Point(1, 1)}, "#.\n.#"),
    ],
)
def test_print_output(points, expected):
    assert print_output(points) == expected
def print_output(points: Set[Point]) -> str:
    """Render *points* as a '#'/'.' grid anchored at the origin.

    Returns "" for an empty set; asserts all coordinates are non-negative
    because the grid always starts at (0, 0).
    """
    if not points:
        return ""

    xs = [p.x for p in points]
    ys = [p.y for p in points]
    assert min(xs) >= 0
    assert min(ys) >= 0

    rows = []
    for row_y in range(max(ys) + 1):
        row = "".join(
            "#" if Point(col_x, row_y) in points else "."
            for col_x in range(max(xs) + 1)
        )
        rows.append(row)
    return "\n".join(rows)
# The folded example must render as the 5x5 hollow square from the puzzle.
def test_print_output_aoc_example(aoc_result):
    assert (
        print_output(aoc_result)
        == """#####
#...#
#...#
#...#
#####"""
    )
def part_a(filepath: str):
    """Count the dots visible after applying only the first fold."""
    with open(filepath, "r") as file:
        input_text = file.read()
    points = parse_input_points(input_text)
    first_fold = parse_input_folds(input_text)[0]
    return len(fold(points=points, a_fold=first_fold))
def part_b(filepath: str):
    """Render the letter grid remaining after performing every fold."""
    with open(filepath, "r") as file:
        text = file.read()
    return print_output(do_all_folds(text))
if __name__ == "__main__":
day = 13
input_file = f"../puzzle_input/day{day}.txt"
print(f"The answer to {day}A is: {part_a(input_file)}")
print(f"The answer to {day}B is:\n{part_b(input_file)}")
|
# Created by Martin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
import decimal
from .. enums import *
from .. properties import *
from . utils import *
from . formatter import Formatter
class TimeFormatter(Formatter):
    """
    This formatter tool formats given time in seconds into reasonable time
    representation. The formatting can be fully automatic based on current
    'domain' and 'precision' or it can be controlled by defining the 'template'
    property.

    The 'template' should be specified by the parts or granularity to use,
    optionally with additional formatting (e.g. '{h}:{m:.02.0f}:{s:.02.0f}').
    The available parts, which can be used are defined by the pero.TIME
    enum. If there is no specific formatting of a part defined directly by the
    template, a default formatting or a custom sub-template is used. The parts
    sub-templates are definable by corresponding properties (e.g. 'h_template'
    for hours or 's_template' for seconds, s_template='{:.0f}s').

    For different purposes, different style of rounding might be necessary. For
    example the normal half-rounding should be used for axis labels, however, in
    case of time counters the value should always be rounded down. This behavior
    can be specified by the 'rounding' property as any item from the
    pero.ROUNDING enum.

    Properties:

        template: str, None or UNDEF
            Specifies the main template to be used instead of automatic
            formatting.

        d_template: str, None or UNDEF
            Specifies the template to be used for days formatting.

        h_template: str, None or UNDEF
            Specifies the template to be used for hours formatting.

        m_template: str, None or UNDEF
            Specifies the template to be used for minutes formatting.

        s_template: str, None or UNDEF
            Specifies the template to be used for seconds formatting.

        ms_template: str, None or UNDEF
            Specifies the template to be used for milliseconds formatting.

        us_template: str, None or UNDEF
            Specifies the template to be used for microseconds formatting.

        ns_template: str, None or UNDEF
            Specifies the template to be used for nanoseconds formatting.

        rounding: str
            Specifies the rounding style as any item from the pero.ROUNDING
            enum.

        separator: str or UNDEF
            Specifies the separator to be used between individual parts.

        add_units: bool
            Specifies whether the units should be added to individual parts.
    """

    template = StringProperty(UNDEF, dynamic=False, nullable=True)

    d_template = StringProperty(UNDEF, dynamic=False, nullable=True)
    h_template = StringProperty(UNDEF, dynamic=False, nullable=True)
    m_template = StringProperty(UNDEF, dynamic=False, nullable=True)
    s_template = StringProperty(UNDEF, dynamic=False, nullable=True)
    ms_template = StringProperty(UNDEF, dynamic=False, nullable=True)
    us_template = StringProperty(UNDEF, dynamic=False, nullable=True)
    ns_template = StringProperty(UNDEF, dynamic=False, nullable=True)

    rounding = EnumProperty(ROUNDING.HALFUP, enum=ROUNDING, dynamic=False)
    separator = StringProperty(UNDEF, dynamic=False)
    add_units = BoolProperty(False, dynamic=False)

    def __init__(self, **overrides):
        """Initializes a new instance of TimeFormatter."""

        super().__init__(**overrides)

        # init buffers
        # Lazily (re)built by _init_formatting whenever a property changes.
        self._template = None
        self._templates_multi = None
        self._templates_single = None
        self._is_dirty = True

        # get available parts (units, factor)
        # Sorted largest factor first (days down to nanoseconds).
        self._parts = sorted(TIME_FACTORS.items(), key=lambda d: d[1], reverse=True)

        # bind events
        self.bind(EVT_PROPERTY_CHANGED, self._on_time_formatter_property_changed)

    def format(self, value, *args, **kwargs):
        """
        Formats a given value using time formatting.

        Args:
            value: float
                Time in seconds.

        Returns:
            str
                Formatted label.
        """

        # init formatting
        if self._is_dirty:
            self._init_formatting()

        # get template
        template = self._template
        if not template:
            # no fixed template: derive one from this particular value
            template = self._make_template(value)

        # init parts
        parts = {x[0]: 0 for x in self._parts}

        # split time
        # Walk parts from largest to smallest, taking each unit's share of
        # the remaining value; only units present in the template are filled.
        last = None
        for units, f in self._parts:

            key = "{%s:" % units
            if key in template:

                # get part value
                val = value / float(f)
                value -= int(val) * f
                parts[units] = decimal.Decimal(val)

                # remove fractions from higher part
                # (only the smallest used unit keeps its fractional part)
                if last:
                    parts[last] = int(parts[last])
                last = units

        # init context
        # Map the pero rounding style onto a local decimal context.
        context = decimal.Context()
        if self.rounding == ROUNDING.FLOOR:
            context.rounding = decimal.ROUND_DOWN
        elif self.rounding == ROUNDING.CEIL:
            context.rounding = decimal.ROUND_UP

        # format with correct rounding
        with decimal.localcontext(context):
            return template.format(**parts)

    def _init_formatting(self):
        """Initializes formatting based on current settings."""

        # reset
        self._template = None
        self._is_dirty = False

        # init templates
        self._init_templates()

        # use custom template
        if self.template:
            self._template = self._expand_template(self.template, False)
            return

        # check domain
        # Without a domain, templates are built per-value in format().
        if not self.domain:
            return

        # make template
        self._template = self._make_template(abs(self.domain))

    def _init_templates(self):
        """Initializes templates."""

        # init default parts templates
        # Multi-part defaults: zero-padded for everything below hours.
        self._templates_multi = {
            DAYS: "{%s:.0f}" % DAYS,
            HOURS: "{%s:.0f}" % HOURS,
            MINUTES: "{%s:02.0f}" % MINUTES,
            SECONDS: "{%s:02.0f}" % SECONDS,
            MSECONDS: "{%s:03.0f}" % MSECONDS,
            USECONDS: "{%s:03.0f}" % USECONDS,
            NSECONDS: "{%s:03.0f}" % NSECONDS}

        # init default singles templates
        # Used when the whole template is just one part (two decimals).
        self._templates_single = {x[0]: ("{%s:.2f}" % x[0]) for x in self._parts}

        # get user-defined templates
        for units, f in self._parts:

            # get part template
            template = self.get_property(units + "_template")
            if not template:
                continue

            # ensure part name is present
            # Turn anonymous '{:' placeholders into named '{unit:' ones.
            template = template.replace("{:", "{%s:" % units)

            # store template
            self._templates_multi[units] = template
            self._templates_single[units] = template

    def _make_template(self, domain):
        """Creates template to cover expected range."""

        # check current precision
        precision = domain
        if self.precision and self.precision < domain:
            precision = self.precision

        # get required parts
        # Include every unit from the largest one fitting the domain down to
        # the first one at or below the requested precision.
        template = []
        for units, f in self._parts:
            if domain >= f:
                template.append(units)
                if precision >= f:
                    break

        # get separator
        separator = self.separator
        if separator is UNDEF:
            separator = " " if self.add_units else ":"

        # init template
        template = separator.join("{%s}" % x for x in template)

        # expand parts
        return self._expand_template(template, self.add_units)

    def _expand_template(self, template, add_units):
        """Expands template parts."""

        # replace parts in template
        for key, tmpl in self._templates_multi.items():

            # make full key
            tag = "{%s}" % key

            # add units
            if add_units:
                tmpl = tmpl + " %s" % key

            # check for singles
            # A template that is exactly one bare part uses the richer
            # single-part formatting instead of the multi-part one.
            if template == tag:
                units = " %s" % key if add_units else ""
                tmpl = self._templates_single[key] + units
                return template.replace(tag, tmpl)

            # replace in template
            template = template.replace(tag, tmpl)

        return template

    def _on_time_formatter_property_changed(self, evt=None):
        """Called after a property has changed."""

        # Mark buffers stale; they are rebuilt on the next format() call.
        self._is_dirty = True
|
<reponame>mjirik/cellid
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.contrib.auth.forms import UserCreationForm
# Imaginary function to handle an uploaded file.
# from .imageprocessing import handle_uploaded_file
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .forms import ImageQuatroForm
from .models import ImageQuatro, CellImage
from django.http import Http404
import glob
from django.core.files import File
from django.conf import settings
import os.path as op
# Create your views here.
def index(request):
    """Renders the imviewer landing page.

    Exposes this module's absolute path and a 1..9 range used by the
    template for layout.
    """
    fn = op.abspath(__file__)
    print(fn)  # NOTE(review): debug output; consider logging instead
    # Removed: unused `fnhtml` local and the misleading `import html.parser`
    # (html.escape lives in the `html` package, and the value was never used).
    context = {'pth': fn,
               "range110": list(range(1, 10))}
    return render(request, 'imviewer/index.html', context)
@login_required()
def not_home_anymore(request):
    """Login-protected variant of :func:`index`.

    Renders the same landing page; kept separate so it can require
    authentication.
    """
    fn = op.abspath(__file__)
    print(fn)  # NOTE(review): debug output; consider logging instead
    # Removed: unused `fnhtml` local and the misleading `import html.parser`.
    context = {'pth': fn,
               "range110": list(range(1, 10))}
    return render(request, 'imviewer/index.html', context)
def login_redirect(request):
    """Sends any request straight to the imviewer login page."""
    return redirect('imviewer/login')
def register(request):
    """Handles user self-registration.

    A valid POST creates the account and redirects to the app root;
    an invalid POST re-renders the bound form so errors are shown;
    a GET renders an empty registration form.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('imviewer/')
        # fall through with the bound form so validation errors display
    else:
        form = UserCreationForm()
    return render(request, 'imviewer/reg_form.html', {'form': form})
def model_form_upload(request):
    """Uploads an ImageQuatro via its model form.

    A valid POST saves the uploaded images and redirects to the home
    page; anything else renders the (possibly bound) upload form.
    """
    if request.method == 'POST':
        form = ImageQuatroForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            from . import imageprocessing  # noqa: F401 (kept: import may have side effects)
            # imageprocessing.quatrofile_processing()
            return redirect('imviewer/home/')
    else:
        form = ImageQuatroForm()
    context = {'form': form}
    return render(request, 'imviewer/model_form_upload.html', context)
from django.views import generic
class ImageQuatroListView(generic.ListView):
    """Generic list view over all ImageQuatro records."""
    model = ImageQuatro
class ImageQuatroDetailView(generic.DetailView):
    """Detail view for one ImageQuatro; lists its sorted cell images.

    Also updates the record's preview image to the first sorted cell
    image, when any exist.
    """
    model = ImageQuatro

    def get_context_data(self, **kwargs):
        """Adds the sorted per-cell PNG list to the template context."""
        context = super(ImageQuatroDetailView, self).get_context_data(**kwargs)
        context['some_data'] = 'This is just some data'
        datadir = context["imagequatro"].outputdir
        print("get_context_data")  # NOTE(review): debug output
        print(datadir)
        # module-level glob/op imports are used; the local `import glob`
        # was redundant
        filelist = sorted(glob.glob(op.join(datadir, "serazeno/*.png")))
        # Guard against processing not having run yet: the original code
        # raised IndexError on filelist[0] for an empty directory.
        if filelist:
            imagequatro = super(ImageQuatroDetailView, self).get_object()
            imagequatro.imagequatro_preview = filelist[0]
            imagequatro.save()
        context["cellimages"] = filelist
        return context
def home(request):
    """Renders the home page listing all uploaded ImageQuatro records."""
    documents = ImageQuatro.objects.all()
    return render(request, 'imviewer/home.html', {'documents': documents})
    # (removed an unreachable `pass` that followed the return)
def ImageQuatroProcessView(request, pk):
    """Runs image processing for one ImageQuatro and shows the result.

    Looks up the record, runs quatrofile processing on its four input
    images, registers every produced per-cell PNG as a CellImage, stores
    the overview images on the record, and renders the process page.
    """
    try:
        iq = ImageQuatro.objects.get(pk=pk)
    except ImageQuatro.DoesNotExist:
        # fixed copy-paste error: the message used to say "Book does not exist"
        raise Http404("ImageQuatro does not exist")
    print("imagequatro processing, pk=" + str(pk))
    from . import imageprocessing
    # Maps the output ordering of cell images to multicell-overview ids.
    order2id = imageprocessing.quatrofile_processing(
        multicell_fitc=iq.multicell_fitc.path,
        multicell_dapi=iq.multicell_dapi.path,
        singlecell_fitc=iq.singlecell_fitc.path,
        singlecell_dapi=iq.singlecell_dapi.path,
        outputpath=iq.outputdir
    )
    filelist = sorted(glob.glob(op.join(iq.outputdir, "serazeno/*.png")))
    # Register each produced cell image; penalty encodes the sort order.
    for i, fl in enumerate(filelist):
        cellim = CellImage(imagequatro=iq, penalty=float(i))
        # store the image path relative to MEDIA_ROOT, as Django expects
        cellim.image = op.relpath(fl, settings.MEDIA_ROOT)
        cellim.multicelloverview_id = order2id[i]
        cellim.save()
    # Attach the overview images produced by the processing step.
    iq.multicell_overview = op.relpath(op.join(iq.outputdir, "Popisky.png"), settings.MEDIA_ROOT)
    iq.singlecell_overview = op.relpath(op.join(iq.outputdir, "hledana.png"), settings.MEDIA_ROOT)
    iq.save()
    return render(
        request,
        'imviewer/imagequatro_process.html',
        context={'imagequatro': iq, }
    )
|
<reponame>lupino3/jupyter-Kqlmagic<gh_stars>0
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""A module that manage package version.
"""
from Kqlmagic.constants import Constants
from bs4 import BeautifulSoup
from markdown import markdown
_KQL_URL = "http://aka.ms/kdocs"
_APPINSIGHTS_URL= "https://docs.microsoft.com/en-us/azure/application-insights/app-insights-overview?toc=/azure/azure-monitor/toc.json"
_LOGANALYTICS_URL = "https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-queries?toc=/azure/azure-monitor/toc.json"
_AZUREMONITOR_URL = "https://docs.microsoft.com/en-us/azure/azure-monitor/"
_KUSTO_URL = "https://docs.microsoft.com/en-us/azure/data-explorer/"
_NEED_SUPPORT_SECTION = """## Need Support?
- **Have a feature request for Kqlmagic?** Please post it on [User Voice](https://feedback.azure.com/forums/913690-azure-monitor) to help us prioritize
- **Have a technical question?** Ask on [Stack Overflow with tag "Kqlmagic"](https://stackoverflow.com/questions/tagged/Kqlmagic)
- **Need Support?** Every customer with an active Azure subscription has access to [support](https://docs.microsoft.com/en-us/azure/azure-supportability/how-to-create-azure-support-request) with guaranteed response time. Consider submitting a ticket and get assistance from Microsoft support team
- **Found a bug?** Please help us fix it by thoroughly documenting it and [filing an issue](https://github.com/Microsoft/jupyter-Kqlmagic/issues/new).
"""
_HELP_OPTIONS = ""

# Help text for the "commands" topic (markdown; rendered via MarkdownString).
# Fixed: unclosed bold markers on palette/palettes/cache/use_cache, and the
# typos "disbale", "intead of quering", "retreived".
_HELP_COMMANDS = """## Overview
Except submitting kql queries, few other commands are included that may help using the Kqlmagic.<br>
- Only one command can be executed per magic transaction.<br>
- A command must start with a double hyphen-minus ```--```<br>
- If command is not specified, the default command ```"submit"``` is assumed, that submits the query.<br>
## Commands
The following commands are supported:<br>
- **submit** - Execute the query and return result. <br>
    - Options can be used to customize the behavior of the transaction.<br>
    - The query can parametrized.<br>
    - This is the default command.<br>
<br>
- **version** - Displays the current version string.<br>
<br>
- **usage** - Displays usage of Kqlmagic.<br>
<br>
- **help "topic"** - Display information about the topic.<br>
    - To get the list of all the topics, execute ```%kql --help "help"```<br>
<br>
- **palette** - Display information about the current or other named color palette.<br>
    - The behaviour of this command will change based on the specified option:
    - -palette_name, -palette_colors, palette_reverse, -palette_desaturation, execute ```%kql --palette -palette_name "Reds"```<br>
<br>
- **palettes** - Display information about all available palettes.<br>
    - The behaviour of this command will change based on the specified option:
    - -palette_colors, palette_reverse, -palette_desaturation, execute ```%kql --palettes -palette_desaturation 0.75```<br>
<br>
- **cache** - Enables caching query results to a cache folder, or disable. <br>
    - To enable caching to folder XXX, execute: ```%kql --cache "XXX"```<br>
    - To disable caching, execute: ```%kql --cache None```<br>
    - Once results are cached, the results can be used by enabling the use of the cache, with the --use_cache command.<br>
<br>
- **use_cache** - Enables use of cached results from a cache folder. <br>
    - To enable use of cache from folder XXX, execute: ```%kql --use_cache "XXX"```<br>
    - To disable use of cache, execute: ```%kql --use_cache None```<br>
    - Once enabled, instead of querying the data source, the results are retrieved from the cache.<br>
<br>
## Examples:
```%kql --version```<br><br>
```%kql --usage```<br><br>
```%kql --help "help"```<br><br>
```%kql --help "options"```<br><br>
```%kql --help "conn"```<br><br>
```%kql --palette -palette_name "Reds"```<br><br>
```%kql --cache "XXX"```<br><br>
```%kql --use_cache None```<br><br>
```%kql --submit appinsights://appid='DEMO_APP';appkey='DEMO_KEY' pageViews | count```<br><br>
```%kql --palettes -palette_desaturation 0.75```
```%kql pageViews | count```
"""
_FAQ = """
"""
_USAGE = """## Usage:
**line usage:** ```%kql [command] [conn] [result <<] [options] [query]```
**cell usage:** ```%%kql [conn] [result <<] [options] [query] [[EMPTY-LINE [result <<] [options]]*```<br>
- **command** - The command to be executed.
- If not specified the query will be submited, if exist.<br>
- All commands start with a double hyphen-minus ```--```<br>
- To get more information, execute ```%kql --help "commands"```<br>
<br>
- **conn** - Connection string or reference to a connection to Azure Monitor resource.<br>
- If not specified the current (last created or used) connection will be used.<br>
- The conncan be also specified in the options parts, using the option ```-conn```<br>
- To get more information, execute ```%kql --help "conn"```<br>
<br>
- **result** - Python variable name that will be assigned with the result of the query.<br>
- Query results are always assigned to ```_``` and to ```_kql_raw_result_``` python variables.<br>
- If not specified and is last query in the cell, the results will be displayed.<br>
<br>
- **options** - Options that customize the behavior of the Kqlmagic for this transaction only.<br>
- All options start with a hyphen-minus ```-```<br>
- To get more information, execute ```%kql --help "options"```<br>
<br>
- **query** - Kusto Query language (kql) query that will be submited to the specified connection or to the current connection.<br>
- The query can be also sepcified in the options parts, using the option ```-query```<br>
- To get more information, browse https://docs.microsoft.com/en-us/azure/kusto/query/index
<br>
## Examples:
```%kql --version```<br><br>
```%kql --usage```<br><br>
```%%kql appinsights://appid='DEMO_APP';appkey='DEMO_KEY'
pageViews
| where client_City != ''
| summarize count() by client_City
| sort by count_
| limit 10```<br><br>
```%%kql pageViews | where client_City != ''
| summarize count() by client_City | sort by count_
| limit 10```<br><br>
```%kql DEMO_APP@appinsights pageViews | count```
## Get Started Notebooks:
* [Get Started with Kqlmagic for Kusto](https://mybinder.org/v2/gh/Microsoft/jupyter-Kqlmagic/master?filepath=notebooks%2FQuickStart.ipynb)
* [Get Started with Kqlmagic for Application Insights](https://mybinder.org/v2/gh/Microsoft/jupyter-Kqlmagic/master?filepath=notebooks%2FQuickStartAI.ipynb)
* [Get Started with Kqlmagic for Log Analytics](https://mybinder.org/v2/gh/Microsoft/jupyter-Kqlmagic/master?filepath=notebooks%2FQuickStartLA.ipynb)
* [Parametrize your Kqlmagic query with Python](https://mybinder.org/v2/gh/Microsoft/jupyter-Kqlmagic/master?filepath=notebooks%2FParametrizeYourQuery.ipynb)
* [Choose colors palette for your Kqlmagic query chart result](https://mybinder.org/v2/gh/Microsoft/jupyter-Kqlmagic/master?filepath=notebooks%2FColorYourCharts.ipynb)
""" +_NEED_SUPPORT_SECTION
_HELP_HELP = """## Overview
Help command is a tool to get more information on a topics that are relevant to Kqlmagic.
t
usage: ```%kql --help "topic"```<br>
## Topics
- **usage** - How to use the Kqlmagic.<br>
<br>
- **conn** - Lists the available connection string variation, and how their are used to authenticatie to data sources.<br>
<br>
- **query** / **kql** - [Reference to resources Kusto Queru language, aka kql, documentation](""" +_KQL_URL+ """)<br>
<br>
- **options** - Lists the available options, and their behavior impact on the submit query command.<br>
<br>
- **commands** - Lists the available commands, and what they do.<br>
<br>
- **faq** - Lists frequently asked quetions and answers.<br>
<br>
- **help** - This help.<br>
<br>
- **AzureMonitor**- [Reference to resources Azure Monitor tools](""" +_AZUREMONITOR_URL+ """)<br>
Azure Monitor, which now includes Log Analytics and Application Insights, provides sophisticated tools for collecting and analyzing telemetry that allow you to maximize the performance and availability of your cloud and on-premises resources and applications. It helps you understand how your applications are performing and proactively identifies issues affecting them and the resources they depend on.
<br>
- **AzureDataExplorer** / **kusto**- [Reference to resources Azure Data Explorer (kusto) service](""" +_KUSTO_URL+ """)<br>
Azure Data Explorer is a fast and highly scalable data exploration service for log and telemetry data. It helps you handle the many data streams emitted by modern software, so you can collect, store, and analyze data. Azure Data Explorer is ideal for analyzing large volumes of diverse data from any data source, such as websites, applications, IoT devices, and more.
<br>
- **LogAnalytics**- [Reference to resources Log Analytics service](""" +_LOGANALYTICS_URL+ """)<br>
Log data collected by Azure Monitor is stored in Log Analytics which collects telemetry and other data from a variety of sources and provides a query language for advanced analytics.
<br>
- **ApplicationInsights** / **AppInsights**- [Reference to resources Application Insights service](""" +_APPINSIGHTS_URL+ """)<br>
Application Insights is an extensible Application Performance Management (APM) service for web developers on multiple platforms. Use it to monitor your live web application. It will automatically detect performance anomalies. It includes powerful analytics tools to help you diagnose issues and to understand what users actually do with your app. It's designed to help you continuously improve performance and usability. It works for apps on a wide variety of platforms including .NET, Node.js and J2EE, hosted on-premises or in the cloud. It integrates with your DevOps process, and has connection points to a variety of development tools. It can monitor and analyze telemetry from mobile apps by integrating with Visual Studio App Center.
<br>
""" +_NEED_SUPPORT_SECTION
_HELP_CONN = """## Overview
- To get data from Azure Monitor data resources, the user need to authenticate itself, and if it has the right permission,
he would be able to query that data resource.
- The current supported data sources are: Azure Data Explorer (kusto) clusters, Application Insights, Log Analytics and Cache.
- Cache data source is not a real data source, it retrieves query results that were cached, but it can only retreive results queries that were executed before, new queries or modified queries won't work.
to get more information on cache data source, execute ```help "cache"```
- The user can connect to multiple data resources.
- Once a connection to a data resource is established, it gets a name of the form <resource>@<data-source>.
- Reference to a data resource can be by connection string, connection name, or current connection (last connection used).
- If connection is not specified, current connection (last connection used) will be used.
- To submit queries, at least one connection to a data resource must be established.
- When a connection is specified, and it is a new connection string, the authentication and authorization is validated authomatically, by submiting
a validation query ```range c from 1 to 10 step 1 | count```, and if the correct result returns, the connection is established.
- An initial connection can be specified as an environment variable.
- if specified it will be established when Kqlmagic loads.
- The variable name is ```KQLMAGIC_CONNECTION_STR```
## Authentication methods:
* AAD Username/password - Provide your AAD username and password.
* AAD application - Provide your AAD tenant ID, AAD app ID and app secret.
* AAD code - Provide only your AAD username, and authenticate yourself using a code, generated by ADAL.
* certificate - Provide your AAD tenant ID, AAD app ID, certificate and certificate-thumbprint (supported only with Azure Data Explorer)
* appid/appkey - Provide you application insight appid, and appkey (supported only with Application Insights)
* anonymous - No authentication. For the case that you run your data source locally.
## Connect to Azure Data Explorer (kusto) data resource ```<database or alias>@<cluster>```
Few options to authenticate with Azure Data Explorer (Kusto) data resources:<br>
```%kql azuredataexplorer://code;cluster='<cluster-name>';database='<database-name>';alias='<database-friendly-name>'```<br><br>
```%kql azuredataexplorer://tenant='<tenant-id>';clientid='<aad-appid>';clientsecret='<aad-appkey>';cluster='<cluster-name>';database='<database-name>';alias='<database-friendly-name>'```<br><br>
```%kql azuredataexplorer://tenant='<tenant-id>';certificate='<certificate>';certificate_thumbprint='<thumbprint>';cluster='<cluster-name>';database='<database-name>';alias='<database-friendly-name>'```<br><br>
```%kql azuredataexplorer://tenant='<tenant-id>';certificate_pem_file='<pem_filename>';certificate_thumbprint='<thumbprint>';cluster='<cluster-name>';database='<database-name>';alias='<database-friendly-name>'```<br><br>
```%kql azuredataexplorer://username='<username>';password='<password>';cluster='<cluster-name>';database='<database-name>';alias='<database-friendly-name>'```<br><br>
```%kql azuredataexplorer://anonymous;cluster='<cluster-name>';database='<database-name>';alias='<database-friendly-name>'```<br><br>
Notes:<br>
- username/password works only on corporate network.<br>
- alias is optional.<br>
- if credentials are missing, and a previous connection was established the credentials will be inherited.<br>
- if secret (password / clientsecret / thumbprint) is missing, user will be prompted to provide it.<br>
- if cluster is missing, and a previous connection was established the cluster will be inherited.<br>
- if tenant is missing, and a previous connection was established the tenant will be inherited.<br>
- if only the database change, a new connection can be set as follow:
```<new-database-name>@<cluster-name>```<br>
- **a not quoted value, is a python expression, that is evaluated and its result is used as the value. This is how you can parametrize the connection string**
## Connect to Log Analytics data resources ```<workspace or alias>@loganalytics```
Few options to authenticate with Log Analytics:<br>
```%kql loganalytics://code;workspace='<workspace-id>';alias='<workspace-friendly-name>'```<br><br>
```%kql loganalytics://tenant='<tenant-id>';clientid='<aad-appid>';clientsecret='<aad-appkey>';workspace='<workspace-id>';alias='<workspace-friendly-name>'```<br><br>
```%kql loganalytics://username='<username>';password='<password>';workspace='<workspace-id>';alias='<workspace-friendly-name>'```<br><br>
```%kql loganalytics://anonymous;workspace='<workspace-id>';alias='<workspace-friendly-name>'```<br><br>
Notes:<br>
- authentication with appkey works only for the demo.<br>
- username/password works only on corporate network.<br>
- alias is optional.<br>
- if credentials are missing, and a previous connection was established the credentials will be inherited.<br>
- if secret (password / clientsecret) is missing, user will be prompted to provide it.<br>
- if tenant is missing, and a previous connection was established the tenant will be inherited.<br>
- **a not quoted value, is a python expression, that is evaluated and its result is used as the value. This is how you can parametrize the connection string**
## Connect to Application Insights data resources ```<appid or alias>@appinsights```
Few options to authenticate with Apllication Insights:<br><br>
```%kql appinsights://appid='<app-id>';appkey='<app-key>';alias='<appid-friendly-name>'```<br><br>
```%kql appinsights://code;appid='<app-id>';alias='<appid-friendly-name>'```<br><br>
```%kql appinsights://tenant='<tenant-id>';clientid='<aad-appid>';clientsecret='<aad-appkey>';appid='<app-id>';alias='<appid-friendly-name>'```<br><br>
```%kql appinsights://username='<username>';password='<password>';appid='<app-id>';alias='<appid-friendly-name>'```<br><br>
```%kql appinsights://anonymous;appid='<app-id>';alias='<appid-friendly-name>'```<br><br>
Notes:<br>
- username/password works only on corporate network.<br>
- alias is optional.<br>
- if credentials are missing, and a previous connection was established the credentials will be inherited.<br>
- if secret (password / clientsecret / appkey) is missing, user will be prompted to provide it.<br>
- if tenant is missing, and a previous connection was established the tenant will be inherited.<br>
- **a not quoted value, is a python expression, that is evaluated and its result is used as the value. This is how you can parametrize the connection string**
""" +_NEED_SUPPORT_SECTION
# Maps a normalized help-topic name to either a markdown help string or a
# URL (values starting with "http" are wrapped into UrlReference objects by
# execute_help_command). Empty strings mean "not implemented yet".
_HELP = {
    "query" : _KQL_URL,
    "kql": _KQL_URL,
    "appinsights": _APPINSIGHTS_URL,
    "applicationinsights": _APPINSIGHTS_URL,
    "loganalytics": _LOGANALYTICS_URL,
    "azuremonitor": _AZUREMONITOR_URL,
    "kusto": _KUSTO_URL,
    "azuredataexplorer": _KUSTO_URL,
    "conn" : _HELP_CONN,
    "options" : "",
    "help" : _HELP_HELP,
    "usage" : _USAGE,
    "commands" : _HELP_COMMANDS,
    "cache" : "",
    "faq" : "",
}
class UrlReference(object):
    """Holds a named reference to an external documentation URL.

    Attributes:
        name: name of the url.
        url: the reference url itself.
        button_text: caption for a button that opens the url on click.
    """

    def __init__(self, name: str, url: str, button_text: str):
        self.button_text = button_text
        self.url = url
        self.name = name
class MarkdownString(object):
    """Wraps a markdown string for Jupyter display.

    The wrapped text can be rendered as markdown, as HTML, or as plain
    text (the HTML rendering with all tags stripped).
    """

    def __init__(self, markdown_string: str):
        self.markdown_string = markdown_string

    def _repr_markdown_(self):
        """Raw markdown, as consumed by Jupyter's rich display hook."""
        return self.markdown_string

    def _repr_html_(self):
        """HTML rendering of the markdown text."""
        return markdown(self.markdown_string)

    def __repr__(self):
        """Plain-text rendering: the HTML output stripped of its tags."""
        rendered = self._repr_html_()
        return ''.join(BeautifulSoup(rendered, features="lxml").findAll(text=True))

    def __str__(self):
        return self.__repr__()
def execute_usage_command() -> MarkdownString:
    """Executes the usage command.

    Returns
    -------
    MarkdownString object
        The usage text, wrapped so it can display as markdown, html or text.
    """
    # delegates to the generic help machinery with a fixed topic
    return execute_help_command("usage")
def execute_faq_command() -> MarkdownString:
    """Executes the faq command.

    Returns
    -------
    MarkdownString object
        The faq text, wrapped so it can display as markdown, html or text.
    """
    # delegates to the generic help machinery with a fixed topic
    return execute_help_command("faq")
def execute_help_command(topic: str) -> "MarkdownString | UrlReference":
    """Executes the help command for *topic*.

    The topic name is normalized (case-insensitive; "_" and "-" stripped)
    before lookup in the _HELP table.

    Returns
    -------
    UrlReference
        When the topic maps to a URL (value starts with "http").
    MarkdownString
        Otherwise; topics with no content yet yield a "not implemented"
        message. (Return annotation fixed: the original claimed only
        MarkdownString, but URL topics return UrlReference.)

    Raises
    ------
    ValueError
        If the topic is unknown.
    """
    normalized_topic = topic.strip().lower().replace("_", "").replace("-", "")
    help_topic_string = _HELP.get(normalized_topic)
    if help_topic_string is None:
        raise ValueError("{0} unknown help topic".format(topic))
    if help_topic_string.startswith("http"):
        button_text = "popup {0} reference ".format(topic)
        return UrlReference(topic, help_topic_string, button_text)
    elif help_topic_string == '':
        help_topic_string = "Sorry, not implemented yet."
    return MarkdownString(help_topic_string)
|
import numbers
import numpy
from Calculator import Calculator
from Function import Function
from Operator import Operator
from Queue import Queue
from Stack import Stack
class TestHandler:
    """Smoke tests for the calculator components (Queue, Stack, Function,
    Operator, Calculator), run via plain asserts rather than a framework.
    """

    # fixture items shared by the queue and stack tests
    _test_items = ["firstItem", "secondItem", "thirdItem", "lastItem"]

    def run(self):
        """Runs every test, containers first, full expressions last."""
        self._test_queue()
        self._test_stack()
        self._test_func()
        self._test_operator()
        self._test_calc()
        self._test_create_output_queue()
        self._test_calculate_expression()

    def _test_queue(self):
        """Checks FIFO push/pop/peek/size/is_empty behavior of Queue."""
        queue = Queue()
        assert queue.is_empty()
        for item in self._test_items:
            queue.push(item)
        assert queue.size() == 4, "expected queue to be of size 4"
        assert (
            queue.peek() == "firstItem"
        ), "expected first item in queue to be firstItem"
        popped = queue.pop()
        assert queue.size() == 3
        assert queue.peek() == "secondItem"
        assert popped == "firstItem"
        # drain and confirm emptiness is reported again
        while not queue.is_empty():
            queue.pop()
        assert queue.is_empty()

    def _test_stack(self):
        """Checks LIFO push/pop/peek/size/is_empty behavior of Stack."""
        stack = Stack()
        assert stack.is_empty()
        for item in self._test_items:
            stack.push(item)
        assert stack.size() == 4, "expected stack to be of size 4"
        assert stack.peek() == "lastItem", "expected top item in stack to be lastItem"
        popped = stack.pop()
        assert stack.size() == 3
        assert stack.peek() == "thirdItem"
        assert popped == "lastItem"
        # drain and confirm emptiness is reported again
        while not stack.is_empty():
            stack.pop()
        assert stack.is_empty()

    def _test_func(self):
        """Checks Function wrapping of numpy callables and composition."""
        exp_func = Function(numpy.exp)
        sin_func = Function(numpy.sin)
        assert isinstance(exp_func, Function)
        assert not isinstance(numpy.exp, Function)
        # exp(sin(0)) == exp(0) == 1.0
        assert exp_func.execute(sin_func.execute(0)) == 1.0

    def _test_operator(self):
        """Checks Operator execution; strength encodes precedence."""
        add_op = Operator(operation=numpy.add, strength=0)
        multiply_op = Operator(operation=numpy.multiply, strength=1)
        # 1 + (2 * 3) == 7
        assert add_op.execute(1, multiply_op.execute(2, 3)) == 7

    def _test_calc(self):
        """Checks the Calculator's built-in function/operator tables."""
        calc = Calculator()
        # EXP(1 + 2 * 3) == e**7
        assert calc.functions["EXP"].execute(
            calc.operators["ADD"].execute(1, calc.operators["MULTIPLY"].execute(2, 3))
        ) == numpy.exp(7)

    def _test_create_output_queue(self):
        """Checks shunting-yard conversion of an infix token queue to RPN."""
        test_queue = Queue()
        test_queue.push("exp")
        test_queue.push("(")
        test_queue.push(1)
        test_queue.push("add")
        test_queue.push(2)
        test_queue.push("multiply")
        test_queue.push(3)
        test_queue.push(")")
        calc = Calculator(input_queue=test_queue)
        output_queue = calc.create_output_queue()
        # RPN ordering: operands first, operators by precedence, function last
        assert str(output_queue) == "1, 2, 3, multiply, add, exp, "

    def _test_calculate_expression(self):
        """Checks end-to-end parsing and evaluation of text expressions."""
        text1 = "exp (1 add 2 multiply 3)"
        text2 = (
            "((15 DIVIDE (7 SUBTRACT (1 ADD 1))) MULTIPLY 3) SUBTRACT (2 ADD (1 ADD 1))"
        )
        calc = Calculator()
        input_queue1 = calc.create_input_queue(text1)
        # tokens are upper-cased and numbers parsed to floats
        assert str(input_queue1) == "EXP, (, 1.0, ADD, 2.0, MULTIPLY, 3.0, ), "
        output_queue1 = calc.create_output_queue()
        input_queue2 = calc.create_input_queue(text2)
        assert (
            str(input_queue2)
            == "(, (, 15.0, DIVIDE, (, 7.0, SUBTRACT, (, 1.0, ADD, 1.0, ), ), ), MULTIPLY, 3.0, ), SUBTRACT, (, 2.0, ADD, (, 1.0, ADD, 1.0, ), ), "
        )
        output_queue2 = calc.create_output_queue()
        assert (
            str(output_queue2)
            == "15.0, 7.0, 1.0, 1.0, add, subtract, true_divide, 3.0, multiply, 2.0, 1.0, 1.0, add, add, subtract, "
        )
        assert str(calc.calculate_expression(text1)).replace(", ", "") == str(
            numpy.exp(7)
        )
        assert str(calc.calculate_expression(text2)).replace(", ", "") == "5.0"
|
<reponame>lee14257/delphi-epidata-py
from dataclasses import dataclass, field
from enum import Enum
from datetime import date
from urllib.parse import urlencode
from typing import (
Any,
Dict,
Final,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
TypedDict,
Union,
cast,
)
from epiweeks import Week
from pandas import DataFrame, CategoricalDtype
from ._parse import parse_api_date, parse_api_week, parse_api_date_or_week, fields_to_predicate
# Accepted date inputs: raw ints/strings (YYYYMMDD or epiweek), datetime.date,
# or epiweeks.Week.
EpiDateLike = Union[int, str, date, Week]
# Dict form of a range: {"from": ..., "to": ...} with date-like endpoints.
EpiRangeDict = TypedDict("EpiRangeDict", {"from": EpiDateLike, "to": EpiDateLike})
# Anything format_item can turn into an API value or range string.
EpiRangeLike = Union[int, str, "EpiRange", EpiRangeDict, date, Week]
def format_date(d: EpiDateLike) -> str:
    """Renders a date-like value the way the API expects.

    datetime.date becomes "YYYYMMDD", an epiweeks.Week uses the CDC week
    format, and anything else (int/str) is passed through via str().
    """
    if isinstance(d, date):
        return d.strftime("%Y%m%d")  # YYYYMMDD
    if isinstance(d, Week):
        return cast(str, d.cdcformat())
    return str(d)
def format_item(value: EpiRangeLike) -> str:
    """Casts a single value and/or range to its API string form."""
    if isinstance(value, (date, Week)):
        return format_date(value)
    if isinstance(value, Enum):
        return str(value.value)
    if isinstance(value, dict) and "from" in value and "to" in value:
        lower = format_date(value["from"])
        upper = format_date(value["to"])
        return f"{lower}-{upper}"
    # EpiRange (via its __str__) and plain ints/strings both stringify.
    return str(value)
def format_list(values: Union[EpiRangeLike, Iterable[EpiRangeLike]]) -> str:
    """Turns a value/range, or a list/tuple/set of them, into a
    comma-separated string."""
    if not isinstance(values, (list, tuple, set)):
        return format_item(values)
    return ",".join(format_item(entry) for entry in values)
# Endpoint type shared by both ends of an EpiRange.
EPI_RANGE_TYPE = TypeVar("EPI_RANGE_TYPE", int, date, str, Week)


@dataclass(repr=False)
class EpiRange(Generic[EPI_RANGE_TYPE]):
    """
    Range object for dates/epiweeks.

    Endpoints are inclusive; if constructed in the wrong order they are
    swapped so that ``start <= end`` always holds.
    """

    start: EPI_RANGE_TYPE
    end: EPI_RANGE_TYPE

    def __post_init__(self) -> None:
        # swap if wrong order
        # complicated construct for typing inference
        if self.end < self.start:
            self.start, self.end = self.end, self.start

    def __repr__(self) -> str:
        return str(self)

    def __str__(self) -> str:
        # rendered as "<start>-<end>" using the API date formatting
        return f"{format_date(self.start)}-{format_date(self.end)}"
# Shape of the classic API response envelope.
EpiDataResponse = TypedDict("EpiDataResponse", {"result": int, "message": str, "epidata": List})
# Convenience aliases for parameter values accepted by endpoint calls.
EpiRangeParam = Union[EpiRangeLike, Iterable[EpiRangeLike]]
StringParam = Union[str, Iterable[str]]
IntParam = Union[int, Iterable[int]]
class EpiDataFormatType(str, Enum):
    """
    Possible formatting options for API calls.
    """

    # values double as the "format" query parameter sent to the API
    json = "json"
    classic = "classic"
    csv = "csv"
    jsonl = "jsonl"
class InvalidArgumentException(Exception):
    """
    Exception for an invalid argument.
    """


class OnlySupportsClassicFormatException(Exception):
    """
    The endpoint only supports the classic message format, due to a
    non-standard behavior.
    """
class EpidataFieldType(Enum):
    """
    Field type tag; drives how raw response values are parsed
    (see AEpiDataCall._parse_value).
    """

    text = 0
    int = 1
    float = 2
    date = 3
    epiweek = 4
    categorical = 5
    bool = 6
    date_or_epiweek = 7
@dataclass
class EpidataFieldInfo:
    """
    Metadata information about a return field.
    """

    # field name as it appears in the API response
    name: Final[str] = ""
    # declared type, used to parse raw values into typed form
    type: Final[EpidataFieldType] = EpidataFieldType.text
    # human-readable description of the field
    description: Final[str] = ""
    # allowed category labels when type is categorical
    categories: Final[Sequence[str]] = field(default_factory=list)
# Generic return type used by endpoint factory helpers.
CALL_TYPE = TypeVar("CALL_TYPE")
def add_endpoint_to_url(url: str, endpoint: str) -> str:
    """Joins *endpoint* onto *url*, inserting a trailing slash if needed."""
    prefix = url if url.endswith("/") else url + "/"
    return prefix + endpoint
class AEpiDataCall:
    """
    Base epidata call class: captures the endpoint, its parameters and
    field metadata, and knows how to format itself into a request URL.
    """

    # API server root this call is issued against
    _base_url: Final[str]
    # endpoint path appended to the base url
    _endpoint: Final[str]
    # raw query parameters (None values are dropped when formatting)
    _params: Final[Mapping[str, Union[None, EpiRangeLike, Iterable[EpiRangeLike]]]]
    # declared result fields, used to parse values into typed form
    meta: Final[Sequence[EpidataFieldInfo]]
    # meta indexed by field name for fast lookup
    meta_by_name: Final[Mapping[str, EpidataFieldInfo]]
    # endpoint only understands the classic response format
    only_supports_classic: Final[bool]
    def __init__(
        self,
        base_url: str,
        endpoint: str,
        params: Mapping[str, Union[None, EpiRangeLike, Iterable[EpiRangeLike]]],
        meta: Optional[Sequence[EpidataFieldInfo]] = None,
        only_supports_classic: bool = False,
    ) -> None:
        """
        :param base_url: API server root the endpoint is appended to
        :param endpoint: endpoint path of this call
        :param params: raw query parameters; None values are dropped later
        :param meta: field metadata used for parsing results (may be empty)
        :param only_supports_classic: endpoint ignores the "format" option
        """
        self._base_url = base_url
        self._endpoint = endpoint
        self._params = params
        self.only_supports_classic = only_supports_classic
        self.meta = meta or []
        # index meta by field name for O(1) lookups during parsing
        self.meta_by_name = {k.name: k for k in self.meta}
    def _verify_parameters(self) -> None:
        # hook for verifying parameters before sending;
        # the default implementation accepts everything — subclasses override
        pass
def _formatted_paramters(
self, format_type: Optional[EpiDataFormatType] = None, fields: Optional[Iterable[str]] = None
) -> Mapping[str, str]:
"""
format this call into a [URL, Params] tuple
"""
all_params = dict(self._params)
if format_type and format_type != EpiDataFormatType.classic:
all_params["format"] = format_type
if fields:
all_params["fields"] = fields
return {k: format_list(v) for k, v in all_params.items() if v is not None}
def request_arguments(
self, format_type: Optional[EpiDataFormatType] = None, fields: Optional[Iterable[str]] = None
) -> Tuple[str, Mapping[str, str]]:
"""
format this call into a [URL, Params] tuple
"""
formatted_params = self._formatted_paramters(format_type, fields)
full_url = self._full_url()
return full_url, formatted_params
def _full_url(self) -> str:
"""
combines the endpoint with the given base url
"""
return add_endpoint_to_url(self._base_url, self._endpoint)
def request_url(
self,
format_type: Optional[EpiDataFormatType] = None,
fields: Optional[Iterable[str]] = None,
) -> str:
"""
format this call into a full HTTP request url with encoded parameters
"""
self._verify_parameters()
u, p = self.request_arguments(format_type, fields)
query = urlencode(p)
if query:
return f"{u}?{query}"
return u
    def __repr__(self) -> str:
        # Debug representation: endpoint plus the formatted query parameters.
        return f"EpiDataCall(endpoint={self._endpoint}, params={self._formatted_paramters()})"
    def __str__(self) -> str:
        # The string form of a call is its full request URL.
        return self.request_url()
    def _parse_value(
        self, key: str, value: Union[str, float, int, None], disable_date_parsing: Optional[bool] = False
    ) -> Union[str, float, int, date, None]:
        """Convert a raw API value according to the field metadata for `key`.

        Fields without metadata and None values pass through unchanged;
        date/epiweek fields are parsed unless `disable_date_parsing` is set.
        """
        meta = self.meta_by_name.get(key)
        if not meta or value is None:
            return value
        if meta.type == EpidataFieldType.date_or_epiweek and not disable_date_parsing:
            return parse_api_date_or_week(value)
        if meta.type == EpidataFieldType.date and not disable_date_parsing:
            return parse_api_date(value)
        if meta.type == EpidataFieldType.epiweek and not disable_date_parsing:
            return parse_api_week(value)
        if meta.type == EpidataFieldType.bool:
            return bool(value)
        return value
def _parse_row(
self, row: Mapping[str, Union[str, float, int, None]], disable_date_parsing: Optional[bool] = False
) -> Mapping[str, Union[str, float, int, date, None]]:
if not self.meta:
return row
return {k: self._parse_value(k, v, disable_date_parsing) for k, v in row.items()}
    def _as_df(
        self,
        rows: Sequence[Mapping[str, Union[str, float, int, date, None]]],
        fields: Optional[Iterable[str]] = None,
        disable_date_parsing: Optional[bool] = False,
    ) -> DataFrame:
        """Build a DataFrame from parsed rows, coercing column dtypes from
        the field metadata (optionally restricted to `fields`)."""
        pred = fields_to_predicate(fields)
        columns: List[str] = [info.name for info in self.meta if pred(info.name)]
        df = DataFrame(rows, columns=columns or None)
        data_types: Dict[str, Any] = {}
        for info in self.meta:
            # Skip filtered-out columns and all-null columns (casting a fully
            # null column would fail or produce meaningless values).
            if not pred(info.name) or df[info.name].isnull().values.all():
                continue
            if info.type == EpidataFieldType.bool:
                data_types[info.name] = bool
            elif info.type == EpidataFieldType.categorical:
                data_types[info.name] = CategoricalDtype(categories=info.categories or None, ordered=True)
            elif info.type == EpidataFieldType.int:
                data_types[info.name] = int
            elif info.type in (EpidataFieldType.date, EpidataFieldType.epiweek, EpidataFieldType.date_or_epiweek):
                # With date parsing disabled the raw integer form is kept.
                data_types[info.name] = int if disable_date_parsing else "datetime64"
            elif info.type == EpidataFieldType.float:
                data_types[info.name] = float
            else:
                data_types[info.name] = str
        if data_types:
            df = df.astype(data_types)
        return df
|
import tensorflow as tf
from tensorflow.contrib.layers import flatten
import pickle
from tensorflow.examples.tutorials.mnist import input_data
import random
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import cv2
import glob
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# %matplotlib inline  # BUGFIX: Jupyter magic is a syntax error in a plain .py
# module; kept here as a comment for notebook use.

# Pickled splits of the traffic-sign dataset.
training_file = "train.p"
validation_file = "valid.p"
testing_file = "test.p"

with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)

# Each split is a dict with 'features' (images) and 'labels' (class ids).
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']

# Sanity checks: every image must have a label.
assert len(X_train) == len(y_train)
assert len(X_valid) == len(y_valid)
assert len(X_test) == len(y_test)

# BUGFIX: random.randint is inclusive on both ends; the original upper bound
# len(X_train) could produce an out-of-range index.
index = random.randint(0, len(X_train) - 1)
image = X_train[index].squeeze()
# code that has been commented below has been left here for debug purposes
# plt.figure(figsize = (1,1))
# plt.imshow(image)

EPOCHS = 60
BATCH_SIZE = 20

# Number of training examples
n_train = len(X_train)
# Number of validation examples
n_validation = len(X_valid)
# Number of testing examples.
n_test = len(X_test)
# Determine the shape of a traffic sign image.
image_shape = X_train[0].shape
# Number of unique classes/labels there are in the dataset.
n_classes = 43
# Printing out all relevant information before processing.
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Training my model using the following architecture.
def LeNet(x):
    """LeNet-5-style network producing one logit per traffic-sign class.

    Args:
        x: input tensor of shape (batch, 32, 32, 1).
    Returns:
        logits tensor of shape (batch, 43).
    """
    # Arguments used for tf.truncated_normal, randomly defines variables
    # for the weights and biases for each layer.
    mu = 0
    sigma = 0.1

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # Activation and dropout (keep_prob=0.9, fixed seed for reproducibility).
    conv1 = tf.nn.dropout(tf.nn.relu(conv1), 0.9, noise_shape=None, seed=1, name=None)
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv1 = tf.nn.dropout(conv1, 0.99, noise_shape=None, seed=None, name=None)

    # Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # CLEANUP: two tf.nn.dropout calls whose results were discarded (dead graph
    # nodes with no effect on the logits) have been removed here.

    # Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    # CLEANUP: the original computed this matmul twice, discarding the first
    # result; only the dropout-wrapped version is kept.
    fc1 = tf.nn.dropout(tf.matmul(fc0, fc1_W) + fc1_b, 0.99, noise_shape=None, seed=1, name=None)
    # Activation & dropout.
    fc1 = tf.nn.dropout(tf.nn.relu(fc1), 0.99, noise_shape=None, seed=1, name=None)

    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # Activation & dropout.
    fc2 = tf.nn.dropout(tf.nn.relu(fc2), 0.99, noise_shape=None, seed=5, name=None)

    # Layer 5: Fully Connected. Input = 84. Output = 43 (one logit per class;
    # the original comment wrongly said 10).
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
### Calculate and report the accuracy on the training and validation set.
def evaluate(X_data, y_data):
    """Compute mean accuracy over a dataset in BATCH_SIZE chunks.

    Relies on the module-level graph nodes `accuracy_operation`, `x`, `y`,
    `BATCH_SIZE`, and a default TF session created by the caller.
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        # Weight each batch by its size so a short final batch is not overcounted.
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
def plot_signs(signs, nrows = 1, ncols=1, labels=None):
    """Plot a dict of images on a grid of subplots with per-image titles.

    NOTE(review): the first two arguments to plt.subplots are passed as
    (ncols, nrows), i.e. swapped relative to the parameter names; the caller
    below compensates by passing them swapped as well -- confirm before fixing.
    """
    fig, axs = plt.subplots(ncols, nrows, figsize=(15, 8))
    axs = axs.ravel()
    for index, title in zip(range(len(signs)), signs):
        axs[index].imshow(signs[title])
        axs[index].set_title(labels[index], fontsize=10)
    return()
def normalize_image(image):
    """Scale pixel values from [0, 255] to approximately [-1, 1)."""
    centered = image - 128.
    return centered / 128.
def gray_scale(image):
    """Average the last axis of a 4-D image batch into a single channel.

    Assumes the channel axis is axis 3 with 3 channels (RGB) -- the /3
    turns the channel sum into a mean. Output keeps a size-1 channel axis.
    """
    channel_mean = np.sum(image / 3, axis=3, keepdims=True)
    return channel_mean
### Load the images and plot them here.
# signnames.csv maps class id -> human-readable sign name.
sign_text = np.genfromtxt('signnames.csv', skip_header=1, dtype=[('myint','i8'), ('mystring','S55')], delimiter=',')
number_of_images_to_display = 20
signs = {}
labels = {}
# Pick random training images and look up their label text.
for i in range(number_of_images_to_display):
    index = random.randint(0, n_train-1)
    labels[i] = sign_text[y_train[index]][1].decode('ascii')
    signs[i] = X_train[index]
plot_signs(signs, 5, 4, labels)
# Finding/Displaying Distribution of unique elements in train, test and validation arrays
train_unique, counts_train = np.unique(y_train, return_counts=True)
plt.bar(train_unique, counts_train)
plt.grid()
plt.title("\nTrain Dataset Distribution")
plt.show()
test_unique, counts_test = np.unique(y_test, return_counts=True)
plt.bar(test_unique, counts_test)
plt.grid()
plt.title("Test Dataset Distribution")
plt.show()
valid_unique, counts_valid = np.unique(y_valid, return_counts=True)
plt.bar(valid_unique, counts_valid)
plt.grid()
plt.title("Valid Dataset Distribution")
plt.show()
### Pre-processing pipeline
# Normalize images to roughly [-1, 1).
X_train = normalize_image(X_train)
X_valid = normalize_image(X_valid)
X_test = normalize_image(X_test)
# Gray-scale images (channel mean, keeps a size-1 channel axis).
X_train = gray_scale(X_train)
X_valid = gray_scale(X_valid)
X_test = gray_scale(X_test)
# Graph inputs: 32x32x1 grayscale images and integer class labels.
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
rate = 0.00065  # Adam learning rate
# Build the training graph: logits, cross-entropy loss, optimizer, accuracy.
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
saver = tf.train.Saver()
# Train for EPOCHS epochs, shuffling each epoch, then save the model.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        # Reshuffle every epoch so batches differ between epochs.
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        validation_accuracy = evaluate(X_valid, y_valid)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")
# Evaluate the saved model on the held-out test set.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
### Calculating the accuracy for these 5 new images in the lines of code below
### Loading images and plotting them
test_images = glob.glob('test_images/test*.jpg')
# NOTE(review): `global` at module level is a no-op; kept as-is.
global X_test_new
X_test_new = []
for image in test_images:
    img=mpimg.imread(image)
    f, (ax) = plt.subplots(1, 1, figsize=(24, 9))
    f.tight_layout()
    ax.imshow(img)
    ax.set_title('New Image', fontsize=25)
    X_test_new.append(img)
X_test_new = np.asarray(X_test_new)
### Pre-processing pipeline
#Normalize images
X_test_new = normalize_image(X_test_new)
#Gray Scale images
X_test_new = gray_scale(X_test_new)
### Calculating the accuracy for these 5 new images.
# Ground-truth class ids for the 5 downloaded images.
real_labels = [11, 25, 18, 3, 1]
with tf.Session() as sess:
    print("Testing...")
    sess.run(tf.global_variables_initializer())
    # Restore the weights saved during training.
    train_run_saver = tf.train.import_meta_graph('lenet.meta')
    train_run_saver.restore(sess, "./lenet")
    test_accuracy = evaluate(X_test_new, real_labels)
    print("Test Set Accuracy = {:.3f}".format(test_accuracy))
# Top-3 class probabilities for each new image.
softmax_logits = tf.nn.softmax(logits)
top_k = tf.nn.top_k(softmax_logits, k=3)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.import_meta_graph('./lenet.meta')
    saver.restore(sess, "./lenet")
    my_softmax_logits = sess.run(softmax_logits, feed_dict={x: X_test_new})
    my_top_k = sess.run(top_k, feed_dict={x: X_test_new})
# For each of the 5 new test images, find the index of a validation image
# whose label matches each of its top-3 predicted classes, so the guesses
# can be visualised next to the input below. Replaces 30 copy-pasted
# statements with an equivalent loop; the dead module-level
# `global softmax_index` declaration (a no-op, name never used) is removed.
softmax_index1 = []
softmax_index2 = []
softmax_index3 = []
for image_idx in range(5):
    for guess_rank, bucket in enumerate((softmax_index1, softmax_index2, softmax_index3)):
        predicted_class = my_top_k[1][image_idx][guess_rank]
        bucket.append(np.argwhere(y_valid == predicted_class)[0])
# Show each new image beside validation examples of its top-3 predicted classes.
for i, image in enumerate(X_test_new):
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(12, 4))
    fig.suptitle('Softmax')
    ax1.imshow(X_test_new[i])
    ax1.set_title('input')
    ax2.imshow(X_valid[softmax_index1[i]].squeeze(), cmap='gray')
    ax2.set_title('Top Guess: SignID {} ({:.0f}%)'.format(my_top_k[1][i][0], 100*my_top_k[0][i][0]))
    ax3.imshow(X_valid[softmax_index2[i]].squeeze(), cmap='gray')
    ax3.set_title('2nd Guess: SignID {} ({:.0f}%)'.format(my_top_k[1][i][1], 100*my_top_k[0][i][1]))
    ax4.imshow(X_valid[softmax_index3[i]].squeeze(), cmap='gray')
    ax4.set_title('3rd Guess: SignID {} ({:.0f}%)'.format(my_top_k[1][i][2], 100*my_top_k[0][i][2]))
|
# !/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
# pylint: disable=abstract-method
"""Bert Training Script."""
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torchmetrics import Accuracy
from sklearn.metrics import accuracy_score
from torch import nn
from transformers import AdamW, BertModel
class BertNewsClassifier(pl.LightningModule): #pylint: disable=too-many-ancestors,too-many-instance-attributes
    """Bert Model Class: frozen BERT encoder plus a trainable 2-layer head
    classifying AG-News-style snippets into four classes."""
    def __init__(self, **kwargs):
        """Initializes the network, optimizer and scheduler."""
        super(BertNewsClassifier, self).__init__() #pylint: disable=super-with-arguments
        self.pre_trained_model_name = "bert-base-uncased" #pylint: disable=invalid-name
        self.bert_model = BertModel.from_pretrained(self.pre_trained_model_name)
        # Freeze the pretrained encoder; only the classifier head is trained.
        for param in self.bert_model.parameters():
            param.requires_grad = False
        self.drop = nn.Dropout(p=0.2)
        # assigning labels
        self.class_names = ["World", "Sports", "Business", "Sci/Tech"]
        n_classes = len(self.class_names)
        # Classifier head: hidden_size -> 512 -> n_classes.
        self.fc1 = nn.Linear(self.bert_model.config.hidden_size, 512)
        self.out = nn.Linear(512, n_classes)
        # self.bert_model.embedding = self.bert_model.embeddings
        # self.embedding = self.bert_model.embeddings
        self.scheduler = None
        self.optimizer = None
        self.args = kwargs
        # Separate torchmetrics accumulators per phase.
        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.test_acc = Accuracy()
        # Collected test-time predictions and targets.
        self.preds = []
        self.target = []
    def compute_bert_outputs( #pylint: disable=no-self-use
        self, model_bert, embedding_input, attention_mask=None, head_mask=None
    ):
        """Computes Bert Outputs.
        Args:
            model_bert : the bert model
            embedding_input : input for bert embeddings.
            attention_mask : attention mask
            head_mask : head mask
        Returns:
            output : the bert output
        """
        if attention_mask is None:
            attention_mask = torch.ones( #pylint: disable=no-member
                embedding_input.shape[0], embedding_input.shape[1]
            ).to(embedding_input)
        # Broadcastable mask: masked positions get a large negative additive bias.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(model_bert.parameters()).dtype
        ) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(
                    -1
                ).unsqueeze(-1)
                head_mask = head_mask.expand(
                    model_bert.config.num_hidden_layers, -1, -1, -1, -1
                )
            elif head_mask.dim() == 2:
                head_mask = (
                    head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
                ) # We can specify head_mask for each layer
                head_mask = head_mask.to(
                    dtype=next(model_bert.parameters()).dtype
                ) # switch to float if need + fp16 compatibility
        else:
            head_mask = [None] * model_bert.config.num_hidden_layers
        encoder_outputs = model_bert.encoder(
            embedding_input, extended_attention_mask, head_mask=head_mask
        )
        sequence_output = encoder_outputs[0]
        pooled_output = model_bert.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]
        return outputs
    def forward(self, input_ids, attention_mask=None):
        """ Forward function.
        Args:
            input_ids: Input data
            attention_mask: Attention mask value
        Returns:
            output - Type of news for the given news snippet
        """
        embedding_input = self.bert_model.embeddings(input_ids)
        outputs = self.compute_bert_outputs(
            self.bert_model, embedding_input, attention_mask
        )
        # outputs[1] is the pooled [CLS] representation.
        pooled_output = outputs[1]
        output = torch.tanh(self.fc1(pooled_output))
        output = self.drop(output)
        output = self.out(output)
        return output
    def training_step(self, train_batch, batch_idx):
        """Training the data as batches and returns training loss on each
        batch.
        Args:
            train_batch Batch data
            batch_idx: Batch indices
        Returns:
            output - Training loss
        """
        input_ids = train_batch["input_ids"].to(self.device)
        attention_mask = train_batch["attention_mask"].to(self.device)
        targets = train_batch["targets"].to(self.device)
        output = self.forward(input_ids, attention_mask)
        _, y_hat = torch.max(output, dim=1) #pylint: disable=no-member
        loss = F.cross_entropy(output, targets)
        self.train_acc(y_hat, targets)
        # NOTE(review): .compute() reports the accuracy accumulated so far,
        # not this batch's accuracy -- confirm that is the intended metric.
        self.log("train_acc", self.train_acc.compute())
        self.log("train_loss", loss)
        return {"loss": loss, "acc": self.train_acc.compute()}
    def test_step(self, test_batch, batch_idx):
        """Performs test and computes the accuracy of the model.
        Args:
            test_batch: Batch data
            batch_idx: Batch indices
        Returns:
            output - Testing accuracy
        """
        input_ids = test_batch["input_ids"].to(self.device)
        attention_mask = test_batch["attention_mask"].to(self.device)
        targets = test_batch["targets"].to(self.device)
        output = self.forward(input_ids, attention_mask)
        _, y_hat = torch.max(output, dim=1) #pylint: disable=no-member
        test_acc = accuracy_score(y_hat.cpu(), targets.cpu())
        self.test_acc(y_hat, targets)
        # Accumulate raw predictions/targets for post-hoc analysis.
        self.preds += y_hat.tolist()
        self.target += targets.tolist()
        self.log("test_acc", self.test_acc.compute())
        return {"test_acc": torch.tensor(test_acc)} #pylint: disable=no-member
    def validation_step(self, val_batch, batch_idx):
        """Performs validation of data in batches.
        Args:
            val_batch: Batch data
            batch_idx: Batch indices
        Returns:
            output - valid step loss
        """
        input_ids = val_batch["input_ids"].to(self.device)
        attention_mask = val_batch["attention_mask"].to(self.device)
        targets = val_batch["targets"].to(self.device)
        output = self.forward(input_ids, attention_mask)
        _, y_hat = torch.max(output, dim=1) #pylint: disable=no-member
        loss = F.cross_entropy(output, targets)
        self.val_acc(y_hat, targets)
        self.log("val_acc", self.val_acc.compute())
        self.log("val_loss", loss, sync_dist=True)
        return {"val_step_loss": loss, "acc": self.val_acc.compute()}
    def configure_optimizers(self):
        """Initializes the optimizer and learning rate scheduler.
        Returns:
            output - Initialized optimizer and scheduler
        """
        self.optimizer = AdamW(self.parameters(), lr=self.args.get("lr", 0.001))
        # Reduce LR when the monitored validation loss plateaus.
        self.scheduler = {
            "scheduler":
                torch.optim.lr_scheduler.ReduceLROnPlateau(
                    self.optimizer,
                    mode="min",
                    factor=0.2,
                    patience=2,
                    min_lr=1e-6,
                    verbose=True,
                ),
            "monitor":
                "val_loss",
        }
        return [self.optimizer], [self.scheduler]
|
<reponame>smoe/SSUsearch
#! /usr/bin/env python
# correct ssu rRNA gene copy number based on the taxon copy number table from
# copyrighter.
# by gjr; 080614
"""
Corrent ssu rRNA gene copy number based on the taxon copy number table from
copyrigter.
% python copyrighter.py <copy.table> <sample.gg.taxonomy> <outfile.table>
"""
import sys
import os
import collections
#EXCLUDE = ['Archaea', 'Eukaryota', 'unknown']
EXCLUDE = []  # taxa containing any of these words are skipped entirely
LEVELS = 7    # expected number of taxonomic ranks per lineage


def read_refcopy(f):
    """
    Parse a taxon string to copy number table

    Return a dictionary with taxon as key and copy number as value

    Parameters
    ----------
    f : str
        filename of taxon string to copy number table

    Returns
    -------
    dictionary:
        with taxon as key and copy number as value
    """
    d_refcopy = {}
    for n, line in enumerate(open(f)):
        # Skip comment lines and blank lines.
        if line.startswith('#'):
            continue
        line = line.rstrip()
        if line == '':
            continue
        _lis = line.split('\t')
        taxa, num, = _lis[:2]
        skip = False
        for word in EXCLUDE:
            if word in taxa:
                skip = True
                break
        if skip:
            continue
        # the parsing of taxa works for both mothur output and this
        taxa = taxa.rstrip(';')  # for mothur classfy.seqs output
        lis = taxa.split(';')
        lis2 = []
        for item in lis:
            item = item.strip()  # for copyrighter copy table ' ;' separator
            # Strip trailing bootstrap values like "Firmicutes(100)".
            if item.endswith(')'):
                item = item.rsplit('(', 1)[0].strip()
            # remove taxon level prefix, e.g. 'p__Firmicutes'
            if '__' in item:
                item = item.split('__', 1)[1]
            # greengenes taxonomy has spaces
            item = item.replace(' ', '_')
            item = item.lower()
            # Normalize all "unknown"-like markers to one placeholder.
            if item in ('', 'unknown', 'other', 'unassigned'):
                item = 'Unclassifed'
            item = item.capitalize()
            lis2.append(item)
        length = len(lis2)
        assert length <= LEVELS, '> {} levels found ({})'.format(
            LEVELS, length)
        if length != LEVELS:
            # BUGFIX: pad with the same (historically misspelled) 'Unclassifed'
            # marker used by the per-item normalization above and by
            # read_mothur_taxonomy; the previous 'Unclassified' spelling could
            # never match taxonomy keys, so padded entries were unreachable.
            lis2 = lis2 + ['Unclassifed'] * (LEVELS - length)
        tu = tuple(lis2)
        d_refcopy[tu] = float(num)
    return d_refcopy
def read_mothur_taxonomy(f):
    """
    Parse mothur classify.seqs output

    Note: unknown-like ranks are normalized to the (misspelled) marker
    'Unclassifed' -- keys must match read_refcopy, which uses the same
    normalization.

    Parameters:
    -----------
    f : str
        file name of .taxonomy file from classify.seqs

    Returns:
    --------
    generator
        an iterable (generator) of tuples (each level of taxonomy)
    """
    for n, line in enumerate(open(f)):
        # Skip comment lines.
        if line.startswith('#'):
            continue
        line = line.rstrip()
        name, taxa = line.rstrip().split('\t')
        skip = False
        for word in EXCLUDE:
            if word in taxa:
                skip = True
                break
        if skip:
            continue
        # the parsing of taxa works for both mothur output and this
        taxa = taxa.rstrip(';') # for mothur classfy.seqs output
        lis = taxa.split(';')
        lis2 = []
        for item in lis:
            item = item.strip() # for copyrigher copy table ' ;' separater
            # Strip trailing bootstrap values like "Firmicutes(100)".
            if item.endswith(')'):
                item = item.rsplit('(', 1)[0].strip()
            # remove taxon level prefix, e.g. 'p__Firmicutes'
            if '__' in item:
                item = item.split('__', 1)[1]
            #item = item.strip('"')
            # green gene taxonomy has sapce
            item = item.replace(' ', '_')
            item = item.lower()
            # Normalize all "unknown"-like markers to one placeholder.
            if item == '':
                item = 'Unclassifed'
            elif item == 'unknown':
                item = 'Unclassifed'
            elif item == 'unclassified':
                item = 'Unclassifed'
            elif item == 'other':
                item = 'Unclassifed'
            elif item == 'unassigned':
                item = 'Unclassifed'
            item = item.capitalize()
            lis2.append(item)
        length = len(lis2)
        # mothur lineages are expected to always be full depth.
        assert length == LEVELS, 'levels ({}) is not ({})'.format(
            length, LEVELS)
        yield tuple(lis2)
def main():
    """CLI entry (Python 2): read the copy-number table and the sample
    taxonomy, divide each taxon's read count by its rRNA gene copy number
    (using the table average for taxa missing from the table), and write
    the corrected counts to the output file."""
    if len(sys.argv) != 4:
        mes = ('Usage: python {} <copy.table> '
               '<sample.gg.taxonomy> <outfile.table>')
        print >> sys.stderr, mes.format(os.path.basename(sys.argv[0]))
        sys.exit(1)
    copytable = sys.argv[1]
    taxonfile = sys.argv[2]
    outfile = sys.argv[3]
    d_refcopy = read_refcopy(copytable)
    g_taxonomy = read_mothur_taxonomy(taxonfile)
    # Count how many reads were assigned to each full lineage.
    d_count = collections.Counter(g_taxonomy)
    missing_taxon_cnt = 0
    temp_summ = 0
    temp_cnt = 0
    # Average copy number over the taxa that ARE in the reference table,
    # used as a fallback for taxa that are missing from it.
    for key in d_count:
        if key in d_refcopy:
            temp_cnt += 1
            temp_summ += d_refcopy[key]
    average_copy = temp_summ*1.0/temp_cnt
    for key in d_count:
        if key in d_refcopy:
            copy = d_refcopy[key]
        else:
            copy = average_copy
            missing_taxon_cnt += 1
            print >> sys.stderr, '{} is missing in copyrighter'.format(
                ';'.join(key))
        # Correct the raw count by the per-genome copy number.
        d_count[key] = d_count[key]/copy
    _mes = '{0:d} taxons are not found in copyrighter, {1:.1f} copy per genome is used'
    print >> sys.stderr, _mes.format(missing_taxon_cnt, average_copy)
    with open(outfile, 'wb') as fw:
        for key, cnt in sorted(d_count.items()):
            taxon_string = ';'.join(key)
            print >> fw, '{}\t{}'.format(taxon_string, cnt)


if __name__ == '__main__':
    main()
|
import socket
import sys
import thread
import Tkinter as tk
from time import sleep
from protocol import *
LINE = "----------------------------------------"
class ChatRoomClient():
    """Thin wrapper around a TCP socket for talking to the chat server."""

    def __init__(self, username):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.username = username

    def connect(self, host, port, timeout=10):
        """Connect to the server, then apply a receive timeout (seconds)."""
        self.sock.connect((host, port))
        self.sock.settimeout(timeout)

    def close(self):
        """Shut down the underlying socket."""
        self.sock.close()

    def setUserName(self, username):
        """Replace the username attached to outgoing messages."""
        self.username = username

    def send(self, message):
        """Send the whole message to the server."""
        self.sock.sendall(message)

    def receive(self):
        """Block (up to the timeout) for at most RECV_BUFFER bytes."""
        return self.sock.recv(RECV_BUFFER)
class ChatFrame(tk.Frame):
    """Tk UI: a public chat log, an input box, and send/clear/exit buttons.

    Relies on the module-level `client` (ChatRoomClient) and the helpers
    imported from `protocol` (generateRequest, handleReuest, readTime).
    """
    def __init__(self, master=None):
        tk.Frame.__init__(self, master)
        self.publicText = tk.Text(self, width=60, height=20)
        self.inputText = tk.Text(self, width=60, height=5)
        self.sendButoon = tk.Button(self, text='send', command=self._send)
        self.clearButton = tk.Button(self, text='clear', command=self._clear)
        self.exitButton = tk.Button(self, text='exit', command=self._exit)
        self._createWidgets()
        self.setStyle()
        self.grid()
        # Background thread polls the socket so the UI never blocks.
        thread.start_new_thread(self._receiveMessage, ())
    def setStyle(self):
        # Styling hook; intentionally empty.
        pass
    def _createWidgets(self):
        # Chat log on top, input box below it, buttons along the bottom row.
        self.publicText.grid(column=0, row=0, columnspan=3)
        self.inputText.grid(column=0, row=1, columnspan=3, rowspan=2)
        self.sendButoon.grid(column=2, row=3)
        self.clearButton.grid(column=1, row=3)
        self.exitButton.grid(column=0, row=3)
        self.publicText.insert(tk.INSERT, "Welcome to Chatroom!\n")
    def _send(self):
        # Send the trimmed input-box contents as a SEND request, then clear it.
        msg = self.inputText.get(1.0, tk.END).strip()
        if msg is None or len(msg) == 0:
            return
        package = generateRequest('SEND', client.username, msg)
        client.send(package)
        self.inputText.delete(1.0, tk.END)
    def _clear(self):
        # Wipe the public chat log.
        self.publicText.delete(1.0, tk.END)
    def _receiveMessage(self):
        # Poll loop run on a background thread: read packets and append them
        # to the chat log; socket timeouts are expected and simply retried.
        while True:
            sleep(SLEEP_TIME)
            try:
                package = client.receive()
                print(package)
                print LINE
                req = handleReuest(package)
                # Handle with the package received.
                if req.getType() == 'SEND':
                    msg = req.getData()
                    time = readTime(req.getTime())
                    output = req.getName() + " " + time + "\n" + msg + "\n"
                    self.publicText.insert(tk.INSERT, output)
                elif req.getType() == 'SYST':
                    msg = req.getData()
                    time = readTime(req.getTime())
                    output = msg + " (" + time + ") " + "\n"
                    self.publicText.insert(tk.INSERT, output)
            except socket.error:
                continue
    def _exit(self):
        # Tell the server we are leaving, then quit regardless of errors.
        # NOTE(review): uses the module-level `username`, not client.username,
        # so a renamed user may announce the wrong name -- confirm intent.
        try:
            package = generateRequest('EXIT', username)
            client.send(package)
            client.close()
        except:
            pass
        sys.exit(0)
if __name__ == "__main__":
if len(sys.argv) < 3:
print('Usage : python client.py hostname port')
sys.exit()
host = sys.argv[1]
port = int(sys.argv[2])
RECV_BUFFER = 4096
SLEEP_TIME = 0.5
username = raw_input("Please enter your name: ").strip()
client = ChatRoomClient(username)
try:
client.connect(host, port)
print("Connection succeeded.")
except Exception as e:
print e
print("Connection failed.")
sys.exit(0)
msg = "Hello, I'm " + username + "."
package = generateRequest('HELLO', username, msg)
client.send(package)
while True:
package = client.receive()
req = handleReuest(package)
if req.getType() != "ERROR":
break
print "username illegal, please input a new one"
username = raw_input("Please enter your name: ").strip()
print LINE
msg = "Hello, I'm " + username + "."
package = generateRequest('HELLO', username, msg)
client.send(package)
client.setUserName(username)
app = ChatFrame()
app.master.title(username + '@chatroom')
app.mainloop()
app._exit()
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import rospy
import math
import numpy as np
from robot.robot import Robot
from robot.obstacle import Obstacle
ORBIT_KP_V = -0.5
ORBIT_KP_W = 4.2
#REMAINING_RANGE_V = 20
#REMAINING_RANGE_YAW = 7
class Behavior(Robot,Obstacle):
def __init__(self):
self.penalty_angle = []
def Orbit(self, goal_ang, REMAINING_RANGE_YAW = 5):
orbit_radius = 33.5 # 22.5 + 11 cm
velocity = goal_ang
# velocity = velocity if abs(velocity) < 45 else 45 # maximum speed
w = (velocity / orbit_radius)
v_x = 0
v_y = velocity * ORBIT_KP_V
v_yaw = w * ORBIT_KP_W
o_yaw = v_yaw if abs(v_yaw) > 0.2 else 0.2 * np.sign(v_yaw) # 0.2 is minimum speed
remaining_yaw = o_yaw
if abs(remaining_yaw) < REMAINING_RANGE_YAW:
arrived = True
else:
arrived = False
return v_x, v_y, o_yaw, arrived
def Go2Point(self, tx, ty, tyaw, REMAINING_RANGE_V = 10, REMAINING_RANGE_YAW = 5):
robot_info = self.GetRobotInfo()
v_x = tx - robot_info['location']['x']
v_y = ty - robot_info['location']['y']
o_x, o_y = self.Rotate(v_x, v_y, robot_info['location']['yaw'] * -1)
v_yaw = tyaw - robot_info['location']['yaw']
if abs(v_yaw - 360) < abs(v_yaw):
o_yaw = v_yaw - 360
elif abs(v_yaw + 360) < abs(v_yaw):
o_yaw = v_yaw + 360
else:
o_yaw = v_yaw
remaining_v = math.sqrt(o_x**2 + o_y**2)
remaining_yaw = o_yaw
if remaining_v < REMAINING_RANGE_V and abs(remaining_yaw) < REMAINING_RANGE_YAW:
arrived = True
else:
arrived = False
return o_x, o_y, o_yaw, arrived
def relative_goal(self, goal_dis, goal_ang, ball_dis, ball_ang):
if ball_dis < 100 and goal_dis > 150:
ball_x = ball_dis * math.cos(math.radians(ball_ang)) #機器人看球的座標
ball_y = ball_dis * math.sin(math.radians(ball_ang))
door_x = goal_dis * math.cos(math.radians(goal_ang)) #機器人看門的座標
door_y = goal_dis * math.sin(math.radians(goal_ang))
defence_x = ( 10000 * ball_x + door_x ) / 2 #防守位置
defence_y = ( 10000 * ball_y + door_y ) / 2
defence_yaw = ball_ang
elif ball_dis < 150 and goal_dis < 150:
ball_x = ball_dis * math.cos(math.radians(ball_ang)) #機器人看球的座標
ball_y = ball_dis * math.sin(math.radians(ball_ang))
door_x = goal_dis * math.cos(math.radians(goal_ang)) #機器人看門的座標
door_y = goal_dis * math.sin(math.radians(goal_ang))
defence_x = 0 #avoid to go to the goal area
defence_y = 0
defence_yaw = ball_ang
else:
ball_x = ball_dis * math.cos(math.radians(ball_ang)) #機器人看球的座標
ball_y = ball_dis * math.sin(math.radians(ball_ang))
door_x = goal_dis * math.cos(math.radians(goal_ang)) #機器人看門的座標
door_y = goal_dis * math.sin(math.radians(goal_ang))
defence_x = ( ball_x + door_x ) / 2 #防守位置
defence_y = ( ball_y + door_y ) / 2
defence_yaw = ball_ang
return defence_x , defence_y , defence_yaw
def relative_ball(self, goal_dis, goal_ang, ball_dis, ball_ang):
if ball_dis < 100 and goal_dis > 150:
ball_x = ball_dis * math.cos(math.radians(ball_ang)) #機器人看球的座標
ball_y = ball_dis * math.sin(math.radians(ball_ang))
door_x = goal_dis * math.cos(math.radians(goal_ang)) #機器人看門的座標
door_y = goal_dis * math.sin(math.radians(goal_ang))
defence_x = (10000*ball_x + door_x ) / 10 #avoid to go to the goal area
defence_y = (10000*ball_y + door_y ) / 10
defence_yaw = ball_ang
elif ball_dis < 150 and goal_dis < 150:
ball_x = ball_dis * math.cos(math.radians(ball_ang)) #機器人看球的座標
ball_y = ball_dis * math.sin(math.radians(ball_ang))
door_x = goal_dis * math.cos(math.radians(goal_ang)) #機器人看門的座標
door_y = goal_dis * math.sin(math.radians(goal_ang))
defence_x = 0 #avoid to go to the goal area
defence_y = 0
defence_yaw = ball_ang
else:
ball_x = ball_dis * math.cos(math.radians(ball_ang)) #機器人看球的座標
ball_y = ball_dis * math.sin(math.radians(ball_ang))
door_x = goal_dis * math.cos(math.radians(goal_ang)) #機器人看門的座標
door_y = goal_dis * math.sin(math.radians(goal_ang))
defence_x = (7.5*ball_x +door_x ) / 10 #防守位置
defence_y = (7.5*ball_y +door_y ) / 10
defence_yaw = ball_ang
return defence_x , defence_y , defence_yaw
def PenaltyTurning(self, side, run_yaw, dest_ang):
robot_info = self.GetObjectInfo()
position = self.GetRobotInfo()
front_ang = math.degrees(position['imu_3d']['yaw'])
v_yaw = front_ang - dest_ang
if run_yaw == 0:
v_yaw = robot_info[side]['ang']
v_x = 0
v_y = 0
return v_x, v_y, v_yaw
    def Post_up(self, goal_dis, goal_ang,ranges, angle_increment):
        """Move toward the goal, deflecting around laser-detected obstacles.

        Pipeline: raw laser ``ranges`` -> self.state -> self.filter ->
        self.Obstacle_segmentation, which yields a repulsive force vector.
        With no obstacle force the command points straight at the goal;
        otherwise self.Force_Calculation blends goal attraction with the
        obstacle repulsion.

        Returns:
            (v_x, v_y, v_yaw) velocity command in the robot frame.
        """
        # Cache the call arguments on private attributes (name-mangled to
        # this class) -- presumably read elsewhere; TODO confirm.
        self.__goal_dis = goal_dis
        self.__goal_ang = goal_ang
        self.__ranges = ranges
        self.__angle_increment = angle_increment
        # Preprocess the scan: raw state, then filtered copy.
        self.raw , object_dis= self.state(ranges)
        self.edit = self.filter(self.raw)
        # Aggregate repulsive force from segmented obstacles.
        obstacle_force_x , obstacle_force_y = self.Obstacle_segmentation(self.edit ,angle_increment , object_dis)
        if obstacle_force_x == 0 and obstacle_force_y == 0 :
            # No obstacles: head directly for the goal.
            v_x = goal_dis * math.cos(math.radians(goal_ang))
            v_y = goal_dis * math.sin(math.radians(goal_ang))
            v_yaw = goal_ang
            return v_x , v_y , v_yaw
        else :
            # Obstacles present: blend attraction and repulsion.
            v_x,v_y,v_yaw = self.Force_Calculation(obstacle_force_x , obstacle_force_y ,goal_ang, goal_dis,0)
            return v_x, v_y, v_yaw
|
import pyrr
from PyFlow.Core import(
FunctionLibraryBase,
IMPLEMENT_NODE
)
from PyFlow.Core.Common import *
class QuatLib(FunctionLibraryBase):
    """Quaternion node library.

    Exposes pyrr.Quaternion constructors, component accessors and operations
    as PyFlow nodes under the Math|Quaternion category.
    """

    def __init__(self, packageName):
        super(QuatLib, self).__init__(packageName)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def zeroQuat():
        '''Returns zero quaternion.'''
        return pyrr.Quaternion()

    @staticmethod
    @IMPLEMENT_NODE(returns=('StringPin', ''), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatToString(q=('QuatPin', pyrr.Quaternion())):
        '''Converts a quat to its string representation.'''
        return str(q)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion.from_x_rotation(0.0)), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatFromXRotation(theta=('FloatPin', 0.0)):
        '''Creates a new Quaternion with a rotation around the X-axis.'''
        return pyrr.Quaternion.from_x_rotation(theta)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion.from_y_rotation(0.0)), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatFromYRotation(theta=('FloatPin', 0.0)):
        '''Creates a new Quaternion with a rotation around the Y-axis.'''
        return pyrr.Quaternion.from_y_rotation(theta)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion.from_z_rotation(0.0)), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatFromZRotation(theta=('FloatPin', 0.0)):
        '''Creates a new Quaternion with a rotation around the Z-axis.'''
        return pyrr.Quaternion.from_z_rotation(theta)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion.from_matrix(pyrr.Matrix33())), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatFromMatrix33(m=('Matrix33Pin', pyrr.Matrix33())):
        '''Creates a Quaternion from the specified Matrix33.'''
        return pyrr.Quaternion.from_matrix(m)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion.from_matrix(pyrr.Matrix44())), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatFromMatrix44(m=('Matrix44Pin', pyrr.Matrix44())):
        '''Creates a Quaternion from the specified Matrix44.'''
        return pyrr.Quaternion.from_matrix(m)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatFromEulers(a=('FloatPin', 0.0), b=('FloatPin', 0.0), c=('FloatPin', 0.0)):
        '''Creates a Quaternion from the specified Euler angles.'''
        return pyrr.Quaternion.from_eulers([a, b, c])

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatFromInverseOfEulers(a=('FloatPin', 0.0), b=('FloatPin', 0.0), c=('FloatPin', 0.0)):
        '''Creates a Quaternion from the inverse of the specified Euler angles.'''
        return pyrr.Quaternion.from_inverse_of_eulers([a, b, c])

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), nodeType=NodeTypes.Pure, meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatFromAxisRotation(a=('FloatPin', 0.0), b=('FloatPin', 0.0), c=('FloatPin', 0.0), theta=('FloatPin', 0.0)):
        '''Creates a new Quaternion with a rotation around the specified axis.'''
        return pyrr.Quaternion.from_axis_rotation([a, b, c], theta)

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatVector3Pin', pyrr.Vector3()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatApplyToVector(q=('QuatPin', pyrr.Quaternion()), v=('FloatVector3Pin', pyrr.Vector3())):
        '''Rotates a vector by a quaternion.'''
        return pyrr.Vector3(pyrr.quaternion.apply_to_vector(quat=q, vec=v))

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatPin', 0.0), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatX(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the x component of the quat.'''
        return q.x

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatPin', 0.0), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatY(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the y component of the quat.'''
        return q.y

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatPin', 0.0), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatZ(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the z component of the quat.'''
        return q.z

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatPin', 0.0), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatW(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the w component of the quat.'''
        return q.w

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatPin', 0.0), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatAngle(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the angle around the axis of rotation of this Quaternion as a float.'''
        return q.angle

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatVector3Pin', pyrr.Vector3()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatAxis(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the axis of rotation of this Quaternion as a Vector3.'''
        return q.axis

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatConjugate(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the conjugate of this Quaternion.\nThis is a Quaternion with the opposite rotation.'''
        return q.conjugate

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatCross(q=('QuatPin', pyrr.Quaternion()), other=('QuatPin', pyrr.Quaternion())):
        '''Returns the cross of this Quaternion and another.\nThis is the equivalent of combining Quaternion rotations (like Matrix multiplication).'''
        return q.cross(other)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatDot(q=('QuatPin', pyrr.Quaternion()), other=('QuatPin', pyrr.Quaternion())):
        '''Returns the dot of this Quaternion and another.'''
        # NOTE(review): pyrr's Quaternion.dot returns a scalar, yet the output
        # pin is declared QuatPin -- looks like it should be FloatPin; left
        # unchanged because the pin type is part of the node's interface.
        return q.dot(other)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatInverse(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the inverse of this quaternion.'''
        return q.inverse

    @staticmethod
    @IMPLEMENT_NODE(returns=('BoolPin', False), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatIsIdentity(q=('QuatPin', pyrr.Quaternion())):
        '''Returns True if the Quaternion has no rotation (0.,0.,0.,1.).'''
        return q.is_identity

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatPin', 0.0), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatLength(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the length of this Quaternion.'''
        # Fixed: pin default was the bool False on a float pin.
        return q.length

    @staticmethod
    @IMPLEMENT_NODE(returns=('FloatPin', 0.0), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatSquaredLength(q=('QuatPin', pyrr.Quaternion())):
        '''Calculates the squared length of a quaternion.\nUseful for avoiding the performance penalty of the square root function.'''
        # Fixed: pin default was the bool False on a float pin.
        return pyrr.quaternion.squared_length(q)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatLerp(q1=('QuatPin', pyrr.Quaternion()), q2=('QuatPin', pyrr.Quaternion()), t=('FloatPin', 0.0)):
        '''Interpolates between q1 and q2 by t. The parameter t is clamped to the range [0, 1].'''
        return q1.lerp(q2, t)

    @staticmethod
    @IMPLEMENT_NODE(returns=('Matrix33Pin', pyrr.Matrix33()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatAsMatrix33(q=('QuatPin', pyrr.Quaternion())):
        '''Returns a Matrix33 representation of this Quaternion.'''
        return q.matrix33

    @staticmethod
    @IMPLEMENT_NODE(returns=('Matrix44Pin', pyrr.Matrix44()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatAsMatrix44(q=('QuatPin', pyrr.Quaternion())):
        '''Returns a Matrix44 representation of this Quaternion.'''
        return q.matrix44

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatNegative(q=('QuatPin', pyrr.Quaternion())):
        '''Returns the negative of the Quaternion.'''
        return q.negative

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatNormalize(q=('QuatPin', pyrr.Quaternion())):
        '''Returns a normalized version of this Quaternion as a new Quaternion.'''
        return q.normalized

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatPower(q=('QuatPin', pyrr.Quaternion()), exp=('FloatPin', 0.0), result=("Reference", ('BoolPin', False))):
        '''Returns a new Quaternion representing this Quaternion to the power of the exponent. Checks for identity quaternion.'''
        try:
            powered = q.power(exp)
            result(True)
            return powered
        except Exception:
            # pyrr raises when the quaternion cannot be raised to the power
            # (e.g. identity edge case); report failure through the pin.
            result(False)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatSlerp(q1=('QuatPin', pyrr.Quaternion()), q2=('QuatPin', pyrr.Quaternion()), t=('FloatPin', 0.0)):
        '''Spherically interpolates between quat1 and quat2 by t. The parameter t is clamped to the range [0, 1].'''
        return q1.slerp(q2, t)

    @staticmethod
    @IMPLEMENT_NODE(returns=('BoolPin', False), meta={'Category': 'Math|Quaternion', 'Keywords': []})
    def quatIsZeroLength(q=('QuatPin', pyrr.Quaternion())):
        '''Checks if a quaternion is zero length.'''
        return pyrr.quaternion.is_zero_length(q)

    @staticmethod
    @IMPLEMENT_NODE(returns=('QuatPin', pyrr.Quaternion()), meta={'Category': 'Math|Quaternion', 'Keywords': ['*']})
    def quatMult(q1=('QuatPin', pyrr.Quaternion()), q2=('QuatPin', pyrr.Quaternion())):
        '''"*" operator for quaternions.'''
        return q1 * q2
|
'''
Date: 12-17-2018
Problem description:
===================
This problem was asked by Google.
Given an array of integers and a number k, where 1 <= k <= length of the array,
compute the maximum values of each subarray of length k.
For example, given array = [10, 5, 2, 7, 8, 7] and k = 3,
we should get: [10, 7, 8, 8], since:
10 = max(10, 5, 2)
7 = max(5, 2, 7)
8 = max(2, 7, 8)
8 = max(7, 8, 7)
Do this in O(n) time and O(k) space. You can modify the input array in-place
and you do not need to store the results.
You can simply print them out as you compute them.
Algorithm:
=========
Input: list of integers and int k
Output: integer value
Pseudo code:
1. Check edge cases
2. Slide a window of length k over the list, taking each window's maximum with the built-in max()
'''
import time
#
# brute force
#
def maxValInArray(arr, k):
    """Brute-force sliding-window maximum.

    Returns the list of max values of every length-k window of ``arr``.
    NOTE: consumes ``arr`` in place (pops from the front) when k > 1, and
    returns ``arr`` itself when k == 1, exactly as the original contract.
    Raises AssertionError if k is outside [1, len(arr)].
    """
    assert k >= 1
    assert k <= len(arr)
    if k == 1:
        # Every window of size 1 is its own maximum.
        return arr
    maxima = []
    while len(arr) >= k:
        # The current window is simply the first k elements.
        maxima.append(max(arr[:k]))
        arr.pop(0)
    return maxima
#
# O(n) time and O(k) space
#
def maxValsList(arr, k):
    """Sliding-window maximum in true O(n) time and O(k) space.

    The original implementation recomputed max over each window and used
    list.pop(0), giving O(n*k) time; this version uses the classic monotonic
    deque so every element is pushed and popped at most once.  Unlike the
    brute-force variant, ``arr`` is left unmodified (callers here never rely
    on the mutation).

    Returns:
        List of max values of every length-k window (``arr`` itself if k == 1).
    Raises:
        AssertionError: if k is outside [1, len(arr)].
    """
    assert k >= 1
    assert k <= len(arr)
    if k == 1:
        return arr
    from collections import deque
    maxima = []
    candidates = deque()  # indices into arr; their values are decreasing
    for i, value in enumerate(arr):
        # Drop candidates dominated by the incoming value.
        while candidates and arr[candidates[-1]] <= value:
            candidates.pop()
        candidates.append(i)
        # Drop the front candidate once it falls out of the window.
        if candidates[0] <= i - k:
            candidates.popleft()
        if i >= k - 1:
            maxima.append(arr[candidates[0]])
    return maxima
def test_code():
    """Pytest cases: both implementations agree with the known answer."""
    data = [10, 5, 2, 7, 8, 7]
    expected = [10, 7, 8, 8]
    assert maxValInArray(list(data), 3) == expected
    assert maxValsList(list(data), 3) == expected
    # k == 1 returns the input unchanged.
    assert maxValsList(list(data), 1) == data
if __name__ == "__main__":
    # Demo/benchmark: run both implementations on the sample input and time
    # them.  A fresh list is built for each call because maxValInArray
    # consumes its input in place.
    A = [10, 5, 2, 7, 8, 7]
    K = 3
    print ("Original array: {}".format(A))
    starttime = time.time()
    print( maxValInArray(A, K))
    endtime = time.time()
    # Fixed typo in output: "methob" -> "method".
    print("Elapsed time in brute force method: {} secs".format(endtime - starttime))
    A = [10, 5, 2, 7, 8, 7]
    starttime = time.time()
    print( maxValsList(A, K))
    endtime = time.time()
    print("Elapsed time in O(n) method: {} secs".format(endtime - starttime))
'''
Run-time output:
===============
$ python codechallenge-06.py
Original array: [10, 5, 2, 7, 8, 7]
[10, 7, 8, 8]
Elapsed time in brute force method: 0.000123023986816 secs
[10, 7, 8, 8]
Elapsed time in O(n) method: 0.000108003616333 secs
$ pytest codechallenge-06.py
========================================= test session starts ==========================================
platform linux2 -- Python 2.7.13, pytest-3.6.3, py-1.5.4, pluggy-0.6.0
rootdir: /home/markn/devel/py-src/DailyCodeChallenge, inifile:
collected 1 item
codechallenge-06.py . [100%]
======================================= 1 passed in 0.06 seconds =======================================
'''
|
# Copyright (c) <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from spdx.parsers import rdfbuilders
from spdx.parsers import tagvaluebuilders
from spdx.parsers import validations
from spdx.parsers.builderexceptions import SPDXValueError
from spdx.parsers.builderexceptions import CardinalityError
from spdx.parsers.builderexceptions import OrderError
class CreationInfoBuilder(rdfbuilders.CreationInfoBuilder):
    """Creation-info builder for tag/value parsing; reuses the RDF implementation unchanged."""
    def __init__(self):
        super(CreationInfoBuilder, self).__init__()
class ExternalDocumentRefsBuilder(rdfbuilders.ExternalDocumentRefBuilder):
    """External-document-ref builder for tag/value parsing; reuses the RDF implementation unchanged."""
    def __init__(self):
        super(ExternalDocumentRefsBuilder, self).__init__()
class EntityBuilder(rdfbuilders.EntityBuilder):
    """Entity (tool/org/person) builder for tag/value parsing; reuses the RDF implementation unchanged."""
    def __init__(self):
        super(EntityBuilder, self).__init__()
class SnippetBuilder(rdfbuilders.SnippetBuilder):
    """Snippet builder for tag/value parsing; reuses the RDF implementation unchanged."""
    def __init__(self):
        super(SnippetBuilder, self).__init__()
class ReviewBuilder(rdfbuilders.ReviewBuilder):
    """Review builder for tag/value parsing; reuses the RDF implementation unchanged."""
    def __init__(self):
        super(ReviewBuilder, self).__init__()
class PackageBuilder(rdfbuilders.PackageBuilder):
    """Package builder for tag/value parsing; reuses the RDF implementation unchanged."""
    def __init__(self):
        super(PackageBuilder, self).__init__()
class DocBuilder(tagvaluebuilders.DocBuilder):
    """Document-level builder: SPDXID, comment and namespace fields."""
    def __init__(self):
        super(DocBuilder, self).__init__()

    def set_doc_spdx_id(self, doc, doc_spdx_id_line):
        """
        Set the document SPDX Identifier.
        Raise SPDXValueError if malformed value, CardinalityError
        if already defined.
        """
        if not self.doc_spdx_id_set:
            if (
                doc_spdx_id_line == "SPDXRef-DOCUMENT"
                or validations.validate_doc_spdx_id(doc_spdx_id_line)
            ):
                doc.spdx_id = doc_spdx_id_line
                self.doc_spdx_id_set = True
                return True
            else:
                raise SPDXValueError("Document::SPDXID")
        else:
            raise CardinalityError("Document::SPDXID")

    def set_doc_comment(self, doc, comment):
        """
        Set document comment.
        Raise CardinalityError if comment already set.
        """
        if not self.doc_comment_set:
            self.doc_comment_set = True
            doc.comment = comment
        else:
            raise CardinalityError("Document::Comment")

    def set_doc_namespace(self, doc, namespace):
        """
        Set the document namespace.
        Raise SPDXValueError if malformed value.
        Raise CardinalityError if already defined.
        """
        if not self.doc_namespace_set:
            # NOTE(review): the flag is set before validation, so a failed
            # value poisons any retry with CardinalityError -- mirrors
            # set_doc_comment's ordering; confirm before changing.
            self.doc_namespace_set = True
            if validations.validate_doc_namespace(namespace):
                doc.namespace = namespace
                return True
            else:
                raise SPDXValueError("Document::Namespace")
        else:
            # BUG FIX: previously raised CardinalityError("Document::Comment").
            raise CardinalityError("Document::Namespace")
class LicenseBuilder(tagvaluebuilders.LicenseBuilder):
    """Extracted-license builder: name, text and comment of the current license."""
    def __init__(self):
        super(LicenseBuilder, self).__init__()

    def set_lic_name(self, doc, name):
        """
        Set license name.
        Raise SPDXValueError if name is not str or utils.NoAssert
        Raise CardinalityError if it is already set
        Raise OrderError if no license id defined.
        """
        if self.has_extr_lic(doc):
            if not self.extr_lic_name_set:
                self.extr_lic_name_set = True
                if validations.validate_extr_lic_name(name, True):
                    self.extr_lic(doc).full_name = name
                    return True
                else:
                    raise SPDXValueError("ExtractedLicense::Name")
            else:
                raise CardinalityError("ExtractedLicense::Name")
        else:
            raise OrderError("ExtractedLicense::Name")

    def set_lic_text(self, doc, text):
        """
        Set license text.
        Raise CardinalityError if it is already set.
        Raise OrderError if no license id defined.
        """
        if self.has_extr_lic(doc):
            if not self.extr_text_set:
                self.extr_text_set = True
                self.extr_lic(doc).text = text
                return True
            else:
                raise CardinalityError("ExtractedLicense::text")
        else:
            raise OrderError("ExtractedLicense::text")

    def set_lic_comment(self, doc, comment):
        """
        Set license comment.
        Raise CardinalityError if it is already set.
        Raise OrderError if no license ID defined.
        """
        if self.has_extr_lic(doc):
            if not self.extr_lic_comment_set:
                self.extr_lic_comment_set = True
                self.extr_lic(doc).comment = comment
                return True
            else:
                raise CardinalityError("ExtractedLicense::comment")
        else:
            raise OrderError("ExtractedLicense::comment")
class FileBuilder(rdfbuilders.FileBuilder):
    """File-level builder: adds notice handling and fileType-name translation on top of the RDF builder."""
    def __init__(self):
        super(FileBuilder, self).__init__()

    def set_file_notice(self, doc, text):
        """
        Set file notice
        Raise OrderError if no package or file defined.
        Raise CardinalityError if more than one.
        """
        # NOTE(review): file_notice_set is assigned but never checked first,
        # so a repeated notice silently overwrites instead of raising
        # CardinalityError as documented -- confirm intended behavior.
        if self.has_package(doc) and self.has_file(doc):
            self.file_notice_set = True
            self.file(doc).notice = text
            return True
        else:
            raise OrderError("File::Notice")

    def set_file_type(self, doc, type_value):
        """
        Wrap rdfbuilders.FileBuilder.set_file_type to match the different
        fileType representations.
        """
        # Map the tag/value spellings to the names the RDF builder expects.
        # An unrecognized type_value maps to None; the superclass decides
        # what happens then -- TODO confirm it raises rather than ignoring.
        type_dict = {
            "fileType_source": "SOURCE",
            "fileType_binary": "BINARY",
            "fileType_archive": "ARCHIVE",
            "fileType_other": "OTHER",
        }
        return super(FileBuilder, self).set_file_type(doc, type_dict.get(type_value))
class AnnotationBuilder(tagvaluebuilders.AnnotationBuilder):
    """Annotation builder: attaches comments to the most recent annotation."""
    def __init__(self):
        super(AnnotationBuilder, self).__init__()

    def add_annotation_comment(self, doc, comment):
        """
        Set the annotation comment.
        Raise CardinalityError if already set.
        Raise OrderError if no annotator defined before.
        """
        # The comment always applies to the last annotation parsed.
        if len(doc.annotations) != 0:
            if not self.annotation_comment_set:
                self.annotation_comment_set = True
                doc.annotations[-1].comment = comment
                return True
            else:
                raise CardinalityError("AnnotationComment")
        else:
            raise OrderError("AnnotationComment")
class RelationshipBuilder(tagvaluebuilders.RelationshipBuilder):
    """Relationship builder: attaches comments to the most recent relationship."""
    def __init__(self):
        super(RelationshipBuilder, self).__init__()

    def add_relationship_comment(self, doc, comment):
        """
        Set the relationship comment.
        Raise CardinalityError if already set.
        Raise OrderError if no relationship defined before.
        """
        # The comment always applies to the last relationship parsed.
        if len(doc.relationships) != 0:
            if not self.relationship_comment_set:
                self.relationship_comment_set = True
                doc.relationships[-1].comment = comment
                return True
            else:
                raise CardinalityError("RelationshipComment")
        else:
            raise OrderError("RelationshipComment")
class Builder(
    DocBuilder,
    CreationInfoBuilder,
    ExternalDocumentRefsBuilder,
    EntityBuilder,
    SnippetBuilder,
    ReviewBuilder,
    LicenseBuilder,
    FileBuilder,
    PackageBuilder,
    AnnotationBuilder,
    RelationshipBuilder,
):
    """
    SPDX document builder.

    Aggregates every per-section builder via multiple inheritance so a
    single object carries all parsing state for one document.
    """
    def __init__(self):
        super(Builder, self).__init__()
        # FIXME: this state does not make sense
        self.reset()

    def reset(self):
        """
        Reset builder's state for building new documents.
        Must be called between usage with different documents.
        """
        # FIXME: this state does not make sense
        self.reset_creation_info()
        self.reset_document()
        self.reset_package()
        self.reset_file_stat()
        self.reset_reviews()
        self.reset_annotations()
        self.reset_relationship()
        self.reset_extr_lics()
|
#! /usr/bin/env python
"""Script to generate the raw sub-package APIs
Basically just drives OpenGLGenerator with options to produce
the various modules we want...
"""
import os, sys, logging, re, compileall
import openglgenerator
from OpenGL import platform
# Importing GL primes the OpenGL package; failure is tolerated because code
# generation can proceed without a working GL binding.  (Python 2 except
# syntax -- this script is Python 2 only.)
try:
    from OpenGL import GL
except (ImportError, AttributeError), err:
    pass
# put our OpenGL directory on the search path, just in case...
sys.path.insert( 0, os.path.abspath( '..' ) )
log = logging.getLogger( 'generateraw' )
# (module name, regex sources selecting that module's symbols);
# None means no expression filtering for that module.
MODULE_DEFINITIONS = [
    ('GL', ('gl[A-Z0-9].*','GL_.*')),
    ('GLU',('glu[A-Z0-9].*','GLU[_a-z0-9].*')),
    ('GLUT', ('glut[A-Z0-9].*','GLUT[_a-z0-9].*')),
    ('GLE', None),
    ('GLX', None),
    ('WGL', ('wgl.*','WGL.*',)),
    ('AGL', None),
]
def filterModules( arguments ):
    """Select the module definitions to process.

    With no command-line arguments every entry of MODULE_DEFINITIONS is
    returned; otherwise only the entries whose module name appears in
    ``arguments``, preserving MODULE_DEFINITIONS order.
    """
    if not arguments:
        return MODULE_DEFINITIONS
    wanted = set( arguments )
    return [entry for entry in MODULE_DEFINITIONS if entry[0] in wanted]
def main():
    """Generate the raw sub-package modules for each selected OpenGL module.

    For every (module, expressions) pair from filterModules, runs
    OpenGLGenerator three times against the module's gccxml output
    (<module>.xml): constants.py (variables only), __init__.py (the raw
    C-style API re-exporting the constants) and annotations.py (array-size
    decorators).  Symbols emitted for one module are suppressed from all
    later modules' output.
    NOTE(review): Python 2 only (``except OSError, err`` syntax).
    """
    baseModules = [
        'OpenGL.constants',
    ]
    # Symbols already provided by the base modules are never re-generated.
    known_symbols = openglgenerator.OpenGLGenerator.loadKnownSymbols(
        baseModules
    )
    definedSymbols = known_symbols.copy()
    for (module,expressions) in filterModules( sys.argv[1:] ):
        log.info( "Processing module: %s", module )
        if expressions:
            expressions = [re.compile(e) for e in expressions]
        xmlFile = '%s.xml'%( module.lower(), )
        directory = '../OpenGL/raw/%(module)s'%locals()
        try:
            os.makedirs( directory )
        except OSError, err:
            # Directory already exists -- fine.
            pass
        constantsFile = os.path.join( directory, 'constants.py' )
        rawFile = os.path.join( directory, '__init__.py' )
        # Truncate the raw module up front so a stale copy never survives.
        open( rawFile, 'w' ).close()
        annotationsFile = os.path.join( directory, 'annotations.py' )
        dll = getattr( platform, module, None )
        if dll and os.path.isfile( xmlFile ):
            log.info( "Found DLL: %s and have XML source file: %s", dll, xmlFile )
            # first the constants file...
            log.info( "Generating constants %s", constantsFile )
            gen = openglgenerator.OpenGLGenerator(
                open(constantsFile,'w'),
                generate_comments = False,
                searched_dlls = [ dll ],
                known_symbols = definedSymbols,
                module_header = '''"""Constants for OpenGL.%(module)s

Automatically generated by the generateraw script, do not edit!
"""
'''%locals(),
            )
            # Variables only -> just the constant definitions.
            items = gen.load_typedefs( xmlFile , types = [
                openglgenerator.codegenerator.typedesc.Variable, # ick!
            ], expressions = expressions)
            gen.produce( items )
            gen.output.close()
            log.info( "Generating raw API %s", rawFile )
            # The raw module must re-export (not re-generate) the constants.
            constantSymbols = gen.loadKnownSymbols(
                ['OpenGL.raw.%(module)s.constants'%locals()],
                flags = gen.EXPORT_SYMBOL, # don't import, do export
                doReload = True,
            )
            constantSymbols.update( definedSymbols )
            constantSymbols.update( known_symbols )
            gen = openglgenerator.OpenGLGenerator(
                open(rawFile,'w'),
                generate_comments = True,
                searched_dlls = [ dll ],
                known_symbols = constantSymbols,
                module_header = '''# -*- coding: iso-8859-1 -*-
"""Raw (C-style) API for OpenGL.%(module)s

Automatically generated by the generateraw script, do not edit!
"""
from OpenGL.raw.%(module)s.constants import *
'''%locals(),
            )
            items = gen.load_typedefs( xmlFile, expressions = expressions )
            gen.produce( items )
            gen.output.close()
            log.info( "Generating annotations %s", annotationsFile )
            gen = openglgenerator.OpenGLGenerator(
                open(annotationsFile,'w'),
                generate_comments = True,
                searched_dlls = [ dll ],
                emitters = [ openglgenerator.OpenGLDecorator() ],
                known_symbols = definedSymbols,
                module_header = '''"""Array-size annotations for OpenGL.raw.%(module)s

Automatically generated by the generateraw script, do not edit!
"""
from OpenGL.raw import %(module)s as raw
'''%locals(),
            )
            # Functions only -> just the annotated entry points.
            items = gen.load_typedefs( xmlFile, types = [
                openglgenerator.codegenerator.typedesc.Function, # ick!
            ], expressions = expressions)
            gen.produce( items )
            gen.output.close()
            log.info( """Suppressing future output of already-defined functions/structures: %s""", module )
            definedSymbols.update(
                gen.loadKnownSymbols(
                    ['OpenGL.raw.%(module)s'%locals()],
                    flags = 0, # neither import nor export from future operations...
                    doReload = True,
                )
            )
            definedSymbols.update(
                gen.loadKnownSymbols(
                    ['OpenGL.raw.%(module)s.constants'%locals()],
                    flags = 0, # suppress future export of the constants
                    doReload = True,
                )
            )
            definedSymbols.update( known_symbols )
            if module == 'GL':
                # filter out the higher GL version stuff as well...
                # obviously you need to have the version stuff generated already
                # to make this work!
                for version in ('1_2','1_3','1_4','1_5','2_0'):
                    log.info( 'Suppressing exports from Core GL Version %s', version )
                    definedSymbols.update(
                        gen.loadKnownSymbols(
                            ['OpenGL.raw.GL.VERSION.GL_%(version)s'%locals()],
                            flags = 0, # suppress future export of the constants
                            doReload = True,
                        )
                    )
        # Recompile the generated package whether or not it was regenerated.
        path = '../OpenGL/raw/%(module)s'%locals()
        log.info( 'Forcing recompilation of %s', path )
        compileall.compile_dir(path, maxlevels=2, force=True, quiet=True)
if __name__ == "__main__":
    # Configure logging before generation; uncomment the middle line for
    # verbose code-generator debugging output.
    logging.basicConfig()
    #logging.getLogger( 'codegenerator' ).setLevel( logging.DEBUG )
    log.setLevel( logging.INFO )
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.