#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements composite forecasters."""
__author__ = ["mloning"]
__all__ = [
"ColumnEnsembleForecaster",
"EnsembleForecaster",
"TransformedTargetForecaster",
"ForecastingPipeline",
"DirectTabularRegressionForecaster",
"DirectTimeSeriesRegressionForecaster",
"MultioutputTabularRegressionForecaster",
"MultioutputTimeSeriesRegressionForecaster",
"RecursiveTabularRegressionForecaster",
"RecursiveTimeSeriesRegressionForecaster",
"DirRecTabularRegressionForecaster",
"DirRecTimeSeriesRegressionForecaster",
"StackingForecaster",
"MultiplexForecaster",
"ReducedForecaster",
"make_reduction",
]
from sktime.forecasting.compose._column_ensemble import ColumnEnsembleForecaster
from sktime.forecasting.compose._ensemble import EnsembleForecaster
from sktime.forecasting.compose._pipeline import TransformedTargetForecaster
from sktime.forecasting.compose._pipeline import ForecastingPipeline
from sktime.forecasting.compose._reduce import DirRecTabularRegressionForecaster
from sktime.forecasting.compose._reduce import DirRecTimeSeriesRegressionForecaster
from sktime.forecasting.compose._reduce import DirectTabularRegressionForecaster
from sktime.forecasting.compose._reduce import DirectTimeSeriesRegressionForecaster
from sktime.forecasting.compose._reduce import MultioutputTabularRegressionForecaster
from sktime.forecasting.compose._reduce import MultioutputTimeSeriesRegressionForecaster
from sktime.forecasting.compose._reduce import RecursiveTabularRegressionForecaster
from sktime.forecasting.compose._reduce import RecursiveTimeSeriesRegressionForecaster
from sktime.forecasting.compose._stack import StackingForecaster
from sktime.forecasting.compose._multiplexer import MultiplexForecaster
from sktime.forecasting.compose._reduce import ReducedForecaster
from sktime.forecasting.compose._reduce import make_reduction
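
# A minimal usage sketch of the reduction API exported above (hedged: the exact
# make_reduction signature may differ between sktime releases):
if __name__ == "__main__":
    from sklearn.linear_model import LinearRegression

    forecaster = make_reduction(LinearRegression(), window_length=12, strategy="recursive")
    print(forecaster)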
# -*- coding: utf-8 -*-
import pandas
import numpy as np
from sklearn import preprocessing
from sklearn import neighbors
from sklearn.model_selection import StratifiedKFold, cross_val_score
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Remove display limits when printing the dataframe
pandas.set_option('display.max_columns', None)
pandas.set_option('display.max_rows', None)
SEED = 42
np.random.seed(SEED)
# Full train set
train_file = "../datasets/train.csv"
def get_train_set(filepath, size=0.20):
    dataset = pandas.read_csv(filepath)
    test_size = 1.0 - size
    # keep `size` (20% by default) of the full train set for the parameter search
    train, _ = train_test_split(dataset,
                                test_size=test_size,
                                random_state=SEED)
    return train
# KNN Params
def generate_knn_params():
metrics = ["manhattan", "euclidean", "chebyshev", "minkowski"]
n_neighbors = [x for x in range(3, 50) if x % 2 != 0]
params = []
for metric in metrics:
for i, n in enumerate(n_neighbors):
params.append({
"id": metric[0:3].upper() + str(n),
"metric": metric,
"n_neighbors": n
})
return params
def setup_kfold(X, Y, n_splits):
    kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED)
kf.get_n_splits(X)
return kf
def run_knn_score(X, Y, params, kfold):
print("Busca de Parametros KNN")
all_scores = []
for param in params:
clf = neighbors.KNeighborsClassifier(metric=param["metric"],
n_neighbors=param["n_neighbors"])
scores = cross_val_score(clf, X, Y, cv=kfold)
mean = scores.mean()
all_scores.append({
"id": param["id"],
"metric": param["metric"],
"n_neighbors": param["n_neighbors"],
"result": mean
})
print("%s | %0.4f" % (param["id"], mean))
best = max(all_scores, key=lambda s: s["result"])
print(f"Best param: {best}")
print(all_scores)
return all_scores
def plot(scores):
# options
plt.figure(figsize=(25, 8))
plt.margins(x=0.005)
plt.rc('font', size=14)
plt.xticks(rotation=90)
plt.grid(linestyle='--')
x = list(map(lambda x: x["id"], scores)) # names
y = list(map(lambda x: x["result"], scores)) # scores
    plt.suptitle('KNN parameter search')
plt.plot(x, y, 'o--')
plt.show()
def print_markdown_table(scores):
print("Variação | *metric* | *n_neighbors* | Acurácia média")
print("------ | ------- | -------- | ----------")
for s in scores:
name = s["id"]
metric = s["metric"]
n = s["n_neighbors"]
result = '{:0.4f}'.format(s["result"])
print(f"{name} | {metric} | {n} | {result}")
K_SPLITS = 10
# keep 20% of the full train set
train = get_train_set(train_file, 0.20)
# separate class from other columns
X = train.values[:, :-1]
Y = train['y']
# KFold
kfold = setup_kfold(X, Y, K_SPLITS)
# Generate params
params = generate_knn_params()
# Run scoring for best params
scores = run_knn_score(X, Y, params, kfold)
# plot
plot(scores)
print_markdown_table(scores)
from django.test import TestCase
from foia_hub.models import Agency, Office
from foia_hub.scripts.load_agency_contacts import (
load_data, update_reading_rooms, add_request_time_statistics,
extract_tty_phone, extract_non_tty_phone, build_abbreviation)
example_office1 = {
'address': {
'address_lines': ['line 1', 'line 2'],
'street': '75 Hawthorne Street',
'city': 'San Francisco',
'state': 'CA',
'zip': '94105'
},
'emails': ['williams.deborah@epa.gov'],
'keywords': ['keyword 1', 'keyword 2'],
'misc': {'U.S. EPA, Region 9': 'Regional Freedom of Information\
Officer, Phone: 415-947-4251'},
'name': 'Region 9 (States: AZ, CA, HI, NV, AS, GU)',
'phone': '415-947-4251',
'public_liaison': {'name': 'Deborah Williams', 'phone': ['703-516-5555']},
'request_form': 'http://www.epa.gov/foia/requestform.html',
'service_center': {'name': 'Timbo Two', 'phone': ['415-947-4251']},
'top_level': False,
'website': 'http://www.epa.gov/region09/foia/index.html'
}
example_sub_office = {
'abbreviation': 'R9',
'address': {
'address_lines': ['line 1', 'line 2'],
'street': '75 Hawthorne Street',
'city': 'San Francisco',
'state': 'CA',
'zip': '94105'
},
'emails': ['williams.deborah@epa.gov'],
'common_requests': ['common request 1'],
'description': 'The mission of this sub is...',
'keywords': ['keyword 1', 'keyword 2'],
'misc': {'U.S. EPA, Region 10': 'Regional Freedom of Information\
Officer, Phone: (415) 947-4251'},
'name': 'Region 10 (States: AK, ID, OR, WA)',
'no_records_about': ['no records about 1'],
'phone': '415-947-4251',
'public_liaison': {'name': 'Deborah Williams', 'phone': ['703-516-5555']},
'request_form': 'http://www.epa.gov/foia/requestform.html',
'service_center': {'name': 'Timbo', 'phone': ['415-947-4251']},
'top_level': True,
'website': 'http://www.epa.gov/region09/foia/index.html'
}
example_agency = {
'abbreviation': 'EPA',
'address': {
'address_lines': [
'Larry Gottesman',
'National Freedom of Information Officer',
'(2882T)'],
'street': '1200 Pennsylvania Avenue, NW',
        'city': 'Washington',
'state': 'DC',
'zip': '20460'
},
'common_requests': ['common request 1'],
'departments': [example_office1, example_sub_office],
'description': 'The mission of EPA is to protect',
'keywords': ['Acid Rain', 'Agriculture'],
'name': 'Environmental Protection Agency',
'no_records_about': ['no records about 1'],
}
class LoaderTest(TestCase):
def test_load_data(self):
""" Check that agency data is loaded correctly """
load_data(example_agency)
# Check that agency elements are loaded
a = Agency.objects.get(name='Environmental Protection Agency')
self.assertEqual('environmental-protection-agency', a.slug)
self.assertEqual('The mission of EPA is to protect', a.description)
self.assertEqual(['Acid Rain', 'Agriculture'], a.keywords)
self.assertEqual(['common request 1'], a.common_requests)
self.assertEqual(['no records about 1'], a.no_records_about)
# Check that elements from top-level (sub_agency) offices are loaded
sub_a = Agency.objects.get(
name='Region 10 (States: AK, ID, OR, WA)')
self.assertEqual(
'region-10-states-ak-id-or-wa', sub_a.slug)
self.assertEqual(['keyword 1', 'keyword 2'], sub_a.keywords)
self.assertEqual(a, sub_a.parent)
# Ensure that abbreviations are not overwritten
self.assertEqual('R9', sub_a.abbreviation)
self.assertEqual(['common request 1'], sub_a.common_requests)
self.assertEqual(['no records about 1'], sub_a.no_records_about)
self.assertEqual(
'The mission of this sub is...', sub_a.description)
# Check that elements from regular offices are loaded
o = Office.objects.get(
name='Region 9 (States: AZ, CA, HI, NV, AS, GU)')
self.assertEqual(
'environmental-protection-agency-' +
'-region-9-states-az-ca-hi-nv-as-gu', o.slug)
def test_multi_load(self):
""" Ensures that old data are set to null on second load """
# Load one
load_data(example_agency)
sub_a = Agency.objects.get(
name='Region 10 (States: AK, ID, OR, WA)')
self.assertEqual(sub_a.person_name, 'Timbo')
self.assertEqual(sub_a.public_liaison_name, 'Deborah Williams')
self.assertEqual(sub_a.address_lines, ['line 1', 'line 2'])
self.assertEqual(sub_a.zip_code, '94105')
self.assertEqual(sub_a.state, 'CA')
self.assertEqual(sub_a.city, 'San Francisco')
self.assertEqual(sub_a.street, '75 Hawthorne Street')
# Deleting values
del (example_sub_office['service_center']['name'],
example_sub_office['public_liaison']['name'],
example_sub_office['address']['address_lines'],
example_sub_office['address']['zip'],
example_sub_office['address']['state'],
example_sub_office['address']['city'],
example_sub_office['address']['street']
)
# Load two test
load_data(example_agency)
sub_a = Agency.objects.get(
name='Region 10 (States: AK, ID, OR, WA)')
self.assertEqual(sub_a.person_name, None)
self.assertEqual(sub_a.public_liaison_name, None)
self.assertEqual(sub_a.address_lines, [])
self.assertEqual(sub_a.zip_code, None)
self.assertEqual(sub_a.state, None)
self.assertEqual(sub_a.city, None)
self.assertEqual(sub_a.street, None)
class LoadingTest(TestCase):
fixtures = ['agencies_test.json', 'offices_test.json']
def test_update_reading_rooms(self):
""" Test if reading rooms are added properly """
reading_room_data = {
'reading_rooms': [
['Electronic Reading Room', 'http://agency.gov/err/'],
['Pre-2000 Reading Room', 'http://agency.gov/pre-2000/rooms']]
}
agency = Agency.objects.get(slug='department-of-homeland-security')
update_reading_rooms(agency, reading_room_data)
agency.save()
# Retrieve saved
dhs = Agency.objects.get(slug='department-of-homeland-security')
self.assertEqual(2, len(dhs.reading_room_urls.all()))
reading_room_1 = dhs.reading_room_urls.get(
link_text='Electronic Reading Room')
self.assertEqual(
'Electronic Reading Room',
reading_room_1.link_text)
self.assertEqual(
'http://agency.gov/err/',
reading_room_1.url)
reading_room_2 = dhs.reading_room_urls.get(
link_text='Pre-2000 Reading Room')
self.assertEqual(
'Pre-2000 Reading Room',
reading_room_2.link_text)
self.assertEqual(
'http://agency.gov/pre-2000/rooms',
reading_room_2.url)
def test_add_delete_reading_rooms(self):
""" Add a reading room. Then, remove a reading room (by omission)
during a subsequent load. The reading rooms in the database should
        reflect these changes (the removed reading room should not be there).
"""
census = Office.objects.get(
slug='department-of-commerce--census-bureau')
all_rooms = census.reading_room_urls.all().count()
self.assertEqual(0, all_rooms)
data = {
'reading_rooms': [
['Url One', 'http://urlone.gov'],
['Url Two', 'http://urltwo.gov']]}
update_reading_rooms(census, data)
all_rooms = census.reading_room_urls.all()
self.assertEqual(2, len(all_rooms))
data = {
'reading_rooms': [
['Url One', 'http://urlone.gov'],
['Url Three', 'http://urlthree.gov']]}
update_reading_rooms(census, data)
rr_count = census.reading_room_urls.all().count()
self.assertEqual(2, rr_count)
def test_add_stats(self):
"""
Confirms all latest records are loaded, no empty records
are created, and records with a value of `less than one`
are flagged.
"""
# Load data
agency = Agency.objects.get(slug='department-of-homeland-security')
data = {'request_time_stats': {
'2012': {'simple_median_days': '2'},
'2014': {'simple_median_days': 'less than 1'}
}}
add_request_time_statistics(data, agency)
# Verify that only one stat was added
self.assertEqual(len(agency.stats_set.all()), 1)
# Verify latest data is returned when it exists
retrieved = agency.stats_set.filter(
stat_type='S').order_by('-year').first()
self.assertEqual(retrieved.median, 1)
# Verify that `less than one` records are flagged
retrieved = agency.stats_set.filter(
stat_type='S').order_by('-year').first()
self.assertEqual(retrieved.less_than_one, True)
# Load test 2
agency = Agency.objects.get(slug='department-of-homeland-security')
data = {'request_time_stats': {
'2015': {'simple_median_days': '3',
'complex_median_days': '3'}}}
add_request_time_statistics(data, agency)
# Verify latest old data is overwritten when new data is updated
self.assertEqual(len(agency.stats_set.all()), 2)
def test_extract_tty_phone(self):
""" Test: from a service center entry, extract the TTY phone if it
exists. """
service_center = {
'phone': ['202-555-5555 (TTY)', '202-555-5551']
}
tty_phone = extract_tty_phone(service_center)
self.assertEqual('202-555-5555 (TTY)', tty_phone)
service_center['phone'] = ['202-555-5551']
tty_phone = extract_tty_phone(service_center)
self.assertEqual(None, tty_phone)
service_center['phone'] = [
'202-555-5555 (TTY)', '202-555-5552 (TTY)', '202-555-5551']
tty_phone = extract_tty_phone(service_center)
self.assertEqual('202-555-5555 (TTY)', tty_phone)
def test_extract_non_tty_phone(self):
""" Test that extract non-tty phone numbers from a list works. If there
aren't any, this defaults to TTY numbers (and tests that)"""
public_liaison = {
'phone': ['202-555-5551', '202-555-5555 (TTY)']
}
phone = extract_non_tty_phone(public_liaison)
self.assertEqual('202-555-5551', phone)
# No non-tty number
public_liaison['phone'] = ['202-555-5552 (TTY)']
phone = extract_non_tty_phone(public_liaison)
self.assertEqual('202-555-5552 (TTY)', phone)
public_liaison['phone'] = []
phone = extract_non_tty_phone(public_liaison)
self.assertEqual(None, phone)
def test_build_abbreviation(self):
""" Test that abbreviations are built correctly """
sub_agency_name = "Administrative Conference of the United States"
self.assertEqual("ACUS", build_abbreviation(sub_agency_name))
sub_agency_name = "U.S. Customs & Border Protection"
self.assertEqual("USCBP", build_abbreviation(sub_agency_name))
import picobox
@picobox.pass_("conf")
def session(conf):
class Session:
connection = conf["connection"]
return Session()
@picobox.pass_("session")
def compute(session):
print(session.connection)
box = picobox.Box()
box.put("conf", {"connection": "sqlite://"})
box.put("session", factory=session)
with picobox.push(box):
compute()
# pg. 72, ex13: parameters, unpacking, variables
# Study drill 3: combine input with argv to make a script that gets more input from the user
from sys import argv
#read the WYSS section for how to run this
script, first, second, third = argv
print("The script is called:", script)
print("Your first variable is:", first)
print("Your second variable is:", second)
print("Your third variable is:", third)
a1 = input("Parameter A1:")
a2 = input("Parameter A2:")
print(f"Parameter A1 is {a1}, parameter A2 is {a2}")
import unittest
import sys
from PyQt5.QtWidgets import QApplication, QDialog
from ui import DisclaimerDialog
app = QApplication(sys.argv)
disclaimer_dialog = QDialog()
disclaimer_dialog_ui = DisclaimerDialog.Ui_dialog()
disclaimer_dialog_ui.setupUi(disclaimer_dialog)
class DisclaimerDialogTests(unittest.TestCase):
def test_defaults(self):
'''Test the defaults'''
self.assertEqual(disclaimer_dialog_ui.label.text(),"Only reports supported by selected vendor will be retrieved!")
def test_button(self):
okWidget = disclaimer_dialog_ui.buttonBox.Ok
self.assertIsNotNone(okWidget)
if __name__ == '__main__':
unittest.main()
import json
with open('04_movies_save.json', 'r', encoding='UTF-8') as fr:
movies = json.load(fr)
with open('04_notfound_save.json', 'r', encoding='UTF-8') as fr:
not_found = json.load(fr)
with open('02_rating_save.json', 'r', encoding='UTF-8') as fr:
ratings = json.load(fr)
new_rating = []
new_movies = []
complete = {}
for movie in movies:
    if not_found.get(str(movie['pk'])):
        continue
    new_movies.append(movie)
    complete[movie['pk']] = movie['fields']['name']
for rating in ratings:
    if not_found.get(str(rating['fields']['movie'])):
        continue
    new_rating.append(rating)
with open('06_rating.json', 'w', encoding='UTF-8') as fp:
json.dump(new_rating, fp, ensure_ascii=False, indent=4)
with open('06_movie.json', 'w', encoding='UTF-8') as fp:
json.dump(new_movies, fp, ensure_ascii=False, indent=4)
with open('06_complete.json', 'w', encoding='UTF-8') as fp:
json.dump(complete, fp, ensure_ascii=False, indent=4)
# Collaborators (including web sites where you got help; enter none if you didn't need help):
name = input("please enter your name: ")
age = input("please enter your age: ")
grade = input("please enter your grade: ")
school = input("please enter your school: ")
directory = {}
directory.update({'name': name, 'age': age, 'grade': grade, 'school': school})
for key_name, value_name in directory.items():
print(f"Your {key_name} is {value_name}")
import logging
import azure.functions as func
import mysql.connector
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
from azure.identity import DefaultAzureCredential, AzureCliCredential, ChainedTokenCredential, ManagedIdentityCredential
managed_identity = ManagedIdentityCredential()
scope = "https://management.azure.com"
token = managed_identity.get_token(scope)
access_token = token.token
crtpath = 'BaltimoreCyberTrustRoot.crt.pem'
#crtpath = 'DigiCertGlobalRootCA.crt.pem'
# Connect to MySQL
cnx = mysql.connector.connect(
user="mymsiuser",
password=access_token,
host="mysqldevSUFFIXflex.mysql.database.azure.com",
port=3306,
ssl_ca=crtpath,
tls_versions=['TLSv1.2']
)
logging.info(cnx)
# Show databases
cursor = cnx.cursor()
cursor.execute("SHOW DATABASES")
result_list = cursor.fetchall()
# Build result response text
result_str_list = []
for row in result_list:
row_str = ', '.join([str(v) for v in row])
result_str_list.append(row_str)
result_str = '\n'.join(result_str_list)
return func.HttpResponse(
result_str,
status_code=200
)
import functools
from bargeparse.cli import cli
def command(*args, param_factories=None):
"""
Decorator to create a CLI from the function's signature.
"""
def decorator(func):
func._subcommands = []
func.subcommand = functools.partial(
subcommand, func, param_factories=param_factories
)
@functools.wraps(func)
def wrapper(*args, no_bargeparse: bool = False, **kwargs):
# If there are args or kwargs, then assume that func() is being called
# directly and is not from the command line.
if len(args) > 0 or len(kwargs) > 0 or no_bargeparse:
return func(*args, **kwargs)
cli(func, param_factories=param_factories)
wrapper.is_bargeparse_command = True
return wrapper
if len(args) > 0 and callable(args[0]):
return decorator(args[0])
else:
return decorator
def subcommand(parent_command, *args, param_factories=None):
"""
Decorator to register a function as a subcommand of a given parent command.
"""
def decorator(func):
parent_command._subcommands.append(func)
return func
if len(args) > 0 and callable(args[0]):
return decorator(args[0])
else:
return decorator
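
# A minimal usage sketch (hypothetical command names; argument parsing itself
# is delegated to bargeparse.cli.cli):
if __name__ == "__main__":
    @command
    def greet(name: str = "world"):
        """Top-level command; calling it with args bypasses the CLI."""
        print(f"hello {name}")

    @greet.subcommand
    def shout(name: str = "world"):
        """Registered on greet via the functools.partial set up in `command`."""
        print(f"HELLO {name.upper()}")

    greet()  # no args/kwargs -> parses sys.argv via cli(greet, ...)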
#pylint:skip-file
import sys
from argparse import ArgumentParser
import networkx as nx
def main(argv):
parser = ArgumentParser()
parser.add_argument('-i', '--input_file', help='Input .dot file',
required=True)
parser.add_argument('-s', '--start_id', help='Start ID (inclusive)',
required=True)
parser.add_argument('-f', '--finish_id', help='Finish ID (inclusive)', required=True)
parser.add_argument('-o', '--output_file', help='Output .dot file', required=True)
args = parser.parse_args(args=argv)
graph = nx.DiGraph(nx.drawing.nx_pydot.read_dot(args.input_file))
new_graph = nx.DiGraph()
start_key = None
for node_key in nx.lexicographical_topological_sort(graph):
id_portion = node_key.split()[0]
has_id = id_portion.isdigit()
if has_id:
curr_id = int(id_portion)
if curr_id == int(args.start_id):
start_key = node_key
break
if start_key is None:
raise RuntimeError("Could not find the node with ID {} to start from!".format(args.start_id))
for edge in nx.edge_bfs(graph, start_key, orientation='ignore'):
from_key, to_key, _ = edge
id_portion = from_key.split()[0]
has_id = id_portion.isdigit()
end_key = from_key
if has_id:
curr_id = int(id_portion)
if curr_id >= int(args.finish_id):
break
node_data = graph.nodes[from_key]
new_graph.add_node(from_key, **node_data)
edge_data = graph.edges[from_key, to_key]
new_graph.add_edge(from_key, to_key, **edge_data)
# for edge in nx.edge_bfs(graph, end_key, reverse=True):
# from_key, to_key = edge
# if from_key == start_key:
# break
# node_data = graph.nodes[from_key]
# new_graph.add_node(from_key, **node_data)
# edge_data = graph.edges[from_key, to_key]
# new_graph.add_edge(from_key, to_key, **edge_data)
nx.drawing.nx_pydot.write_dot(new_graph, args.output_file)
if __name__ == '__main__':
main(sys.argv[1:])
import os
import sys
from .toolkit import *
__version__ = '1.1.0'
class ToolkitCompileFileCommand(compiler.ES6_Toolkit_Compile_File):
def run(self):
self.execute()
class ToolkitDumpJsCommand(compiler.ES6_Toolkit_Dump_JS):
def run(self, edit, compiled_js):
self.execute(edit, compiled_js)
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.gui.CILoadingScreen
from direct.gui.DirectGui import OnscreenText
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.Transitions import Transitions
from panda3d.core import TextNode  # needed for TextNode.ALeft below
from lib.coginvasion.base import FileUtility
class CILoadingScreen:
    def __init__(self):
        self.transitions = Transitions(loader)
        self.modelsLoaded = 0  # counter consumed by loadModelDone()
def createMenu(self):
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
        self.version_lbl = OnscreenText(text='ver-' + game.version, scale=0.06,
                                        pos=(-1.32, -0.97, -0.97),
                                        align=TextNode.ALeft, fg=(0.9, 0.9, 0.9, 7))
def beginLoadGame(self):
phasesToScan = [
'models', 'phase_3/models', 'phase_3.5/models', 'phase_4/models']
self.models = FileUtility.findAllModelFilesInVFS(phasesToScan)
for model in self.models:
loader.loadModel(model)
loader.progressScreen.tick()
doneInitLoad()
self.destroy()
def loadModelDone(self, array):
self.modelsLoaded += 1
if self.modelsLoaded == len(self.models):
doneInitLoad()
self.destroy()
def destroy(self):
self.version_lbl.destroy()
from unittest import TestCase
from musicscore.musicxml.groups.common import Voice
from musicscore.musicxml.elements.fullnote import Pitch
from musicscore.musicxml.elements.note import Note, Duration
class Test(TestCase):
def setUp(self) -> None:
self.note = Note()
self.note.add_child(Pitch())
self.note.add_child(Duration())
def test_voice(self):
self.note.add_child(Voice('1'))
result = '''<note>
<pitch>
<step>C</step>
<octave>4</octave>
</pitch>
<duration>1</duration>
<voice>1</voice>
</note>
'''
self.assertEqual(self.note.to_string(), result)
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
@author: Raven
@contact: aducode@126.com
@site: https://github.com/aducode
@file: __init__.py
@time: 2016/1/31 23:57
"""
import types
from type import Any
from type import Null
from type import Bool
from type import Byte
from type import Int16
from type import Int32
from type import Int64
from type import Character
from type import Float
from type import Double
from type import Decimal
from type import Datetime
from type import String
from type import List
from type import Set
from type import Map
from type import KeyValue
from type import array as __array
from type import Array as __Array
from type import enum
from type import Serializable
from type import serializable, member
b = Byte
c = Character
Short = s = Int16
Int = i = Int32
Long = l = Int64
f = Float
d = Double
decimal = Decimal
def Array(para):
    """
    Dispatch helper: given a *type*, return the corresponding typed array via
    ``array``; given a concrete value, wrap it in an ``Array`` instance.
    :param para: a type object, or a value to wrap
    :return: an array type or an array instance
    """
    if isinstance(para, types.TypeType):
        return __array(para)
    else:
        return __Array(para)
import json
import matplotlib.pyplot as plt
import sys
import os
from matplotlib.backends.backend_pdf import PdfPages
from random import randrange
import re
import traceback
from datetime import datetime
import argparse
import operator
import matplotlib.dates as mdate
def buildChart(name, x,y, label1, x2,y2, label2):
# plot
fig, ax = plt.subplots()
#colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
colors = [
'xkcd:orange',
'xkcd:royal blue',
'xkcd:forest green',
'xkcd:green',
'xkcd:purple',
'xkcd:blue',
'xkcd:pink',
'xkcd:brown',
'xkcd:red',
'xkcd:light blue',
'xkcd:teal',
'xkcd:light green',
        'xkcd:magenta',
'xkcd:yellow',
'xkcd:sky blue',
'xkcd:grey',
'xkcd:lime green',
'xkcd:violet',
'xkcd:dark green',
'xkcd:olive',
'xkcd:dark purple',
'xkcd:tan',
'xkcd:black',
'xkcd:beige',
'xkcd:peach',
'xkcd:indigo',
'xkcd:mustard'
]
markers = [
'+',
'o',
'^',
'.',
'v',
's',
'd',
'o',
]
lss = [
':',
'-.',
'--',
'-',
]
# put all at the same beginning
x = [ xi - x[0] for xi in x]
x2 = [ xi - x2[0] for xi in x2]
xsecs = mdate.epoch2num(x)
plt.plot_date(xsecs,y,
marker=markers[0],
color=colors[0],
ls=lss[0],
label=label1)
x2secs = mdate.epoch2num(x2)
plt.plot_date(x2secs,y2,
marker=markers[1],
color=colors[1],
ls=lss[1],
label=label2)
plt.xlabel('Time (day hh:mm)')
plt.ylabel('Objective function')
ax.legend(loc='upper right', fontsize='medium')
#fig.subplots_adjust(bottom=0.9)
plt.xticks(rotation=45, ha='right')
fig.tight_layout()
#plt.axis([0, len(results), 0, max(y)])
# plt.savefig(pp, format='pdf')
# pp.close()
plt.savefig('../../Results/Final/GRASPvsBRKGA/graphs/' + name + '.png')
plt.show()
plt.close()
if __name__ == '__main__':
results_folder = '../../Results/Final/GRASPvsBRKGA'
parser = argparse.ArgumentParser()
parser.add_argument("f1",help="file1 where to read results from")
parser.add_argument("f2",help="file2 where to read results from")
args = parser.parse_args()
# json.load,
results1 = json.load(open(args.f1,'r'))
results2 = json.load(open(args.f2,'r'))
# create x, y, x2, y2
x=[]
y=[]
for elem in results1:
if "end" in elem.keys():
continue
objf = elem["objf"]
t = elem["time"]
if objf == -1:
continue
else:
x.append(t)
y.append(objf)
x2=[]
y2=[]
for elem in results2:
if "end" in elem.keys():
continue
objf = elem["objf"]
t = elem["time"]
if objf == -1:
continue
else:
x2.append(t)
y2.append(objf)
# labels
if args.f1.find('brkga') >-1:
label1='BRKGA'
label2='GRASP'
else:
label2='BRKGA'
label1='GRASP'
# send to plot function
buildChart('comparison_' + '{0:%Y%m%d_%H-%M-%S}'.format(datetime.now()), x,y, label1, x2,y2, label2)
import argparse
import sys
import os
from subprocess import call, check_output
def main():
action = parse_commandline()
action()
def parse_commandline():
parser = argparse.ArgumentParser(
description='A simple program to compile and run OpenCV programs',
formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest='subcommand')
add_build_parser(subparsers)
if len(sys.argv) == 1:
print_help(parser, bail=True)
args = parser.parse_args()
subcommands_actions = {
'build': build_action
}
subcommand_action = subcommands_actions.get(args.subcommand)
if subcommand_action is not None:
return lambda: subcommand_action(args)
else:
print_help(parser, bail=True)
def build_action(args):
sources = args.sources
output = 'result.out'
if args.output is not None:
output = args.output
if len(args.sources) == 1:
if args.output is None:
src = args.sources[0]
output = '{}.out'.format(src[:src.rfind('.')])
is_release = False
if args.release:
is_release = True
to_execute = args.execute
arguments = args.arguments
is_verbose = args.verbose
cc = ['g++', '-std=c++14']
flags = [
'-ggdb',
'-pipe',
'-Wundef',
'-Wstrict-overflow=5',
'-Wsign-promo',
'-Woverloaded-virtual',
'-Wold-style-cast',
'-Wctor-dtor-privacy',
'-Wformat=2',
'-Winvalid-pch',
'-Wmissing-include-dirs',
'-Wpacked',
'-Wpadded',
'-Wall',
'-Wextra',
'-pedantic',
'-Wdouble-promotion',
'-Wshadow',
'-Wfloat-equal',
'-Wcast-align',
'-Wcast-qual',
'-Wwrite-strings',
'-Wconversion',
'-Wsign-conversion',
'-Wmissing-declarations',
'-Wredundant-decls',
'-Wdisabled-optimization',
'-Winline',
'-Wswitch-default',
'-Wswitch-enum',
'-Wuseless-cast',
'-Wlogical-op',
'-Wzero-as-null-pointer-constant',
'-Wnoexcept',
'-Wstrict-null-sentinel']
if is_release:
        flags = ['-O2', '-pipe', '-s', '-DNDEBUG', '-Wall',
                 '-D_FORTIFY_SOURCE=1', '-fstack-protector-strong',
                 '-Wdisabled-optimization', '-Wstack-protector', '-Winline']
opencv_cflags_libs_raw = check_output(
['pkg-config', 'opencv', '--cflags', '--libs'])
opencv_cflags_libs = opencv_cflags_libs_raw.decode().split()
compiler_call = cc + flags + ['-o', output] + sources + opencv_cflags_libs
if is_verbose:
print('Compiler call:')
print(' '.join(compiler_call), end='\n\n')
retcode = call(compiler_call)
if retcode != 0:
print('Failed building check your code', file=sys.stderr)
exit(1)
if to_execute:
execute_arguments = [os.path.abspath(output)]
if arguments is not None:
execute_arguments += arguments
if is_verbose:
print('Program call:')
print(' '.join(execute_arguments))
call(execute_arguments)
def add_build_parser(subparsers):
build_parser = subparsers.add_parser(
'build', description='Use this sub-command to build the OpenCV program')
build_parser.add_argument(
'-s',
'--sources',
required=True,
metavar='SOURCE_FILE',
type=str,
dest='sources',
nargs='+',
help='OpenCV C++ source files')
build_parser.add_argument(
'-o',
'--output',
required=False,
metavar='OUTPUT_FILE',
type=str,
dest='output',
help="OpenCV C++ output file")
build_parser.add_argument(
'-a',
'--arguments',
required=False,
metavar='ARGUMENT',
type=str,
dest='arguments',
nargs='+',
help='arguments to pass to the output file')
exclusive_compilation_mode_group = build_parser.add_mutually_exclusive_group(
required=False)
exclusive_compilation_mode_group.add_argument(
'-r',
required=False,
dest='release',
action='store_true',
help='Enable release compilation')
exclusive_compilation_mode_group.add_argument(
'-d',
required=False,
dest='debug',
action='store_true',
help='Enable debug compilation')
build_parser.add_argument(
'-x',
required=False,
dest='execute',
action='store_true',
help='Enable automatic execution of the output file')
build_parser.add_argument(
'-v',
required=False,
dest='verbose',
action='store_true',
help='Enable verbose mode')
def print_help(parser, message=None, bail=False):
if message is not None:
print('Error Message: {}'.format(message), file=sys.stderr)
parser.print_help(file=sys.stderr)
if bail:
exit(1)
if __name__ == "__main__":
main()
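
# Example invocations (sketch; assumes this script is saved as `cvbuild.py`):
#   python cvbuild.py build -s main.cpp -o app.out
#   python cvbuild.py build -s main.cpp -r -x -a image.png -v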
#!/usr/bin/env python3
#
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R multi_gpu_test_py
import unittest
import k2
import torch
class TestMultiGPU(unittest.TestCase):
def _test_ragged(self):
if torch.cuda.is_available() is False:
print('skip it since CUDA is not available')
return
if torch.cuda.device_count() < 2:
            print('skip it since fewer than 2 GPUs are available')
return
if not k2.with_cuda:
return
device0 = torch.device('cuda', 0)
device1 = torch.device('cuda', 1)
torch.cuda.set_device(device1)
r0 = k2.RaggedInt('[ [[0] [1]] ]').to(device0)
r1 = k2.RaggedInt('[ [[0] [1]] ]').to(device1)
assert torch.cuda.current_device() == 1
r0 = k2.ragged.remove_axis(r0, 0)
r1 = k2.ragged.remove_axis(r1, 0)
expected_r0 = k2.RaggedInt('[[0] [1]]').to(device0)
expected_r1 = k2.RaggedInt('[[0] [1]]').to(device1)
assert torch.all(torch.eq(r0.row_splits(1), expected_r0.row_splits(1)))
assert torch.all(torch.eq(r1.row_splits(1), expected_r1.row_splits(1)))
assert torch.all(torch.eq(r0.row_ids(1), expected_r0.row_ids(1)))
assert torch.all(torch.eq(r1.row_ids(1), expected_r1.row_ids(1)))
assert r0.num_elements() == expected_r0.num_elements()
assert r1.num_elements() == expected_r1.num_elements()
try:
# will throw an exception because they two are not on
# the same device
assert torch.all(
torch.eq(r0.row_splits(1), expected_r1.row_splits(1)))
except RuntimeError as e:
print(e)
assert torch.cuda.current_device() == 1
def test_fsa(self):
if torch.cuda.is_available() is False:
print('skip it since CUDA is not available')
return
if torch.cuda.device_count() < 2:
            print('skip it since fewer than 2 GPUs are available')
return
if not k2.with_cuda:
return
device0 = torch.device('cuda', 0)
device1 = torch.device('cuda', 1)
torch.cuda.set_device(device1)
s = '''
0 1 1 0.1
1 2 -1 0.2
2
'''
fsa0 = k2.Fsa.from_str(s).to(device0).requires_grad_(True)
fsa1 = k2.Fsa.from_str(s).to(device1).requires_grad_(True)
fsa0 = k2.create_fsa_vec([fsa0, fsa0])
fsa1 = k2.create_fsa_vec([fsa1, fsa1])
tot_scores0 = fsa0.get_forward_scores(True, True)
(tot_scores0[0] * 2 + tot_scores0[1]).backward()
tot_scores1 = fsa1.get_forward_scores(True, True)
(tot_scores1[0] * 2 + tot_scores1[1]).backward()
if __name__ == '__main__':
unittest.main()
def main():
print "plugin_b"
# -*- coding: utf8 -*-
csv_columns = [
'DATE-OBS',
'TIME-OBS',
'FILENAME',
'OBSTYPE',
'OBJECT',
'NOTES',
'EXPTIME',
'RA',
'DEC',
'FILTERS',
'FILTER1',
'AIRMASS',
'DECPANGL',
'RAPANGL',
'NEXTEND'
]
import json
from tracardi_plugin_sdk.action_runner import ActionRunner
from tracardi_plugin_sdk.domain.register import Plugin, Spec, MetaData, Form, FormGroup, FormField, FormComponent
from tracardi_plugin_sdk.domain.result import Result
from tracardi_json_from_objects.model.models import Configuration
def validate(config: dict):
return Configuration(**config)
class ConvertAction(ActionRunner):
def __init__(self, **kwargs):
self.config = validate(kwargs)
async def run(self, payload):
dot = self._get_dot_accessor(payload)
path = dot[self.config.to_json]
result = json.dumps(dict(path), default=str)
return Result(port="payload", value={"json": result})
def register() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module='tracardi_json_from_objects.plugin',
className='ConvertAction',
inputs=["payload"],
outputs=['payload'],
version='0.6.0.1',
license="MIT",
author="Patryk Migaj",
init={
"to_json": None
},
form=Form(groups=[
FormGroup(
fields=[
FormField(
id="to_json",
name="Path to data",
description="Path to data to be serialized to JSON. "
"E.g. profile@stats.counters.boughtProducts",
component=FormComponent(type="dotPath", props={"label": "Field path"})
)
]
)
]),
),
metadata=MetaData(
name='To JSON',
desc='This plugin converts objects to JSON',
type='flowNode',
width=200,
height=100,
icon='json',
group=["Data processing"]
)
)
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import time
import mock
import pytest
from data_pipeline.expected_frequency import ExpectedFrequency
from data_pipeline.producer import Producer
from tests.factories.base_factory import MessageFactory
@pytest.mark.usefixtures(
"configure_teams",
"config_benchmark_containers_connections"
)
@pytest.mark.benchmark
class TestBenchProducer(object):
@pytest.yield_fixture
def patch_monitor_init_start_time_to_now(self):
with mock.patch(
'data_pipeline.client._Monitor.get_monitor_window_start_timestamp',
return_value=int(time.time())
) as patched_start_time:
yield patched_start_time
@pytest.yield_fixture
def dp_producer(self, team_name):
with Producer(
producer_name='producer_1',
team_name=team_name,
expected_frequency_seconds=ExpectedFrequency.constantly,
use_work_pool=False
) as producer:
yield producer
def test_publish(self, benchmark, dp_producer):
def setup():
return [MessageFactory.create_message_with_payload_data()], {}
# Publishing a message takes 1ms on average.
# Messages are flushed every 100ms.
# config::kafka_producer_flush_time_limit_seconds
#
# Perform 2000 rounds to ensure 20 flushes.
benchmark.pedantic(dp_producer.publish, setup=setup, rounds=2000)
"""
Copyright (c) 2021 Heureka Group a.s. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import ChainMap
from collections.abc import MutableMapping
import logging
import os
from pathlib import Path
import string
import types
from typing import Optional, Union, Any, Callable
import warnings
_logger = logging.getLogger(__name__)
class Config(MutableMapping):
"""
Main object holding the configuration.
"""
__slots__ = (
'config_files', 'env_prefix', 'config_files_env_var', '_loaded', '_converters',
'_override_layer', '_env_layer', '_file_layer', '_default_layer'
)
autoload = True
"""bool: Whether to automatically trigger load() on item access or configuration test (if not loaded yet)."""
expansion_glob_pattern = '*.cnf.py'
"""str: Pattern used to expand a directory, when passed instead of a config file."""
def __init__(
self,
*config_files: Union[str, Path],
env_prefix: str = 'APP_',
config_files_env_var: Optional[str] = 'CONFIG'
):
"""
Create configuration object, init empty layers.
Args:
*config_files: Configuration files to load to the file layer.
env_prefix: Prefix of all env vars handled by this library (set to empty string to disable prefixing).
config_files_env_var: Name of env var containing colon delimited list of files to prepend to `config_files`.
Set to `None` to disable this behavior.
"""
_check_safe_env_name(env_prefix)
_check_safe_env_name(config_files_env_var)
self.config_files = config_files
self.env_prefix = env_prefix
self.config_files_env_var = config_files_env_var
self._loaded = False
self._converters = {}
"""Holds converter functions to be called every time when converting env variable."""
self._override_layer = {}
"""Layer holding runtime directive overrides, if any."""
self._env_layer = {}
"""Layer holding directives loaded from environment variables, if any."""
self._file_layer = ChainMap()
"""Layer holding directives loaded from file(s), if any."""
self._default_layer = {}
"""Layer holding default value for every initialized directive."""
def init(self, key: str, converter: Callable[[str], Any], default=None):
"""
Initialize configuration directive.
Args:
key: Case-sensitive directive name which is used everywhere (in env vars, in config files, in defaults).
converter: Function, which is called when converting env variable value to Python.
default: Directive default value.
"""
if key == self.config_files_env_var:
raise KeyError('Conflict between directive name and `config_files_env_var` name.')
_check_safe_env_name(key)
self._loaded = False
self._default_layer[key] = default
self._converters[key] = converter
if converter == bool:
warnings.warn('Using bool as converter is unsafe as it will treat all nonempty strings as True. '
'Use llconfig.converters.bool_like converter instead.', stacklevel=3)
def load(self):
"""
Load env layer and file layer.
There is no need to call this explicitly when `autoload` is turned on, but it may be useful to trigger
possible env vars conversion errors as soon as possible.
Raises:
ValueError: When conversion fails for any of env vars.
"""
self._load_env_vars()
self._load_files()
self._loaded = True
def _load_env_vars(self):
_logger.debug('loading env vars')
for prefixed_key, value in os.environ.items():
if not prefixed_key.startswith(self.env_prefix):
continue
key = prefixed_key[len(self.env_prefix):]
if key not in self._default_layer:
continue
try:
self._env_layer[key] = self._converters[key](value)
except Exception as e:
raise ValueError('Conversion error for environment variable "{}".'.format(self.env_prefix + key)) from e
_logger.info('env vars loaded')
def _load_files(self):
_logger.debug('loading config files')
paths = []
if self.config_files_env_var:
env_var = self.env_prefix + self.config_files_env_var
_logger.debug('getting list of config files from env var "{}"'.format(env_var))
env_var_val = os.environ.get(env_var)
if env_var_val:
paths.extend(Path(p) for p in env_var_val.split(':'))
if self.config_files:
paths.extend(Path(p) for p in self.config_files)
config_files = []
for p in paths:
if p.is_dir():
config_files.extend(self._expand_dir(p))
else:
config_files.append(p)
_logger.debug('list of config files to load: {}'.format(config_files))
self._file_layer.maps[:] = [self._load_file(f) for f in config_files]
_logger.info('config files loaded')
def _expand_dir(self, path: Path):
"""
Returns:
List[Path]: Contents of given path non-recursively expanded using `expansion_glob_pattern`, sorted by file
name in reverse order.
"""
files = path.glob(self.expansion_glob_pattern)
files = filter(lambda f: f.is_file(), files)
files = sorted(files, key=lambda f: f.name, reverse=True)
return list(files)
def _load_file(self, file: Path):
"""
Execute given file and parse config directives from it.
Returns:
Dict[str, Any]: Global namespace of executed file filtered to contain only initialized config keys.
"""
_logger.debug('loading file: "{}"'.format(file))
d = types.ModuleType(file.stem)
d.__file__ = file.name
exec(compile(file.read_bytes(), file.name, 'exec'), d.__dict__)
return {key: getattr(d, key) for key in dir(d) if key in self._default_layer}
def get_namespace(self, namespace: str, lowercase: bool = True, trim_namespace: bool = True):
"""
Returns:
Dict[str, Any]: Dict containing a subset of configuration options matching the specified namespace.
See Also:
http://flask.pocoo.org/docs/1.0/api/#flask.Config.get_namespace
"""
if not namespace:
raise ValueError('Namespace must not be empty.')
res = {}
for k, v in self.items():
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace):]
else:
key = k
if lowercase:
key = key.lower()
res[key] = v
return res
def __len__(self):
return len(self._default_layer)
def __iter__(self):
return iter(self._default_layer)
def __getitem__(self, key):
if not self._loaded and self.autoload:
self.load()
# add a bit of syntactic sugar
if isinstance(key, slice):
return self.get_namespace(key.start)
if key in self._override_layer:
return self._override_layer[key]
if key in self._env_layer:
return self._env_layer[key]
if key in self._file_layer:
return self._file_layer[key]
# search in _default_layer is intended to possibly fail
return self._default_layer[key]
def __setitem__(self, key: str, val):
if key not in self._default_layer:
raise KeyError('Overriding uninitialized key is prohibited.')
self._override_layer[key] = val
def __delitem__(self, key: str):
del self._override_layer[key]
def __repr__(self):
return '<{} {!r}>'.format(self.__class__.__name__, dict(self))
# https://stackoverflow.com/a/2821183/570503
_ENV_SAFE_CHARSET = set(string.ascii_uppercase + string.digits + '_')
"""Set[str]: Set of characters considered to be safe for environment variable names."""
def _check_safe_env_name(name, stacklevel=3): # this function => Config object => caller of Config object == 3 levels
if not all(ch in _ENV_SAFE_CHARSET for ch in name):
warnings.warn('Name "{}" is unsafe for use in environment variables.'.format(name), stacklevel=stacklevel)
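
if __name__ == '__main__':
    # A minimal usage sketch (hypothetical directive names and defaults):
    conf = Config(env_prefix='APP_')
    conf.init('DB_HOST', str, default='localhost')
    conf.init('DB_PORT', int, default=3306)
    conf.load()  # optional thanks to autoload; surfaces env conversion errors early
    print(conf['DB_PORT'])            # an APP_DB_PORT env var would override the default
    print(conf.get_namespace('DB_'))  # -> {'host': 'localhost', 'port': 3306}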
from django.test import TestCase, RequestFactory, Client
from chat.views import UnarchiveMessageHealthProfessionalView
from chat.models import Message
from user.models import HealthProfessional, Patient
class TestUnarchiveMessageHealthProfessionalView(TestCase):
def setUp(self):
self.health_professional = HealthProfessional.objects.create(name='User Test',
email='test@teste.com',
sex='M',
phone='1111111111',
is_active=True)
self.patient = Patient.objects.create(name='User Test',
email='testpatient@teste.com',
sex='M',
phone='1111111111',
is_active=True)
self.view = UnarchiveMessageHealthProfessionalView()
self.view_class = UnarchiveMessageHealthProfessionalView
self.factory = RequestFactory()
self.client = Client()
# Create Message 1.
self.message = Message()
self.message.text = "meu texto"
self.message.subject = "Assunto"
self.message.user_from = self.health_professional
self.message.user_to = self.patient
self.message.is_active_health_professional = False
self.message.pk = '1'
self.message.save()
def test_post_outbox_true(self):
request = self.factory.post('/')
request.user = self.health_professional
self.view.request = request
self.view.object = self.message
message = self.view_class.post(request, pk=1)
self.assertEqual(message.status_code, 302)
from py4jps.resources import JpsBaseLib
import os
from tqdm import tqdm
import time
import numpy as np
import pandas as pd
from SPARQLWrapper import SPARQLWrapper, CSV, JSON, POST
from shapely import geometry, wkt, ops
# read csv with regional code and WKT strings
df = pd.read_csv('scotland_lsoa_populations/scottish_LSOA.csv')
wkt_strings = df['WKT'].values  # renamed: `wkt` would shadow shapely's wkt module
code = df['DataZone'].values
# Code to upload 100 polygons at a time for speed
total = len(code)
n_compile = total / 100
remainder = total % 100
n_compile = int(n_compile)
len_query = np.zeros(n_compile+2)
for i in range(1,len(len_query)-1):
len_query[i] = len_query[i-1] + 100
len_query[-1] = len_query[-2] + remainder
for g in tqdm(range(len(len_query)-1)):
i = len_query[g]
# Start of SPARQL query
query='''
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ons_t: <http://statistics.data.gov.uk/def/statistical-geography#>
PREFIX gsp: <http://www.opengis.net/ont/geosparql#>
PREFIX ons: <http://statistics.data.gov.uk/id/statistical-geography/>
PREFIX abox: <http://www.theworldavatar.com/kb/ontogasgrid/offtakes_abox/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
INSERT DATA
    {
'''
middle_num = int(len_query[g+1]-len_query[g])
# Iterating over 100 (or less) regions
for j in range(middle_num):
addition = 'abox:%s_geometry rdf:type gsp:Geometry . \n '%(code[int(i+j)]) # geometry instance (regional code used for URI)
query += addition
addition = 'ons:%s gsp:hasGeometry abox:%s_geometry . \n '%(code[int(i+j)],code[int(i+j)]) # associating region with geometry
# NOTE: the region iteself is not defined here as it's class (statistical geography) because it was already defined
query += addition
        addition = 'abox:%s_geometry gsp:asWKT "%s" . \n '%(code[int(i+j)],wkt_strings[int(i+j)]) # adding WKT string property to geometry instance
query += addition
# end of SPARQL query
    query += '}'
# namespace and endpoint to update triple-store
DEF_NAMESPACE = 'ontogasgrid'
LOCAL_KG = "http://localhost:9999/blazegraph"
LOCAL_KG_SPARQL = LOCAL_KG + '/namespace/'+DEF_NAMESPACE+'/sparql'
sparql = SPARQLWrapper(LOCAL_KG_SPARQL)
sparql.setMethod(POST) # POST query, not GET
sparql.setQuery(query)
start = time.time()
ret = sparql.query().convert()
end = time.time()
"""
Customer Class including visualization.
"""
import random
import pandas as pd
import numpy as np
from a_star import find_path
from SupermarketMapClass import SupermarketMap
import constants
class Customer:
""" customer class including visualization."""
# possible states of a customer
STATES = ['checkout', 'dairy', 'drinks', 'entrance', 'fruit', 'spices']
# transition probability matrix
TPM = pd.read_csv('tpm.csv', index_col=[0])
# row and col range of each state
STATE_ROW_COL = {
'entrance':[[10], [14, 15]],
'fruit':[[2,3,4,5,6], [14, 15]],
'spices':[[2,3,4,5,6], [10, 11]],
'dairy':[[2,3,4,5,6], [6, 7]],
'drinks':[[2,3,4,5,6], [2, 3]],
'checkout':[[10], [2, 3]],
}
# grid of supermarket map for calculating customer path
GRID = np.array([
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1],
[1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1],
[1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1],
[1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1],
[1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1],
[1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
])
# possible moves of a customer
POSSIBLE_MOVES = [(0,1),(0,-1),(1,0),(-1,0),(1,1),(1,-1),(-1,1),(-1,-1)]
def __init__(self, name, state, supermarket):
"""
name : str
state : str : one of STATE
supermarket : a supermarket object
"""
self.name = name
self.state_before = state # starting state
self.state_after = state
self.row_before = 10 # starting row
self.col_before = 14 # starting column
self.row_after = 10
self.col_after = 14
self.path = [] # path between start and after; row,col, calculated with a* algorithm
self.path_row_col = [] # row, col on the path in 1second resolution
self.supermarket = supermarket # Supermarket instance
        marketmap = SupermarketMap()  # instantiate the market map once
        self.supermarketmap = marketmap  # SupermarketMap instance
        self.avatar = marketmap.extract_tile(7, 2)  # a numpy array containing a 32x32 tile image
    def __repr__(self):
        return f'<Customer {self.name} in {self.state_after}>'
def is_active(self):
"""returns True if the customer has not reached the checkout yet."""
return self.state_after != 'checkout'
def next_state_rowcol(self):
"""update state, row, col before and after state transition.
"""
# state before and after propagation
self.state_before = self.state_after
transition_probs = list(Customer.TPM.loc[Customer.TPM.index==self.state_before].values[0])
self.state_after = random.choices(Customer.STATES, weights=transition_probs)[0]
# row and col before and after propagation
self.row_before = self.row_after
self.col_before = self.col_after
        # randomly choose row_after, col_after depending on the state_after
        # ('entrance' is the start state only, so its coordinates are never re-drawn)
        if self.state_after != 'entrance':
            rows, cols = Customer.STATE_ROW_COL[self.state_after]
            self.row_after = random.choice(rows)
            self.col_after = random.choice(cols)
def path_between_states(self):
"""calculate path between row,col before and after state transition."""
start_given = (self.row_before, self.col_before) # row, col before state transition
finish_given = (self.row_after, self.col_after) # row, col after state transition
# find_path based on a* algorithm
path = find_path(Customer.GRID, start_given, finish_given, Customer.POSSIBLE_MOVES)
# if empty path fillin values to enable next step interpolation into 1s resolution
if start_given == finish_given:
path = [(self.row_before, self.col_before), (self.row_after, self.col_after)]
self.path = path
def draw_sec(self, frame, i_sec):
"""draw customer on i-th second of the path"""
if self in self.supermarket.customers:
row_i = self.path_row_col[i_sec,0]
col_i = self.path_row_col[i_sec,1]
if self.supermarketmap.contents[row_i][col_i] == '.':
x = col_i * constants.TILE_SIZE
y = row_i * constants.TILE_SIZE
frame[y:y+constants.TILE_SIZE, x:x+constants.TILE_SIZE] = self.avatar
        # to do: avoid overlapping customers
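
if __name__ == '__main__':
    # A minimal driver sketch (assumes the repo assets `tpm.csv` and the map
    # tiles are available; the stub below only provides the `customers` list
    # that draw_sec() expects from a real Supermarket object):
    class _StubSupermarket:
        def __init__(self):
            self.customers = []

    customer = Customer('Ada', 'entrance', _StubSupermarket())
    while customer.is_active():
        customer.next_state_rowcol()
        customer.path_between_states()
    print(customer)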
from pathlib import Path
__version__ = '0.2.1'
TOOL_DIR = Path('~/.proteotools_software').expanduser()
COMET = TOOL_DIR / 'comet' / 'comet.linux.exe'
MSGF = TOOL_DIR / 'msgfplus' / 'MSGFPlus.jar'
TANDEM = TOOL_DIR / 'tandem' / 'bin' / 'static_link_ubuntu' / 'tandem.exe'
TPP = TOOL_DIR / 'tpp' / 'tpp_6-0-0.sif'
THERMORAWFILEPARSER = TOOL_DIR / 'ThermoRawFileParser' / 'ThermoRawFileParser.exe'
PROTEOWIZARD = TOOL_DIR / 'proteowizard' / 'proteowizard'
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-04-17 22:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('fms', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Requirement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('requirement_city', models.CharField(blank=True, max_length=35, null=True)),
('shipment_date', models.DateField(blank=True, null=True)),
('from_city', models.CharField(blank=True, max_length=35, null=True)),
('tonnage', models.CharField(blank=True, max_length=35, null=True)),
('no_of_vehicles', models.CharField(blank=True, max_length=35, null=True)),
('to_city', models.CharField(blank=True, max_length=35, null=True)),
('material', models.CharField(blank=True, max_length=35, null=True)),
('type_of_truck', models.CharField(blank=True, max_length=35, null=True)),
('rate', models.CharField(blank=True, max_length=35, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('deleted', models.BooleanField(default=False)),
('deleted_on', models.DateTimeField(blank=True, null=True)),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='requirement', to=settings.AUTH_USER_MODEL)),
],
),
]
from azfs.cli.constants import WELCOME_PROMPT
from click.testing import CliRunner
from azfs.cli import cmd
def test_cmd():
result = CliRunner().invoke(cmd)
# result.stdout
assert result.stdout == f"{WELCOME_PROMPT}\n"
# -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'Fraunhofer IDMT'
# imports
import torch
from nn_modules import cls_fe_nnct, cls_basic_conv1ds, cls_fe_sinc, cls_embedder
def build_frontend_model(flag, device='cpu:0', exp_settings={}):
if exp_settings['use_sinc']:
print('--- Building Sinc Model ---')
analysis = cls_fe_sinc.SincAnalysisSmooth(in_size=exp_settings['ft_size'],
out_size=exp_settings['ft_size_space'],
hop_size=exp_settings['hop_size'], exp_settings=exp_settings)
elif exp_settings['use_rand_enc']:
print('--- Building Simple Random Conv1D Encoder ---')
analysis = cls_basic_conv1ds.ConvEncoder(in_size=exp_settings['ft_size'],
out_size=exp_settings['ft_size_space'],
hop_size=exp_settings['hop_size'], exp_settings=exp_settings)
else:
print('--- Building Cosine Model ---')
analysis = cls_fe_nnct.AnalysiSmooth(in_size=exp_settings['ft_size'], out_size=exp_settings['ft_size_space'],
hop_size=exp_settings['hop_size'], exp_settings=exp_settings)
if exp_settings['use_simple_conv_dec']:
print('--- Building Simple Random Conv1D Decoder ---')
synthesis = cls_basic_conv1ds.ConvDecoder(ft_size=exp_settings['ft_size_space'],
kernel_size=exp_settings['ft_syn_size'],
hop_size=exp_settings['hop_size'], exp_settings=exp_settings)
else:
print('--- Building cosine-based decoder ---')
synthesis = cls_fe_nnct.Synthesis(ft_size=exp_settings['ft_size_space'],
kernel_size=exp_settings['ft_syn_size'],
hop_size=exp_settings['hop_size'], exp_settings=exp_settings)
if flag == 'testing':
print('--- Loading Model ---')
analysis.load_state_dict(torch.load('results/analysis_' + exp_settings['exp_id'] + '.pytorch',
map_location={'cuda:1': device}))
synthesis.load_state_dict(torch.load('results/synthesis_' + exp_settings['exp_id'] + '.pytorch',
map_location={'cuda:1': device}))
tot_params = sum(p.numel() for p in analysis.parameters() if p.requires_grad) +\
sum(p.numel() for p in synthesis.parameters() if p.requires_grad)
print('Total Number of Parameters: %i' % tot_params)
if torch.has_cuda:
analysis = analysis.cuda()
synthesis = synthesis.cuda()
return analysis, synthesis
def build_mc_synthesis(flag, device='cuda:0', exp_settings={}, sep='save_id'):
synthesis = cls_fe_nnct.Synthesis2C2S(ft_size=exp_settings['ft_size_space'],
kernel_size=exp_settings['ft_syn_size'],
hop_size=exp_settings['hop_size'], exp_settings=exp_settings)
if flag == 'testing':
print('--- Loading Model ---')
synthesis.load_state_dict(torch.load('results/mc_synthesis_' + sep + exp_settings['exp_id'] + '_100_.pytorch',
map_location={'cuda:1': device}))
tot_params = sum(p.numel() for p in synthesis.parameters() if p.requires_grad)
print('Total Number of Parameters: %i' % tot_params)
if torch.has_cuda:
synthesis = synthesis.cuda()
return synthesis
def build_discriminator(flag, device='cpu:0', exp_settings={}):
emd_function = cls_embedder.Embedder(exp_settings=exp_settings)
if flag == 'testing':
print('--- Loading Previous State ---')
emd_function.load_state_dict(torch.load('results/disc_' + exp_settings['exp_id'] + '.pytorch',
map_location={'cuda:1': device}))
if torch.has_cuda:
emd_function = emd_function.cuda()
return emd_function
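# Usage sketch (illustrative): exp_settings is a plain dict; the keys below
# mirror the ones accessed above, but the values are hypothetical.
#
#   settings = {'use_sinc': False, 'use_rand_enc': False, 'use_simple_conv_dec': False,
#               'ft_size': 1024, 'ft_size_space': 512, 'ft_syn_size': 2048,
#               'hop_size': 256, 'exp_id': 'demo'}
#   analysis, synthesis = build_frontend_model('training', exp_settings=settings)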
# EOF
|
nilq/baby-python
|
python
|
import collections
import heapq
import json
from typing import List, Optional
from binarytree import Node
def twoSum(nums, target):
compliment_set = collections.defaultdict(int)
for i, number in enumerate(nums):
compliment = target - number
if compliment in compliment_set:
return [compliment_set[compliment], i]
compliment_set[number] = i
return []
def maxProfit(prices: List[int]) -> int:
min_so_far = prices[0]
max_profit = 0
for i, price in enumerate(prices):
max_profit = max(max_profit, price - min_so_far)
min_so_far = min(min_so_far, price)
return max_profit
def containsDuplicate(nums: List[int]) -> bool:
value_set = set()
for num in nums:
if num in value_set:
return True
value_set.add(num)
return False
def productExceptSelf(nums: List[int]) -> List[int]:
prefix = [0 for _ in range(len(nums))]
postfix = [0 for _ in range(len(nums))]
for i, num in enumerate(nums):
if i == 0:
prefix[i] = num
else:
prefix[i] = prefix[i - 1] * num
for j in range(len(nums) - 1, -1, -1):
num = nums[j]
if j == len(nums) - 1:
postfix[j] = num
else:
postfix[j] = postfix[j + 1] * num
result = [0 for _ in range(len(nums))]
for i in range(len(result)):
if i == 0:
result[i] = 1 * postfix[i + 1]
elif i == len(result) - 1:
result[i] = 1 * prefix[i - 1]
else:
result[i] = prefix[i - 1] * postfix[i + 1]
return result
def maxSubArray(nums: List[int]) -> int:
if not nums:
return 0
max_at_position = nums[0]
result = nums[0]
for i in range(1, len(nums)):
num = nums[i]
max_at_position = max(num, num + max_at_position)
result = max(result, max_at_position)
return result
def maxProduct(nums: List[int]) -> int:
if not nums:
return 0
max_product_at_position = nums[0]
min_product_at_position = nums[0]
result = nums[0]
for i in range(1, len(nums)):
num = nums[i]
max_product_at_position, min_product_at_position = max(num, num * max_product_at_position,
num * min_product_at_position), min(num,
num * max_product_at_position,
num * min_product_at_position)
result = max(result, max_product_at_position)
return result
def findMin(nums: List[int]) -> int:
left = 0
right = len(nums) - 1
result = float('inf')
while left <= right:
if nums[left] < nums[right]:
result = min(result, nums[left])
break
middle_index = (left + right) // 2
result = min(result, nums[middle_index])
if nums[middle_index] >= nums[left]:
left = middle_index + 1
else:
right = middle_index - 1
return result
def search(nums: List[int], target: int) -> int:
    left = 0
    right = len(nums) - 1
    while left <= right:
        middle_index = (left + right) // 2
        middle_value = nums[middle_index]
        if middle_value == target:
            return middle_index
        if nums[left] <= middle_value:
            # left half is sorted; binary-search it only if target lies inside
            if nums[left] <= target < middle_value:
                right = middle_index - 1
            else:
                left = middle_index + 1
        else:
            # right half is sorted
            if middle_value < target <= nums[right]:
                left = middle_index + 1
            else:
                right = middle_index - 1
    return -1
def threeSum(nums: List[int]) -> List[List[int]]:
nums.sort()
result = []
def two_sum(numbers, target):
compliment_set = set()
i = 0
while i < len(numbers):
number = numbers[i]
compliment = target - number
if compliment in compliment_set:
result.append([-target, compliment, number])
while i != len(numbers) - 1 and number == numbers[i + 1]:
i += 1
compliment_set.add(number)
i += 1
for i, num in enumerate(nums):
if i == 0 or num != nums[i - 1]:
two_sum(nums[i + 1:], -num)
return result
def maxArea(height: List[int]) -> int:
max_area = float('-inf')
pointer_start = 0
pointer_end = len(height) - 1
while pointer_end > pointer_start:
max_area = max(min(height[pointer_start], height[pointer_end]) * (pointer_end - pointer_start), max_area)
if height[pointer_start] < height[pointer_end]:
pointer_start += 1
else:
pointer_end -= 1
return max_area
def lengthOfLongestSubstring(s: str) -> int:
pointer_start = 0
character_set = set()
result = 0
for pointer_end, character in enumerate(s):
while character in character_set:
character_set.remove(s[pointer_start])
pointer_start += 1
character_set.add(character)
result = max(pointer_end - pointer_start + 1, result)
return result
def characterReplacement(s: str, k: int) -> int:
character_set = set(s)
result = 0
for character in character_set:
pointer_start = 0
flipped_chars = 0
for pointer_end, read_character in enumerate(s):
while flipped_chars == k and read_character != character:
if s[pointer_start] != character:
flipped_chars -= 1
pointer_start += 1
if read_character != character:
flipped_chars += 1
result = max(result, pointer_end - pointer_start + 1)
return result
def minWindow(s: str, t: str) -> str:
start_pointer = 0
valid = False
t_character_set = collections.defaultdict(int)
s_character_set = collections.defaultdict(int)
result = ''
min_window = float('inf')
for character in t:
t_character_set[character] += 1
def check_valid():
if len(t_character_set) == len(s_character_set):
for key, value in s_character_set.items():
if value < t_character_set[key]:
return False
return True
else:
return False
for end_pointer, character in enumerate(s):
if character in t_character_set:
s_character_set[character] += 1
if check_valid():
valid = True
while valid:
if end_pointer - start_pointer + 1 < min_window:
result = s[start_pointer:end_pointer + 1]
min_window = len(result)
if s[start_pointer] in s_character_set:
s_character_set[s[start_pointer]] -= 1
if s_character_set[s[start_pointer]] == 0:
del s_character_set[s[start_pointer]]
valid = False
start_pointer += 1
return result
def isAnagram(s: str, t: str) -> bool:
s_count_dict = collections.defaultdict(int)
for character in s:
s_count_dict[character] += 1
for character in t:
if character not in s_count_dict:
return False
s_count_dict[character] -= 1
if s_count_dict[character] == 0:
del s_count_dict[character]
return not s_count_dict
def group_anagrams(strs):
result = collections.defaultdict(list)
for word in strs:
temp = [0 for _ in range(26)]
for letter in word:
temp[ord(letter) - ord('a')] += 1
result[tuple(temp)].append(word)
return result.values()
def isPalindrome(s: str) -> bool:
pointer_start = 0
pointer_end = len(s) - 1
while pointer_start < pointer_end:
        if not s[pointer_start].isalnum():
            pointer_start += 1
        elif not s[pointer_end].isalnum():
            pointer_end -= 1
elif s[pointer_start].lower() != s[pointer_end].lower():
return False
else:
pointer_start += 1
pointer_end -= 1
return True
def isValid(s: str) -> bool:
square = 0
paren = 0
curly = 0
for character in s:
if character == '}':
if not curly:
return False
curly -= 1
elif character == '{':
curly += 1
elif character == ']':
if not square:
return False
square -= 1
elif character == '[':
square += 1
elif character == ')':
if not paren:
return False
paren -= 1
elif character == '(':
paren += 1
else:
return False
return not square and not paren and not curly
def setZeroes(matrix: List[List[int]]) -> None:
zeros_x = set()
zeros_y = set()
for x, row in enumerate(matrix):
for y, value in enumerate(row):
if value == 0:
zeros_x.add(x)
zeros_y.add(y)
for x, row in enumerate(matrix):
for y, value in enumerate(row):
if x in zeros_x or y in zeros_y:
matrix[x][y] = 0
def spiralOrder(matrix: List[List[int]]) -> List[int]:
directions = [[0, 1], [1, 0], [0, -1], [-1, 0]]
direction_pointer = 0
result = []
position = [0, 0]
def get_next_position(x, y):
nonlocal direction_pointer
for i in range(len(directions)):
x_direction, y_direction = directions[(i + direction_pointer) % len(directions)]
x_target, y_target = x + x_direction, y + y_direction
if 0 <= x_target < len(matrix) and 0 <= y_target < len(matrix[0]) and matrix[x_target][
y_target] is not None:
direction_pointer = (i + direction_pointer) % len(directions)
return [x_target, y_target]
return []
while position:
x, y = position
result.append(matrix[x][y])
matrix[x][y] = None
position = get_next_position(x, y)
return result
def exist(board: List[List[str]], word: str) -> bool:
directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]
def yield_valid_direction(x, y, letter):
for x_direction, y_direction in directions:
x_target, y_target = x + x_direction, y + y_direction
if 0 <= x_target < len(board) and 0 <= y_target < len(board[0]):
if board[x_target][y_target] == letter:
yield x_target, y_target
def traverse(x, y, word_remaining):
if len(word_remaining) == 1:
return True
board[x][y], temp = None, board[x][y]
for x_direction, y_direction in yield_valid_direction(x, y, word_remaining[1]):
if traverse(x_direction, y_direction, word_remaining[1:]):
return True
board[x][y] = temp
return False
for x, row in enumerate(board):
for y, value in enumerate(row):
if value == word[0]:
if traverse(x, y, word):
return True
return False
def climb_stairs_recursive(n: int) -> int:
if n == 1:
return 1
elif n == 2:
return 2
return climb_stairs_recursive(n - 1) + climb_stairs_recursive(n - 2)
def climb_stairs_memoization(n):
    memo = {}
    def climb_stairs_recursive(n):
        if n not in memo:
            if n == 1:
                memo[n] = 1
            elif n == 2:
                memo[n] = 2
            else:
                memo[n] = climb_stairs_recursive(n - 1) + climb_stairs_recursive(n - 2)
        return memo[n]
    return climb_stairs_recursive(n)
def climb_stairs_bottom_up(n):
result = [0 for _ in range(0, n + 1)]
for i in range(1, n + 1):
if i == 1:
result[i] = 1
elif i == 2:
result[i] = 2
else:
result[i] = result[i - 1] + result[i - 2]
return result[-1]
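# Illustrative sanity check: all three variants compute the same Fibonacci-style
# recurrence, e.g.
#   climb_stairs_recursive(10) == climb_stairs_memoization(10) == climb_stairs_bottom_up(10) == 89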
"""
Base case of recursion is no amount remaining.
Return the number of coins as solution.
Else return min of iterative approach.
"""
def coin_change(coins: List[int], amount: int) -> int:
def coin_change_recursive(coins, num_coins, amount_remaining):
solution = float('inf')
if amount_remaining == 0:
return num_coins
for coin in coins:
if amount_remaining - coin >= 0:
solution = min(coin_change_recursive(coins, num_coins + 1, amount_remaining - coin), solution)
return solution
result = coin_change_recursive(coins, 0, amount)
if result == float('inf'):
return -1
return result
def coin_change_memoization(coins, amount):
memo = {}
def coin_change_recursive(number_of_coins, amount_remaining):
if amount_remaining not in memo or memo[amount_remaining] > number_of_coins:
memo[amount_remaining] = number_of_coins
for coin in coins:
if amount_remaining - coin >= 0:
coin_change_recursive(number_of_coins + 1, amount_remaining - coin)
coin_change_recursive(0, amount)
if 0 not in memo:
return -1
return memo[0]
def coin_change_iterative(coins, amount):
result = [float('inf') for _ in range(amount + 1)]
result[0] = 0
for coin in coins:
for x in range(coin, amount + 1):
result[x] = min(result[x], result[x - coin] + 1)
if amount == 0:
return amount
if result[-1] == float('inf'):
return -1
return int(result[-1])
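# Illustrative sanity check: the three coin-change implementations agree, e.g.
# for coins [1, 2, 5] and amount 11 each returns 3 (5 + 5 + 1).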
def maxDepth(root: Optional[Node]) -> int:
def traverse(node):
if not node:
return 0
return max(traverse(node.left), traverse(node.right)) + 1
return traverse(root)
def same_tree(p, q):
if not p and not q:
return True
if (p and not q) or (q and not p) or p.val != q.val:
return False
return same_tree(p.left, q.left) and same_tree(p.right, q.right)
def invertTree(root: Optional[Node]) -> Optional[Node]:
if root:
root.left, root.right = invertTree(root.right), invertTree(root.left)
return root
def maxPathSum(root: Optional[Node]) -> int:
    result = float('-inf')  # supports trees whose values are all negative
def traverse(node):
if node:
nonlocal result
left = traverse(node.left)
right = traverse(node.right)
result = max(result, left + right + node.val, node.val, left + node.val, right + node.val)
return max(left + node.val, right + node.val, node.val)
return 0
traverse(root)
return result
def levelOrder(root):
result = []
queue = collections.deque([[0, root]])
if not root:
return result
while queue:
level, node = queue.popleft()
if level == len(result):
result.append([node.val])
else:
result[level].append(node.val)
if node.left:
queue.append([level + 1, node.left])
if node.right:
queue.append([level + 1, node.right])
return result
class TreeCodec:
def serialize(self, root):
def traverse(node):
result = []
if node:
result.append(node.val)
result.extend(traverse(node.left))
result.extend(traverse(node.right))
return result
return [None]
return json.dumps({'traversal': traverse(root)})
def deserialize(self, data):
traversal = collections.deque(json.loads(data)['traversal'])
def rebuild():
if traversal[0] is None:
return traversal.popleft()
node = Node(traversal.popleft())
node.left = rebuild()
node.right = rebuild()
return node
return rebuild()
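# Usage sketch (illustrative) for TreeCodec, using binarytree.Node as imported above:
#   codec = TreeCodec()
#   root = Node(1); root.left = Node(2); root.right = Node(3)
#   rebuilt = codec.deserialize(codec.serialize(root))  # structurally equal to root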
def isSubtree(root: Optional[Node], subRoot: Optional[Node]) -> bool:
def is_same(node_a, node_b):
if (node_b and not node_a) or (node_a and not node_b):
return False
if node_b and node_a:
return node_a.val == node_b.val and is_same(node_a.left, node_b.left) and is_same(node_a.right,
node_b.right)
return True
def traverse(node):
if node:
if node.val == subRoot.val:
if is_same(node, subRoot):
return True
return traverse(node.left) or traverse(node.right)
return False
return traverse(root)
def buildTree(preorder: List[int], inorder: List[int]) -> Optional[Node]:
index_mapping = {value: i for i, value in enumerate(inorder)}
preorder = collections.deque(preorder)
def traverse(left, right):
if left <= right:
node = Node(preorder.popleft())
node.left = traverse(left, index_mapping[node.val] - 1)
node.right = traverse(index_mapping[node.val] + 1, right)
return node
return traverse(0, len(preorder) - 1)
def isValidBST(root: Optional[Node]) -> bool:
def traverse(node, low, high):
if node:
if node.val <= low or node.val >= high:
return False
return traverse(node.left, low, node.val) and traverse(node.right, node.val, high)
return True
return traverse(root, float('-inf'), float('inf'))
def kthSmallest(root: Optional[Node], k: int) -> int:
counter = 0
def traverse(node):
nonlocal counter
if node:
left = traverse(node.left)
if left is not None:
return left
counter += 1
if counter == k:
return node.val
right = traverse(node.right)
if right is not None:
return right
return None
return traverse(root)
def lowestCommonAncestor(root: Node, p: Node, q: Node) -> Node:
def traverse(node):
if node:
if node == p or node == q:
return node
left = traverse(node.left)
right = traverse(node.right)
if left and right:
return node
return left or right
return traverse(root)
class TrieNode:
def __init__(self):
self.word = None
self.children = collections.defaultdict(TrieNode)
class Trie:
def __init__(self):
self.head = TrieNode()
def insert(self, word: str) -> None:
def recursive_insert(node, word_remaining):
if not word_remaining:
node.word = word
else:
letter = word_remaining[0]
if letter not in node.children:
node.children[letter] = TrieNode()
recursive_insert(node.children[letter], word_remaining[1:])
recursive_insert(self.head, word)
def search(self, word: str) -> bool:
def recursive_search(node, word_remaining):
if not word_remaining:
return node.word is not None
else:
letter = word_remaining[0]
if letter not in node.children:
return False
return recursive_search(node.children[letter], word_remaining[1:])
return recursive_search(self.head, word)
def startsWith(self, prefix: str) -> bool:
def recursive_mode(node, word_remaining):
if not word_remaining:
return True
letter = word_remaining[0]
if letter not in node.children:
return False
return recursive_mode(node.children[letter], word_remaining[1:])
return recursive_mode(self.head, prefix)
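# Usage sketch (illustrative) for Trie:
#   trie = Trie()
#   trie.insert('cat')
#   trie.search('cat')      # True
#   trie.search('ca')       # False -- 'ca' was never inserted as a full word
#   trie.startsWith('ca')   # True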
class WordDictionaryNode:
def __init__(self):
self.word = None
self.children = collections.defaultdict(WordDictionaryNode)
class WordDictionary:
def __init__(self):
self.head = WordDictionaryNode()
def addWord(self, word: str) -> None:
def recursive_add(node, word_remaining):
if not word_remaining:
node.word = word
else:
letter = word_remaining[0]
if letter not in node.children:
node.children[letter] = WordDictionaryNode()
recursive_add(node.children[letter], word_remaining[1:])
recursive_add(self.head, word)
def search(self, word: str) -> bool:
def recursive_search(node, word_remaining):
if not word_remaining:
return node.word is not None
else:
letter = word_remaining[0]
if letter == '.':
return any([recursive_search(x, word_remaining[1:]) for x in node.children.values()])
elif letter in node.children:
return recursive_search(node.children[letter], word_remaining[1:])
return False
return recursive_search(self.head, word)
class TrieNode:
def __init__(self, word=None):
self.word = word
self.children = collections.defaultdict(TrieNode)
class Trie:
def __init__(self):
self.head = TrieNode()
def add_word(self, word):
def recurse_add(node, word_remaining):
if not word_remaining:
node.word = word
else:
if word_remaining[0] not in node.children:
node.children[word_remaining[0]] = TrieNode()
recurse_add(node.children[word_remaining[0]], word_remaining[1:])
recurse_add(self.head, word)
def traverse_position(self, board, x, y):
directions = [[-1, 0], [1, 0], [0, 1], [0, -1]]
def recursive_traverse(node, x, y, visited):
result = []
if node.word:
result.append(node.word)
node.word = None
for x_direction, y_direction in directions:
x_target, y_target = x + x_direction, y + y_direction
if 0 <= x_target < len(board) and 0 <= y_target < len(board[0]):
letter = board[x_target][y_target]
if letter in node.children and (x_target, y_target) not in visited:
child_results, delete_child = recursive_traverse(node.children[letter], x_target, y_target,
visited | {(x_target, y_target)})
result.extend(child_results)
if delete_child:
del node.children[letter]
if not node.word and not node.children:
return result, True
return result, False
letter = board[x][y]
result = []
if letter in self.head.children:
result, delete_child = recursive_traverse(self.head.children[letter], x, y, {(x, y)})
if delete_child:
del self.head.children[letter]
return result
def findWords(board: List[List[str]], words: List[str]) -> List[str]:
trie = Trie()
for word in words:
trie.add_word(word)
result = []
for x, row in enumerate(board):
for y, value in enumerate(row):
result.extend(trie.traverse_position(board, x, y))
return result
class ListNode:
def __init__(self, value=None):
self.value = value
self.next = None
self.previous = None
def print_list(head: ListNode):
result = []
while head:
result.append(head.value)
head = head.next
print(result)
def reverseList(head: Optional[ListNode]) -> Optional[ListNode]:
dummy_head = None
while head:
head.next, head, dummy_head = dummy_head, head.next, head
return dummy_head
def hasCycle(head: Optional[ListNode]) -> bool:
if not head:
return False
slow, head = head, head.next
while head and head.next:
if head == slow:
return True
slow = slow.next
head = head.next.next
return False
def mergeTwoLists(list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
result = ListNode()
dummy_head = result
while list1 and list2:
if list1.value < list2.value:
result.next, result, list1 = list1, list1, list1.next
else:
result.next, result, list2 = list2, list2, list2.next
if list1:
result.next = list1
if list2:
result.next = list2
return dummy_head.next
def mergeKLists(lists: List[Optional[ListNode]]) -> Optional[ListNode]:
    heap = []
    # Include the list index as a tie-breaker so equal values never force a
    # comparison between ListNode objects (which is undefined); also skip
    # empty lists so None heads never reach the heap.
    for i, node_head in enumerate(lists):
        if node_head:
            heapq.heappush(heap, (node_head.value, i, node_head))
    dummy_head = ListNode()
    result = dummy_head
    while heap:
        value, i, node_head = heapq.heappop(heap)
        result.next, result, node_head = node_head, node_head, node_head.next
        if node_head:
            heapq.heappush(heap, (node_head.value, i, node_head))
    return dummy_head.next
def removeNthFromEnd(head: Optional[ListNode], n: int) -> Optional[ListNode]:
dummy_head = ListNode()
dummy_head.next = head
cursor = dummy_head
for _ in range(n):
if not head:
return dummy_head.next
head = head.next
while head:
head = head.next
cursor = cursor.next
cursor.next = cursor.next.next
return dummy_head.next
def reorderList(head: Optional[ListNode]) -> None:
def get_mid_node_and_index(node):
fast_pointer = node
while fast_pointer and fast_pointer.next:
node = node.next
fast_pointer = fast_pointer.next.next
return node
dummy_head = ListNode()
dummy_head.next = head
stack = []
middle_node = get_mid_node_and_index(head)
while middle_node:
stack.append(middle_node)
middle_node = middle_node.next
while stack and head.next:
head.next, head.next.next = stack.pop(), head.next
head = head.next.next
head.next = None
class Node:
def __init__(self, val=0, neighbors=None):
self.val = val
self.neighbors = neighbors if neighbors is not None else []
def cloneGraph(node: Node) -> Node:
node_map = collections.defaultdict(Node)
def recursive_build_map(node):
node_map[node] = Node(node.val)
for adjacent in node.neighbors:
if adjacent not in node_map:
recursive_build_map(adjacent)
recursive_build_map(node)
visited_set = {node}
def recursive_link_nodes(node):
new_node = node_map[node]
new_node.neighbors = [node_map[x] for x in node.neighbors]
for adjacent in node.neighbors:
if adjacent not in visited_set:
visited_set.add(adjacent)
recursive_link_nodes(adjacent)
recursive_link_nodes(node)
return node_map[node]
def canFinish(numCourses: int, prerequisites: List[List[int]]) -> bool:
def get_graph():
graph = collections.defaultdict(list)
in_degree = {i: 0 for i in range(numCourses)}
for destination, origin in prerequisites:
graph[origin].append(destination)
in_degree[destination] += 1
return graph, in_degree
graph, in_degree = get_graph()
queue = collections.deque([])
visited = set()
for key, value in in_degree.items():
if value == 0:
queue.append(key)
visited.add(key)
while queue:
node_id = queue.popleft()
for adjacent in graph[node_id]:
if adjacent not in visited:
in_degree[adjacent] -= 1
if in_degree[adjacent] == 0:
visited.add(adjacent)
queue.append(adjacent)
return len(visited) == numCourses
def numIslands(grid: List[List[str]]) -> int:
directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]
def yield_valid_directions(x, y):
for x_direction, y_direction in directions:
x_target, y_target = x + x_direction, y + y_direction
if 0 <= x_target < len(grid) and 0 <= y_target < len(grid[0]):
if grid[x_target][y_target] == "1":
yield x_target, y_target
def traverse(x, y):
for x_direction, y_direction in yield_valid_directions(x, y):
grid[x_direction][y_direction] = 0
traverse(x_direction, y_direction)
result = 0
for x, row in enumerate(grid):
for y, value in enumerate(row):
if value == "1":
result += 1
grid[x][y] = 0
traverse(x, y)
return result
|
nilq/baby-python
|
python
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
import numpy as np
def test(model, imgL,imgR,disp_true):
model.eval()
imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_true.cuda()
#---------
mask = disp_true < 192
#----
if imgL.shape[2] % 16 != 0:
times = imgL.shape[2]//16
top_pad = (times+1)*16 -imgL.shape[2]
else:
top_pad = 0
if imgL.shape[3] % 16 != 0:
times = imgL.shape[3]//16
right_pad = (times+1)*16-imgL.shape[3]
else:
right_pad = 0
imgL = F.pad(imgL,(0,right_pad, top_pad,0))
imgR = F.pad(imgR,(0,right_pad, top_pad,0))
with torch.no_grad():
output3 = model(imgL,imgR)
output3 = torch.squeeze(output3)
if top_pad !=0:
img = output3[:,top_pad:,:]
else:
img = output3
    if len(disp_true[mask])==0:
        loss = torch.tensor(0.)  # keep the return type consistent when the mask is empty
    else:
        loss = torch.mean(torch.abs(img[mask]-disp_true[mask])) # end-point-error
    return loss.data.cpu(), img[mask]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(convbn(inplanes, planes, 3, stride, pad, dilation),
nn.ReLU(inplace=True))
self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
x = self.downsample(x)
out += x
return out
class hourglass(nn.Module):
def __init__(self, inplanes):
super(hourglass, self).__init__()
self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv2 = convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1)
self.conv3 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1),
nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
nn.BatchNorm3d(inplanes*2)) #+conv2
self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
nn.BatchNorm3d(inplanes)) #+x
def forward(self, x ,presqu, postsqu):
out = self.conv1(x) #in:1/4 out:1/8
pre = self.conv2(out) #in:1/8 out:1/8
if postsqu is not None:
pre = F.relu(pre + postsqu, inplace=True)
else:
pre = F.relu(pre, inplace=True)
out = self.conv3(pre) #in:1/8 out:1/16
out = self.conv4(out) #in:1/16 out:1/16
if presqu is not None:
post = F.relu(self.conv5(out)+presqu, inplace=True) #in:1/16 out:1/8
else:
post = F.relu(self.conv5(out)+pre, inplace=True)
out = self.conv6(post) #in:1/8 out:1/4
return out, pre, post
class disparityregression(nn.Module):
def __init__(self, maxdisp):
super(disparityregression, self).__init__()
self.disp = Variable(torch.Tensor(np.reshape(np.array(range(maxdisp)),[1,maxdisp,1,1])).cuda(), requires_grad=False)
def forward(self, x):
disp = self.disp.repeat(x.size()[0],1,x.size()[2],x.size()[3])
out = torch.sum(x*disp,1)
return out
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, padding=pad, stride=stride,bias=False),
nn.BatchNorm3d(out_planes))
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation = dilation, bias=False), \
nn.BatchNorm2d(out_planes))
|
nilq/baby-python
|
python
|
from datetime import datetime
from config import InputConfig
from .base import BaseDataLoader
from ..market import BaseMarket
from ..renderers import BaseRenderer
class BackTestDataLoader(BaseDataLoader):
def __init__(
self,
market: BaseMarket,
renderer: BaseRenderer,
input_config: InputConfig,
window_size_offset: int = 1,
):
super().__init__(
market,
renderer,
input_config,
window_size_offset
)
self.step = 0
def get_batch_size(self) -> int:
return 1
    def get_first_batch_start_datetime(self) -> datetime:
        # Stateful: each call advances self.step, so successive calls return
        # consecutive batch start datetimes.
        batch_start_datetime = self.input_config.start_datetime + self.step * self.input_config.data_frequency.timedelta
        self.step = self.step + 1
        return batch_start_datetime
|
nilq/baby-python
|
python
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000096"
addresses_name = "2020-02-03T10:27:29.701109/Democracy_Club__07May2020Dacorum.CSV"
stations_name = "2020-02-03T10:27:29.701109/Democracy_Club__07May2020Dacorum.CSV"
elections = ["2020-05-07"]
csv_delimiter = ","
def station_record_to_dict(self, record):
if record.polling_place_id == "1297":
record = record._replace(polling_place_easting="507211")
record = record._replace(polling_place_northing="204366")
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
if record.addressline6 in [
"AL3 8LR",
]:
return None
return rec
|
nilq/baby-python
|
python
|
from django.db.models import Q
from .constants import (
STOP_WORDS,
)
from .models import (
WORD_DOCUMENT_JOIN_STRING,
DocumentRecord,
TokenFieldIndex,
)
from .tokens import tokenize_content
def _tokenize_query_string(query_string):
"""
Returns a list of WordDocumentField keys to fetch
based on the query_string
"""
# We always lower case. Even Atom fields are case-insensitive
query_string = query_string.lower()
branches = query_string.split(" or ")
# Split into [(fieldname, query)] tuples for each branch
field_queries = [
tuple(x.split(":", 1)) if ":" in x else (None, x)
for x in branches
]
# Remove empty queries
field_queries = [x for x in field_queries if x[1].strip()]
# By this point, given the following query:
# pikachu OR name:charmander OR name:"Mew Two" OR "Mr Mime"
# we should have:
# [(None, "pikachu"), ("name", "charmander"), ("name", '"mew two"'), (None, '"mr mime"')]
# Note that exact matches will have quotes around them
result = [
[
"exact" if x[1][0] == '"' and x[1][-1] == '"' else "word",
x[0],
x[1].strip('"')
]
for x in field_queries
]
    # Expand
    # For non-exact matches, we may have multiple tokens separated by spaces that need
    # to be expanded into separate entries
start_length = len(result)
for i in range(start_length):
kind, field, content = result[i]
if kind == "exact":
continue
# Split on punctuation, remove double-spaces
content = tokenize_content(content)
content = [x.replace(" ", "") for x in content]
if len(content) == 1:
# Do nothing, this was a single token
continue
else:
# Replace this entry with the first token
result[i][-1] = content[0]
# Append the rest to result
for token in content[1:]:
result.append(("word", field, token))
# Remove empty entries, and stop-words and then tuple-ify
result = [
(kind, field, content)
for (kind, field, content) in result
if content and content not in STOP_WORDS
]
# Now we should have
# [
# ("word", None, "pikachu"), ("word", "name", "charmander"),
# ("exact", "name", 'mew two'), ("exact", None, 'mr mime')
# ]
return result
def _append_exact_word_filters(filters, prefix, field, string):
start = "%s%s%s" % (prefix, string, WORD_DOCUMENT_JOIN_STRING)
end = "%s%s%s%s" % (prefix, string, WORD_DOCUMENT_JOIN_STRING, chr(0x10FFFF))
if not field:
filters |= Q(pk__gte=start, pk__lt=end)
else:
filters |= Q(pk__gte=start, pk__lt=end, field_name=field)
return filters
def _append_startswith_word_filters(filters, prefix, field, string):
start = "%s%s" % (prefix, string)
end = "%s%s%s" % (prefix, string, chr(0x10FFFF))
if not field:
filters |= Q(pk__gte=start, pk__lt=end)
else:
filters |= Q(pk__gte=start, pk__lt=end, field_name=field)
return filters
def _append_stemming_word_filters(filters, prefix, field, string):
# FIXME: Implement
return filters
def build_document_queryset(
query_string, index,
use_stemming=False,
use_startswith=False,
):
assert(index.id)
tokenization = _tokenize_query_string(query_string)
if not tokenization:
return DocumentRecord.objects.none()
filters = Q()
# All queries need to prefix the index
prefix = "%s%s" % (str(index.id), WORD_DOCUMENT_JOIN_STRING)
for kind, field, string in tokenization:
if kind == "word":
filters = _append_exact_word_filters(filters, prefix, field, string)
if use_startswith:
filters = _append_startswith_word_filters(
filters, prefix, field, string
)
if use_stemming:
filters = _append_stemming_word_filters(
filters, prefix, field, string,
)
else:
raise NotImplementedError("Need to implement exact matching")
document_ids = set([
TokenFieldIndex.document_id_from_pk(x)
for x in TokenFieldIndex.objects.filter(filters).values_list("pk", flat=True)
])
return DocumentRecord.objects.filter(pk__in=document_ids)
|
nilq/baby-python
|
python
|
import torch
import suppixpool_CUDA as spx_gpu
import numpy as np
class SupPixPoolFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, img, spx):
spx = spx.to(torch.int)
K = spx.max()+1
assert(spx.size()[-2:]==img.size()[-2:])
# print(np.all(np.arange(K)==np.unique(spx.cpu().numpy())))
# print "used K: ", K
out = spx_gpu.forward(img, spx, K)
outputs, indices = out
# print("(max, min) indices: ", indices.max(), indices.min())
# print("number of -1: ", indices.eq(-1).sum())
# print indices
# assert np.all(indices.cpu().numpy()>=0)
ctx.save_for_backward(indices, img, spx, K)
return outputs
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
indices, img, spx, K = ctx.saved_tensors
grad_input, = spx_gpu.backward(grad_output.contiguous(), img, spx, indices, K)
return grad_input, torch.zeros_like(spx)
class SupPixPool(torch.nn.Module):
def __init__(self):
super(SupPixPool, self).__init__()
def forward(self, img, spx):
return SupPixPoolFunction.apply(img, spx)
class SupPixUnpool(torch.nn.Module):
def __init__(self):
super(SupPixUnpool, self).__init__()
def forward(self, pooled, spx):
outShape = pooled.size()[0:2]+spx.size()[-2:]
out = pooled.new_zeros(outShape)
        for batch in range(pooled.size()[0]):
out[batch, :, :, :] = pooled[batch, :, spx[batch,:,:]]
return out
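# Usage sketch (illustrative; assumes the suppixpool_CUDA extension is built
# and a GPU is available):
#   pool, unpool = SupPixPool(), SupPixUnpool()
#   img = torch.randn(1, 3, 64, 64).cuda()
#   spx = torch.zeros(1, 64, 64, dtype=torch.long).cuda()  # a single superpixel
#   pooled = pool(img, spx)        # one pooled feature vector per superpixel
#   restored = unpool(pooled, spx) # features broadcast back to pixel positions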
|
nilq/baby-python
|
python
|
"""Illustrates more advanced features like inheritance, mutability,
and user-supplied constructors.
"""
from simplestruct import Struct, Field
# Default values on fields work exactly like default values for
# constructor arguments. This includes the restriction that
# a non-default argument cannot follow a default argument.
class AxisPoint(Struct):
x = Field(default=0)
y = Field(default=0)
print('==== Default values ====')
p1 = AxisPoint(x=2)
print(p1) # AxisPoint(x=2, y=0)
p2 = AxisPoint(y=3)
print(p2) # AxisPoint(x=0, y=3)
# Subclasses by default do not inherit fields, but this can
# be enabled with a class-level flag.
class Point2D(Struct):
x = Field
y = Field
class Point3D(Point2D):
_inherit_fields = True
z = Field
print('\n==== Inheritance ====')
p = Point3D(1, 2, 3)
print(p) # Point3D(x=1, y=2, z=3)
# The flag must be redefined on each subclass that wants to
# inherit fields.
# The list of fields can be programmatically accessed via the
# _struct attribute.
print(p._struct) # (<field object>, <field object>, <field object>)
print([f.name for f in p._struct]) # ['x', 'y', 'z']
# Equality does not hold on different types, even if they are
# in the same class hierarchy and share the same fields.
class Point3D_2(Point3D):
_inherit_fields = True
p2 = Point3D_2(1, 2, 3)
print(p == p2) # False
# Structs are immutable by default, but this can be disabled
# with a class-level flag.
class MutablePoint(Struct):
_immutable = False
x = Field
y = Field
print('\n==== Mutability ====')
p = Point2D(1, 2)
try:
p.x = 3
except AttributeError as e:
print(e)
p = MutablePoint(1, 2)
p.x = 3
print(p) # MutablePoint(3, 2)
# Mutable structs can't be hashed (analogous to Python lists, dicts, sets).
try:
hash(p)
except TypeError as e:
print(e)
# Like other classes, a Struct is free to define its own constructor.
# The arguments are the declared fields, in order of their declaration.
#
# Fields are initialized in __new__(). A subclass that overrides
# __new__() must call super.__new__() (not type.__new__()).
# __init__() does not need to call super().__init__() or do any work
# on behalf of the Struct system.
#
# If the fields have default values, these are substituted in before
# calling the constructor. Thus providing default parameter values
# in the constructor argument list is meaningless.
class DoublingVector2D(Struct):
x = Field
y = Field
def __new__(cls, x, y):
print('Vector2D.__new__() has been called')
return super().__new__(cls, x, y)
def __init__(self, x, y):
# There is no need to call super().__init__().
# The field values self.x and self.y have already been
# initialized by __new__().
# Before the call to __init__(), the instance attribute
# _initialized is set to False. It is changed to True
# once __init__() has finished executing. If there are
# multiple __init__() calls chained via super(), it is
# changed once the outermost call returns.
assert not self._initialized
# Despite the fact that this Struct is immutable, we
# are free to reassign fields until the flag is set.
# Likewise, we may not hash this instance until the
# flag is set.
self.x *= 2
self.y *= 2
try:
hash(self)
except TypeError as e:
print(e)
# We can create additional non-field attributes.
self.magnitude = (self.x**2 + self.y**2) ** .5
# Since magnitude is not declared as a field, it is not
# considered during equality comparison, hashing, pretty
# printing, etc. Non-field attributes are generally
# incidental to the value of the Struct, or else can be
# deterministically derived from the fields. They can
# be overwritten at any time, whether or not the Struct
# is immutable.
        # Alternatively, we could define magnitude as a @property,
# but then it would be recomputed each time it is used.
print('\n==== Custom constructor ====')
v = DoublingVector2D(1.5, 2)
print(v) # DoublingVector2D(x=3, y=4)
print(v.magnitude) # 5.0
|
nilq/baby-python
|
python
|
def main():
print()
print("Result = ((c + ~d) * b) * ~(d + a * e)")
print()
print_table_header()
for i in reversed(range(0, 2**5)):
print_row(i)
def print_table_header():
print("| a | b | c | d | e | Result |")
print("|-----|-----|-----|-----|-----|---------|")
def print_row(i):
a, b, c, d, e = list_from_int(i)
res = result(a, b, c, d, e)
    print(f'|  {a}  |  {b}  |  {c}  |  {d}  |  {e}  |    {res}    |')
def list_from_int(i):
return map(int, list('{:05b}'.format(i)))
def result(a, b, c, d, e):
    # Integer boolean algebra: '+' acts as OR, '*' as AND, 'not' as complement.
    return bool((c + (not d)) * b) * (not (d + a * e))
if __name__ == '__main__':
main()
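# Example of the first printed row (i = 31, i.e. a = b = c = d = e = 1; the
# expression evaluates to 0 because ~(d + a * e) is false):
# |  1  |  1  |  1  |  1  |  1  |    0    |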
|
nilq/baby-python
|
python
|
import tifffile
import h5py
import warnings
import os
TIFF_FORMATS = ['.tiff', '.tif']
H5_FORMATS = ['.h5', '.hdf']
LIF_FORMATS = ['.lif']
def read_tiff_voxel_size(file_path):
"""
Implemented based on information found in https://pypi.org/project/tifffile
"""
def _xy_voxel_size(tags, key):
assert key in ['XResolution', 'YResolution']
if key in tags:
num_pixels, units = tags[key].value
return units / num_pixels
# return default
return 1.
with tifffile.TiffFile(file_path) as tiff:
image_metadata = tiff.imagej_metadata
if image_metadata is not None:
z = image_metadata.get('spacing', 1.)
else:
# default voxel size
z = 1.
tags = tiff.pages[0].tags
# parse X, Y resolution
y = _xy_voxel_size(tags, 'YResolution')
x = _xy_voxel_size(tags, 'XResolution')
# return voxel size
return [z, y, x]
def read_h5_voxel_size_file(file_path, h5key):
with h5py.File(file_path, "r") as f:
return read_h5_voxel_size(f, h5key)
def read_h5_voxel_size(f, h5key):
ds = f[h5key]
# parse voxel_size
if 'element_size_um' in ds.attrs:
voxel_size = ds.attrs['element_size_um']
else:
        warnings.warn('Voxel size not found, returning default [1.0, 1.0, 1.0]', RuntimeWarning)
voxel_size = [1.0, 1.0, 1.0]
return voxel_size
def load_h5(path, key, slices=None, safe_mode=False):
with h5py.File(path, 'r') as f:
if key is None:
key = list(f.keys())[0]
if safe_mode and key not in list(f.keys()):
return None, (1, 1, 1)
if slices is None:
file = f[key][...]
else:
file = f[key][slices]
voxel_size = read_h5_voxel_size(f, key)
return file, voxel_size
def load_tiff(path):
file = tifffile.imread(path)
    try:
        voxel_size = read_tiff_voxel_size(path)
    except Exception:
        # e.g. a ZeroDivisionError can happen while reading the voxel size
        warnings.warn('Voxel size not found, returning default [1.0, 1.0, 1.0]', RuntimeWarning)
        voxel_size = [1.0, 1.0, 1.0]
return file, voxel_size
def load_lif(path):
    raise NotImplementedError
def smart_load(path, key=None, default=load_tiff):
_, ext = os.path.splitext(path)
if ext in H5_FORMATS:
return load_h5(path, key)
elif ext in TIFF_FORMATS:
return load_tiff(path)
elif ext in LIF_FORMATS:
return load_lif(path)
else:
print(f"No default found for {ext}, reverting to default loader")
return default(path)
def create_h5(path, stack, key, voxel_size=(1.0, 1.0, 1.0), mode='a'):
with h5py.File(path, mode) as f:
f.create_dataset(key, data=stack, compression='gzip')
# save voxel_size
f[key].attrs['element_size_um'] = voxel_size
def del_h5_key(path, key, mode='a'):
    with h5py.File(path, mode) as f:
        if key in f:
            del f[key]
def rename_h5_key(path, old_key, new_key, mode='r+'):
    ''' Rename the 'old_key' dataset to 'new_key' '''
    with h5py.File(path, mode) as f:
        if old_key in f:
            f[new_key] = f[old_key]
            del f[old_key]
def rename_h5_attr(path, key, old_attr, new_attr, mode='r+'):
''' Rename the attribute of dataset 'key' from 'old_attr' to 'new_attr' '''
with h5py.File(path, mode) as f:
pass
# http://api.h5py.org/h5a.html#h5py.h5a.rename
# h5py.h5a.rename(myfile.id, b"name", b"newname")
def create_tiff(path, stack, voxel_size):
# taken from: https://pypi.org/project/tifffile docs
z, y, x = stack.shape
stack.shape = 1, z, 1, y, x, 1 # dimensions in TZCYXS order
spacing, y, x = voxel_size
resolution = (1. / x, 1. / y)
# Save output results as tiff
tifffile.imsave(path,
data=stack,
dtype=stack.dtype,
imagej=True,
resolution=resolution,
metadata={'axes': 'TZCYXS', 'spacing': spacing, 'unit': 'um'})
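# Round-trip sketch (illustrative; the file names are hypothetical):
#   stack, voxel_size = smart_load('cells.tif')
#   create_h5('cells.h5', stack, key='raw', voxel_size=voxel_size)
#   create_tiff('cells_copy.tif', stack, voxel_size)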
|
nilq/baby-python
|
python
|
import os,sys
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, os.pardir))
sys.path.append(ROOT_DIR)
from analysis.pymo.parsers import BVHParser
from analysis.pymo.data import Joint, MocapData
from analysis.pymo.preprocessing import *
from analysis.pymo.viz_tools import *
from analysis.pymo.writers import *
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
#%%
p = BVHParser()
# f1="data/dance_full/shadermotion_justdance/bvh/justdance_0.bvh"
# f2="data/dance_full/kth_streetdance_data/bvh/Streetdance_001.bvh"
f1=sys.argv[1] #target file
scale=float(sys.argv[2]) #scale
output=sys.argv[3] #output file
# f2=sys.argv[2] #file from which to source the offsets of the skeleton (bone names and hierarchy should be the same)
# f2="/home/guillefix/code/mt-lightning/data/dance_full/kth_streetdance_data/bvh/Streetdance_001.bvh"
data1 = p.parse(f1)
# data2 = p.parse(f2)
data1.skeleton
for name, bone in data1.skeleton.items():
bone["offsets"] = [x*scale for x in bone["offsets"]]
data1.skeleton[name]=bone
data1.values["Hips_Xposition"] *= scale
data1.values["Hips_Yposition"] *= scale
data1.values["Hips_Zposition"] *= scale
writer = BVHWriter()
with open(output,'w') as f:
writer.write(data1, f)
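# Usage (command line; the script name here is hypothetical -- use whatever
# this file is saved as):
#   python scale_bvh.py input.bvh 0.01 output.bvh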
|
nilq/baby-python
|
python
|
import argparse
import numpy as np
import os
import matplotlib.pyplot as plt
import PIL.Image as Image
import torch
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn import decomposition
from scipy.sparse import csr_matrix
import torchvision
import torch.nn as nn
from torchvision import transforms
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import copy
from sklearn.datasets import fetch_openml
from torch.utils.data import TensorDataset
from IPython import display
device='cuda:0' if torch.cuda.is_available() else 'cpu'
torch.manual_seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--query_datapath', type=str, default = None)
parser.add_argument('--target_datapath', type=str, default = None)
parser.add_argument('--supervised_datapath', type=str, default = None) # this is actually 9k data
parser.add_argument('--supervised_labels', type=str, default = None) # this is actually 9k data
parser.add_argument('--testing_query_input', type=str, default = None) # this is actually 9k data
parser.add_argument('--output_testing_query_labels', type=str, default = None) # this is actually 9k data
parser.add_argument('--output_qt_labels', type=str, default = None) # this is actually 9k data
parser.add_argument('--output_classifier', type=str, default = None)
args=parser.parse_args()
# if not os.path.exists(args.savedir):
# os.makedirs(args.savedir)
# *******************************************************LOADING DATA******************************************************
X_target=np.load(args.target_datapath)
X_query=np.load(args.query_datapath)
X = np.concatenate((X_query, X_target))
# X = X_target
# oneshot_data=np.load(path+'sample_images.npy')
oneshot_data=np.load(args.supervised_datapath)
print('shape of oneshot_data', oneshot_data.shape)
#applying minibatch kmeans
X = -1*((X)/255. -1.) #for making it a sparse matrix
# X = (X)/255.
print('shape of X', X.shape)
X=X.reshape((-1,28*28)) #shape 640k, 784
x_oneshot = -1*(oneshot_data.reshape((-1, 28*28))/(255.) -1.) #shape 10, 784
# x_oneshot = oneshot_data.reshape((-1, 28*28))/(255.) #shape 10, 784
# X = np.concatenate((X, x_oneshot))
x_oneshot_target = x_oneshot #from 0th class to 8th class, 9th dropped as its no where in the images i THINK
# x_oneshot_target = x_oneshot[:-1] #from 0th class to 8th class, 9th dropped as its no where in the images i THINK
print('shape of X', X.shape)
print('shape of x_oneshot', x_oneshot.shape)
print('shape of x_oneshot_target', x_oneshot_target.shape)
print('X \n', X)
print('x_oneshot \n', x_oneshot)
print('x_oneshot_target \n', x_oneshot_target)
X = X.reshape(-1, 1, 28, 28)
print(X.shape)
class CustomTensorDataset_pair(Dataset):
"""TensorDataset with support of transforms.
"""
def __init__(self, tensors, transform=None):
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
self.tensors = tensors
self.transform = transform
def __getitem__(self, index):
x = self.tensors[0][index]
# print(x.shape)
if self.transform:
x = self.transform(x)
y = self.tensors[1][index]
return x, y
def __len__(self):
return self.tensors[0].size(0)
# we have supervised data (10) and unsuper vised data (1280000) which is X
# apply transformations on X
# X can be first shuffled
shuffler = np.random.permutation(X.shape[0])
X = X[shuffler]
X = torch.tensor(X)
# X = X[:9000]
X = X[:18000]
print('shape of X now after sampling for making final unsup data = ', X.shape)
#now sequentially select batches of X and apply transformations
# select transformations
# t0 = transforms.RandomApply()
t1 = transforms.RandomRotation(20)
# t2 = transforms.RandomCrop((28, 28), padding = 4)
t2 = transforms.RandomCrop((28, 28))
t3 = transforms.RandomPerspective()
trans = transforms.Compose([transforms.ToPILImage(), t1, t2, t3, transforms.ToTensor()])
unsup_dataset = CustomTensorDataset_pair(tensors = (X.float(), X), transform=trans)
unsup_train_loader = torch.utils.data.DataLoader(unsup_dataset, batch_size=180)
#making supervised dataset ---- unsupervised is already made above
sup_onsht_data = torch.tensor(x_oneshot_target.reshape(-1, 1, 28, 28))
# sup_onsht_labels = torch.tensor([i for i in range(9)])
sup_onsht_labels = torch.tensor(np.load(args.supervised_labels))
shuffler = np.random.permutation(sup_onsht_data.shape[0])
sup_onsht_data = sup_onsht_data[shuffler]
sup_onsht_labels = sup_onsht_labels[shuffler]
print(sup_onsht_labels, sup_onsht_labels.shape)
print('supervised datashape = ', sup_onsht_data.shape)
# sup_dataset = CustomTensorDataset(tensors = sup_onsht_data)
num_batches = len(unsup_train_loader)
# sup_data = torch.cat([sup_onsht_data for i in range(num_batches)], dim = 0)
# sup_labels = torch.cat([sup_onsht_labels for i in range(num_batches)], dim = 0)
sup_data = sup_onsht_data
sup_labels = sup_onsht_labels
print(sup_data.shape)
sup_dataset = CustomTensorDataset_pair(tensors = (sup_data.float(), sup_labels), transform=trans)
# sup_dataset = CustomTensorDataset_pair(tensors = (sup_data, sup_labels))
sup_train_loader = torch.utils.data.DataLoader(sup_dataset, batch_size = 90, shuffle = False)
print(len(sup_train_loader))
print('sup and unsup trainloader shape = ', len(sup_train_loader), len(unsup_train_loader))
X_target=np.load(args.target_datapath)
X = X_target
X = -1*((X)/255. -1.) #for making it a sparse matrix
print('shape of X', X.shape)
X=X.reshape((-1,28*28)) #shape 640k, 784
print('Xtarget shape', X)
batchsize = 128
target_loader = DataLoader(X.reshape(-1, 1, 28, 28), batch_size=batchsize, shuffle=False)
def predict(model, device, test_loader, use_cuda):
model.eval()
predictions = []
with torch.no_grad():
for data in test_loader:
data = data.to(device)
output = model(data.float())
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
predictions.extend(pred.tolist())
# print(predictions)
return np.array(predictions)
def is_set_correct(array):
    # a row/column/block passes if it contains at least 8 distinct labels
    return len(set(array)) >= 8
def clustering_accuracy(labels):
#labels are of shape (totalsmall images in all sudoku which is divisible by 64,)
labels = labels.reshape((labels.shape[0]//64, -1))
labels = labels.reshape((-1, 8, 8))
# print(labels.shape)
# print(labels[0])
# print(labels[10000])
subatomic_correct = 0
correct = 0
total = 0
#now we have labels of correct shape
final_bool_arr = np.array([True for i in range(labels.shape[0])])
    for i in range(8):
        # (k, j) is the top-left corner of the i-th 2x4 block of the 8x8 grid
        k = i * 2 if i < 4 else (i - 4) * 2
        j = (i // 4) * 4
# print(k, j)
# if(np.all(np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, :, i])) == True or np.all(np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, i, :])) == True or np.all(np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, k:k+2, j:j+4].reshape(-1, 8))) !=True ):
# correct+=1
# total+=1
arr1 = np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, :, i])
arr2 = np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, i, :])
arr3 = np.apply_along_axis(is_set_correct, axis = 1, arr = labels[:, k:k+2, j:j+4].reshape(-1, 8))
arr = arr1*arr2*arr3
# arr = arr1*arr2
assert(arr.shape[0] == labels.shape[0] and len(arr.shape) == 1)
final_bool_arr *= arr
subatomic_correct += arr1.sum() + arr2.sum() + arr3.sum()
# subatomic_correct += arr1.sum() + arr2.sum()
return final_bool_arr.sum()/final_bool_arr.shape[0], subatomic_correct/(3*8*labels.shape[0])
# classifier network
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, 5, padding = 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(400, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 9)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, (2, 2))
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, (2, 2))
x = x.view(-1, np.prod(x.size()[1:]))
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
model = LeNet().to(device)
test_batch_size=1000
epochs=25
lr=0.1
gamma=0.987
no_cuda=False
seed=1
log_interval=100
save_model=False
use_cuda = not no_cuda and torch.cuda.is_available()
torch.manual_seed(seed)
device = torch.device("cuda" if use_cuda else "cpu")
optimizer = optim.Adam(model.parameters(), lr=0.0002)
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
for epoch in range(epochs):
model.train()
acc = 0
for batch_idx, (Y, X) in enumerate(zip(unsup_train_loader, sup_train_loader)):
(Xtrans, Xnotrans)= Y
(Xsup, labels) = X
Xtrans, Xnotrans, Xsup, labels = Xtrans.to(device), Xnotrans.to(device), Xsup.to(device), labels.to(device)
optimizer.zero_grad()
# print(Xtrans.shape, Xnotrans.shape, Xsup.shape, labels.shape)
softmax = nn.Softmax(dim=1)
temp_model = copy.deepcopy(model).eval()
sup_out = model(Xsup.float())
with torch.no_grad():
unsup_notrans_out = softmax(temp_model(Xnotrans.float()))
unsup_trans_out = softmax(model(Xtrans.float()))
loss_sup = nn.CrossEntropyLoss()
loss_unsup = nn.BCELoss()
l2unsup = loss_unsup(unsup_trans_out, unsup_notrans_out)
l1sup = loss_sup(sup_out, labels.long())
total_loss = (l2unsup+ 10*l1sup)
acc += (torch.argmax(sup_out, dim=1).long() == labels.long()).sum().item()/(labels.shape[0])
total_loss.backward()
optimizer.step()
print('epoch = {}, loss1sup = {}, loss2usup = {}, acc = {}'.format(epoch, l1sup.item(), l2unsup.item(), acc/(batch_idx+1)))
if(epoch% 5 == 0):
target_labels = predict(model, device, target_loader, True)
print(clustering_accuracy(target_labels))
torch.save(model, args.output_classifier)
#classify query+target images and save
X_target=np.load(args.target_datapath)
X_query=np.load(args.query_datapath)
X = np.concatenate((X_query, X_target))
X = -1*((X)/255. -1.) #for making it a sparse matrix
print('shape of X', X.shape)
X=X.reshape((-1,28*28)) #shape 640k, 784
model.eval()
# targetset = TensorDataset(X[40000:] ,data_Y[40000:])
batchsize = 128
data_loader = DataLoader(X.reshape(-1, 1, 28, 28), batch_size=batchsize, shuffle=False)
# predict() is already defined above and is reused here.
data_labels = predict(model, device, data_loader, True)
data_labels.shape
#save labels of query and target
np.save(args.output_qt_labels, data_labels)
#TESTING QUERY
X=[] #this will contain 28,28 images
# i = 0
for img_name in sorted(os.listdir(args.testing_query_input)):
# i+=1
# if(i ==3):
# break
img = np.array(Image.open(os.path.join(args.testing_query_input,img_name))) # 224,224 = 64 * 28,28
sub_imgs=np.split(img,8)
sub_imgs=[np.split(x_,8,axis=1) for x_ in sub_imgs]
sub_imgs=np.array(sub_imgs) # 8,8,28,28
sub_imgs=sub_imgs.reshape((-1,28,28))
X.append(sub_imgs)
X=np.array(X)
X_input_query=X.reshape((-1,28,28))
X_input_query = -1*((X_input_query)/255. -1.) #for making it a sparse matrix
batchsize = 128
data_loader = DataLoader(X_input_query.reshape(-1, 1, 28, 28), batch_size=batchsize, shuffle=False)
# predict() is already defined above and is reused here.
data_labels = predict(model, device, data_loader, True)
print(data_labels.shape)
#save labels of query and target
np.save(args.output_testing_query_labels, data_labels)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#%% Packages
import numpy as np
import os, matplotlib
#matplotlib.use('Agg')
#from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import tensorflow.keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
from sklearn.utils import class_weight
#import matplotlib.pyplot as plt
from glob import glob
import cv2, random, argparse
import utils
# %% Command line arguments
parser = argparse.ArgumentParser(description='Framework for training and evaluation.')
parser.add_argument(
'--dataset', '-d',
help="1 -- smear baseline, 2 -- smear pipeline, 3 -- LBC pipeline",
type=int,
choices=[1, 2, 3],
default=1)
parser.add_argument(
'--architecture', '-a',
help="choose a network architecture",
choices=['ResNet50', 'DenseNet201'],
default='ResNet50')
parser.add_argument(
'--pretrain', '-p',
help="use pre-trained weights on ImageNet",
type=int,
choices=[0, 1],
default=0)
parser.add_argument(
'--fold', '-f',
help="Dataset 1&2: 3 folds; Dataset 3: 2 folds.",
type=int,
choices=[1, 2, 3],
default=1)
parser.add_argument(
'--index', '-i',
help="index for multiple training to get STD",
type=int,
# choices=[1, 2, 3],
default=1)
parser.add_argument(
'--mode', '-m',
help="train or test",
choices=['train', 'test'],
default='train')
parser.add_argument(
'--savefile', '-s',
help="if save results to csv files",
type=int,
choices=[0, 1],
default=0)
args = parser.parse_args()
# %% Parameters
#args.dataset = 1
#args.architecture = 'ResNet50'
#args.pretrain = 1
#args.fold = 1
#args.index = 1
#args.mode = 'train'
DATASET = args.dataset
ARCHI_NAME = args.architecture
PRETRAIN = args.pretrain
FOLD = args.fold
INDEX = args.index
MODE = args.mode
# log dir
#if ARCHI_NAME == 'ResNet50':
# PRETRAIN = 0
# DIR_LOG = f"./logs/resScratch/fold{FOLD}/"
#elif ARCHI_NAME == 'DenseNet201':
# if PRETRAIN == 0:
# DIR_LOG = f"./logs/denseScratch/fold{FOLD}/"
# else:
# DIR_LOG = f"./logs/densePretrain/fold{FOLD}/"
DIR_LOG = f"./logs/dataset_{DATASET}/{ARCHI_NAME}_pre{PRETRAIN}/"
if not os.path.exists(DIR_LOG):
os.makedirs(DIR_LOG)
WEIGHT_PATH = DIR_LOG + f"data{DATASET}_{ARCHI_NAME}_pre{PRETRAIN}_fold{FOLD}_{INDEX}.hdf5"
# training parameter
if ARCHI_NAME == 'ResNet50':
if DATASET == 1:
BATCH_SIZE = 128
EPOCHS = 30
else:
BATCH_SIZE = 512
EPOCHS = 50
elif ARCHI_NAME == 'DenseNet201':
if DATASET == 1:
BATCH_SIZE = 128
EPOCHS = 20
else:
BATCH_SIZE = 256
EPOCHS = 30
if PRETRAIN == 1:
EPOCHS = 5
# data dir
if DATASET in [1, 2]:
DIR_TRAIN_DATA = f"./Datasets/dataset{DATASET}/data_train{FOLD}/"
DIR_TEST_DATA = f"./Datasets/dataset{DATASET}/data_test{FOLD}/"
elif DATASET == 3:
if FOLD == 1:
DIR_TRAIN_DATA = f"./Datasets/dataset{DATASET}/train/"
DIR_TEST_DATA = f"./Datasets/dataset{DATASET}/test/"
elif FOLD == 2:
DIR_TRAIN_DATA = f"./Datasets/dataset{DATASET}/test/"
DIR_TEST_DATA = f"./Datasets/dataset{DATASET}/train/"
else:
raise ValueError("FOLD must be in [1, 2] for Dataset 3.")
#MODEL_PATH = DIR_LOG + "ResNet_aug.h5"
if PRETRAIN == 0 and DATASET == 1:
IMG_SHAPE = (80, 80, 1)
SAMPLE_SHAPE = (80, 80, 1)
else:
IMG_SHAPE = (80, 80, 3)
SAMPLE_SHAPE = (80, 80, 3)
# %% Load data
if MODE == 'train':
X_train, Y_train = utils.load_set(DIR_TRAIN_DATA, IMG_SHAPE, SAMPLE_SHAPE)
(X_test, Y_test,
indices, index_slide,
slides_cls0, slides_cls1) = utils.load_set(
DIR_TEST_DATA, IMG_SHAPE, SAMPLE_SHAPE, is_per_slide=True)
#%% Create the model
if ARCHI_NAME == 'ResNet50':
model = utils.build_resnet(input_shape=SAMPLE_SHAPE, classes=2, pretrain=PRETRAIN)
elif ARCHI_NAME == 'DenseNet201':
model = utils.build_densenet(input_shape=SAMPLE_SHAPE, classes=2, pretrain=PRETRAIN)
#%% Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
#%% Train with augmentation
if MODE == 'train':
train_datagen = ImageDataGenerator(
rescale=1./255,
preprocessing_function=utils.aug_non_inter,
validation_split=0.1) # set validation split
# elif ARCHI_NAME == 'DenseNet201':
# train_datagen = ImageDataGenerator(
# rescale=1./255,
## featurewise_center=True,
## featurewise_std_normalization=True,
# preprocessing_function=utils.aug_non_inter,
# validation_split=0.1) # set validation split
train_datagen.fit(X_train)
train_generator = train_datagen.flow(
X_train, Y_train,
batch_size=BATCH_SIZE,
subset='training') # set as training data
    class_weights = class_weight.compute_class_weight(
        class_weight='balanced',
        classes=np.argmax(np.unique(Y_train, axis=0), axis=1),
        y=np.argmax(Y_train, axis=1))
#class_weights = {0: 3.100251889168766, 1: 1.0}
validation_generator = train_datagen.flow(
X_train, Y_train,
batch_size=BATCH_SIZE,
subset='validation') # set as validation data
# Callbacks
mc = ModelCheckpoint(WEIGHT_PATH, monitor='val_loss', save_best_only=True, verbose=1)
es = EarlyStopping(monitor='val_loss', patience=15, verbose=1, restore_best_weights=True)
if PRETRAIN == 0:
rp = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1)
else:
rp = ReduceLROnPlateau(monitor='val_loss', factor=0.4, patience=0, verbose=1)
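    # Rationale: pretrained runs train for only 5 epochs, so the LR is decayed
    # on every non-improving epoch (patience=0); from-scratch runs wait 5
    # epochs before decaying.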
# if ARCHI_NAME == 'ResNet50':
# rp = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1)
# elif ARCHI_NAME == 'DenseNet201':
# rp = ReduceLROnPlateau(monitor='val_loss', factor=0.4, patience=0, verbose=1)
# Training
history = model.fit_generator(
generator = train_generator,
# steps_per_epoch = len(train_generator),
epochs = EPOCHS,
verbose=1,
class_weight = class_weights,
validation_data = validation_generator,
# validation_steps = len(validation_generator),
callbacks=[mc, es, rp])
# %% Evaluate model
test_datagen = ImageDataGenerator(
# featurewise_center=True,
# featurewise_std_normalization=True,
rescale=1./255)
#test_datagen.fit(X_test)
test_generator = test_datagen.flow(
X_test, Y_test,
shuffle=False,
batch_size=BATCH_SIZE)
# Restore the saved best model
model.load_weights(WEIGHT_PATH)
# Confution Matrix and Classification Report
#test_generator.reset()
Y_pred = model.predict_generator(
generator = test_generator,
steps=len(test_generator),
verbose=1)
Y_pred = np.argmax(Y_pred, axis=1)
target_names = ['Cancer', 'Healthy']
dict_metrics = utils.evaluate(Y_test, Y_pred, target_names)
#utils.plot_confusion_matrix(metrics['cm'], target_names, normalize=True)
for metric in dict_metrics:
print(dict_metrics[metric])
if args.savefile == 1:
utils.write_results(dict_metrics, args)
utils.write_per_slide_results(
Y_test, Y_pred,
dict_metrics, args,
indices, index_slide, slides_cls0, slides_cls1)
# %% Save model
#model.save(MODEL_PATH)
#%% Plot learning curve
if MODE == 'train':
utils.accuracy_curve(history, DIR_LOG)
#%%
|
nilq/baby-python
|
python
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from thrift.py.client.sync_client import SyncClient
from thrift.py.client.sync_client_factory import get_client
# @manual=//thrift/lib/py3lite/client/test:test_service-py
from thrift.py.test.TestService import add_args, add_result
from thrift.py3lite.test.test_server import server_in_another_process
class TestServiceClient(SyncClient):
def add(self, num1: int, num2: int) -> int:
result = self._send_request(
"TestService", "add", add_args(num1=num1, num2=num2), add_result
)
return result.success
class SyncClientTests(unittest.TestCase):
def test_basic(self) -> None:
with server_in_another_process() as path:
with get_client(TestServiceClient, path=path) as client:
self.assertEqual(3, client.add(1, 2))
|
nilq/baby-python
|
python
|
import os
import sys
from yaku.scheduler \
import \
run_tasks
from yaku.context \
import \
get_bld, get_cfg
import yaku.tools
def configure(conf):
    conf.load_tool("python_2to3")
def build(ctx):
builder = ctx.builders["python_2to3"]
files = []
for r, ds, fs in os.walk("foo"):
files.extend([os.path.join(r, f) for f in fs])
builder.convert("", files)
if __name__ == "__main__":
ctx = get_cfg()
configure(ctx)
ctx.setup_tools()
ctx.store()
ctx = get_bld()
build(ctx)
try:
run_tasks(ctx)
finally:
ctx.store()
|
nilq/baby-python
|
python
|
from sklearn.ensemble import IsolationForest
class IsolationModel:
"""
Simple Isolation Model based on contamination
"""
def __init__(self, data):
self.normalized_data = (data - data.mean()) / data.std()
self.iso = IsolationForest(contamination=.001, behaviour='new')
self.iso.fit(self.normalized_data)
self.iso.predict(self.normalized_data)
def predict_outlier(self, data):
return self.iso.predict(data)
from models.isolation_model import IsolationModel
import backtrader as bt
import pandas as pd
import numpy as np
class IsolationStrategy(bt.Strategy):
'''
Explanation:
The isolation forest identifies what it deems to be anomalies,
overbought or oversold opportunities for entry. I append known data
after fitting the isolation forest for the next day, making it an
    online unsupervised learning algorithm.
Current Issue: Positioning, Sizing, Exposure
'''
def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self, data):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataopen = self.datas[0].open
self.datahigh = self.datas[0].high
self.datalow = self.datas[0].low
self.dataclose = self.datas[0].close
self.datavolume = self.datas[0].volume
self.model_data = pd.read_csv(data)
self.buyOut = False
self.sellOut = False
self.orderPosition = 0
self.cooldown = 7
# This is the code that gets copied into the trading system
def next(self):
self.log(self.dataclose[0])
# Construct dataframe to predict
x = pd.DataFrame(
data=[[
self.dataopen[0], self.datahigh[0], self.datalow[0],
self.dataclose[0], self.datavolume[0]
]], columns='Open High Low Close Volume'.split()
)
# Create the model with all known data for normalization
model = IsolationModel(self.model_data)
# Append today's data for tomorrow's normalization
self.model_data = self.model_data.append(x, ignore_index=True)
# Dataframe to help normalize x
mean_to_normalize = pd.DataFrame(data=[[
np.mean(self.model_data['Open']), np.mean(self.model_data['High']),
np.mean(self.model_data['Low']), np.mean(self.model_data['Close']),
np.mean(self.model_data['Volume'])
]], columns='Open High Low Close Volume'.split())
# Dataframe to help normalize x
std_to_normalize = pd.DataFrame(data=[[
np.std(self.model_data['Open']), np.std(self.model_data['High']),
np.std(self.model_data['Low']), np.std(self.model_data['Close']),
np.std(self.model_data['Volume'])
]], columns='Open High Low Close Volume'.split())
# x is normalized as a parameter
normalized_x = (x - mean_to_normalize) / std_to_normalize
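        # Note: roughly equivalent one-liner via pandas broadcasting
        # (modulo ddof: pandas .std() uses ddof=1, np.std uses ddof=0):
        # normalized_x = (x - self.model_data.mean()) / self.model_data.std()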
"""
# Write updated Data to CSV - To be included in the live system
self.model_data.to_csv('FB.csv', index=False)
"""
        # Sell when an outlier is detected and the close is above the mean
        if (model.predict_outlier(normalized_x)[0] == -1) and \
                (self.dataclose[0] > np.mean(self.model_data['Close'])):
self.log('SELL CREATE, %.2f' % self.dataclose[0])
            if self.orderPosition != 0:
self.sell(size=1)
self.orderPosition -= 1
        # Buy when an outlier is detected, the close is below the mean,
        # and the cooldown has expired
        if (model.predict_outlier(normalized_x)[0] == -1) and \
                (self.dataclose[0] < np.mean(self.model_data['Close'])) and \
                (self.cooldown == 0):
self.log('BUY CREATE, %.2f' % self.dataclose[0])
self.buy(size=1)
self.orderPosition += 1
self.cooldown = 7
if self.cooldown > 0:
self.cooldown -= 1
import backtrader as bt
import pyfolio as pf
def backtesting_engine(symbol, strategy, fromdate, todate, args=None):
"""
Primary function for backtesting, not entirely parameterized
"""
# Backtesting Engine
cerebro = bt.Cerebro()
# Add a Strategy if no Data Required for the model
if args is None:
cerebro.addstrategy(strategy)
# If the Strategy requires a Model and therefore data
elif args is not None:
cerebro.addstrategy(strategy, args)
    # Retrieve data from Yahoo Finance
data = bt.feeds.YahooFinanceData(
dataname=symbol,
fromdate=fromdate, # datetime.date(2015, 1, 1)
todate=todate, # datetime.datetime(2016, 1, 1)
reverse=False
)
# Add Data to Backtesting Engine
cerebro.adddata(data)
# Set Initial Portfolio Value
cerebro.broker.setcash(100000.0)
# Add Analysis Tools
cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='sharpe')
cerebro.addanalyzer(bt.analyzers.Returns, _name='returns')
cerebro.addanalyzer(bt.analyzers.SQN, _name='sqn')
cerebro.addanalyzer(bt.analyzers.DrawDown, _name='drawdown')
cerebro.addanalyzer(bt.analyzers.PositionsValue, _name='posval')
cerebro.addanalyzer(bt.analyzers.PyFolio, _name='pyfolio')
# Starting Portfolio Value
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run the Backtesting Engine
backtest = cerebro.run()
# Print Analysis and Final Portfolio Value
print(
'Final Portfolio Value: %.2f' % cerebro.broker.getvalue()
)
print(
'Return: ', backtest[0].analyzers.returns.get_analysis()
)
print(
'Sharpe Ratio: ', backtest[0].analyzers.sharpe.get_analysis()
)
print(
'System Quality Number: ', backtest[0].analyzers.sqn.get_analysis()
)
print(
'Drawdown: ', backtest[0].analyzers.drawdown.get_analysis()
)
print(
'Active Position Value: ', backtest[0].analyzers.posval.get_analysis()
)
print(
'Pyfolio: ', backtest[0].analyzers.pyfolio.get_analysis()
)
# Print Analysis and Final Portfolio Value
pyfoliozer = backtest[0].analyzers.getbyname('pyfolio')
returns, positions, transactions, gross_lev = pyfoliozer.get_pf_items()
# See if we can add regular FB data to compare against returns of algo
pf.create_full_tear_sheet(
returns, positions=positions, transactions=transactions
)
# TODO: Create pipeline: Optimization -> Testing essentially
class BacktestingPipeline:
"""
Pipeline for in sample optimization and out of sample testing
"""
pass
from datetime import datetime
from strategies.isolation_strategy import IsolationStrategy
from tools.backtesting_tools import backtesting_engine
"""
Script for backtesting strategies
"""
if __name__ == '__main__':
# Run backtesting engine
backtesting_engine(
'TICKER', IsolationStrategy, args='DATA.csv',
fromdate=datetime(2018, 1, 1), todate=datetime(2019, 1, 1)
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module with several helper functions
"""
import os
import collections
import re
def file_extensions_get(fname_list):
"""Returns file extensions in list
Args:
fname_list (list): file names, eg ['a.csv','b.csv']
Returns:
list: file extensions for each file name in input list, eg ['.csv','.csv']
"""
return [os.path.splitext(fname)[-1] for fname in fname_list]
def file_extensions_all_equal(ext_list):
"""Checks that all file extensions are equal.
Args:
ext_list (list): file extensions, eg ['.csv','.csv']
Returns:
bool: all extensions are equal to first extension in list?
"""
return len(set(ext_list))==1
def file_extensions_contains_xls(ext_list):
# Assumes all file extensions are equal! Only checks first file
return ext_list[0] == '.xls'
def file_extensions_contains_xlsx(ext_list):
# Assumes all file extensions are equal! Only checks first file
return ext_list[0] == '.xlsx'
def file_extensions_contains_csv(ext_list):
# Assumes all file extensions are equal! Only checks first file
return (ext_list[0] == '.csv' or ext_list[0] == '.txt')
def file_extensions_valid(ext_list):
"""Checks if file list contains only valid files
Notes:
Assumes all file extensions are equal! Only checks first file
Args:
ext_list (list): file extensions, eg ['.csv','.csv']
Returns:
bool: first element in list is one of ['.csv','.txt','.xls','.xlsx']?
"""
ext_list_valid = ['.csv','.txt','.xls','.xlsx']
return ext_list[0] in ext_list_valid
def columns_all_equal(col_list):
"""Checks that all lists in col_list are equal.
Args:
col_list (list): columns, eg [['a','b'],['a','b','c']]
Returns:
bool: all lists in list are equal?
"""
return all([l==col_list[0] for l in col_list])
def list_common(_list, sort=True):
l = list(set.intersection(*[set(l) for l in _list]))
if sort:
return sorted(l)
else:
return l
def list_unique(_list, sort=True):
l = list(set.union(*[set(l) for l in _list]))
if sort:
return sorted(l)
else:
return l
def list_tofront(_list,val):
return _list.insert(0, _list.pop(_list.index(val)))
def cols_filename_tofront(_list):
return list_tofront(_list,'filename')
def df_filename_tofront(dfg):
cfg_col = dfg.columns.tolist()
return dfg[cols_filename_tofront(cfg_col)]
def check_valid_xls(fname_list):
ext_list = file_extensions_get(fname_list)
if not file_extensions_all_equal(ext_list):
raise IOError('All file types and extensions have to be equal')
if not(file_extensions_contains_xls(ext_list) or file_extensions_contains_xlsx(ext_list)):
raise IOError('Only .xls, .xlsx files can be processed')
return True
def compare_pandas_versions(version1, version2):
def cmp(a, b):
return (a > b) - (a < b)
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
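# Minimal usage sketch (not part of the original module); the literals below
# are hypothetical examples of how these helpers compose:
if __name__ == '__main__':
    fnames = ['a.csv', 'b.csv']
    exts = file_extensions_get(fnames)              # ['.csv', '.csv']
    assert file_extensions_all_equal(exts)
    assert file_extensions_valid(exts)
    assert list_common([['a', 'b'], ['b', 'c']]) == ['b']
    assert compare_pandas_versions('1.3.0', '1.2.5') > 0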
|
nilq/baby-python
|
python
|
from . import fcn8_resnet, fcn8_vgg16
def get_base(base_name, exp_dict, n_classes):
if base_name == "fcn8_resnet":
model = fcn8_resnet.FCN8()
elif base_name == "fcn8_vgg16":
model = fcn8_vgg16.FCN8_VGG16(n_classes=n_classes)
else:
raise ValueError('%s does not exist' % base_name)
return model
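# Minimal usage sketch (hypothetical exp_dict; fcn8_vgg16 must be importable):
# model = get_base("fcn8_vgg16", exp_dict={}, n_classes=2)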
|
nilq/baby-python
|
python
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The datastore models for upload tokens and related data."""
from __future__ import absolute_import
import logging
import uuid
from google.appengine.ext import ndb
from dashboard.models import internal_only_model
# 10 minutes should be enough for keeping the data in memory because processing
# histograms takes 3.5 minutes in the 90th percentile.
_MEMCACHE_TIMEOUT = 60 * 10
class State(object):
PENDING = 0
PROCESSING = 1
FAILED = 2
COMPLETED = 3
def StateToString(state):
if state == State.PENDING:
return 'PENDING'
if state == State.PROCESSING:
return 'PROCESSING'
if state == State.FAILED:
return 'FAILED'
if state == State.COMPLETED:
return 'COMPLETED'
class Token(internal_only_model.InternalOnlyModel):
"""Token is used to get state of request.
Token can contain multiple Measurement. One per each histogram in the
request. States of nested Measurements affect state of the Token.
Even though Token and Measurements contain related data we do not combine
them into one entity group. Token can contain 1000+ measurements. So doing
such amount of updates of one entity group is too expencive.
"""
_use_memcache = True
_memcache_timeout = _MEMCACHE_TIMEOUT
internal_only = ndb.BooleanProperty(default=True, indexed=False)
state_ = ndb.IntegerProperty(
name='state', default=State.PENDING, indexed=False)
error_message = ndb.StringProperty(indexed=False, default=None)
creation_time = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
update_time = ndb.DateTimeProperty(auto_now=True, indexed=False)
temporary_staging_file_path = ndb.StringProperty(indexed=False, default=None)
@property
def state(self):
measurements = self.GetMeasurements()
if not measurements:
return self.state_
all_states = [child.state for child in measurements if child is not None]
all_states.append(self.state_)
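    # Aggregate child states: all PENDING -> PENDING; any PENDING or
    # PROCESSING -> PROCESSING; any FAILED -> FAILED; else COMPLETED.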
if all(s == State.PENDING for s in all_states):
return State.PENDING
if any(s in (State.PROCESSING, State.PENDING) for s in all_states):
return State.PROCESSING
if any(s == State.FAILED for s in all_states):
return State.FAILED
return State.COMPLETED
@classmethod
def UpdateObjectState(cls, obj, state, error_message=None):
if obj is None:
return
return obj.UpdateState(state, error_message)
def UpdateState(self, state, error_message=None):
assert error_message is None or state == State.FAILED
self.state_ = state
if error_message is not None:
# In some cases the error_message (e.message field) can actually be not
# a string.
self.error_message = str(error_message)
self.put()
# Note that state here does not reflect the state of upload overall (since
# "state_" doesn't take measurements into account). Token and Measurements
# aren't connected by entity group, so the information about final state
# would be stale.
logging.info('Upload completion token updated. Token id: %s, state: %s',
self.key.id(), StateToString(self.state_))
@ndb.tasklet
def AddMeasurement(self, test_path, is_monitored):
"""Creates measurement, associated to the current token."""
measurement = Measurement(
id=str(uuid.uuid4()),
test_path=test_path,
token=self.key,
monitored=is_monitored)
yield measurement.put_async()
logging.info(
'Upload completion token measurement created. Token id: %s, '
'measurement test path: %r', self.key.id(), measurement.test_path)
raise ndb.Return(measurement)
def GetMeasurements(self):
return Measurement.query(Measurement.token == self.key).fetch()
class Measurement(internal_only_model.InternalOnlyModel):
"""Measurement represents state of added histogram.
Measurement is uniquely defined by the full path to the test (for example
master/bot/test/metric/page) and parent token key.
"""
_use_memcache = True
_memcache_timeout = _MEMCACHE_TIMEOUT
internal_only = ndb.BooleanProperty(default=True)
token = ndb.KeyProperty(kind='Token', indexed=True)
test_path = ndb.StringProperty(indexed=True)
state = ndb.IntegerProperty(default=State.PROCESSING, indexed=False)
error_message = ndb.StringProperty(indexed=False, default=None)
update_time = ndb.DateTimeProperty(auto_now=True, indexed=False)
monitored = ndb.BooleanProperty(default=False, indexed=False)
histogram = ndb.KeyProperty(kind='Histogram', indexed=True, default=None)
@classmethod
def GetByPath(cls, test_path, token_id):
if test_path is None or token_id is None:
return None
    # Data here can be a bit stale.
return Measurement.query(
ndb.AND(Measurement.test_path == test_path,
Measurement.token == ndb.Key('Token', token_id))).get()
@classmethod
@ndb.tasklet
def UpdateStateByPathAsync(cls,
test_path,
token_id,
state,
error_message=None):
assert error_message is None or state == State.FAILED
obj = cls.GetByPath(test_path, token_id)
if obj is None:
if test_path is not None and token_id is not None:
logging.warning(
'Upload completion token measurement could not be found. '
'Token id: %s, measurement test path: %s', token_id, test_path)
return
obj.state = state
if error_message is not None:
# In some cases the error_message (e.message field) can actually be not
# a string.
obj.error_message = str(error_message)
yield obj.put_async()
logging.info(
'Upload completion token measurement updated. Token id: %s, '
'measurement test path: %s, state: %s', token_id, test_path,
StateToString(state))
|
nilq/baby-python
|
python
|
import struct
from itertools import permutations
class bref3:
def __init__(self, filename):
self.stream = open(filename, 'rb')
self.snvPerms = list(permutations(['A','C','G','T']))
def readRecords(self):
# read the magic number
if self.read_int() != 2055763188:
raise ValueError('file is not in bref3 format')
program = self.read_utf()
samples = self.read_string_array()
nHaps = 2*len(samples)
recList = []
nRecs = self.read_int()
print(f'Reading {nRecs} records!')
while(nRecs != 0):
self.readDataBlock(samples, recList, nRecs)
nRecs = self.read_int()
return recList
def read_string_array(self):
length = self.read_int()
entries = [self.read_utf() for _ in range(length)]
return entries
def readByteLengthStringArray(self):
length = self.read_unsigned_byte()
array = []
for j in range(0,length):
array.append(self.read_utf())
return array
def readDataBlock(self,samples, recList, nRecs):
# Chrom for all records in data block
chrom = self.read_utf()
# Number of distinct allele sequences in sequence coded records
nSeqs = self.read_unsigned_short()
# index of sequence carried by each haplotype at sequence-coded records
hap2Seq = []
for j in range(0,2*len(samples)):
hap2Seq.append(self.read_unsigned_short())
print(f'On chrom {chrom}, {nSeqs} distinct alleles here:{hap2Seq}')
for j in range(0,nRecs):
rec = self.readRecord(chrom,samples,nSeqs,hap2Seq)
recList.append(rec)
def readRecord(self, chrom, samples, nSeqs, hap2Seq):
marker = self.readMarker(chrom)
coding = self.read_byte()
if coding == 0:
print(f"{marker['id']}:seq coded")
return self.readSeqCodedRecord(samples,marker,nSeqs,hap2Seq)
elif coding == 1:
print(f"{marker['id']}:allele coded")
return self.readAlleleCodedRecord(samples, marker)
def readMarker(self, chrom):
marker = dict()
marker['pos'] = self.read_int()
marker['id'] = self.readByteLengthStringArray()
alleleCode = self.read_byte()
if alleleCode == -1:
marker['alleles'] = self.read_string_array()
marker['end'] = self.read_int()
else:
marker['nAlleles'] = 1 + (alleleCode & 0b11)
permIndex = (alleleCode >> 2)
marker['alleles'] = self.snvPerms[permIndex][0:marker['nAlleles']]
marker['end'] = -1
return marker
def readSeqCodedRecord(self,samples,marker,nSeqs,hap2Seq):
seq2Allele = []
for _ in range(nSeqs):
seq2Allele.append(self.read_unsigned_byte())
hap2Allele = []
for x in hap2Seq:
hap2Allele.append(seq2Allele[x])
record = dict()
record['marker'] = marker
record['samples'] = samples
record['hap2Allele'] = hap2Allele
return record
def readAlleleCodedRecord(self,samples,marker):
nHaps = 2*len(samples)
nAlleles = len(marker['alleles'])
hapIndices = []
majorAllele = -1
for j in range(0,nAlleles):
hapIndices.append(self.readIntArray())
if hapIndices[j] is None:
majorAllele = j
hap2Allele = []
for j in range(0,nHaps):
hap2Allele.append(majorAllele)
for j in range(0,len(hapIndices)):
            if hapIndices[j] is not None:
for hap in hapIndices[j]:
hap2Allele[hap] = j
record = dict()
record['marker'] = marker
record['samples'] = samples
record['hapToAllele'] = hap2Allele
return record
def readIntArray(self):
length = self.read_int()
if length == -1:
return None
else:
array = []
for j in range(0,length):
array.append(self.read_int())
return array
def read_boolean(self):
return struct.unpack('?', self.stream.read(1))[0]
def read_byte(self):
return struct.unpack('b', self.stream.read(1))[0]
def read_unsigned_byte(self):
return struct.unpack('B', self.stream.read(1))[0]
def read_char(self):
return chr(struct.unpack('>H', self.stream.read(2))[0])
def read_double(self):
return struct.unpack('>d', self.stream.read(8))[0]
def read_float(self):
return struct.unpack('>f', self.stream.read(4))[0]
def read_short(self):
return struct.unpack('>h', self.stream.read(2))[0]
def read_unsigned_short(self):
return struct.unpack('>H', self.stream.read(2))[0]
def read_long(self):
return struct.unpack('>q', self.stream.read(8))[0]
def read_utf(self):
utf_length = struct.unpack('>H', self.stream.read(2))[0]
return self.stream.read(utf_length).decode('utf-8')
def read_int(self):
return struct.unpack('>i', self.stream.read(4))[0]
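# Minimal usage sketch (assumes a local bref3 file; 'ref.bref3' is a
# placeholder path):
# reader = bref3('ref.bref3')
# records = reader.readRecords()
# print(records[0]['marker'])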
|
nilq/baby-python
|
python
|
"""Utilities for make the code run both on Python2 and Python3.
"""
import sys
PY2 = sys.version_info[0] == 2
# urljoin
if PY2:
from urlparse import urljoin
else:
from urllib.parse import urljoin
# Dictionary iteration
if PY2:
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
else:
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
# string and text types
if PY2:
text_type = unicode
string_types = (str, unicode)
numeric_types = (int, long)
else:
text_type = str
string_types = (str,)
numeric_types = (int,)
if PY2:
is_iter = lambda x: x and hasattr(x, 'next')
else:
is_iter = lambda x: x and hasattr(x, '__next__')
# imap
if PY2:
from itertools import imap
else:
imap = map
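# Minimal demonstration (not part of the original module): the shims above let
# the same call sites run on both Python 2 and Python 3.
if __name__ == '__main__':
    d = {'a': 1, 'b': 2}
    print(sorted(iterkeys(d)))
    print(urljoin('https://example.com/a/', 'b'))
    print(list(imap(str, [1, 2])))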
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright (c) 2021, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime, decimal, json, logging, os, re, sys
import boto3, requests, six
from pytz import utc
__version__ = None
LOGGER = logging.getLogger(__name__)
class JSONEncoder(json.JSONEncoder):
def default(self, obj): #pylint: disable=method-hidden
# parameter is called `o` in json.JSONEncoder.
if hasattr(obj, 'isoformat'):
return obj.isoformat()
if isinstance(obj, decimal.Decimal):
return float(obj)
return super(JSONEncoder, self).default(obj)
class LastRunCache(object):
"""
Cache for last run on a log file.
"""
def __init__(self, filename):
self.filename = filename
self.last_run_logs = {}
self.load()
def load(self):
if os.path.exists(self.filename):
with open(self.filename) as last_run:
self.last_run_logs = json.load(
last_run, object_hook=datetime_hook)
def save(self):
if not os.path.isdir(os.path.dirname(self.filename)):
os.makedirs(os.path.dirname(self.filename))
with open(self.filename, 'w') as last_run:
json.dump(self.last_run_logs, last_run, cls=JSONEncoder, indent=2)
def more_recent(self, logname, last_modified, update=False):
result = (not logname in self.last_run_logs
or self.last_run_logs[logname] < last_modified)
if result and update:
self.last_run_logs[logname] = last_modified
return result
def as_keyname(filename, logsuffix=None, prefix=None, ext='.log'):
"""
The keyname returned is in a format as expected by AWS S3
(i.e. no leading '/') whether `filename` is an absolute path or
a subdirectory of the current path.
"""
filename = filename.lstrip('/')
result = filename
if ext.startswith('.'):
ext = ext[1:]
if logsuffix:
look = re.match(r'^(\S+\.%s)(\S*)$' % ext, filename)
if look:
result = look.group(1) + logsuffix + look.group(2)
if prefix:
result = "%s/%s" % (prefix.strip('/'), result)
return result
def as_filename(key_name, logsuffix=None, prefix=None, ext='.log'):
result = key_name
if ext.startswith('.'):
ext = ext[1:]
if logsuffix:
look = re.match(r'^(\S+\.%s)%s(\S*)$' % (ext, logsuffix), key_name)
if look:
result = look.group(1) + look.group(2)
if prefix is not None:
if result.startswith(prefix):
result = result[len(prefix):]
result = result.lstrip('/')
return result
def as_logname(key_name, logsuffix=None, prefix=None, ext='.log'):
if ext.startswith('.'):
ext = ext[1:]
result = as_filename(key_name, logsuffix=logsuffix, prefix=prefix)
look = re.match(r'(\S+\.%s)((-\S+)\.gz)' % ext, result)
if look:
result = look.group(1)
return result
def datetime_hook(json_dict):
for key, value in list(six.iteritems(json_dict)):
for fmt in ("%Y-%m-%dT%H:%M:%S.%f+00:00", "%Y-%m-%dT%H:%M:%S+00:00"):
try:
json_dict[key] = datetime.datetime.strptime(value, fmt)
if json_dict[key].tzinfo is None:
json_dict[key] = json_dict[key].replace(tzinfo=utc)
break
except ValueError:
pass
if not isinstance(json_dict[key], datetime.datetime):
LOGGER.warning("%s: cannot convert '%s' to a datetime object.",
key, value)
return json_dict
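# Example (hypothetical payload): used as a json object_hook, ISO-8601 strings
# become timezone-aware datetimes:
# json.loads('{"t": "2016-01-06T00:00:00+00:00"}', object_hook=datetime_hook)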
def get_last_modified(item):
return item['LastModified']
def list_local(lognames, prefix=None, list_all=False):
"""
Returns a list of rotated log files with their timestamp.
Example:
[{ "Key": "/var/log/nginx/www.example.com.log-20160106.gz",
"LastModified": "Mon, 06 Jan 2016 00:00:00 UTC"},
{ "Key": "/var/log/nginx/www.example.com.log-20160105.gz",
"LastModified": "Mon, 05 Jan 2016 00:00:00 UTC"},
]
"""
results = []
for logname in lognames:
dirname = os.path.dirname(logname)
_, ext = os.path.splitext(logname)
if prefix:
prefixed_dirname = prefix + dirname
else:
prefixed_dirname = dirname
if os.path.isdir(prefixed_dirname):
for filename in os.listdir(prefixed_dirname):
fullpath = os.path.join(dirname, filename)
prefixed_fullpath = os.path.join(prefixed_dirname, filename)
if (as_logname(fullpath, ext=ext) == logname
and (list_all or not fullpath == logname)):
mtime = datetime.datetime.fromtimestamp(
os.path.getmtime(prefixed_fullpath), tz=utc)
results += [{"Key": fullpath, "LastModified": mtime}]
return results
def list_s3(bucket, lognames, prefix=None, time_from_logsuffix=False):
"""
Returns a list of rotated log files present in a bucket
with their timestamp.
Example:
[{ "Key": "var/log/nginx/www.example.com.log-0ce5c29636da94d4c-20160106.gz",
"LastModified": "Mon, 06 Jan 2016 00:00:00 UTC"},
{ "Key": "var/log/nginx/www.example.com.log-0ce5c29636da94d4c-20160105.gz",
"LastModified": "Mon, 05 Jan 2016 00:00:00 UTC"},
]
"""
results = []
s3_resource = boto3.resource('s3')
for logname in lognames:
logprefix = os.path.splitext(logname)[0].lstrip('/')
if prefix:
logprefix = "%s/%s" % (prefix.strip('/'), logprefix)
for s3_key in s3_resource.Bucket(bucket).objects.filter(
Prefix=logprefix):
logkey = as_logname(s3_key.key, prefix=prefix)
if logname.startswith('/'):
logkey = '/' + logkey
if logkey == logname:
look = re.match(r'\S+-(\d\d\d\d\d\d\d\d)\.gz', s3_key.key)
if time_from_logsuffix and look:
last_modified = datetime.datetime.strptime(
look.group(1), "%Y%m%d")
else:
last_modified = s3_key.last_modified
if last_modified.tzinfo is None:
last_modified = last_modified.replace(tzinfo=utc)
results += [{"Key": s3_key.key, "LastModified": last_modified}]
return results
def list_updates(local_items, s3_items, logsuffix=None, prefix=None):
"""
Returns two lists of updated files. The first list is all the files
in the list *s3_items* which are more recent that files in the list
*local_items*.
The second returned list is all the files in the list *local_items*
which are more recent that files in the list *s3_items*.
Example:
[{ "Key": "abc.txt",
"LastModified": "Mon, 05 Jan 2015 12:00:00 UTC"},
{ "Key": "def.txt",
"LastModified": "Mon, 05 Jan 2015 12:00:001 UTC"},
]
"""
local_results = []
local_index = {}
for local_val in local_items:
local_index[as_keyname(local_val['Key'],
logsuffix=logsuffix, prefix=prefix)] = local_val
for s3_val in s3_items:
s3_key = s3_val['Key']
local_val = local_index.get(s3_key, None)
if local_val:
local_datetime = local_val['LastModified']
s3_datetime = s3_val['LastModified']
if s3_datetime > local_datetime:
local_results += [s3_val]
else:
local_results += [s3_val]
s3_results = []
s3_index = {}
for s3_val in s3_items:
s3_index[as_filename(s3_val['Key'],
logsuffix=logsuffix, prefix=prefix)] = s3_val
for local_val in local_items:
local_key = local_val['Key'].lstrip('/')
s3_val = s3_index.get(local_key, None)
if s3_val:
s3_datetime = s3_val['LastModified']
local_datetime = local_val['LastModified']
if local_datetime > s3_datetime:
s3_results += [local_val]
else:
s3_results += [local_val]
return local_results, s3_results
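# Semantics sketch: an S3 item newer than its local counterpart (or missing
# locally) lands in the first returned list; a local item newer than its S3
# counterpart (or missing on S3) lands in the second.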
def download_updated_logs(lognames,
local_prefix=None, logsuffix=None,
bucket=None, s3_prefix=None,
last_run=None, list_all=False,
time_from_logsuffix=False):
"""
Fetches log files which are on S3 and more recent that specified
in last_run and returns a list of filenames.
"""
#pylint:disable=too-many-arguments,too-many-locals
local_update, _ = list_updates(
list_local(lognames, prefix=local_prefix, list_all=list_all),
list_s3(bucket, lognames, prefix=s3_prefix,
time_from_logsuffix=time_from_logsuffix),
logsuffix=logsuffix, prefix=s3_prefix)
downloaded = []
s3_resource = boto3.resource('s3')
for item in sorted(local_update, key=get_last_modified):
keyname = item['Key']
filename = as_filename(keyname, prefix=s3_prefix)
if filename.startswith('/'):
filename = '.' + filename
logname = as_logname(filename)
if not last_run or last_run.more_recent(
logname, item['LastModified'], update=True):
s3_key = s3_resource.Object(bucket, keyname)
if not s3_key.storage_class or s3_key.storage_class == 'STANDARD':
LOGGER.info("download %s to %s\n" % (
keyname, os.path.abspath(filename)))
if not os.path.isdir(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
s3_key.download_file(filename)
downloaded += [filename]
else:
LOGGER.info("skip %s (on %s storage)\n" % (
keyname, s3_key.storage_class))
# It is possible some files were already downloaded as part of a previous
# run so we construct the list of recent files here.
downloaded = []
for item in sorted(list_local(lognames,
prefix=local_prefix, list_all=False), key=get_last_modified):
keyname = item['Key']
filename = as_filename(keyname, prefix=s3_prefix)
if filename.startswith('/'):
filename = '.' + filename
logname = as_logname(filename)
if not last_run or last_run.more_recent(
logname, item['LastModified'], update=True):
downloaded += [filename]
return downloaded
def upload_log(s3_location, filename, logsuffix=None):
"""
Upload a local log file to an S3 bucket. If logsuffix is ``None``,
the instance-id will be automatically added as a suffix in the log filename.
"""
headers = {'ContentType': 'text/plain'}
if filename.endswith('.gz'):
headers.update({'ContentEncoding': 'gzip'})
parts = s3_location[5:].split('/')
s3_bucket = parts[0]
s3_prefix = '/'.join(parts[1:])
if not logsuffix:
# https://github.com/boto/boto3/issues/313
resp = requests.get('http://instance-data/latest/meta-data/instance-id')
logsuffix = resp.text
if logsuffix.startswith('i-'):
logsuffix = logsuffix[1:]
keyname = as_keyname(
filename, logsuffix=logsuffix, prefix=s3_prefix)
LOGGER.info("Upload %s ... to s3://%s/%s\n"
% (filename, s3_bucket, keyname))
s3_client = boto3.client('s3')
s3_client.upload_file(filename, s3_bucket, keyname, ExtraArgs=headers)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import codecs
import logging
from pathlib import Path
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from regions import CircleSkyRegion
from gammapy.modeling import Fit
from gammapy.data import DataStore
from gammapy.datasets import (
MapDataset,
)
from gammapy.modeling.models import (
PowerLawSpectralModel,
PointSpatialModel,
LogParabolaSpectralModel,
GaussianSpatialModel,
ShellSpatialModel,
SkyModel,
)
from gammapy.maps import MapAxis, WcsGeom, Map
from gammapy.makers import (
MapDatasetMaker,
SafeMaskMaker,
)
from gammapy.estimators import ExcessMapEstimator
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
ENERGY_AXIS = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 20), unit="TeV", name="energy", interp="log"
)
ENERGY_AXIS_TRUE = MapAxis.from_edges(
np.logspace(-1.0, 1.5, 40), unit="TeV", name="energy_true", interp="log"
)
GEOM = WcsGeom.create(
skydir=(0, 0), npix=(350, 350), binsz=0.02, frame="galactic", axes=[ENERGY_AXIS]
)
REGION = CircleSkyRegion(center = SkyCoord(0,0,frame='galactic', unit='deg'), radius= 0.5*u.deg)
def get_observations():
# Select observations
data_store = DataStore.from_dir("../cta-galactic-center/input/index/gps")
obs_id = [110380, 111140, 111159]
return data_store.get_observations(obs_id)
def make_map_dataset(observations):
stacked = MapDataset.create(geom=GEOM, energy_axis_true=ENERGY_AXIS_TRUE)
dataset_maker = MapDatasetMaker(selection=["background", "exposure", "psf", "edisp"])
safe_mask_masker = SafeMaskMaker(methods=["offset-max", "aeff-default"], offset_max=2.5 * u.deg)
for obs in observations:
cutout = stacked.cutout(obs.pointing_radec, width="5 deg")
dataset = dataset_maker.run(cutout, obs)
dataset = safe_mask_masker.run(dataset, obs)
stacked.stack(dataset)
return stacked
def simulate_counts(stacked):
spectral_model_1 = PowerLawSpectralModel(index = 1.95, amplitude="5e-12 cm-2 s-1 TeV-1", reference="1 TeV")
spatial_model_1 = PointSpatialModel(lon_0 = "0 deg", lat_0 = "0 deg", frame='galactic')
model_1 = SkyModel(spectral_model_1, spatial_model_1, name='source 1')
spectral_model_2 = LogParabolaSpectralModel(alpha = 2.1, beta =0.01, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV")
spatial_model_2 = GaussianSpatialModel(lon_0 = "0.4 deg", lat_0 = "0.15 deg", sigma=0.2*u.deg, frame='galactic')
model_2 = SkyModel(spectral_model_2, spatial_model_2, name='source 2')
spectral_model_3 = PowerLawSpectralModel(index = 2.7, amplitude="5e-11 cm-2 s-1 TeV-1", reference="1 TeV")
spatial_model_3 = ShellSpatialModel(lon_0 = "0.06 deg", lat_0 = "0.6 deg", radius=0.6*u.deg,width=0.3*u.deg,frame='galactic')
model_3 = SkyModel(spectral_model_3, spatial_model_3, name='source 3')
stacked.models = [model_1, model_2, model_3]
stacked.fake(0)
return stacked
def make_significance_map(stacked):
stacked.models = []
e = ExcessMapEstimator("0.1deg")
result = e.run(stacked)
return result['sqrt_ts']
def fit_models(stacked):
spectral_model_fit_1 = PowerLawSpectralModel(index = 2, amplitude="0.5e-12 cm-2 s-1 TeV-1", reference="1 TeV")
spectral_model_fit_1.amplitude.min = 0
spatial_model_fit_1 = PointSpatialModel(lon_0 = "0 deg", lat_0 = "0 deg", frame='galactic')
model_fit_1 = SkyModel(spectral_model_fit_1, spatial_model_fit_1, name='source 1 fit')
spectral_model_fit_2 = LogParabolaSpectralModel(alpha = 2, beta =0.01, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV")
spectral_model_fit_2.amplitude.min = 0
spectral_model_fit_2.beta.min = 0
spatial_model_fit_2 = GaussianSpatialModel(lon_0 = "0.4 deg", lat_0 = "0.15 deg", sigma=0.2*u.deg, frame='galactic')
model_fit_2 = SkyModel(spectral_model_fit_2, spatial_model_fit_2, name='source 2 fit')
spectral_model_fit_3 = PowerLawSpectralModel(index = 2, amplitude="3e-11 cm-2 s-1 TeV-1", reference="1 TeV")
spectral_model_fit_3.amplitude.min = 0
spatial_model_fit_3 = ShellSpatialModel(lon_0 = "0.06 deg", lat_0 = "0.6 deg", radius=0.5*u.deg,width=0.2*u.deg,frame='galactic')
model_fit_3 = SkyModel(spectral_model_fit_3, spatial_model_fit_3, name='source 3 fit')
stacked.models = [model_fit_1, model_fit_2, model_fit_3]
fit = Fit()
result = fit.run(stacked)
return stacked.models
def make_residual_map(stacked, models):
stacked.models = models
e = ExcessMapEstimator("0.1deg")
result = e.run(stacked)
return result['sqrt_ts']
def make_contribution_to_region(stacked, models, region):
spec = stacked.to_spectrum_dataset(region, containment_correction=True)
so1 = SkyModel(models[0].spectral_model)
spec.models = [so1]
npred_1 = Map.from_geom(spec.counts.geom)
npred_1.data = spec.npred_signal().data
so2 = SkyModel(models[1].spectral_model)
spec.models = [so2]
npred_2 = Map.from_geom(spec.counts.geom)
npred_2.data = spec.npred_signal().data
npred_2.data *= models[1].spatial_model.integrate_geom(spec.counts.geom).quantity.to_value('')
so3 = SkyModel(models[2].spectral_model)
spec.models = [so3]
npred_3 = Map.from_geom(spec.counts.geom)
npred_3.data = spec.npred_signal().data
npred_3.data *= models[2].spatial_model.integrate_geom(spec.counts.geom).quantity.to_value('')
return spec.excess, npred_1, npred_2, npred_3
if __name__ == "__main__":
path = Path(".")
observations = get_observations()
stacked = make_map_dataset(observations)
stacked = simulate_counts(stacked)
filename = path / "significance_map.fits"
ts_map = make_significance_map(stacked)
log.info(f"Writing {filename}")
ts_map.write(filename, overwrite=True)
filename = path / "best-fit-model.yaml"
models = fit_models(stacked)
log.info(f"Writing {filename}")
models.write(filename, overwrite=True, write_covariance=False)
filename = path / "residual_map.fits"
residual_map = make_residual_map(stacked, models)
log.info(f"Writing {filename}")
residual_map.write(filename, overwrite=True)
excess, npred_1, npred_2, npred_3 = make_contribution_to_region(stacked, models, REGION)
filename_excess = path / "excess_counts.fits"
log.info(f"Writing {filename_excess}")
excess.write(filename_excess, format="ogip", overwrite=True)
filename_source1 = path / "npred_1.fits"
log.info(f"Writing {filename_source1}")
npred_1.write(filename_source1, format="ogip", overwrite=True)
filename_source2 = path / "npred_2.fits"
log.info(f"Writing {filename_source2}")
npred_2.write(filename_source2, format="ogip", overwrite=True)
filename_source3 = path / "npred_3.fits"
log.info(f"Writing {filename_source3}")
npred_3.write(filename_source3, format="ogip", overwrite=True)
|
nilq/baby-python
|
python
|
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.utils as vutils
from torch.autograd import Variable
from model import _netlocalD, _netG
from pre_data import pre_data
from tqdm import tqdm
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
flag_use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if flag_use_cuda else "cpu")
class Trainer:
def __init__(self, opt) -> None:
self.opt = opt
self.init_model()
self.init_cfg()
def init_cfg(self):
if self.opt.manualSeed is None:
self.opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", self.opt.manualSeed)
random.seed(self.opt.manualSeed)
torch.manual_seed(self.opt.manualSeed)
if flag_use_cuda:
torch.cuda.manual_seed_all(self.opt.manualSeed)
cudnn.benchmark = True
# setup loss & optimizer
self.criterion = nn.BCELoss()
self.criterionMSE = nn.MSELoss()
self.optimizerD = optim.Adam(self.netD.parameters(), lr=self.opt.lr)
self.optimizerG = optim.Adam(self.netG.parameters(), lr=self.opt.lr)
#setpu paras
self.wtl2 = float(self.opt.wtl2)
self.overlapL2Weight = 10
self.input_real = torch.FloatTensor(self.opt.batchSize, 3, self.opt.imageSize, self.opt.imageSize)
self.input_cropped = torch.FloatTensor(self.opt.batchSize, 3, self.opt.imageSize, self.opt.imageSize)
self.label = torch.FloatTensor(self.opt.batchSize)
self.real_label = 1
self.fake_label = 0
self.real_center = torch.FloatTensor(self.opt.batchSize, 3, int(self.opt.imageSize/2), int(self.opt.imageSize/2))
if flag_use_cuda:
self.input_real, self.input_cropped,self.label = self.input_real.to(device),self.input_cropped.to(device), self.label.to(device)
self.real_center = self.real_center.to(device)
self.criterion.to(device)
self.criterionMSE.to(device)
print("Using %s" % device)
self.input_real = Variable(self.input_real)
self.input_cropped = Variable(self.input_cropped)
self.label = Variable(self.label)
self.real_center = Variable(self.real_center)
def init_model(self):
self.netG = _netG(self.opt)
# self.netG = _netG_block(self.opt)
if self.opt.netG != '':
self.netG.load_state_dict(torch.load(self.opt.netG, map_location=lambda storage, location: storage)['state_dict'])
self.netD = _netlocalD(self.opt)
if self.opt.netD != '':
self.netD.load_state_dict(torch.load(self.opt.netD,map_location=lambda storage, location: storage)['state_dict'])
if flag_use_cuda:
self.netD.to(device)
self.netG.to(device)
def train(self, dataloader_train, dataloader_valid = None, iter_max = None):
iter_max = iter_max if iter_max is not None else self.opt.niter
self.schedulerD = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizerD, iter_max)
self.schedulerG = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizerG, iter_max)
for epoch in range(iter_max):
pbar = tqdm(total = len(dataloader_train))
for i, data in enumerate(dataloader_train):
real_cpu, _ = data
self.real_center_cpu = real_cpu[:,:,int(self.opt.imageSize/4):int(self.opt.imageSize/4)+int(self.opt.imageSize/2),int(self.opt.imageSize/4):int(self.opt.imageSize/4)+int(self.opt.imageSize/2)]
batch_size = real_cpu.size(0)
self.input_real.resize_(real_cpu.size()).copy_(real_cpu)
self.input_cropped.resize_(real_cpu.size()).copy_(real_cpu)
self.real_center.resize_(self.real_center_cpu.size()).copy_(self.real_center_cpu)
self.input_cropped[:,0,int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred),int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred)] = 2*117.0/255.0 - 1.0
self.input_cropped[:,1,int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred),int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred)] = 2*104.0/255.0 - 1.0
self.input_cropped[:,2,int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred),int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred)] = 2*123.0/255.0 - 1.0
# train with real
self.netD.zero_grad()
self.label.resize_(batch_size).fill_(self.real_label)
self.label = torch.reshape(self.label, (self.label.shape[0], 1))
output = self.netD(self.real_center)
errD_real = self.criterion(output, self.label)
errD_real.backward()
D_x = output.mean()
# train with fake
fake = self.netG(self.input_cropped)
self.label.fill_(self.fake_label)
output = self.netD(fake.detach())
errD_fake = self.criterion(output, self.label)
errD_fake.backward()
D_G_z1 = output.mean()
errD = errD_real + errD_fake
self.optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
self.netG.zero_grad()
self.label.fill_(self.real_label) # fake labels are real for generator cost
output = self.netD(fake)
errG_D = self.criterion(output, self.label)
# errG_D.backward(retain_variables=True)
errG_norm = self.criterionMSE(fake,self.real_center)
self.wtl2Matrix = self.real_center.clone()
self.wtl2Matrix.fill_(self.wtl2*self.overlapL2Weight)
self.wtl2Matrix[:,:,int(self.opt.overlapPred):int(self.opt.imageSize/2 - self.opt.overlapPred),int(self.opt.overlapPred):int(self.opt.imageSize/2 - self.opt.overlapPred)] = self.wtl2
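                # Border pixels (the overlapPred-wide frame) keep the 10x
                # weight set above; the interior region is weighted by wtl2
                # alone.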
errG_l2 = (fake-self.real_center).pow(2)
errG_l2 = errG_l2 * self.wtl2Matrix
errG_l2 = errG_l2.mean()
errG = (1-self.wtl2) * errG_D + self.wtl2 * errG_l2
errG.backward()
D_G_z2 = output.mean()
self.optimizerG.step()
# print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f / %.4f l_D(x): %.4f l_D(G(z)): %.4f'
# % (epoch, iter_max, i, len(dataloader_train),
# errD.item(), errG_D.item(),errG_l2.item(), D_x,D_G_z1, ))
pbar.update(1)
pbar.set_description("[%d/%d][%d/%d], errG = %.4f"%(epoch, iter_max, i, len(dataloader_train), errG_norm.item()))
pbar.close()
self.schedulerD.step()
self.schedulerG.step()
if dataloader_valid is not None:
self.test(dataloader_valid, epoch)
# do checkpointing
torch.save({'epoch': 0,
'state_dict':self.netG.state_dict()},
                    'result/model/netG_streetview.pth')
torch.save({'epoch': 0,
'state_dict':self.netD.state_dict()},
'result/model/netlocalD.pth' )
def test(self, dataloader_test, epoch, flag_save = True):
with torch.no_grad():
errG_list = []
for i, data in enumerate(dataloader_test):
real_cpu, _ = data
self.real_center_cpu = real_cpu[:,:,int(self.opt.imageSize/4):int(self.opt.imageSize/4)+int(self.opt.imageSize/2),int(self.opt.imageSize/4):int(self.opt.imageSize/4)+int(self.opt.imageSize/2)]
batch_size = real_cpu.size(0)
self.input_real.resize_(real_cpu.size()).copy_(real_cpu)
self.input_cropped.resize_(real_cpu.size()).copy_(real_cpu)
self.real_center.resize_(self.real_center_cpu.size()).copy_(self.real_center_cpu)
self.input_cropped[:,0,int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred),int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred)] = 2*117.0/255.0 - 1.0
self.input_cropped[:,1,int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred),int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred)] = 2*104.0/255.0 - 1.0
self.input_cropped[:,2,int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred),int(self.opt.imageSize/4+self.opt.overlapPred):int(self.opt.imageSize/4+self.opt.imageSize/2-self.opt.overlapPred)] = 2*123.0/255.0 - 1.0
fake = self.netG(self.input_cropped)
errG = self.criterionMSE(fake, self.real_center)
recon_image = self.input_cropped.clone()
recon_image[:,:,int(self.opt.imageSize/4):int(self.opt.imageSize/4+self.opt.imageSize/2),int(self.opt.imageSize/4):int(self.opt.imageSize/4+self.opt.imageSize/2)] = fake
errG_list.append(errG.item())
if flag_save and i < 5:
vutils.save_image(real_cpu, 'result/real/real_samples_batch_%03d_%03d.png' % (epoch, i), normalize=True)
vutils.save_image(self.input_cropped, 'result/cropped/cropped_samples_batch_%03d_%03d.png' % (epoch, i), normalize=True)
vutils.save_image(recon_image, 'result/recon/recon_center_samples_batch_%03d_%03d.png' % (epoch, i), normalize=True)
print("errG = ", np.mean(errG_list))
# # custom weights initialization called on self.netG and self.netD
# def weights_init(self, m):
# classname = m.__class__.__name__
# if classname.find('Conv') != -1:
# m.weight.data.normal_(0.0, 0.02)
# elif classname.find('BatchNorm') != -1:
# m.weight.data.normal_(1.0, 0.02)
# m.bias.data.fill_(0)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0621
"""
Train a text generating LSTM on Slovenian poems and prose
- first train a few epochs on Slovenian poetry and prose (to learn basics of the language) (from <http://lit.ijs.si/>)
- afterwards train additional epochs on target texts (to fine-tune) (from I.D.I.O.T <http://id.iot.si/>)
Based on <https://github.com/fchollet/keras/commits/master/examples/lstm_text_generation.py> and <https://karpathy.github.io/2015/05/21/rnn-effectiveness/>.
"""
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Embedding, LSTM
from keras.utils.visualize_util import plot
import numpy as np
import random
import os
import codecs
import re
import sys
# defaults
epochs_all = 2
epochs_target = 100
maxlen = 40
step = 3
model_yaml = "./out/model.yaml"
model_png = "./out/model.png"
weights_all_ffmt = "./out/weights_all.{}.hdf5"
weights_target_ffmt = "./out/weights_target.{}.hdf5"
# read datasets
def read_text(dir):
text = ""
for filename in os.listdir(dir):
if filename.endswith(".txt"):
f = codecs.open(os.path.join(dir, filename), 'r', encoding='utf8')
t = f.read()
t = re.sub('\r', '', t)
t = re.sub('\t| +', ' ', t)
t = re.sub(u'…', '...', t)
t = re.sub(u'—', '-', t)
t = re.sub(u'»', '>', t)
t = re.sub(u'«', '<', t)
t = re.sub(u'’', "'", t)
t = re.sub(u'[^A-ZČĆŠŽÄËÏÖÜa-zčćšžäëïöüß0-9 .,!?:;+-~*/$%&()<>\'\n]', '', t)
            t = re.sub(r'([^ ]) +', r'\1 ', t)  # collapse repeated spaces (raw strings keep the backreference intact)
text += t
f.close()
print(" corpus '{}' (length {})".format(dir, len(text)))
return text
print("read datasets...")
text = ""
text += read_text("./slovenian-poetry")
text += read_text("./slovenian-prose")
text_target = read_text("./idiot")
text += text_target
chars = set(text)
print(" total length: {}, chars: {}".format(len(text), len(chars)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
def vectorization(text, chars, maxlen, step):
# cut all text in semi-redundant sequences of maxlen characters
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print(" cut sentences: {}".format(len(sentences)))
# one-hot encoding for X and y
#X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
#y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
#for i, sentence in enumerate(sentences):
# for t, char in enumerate(sentence):
# X[i, t, char_indices[char]] = 1
# y[i, char_indices[next_chars[i]]] = 1
# character embeddings for X, one-hot encoding for y
X = np.zeros((len(sentences), maxlen), dtype=np.int32)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t] = char_indices[char]
y[i, char_indices[next_chars[i]]] = 1
print(" shapes: {} {}".format(X.shape, y.shape))
return X, y
print("vectorization...")
X, y = vectorization(text, chars, maxlen=maxlen, step=step)
X_target, y_target = vectorization(text_target, chars, maxlen=maxlen, step=step)
# build model
# (2 stacked LSTM)
print("build model...")
model = Sequential()
model.add(Embedding(input_dim=len(chars), output_dim=512, input_length=maxlen, mask_zero=True))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=True))  # input shape is defined by the Embedding layer
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
with open(model_yaml, 'w') as f:
model.to_yaml(stream=f)
model.summary()
plot(model, to_file=model_png, show_shapes=True)
model.compile(loss='categorical_crossentropy', optimizer='adam')
# train model on all datasets
def sample(a, temperature=1.0):
# helper function to sample an index from a probability array
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1, a, 1))
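# Lower temperature sharpens the distribution (conservative, repetitive text);
# higher temperature flattens it (more diverse but riskier output).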
print("train model on all datasets...")
for iteration in range(0, epochs_all):
print()
print('-' * 50)
print('Iteration', iteration)
if os.path.isfile(weights_all_ffmt.format(iteration)):
model.load_weights(weights_all_ffmt.format(iteration))
continue
model.fit(X, y, batch_size=128, nb_epoch=1)
model.save_weights(weights_all_ffmt.format(iteration), overwrite=True)
# output some sample generated text
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print(u'----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
#x = np.zeros((1, maxlen, len(chars)))
x = np.zeros((1, maxlen))
for t, char in enumerate(sentence):
#x[0, t, char_indices[char]] = 1.
x[0, t] = char_indices[char]
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
print("train model on target datasets...")
for iteration in range(epochs_all, epochs_target):
print()
print('-' * 50)
print('Iteration', iteration)
if os.path.isfile(weights_target_ffmt.format(iteration)):
model.load_weights(weights_target_ffmt.format(iteration))
continue
model.fit(X_target, y_target, batch_size=128, nb_epoch=1)
model.save_weights(weights_target_ffmt.format(iteration), overwrite=True)
# output some sample generated text
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print(u'----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
#x = np.zeros((1, maxlen, len(chars)))
x = np.zeros((1, maxlen))
for t, char in enumerate(sentence):
#x[0, t, char_indices[char]] = 1.
x[0, t] = char_indices[char]
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
import os
import torch
import numpy as np
from sklearn.preprocessing import StandardScaler
class QSODataset(torch.utils.data.Dataset):
"""QSO spectra iterator."""
def __init__(self, filepath, partition, wavelength_threshold=1290.,
subsample=1, log_transform=False, standardize=True,
drop_outliers=False, scaler=None):
self.log_transform = log_transform
self.standardize = standardize
self.scaler = scaler
print(f"Creating {partition} dataset from file: {filepath}")
        archive = np.load(filepath)
        data = archive[partition].astype(np.float32)
        wave = archive['wave'].astype(np.float32)
data = data[:, (wave >= 1191.5) & (wave < 2900.)]
wave = wave[(wave >= 1191.5) & (wave < 2900.)]
data, wave = data[:, ::subsample], wave[::subsample]
# Drop spectra with negative flux values
n = len(data)
mask = ~np.any(data < 0, axis=1)
data = data[mask]
        print(f"Dropped {n - len(data)} spectra with negative flux values.")
if log_transform:
data = np.log(data)
if standardize:
if not self.scaler:
self.scaler = StandardScaler()
self.scaler.fit(data)
data = self.scaler.transform(data)
# Drop spectra with flux >5 sig from dataset mean by wavelength
if drop_outliers:
n = len(data)
mask = ~np.any(np.abs(data) > 5., axis=1)
data = data[mask]
print(f"Dropped {n - len(data)} spectra as outliers.")
print("Data shape:", data.shape)
self.data = torch.from_numpy(data)
self.idx = int(np.sum(wave < wavelength_threshold))
self.wave = wave
self.lya_wave = wave[:self.idx]
        # scaler attributes only exist when standardization is enabled
        if self.scaler is not None:
            self.mean_ = self.scaler.mean_[:self.idx]
            self.scale_ = self.scaler.scale_[:self.idx]
        else:
            self.mean_, self.scale_ = 0.0, 1.0
self.data_dim = self.idx
self.context_dim = len(wave) - self.idx
def inverse_transform(self, x):
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
if self.standardize:
if x.shape[1] == self.data_dim + self.context_dim:
x = self.scaler.inverse_transform(x)
elif x.shape[1] == self.data_dim:
x = x * self.scale_ + self.mean_
if self.log_transform:
x = np.exp(x)
return x
def __getitem__(self, i):
example = self.data[i]
data = example[:self.idx]
context = example[self.idx:]
return data, context
def __len__(self):
return len(self.data)
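# Minimal usage sketch (the .npz file name and partition key here are
# hypothetical; the archive is assumed to hold partition arrays plus 'wave'):
#   train_set = QSODataset("spectra.npz", "train", log_transform=True)
#   loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
#   flux, context = next(iter(loader))  # blue-side flux and red-side context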
|
nilq/baby-python
|
python
|
from xnemogcm import open_domain_cfg, open_nemo
from xnemogcm.nemo import nemo_preprocess
import os
from pathlib import Path
import pytest
import xarray as xr
TEST_PATH = Path(os.path.dirname(os.path.abspath(__file__)))
def test_options_for_files():
"""Test options to provide files"""
domcfg = open_domain_cfg(
datadir=TEST_PATH / "data/domcfg_1_file",
)
datadir = TEST_PATH / "data/nemo"
# 1. Provide datadir and no files
open_nemo(datadir=datadir, files=None, domcfg=domcfg)
open_nemo(datadir=datadir, files="", domcfg=domcfg)
open_nemo(datadir=datadir, files=[], domcfg=domcfg)
# 2. Provide datadir and files
files = ["BASIN_grid_T.nc", "BASIN_grid_U.nc"]
open_nemo(datadir=datadir, files=files, domcfg=domcfg)
# 3. Don't provide datadir but files
open_nemo(datadir=None, files=datadir.glob("*grid*.nc"), domcfg=domcfg)
open_nemo(datadir="", files=datadir.glob("*grid*.nc"), domcfg=domcfg)
open_nemo(datadir=[], files=datadir.glob("*grid*.nc"), domcfg=domcfg)
    # 4. Don't provide anything => error
    # (a bare try/except-pass would succeed even if no error were raised)
    with pytest.raises(FileNotFoundError):
        open_nemo(datadir=None, files=None, domcfg=domcfg)
def test_no_file_provided_or_wrong_name():
"""Test exception raised if no file is found"""
domcfg = open_domain_cfg(
datadir=TEST_PATH / "data/domcfg_1_file",
)
    with pytest.raises(FileNotFoundError):
        open_nemo(datadir=TEST_PATH, domcfg=domcfg)
    with pytest.raises(ValueError):
        open_nemo(
            files=(TEST_PATH / "data/domcfg_1_file").glob("domain*"), domcfg=domcfg
        )
def test_open_nemo():
"""Test opening of nemo files"""
domcfg = open_domain_cfg(
datadir=TEST_PATH / "data/domcfg_1_file",
)
nemo_ds = open_nemo(
datadir=TEST_PATH / "data/nemo",
domcfg=domcfg,
)
def test_use_preprocess():
"""Test opening of one nemo file and preprocess it by hand"""
domcfg = open_domain_cfg(
datadir=TEST_PATH / "data/domcfg_1_file",
)
ds_raw = xr.open_dataset(TEST_PATH / "data/nemo/BASIN_grid_T.nc")
ds_raw.encoding["source"] = "BASIN_grid_T.nc"
ds = nemo_preprocess(ds_raw, domcfg)
assert "x_c" in ds
assert "t" in ds
assert ds.thetao.attrs["arakawa_point_type"] == "T"
|
nilq/baby-python
|
python
|
"""Unit test package for ipgeo."""
|
nilq/baby-python
|
python
|
"""Top-level package for SimpleBBox."""
__author__ = """Sergey Matyunin"""
__email__ = 'serge-m@users.noreply.github.com'
__version__ = '0.0.10'
|
nilq/baby-python
|
python
|
import os
import json
import datetime
from performance.driver.core.classes import Reporter
from performance.driver.core.eventfilters import EventFilter
from performance.driver.core.events import StartEvent, ParameterUpdateEvent
class RawReporter(Reporter):
"""
The **Raw Reporter** is creating a raw dump of the results in the results
folder in JSON format.
::
reporters:
- class: reporter.RawReporter
# Where to dump the results
filename: "results-raw.json"
# [Optional] Include event traces
events:
# [Optional] Include events that pass through the given expression
include: FilterExpression
# [Optional] Exclude events that pass through the given expression
exclude: FilterExpression
# [Optional] Group the events to their traces
traces: yes
The JSON structure of the data included is the following:
.. code-block:: js
{
// Timing information
"time": {
"started": "",
"completed": ""
},
// The configuration used to run this test
"config": {
...
},
// The values for the indicators
"indicators": {
"indicator": 1.23,
...
},
// The metadata of the run
"meta": {
"test": "1-app-n-instances",
...
},
// Raw dump of the timeseries for every phase
"raw": [
{
// One or more status flags collected in this phase
"flags": {
"status": "OK"
},
// The values of all parameter (axes) in this phase
"parameters": {
"apps": 1,
"instances": 1
},
// The time-series values for every phase
"values": {
"metricName": [
// Each metric is composed of the timestamp of it's
// sampling time and the value
[
1499696193.822527,
11
],
...
]
}
}
],
// Summarised dump of the raw timeseries above, in the same
// structure
"sum": [
{
// One or more status flags collected in this phase
"flags": {
"status": "OK"
},
// The values of all parameter (axes) in this phase
"parameters": {
"apps": 1,
"instances": 1
},
// The summarised values of each timeseries
"values": {
"metricName": {
// Here are the summarisers you selected in the `metric`
// configuration parameter.
"sum": 123.4,
"mean": 123.4,
...
}
}
}
]
}
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timeStarted = datetime.datetime.now().isoformat()
# Do some delayed-initialization when the system is ready
self.eventbus.subscribe(self.handleStartEvent, order=10,
events=(StartEvent, ))
# Event-tracing configuration
self.includeFilter = None
self.excludeFilter = None
self.eventTraces = {}
def handleStartEvent(self, event):
"""
Start tracing, if requested
"""
# Check the config and subscribe
config = self.getRenderedConfig()
if 'events' in config:
# Get events config
eventsConfig = config.get('events')
if not type(eventsConfig) is dict:
eventsConfig = {}
# Config include/exclude filter
includeExpr = eventsConfig.get('include', '*')
self.logger.info("Events collected: {}".format(includeExpr))
self.includeFilter = EventFilter(includeExpr).start(None, self.handleInclude)
if 'exclude' in eventsConfig:
# TODO: When we have negation on the EventFilter fix this
raise ValueError('Exclude filter is currently not supported')
# Start subscription to all events
self.eventbus.subscribe(self.handleEvent, order=10)
def handleInclude(self, event):
"""
Handle events passing through the include filter
"""
# TODO: When we have negation on the EventFilter handle negative matches
# Locate the tracing bin where to place this event
for i in event.traceids:
if i in self.eventTraces:
        if event not in self.eventTraces[i]:
self.eventTraces[i].add(event)
return
def handleEvent(self, event):
"""
Handle incoming event
"""
# A ParameterUpdate event starts a new trace
if type(event) is ParameterUpdateEvent:
trace = min(filter(lambda x: type(x) is int, event.traceids))
self.eventTraces[trace] = set([event])
# Every other event passes through the include filter
self.includeFilter.handle(event)
def dump(self, summarizer):
"""
    Dump summarizer values to the results JSON file
"""
    # Get the filename to write into
config = self.getRenderedConfig()
filename = config.get('filename', 'results-raw.json')
# Create missing directory for the files
os.makedirs(os.path.abspath(os.path.dirname(filename)), exist_ok=True)
# Prepare results object
results = {
'time': {
'started': self.timeStarted,
'completed': datetime.datetime.now().isoformat()
},
'config': self.getRootConfig().config,
'raw': summarizer.raw(),
'sum': summarizer.sum(),
'indicators': summarizer.indicators(),
'meta': self.getMeta()
}
# Collect results
if self.eventTraces:
traces = []
for traceEvents in self.eventTraces.values():
root = next(filter(
lambda x: type(x) is ParameterUpdateEvent, traceEvents))
events = []
# Serialize events
for event in traceEvents:
events.append(event.toDict())
# Compose trace record
traces.append({
'parameters': root.parameters,
'events': events
})
# Put traces on the result
results['events'] = traces
# Dump the results
self.logger.info("Saving raw results on {}".format(filename))
with open(filename, 'w') as f:
f.write(json.dumps(results, sort_keys=True, indent=2))
|
nilq/baby-python
|
python
|
# SecretPlots
# Copyright (c) 2019. SecretBiology
#
# Author: Rohit Suratekar
# Organisation: SecretBiology
# Website: https://github.com/secretBiology/SecretPlots
# Licence: MIT License
# Creation: 05/10/19, 7:52 PM
#
# Bar Locations
import numpy as np
from SecretPlots.constants import *
from SecretPlots.managers._axis import AxisManager
from SecretPlots.managers.location._base import LocationManager
from SecretPlots.objects import Data
from SecretPlots.utils import Log
class BarLocations(LocationManager):
@property
def plot_type(self):
return PLOT_BAR
def validate(self, data: Data):
self._log.info("Valid data is provided for the BarPlot")
return True
def _simple_bars(self, data: Data):
self._log.info("Calculating positions for simple Bars")
points = []
for loc in data.positions:
points.append((
self.major + loc[1] * (self.width + self.major_gap),
self.minor
))
return points
def _stacked_bars(self, data: Data):
self._log.info("Calculating positions for Stacked Bars")
points = []
stack = None
last_col = 0
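        # "stack" tracks the running top of the bars in the current column;
        # "last_col" detects a move to a new column, where the stack resets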
for loc, value in zip(data.positions, data.value):
if np.isnan(value):
value = 0
self._log.warn("NaN value found, ignoring its effect")
m1, m2 = loc
if stack is None:
stack = self.minor
if m1 != last_col:
stack = self.minor
last_col += 1
points.append((
self.major + m1 * (self.width + self.major_gap),
stack
))
stack += value + self.minor_gap
return points
def get(self, data: Data) -> list:
self.validate(data)
if data.type in [Data.SINGLE_VALUED, Data.SIMPLE_CATEGORICAL]:
return self._simple_bars(data)
elif data.type in [Data.COMPLEX_CATEGORICAL, Data.MATRIX]:
return self._stacked_bars(data)
elif data.type == Data.POINTS:
if data.is_single_point:
return self._simple_bars(data)
else:
return self._stacked_bars(data)
else:
            self._log.error("This data type is not supported for BarPlot")
class BarGroupLocations(LocationManager):
def __init__(self, am: AxisManager, om, log: Log):
super().__init__(am, om, log)
self.group_gap = am.group_gap
@property
def plot_type(self):
return PLOT_GROUPED_BAR
def validate(self, data: Data):
        self._log.info("Data validated for GroupedBarPlot")
return True
def _simple_bars(self, data: Data):
self._log.info("Calculating positions for simple Bars")
points = []
for loc in data.positions:
points.append((
self.major + loc[1] * (self.width + self.major_gap),
self.minor
))
return points
def _grouped_bars(self, data: Data):
self._log.info("Calculating positions for Grouped Bars")
points = []
bars = -1
for loc, value in zip(data.positions, data.value):
m1, m2 = loc
bars += 1
points.append((
self.major + bars * (self.width + self.major_gap) +
m1 * self.group_gap,
self.minor
))
return points
def get(self, data: Data) -> list:
self.validate(data)
if data.type in [Data.SINGLE_VALUED, Data.SIMPLE_CATEGORICAL]:
return self._simple_bars(data)
elif data.type in [Data.COMPLEX_CATEGORICAL, Data.MATRIX]:
return self._grouped_bars(data)
elif data.type == Data.POINTS:
if data.is_single_point:
return self._simple_bars(data)
else:
return self._grouped_bars(data)
else:
            self._log.error("This data type is not supported for "
                            "GroupedBarPlot")
class HistLocations(LocationManager):
@property
def plot_type(self):
return PLOT_HIST
def __init__(self, am: AxisManager, om, log: Log, bins=None):
super().__init__(am, om, log)
        if bins is None:
            bins = "auto"
self.bins = bins
self._hist_options = {}
def validate(self, data: Data):
if data.type != Data.SIMPLE_CATEGORICAL:
            self._log.warn("Data will be flattened for histogram")
self._log.info("Valid data is provided for Histogram")
def get(self, data: Data):
self.validate(data)
        counts, _ = np.histogram(data.value, self.bins, **self._hist_options)
        return [(self.major + x * (self.width + self.major_gap), self.minor)
                for x in range(len(counts))]
def hist_options(self, **kwargs):
self._hist_options = {**self._hist_options, **kwargs}
|
nilq/baby-python
|
python
|
#Naive vowel removal.
removeVowels = "EQuis sapiente illo autem mollitia alias corrupti reiciendis aut. Molestiae commodi minima omnis illo officia inventore. Quisquam sint corporis eligendi corporis voluptatum eos. Natus provident doloremque reiciendis vel atque quo. Quidem"
charToRemove = ['a', 'e', 'i', 'o', 'u']
print(removeVowels)
for char in charToRemove:
removeVowels = removeVowels.replace(char, "")
removeVowels = removeVowels.replace(char.upper(), "")
print(removeVowels)
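# A more idiomatic single-pass alternative (a sketch using str.translate):
#   vowels = "aeiouAEIOU"
#   print(text.translate(str.maketrans("", "", vowels)))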
|
nilq/baby-python
|
python
|
#Write a program that asks the user for a number n and prints the sum of the numbers 1 to n
start = 1
print("Please input your number")
end = input()
total = 0  # renamed from "sum" to avoid shadowing the builtin
while not end.isdigit():
    print("Your input is not a valid number, please try again")
    end = input()
for i in range(start, int(end) + 1):
    total = total + i
print("Sum from 1 to {} is {}".format(end, total))
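# Note: the loop is equivalent to the closed form n*(n+1)//2,
# e.g. int(end) * (int(end) + 1) // 2 gives the same total.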
|
nilq/baby-python
|
python
|
import os.path
import pathlib
import subprocess
import sys
import urllib.parse
from typing import Dict, List, Optional, Tuple
# Path component is a node in a tree.
# It's the equivalent of a short file/directory name in a file system.
# In our abstraction, it's represented as arbitrary bag of attributes
TestPathComponent = Dict[str, str]
# TestPath is a full path to a node in a tree from the root
# It's the equivalent of an absolute file name in a file system
TestPath = List[TestPathComponent]
def parse_test_path(tp_str: str) -> TestPath:
"""Parse a string representation of TestPath."""
if tp_str == '':
return []
ret = [] # type: TestPath
for component_str in tp_str.split('#'):
if component_str == '&':
            # Technically, this should be mapped to {None: None}. But because
            # the TestPath definition is now Dict[str, str], not
            # Dict[Optional[str], Optional[str]], we cannot add it. Fixing this
            # definition needs to fix callers not to assume they are always
            # str. In practice, this is a rare case. Do not append
            # {None: None} now...
# ret.append({None: None})
continue
first = True
component = {}
for kv in component_str.split('&'):
if first:
first = False
if kv:
(component['type'], component['name']) = _parse_kv(kv)
else:
(k, v) = _parse_kv(kv)
component[k] = v
ret.append(component)
return ret
def _parse_kv(kv: str) -> Tuple[str, str]:
kvs = kv.split('=')
if len(kvs) != 2:
raise ValueError('Malformed TestPath component: ' + kv)
return (_decode_str(kvs[0]), _decode_str(kvs[1]))
def unparse_test_path(tp: TestPath) -> str:
"""Create a string representation of TestPath."""
ret = []
for component in tp:
s = ''
pairs = []
if component.get('type', None) and component.get('name', None):
s += _encode_str(component['type']) + \
'=' + _encode_str(component['name'])
for k, v in component.items():
if k not in ('type', 'name'):
pairs.append((k, v))
else:
for k, v in component.items():
if not k or not v:
continue
pairs.append((k, v))
if len(pairs) == 0:
s = '&'
pairs = sorted(pairs, key=lambda p: p[0])
for (k, v) in pairs:
s += '&'
s += _encode_str(k) + '=' + _encode_str(v)
ret.append(s)
return '#'.join(ret)
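# Round-trip example (a sketch):
#   parse_test_path('class=FooTest#testcase=test_bar')
#     -> [{'type': 'class', 'name': 'FooTest'}, {'type': 'testcase', 'name': 'test_bar'}]
#   and unparse_test_path() of that list yields the original string again.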
def _decode_str(s: str) -> str:
return urllib.parse.unquote(s)
def _encode_str(s: str) -> str:
return s.replace('%', '%25').replace('=', '%3D').replace('#', '%23').replace('&', '%26')
def _relative_to(p: pathlib.Path, base: str) -> pathlib.Path:
if sys.version_info[0:2] >= (3, 6):
return p.resolve(strict=False).relative_to(base)
else:
        try:
            resolved = p.resolve()
        except (OSError, RuntimeError):
            # resolve() may fail on missing paths or symlink loops (< 3.6)
            resolved = p
return resolved.relative_to(base)
class FilePathNormalizer:
"""Normalize file paths based on the Git repository root
    Some test runners output absolute file paths. This is not preferable when
making statistical data on tests as the absolute paths can vary per machine
or per run. FilePathNormalizer guesses the relative paths based on the Git
repository root.
"""
def __init__(self,
base_path: Optional[str] = None,
no_base_path_inference: bool = False):
self._base_path = base_path
self._no_base_path_inference = no_base_path_inference
self._inferred_base_path = None # type: Optional[str]
def relativize(self, p: str) -> str:
return str(self._relativize(pathlib.Path(os.path.normpath(p))))
def _relativize(self, p: pathlib.Path) -> pathlib.Path:
if not p.is_absolute():
return p
if self._base_path:
return _relative_to(p, self._base_path)
if self._no_base_path_inference:
return p
if not self._inferred_base_path:
self._inferred_base_path = self._auto_infer_base_path(p)
if self._inferred_base_path:
return _relative_to(p, self._inferred_base_path)
return p
def _auto_infer_base_path(self, p: pathlib.Path) -> Optional[str]:
p = p.parent
while p != p.root and not p.exists():
p = p.parent
try:
toplevel = subprocess.check_output(
['git', 'rev-parse', '--show-superproject-working-tree'],
cwd=str(p),
stderr=subprocess.DEVNULL,
universal_newlines=True).strip()
if toplevel:
return toplevel
return subprocess.check_output(
['git', 'rev-parse', '--show-toplevel'],
cwd=str(p),
stderr=subprocess.DEVNULL,
universal_newlines=True).strip()
except subprocess.CalledProcessError as e:
# Cannot infer the Git repo. Continue with the abs path...
return None
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# 4DML Transformation Utility
#
# (C) 2002-2006 Silas S. Brown (University of Cambridge Computer Laboratory,
# Cambridge, UK, http://ssb22.user.srcf.net )
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml_in import makePrintable
from cli import parseCommandLine
from error import TransformError
import version, client, stats
import sys,os
try:
sys.setcheckinterval(100) # default 10
except: pass # hack for jython
def main():
try:
(runGUI,model,input)=parseCommandLine()
stats.numPoints = len(input.thePoints)
# print makePrintable(input.convertToXML())
if runGUI:
from gui import doGUI
doGUI(input, model, runGUI==2)
# **** don't like the "magic" number 2
else:
sys.stderr.write("Transforming...")
result = client.doTransform((input,model))
result = makePrintable(result)
sys.stderr.write(" done\n")
print result
except TransformError:
sys.stderr.write(" error\n")
stats.was_error = 1
try:
useAnsi = os.environ.has_key("COLORTERM")
except NameError:
useAnsi = 1 # jython hack
if useAnsi: ansiColour(15)
sys.stderr.write("%s\n" % (sys.exc_info()[1],))
if useAnsi: ansiColour(7)
sys.exit(1)
def ansiColour(foreground=15,background=0):
sys.stderr.write("\x1b[%dm\x1b[%d;%dm" % ((background&7)+40,(foreground&8)!=0,(foreground&7)+30))
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from click.testing import CliRunner
from flag_slurper.cli import cli
from flag_slurper.autolib.models import CredentialBag, Credential
from flag_slurper.conf.project import Project
def test_add_credentials(db):
runner = CliRunner()
result = runner.invoke(cli, ['creds', 'add', 'root', 'cdc'])
assert result.exit_code == 0
assert result.output == "[+] Added root:cdc\n"
count = CredentialBag.select().where(CredentialBag.username == 'root', CredentialBag.password == 'cdc').count()
assert count == 1
def test_ls_credentials(db):
CredentialBag.create(username='root', password='cdc')
runner = CliRunner()
result = runner.invoke(cli, ['creds', 'ls'])
assert result.exit_code == 0
assert result.output == "Username:Password\nroot:cdc\n"
def test_rm_credential(db):
CredentialBag.create(username='root', password='cdc')
runner = CliRunner()
result = runner.invoke(cli, ['creds', 'rm', 'root', 'cdc'])
assert result.exit_code == 0
count = CredentialBag.select().where(CredentialBag.username == 'root', CredentialBag.password == 'cdc').count()
assert count == 0
def test_rm_credentials(db):
CredentialBag.create(username='root', password='cdc')
CredentialBag.create(username='root', password='root')
runner = CliRunner()
result = runner.invoke(cli, ['creds', 'rm', 'root'])
assert result.exit_code == 0
count = CredentialBag.select().where(CredentialBag.username == 'root').count()
assert count == 0
def test_show_empty_creds(db):
runner = CliRunner()
result = runner.invoke(cli, ['creds', 'show', 'root'])
assert result.exit_code == 3
assert result.output == "No credentials matching this query\n"
def test_show_username(service):
bag = CredentialBag.create(username='root', password='cdc')
Credential.create(bag=bag, service=service, state='works')
runner = CliRunner()
result = runner.invoke(cli, ['creds', 'show', 'root:cdc'])
assert result.exit_code == 0
assert result.output == "Credential: root:cdc\n" \
"------------ [ Found Credentials ] ------------\n" \
"1/www.team1.isucdc.com:80: works\n\n\n\n"
def test_show_empty_bag(db):
CredentialBag.create(username='root', password='cdc')
runner = CliRunner()
result = runner.invoke(cli, ['creds', 'show', 'root:cdc'])
assert result.exit_code == 0
assert result.output == "Credential: root:cdc\n" \
"------------ [ Found Credentials ] ------------\n" \
"This credential bag has no hits\n\n\n\n"
def test_creds_no_project():
p = Project.get_instance()
p.project_data = None
runner = CliRunner()
result = runner.invoke(cli, ['-np', 'creds', 'ls'])
assert result.exit_code == 4
assert result.output == "[!] Credentials commands require an active project\n"
|
nilq/baby-python
|
python
|
import os
from dotenv import dotenv_values
from algofi_amm.v0.client import AlgofiAMMTestnetClient
from ..utils import compiledContract, Account
from algofi_amm.v0.client import AlgofiAMMClient
from algosdk.v2client.algod import AlgodClient
from algosdk.future import transaction
from random import randint
def startup():
"""
Initialize an algofi amm testnet client and a creator account.
"""
# Local Algod Address
ALGOD_TOKEN = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
ALGOD_ADDRESS = "http://localhost:4001"
creator_account = get_creator_account()
    # We have to use a local algod client because the AlgoExplorer API no longer supports the GET requests we need
amm_client = AlgofiAMMTestnetClient(
algod_client=AlgodClient(ALGOD_TOKEN, ALGOD_ADDRESS),
indexer_client=None,
user_address=creator_account.getAddress(),
)
return amm_client, creator_account
def get_creator_account():
"""
    Securely load the key-pair from a mnemonic file.
    Expects a .env file in the testing folder containing: mnemonic = <your 25 words>
    """
my_path = os.path.abspath(os.path.dirname(__file__))
ENV_PATH = os.path.join(my_path, ".env")
user = dotenv_values(ENV_PATH)
return Account.FromMnemonic(user["mnemonic"])
def newTestToken(client: AlgofiAMMClient, creator: Account) -> int:
"""
Transaction to create a new test asset.
"""
randomNumber = randint(0, 99)
txn = transaction.AssetConfigTxn(
sender=creator.getAddress(),
sp=client.algod.suggested_params(),
total=10**12,
default_frozen=False,
unit_name=f"UST{randomNumber}",
asset_name=f"USTest{randomNumber}",
manager=creator.getAddress(),
reserve=None,
freeze=None,
clawback=None,
strict_empty_address_check=False,
url=None,
metadata_hash=None,
decimals=0,
)
# Sign with secret key of creator
stxn = txn.sign(creator.getPrivateKey())
# Send the transaction to the network and retrieve the txid.
txid = client.algod.send_transaction(stxn)
print("Asset Creation Transaction ID: {}".format(txid))
# Wait for the transaction to be confirmed
confirmed_txn = transaction.wait_for_confirmation(client.algod, txid, 4)
print("TXID: ", txid)
print("Result confirmed in round: {}".format(confirmed_txn["confirmed-round"]))
try:
ptx = client.algod.pending_transaction_info(txid)
us_test_id = ptx["asset-index"]
# print(client.indexer.accounts(asset_id=us_test_id)["accounts"]["created-assets"])
return us_test_id
except Exception as e:
print(e)
def update_metapool(algod_client: AlgodClient, creator: Account, metapool_app_id: int):
"""
Update an Existing Metapool
"""
approval_program, clear_program = compiledContract(algod_client)
# create unsigned transaction
txn = transaction.ApplicationUpdateTxn(
creator.getAddress(),
algod_client.suggested_params(),
metapool_app_id,
approval_program,
clear_program,
)
# sign, send, await
stxn = txn.sign(creator.getPrivateKey())
txid = algod_client.send_transaction(stxn)
confirmed_txn = transaction.wait_for_confirmation(algod_client, txid, 4)
print("TXID: ", txid)
print("Result confirmed in round: {}".format(confirmed_txn["confirmed-round"]))
try:
ptx = algod_client.pending_transaction_info(txid)
app_id = ptx["txn"]["txn"]["apid"]
print("Updated existing app-id: ", app_id)
except Exception as e:
print(e)
def is_close(a, b, e=1):
return abs(a - b) <= e
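# e.g. is_close(100, 101) is True with the default tolerance e=1,
# while is_close(100, 102) is False.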
|
nilq/baby-python
|
python
|
import unittest
import sys
from gluon.globals import Request
db = test_db
#execfile("applications/Problematica/controllers/default.py", globals())
class TestClass(unittest.TestCase):
# def setUp(self):
#request = Request() # Use a clean Request object
def test_search(self):
output_id = []
user_list = [5]
#input for the method
output_users = PicaUser.search("Khoa")
for users in output_users:
output_id.append(users.get_id())
self.assertEqual(user_list, output_id)
def test_search2(self):
output_id = []
user_list = []
#input for the method
output_users = PicaUser.search("axasfqsfdasd")
for users in output_users:
output_id.append(users.get_id())
self.assertEqual(user_list, output_id)
def test_is_found_in_database(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
self.assertTrue(test_user.is_found_in_database())
def test_is_found_in_database2(self):
test_user_id = 6
test_user = PicaUser(test_user_id)
self.assertFalse(test_user.is_found_in_database())
def test_is_user_same_as(self):
test_user_id_1 = 1
test_user_id_2 = 2
test_user_1 = PicaUser(test_user_id_1)
test_user_2 = PicaUser(test_user_id_2)
#We want false because the 2 users are clearly not the same
self.assertFalse(test_user_1.is_user_same_as(test_user_2))
def test_get_id(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
self.assertEqual(test_user.get_id(), test_user_id)
def test_get_bio(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
test_bio = "Hi I'm Khoa Luong :)"
self.assertEqual(test_user.get_bio(), test_bio)
def test_get_academic_fields(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
test_acad_fields = "Video Games :)"
self.assertEqual(test_user.get_academic_fields(), test_acad_fields)
def test_firstname(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
test_firstname = "Khoa"
self.assertEqual(test_user.get_firstname(), test_firstname)
def test_firstname2(self):
test_user_id = 2
test_user = PicaUser(test_user_id)
test_firstname = "kfir"
self.assertEqual(test_user.get_firstname(), test_firstname)
def test_lastname(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
test_lastname = "Luong"
self.assertEqual(test_user.get_lastname(), test_lastname)
def test_get_capitalized_fullname(self):
test_user_id = 2
test_user = PicaUser(test_user_id)
test_caps_fullname = "Kfir Dolev"
self.assertEqual(test_user.get_capitalized_fullname(), test_caps_fullname)
def test_get_URL(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
test_user_url = "/profile/5"
self.assertEqual(test_user.get_URL(), test_user_url)
def test_get_submitted_solutions(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
solutions = test_user.get_submitted_solutions()
self.assertEqual(solutions[0].get_id(), 31)
def test_get_solved_problems(self):
test_user_id = 5
empty_list = []
test_user = PicaUser(test_user_id)
solved_problems = test_user.get_solved_problems()
self.assertEqual(solved_problems, empty_list)
def test_get_total_bounty_won(self):
test_user_id = 2
test_bounty = 1100
test_user = PicaUser(test_user_id)
self.assertEqual(test_user.get_total_bounty_won(), test_bounty)
def test_get_num_problems_solved(self):
test_user_id = 5
test_num_solved = 0
test_user = PicaUser(test_user_id)
self.assertEqual(test_user.get_num_problems_solved(), test_num_solved)
def test_get_num_problems_solved2(self):
test_user_id = 2
test_num_solved = 1
test_user = PicaUser(test_user_id)
self.assertEqual(test_user.get_num_problems_solved(), test_num_solved)
def test_get_donations(self):
test_user_id = 4
test_user = PicaUser(test_user_id)
test_donation_id = 5
test_donation = PicaDonation(test_donation_id)
donations = test_user.get_donations()
        if len(donations) > 0:
            self.assertEqual(donations[0].get_amount(), test_donation.get_amount())
        else:
            self.fail("expected at least one donation")
def test_get_donated_problems(self):
test_user_id = 4
test_user = PicaUser(test_user_id)
donated_problems = test_user.get_donated_problems()
test_problem_id = 45
        if len(donated_problems) > 0:
            self.assertEqual(donated_problems[0].get_id(), test_problem_id)
        else:
            self.fail("expected at least one donated problem")
def test_get_donated_problems2(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
donated_problems = test_user.get_donated_problems()
self.assertEqual(len(donated_problems), 2)
def test_get_total_money_donated(self):
test_user_id = 4
test_user = PicaUser(test_user_id)
test_donation_total = 120
self.assertEqual(test_user.get_total_money_donated(), test_donation_total)
def test_get_total_money_donated2(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
test_donation_total = 1000
self.assertEqual(test_user.get_total_money_donated(), test_donation_total)
def test_get_clean_total_money_donated(self):
test_user_id = 5
test_user = PicaUser(test_user_id)
test_clean_donation_total = "1.0K"
self.assertEqual(test_user.get_clean_total_money_donated(), test_clean_donation_total)
def test_set_bio(self):
test_user_id = 5
test_new_bio = "Hi I'm Khoa"
test_user = PicaUser(test_user_id)
new_bio = test_user.set_bio(test_new_bio)
self.assertEqual(new_bio, test_user.get_bio())
def test_set_academic_fields(self):
test_user_id = 5
test_new_field = "Science"
test_user = PicaUser(test_user_id)
new_field = test_user.set_academic_fields(test_new_field)
self.assertEqual(test_new_field, test_user.get_academic_fields())
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestClass))
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if (len(test_result.failures) > 0) or (len(test_result.errors) > 0):
ret = 1
else:
ret = 0
sys.exit(ret)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Adjacency List
# Q4.1 - Route Between Nodes
class AdjacencyList:
def __init__(self, numOfNodes=None):
if numOfNodes is not None and numOfNodes > 0:
self.matrix = [[] for _ in range(numOfNodes)]
self.numOfNodes = numOfNodes
self.matrixVisited = []
self.searchReturnValue = None
self.path = ""
self.searchFor = None
# [1:-1] is a python trick to remove brackets from a list
def __str__(self):
returnStr = ""
for index, result in enumerate(self.matrix):
returnStr+=str(index) + ": " + str(result)[1:-1] + "\n"
return returnStr
def add(self, Node=None, directedEdgeTo=None):
if Node == None or directedEdgeTo == None:
return None
else:
try:
self.matrix[Node].append(directedEdgeTo)
except IndexError:
return None
# need the recursed parameter to set the values of
# self.matrixVisited to the number of Nodes available.
def depthFirstSearch(self, searchValue, node=0, recursed=False):
if recursed == False:
self.matrixVisited = [False] * self.numOfNodes
self.searchReturnValue = None
self.searchFor = searchValue
if node == self.searchFor:
self.searchReturnValue = node
return self.searchReturnValue
        # note: a list never compares equal to True; the intended check is "all nodes visited"
        if len(self.matrix) == 0 or all(self.matrixVisited):
return self.searchReturnValue
self.matrixVisited[node] = True
for m in self.matrix[node]:
if m == self.searchFor:
self.searchReturnValue = m
if self.matrixVisited[m] == False:
self.depthFirstSearch(searchValue, m, True)
return self.searchReturnValue
def depthFirstSearchPath(self, searchValue, node=0, recursed=False):
if recursed == False:
self.matrixVisited = [False] * self.numOfNodes
self.searchReturnValue = None
self.searchFor = searchValue
self.path = str(node)
if node == self.searchFor:
self.searchReturnValue = node
return self.path
        # note: a list never compares equal to True; the intended check is "all nodes visited"
        if len(self.matrix) == 0 or all(self.matrixVisited):
return self.searchReturnValue
self.matrixVisited[node] = True
self.path += " -> "
for m in self.matrix[node]:
if m == self.searchFor:
self.searchReturnValue = m
if self.matrixVisited[m] == False:
self.path += str(m)
self.depthFirstSearchPath(searchValue, m, True)
if self.path[-1:] != ' ': # return if complete path
return self.path
def breadthFirstSearch(self, searchValue, node=0):
# searchValue can never be greater than number of Nodes
# or less than 0
if searchValue > self.numOfNodes or searchValue < 0:
return None
# this can find values in multiple graphs
for i in range(self.numOfNodes):
for m in self.matrix[i]:
if m == searchValue:
return m
# because searchValue == Node number - solution is trivial and
# should never reach this next line
return None
# See graphs on page 106 and number list on middle of page
# Cracking the Coding Interview, 6th Edition
if __name__ == "__main__":
Lst = AdjacencyList(7)
Lst.add(0,1)
Lst.add(1,2)
Lst.add(2,0)
Lst.add(2,3)
Lst.add(3,2)
Lst.add(4,6)
Lst.add(5,4)
Lst.add(6,5)
print(Lst)
# First variable for depthFirstSearchPath is the node to search for
# second variable is for the root node to search from
    # There are two directed graphs in this adjacency list
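    # Expected paths for this graph (hand-traced sketch):
    #   depthFirstSearchPath(5,4) -> "4 -> 6 -> 5"
    #   depthFirstSearchPath(3)   -> "0 -> 1 -> 2 -> 3"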
print("depthFirstSearchPath(5,4): " + str(Lst.depthFirstSearchPath(5,4)))
print("depthFirstSearchPath(3): " + str(Lst.depthFirstSearchPath(3)))
print("self.path: " + str(Lst.path))
|
nilq/baby-python
|
python
|
#!python
"""Django's command-line utility for administrative tasks.
This file was auto generated by the Django toolchain. Type python manage.py
--help to see a list of available commands.
"""
import os
import sys
def main() -> None:
"""manage.py entry point."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ciukune.core.settings')
try:
from django.core.management import execute_from_command_line # pylint: disable=import-outside-toplevel
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import TUI.Base.TestDispatcher
testDispatcher = TUI.Base.TestDispatcher.TestDispatcher("sop", delay=1.5)
tuiModel = testDispatcher.tuiModel
dataList = (
"version=1.4",
'bypassNames=boss, ff_lamp, ffs, gcamera, hgcd_lamp, ne_lamp, uv_lamp, wht_lamp',
'bypassedNames=boss, ne_lamp, wht_lamp',
'gotoFieldStages="slew", "hartmann", "calibs", "guider", "cleanup"',
'gotoFieldState="done","OK","off","done","done","done","done"',
'gotoField_arcTime=5, 7.6',
'gotoField_flatTime=4.3, 5.1',
'gotoField_guiderFlatTime=7.6, 3.9',
'gotoField_guiderTime=5, 10',
'doApogeeScienceStages="expose"',
'doApogeeScienceState="done","OK","idle"',
'doApogeeScience_ditherSeq="ABBA","ABBA"',
'doApogeeScience_seqCount=2,2',
'doApogeeScience_expTime=500.0,500.0',
'doApogeeScience_sequenceState="ABBAABBA",8',
'doApogeeScience_comment="a comment",""',
'doBossScienceStages="expose"',
'doBossScienceState="done","OK","idle"',
'doBossScience_nExp=3, 3',
'doBossScience_expTime=13.3, 10',
'doMangaDitherStages="expose", "dither"',
'doMangaDitherState="done","OK","done","done"',
'doMangaDither_expTime=25.3,30',
'doMangaDither_dithers="NS","NSE"',
'doMangaSequenceStages="expose", "calibs", "dither"',
'doMangaSequenceState="idle","OK","idle","idle","idle"',
'doMangaSequence_count=3,3',
'doMangaSequence_dithers="NSE","NSE"',
'doMangaSequence_expTime=900.0,900.0',
'doMangaSequence_arcTime=4.0,4.0', # ignored
'doMangaSequence_ditherSeq=NSENSENSE,0',
'gotoGangChangeStages="domeFlat", "slew"',
'gotoGangChangeState="done","some text","done","done"',
'gotoGangChange_alt=30.0, 45.0', # ignored
'gotoInstrumentChangeStages="slew"',
'gotoInstrumentChangeState="done","a bit of text","done"',
'doApogeeSkyFlatsStages="expose"',
'doApogeeSkyFlatsState="done","some text","done"',
'doApogeeSkyFlats_ditherSeq="A","AB"',
'doApogeeSkyFlats_expTime="400","500"',
'doApogeeDomeFlatStages="domeFlat"',
'doApogeeDomeFlatState="done","gang connector is not at the cartridge!","done"',
'doBossCalibsStages="bias", "dark", "flat", "arc", "cleanup"',
'doBossCalibsState="done","some text","done","done","done","done","done"',
'doBossCalibs_nBias=3, 4',
'doBossCalibs_nDark=10, 7',
'doBossCalibs_darkTime=31.2, 15',
'doBossCalibs_nFlat=5, 5',
'doBossCalibs_flatTime=22.3, 14',
'doBossCalibs_guiderFlatTime=12.3, 13',
'doBossCalibs_nArc=2, 5',
'doBossCalibs_arcTime=5.0, 6.0',
'gotoStowStages="slew"',
'gotoStowState="aborted","a bit of text","done"',
'survey="APOGEE-2&MaNGA", Other',
)
guiderDataList = (
# 'cartridgeLoaded=8, 7549, A, 56841, 1',
'cartridgeLoaded=19,0,A,-1,-1',
'survey="APOGEE-2&MaNGA", Something',
)
animDataSet = (
(
'surveyCommands=gotoField, doBossCalibs, gotoInstrumentChange',
'gotoFieldStages="hartmann","guider","cleanup"',
'gotoFieldState="running","guider","done","running","pending"',
'doBossCalibsState="running","flat","done","done","running","pending","pending"',
),
(
'gotoFieldState="running","cleanup","done","done","running"',
'doBossCalibsState="running","arc","done","done","done","running","pending"',
'bypassedNames=ne_lamp, wht_lamp',
),
(
'gotoFieldState="done","","done","done","done"',
'bypassedNames=wht_lamp',
'gotoFieldState="done","done","done","done","done"',
'doBossCalibsState="failed","cleanup failed","done","done","done","done","failed"',
),
(
'surveyCommands=gotoStow, gotoField, doBossScience, doBossCalibs, gotoInstrumentChange',
'gotoFieldStages="slew","hartmann","calibs","cleanup"',
'gotoFieldState="running","","off","running","pending","pending"',
'bypassedNames',
),
(
'gotoFieldState="running","","off","running","running","pending"',
),
(
'gotoFieldState="aborted","aborting","off","running","done","aborted"',
),
(
'gotoFieldState="failed","something went wrong","off","done","done","failed"',
),
)
def start():
testDispatcher.dispatch(dataList)
testDispatcher.dispatch(guiderDataList, actor="guider")
def animate():
testDispatcher.runDataSet(animDataSet)
|
nilq/baby-python
|
python
|
# Changes to this file by The Tavutil Authors are in the Public Domain.
# See the Tavutil UNLICENSE file for details.
#******************************************************************************\
#* Copyright (c) 2003-2004, Martin Blais
#* All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are
#* met:
#*
#* * Redistributions of source code must retain the above copyright
#* notice, this list of conditions and the following disclaimer.
#*
#* * Redistributions in binary form must reproduce the above copyright
#* notice, this list of conditions and the following disclaimer in the
#* documentation and/or other materials provided with the distribution.
#*
#* * Neither the name of the Martin Blais, Furius, nor the names of its
#* contributors may be used to endorse or promote products derived from
#* this software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************\
"""Automatic completion for optparse module.
This module provides automatic bash completion support for programs that use
the optparse module. The premise is that the optparse options parser specifies
enough information (and more) for us to be able to generate completion strings
easily. Another advantage of this over traditional completion schemes where the
completion strings are hard-coded in a separate bash source file, is that the
same code that parses the options is used to generate the completions, so the
completions is always up-to-date with the program itself.
In addition, we allow you specify a list of regular expressions or code that
define what kinds of files should be proposed as completions to this file if
needed. If you want to implement more complex behaviour, you can instead
specify a function, which will be called with the current directory as an
argument.
You need to activate bash completion using the shell script function that comes
with optcomplete (see http://furius.ca/optcomplete for more details).
"""
__version__ = "$Revision$"
__author__ = "Martin Blais <blais@furius.ca>"
## Bash Protocol Description
## -------------------------
##
## `COMP_CWORD'
## An index into `${COMP_WORDS}' of the word containing the current
## cursor position. This variable is available only in shell
## functions invoked by the programmable completion facilities (*note
## Programmable Completion::).
##
## `COMP_LINE'
## The current command line. This variable is available only in
## shell functions and external commands invoked by the programmable
## completion facilities (*note Programmable Completion::).
##
## `COMP_POINT'
## The index of the current cursor position relative to the beginning
## of the current command. If the current cursor position is at the
## end of the current command, the value of this variable is equal to
## `${#COMP_LINE}'. This variable is available only in shell
## functions and external commands invoked by the programmable
## completion facilities (*note Programmable Completion::).
##
## `COMP_WORDS'
## An array variable consisting of the individual words in the
## current command line. This variable is available only in shell
## functions invoked by the programmable completion facilities (*note
## Programmable Completion::).
##
## `COMPREPLY'
## An array variable from which Bash reads the possible completions
## generated by a shell function invoked by the programmable
## completion facility (*note Programmable Completion::).
import os
import re
import sys
import types
from optparse import OptionParser
from os import listdir
from os.path import *
from pprint import pprint, pformat
debugfn = None # for debugging only
class AllCompleter:
"""Completes by listing all possible files in current directory."""
def __call__(self, pwd, line, point, prefix, suffix):
return os.listdir(pwd)
class NoneCompleter:
"""Generates empty completion list."""
def __call__(self, pwd, line, point, prefix, suffix):
return []
class DirCompleter:
"""Completes by listing subdirectories only."""
def __init__(self, directory=None):
self.directory = directory
def __call__(self, pwd, line, point, prefix, suffix):
if self.directory:
pwd = self.directory
return [path for path in listdir(pwd) if isdir(join(pwd, path))]
class RegexCompleter:
"""Completes by filtering all possible files with the given list of
regexps."""
def __init__(self, regexlist, always_dirs=True):
self.always_dirs = always_dirs
if isinstance(regexlist, types.StringType):
regexlist = [regexlist]
self.regexlist = []
for r in regexlist:
if isinstance(r, types.StringType):
r = re.compile(r)
self.regexlist.append(r)
def __call__(self, pwd, line, point, prefix, suffix):
dn = dirname(prefix)
if dn:
pwd = dn
files = os.listdir(pwd)
ofiles = []
for fn in files:
for r in self.regexlist:
if r.match(fn):
if dn:
fn = join(dn, fn)
ofiles.append(fn)
break
if self.always_dirs and isdir(fn):
ofiles.append(fn + '/')
return ofiles
class ListCompleter:
"""Completes by filtering using a fixed list of strings."""
def __init__(self, stringlist):
self.olist = stringlist
def __call__(self, pwd, line, point, prefix, suffix):
return self.olist
def extract_word(line, point):
"""Return a prefix and suffix of the enclosing word. The character under
the cursor is the first character of the suffix."""
wsre = re.compile('[ \t]')
if point < 0 or point > len(line):
return '', ''
preii = point - 1
while preii >= 0:
if wsre.match(line[preii]):
break
preii -= 1
preii += 1
sufii = point
while sufii < len(line):
if wsre.match(line[sufii]):
break
sufii += 1
return line[preii : point], line[point : sufii]
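# Example: extract_word("foo bar baz", 5) returns ("b", "ar"), i.e. the
# part of the enclosing word before the cursor and the rest of that word.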
def autocomplete(parser,
arg_completer=None, # means use default.
opt_completer=None,
subcmd_completer=None,
subcommands=None):
"""Automatically detect if we are requested completing and if so generate
completion automatically from given parser.
'parser' is the options parser to use.
'arg_completer' is a callable object that gets invoked to produce a list of
completions for arguments completion (oftentimes files).
'opt_completer' is the default completer to the options that require a
value. 'subcmd_completer' is the default completer for the subcommand
arguments.
If 'subcommands' is specified, the script expects it to be a map of
command-name to an object of any kind. We are assuming that this object is
a map from command name to a pair of (options parser, completer) for the
command. If the value is not such a tuple, the method
'autocomplete(completer)' is invoked on the resulting object.
This will attempt to match the first non-option argument into a subcommand
name and if so will use the local parser in the corresponding map entry's
value. This is used to implement completion for subcommand syntax and will
not be needed in most cases."""
# If we are not requested for complete, simply return silently, let the code
# caller complete. This is the normal path of execution.
if 'OPTPARSE_AUTO_COMPLETE' not in os.environ:
return
# Set default completers.
if arg_completer is None:
arg_completer = NoneCompleter()
if opt_completer is None:
opt_completer = NoneCompleter()
if subcmd_completer is None:
## subcmd_completer = arg_completer
subcmd_completer = NoneCompleter()
# By default, completion will be arguments completion, unless we find out
# later we're trying to complete for an option.
completer = arg_completer
#
# Completing...
#
# Fetching inputs... not sure if we're going to use these.
# zsh's bashcompinit does not pass COMP_WORDS, replace with
# COMP_LINE for now...
if not os.environ.has_key('COMP_WORDS'):
os.environ['COMP_WORDS'] = os.environ['COMP_LINE']
cwords = os.environ['COMP_WORDS'].split()
cline = os.environ['COMP_LINE']
cpoint = int(os.environ['COMP_POINT'])
cword = int(os.environ['COMP_CWORD'])
# If requested, try subcommand syntax to find an options parser for that
# subcommand.
if subcommands:
assert isinstance(subcommands, types.DictType)
value = guess_first_nonoption(parser, subcommands)
if value:
if isinstance(value, types.ListType) or \
isinstance(value, types.TupleType):
parser = value[0]
if len(value) > 1 and value[1]:
# override completer for command if it is present.
completer = value[1]
else:
completer = subcmd_completer
return autocomplete(parser, completer)
else:
# Call completion method on object. This should call
# autocomplete() recursively with appropriate arguments.
if hasattr(value, 'autocomplete'):
return value.autocomplete(subcmd_completer)
else:
sys.exit(1) # no completions for that command object
# Extract word enclosed word.
prefix, suffix = extract_word(cline, cpoint)
    # The following would be less exact, but will work nonetheless.
# prefix, suffix = cwords[cword], None
# Look at previous word, if it is an option and it requires an argument,
# check for a local completer. If there is no completer, what follows
# directly cannot be another option, so mark to not add those to
# completions.
optarg = False
try:
# Look for previous word, which will be containing word if the option
# has an equals sign in it.
prev = None
if cword < len(cwords):
mo = re.search('(--.*)=(.*)', cwords[cword])
if mo:
prev, prefix = mo.groups()
if not prev:
prev = cwords[cword - 1]
if prev and prev.startswith('-'):
option = parser.get_option(prev)
if option:
if option.nargs > 0:
optarg = True
if hasattr(option, 'completer'):
completer = option.completer
elif option.type != 'string':
completer = NoneCompleter()
else:
completer = opt_completer
# Warn user at least, it could help him figure out the problem.
elif hasattr(option, 'completer'):
raise SystemExit(
"Error: optparse option with a completer "
"does not take arguments: %s" % str(option))
except KeyError:
pass
completions = []
# Options completion.
if not optarg and (not prefix or prefix.startswith('-')):
completions += parser._short_opt.keys()
completions += parser._long_opt.keys()
# Note: this will get filtered properly below.
# File completion.
if completer and (not prefix or not prefix.startswith('-')):
# Call appropriate completer depending on type.
if isinstance(completer, types.StringType) or \
isinstance(completer, types.ListType) or \
isinstance(completer, types.TupleType):
completer = RegexCompleter(completer)
completions += completer(os.getcwd(), cline, cpoint, prefix, suffix)
elif isinstance(completer, types.FunctionType) or \
isinstance(completer, types.LambdaType) or \
isinstance(completer, types.ClassType) or \
isinstance(completer, types.ObjectType):
completions += completer(os.getcwd(), cline, cpoint, prefix, suffix)
# Filter using prefix.
if prefix:
completions = filter(lambda x: x.startswith(prefix), completions)
# Print result.
print(' '.join(completions))
# Print debug output (if needed). You can keep a shell with 'tail -f' to
# the log file to monitor what is happening.
if debugfn:
f = open(debugfn, 'a')
print >> f, '---------------------------------------------------------'
print >> f, 'CWORDS', cwords
print >> f, 'CLINE', cline
print >> f, 'CPOINT', cpoint
print >> f, 'CWORD', cword
print >> f, '\nShort options'
print >> f, pformat(parser._short_opt)
print >> f, '\nLong options'
print >> f, pformat(parser._long_opt)
print >> f, 'Prefix/Suffix:', prefix, suffix
print >> f, 'completions', completions
f.close()
# Exit with error code (we do not let the caller continue on purpose, this
# is a run for completions only.)
sys.exit(1)
def error_override(self, msg):
"""Hack to keep OptionParser from writing to sys.stderr when
calling self.exit from self.error"""
self.exit(2, msg=None)
def guess_first_nonoption(gparser, subcmds_map):
"""Given a global options parser, try to guess the first non-option without
generating an exception. This is used for scripts that implement a
subcommand syntax, so that we can generate the appropriate completions for
the subcommand."""
import copy
gparser = copy.deepcopy(gparser)
    def print_usage_nousage(self, file=None):
pass
gparser.print_usage = print_usage_nousage
prev_interspersed = gparser.allow_interspersed_args # save state to restore
gparser.disable_interspersed_args()
cwords = os.environ['COMP_WORDS'].split()
# save original error_func so we can put it back after the hack
error_func = gparser.error
try:
instancemethod = type(OptionParser.error)
        # hack to keep OptionParser from writing to sys.stderr
gparser.error = instancemethod(error_override, gparser, OptionParser)
gopts, args = gparser.parse_args(cwords[1:])
except SystemExit:
return None
finally:
# undo the hack and restore original OptionParser error function
gparser.error = instancemethod(error_func, gparser, OptionParser)
value = None
if args:
subcmdname = args[0]
try:
value = subcmds_map[subcmdname]
except KeyError:
pass
gparser.allow_interspersed_args = prev_interspersed # restore state
return value # can be None, indicates no command chosen.
class CmdComplete:
"""Simple default base class implementation for a subcommand that supports
command completion. This class is assuming that there might be a method
addopts(self, parser) to declare options for this subcommand, and an
optional completer data member to contain command-specific completion. Of
course, you don't really have to use this, but if you do it is convenient to
have it here."""
def autocomplete(self, completer):
import optparse
parser = optparse.OptionParser(self.__doc__.strip())
if hasattr(self, 'addopts'):
self.addopts(parser)
if hasattr(self, 'completer'):
completer = self.completer
return autocomplete(parser, completer)
# ------------------------------------------------------------------------------
# Support Functions
# ------------------------------------------------------------------------------
class CompletionResult(Exception):
def __init__(self, result):
self.result = result
def parse_options(parser, argv, completer=None, exit_if_no_args=False):
if completer:
raise CompletionResult(parser)
if (argv == ['--help']) or (argv == ['-h']):
parser.print_help()
sys.exit(1)
options, args = parser.parse_args(argv)
if exit_if_no_args and not args:
parser.print_help()
sys.exit(1)
return options, args
def make_autocompleter(command):
def wrapper(completer):
try:
parser = command(completer=completer)
except CompletionResult:
parser = sys.exc_info()[1].result
if isinstance(parser, tuple):
parser, completer = parser
return autocomplete(parser, completer)
return wrapper
|
nilq/baby-python
|
python
|
import io
import math
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple, Union
import discord
import matplotlib.pyplot as plt
import pandas as pd
import pytz
from blossom_wrapper import BlossomAPI
from dateutil import parser
from discord import Embed, File
from discord.ext.commands import Cog, UserNotFound
from discord_slash import SlashContext, cog_ext
from discord_slash.model import SlashMessage
from discord_slash.utils.manage_commands import create_option
from buttercup.bot import ButtercupBot
from buttercup.cogs import ranks
from buttercup.cogs.helpers import (
BlossomException,
BlossomUser,
InvalidArgumentException,
extract_utc_offset,
get_discord_time_str,
get_duration_str,
get_initial_username,
get_initial_username_list,
get_rank,
get_rgb_from_hex,
get_timedelta_str,
get_user,
get_user_gamma,
get_user_id,
get_user_list,
get_username,
get_usernames,
parse_time_constraints,
utc_offset_to_str,
)
from buttercup.strings import translation
i18n = translation()
def get_data_granularity(
user: Optional[BlossomUser], after: Optional[datetime], before: Optional[datetime]
) -> str:
"""Determine granularity of the graph.
It should be as detailed as possible, but only require 1 API call in the best case.
"""
if not user:
return "week"
# TODO: Adjust this when the Blossom dates have been fixed
now = datetime.now(tz=pytz.utc)
date_joined = parser.parse(user["date_joined"])
total_delta = now - date_joined
    total_hours = total_delta.total_seconds() / 3600  # seconds -> hours
# The time delta that the data is calculated on
relevant_delta = (before or now) - (after or date_joined)
    relevant_hours = relevant_delta.total_seconds() / 3600  # seconds -> hours
time_factor = relevant_hours / total_hours
total_gamma: int = user["gamma"]
# The expected gamma in the relevant time frame
adjusted_gamma = total_gamma * time_factor
if adjusted_gamma <= 500:
return "none"
if relevant_hours * 0.3 <= 500 or adjusted_gamma <= 1500:
# We estimate that the user is only active in one third of the hours
# The user is expected to complete 3 transcriptions within the same hour
return "hour"
# Don't be less accurate than a day, it loses too much detail
return "day"
def get_timedelta_from_time_frame(time_frame: Optional[str]) -> timedelta:
"""Get the timedelta for the given time frame option."""
if time_frame == "year":
        return timedelta(days=365)
if time_frame == "month":
return timedelta(days=30)
if time_frame == "week":
return timedelta(weeks=1)
if time_frame == "hour":
return timedelta(hours=1)
if time_frame == "none":
return timedelta(seconds=1)
# One day is the default
return timedelta(days=1)
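# For example: get_timedelta_from_time_frame("month") -> timedelta(days=30),
# and any unknown value (including None) falls back to timedelta(days=1).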
def add_zero_rates(
data: pd.DataFrame,
time_frame: str,
after_time: Optional[datetime],
before_time: Optional[datetime],
) -> pd.DataFrame:
"""Add entries for the zero rates to the data frame.
When the rate is zero, it is not returned in the API response.
Therefore we need to add it manually.
However, for a span of zero entries, we only need the first
and last entry. This reduces the number of data points.
"""
new_index = set()
delta = get_timedelta_from_time_frame(time_frame)
now = datetime.now(tz=pytz.utc)
if after_time:
# Add the earliest point according to the timeframe
first_date = data.index[0]
# Make sure everything is localized
first_date = first_date.replace(tzinfo=pytz.utc)
missing_delta: timedelta = first_date - after_time
missing_time_frames = missing_delta.total_seconds() // delta.total_seconds()
if missing_time_frames > 0:
# We need to add a new entry at the beginning
missing_delta = timedelta(
seconds=missing_time_frames * delta.total_seconds()
)
missing_date = first_date - missing_delta
new_index.add(missing_date)
for date in data.index:
new_index.add(date)
new_index.add(date - delta)
if date + delta < now:
new_index.add(date + delta)
# Add the latest point according to the timeframe
last_date = data.index[-1]
# Make sure everything is localized
last_date = last_date.replace(tzinfo=pytz.utc)
missing_delta: timedelta = (before_time or now) - last_date
missing_time_frames = missing_delta.total_seconds() // delta.total_seconds()
if missing_time_frames > 0:
# We need to add a new entry at the end
missing_delta = timedelta(seconds=missing_time_frames * delta.total_seconds())
missing_date = last_date + missing_delta
new_index.add(missing_date)
return data.reindex(new_index, fill_value=0).sort_index()
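# Illustrative example (hypothetical dates, time_frame="day"): if the API only
# returned counts for 2021-01-05 and 2021-01-08, this adds zero-count points on
# 2021-01-04, 2021-01-06, 2021-01-07 and 2021-01-09 (when before "now"), so a
# plotted line drops to zero instead of interpolating between the two days.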
def get_user_colors(users: Optional[List[BlossomUser]]) -> List[str]:
"""Assign a color to each user.
This will prefer to assign a user their rank color.
A single user will get white for better readability.
"""
if not users or len(users) == 1:
# If we don't need to distinguish, take white (best contrast)
return ["#eeeeee"]
color_mapping = {}
available_ranks = [r for r in ranks]
left_over_users = []
for user in users:
user_rank = get_rank(user["gamma"])
# Give the user their rank color if possible
if user_rank in available_ranks:
color_mapping[user["username"]] = user_rank["color"]
available_ranks = [
r for r in available_ranks if r["name"] != user_rank["name"]
]
else:
left_over_users.append(user)
# Give the left over users another rank's color
for i, user in enumerate(left_over_users):
color_mapping[user["username"]] = available_ranks[i]["color"]
return [color_mapping[user["username"]] for user in users]
def add_milestone_lines(
ax: plt.Axes,
milestones: List[Dict[str, Union[str, int]]],
min_value: float,
max_value: float,
delta: float,
) -> plt.Axes:
"""Add the lines for the milestones the user reached.
:param ax: The axis to draw the milestones into.
:param milestones: The milestones to consider. Each must have a threshold and color.
:param min_value: The minimum value to determine if a milestone should be included.
    :param max_value: The maximum value to determine if a milestone should be included.
:param delta: Determines how "far away" milestone lines are still included.
"""
for milestone in milestones:
if max_value + delta >= milestone["threshold"] >= min_value - delta:
ax.axhline(y=milestone["threshold"], color=milestone["color"], zorder=-1)
return ax
def create_file_from_figure(fig: plt.Figure, file_name: str) -> File:
"""Create a Discord file containing the figure."""
history_plot = io.BytesIO()
fig.savefig(history_plot, format="png")
history_plot.seek(0)
plt.close(fig)
return File(history_plot, file_name)
def get_history_data_from_rate_data(
rate_data: pd.DataFrame, offset: int
) -> pd.DataFrame:
"""Aggregate the rate data to history data.
:param rate_data: The rate data to calculate the history data from.
:param offset: The gamma offset at the first point of the graph.
"""
return rate_data.assign(gamma=rate_data.expanding(1).sum() + offset)
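# Example: daily counts [2, 0, 3] with an offset of 10 produce the cumulative
# gamma values [12, 12, 15].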
def get_next_rank(gamma: int) -> Optional[Dict[str, Union[str, int]]]:
"""Determine the next rank based on the current gamma."""
for rank in ranks:
if rank["threshold"] > gamma:
return rank
return None
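# Example (hypothetical thresholds): with ranks at 100, 250 and 500 gamma, a
# user at 180 gamma gets the 250 rank back; a user above the top threshold
# gets None.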
def parse_goal_str(goal_str: str) -> Tuple[int, str]:
"""Parse the given goal string.
:returns: The goal gamma and the goal string.
"""
goal_str = goal_str.strip()
if goal_str.isnumeric():
goal_gamma = int(goal_str, 10)
return goal_gamma, f"{goal_gamma:,}"
for rank in ranks:
if goal_str.casefold() == rank["name"].casefold():
goal_gamma = int(rank["threshold"])
return rank["threshold"], f"{rank['name']} ({goal_gamma:,})"
raise InvalidArgumentException("goal", goal_str)
async def _get_user_progress(
user: Optional[BlossomUser],
after_time: Optional[datetime],
before_time: Optional[datetime],
blossom_api: BlossomAPI,
) -> int:
"""Get the number of transcriptions made in the given time frame."""
from_str = after_time.isoformat() if after_time else None
until_str = before_time.isoformat() if before_time else None
# We ask for submission completed by the user in the time frame
# The response will contain a count, so we just need 1 result
progress_response = blossom_api.get(
"submission/",
params={
"completed_by": get_user_id(user),
"complete_time__gte": from_str,
"complete_time__lte": until_str,
"page_size": 1,
},
)
if progress_response.status_code != 200:
raise BlossomException(progress_response)
return progress_response.json()["count"]
async def _get_progress_description(
user: Optional[BlossomUser],
user_gamma: int,
goal_gamma: int,
goal_str: str,
start: datetime,
after_time: datetime,
before_time: Optional[datetime],
blossom_api: BlossomAPI,
) -> str:
"""Get the description for the user's prediction to reach the goal."""
user_progress = await _get_user_progress(user, after_time, before_time, blossom_api)
time_frame = (before_time or start) - after_time
if user_gamma >= goal_gamma:
# The user has already reached the goal
return i18n["until"]["embed_description_reached"].format(
time_frame=get_timedelta_str(time_frame),
user=get_username(user),
user_gamma=user_gamma,
goal=goal_str,
user_progress=user_progress,
)
elif user_progress == 0:
return i18n["until"]["embed_description_zero"].format(
time_frame=get_timedelta_str(time_frame),
user=get_username(user),
user_gamma=user_gamma,
goal=goal_str,
)
else:
# Based on the progress in the timeframe, calculate the time needed
gamma_needed = goal_gamma - user_gamma
relative_time = timedelta(
seconds=gamma_needed * (time_frame.total_seconds() / user_progress)
)
absolute_time = start + relative_time
return i18n["until"]["embed_description_prediction"].format(
time_frame=get_timedelta_str(time_frame),
user=get_username(user),
user_gamma=user_gamma,
goal=goal_str,
user_progress=user_progress,
relative_time=get_timedelta_str(relative_time),
absolute_time=get_discord_time_str(absolute_time),
)
class History(Cog):
def __init__(self, bot: ButtercupBot, blossom_api: BlossomAPI) -> None:
"""Initialize the History cog."""
self.bot = bot
self.blossom_api = blossom_api
def get_all_rate_data(
self,
user: Optional[BlossomUser],
time_frame: str,
after_time: Optional[datetime],
before_time: Optional[datetime],
utc_offset: int,
) -> pd.DataFrame:
"""Get all rate data for the given user."""
page_size = 500
rate_data = pd.DataFrame(columns=["date", "count"]).set_index("date")
page = 1
# Placeholder until we get the real value from the response
next_page = "1"
from_str = after_time.isoformat() if after_time else None
until_str = before_time.isoformat() if before_time else None
while next_page is not None:
response = self.blossom_api.get(
"submission/rate",
params={
"completed_by": get_user_id(user),
"page": page,
"page_size": page_size,
"time_frame": time_frame,
"complete_time__gte": from_str,
"complete_time__lte": until_str,
"utc_offset": utc_offset,
},
)
if response.status_code != 200:
raise BlossomException(response)
new_data = response.json()["results"]
next_page = response.json()["next"]
new_frame = pd.DataFrame.from_records(new_data)
# Convert date strings to datetime objects
new_frame["date"] = new_frame["date"].apply(lambda x: parser.parse(x))
            # Add the new page to the frame
            # (DataFrame.append was removed in pandas 2.x, so use pd.concat)
            rate_data = pd.concat([rate_data, new_frame.set_index("date")])
# Continue with the next page
page += 1
# Add the missing zero entries
rate_data = add_zero_rates(rate_data, time_frame, after_time, before_time)
return rate_data
def calculate_history_offset(
self,
user: Optional[BlossomUser],
rate_data: pd.DataFrame,
after_time: Optional[datetime],
before_time: Optional[datetime],
) -> int:
"""Calculate the gamma offset for the history graph.
Note: We always need to do this, because it might be the case that some
transcriptions don't have a date set.
"""
gamma = get_user_gamma(user, self.blossom_api)
if before_time is not None:
# We need to get the offset from the API
offset_response = self.blossom_api.get(
"submission/",
params={
"completed_by__isnull": False,
"completed_by": get_user_id(user),
"complete_time__gte": before_time.isoformat(),
"page_size": 1,
},
)
if not offset_response.ok:
raise BlossomException(offset_response)
# We still need to calculate based on the total gamma
# It may be the case that not all transcriptions have a date set
# Then they are not included in the data nor in the API response
return gamma - rate_data.sum() - offset_response.json()["count"]
else:
# We can calculate the offset from the given data
return gamma - rate_data.sum()
def get_user_history(
self,
user: Optional[BlossomUser],
after_time: Optional[datetime],
before_time: Optional[datetime],
utc_offset: int,
) -> pd.DataFrame:
"""Get a data frame representing the history of the user.
:returns: The history data of the user.
"""
# Get all rate data
time_frame = get_data_granularity(user, after_time, before_time)
rate_data = self.get_all_rate_data(
user, time_frame, after_time, before_time, utc_offset
)
# Calculate the offset for all data points
offset = self.calculate_history_offset(user, rate_data, after_time, before_time)
# Aggregate the gamma score
history_data = get_history_data_from_rate_data(rate_data, offset)
return history_data
@cog_ext.cog_slash(
name="history",
description="Display the history graph.",
options=[
create_option(
name="users",
description="The users to display the history graph for (max 5)."
"Defaults to the user executing the command.",
option_type=3,
required=False,
),
create_option(
name="after",
description="The start date for the history data.",
option_type=3,
required=False,
),
create_option(
name="before",
description="The end date for the history data.",
option_type=3,
required=False,
),
],
)
async def history(
self,
ctx: SlashContext,
usernames: str = "me",
after: Optional[str] = None,
before: Optional[str] = None,
) -> None:
"""Get the transcription history of the user."""
start = datetime.now()
after_time, before_time, time_str = parse_time_constraints(after, before)
utc_offset = extract_utc_offset(ctx.author.display_name)
# Give a quick response to let the user know we're working on it
# We'll later edit this message with the actual content
msg = await ctx.send(
i18n["history"]["getting_history"].format(
users=get_initial_username_list(usernames, ctx), time_str=time_str,
)
)
users = get_user_list(usernames, ctx, self.blossom_api)
if users:
users.sort(key=lambda u: u["gamma"], reverse=True)
colors = get_user_colors(users)
min_gammas = []
max_gammas = []
fig: plt.Figure = plt.figure()
ax: plt.Axes = fig.gca()
fig.subplots_adjust(bottom=0.2)
ax.set_xlabel(
i18n["history"]["plot_xlabel"].format(
timezone=utc_offset_to_str(utc_offset)
)
)
ax.set_ylabel(i18n["history"]["plot_ylabel"])
for label in ax.get_xticklabels():
label.set_rotation(32)
label.set_ha("right")
ax.set_title(
i18n["history"]["plot_title"].format(
users=get_usernames(users, 2, escape=False)
)
)
for index, user in enumerate(users or [None]):
if users and len(users) > 1:
await msg.edit(
content=i18n["history"]["getting_history_progress"].format(
users=get_usernames(users),
time_str=time_str,
count=index + 1,
total=len(users),
)
)
history_data = self.get_user_history(
user, after_time, before_time, utc_offset
)
color = colors[index]
first_point = history_data.iloc[0]
last_point = history_data.iloc[-1]
min_gammas.append(first_point.at["gamma"])
max_gammas.append(last_point.at["gamma"])
# Plot the graph
ax.plot(
"date", "gamma", data=history_data.reset_index(), color=color,
)
            # Add a point for the last value
ax.scatter(
last_point.name, last_point.at["gamma"], color=color, s=4,
)
# Label the last value
ax.annotate(
int(last_point.at["gamma"]),
xy=(last_point.name, last_point.at["gamma"]),
color=color,
)
if users:
# Show milestone lines
min_value, max_value = min(min_gammas), max(max_gammas)
delta = (max_value - min_value) * 0.4
ax = add_milestone_lines(ax, ranks, min_value, max_value, delta)
if users and len(users) > 1:
ax.legend([get_username(user, escape=False) for user in users])
discord_file = create_file_from_figure(fig, "history_plot.png")
await msg.edit(
content=i18n["history"]["response_message"].format(
users=get_usernames(users),
time_str=time_str,
duration=get_duration_str(start),
),
file=discord_file,
)
@cog_ext.cog_slash(
name="rate",
description="Display the rate graph.",
options=[
create_option(
name="users",
description="The users to display the rate graph for (max 5)."
"Defaults to the user executing the command.",
option_type=3,
required=False,
),
create_option(
name="after",
description="The start date for the rate data.",
option_type=3,
required=False,
),
create_option(
name="before",
description="The end date for the rate data.",
option_type=3,
required=False,
),
],
)
async def rate(
self,
ctx: SlashContext,
usernames: str = "me",
after: Optional[str] = None,
before: Optional[str] = None,
) -> None:
"""Get the transcription rate of the user."""
start = datetime.now()
after_time, before_time, time_str = parse_time_constraints(after, before)
utc_offset = extract_utc_offset(ctx.author.display_name)
# Give a quick response to let the user know we're working on it
# We'll later edit this message with the actual content
msg = await ctx.send(
i18n["rate"]["getting_rate"].format(
users=get_initial_username_list(usernames, ctx), time_str=time_str,
)
)
users = get_user_list(usernames, ctx, self.blossom_api)
if users:
users.sort(key=lambda u: u["gamma"], reverse=True)
colors = get_user_colors(users)
max_rates = []
fig: plt.Figure = plt.figure()
ax: plt.Axes = fig.gca()
fig.subplots_adjust(bottom=0.2)
ax.set_xlabel(
i18n["rate"]["plot_xlabel"].format(timezone=utc_offset_to_str(utc_offset))
)
ax.set_ylabel(i18n["rate"]["plot_ylabel"])
for label in ax.get_xticklabels():
label.set_rotation(32)
label.set_ha("right")
ax.set_title(
i18n["rate"]["plot_title"].format(
users=get_usernames(users, 2, escape=False)
)
)
for index, user in enumerate(users or [None]):
if users and len(users) > 1:
await msg.edit(
content=i18n["rate"]["getting_rate"].format(
users=get_usernames(users),
count=index + 1,
total=len(users),
time_str=time_str,
)
)
user_data = self.get_all_rate_data(
user, "day", after_time, before_time, utc_offset
)
max_rate = user_data["count"].max()
max_rates.append(max_rate)
max_rate_point = user_data[user_data["count"] == max_rate].iloc[0]
color = colors[index]
# Plot the graph
ax.plot(
"date", "count", data=user_data.reset_index(), color=color,
)
            # Add a point for the max value
ax.scatter(
max_rate_point.name, max_rate_point.at["count"], color=color, s=4,
)
# Label the max value
ax.annotate(
int(max_rate_point.at["count"]),
xy=(max_rate_point.name, max_rate_point.at["count"]),
color=color,
)
if users:
# A milestone at every 100 rate
milestones = [
dict(threshold=i * 100, color=ranks[i + 2]["color"])
for i in range(1, 8)
]
ax = add_milestone_lines(ax, milestones, 0, max(max_rates), 40)
if users and len(users) > 1:
ax.legend([get_username(user, escape=False) for user in users])
discord_file = create_file_from_figure(fig, "rate_plot.png")
await msg.edit(
content=i18n["rate"]["response_message"].format(
usernames=get_usernames(users),
time_str=time_str,
duration=get_duration_str(start),
),
file=discord_file,
)
async def _until_user_catch_up(
self,
ctx: SlashContext,
msg: SlashMessage,
user: BlossomUser,
target_username: str,
start: datetime,
after_time: datetime,
before_time: Optional[datetime],
time_str: str,
) -> None:
"""Determine how long it will take the user to catch up with the target user."""
# Try to find the target user
try:
target = get_user(target_username, ctx, self.blossom_api)
except UserNotFound:
# This doesn't mean the username is wrong
# They could have also mistyped a rank
# So we change the error message to something else
raise InvalidArgumentException("goal", target_username)
if not target:
# Having the combined server as target doesn't make sense
# Because it includes the current user, they could never reach it
raise InvalidArgumentException("goal", target_username)
if user["gamma"] > target["gamma"]:
# Swap user and target, the target has to have more gamma
# Otherwise the goal would have already been reached
user, target = target, user
user_progress = await _get_user_progress(
user, after_time, before_time, blossom_api=self.blossom_api
)
target_progress = await _get_user_progress(
target, after_time, before_time, blossom_api=self.blossom_api
)
time_frame = (before_time or start) - after_time
if user_progress <= target_progress:
description = i18n["until"]["embed_description_user_never"].format(
user=get_username(user),
user_gamma=user["gamma"],
user_progress=user_progress,
target=get_username(target),
target_gamma=target["gamma"],
target_progress=target_progress,
time_frame=get_timedelta_str(time_frame),
)
else:
# Calculate time needed
seconds_needed = (target["gamma"] - user["gamma"]) / (
(user_progress - target_progress) / time_frame.total_seconds()
)
relative_time = timedelta(seconds=seconds_needed)
absolute_time = start + relative_time
intersection_gamma = user["gamma"] + math.ceil(
(user_progress / time_frame.total_seconds())
* relative_time.total_seconds()
)
description = i18n["until"]["embed_description_user_prediction"].format(
user=get_username(user),
user_gamma=user["gamma"],
user_progress=user_progress,
target=get_username(target),
target_gamma=target["gamma"],
target_progress=target_progress,
intersection_gamma=intersection_gamma,
time_frame=get_timedelta_str(time_frame),
relative_time=get_timedelta_str(relative_time),
absolute_time=get_discord_time_str(absolute_time),
)
color = get_rank(target["gamma"])["color"]
await msg.edit(
content=i18n["until"]["embed_message"].format(
user=get_username(user),
goal=get_username(target),
time_str=time_str,
duration=get_duration_str(start),
),
embed=Embed(
title=i18n["until"]["embed_title"].format(user=get_username(user)),
description=description,
color=discord.Colour.from_rgb(*get_rgb_from_hex(color)),
),
)
@cog_ext.cog_slash(
name="until",
description="Determines the time required to reach the next milestone.",
options=[
create_option(
name="goal",
description="The gamma, flair rank or user to reach. "
"Defaults to the next rank.",
option_type=3,
required=False,
),
create_option(
name="username",
description="The user to make the prediction for. "
"Defaults to the user executing the command.",
option_type=3,
required=False,
),
create_option(
name="after",
description="The start date for the prediction data.",
option_type=3,
required=False,
),
create_option(
name="before",
description="The end date for the prediction data.",
option_type=3,
required=False,
),
],
)
async def _until(
self,
ctx: SlashContext,
goal: Optional[str] = None,
username: str = "me",
after: str = "1 week",
before: Optional[str] = None,
) -> None:
"""Determine how long it will take the user to reach the given goal."""
start = datetime.now(tz=pytz.utc)
after_time, before_time, time_str = parse_time_constraints(after, before)
if not after_time:
# We need a starting point for the calculations
raise InvalidArgumentException("after", after)
# Send a first message to show that the bot is responsive.
# We will edit this message later with the actual content.
msg = await ctx.send(
i18n["until"]["getting_prediction"].format(
user=get_initial_username(username, ctx), time_str=time_str,
)
)
user = get_user(username, ctx, self.blossom_api)
if goal is not None:
try:
# Check if the goal is a gamma value or rank name
goal_gamma, goal_str = parse_goal_str(goal)
except InvalidArgumentException:
# The goal could be a username
if not user:
# If the user is the combined server, a target user doesn't make sense
raise InvalidArgumentException("goal", goal)
# Try to treat the goal as a user
return await self._until_user_catch_up(
ctx, msg, user, goal, start, after_time, before_time, time_str,
)
elif user:
# Take the next rank for the user
next_rank = get_next_rank(user["gamma"])
if next_rank:
goal_gamma, goal_str = parse_goal_str(next_rank["name"])
else:
# If the user has reached the maximum rank, take the next 10,000 tier
goal_gamma = ((user["gamma"] + 10_000) // 10_000) * 10_000
goal_str = f"{goal_gamma:,}"
else:
# You can't get the "next rank" of the whole server
raise InvalidArgumentException("goal", "<empty>")
user_gamma = get_user_gamma(user, self.blossom_api)
await msg.edit(
content=i18n["until"]["getting_prediction_to_goal"].format(
user=get_username(user), goal=goal_str, time_str=time_str,
)
)
description = await _get_progress_description(
user,
user_gamma,
goal_gamma,
goal_str,
start,
after_time,
before_time,
blossom_api=self.blossom_api,
)
# Determine the color of the target rank
color = get_rank(goal_gamma)["color"]
await msg.edit(
content=i18n["until"]["embed_message"].format(
user=get_username(user),
goal=goal_str,
time_str=time_str,
duration=get_duration_str(start),
),
embed=Embed(
title=i18n["until"]["embed_title"].format(user=get_username(user)),
description=description,
color=discord.Colour.from_rgb(*get_rgb_from_hex(color)),
),
)
def setup(bot: ButtercupBot) -> None:
"""Set up the History cog."""
# Initialize blossom api
cog_config = bot.config["Blossom"]
email = cog_config.get("email")
password = cog_config.get("password")
api_key = cog_config.get("api_key")
blossom_api = BlossomAPI(email=email, password=password, api_key=api_key)
bot.add_cog(History(bot=bot, blossom_api=blossom_api))
def teardown(bot: ButtercupBot) -> None:
"""Unload the History cog."""
bot.remove_cog("History")
|
nilq/baby-python
|
python
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from troveclient import base
from troveclient.v1 import instances
"""
Unit tests for instances.py
"""
class InstanceTest(testtools.TestCase):
def setUp(self):
super(InstanceTest, self).setUp()
self.orig__init = instances.Instance.__init__
instances.Instance.__init__ = mock.Mock(return_value=None)
self.instance = instances.Instance()
self.instance.manager = mock.Mock()
def tearDown(self):
super(InstanceTest, self).tearDown()
instances.Instance.__init__ = self.orig__init
def test___repr__(self):
self.instance.name = "instance-1"
self.assertEqual('<Instance: instance-1>', self.instance.__repr__())
def test_list_databases(self):
db_list = ['database1', 'database2']
self.instance.manager.databases = mock.Mock()
self.instance.manager.databases.list = mock.Mock(return_value=db_list)
self.assertEqual(db_list, self.instance.list_databases())
def test_delete(self):
db_delete_mock = mock.Mock(return_value=None)
self.instance.manager.delete = db_delete_mock
self.instance.delete()
self.assertEqual(1, db_delete_mock.call_count)
def test_restart(self):
db_restart_mock = mock.Mock(return_value=None)
self.instance.manager.restart = db_restart_mock
self.instance.id = 1
self.instance.restart()
self.assertEqual(1, db_restart_mock.call_count)
def test_detach_replica(self):
db_detach_mock = mock.Mock(return_value=None)
self.instance.manager.edit = db_detach_mock
self.instance.id = 1
self.instance.detach_replica()
self.assertEqual(1, db_detach_mock.call_count)
class InstancesTest(testtools.TestCase):
def setUp(self):
super(InstancesTest, self).setUp()
self.orig__init = instances.Instances.__init__
instances.Instances.__init__ = mock.Mock(return_value=None)
self.instances = instances.Instances()
self.instances.api = mock.Mock()
self.instances.api.client = mock.Mock()
self.instances.resource_class = mock.Mock(return_value="instance-1")
self.instance_with_id = mock.Mock()
self.instance_with_id.id = 215
def tearDown(self):
super(InstancesTest, self).tearDown()
instances.Instances.__init__ = self.orig__init
@mock.patch('warnings.warn')
def test_create(self, mock_warn):
def side_effect_func(path, body, inst):
return path, body, inst
self.instances._create = mock.Mock(side_effect=side_effect_func)
nics = [{'net-id': '000'}]
p, b, i = self.instances.create("test-name", 103, "test-volume",
['db1', 'db2'], ['u1', 'u2'],
datastore="datastore",
datastore_version="datastore-version",
nics=nics, slave_of='test',
replica_count=4,
modules=['mod_id'],
locality='affinity')
self.assertEqual("/instances", p)
self.assertEqual("instance", i)
self.assertEqual(['db1', 'db2'], b["instance"]["databases"])
self.assertEqual(['u1', 'u2'], b["instance"]["users"])
self.assertEqual("test-name", b["instance"]["name"])
self.assertEqual("test-volume", b["instance"]["volume"])
self.assertEqual("datastore", b["instance"]["datastore"]["type"])
self.assertEqual("datastore-version",
b["instance"]["datastore"]["version"])
self.assertEqual(nics, b["instance"]["nics"])
self.assertEqual(103, b["instance"]["flavorRef"])
self.assertEqual(4, b["instance"]["replica_count"])
self.assertEqual('affinity', b["instance"]["locality"])
# Assert that slave_of is not used and if specified, there is a warning
        # and its value is used for replica_of.
self.assertEqual('test', b['instance']['replica_of'])
self.assertNotIn('slave_of', b['instance'])
self.assertTrue(mock_warn.called)
self.assertEqual([{'id': 'mod_id'}], b["instance"]["modules"])
def test_list(self):
page_mock = mock.Mock()
self.instances._paginated = page_mock
limit = "test-limit"
marker = "test-marker"
include_clustered = {'include_clustered': False}
self.instances.list(limit, marker)
page_mock.assert_called_with("/instances", "instances", limit, marker,
include_clustered)
def test_get(self):
def side_effect_func(path, inst):
return path, inst
self.instances._get = mock.Mock(side_effect=side_effect_func)
self.assertEqual(('/instances/instance1', 'instance'),
self.instances.get('instance1'))
def test_delete(self):
resp = mock.Mock()
resp.status_code = 200
body = None
self.instances.api.client.delete = mock.Mock(return_value=(resp, body))
self.instances.delete('instance1')
self.instances.delete(self.instance_with_id)
resp.status_code = 500
self.assertRaises(Exception, self.instances.delete, 'instance1')
def test__action(self):
body = mock.Mock()
resp = mock.Mock()
resp.status_code = 200
self.instances.api.client.post = mock.Mock(return_value=(resp, body))
self.assertEqual('instance-1', self.instances._action(1, body))
self.instances.api.client.post = mock.Mock(return_value=(resp, None))
self.assertIsNone(self.instances._action(1, body))
def _set_action_mock(self):
def side_effect_func(instance, body):
self._instance_id = base.getid(instance)
self._body = body
self._instance_id = None
self._body = None
self.instances._action = mock.Mock(side_effect=side_effect_func)
def _test_resize_volume(self, instance, id):
self._set_action_mock()
self.instances.resize_volume(instance, 1024)
self.assertEqual(id, self._instance_id)
self.assertEqual({"resize": {"volume": {"size": 1024}}}, self._body)
def test_resize_volume_with_id(self):
self._test_resize_volume(152, 152)
def test_resize_volume_with_obj(self):
self._test_resize_volume(self.instance_with_id,
self.instance_with_id.id)
def _test_resize_instance(self, instance, id):
self._set_action_mock()
self.instances.resize_instance(instance, 103)
self.assertEqual(id, self._instance_id)
self.assertEqual({"resize": {"flavorRef": 103}}, self._body)
def test_resize_instance_with_id(self):
self._test_resize_instance(4725, 4725)
def test_resize_instance_with_obj(self):
self._test_resize_instance(self.instance_with_id,
self.instance_with_id.id)
def _test_restart(self, instance, id):
self._set_action_mock()
self.instances.restart(instance)
self.assertEqual(id, self._instance_id)
self.assertEqual({'restart': {}}, self._body)
def test_restart_with_id(self):
self._test_restart(253, 253)
def test_restart_with_obj(self):
self._test_restart(self.instance_with_id, self.instance_with_id.id)
def test_modify(self):
resp = mock.Mock()
resp.status_code = 200
body = None
self.instances.api.client.put = mock.Mock(return_value=(resp, body))
self.instances.modify(123)
self.instances.modify(123, 321)
self.instances.modify(self.instance_with_id)
self.instances.modify(self.instance_with_id, 123)
resp.status_code = 500
self.assertRaises(Exception, self.instances.modify, 'instance1')
def test_edit(self):
resp = mock.Mock()
resp.status_code = 204
def fake_patch(url, body):
# Make sure we never pass slave_of to the API.
self.assertIn('instance', body)
self.assertNotIn('slave_of', body['instance'])
return resp, None
self.instances.api.client.patch = mock.Mock(side_effect=fake_patch)
self.instances.edit(123)
self.instances.edit(123, 321)
self.instances.edit(123, 321, 'name-1234')
self.instances.edit(123, 321, 'name-1234', True)
self.instances.edit(self.instance_with_id)
self.instances.edit(self.instance_with_id, 123)
self.instances.edit(self.instance_with_id, 123, 'name-1234')
self.instances.edit(self.instance_with_id, 123, 'name-1234', True)
resp.status_code = 500
self.assertRaises(Exception, self.instances.edit, 'instance1')
def test_upgrade(self):
resp = mock.Mock()
resp.status_code = 200
body = None
self.instances.api.client.patch = mock.Mock(return_value=(resp, body))
self.instances.upgrade(self.instance_with_id, "5.6")
resp.status_code = 500
self.assertRaises(Exception, self.instances.upgrade,
'instance1')
def test_configuration(self):
def side_effect_func(path, inst):
return path, inst
self.instances._get = mock.Mock(side_effect=side_effect_func)
self.assertEqual(('/instances/instance1/configuration', 'instance'),
self.instances.configuration('instance1'))
class InstanceStatusTest(testtools.TestCase):
def test_constants(self):
self.assertEqual("ACTIVE", instances.InstanceStatus.ACTIVE)
self.assertEqual("BLOCKED", instances.InstanceStatus.BLOCKED)
self.assertEqual("BUILD", instances.InstanceStatus.BUILD)
self.assertEqual("FAILED", instances.InstanceStatus.FAILED)
self.assertEqual("REBOOT", instances.InstanceStatus.REBOOT)
self.assertEqual("RESIZE", instances.InstanceStatus.RESIZE)
self.assertEqual("SHUTDOWN", instances.InstanceStatus.SHUTDOWN)
self.assertEqual("RESTART_REQUIRED",
instances.InstanceStatus.RESTART_REQUIRED)
|
nilq/baby-python
|
python
|
import types
from . import base_objs as baseInitObjs
import plato_fit_integrals.core.workflow_coordinator as wflowCoord
class SurfaceEnergiesWorkFlow(wflowCoord.WorkFlowBase):
def __init__(self, surfaceObj, bulkObj):
self.surfObj = surfaceObj
self.bulkObj = bulkObj
self._ensureWorkFoldersAreTheSame()
self._createFilesOnInit()
self.output = types.SimpleNamespace()
def _ensureWorkFoldersAreTheSame(self):
if self.surfObj.workFolder != self.bulkObj.workFolder:
raise ValueError("surface workFolder must be the same as bulk workFolder.\nSurface path = {}\nBulk path = {}".format(self.surfObj.workFolder, self.bulkObj.workFolder))
@property
def preRunShellComms(self):
runList = list()
runList.extend( self.surfObj.runComm )
runList.extend( self.bulkObj.runComm )
runList = [x for x in runList if x is not None]
return runList
def _createFilesOnInit(self):
self.surfObj.writeFiles()
self.bulkObj.writeFiles()
def run(self):
ePerAtomBulk = self.bulkObj.ePerAtom
ePerAtomSurf = self.surfObj.ePerAtom
surfArea = self.surfObj.surfaceArea
nSurfAtoms = self.surfObj.nAtoms
surfEnergy = ( nSurfAtoms/(2*surfArea) ) * (ePerAtomSurf - ePerAtomBulk)
self.output.surfaceEnergy = surfEnergy
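        # Worked example with hypothetical values: for a 10-atom slab with a
        # surface area of 20 (area units) and ePerAtomSurf - ePerAtomBulk = 0.1
        # (energy per atom), surfEnergy = (10 / 40) * 0.1 = 0.025 energy per
        # area; the factor of 2 accounts for the slab exposing two surfaces.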
#TODO: I want both surfaceObj and bulkObj to have runComm, writeFile() and parseFile methods. The writeFile should use a variable on the object that
# lets the base folder be set to workFolder. The factory can handle the adapter needed for whatever the easiest to pass input object is
class SurfaceRunnerBase(baseInitObjs.PointDefectRunnerBase):
@property
def surfaceArea(self):
raise NotImplementedError()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
celery cli services module.
"""
from pyrin.application.services import get_component
from pyrin.task_queues.celery.cli import CeleryCLIPackage
def register_cli_handler(instance, **options):
"""
    registers a new celery cli handler or replaces the existing one
    if `replace=True` is provided; otherwise, it raises an error
on adding a cli handler which is already registered.
:param CeleryCLIHandlerBase instance: celery cli handler to be registered.
it must be an instance of
CeleryCLIHandlerBase.
:keyword bool replace: specifies that if there is another registered
cli handler with the same name, replace it
with the new one, otherwise raise an error.
defaults to False.
:raises InvalidCLIHandlerTypeError: invalid cli handler type error.
:raises DuplicatedCLIHandlerError: duplicated cli handler error.
"""
get_component(CeleryCLIPackage.COMPONENT_NAME).register_cli_handler(instance, **options)
def execute(handler_name, **options):
"""
executes the handler with the given name with given inputs.
:param str handler_name: handler name to be executed.
:raises CLIHandlerNotFoundError: cli handler not found error.
"""
return get_component(CeleryCLIPackage.COMPONENT_NAME).execute(handler_name, **options)
def get_package_class():
"""
gets the package class of celery cli manager.
:raises PackageClassIsNotSetError: package class is not set error.
:returns: type[CeleryCLIPackage]
"""
return get_component(CeleryCLIPackage.COMPONENT_NAME).get_package_class()
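# Hypothetical usage sketch (the handler class, handler name and options below
# are illustrative, not part of this module):
#
#   import pyrin.task_queues.celery.cli.services as celery_cli_services
#   celery_cli_services.register_cli_handler(MyCeleryCLIHandler(), replace=True)
#   celery_cli_services.execute('worker', concurrency=4)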
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of the pyFDA project hosted at https://github.com/chipmuenk/pyfda
#
# Copyright © pyFDA Project Contributors
# Licensed under the terms of the MIT License
# (see file LICENSE in root directory for details)
"""
Create a tabbed widget for all plot subwidgets in the list ``fb.plot_widgets_list``.
This list is compiled at startup in :class:`pyfda.tree_builder.Tree_Builder`, it is
kept as a module variable in :mod:`pyfda.filterbroker`.
"""
import logging
logger = logging.getLogger(__name__)
import importlib
from pyfda.libs.compat import QTabWidget, QVBoxLayout, QEvent, QtCore, pyqtSignal
from pyfda.libs.pyfda_lib import pprint_log
from pyfda.pyfda_rc import params
import pyfda.filterbroker as fb
#------------------------------------------------------------------------------
class PlotTabWidgets(QTabWidget):
# incoming, connected to input_tab_widget.sig_tx in pyfdax
sig_rx = pyqtSignal(object)
# outgoing: emitted by process_sig_rx
sig_tx = pyqtSignal(object)
def __init__(self, parent):
super(PlotTabWidgets, self).__init__(parent)
self._construct_UI()
    #--------------------------------------------------------------------------
def _construct_UI(self):
"""
Initialize UI with tabbed subwidgets: Instantiate dynamically each widget
from the dict `fb.plot_classes` and try to
- set the TabToolTip from the instance attribute `tool_tip`
- set the tab label from the instance attribute `tab_label`
for each widget.
- connect the available signals of all subwidgets (not all widgets have
both `sig_rx` and `sig_tx` signals).
- `self.sig_rx` is distributed to all `inst.sig_rx` signals
- all `inst.sig_tx` signals are collected in `self.sig_tx`
- `self.sig_tx.connect(self.sig_rx)` distributes incoming signals (via
pyfdax or coming from the input widgets) among all input widgets.
In order to prevent infinite loops, every widget needs to block in-
coming signals with its own name!
"""
tabWidget = QTabWidget(self)
tabWidget.setObjectName("plot_tabs")
n_wdg = 0 # number and ...
inst_wdg_str = "" # ... full names of successfully instantiated plot widgets
#
for plot_class in fb.plot_classes:
try:
mod_fq_name = fb.plot_classes[plot_class]['mod'] # fully qualified module name
mod = importlib.import_module(mod_fq_name)
wdg_class = getattr(mod, plot_class)
# and instantiate it
inst = wdg_class(self)
except ImportError as e:
logger.warning('Class "{0}" could not be imported from {1}:\n{2}.'\
.format(plot_class, mod_fq_name, e))
continue # unsuccessful, try next widget
if hasattr(inst, 'tab_label'):
tabWidget.addTab(inst, inst.tab_label)
else:
tabWidget.addTab(inst, "not set")
if hasattr(inst, 'tool_tip'):
tabWidget.setTabToolTip(n_wdg, inst.tool_tip)
if hasattr(inst, 'sig_tx'):
inst.sig_tx.connect(self.sig_tx)
if hasattr(inst, 'sig_rx'):
self.sig_rx.connect(inst.sig_rx)
n_wdg += 1 # successfully instantiated one more widget
inst_wdg_str += '\t' + mod_fq_name + "." + plot_class + '\n'
if len(inst_wdg_str) == 0:
logger.warning("No plotting widgets found!")
else:
logger.debug("Imported {0:d} plotting classes:\n{1}".format(n_wdg, inst_wdg_str))
#----------------------------------------------------------------------
layVMain = QVBoxLayout()
layVMain.addWidget(tabWidget)
layVMain.setContentsMargins(*params['wdg_margins'])#(left, top, right, bottom)
self.setLayout(layVMain)
#----------------------------------------------------------------------
# GLOBAL SIGNALS & SLOTs
#----------------------------------------------------------------------
# self.sig_rx.connect(inst.sig_rx) # this happens in _construct_UI()
#----------------------------------------------------------------------
# LOCAL SIGNALS & SLOTs
#----------------------------------------------------------------------
self.timer_id = QtCore.QTimer()
self.timer_id.setSingleShot(True)
# redraw current widget at timeout (timer was triggered by resize event):
self.timer_id.timeout.connect(self.current_tab_redraw)
self.sig_tx.connect(self.sig_rx) # loop back to local inputs
# self.sig_rx.connect(self.log_rx) # enable for debugging
# When user has selected a different tab, trigger a redraw of current tab
tabWidget.currentChanged.connect(self.current_tab_changed)
# The following does not work: maybe current scope must be left?
# tabWidget.currentChanged.connect(tabWidget.currentWidget().redraw)
tabWidget.installEventFilter(self)
"""
https://stackoverflow.com/questions/29128936/qtabwidget-size-depending-on-current-tab
The QTabWidget won't select the biggest widget's height as its own height
unless you use layout on the QTabWidget. Therefore, if you want to change
the size of QTabWidget manually, remove the layout and call QTabWidget::resize
according to the currentChanged signal.
You can set the size policy of the widget that is displayed to QSizePolicy::Preferred
and the other ones to QSizePolicy::Ignored. After that call adjustSize to update the sizes.
void MainWindow::updateSizes(int index)
{
for(int i=0;i<ui->tabWidget->count();i++)
if(i!=index)
ui->tabWidget->widget(i)->setSizePolicy(QSizePolicy::Ignored, QSizePolicy::Ignored);
ui->tabWidget->widget(index)->setSizePolicy(QSizePolicy::Preferred, QSizePolicy::Preferred);
ui->tabWidget->widget(index)->resize(ui->tabWidget->widget(index)->minimumSizeHint());
ui->tabWidget->widget(index)->adjustSize();
resize(minimumSizeHint());
adjustSize();
}
adjustSize(): The last two lines resize the main window itself. You might want to avoid it,
depending on your application. For example, if you set the rest of the widgets
to expand into the space just made available, it's not so nice if the window
resizes itself instead.
"""
#------------------------------------------------------------------------------
def log_rx(self, dict_sig=None):
"""
Enable `self.sig_rx.connect(self.log_rx)` above for debugging.
"""
if type(dict_sig) == dict:
logger.warning("SIG_RX\n{0}"\
.format(pprint_log(dict_sig)))
else:
logger.warning("empty dict")
#------------------------------------------------------------------------------
def current_tab_changed(self):
self.sig_tx.emit({'sender':__name__, 'ui_changed':'tab'})
#------------------------------------------------------------------------------
def current_tab_redraw(self):
self.sig_tx.emit({'sender':__name__, 'ui_changed':'resized'})
#------------------------------------------------------------------------------
def eventFilter(self, source, event):
"""
Filter all events generated by the QTabWidget. Source and type of all
events generated by monitored objects are passed to this eventFilter,
evaluated and passed on to the next hierarchy level.
This filter stops and restarts a one-shot timer for every resize event.
When the timer generates a timeout after 500 ms, ``current_tab_redraw()`` is
called by the timer.
"""
if isinstance(source, QTabWidget):
if event.type() == QEvent.Resize:
self.timer_id.stop()
self.timer_id.start(500)
# Call base class method to continue normal event processing:
return super(PlotTabWidgets, self).eventFilter(source, event)
#------------------------------------------------------------------------
def main():
import sys
from pyfda import pyfda_rc as rc
from pyfda.libs.compat import QApplication
app = QApplication(sys.argv)
app.setStyleSheet(rc.qss_rc)
mainw = PlotTabWidgets(None)
mainw.resize(300,400)
app.setActiveWindow(mainw)
mainw.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
# test with: python -m pyfda.plot_widgets.plot_tab_widgets
|
nilq/baby-python
|
python
|
'''
* Chapter 05: Pre-processing
5.2 Color Histograms
> Histogram equalization
'''
import cv2
from matplotlib import pyplot as grafico
imagemOriginal = cv2.imread("maquina.jpg", 0)
imagemEqualizada = cv2.equalizeHist(imagemOriginal)
cv2.imshow("Imagem Original", imagemOriginal)
cv2.imshow("Imagem Equalizada", imagemEqualizada)
grafico.hist(imagemOriginal.ravel(), 256, [0,256])
grafico.figure();
grafico.hist(imagemEqualizada.ravel(), 256, [0,256])
grafico.show()
# The equalizeHist function equalizes an image's histogram.
# Running this script shows:
# - the original image;
# - the image with its histogram equalized;
# - plots of the histograms of both images.
|
nilq/baby-python
|
python
|
__author__ = 'lucabasa'
__version__ = '1.1.0'
__status__ = 'development'
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA
pd.set_option('display.max_columns', 200)
import utility as ut
def train_svc(df_train, df_test, n_splits=25, pca=False):
train = df_train.copy()
test = df_test.copy()
oof = np.zeros(len(train))
preds = np.zeros(len(test))
cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
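    # The data is effectively 512 independent subsets keyed by the integer
    # column 'wheezy-copper-turtle-magic'; a separate feature reduction and SVC
    # is fitted per subset, and the out-of-fold/test predictions are stitched
    # back together through the saved indices idx1/idx2.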
for i in range(512):
train2 = train[train['wheezy-copper-turtle-magic']==i].copy()
test2 = test[test['wheezy-copper-turtle-magic']==i].copy()
idx1 = train2.index
idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
if pca:
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
#data2 = StandardScaler().fit_transform(PCA(n_components=40, random_state=51).fit_transform(data[cols]))
data2 = StandardScaler().fit_transform(PCA(svd_solver='full',n_components='mle').fit_transform(data[cols]))
train3 = data2[:train2.shape[0]]
test3 = data2[train2.shape[0]:]
else:
sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
train3 = sel.transform(train2[cols])
test3 = sel.transform(test2[cols])
        # shuffle=True is required for random_state to have an effect
        skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=15)
for train_index, test_index in skf.split(train3, train2['target']):
clf = Pipeline([('scaler', StandardScaler()),
('svn', SVC(probability=True,kernel='poly',degree=4,gamma='auto'))])
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
ut.report_oof(df_train, oof)
return oof, preds
def main():
df_train = pd.read_csv('data/train.csv')
df_test = pd.read_csv('data/test.csv')
oof_svc, preds_svc = train_svc(df_train, df_test)
ut.plot_results(oof_svc, preds_svc, df_train, 'svc')
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
"""
Simulates the initial state discrimination experiment using different
methods, to compare the resulting error rates.
"""
import torch
from perm_hmm.util import num_to_data
from perm_hmm.postprocessing import ExactPostprocessor, EmpiricalPostprocessor
from perm_hmm.classifiers.perm_classifier import PermClassifier
class HMMSimulator(object):
"""
Runs an experiment where data is generated by an HMM, then classified by
a classifier.
Instances of this class have the following attributes:
``phmm``:
The :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM` whose
misclassification rates will be computed.
"""
def __init__(self, phmm):
"""
Initializes the experiment.
:param perm_hmm.models.hmms.PermutedDiscreteHMM phmm:
the model whose
misclassification rate will be computed.
"""
self.phmm = phmm
""":py:class:`PermutedDiscreteHMM`
The model whose misclassification rates we wish to analyze.
"""
def all_classifications(self, num_steps, classifier=None, perm_policy=None, verbosity=0):
"""
Computes the data required to compute the exact misclassification rate for the given classifier.
This method always calls ``perm_policy.reset()`` if ``perm_policy`` is
not ``None``.
:param num_steps: Number of steps, int.
:param classifier: Defaults to
:py:class:`~perm_hmm.classifiers.perm_classifier.PermClassifier`,
initialized with the hmm ``self.phmm``.
:param perm_policy: Defaults to None. If specified, will call
``perm_policy.get_perms`` to compute the permutations.
:param verbosity: If ``verbosity == 0``, only the
:py:class:`~perm_hmm.postprocessing.ExactPostprocessor` needed to
compute the misclassification rates is returned.
If ``verbosity == 1``, this method returns a tuple, with the
postprocessor as the first element, and a dictionary with keys:
b"posterior_log_initial_state_dist":
The posterior log initial state distribution used to compute
the classifications.
b"perms":
Only present if ``perm_policy`` is not ``None``. The
permutations computed from ``perm_policy.get_perms()``.
If ``verbosity > 1``, this method returns a tuple, with the
postprocessor as the first element, and a dictionary with keys as in
the case with ``verbosity == 1`` and in addition,
b"history":
Whatever is stored in ``perm_policy.calc_history`` after
calling ``perm_policy.get_perms``.
Note that if ``verbosity > 1``, the simulator calls
``perm_policy.reset(save_history=True)`` before calling
``perm_policy.get_perms()``.
"""
base = len(self.phmm.observation_dist.enumerate_support())
data = torch.stack(
[num_to_data(num, num_steps, base) for num in range(base**num_steps)]
).float()
if verbosity > 1:
save_history = True
else:
save_history = False
if classifier is None:
classifier = PermClassifier(self.phmm)
if perm_policy is not None:
perm_policy.reset(save_history=save_history)
perms = perm_policy.get_perms(data)
if save_history:
history = perm_policy.calc_history
classi_result = classifier.classify(data, perms=perms, verbosity=verbosity)
else:
perms = None
classi_result = classifier.classify(data, verbosity=verbosity)
if verbosity:
classifications, classi_dict = classi_result
if perm_policy is not None:
classi_dict[b"perms"] = perms
if save_history:
classi_dict[b"history"] = history
else:
classifications = classi_result
lp = self.phmm.log_prob(data, perms)
dist = self.phmm.posterior_log_initial_state_dist(data, perms)
log_joint = dist.T + lp
ep = ExactPostprocessor(
log_joint,
classifications,
)
if verbosity:
return ep, classi_dict
return ep
def simulate(self, num_steps, num_samples, classifier=None, perm_policy=None, verbosity=0):
"""
Computes the data required to compute the misclassification rates
of the given classifier.
This method always calls ``perm_policy.reset()`` if ``perm_policy`` is
not ``None``.
:param num_steps: Number of steps, int.
:param num_samples: number of samples to draw from the hmm, int
:param classifier: Defaults to
:py:class:`~perm_hmm.classifiers.perm_classifier.PermClassifier`,
initialized with the hmm ``self.phmm``.
:param perm_policy: Defaults to None. If specified, will call
``self.hmm.sample(perm_policy=perm_policy)``.
:param verbosity: If ``verbosity == 0``, only the
:py:class:`~perm_hmm.postprocessing.EmpiricalPostprocessor` needed
to compute the misclassification rates is returned.
If ``verbosity == 1``, this method returns a tuple, with the
postprocessor as the first element, and a dictionary with keys:
b"posterior_log_initial_state_dist":
The posterior log initial state distribution used to compute
the classifications.
b"perms":
Only present if ``perm_policy`` is not ``None``. The
permutations computed from ``perm_policy.get_perms()``.
If ``verbosity > 1``, this method returns a tuple, with the
postprocessor as the first element, and a dictionary with keys as in
the case with ``verbosity == 1`` and in addition,
b"history":
Whatever is stored in ``perm_policy.calc_history`` after
calling ``perm_policy.get_perms``.
Note that if ``verbosity > 1``, the simulator calls
``perm_policy.reset(save_history=True)`` before calling
``perm_policy.get_perms()``.
"""
if verbosity > 1:
save_history = True
else:
save_history = False
if perm_policy is not None:
perm_policy.reset(save_history=save_history)
output = self.phmm.sample((num_samples, num_steps), perm_policy=perm_policy)
if perm_policy is not None:
perms = perm_policy.perm_history
else:
perms = None
history = None
if save_history:
if perm_policy is not None:
history = perm_policy.calc_history
data = output.observations
if classifier is None:
classifier = PermClassifier(self.phmm)
if perms is not None:
classi_result = classifier.classify(data, perms=perms, verbosity=verbosity)
else:
classi_result = classifier.classify(data, verbosity=verbosity)
if verbosity:
classifications, classi_dict = classi_result
classi_dict[b"data"] = data
if perm_policy is not None:
classi_dict[b"perms"] = perms
if history is not None:
classi_dict[b"history"] = history
else:
classifications = classi_result
ep = EmpiricalPostprocessor(
output.states[..., 0],
classifications,
)
if verbosity:
return ep, classi_dict
return ep
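# Hypothetical usage sketch (the postprocessor method name is illustrative,
# not guaranteed by this module):
#
#   simulator = HMMSimulator(phmm)  # phmm: a PermutedDiscreteHMM instance
#   ep = simulator.simulate(num_steps=5, num_samples=1000)
#   # `ep` is an EmpiricalPostprocessor; its methods (e.g. for computing
#   # misclassification rates) can then be used to evaluate the classifier.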
|
nilq/baby-python
|
python
|
import json
import pickle
import pandas as pd
from decimal import Decimal
from django.db.models import Avg
from recommender.models import Rating
from scripts.recommenders.base_recommender import BaseRecommender
class SVDRecommender(BaseRecommender):
def __init__(self, save_path='./models/SVD/model/'):
self.save_path = save_path
self.avg = Decimal(list(Rating.objects.all().aggregate(Avg('rating')).values())[0])
self.load_model(self.save_path)
def load_model(self, save_path):
with open(save_path + 'user_bias.data', 'rb') as file:
self.user_bias = pickle.load(file)
with open(save_path + 'item_bias.data', 'rb') as file:
self.item_bias = pickle.load(file)
with open(save_path + 'user_factors.json', 'r') as file:
self.user_factors = pd.DataFrame(json.load(file)).T
with open(save_path + 'item_factors.json', 'r') as file:
self.item_factors = pd.DataFrame(json.load(file)).T
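    # The scores produced below follow the usual biased matrix factorization
    # model: r_hat(u, i) = global_avg + user_bias[u] + item_bias[i]
    # + user_factors[u] . item_factors[i], with the dot product computed from
    # the loaded factor data frames.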
def recommend_items(self, user_id, num=10):
users_items = Rating.objects.filter(user_id=user_id).order_by('-rating')[:100]
return self.recommend_items_by_ratings(user_id, users_items.values(), num)
def recommend_items_by_ratings(self, user_id, users_items, num=10):
rated_movies_dict = {movie['movie_id']: movie['rating'] for movie in users_items}
recs = {}
if str(user_id) in self.user_factors.columns:
user = self.user_factors[str(user_id)]
scores = self.item_factors.T.dot(user)
rating = scores.sort_values(ascending=False)[:num + len(rated_movies_dict)]
user_bias = 0
if user_id in self.user_bias.keys():
user_bias = self.user_bias[user_id]
elif int(user_id) in self.user_bias.keys():
user_bias = self.user_bias[int(user_id)]
rating += float(user_bias + self.avg)
            recs = {
                r[0]: {'prediction': r[1] + float(self.item_bias[r[0]])}
                for r in zip(rating.index, rating)
                if r[0] not in rated_movies_dict
            }
sorted_items = sorted(recs.items(), key=lambda item: -float(item[1]['prediction']))[:num]
return sorted_items
def predict_score(self, user_id, item_id):
if str(user_id) in self.user_factors.columns:
user = self.user_factors[str(user_id)]
scores = self.item_factors.T.dot(user)
user_bias = 0
if user_id in self.user_bias.keys():
user_bias = self.user_bias[user_id]
elif int(user_id) in self.user_bias.keys():
user_bias = self.user_bias[int(user_id)]
rating = float(user_bias + self.avg)
            try:
                return Decimal(scores[item_id] + rating)
            except KeyError:
                # unknown item id: fall back to the bias-only baseline
                return Decimal(rating)
return Decimal(0.0)
|
nilq/baby-python
|
python
|
import ast
from PythonVoiceCodingPlugin.library import nearest_node_from_offset,sorted_by_source_region,get_source_region,node_from_range,make_flat
from PythonVoiceCodingPlugin.library.info import *
from PythonVoiceCodingPlugin.library.LCA import LCA
from PythonVoiceCodingPlugin.library.level_info import LevelVisitor
from PythonVoiceCodingPlugin.library.partial import partially_parse, line_partial
from PythonVoiceCodingPlugin.library.traverse import search_upwards,search_upwards_log, find_matching,match_node, find_all_nodes,search_upwards_for_parent
from PythonVoiceCodingPlugin.queries.abstract import SelectionQuery
from PythonVoiceCodingPlugin.queries.tiebreak import tiebreak_on_lca
from PythonVoiceCodingPlugin.queries.strategies import adjective_strategy,decode_abstract_vertical,translate_adjective,obtain_result
class SelectBigRoi(SelectionQuery):
"""docstring for BigRoi"""
def handle_single(self,view_information,query_description,extra = {}):
f = query_description["format"]
possibilities = {
            1: self.case_one, 2: self.case_two, 3: self.case_three, 4: self.case_four,
}
return possibilities[f](view_information,query_description, extra)
def preliminary(self,view_information,query_description, extra = {}):
selection = self._get_selection(view_information,extra)
build = self.general_build
if not build or not build[0]:
return None,None,None,None
root,atok,m,r = build
selection = m.forward(selection)
origin = nearest_node_from_offset(root,atok, selection[0]) if selection[0]==selection[1] else node_from_range(root,atok, selection)
        definition_node = search_upwards(origin,ast.FunctionDef) # maybe need to change that in the future
# in order to find the outermost function.
if definition_node and definition_node.first_token.startpos > selection[1]:
token = atok.get_token_from_offset(selection[0])
while token.string.isspace():
token = atok.prev_token( token )
s = token.startpos
origin = nearest_node_from_offset(root,atok, s)
definition_node = search_upwards(origin,ast.FunctionDef)
definition_node = (
definition_node
if definition_node and query_description["big_roi"] not in ["import statement"]
else root
)
return build, selection, origin, definition_node
def decode(self,query_description):
standard = lambda x:x
possibilities = {
"return value": ((ast.Return,ast.Yield,ast.YieldFrom),(),get_return_value),
"pass":(ast.Pass,(),standard),
"break":(ast.Break,(),standard),
"continue":(ast.Continue,(),standard),
"if condition":(ast.If,(),get_pure_if_condition),
"else if condition":(ast.If,(),get_elif_condition),
"while condition":(ast.While,(),get_condition),
"if expression":(ast.IfExp,(),standard),
"if expression condition":(ast.IfExp,(),get_condition),
"if expression body":(ast.IfExp,(),get_body),
"comprehension condition":(ast.comprehension,(),get_comprehension_condition),
"assertion message":(ast.Assert,(), get_message),
"assertion condition":(ast.Assert,(), get_condition),
"assignment left":((ast.Assign,ast.AugAssign),(),get_left),
"assignment right":((ast.Assign,ast.AugAssign),(),get_right),
"assignment full":((ast.Assign,ast.AugAssign),(),standard),
"expression statement":(ast.Expr,(),standard),
"iterable":((ast.For,ast.comprehension),(),get_iterable),
"iterator":((ast.For,ast.comprehension),(),get_iterator),
"import statement":((ast.Import,ast.ImportFrom),(),standard),
}
temporary = possibilities[query_description["big_roi"]]
if "big_roi_sub_index" in query_description:
if query_description["big_roi_sub_index"] == 0:
return possibilities[query_description["big_roi"]]
else:
index = query_description["big_roi_sub_index"]
def modified_information(x, information,index):
data = information(x)
return get_sub_index(data,index)
y = lambda x: temporary[2](x)
y.secondary = lambda x: modified_information(x,temporary[2],index-1)
return (temporary[0],temporary[1],y)
def case_one(self,view_information,query_description, extra = {}):
################################################################
# <big_roi>
###############################################################
build, selection, origin, definition_node = self.preliminary(view_information, query_description,extra)
targets, exclusions, information = self.decode(query_description)
information = getattr(information,"secondary",information)
candidates = tiebreak_on_lca(definition_node,origin,find_all_nodes(definition_node, targets, exclusions))
candidates = [information(x) for x in candidates if information(x)]
result, alternatives = obtain_result(None, candidates)
return self._backward_result(result, alternatives,build)
def case_two(self,view_information,query_description, extra = {}):
################################################################
# <adjective> <big_roi>
###############################################################
build, selection, origin, definition_node = self.preliminary(view_information, query_description,extra)
targets, exclusions, information = self.decode(query_description)
temporary_information = lambda x: information(x) if match_node(x,targets,exclusions) else None
additional_parameters = {}
root,atok,m,r = build
if selection[0]!=selection[1]:
additional_parameters["small_root"] = origin
additional_parameters["only_information"] = True
        # just looking at the shape of this code you can tell there's a bug in here somewhere :)
result, alternatives = adjective_strategy(
atok=atok,
root = definition_node,
adjective_word = query_description["adjective"],
level_nodes = find_all_nodes(definition_node, (ast.If,ast.While,ast.For,ast.Try,ast.With,ast.FunctionDef)),
information_nodes = find_matching(definition_node,temporary_information),
**additional_parameters
)
information = getattr(information,"secondary",information)
result = information(result) if result else None
alternatives =[ information(x) for x in alternatives] if alternatives else []
return self._backward_result(result, alternatives,build)
def case_three(self,view_information,query_description, extra = {}):
################################################################
# <vertical_abstract_only_direction> [<ndir>] <big_roi> [<big_roi_sub_index>]
###############################################################
build, selection, origin, definition_node = self.preliminary(view_information, query_description,extra)
targets, exclusions, information = self.decode(query_description)
temporary_information = lambda x: information(x) if match_node(x,targets,exclusions) else None
root,atok,m,r = build
direction = query_description["vertical_abstract_only_direction"]
ndir = query_description["ndir"]
row, column = view_information["rowcol"](m.backward(selection)[0])
# bug fixing
test_result = decode_abstract_vertical(root,atok,targets,row+1, 1,direction,True,
temporary_information,want_alternatives = False)
l = search_upwards_log(origin,ast.stmt)
if test_result in [l[0]] + l[1] and row + 1>=test_result.first_token.start[0]:
ndir = ndir + 1
result,alternatives = decode_abstract_vertical(root,atok,targets,row+1, ndir,direction,True,
temporary_information,want_alternatives = True)
if result:
new_definition_node = search_upwards(result,ast.FunctionDef)
if definition_node is not new_definition_node and new_definition_node is not None:
alternatives = tiebreak_on_lca(new_definition_node,result,find_all_nodes(new_definition_node,targets , exclusions))
result, alternatives = obtain_result(result, alternatives)
information = getattr(information,"secondary",information)
result = information(result) if result else None
alternatives = [information(x) for x in alternatives] if alternatives else []
return self._backward_result(result, alternatives,build)
def case_four(self,view_information,query_description, extra = {}):
################################################################
# [smart] <vertical_abstract_only_direction> [<ndir>] <block> [<adjective>] <big_roi> [<big_roi_sub_index>]
###############################################################
build, selection, origin, definition_node = self.preliminary(view_information, query_description,extra)
targets, exclusions, information = self.decode(query_description)
temporary_information = lambda x: match_node(x,ast.FunctionDef)
root,atok,m,r = build
direction = query_description["vertical_abstract_only_direction"]
ndir = query_description["ndir"]
row = view_information["rowcol"](selection[0])[0] + 1 if definition_node is root else definition_node.first_token.start[0]
bonus = 1 if definition_node.first_token.startpos > selection[1] else 0
t = decode_abstract_vertical(root,atok,targets,row, ndir + bonus,direction,True,temporary_information)
if query_description["adjective"]=="None":
information = getattr(information,"secondary",information)
candidates = tiebreak_on_lca(root,definition_node,find_all_nodes(t, targets, exclusions))
candidates = [information(x) for x in candidates if information(x)]
result, alternatives = obtain_result(None, candidates)
return self._backward_result(result, alternatives,build)
else:
additional_parameters = {}
result, alternatives = adjective_strategy(
atok=atok,
root = t,
adjective_word = query_description["adjective"],
level_nodes = find_all_nodes(t,(ast.If,ast.While,ast.For,ast.Try,ast.With,ast.FunctionDef)),
information_nodes = find_matching(t,lambda x: information(x) if match_node(x,targets,exclusions) else None),
**additional_parameters
)
information = getattr(information,"secondary",information)
result = information(result) if result else None
alternatives =[ information(x) for x in alternatives] if alternatives else []
return self._backward_result(result, alternatives,build)
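# A minimal sketch of the decode() contract: it maps a spoken query description
# to (target node types, exclusions, info extractor). decode() never touches
# self, so it can be exercised without a full plugin setup; the query below is
# hypothetical.
_targets, _exclusions, _info = SelectBigRoi.decode(None, {"big_roi": "pass"})
assert _targets is ast.Pass and _info("x") == "x"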
|
nilq/baby-python
|
python
|
def get_current_admin():
def decorator(func):
setattr(func, 'get_current_admin', True)
return func
return decorator
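# A minimal sketch of how the marker might be consumed; the view function and
# the dispatch check below are hypothetical, not part of the original code.
@get_current_admin()
def admin_dashboard(request, current_admin=None):
    return current_admin
# A dispatcher can test for the marker before injecting the admin user:
assert getattr(admin_dashboard, 'get_current_admin', False) is True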
|
nilq/baby-python
|
python
|
"""D-Bus interface for rauc."""
from enum import Enum
import logging
from typing import Optional
from ..exceptions import DBusError, DBusInterfaceError
from ..utils.gdbus import DBus
from .interface import DBusInterface
from .utils import dbus_connected
_LOGGER: logging.Logger = logging.getLogger(__name__)
DBUS_NAME = "de.pengutronix.rauc"
DBUS_OBJECT = "/"
class RaucState(str, Enum):
"""Rauc slot states."""
GOOD = "good"
BAD = "bad"
ACTIVE = "active"
class Rauc(DBusInterface):
"""Handle D-Bus interface for rauc."""
def __init__(self):
"""Initialize Properties."""
self._operation: Optional[str] = None
self._last_error: Optional[str] = None
self._compatible: Optional[str] = None
self._variant: Optional[str] = None
self._boot_slot: Optional[str] = None
async def connect(self):
"""Connect to D-Bus."""
try:
self.dbus = await DBus.connect(DBUS_NAME, DBUS_OBJECT)
except DBusError:
_LOGGER.warning("Can't connect to rauc")
except DBusInterfaceError:
_LOGGER.warning("Host has no rauc support. OTA updates have been disabled.")
@property
def operation(self) -> Optional[str]:
"""Return the current (global) operation."""
return self._operation
@property
def last_error(self) -> Optional[str]:
"""Return the last message of the last error that occurred."""
return self._last_error
@property
def compatible(self) -> Optional[str]:
"""Return the system compatible string."""
return self._compatible
@property
def variant(self) -> Optional[str]:
"""Return the system variant string."""
return self._variant
@property
def boot_slot(self) -> Optional[str]:
"""Return the used boot slot."""
return self._boot_slot
@dbus_connected
def install(self, raucb_file):
"""Install rauc bundle file.
Return a coroutine.
"""
return self.dbus.Installer.Install(raucb_file)
@dbus_connected
def get_slot_status(self):
"""Get slot status.
Return a coroutine.
"""
return self.dbus.Installer.GetSlotStatus()
@dbus_connected
def signal_completed(self):
"""Return a signal wrapper for completed signal.
Return a coroutine.
"""
return self.dbus.wait_signal(f"{DBUS_NAME}.Installer.Completed")
@dbus_connected
def mark(self, state: RaucState, slot_identifier: str):
"""Get slot status.
Return a coroutine.
"""
return self.dbus.Installer.Mark(state, slot_identifier)
@dbus_connected
async def update(self):
"""Update Properties."""
data = await self.dbus.get_properties(f"{DBUS_NAME}.Installer")
if not data:
_LOGGER.warning("Can't get properties for rauc")
return
self._operation = data.get("Operation")
self._last_error = data.get("LastError")
self._compatible = data.get("Compatible")
self._variant = data.get("Variant")
self._boot_slot = data.get("BootSlot")
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_common import debug
from nfv_vim.rpc._rpc_defs import RPC_MSG_RESULT
from nfv_vim.rpc._rpc_defs import RPC_MSG_TYPE
from nfv_vim.rpc._rpc_defs import RPC_MSG_VERSION
from nfv_vim.rpc._rpc_message import RPCMessage
DLOG = debug.debug_get_logger('nfv_vim.rpc.instance')
class APIRequestCreateInstance(RPCMessage):
"""
RPC API Request Message - Create Instance
"""
name = None
instance_type_uuid = None
image_uuid = None
vcpus = None
memory_mb = None
disk_gb = None
ephemeral_gb = None
swap_gb = None
network_uuid = None
auto_recovery = None
live_migration_timeout = None
live_migration_max_downtime = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.CREATE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestCreateInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['name'] = self.name
msg['instance_type_uuid'] = self.instance_type_uuid
msg['image_uuid'] = self.image_uuid
msg['vcpus'] = self.vcpus
msg['memory_mb'] = self.memory_mb
msg['disk_gb'] = self.disk_gb
msg['ephemeral_gb'] = self.ephemeral_gb
msg['swap_gb'] = self.swap_gb
msg['network_uuid'] = self.network_uuid
msg['sw:wrs:auto_recovery'] = self.auto_recovery
msg['hw:wrs:live_migration_timeout'] = self.live_migration_timeout
msg['hw:wrs:live_migration_max_downtime'] \
= self.live_migration_max_downtime
def deserialize_payload(self, msg):
self.name = msg.get('name', None)
self.instance_type_uuid = msg.get('instance_type_uuid', None)
self.image_uuid = msg.get('image_uuid', None)
self.vcpus = msg.get('vcpus', None)
self.memory_mb = msg.get('memory_mb', None)
self.disk_gb = msg.get('disk_gb', None)
self.ephemeral_gb = msg.get('ephemeral_gb', None)
self.swap_gb = msg.get('swap_gb', None)
self.network_uuid = msg.get('network_uuid', None)
self.auto_recovery = msg.get('sw:wrs:auto_recovery', None)
self.live_migration_timeout = msg.get('hw:wrs:live_migration_timeout',
None)
self.live_migration_max_downtime \
= msg.get('hw:wrs:live_migration_max_downtime', None)
def __str__(self):
return "create-instance request: %s" % self.name
class APIResponseCreateInstance(RPCMessage):
"""
RPC API Response Message - Create Instance
"""
uuid = None
name = None
admin_state = None
oper_state = None
avail_status = None
action = None
host_uuid = None
host_name = None
instance_type_original_name = None
image_uuid = None
vcpus = None
memory_mb = None
disk_gb = None
ephemeral_gb = None
swap_gb = None
network_uuid = None
auto_recovery = None
live_migration_timeout = None
live_migration_max_downtime = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.CREATE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseCreateInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
msg['name'] = self.name
msg['admin_state'] = self.admin_state
msg['oper_state'] = self.oper_state
msg['avail_status'] = self.avail_status
msg['action'] = self.action
msg['host_uuid'] = self.host_uuid
msg['host_name'] = self.host_name
msg['instance_type_original_name'] = self.instance_type_original_name
msg['image_uuid'] = self.image_uuid
msg['vcpus'] = self.vcpus
msg['memory_mb'] = self.memory_mb
msg['disk_gb'] = self.disk_gb
msg['ephemeral_gb'] = self.ephemeral_gb
msg['swap_gb'] = self.swap_gb
msg['network_uuid'] = self.network_uuid
msg['sw:wrs:auto_recovery'] = self.auto_recovery
msg['hw:wrs:live_migration_timeout'] = self.live_migration_timeout
msg['hw:wrs:live_migration_max_downtime'] \
= self.live_migration_max_downtime
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
self.name = msg.get('name', None)
self.admin_state = msg.get('admin_state', None)
self.oper_state = msg.get('oper_state', None)
self.avail_status = msg.get('avail_status', None)
self.action = msg.get('action', None)
self.host_uuid = msg.get('host_uuid', None)
self.host_name = msg.get('host_name', None)
self.instance_type_original_name = msg.get(
'instance_type_original_name', None)
self.image_uuid = msg.get('image_uuid', None)
self.vcpus = msg.get('vcpus', None)
self.memory_mb = msg.get('memory_mb', None)
self.disk_gb = msg.get('disk_gb', None)
self.ephemeral_gb = msg.get('ephemeral_gb', None)
self.swap_gb = msg.get('swap_gb', None)
self.network_uuid = msg.get('network_uuid', None)
self.auto_recovery = msg.get('sw:wrs:auto_recovery', None)
self.live_migration_timeout = msg.get('hw:wrs:live_migration_timeout',
None)
self.live_migration_max_downtime \
= msg.get('hw:wrs:live_migration_max_downtime', None)
def __str__(self):
return "create-instance response: %s" % self.uuid
class APIRequestStartInstance(RPCMessage):
"""
RPC API Request Message - Start Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.START_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestStartInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "start-instance request: %s" % self.uuid
class APIResponseStartInstance(RPCMessage):
"""
RPC API Response Message - Start Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.START_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseStartInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "start-instance response: %s" % self.uuid
class APIRequestStopInstance(RPCMessage):
"""
RPC API Request Message - Stop Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.STOP_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestStopInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "stop-instance request: %s" % self.uuid
class APIResponseStopInstance(RPCMessage):
"""
RPC API Response Message - Stop Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.STOP_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseStopInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "stop-instance response: %s" % self.uuid
class APIRequestPauseInstance(RPCMessage):
"""
RPC API Request Message - Pause Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.PAUSE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestPauseInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "pause-instance request: %s" % self.uuid
class APIResponsePauseInstance(RPCMessage):
"""
RPC API Response Message - Pause Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.PAUSE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponsePauseInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "pause-instance response: %s" % self.uuid
class APIRequestUnpauseInstance(RPCMessage):
"""
RPC API Request Message - Unpause Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.UNPAUSE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestUnpauseInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "unpause-instance request: %s" % self.uuid
class APIResponseUnpauseInstance(RPCMessage):
"""
RPC API Response Message - Unpause Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.UNPAUSE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseUnpauseInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "unpause-instance response: %s" % self.uuid
class APIRequestSuspendInstance(RPCMessage):
"""
RPC API Request Message - Suspend Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.SUSPEND_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestSuspendInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "suspend-instance request: %s" % self.uuid
class APIResponseSuspendInstance(RPCMessage):
"""
RPC API Response Message - Suspend Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.SUSPEND_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseSuspendInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "suspend-instance response: %s" % self.uuid
class APIRequestResumeInstance(RPCMessage):
"""
RPC API Request Message - Resume Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.RESUME_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestResumeInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "resume-instance request: %s" % self.uuid
class APIResponseResumeInstance(RPCMessage):
"""
RPC API Response Message - Resume Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.RESUME_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseResumeInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "resume-instance response: %s" % self.uuid
class APIRequestRebootInstance(RPCMessage):
"""
RPC API Request Message - Reboot Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.REBOOT_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestRebootInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "reboot-instance request: %s" % self.uuid
class APIResponseRebootInstance(RPCMessage):
"""
RPC API Response Message - Reboot Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.REBOOT_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseRebootInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "reboot-instance response: %s" % self.uuid
class APIRequestLiveMigrateInstance(RPCMessage):
"""
RPC API Request Message - Live Migrate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.LIVE_MIGRATE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestLiveMigrateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "live-migrate-instance request: %s" % self.uuid
class APIResponseLiveMigrateInstance(RPCMessage):
"""
RPC API Response Message - Live Migrate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.LIVE_MIGRATE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseLiveMigrateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "live-migrate-instance response: %s" % self.uuid
class APIRequestColdMigrateInstance(RPCMessage):
"""
RPC API Request Message - Cold Migrate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.COLD_MIGRATE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestColdMigrateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "cold-migrate-instance request: %s" % self.uuid
class APIResponseColdMigrateInstance(RPCMessage):
"""
RPC API Response Message - Cold Migrate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.COLD_MIGRATE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseColdMigrateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "cold-migrate-instance response: %s" % self.uuid
class APIRequestEvacuateInstance(RPCMessage):
"""
RPC API Request Message - Evacuate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.EVACUATE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestEvacuateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "evacuate-instance request: %s" % self.uuid
class APIResponseEvacuateInstance(RPCMessage):
"""
RPC API Response Message - Evacuate Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.EVACUATE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseEvacuateInstance, self).__init__(msg_version,
msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "evacuate-instance response: %s" % self.uuid
class APIRequestDeleteInstance(RPCMessage):
"""
RPC API Request Message - Delete Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.DELETE_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestDeleteInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "delete-instance request: %s" % self.uuid
class APIResponseDeleteInstance(RPCMessage):
"""
RPC API Response Message - Delete Instance
"""
uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.DELETE_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseDeleteInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
def __str__(self):
return "delete-instance response: %s" % self.uuid
class APIRequestGetInstance(RPCMessage):
"""
RPC API Request Message - Get Instance
"""
get_all = False
filter_by_uuid = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.GET_INSTANCE_REQUEST,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIRequestGetInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['get_all'] = self.get_all
msg['filter_by_uuid'] = self.filter_by_uuid
def deserialize_payload(self, msg):
self.get_all = msg.get('get_all', True)
self.filter_by_uuid = msg.get('filter_by_uuid', None)
def __str__(self):
if self.get_all:
return "get-instance request: get-all"
else:
return "get-instance request: %s" % self.filter_by_uuid
class APIResponseGetInstance(RPCMessage):
"""
RPC API Response Message - Get Instance
"""
uuid = None
name = None
admin_state = None
oper_state = None
avail_status = None
action = None
host_uuid = None
host_name = None
instance_type_original_name = None
image_uuid = None
vcpus = None
memory_mb = None
disk_gb = None
ephemeral_gb = None
swap_gb = None
auto_recovery = None
live_migration_timeout = None
live_migration_max_downtime = None
def __init__(self, msg_version=RPC_MSG_VERSION.VERSION_1_0,
msg_type=RPC_MSG_TYPE.GET_INSTANCE_RESPONSE,
msg_result=RPC_MSG_RESULT.SUCCESS):
super(APIResponseGetInstance, self).__init__(msg_version, msg_type,
msg_result)
def serialize_payload(self, msg):
msg['uuid'] = self.uuid
msg['name'] = self.name
msg['admin_state'] = self.admin_state
msg['oper_state'] = self.oper_state
msg['avail_status'] = self.avail_status
msg['action'] = self.action
msg['host_uuid'] = self.host_uuid
msg['host_name'] = self.host_name
msg['instance_type_original_name'] = self.instance_type_original_name
msg['image_uuid'] = self.image_uuid
msg['vcpus'] = self.vcpus
msg['memory_mb'] = self.memory_mb
msg['disk_gb'] = self.disk_gb
msg['ephemeral_gb'] = self.ephemeral_gb
msg['swap_gb'] = self.swap_gb
msg['sw:wrs:auto_recovery'] = self.auto_recovery
msg['hw:wrs:live_migration_timeout'] = self.live_migration_timeout
msg['hw:wrs:live_migration_max_downtime'] \
= self.live_migration_max_downtime
def deserialize_payload(self, msg):
self.uuid = msg.get('uuid', None)
self.name = msg.get('name', None)
self.admin_state = msg.get('admin_state', None)
self.oper_state = msg.get('oper_state', None)
self.avail_status = msg.get('avail_status', None)
self.action = msg.get('action', None)
self.host_uuid = msg.get('host_uuid', None)
self.host_name = msg.get('host_name', None)
self.instance_type_original_name = msg.get(
'instance_type_original_name', None)
self.image_uuid = msg.get('image_uuid', None)
self.vcpus = msg.get('vcpus', None)
self.memory_mb = msg.get('memory_mb', None)
self.disk_gb = msg.get('disk_gb', None)
self.ephemeral_gb = msg.get('ephemeral_gb', None)
self.swap_gb = msg.get('swap_gb', None)
self.auto_recovery = msg.get('sw:wrs:auto_recovery', None)
self.live_migration_timeout = msg.get('hw:wrs:live_migration_timeout',
None)
self.live_migration_max_downtime \
= msg.get('hw:wrs:live_migration_max_downtime', None)
def __str__(self):
return "get-instance response: %s" % self.uuid
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Tests for util.py."""
import datetime
import logging
import os
import sys
import unittest
# Fix up paths for running tests.
sys.path.insert(0, "../src/")
from pipeline import util
from google.appengine.api import taskqueue
class JsonSerializationTest(unittest.TestCase):
"""Test custom json encoder and decoder."""
def testE2e(self):
now = datetime.datetime.now()
obj = {"a": 1, "b": [{"c": "d"}], "e": now}
new_obj = util.json.loads(util.json.dumps(
obj, cls=util.JsonEncoder), cls=util.JsonDecoder)
        self.assertEqual(obj, new_obj)
class GetTaskTargetTest(unittest.TestCase):
def setUp(self):
super(GetTaskTargetTest, self).setUp()
os.environ["CURRENT_VERSION_ID"] = "v7.1"
os.environ["CURRENT_MODULE_ID"] = "foo-module"
def testGetTaskTarget(self):
self.assertEqual("v7.foo-module", util._get_task_target())
task = taskqueue.Task(url="/relative_url",
target=util._get_task_target())
self.assertEqual("v7.foo-module", task.target)
def testGetTaskTargetDefaultModule(self):
os.environ["CURRENT_MODULE_ID"] = "default"
self.assertEqual("v7.default", util._get_task_target())
task = taskqueue.Task(url="/relative_url",
target=util._get_task_target())
self.assertEqual("v7.default", task.target)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neobolt.direct import connect
from neobolt.exceptions import ServiceUnavailable
from neobolt.routing import READ_ACCESS, WRITE_ACCESS, RoutingConnectionPool, RoutingProtocolError
from test.stub.tools import StubCluster, StubTestCase
VALID_ROUTING_RECORD = {
"ttl": 300,
"servers": [
{"role": "ROUTE", "addresses": ["127.0.0.1:9001", "127.0.0.1:9002", "127.0.0.1:9003"]},
{"role": "READ", "addresses": ["127.0.0.1:9004", "127.0.0.1:9005"]},
{"role": "WRITE", "addresses": ["127.0.0.1:9006"]},
],
}
VALID_ROUTING_RECORD_WITH_EXTRA_ROLE = {
"ttl": 300,
"servers": [
{"role": "ROUTE", "addresses": ["127.0.0.1:9001", "127.0.0.1:9002", "127.0.0.1:9003"]},
{"role": "READ", "addresses": ["127.0.0.1:9004", "127.0.0.1:9005"]},
{"role": "WRITE", "addresses": ["127.0.0.1:9006"]},
{"role": "MAGIC", "addresses": ["127.0.0.1:9007"]},
],
}
INVALID_ROUTING_RECORD = {
"X": 1,
}
UNREACHABLE_ADDRESS = ("127.0.0.1", 8080)
RoutingTable = object()
def connector(address, **kwargs):
return connect(address, auth=("neotest", "neotest"), **kwargs)
def RoutingPool(*routers):
return RoutingConnectionPool(connector, UNREACHABLE_ADDRESS, {}, *routers)
class RoutingConnectionPoolFetchRoutingInfoTestCase(StubTestCase):
def test_should_get_info_from_router(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
result = pool.fetch_routing_info(address)
assert len(result) == 1
record = result[0]
assert record["ttl"] == 300
assert record["servers"] == [
{"role": "ROUTE", "addresses": ["127.0.0.1:9001", "127.0.0.1:9002",
"127.0.0.1:9003"]},
{"role": "READ", "addresses": ["127.0.0.1:9004", "127.0.0.1:9005"]},
{"role": "WRITE", "addresses": ["127.0.0.1:9006"]},
]
def test_should_remove_router_if_cannot_connect(self):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert address in pool.routing_table.routers
_ = pool.fetch_routing_info(address)
assert address not in pool.routing_table.routers
def test_should_remove_router_if_connection_drops(self):
with StubCluster({9001: "v1/rude_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert address in pool.routing_table.routers
_ = pool.fetch_routing_info(address)
assert address not in pool.routing_table.routers
def test_should_not_fail_if_cannot_connect_but_router_already_removed(self):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
assert address not in pool.routing_table.routers
_ = pool.fetch_routing_info(address)
assert address not in pool.routing_table.routers
def test_should_not_fail_if_connection_drops_but_router_already_removed(self):
with StubCluster({9001: "v1/rude_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
assert address not in pool.routing_table.routers
_ = pool.fetch_routing_info(address)
assert address not in pool.routing_table.routers
def test_should_return_none_if_cannot_connect(self):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
result = pool.fetch_routing_info(address)
assert result is None
def test_should_return_none_if_connection_drops(self):
with StubCluster({9001: "v1/rude_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
result = pool.fetch_routing_info(address)
assert result is None
def test_should_fail_for_non_router(self):
with StubCluster({9001: "v1/non_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
with self.assertRaises(ServiceUnavailable):
_ = pool.fetch_routing_info(address)
def test_should_fail_if_database_error(self):
with StubCluster({9001: "v1/broken_router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
with self.assertRaises(ServiceUnavailable):
_ = pool.fetch_routing_info(address)
def test_should_call_get_routing_tables_with_context(self):
with StubCluster({9001: "v1/get_routing_table_with_context.script"}):
address = ("127.0.0.1", 9001)
routing_context = {"name": "molly", "age": "1"}
with RoutingConnectionPool(connector, UNREACHABLE_ADDRESS, routing_context) as pool:
pool.fetch_routing_info(address)
def test_should_call_get_routing_tables(self):
with StubCluster({9001: "v1/get_routing_table.script"}):
address = ("127.0.0.1", 9001)
with RoutingConnectionPool(connector, UNREACHABLE_ADDRESS, {}) as pool:
pool.fetch_routing_info(address)
class RoutingConnectionPoolFetchRoutingTableTestCase(StubTestCase):
def test_should_get_table_from_router(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
table = pool.fetch_routing_table(address)
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert table.writers == {("127.0.0.1", 9006)}
assert table.ttl == 300
assert not pool.missing_writer
def test_null_info_should_return_null_table(self):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
table = pool.fetch_routing_table(address)
assert table is None
def test_no_routers_should_raise_protocol_error(self):
with StubCluster({9001: "v1/router_no_routers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
with self.assertRaises(RoutingProtocolError):
_ = pool.fetch_routing_table(address)
def test_no_readers_should_raise_protocol_error(self):
with StubCluster({9001: "v1/router_no_readers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
with self.assertRaises(RoutingProtocolError):
_ = pool.fetch_routing_table(address)
def test_no_writers_should_return_table_with_no_writer(self):
with StubCluster({9001: "v1/router_no_writers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool() as pool:
table = pool.fetch_routing_table(address)
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert not table.writers
assert table.ttl == 300
assert pool.missing_writer
class RoutingConnectionPoolUpdateRoutingTableTestCase(StubTestCase):
scenarios = {
(None,): ServiceUnavailable,
(RoutingTable,): RoutingTable,
(ServiceUnavailable,): ServiceUnavailable,
(None, None): ServiceUnavailable,
(None, RoutingTable): RoutingTable,
(None, ServiceUnavailable): ServiceUnavailable,
(None, None, None): ServiceUnavailable,
(None, None, RoutingTable): RoutingTable,
(None, None, ServiceUnavailable): ServiceUnavailable,
}
def test_roll_back_to_initial_server_if_failed_update_with_existing_routers(self):
with StubCluster({9001: "v1/router.script"}):
initial_address = ("127.0.0.1", 9001) # roll back addresses
routers = [("127.0.0.1", 9002), ("127.0.0.1", 9003)] # not reachable servers
with RoutingConnectionPool(connector, initial_address, {}, *routers) as pool:
pool.update_routing_table()
table = pool.routing_table
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert table.writers == {("127.0.0.1", 9006)}
assert table.ttl == 300
def test_try_initial_server_first_if_missing_writer(self):
with StubCluster({9001: "v1/router.script"}):
initial_address = ("127.0.0.1", 9001)
with RoutingConnectionPool(connector, initial_address, {}) as pool:
pool.missing_writer = True
pool.update_routing_table()
table = pool.routing_table
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert table.writers == {("127.0.0.1", 9006)}
assert table.ttl == 300
assert not pool.missing_writer
def test_update_with_no_routers_should_signal_service_unavailable(self):
with RoutingPool() as pool:
with self.assertRaises(ServiceUnavailable):
pool.update_routing_table()
def test_update_scenarios(self):
for server_outcomes, overall_outcome in self.scenarios.items():
self._test_server_outcome(server_outcomes, overall_outcome)
def _test_server_outcome(self, server_outcomes, overall_outcome):
print("%r -> %r" % (server_outcomes, overall_outcome))
servers = {}
routers = []
for port, outcome in enumerate(server_outcomes, 9001):
if outcome is None:
servers[port] = "v1/rude_router.script"
elif outcome is RoutingTable:
servers[port] = "v1/router.script"
elif outcome is ServiceUnavailable:
servers[port] = "v1/non_router.script"
else:
assert False, "Unexpected server outcome %r" % outcome
routers.append(("127.0.0.1", port))
with StubCluster(servers):
with RoutingPool(*routers) as pool:
if overall_outcome is RoutingTable:
pool.update_routing_table()
table = pool.routing_table
assert table.routers == {("127.0.0.1", 9001), ("127.0.0.1", 9002),
("127.0.0.1", 9003)}
assert table.readers == {("127.0.0.1", 9004), ("127.0.0.1", 9005)}
assert table.writers == {("127.0.0.1", 9006)}
assert table.ttl == 300
elif overall_outcome is ServiceUnavailable:
with self.assertRaises(ServiceUnavailable):
pool.update_routing_table()
else:
assert False, "Unexpected overall outcome %r" % overall_outcome
class RoutingConnectionPoolEnsureRoutingTableTestCase(StubTestCase):
def test_should_update_if_stale(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
first_updated_time = pool.routing_table.last_updated_time
pool.routing_table.ttl = 0
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
second_updated_time = pool.routing_table.last_updated_time
assert second_updated_time != first_updated_time
assert not pool.missing_writer
def test_should_not_update_if_fresh(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
first_updated_time = pool.routing_table.last_updated_time
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
second_updated_time = pool.routing_table.last_updated_time
assert second_updated_time == first_updated_time
assert not pool.missing_writer
def test_should_flag_reading_without_writer(self):
with StubCluster({9001: "v1/router_no_writers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
pool.ensure_routing_table_is_fresh(READ_ACCESS)
assert pool.missing_writer
# TODO: fix flaky test
# def test_concurrent_refreshes_should_not_block_if_fresh(self):
# address = ("127.0.0.1", 9001)
# table = RoutingTable.parse_routing_info([VALID_ROUTING_RECORD])
#
# with RoutingPool(address) as pool:
# semaphore = Semaphore()
#
# class Refresher(Thread):
#
# refreshed = None
#
# def run(self):
# self.refreshed = pool.refresh_routing_table()
#
# class BlockingRefresher(Refresher):
#
# @classmethod
# def blocking_update(cls):
# pool.routing_table.update(table)
# semaphore.acquire()
# semaphore.release()
# return table
#
# def run(self):
# with patch.object(RoutingConnectionPool, "update_routing_table",
# side_effect=self.blocking_update):
# super(BlockingRefresher, self).run()
#
# first = BlockingRefresher()
# second = Refresher()
#
# assert not pool.routing_table.is_fresh()
#
# semaphore.acquire()
# first.start()
# second.start()
# sleep(1)
# assert not second.is_alive() # second call should return immediately without blocking
# second.join()
# semaphore.release()
# first.join()
#
# assert first.refreshed
# assert not second.refreshed
# assert pool.routing_table.is_fresh()
# TODO: fix flaky test
# def test_concurrent_refreshes_should_block_if_stale(self):
# address = ("127.0.0.1", 9001)
# table = RoutingTable.parse_routing_info([VALID_ROUTING_RECORD])
#
# with RoutingPool(address) as pool:
# semaphore = Semaphore()
#
# class Refresher(Thread):
#
# refreshed = None
#
# def run(self):
# self.refreshed = pool.refresh_routing_table()
#
# class BlockingRefresher(Refresher):
#
# @classmethod
# def blocking_update(cls):
# semaphore.acquire()
# semaphore.release()
# pool.routing_table.update(table)
# return table
#
# def run(self):
# with patch.object(RoutingConnectionPool, "update_routing_table",
# side_effect=self.blocking_update):
# super(BlockingRefresher, self).run()
#
# first = BlockingRefresher()
# second = Refresher()
#
# assert not pool.routing_table.is_fresh()
#
# semaphore.acquire()
# first.start()
# second.start()
# sleep(1)
# assert second.is_alive() # second call should block
# semaphore.release()
# second.join()
# first.join()
#
# assert first.refreshed
# assert not second.refreshed
# assert pool.routing_table.is_fresh()
class RoutingConnectionPoolAcquireForReadTestCase(StubTestCase):
def test_should_refresh(self):
with StubCluster({9001: "v1/router.script", 9004: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
_ = pool.acquire(access_mode=READ_ACCESS)
assert pool.routing_table.is_fresh(READ_ACCESS)
assert not pool.missing_writer
def test_connected_to_reader(self):
with StubCluster({9001: "v1/router.script", 9004: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
connection = pool.acquire(access_mode=READ_ACCESS)
assert connection.server.address in pool.routing_table.readers
assert not pool.missing_writer
def test_should_retry_if_first_reader_fails(self):
with StubCluster({9001: "v1/router.script",
9004: "v1/fail_on_init.script",
9005: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
_ = pool.acquire(access_mode=READ_ACCESS)
assert ("127.0.0.1", 9004) not in pool.routing_table.readers
assert ("127.0.0.1", 9005) in pool.routing_table.readers
def test_should_connect_to_read_in_absent_of_writer(self):
with StubCluster({9001: "v1/router_no_writers.script", 9004: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(READ_ACCESS)
connection = pool.acquire(access_mode=READ_ACCESS)
assert connection.server.address in pool.routing_table.readers
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
assert pool.missing_writer
class RoutingConnectionPoolAcquireForWriteTestCase(StubTestCase):
def test_should_refresh(self):
with StubCluster({9001: "v1/router.script", 9006: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
_ = pool.acquire(access_mode=WRITE_ACCESS)
assert pool.routing_table.is_fresh(WRITE_ACCESS)
assert not pool.missing_writer
def test_connected_to_writer(self):
with StubCluster({9001: "v1/router.script", 9006: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
connection = pool.acquire(access_mode=WRITE_ACCESS)
assert connection.server.address in pool.routing_table.writers
assert not pool.missing_writer
def test_should_retry_if_first_writer_fails(self):
with StubCluster({9001: "v1/router_with_multiple_writers.script",
9006: "v1/fail_on_init.script",
9007: "v1/empty.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
_ = pool.acquire(access_mode=WRITE_ACCESS)
assert ("127.0.0.1", 9006) not in pool.routing_table.writers
assert ("127.0.0.1", 9007) in pool.routing_table.writers
def test_should_error_to_writer_in_absent_of_reader(self):
with StubCluster({9001: "v1/router_no_readers.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
with self.assertRaises(RoutingProtocolError):
_ = pool.acquire(access_mode=WRITE_ACCESS)
assert not pool.routing_table.is_fresh(READ_ACCESS)
assert not pool.routing_table.is_fresh(WRITE_ACCESS)
assert not pool.missing_writer
class RoutingConnectionPoolDeactivateTestCase(StubTestCase):
def test_should_remove_router_from_routing_table_if_present(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
target = ("127.0.0.1", 9001)
assert target in pool.routing_table.routers
pool.deactivate(target)
assert target not in pool.routing_table.routers
def test_should_remove_reader_from_routing_table_if_present(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
target = ("127.0.0.1", 9004)
assert target in pool.routing_table.readers
pool.deactivate(target)
assert target not in pool.routing_table.readers
def test_should_remove_writer_from_routing_table_if_present(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
target = ("127.0.0.1", 9006)
assert target in pool.routing_table.writers
pool.deactivate(target)
assert target not in pool.routing_table.writers
def test_should_not_fail_if_absent(self):
with StubCluster({9001: "v1/router.script"}):
address = ("127.0.0.1", 9001)
with RoutingPool(address) as pool:
pool.ensure_routing_table_is_fresh(WRITE_ACCESS)
target = ("127.0.0.1", 9007)
pool.deactivate(target)
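# A minimal usage sketch mirroring the tests above (assumptions: a reachable
# router at 127.0.0.1:9001 and the release() method of neobolt's base pool).
def _example_acquire():
    with RoutingPool(("127.0.0.1", 9001)) as pool:
        connection = pool.acquire(access_mode=READ_ACCESS)
        try:
            pass  # run queries over the connection here
        finally:
            pool.release(connection)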
|
nilq/baby-python
|
python
|
import ast
import sys
class EnvVisitor(ast.NodeVisitor):
def __init__(self):
self.optional_environment_variables = set()
self.required_environment_variables = set()
def parse_and_visit(self, body, filename=''):
doc = ast.parse(body, filename=filename)
return self.visit(doc)
def visit_Call(self, call):
is_getenv = False
is_environ_get = False
if isinstance(call.func, ast.Attribute):
if call.func.attr == 'getenv':
is_getenv = True
elif call.func.attr == 'get':
if isinstance(call.func.value, ast.Attribute) and call.func.value.attr == 'environ':
is_environ_get = True
elif isinstance(call.func.value, ast.Name) and call.func.value.id == 'environ':
is_environ_get = True
elif isinstance(call.func, ast.Name):
if call.func.id == 'getenv':
is_getenv = True
        if is_getenv or is_environ_get:
            if len(call.args) >= 1 and isinstance(call.args[0], ast.Str):
                self.optional_environment_variables.add(ast.literal_eval(call.args[0]))
self.generic_visit(call)
def visit_Subscript(self, what):
is_env_slice = False
if isinstance(what.value, ast.Attribute) and what.value.attr == 'environ':
is_env_slice = True
elif isinstance(what.value, ast.Name) and what.value.id == 'environ':
is_env_slice = True
if is_env_slice:
if isinstance(what.slice, ast.Index) and isinstance(what.slice.value, ast.Str):
self.required_environment_variables.add(ast.literal_eval(what.slice.value))
elif sys.version_info > (3, 9):
# this was added with the new parser in 3.9
if isinstance(what.slice, ast.Constant) and isinstance(what.slice.value, str):
self.required_environment_variables.add(what.slice.value)
self.generic_visit(what)
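# A minimal usage sketch: collect the environment variable names referenced by
# a source snippet (the snippet is illustrative; it assumes a Python version
# where ast.Str is still available, as the visitor above does).
if __name__ == '__main__':
    visitor = EnvVisitor()
    visitor.parse_and_visit(
        "import os\n"
        "token = os.environ['API_TOKEN']\n"
        "debug = os.getenv('DEBUG', '0')\n"
    )
    print(sorted(visitor.required_environment_variables))  # ['API_TOKEN']
    print(sorted(visitor.optional_environment_variables))  # ['DEBUG']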
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# __*__ coding: utf8 __*__
oneline = "Read, write and operate with models"
#import os
from model_base import model_base
# --------------------------------------------------------------------
class Free_class:
pass
def bound(x, y):
if x > y/2.: return x-y
if x < -y/2. : return x+y
return x
#=============================================================================
class model_ngbr(model_base):
# --------------------------------------------------------------------
def __init__(self,d={}):
model_base.__init__(self,d)
# vc=self.vc
# ix=self.legend.index('x')
# for at in self.atoms:
# at[ix]=at[ix]%vc[0]
# at[ix+1]=at[ix+1]%vc[1]
# at[ix+2]=at[ix+2]%vc[2]
#========= make Verlet ===========================
def make_verlet(self,r=None):
""" Make Verlet for the model """
if r==None:
r=((self.vc[0]*self.vc[1]*self.vc[2]/self.natoms)**0.33333333333)
print "Verlet go. r=",r
ver =Free_class()
vc=self.vc
ver.imax=tuple(( int(x/r)+1 for x in vc ))
ver.dr=tuple(( x/y for x,y in zip(vc, ver.imax) ))
ver.ind={}
for iat,vec in self.at_it('x y z'):
im=tuple( int(x/y)%ii for x,y,ii in zip(vec,ver.dr,ver.imax) )
ver.ind[ im ] =ver.ind.get(im,[])+[iat]
self.verlet=ver
print "Verlet done"
#==============================================================
def make_ngbr_short(self,r=None):
""" makes Short Neighbours table """
if r==None: r=max(self.vc)/3.
print "Short NGBR go. r=",r
if not hasattr(self,'verlet'): self.make_verlet(r/2.5)
ng=Free_class()
ng.r=r
def key_it(pt,im,mmm):
for i in range(pt[0]+1,pt[0]+mmm+1):
for j in range(pt[1]-mmm,pt[1]+mmm+1):
for k in range(pt[2]-mmm,pt[2]+mmm+1):
yield (i%im[0],j%im[1],k%im[2])
i=pt[0]
for j in range(pt[1]+1,pt[1]+mmm+1):
for k in range(pt[2]-mmm,pt[2]+mmm+1):
yield (i,j%im[1],k%im[2])
i=pt[0]
j=pt[1]
for k in range(pt[2]+1,pt[2]+mmm+1):
yield (i,j,k%im[2])
ver=self.verlet
mmm=int(r/min(ver.dr))+1
print 'mmm = ',mmm
ng.index=[[] for i in self.atoms]
for key in ver.ind:
at_list=ver.ind[key]
for i in at_list: ng.index[i] +=at_list
for key1 in key_it(key,ver.imax,mmm):
try:
at_list1=ver.ind[key1]
for i in at_list: ng.index[i] +=at_list1
for i in at_list1: ng.index[i] +=at_list
except:
pass
self.ngbr_short=ng
print "Short NGBR done"
#==============================================================
def read_ngbr_short(self,d={}):
""" read Short Neighbours table """
self.time=d.get('time',0)
if self.box<>[[0],[0],[0]]:
box=d.get('box',[[0],[0],[0]])
self.box=box
if len(box[0])==3: self.vc=[box[0][0],box[1][1],box[2][2]]
elif len(box[0])==2: self.vc=map(lambda x: x[1]-x[0], box)
else: self.vc=[box[0][0],box[1][0],box[2][0]]
dat=d.get('atoms',[])
ng=Free_class()
ind=[]
for i in dat:
s=[int(j) for j in i]
while len(ind)<s[0]:
ind.append([])
ind[s[0]-1] += [j-1 for j in s[2:] if j<>-1]
if self.atoms==[]: self.atoms=[[] for j in ind]
while len(ind)<len(self.atoms):
ind.append([])
ng.index=ind
self.ngbr_short=ng
# print "Short NGBR is read"
#==============================================================
def make_ngbr(self,r=None,part=''):
""" makes Neighbours table with distances """
try:
self.make_ngbr_numpy(r,part)
return
except ImportError:
print 'Numpy is not installed, falling back to standard procedure'
if r==None:
            print 'Warning !!! Make full ngbr list. It could take a lot of time!!!'
r=max(self.vc)/3.
print "NGBR go. r=",r
if not hasattr(self,'ngbr_short'): self.make_ngbr_short(r)
ng=Free_class()
r2=r*r
ng.r=r
ix=self.legend.index('x')
aat=[i[ix:ix+3] for i in self.atoms]
vc=self.vc
ngs=self.ngbr_short.index
ng.index=[{} for i in self.atoms]
for iat,nng in enumerate(ngs):
vec0=aat[iat]
for jat in nng:
if jat<=iat: continue
vec1=aat[jat]
vec= [ ((x-y)+0.5*v)%v-0.5*v for x,y,v in zip(vec1,vec0,vc) ]
dist2=sum(x*x for x in vec)
vec +=[dist2]
if dist2 <= r2:
ng.index[iat][jat]=vec
ng.index[jat][iat]=[-vec[0],-vec[1],-vec[2],vec[3]]
self.ngbr=ng
print "NGBR done"
#==============================================================
def make_ngbr_numpy(self,r=None,part=''):
""" makes Neighbours table with distances """
        import numpy as np
if r==None:
            print 'Warning !!! Make full ngbr list. It could take a lot of time!!!'
r=max(self.vc)/3.
print "NGBR numpy go. r=",r
ng=Free_class()
r2=r*r
ng.r=r
ix=self.legend.index('x')
crd = np.array(self.atoms, order = 'F')[:,ix:ix+3].astype(np.float32)
vc = np.array(self.vc, order = 'F').astype(np.float32)
ng.index=[{} for i in self.atoms]
for iat in range(crd.shape[0]):
d = crd[iat:] - crd[iat]
vn = d - (d/vc).round()*vc
r2n = np.array([np.dot(x,x) for x in vn])
idn = np.nonzero((r2n < r2) & (r2n > 0.))
for inn in idn[0]:
ng.index[iat][iat + inn] = vn[inn].tolist()
ng.index[iat][iat + inn] += [r2n[inn],]
ng.index[iat + inn][iat] = (-vn[inn]).tolist()
ng.index[iat + inn][iat] += [r2n[inn],]
print ng.index[0]
self.ngbr=ng
print "NGBR numpy done"
#==============================================================
#---------------------------------------------------------------
def get_round_it(self,crd,r=None):
""" returns list of atoms near to to the point
"""
def key_it(pt,im,mmm):
for i in range(pt[0]-mmm,pt[0]+mmm+1):
for j in range(pt[1]-mmm,pt[1]+mmm+1):
for k in range(pt[2]-mmm,pt[2]+mmm+1):
yield (i%im[0],j%im[1],k%im[2])
        if r is None: r=min(self.vc)/3.
if not hasattr(self,'verlet'): self.make_verlet(r+0.05)
ver=self.verlet
mmm=int(r/min(self.verlet.dr))+1
pt=[int(x/y) for x,y in zip(crd,ver.dr)]
it=(ver.ind.get(k,[]) for k in key_it(pt,ver.imax,mmm))
for val in it:
for iat in val:
yield iat
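    # Illustrative use (names assumed, not from this module): iterate the atoms
    # lying within ~r of the box centre:
    #   centre = [0.5*v for v in mod.vc]
    #   near = set(mod.get_round_it(centre, r=3.0))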
#======== NGBR ===========================================
def ngbr_it(self,iat,r=None,part=''):
filt={}
filt['gt']=lambda x,y: x>y
filt['ge']=lambda x,y: x>=y
filt['lt']=lambda x,y: x<y
filt['le']=lambda x,y: x<=y
filt['ne']=lambda x,y: x<>y
filt['']=lambda x,y: 1==1
ff=filt[part]
if hasattr(self,'ngbr'):
for k,vec in self.ngbr.index[iat].iteritems():
if ff(k,iat):
yield k,vec
else:
if not hasattr(self,'ngbr_short'): self.make_ngbr_short(r)
for k in self.ngbr_short.index[iat]:
if ff(k,iat):
yield k,[None,None,None,None]
#======== Make NGBR table ===========================================
def make_ngbr_old(self,r=1e10,part=''):
""" makes Neighbours table
"""
print "NGBR go. r=",r
ng=Free_class()
r2=r*r
ng.r2=r2
ng.index = [dict(self.ngbr_it(iat,r,part)) for iat in xrange(len(self.atoms)) ]
self.ngbr=ng
print "NGBR done"
#======== Make GR ===========================================
def make_gr_it(self,r=1e10):
ind=self.ngbr.index
for i in ind:
for j in i:
rr=i[j][3]**0.5
if rr<r:
yield rr
#========================================================================
def ep_it(self,n=1):
from random import random
nn=0
dr=self.verlet.dr
ind=self.verlet.ind
im=self.verlet.imax
while nn<n:
key=tuple( int(i*random()) for i in im )
if ind.has_key(key): continue
yield( ((i+0.5)*j for i,j in zip(key,dr)) )
nn +=1
#************************************************************************
if __name__=='__main__': #run as program
from model_i import dump_lmp
# from timer import timer
# tm=timer()
dump=dump_lmp('dump.lmp')
mod=model_ngbr(dump())
mod.make_verlet(2)
# print tm
mod.make_fast_ngbr(5)
# print tm
l=list(mod.make_gr_it(5))
# print tm
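# A minimal sketch (not from the original module) of the minimum-image rule
# that make_ngbr relies on: each displacement component is wrapped into
# [-v/2, v/2) for a periodic box edge v, so images across the boundary count
# as close neighbours.
#   vc = [10.0, 10.0, 10.0]                    # hypothetical box edges
#   vec0, vec1 = [9.5, 0.5, 5.0], [0.5, 9.5, 5.0]
#   vec = [((x-y)+0.5*v) % v - 0.5*v for x, y, v in zip(vec1, vec0, vc)]
#   # -> [1.0, -1.0, 0.0] rather than the naive [-9.0, 9.0, 0.0]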
|
nilq/baby-python
|
python
|
import configparser
import datetime
import os
import time
#import xml.etree.ElementTree as ET
import lxml.etree as ET
from io import StringIO, BytesIO
from shutil import copyfile
import requests
from requests.auth import HTTPDigestAuth
from subprocess import Popen
print("Hikvision alert started")
# CONFIGS START
config = configparser.ConfigParser()
exists = os.path.isfile('/config/config.ini')
if exists:
config.read('/config/config.ini')
else:
copyfile('cfg/config.ini', '/config/config.ini')
config.read('/config/config.ini')
APP_PATH = config['DEFAULT']['APP_PATH']
NVR_URL = config['DEFAULT']['NVR_URL']
NVR_USR = config['DEFAULT']['NVR_USR']
NVR_PASS = config['DEFAULT']['NVR_PASS']
# CONFIGS ENDS
XML_NAMESPACE = 'http://www.hikvision.com/ver20/XMLSchema'
DEFAULT_HEADERS = {
'Content-Type': "application/xml; charset='UTF-8'",
'Accept': "*/*"
}
hik_request = requests.Session()
hik_request.auth = HTTPDigestAuth(NVR_USR, NVR_PASS)
hik_request.headers.update(DEFAULT_HEADERS)
url = NVR_URL + '/ISAPI/Event/notification/alertStream'
parse_string = ''
start_event = False
fail_count = 0
detection_date = datetime.datetime.now()
detection_id = '0'
log_file_name = "log-" + detection_date.strftime("%Y-%m-%d")+".txt"
log_file = open("/config/" + log_file_name, "a+")
while True:
try:
stream = hik_request.get(url, stream=True, timeout=(5, 60), verify=False)
if stream.status_code != requests.codes.ok:
print("Can't connect to the stream!")
raise ValueError('Connection unsuccessful.')
else:
print('Connection successful to: ' + NVR_URL)
fail_count = 0
for line in stream.iter_lines():
# filter out keep-alive new lines
if line:
str_line = line.decode("utf-8")
if str_line.find('<EventNotificationAlert') != -1:
start_event = True
parse_string += str_line
elif str_line.find('</EventNotificationAlert>') != -1:
parse_string += str_line
start_event = False
if parse_string:
#tree = ET.fromstring(parse_string)
# Use lxml instead of xml
parser = ET.XMLParser(recover=True)
tree = ET.parse(StringIO(parse_string), parser=parser)
channelID = tree.find('{%s}%s' % (XML_NAMESPACE, 'channelID'))
if channelID is None:
# Some devices use a different key
channelID = tree.find('{%s}%s' % (XML_NAMESPACE, 'dynChannelID'))
                        if channelID is None or channelID.text == '0':
# Continue and clear the chunk
parse_string = ""
continue
eventType = tree.find('{%s}%s' % (XML_NAMESPACE, 'eventType'))
eventState = tree.find('{%s}%s' % (XML_NAMESPACE, 'eventState'))
postCount = tree.find('{%s}%s' % (XML_NAMESPACE, 'activePostCount'))
current_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log_file.write('%s - count: %s event: %s eventState: %s channel_id: %s\n' % (
current_date, postCount.text, eventType.text, eventState.text, channelID.text))
                        if eventType.text == 'linedetection':
                            print("Line detection triggered!")
                            # Only trigger if the same channel has not already fired in the last 5 sec
                            now = datetime.datetime.now()
                            if channelID.text != detection_id or (now - detection_date).total_seconds() > 5:
                                log_file.write('count: %s (triggered)\n' % postCount.text)
                                detection_date = now
                                detection_id = channelID.text
                                # start the subprocess to process by channelID
                                p = Popen('python ' + APP_PATH + '/image_process.py ' + channelID.text,
                                          shell=True)
# Clear the chunk
parse_string = ""
else:
if start_event:
parse_string += str_line
except (ValueError, requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError) as err:
fail_count += 1
time.sleep(fail_count * 5)
continue
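# Aside (illustrative only): the recover=True parser above is what keeps a
# malformed or truncated alert chunk from killing the loop, e.g.
#   parser = ET.XMLParser(recover=True)
#   tree = ET.parse(StringIO('<EventNotificationAlert><eventType>line'), parser=parser)
# yields a best-effort tree instead of raising XMLSyntaxError.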
|
nilq/baby-python
|
python
|
from project import app
if __name__ == "__main__":
app.run(debug = True, host = "0.0.0.0")
|
nilq/baby-python
|
python
|
"""
An emulation of the Window class, for injecting pane data into tests
"""
from tmux_session_utils.tmux_utils import (
inject_pane_data,
WINDOW_ID_VARIABLE,
WINDOW_LAYOUT_VARIABLE,
)
class FakeWindow:
"""
Represents a window in a tmux session, for test injection
"""
def __init__(self, identity: str = None):
"""
Set invalid starting properties for the window
"""
self.identity = identity
self.name = ""
self.session = ""
self.number = None
self.directory = ""
self.layout = ""
def set_session_name(self, session: str) -> "FakeWindow":
"""
Set the session name
Parameters
----------
session : string
The session name to set
Returns
-------
self
This instance
"""
self.session = session
return self
def set_name(self, name: str) -> "FakeWindow":
"""
Set the window name
Parameters
----------
name : string
The window name to set
Returns
-------
self
This instance
"""
self.name = name
return self
def set_number(self, number: int) -> "FakeWindow":
"""
Set the window number
Parameters
----------
number : number
The window number to set
Returns
-------
self
This instance
"""
self.number = number
return self
def set_directory(self, directory: str) -> "FakeWindow":
"""
Set the directory
Parameters
----------
directory : string
The directory to set
Returns
-------
self
This instance
"""
self.directory = directory
return self
def set_layout(self, layout: str) -> "FakeWindow":
"""
Set the layout
Parameters
----------
layout : string
The layout to set
Returns
-------
self
This instance
"""
self.layout = layout
return self
def inject(self):
"""
Inject the attributes for this window into the session
"""
inject_pane_data(
self.session,
self.number,
None,
{WINDOW_ID_VARIABLE: self.identity, WINDOW_LAYOUT_VARIABLE: self.layout},
)
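    # Minimal usage sketch (values assumed, not from the original tests): the
    # setters return self, so a window can be configured fluently:
    #   FakeWindow(identity='@1').set_session_name('dev').set_number(0) \
    #       .set_layout('tiled').inject()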
|
nilq/baby-python
|
python
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import datetime
import unittest
import tempfile
import azext_interactive.azclishell.frequency_heuristic as fh
def _mock_update(_):
return {fh.day_format(datetime.datetime.utcnow()): 1}
def _mock_update2(_):
return {
fh.day_format(datetime.datetime.utcnow()): 2,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=2)): 1}
def _mock_update3(_):
return {
fh.day_format(datetime.datetime.utcnow()): 19,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=18)): 5,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=27)): 2,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=28)): 2,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=100)): 1,
fh.day_format(datetime.datetime.utcnow() - datetime.timedelta(days=200)): 1}
class FeedbackTest(unittest.TestCase):
""" tests the frequncy heuristic """
def __init__(self, *args, **kwargs):
super(FeedbackTest, self).__init__(*args, **kwargs)
from azure.cli.core.mock import DummyCli
from azext_interactive.azclishell.app import AzInteractiveShell
self.norm_update = fh.update_frequency
self.shell_ctx = AzInteractiveShell(DummyCli(), None)
def test_heuristic(self):
# test the correct logging of time for frequency
fh.update_frequency = _mock_update
self.assertEqual(1, fh.frequency_measurement(self.shell_ctx))
fh.update_frequency = _mock_update2
self.assertEqual(2, fh.frequency_measurement(self.shell_ctx))
fh.update_frequency = _mock_update3
self.assertEqual(3, fh.frequency_measurement(self.shell_ctx))
def test_update_freq(self):
# tests updating the files for frequency
fh.update_frequency = self.norm_update
now = fh.day_format(datetime.datetime.now())
fd, freq_path = tempfile.mkstemp()
freq_dir, freq_file = freq_path.rsplit(os.path.sep, 1)
def _get_freq():
return freq_file
self.shell_ctx.config.config_dir = freq_dir
self.shell_ctx.config.get_frequency = _get_freq
# with a file
json_freq = fh.update_frequency(self.shell_ctx)
self.assertEqual(json_freq, {now: 1})
json_freq = fh.update_frequency(self.shell_ctx)
self.assertEqual(json_freq, {now: 2})
if os.path.exists(freq_path):
os.close(fd)
os.remove(freq_path)
def test_update_freq_no_file(self):
# tests updating the files for frequency with no file written
fh.update_frequency = self.norm_update
fd, freq_path = tempfile.mkstemp()
freq_dir, freq_file = freq_path.rsplit(os.path.sep, 1)
def _get_freq():
return freq_file
self.shell_ctx.config.config_dir = freq_dir
self.shell_ctx.config.get_frequency = _get_freq
if os.path.exists(freq_path):
os.close(fd)
os.remove(freq_path)
# without a file already written
json_freq = fh.update_frequency(self.shell_ctx)
now = fh.day_format(datetime.datetime.now())
self.assertEqual(json_freq, {now: 1})
if os.path.exists(freq_path):
os.remove(freq_path)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from app import app
import sys, getopt, json
def clear_file(file_name):
with open(file_name, 'w') as filep:
json.dump({}, filep)
if __name__ == "__main__":
try:
        opts, args = getopt.getopt(sys.argv[1:], "c", ["clear"])
    except getopt.GetoptError:
        print('python webapp.py [-c or --clear for clearing memory]')
        sys.exit(2)
    for opt, _ in opts:
        if opt in ('-c', '--clear'):
clear_file('tx_history.json')
clear_file('retired_store.json')
clear_file('data_store.json')
clear_file('purchase_request_store.json')
print('Cleared memory')
app.run(debug=True, host="127.0.0.1", port=8090)
|
nilq/baby-python
|
python
|
import os
import sys
import time
import random
import string
import argparse
from collections import namedtuple
import copy
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
from torch import autograd
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.utils.data import Dataset
from torch.nn.parallel import DistributedDataParallel as pDDP
from torchsummary import summary
from torchvision.utils import save_image
# import horovod.torch as hvd
import gin
import numpy as np
from tqdm import tqdm, trange
from PIL import Image
from pprint import pprint
import apex
from apex.parallel import DistributedDataParallel as aDDP
from apex.fp16_utils import *
from apex import amp
from apex.multi_tensor_apply import multi_tensor_applier
import wandb
import ds_load
from utils import CTCLabelConverter, Averager, ModelEma, Metric
from cnv_model import OrigamiNet, ginM
from test import validation
parOptions = namedtuple('parOptions', ['DP', 'DDP', 'HVD'])
parOptions.__new__.__defaults__ = (False,) * len(parOptions._fields)
pO = None
OnceExecWorker = None
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def init_bn(model):
if type(model) in [torch.nn.InstanceNorm2d, torch.nn.BatchNorm2d]:
init.ones_(model.weight)
init.zeros_(model.bias)
elif type(model) in [torch.nn.Conv2d]:
init.kaiming_uniform_(model.weight)
def WrkSeeder(_):
return np.random.seed((torch.initial_seed()) % (2 ** 32))
@gin.configurable
def train(opt, AMP, WdB, train_data_path, train_data_list, test_data_path, test_data_list, charset,
experiment_name, train_batch_size, val_batch_size, workers, lr, valInterval, num_iter,
wdbprj, continue_model=''):
os.makedirs(f'./saved_models/{experiment_name}', exist_ok=True)
if OnceExecWorker and WdB:
wandb.init(project=wdbprj, name=experiment_name)
wandb.config.update(opt)
alph = ds_load.get_charset(charset)
train_dataset = ds_load.myLoadDS2(train_data_path, train_data_list, alph=alph)
valid_dataset = ds_load.myLoadDS2(test_data_path, test_data_list, alph=alph)
if OnceExecWorker:
print(pO)
# print('Alphabet :', len(train_dataset.alph), train_dataset.alph)
for d in [train_dataset, valid_dataset]:
print('Dataset Size :', len(d.fns))
print('Max LbW : ', max(list(map(len, d.tlbls))))
print('#Chars : ', sum([len(x) for x in d.tlbls]))
print('Sample label :', d.tlbls[-1])
# print("Dataset :", sorted(list(map(len, d.tlbls))))
print('-' * 80)
if opt.num_gpu > 1:
workers = workers * opt.num_gpu
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=train_batch_size,
shuffle=True,
pin_memory=True,
num_workers=int(workers),
worker_init_fn=WrkSeeder,
collate_fn=ds_load.SameTrCollate
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=val_batch_size,
pin_memory=True,
num_workers=int(workers),
)
model = OrigamiNet()
model.apply(init_bn)
model.train()
if OnceExecWorker:
for k in sorted(model.lreszs.keys()):
print(k, model.lreszs[k])
biparams = list(dict(filter(lambda kv: 'bias' in kv[0], model.named_parameters())).values())
nonbiparams = list(dict(filter(lambda kv: 'bias' not in kv[0], model.named_parameters())).values())
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=10 ** (-1 / 90000))
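    # ExponentialLR with gamma = 10 ** (-1/90000): every scheduler step scales
    # the lr by that factor, i.e. a full 10x decay per 90k scheduler steps
    # (the scheduler is stepped once every gAcc * 2 iterations below).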
if OnceExecWorker and WdB:
wandb.watch(model, log="all")
'''
if pO.HVD:
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
# optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters(), compression=hvd.Compression.fp16)
'''
if pO.DDP and opt.rank != 0:
random.seed()
np.random.seed()
if AMP:
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
if pO.DP:
model = torch.nn.DataParallel(model)
model_ema = ModelEma(model)
if continue_model != '':
if OnceExecWorker:
print(f'loading pretrained model from {continue_model}')
checkpoint = torch.load(continue_model)
model.load_state_dict(checkpoint['model'], strict=True)
optimizer.load_state_dict(checkpoint['optimizer'])
model_ema._load_checkpoint(continue_model)
criterion = torch.nn.CTCLoss(reduction='none', zero_infinity=True).to(device)
converter = CTCLabelConverter(train_dataset.ralph.values())
if OnceExecWorker:
with open(f'./saved_models/{experiment_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
opt_log += gin.operative_config_str()
opt_file.write(opt_log)
if WdB:
wandb.config.gin_str = gin.operative_config_str().splitlines()
print(optimizer)
print(opt_log)
start_time = time.time()
best_accuracy = -1
best_norm_ED = 1e+6
best_CER = 1e+6
i = 0
gAcc = 1
epoch = 1
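    # note: `False and AMP` below always evaluates to False, i.e. batch replay
    # appears deliberately disabled; the replay loop then runs each batch once.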
btReplay = False and AMP
max_batch_replays = 3
titer = iter(train_loader)
while True:
start_time = time.time()
model.zero_grad()
train_loss = Metric(pO, 'train_loss')
train_loss.to(device)
for j in trange(valInterval, leave=False, desc='Training'):
try:
image_tensors, labels = next(titer)
except StopIteration:
epoch += 1
titer = iter(train_loader)
image_tensors, labels = next(titer)
image = image_tensors.to(device)
text, length = converter.encode(labels)
batch_size = image.size(0)
replay_batch = True
maxR = 3
while replay_batch and maxR > 0:
maxR -= 1
preds = model(image, text).float()
preds_size = torch.IntTensor([preds.size(1)] * batch_size).to(device)
preds = preds.permute(1, 0, 2).log_softmax(2)
if i == 0 and OnceExecWorker:
print('Model inp : ', image.dtype, image.size())
print('CTC inp : ', preds.dtype, preds.size(), preds_size[0])
                # Disable cudnn during the ctc_loss computation to avoid a known ctc_loss issue
torch.backends.cudnn.enabled = False
cost = criterion(preds, text.to(device), preds_size, length.to(device)).mean() / gAcc
torch.backends.cudnn.enabled = True
train_loss.update(cost)
optimizer.zero_grad()
default_optimizer_step = optimizer.step # added for batch replay
if not AMP:
cost.backward()
replay_batch = False
else:
with amp.scale_loss(cost, optimizer) as scaled_loss:
scaled_loss.backward()
# if pO.HVD: optimizer.synchronize()
if optimizer.step is default_optimizer_step or not btReplay:
replay_batch = False
elif maxR > 0:
optimizer.step()
if (i + 1) % gAcc == 0:
optimizer.step()
model.zero_grad()
model_ema.update(model, num_updates=i / 2)
if (i + 1) % (gAcc * 2) == 0:
lr_scheduler.step()
i += 1
# validation part
if True:
elapsed_time = time.time() - start_time
start_time = time.time()
model.eval()
with torch.no_grad():
# valid_loss, current_accuracy, current_norm_ED, ted, bleu, preds, labels, infer_time = validation(
# model_ema.ema, criterion, valid_loader, converter, opt, pO)
valid_loss, current_accuracy, current_norm_ED, ted, bleu, preds, labels, infer_time = validation(
model, criterion, valid_loader, converter, opt, pO)
model.train()
v_time = time.time() - start_time
if OnceExecWorker:
if current_norm_ED < best_norm_ED:
best_norm_ED = current_norm_ED
checkpoint = {
'model': model.state_dict(),
'state_dict_ema': model_ema.ema.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(checkpoint, f'./saved_models/{experiment_name}/best_norm_ED.pth')
if ted < best_CER:
best_CER = ted
if current_accuracy > best_accuracy:
best_accuracy = current_accuracy
out = f'[{i}] Loss: {train_loss.avg:0.5f} time: ({elapsed_time:0.1f},{v_time:0.1f})'
out += f' vloss: {valid_loss:0.3f}'
out += f' CER: {ted:0.4f} NER: {current_norm_ED:0.4f} lr: {lr_scheduler.get_last_lr()[0]:0.5f}'
out += f' bAcc: {best_accuracy:0.1f}, bNER: {best_norm_ED:0.4f}, bCER: {best_CER:0.4f}, B: {bleu * 100:0.2f}'
print(out)
with open(f'./saved_models/{experiment_name}/log_train.txt', 'a') as log:
log.write(out + '\n')
if WdB:
wandb.log({'lr': lr_scheduler.get_last_lr()[0], 'It': i, 'nED': current_norm_ED, 'B': bleu * 100,
'tloss': train_loss.avg, 'AnED': best_norm_ED, 'CER': ted, 'bestCER': best_CER,
'vloss': valid_loss})
if i == num_iter:
print('end the training')
sys.exit()
def gInit(opt):
global pO, OnceExecWorker
gin.parse_config_file(opt.gin)
pO = parOptions(**{ginM('dist'): True})
OnceExecWorker = pO.DP
cudnn.benchmark = True
def rSeed(sd):
random.seed(sd)
np.random.seed(sd)
torch.manual_seed(sd)
torch.cuda.manual_seed(sd)
def launch_fn(rank, opt):
global OnceExecWorker
gInit(opt)
OnceExecWorker = OnceExecWorker or (pO.DDP and rank == 0)
mp.set_start_method('fork', force=True)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(opt.port)
dist.init_process_group("nccl", rank=rank, world_size=opt.num_gpu)
# to ensure identical init parameters
rSeed(opt.manualSeed)
torch.cuda.set_device(rank)
opt.world_size = opt.num_gpu
opt.rank = rank
train(opt)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gin', help='Gin config file')
opt = parser.parse_args()
gInit(opt)
opt.manualSeed = ginM('manualSeed')
opt.port = ginM('port')
if OnceExecWorker:
rSeed(opt.manualSeed)
opt.num_gpu = torch.cuda.device_count()
train(opt)
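# Typical invocation implied by the argparse/gin setup above (file and gin
# names are assumptions, not from this script):
#   python train.py --gin origami.gin
# where the gin file binds manualSeed, port, dist and the train() parameters.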
|
nilq/baby-python
|
python
|
import logging
from huobi.connection.impl.websocket_watchdog import WebSocketWatchDog
from huobi.connection.impl.websocket_manage import WebsocketManage
from huobi.connection.impl.websocket_request import WebsocketRequest
from huobi.constant.system import WebSocketDefine, ApiVersion
class SubscribeClient(object):
# static property
subscribe_watch_dog = WebSocketWatchDog()
def __init__(self, **kwargs):
"""
        Create the subscription client used to subscribe to updates from the server.
:param kwargs: The option of subscription connection.
api_key: The public key applied from Huobi.
secret_key: The private key applied from Huobi.
url: Set the URI for subscription.
init_log: to init logger
"""
self.__api_key = kwargs.get("api_key", None)
self.__secret_key = kwargs.get("secret_key", None)
self.__uri = kwargs.get("url", WebSocketDefine.Uri)
self.__init_log = kwargs.get("init_log", None)
        if self.__init_log:
logger = logging.getLogger("huobi-client")
# logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
self.__websocket_manage_list = list()
def __create_websocket_manage(self, request):
manager = WebsocketManage(self.__api_key, self.__secret_key, self.__uri, request)
self.__websocket_manage_list.append(manager)
manager.connect()
SubscribeClient.subscribe_watch_dog.on_connection_created(manager)
def create_request(self, subscription_handler, parse, callback, error_handler, is_trade, is_mbp_feed=False):
request = WebsocketRequest()
request.subscription_handler = subscription_handler
request.is_trading = is_trade
request.is_mbp_feed = is_mbp_feed
        request.auto_close = False  # subscriptions need a persistent connection; one-shot requests close it
request.json_parser = parse
request.update_callback = callback
request.error_handler = error_handler
return request
def create_request_v1(self, subscription_handler, parse, callback, error_handler, is_trade=False):
request = self.create_request(subscription_handler=subscription_handler, parse=parse, callback=callback,
error_handler=error_handler, is_trade=is_trade)
request.api_version = ApiVersion.VERSION_V1
return request
def create_request_v2(self, subscription_handler, parse, callback, error_handler, is_trade=False):
request = self.create_request(subscription_handler=subscription_handler, parse=parse, callback=callback,
error_handler=error_handler, is_trade=is_trade)
request.api_version = ApiVersion.VERSION_V2
return request
def execute_subscribe_v1(self, subscription_handler, parse, callback, error_handler, is_trade=False):
request = self.create_request_v1(subscription_handler, parse, callback, error_handler, is_trade)
self.__create_websocket_manage(request)
def execute_subscribe_v2(self, subscription_handler, parse, callback, error_handler, is_trade=False):
request = self.create_request_v2(subscription_handler, parse, callback, error_handler, is_trade)
self.__create_websocket_manage(request)
def execute_subscribe_mbp(self, subscription_handler, parse, callback, error_handler, is_trade=False,
is_mbp_feed=True):
request = self.create_request(subscription_handler, parse, callback, error_handler, is_trade, is_mbp_feed)
self.__create_websocket_manage(request)
def unsubscribe_all(self):
for websocket_manage in self.__websocket_manage_list:
SubscribeClient.subscribe_watch_dog.on_connection_closed(websocket_manage)
websocket_manage.close()
self.__websocket_manage_list.clear()
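    # Minimal usage sketch (handler names here are hypothetical, not part of
    # this module):
    #   client = SubscribeClient(api_key="pub", secret_key="sec")
    #   client.execute_subscribe_v1(subscription_handler=send_sub_json,
    #                               parse=parse_fn, callback=on_update,
    #                               error_handler=on_error)
    #   ...later...
    #   client.unsubscribe_all()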
|
nilq/baby-python
|
python
|
import numpy as np
from seisflows.tools import unix
from seisflows.tools.array import loadnpy, savenpy
from seisflows.tools.code import exists
from seisflows.tools.config import SeisflowsParameters, SeisflowsPaths, \
loadclass, ParameterError
PAR = SeisflowsParameters()
PATH = SeisflowsPaths()
import solver
import postprocess
migration = loadclass('workflow','migration')()
class test_postprocess(object):
""" Postprocessing class
"""
def check(self):
""" Checks parameters and paths
"""
migration.check()
if 'INPUT' not in PATH:
setattr(PATH, 'INPUT', None)
def main(self):
""" Writes gradient of objective function
"""
if not PATH.INPUT:
migration.main()
postprocess.process_kernels()
|
nilq/baby-python
|
python
|
from jsonobject import JsonObject
from taxjar.data.float_property import TaxJarFloatProperty
class TaxJarBreakdownLineItem(JsonObject):
# NB: can return either string or integer
# `id` is a valid property, but isn't enforced here
# id = StringProperty()
taxable_amount = TaxJarFloatProperty()
tax_collectable = TaxJarFloatProperty()
combined_tax_rate = TaxJarFloatProperty()
state_taxable_amount = TaxJarFloatProperty()
state_sales_tax_rate = TaxJarFloatProperty()
state_amount = TaxJarFloatProperty()
county_taxable_amount = TaxJarFloatProperty()
county_tax_rate = TaxJarFloatProperty()
county_amount = TaxJarFloatProperty()
city_taxable_amount = TaxJarFloatProperty()
city_tax_rate = TaxJarFloatProperty()
city_amount = TaxJarFloatProperty()
special_district_taxable_amount = TaxJarFloatProperty()
special_tax_rate = TaxJarFloatProperty()
special_district_amount = TaxJarFloatProperty()
country_taxable_amount = TaxJarFloatProperty()
country_tax_rate = TaxJarFloatProperty()
country_tax_collectable = TaxJarFloatProperty()
gst_taxable_amount = TaxJarFloatProperty()
gst_tax_rate = TaxJarFloatProperty()
gst = TaxJarFloatProperty()
pst_taxable_amount = TaxJarFloatProperty()
pst_tax_rate = TaxJarFloatProperty()
pst = TaxJarFloatProperty()
qst_taxable_amount = TaxJarFloatProperty()
qst_tax_rate = TaxJarFloatProperty()
qst = TaxJarFloatProperty()
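    # Construction sketch (assumed jsonobject behaviour, not verified against
    # this codebase): properties can be passed as keyword arguments, e.g.
    #   item = TaxJarBreakdownLineItem(taxable_amount=15.0, tax_collectable=1.35)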
|
nilq/baby-python
|
python
|
import abjad
import consort
from abjad.tools import durationtools
from abjad.tools import rhythmmakertools
from abjad.tools import systemtools
from abjad.tools import templatetools
from abjad.tools import timespantools
layer = 1
score_template = templatetools.StringOrchestraScoreTemplate(
violin_count=2,
viola_count=1,
cello_count=1,
contrabass_count=0,
)
segment_timespan = abjad.Timespan(0, 4)
timespan_maker = consort.TaleaTimespanMaker(
playing_talea=rhythmmakertools.Talea(
counts=(1,),
denominator=1,
),
silence_talea=None,
)
timespan_quantization = abjad.Duration(1, 16)
def test_MusicSetting_01():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(1, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(2, 1),
stop_offset=abjad.Offset(3, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(3, 1),
stop_offset=abjad.Offset(4, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_02():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=abjad.Timespan(1, 2),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_03():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=abjad.TimespanList([
abjad.Timespan(0, 1),
abjad.Timespan(2, 4),
]),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(1, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(2, 1),
stop_offset=abjad.Offset(3, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(3, 1),
stop_offset=abjad.Offset(4, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_04():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=consort.RatioPartsExpression(
ratio=(1, 2, 1),
parts=1,
),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(1, 1),
stop_offset=abjad.Offset(2, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(2, 1),
stop_offset=abjad.Offset(3, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_05():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=consort.RatioPartsExpression(
ratio=(1, 2, 1),
parts=(0, 2),
),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(0, 1),
stop_offset=abjad.Offset(1, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(3, 1),
stop_offset=abjad.Offset(4, 1),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_06():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=consort.RatioPartsExpression(
ratio=(1, 1, 1),
parts=1,
),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
timespan_quantization=timespan_quantization,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(21, 16),
stop_offset=abjad.Offset(37, 16),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
def test_MusicSetting_07():
music_setting = consort.MusicSetting(
timespan_maker=timespan_maker,
timespan_identifier=consort.RatioPartsExpression(
ratio=(1, 1, 1, 2),
parts=(1, 3),
),
viola_bowing_voice=consort.tools.MusicSpecifier(),
)
result = music_setting(
layer=layer,
score_template=score_template,
segment_timespan=segment_timespan,
timespan_quantization=timespan_quantization,
)
assert format(result) == abjad.String.normalize(
'''
abjad.TimespanList(
[
consort.tools.PerformedTimespan(
start_offset=abjad.Offset(19, 8),
stop_offset=abjad.Offset(27, 8),
layer=1,
music_specifier=consort.tools.MusicSpecifier(),
voice_name='Viola Bowing Voice',
),
]
)
''',
), format(result)
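# Reading the fixtures above: RatioPartsExpression(ratio=(1, 2, 1), parts=1)
# divides the segment [0, 4] in the proportions 1:2:1 -- [0,1], [1,3], [3,4] --
# and keeps only the middle part, which is why test_MusicSetting_04 yields
# timespans covering exactly [1, 3].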
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Error module tests."""
from __future__ import absolute_import, print_function
import json
from invenio_rest import InvenioREST
from invenio_rest.errors import FieldError, InvalidContentType, \
RESTException, RESTValidationError
def test_errors(app):
"""Error handlers view."""
InvenioREST(app)
@app.route('/', methods=['GET'])
def test_rest():
raise RESTException(description='error description')
@app.route('/contenttype', methods=['GET'])
def test_content_type():
raise InvalidContentType(allowed_content_types=['application/json'])
@app.route('/validationerror', methods=['GET'])
def test_validation_error():
raise RESTValidationError(
errors=[FieldError('myfield', 'mymessage', code=10)])
with app.test_client() as client:
res = client.get('/')
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['status'] is None
assert data['message'] == 'error description'
res = client.get('/contenttype')
assert res.status_code == 415
data = json.loads(res.get_data(as_text=True))
assert data['status'] == 415
assert 'application/json' in data['message']
res = client.get('/validationerror')
assert res.status_code == 400
data = json.loads(res.get_data(as_text=True))
print(data)
assert data['status'] == 400
assert data['message'] == 'Validation error.'
assert data['errors'] == [
dict(field='myfield', message='mymessage', code=10)
]
|
nilq/baby-python
|
python
|