text string | size int64 | token_count int64 |
|---|---|---|
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from tola.forms import RegistrationForm, NewUserRegistrationForm, NewTolaUserRegistrationForm, BookmarkForm
from django.contrib import messages
from django.contrib.auth import logout
from django.http import HttpResponseRedirect
from django.shortcuts import render
from workflow.models import ProjectAgreement, ProjectComplete, Program, SiteProfile, Sector,Country, TolaUser,TolaSites, TolaBookmarks, FormGuidance
from indicators.models import CollectedData, Indicator
from tola.tables import IndicatorDataTable
from django.shortcuts import get_object_or_404
from django.db.models import Sum, Q, Count
from tola.util import getCountry
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
@login_required(login_url='/accounts/login/')
def index(request, selected_countries=None, id=0, sector=0):
    """
    Home page
    get count of agreements approved and total for dashboard

    Renders the dashboard: project agreement / project complete counts by
    approval state, KPI data sums, site profiles for the map, and the
    "traffic light" adoption colours, optionally narrowed by country,
    program (``id``) and ``sector``.
    """
    program_id = id
    user_countries = getCountry(request.user)
    # Default to every country the user can access when no filter was sent.
    if not selected_countries:
        selected_countries = user_countries
        selected_countries_list = None
        selected_countries_label_list = None
    else:
        #transform to list if a submitted country
        selected_countries = [selected_countries]
        selected_countries_list = Country.objects.all().filter(id__in=selected_countries)
        selected_countries_label_list = Country.objects.all().filter(id__in=selected_countries).values('country')
    # NOTE(review): assumes the TolaSites row with pk=1 is this agency's site.
    getAgencySite = TolaSites.objects.all().filter(id=1)
    getSectors = Sector.objects.all().exclude(program__isnull=True).select_related()
    #limit the programs by the selected sector
    if int(sector) == 0:
        getPrograms = Program.objects.all().prefetch_related('agreement','agreement__office').filter(funding_status="Funded", country__in=selected_countries)#.exclude(agreement__isnull=True)
        sectors = Sector.objects.all()
    else:
        getPrograms = Program.objects.all().filter(funding_status="Funded", country__in=selected_countries, sector=sector)#.exclude(agreement__isnull=True)
        sectors = Sector.objects.all().filter(id=sector)
    # Base filter for the CollectedData sums; country/program constraints
    # are added below depending on the selected filters.
    filterForQuantitativeDataSums = {
        'indicator__key_performance_indicator': True,
        'periodic_target__isnull': False,
        'achieved__isnull': False,
    }
    #get data for just one program or all programs
    if int(program_id) == 0:
        getFilteredName=None
        filterForQuantitativeDataSums['indicator__program__country__in'] = selected_countries
        #filter by all programs then filter by sector if found
        if int(sector) > 0:
            # NOTE(review): 'agreement__sector__in' is an unusual key for a
            # CollectedData filter — confirm this related name exists.
            filterForQuantitativeDataSums['agreement__sector__in'] = sectors
            getSiteProfile = SiteProfile.objects.all().prefetch_related('country','district','province').filter(Q(Q(projectagreement__sector__in=sectors)), country__in=selected_countries).filter(status=1)
            getSiteProfileIndicator = SiteProfile.objects.all().prefetch_related('country','district','province').filter(Q(collecteddata__program__country__in=selected_countries)).filter(status=1)
            agreement_total_count = ProjectAgreement.objects.all().filter(sector__in=sectors, program__country__in=selected_countries).count()
            complete_total_count = ProjectComplete.objects.all().filter(project_agreement__sector__in=sectors, program__country__in=selected_countries).count()
            agreement_approved_count = ProjectAgreement.objects.all().filter(approval='approved', sector__in=sectors, program__country__in=selected_countries).count()
            complete_approved_count = ProjectComplete.objects.all().filter(approval='approved', project_agreement__sector__in=sectors, program__country__in=selected_countries).count()
            agreement_awaiting_count = ProjectAgreement.objects.all().filter(approval='awaiting approval', sector__in=sectors, program__country__in=selected_countries).count()
            complete_awaiting_count = ProjectComplete.objects.all().filter(approval='awaiting approval', project_agreement__sector__in=sectors, program__country__in=selected_countries).count()
            # "open" means explicitly open, empty string, or NULL approval.
            # NOTE(review): sector__id__in here is inconsistent with the
            # sector__in used in the sibling queries — confirm equivalence.
            agreement_open_count = ProjectAgreement.objects.all().filter(Q(Q(approval='open') | Q(approval="") | Q(approval=None)), sector__id__in=sectors, program__country__in=selected_countries).count()
            complete_open_count = ProjectComplete.objects.all().filter(Q(Q(approval='open') | Q(approval="") | Q(approval=None)), project_agreement__sector__in=sectors, program__country__in=selected_countries).count()
            # NOTE(review): the Q(...) here reduces to approval='in progress';
            # the OR-ed terms are redundant but preserved as written.
            agreement_wait_count = ProjectAgreement.objects.all().filter(Q(approval='in progress') & Q(Q(approval='in progress') | Q(approval=None) | Q(approval="")), sector__in=sectors, program__country__in=selected_countries).count()
            complete_wait_count = ProjectComplete.objects.all().filter(Q(approval='in progress') & Q(Q(approval='in progress') | Q(approval=None) | Q(approval="")), project_agreement__sector__in=sectors, program__country__in=selected_countries).count()
        else:
            getSiteProfile = SiteProfile.objects.all().prefetch_related('country','district','province').filter(country__in=selected_countries).filter(status=1)
            getSiteProfileIndicator = SiteProfile.objects.all().prefetch_related('country','district','province').filter(Q(collecteddata__program__country__in=selected_countries)).filter(status=1)
            agreement_total_count = ProjectAgreement.objects.all().filter(program__country__in=selected_countries).count()
            complete_total_count = ProjectComplete.objects.all().filter(program__country__in=selected_countries).count()
            agreement_approved_count = ProjectAgreement.objects.all().filter(approval='approved', program__country__in=selected_countries).count()
            complete_approved_count = ProjectComplete.objects.all().filter(approval='approved', program__country__in=selected_countries).count()
            agreement_awaiting_count = ProjectAgreement.objects.all().filter(approval='awaiting approval', program__country__in=selected_countries).count()
            complete_awaiting_count = ProjectComplete.objects.all().filter(approval='awaiting approval', program__country__in=selected_countries).count()
            agreement_open_count = ProjectAgreement.objects.all().filter(Q(Q(approval='open') | Q(approval="") | Q(approval=None)), program__country__in=selected_countries).count()
            complete_open_count = ProjectComplete.objects.all().filter(Q(Q(approval='open') | Q(approval="") | Q(approval=None)), program__country__in=selected_countries).count()
            agreement_wait_count = ProjectAgreement.objects.all().filter(Q(approval='in progress') & Q(Q(approval='in progress') | Q(approval=None) | Q(approval="")), program__country__in=selected_countries).count()
            complete_wait_count = ProjectComplete.objects.all().filter(Q(approval='in progress') & Q(Q(approval='in progress') | Q(approval=None) | Q(approval="")),program__country__in=selected_countries).count()
    else:
        # Single-program view.
        filterForQuantitativeDataSums['indicator__program__id'] = program_id
        getFilteredName=Program.objects.get(id=program_id)
        agreement_total_count = ProjectAgreement.objects.all().filter(program__id=program_id).count()
        complete_total_count = ProjectComplete.objects.all().filter(program__id=program_id).count()
        agreement_approved_count = ProjectAgreement.objects.all().filter(program__id=program_id, approval='approved').count()
        complete_approved_count = ProjectComplete.objects.all().filter(program__id=program_id, approval='approved').count()
        # NOTE(review): unlike the multi-program branch, this does not count
        # NULL approval as "open" — confirm the asymmetry is intended.
        agreement_open_count = ProjectAgreement.objects.all().filter(program__id=program_id, approval='open').count()
        complete_open_count = ProjectComplete.objects.all().filter(Q(Q(approval='open') | Q(approval="")), program__id=program_id).count()
        agreement_wait_count = ProjectAgreement.objects.all().filter(Q(program__id=program_id), Q(approval='in progress') & Q(Q(approval='in progress') | Q(approval=None) | Q(approval=""))).count()
        complete_wait_count = ProjectComplete.objects.all().filter(Q(program__id=program_id), Q(approval='in progress') & Q(Q(approval='in progress') | Q(approval=None) | Q(approval=""))).count()
        getSiteProfile = SiteProfile.objects.all().prefetch_related('country','district','province').filter(projectagreement__program__id=program_id).filter(status=1)
        getSiteProfileIndicator = SiteProfile.objects.all().prefetch_related('country','district','province').filter(Q(collecteddata__program__id=program_id)).filter(status=1)
        agreement_awaiting_count = ProjectAgreement.objects.all().filter(program__id=program_id, approval='awaiting approval').count()
        complete_awaiting_count = ProjectComplete.objects.all().filter(program__id=program_id, approval='awaiting approval').count()
    # Per-indicator target/actual sums for KPI indicators, excluding rows
    # with missing data and archived programs.
    getQuantitativeDataSums = CollectedData.objects.all()\
        .filter(**filterForQuantitativeDataSums)\
        .exclude(\
            achieved=None,\
            periodic_target=None,\
            program__funding_status="Archived")\
        .order_by('indicator__program','indicator__number')\
        .values('indicator__lop_target', 'indicator__program__id', 'indicator__program__name','indicator__number','indicator__name','indicator__id')\
        .annotate(targets=Sum('periodic_target'), actuals=Sum('achieved'))
    #Evidence and Objectives are for the global leader dashboard items and are the same every time
    count_evidence = CollectedData.objects.all().filter(indicator__isnull=False).values("indicator__program__country__country").annotate(evidence_count=Count('evidence', distinct=True) + Count('tola_table', distinct=True),indicator_count=Count('pk', distinct=True)).order_by('-evidence_count')
    # Per-strategic-objective indicator counts and target/actual sums.
    getObjectives = CollectedData.objects.filter(\
        indicator__strategic_objectives__isnull=False, \
        indicator__program__country__in=selected_countries)\
        .exclude(\
            achieved=None,\
            periodic_target=None)\
        .order_by('indicator__strategic_objectives__name')\
        .values('indicator__strategic_objectives__name')\
        .annotate(\
            indicators=Count('indicator__pk', distinct=True),\
            targets=Sum('periodic_target__target'), actuals=Sum('achieved'))
    # print(".............................%s............................" % getObjectives.query )
    table = IndicatorDataTable(getQuantitativeDataSums)
    table.paginate(page=request.GET.get('page', 1), per_page=20)
    count_program = Program.objects.all().filter(country__in=selected_countries, funding_status='Funded').count()
    approved_by = TolaUser.objects.get(user_id=request.user)
    user_pending_approvals = ProjectAgreement.objects.filter(approved_by=approved_by).exclude(approval='approved')
    count_program_agreement = ProjectAgreement.objects.all().filter(program__country__in=selected_countries,program__funding_status='Funded').values('program').distinct().count()
    count_indicator = Indicator.objects.all().filter(program__country__in=selected_countries,program__funding_status='Funded').values('program').distinct().count()
    count_evidence_adoption = CollectedData.objects.all().filter(indicator__isnull=False,indicator__program__country__in=selected_countries).values("indicator__program__country__country").annotate(evidence_count=Count('evidence', distinct=True) + Count('tola_table', distinct=True),indicator_count=Count('pk', distinct=True)).order_by('-evidence_count')
    count_program = int(count_program)
    count_program_agreement = int(count_program_agreement)
    green = "#5CB85C"
    yellow = "#E89424"
    red = "#B30838"
    # 66% or higher = Green above 25% below %66 is Orange and below %25 is Red
    # NOTE(review): count_program/4 is integer division under Python 2 —
    # confirm whether truncation at the 25% boundary is intended.
    if count_program_agreement >= float(count_program/1.5):
        workflow_adoption = green
    elif count_program_agreement < count_program/1.5 and count_program_agreement > count_program/4:
        workflow_adoption = yellow
    elif count_program_agreement <= count_program/4:
        workflow_adoption = red
    if count_indicator >= float(count_program/1.5):
        indicator_adoption = green
    elif count_indicator < count_program/1.5 and count_indicator > count_program/4:
        indicator_adoption = yellow
    elif count_indicator <= count_program/4:
        indicator_adoption = red
    # Aggregate evidence/indicator counts across countries for the
    # evidence-adoption traffic light.
    total_evidence_adoption_count = 0
    total_indicator_data_count = 0
    for country in count_evidence_adoption:
        total_evidence_adoption_count = total_evidence_adoption_count + country['evidence_count']
        total_indicator_data_count = total_indicator_data_count + country['indicator_count']
    if total_evidence_adoption_count >= float(total_indicator_data_count/1.5):
        evidence_adoption = green
    elif total_evidence_adoption_count < total_indicator_data_count/1.5 and total_evidence_adoption_count > total_indicator_data_count/4:
        evidence_adoption = yellow
    elif total_evidence_adoption_count <= total_indicator_data_count/4:
        evidence_adoption = red
    return render(request, "index.html", {'agreement_total_count':agreement_total_count,\
        'agreement_approved_count':agreement_approved_count,\
        'agreement_open_count':agreement_open_count,\
        'agreement_wait_count':agreement_wait_count,\
        'agreement_awaiting_count':agreement_awaiting_count,\
        'complete_open_count':complete_open_count,\
        'complete_approved_count':complete_approved_count,'complete_total_count':complete_total_count,\
        'complete_wait_count':complete_wait_count,\
        'complete_awaiting_count':complete_awaiting_count,\
        'programs':getPrograms,'getSiteProfile':getSiteProfile,\
        'countries': user_countries,'selected_countries':selected_countries,\
        'getFilteredName':getFilteredName,'getSectors':getSectors,\
        'sector': sector, 'table': table, 'getQuantitativeDataSums':getQuantitativeDataSums,\
        'count_evidence':count_evidence,
        'getObjectives':getObjectives,
        'selected_countries_list': selected_countries_list,
        'getSiteProfileIndicator': getSiteProfileIndicator,
        'getAgencySite': getAgencySite,
        'workflow_adoption': workflow_adoption,
        'count_program': count_program,
        'count_program_agreement': count_program_agreement,
        'indicator_adoption': indicator_adoption,
        'count_indicator': count_indicator,
        'evidence_adoption': evidence_adoption,
        'count_evidence_adoption':total_evidence_adoption_count,
        'count_indicator_data':total_indicator_data_count,
        'selected_countries_label_list':selected_countries_label_list,
        'user_pending_approvals':user_pending_approvals,
    })
def register(request):
    """
    Register a new User profile using built in Django Users Model

    GET renders the combined User / TolaUser registration forms; POST
    validates both, creates the User in the 'ViewOnly' group, links and
    saves the TolaUser profile, then redirects home.
    """
    # NOTE(review): assumes the TolaSites row with pk=1 carries the privacy
    # text shown on the registration page.
    privacy = TolaSites.objects.get(id=1)
    if request.method == 'POST':
        uf = NewUserRegistrationForm(request.POST)
        tf = NewTolaUserRegistrationForm(request.POST)
        # Validate both forms unconditionally (no short-circuit) so each
        # form's errors are populated for re-display. The original used
        # `uf.is_valid() * tf.is_valid()`, an obscure multiplication idiom.
        uf_valid = uf.is_valid()
        tf_valid = tf.is_valid()
        if uf_valid and tf_valid:
            user = uf.save()
            # New accounts start with read-only permissions.
            user.groups.add(Group.objects.get(name='ViewOnly'))
            tolauser = tf.save(commit=False)
            tolauser.user = user
            tolauser.save()
            # Bug fix: this is a success notice, not an error; it was
            # previously emitted via messages.error().
            messages.success(request, 'Thank you, You have been registered as a new user.', fail_silently=False)
            return HttpResponseRedirect("/")
    else:
        uf = NewUserRegistrationForm()
        tf = NewTolaUserRegistrationForm()
    return render(request, "registration/register.html", {
        'userform': uf,'tolaform': tf, 'helper': NewTolaUserRegistrationForm.helper,'privacy':privacy
    })
def profile(request):
    """
    Update a User profile using built in Django Users Model if the user is logged in
    otherwise redirect them to registration version
    """
    if request.user.is_authenticated():
        obj = get_object_or_404(TolaUser, user=request.user)
        form = RegistrationForm(request.POST or None, instance=obj, initial={'username': request.user})
        if request.method == 'POST':
            if form.is_valid():
                form.save()
                # Bug fix: this is a success notice, not an error; it was
                # previously emitted via messages.error().
                messages.success(request, 'Your profile has been updated.', fail_silently=False)
        return render(request, "registration/profile.html", {
            'form': form, 'helper': RegistrationForm.helper
        })
    else:
        # Anonymous users are sent to the registration page instead.
        return HttpResponseRedirect("/accounts/register")
class BookmarkList(ListView):
    """
    Bookmark Report filtered by project

    Renders every bookmark belonging to the TolaUser profile of the
    currently logged-in user.
    """

    model = TolaBookmarks
    template_name = 'registration/bookmark_list.html'

    def get(self, request, *args, **kwargs):
        tola_user = TolaUser.objects.all().filter(user=request.user)
        bookmarks = TolaBookmarks.objects.all().filter(user=tola_user)
        context = {'getBookmarks': bookmarks}
        return render(request, self.template_name, context)
class BookmarkCreate(CreateView):
    """
    Using Bookmark Form for new bookmark per user
    """

    model = TolaBookmarks
    template_name = 'registration/bookmark_form.html'
    form_class = BookmarkForm

    def dispatch(self, request, *args, **kwargs):
        # Attach optional per-form guidance text when configured.
        try:
            self.guidance = FormGuidance.objects.get(form="Bookmarks")
        except FormGuidance.DoesNotExist:
            self.guidance = None
        return super(BookmarkCreate, self).dispatch(request, *args, **kwargs)

    # add the request to the kwargs so the form can read the current user
    def get_form_kwargs(self):
        kwargs = super(BookmarkCreate, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs

    def get_initial(self):
        # Pre-fill the owning user with the requester.
        initial = {
            'user': self.request.user,
        }
        return initial

    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))

    def form_valid(self, form):
        # Bug fix: redirect to the bookmark just saved. The previous code
        # looked up TolaBookmarks.objects.latest('id'), which races with
        # concurrent creations and could redirect to another user's bookmark.
        bookmark = form.save()
        messages.success(self.request, 'Success, Bookmark Created!')
        redirect_url = '/bookmark_update/' + str(bookmark.id)
        return HttpResponseRedirect(redirect_url)
class BookmarkUpdate(UpdateView):
    """
    Bookmark Form Update an existing site profile
    """

    model = TolaBookmarks
    template_name = 'registration/bookmark_form.html'
    form_class = BookmarkForm

    def dispatch(self, request, *args, **kwargs):
        # Attach optional per-form guidance text when configured.
        try:
            self.guidance = FormGuidance.objects.get(form="Bookmarks")
        except FormGuidance.DoesNotExist:
            self.guidance = None
        return super(BookmarkUpdate, self).dispatch(request, *args, **kwargs)

    def get_initial(self):
        # Pre-fill the owning user with the requester.
        initial = {
            'user': self.request.user,
        }
        return initial

    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))

    def form_valid(self, form):
        # Bug fix: redirect to the bookmark that was actually updated. The
        # previous code redirected to TolaBookmarks.objects.latest('id'),
        # i.e. the newest bookmark in the whole table, not this one.
        bookmark = form.save()
        messages.success(self.request, 'Success, Bookmark Updated!')
        redirect_url = '/bookmark_update/' + str(bookmark.id)
        return HttpResponseRedirect(redirect_url)
class BookmarkDelete(DeleteView):
    """
    Bookmark Form Delete an existing bookmark
    """
    model = TolaBookmarks
    template_name = 'registration/bookmark_confirm_delete.html'
    success_url = "/bookmark_list"

    # NOTE(review): this override adds nothing over the base implementation;
    # kept as-is for behavioral parity.
    def dispatch(self, request, *args, **kwargs):
        return super(BookmarkDelete, self).dispatch(request, *args, **kwargs)

    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))

    def form_valid(self, form):
        # NOTE(review): saving the form on a DeleteView looks unintended —
        # nothing here deletes the object, and the view re-renders instead of
        # redirecting to success_url. Confirm against the delete flow.
        form.save()
        messages.success(self.request, 'Success, Bookmark Deleted!')
        return self.render_to_response(self.get_context_data(form=form))

    form_class = BookmarkForm
def logout_view(request):
    """
    Logout a user

    Ends the current session, then sends the visitor back to the home page.
    """
    logout(request)
    # Redirect to a success page.
    home_url = "/"
    return HttpResponseRedirect(home_url)
| 21,559 | 6,090 |
import sys
from loguru import logger
from . import LOG_PATH
class Loggers:
    """Configure the application's loguru sinks: one file sink and one
    colourised stderr sink, replacing loguru's default handler."""

    def __init__(self, debug_switch=None):
        self.file_handler_id = None
        self.stderr_handler_id = None
        # Initialise loggers
        logger.remove()  # Disable default logger
        # File sink records DEBUG+ when debugging, ERROR+ otherwise.
        file_level = 'DEBUG' if debug_switch else 'ERROR'
        self.add_file_logger(level=file_level)
        self.add_stderr_logger()

    def add_file_logger(self, level='ERROR'):
        """Attach a plain-text sink at LOG_PATH and return its handler id."""
        handler_id = logger.add(
            LOG_PATH,
            format='{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | {line} | {name}:{function} | {message}',
            level=level,
        )
        self.file_handler_id = handler_id
        return handler_id

    def add_stderr_logger(self, level='WARNING'):
        """Attach a colourised stderr sink and return its handler id."""
        handler_id = logger.add(
            sys.stderr,
            format='<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | '
                   '<level>{level: <8}</level> | '
                   '<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>',
            diagnose=False,
            level=level,
        )
        self.stderr_handler_id = handler_id
        return handler_id
| 1,163 | 378 |
from ...api import FeedForward # For backward compatibility
| 60 | 15 |
import autograd.numpy as np
import tensorflow as tf
import torch
from numpy import linalg as la
from numpy import random as rnd
import pymanopt
from examples._tools import ExampleRunner
from pymanopt.manifolds import PSDFixedRank
from pymanopt.solvers import TrustRegions
SUPPORTED_BACKENDS = ("Autograd", "Callable", "PyTorch", "TensorFlow")
def create_cost_egrad_ehess(manifold, matrix, backend):
    """
    Build the cost function for min_Y ||Y Y^T - matrix||_F^2 on `manifold`
    under the requested autodiff backend.

    For the "Callable" backend the Euclidean gradient and Hessian-vector
    product are supplied explicitly; for the autodiff backends they stay
    None and pymanopt derives them.

    Returns a (cost, egrad, ehess) tuple.
    """
    egrad = ehess = None
    if backend == "Autograd":
        @pymanopt.function.Autograd(manifold)
        def cost(Y):
            return np.linalg.norm(Y @ Y.T - matrix, "fro") ** 2
    elif backend == "Callable":
        @pymanopt.function.Callable(manifold)
        def cost(Y):
            return la.norm(Y @ Y.T - matrix, "fro") ** 2

        # Euclidean gradient of ||Y Y^T - A||_F^2 with respect to Y.
        @pymanopt.function.Callable(manifold)
        def egrad(Y):
            return 4 * (Y @ Y.T - matrix) @ Y

        # Euclidean Hessian applied to a tangent direction U.
        @pymanopt.function.Callable(manifold)
        def ehess(Y, U):
            return 4 * ((Y @ U.T + U @ Y.T) @ Y + (Y @ Y.T - matrix) @ U)
    elif backend == "PyTorch":
        matrix_ = torch.from_numpy(matrix)

        @pymanopt.function.PyTorch(manifold)
        def cost(Y):
            X = torch.matmul(Y, torch.transpose(Y, 1, 0))
            return torch.norm(X - matrix_) ** 2
    elif backend == "TensorFlow":
        @pymanopt.function.TensorFlow(manifold)
        def cost(Y):
            X = tf.matmul(Y, tf.transpose(Y))
            return tf.norm(X - matrix) ** 2
    else:
        raise ValueError(f"Unsupported backend '{backend}'")
    return cost, egrad, ehess
def run(backend=SUPPORTED_BACKENDS[0], quiet=True):
    """
    Approximate a random rank-5 PSD matrix with a low-rank factorisation on
    the PSDFixedRank manifold using a trust-regions solver.

    When `quiet` is False, prints the target's rank and the Frobenius-norm
    error of the estimate.
    """
    num_rows = 1000
    rank = 5
    # The target is the Gram matrix of a random low-rank factor, so an exact
    # rank-5 solution exists.
    low_rank_factor = rnd.randn(num_rows, rank)
    matrix = low_rank_factor @ low_rank_factor.T

    manifold = PSDFixedRank(num_rows, rank)
    cost, egrad, ehess = create_cost_egrad_ehess(manifold, matrix, backend)
    problem = pymanopt.Problem(manifold, cost=cost, egrad=egrad, ehess=ehess)
    if quiet:
        problem.verbosity = 0

    solver = TrustRegions(maxiter=500, minstepsize=1e-6)
    low_rank_factor_estimate = solver.solve(problem)

    if quiet:
        return

    print("Rank of target matrix:", la.matrix_rank(matrix))
    matrix_estimate = low_rank_factor_estimate @ low_rank_factor_estimate.T
    print(
        "Frobenius norm error of low-rank estimate:",
        la.norm(matrix - matrix_estimate),
    )
if __name__ == "__main__":
    # Run the example once per supported backend via the shared CLI helper.
    runner = ExampleRunner(
        run, "Low-rank PSD matrix approximation", SUPPORTED_BACKENDS
    )
    runner.run()
| 2,525 | 924 |
def safe_divide(n, d):
    """Return n / d, or 0 when the denominator is falsy (e.g. zero)."""
    if not d:
        return 0
    return n / d
# Generated by Django 3.1.6 on 2021-09-01 06:12
from django.db import migrations
class Migration(migrations.Migration):

    dependencies = [
        ('datasets', '0019_auto_20210826_2054'),
    ]

    # Rename Connection.connection_type -> Connection.type, and mirror the
    # rename on historicalconnection (presumably the django-simple-history
    # shadow model — confirm).
    operations = [
        migrations.RenameField(
            model_name='connection',
            old_name='connection_type',
            new_name='type',
        ),
        migrations.RenameField(
            model_name='historicalconnection',
            old_name='connection_type',
            new_name='type',
        ),
    ]
| 534 | 177 |
import warnings
import importlib_metadata
from tqdm import TqdmExperimentalWarning
# Monkey-patches GeoAxesSubplot for .set_extent()
from .utils import geoaxes  # noqa: F401

# Package version taken from the installed distribution metadata, so it is
# declared in exactly one place (the package build configuration).
__version__ = importlib_metadata.version("cartes")

# Silence this warning about autonotebook mode for tqdm
warnings.simplefilter("ignore", TqdmExperimentalWarning)
| 343 | 112 |
from src.models.train_DRRAA_module import DRRAA
from src.models.train_LSM_module import LSM
from src.models.train_BDRRAA_module import BDRRAA
import torch
import matplotlib.pyplot as plt
import numpy as np
import json
import scipy.stats as st
import matplotlib as mpl
def sparse_experiments(datasets, ks, sample_size, iterations, LR, print_loss = False):
    """
    Run the bipartite RAA link-prediction experiment.

    For each dataset, each training repetition and each archetype count k,
    fit a BDRRAA model on the pre-built sparse train/test masks, record the
    link-prediction AUC, then write per-k AUC lists, means and 95%
    confidence bounds to ``auc_<dataset>_bipartite.txt``.

    NOTE(review): depends on module-level globals ``TRAIN_NUMS`` (number of
    repetitions) and ``d`` (latent dimension) — confirm the calling module
    defines them before use.
    """
    # Initialize datastructures for storing experiment data
    RAA_AUC_SCORES = {(key, k): [] for key in datasets for k in ks}
    for idx, dataset in enumerate(datasets):
        print(dataset)
        # Load in data
        # Training edge endpoints (i/j), held-out positive edges (*_rem) and
        # sampled non-edges (non_sparse_*), all as long tensors of indices.
        data = torch.from_numpy(np.loadtxt("data/train_masks/" + dataset + "/sparse_i.txt")).long()
        data2 = torch.from_numpy(np.loadtxt("data/train_masks/" + dataset + "/sparse_j.txt")).long()
        sparse_i_rem = torch.from_numpy(np.loadtxt("data/train_masks/" + dataset + "/sparse_i_rem.txt")).long()
        sparse_j_rem = torch.from_numpy(np.loadtxt("data/train_masks/" + dataset + "/sparse_j_rem.txt")).long()
        non_sparse_i = torch.from_numpy(np.loadtxt("data/train_masks/" + dataset + "/non_sparse_i.txt")).long()
        non_sparse_j = torch.from_numpy(np.loadtxt("data/train_masks/" + dataset + "/non_sparse_j.txt")).long()
        for model_num in range(TRAIN_NUMS):
            print(f"Train_num iter: {model_num}")
            ## RAA
            # Cross validation loop
            for k in ks:
                print(f"kvals: {k}")
                # model definition
                raa = BDRRAA(k = k,
                             d = d,
                             sample_size = sample_size,
                             data = data, data2 = data2,
                             non_sparse_i=non_sparse_i, non_sparse_j=non_sparse_j,
                             sparse_i_rem=sparse_i_rem, sparse_j_rem=sparse_j_rem)
                raa.train(iterations = iterations[idx], LR = LR, print_loss = print_loss)
                RAA_AUC_TEMP, _, _ = raa.link_prediction()
                RAA_AUC_SCORES[dataset, k].append(RAA_AUC_TEMP)
    # Create confidence interval for RAA
    lower_bound = [[] for d in datasets]
    upper_bound = [[] for d in datasets]
    for idx, dataset in enumerate(datasets):
        for k in ks:
            # NOTE(review): `alpha=` is the pre-SciPy-1.9 keyword (renamed to
            # `confidence`) — confirm the pinned SciPy version.
            conf_interval = st.t.interval(alpha=0.95, df=len(RAA_AUC_SCORES[dataset, k]) - 1, loc=np.mean(RAA_AUC_SCORES[dataset, k]), scale=st.sem(RAA_AUC_SCORES[dataset, k]))
            lower_bound[idx].append(conf_interval[0])
            upper_bound[idx].append(conf_interval[1])
        # Write this dataset's results. Note that `data` here rebinds the
        # name used for the tensor loaded above, now as a file handle.
        # NOTE(review): placement of this block (per dataset vs. after the
        # loop) is ambiguous in the whitespace-mangled source — confirm.
        with open(f"auc_{dataset}_bipartite.txt", "w") as data:
            data.write(f"{dataset}: AUC:")
            data.write("\n")
            for k in ks:
                data.write(f"AUC for k: {k}")
                data.write("\n")
                data.write(json.dumps(RAA_AUC_SCORES[dataset, k]))
                data.write("\n")
                data.write(f"Mean AUC for RAA with {k} archetypes: {np.mean(RAA_AUC_SCORES[dataset, k])}")
                data.write("\n")
            data.write(f"confidence interval RAA lower and upper bound for each k:")
            data.write("\n")
            data.write(json.dumps(lower_bound))
            data.write(json.dumps(upper_bound))
if __name__ == "__main__":
    datasets = ["drug_gene"]
    # Set iterations for each dataset
    iterations = [10000]
    # Set sample procentage for each dataset
    sample_size = [0.5]
    # Set if loss should be printed during training
    # NOTE(review): this flag is never used — the call below passes
    # print_loss = True explicitly.
    print_loss = False
    LR = 0.10
    # TRAIN_NUMS and d are read as globals by sparse_experiments().
    TRAIN_NUMS = 5
    # Find seeds
    # NOTE(review): seeds are generated but never applied to the models.
    seeds = torch.randint(low = 0, high = 10000, size = (TRAIN_NUMS,))
    # set dimensionality
    d = 2
    ks = [3, 8]
    for l, dataset in enumerate(datasets):
        sparse_experiments([dataset], ks, sample_size[l], [iterations[l]], LR, print_loss = True)
import tensorflow as tf
import numpy as np
# Seed the graph-level RNG (TF1 API) so weight initialisation is reproducible.
tf.set_random_seed(234)

# XOR truth table: 4 two-bit inputs and their expected single-bit outputs.
x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)

# Placeholders for a batch of 2-feature inputs and 1-value labels.
x = tf.placeholder(tf.float32, shape = [None,2])
y = tf.placeholder(tf.float32, shape=[None, 1])

# Four fully-connected sigmoid layers: 2 -> 10 -> 10 -> 10 -> 1.
# NOTE(review): all layers reuse the names "weight"/"bias"; TF uniquifies
# duplicate variable names, but distinct names would be clearer.
w1 = tf.Variable(tf.random_normal([2,10]), name="weight")
b1 = tf.Variable(tf.random_normal([10]), name="bias")
h1 = tf.sigmoid(tf.matmul(x,w1)+b1)

w2 = tf.Variable(tf.random_normal([10,10]), name="weight")
b2 = tf.Variable(tf.random_normal([10]), name="bias")
h2 = tf.sigmoid(tf.matmul(h1,w2)+b2)

w3 = tf.Variable(tf.random_normal([10,10]), name="weight")
b3 = tf.Variable(tf.random_normal([10]), name="bias")
h3 = tf.sigmoid(tf.matmul(h2,w3)+b3)

w4 = tf.Variable(tf.random_normal([10,1]), name="weight")
b4 = tf.Variable(tf.random_normal([1]), name="bias")
h4 = tf.sigmoid(tf.matmul(h3,w4)+b4)

# Binary cross-entropy loss, minimised with plain gradient descent.
cost = -tf.reduce_mean(y*tf.log(h4) + (1-y)*tf.log(1-h4))
train = tf.train.GradientDescentOptimizer(0.2).minimize(cost)

# Threshold the sigmoid output at 0.5 to get the predicted class.
prediction = tf.cast(h4>0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(y, prediction), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Full-batch training on the 4-row XOR set.
    for step in range(10000):
        _, cost_value = sess.run([train,cost], feed_dict={x:x_data, y:y_data})
        #sess.run(w)
        print(step, cost_value)
    # Final evaluation on the training data.
    hypothesis , p, a = sess.run([h4, prediction, accuracy], feed_dict={x:x_data, y:y_data})
    print(hypothesis)
    print(p)
    print(a)
| 1,531 | 687 |
#importing neccesary libaries and mudules needed to support functions
from __future__ import unicode_literals
from kivy.app import App
from kivy.config import Config
Config.set('input', 'multitouchscreen1', 'tuio,192.168.2.22:3306')
Config.set('postproc','desktop','1')
Config.set('kivy','exit_on_escape','1')
Config.set('kivy','log_enable','1')
Config.set('kivy', 'log_maxfiles', '-1')
Config.set('widgets', 'scroll_friction','float')
Config.set('widgets', 'scroll_distance', '4')
Config.set('graphics','borderless','0')
Config.set('graphics','rotation','0')
Config.set('graphics','full_screen','1')
Config.set('graphics','allow_screensaver','1')
Config.set('graphics','kivy_clock','free_all')
Config.set('widgets', 'scroll_distance', '4')
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
Window.clearcolor=(0,0,0,0)
from kivy.uix.checkbox import CheckBox
from kivy.uix.switch import Switch
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.screenmanager import ScreenManager, Screen, RiseInTransition, FallOutTransition
import os
import sys
from kivy.uix.videoplayer import VideoPlayer
from kivy.uix.colorpicker import ColorPicker
from kivy.uix.listview import ListItemButton, ListView
from kivy.adapters.listadapter import ListAdapter
from kivy.uix.scrollview import ScrollView
from kivy.uix.anchorlayout import AnchorLayout
from kivy.clock import Clock
from kivy.uix.dropdown import DropDown
from kivy.uix.label import Label
from kivy.uix.scrollview import ScrollView
from kivy.adapters.listadapter import ListAdapter
from kivy.uix.bubble import Bubble, BubbleButton
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.gridlayout import GridLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.interactive import InteractiveLauncher
from kivy.uix.popup import Popup
from kivy.uix.spinner import Spinner
from kivy.core.image import Image
from kivy.graphics import Color,Rectangle, Line
from kivy.uix.actionbar import ActionBar,ActionView,ActionGroup,ActionButton,ActionPrevious, ActionView,ActionOverflow, ContextualActionView
from kivy.lang import Builder
from kivy.uix.behaviors import ButtonBehavior
from kivy.loader import Loader
from kivy.support import install_twisted_reactor
from kivy.adapters.models import SelectableDataItem
from kivy.storage.jsonstore import JsonStore
from kivy.uix.dropdown import DropDown
#Homepage function
#Homepage function
class Homepage(Screen):
    """Landing screen: draws a background rectangle and fills the 'listview'
    container with the user's location, contacts or chats on demand."""

    def __init__(self, **kwargs):
        super(Homepage, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)

    #function for updating the rectangle
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos

    #function for gprs calculation
    def n_gps(self, *args):
        patient_storage = JsonStore('patients_section.json')
        doctors = JsonStore('doctors.json')
        name = patient_storage.get('patient_section')['username']
        data = 'Maximam 2km'
        self.ids['n_actionbar'].text = str(name)
        # NOTE(review): `if True:` leaves the doctor branch below unreachable
        # — confirm the intended account-type condition.
        if True:
            self.ids['n_location'].text=data
        else:
            name = doctors.get('doctors_details')['work_Number']
            self.ids['n_actionbar'].text = str(name)
            self.ids['n_location'].text=data

    #function for loading lsit of doctors of patients
    def listofcon(self, **kwargs):
        patient_storage = JsonStore('patients_section.json')
        doctors = JsonStore('doctors.json')
        # NOTE(review): this comparison is always true (a value compared to
        # itself) — confirm the intended check.
        if patient_storage.get('patient_section')['username']==patient_storage.get('patient_section')['username']:
            class Listofdoctors(GridLayout):
                def __init__(self, **kwargs):
                    super(Listofdoctors, self). __init__(**kwargs)
                    self.cols =1
                    self.bind(size=self.update_rec, pos=self.update_rec)
                    with self.canvas.before:
                        Color(1,1,1,0)
                        # NOTE(review): pos=self.size looks like a typo for
                        # pos=self.pos; left unchanged pending confirmation.
                        self.rect = Rectangle(size=self.size, pos=self.size)
                    for i in range(100):
                        btn = Button(text=str(i), size_hint=(1,1))
                        self.add_widget(btn)

                def update_rec(self, *args):
                    self.rect.size = self.size
                    self.rect.pos = self.pos

            widgets = Listofdoctors()
            self.ids['listview'].add_widget(widgets)
        else:
            if doctors.get('doctors_details')['work_number']==str:
                self.ids['listview'].clear_widgets()
                self.pi = Popup(title='register', content=Label(text='register', color=[0,1,0,1]), size_hint=(.2, .2))
                self.pi.open()
                # Bug fix: was `self.pi.dismisss` (typo), which would raise
                # AttributeError when the scheduled callback fired.
                Clock.schedule_once(self.pi.dismiss, 2)

    #list for loading the chats
    def n_chats(self, *args):
        chats = JsonStore('chats.json')
        self.ids['listview'].clear_widgets()
        self.chats = Label(text='fhfhfhfhfh', size_hint=(.2, .2))
        self.ids['listview'].add_widget(self.chats)
#class for image selection
class Imagesel(GridLayout):
    """File-selection widget used to pick an image from disk."""

    def __init__(self, **kwargs):
        super(Imagesel, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)

    #function for updating the size
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos

    # Callback assigned by the opener (e.g. to close the hosting popup).
    cancel = ObjectProperty(None)

    #function for loading the image file
    def loading(self, path, filename):
        # NOTE(review): Image(source=...) normally expects a filename string,
        # but an open file object is passed here, and the result is stored in
        # an attribute named `sound` — confirm both against callers.
        with open(os.path.join(path, filename[0])) as image:
            self.sound = Image(source=image)
        return self.sound
#class for profile settings
class My_profile(Screen):
    """Profile-settings screen: photo, status, privacy and help panes.

    Each pane builds its widgets on demand into the 'display_layout'
    container defined in the kv file.
    """
    def __init__(self, **kwargs):
        super(My_profile, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)
    #updating the background size and position
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos
    #function for building the profile photo / profession / hobbies form
    def profile_photo(self, *args):
        from kivy.factory import Factory
        self.image = Button(text='image', size_hint=(.2, .1), on_press=self.photo)
        # BUG FIX: TextInput's property is 'multiline', not 'multline'
        # (Kivy raises TypeError for unknown constructor kwargs).
        self.label = Label(text='profession', size_hint=(.1, .1))
        self.text = TextInput(multiline=False, size_hint=(1,.1),hint_text='lab tech: Mr.luke, surgeon: Dr.ian, pediatrician, nurse, artist')
        self.hobbies = Label(text='Hobbies', size_hint=(.1,.1))
        self.text_in = TextInput(multiline=False, size_hint=(1,.1), hint_text='football, swimming, karate, boxing, watching, praying, singing ')
        self.but = Button(text='update', size_hint=(.2, .1), on_press=self.update)
        for data in [self.image, self.label, self.text, self.hobbies, self.text_in, self.but]:
            self.ids['display_layout'].add_widget(data)
    #function for opening the image chooser popup
    def photo(self,*args):
        im = Imagesel()
        # BUG FIX: user-facing typo 'iamge' -> 'image'.
        self.pop = Popup(title='image', title_align='center',content = im, size_hint=(1,1) )
        self.pop.open()
    #function for applying the profile update (backend still TODO)
    def update(self, *args):
        self.ids['display_layout'].clear_widgets()
        print('jhgygyfg')
    #function for building the status form
    def status(self, *args):
        # BUG FIX: 'multline' -> 'multiline' here as well.
        self.text = TextInput(multiline=False, hint_text='Status: Today am feelings good, thanks to my creator', size_hint=(1, .1))
        self.but = Button(text='post', size_hint=(.2, .1), background_color=[0,1,0,1], on_press=self.up)
        for s in [self.text, self.but]:
            self.ids['display_layout'].add_widget(s)
    def up(self, *arg):
        # posting backend not implemented yet; just clear the form
        print('hallow')
        self.ids['display_layout'].clear_widgets()
    #function for building the privacy settings form
    def privacy(self, *args):
        self.la = Label(text='My_status', size_hint=(.2, .1))
        self.spinner = Spinner(text='public', values=('private', 'only_me'), size_hint=(.2, .1))
        self.lla = Label(text='profile', size_hint=(.2, .1))
        self.sspinner = Spinner(text='public', values=('private', 'only_me'), size_hint=(.2, .1))
        self.dla = Label(text='location', size_hint=(.2, .1))
        self.dspinner = Spinner(text='public', values=('private', 'only_me'), size_hint=(.2, .1))
        self.pla = Label(text='profession', size_hint=(.2, .1))
        self.pspinner = Spinner(text='public', values=('private', 'only_me'), size_hint=(.2, .1))
        self.hla = Label(text='hobbies', size_hint=(.2, .1))
        self.hspinner = Spinner(text='public', values=('private', 'only_me'), size_hint=(.2, .1))
        self.but = Button(text='update', size_hint=(.2, .1), pos_hint={'top': .1},background_color=[0,1,1,1],color=[1,0,1,1],on_press=self.updater)
        for privacy in [self.la, self.spinner, self.lla, self.sspinner, self.dla, self.dspinner, self.pla, self.pspinner, self.hla, self.hspinner, self.but]:
            self.ids['display_layout'].add_widget(privacy)
    def updater(self, *args):
        # privacy backend not implemented yet
        print('okay we will do the backend ')
        self.ids['display_layout'].clear_widgets()
    def help(self,*args):
        self.text = TextInput(hint_text='location', size_hint=(1, .9))
        self.e = Button(text='exit', size_hint=(.1, .1), on_press=self.dis, background_color=[1,1,1,0])
        for value in [self.text, self.e]:
            self.ids['display_layout'].add_widget(value)
    def dis(self, *args):
        self.ids['display_layout'].clear_widgets()
#class containing the patient details
class Patient_details(Screen, FloatLayout):
    """Screen collecting patient registration details."""
    def __init__(self, **kwargs):
        super(Patient_details, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos
    def n_Register(self, *args):
        """Persist the entered patient fields to patient.json.

        BUG FIX: the original iterated over the *characters* of one field
        (the `and` chain evaluates to the last non-empty string) and
        stored each character, each overwriting the previous one.  Store
        the complete field values once instead.
        """
        from kivy.storage.jsonstore import JsonStore
        store = JsonStore('patient.json')
        # 'n_postion' is the id used in the kv file -- keep the spelling.
        store.put('data',
                  username=self.ids['n_username'].text,
                  full_name=self.ids['n_full_name'].text,
                  email_address=self.ids['n_email_address'].text,
                  position=self.ids['n_postion'].text,
                  password=self.ids['n_password'].text,
                  confirm_password=self.ids['n_confirm_password'].text)
#class for capturing doctors' details
class Doctors_details(Screen, FloatLayout):
    """Screen collecting doctors' details (registration backend pending)."""
    def __init__(self, **kwargs):
        super(Doctors_details, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rect = Rectangle(size=self.size, pos=self.pos)
    def update_rec(self, *args):
        # track widget geometry so the backdrop always covers the screen
        backdrop = self.rect
        backdrop.size = self.size
        backdrop.pos = self.pos
    def nn_Register(self, *args):
        # placeholder until the real registration backend lands
        print('sjcnaihnkb')
#patients registration
class Registering(GridLayout):
    """Single-column layout shown while patient details are validated."""
    # callback supplied by the opener (used to close the containing popup)
    cancel = ObjectProperty(None)
    def __init__(self, **kwargs):
        super(Registering, self). __init__(**kwargs)
        self.cols=1
        # BUG FIX: size/pos were never bound, so update_rec was never
        # called and the background rectangle kept its initial geometry;
        # bind them like every sibling layout class does.
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)
        self.label = Label(text='validating your details')
        self.add_widget(self.label)
    #function for updating the size and position
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos
#class for registering doctors
class Doctors_section(StackLayout):
    """Verification form asking the doctor for the code sent to their phone."""
    # callback supplied by the opener (used to close the containing popup)
    cancel = ObjectProperty(None)
    def __init__(self, **kwargs):
        super(Doctors_section, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        self.cols=1
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)
        self.label = TextInput(hint_text='code send to phone',size_hint=(1, .5), is_focusable=True)
        # BUG FIX: user-facing typo 'verfy' -> 'verify'.
        # NOTE(review): self.validating is not defined in this class --
        # confirm it is provided elsewhere (kv file or subclass) before
        # this widget is instantiated.
        self.but = Button(text='verify', size_hint=(1, .5), on_press=self.validating)
        for widgets in [self.label, self.but]:
            self.add_widget(widgets)
    #function for updating the size and position
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos
#backend of doctors registration process
class Docone(GridLayout):
    """Layout used during the doctors' registration process."""
    # callback supplied by the opener (used to close the containing popup)
    cancel = ObjectProperty(None)
    def __init__(self, **kwargs):
        super(Docone, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        # NOTE(review): sibling classes draw on canvas.before; 'after'
        # paints on top of child widgets -- confirm this is intentional.
        with self.canvas.after:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)
        data = 'sfgnruibgdjvnek'
        # NOTE(review): self.ids is only populated for widgets built from
        # kv rules; for a directly-instantiated GridLayout this lookup is
        # likely to raise KeyError -- verify 'doctors_registration' exists.
        self.ids['doctors_registration'].text=data
    def update_rec(self, *args):
        # keep the background rectangle in sync with the widget
        self.rec.size=self.size
        self.rec.pos = self.pos
#class for registering
class Register_patients(Screen):
    """Patient registration screen backed by patients_section.json."""
    def __init__(self, **kwargs):
        super(Register_patients, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos
    def n_Register(self, *args):
        """Validate the form and persist the patient record.

        BUG FIXES vs the original:
        * the record was stored six times inside a nonsense
          ``for ... == str`` loop -- it is now stored once;
        * ``self.po1`` (typo for ``self.pop1``) raised AttributeError on
          the error path;
        * the always-true ``if True:`` scaffolding is removed; the error
          popup is now shown when the store actually fails.
        """
        patient_storage = JsonStore('patients_section.json')
        if self.ids['n_password'].text==self.ids['n_confirm_password'].text:
            try:
                patient_storage.put('patient_section',username=self.ids['n_username'].text, full_name=self.ids['n_full_name'].text, email_address=self.ids['n_email_address'].text, location=self.ids['n_Residential_center'].text, password=self.ids['n_password'].text, confirm_password=self.ids['n_confirm_password'].text)
            except Exception:
                self.pop1 = Popup(title='error', content=Label(text='error while uploading'), size_hint=(.2, .2))
                self.pop1.open()
                return
            self.pop2 = Popup(title='uploading', title_align='center',color=[0,1,0,1], content=Label(text='Redirecting you to the loginpage'), size_hint=(1,1))
            self.pop2.open()
            Clock.schedule_once(self.pop2.dismiss, 5)
            self.manager.current='loginpage'
        else:
            self.d = Popup(title='error', color=[0,1,0,1],content=Label(text='passwords do not match'), size_hint=(.3, .3))
            self.d.open()
            Clock.schedule_once(self.d.dismiss, 2)
    def dismissone(self, *args):
        # NOTE(review): self.contentpop is never assigned in this class --
        # confirm the popup attribute name before relying on this.
        self.contentpop.dismiss()
#section for doctors details
class Register_Doctors_details(Screen):
    """Doctor registration screen backed by doctors.json."""
    def __init__(self, **kwargs):
        super(Register_Doctors_details, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos
    #function for doctors registration
    def nn_Register(self, *args):
        """Validate the form and persist the doctor record.

        BUG FIXES vs the original:
        * the record was stored six times inside a nonsense
          ``for ... == str`` loop -- it is now stored once;
        * the error popup was dismissed immediately after being opened;
        * the always-true ``if True:`` scaffolding is removed;
        * user-facing typo 'Rederecting' -> 'Redirecting'.
        """
        doctors_storage = JsonStore('doctors.json')
        if self.ids['n_password'].text==self.ids['n_confirm_password'].text:
            try:
                doctors_storage.put('doctors_details', full_name=self.ids['n_full_name'].text, email=self.ids['n_email_address'].text, phone_number=self.ids['n_phone_number'].text, work_Number=self.ids['n_work_number'].text, password=self.ids['n_password'].text, confirm_password=self.ids['n_confirm_password'].text)
            except Exception:
                self.p = Popup(title='error', content=Label(text='error while uploading'), size_hint=(.2, .2))
                self.p.open()
                return
            self.pop1 = Popup(title='uploading', title_align='center', content=Label(text='Redirecting you to the login page ', size_hint=(1,1)))
            self.pop1.open()
            Clock.schedule_once(self.pop1.dismiss, 5)
            self.manager.current='loginpage'
        else:
            self.p = Popup(title='password error',content=Label(text='passwords do not match') ,size_hint=(.3, .2))
            self.p.open()
            Clock.schedule_once(self.p.dismiss, 1)
    def dismissme(self, *args):
        # NOTE(review): self.conn is never assigned in this class --
        # confirm the popup attribute name before relying on this.
        self.conn.dismiss()
#login page
class Loginpage(Screen):
    """Login screen: checks credentials against the patient/doctor stores."""
    def __init__(self, **kwargs):
        super(Loginpage, self). __init__(**kwargs)
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            Color(1,1,1,0)
            self.rec = Rectangle(size=self.size, pos=self.pos)
    def update_rec(self, *args):
        backdrop = self.rec
        backdrop.size = self.size
        backdrop.pos = self.pos
    #function for logging in
    def verifycredentials(self):
        """Route to the homepage when the email/password pair matches the
        patient record or, failing that, the doctor record."""
        patient_storage = JsonStore('patients_section.json')
        doctors_storage = JsonStore('doctors.json')
        email = self.ids['n_email'].text
        password = self.ids['n_password'].text
        # the doctor record is only consulted when the patient check fails
        if email==patient_storage.get('patient_section')['email_address'] and password==patient_storage.get('patient_section')['password']:
            self.manager.current='homepage'
        elif email==doctors_storage.get('doctors_details')['email'] and password==doctors_storage.get('doctors_details')['password']:
            self.manager.current='homepage'
        else:
            popup = Popup(title='Account',title_color=[1,0,1,1], content=Label(text='invalid details'), size_hint=(.3, .2))
            self.p = popup
            popup.open()
            Clock.schedule_once(popup.dismiss, 2)
    #function for navigating to patient registration
    def n_register(self, *args):
        print('prepare your documents')
        self.manager.current='register'
    #function for navigating to doctor registration
    def register_as_doc(self, *args):
        print('hallpow')
        self.manager.current='doctors_section'
    def screen_postion(self, *args):
        print('Hllo')
    def forgot_password(self, *args):
        """Open the forgot-password popup; auto-close after 20 seconds."""
        barcont = Forgot(cancel=self.dismiss)
        popup = Popup(title='forgot_password', title_align='center', content=barcont, size_hint=(.3, .3))
        self.pop = popup
        popup.open()
        Clock.schedule_once(self.dismiss, 20)
    def dismiss(self, *args):
        self.pop.dismiss()
class Forgot(GridLayout):
    """Forgot-password form: looks the password up by email and shows it.

    NOTE(review): passwords are stored and displayed in plain text --
    consider hashing them instead of echoing them back.
    """
    # callback supplied by the opener (closes the containing popup)
    cancel = ObjectProperty()
    def __init__(self, **kwargs):
        super(Forgot, self). __init__(**kwargs)
        self.cols=1
        self.bind(size=self.update_rec, pos=self.update_rec)
        with self.canvas.before:
            self.rec = Rectangle(size=self.size, pos=self.pos)
        self.label = Label(text='email_address')
        # BUG FIX: 'multline' -> 'multiline' (Kivy raises TypeError for
        # unknown constructor kwargs).
        self._input_email = TextInput(multiline=False, is_focusable=True, hint_text='input your email')
        self.but = Button(text='Request', on_press=self.request_password)
        for data in [self.label, self._input_email, self.but]:
            self.add_widget(data)
    def update_rec(self, *args):
        self.rec.size=self.size
        self.rec.pos = self.pos
    def request_password(self, *args):
        """Show the stored password for the entered email address.

        BUG FIXES vs the original: the doctor's password was read from
        the *patient* store, and the popup was opened through the
        misspelled attribute ``self.po2``.
        """
        patient_storage = JsonStore('patients_section.json')
        doctors_storage = JsonStore('doctors.json')
        if self._input_email.text==patient_storage.get('patient_section')['email_address']:
            password = patient_storage.get('patient_section')['password']
            content4 = Label(text=str(password))
            self.pop1 = Popup(title='your password is',color=[0,1,0,1], title_align='center',background_color=[0,1,0,1],content=content4, size_hint=(.3,.2))
            self.pop1.open()
            Clock.schedule_once(self.pop1.dismiss, 4)
        elif self._input_email.text==doctors_storage.get('doctors_details')['email']:
            # BUG FIX: read from doctors_storage, not patient_storage.
            password = doctors_storage.get('doctors_details')['password']
            content3 = Label(text=str(password))
            self.pop2 = Popup(title='password recoverd', title_align='center', color=[0,1,0,1],content=content3, size_hint=(.3, .2))
            # BUG FIX: 'self.po2' raised AttributeError.
            self.pop2.open()
            Clock.schedule_once(self.pop2.dismiss, 4)
#screenmanager
class ScreenManagement(ScreenManager):
    """Root screen manager; screens and transitions come from health.kv."""
    pass
# the whole UI tree is described in health.kv; loaded once at import time
kv_file = Builder.load_file('health.kv')
class healthApp(App):
    def build(self):
        """Set the application icon and return the kv-defined root widget."""
        self.icon = 'icons/watch.png'
        return kv_file
if __name__ =='__main__':
    healthApp().run()
| 21,498 | 6,950 |
import unittest
from time import sleep
import zmq
from locust.rpc import zmqrpc, Message
# fixed TCP port shared by the server (bind) and client (connect) fixtures
PORT = 5557
class ZMQRPC_tests(unittest.TestCase):
    """Round-trip tests for the locust zmqrpc Server/Client pair over TCP."""
    def setUp(self):
        # server binds on all interfaces; client connects with a fixed identity
        self.server = zmqrpc.Server('*', PORT)
        self.client = zmqrpc.Client('localhost', PORT, 'identity')
    def tearDown(self):
        # close both sockets so the port is free for the next test
        self.server.socket.close()
        self.client.socket.close()
    def test_client_send(self):
        """A message sent by the client arrives with its identity and payload."""
        self.client.send(Message('test', 'message', 'identity'))
        addr, msg = self.server.recv_from_client()
        self.assertEqual(addr, b'identity')
        self.assertEqual(msg.type, 'test')
        self.assertEqual(msg.data, 'message')
    def test_client_recv(self):
        """A message routed by identity reaches the connected client."""
        sleep(0.01)
        # We have to wait for the client to finish connecting
        # before sending a msg to it.
        self.server.send_to_client(Message('test', 'message', 'identity'))
        msg = self.client.recv()
        self.assertEqual(msg.type, 'test')
        self.assertEqual(msg.data, 'message')
        self.assertEqual(msg.node_id, 'identity')
| 1,055 | 341 |
import datetime
from mangrove.datastore.database import get_db_manager,\
_delete_db_and_remove_db_manager
from mangrove.bootstrap.initializer import _find_views
from pytz import UTC
import random
from mangrove.datastore.entity import Entity
from collections import defaultdict
class ViewGenerationTimer(object):
    """Benchmarks CouchDB view generation time for mangrove test data.

    Populates a throw-away 'mangrove-test' database with randomly
    generated entities and times how long each javascript view takes to
    build.  Python 2 only (print statements, iteritems, integer division).
    """
    def _set_db_manager(self):
        # local CouchDB instance; the database is recreated for each run
        self.manager = get_db_manager('http://localhost:5984/',
                                      'mangrove-test')
    def _delete_db_and_remove_db_manager(self):
        _delete_db_and_remove_db_manager(self.manager)
    def _refresh_db_manager(self):
        # drop and recreate the test database so each run starts clean
        self._set_db_manager()
        self._delete_db_and_remove_db_manager()
        self._set_db_manager()
    def _reset(self, number_of_entities=0, number_of_data_records_per_entity=8):
        self._number_of_entities = number_of_entities
        self._refresh_db_manager()
        self._setup_entities()
        # NOTE(review): _setup_datadict_types is not defined in this class;
        # confirm it is provided elsewhere (it must populate self.dd_types,
        # which _add_data_to_entities reads), otherwise this raises
        # AttributeError before any timing happens.
        self._setup_datadict_types()
        self._add_data_to_entities(number_of_data_records_per_entity)
    def _setup_entities(self):
        # create N entities with randomly chosen locations/aggregations
        ENTITY_TYPE = ["Health_Facility", "Clinic"]
        AGGREGATION_PATH_NAME = "governance"
        # Entities for State 1: Maharashtra
        # location, aggregation_path
        locations = [
            ['India', 'MH', 'Pune'],
            ['India', 'MH', 'Mumbai'],
            ['India', 'Karnataka', 'Bangalore'],
            ['India', 'Karnataka', 'Hubli'],
            ['India', 'Kerala', 'Kochi'],
        ]
        aggregation_paths = [
            ["Director", "Med_Supervisor", "Surgeon"],
            ["Director", "Med_Supervisor", "Nurse"],
            ["Director", "Med_Officer", "Doctor"],
            ["Director", "Med_Officer", "Surgeon"],
            ["Director", "Med_Officer", "Nurse"],
        ]
        self.entities = []
        for i in range(self._number_of_entities):
            location = random.choice(locations)
            aggregation_path = random.choice(aggregation_paths)
            e = Entity(self.manager, entity_type=ENTITY_TYPE, location=location)
            e.set_aggregation_path(AGGREGATION_PATH_NAME, aggregation_path)
            e.save()
            self.entities.append(e)
    def _add_data_to_entities(self, number_of_data_records_per_entity):
        # build one event time per (year, month) pair; relies on Python 2
        # integer division to size the year range
        months = [1]
        number_of_years = number_of_data_records_per_entity / (
            len(self.dd_types) * len(months)
        )
        years = range(2011 - max(1, number_of_years), 2011)
        event_times = []
        for year in years:
            for month in months:
                event_time = datetime.datetime(year, month, 1, tzinfo=UTC)
                event_times.append(event_time)
        #for e in self.entities:
        #    for dd_type in self.dd_types.values():
        #        for event_time in event_times:
        #            slug = dd_type.slug
        #            value = random.random()
        #            e.add_data(
        #                data=[(slug, value, self.dd_types[slug])],
        #                event_time=event_time
        #            )
    def print_csv_of_view_generation_times(self):
        """Print a CSV table: one row per view, one column per entity count."""
        iterations = [20, 40, 60, 80, 100]
        times_by_view_name = defaultdict(dict)
        for number_of_entities in iterations:
            times = self._calculate_view_generation_time(number_of_entities, 8)
            for k, v in times.items():
                times_by_view_name[k][number_of_entities] = str(v)
        print ",".join(["number of entities"] + [str(i) for i in iterations])
        for name, times in times_by_view_name.items():
            row = [name] + [times_by_view_name[name][number_of_entities] for number_of_entities in iterations]
            print ",".join(row)
    def print_view_generation_times(self):
        """Print each view's generation time, fastest first (100 entities)."""
        times = self._calculate_view_generation_time(100, 8)
        import operator
        sorted_times = sorted(times.iteritems(), key=operator.itemgetter(1))
        for view_name, generation_time in sorted_times:
            print view_name + ": " + str(generation_time)
    def _calculate_view_generation_time(self, number_of_entities, number_of_data_records_per_entity):
        """Return {view_name: seconds} for building every javascript view."""
        self._reset(number_of_entities, number_of_data_records_per_entity)
        js_views = _find_views()
        times = {}
        for v in js_views.keys():
            funcs = js_views[v]
            js_map = (funcs['map'] if 'map' in funcs else None)
            js_reduce = (funcs['reduce'] if 'reduce' in funcs else None)
            start = datetime.datetime.now()
            self.manager.create_view(v, js_map, js_reduce, view_document=v)
            all_rows = self.manager.load_all_rows_in_view(v + "/" + v)
            # we need to hit the view to make sure it compiles
            number_of_rows = len(all_rows)
            end = datetime.datetime.now()
            times[v] = (end - start).total_seconds()
        return times
if __name__ == "__main__":
divider = "-" * 70
timer = ViewGenerationTimer()
print divider
timer.print_view_generation_times()
print divider
timer.print_csv_of_view_generation_times()
print divider
| 5,106 | 1,638 |
# -*- coding: utf-8 -*-
"""
solace.scripts
~~~~~~~~~~~~~~
Provides some setup.py commands. The js-translation compiler is taken
from Sphinx, the Python documentation tool.
:copyright: (c) 2009 by Plurk Inc.
(c) 2009 by the Sphinx Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# note on imports: This module must not import anything from the
# solace package, so that the initial import happens in the commands.
import os
import sys
from datetime import datetime, timedelta
from distutils import log
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from random import randrange, choice, random, shuffle
from jinja2.utils import generate_lorem_ipsum
from babel.messages.pofile import read_po
from babel.messages.frontend import compile_catalog
from simplejson import dump as dump_json
class RunserverCommand(Command):
    """distutils command that runs the Werkzeug development server."""
    description = 'runs the development server'
    user_options = [
        ('host=', 'h',
         'the host of the server, defaults to localhost'),
        ('port=', 'p',
         'the port of the server, defaults to 3000'),
        ('no-reloader', None,
         'disable the automatic reloader'),
        ('no-debugger', None,
         'disable the integrated debugger')
    ]
    boolean_options = ['no-reloader', 'no-debugger']
    def initialize_options(self):
        # defaults; distutils overwrites these with command-line values
        self.host = 'localhost'
        self.port = 3000
        self.no_reloader = False
        self.no_debugger = False
    def finalize_options(self):
        if not str(self.port).isdigit():
            raise DistutilsOptionError('port has to be numeric')
        # BUG FIX: a port given on the command line arrives as a string,
        # but run_simple expects an integer -- normalise it here.
        self.port = int(self.port)
    def run(self):
        from werkzeug import run_simple
        def wsgi_app(*a):
            # import lazily so the app is only loaded once serving starts
            from solace.application import application
            return application(*a)
        # werkzeug restarts the interpreter with the same arguments
        # which would print "running runserver" a second time. Because
        # of this we force distutils into quiet mode.
        import sys
        sys.argv.insert(1, '-q')
        run_simple(self.host, self.port, wsgi_app,
                   use_reloader=not self.no_reloader,
                   use_debugger=not self.no_debugger)
class InitDatabaseCommand(Command):
description = 'initializes the database'
user_options = [
('drop-first', 'D',
'drops existing tables first')
]
boolean_options = ['drop-first']
def initialize_options(self):
self.drop_first = False
def finalize_options(self):
pass
def run(self):
from solace import database
if self.drop_first:
database.drop_tables()
print 'dropped existing tables'
database.init()
print 'created database tables'
class ResetDatabase(Command):
description = 'like initdb, but creates an admin:default user'
user_options = [
('username', 'u', 'the admin username'),
('email', 'e', 'the admin email'),
('password', 'p', 'the admin password')
]
def initialize_options(self):
self.username = 'admin'
self.email = None
self.password = 'default'
def finalize_options(self):
if self.email is None:
self.email = self.username + '@localhost'
def run(self):
from solace import database, models
database.drop_tables()
print 'dropped existing tables'
database.init()
print 'created database tables'
admin = models.User(self.username, self.email, self.password,
is_admin=True)
database.session.commit()
print 'Created %s:%s (%s)' % (self.username, self.password,
self.email)
class MakeTestData(Command):
    """distutils command that fills the database with generated users,
    tags, topics, posts, comments and votes for manual testing.

    Python 2 only (print statements, xrange).

    NOTE(review): 'data-set-size' takes a value, so distutils convention
    would spell it 'data-set-size=' in user_options -- without the '='
    it is parsed as a boolean flag; confirm how this command is invoked.
    """
    description = 'adds tons of test data into the database'
    user_options = [
        ('data-set-size', 's', 'the size of the dataset '
                               '(small, medium, large)')
    ]
    # pools of names/words the generators sample from
    USERNAMES = '''
    asanuma bando chiba ekiguchi erizawa fukuyama inouye ise jo kanada
    kaneko kasahara kasuse kazuyoshi koyama kumasaka matsushina
    matsuzawa mazaki miwa momotami morri moto nakamoto nakazawa obinata
    ohira okakura okano oshima raikatuji saigo sakoda santo sekigawa
    shibukji sugita tadeshi takahashi takizawa taniguchi tankoshitsu
    tenshin umehara yamakage yamana yamanouchi yamashita yamura
    aebru aendra afui asanna callua clesil daev danu eadyel eane efae
    ettannis fisil frudali glapao glofen grelnor halissa iorran oamira
    oinnan ondar orirran oudin paenael
    '''.split()
    TAGS = '''
    ajhar amuse animi apiin azoic bacon bala bani bazoo bear bloom bone
    broke bungo burse caam cento clack clear clog coyly creem cush deity
    durry ella evan firn grasp gype hance hanky havel hunks ingot javer
    juno kroo larix lift luke malo marge mart mash nairy nomos noyau
    papey parch parge parka pheal pint poche pooch puff quit ravin ream
    remap rotal rowen ruach sadhu saggy saura savor scops seat sere
    shone shorn sitao skair skep smush snoop soss sprig stalk stiff
    stipa study swept tang tars taxis terry thirt ulex unkin unmix unsin
    uprid vire wally wheat woven xylan
    '''.split()
    # base timestamp; generated dates start here and are rebased at the end
    EPOCH = datetime(1930, 1, 1)
    def initialize_options(self):
        from solace import settings
        self.data_set_size = 'small'
        # highest generated date; used by rebase_dates to shift everything
        self.highest_date = None
        self.locales = settings.LANGUAGE_SECTIONS[:]
    def finalize_options(self):
        if self.data_set_size not in ('small', 'medium', 'large'):
            raise DistutilsOptionError('invalid value for data-set-size')
    def get_date(self, last=None):
        """Return a timestamp 10-120 seconds after *last* (or EPOCH),
        tracking the maximum date handed out so far."""
        secs = randrange(10, 120)
        d = (last or self.EPOCH) + timedelta(seconds=secs)
        if self.highest_date is None or d > self.highest_date:
            self.highest_date = d
        return d
    def create_users(self):
        """Creates a bunch of test users."""
        from solace.models import User
        num = {'small': 15, 'medium': 30, 'large': 50}[self.data_set_size]
        result = []
        used = set()
        for x in xrange(num):
            # draw until we get an unused username
            while 1:
                username = choice(self.USERNAMES)
                if username not in used:
                    used.add(username)
                    break
            result.append(User(username, '%s@example.com' % username,
                               'default'))
        print 'Generated %d users' % num
        return result
    def create_tags(self):
        """Creates a bunch of tags."""
        from solace.models import Tag
        num = {'small': 10, 'medium': 20, 'large': 50}[self.data_set_size]
        result = {}
        tag_count = 0
        for locale in self.locales:
            c = result[locale] = []
            used = set()
            for x in xrange(randrange(num - 5, num + 5)):
                # draw until we get an unused tag word for this locale
                while 1:
                    tag = choice(self.TAGS)
                    if tag not in used:
                        used.add(tag)
                        break
                c.append(Tag(tag, locale).name)
                tag_count += 1
        print 'Generated %d tags' % tag_count
        return result
    def create_topics(self, tags, users):
        """Generates a bunch of topics."""
        from solace.models import Topic
        last_date = None
        topics = []
        num, var = {'small': (50, 10), 'medium': (200, 20),
                    'large': (1000, 200)}[self.data_set_size]
        count = 0
        for locale in self.locales:
            for x in xrange(randrange(num - var, num + var)):
                topic = Topic(locale, generate_lorem_ipsum(1, False, 3, 9),
                              generate_lorem_ipsum(randrange(1, 5), False,
                                                   40, 140), choice(users),
                              date=self.get_date(last_date))
                last_date = topic.last_change
                # attach 2-5 random tags of the topic's locale
                these_tags = list(tags[locale])
                shuffle(these_tags)
                topic.bind_tags(these_tags[:randrange(2, 6)])
                topics.append(topic)
                count += 1
        print 'Generated %d topics in %d locales' % (count, len(self.locales))
        return topics
    def answer_and_vote(self, topics, users):
        """Generate replies for each topic, cast random votes, and accept
        an answer for a subset of topics."""
        from solace.models import Post
        replies = {'small': 4, 'medium': 8, 'large': 12}[self.data_set_size]
        posts = [x.question for x in topics]
        last_date = topics[-1].last_change
        for topic in topics:
            for x in xrange(randrange(2, replies)):
                post = Post(topic, choice(users),
                            generate_lorem_ipsum(randrange(1, 3), False,
                                                 20, 100),
                            self.get_date(last_date))
                posts.append(post)
                last_date = post.created
        print 'Generated %d posts' % len(posts)
        votes = 0
        for post in posts:
            for x in xrange(randrange(replies * 4)):
                post = choice(posts)
                user = choice(users)
                # users cannot vote on their own posts; mostly upvotes
                if user != post.author:
                    if random() >= 0.05:
                        user.upvote(post)
                    else:
                        user.downvote(post)
                    votes += 1
        print 'Casted %d votes' % votes
        answered = 0
        for topic in topics:
            replies = list(topic.replies)
            if replies:
                # accept one of the four best-voted replies, usually
                replies.sort(key=lambda x: x.votes)
                post = choice(replies[:4])
                if post.votes > 0 and random() > 0.2:
                    topic.accept_answer(post, choice(users))
                    answered += 1
        print 'Answered %d posts' % answered
        return posts
    def create_comments(self, posts, users):
        """Creates comments for the posts."""
        from solace.models import Comment
        num = {'small': 3, 'medium': 6, 'large': 10}[self.data_set_size]
        last_date = posts[-1].created
        comments = 0
        for post in posts:
            for x in xrange(randrange(num)):
                comment = Comment(post, choice(users),
                                  generate_lorem_ipsum(1, False, 10, 40),
                                  self.get_date(last_date))
                last_date = comment.date
                comments += 1
        print 'Generated %d comments' % comments
    def rebase_dates(self, topics):
        """Rebase all dates so that they are most recent."""
        print 'Rebasing dates...',
        delta = datetime.utcnow() - self.highest_date
        for topic in topics:
            topic.last_change += delta
            topic.date += delta
            for post in topic.posts:
                post.updated += delta
                post.created += delta
                for comment in post.comments:
                    comment.date += delta
            topic._update_hotness()
        print 'done'
    def run(self):
        # generate everything, then commit in one transaction
        from solace.database import session
        users = self.create_users()
        tags = self.create_tags()
        topics = self.create_topics(tags, users)
        posts = self.answer_and_vote(topics, users)
        self.create_comments(posts, users)
        self.rebase_dates(topics)
        session.commit()
class CompileCatalogEx(compile_catalog):
    """Extends the standard catalog compiler to one that also creates
    .js files for the strings that are needed in JavaScript.
    """
    def run(self):
        # compile the regular .mo catalogs first
        compile_catalog.run(self)
        # then collect the (locale, .po) pairs and matching .js targets,
        # mirroring compile_catalog's input/output option handling
        po_files = []
        js_files = []
        if not self.input_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.directory, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
                js_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.js'))
            else:
                # no locale given: scan the catalog directory
                for locale in os.listdir(self.directory):
                    po_file = os.path.join(self.directory, locale,
                                           'LC_MESSAGES',
                                           self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        js_files.append(os.path.join(self.directory, locale,
                                                     'LC_MESSAGES',
                                                     self.domain + '.js'))
        else:
            po_files.append((self.locale, self.input_file))
            if self.output_file:
                js_files.append(self.output_file)
            else:
                js_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.js'))
        for js_file, (locale, po_file) in zip(js_files, po_files):
            infile = open(po_file, 'r')
            try:
                catalog = read_po(infile, locale)
            finally:
                infile.close()
            # skip fuzzy catalogs unless explicitly allowed
            if catalog.fuzzy and not self.use_fuzzy:
                continue
            log.info('writing JavaScript strings in catalog %r to %r',
                     po_file, js_file)
            # only ship strings that are referenced from .js source files
            jscatalog = {}
            for message in catalog:
                if any(x[0].endswith('.js') for x in message.locations):
                    msgid = message.id
                    # plural messages: key the entry on the singular id
                    if isinstance(msgid, (list, tuple)):
                        msgid = msgid[0]
                    jscatalog[msgid] = message.string
            # NOTE: writes native str to a binary-mode file -- Python 2
            # specific; would need encoding under Python 3.
            outfile = open(js_file, 'wb')
            try:
                outfile.write('Solace.TRANSLATIONS.load(');
                dump_json(dict(
                    messages=jscatalog,
                    plural_expr=catalog.plural_expr,
                    locale=str(catalog.locale),
                    domain=str(self.domain)
                ), outfile)
                outfile.write(');\n')
            finally:
                outfile.close()
| 14,411 | 4,083 |
# Application metadata consumed by the build/packaging scripts.
VERSION = "1.3"
TITLE = "Speedshield VAM Flash Utility"
EXENAME = "VAM-Flash-Utility"
import argparse
import bz2
import calendar
from datetime import datetime
# import ftplib
import gzip
import logging
import netCDF4
import numpy as np
import os
import shutil
import urllib.error
import urllib.request
import warnings
#-----------------------------------------------------------------------------------------------------------------------
# set up a basic, global _logger which will write to the console as standard error
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')
_logger = logging.getLogger(__name__)
#-----------------------------------------------------------------------------------------------------------------------
# ignore warnings
# NOTE(review): this silences *all* warnings module-wide, including from
# importers of this module -- confirm that is intended.
warnings.simplefilter('ignore', Warning)
#-----------------------------------------------------------------------------------------------------------------------
# days of each calendar month, for non-leap and leap years
_MONTH_DAYS_NONLEAP = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_MONTH_DAYS_LEAP = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
#-----------------------------------------------------------------------------------------------------------------------
def _read_daily_cmorph_to_monthly_sum(cmorph_files,
data_desc,
data_year,
data_month):
# for each file in the data directory read the data and add to the cumulative
summed_data = np.zeros((data_desc['xdef_count'] * data_desc['ydef_count'], ))
for cmorph_file in cmorph_files:
# read the year and month from the file name, make sure they all match
file_year = int(cmorph_file[-8:-4])
file_month = int(cmorph_file[-4:-2])
if file_year != data_year:
continue
elif file_month != data_month:
continue
# read the daily binary data from file, byte swap if not little endian, and mask the missing/fill values
data = np.fromfile(cmorph_file, 'f')
if not data_desc['little_endian']:
data = data.byteswap()
# replace missing values with zeros, so when we sum with previous values
# we're not adding anything to the sum where we actually have missing data
data[data == data_desc['undef']] = 0.0
# add to the summation array
summed_data += data
return summed_data
#-----------------------------------------------------------------------------------------------------------------------
def _get_years():
return list(range(1998, 2018)) # we know this, but not portable/reusable
#FIXME use the below once we work out the proxy issue on Windows
#
# # read the listing of directories from the list of raw data years, these should all be 4-digit years
# f = ftplib.FTP()
# f.connect('ftp://filsrv.cicsnc.org')
# f.login('anonymous')
# f.cwd('olivier/data_CMORPH_NIDIS/02_RAW')
# ls = f.mlsd()
# f.close()
#
# years = []
# for items in ls:
# if item['type'] == 'dir':
# year = item['name']
# if year.isdigit() and len(year) == 4 and int(year) > 1900:
# years.append(year)
#
# return years
#-----------------------------------------------------------------------------------------------------------------------
def _download_data_descriptor(work_dir):
    """Download the CMORPH GrADS data descriptor (.ctl) file.

    :param work_dir: directory where the descriptor file will be written
    :return: full path to the downloaded descriptor file
    """
    file_url = "ftp://filsrv.cicsnc.org/olivier/data_CMORPH_NIDIS/03_PGMS/CMORPH_V1.0_RAW_0.25deg-DLY_00Z.ctl"
    # build the destination path portably instead of string concatenation
    data_descriptor_file = os.path.join(work_dir, 'cmorph_data_descriptor.txt')
    urllib.request.urlretrieve(file_url, data_descriptor_file)
    return data_descriptor_file
#-----------------------------------------------------------------------------------------------------------------------
def _download_daily_files(destination_dir,
                          year,
                          month,
                          raw=True):
    """
    Downloads the daily files corresponding to a specific month.

    :param destination_dir: location where downloaded files will reside
    :param year: 4-digit year of the files to download
    :param month: 1 == January, ..., 12 == December
    :param raw: True: ingest raw data files, False: ingest the gauge adjusted data files
    :return: list of the downloaded (decompressed) files (full paths); days whose
             download fails are silently skipped
    """
    # determine which set of days per month we'll use based on if leap year or not
    if calendar.isleap(year):
        days_in_month = _MONTH_DAYS_LEAP
    else:
        days_in_month = _MONTH_DAYS_NONLEAP

    # the base URL we'll append to in order to get the individual file URLs
    year_month = str(year) + str(month).zfill(2)
    if raw:
        url_base = 'ftp://filsrv.cicsnc.org/olivier/data_CMORPH_NIDIS/02_RAW/' + str(year) + '/' + year_month
        filename_base = 'CMORPH_V1.0_RAW_0.25deg-DLY_00Z_'
    else:
        url_base = 'ftp://filsrv.cicsnc.org/olivier/data_CMORPH_NIDIS/01_GAUGE_ADJUSTED/' + str(year) + '/' + year_month
        filename_base = 'CMORPH_V1.0_ADJ_0.25deg-DLY_00Z_'

    # after 2003 the RAW data uses bz2, all gauge adjusted files use bz2;
    # earlier RAW files use gzip (hoisted: this choice is loop-invariant)
    use_bz2 = not raw or year >= 2004
    zip_extension = '.bz2' if use_bz2 else '.gz'

    # list of files we'll return
    files = []
    for day in range(days_in_month[month - 1]):
        # build the file name, URL, and local file names
        filename_unzipped = filename_base + year_month + str(day + 1).zfill(2)
        filename_zipped = filename_unzipped + zip_extension
        file_url = url_base + '/' + filename_zipped
        local_filename_zipped = destination_dir + '/' + filename_zipped
        local_filename_unzipped = destination_dir + '/' + filename_unzipped
        _logger.info('Downloading %s', file_url)
        try:
            # download the zipped file
            urllib.request.urlretrieve(file_url, local_filename_zipped)

            # decompress: BZ2 for all gauge adjusted files and RAW files after 2003,
            # gzip for RAW files before 2004 (fixed: comment previously claimed BZ2
            # for the gzip branch)
            decompressor = bz2.open if use_bz2 else gzip.open
            with decompressor(local_filename_zipped, 'r') as f_in, open(local_filename_unzipped, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)

            # append to our list of data files
            files.append(local_filename_unzipped)

            # clean up the downloaded zip file
            os.remove(local_filename_zipped)
        except urllib.error.URLError:
            # download failed; remove any partial file so it doesn't linger, then
            # move on to the next day
            if os.path.exists(local_filename_zipped):
                os.remove(local_filename_zipped)
            continue
    return files
#-----------------------------------------------------------------------------------------------------------------------
def _compute_days(initial_year,
total_months,
initial_month=1,
units_start_year=1800):
'''
Computes the "number of days" equivalent for regular, incremental monthly time steps given an initial year/month.
Useful when using "days since <start_date>" as time units within a NetCDF dataset.
:param initial_year: the initial year from which the day values should start, i.e. the first value in the output
array will correspond to the number of days between January of this initial year since January
of the units start year
:param total_months: the total number of monthly increments (time steps measured in days) to be computed
:param initial_month: the month within the initial year from which the day values should start, with 1: January, 2: February, etc.
:param units_start_year: the start year from which the monthly increments are computed, with time steps measured
in days since January of this starting year
:return: an array of time step increments, measured in days since midnight of January 1st of the units start year
:rtype: ndarray of ints
'''
# compute an offset from which the day values should begin
start_date = datetime(units_start_year, 1, 1)
# initialize the list of day values we'll build
days = np.empty(total_months, dtype=int)
# loop over all time steps (months)
for i in range(total_months):
years = int((i + initial_month - 1) / 12) # the number of years since the initial year
months = int((i + initial_month - 1) % 12) # the number of months since January
# cook up a datetime object for the current time step (month)
current_date = datetime(initial_year + years, 1 + months, 1)
# get the number of days since the initial date
days[i] = (current_date - start_date).days
return days
#-----------------------------------------------------------------------------------------------------------------------
def _init_netcdf(netcdf_file,
                 work_dir):
    """
    Initializes the NetCDF that will be written by the ASCII to NetCDF ingest process.

    Creates the time/lon/lat dimensions and coordinate variables plus an empty
    'prcp' data variable; the precipitation values themselves are written later
    by the ingest step.

    :param netcdf_file: output NetCDF we're initializing
    :param work_dir: directory where the CMORPH data descriptor file is downloaded and read
    :return: dictionary of data description values parsed from the descriptor file,
             augmented with a 'units_since_year' entry
    """
    # read data description info
    data_desc = _read_description(work_dir)
    # get the years covered
    # NOTE(review): 'years' is currently unused in this function -- confirm intent
    years = _get_years()
    # create a corresponding NetCDF
    with netCDF4.Dataset(netcdf_file, 'w') as output_dataset:
        # create the time, x, and y dimensions (time is unlimited)
        output_dataset.createDimension('time', None)
        output_dataset.createDimension('lon', data_desc['xdef_count'])
        output_dataset.createDimension('lat', data_desc['ydef_count'])
        #TODO provide additional attributes for CF compliance, data discoverability, etc.
        output_dataset.title = data_desc['title']
        # create the coordinate variables
        time_variable = output_dataset.createVariable('time', 'i4', ('time',))
        x_variable = output_dataset.createVariable('lon', 'f4', ('lon',))
        y_variable = output_dataset.createVariable('lat', 'f4', ('lat',))
        # set the coordinate variables' attributes; the units epoch is recorded in
        # the descriptor dictionary so later steps can compute matching day offsets
        data_desc['units_since_year'] = 1800
        time_variable.units = 'days since %s-01-01 00:00:00' % data_desc['units_since_year']
        x_variable.units = 'degrees_east'
        y_variable.units = 'degrees_north'
        # generate longitude and latitude values from the descriptor's start/increment/count,
        # assign these to the NetCDF coordinate variables
        lon_values = list(_frange(data_desc['xdef_start'], data_desc['xdef_start'] + (data_desc['xdef_count'] * data_desc['xdef_increment']), data_desc['xdef_increment']))
        lat_values = list(_frange(data_desc['ydef_start'], data_desc['ydef_start'] + (data_desc['ydef_count'] * data_desc['ydef_increment']), data_desc['ydef_increment']))
        x_variable[:] = np.array(lon_values, 'f4')
        y_variable[:] = np.array(lat_values, 'f4')
        # create the (empty) precipitation variable; values are filled in by the ingest step
        data_variable = output_dataset.createVariable('prcp',
                                                      'f8',
                                                      ('time', 'lat', 'lon',),
                                                      fill_value=np.NaN)
        # variable attributes
        data_variable.units = 'mm'
        data_variable.standard_name = 'precipitation'
        data_variable.long_name = 'precipitation, monthly cumulative'
        data_variable.description = data_desc['title']
    return data_desc
#-----------------------------------------------------------------------------------------------------------------------
def ingest_cmorph_to_netcdf_full(work_dir,
                                 netcdf_file,
                                 raw=True):
    """
    Ingests CMORPH daily precipitation files into a full period of record file containing monthly cumulative precipitation.

    For each month in the period of record the daily files are downloaded,
    summed into a monthly total, written into the NetCDF, and then deleted.

    :param work_dir: work directory where downloaded CMORPH files will temporarily reside while being used for ingest
    :param netcdf_file: output NetCDF
    :param raw: if True then ingest from raw files, otherwise ingest from adjusted/corrected files
    """
    # create/initialize the NetCDF dataset, get back a data descriptor dictionary
    data_desc = _init_netcdf(netcdf_file, work_dir)
    with netCDF4.Dataset(netcdf_file, 'a') as output_dataset:
        # compute the time values
        total_years = 2017 - int(data_desc['start_date'].year) + 1  #FIXME replace this hard-coded value with an additional end_year entry in the data_desc
        output_dataset.variables['time'][:] = _compute_days(data_desc['start_date'].year,
                                                            total_years * 12,
                                                            initial_month=data_desc['start_date'].month,
                                                            units_start_year=data_desc['units_since_year'])
        # get a handle to the precipitation variable, for convenience
        data_variable = output_dataset.variables['prcp']
        # loop over each year/month, reading binary data from CMORPH files and adding into the NetCDF variable
        for year in range(data_desc['start_date'].year, 2018):  # from start year through 2017, replace the value 2018 here with some other method of determining this value from the dataset itself
            for month in range(1, 13):
                # get the files for the month
                downloaded_files = _download_daily_files(work_dir, year, month, raw)
                if len(downloaded_files) > 0:
                    # read all the data for the month as a sum from the daily values, assign into the appropriate slice of the variable
                    data = _read_daily_cmorph_to_monthly_sum(downloaded_files, data_desc, year, month)
                    # assume values are in lat/lon orientation
                    data = np.reshape(data, (1, data_desc['ydef_count'], data_desc['xdef_count']))
                    # get the time index, which is actually the month's count from the start of the period of record
                    time_index = ((year - data_desc['start_date'].year) * 12) + month - 1
                    # assign into the appropriate slice for the monthly time step
                    data_variable[time_index, :, :] = data
                    # clean up the downloaded daily files now that they've been summed in
                    for file in downloaded_files:
                        os.remove(file)
#-----------------------------------------------------------------------------------------------------------------------
def _frange(start, stop, step):
i = start
while i < stop:
yield i
i += step
#-----------------------------------------------------------------------------------------------------------------------
def _read_description(work_dir):
    """
    Downloads and parses the CMORPH data descriptor file, example below:

    DSET ../0.25deg-DLY_00Z/%y4/%y4%m2/CMORPH_V1.0_RAW_0.25deg-DLY_00Z_%y4%m2%d2
    TITLE CMORPH Version 1.0BETA Version, daily precip from 00Z-24Z
    OPTIONS template little_endian
    UNDEF -999.0
    XDEF 1440 LINEAR 0.125 0.25
    YDEF 480 LINEAR -59.875 0.25
    ZDEF 01 LEVELS 1
    TDEF 99999 LINEAR 01jan1998 1dy
    VARS 1
    cmorph 1 99 yyyyy CMORPH Version 1.o daily precipitation (mm)
    ENDVARS

    :param work_dir: directory into which the descriptor file is downloaded
                     (the file is removed once parsed)
    :return: dictionary of data description keys/values
    """
    # fetch the descriptor file from FTP into the work directory
    descriptor_file = _download_data_descriptor(work_dir)
    data_dict = {}
    with open(descriptor_file, 'r') as fp:
        for line in fp:
            words = line.split()
            # skip blank lines, which would otherwise raise an IndexError below
            if not words:
                continue
            if words[0] == 'UNDEF':
                data_dict['undef'] = float(words[1])
            elif words[0] == 'XDEF':
                data_dict['xdef_count'] = int(words[1])
                data_dict['xdef_start'] = float(words[3])
                data_dict['xdef_increment'] = float(words[4])
            elif words[0] == 'YDEF':
                data_dict['ydef_count'] = int(words[1])
                data_dict['ydef_start'] = float(words[3])
                data_dict['ydef_increment'] = float(words[4])
            elif words[0] == 'TDEF':
                data_dict['start_date'] = datetime.strptime(words[3], '%d%b%Y')  # example: "01jan1998"
            elif words[0] == 'OPTIONS':
                if words[2] == 'big_endian':
                    data_dict['little_endian'] = False
                else:  # assume words[2] == 'little_endian'
                    data_dict['little_endian'] = True
            elif words[0] == 'cmorph':  # looking for a line like this: "cmorph 1 99 yyyyy CMORPH Version 1.o daily precipitation (mm)"
                data_dict['variable_description'] = ' '.join(words[4:])
            elif words[0] == 'TITLE':
                data_dict['title'] = ' '.join(words[1:])
    # clean up
    os.remove(descriptor_file)
    return data_dict
#-----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    """
    This module is used to perform ingest of binary CMORPH datasets to NetCDF.

    Example command line usage for reading all daily files for all months into a single NetCDF file with cumulative
    monthly precipitation for the full period of record (all months), with all files downloaded from FTP and removed
    once processing completes, for gauge adjusted data:

    $ python -u ingest_cmorph.py --work_dir C:/home/data/cmorph/raw \
    --out_file C:/home/data/cmorph_file.nc \
    --adjusted
    """
    try:
        # log some timing info, used later for elapsed time
        start_datetime = datetime.now()
        _logger.info("Start time: %s", start_datetime)

        # parse the command line arguments
        parser = argparse.ArgumentParser()
        parser.add_argument("--work_dir",
                            help="Directory where CMORPH daily files will be downloaded before being ingested to NetCDF",
                            required=True)
        parser.add_argument("--out_file",
                            help="NetCDF output file containing variables read from the input data",
                            required=True)
        # --raw and --adjusted are mutually exclusive; raw is the default
        feature_parser = parser.add_mutually_exclusive_group(required=False)
        feature_parser.add_argument('--raw',
                                    dest='feature',
                                    action='store_true')
        feature_parser.add_argument('--adjusted',
                                    dest='feature',
                                    action='store_false')
        feature_parser.set_defaults(feature=True)
        args = parser.parse_args()

        # fixed: these prints previously referenced nonexistent attributes
        # (args.cmorph_dir, args.download_files, args.remove_files, args.obs_type),
        # which raised AttributeError before any work was done
        print('\nIngesting CMORPH precipitation dataset')
        print('Result NetCDF: %s' % args.out_file)
        print('Work directory: %s' % args.work_dir)
        print('\tObservation type: %s\n' % ('raw' if args.feature else 'gauge adjusted'))

        # perform the ingest to NetCDF
        ingest_cmorph_to_netcdf_full(args.work_dir,
                                     args.out_file,
                                     raw=args.feature)

        # report on the elapsed time
        end_datetime = datetime.now()
        _logger.info("End time: %s", end_datetime)
        elapsed = end_datetime - start_datetime
        _logger.info("Elapsed time: %s", elapsed)

    except Exception:
        # logger.exception already records the traceback; re-raise for a nonzero exit
        _logger.exception('Failed to complete')
        raise
#!/usr/bin/env python
"""Packaging script for the tivoctl distribution; all metadata comes from the package itself."""
import setuptools

import tivoctl

# Read the long description up front with a context manager so the file
# handle is closed promptly (previously leaked via a bare open().read()).
with open("README.md") as readme:
    long_description = readme.read()

setuptools.setup(
    name=tivoctl.__title__,
    version=tivoctl.__version__,
    description=tivoctl.__doc__,
    url=tivoctl.__url__,
    author=tivoctl.__author__,
    author_email=tivoctl.__author_email__,
    license=tivoctl.__license__,
    long_description=long_description,
    entry_points={
        "console_scripts": ["tivoctl=tivoctl.__main__:main"]
    },
    packages=["tivoctl"],
    install_requires=[],
    extras_require={},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        # fixed typo: "Hom Automation" is not a valid trove classifier
        "Topic :: Home Automation",
    ],
)
| 744 | 249 |
import StringIO
from HLBackend import *
# Pairs of (ASL type or type name, GLSL type name) used to seed BasicTypeMapping
# below.  Scalar entries are string names resolved via BasicTypes; vector entries
# are concrete VectorType instances used as-is.
BasicTypeRawMapping = (
    ('void', 'void'),
    ('int', 'int'),
    (VectorType.get(BasicType_Int, 2), 'ivec2'),
    (VectorType.get(BasicType_Int, 3), 'ivec3'),
    (VectorType.get(BasicType_Int, 4), 'ivec4'),
    ('float', 'float'),
    (VectorType.get(BasicType_Float, 2), 'vec2'),
    (VectorType.get(BasicType_Float, 3), 'vec3'),
    (VectorType.get(BasicType_Float, 4), 'vec4'),
    ('sampler1D', 'sampler1D'),
    ('sampler1DArray', 'sampler1DArray'),
    ('sampler2D', 'sampler2D'),
    ('sampler2DArray', 'sampler2DArray'),
    ('sampler2DRect', 'sampler2DRect'),
    ('sampler2DRectArray', 'sampler2DRectArray'),
    ('samplerCube', 'samplerCube'),
    ('samplerCubeArray', 'samplerCubeArray'),
    ('sampler3D', 'sampler3D'),
    ('sampler3DArray', 'sampler3DArray'),
)

# Resolved ASL type -> GLSL type name mapping, populated from
# BasicTypeRawMapping by the loop further below.
BasicTypeMapping = {}
# Maps IR binary operation codes to their GLSL operator spelling.
BinaryOperationMap = {
    # Integer operations
    BinaryOperation_ADD : '+',
    BinaryOperation_SUB : '-',
    BinaryOperation_MUL : '*',
    BinaryOperation_IDIV : '/',
    BinaryOperation_UDIV : '/',  # fixed: was '+', unsigned division must emit '/'
    BinaryOperation_IREM : '%',
    BinaryOperation_UREM : '%',
    BinaryOperation_BITAND : '&',
    BinaryOperation_BITOR : '|',
    BinaryOperation_BITXOR : '^',
    BinaryOperation_SHIFTLEFT : '<<',
    BinaryOperation_SHIFTRIGHT : '>>',
    BinaryOperation_ILT : '<',
    BinaryOperation_ILE : '<=',
    BinaryOperation_IEQ : '==',
    BinaryOperation_INE : '!=',
    BinaryOperation_IGT : '>',
    BinaryOperation_IGE : '>=',
    # Floating point operations
    BinaryOperation_FADD : '+',
    BinaryOperation_FSUB : '-',
    BinaryOperation_FMUL : '*',
    BinaryOperation_FDIV : '/',
    BinaryOperation_FREM : '%',
    # Unordered float comparisons map onto the plain GLSL comparison operators.
    BinaryOperation_UFLT : '<',
    BinaryOperation_UFLE : '<=',
    BinaryOperation_UFEQ : '==',
    BinaryOperation_UFNE : '!=',
    BinaryOperation_UFGT : '>',
    BinaryOperation_UFGE : '>=',
    # NOTE(review): the ordered float comparisons below are placeholder strings,
    # not valid GLSL operators -- emitting them would produce invalid shaders.
    # Confirm whether the frontend ever generates these.
    BinaryOperation_OFLT : 'oflt',
    BinaryOperation_OFLE : 'ofle',
    BinaryOperation_OFEQ : 'ofeq',
    BinaryOperation_OFNE : 'ofne',
    BinaryOperation_OFGT : 'ofgt',
    BinaryOperation_OFGE : 'ofge',
}
# Resolve each raw mapping entry into BasicTypeMapping: string names are looked
# up in BasicTypes (falling back to the name itself), concrete type objects are
# used as keys directly.
for aslName, glslName in BasicTypeRawMapping:
    aslType = BasicTypes.get(aslName, aslName)
    BasicTypeMapping[aslType] = glslName
class GLSLCodeGenerator:
    """Visitor that emits GLSL source text for a shader AST onto an output stream.

    Statement visitors write indented lines directly; expression visitors return
    the GLSL text for the expression so callers can embed it.
    """

    def __init__(self, out):
        # out: file-like object with a write() method that receives the generated code
        self.out = out
        self.indentCount = 0

    def write(self, string):
        """Write raw text to the output stream."""
        self.out.write(string)

    def writeLine(self, string=""):
        """Write text followed by a newline."""
        self.write(string)
        self.write("\n")

    def writeIndentedLine(self, string):
        """Write a line prefixed with the current indentation."""
        for i in range(self.indentCount):
            self.write(' ')
        self.writeLine(string)

    def generate(self, shader):
        """Entry point: emit code for the given shader via the visitor protocol."""
        shader.accept(self)

    def beginLevel(self):
        # indentation is two spaces per level
        self.indentCount += 2

    def endLevel(self):
        self.indentCount -= 2

    # All shader kinds share the same content-generation path.
    def visitFragmentShader(self, shader):
        self.generateShaderContent(shader)

    def visitVertexShader(self, shader):
        self.generateShaderContent(shader)

    def visitComputeShader(self, shader):
        self.generateShaderContent(shader)

    def visitTessellationControlShader(self, shader):
        self.generateShaderContent(shader)

    def visitTessellationEvaluationShader(self, shader):
        self.generateShaderContent(shader)

    def visitFunction(self, function):
        """Emit a function definition: local declarations first, then the body statements."""
        self.writeLine("%s %s(%s) {" % (self.typeToString(function.functionType.returnType), function.name, self.generateFunctionArguments(function)))
        self.beginLevel()
        # declare all local variables up front
        for var in function.variables:
            self.writeIndentedLine("%s %s;" % (self.typeToString(var.type), var.name))
        for stmnt in function.body.statements:
            stmnt.accept(self)
        self.endLevel()
        self.writeLine("}")

    def visitReturnVoidStatement(self, ret):
        self.writeIndentedLine("return;")

    def visitReturnStatement(self, ret):
        # NOTE(review): formats ret.value via str() rather than ret.value.accept(self);
        # confirm whether return values need expression-visitor formatting.
        self.writeIndentedLine("return %s;" % ret.value)

    def visitAssignmentStatement(self, assignment):
        self.writeIndentedLine("%s = %s;" % (assignment.reference.accept(self), assignment.value.accept(self)))

    # Expression visitors: return the GLSL text rather than writing it.
    def visitGlobalVariable(self, variable):
        return variable.name

    def visitVariable(self, variable):
        return variable.name

    def visitConstant(self, constant):
        return str(constant.value)

    def visitBinaryExpression(self, expression):
        return "(%s %s %s)" % (expression.left.accept(self), BinaryOperationMap[expression.operation], expression.right.accept(self))

    def visitIfStatement(self, statement):
        self.writeIndentedLine("if (%s)" % statement.condition.accept(self))
        statement.thenStatement.accept(self)
        if statement.elseStatement is not None:
            self.writeIndentedLine("else")
            statement.elseStatement.accept(self)

    def visitBlockStatement(self, block):
        self.writeIndentedLine('{')
        self.beginLevel()
        for statement in block.statements:
            statement.accept(self)
        self.endLevel()
        self.writeIndentedLine('}')

    def visitCallStatement(self, call):
        # build the comma-separated argument list from the expression visitors
        function = call.function.name
        argStr = ''
        for arg in call.arguments:
            if len(argStr) > 0:
                argStr += ', '
            argStr += arg.accept(self)
        self.writeIndentedLine('%s (%s);' % (function, argStr))

    def visitLoopStatement(self, loop):
        """Emit a loop, recognizing while/do-while encodings; falls back to for(;;)."""
        if loop.isWhileLoop():
            # a while loop is encoded as a leading if whose break side exits the loop
            ifStatement = loop.getFirstStatement()
            cond = ifStatement.condition
            if ifStatement.elseStatement.isBreakStatement():
                self.writeIndentedLine('while(%s)' % cond.accept(self))
                body = ifStatement.thenStatement
            else:
                assert ifStatement.thenStatement.isBreakStatement()
                # the break sits on the then-branch, so the condition must be negated
                self.writeIndentedLine('while(!(%s))' % cond.accept(self))
                body = ifStatement.elseStatement
            body.accept(self)
        elif loop.isDoWhileLoop():
            self.writeIndentedLine('do {')
            self.beginLevel()
            # the trailing if holds the loop condition; emit everything before it
            ifStatement = loop.getLastStatement()
            for statement in loop.statements:
                if statement is not ifStatement:
                    statement.accept(self)
            self.endLevel()
            cond = ifStatement.condition
            if ifStatement.elseStatement.isBreakStatement():
                assert ifStatement.thenStatement.isContinueStatement()
                self.writeIndentedLine('} while (%s);' % cond.accept(self))
            else:
                assert ifStatement.thenStatement.isBreakStatement()
                assert ifStatement.elseStatement.isContinueStatement()
                self.writeIndentedLine('} while (!(%s));' % cond.accept(self))
        else:
            # general loop: emit an infinite for; breaks/continues appear in the body
            self.writeIndentedLine('for(;;) {')
            self.beginLevel()
            for statement in loop.statements:
                statement.accept(self)
            self.endLevel()
            self.writeIndentedLine('}')

    def visitBreakStatement(self, statement):
        self.writeIndentedLine('break;')

    def visitContinueStatement(self, statement):
        self.writeIndentedLine('continue;')

    def generateShaderContent(self, shader):
        """Emit structures, then global variables, then functions, with blank-line separators."""
        for structure in shader.structures:
            self.generateStructure(structure)
        if len(shader.structures) > 0:
            self.writeLine()
        for globalVar in shader.globalVariables:
            self.generateGlobalVar(globalVar)
        if len(shader.globalVariables) > 0:
            self.writeLine()
        for function in shader.functions:
            function.accept(self)

    def generateGlobalVar(self, globalVar):
        # globalVar.kind is emitted before the type -- presumably the storage
        # qualifier (uniform/in/out); confirm against the frontend.
        self.writeLine("%s %s %s;" % (globalVar.kind, self.typeToString(globalVar.type), globalVar.name))

    def generateStructure(self, structure):
        self.writeLine("struct %s {" % structure.name)
        self.beginLevel()
        for field in structure.fields:
            self.generateStructureField(field)
        self.endLevel()
        self.writeLine("};")
        self.writeLine()

    def generateStructureField(self, field):
        self.writeIndentedLine("%s %s;" % (self.typeToString(field.fieldType), field.name))

    def generateFunctionArguments(self, function):
        # The main function does not have arguments
        if function.name == "main":
            return ""
        # NOTE(review): non-main functions also produce an empty argument list here;
        # argument emission appears to be unimplemented.
        return ""

    def typeToString(self, aslType):
        """Map an ASL type to its GLSL spelling, unwrapping references and naming structures."""
        mapping = BasicTypeMapping.get(aslType, None)
        if mapping is not None:
            return mapping
        if aslType.isReference():
            return self.typeToString(aslType.baseType)
        if aslType.isStructure():
            return aslType.name
        return "UnimplementedType"
class GLSLBackend(HighLevelBackend):
    """High-level backend that generates GLSL for each shader and prints it to stdout.

    NOTE: Python 2 code (StringIO module, print statements).
    """

    def __init__(self, module):
        HighLevelBackend.__init__(self, module)

    def generateShaderCode(self, shader):
        # render the shader into an in-memory buffer, then print it with a
        # "// <name>" header comment
        out = StringIO.StringIO()
        generator = GLSLCodeGenerator(out)
        generator.generate(shader)
        print "//", shader.name
        print out.getvalue()

    def createMainFunctionNameFor(self, mainFunction):
        # GLSL entry points are always named 'main'
        return "main"

    def mainFunctionArgumentsToGlobal(self):
        return True

    def flattenMainStructureArguments(self):
        return True
from discretize.utils.interputils import interpmat
from .matutils import (
mkvc,
sdiag,
sdInv,
speye,
kron3,
spzeros,
ddx,
av,
av_extrap,
ndgrid,
ind2sub,
sub2ind,
getSubArray,
inv3X3BlockDiagonal,
inv2X2BlockDiagonal,
TensorType,
makePropertyTensor,
invPropertyTensor,
diagEst,
Zero,
Identity,
uniqueRows,
)
from .codeutils import (
memProfileWrapper,
hook,
setKwargs,
printTitles,
printLine,
checkStoppers,
printStoppers,
callHooks,
dependentProperty,
asArray_N_x_Dim,
requires,
)
from .meshutils import exampleLrmGrid, meshTensor, closestPoints, ExtractCoreMesh
from .curvutils import volTetra, faceInfo, indexCube
from .CounterUtils import Counter, count, timeIt
from . import ModelBuilder
from . import SolverUtils
from .coordutils import rotatePointsFromNormals, rotationMatrixFromNormals
from .modelutils import surface2ind_topo
from .PlotUtils import plot2Ddata, plotLayer
from .io_utils import download
from .printinfo import versions
| 1,078 | 363 |
"""
:author: Thomas Delaet <thomas@delaet.org>
"""
# pylint: disable-msg=C0301
# appinfo scan messages and system messages
from domipy.messages.appinfo_request import AppInfoRequest
from domipy.messages.module_info import ModuleInfoMessage
from domipy.messages.ping import Ping
from domipy.messages.hello import Hello
from domipy.messages.login_request_salt import LoginRequestSaltCommand
from domipy.messages.login_request_salt import LoginRequestSaltMessage
from domipy.messages.waiting_for_loginsw import WaitingForLoginswMessage
from domipy.messages.login_request import LoginRequest
from domipy.messages.info import InfoMessage
from domipy.messages.control import ControllMessage
from domipy.messages.session_closed import SessionClosedMessage
from domipy.messages.session_opened import SessionOpenedMessage
from domipy.messages.session_timeout import SessionTimeoutMessage
# status messages
from domipy.messages.ai_status import GenericAIStatusMessage
from domipy.messages.ao_status import GenericAOStatusMessage
from domipy.messages.do_status import GenericDOStatusMessage
from domipy.messages.di_status import GenericDIStatusMessage
from domipy.messages.dio_status import GenericDIOStatusMessage
from domipy.messages.dbir_status import DBIRStatusMessage
from domipy.messages.dmr_status import DDMRStatusMessage
from domipy.messages.ddim_status import DDIMStatusMessage
from domipy.messages.din10v_status import DIN10VStatusMessage
from domipy.messages.dism_status import DISM4StatusMessage
from domipy.messages.dism_status import DISM8StatusMessage
from domipy.messages.dmov_status import DMOVStatusMessage
from domipy.messages.dpbu_status import DPBU01StatusMessage
from domipy.messages.dpbu_status import DPBU02StatusMessage
from domipy.messages.dpbu_status import DPBU04StatusMessage
from domipy.messages.dpbu_status import DPBU06StatusMessage
from domipy.messages.dtrp_status import DTRPStatusMessage
from domipy.messages.dtrv_status import DTRVStatusMessage
from domipy.messages.dtrv_status import DTRVBTStatusMessage
from domipy.messages.dtsc_status import DTSCStatusMessage
from domipy.messages.module_status_request import ModuleStatusRequest
from domipy.messages.dled_status import DLEDStatusMessage
# Command messages
from domipy.messages.set_ao import SetAnalogOutputMessage
from domipy.messages.set_di import DigitalShortPush
from domipy.messages.set_di import DigitalLongPush
from domipy.messages.set_di import DigitalShortPushEnd
from domipy.messages.set_di import DigitalLongPushEnd
from domipy.messages.set_dimmer import SetDimmer
from domipy.messages.set_dimmer import StartDimmer
from domipy.messages.set_dimmer import StopDimmer
from domipy.messages.set_dimmer import IncrementDimmer
from domipy.messages.set_dimmer import DecrementDimmer
from domipy.messages.set_do import SetDigitalOutputMessage
from domipy.messages.set_do import SetDigitalOutputOnMessage
from domipy.messages.set_do import SetDigitalOutputOffMessage
from domipy.messages.set_do import TogleDigitalOutputMessage
from domipy.messages.set_do import SetDigitalOutputOpenMessage
from domipy.messages.set_do import SetDigitalOutputCloseMessage
from domipy.messages.set_do import SetDigitalOutputStopMessage
from domipy.messages.set_temperature import SetTemperatureMessage
from domipy.messages.set_temperature import SetTemperatureModeMessage
from domipy.messages.set_temperature import SetTemperatureSetPointMessage
from domipy.messages.set_temperature import SetCoolingTemperatureSetPointMessage
from domipy.messages.set_temperature import SetTemperatureComfortMessage
from domipy.messages.set_temperature import SetTemperatureAutomaticMessage
from domipy.messages.set_temperature import SetTemperatureAbsenceMessage
from domipy.messages.set_temperature import SetTemperatureFrostMessage
from domipy.messages.set_temperature import SetRegulationModeMessage
from domipy.messages.temperature_status import TE1TemperaturetatusMessage
from domipy.messages.temperature_status import TE2TemperaturetatusMessage
from domipy.messages.var_status import VARStatusMessage
from domipy.messages.var_status import SYSStatusMessage
from domipy.messages.set_var import SwitchValueMessage
from domipy.messages.user_disconnected import UserDisconnected | 4,229 | 1,235 |
import functools
from typing import List, Iterable
import pandas as pd
import ray
from ray._private.utils import get_num_cpus
from ray_shuffling_data_loader.shuffle import shuffle
from ray_shuffling_data_loader.multiqueue import MultiQueue
# Name under which the shared MultiQueue actor is created (rank 0) and located
# (other ranks).
MULTIQUEUE_ACTOR_NAME = "MultiQueue"
# Fraction of the master node's cores used when deriving the default number of
# shuffle reducers (num_trainers * cores * this share).
REDUCER_CLUSTER_CORE_SHARE = 0.6
class ShufflingDataset:
    """
    A shuffling dataset that yields batches upon iteration.

    This dataset will kick off shuffling for max_concurrent_epochs epochs at
    construction time in the master process (rank 0).

    Args:
        filenames (str): Paths to input Parquet files.
        num_epochs (int): Number of training epochs.
        num_trainers (int): Number of trainer workers.
        batch_size (int): Size of the batches that the iterator should yield.
        rank (int): The worker rank of the current process.
        drop_last (Optional[bool]): Whether to drop the last batch if it's
            incomplete (smaller than batch_size). Default is False.
        num_reducers (Optional[int]): The number of shuffler reducers. Default
            is the number of trainers x the number of cores on the master
            (rank 0) worker x 0.6.
        max_concurrent_epochs (Optional[int]): The maximum number of epochs
            whose shuffling stages should execute concurrently. Default is 2.
        max_batch_queue_size (Optional[int]): Maximum size passed through to
            the shared batch queue. Default is 0.
    """

    def __init__(self,
                 filenames: List[str],
                 num_epochs: int,
                 num_trainers: int,
                 batch_size: int,
                 rank: int,
                 drop_last: bool = False,
                 num_reducers: int = None,
                 max_concurrent_epochs: int = 2,
                 max_batch_queue_size: int = 0):
        if num_reducers is None:
            # default: trainers x master-node cores x the reducer core share
            num_reducers = int(
                num_trainers * get_num_cpus() * REDUCER_CLUSTER_CORE_SHARE)

        self._batch_size = batch_size

        if rank == 0:
            # rank == 0 --> master process
            # Create the batch queue. Trainers will consume GPU batches
            # through this batch queue.
            self._batch_queue = MultiQueue(
                num_epochs * num_trainers,
                max_batch_queue_size,
                name=MULTIQUEUE_ACTOR_NAME,
                connect=False)
            # Wait until actor has been created.
            self._batch_queue.size(0)
            # Kick off shuffle.
            # TODO(Clark): Move the shuffle kickoff to an init() method so the
            # user can better control when the shuffling starts?
            self._shuffle_result = ray.remote(shuffle).remote(
                filenames,
                functools.partial(batch_consumer, self._batch_queue,
                                  batch_size, num_trainers),
                num_epochs,
                num_reducers,
                num_trainers,
                max_concurrent_epochs,
                collect_stats=False)
        else:
            # rank != 0 --> worker process
            # Connect to the batch queue.
            self._batch_queue = MultiQueue(
                num_epochs * num_trainers,
                max_batch_queue_size,
                name=MULTIQUEUE_ACTOR_NAME,
                connect=True)
            self._shuffle_result = None

        self._num_epochs = num_epochs
        self._num_trainers = num_trainers
        self._rank = rank
        self._epoch = None
        # Used to check that the user is correctly setting the epoch at the
        # beginning of each epoch.
        self._last_epoch = None

        self._drop_last = drop_last

    def set_epoch(self, epoch):
        """
        Set the current training epoch. This should be called before
        constructing the iterator on this dataset (e.g. before the
        enumerate(train_loader) call).

        Args:
            epoch (int) The epoch number for the training epoch that is about
                to start.
        """
        self._epoch = epoch

    def __iter__(self):
        """
        This iterator yields GPU batches from the shuffling queue.

        Raises:
            ValueError: if set_epoch() has not been called since the last
                completed iteration.
        """
        if self._epoch is None or self._epoch == self._last_epoch:
            raise ValueError(
                "You must set the epoch on this dataset via set_epoch()"
                "at the beginning of each epoch, before iterating over this "
                "dataset (e.g. via enumerate(ds)).")

        # Batch leftover buffer.
        df_buffer = None
        while True:
            # Each (epoch, rank) pair owns its own slot in the shared queue.
            queue_idx = self._epoch * self._num_trainers + self._rank
            batches = self._batch_queue.get(queue_idx, block=True)
            if batches is None:
                # None is the end-of-epoch sentinel placed by the producer.
                break
            df = ray.get(batches)
            # Get first-slice offset into current dataframe.
            df_buffer_len = len(df_buffer) if df_buffer is not None else 0
            offset = self._batch_size - df_buffer_len
            # If we already have a leftover batch, concatenate it with a
            # front-slice of the current dataframe, attempting to create a
            # full-sized batch.
            # If we don't already have a leftover batch, we consume the first
            # batch in the current dataframe here.
            df_buffer = pd.concat([df_buffer, df[:offset]])
            # If we have a full-sized batch, yield it. Otherwise, hang on to
            # it and yield it in a future round, once we have a full batch.
            if len(df_buffer) == self._batch_size:
                yield df_buffer
                df_buffer = None
            # Yield batches from the current dataframe.
            pos = offset  # Fallback if offset > len(df).
            for pos in range(offset,
                             len(df) - self._batch_size + 1, self._batch_size):
                yield df[pos:pos + self._batch_size]
            # If leftover (incomplete) batch, save for later.
            pos += self._batch_size
            if pos < len(df):
                df_buffer = df[pos:]
        # Yield leftover (incomplete) batch if we're not dropping incomplete
        # batches.
        if df_buffer is not None and not self._drop_last:
            yield df_buffer
        self._last_epoch = self._epoch
        if (self._epoch == self._num_epochs - 1
                and self._shuffle_result is not None):
            # Final epoch on the master: block until the shuffle task finishes,
            # surfacing any error it raised.
            ray.get(self._shuffle_result)
def batch_consumer(queue: MultiQueue, batch_size: int, num_trainers: int,
                   rank: int, epoch: int, batches: Iterable[ray.ObjectRef]):
    """
    Batch consumer that will be provided to the shuffler.

    Routes a producer's batches (or the None end-of-epoch sentinel) onto the
    queue slot belonging to this (epoch, rank) pair.
    """
    target_slot = epoch * num_trainers + rank
    if batches is not None:
        queue.put_batch(target_slot, batches)
    else:
        queue.put(target_slot, None)
def debug_batch_consumer(rank: int, epoch: int,
                         batches: Iterable[pd.DataFrame]):
    """Stand-in batch consumer that just reports how many batches arrived."""
    if batches is None:
        num_batches = 0
    else:
        num_batches = len(batches)
    print(f"Received {num_batches} batches in consumer {rank}.")
if __name__ == "__main__":
    # Smoke-test driver: generate a synthetic dataset on disk, shuffle it
    # through ShufflingDataset, and consume every batch for a few epochs.
    from ray_shuffling_data_loader.stats import human_readable_size
    from ray_shuffling_data_loader.data_generation import generate_data
    print("Starting Ray...")
    # FIXME(Clark): We're setting the idle worker killing time very high in
    # order to work around an idle working killing bug that's causing objects
    # to be lost. We should fix this.
    ray.init(_system_config={"idle_worker_killing_time_threshold_ms": 10**6})
    # Data-generation parameters for the synthetic input files.
    num_rows = 10**6
    num_files = 10
    num_row_groups_per_file = 1
    max_row_group_skew = 0.0
    data_dir = "data"
    print(f"Generating {num_rows} rows over {num_files} files, with "
          f"{num_row_groups_per_file} row groups per file and at most "
          f"{100 * max_row_group_skew:.1f}% row group skew.")
    filenames, num_bytes = generate_data(num_rows, num_files,
                                         num_row_groups_per_file,
                                         max_row_group_skew, data_dir)
    print(f"Generated {len(filenames)} files containing {num_rows} rows "
          f"with {num_row_groups_per_file} row groups per file, totalling "
          f"{human_readable_size(num_bytes)}.")
    # Shuffling/consumption parameters: a single trainer (rank 0).
    num_epochs = 4
    num_trainers = 1
    batch_size = 20000
    rank = 0
    num_reducers = 8
    print(f"Creating shuffling dataset with {batch_size} batch size, "
          f"{num_epochs} epochs, {num_reducers} reducers, and {num_trainers} "
          "trainers.")
    print(f"Should consume {num_rows // batch_size} batches.")
    ds = ShufflingDataset(
        filenames,
        num_epochs,
        num_trainers,
        batch_size,
        rank,
        num_reducers=num_reducers)
    # Consume every batch of every epoch; set_epoch must be called before
    # iterating so the dataset reads the right queue slots.
    for epoch in range(num_epochs):
        ds.set_epoch(epoch)
        for batch_idx, batch in enumerate(ds):
            print(f"Consuming batch {batch_idx}!")
    print("Done consuming batches.")
| 8,823 | 2,570 |
def greet(name):
    """Print a friendly greeting for the given person."""
    print("Good day, " + name)


name = input("Enter the name : ")
greet(name)
| 93 | 35 |
import FWCore.ParameterSet.Config as cms
from HeavyFlavorAnalysis.Skimming.tauTo3MuHLTPath_cfi import *
from HeavyFlavorAnalysis.Skimming.tauTo3MuFilter_cfi import *
# Skim sequence: the HLT bit filter runs first, then the offline
# tau -> 3 mu candidate filter (both names come from the _cfi star
# imports above — presumably EDFilters; confirm against the cfi files).
tauTo3MuSkim = cms.Sequence(tauTo3MuHLTFilter+tauTo3MuFilter)
| 230 | 96 |
import os
import dash
import dash_html_components as html
import dash_vtk
# Get it here: https://github.com/plotly/dash-vtk/blob/master/demos/data/cow-nonormals.obj
obj_file = "datasets/cow-nonormals.obj"

# Read the OBJ geometry as text so dash_vtk can parse it client-side.
# (The redundant `txt_content = None` pre-assignment was removed: the
# `with` block always binds it or raises.)
with open(obj_file, "r") as file:
    txt_content = file.read()

# A single 3D view whose geometry comes from the in-memory OBJ text.
content = dash_vtk.View([
    dash_vtk.GeometryRepresentation([
        dash_vtk.Reader(
            vtkClass="vtkOBJReader",
            parseAsText=txt_content,
        ),
    ]),
])

# Dash setup
app = dash.Dash(__name__)
server = app.server

app.layout = html.Div(
    style={"width": "100%", "height": "400px"},
    children=[content],
)

if __name__ == "__main__":
    app.run_server(debug=True)
| 693 | 259 |
"""
Inheritance
- a base class is the 'parent' class which other classes can inherit attributes from
- sub classes inherit from a parent class and can add more attributes to themselves in addition to the attributes inherited from the parent class
- think of sub class as a 'is-a' relationship of the parent class
Composition
- what a class is composed of
- think of composition as a 'has-a' relationship
"""
| 416 | 106 |
from argparse import ArgumentParser
from glob import glob
from os import listdir
from os import system
from os import unlink
from os.path import isdir

from numpy import linspace
from numpy import split
from pandas import DataFrame
from pandas import concat
from pandas import read_csv

# Parses the input arguments.
parser = ArgumentParser(description="Generates the final data sets")
parser.add_argument("--tstat", default="tstat-3.1.1/tstat/tstat", help="the tstat command")
# type=float is required: without it, values supplied on the command line
# arrive as strings and the ratio arithmetic below
# (e.g. `1 - args.dev_ratio / 100`) raises a TypeError. The int defaults
# are untouched, so default behaviour is unchanged.
parser.add_argument("--dev_ratio", default=10, type=float, help="the dev set ratio")
parser.add_argument("--test_ratio", default=10, type=float, help="the test set ratio")
parser.add_argument("pcap", help="the name of the pcap folder")
parser.add_argument("dataset", help="the name of the data set folder")
args = parser.parse_args()

# Time thresholds (seconds) at which the captures are truncated: piecewise
# linear ramps from 0 up to 1000 s; the seam points are duplicated by
# consecutive linspace calls, so the final set() removes them.
thresholds = linspace(0.000000, 0.001000, 11).tolist()
thresholds += linspace(0.001000, 0.010000, 10).tolist()
thresholds += linspace(0.010000, 0.100000, 10).tolist()
thresholds += linspace(0.100000, 1.000000, 10).tolist()
thresholds += linspace(1.000000, 10.000000, 10).tolist()
thresholds += linspace(10.000000, 100.000000, 10).tolist()
thresholds += linspace(100.000000, 1000.000000, 10).tolist()
thresholds = set(thresholds)
def split_capture(folder: str, threshold: float) -> None:
    """
    Splits some pcap files into multiple smaller capture files.

    :param folder: the name of the folder containing the pcap files to split
    :param threshold: the time threshold
    """
    # One output folder per (source folder, threshold) pair.
    system("mkdir -p %s-%f" % (folder, threshold))
    captures = [name for name in listdir(folder) if name.endswith(".pcap")]
    for name in captures:
        # Keep only packets whose relative TCP time is within the threshold.
        system("tshark -r %s/%s -w %s-%f/%s -Y \"tcp.time_relative <= %f\"" %
               (folder, name, folder, threshold, name, threshold))
def create_data_set(source: str, output: str) -> None:
    """
    Creates a CSV file by launching tstat.

    :param source: the source file
    :param output: the output folder
    """
    prefix = "dataset"
    parts = source.split("-")
    # Maps an application name (bare or version-suffixed) to its traffic
    # category label used in the final "category" column.
    m = {
        "dos": "dos",
        "browser": "browser",
        "crawler": "crawler",
        "goldeneye": "dos",
        "hulk": "dos",
        "firefox": "browser",
        "wget": "crawler",
        "edge": "browser",
        "httrack": "crawler",
        "chrome": "browser",
        "rudy": "dos",
        "slowloris": "dos",
        "curl": "crawler",
        "wpull": "crawler",
        "goldeneye-2.1": "dos",
        "firefox-62.0": "browser",
        "hulk-1.0": "dos",
        "wget-1.11.4": "crawler",
        "edge-42.17134.1.0": "browser",
        "httrack-3.49.2": "crawler",
        "chrome-48.0.2564.109": "browser",
        "rudy-1.0.0": "dos",
        "chrome-68.0.3440.84": "browser",
        "firefox-42.0": "browser",
        "slowloris-0.1.5": "dos",
        "curl-7.55.1": "crawler",
        "curl-7.61.0": "crawler",
        "slowloris-0.1.4": "dos",
        "wpull-2.0.1": "crawler",
        "wget-1.19.5": "crawler"
    }
    # Output name carries the threshold suffix of the source folder
    # (folders produced by split_capture are named "<pcap>-<threshold>"),
    # or "-all" when the folder name has no suffix.
    if len(parts) == 1:
        name = "%s/%s-all.csv" % (output, prefix)
    else:
        name = "%s/%s-%s.csv" % (output, prefix, parts[1])
    with open(name, "w") as o:
        # Space-separated header: the first 44 tstat TCP-log columns plus
        # the label columns appended per row below.
        print(
            "c_ip c_port c_pkts_all c_rst_cnt c_ack_cnt c_ack_cnt_p c_bytes_uniq c_pkts_data c_bytes_all "
            "c_pkts_retx c_bytes_retx c_pkts_ooo c_syn_cnt c_fin_cnt s_ip s_port s_pkts_all s_rst_cnt s_ack_cnt "
            "s_ack_cnt_p s_bytes_uniq s_pkts_data s_bytes_all s_pkts_retx s_bytes_retx s_pkts_ooo s_syn_cnt "
            "s_fin_cnt first last durat c_first s_first c_last s_last c_first_ack s_first_ack c_isint s_isint "
            "c_iscrypto s_iscrypto con_t p2p_t http_t complete application_short application_long os_short "
            "os_long all category",
            file=o)
        for f in listdir(source):
            if f.endswith(".pcap"):
                # Capture names look like "<app>-<ver>_<os>-<ver>.pcap";
                # note this rebinds the outer `parts` on purpose.
                parts = f[:-5].split("_")
                app_parts = parts[0].split("-")
                os_parts = parts[1].split("-")
                category = m[parts[0]]
                # tstat writes its logs into a directory named after the pcap.
                system("%s %s/%s -s %s > /dev/null" % (args.tstat, source, f, f))
                if not isdir(f):
                    continue
                # Both complete and non-complete TCP logs are harvested.
                tcp = []
                tcp.append("%s/%s/log_tcp_complete" % (f, listdir("%s" % f)[0]))
                tcp.append("%s/%s/log_tcp_nocomplete" % (f, listdir("%s" % f)[0]))
                for t in tcp:
                    with open(t) as csv:
                        count = 1
                        for row in csv.readlines():
                            # count == 1 is tstat's header line: skip it.
                            if count > 1:
                                row = row.rstrip("\n")
                                fields = row.split()
                                # Keep only the first 44 numeric columns.
                                l = " ".join(fields[0: 44])
                                print(l, end="", file=o)
                                # "complete" flag depends on which log the
                                # row came from.
                                if t.endswith("_complete"):
                                    print(" true", end="", file=o)
                                else:
                                    print(" false", end="", file=o)
                                print(" %s" % app_parts[0], end="", file=o)
                                print(" %s" % parts[0], end="", file=o)
                                print(" %s" % os_parts[0], end="", file=o)
                                print(" %s" % parts[1], end="", file=o)
                                print(" %s_%s" % (parts[0], parts[1]), end="", file=o)
                                print(" %s" % category, end="", file=o)
                                print(file=o)
                            count += 1
                # Remove tstat's per-pcap output directory.
                system("rm -fr %s" % f)
print("Analyzing the pcap files...")
system("rm -fr %s/*.csv" % args.dataset)
create_data_set(args.pcap, args.dataset)
for i in thresholds:
split_capture(args.pcap, i)
create_data_set("%s-%f" % (args.pcap, i), args.dataset)
system("rm -fr %s-%f" % (args.pcap, i))
print("Processing the statistics...")
data_set = DataFrame()
for i in glob("%s/*.csv" % args.dataset):
data_set = data_set.append(read_csv(i, sep=" "))
unlink(i)
del data_set["c_ip"]
del data_set["s_ip"]
del data_set["c_port"]
del data_set["s_port"]
data_set.to_csv("%s/dataset.csv.gz" % args.dataset)
unknown = ["grabsite-2.1.16", "opera-62.0.3331.66", "slowhttptest-1.6", "firefox-68.0"]
unknown_set = data_set[data_set["application_long"].isin(unknown)]
data_set = data_set[~data_set["application_long"].isin(unknown)]
training_set, dev_set, known_set = split(data_set.sample(frac=1),
[int((1 - args.dev_ratio / 100 - args.test_ratio / 100) * len(data_set)),
int((1 - args.dev_ratio / 100) * len(data_set))])
training_set.to_csv("%s/training.csv.gz" % args.dataset)
dev_set.to_csv("%s/dev.csv.gz" % args.dataset)
known_set.to_csv("%s/known.csv.gz" % args.dataset)
unknown_set.to_csv("%s/unknown.csv.gz" % args.dataset)
| 7,281 | 2,526 |
# Import the necessary packages and libraries
import numpy as np
import argparse
import cv2
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
# Arguments are expected to be the --image <file name>
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="Specify the path to image")
args = vars(ap.parse_args())

# Declare constants
faceDetectorPath = 'caffe_face_detector'  # folder holding the Caffe detector files
modelFile = 'mask_detector.model'  # Keras mask classifier (see training notebook)
minConfidence = 0.5  # minimum detection confidence for a face to be kept
imageSize = 224  # square input size fed to the mask model
boxLabelWithMask = 'Wearing mask'
boxLabelWithoutMask = 'Not wearing mask'

# Load Caffe-based face detector model file
prototxtPath = os.path.sep.join([faceDetectorPath, "deploy.prototxt"])
weightsPath = os.path.sep.join([faceDetectorPath,
                                "res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

# Load mask detector model file generated from
# 02. Training the ML Model.ipynb
maskDetectorModel = load_model(modelFile)

# Load specified input image
image = cv2.imread(args["image"])
# Clone the input image
orig = image.copy()
# Retrieve image spatial dimensions
(h, w) = image.shape[:2]
print("[LOG] Image successfully retrieved.")

# Preprocess image:
# Generate a blob from the input image
# Scalefactor: 1.0
# Size: 300x300
# Mean subtraction: RGB (104, 177, 123)
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                             (104.0, 177.0, 123.0))
# Pass the generated image blob to the face detector network
faceNet.setInput(blob)
# Get face detections
detections = faceNet.forward()
# Loop over the face detections and classify each face as masked/unmasked.
for i in range(0, detections.shape[2]):
    # Confidence / probability associated with this detection
    confidence = detections[0, 0, i, 2]
    # Only do computations on detections with greater confidence
    # than set minimum
    if confidence > minConfidence:
        # Bounding box scaled back to pixel coordinates
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        # Clamp the bounding box to the frame's dimensions
        (startX, startY) = (max(0, startX), max(0, startY))
        (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
        # Extract the face ROI, convert BGR -> RGB, resize and preprocess
        # it for the mask model.
        try:
            face = image[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (imageSize, imageSize))
            face = img_to_array(face)
            face = preprocess_input(face)
            face = np.expand_dims(face, axis=0)
        except Exception as e:
            # BUG FIX: the original fell through after printing the error
            # and called predict() with an undefined (or stale) `face`.
            # Skip this detection instead.
            print(str(e))
            continue
        # Pass the processed facial region of interest to the
        # mask detector network model
        (withMask, withoutMask) = maskDetectorModel.predict(face)[0]
        # Assign class label
        label = "With Mask" if withMask > withoutMask else "No Mask"
        # Configure display text and color
        if label == "With Mask":
            boxLabel = boxLabelWithMask
            color = (50, 205, 50)
        else:
            boxLabel = boxLabelWithoutMask
            color = (50, 50, 205)
        # Insert probability to the display text
        boxLabel = "{}: {:.2f}%".format(boxLabel, max(withMask, withoutMask) * 100)
        # Show display text and bounding box on the output window
        cv2.putText(image, boxLabel, (startX, startY - 10),
                    cv2.FONT_HERSHEY_PLAIN, 1, color, 2)
        cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
# Show display text and bounding box on the output window
cv2.imshow("Face Mask Detector", image)
cv2.namedWindow("Face Mask Detector", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Face Mask Detector", 600, 600)
cv2.waitKey(0) | 3,792 | 1,359 |
from _winrt import *
| 22 | 9 |
# Reads integers until the user answers something other than 'S' to the
# continue prompt, then reports count/average and max/min.
n = 0
op = ''
cont = 0
soma = 0
maior = 0
menor = 0
while True:
    n = int(input('Digite um número: '))
    cont += 1
    soma += n
    if cont == 1:
        # First number initialises both extremes.
        maior = n
        menor = n
    else:
        if n > maior:
            maior = n
        if n < menor:
            menor = n
    # BUG FIX: the original loop condition `op in 'Ss'` only worked because
    # of the `'' in 'Ss'` substring quirk, and `input(...)[0]` crashed on an
    # empty reply; `[:1]` is safe and an explicit break is used instead.
    op = str(input('Quer continuar? [S/N] ')).upper().strip()[:1]
    if op != 'S':
        break
media = soma / cont
print(f'{cont}, {media}')
print(f'{maior}, {menor}')
| 439 | 180 |
import click
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
from kaitaistruct import KaitaiStructError
from matplotlib.ticker import MultipleLocator, NullFormatter
from .parser.hantek import Hantek
from .parser import conversions, utils
@click.argument("filename", type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True))
@click.command()
def main(filename):
    """Render a Hantek oscilloscope capture file as a graticule-style plot.

    :param filename: path to the Hantek waveform file to parse and display
    :raises click.ClickException: when the file cannot be parsed
    """
    try:
        wave = Hantek.from_file(filename)
    except (KaitaiStructError, EOFError) as exc:
        raise click.ClickException(str(exc))
    # Per-channel sample arrays, index 0-3 for Ch1-Ch4.
    channel_data = [np.array(wave.data1), np.array(wave.data2),
                    np.array(wave.data3), np.array(wave.data4)]
    # Time axis: we just use channel 1 because the info. is the same for
    # every channel.
    # Note: won't work if Ch1 is off and Ch2 is on.
    # TODO: Look at all 4 channels to determine the correct value.
    timebase = conversions.TIMEBASE[wave.header.channel1.timebase]
    timebase_str = utils.format_number(timebase, "s")
    seconds_per_sample = 1 / wave.header.channel1.samples_per_second
    time = np.arange(0, float(wave.header.channel1.sample_count))
    time *= seconds_per_sample
    # Darkmode to simulate oscilloscope
    plt.style.use('dark_background')
    fig, ax = plt.subplots()
    # Setup the graph like an oscilloscope graticule
    ax.set_ylim(-100, 100)
    ax.xaxis.set_major_locator(MultipleLocator(timebase))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.xaxis.set_minor_locator(MultipleLocator(timebase / 5))
    ax.yaxis.set_major_formatter(NullFormatter())
    ax.grid(which="major", axis="both", color="0.5")
    ax.grid(which="minor", axis="both", color="0.2")
    ax.minorticks_on()
    # Plot every channel that has data. BUG FIX: the unrolled version
    # scaled channels 2-4 by channel1.mode (copy-paste error); each channel
    # now uses its own mode.
    ytrans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData)
    for idx, data in enumerate(channel_data):
        if len(data) == 0:
            continue
        num = idx + 1
        channel = getattr(wave.header, "channel{}".format(num))
        vpd = conversions.VOLTS_PER_DIV[channel.volts_per_div]
        vpd_str = utils.format_number(vpd * (10 ** channel.mode), "V")
        col = conversions.CHANNEL_COLOUR[idx]
        ax.plot(time, data, color=col, label=f"Ch{num}: {vpd_str}")
        # Zero-offset marker line and "<n>>" tag in the left margin.
        ax.axhline(y=channel.offset, color=col, lw=0.8, ls="-")
        ax.text(0, channel.offset, f"{num}>", color=col, transform=ytrans, ha="right", va="center")
    # Trigger line, coloured like its source channel.
    trigger_level = wave.header.channel1.trigger_level
    trigger_channel = wave.header.channel1.trigger_channel
    col = conversions.CHANNEL_COLOUR[trigger_channel]
    ax.axhline(y=trigger_level, color=col, lw=0.8, ls="-")
    ax.text(0, trigger_level, "T", color=col, transform=ytrans, ha="right", va="center")
    # Timebase Text
    samples_per_second = utils.format_number(wave.header.channel1.samples_per_second)
    sampling_depth = utils.format_number(wave.header.channel1.sampling_depth)
    plt.title(f"Timebase: {timebase_str}, {samples_per_second}Sa/s, {sampling_depth}Pt")
    # Display it
    ax.legend()
    plt.show()
| 4,482 | 1,646 |
import re
import random
def test_phones_on_home_page(app):
    """Home-page phone column must equal the merged phones from the edit form."""
    all_contacts = app.contact.get_contact_list()
    idx = random.randrange(len(all_contacts))
    from_home = app.contact.get_contact_list()[idx]
    from_edit = app.contact.get_contact_info_from_edit_page(idx)
    expected = app.contact.merge_phones_like_on_home_page(from_edit)
    assert from_home.all_phones_from_home_page == expected
def test_phones_on_contact_view_page(app):
    """Every phone field on the view page must equal its edit-form value."""
    all_contacts = app.contact.get_contact_list()
    idx = random.randrange(len(all_contacts))
    from_view = app.contact.get_contact_info_from_view_page(idx)
    from_edit = app.contact.get_contact_info_from_edit_page(idx)
    for field in ("home_telephone", "work_telephone",
                  "mobile_telephone", "secondary_telephone"):
        assert getattr(from_view, field) == getattr(from_edit, field)
| 1,102 | 368 |
import requests
import code
import json
import os.path
from pathlib import Path
AFIP_GROOVY_PATH = '{}/scanner'.format(Path('.'))
class Problem:
    """A vulnerability definition loaded from vulns.json.

    Keys of the JSON dict become instance attributes; attribute access for
    keys missing from the JSON goes through __getattr__, which warns and
    degrades to "" instead of raising.
    """

    def __init__(self, name, problem_dict):
        self.name = name
        for key in problem_dict:
            setattr(self, key, problem_dict[key])

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.__str__()

    def __getattr__(self, attr):
        # Only called for attributes NOT populated from the JSON: warn and
        # return an empty string so the string formatting below still works.
        print('[!] WARNING: {} not set in vuln JSON!'.format(attr))
        return ""

    def _to_camel_case(self, snake_str, capitalize_first=False):
        """snake_case -> camelCase (PascalCase when capitalize_first)."""
        components = snake_str.split('_')
        first = components[0] if not capitalize_first else components[0][0].upper() + components[0][1:]
        return first + "".join(x.title() for x in components[1:])

    def _to_title_case(self, snake_str):
        """snake_case -> space-separated 'Title Case' words."""
        return ' '.join([word[0].upper() + word[1:] for word in snake_str.split('_')])

    def register(self):
        """Interactively scaffold all Groovy files for this problem."""
        create_groovy_files = input('Do you want to create the groovy files for this problem? [Y/n]').lower()
        if create_groovy_files == 'n':
            return
        print('[+] Creating detector file')
        self.create_detector_file()
        print('[+] Creating vuln file')
        self.create_groovy_vuln_file()
        print('[+] Creating test file')
        self.create_groovy_test_file()
        danger = input('Do you want to add to the Groovy\'s vuln list? This IS dangerous. Be sure to be on git. [Y/n]').lower()
        if danger != 'n':
            print('[+] Adding to the vuln list')
            self.add_to_groovy_vulns()
        # BUG FIX: this prompt was missing .lower(), so answering 'N' (upper
        # case) still modified the detector list, unlike the prompt above.
        danger = input('Do you want to add to the Groovy\'s detector list? This IS dangerous. Be sure to git. [Y/n]').lower()
        if danger != 'n':
            print('[+] Adding to detector list')
            self.add_to_groovy_detectors()
        print('[:)] Finished')
        print('[!] You may want to add your detector to the setDetectorsFromStrings method in afip.main!')

    def create_groovy_test_file(self):
        """Write the <Name>Tests.groovy integration-test skeleton."""
        camel_case_name = self._to_camel_case(self.name, True)
        tests_path = '{}/src/tests'.format(AFIP_GROOVY_PATH)
        file_path = '{}/{}Tests.groovy'.format(tests_path, camel_case_name)
        if os.path.isfile(file_path):
            print('[+] Test file already exists. Skipping.')
            return False
        lines = ['import afip.detectors.{}Detector'.format(camel_case_name),
                 '',
                 'class {}Tests extends BaseIntegrationTest {{'.format(camel_case_name),
                 '    def setupSpec() {',
                 '        detectorManager.addDetector(new {}Detector())'.format(camel_case_name),
                 '    }',
                 '}',
                 ]
        with open(file_path, 'w+') as f:
            f.write("\n".join(lines))
        return True

    def create_groovy_vuln_file(self):
        """Write the <Name>.groovy vuln class skeleton."""
        camel_case_name = self._to_camel_case(self.name, True)
        vulns_path = '{}/src/groovy/afip/vulns'.format(AFIP_GROOVY_PATH)
        file_path = '{}/{}.groovy'.format(vulns_path, camel_case_name)
        if os.path.isfile(file_path):
            print('[+] Vuln file already exists. Skipping.')
            return False
        java_boolean = 'true' if self.is_vuln else 'false'
        lines = ['package afip.vulns',
                 '',
                 'class {} extends Vuln {{'.format(camel_case_name),
                 '',
                 '    {}(String filePath, int lineNumber, String code) {{'.format(camel_case_name),
                 '        super(filePath, lineNumber, code)',
                 '        setFriendlyName("{}")'.format(self._to_title_case(self.name)),
                 '        setConfidence("{}")'.format(self.confidence),
                 '        setInformation("{}")'.format(self.information),
                 '        setIsVuln({})'.format(java_boolean),
                 '        setCriticality("{}")'.format(self.severity),
                 '    }',
                 '}',
                 ]
        with open(file_path, 'w+') as f:
            f.write("\n".join(lines))
        return True

    def _change_line(self, original_line, added_list_content):
        """Append `added_list_content` to the Groovy list literal found in
        `original_line`, preserving the original indentation."""
        leading_spaces = len(original_line) - len(original_line.lstrip())
        line = original_line.strip()
        splited = line.split('[')
        groovy_list_contents_str = '{}'.format(splited[1][:-1])
        groovy_new_list_contents_str = '{}, {}'.format(
            groovy_list_contents_str,
            added_list_content)
        groovy_new_line = '{}{}[{}]\n'.format(
            ' '*leading_spaces,
            splited[0],
            groovy_new_list_contents_str)
        return groovy_new_line

    def _new_groovy_vuln_list(self, line, new_vuln):
        """Rewrite the vulnClasses declaration line with `new_vuln` appended."""
        assert line.strip().startswith('private static final List<Class<Vuln>> vulnClasses = ')
        return self._change_line(line, new_vuln)

    def add_to_groovy_vulns(self):
        """Append this vuln's class name to Vuln.groovy's vulnClasses list."""
        def is_correct_line(line):
            start = 'private static final List<Class<Vuln>> vulnClasses ='
            return line.strip().startswith(start)
        camel_case_name = self._to_camel_case(self.name, True)
        vulns_path = '{}/src/groovy/afip/vulns'.format(AFIP_GROOVY_PATH)
        file_path = '{}/Vuln.groovy'.format(vulns_path)
        with open(file_path, 'r') as f:
            original_file_lines = f.readlines()
        with open(file_path, 'w') as f:
            for line in original_file_lines:
                if is_correct_line(line) and camel_case_name not in line:
                    new_lst = self._new_groovy_vuln_list(line, camel_case_name)
                    print('[+] Replacing line. Old vs new: \n {} \n {}'.format(line, new_lst))
                    f.write(new_lst)
                else:
                    f.write(line)
        return True

    def create_detector_file(self):
        """Write the <Name>Detector.groovy detector skeleton."""
        camel_case_name = self._to_camel_case(self.name, True)
        detectors_path = '{}/src/groovy/afip/detectors'.format(AFIP_GROOVY_PATH)
        file_path = '{}/{}Detector.groovy'.format(detectors_path, camel_case_name)
        if os.path.isfile(file_path):
            print('[+] Detector file already exists. Skipping.')
            return False
        # BUG FIX: the original list was missing the comma after '', which
        # silently concatenated '' with the import line and dropped the
        # intended blank line after the package statement.
        lines = ['package afip.detectors',
                 '',
                 'import afip.vulns.{}'.format(camel_case_name),
                 'import afip.variables.Variable',
                 'import org.apache.commons.logging.LogFactory',
                 '',
                 'class {}Detector extends Detector {{'.format(camel_case_name),
                 '    private static final log = LogFactory.getLog(this)',
                 '',
                 '    {}Detector() {{'.format(camel_case_name),
                 '        super({}, [], [])'.format(camel_case_name),
                 '    }',
                 '',
                 '    /** Edit this method to begin. Logic to detect vulns should be here and in similar methods. */',
                 '    ArrayList<{}> detect(Variable _) {{ return [] }}'.format(camel_case_name),
                 '}',
                 ]
        with open(file_path, 'w+') as f:
            f.write("\n".join(lines))
        return True

    def add_to_groovy_detectors(self):
        """Append this detector to DetectorManager.groovy's allDetectors list."""
        camel_case_name = self._to_camel_case(self.name, True)
        managers_path = '{}/src/groovy/afip/managers'.format(AFIP_GROOVY_PATH)
        file_path = '{}/DetectorManager.groovy'.format(managers_path)
        def is_correct_line(line):
            return line.strip().startswith('private List<Class<Detector>> allDetectors = [')
        with open(file_path, 'r') as f:
            original_file_lines = f.readlines()
        with open(file_path, 'w') as f:
            for line in original_file_lines:
                if is_correct_line(line) and camel_case_name not in line:
                    new_lst = self._change_line(line, '{}Detector'.format(camel_case_name))
                    print('[+] Replacing line. Old vs new: \n {} \n {}'.format(line, new_lst))
                    f.write(new_lst)
                else:
                    f.write(line)
        return True
def load_problems():
    """Read vulns.json from the working directory and return it as a dict."""
    with open('vulns.json', 'r') as f:
        return json.loads(f.read())
def pretty_print_as_supermarket_list(title, *strings):
    """Print a title (for no title, give a falsey value on first param)
    and an arbitrary number of strings like it was a nice supermarket list.
    """
    if title and strings:
        print('[{0}]'.format(title))
    for number, item in enumerate(strings, start=1):
        print('{0}.\t{1}'.format(number, item))
def get_all_problems():
    """Wrap every entry of vulns.json in a Problem instance."""
    problems = load_problems()
    return [Problem(name, spec) for name, spec in problems.items()]
def deal_with_problem():
    """Show the problem menu, read a selection, and register it.

    Returns False when the user did not enter a number (caller then drops
    into interactive mode), True otherwise.
    """
    pretty_print_as_supermarket_list('Problems', *[p.name for p in PROBLEMS])
    try:
        choice = int(input('Enter the problem number you want to deal with: '))
    except ValueError:
        return False
    try:
        selected = PROBLEMS[choice - 1]
    except IndexError:
        print('[!] Problem does not exist')
        return True
    selected.register()
    return True
def main():
    """Interactive entry point: handle one problem, optionally drop into a
    REPL when no selection was made, then loop while the user says 'y'."""
    print('[!] Your AFIP Groovy path is {}'.format(AFIP_GROOVY_PATH))
    print('Entering indivual problem handling mode. Just input nothing to go to interactive mode. Call main() to re enter.')
    input_exists = deal_with_problem()
    if not input_exists:
        # No numeric selection: open a REPL over the module globals so the
        # user can drive Problem objects by hand (call main() to return).
        code.interact(local=dict(globals()))
    keep_going = input('Do you wish to continue? [y/N] ').lower() == 'y'
    while keep_going:
        deal_with_problem()
        keep_going = input('Do you wish to continue? [y/N] ').lower() == 'y'
# Module runs immediately on import: load the problem catalogue into a
# global, then start the interactive loop (no __main__ guard by design —
# main() is also meant to be re-entered from the REPL).
PROBLEMS = get_all_problems()
main()
| 9,672 | 3,011 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Deletes the SessionAsset model (follow-up to the 0033 merge)."""

    dependencies = [
        ('heritage', '0033_merge'),
    ]

    # Order matters: the pointer/FK fields are removed before the model
    # itself so the schema editor can drop the table cleanly.
    operations = [
        migrations.RemoveField(
            model_name='sessionasset',
            name='interviewasset_ptr',
        ),
        migrations.RemoveField(
            model_name='sessionasset',
            name='session',
        ),
        migrations.DeleteModel(
            name='SessionAsset',
        ),
    ]
| 543 | 154 |
import pytest
import memory
# Memory-game cases: (first, second, third starting number, expected
# 2020th spoken number) — apparently the published Advent of Code 2020
# day 15 examples; confirm against the puzzle text.
inputs = [
    (0, 3, 6, 436),
    (1, 3, 2, 1),
    (2, 1, 3, 10),
    (1, 2, 3, 27),
    (2, 3, 1, 78),
    (3, 2, 1, 438),
    (3, 1, 2, 1836)
]
@pytest.fixture(params=inputs)
def starting(request):
    """Yield ([three starting numbers], expected 2020th number) pairs."""
    *numbers, expected = request.param
    return numbers, expected
def test_apply_rule_for_one_turn():
    """Replay the worked example one spoken number at a time."""
    seed = list(inputs[0][:3])
    spoken = []
    for expected in (0, 3, 3, 1, 0, 4, 0):
        assert expected == memory.next(seed + spoken)
        spoken.append(expected)
def test_find_2020_number(starting):
    """The 2020th spoken number matches the published example answers."""
    numbers, expected = starting
    assert memory.compute_number_at_turn(turn=2020, starting=numbers) == expected
# Same starting triples, but with the expected 30,000,000th spoken number
# (the slow "part two" cases).
inputs_high = [
    (0, 3, 6, 175594),
    (1, 3, 2, 2578),
    (2, 1, 3, 3544142),
    (1, 2, 3, 261214),
    (2, 3, 1, 6895259),
    (3, 2, 1, 18),
    (3, 1, 2, 362)
]
@pytest.fixture(params=inputs_high)
def starting_h(request):
    """Yield ([three starting numbers], expected 30,000,000th number) pairs."""
    *numbers, expected = request.param
    return numbers, expected
def test_find_high_number(starting_h):
    """The 30,000,000th spoken number matches the published answers."""
    numbers, expected = starting_h
    assert memory.compute_number_at_turn(turn=30000000, starting=numbers) == expected
| 1,301 | 636 |
import sys
import re
from utils.utils import print_writeofd
# First argument is whether or not to proceed with manual checking:
if sys.argv[1] == '-m':
    MANUAL_CHECKING = True
elif sys.argv[1] == '-a':
    MANUAL_CHECKING = False
else:
    print("The first argument must be either -m or -a, see README.md for details")
    exit(1)
# Second argument is the output file from ast_proj.py
ifd = open(sys.argv[2], 'r')
# Third argument is the output file for those projs that have constant inputs
ofd1 = open(sys.argv[3], 'w')
# All files number
allfile = 0
# Actual determined occurence of Constant, entirely or partly:
accocc_ent = 0
accocc_par = 0
# Snippets judged (manually) to be experimental-only constants.
experi_ent = 0
# Number of exception:
excep = 0
lines = ifd.readlines()
# i and j delimit the current snippet inside `lines`; see the main loop.
i = 0
j = 0
printed_ent_count = 0
# printed_par_count = 0
# Flag: the current snippet contained an "EXCEPTION" line.
excepted = 0
# Ad-hoc take care of some cases where the traced line because
# a library call with constant parameters can actually return non-constant results
def auto_exclude_cases(line):
    """Return True when `line` is a known false-positive "Entirely Constant"
    report that should be skipped without asking the user.

    Two rules: exact membership in a fixed allow-list, and substring
    matches for constant-looking file-extension arguments.
    """
    # Set membership is O(1) per call instead of scanning a list; the
    # original list also contained "Entirely Constant:\n" twice.
    exclusion = {"Entirely Constant: ||\n", "Entirely Constant: string||\n", "Entirely Constant:\n",
                 "Entirely Constant: Hello, World!||\n", "Entirely Constant: hello world||\n", "Entirely Constant: ?||\n",
                 "Entirely Constant: polly||\n", "Entirely Constant: texttospeech||\n", "Entirely Constant: utf8||\n",
                 "Entirely Constant: ap-south-1||\n", "Entirely Constant: txt||\n", "Entirely Constant: mp3||\n",
                 "Entirely Constant: Message||||\n", "Entirely Constant: Text:||\n", "Entirely Constant: test1234||\n",
                 "Entirely Constant: Enter something : ||\n", "Entirely Constant: AWSPollyIMGSpeech||\n",
                 "Entirely Constant: TranslatedText||\n", "Entirely Constant: <speak>||\n", "Entirely Constant: w||\n",
                 "Entirely Constant: :||\n", "Entirely Constant: Hello World!||\n", "Entirely Constant: Hello world!||\n"}
    if line in exclusion:
        return True
    inclusion = (".wav||\n", ".txt||\n", ".jpg||\n", ".png||\n")
    return any(inc in line for inc in inclusion)
# Walk the report snippet by snippet; snippets are delimited by the
# "=====..." separator lines. i/j bracket the current snippet; k is the
# line written to ofd1 as the snippet's identifier.
while i < len(lines):
    printed_ent = 0
    experi = 0
    allfile += 1
    j = i + 1
    # Advance j to the next separator (or EOF).
    while j < len(lines) and lines[j] != "=================================================\n":
        j += 1
    if j == len(lines):
        break
    # Now i and j stores the start and end of one search snippet
    k = i + 1
    # First pass over the snippet: detect constant reports and exceptions.
    while i < j:
        if auto_exclude_cases(lines[i]):
            i += 1
            continue
        if "Entirely Constant" in lines[i]:
            if MANUAL_CHECKING:
                # Echo the finding (newline stripped) for the reviewer.
                print(re.sub('\n', '', lines[i]))
            printed_ent = 1
        i += 1
        if "EXCEPTION" in lines[i]:
            excepted = 1
    if MANUAL_CHECKING:
        if printed_ent == 1:
            # Ask the reviewer to classify the snippet just printed.
            print("Does the above snippet contains an actual string constant? If constant, press 1, if not, press 2, if looks like for experimental purpose, press 3")
            user = input()
            while user != '1' and user != '2' and user != '3':
                print("PRESS 1 OR 2 OR 3, NOT ANYTHING ELSE!")
                user = input()
            # If one of these is entirely constant, treat the entire project as entirely constant
            if user == '1' and printed_ent == 1:
                accocc_ent += 1
                ofd1.write("Entirely constant: ")
                ofd1.write(lines[k])
            elif user == '2' and printed_ent == 1:
                ofd1.write("Auto tool wrong detection: ")
                ofd1.write(lines[k])
            elif user == '3' and printed_ent == 1:
                experi_ent += 1
                ofd1.write("Entirely Experimental: ")
                ofd1.write(lines[k])
            printed_ent = 0
            printed_par = 0
            print("\n\n\n\n\n\n")
    else:
        # Automatic mode: every detected constant counts without review.
        if printed_ent == 1:
            ofd1.write("Entirely constant: ")
            ofd1.write(lines[k])
            accocc_ent += 1
    # Record snippets that raised exceptions regardless of mode.
    if excepted == 1:
        excep += 1
        ofd1.write(lines[k])
        excepted = 0
print_writeofd("\n\n\n\n\n", ofd1)
print_writeofd("Total file searched: {}".format(allfile), ofd1)
print_writeofd("Entirely Constants found: {}".format(accocc_ent), ofd1)
print_writeofd("Experimental Constants found: {}".format(experi_ent), ofd1)
print_writeofd("Exceptions occurred: {}".format(excep), ofd1)
if MANUAL_CHECKING:
print_writeofd("RELYING ON MANUAL CHECKING: {} CONSTANT INPUTS".format(accocc_ent), ofd1)
print_writeofd("RELYING ON MANUAL CHECKING: {} RELEVANT TOTAL PROJECTS".format(allfile - excep), ofd1)
else:
print_writeofd("RELYING ON AUTO TOOL: {} CONSTANT INPUTS".format(accocc_ent), ofd1)
print_writeofd("RELYING ON AUTO TOOL: {} RELEVANT TOTAL PROJECTS".format(allfile - excep), ofd1) | 4,883 | 1,632 |
import calendar
import os
import re
import pandas as pd
# Worksheet names for the regional sea-ice spreadsheet: one Area and one
# Extent sheet (km^2) per region. Not referenced by the code visible in
# this chunk — presumably consumed by a regional-XLSX branch of read();
# confirm before removing.
REGIONAL_SHEETS = ['Baffin-Area-km^2',
                   'Baffin-Extent-km^2',
                   'Barents-Area-km^2',
                   'Barents-Extent-km^2',
                   'Beaufort-Area-km^2',
                   'Beaufort-Extent-km^2',
                   'Bering-Area-km^2',
                   'Bering-Extent-km^2',
                   'CanadianArchipelago-Area-km^2',
                   'CanadianArchipelago-Extent-km^2',
                   'Central-Arctic-Area-km^2',
                   'Central-Arctic-Extent-km^2',
                   'Chukchi-Area-km^2',
                   'Chukchi-Extent-km^2',
                   'East-Siberian-Area-km^2',
                   'East-Siberian-Extent-km^2',
                   'Greenland-Area-km^2',
                   'Greenland-Extent-km^2',
                   'Hudson-Area-km^2',
                   'Hudson-Extent-km^2',
                   'Kara-Area-km^2',
                   'Kara-Extent-km^2',
                   'Laptev-Area-km^2',
                   'Laptev-Extent-km^2',
                   'Okhotsk-Area-km^2',
                   'Okhotsk-Extent-km^2',
                   'St-Lawrence-Area-km^2',
                   'St-Lawrence-Extent-km^2']
# read a file produced by seaice.tools, and return a DataFrame or dict of
# DataFrames from that file
def read(filename, parse_like=None):
    """Read a file produced by seaice.tools into pandas structures.

    filename: the file to read
    parse_like: the name of the file generated by seaice.tools, eg
       N_04_extent_v3.0.csv; with this parameter, you can have your own
       "version" of the file, like my_great_N_april_extent_v3.0.csv,
       and pass 'N_04_extent_v3.0.csv' as the second parameter to have
       it parsed

    Returns a DataFrame for the CSV formats, or a dict of DataFrames
    (keyed by sheet name) for the multi-sheet XLSX formats.

    Raises Exception if ``parse_like`` matches no known format.

    Note: ``DataFrame.drop`` is called with the ``columns=`` keyword
    throughout; the positional ``axis`` argument used previously was
    deprecated in pandas 1.0 and removed in pandas 2.0.  Regex patterns
    are raw strings to avoid invalid-escape warnings.
    """
    if parse_like is None:
        parse_like = os.path.basename(filename)
    if re.match(r'[NS]_seaice_extent_daily_v[0-9\.]*.csv', parse_like):
        df = pd.read_csv(filename)
        # strip column names
        df.columns = [c.strip() for c in df.columns]
        # first line is a label row with values like YYYY and MM and should be
        # ignored
        df = df[1:]
        index = pd.DatetimeIndex(pd.to_datetime(df[['Year', 'Month', 'Day']]))
        df = df.set_index(index)
        df = df.drop(columns=['Year', 'Month', 'Day'])
        df.name = 'Daily Extent CSV'
        return df
    elif re.match(r'[NS]_(?:0[1-9]|1[012])_extent_v[0-9\.]*.csv', parse_like):
        df = pd.read_csv(filename, na_values=-9999)
        # strip column names
        df.columns = [c.strip() for c in df.columns]
        index = pd.to_datetime(pd.DataFrame({'year': df.year, 'month': df.mo, 'day': 1}))
        df = df.set_index(index)
        df = df.drop(columns=['year', 'mo'])
        df.name = 'Monthly Extent CSV'
        return df
    elif re.match(r'[NS]_seaice_extent_climatology_1981-2010_v[0-9\.]*.csv', parse_like):
        return pd.read_csv(filename, header=1, index_col=0)
    elif re.match(r'Sea_Ice_Index_Monthly_Data_with_Statistics_G02135_v[0-9\.]*.xlsx', parse_like):
        dfs = {}
        sheets = []
        sheets += ['{}-NH'.format(month) for month in calendar.month_name[1:]]
        sheets += ['{}-SH'.format(month) for month in calendar.month_name[1:]]
        for sheet in sheets:
            df = pd.read_excel(filename, sheet_name=sheet, header=9, index_col=0)
            # strip column names
            df.columns = [c.strip() for c in df.columns]
            # the ranking columns live beside the time series; split them off
            rank_columns = ['ordered-rank', 'ranked-year', 'ranked-extent']
            ranked_df = df[rank_columns]
            df = df.drop(columns=rank_columns + ['reordered =>'])
            index = pd.to_datetime(pd.DataFrame({'year': df.year, 'month': df.month, 'day': 1}))
            df = df.set_index(index)
            df = df.drop(columns=['year', 'month'])
            df.name = 'Monthly Sea Ice Index {}'.format(sheet)
            ranked_df.name = 'Monthly Sea Ice Index {} Ranked'.format(sheet)
            dfs[sheet] = df
            dfs[sheet + ' Ranked'] = ranked_df
        return dfs
    elif re.match(r'Sea_Ice_Index_Regional_Daily_Data_G02135_v[0-9\.]*.xlsx', parse_like):
        dfs = {}
        xlsx_dfs = pd.read_excel(filename, sheet_name=REGIONAL_SHEETS, header=0, index_col=[0, 1])
        for sheet_name, df in xlsx_dfs.items():
            df = df.stack()
            df = df.reset_index()
            # convert month name to number
            df.month = df.month.apply(calendar.month_name[:].index)
            # get a datetime index
            df['date'] = df.apply(lambda x: pd.Timestamp(year=int(x.level_2),
                                                         month=int(x.month),
                                                         day=int(x.day)), axis='columns')
            df = df.drop(columns=['month', 'day', 'level_2'])
            df = df.set_index('date').sort_index()
            df.columns = [sheet_name]
            dfs[sheet_name] = df
        return dfs
    elif re.match(r'Sea_Ice_Index_Regional_Monthly_Data_G02135_v[0-9\.]*.xlsx', parse_like):
        return pd.read_excel(filename, sheet_name=REGIONAL_SHEETS, header=[0, 1])
    elif re.match(r'Sea_Ice_Index_Daily_Extent_G02135_v[0-9\.]*.xlsx', parse_like):
        dfs = {}
        sheets = ['NH-Daily-Extent', 'NH-5-Day-Extent', 'NH-5-Day-Anomaly',
                  'NH-5-Day-Daily-Change', 'SH-Daily-Extent', 'SH-5-Day-Extent',
                  'SH-5-Day-Anomaly', 'SH-5-Day-Daily-Change']
        # these sheets carry an extra 1981-2010 climatology column
        sheets_with_climatology = ['NH-Daily-Extent', 'NH-5-Day-Extent',
                                   'SH-Daily-Extent', 'SH-5-Day-Extent']
        xlsx_dfs = pd.read_excel(filename, sheet_name=sheets, header=0, index_col=[0, 1])
        for sheet_name, df in xlsx_dfs.items():
            if sheet_name in sheets_with_climatology:
                clima = df['1981-2010']
                df = df.drop(columns='1981-2010')
                df = df.drop(columns=' ')
            df = df.stack()
            df = df.reset_index()
            # convert month name to number
            df.level_0 = df.level_0.apply(calendar.month_name[:].index)
            # Get a datetime index
            df['date'] = df.apply(lambda x: pd.Timestamp(year=int(x.level_2),
                                                         month=int(x.level_0),
                                                         day=int(x.level_1)), axis='columns')
            df = df.drop(columns=['level_0', 'level_1', 'level_2'])
            df = df.set_index('date').sort_index()
            df.columns = [sheet_name]
            dfs[sheet_name] = df
            if sheet_name in sheets_with_climatology:
                dfs[sheet_name + ' climatology'] = clima
        return dfs
    elif re.match(r'Sea_Ice_Index_Monthly_Data_by_Year_G02135_v[0-9\.]*.xlsx', parse_like):
        dfs = {}
        sheets = ['NH-Extent', 'NH-Area', 'SH-Extent', 'SH-Area']
        xlsx_dfs = pd.read_excel(filename, sheet_name=sheets, header=0, index_col=0)
        for sheet_name, df in xlsx_dfs.items():
            annual_df = df[['Annual']]
            df = df.drop(columns=['Unnamed: 12', 'Annual'])
            df = df.stack()
            df = df.reset_index()
            # convert month name to number
            df.level_1 = df.level_1.apply(calendar.month_name[:].index)
            df['date'] = df.apply(lambda x: pd.Timestamp(year=int(x.level_0),
                                                         month=int(x.level_1),
                                                         day=1), axis='columns')
            df = df.drop(columns=['level_0', 'level_1'])
            df = df.set_index('date').sort_index()
            df.columns = [sheet_name]
            dfs[sheet_name] = df
            dfs[sheet_name + ' Annual'] = annual_df
        return dfs
    elif re.match(r'Sea_Ice_Index_Rates_of_Change_G02135_v[0-9\.]*.xlsx', parse_like):
        sheets = ['NH-Ice-Change-Mkm^2-per-Month',
                  'NH-Ice-Change-km^2-per-Day',
                  'NH-Ice-Change-mi^2-per-Day',
                  'NH-Ice-Change-mi^2-per-Month',
                  'SH-Ice-Change-Mkm^2-per-Month',
                  'SH-Ice-Change-km^2-per-Day',
                  'SH-Ice-Change-mi^2-per-Day',
                  'SH-Ice-Change-mi^2-per-Month']
        xlsx_dfs = pd.read_excel(filename, sheet_name=sheets, header=1, index_col=0)
        dfs = {}
        for sheet_name, df in xlsx_dfs.items():
            # climatology row plus a trailing summary row sit at the bottom
            clima = df.loc['1981-2010']
            df = df[:-2]
            df = df.stack()
            df = df.reset_index()
            # convert month name to number
            df.level_1 = df.level_1.apply(calendar.month_name[:].index)
            df['date'] = df.apply(lambda x: pd.Timestamp(year=int(x.level_0),
                                                         month=int(x.level_1),
                                                         day=1), axis='columns')
            df = df.drop(columns=['level_0', 'level_1'])
            df = df.set_index('date').sort_index()
            df.columns = [sheet_name]
            dfs[sheet_name] = df
            dfs[sheet_name + ' climatology'] = clima
        return dfs
    elif re.match(r'Sea_Ice_Index_Min_Max_Rankings_G02135_v[0-9\.]*.xlsx', parse_like):
        annual_sheets = ['NH-Annual-5-Day-Extent',
                         'NH-Annual-Daily-Extent',
                         'SH-Annual-5-Day-Extent',
                         'SH-Annual-Daily-Extent']
        other_sheets = ['NH-5-Day-Extent-Min',
                        'NH-5-Day-Extent-Max',
                        'NH-Daily-Extent-Min',
                        'NH-Daily-Extent-Max',
                        'SH-5-Day-Extent-Min',
                        'SH-5-Day-Extent-Max',
                        'SH-Daily-Extent-Min',
                        'SH-Daily-Extent-Max']
        dfs = {}
        xlsx_annual = pd.read_excel(filename, sheet_name=annual_sheets, header=0, index_col=0)
        for sheet_name, df in xlsx_annual.items():
            dfs[sheet_name] = df
        xlsx_dfs = pd.read_excel(filename, sheet_name=other_sheets, header=[0, 1], index_col=0)
        for sheet_name, df in xlsx_dfs.items():
            dfs[sheet_name] = df
        return dfs
    else:
        raise Exception('Parser not found for given file: {}.'.format(parse_like))
| 10,319 | 3,599 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-Python.
# @File : numba
# @Time : 2019-12-26 14:52
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from numba import jit, njit, vectorize
import numpy as np
x = np.arange(100).reshape(10, 10)  # demo input: 10x10 integer matrix
# Numba likes NumPy broadcasting
# @jit
@njit  # "nopython" mode gives better performance
def go_fast1(a):  # compiled the first time it is called
    """Return ``a + sum(tanh(diag(a)))`` — a numba-friendly loop demo."""
    trace = 0
    for i in range(a.shape[0]):  # Numba likes loops
        trace += np.tanh(a[i, i])  # Numba likes NumPy functions
    return a + trace
"""Experiment 2, Analysis Group 2.
Comparing measures of global signal.
Mean cortical signal of MEDN correlated with signal of all gray matter
- Distribution of Pearson correlation coefficients
- Page 2, right column, first paragraph
Mean cortical signal of MEDN correlated with signal of whole brain
- Distribution of Pearson correlation coefficients
- Page 2, right column, first paragraph
"""
import os.path as op
import sys
import numpy as np
from nilearn import image, masking
from scipy.stats import ttest_1samp
sys.path.append("..")
from utils import get_prefixes # noqa: E402
def correlate_cort_with_gm(project_dir, participants_df):
    """Correlate mean cortical signal from MEDN files with signal from all gray matter.

    - Distribution of Pearson correlation coefficients
    - Page 2, right column, first paragraph

    Parameters
    ----------
    project_dir : str
        Root directory; one subdirectory per dataset with power/tedana derivatives.
    participants_df : pandas.DataFrame
        Rows per run with "participant_id", "dataset", and "exclude" columns;
        rows with exclude == 1 are skipped.
    """
    ALPHA = 0.05

    corrs = []
    for i_run, participant_row in participants_df.iterrows():
        if participant_row["exclude"] == 1:
            print(f"Skipping {participant_row['participant_id']}.")
            continue

        subj_id = participant_row["participant_id"]
        dset = participant_row["dataset"]
        dset_prefix = get_prefixes()[dset]
        subj_prefix = dset_prefix.format(participant_id=subj_id)
        cort_mask = op.join(
            project_dir,
            dset,
            "derivatives",
            "power",
            subj_id,
            "anat",
            f"{subj_id}_space-scanner_res-bold_label-CGM_mask.nii.gz",
        )
        dseg_file = op.join(
            project_dir,
            dset,
            "derivatives",
            "power",
            subj_id,
            "anat",
            f"{subj_id}_space-scanner_res-bold_desc-totalMaskWithCSF_dseg.nii.gz",
        )
        # Values 1-3 are cortical ribbon, subcortical structures, and cerebellum, respectively.
        gm_mask = image.math_img(
            "np.logical_and(img > 0, img <= 3).astype(int)", img=dseg_file
        )
        medn_file = op.join(
            project_dir,
            dset,
            "derivatives",
            "tedana",
            subj_id,
            "func",
            f"{subj_prefix}_desc-optcomDenoised_bold.nii.gz",
        )
        cort_data = masking.apply_mask(medn_file, cort_mask)
        gm_data = masking.apply_mask(medn_file, gm_mask)
        # Average across voxels
        cort_data = np.mean(cort_data, axis=1)  # TODO: CHECK AXIS ORDER
        gm_data = np.mean(gm_data, axis=1)
        corr = np.corrcoef((cort_data, gm_data))
        assert corr.shape == (2, 2), corr.shape
        corr = corr[1, 0]
        corrs.append(corr)

    corrs = np.array(corrs)
    # Convert r values to normally distributed z values with Fisher's
    # transformation (not test statistics though)
    z_values = np.arctanh(corrs)
    mean_z = np.mean(z_values)
    sd_z = np.std(z_values)
    # Degrees of freedom must count only the runs actually analyzed; the
    # original reported participants_df.shape[0] - 1, which overcounts
    # whenever any rows are excluded above.
    dof = z_values.size - 1
    # TODO: Should we compute confidence intervals from z-values then
    # convert back to r-values? I think so, but there's so little in the
    # literature about dealing with *distributions* of correlation
    # coefficients.
    t, p = ttest_1samp(z_values, popmean=0, alternative="greater")
    if p <= ALPHA:
        print(
            "ANALYSIS 1: Correlations between the mean multi-echo denoised signal extracted from "
            "the cortical ribbon and that extracted from all gray matter "
            f"(M[Z] = {mean_z}, SD[Z] = {sd_z}) were significantly higher than zero, "
            f"t({dof}) = {t:.03f}, p = {p:.03f}."
        )
    else:
        print(
            "ANALYSIS 1: Correlations between the mean multi-echo denoised signal extracted from "
            "the cortical ribbon and that extracted from all gray matter "
            f"(M[Z] = {mean_z}, SD[Z] = {sd_z}) were not significantly higher than zero, "
            f"t({dof}) = {t:.03f}, p = {p:.03f}."
        )
def correlate_cort_with_wb(project_dir, participants_df):
    """Correlate mean cortical signal from MEDN files with signal from whole brain.

    - Distribution of Pearson correlation coefficients
    - Page 2, right column, first paragraph

    Parameters
    ----------
    project_dir : str
        Root directory; one subdirectory per dataset with power/tedana derivatives.
    participants_df : pandas.DataFrame
        Rows per run with "participant_id", "dataset", and "exclude" columns;
        rows with exclude == 1 are skipped.
    """
    ALPHA = 0.05

    corrs = []
    for i_run, participant_row in participants_df.iterrows():
        if participant_row["exclude"] == 1:
            print(f"Skipping {participant_row['participant_id']}.")
            continue

        subj_id = participant_row["participant_id"]
        dset = participant_row["dataset"]
        dset_prefix = get_prefixes()[dset]
        subj_prefix = dset_prefix.format(participant_id=subj_id)
        cort_mask = op.join(
            project_dir,
            dset,
            "derivatives",
            "power",
            subj_id,
            "anat",
            f"{subj_id}_space-scanner_res-bold_label-CGM_mask.nii.gz",
        )
        dseg_file = op.join(
            project_dir,
            dset,
            "derivatives",
            "power",
            subj_id,
            "anat",
            f"{subj_id}_space-scanner_res-bold_desc-totalMaskWithCSF_dseg.nii.gz",
        )
        # Values 1+ are brain.
        wb_mask = image.math_img("img > 0", img=dseg_file)
        medn_file = op.join(
            project_dir,
            dset,
            "derivatives",
            "tedana",
            subj_id,
            "func",
            f"{subj_prefix}_desc-optcomDenoised_bold.nii.gz",
        )
        cort_data = masking.apply_mask(medn_file, cort_mask)
        wb_data = masking.apply_mask(medn_file, wb_mask)
        # Average across voxels
        cort_data = np.mean(cort_data, axis=1)  # TODO: CHECK AXIS ORDER
        wb_data = np.mean(wb_data, axis=1)
        corr = np.corrcoef((cort_data, wb_data))
        assert corr.shape == (2, 2), corr.shape
        corr = corr[1, 0]
        corrs.append(corr)

    # Convert to array before arctanh, matching correlate_cort_with_gm
    # (the original applied np.arctanh directly to the Python list).
    corrs = np.array(corrs)
    # Convert r values to normally distributed z values with Fisher's
    # transformation (not test statistics though)
    z_values = np.arctanh(corrs)
    mean_z = np.mean(z_values)
    sd_z = np.std(z_values)
    # Degrees of freedom must count only the runs actually analyzed; the
    # original reported participants_df.shape[0] - 1, which overcounts
    # whenever any rows are excluded above.
    dof = z_values.size - 1
    # TODO: Should we compute confidence intervals from z-values then
    # convert back to r-values? I think so, but there's so little in the
    # literature about dealing with *distributions* of correlation
    # coefficients.
    t, p = ttest_1samp(z_values, popmean=0, alternative="greater")
    if p <= ALPHA:
        print(
            "ANALYSIS 2: Correlations between the mean multi-echo denoised signal extracted from "
            "the cortical ribbon and that extracted from the whole brain "
            f"(M[Z] = {mean_z}, SD[Z] = {sd_z}) were significantly higher than zero, "
            f"t({dof}) = {t:.03f}, p = {p:.03f}."
        )
    else:
        print(
            "ANALYSIS 2: Correlations between the mean multi-echo denoised signal extracted from "
            "the cortical ribbon and that extracted from the whole brain "
            f"(M[Z] = {mean_z}, SD[Z] = {sd_z}) were not significantly higher than zero, "
            f"t({dof}) = {t:.03f}, p = {p:.03f}."
        )
| 7,215 | 2,381 |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Creates a dictionary from token to index based on dictionary .txt given.
"""
import argparse
import json
import os
# Command-line interface: path of the input pronunciation dictionary and the
# JSON file to write.  Parsed at import time.
parser = argparse.ArgumentParser()
parser.add_argument('--dictionary', required=True, default=None, type=str)
parser.add_argument('--dict_out', required=True, default=None, type=str)
args = parser.parse_args()
def main():
    """Build token-to-index mappings from a pronunciation dictionary.

    Reads ``args.dictionary``, where each line is ``WORD PHONE [PHONE ...]``,
    collects the phone inventory, assigns each phone an integer index, and
    writes ``{'phone2idx': ..., 'word2phones': ...}`` as JSON to
    ``args.dict_out``.

    Raises:
        FileNotFoundError: if ``args.dictionary`` does not exist.
    """
    if not os.path.exists(args.dictionary):
        raise FileNotFoundError(f"Could not find dictionary file {args.dictionary}")

    phonemes = set()
    word2phones = {}
    with open(args.dictionary, 'r') as f:
        for line in f:
            parts = line.split()
            if not parts:
                # Tolerate blank lines; the original indexed parts[0] and crashed.
                continue
            word = parts[0]
            tokens = parts[1:]
            word2phones[word] = tokens
            phonemes.update(tokens)

    # Small list of additional punctuation; each maps to the space/silence token.
    for punct in (',', ';', '.', '!', '?', '"', '-'):
        word2phones[punct] = [' ']

    # Sort so index assignment is deterministic across runs (set iteration
    # order varies with string hash randomization).
    phone2idx = {k: i for i, k in enumerate(sorted(phonemes))}
    phone2idx[' '] = len(phone2idx)
    phone2idx['sil'] = phone2idx[' ']  # Silence
    phone2idx['sp'] = phone2idx[' ']  # Space
    phone2idx['spn'] = phone2idx[' ']  # OOV/unk

    dicts = {
        'phone2idx': phone2idx,
        'word2phones': word2phones,
    }
    with open(args.dict_out, 'w') as f:
        json.dump(dicts, f, ensure_ascii=False)
    print(f"Total number of phone indices: {len(phone2idx)}")


if __name__ == '__main__':
    main()
| 2,119 | 710 |
from typing import Union
from fortnitepy import FriendMessage, PartyMessage
# A chat message from either a direct (friend) conversation or a party chat.
Message = Union[FriendMessage, PartyMessage]
| 123 | 32 |
from modelo.BD_tipoVehiculo import _TipoVehiculo
from modelo.createDatabase import makeEngine
from modelo.createDatabase import makeBase
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
class TipoVehiculo:
    """Domain wrapper and simple DAO for vehicle types persisted via SQLAlchemy."""

    # Shared declarative base and engine for all instances.
    base = makeBase()
    eng = makeEngine()

    def __init__(self, id, tipo):
        self.id = id      # numeric primary key
        self.tipo = tipo  # human-readable type name

    def __str__(self):
        return self.tipo

    def getAllTipos(self):
        """Return every vehicle type as TipoVehiculo objects; [] on query failure.

        The original used a bare ``except`` and then crashed iterating over
        ``None`` when the query failed; the session is now always closed and
        failure yields an empty list.
        """
        Session = sessionmaker(bind=self.eng)
        ses = Session()
        query = None
        try:
            query = ses.query(_TipoVehiculo).all()
        except Exception:
            print("no se puede generar la consulta")
        finally:
            ses.close()
        if query is None:
            return []
        return [TipoVehiculo(int(i.id), str(i.tipo)) for i in query]

    def selectTipo(self, tipo, listoftipos):
        """Return the entry of ``listoftipos`` whose .tipo equals ``tipo``, else None."""
        for t in listoftipos:
            if t.tipo == tipo:
                return t
        return None
if __name__ == "__main__":
#tv = TipoVehiculo()
TipoVehiculo.getAllTipos(TipoVehiculo) | 1,087 | 401 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import enum
from numpy.lib.arraysetops import isin
from numpy.lib.function_base import insert
from data_utils.metrics import calc_metrics
from mt_dnn.batcher import Collater
from data_utils.task_def import TaskType
from data_utils.utils_qa import postprocess_qa_predictions
from copy import deepcopy
import numpy as np
import torch
from tqdm import tqdm
def extract_encoding(model, data, use_cuda=True):
    """Encode every batch with ``model.encode`` and return one padded tensor.

    Batches may produce different sequence lengths; each output is
    zero-padded on the sequence axis to the longest batch before the
    results are concatenated along the batch axis.
    """
    if use_cuda:
        model.cuda()

    encoded_batches = []
    longest = 0
    for batch_idx, (batch_info, batch_data) in enumerate(data):
        batch_info, batch_data = Collater.patch_data(use_cuda, batch_info, batch_data)
        encoded = model.encode(batch_info, batch_data)
        encoded_batches.append(encoded)
        if encoded.shape[1] > longest:
            longest = encoded.shape[1]

    padded = []
    for encoded in encoded_batches:
        buffer = torch.zeros(encoded.shape[0], longest, encoded.shape[2])
        buffer[:, :encoded.shape[1], :] = encoded
        padded.append(buffer)
    return torch.cat(padded)
def reduce_multirc(uids, predictions, golds):
    """Group per-answer predictions and golds by their question-level uid.

    MultiRC uids have three '_'-separated components; answers belonging to
    the same question share the first two.  Returns
    ``(predict_map, gold_map)``, both defaultdicts mapping the question-level
    uid to the list of per-answer values.
    """
    assert len(uids) == len(predictions)
    assert len(uids) == len(golds)
    from collections import defaultdict
    predict_map = defaultdict(list)
    gold_map = defaultdict(list)
    for idx, uid in enumerate(uids):
        blocks = uid.split('_')
        assert len(blocks) == 3
        nuid = '_'.join(blocks[:-1])
        # Key on the question-level id.  The original computed nuid and then
        # keyed on the full per-answer uid, which made every group a
        # singleton and the reduction a no-op.
        predict_map[nuid].append(predictions[idx])
        gold_map[nuid].append(golds[idx])
    return predict_map, gold_map
def merge(src, tgt):
    """Merge ``src`` into ``tgt`` and return the result.

    Lists/tuples extend the target, dicts merge recursively, and scalars
    replace the target.  An empty or None target receives a deep copy of
    ``src``.
    """

    def _combine(new, acc):
        # Recursive worker: fold `new` into the accumulator `acc`.
        if isinstance(new, dict):
            for key, value in new.items():
                if key in acc:
                    acc[key] = _combine(value, acc[key])
                else:
                    acc[key] = value
        elif isinstance(new, list):
            acc.extend(new)
        elif isinstance(new, tuple):
            if isinstance(new[0], list):
                # Tuple of lists: extend the matching slot element-wise.
                for pos in range(len(new)):
                    acc[pos].extend(new[pos])
            else:
                acc.extend(new)
        else:
            acc = new
        return acc

    if tgt is None or len(tgt) == 0:
        return deepcopy(src)
    return _combine(src, tgt)
def eval_model(model, data, metric_meta, device, with_label=True, label_mapper=None, task_type=TaskType.Classification):
    """Run ``model.predict`` over every batch and aggregate the outputs.

    Returns ``(metrics, predictions, scores, golds, ids)``; ``metrics`` stays
    an empty dict unless ``with_label`` is True.
    """
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    for (batch_info, batch_data) in tqdm(data, total=len(data)):
        # patch_data presumably moves/adapts the batch for `device` — confirm.
        batch_info, batch_data = Collater.patch_data(device, batch_info, batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        # merge() extends lists / recurses into dicts, accumulating across batches.
        scores = merge(score, scores)
        golds = merge(gold, golds)
        predictions = merge(pred, predictions)
        ids = merge(batch_info['uids'], ids)
    if task_type == TaskType.Span:
        # Extractive QA without unanswerable questions.
        predictions, golds = postprocess_qa_predictions(golds, scores, version_2_with_negative=False)
    elif task_type == TaskType.SpanYN:
        # Extractive QA with unanswerable questions allowed.
        predictions, golds = postprocess_qa_predictions(golds, scores, version_2_with_negative=True)
    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores, label_mapper)
    return metrics, predictions, scores, golds, ids
import sys
import argparse
from copy import deepcopy
from typing import cast
from typing import List
from gffpal.gff import GFF
from gffpal.gff import GFF3Record
from gffpal.attributes import GFF3Attributes
import logging
logger = logging.getLogger(__name__)
TYPE_MAP = {
"euk": {
"5s_rrna": "rRNA_5S",
"8s_rrna": "rRNA_5S",
"18s_rrna": "rRNA_18S",
"28s_rrna": "rRNA_28S",
},
"bac": {
"5s_rrna": "rRNA_5S",
"16s_rrna": "rRNA_16S",
"23s_rrna": "rRNA_23S",
},
"arc": {
"5s_rrna": "rRNA_5S",
"16s_rrna": "rRNA_16S",
"23s_rrna": "rRNA_23S",
},
}
def cli_rnammer2gff(parser):
    """Register the rnammer2gff command's arguments on ``parser``.

    Adds: positional ``infile`` (gff2 file, '-' for stdin), ``-o/--outfile``
    (default stdout), ``-s/--source`` (value for the gff source column), and
    ``-k/--kingdom`` (arc/bac/euk, the kingdom RNAmmer was run with).
    """
    parser.add_argument(
        "infile",
        type=argparse.FileType('r'),
        help=(
            "Input gff2 result file. "
            "Use '-' for stdin."
        ),
    )

    parser.add_argument(
        "-o", "--outfile",
        type=argparse.FileType('w'),
        default=sys.stdout,
        help="Output gff file path. Default stdout.",
    )

    parser.add_argument(
        "-s", "--source",
        default="RNAmmer",
        # Plain string: the original used an f-string with no placeholders.
        help="What to put in the source gff column.",
    )

    parser.add_argument(
        "-k", "--kingdom",
        default="euk",
        choices=["arc", "bac", "euk"],
        help="What kingdom was used to run rnammer?",
    )

    return
def rnammer2gff(args: argparse.Namespace) -> None:
    """Convert RNAmmer gff2 output to GFF3, wrapping each rRNA in an rRNA_gene.

    Reads lines from ``args.infile``, rewrites the source and type columns,
    builds gene/rRNA parent-child records, assigns sequential IDs, and prints
    GFF3 lines to ``args.outfile``.
    """
    records: List[GFF3Record] = []
    for line in args.infile:
        if line.startswith("#"):
            continue
        sline = line.strip().split("\t")
        rrna_type = sline[8]
        # Map the RNAmmer feature name (column 9) through the kingdom table.
        new_type = TYPE_MAP[args.kingdom][rrna_type.lower()]
        sline[1] = args.source
        sline[2] = new_type
        sline[8] = "."
        rna_record = cast(GFF3Record, GFF3Record.parse("\t".join(sline)))
        # Wrap each rRNA in a synthetic rRNA_gene parent record.
        gene_record = deepcopy(rna_record)
        gene_record.type = "rRNA_gene"
        gene_record.add_child(rna_record)
        # Both parent and child are appended; traverse_children below walks
        # the hierarchy for ID assignment and printing.
        records.append(gene_record)
        records.append(rna_record)

    # Assign IDs: rRNA_geneN for parents, rRNAN for their children.
    num = 0
    for record in GFF(records).traverse_children(sort=True):
        if record.attributes is None:
            attr = GFF3Attributes()
            record.attributes = attr
        else:
            attr = record.attributes

        if record.type == "rRNA_gene":
            num += 1
            attr.id = f"rRNA_gene{num}"
        else:
            attr.id = f"rRNA{num}"
            attr.parent = [
                p.attributes.id
                for p
                in record.parents
                if (p.attributes is not None
                    and p.attributes.id is not None)
            ]

        print(record, file=args.outfile)
    return
| 2,694 | 959 |
"""
23.12.2020 - Felipe Ferreira de Aguiar
Manipulando Strings
* Strings Indices
* Fatiamento de Strings
* Funcões built-in len, abs, type, print
Documentação da aula abaixo:
https://docs.python.org/3/library/functions.html
https://docs.python.org/3/library/stdtypes.html
"""
# ! positive indices [p = 0, y = 1 ... 2 = 8]
texto = 'python s2'
print(texto[8])

# ! negative indices -[9 8 7 6 5 4 3 2 1]
texto = 'python s2'
print(texto[-9])

# ! print test that hides the last character
url = 'www.google.com/'
print(url[:-1])

# ! slice from one index up to (but excluding) another
# Renamed from `str`: the original shadowed the built-in str type.
nombre = 'Felipe Aguiar'
nova_str = nombre[2:6]
print(nova_str)
| 700 | 289 |
# Visit pyGuru on Youtube
import turtle
# Create the pen, set its look and drawing speed, then move (pen up) to the
# top-left starting position for the first letter.
t = turtle.Pen()
t.shape('turtle')
t.width(20)
t.speed(8)
t.up()
t.goto(-190,170)
t.down()
def h():
    """Draw the letter 'H' in blue from the current pen position."""
    t.color('blue')
    t.lt(90)
    t.fd(100)
    t.back(50)
    t.rt(90)
    t.fd(50)
    t.lt(90)
    t.fd(50)
    t.back(100)
def a():
    """Draw the letter 'A' (current color) from the current pen position."""
    t.lt(90)
    t.fd(100)
    t.rt(90)
    t.fd(50)
    t.rt(90)
    t.fd(100)
    t.back(50)
    t.rt(90)
    t.fd(50)
def p():
    """Draw the letter 'P' (current color) from the current pen position."""
    t.lt(90)
    t.fd(100)
    t.rt(90)
    t.fd(50)
    t.rt(90)
    t.fd(50)
    t.rt(90)
    t.fd(50)
    t.lt(90)
    t.fd(50)
def y():
    """Draw the letter 'Y' (current color) from the current pen position."""
    t.lt(90)
    t.fd(50)
    t.lt(90)
    t.fd(25)
    t.rt(90)
    t.fd(50)
    t.back(50)
    t.rt(90)
    t.fd(50)
    t.lt(90)
    t.fd(50)
def n():
    """Draw the letter 'N' in yellow, repositioning first to (-120, 50)."""
    t.color('yellow')
    t.up()
    t.goto(-120,50)
    t.down()
    t.fd(80)
    t.rt(141)
    t.fd(100)
    t.lt(141)
    t.fd(80)
def e():
    """Draw the letter 'E' (current color) from the current pen position."""
    t.lt(90)
    t.fd(80)
    t.rt(90)
    t.fd(60)
    t.back(60)
    t.lt(90)
    t.back(40)
    t.rt(90)
    t.fd(60)
    t.back(60)
    t.lt(90)
    t.back(40)
    t.rt(90)
    t.fd(60)
def w():
    """Draw the letter 'W' in grey after shifting right and up from here."""
    t.color('grey')
    t.up()
    t.fd(30)
    t.lt(90)
    t.fd(80)
    t.down()
    t.rt(170)
    t.fd(90)
    t.lt(120)
    t.fd(50)
    t.rt(70)
    t.fd(50)
    t.lt(120)
    t.fd(90)
def e2():
    """Draw a taller 'E' variant (current color) from the current position."""
    t.lt(90)
    t.fd(100)
    t.rt(90)
    t.fd(50)
    t.back(50)
    t.lt(90)
    t.back(50)
    t.rt(90)
    t.fd(50)
    t.back(50)
    t.lt(90)
    t.back(50)
    t.rt(90)
    t.fd(50)
def r():
    """Draw the letter 'R' (current color) from the current pen position."""
    t.lt(90)
    t.fd(100)
    t.rt(90)
    t.fd(50)
    t.rt(90)
    t.fd(50)
    t.rt(90)
    t.fd(50)
    t.lt(90)
    t.lt(50)
    t.fd(70)
def star():
    """Draw an eight-pointed star (50-unit edges) in the current color."""
    for i in range(8):
        t.fd(50)
        t.lt(225)
def spiral():
    """Draw a growing spiral of circles in the current color."""
    for i in range(30):
        t.rt(i*2)
        t.fd(i)
        t.circle(i*1.5)
# writing text: draws "HAPPY NEW YEAR" letter by letter, repositioning the
# pen (pen up) between letters and changing color each time.
h()
t.color('red')
t.up()
t.rt(90)
t.fd(30)
t.down()
a()
t.color('lime')
t.up()
t.lt(90)
t.fd(80)
t.lt(90)
t.fd(80)
t.down()
p()
t.color('fuchsia')
t.up()
t.lt(90)
t.fd(80)
t.down()
p()
t.color('green')
t.up()
t.lt(90)
t.fd(105)
t.down()
y()
n()
t.color('orange')
t.up()
t.back(80)
t.rt(90)
t.fd(30)
t.down()
e()
w()
# second line: "YEAR"
t.up()
t.goto(-150,-10)
t.back(100)
t.rt(90)
t.down()
t.color('red')
y()
t.color('green')
t.up()
t.back(100)
t.rt(90)
t.fd(30)
t.down()
e2()
t.color('lime')
t.up()
t.fd(30)
t.down()
a()
t.color('blue')
t.up()
t.lt(90)
t.fd(50)
t.lt(90)
t.fd(80)
t.down()
r()
# making stars: thin pen, max speed, one star per corner plus one at right.
t.speed(0)
t.width(3)
t.up()
t.goto(-190,190)
t.down()
t.color('red')
star()
t.up()
t.goto(130,-180)
t.down()
t.color('lime')
star()
t.color('blue')
t.up()
t.goto(150,150)
t.down()
star()
t.color('green')
t.up()
t.goto(-150,-150)
t.down()
star()
t.color('yellow')
t.up()
t.goto(230,0)
t.down()
star()
# Making spirals: one colored spiral in each corner, then keep the window open.
t.color('red')
t.up()
t.goto(-300,250)
t.down()
t.speed(0)
spiral()
t.color('blue')
t.up()
t.goto(-300,-250)
t.down()
t.speed(0)
spiral()
t.color('lime')
t.up()
t.goto(300,250)
t.down()
t.speed(0)
spiral()
t.color('green')
t.up()
t.goto(300,-250)
t.down()
t.speed(0)
spiral()
turtle.done()
| 2,915 | 1,940 |
### Creating data drift monitor
# Compares the baseline dataset to the target dataset on a weekly schedule.
# Assumes ws, train_ds, new_data_ds were defined earlier — TODO confirm.
from azureml.datadrift import DataDriftDetector
monitor = DataDriftDetector.create_from_datasets(workspace=ws,
                                                 name='dataset-drift-detector',
                                                 baseline_data_set=train_ds,
                                                 target_data_set=new_data_ds,
                                                 compute_target='aml-cluster',
                                                 frequency='Week',
                                                 feature_list=['age','height', 'bmi'],
                                                 latency=24)
### After creating the dataset monitor, you can backfill to immediately compare the baseline dataset to existing data in the target dataset, as shown in the following example, which backfills the monitor based on weekly changes in data for the previous six weeks:
import datetime as dt
backfill = monitor.backfill( dt.datetime.now() - dt.timedelta(weeks=6), dt.datetime.now())
### Configure Alerts
# The following code shows an example of scheduling a data drift monitor to run every week, and send an alert if the drift magnitude is greater than 0.3:
# AlertConfiguration was used without being imported anywhere above, which
# raised a NameError at runtime.
from azureml.datadrift import AlertConfiguration

alert_email = AlertConfiguration('data_scientists@contoso.com')
monitor = DataDriftDetector.create_from_datasets(ws, 'dataset-drift-detector',
                                                 baseline_data_set, target_data_set,
                                                 compute_target=cpu_cluster,
                                                 frequency='Week', latency=2,
                                                 drift_threshold=.3,
                                                 alert_configuration=alert_email)
| 1,767 | 395 |
def sol(prblm):
    """Return 1 if the string contains at least two '1' characters, else 0."""
    return 1 if prblm.count("1") >= 2 else 0
# Read the number of cases, then one line per case; count how many lines
# sol() accepts (i.e. contain at least two '1' characters) and print it.
count = 0
for _ in range(int(input())):
    prblm = input()
    count += sol(prblm)
print(count)
| 189 | 84 |
import unittest
from vodem.api import network_provider_fullname
class TestNetworkProviderFullname(unittest.TestCase):
    """Checks that vodem.api.network_provider_fullname returns the expected dict."""

    @classmethod
    def setUpClass(cls):
        # Expected response shape: a single key with an empty provider name.
        cls.valid_response = {
            'network_provider_fullname': '',
        }

    def test_call(self):
        resp = network_provider_fullname()
        self.assertEqual(self.valid_response, resp)
| 371 | 111 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-14 15:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the user_tags app; applied
    # databases depend on it, so do not edit the operations below.

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Generic tagging join table: (content_type, object_id) -> user tags.
        migrations.CreateModel(
            name='TaggedItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_tags_tagged_items', to='contenttypes.ContentType')),
            ],
        ),
        migrations.CreateModel(
            name='UserTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=256, verbose_name='Text')),
            ],
        ),
        # Groups are optionally owned by a user (null = global group).
        migrations.CreateModel(
            name='UserTagGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, verbose_name='Name')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
        ),
        migrations.AddField(
            model_name='usertag',
            name='user_tag_group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_tags.UserTagGroup', verbose_name='User tag group'),
        ),
        migrations.AddField(
            model_name='taggeditem',
            name='user_tags',
            field=models.ManyToManyField(to='user_tags.UserTag', verbose_name='User tag'),
        ),
        migrations.AlterUniqueTogether(
            name='usertaggroup',
            unique_together=set([('user', 'name')]),
        ),
        migrations.AlterUniqueTogether(
            name='usertag',
            unique_together=set([('user_tag_group', 'text')]),
        ),
    ]
| 2,419 | 726 |
import os
import shutil
from ..base import Downloader, Extractor, Recipe
class HgZipDocRecipe(Downloader, Extractor, Recipe):
    """Recipe that downloads and installs the hg-zipdoc Mercurial extension."""

    def __init__(self, *args, **kwargs):
        super(HgZipDocRecipe, self).__init__(*args, **kwargs)
        # Checksum of the pinned source tarball.
        self.sha256 = '0de7075c9be80856f3a1c8968f42cfa0' \
                      '9c44d09068b49c6c67dbf641b55ba8b9'
        self.pythons = ['python2']
        self.name = 'hg-zipdoc'
        self.version = '57b36658dbdf'
        # $version is substituted into the download URL.
        self.url = 'https://bitbucket.org/cstangeland/hg-zipdoc/get/' \
                   '$version.tar.gz'

    def install(self):
        """Copy zipdoc.py into <prefix>/etc/mercurial, creating the directory."""
        src = os.path.join(self.directory, 'zipdoc.py')
        dst = os.path.join(self.prefix_dir, 'etc', 'mercurial', 'zipdoc.py')
        # Renamed from `dir`, which shadowed the builtin.
        dst_dir = os.path.dirname(dst)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        shutil.copy2(src, dst)
| 850 | 339 |
import pandas as pd
# Load the headerless dataset, shuffle all rows, and write the result.
df = pd.read_csv('dataset.csv', header=None)
ds = df.sample(frac=1)
# index=False/header=False keep the output format identical to the input,
# which was read without a header; the original wrote a spurious index
# column plus an integer header row into the shuffled file.
ds.to_csv('shuffled_dataset.csv', index=False, header=False)
print('Shuffled Dataset Generated')
import numpy as np
import tensorflow as tf
import DeepSparseCoding.tf1x.utils.plot_functions as pf
import DeepSparseCoding.tf1x.utils.data_processing as dp
from DeepSparseCoding.tf1x.models.lca_model import LcaModel
from DeepSparseCoding.tf1x.modules.lca_conv_module import LcaConvModule
class LcaConvModel(LcaModel):
  """
  Convolutional LCA model
  Inference is defined within the graph
  """
  def __init__(self):
    super(LcaConvModel, self).__init__()

  def load_params(self, params):
    """
    Load parameters into object
    Inputs:
     params: [dict] model parameters
    Modifiable Parameters:
      stride_x
      stride_y
      patch_size_y
      patch_size_x
    """
    super(LcaConvModel, self).load_params(params)
    # Grayscale inputs: append a singleton channel dimension.
    if len(self.params.data_shape) == 2:
      self.params.data_shape += [1]
    # Leading None is the (variable) batch dimension.
    self.input_shape = [None,] + self.params.data_shape

  def build_module(self, input_node):
    # Construct the convolutional LCA inference/learning module.
    module = LcaConvModule(input_node, self.params.num_neurons, self.sparse_mult,
        self.eta, self.params.thresh_type, self.params.rectify_a,
        self.params.num_steps, self.params.patch_size_y, self.params.patch_size_x,
        self.params.stride_y, self.params.stride_x, self.params.eps)
    return module

  def generate_plots(self, input_data, input_labels=None):
    """
    Plot weights, reconstruction, and gradients
    Inputs:
      input_data: data object containing the current image batch
      input_labels: data object containing the current label batch
    """
    # Pick a colormap from the channel count: RGB gets the default, grayscale gets Greys_r.
    if input_data.shape[-1] == 3:
      cmap = None
    elif input_data.shape[-1] == 1:
      cmap = "Greys_r"
    else:
      assert False, ("Input_data.shape[-1] should indicate color channel, and should be 1 or 3")
    feed_dict = self.get_feed_dict(input_data, input_labels)
    weights, recon, activity = tf.compat.v1.get_default_session().run(
        [self.module.w, self.module.reconstruction, self.get_encodings()], feed_dict)
    recon = dp.rescale_data_to_one(recon)[0]
    # Rescale per-filter, then move the filter axis first for tiled plotting.
    weights = np.transpose(dp.rescale_data_to_one(weights.T)[0].T, axes=(3,0,1,2))
    current_step = str(self.global_step.eval())
    filename_suffix = "_v"+self.params.version+"_"+current_step.zfill(5)+".png"
    input_data = dp.rescale_data_to_one(input_data)[0]
    # Flatten spatial dims so the histogram is over per-feature activations.
    num_features = activity.shape[-1]
    activity = np.reshape(activity, [-1, num_features])
    fig = pf.plot_activity_hist(activity, title="LCA Activity Histogram",
        save_filename=self.params.disp_dir+"lca_act_hist"+filename_suffix)
    pf.plot_data_tiled(input_data[0,...], normalize=False,
        title="Images at step "+current_step, vmin=None, vmax=None, cmap=cmap,
        save_filename=self.params.disp_dir+"images"+filename_suffix)
    pf.plot_data_tiled(recon[0,...], normalize=False,
        title="Recons at step "+current_step, vmin=None, vmax=None, cmap=cmap,
        save_filename=self.params.disp_dir+"recons"+filename_suffix)
    pf.plot_data_tiled(weights, normalize=False, title="Dictionary at step "+current_step,
        vmin=np.min(weights), vmax=np.max(weights), cmap=cmap,
        save_filename=self.params.disp_dir+"phi"+filename_suffix)
    for weight_grad_var in self.grads_and_vars[self.sched_idx]:
      grad = weight_grad_var[0][0].eval(feed_dict)
      shape = grad.shape
      name = weight_grad_var[0][1].name.split('/')[1].split(':')[0]#np.split
      #TODO this function is breaking due to the range of gradients
      #pf.plot_data_tiled(np.transpose(grad, axes=(3,0,1,2)), normalize=True,
      #  title="Gradient for phi at step "+current_step, vmin=None, vmax=None, cmap=cmap,
      #  save_filename=self.params.disp_dir+"dphi"+filename_suffix)
| 3,616 | 1,295 |
from django import forms
from django.core.exceptions import ValidationError
from django.forms.models import construct_instance, InlineForeignKeyField
from problems.forms.fields import AutoFilledField
class ProblemObjectForm(forms.Form):
    """Base form for problem-scoped objects; captures the problem context."""

    def __init__(self, *args, **kwargs):
        # Pull the context kwargs out before the base Form sees them.
        for attr_name in ("problem", "revision", "owner"):
            setattr(self, attr_name, kwargs.pop(attr_name))
        super(ProblemObjectForm, self).__init__(*args, **kwargs)
class DBProblemObjectModelForm(forms.ModelForm):
    """ModelForm that auto-fills base_problem / commit_id from the revision."""

    def __init__(self, *args, **kwargs):
        # Capture the problem context before delegating to ModelForm.
        for attr_name in ("problem", "revision", "owner"):
            setattr(self, attr_name, kwargs.pop(attr_name))
        super(DBProblemObjectModelForm, self).__init__(*args, **kwargs)
        self.fields["base_problem"] = AutoFilledField(initial=self.problem)
        self.fields["commit_id"] = AutoFilledField(initial=self.revision.commit_id)
class ProblemObjectModelForm(forms.ModelForm):
    """ModelForm whose `problem` FK is forced to the current revision.

    The custom `_post_clean`/`save` pair injects `self.revision` as the
    instance's `problem` before model validation and before persisting.
    """

    def __init__(self, *args, **kwargs):
        # Context kwargs supplied by the view; removed before ModelForm init.
        self.problem = kwargs.pop("problem")
        self.revision = kwargs.pop("revision")
        self.owner = kwargs.pop("owner")
        super(ProblemObjectModelForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Force cleaned_data['problem'] to the current revision."""
        cleaned_data = super(ProblemObjectModelForm, self).clean()
        # NOTE(review): 'problem' is set to the *revision*, not self.problem —
        # looks intentional (the revision acts as the FK target); confirm.
        cleaned_data["problem"] = self.revision
        return cleaned_data

    def _get_validation_exclusions(self):
        """Make sure 'problem' is validated even though it is not a form field."""
        exclude = super(ProblemObjectModelForm, self)._get_validation_exclusions()
        if 'problem' in exclude:
            exclude.remove("problem")
        return exclude

    def _post_clean(self):
        # Re-implementation of ModelForm._post_clean so that the injected
        # 'problem' value is assigned before full_clean runs.
        opts = self._meta
        exclude = self._get_validation_exclusions()
        try:
            self.instance = construct_instance(self, self.instance, opts.fields, exclude)
        except ValidationError as e:
            self._update_errors(e)
        self.instance.problem = self.cleaned_data["problem"]
        # Foreign Keys being used to represent inline relationships
        # are excluded from basic field value validation. This is for two
        # reasons: firstly, the value may not be supplied (#12507; the
        # case of providing new values to the admin); secondly the
        # object being referred to may not yet fully exist (#12749).
        # However, these fields *must* be included in uniqueness checks,
        # so this can't be part of _get_validation_exclusions().
        for name, field in self.fields.items():
            if isinstance(field, InlineForeignKeyField):
                exclude.append(name)
        try:
            self.instance.full_clean(exclude=exclude, validate_unique=False)
        except ValidationError as e:
            self._update_errors(e)
        # Validate uniqueness if needed.
        if self._validate_unique:
            self.validate_unique()

    def save(self, commit=True):
        """Save the instance, re-forcing the injected 'problem' value first."""
        super(ProblemObjectModelForm, self).save(commit=False)
        self.instance.problem = self.cleaned_data["problem"]
        if commit:
            self.instance.save()
            self.save_m2m()
        return self.instance
| 3,130 | 873 |
""" Plotting ProjectionPursuitRegressor
This example trains a regressor to fit data in R1 against only a single
output, so it can be visualized in 2D.
"""
import numpy as np
import sys
sys.path.append("..")
from skpp import ProjectionPursuitRegressor
from matplotlib import pyplot as plt
X = np.arange(100).reshape(100, 1)
Y = (3*X**2 - 2*X)[:,0]/1000 + np.random.randn(100)
estimator = ProjectionPursuitRegressor()
estimator.fit(X, Y)
plt.scatter(X, Y, c='k', s=10, label='dummy data points')
plt.plot(estimator.predict(X), label='relationship fit')
plt.legend()
plt.title('The relationship found between input and output\nfor a ' +
'one-dimensional example.')
plt.xlabel('Single-dimensional input X')
plt.ylabel('Single-dimensionsl output Y')
plt.show()
| 762 | 289 |
import os
from flask import Flask, send_from_directory, request, redirect, url_for, flash
from flask_cors import CORS
from flask import render_template
from flask import url_for
from werkzeug.utils import secure_filename
import re
import subprocess
import json
# Paths: the Flask app serves the prebuilt frontend from dist/.
web_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.join(web_dir, 'dist')
app = Flask(__name__, root_path=root_dir)
CORS(app, supports_credentials=True)  # allow cookies on cross-origin requests
# Uploads land next to the app; only archive/image/text extensions accepted.
UPLOAD_FOLDER = os.path.join(web_dir, 'uploads')
ALLOWED_EXTENSIONS = set(['zip', 'txt', 'png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def get_result_file_path(logStr):
    """Extract the result-file path from a 'RESULT_FILE:<path>' log message.

    Returns the path only when the file actually exists on disk;
    otherwise the literal 'No Result'.
    """
    match = re.search(r'RESULT_FILE:(.*)', logStr)
    if match and os.path.isfile(match.group(1)):
        return match.group(1)
    return 'No Result'
def allowed_file(filename):
    """True when filename carries an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def check_done(logfile):
    """Report whether the capture job has finished.

    True when the log contains the '--ALL DONE--' marker, and also when the
    log file does not exist at all (treated as "nothing running").
    """
    if not os.path.isfile(logfile):
        return True
    with open(logfile) as fh:
        return any('--ALL DONE--' in line for line in fh)
@app.route('/')
def index():
    """Serve the built single-page-app entry point from dist/."""
    return send_from_directory(os.path.join(web_dir, 'dist'), "index.html")
@app.route('/GetLog')
def GetLog():
    """Return capture progress as JSON: the log lines plus a result-zip URL.

    Reads 'htmlcapture.log' from the working directory. When the last line
    signals completion ('--ALL DONE--'), the second-to-last line is parsed
    for the result-file path, which is exposed as a static URL. Returns an
    empty string while no log file exists yet.
    """
    logfile = 'htmlcapture.log'
    if os.path.isfile(logfile):
        ret = {'log':[], 'zipfile':'' }
        with open(logfile) as f:
            for line in f:
                ret['log'].append(line)
            f.close()  # NOTE(review): redundant — the `with` already closes f
        if len(ret['log']) > 2 and ('--ALL DONE--' in ret['log'][-1]):
            # Second-to-last line carries 'RESULT_FILE:<path>'.
            resultFilepath = get_result_file_path(ret['log'][-2])
            if resultFilepath == 'No Result':
                ret['zipfile'] = 'No Result'
            else:
                # Express the result path relative to dist/static so that
                # url_for('static', ...) can serve it.
                static_path = os.path.join(web_dir, 'dist', 'static')
                rel_path = os.path.relpath(resultFilepath, static_path)
                ret['zipfile'] = url_for('static', filename=rel_path)
        return json.dumps(ret)
    else:
        return ''
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Accept an uploaded archive and launch the capture job on it.

    POST with a 'file' part (extensions limited by ALLOWED_EXTENSIONS).
    The upload is stored under UPLOAD_FOLDER and '../runall.py' is started
    asynchronously to produce '<basename>_result.zip' under dist/static.
    Returns 'Job start' on success, 'No any file' / 'NG' otherwise.
    """
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            return "No any file"
        file = request.files['file']
        # if user does not select file, browser also
        # submit a empty part without filename
        if file.filename == '':
            return "No any file"
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            basename = os.path.splitext(filename)[0]
            save_zip = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            # output_dir = os.path.join(web_dir, '../data/result')
            # output_dir = os.path.realpath(output_dir)
            output_zip = os.path.join(web_dir, 'dist', 'static', basename + '_result.zip')
            file.save(save_zip)
            # NOTE(review): shell=True with an interpolated path. The filename
            # is sanitized by secure_filename, but prefer an argv list without
            # a shell; confirm no other user input can reach this string.
            cmd = '../.env/bin/python ../runall.py {} {}'.format(save_zip, output_zip)
            print(cmd)
            subprocess.Popen([cmd], shell=True)
            return 'Job start'
    return 'NG'
| 3,323 | 1,054 |
from typing import List, Dict
from .Area import Area
class Level:
    """A game level: numeric course id, display name, metadata and areas."""

    def __init__(self, course_id : int, name : str, properties : Dict = None, areas : List[Area] = None, offset : int = None):
        self.course_id = course_id
        self.name = name
        self.offset = offset
        # Fall back to fresh containers so instances never share defaults.
        self.properties = properties if properties else {}
        self.areas = areas if areas else []

    def __str__(self):
        return f'<Level {hex(self.course_id)}: {self.name}>'
import math
from typing import Optional
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
import sys
# Make sibling modules importable when this file is run as a script.
sys.path.append("..")
from place_db import PlaceDB
from build_graph import build_graph_from_placedb
class PlaceEnv(gym.Env):
    """Gym environment that places macros one-by-one onto a grid canvas."""

    def __init__(self, placedb, grid = 32):
        # The canvas needs head-room: at least 1.5 cells per macro.
        assert grid * grid >= 1.5 * placedb.node_cnt
        self.placedb = placedb
        self.grid = grid
        self.max_height = placedb.max_height
        self.max_width = placedb.max_width
        self.num_macro = placedb.node_cnt
        self.node_name_list = list(placedb.node_info.keys())
        # One discrete action per canvas cell.
        self.action_space = spaces.Discrete(grid * grid)
        self.state = None
        self.graph = build_graph_from_placedb(placedb)

    def reset(self):
        """Start a fresh episode: empty canvas, nothing placed yet."""
        blank_canvas = np.zeros((self.grid, self.grid))
        self.state = (blank_canvas, 0, self.num_macro, {})
        return self.state

    # hpwl without pin offset
    def comp_simple_hpwl(self, node_pos):
        """Half-perimeter wirelength in grid units, ignoring pin offsets."""
        simple_hpwl = 0
        for net_name in self.placedb.net_info:
            lo_x = lo_y = self.grid
            hi_x = hi_y = 0
            for node_name in self.placedb.net_info[net_name]:
                px, py = node_pos[node_name]
                lo_x = min(lo_x, px)
                lo_y = min(lo_y, py)
                hi_x = max(hi_x, px)
                hi_y = max(hi_y, py)
            # +1 on each side so a single-cell net still contributes 2.
            simple_hpwl += (hi_x - lo_x + 1) + (hi_y - lo_y + 1)
        return simple_hpwl

    # hpwl within pin offset
    def comp_hpwl(self):
        pass

    def step(self, action):
        """Place the next macro at cell `action`; collision ends the episode."""
        err_msg = f"{action!r} ({type(action)}) invalid"
        assert self.action_space.contains(action), err_msg
        canvas, num_macro_placed, num_macro, node_pos = self.state
        x, y = divmod(action, self.grid)
        reward = 0
        done = False
        if canvas[x][y] == 1:
            # Cell already occupied: terminate with zero reward.
            done = True
        else:
            canvas[x][y] = 1
            node_pos[self.node_name_list[num_macro_placed]] = (x, y)
            num_macro_placed += 1
            if num_macro_placed == num_macro:
                # All macros placed: reward is an upper bound minus the HPWL.
                reward = self.grid * 2 * len(self.placedb.net_info) - self.comp_simple_hpwl(node_pos)
                print("reward = {}".format(reward))
                done = True
        self.state = (canvas, num_macro_placed, num_macro, node_pos)
        return self.state, reward, done, {}

    def render(self, mode='human'):
        return None

    def close(self):
        return None
| 3,182 | 1,064 |
from arjuna.engine.unitee.enums import *
# Expected Python type for each built-in test property; consulted by
# validate_built_in_props / get_value_type below.
built_in_prop_type = {
    BuiltInProp.ID : str,
    BuiltInProp.PRIORITY : int,
    BuiltInProp.THREADS : int,
    BuiltInProp.NAME : str,
    BuiltInProp.AUTHOR : str,
    BuiltInProp.IDEA : str,
    BuiltInProp.UNSTABLE : bool,
    BuiltInProp.COMPONENT : str,
    BuiltInProp.APP_VERSION : str,
}
def validate_built_in_props(props):
    """Validate that recognized built-in properties have the expected type.

    None values are always accepted; unknown property names are ignored.

    Raises:
        Exception: when a built-in property holds a value of the wrong type.
            (Fix: the message previously read "should of type" — grammar.)
    """
    for name, value in props.items():
        if not is_builtin_prop(name.upper()):
            continue
        expected_type = built_in_prop_type[BuiltInProp[name.upper()]]
        actual_type = type(value)
        if value is not None and actual_type is not expected_type:
            raise Exception("Built-in property {} should be of type {}. Found {} of type {}".format(
                name,
                expected_type,
                value,
                actual_type
            ))
def get_value_type(built_in_prop_name):
    """Return the expected Python type for a built-in property name."""
    prop = BuiltInProp[built_in_prop_name.upper()]
    return built_in_prop_type[prop]
def is_builtin_prop(name):
    """True when `name` (case-insensitive) is a BuiltInProp member."""
    candidate = name.upper()
    return candidate in BuiltInProp.__members__
import numpy as np
import torch
from agents import dqn_agent
from agents.prioritized_replay_buffer_sumtree import PrioritizedReplayBuffer
class PrioritizedAgent(dqn_agent.Agent):
    """Interacts with and learns from the environment.

    DQN agent backed by a prioritized replay buffer (sum-tree); learning
    applies importance-sampling weights whose exponent anneals toward 1.
    """
    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        super().__init__(state_size, action_size, seed)
        # Replay memory
        self.memory = PrioritizedReplayBuffer(dqn_agent.BUFFER_SIZE, dqn_agent.BATCH_SIZE, seed, dqn_agent.device)
        self.importance_sampling_coeff = 0.4 # importance-sampling, from initial value increasing to 1
        self.importance_increment_per_sampling = 0.001
    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.
        Params
        ======
            experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done)
                tuples plus the sampled indices and sampling probabilities
            gamma (float): discount factor
        """
        self.optimizer.zero_grad()
        states, actions, rewards, next_states, dones, indices, probabilities = experiences
        # TD target bootstrapped from the frozen target network.
        q_expected = rewards + (gamma * self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)) * (1 - dones)
        q_current = self.qnetwork_local(states).gather(1, actions)
        delta = (q_expected - q_current)
        # New priorities are the absolute TD errors.
        # NOTE(review): np.abs on a detached torch tensor returns a tensor,
        # not an ndarray — confirm batch_update accepts that.
        absolute_errors = np.abs(delta.detach())
        self.memory.batch_update(indices, absolute_errors)
        # Importance-sampling weights: (1/N * 1/P(i)) ** beta.
        sampling_weights = np.power((1 / len(self.memory)) * (1 / probabilities), self.importance_sampling_coeff)
        loss = torch.mean((delta * sampling_weights)**2)
        # Anneal beta toward 1 (full bias correction), capped at 1.0.
        self.importance_sampling_coeff += self.importance_increment_per_sampling
        self.importance_sampling_coeff = min(self.importance_sampling_coeff, 1.0)
        loss.backward()
        self.optimizer.step()
        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, dqn_agent.TAU)
| 2,184 | 670 |
"""CMS Plugins for the ``cmsplugin_pdf`` app."""
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import PDFPluginModel
class PDFPlugin(CMSPluginBase):
    """django-cms plugin that renders an uploaded PDF via a partial template."""
    model = PDFPluginModel
    name = _('PDF File')
    render_template = 'cmsplugin_pdf/partials/pdf.html'
    def render(self, context, instance, placeholder):
        # Expose the plugin instance to the template as `pdf_plugin`.
        context.update({'pdf_plugin': instance})
        return context
plugin_pool.register_plugin(PDFPlugin)
| 528 | 160 |
# Authored by : gusdn3477
# Co-authored by : -
# Link : http://boj.kr/f274a6ac753440deb8c47de3ee127244
import sys

def input():
    """Fast stdin line reader (deliberately shadows the builtin for speed)."""
    return sys.stdin.readline().rstrip()

def min_coin_count(n):
    """Return the minimum number of 5- and 2-coins summing to n, or -1.

    Greedy over the count of 5-coins, from as many as possible downward;
    the first remainder divisible by 2 gives the minimal total count.
    Equivalent to the original divmod/remainder case analysis.
    """
    for fives in range(n // 5, -1, -1):
        remainder = n - 5 * fives
        if remainder % 2 == 0:
            return fives + remainder // 2
    return -1

# Fix: the computation used to run at module import time, reading stdin
# unconditionally; guarding it keeps script behavior identical while making
# the module importable (and the logic testable).
if __name__ == "__main__":
    N = int(input())
    print(min_coin_count(N))
import pygame
pygame.font.init()
# Screen
screen_width = 800
screen_height = 550
# Colors (RGB)
RED = (134, 28, 9)
YELLOW = (212, 169, 65)
WHITE = (255, 255, 255)
GREEN = (0, 127, 33)
BLUE = (0, 97, 148)
# Rectangles constants — (width, height) pairs for the map pieces
RECT_1 = (20, 20)
RECT_2 = (60, 20)
RECT_3 = (20, 60)
RECT_4 = (30, 30)
RECT_5 = (20, 108)
RECT_6 = (108, 10)
RECT_7 = (60, 30)
RECT_8 = (30, 60)
RECT_9 = (20, 168)
RECT_10 = (168, 10)
# Screen refresh (frames per second)
fps = 20
# Wall group — shared sprite groups used across the game modules
walls = pygame.sprite.Group()
ball_sprites = pygame.sprite.Group()
TAM_TANK = 32  # tank sprite size in pixels
# Clock
clk = pygame.time.Clock()
# Tanks — sprite images loaded at import time (requires the Sprites/ folder)
tank_1 = pygame.image.load("Sprites/Tank_1.png")
tank_2 = pygame.image.load("Sprites/Tank_2.png")
| 675 | 386 |
# Build script for the `_board` C++ extension (SWIG wrapper board_wrap.cxx).
from distutils.core import setup, Extension
sources = ['board_wrap.cxx', 'board.cpp',
           'board_bits.cpp', 'init.cpp',
           'lineshapes.cpp', 'pns.cpp', 'shapes.cpp']
# NOTE(review): /O2 is an MSVC-only flag; gcc/clang builds would need -O2.
module = Extension(
    '_board', sources=sources,
    extra_compile_args=['/O2'],
    language='c++'
)
setup(name='board',
      ext_modules=[module],
      py_modules=['board'])
# -*- coding: utf-8 -*-
"""Pod / Node management API"""
from flask import g, abort
from citadel.libs.view import create_api_blueprint, user_require
from citadel.models.container import Container
from citadel.rpc.client import get_core
# Blueprint for the pod/node management endpoints, mounted at /pod.
bp = create_api_blueprint('pod', __name__, 'pod')
def _get_pod(name):
    """Fetch pod `name` from the current zone's core; 404 when absent."""
    pod = get_core(g.zone).get_pod(name)
    if pod:
        return pod
    abort(404, 'pod `%s` not found' % name)
@bp.route('/')
@user_require(False)
def get_all_pods():
    """List all pods

    **Example response**:

    .. sourcecode:: http

        HTTP/1.1 200 OK
        Content-Type: application/json

        [
            {
                "name": "eru",
                "desc": "eru test pod",
                "__class__": "Pod"
            }
        ]
    """
    return get_core(g.zone).list_pods()
@bp.route('/<name>')
@user_require(False)
def get_pod(name):
    """Get a single pod by name

    Responds 404 when the pod does not exist.

    **Example response**:

    .. sourcecode:: http

        HTTP/1.1 200 OK
        Content-Type: application/json

        {
            "name": "eru",
            "desc": "eru test pod",
            "__class__": "Pod"
        }
    """
    return _get_pod(name)
@bp.route('/<name>/nodes')
@user_require(False)
def get_pod_nodes(name):
    """List nodes under a pod

    Responds 404 when the pod does not exist. Each node carries its
    endpoint, cpu/memory capacity, labels and availability; the ``info``
    field is the node's raw docker ``info`` blob as a JSON-encoded string.

    **Example response**:

    .. sourcecode:: http

        HTTP/1.1 200 OK
        Content-Type: application/json

        [
            {
                "name": "c1-eru-2.ricebook.link",
                "endpoint": "tcp://xxx.xxx.xxx.xxx:2376",
                "podname": "eru",
                "cpu": {"0": 75},
                "memory": 855085056,
                "info": "<JSON-encoded docker info blob, elided>",
                "available": true,
                "labels": {},
                "__class__": "Node"
            }
        ]
    """
    pod = _get_pod(name)
    return get_core(g.zone).get_pod_nodes(pod.name)
@bp.route('/<name>/containers')
@user_require(False)
def get_pod_containers(name):
    """List containers scheduled on a pod (404 when the pod is missing)."""
    pod = _get_pod(name)
    return Container.get_by(zone=g.zone, podname=pod.name)
@bp.route('/<name>/networks')
@user_require(False)
def list_networks(name):
    """List networks under a pod

    Responds 404 when the pod does not exist.

    **Example response**:

    .. sourcecode:: http

        HTTP/1.1 200 OK
        Content-Type: application/json

        [
            {"name": "host", "subnets": [], "__class__": "Network"},
            {"name": "bridge", "subnets": ["172.17.0.0/16"], "__class__": "Network"}
        ]
    """
    pod = _get_pod(name)
    return get_core(g.zone).list_networks(pod.name)
| 5,088 | 2,138 |
import time
from .service import Service
import re
from ..utils.Utils import execute_shell_command
from manafa.utils.Logger import log
class HunterService(Service):
    """Collects Hunter instrumentation traces from adb logcat.

    Workflow: clean() resets logcat, stop() dumps matching log lines to a
    results file, parseFile() builds self.trace — a per-function dict of
    {call_index: {begin_time, end_time, ...}} — and
    addConsumptionToTraceFile() rewrites the log with energy figures.
    Times in self.trace are seconds (logcat values arrive in a 1e-3 scale).
    """
    def __init__(self, boot_time=0, output_res_folder="hunter"):
        Service.__init__(self, output_res_folder)
        self.trace = {}
        self.boot_time = boot_time
        self.end_time = boot_time
    def config(self, **kwargs):
        # No configuration needed for this service.
        pass
    def init(self, boot_time=0, **kwargs):
        """Reset the trace for a new run starting at boot_time."""
        self.boot_time = boot_time
        self.trace = {}
    def start(self, run_id=None):
        """Begin a run by clearing previous results and the logcat buffer."""
        self.clean()
    def get_results_filename(self, run_id):
        """Build the results-file path; falls back to epoch seconds as run id."""
        if run_id is None:
            run_id = execute_shell_command("date +%s")[1].strip()
        return self.results_dir + "/hunter-%s-%s.log" % (run_id, str(self.boot_time))
    def stop(self, run_id=None):
        """Dump Hunter-tagged logcat lines ('[<>]...m=example...]') to a file."""
        filename = self.get_results_filename(run_id)
        time.sleep(1)
        execute_shell_command("adb logcat -d | grep -io \"[<>].*m=example.*]\" > %s" % filename)
        return filename
    def clean(self):
        """Remove previous result files and flush the device log buffer."""
        execute_shell_command("find %s -type f | xargs rm " % self.results_dir)
        execute_shell_command("adb logcat -c") # or adb logcat -b all -c
    def parseFile(self, filename, functions, instrument=False):
        """array functions to decide which methods collect instrumentation data
        variable instrument to decide if array functions is an array of methods
        to collect information or to discard"""
        with open(filename, 'r') as filehandle:
            lines = filehandle.read().splitlines()
            self.parseHistory(lines, functions, instrument)
    def parseHistory(self, lines_list, functions, instrument=False):
        """Populate self.trace from raw log lines ('>' = entry, '<' = return)."""
        for i, line in enumerate(lines_list):
            if re.match(r"^>", line):
                # Function-entry line: record a new begin_time for this call.
                before_components = re.split('^>', line.replace(" ", ""))
                components = re.split('[,=\[\]]', before_components[1])
                function_name = components[0].replace("$", ".")
                add_function = self.verifyFunction(function_name, functions, instrument)
                if add_function:
                    # components[6] holds the timestamp; 1e-3 scales to seconds.
                    begin_time = components[6]
                    if function_name not in self.trace:
                        self.trace[function_name] = {}
                        self.trace[function_name][0] = {'begin_time': float(begin_time) * (pow(10, -3))}
                    else:
                        self.trace[function_name][len(self.trace[function_name])] = {
                            'begin_time': float(begin_time) * (pow(10, -3))}
            elif re.match(r"^<", line):
                # Function-return line: close the most recent open call.
                before_components = re.split('^<', line.replace(" ", ""))
                components = re.split('[,=\[\] ]', before_components[1])
                function_name = components[0].replace("$", ".")
                add_function = self.verifyFunction(function_name, functions, instrument)
                if add_function:
                    end_time = components[6]
                    self.updateTraceReturn(function_name, end_time)
            else:
                log("invalid line" + line)
    def addConsumption(self, function_name, position, consumption, per_component_consumption, metrics):
        """Attach energy figures to call `position` of `function_name`."""
        self.trace[function_name][position].update(
            {
                'checked': False,
                'consumption': consumption,
                'per_component_consumption': per_component_consumption,
                'metrics': metrics
            }
        )
    def addConsumptionToTraceFile(self, filename, functions, instrument=False):
        """Rewrite the trace file with per-call cpu consumption and time.

        Produces '<dir>/[edited]<basename>' and removes the original file.
        NOTE(review): if the first line matches neither '>' nor '<',
        function_name is used before assignment — confirm input format.
        """
        split_filename = re.split("/", filename)
        new_filename = "/".join(split_filename[0: len(split_filename) - 1])
        new_filename += '[edited]' + split_filename[len(split_filename) - 1]
        with open(filename, 'r+') as fr, open(new_filename, 'w') as fw:
            for line in fr:
                checked = False
                function_begin = ">"
                if re.match(r"^>", line):
                    before_components = re.split('^>', line)
                    components = re.split('[,=\[\] ]', before_components[1])
                    function_name = components[0].replace("$", ".")
                elif re.match(r"^<", line):
                    before_components = re.split('^<', line)
                    components = re.split('[,=\[\] ]', before_components[1])
                    function_name = components[0].replace("$", ".")
                    checked = True
                    function_begin = "<"
                add_function = self.verifyFunction(function_name, functions, instrument)
                if add_function:
                    consumption, time = self.returnConsumptionAndTimeByFunction(function_name, checked)
                    new_line = function_begin + function_name + " [m=example, " + 'cpu = ' + str(
                        consumption) + ', t = ' + str(time) + ']\n'
                    fw.write(new_line)
        execute_shell_command("rm %s" % filename)
        return new_filename
    '''
    Returns cpu consumption instead total consumption
    '''
    def returnConsumptionAndTimeByFunction(self, function_name, checked):
        """Return (cpu_consumption, time) for the first unchecked call.

        When `checked` is True the call is consumed (marked checked) and its
        end_time is returned; otherwise only its begin_time is reported.
        """
        consumption = 0.0
        cpu_consumption = 0.0
        da_time = 0.0
        for i, times in enumerate(self.trace[function_name]):
            results = self.trace[function_name][i]
            if not results['checked']:
                if checked:
                    consumption = results['consumption']
                    per_component_consumption = results['per_component_consumption']
                    cpu_consumption = per_component_consumption['cpu']
                    # Fall back to the run's end_time for still-open calls.
                    da_time = results['end_time'] if 'end_time' in results else self.end_time
                    self.updateChecked(function_name, i)
                    return cpu_consumption, da_time
                da_time = results['begin_time']
                return cpu_consumption, da_time
        return cpu_consumption, da_time
    def updateChecked(self, function_name, position):
        """Mark call `position` of `function_name` as already consumed."""
        self.trace[function_name][position].update(
            {
                'checked': True
            }
        )
    def updateTraceReturn(self, function_name, end_time):
        """Close the latest still-open call of `function_name` (LIFO match)."""
        i = len(self.trace[function_name]) - 1 if function_name in self.trace else -1
        while i >= 0:
            times = self.trace[function_name][i]
            if 'end_time' not in times:
                end = float(end_time) * (pow(10, -3))
                times.update({'end_time': end})
                # Track the global latest return seen so far.
                if end > self.end_time:
                    self.end_time = end
                break
            i -= 1
    # Verify if it is to add the function to hunter_trace or get consumption
    @staticmethod
    def verifyFunction(function_name, functions, add_function=False):
        """Decide whether `function_name` passes the include/exclude filter.

        With an empty filter everything passes; otherwise `add_function`
        flips the meaning between allow-list and deny-list.
        """
        if len(functions) == 0:
            return True
        res = not add_function
        for function in functions:
            if function in function_name:
                res = not res
                break
        return res
| 7,068 | 1,924 |
import hlt
from hlt import constants
import logging
# Import my stuff
import strategies
import helpers
game = hlt.Game()  # handshake with the halite engine (pre-game phase)
# Pre-processing area
ship_status = {}       # per-ship behavior/state label
ship_destination = {}  # per-ship target position
class parameters():
    """Tunable bot constants, derived from the engine's game constants.

    NOTE: reads the module-global `game`, so it must be instantiated after
    `hlt.Game()` has connected.
    """
    def __init__(self):
        # Ship numbers
        self.max_ships = 30
        self.min_ships = 2
        # dropoff parameters
        self.large_distance_from_drop = 10
        self.farthest_allowed_dropoff = game.game_map.width/2
        self.dropoff_dense_requirement = constants.DROPOFF_COST
        self.max_dropoffs = 1
        # Halite collection parameters
        self.minimum_useful_halite = constants.MAX_HALITE/10
        self.sufficient_halite_for_droping = constants.MAX_HALITE
        self.density_kernal_side_length = 3
        self.search_region = 1
        self.number_of_dense_spots_to_check = 10
        self.explore_dense_requirement = self.minimum_useful_halite*self.density_kernal_side_length**2
        # Turn based parameters
        self.turn_to_stop_spending = 300
        self.crash_return_fudge = 10 # constants.MAX_TURNS - game.game_map.width/2
params = parameters()
# Start
game.ready("BH4-test")
logging.info("Successfully created bot! Player ID is {}.".format(game.my_id))
# Game Loop
while True:
    hd = helpers.halite_density(game.game_map, params)
    m = max([max(x) for x in hd])  # densest cell on the map
    # m <= constants.MAX_HALITE*params.density_kernal_side_length**2
    # NOTE(review): `0*params.explore_dense_requirement` is always 0, so
    # expand runs whenever any density is positive — looks like a disabled
    # threshold left over from testing; confirm intent.
    if m > 0*params.explore_dense_requirement:
        strategies.expand(game, ship_status, ship_destination, params)
    else:
        logging.info("Started vacuum")
        strategies.vacuum(game, ship_status, ship_destination, params)
| 1,650 | 574 |
# importing dependencies
import re
import inltk
import nltk
nltk.download('punkt')  # sentence/word tokenizer models, fetched at import
import io
import random
import string
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from googlesearch import search
##Setting up marathi stopwords
lang='mr'
f=open("marathi_corpus/stop_marathi.txt",'r')
stop_words=f.readlines()
stm=[]
for i in stop_words:
    # NOTE(review): i.strip() is a no-op (result discarded); the re.sub
    # below removes the newline anyway.
    i.strip()
    stm.append(re.sub('\n',"",i))
f.close()
#reading corpus
f=open('marathi_corpus/covid.txt')
raw=f.read()
f.close()
sent_tokens=nltk.sent_tokenize(raw)
word_tokens=nltk.word_tokenize(raw)
#text preprocessing:
remove_punct_dict=dict((ord(punct),None) for punct in string.punctuation)#removing punctuation
def preprocess(text):
    """Word-tokenize `text` after stripping all punctuation."""
    cleaned = text.translate(remove_punct_dict)
    return nltk.word_tokenize(cleaned)
##greetings
greeting_inputs=("नमस्कार","हाय")
greeting_res=("नमस्कार","हाय")
def greet_sent(sentence):
    """Return a random greeting when the sentence contains one, else None."""
    for token in sentence.split():
        if token in greeting_inputs:
            return random.choice(greeting_res)
    return None
thank_list=['आभार', 'धन्यवाद', 'बाय', "खूप खूप धन्यवाद"]
def bye(sentence):
    """Return a random farewell when the sentence contains a thanks/bye word."""
    tokens = sentence.split()
    for token in tokens:
        if token in thank_list:
            return random.choice(thank_list)
    return None
#return from knowledge base
def response(user_response):
    """Answer a query from the corpus via TF-IDF cosine similarity.

    Temporarily appends the query to sent_tokens, vectorizes, and picks the
    most similar real sentence. Falls back to a Google search (top 3 URLs)
    when nothing in the corpus is similar enough.
    """
    bot_response=''
    sent_tokens.append(user_response)
    tfvec=TfidfVectorizer(tokenizer=preprocess,stop_words=stm)
    tfidf=tfvec.fit_transform(sent_tokens)
    vals=cosine_similarity(tfidf[-1],tfidf)
    # -2: the most similar entry excluding the query itself (always -1).
    idx=vals.argsort()[0][-2]
    flat=vals.flatten()
    flat.sort()
    sent_tokens.pop()
    # Best similarity score excluding the query-vs-query match.
    req_tfidf=flat[-2]
    if (req_tfidf==0):
        bot_response=bot_response+"मला माफ करा. मला कळलं नाही तुम्हाला काय म्हणायचंय ते."
        bot_response=bot_response+"\nमला हे इंटरनेटवर मिळाले:"
        query=user_response
        for url in search(query, lang=lang, num_results=3):
            bot_response=bot_response+"\n"+url
        return bot_response
    else:
        bot_response=bot_response+sent_tokens[idx]
        return bot_response
# Chat dispatcher
def chat(user_response):
    """Route one user utterance to a farewell, greeting, or corpus answer.

    Returns:
        (reply, keep_chatting): keep_chatting is False only for a farewell.

    Fix: bye()/greet_sent() pick replies with random.choice, and the old
    code called each helper twice (once to test, once to build the reply),
    burning two RNG draws and potentially replying with a different choice
    than the one tested. Each helper is now called exactly once.
    """
    farewell = bye(user_response)
    if farewell is not None:
        return (farewell, False)
    greeting = greet_sent(user_response)
    if greeting is not None:
        return (greeting, True)
    return (response(user_response), True)
| 2,481 | 982 |
# Generic imports
import math
# Custom imports
from lbm.src.utils.shapes import *
from lbm.src.core.obstacle import *
###############################################
### Base app
class base_app():
    """Skeleton LBM application: lifecycle hooks plus shared helpers."""

    ### Set inlet fields
    def set_inlets(self, lattice, it):
        pass

    ### Compute observables
    def observables(self, lattice, it):
        pass

    ### Finalize
    def finalize(self, lattice):
        pass

    ### Add obstacles
    def add_obstacles(self, lattice, obstacles):
        # Tags are 1-based; enumerate keeps obstacle order and tag in sync.
        for tag, obs in enumerate(obstacles, start=1):
            shape = generate_shape(obs.n_pts, obs.pos,
                                   obs.type, obs.size,
                                   obs.name, obs.n_spts,
                                   lattice.output_dir)
            obs.set_polygon(shape.curve_pts)
            obs.set_tag(tag)
            area, bnd, ibb = lattice.add_obstacle(obs)
            obs.fill(area, bnd, ibb)

    ### Iteration printings
    def printings(self, it):
        if (self.stop == 'it'):
            print('# it = '+str(it)+' / '+str(self.it_max), end='\r')
        if (self.stop == 'obs'):
            str_d = "{:10.6f}".format(self.drag_buff.obs)
            str_l = "{:10.6f}".format(self.lift_buff.obs)
            print('# it = '+str(it)+
                  ', avg drag ='+str_d+', avg lift ='+str_l, end='\r')

    ### Check stopping criterion
    def check_stop(self, it):
        # Returns False (stop) once the configured criterion is met.
        if (self.stop == 'it') and (it >= self.it_max):
            print('\n')
            print('# Computation ended: it>it_max')
            return False
        if (self.stop == 'obs') and self.drag_buff.obs_cv and self.lift_buff.obs_cv:
            print('\n')
            print('# Computation ended: converged')
            return False
        return True
| 1,931 | 610 |
def fraction_plusMinus(arr):
    """Print the fractions of positive, negative, and zero entries of arr.

    Each fraction is printed on its own line, rounded via round(x, len(arr)).
    NOTE(review): tying the ndigits argument to the list length looks
    accidental (the classic problem wants a fixed 6 decimals) — preserved
    here to keep output identical; confirm intent before changing.

    Fix: an empty list used to raise ZeroDivisionError; it now prints
    nothing and returns.
    """
    total = len(arr)
    if total == 0:
        return
    positives = sum(1 for v in arr if v > 0)
    negatives = sum(1 for v in arr if v < 0)
    zeros = sum(1 for v in arr if v == 0)
    print(round(positives / total, total))
    print(round(negatives / total, total))
    print(round(zeros / total, total))

fraction_plusMinus([-2, 3, -4, 0, 5, 1])
fraction_plusMinus([5, 2, -4, 0, 0, 1, -3])
from model.contact import ContactGroup
import re
class ContactHelper:
    def __init__(self, app):
        # `app` is the fixture wrapper exposing the Selenium webdriver as .wd
        self.app = app
    def add_new(self, contactgroup):
        """Open the 'add new' page, fill the contact form, and submit it."""
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contactgroup)
        # Submit button of the creation form.
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        self.contact_cash = None  # invalidate the cached contact list
def fill_contact_form(self, contactgroup):
wd = self.app.wd
self.change_fields("firstname", contactgroup.firstname)
self.change_fields("lastname", contactgroup.lastname)
self.change_fields("nickname", contactgroup.nickname)
self.change_fields("company", contactgroup.company)
self.change_fields("address", contactgroup.address)
self.change_fields("home", contactgroup.home)
self.change_fields("work", contactgroup.work)
self.change_fields("mobile", contactgroup.mobile)
self.change_fields("email", contactgroup.email)
self.change_fields("email2", contactgroup.email2)
self.change_fields("address2", contactgroup.address2)
self.change_fields("middlename", contactgroup.middlename)
self.change_fields("notes", contactgroup.notes)
    def change_fields(self, field_name, text):
        """Replace a form field's value; a None text leaves it untouched."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)
    def modify_by_index(self, index, new_contactgroup):
        """Edit the contact in table row `index` (0-based) and save."""
        wd = self.app.wd
        self.open_contacts_page()
        # self.select_contact_by_index(index)
        # search Modify btn with index
        # +2 skips the header row and converts to XPath's 1-based rows.
        wd.find_element_by_xpath(".//*[@id='maintable']/tbody/tr["+str(index+2)+"]/td[8]/a/img").click()
        self.fill_contact_form(new_contactgroup)
        wd.find_element_by_name("update").click()
        self.contact_cash = None  # invalidate the cached contact list
    def modify_by_id(self, id, new_contactgroup):
        """Edit a contact and save.

        NOTE(review): despite the name, `id` is used as a table-row index
        (identical math to modify_by_index), not a database id — confirm
        what callers actually pass.
        """
        wd = self.app.wd
        self.open_contacts_page()
        # self.select_contact_by_index(index)
        # search Modify btn with index
        wd.find_element_by_xpath(".//*[@id='maintable']/tbody/tr["+str(id+2)+"]/td[8]/a/img").click()
        self.fill_contact_form(new_contactgroup)
        wd.find_element_by_name("update").click()
        self.contact_cash = None  # invalidate the cached contact list
def modify_first(self):
self.modify_by_index(0)
def open_contacts_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/addressbook/") > 0):
wd.find_element_by_link_text("home").click()
    def select_add_group_from_list(self, id):
        """Pick group `id` in the 'add to group' dropdown and confirm."""
        wd = self.app.wd
        #wd.find_elements_by_xpath(".//*[@id='content']/form[2]/div[4]/select/option")[index2].click()
        wd.find_element_by_xpath(".//*[@id='content']/form[2]/div[4]//option[@value='%s']" % id).click()
        wd.find_element_by_name("add").click()
    def select_group_for_deletion(self, id):
        """Select group `id` in the right-hand group filter dropdown."""
        wd = self.app.wd
        wd.find_element_by_xpath(".//*[@id='right']//option[@value='%s']" % id).click()
def delete_contact_from_group(self):
wd = self.app.wd
wd.find_element_by_name("remove").click()
def delete_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
# select first group
self.select_contact_by_index(index)
# delete
wd.find_element_by_xpath(".//*[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cash = None
def delete_by_id(self, id):
wd = self.app.wd
self.open_contacts_page()
# select first group
self.select_contact_by_id(id)
# delete
wd.find_element_by_xpath(".//*[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cash = None
def delete_first(self):
self.delete_by_index(0)
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_xpath(".//*[@id='%s']" % id).click()
def select_first(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def count(self):
wd = self.app.wd
self.open_contacts_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cash = None
def get_contact_list(self):
if self.contact_cash is None:
wd = self.app.wd
self.open_contacts_page()
self.contact_cash = []
for element in wd.find_elements_by_xpath("//tbody/tr[@name='entry']"):
firstname = element.find_element_by_xpath("td[3]").text
lastname = element.find_element_by_xpath("td[2]").text
id = element.find_element_by_name("selected[]").get_attribute("value")
all_phones = element.find_element_by_xpath("td[6]").text
all_emails = element.find_element_by_xpath("td[5]").text
address = element.find_element_by_xpath("td[4]").text
self.contact_cash.append(ContactGroup(firstname=firstname, lastname=lastname, id=id, address=address, all_emails_from_home_page=all_emails, all_phones_from_home_page=all_phones))
return list(self.contact_cash)
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
address = wd.find_element_by_name("address").text
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
secondaryphone = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return ContactGroup(firstname=firstname, lastname=lastname, id=id, address=address, homephone=homephone, mobilephone=mobilephone, workphone=workphone,
secondaryphone=secondaryphone, email=email, email2=email2, email3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
secondaryphone = re.search("P: (.*)", text).group(1)
return ContactGroup(homephone=homephone, mobilephone=mobilephone, workphone=workphone, secondaryphone=secondaryphone)
| 7,731 | 2,578 |
# pylint: disable=non-parent-init-called
# -*- coding: utf-8 -*-
#
# ramstk.views.gtk3.widgets.widget.py is part of the RAMSTK Project
#
# All rights reserved.
# Copyright 2007 - 2020 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTK GTK3 Base Widget Module."""
# Standard Library Imports
from typing import Any, Dict
# RAMSTK Package Imports
from ramstk.views.gtk3 import GObject, _
class RAMSTKWidget:
    """The RAMSTK Base Widget class.

    Mixin for RAMSTK's Gtk widgets: it calls GObject.GObject.__init__() and
    relies on the companion Gtk class to provide set_property().
    """
    # Define private scalar class attributes.
    # -1 tells GTK to use the widget's natural size.
    _default_height = -1
    _default_width = -1
    def __init__(self) -> None:
        """Create RAMSTK Base widgets."""
        GObject.GObject.__init__(self)
        # Initialize private dictionary attributes.
        # Initialize private list attributes.
        # Initialize private scalar attributes.
        # Initialize public dictionary attributes.
        # Maps signal name -> handler ID (seeded with a dummy entry).
        self.dic_handler_id: Dict[str, int] = {"": 0}
        # Initialize public list attributes.
        # Initialize public scalar attributes.
        self.height: int = -1
        self.width: int = -1
    def do_set_properties(self, **kwargs: Any) -> None:
        """Set the properties of the RAMSTK widget.

        :param **kwargs: See below
        :Keyword Arguments:
            * *can_focus* (bool) -- whether the widget may receive keyboard
              focus.  Defaults to True.
            * *height* (int) -- height of the RAMSTKWidget().
            * *tooltip* (str) -- the tooltip, if any, for the widget.
                Default is a message to file a QA-type issue to have one added.
            * *width* (int) -- width of the RAMSTKWidget().
        :return: None
        :rtype: None
        """
        _can_focus = kwargs.get("can_focus", True)
        _height = kwargs.get("height", self._default_height)
        _tooltip = kwargs.get(
            "tooltip",
            _("Missing tooltip, please file a quality type issue to have one added."),
        )
        _width = kwargs.get("width", self._default_width)
        # A height/width of zero means "unset" -- fall back to the defaults.
        if _height == 0:
            _height = self._default_height
        if _width == 0:
            _width = self._default_width
        self.height = _height
        self.width = _width
        # set_property() is supplied by the Gtk widget this class is mixed
        # into, hence the type: ignore markers.
        self.set_property("can-focus", _can_focus)  # type: ignore
        self.set_property("height-request", _height)  # type: ignore
        self.set_property("tooltip-markup", _tooltip)  # type: ignore
        self.set_property("width-request", _width)  # type: ignore
| 2,377 | 720 |
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import numpy as np
import cv2
import picamera.array

# Live face/eye detection on the Raspberry Pi camera stream; press "q" to quit.
camera = PiCamera()
camera.resolution = (800, 600)
camera.framerate = 10
rawCapture = PiRGBArray(camera, size=(800, 600))
time.sleep(0.1)  # give the sensor a moment to warm up
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    img = frame.array
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        # Blue box around each face; then look for eyes inside that ROI only.
        img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    cv2.imshow('img', img)
    key = cv2.waitKey(1) & 0xFF
    rawCapture.truncate(0)  # reuse the capture buffer for the next frame
    if key == ord("q"):
        break
# Bug fix: the original leaked the preview window and the camera handle.
cv2.destroyAllWindows()
camera.close()
| 1,011 | 477 |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from datetime import datetime
# In[2]:
# Hourly household energy data.  keep=False drops BOTH copies of any
# duplicated 'localhour' timestamp, not just the extras.
hh_df = pd.read_csv('home_ac/processed_hhdata_86_2.csv')
# print(hh_df.shape)
# hh_df.head(15)
hh_df.drop_duplicates(subset ="localhour", keep = False, inplace = True)
print(hh_df.shape)
# In[3]:
# Sequential hour counter, filled in by the gap-filling loop below.
hh_df['hour_index']=0
#hh_df.iloc[-50]
# In[4]:
# Columns carried through to the gap-filled output frame.
used = ['localhour', 'use', 'temperature', 'cloud_cover','GH', 'is_weekday','month','hour','AC','DC','hour_index']
datarow= []
# In[5]:
hour_index=0#hour index
hour_value=0
missing_count=0
# Timestamps appear to end with a UTC-offset suffix that [:-3] strips
# before parsing -- TODO confirm against the raw CSV.
start_time= pd.to_datetime(hh_df['localhour'].iloc[0][:-3])
# Walk the rows in timestamp order; whenever the elapsed hours jump past the
# running hour_index, synthesize one filler row per missing hour by reusing
# the previous row's readings with recomputed time fields.
for index, row in hh_df.iterrows():
    row.localhour=row.localhour[:-3]
    #print(row.localhour)
    # Hours elapsed since the first timestamp in the file.
    difference=(pd.to_datetime(row.localhour)-pd.to_datetime(hh_df['localhour'].iloc[0][:-3])).total_seconds()/3600
    #print("index is difference",difference)
    if difference!=hour_index:
        gap = difference-hour_index
        missing_count += gap
        #fill in the missing hours
        for i in range(int(gap)):
            print("\n---------------------------------------")
            print("missing data for hour index:",hour_index+i)
            #row.hour=(hour_index+i)%24
            temprow=None
            #print("this is lastrow",lastrow)
            # NOTE(review): this aliases lastrow rather than copying it, so
            # the mutations below also change lastrow -- appears harmless
            # because temprow[used] is copied on append and lastrow is
            # reassigned each outer iteration, but confirm (.copy() would
            # be safer).
            temprow=lastrow
            #print("this is temprow",temprow)
            temprow.hour_index=hour_index+i
            #print("this is hour of lastrow",lastrow.hour)
            #temprow.hour = (hour_index+i)%24
            # Recompute every time-derived field for the synthetic hour.
            current_time = start_time+pd.Timedelta(hour_index+i,unit='h')
            temprow.localhour = current_time
            temprow.hour = current_time.hour
            temprow.month = current_time.month
            temprow.is_weekday = int(datetime.strptime(str(current_time), "%Y-%m-%d %H:%M:%S").weekday() < 5)
            print("The inserted row is \n",temprow)
            #datarow.append(row[used])
            datarow.append(temprow[used])
            temprow=None
            #hour=None
            #print(datarow)
        # Resynchronize the counter after filling the gap.
        hour_index = difference
    hour_index +=1
    row.hour_index=difference
    #hour_value = row.hour
    #print(row[used])
    #print("reach here")
    # Remember this row as the template for any future gap filling.
    lastrow = row[used]
    datarow.append(row[used])
print("total missing hours",missing_count)
#------------------------------------------testing----------------------------
# hour_index=0 #hour index
# missing_count=0
# for index, row in hh_df.iterrows():
# #print(row.localhour)
# #row.month = float(pd.to_datetime(row.localhour[:-3]).month)
# #row.day = float(pd.to_datetime(row.localhour[:-3]).day)
# #data_hour = float(pd.to_datetime(row.localhour).hour-6)%24
# data_hour = float(pd.to_datetime(row.localhour[:-3]).hour)
# #print(data_hour)
# if data_hour != hour_index%24:
# print("we are missing hours for",row.localhour)
# missing_count += 1
# hour_index +=1
# hour_index += 1
# print("In total missing hours", missing_count)
# for index, row in hh_df.iterrows():
# #row.month = float(pd.to_datetime(row.localhour[:-3]).month)
# #row.day = float(pd.to_datetime(row.localhour[:-3]).day)
# print("------------")
# print(row.localhour)
# print(float(pd.to_datetime(row.localhour).hour-6)%24)
# print(float(pd.to_datetime(row.localhour[:-3]).hour))
# # print(pd.to_datetime(row.localhour))
# # print(pd.to_datetime(row.localhour).tz_localize('UTC'))
# # print(pd.to_datetime(row.localhour).tz_localize('UTC').tz_convert('US/Central'))
# # print(pd.to_datetime(row.localhour[:-3]).tz_localize('US/Central'))
# # print(pd.to_datetime(row.localhour)-pd.Timedelta('06:00:00'))
# In[6]:
# Assemble the gap-filled rows into a frame and persist it.
df = pd.DataFrame(data=datarow, columns=used)
print(df.head())
df.to_csv('datanew/afterfix6.csv')
| 3,801 | 1,377 |
import pandas as pd
import pickle
import json
def extract_uncropped_name(filename):
    """Map a cropped-frame path back to its uncropped image name.

    e.g. 'x/vid_cropped/0012cropped.png' -> 'vid_0012.png'
    """
    parts = filename.split('/')
    basename, source_dir = parts[-1], parts[-2]
    video_source = source_dir.replace('_cropped', '')
    extension = basename.split('.')[-1]
    # Everything before the first 'c' (start of the "cropped" marker).
    prefix = basename.split('c')[0]
    return '%s_%s.%s' % (video_source, prefix, extension)
# For each DLC shuffle, map the cropped train/test frame paths back to their
# uncropped source-image names and dump both sets to a JSON file.
csv_path='CollectedData_Daniel.csv'
all_data = pd.read_csv(csv_path)
for shuffle in [0,1,2]:
    docu_path = 'Documentation_data-MultiMouse_95shuffle{}.pickle'.format(shuffle)
    f = open(docu_path,'rb')
    # Pickle layout: a[1] = train indices, a[2] = test indices.
    a = pickle.load(f)
    train_indices = a[1]
    test_indices = a[2]
    # Skip the 3 DLC header rows; column 0 holds the image paths.
    data = all_data.iloc[3:,0].to_numpy()
    train_data = data[train_indices]
    test_data = data[test_indices]
    # Sets deduplicate frames that came from the same uncropped image.
    train_data_set = set()
    test_data_set = set()
    for e in test_data:
        test_data_set.add(extract_uncropped_name(e))
    for e in train_data:
        train_data_set.add(extract_uncropped_name(e))
    print ('train dataset')
    #print (train_data_set)
    print (len(train_data_set))
    print ('test dataset')
    #print (test_data_set)
    print (len(test_data_set))
    ret_obj = {}
    ret_obj['train_data'] = list(train_data_set)
    ret_obj['test_data'] = list(test_data_set)
    with open('3mouse_shuffule{}.json'.format(shuffle),'w') as f:
        json.dump(ret_obj,f)
| 1,375 | 530 |
__author__ = 'harsh'
'''
Given two words, determine if the first word, or any anagram of it, appears in consecutive characters of the second word.
For instance, tea appears as an anagram in the last three letters of slate, but let does not appear as an anagram in actor
even though all the letters of let appear in slate.
Return the anagram of the first word that has appeared in the second word.
Sample Input 1
tea
slate
Sample Output1
ate
Sample Input 2
let
slate
Sample Output2
NONE
'''
def is_substr_anagram(soFar, rest, superstr):
    """Return the first permutation of soFar+rest that is a substring of
    superstr, or None if no permutation occurs.

    Permutations are generated recursively: soFar is the fixed prefix built
    so far, rest holds the characters still to be placed.

    :param soFar: prefix accumulated by the recursion (pass "" initially).
    :param rest: remaining characters to permute.
    :param superstr: string searched for the anagram.
    """
    if len(rest) == 0:
        if soFar in superstr:
            # Parenthesized so this works under both Python 2 and 3
            # (the original used the Python-2-only print statement).
            print(soFar + " is present in " + superstr)
            return soFar
        return None
    for i, ch in enumerate(rest):
        # Fix ch at the next position and permute the remaining characters.
        result = is_substr_anagram(soFar + ch, rest[:i] + rest[i + 1:], superstr)
        if result:
            return result
    return None
def main():
    """Demo run: search for an anagram of "an" inside "arpana"."""
    soFar = is_substr_anagram("", "an", "arpana")
    # Parenthesized print works under both Python 2 and 3; the original
    # Python-2-only print statement is a syntax error on Python 3.
    print(soFar)
main()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from flask import Flask, request, abort
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import *
import json
from ArticutAPI import Articut
from decimal import Decimal, ROUND_HALF_UP
app = Flask(__name__)
# LINE bot credentials (API token and webhook handler secret).
with open("line_bot.json", encoding="utf-8") as f:
    linebotDICT = json.loads(f.read())
line_bot_api = LineBotApi(linebotDICT["line_bot_api"])
handler = WebhookHandler(linebotDICT["handler"])
# Articut (Chinese NLP parser) account credentials.
with open("account.json", encoding="utf-8") as f:
    accountDICT = json.loads(f.read())
articut = Articut(username=accountDICT["username"], apikey=accountDICT["apikey"])
# Pronoun word list (代名詞).
with open("Dict/pronoun.json", encoding="utf-8") as f:
    pronounDICT = json.loads(f.read())
# "Absolute"/black-and-white vocabulary (絕對性詞彙).
with open("Dict/absolution.json", encoding="utf-8") as f:
    absolutionDICT = json.loads(f.read())
# Negative vocabulary (負向詞彙).
with open("Dict/negative.json", encoding="utf-8") as f:
    negativeDICT = json.loads(f.read())
# Positive vocabulary (正向詞彙).
with open("Dict/positive.json", encoding="utf-8") as f:
    positiveDICT = json.loads(f.read())
# Other pronoun vocabulary (其他代名詞詞彙).
with open("Dict/other_pronoun.json", encoding="utf-8") as f:
    otherpronounDICT = json.loads(f.read())
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the request signature, dispatch events."""
    signature = request.headers['X-Line-Signature']
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        # A bad signature means the request did not come from LINE.
        abort(400)
    return 'OK'
# POS tags excluded when counting content words (忽略的詞性).
ignorance = ["FUNC_conjunction", "FUNC_degreeHead", "FUNC_determiner", "FUNC_inner", "FUNC_inter", "FUNC_modifierHead", "FUNC_negation", "ASPECT"]
# Running depression score (憂鬱指數), accumulated by the Make* helpers below.
index = 0
def wordExtractor(inputLIST, unify=True):
    """Flatten Articut span lists and keep only the word text.

    Designed for the output of Articut's .getNounStemLIST(), .getVerbStemLIST()
    and similar helpers: a list of (possibly empty) lists of tuples whose last
    element is the word; position information is discarded.

    :param inputLIST: list of lists of tuples ending in the word string.
    :param unify: when True, deduplicate the extracted words.
    :return: sorted list of words.
    """
    # Comprehension replaces the original append loop; empty inner lists
    # simply contribute nothing.
    resultLIST = [e[-1] for i in inputLIST for e in i]
    if unify:  # idiom fix: was "if unify == True"
        return sorted(set(resultLIST))
    return sorted(resultLIST)
def MakePronoun(inputLIST, inputDICT):
    """Count first-person pronoun usage and add its share to the global score.

    Resets the global `index`, counts first-person pronouns in inputLIST and
    content tokens in the Articut parse, then adds up to 25 points weighted
    by the first-person share.

    :param inputLIST: person words extracted via wordExtractor().
    :param inputDICT: raw Articut parse result (dict with "result_obj").
    :return: a report string (in Chinese) describing pronoun usage.
    """
    global index
    index = 0
    first_person = 0
    others = 0
    dictLen = 0
    for i in inputLIST:
        if i in pronounDICT["first"]:
            first_person += 1
        #else:
            #others += 1
    inputDICT = inputDICT["result_obj"]
    # Denominator: content tokens only (function-word POS tags are skipped).
    for i in range(len(inputDICT)):
        for j in range(len(inputDICT[i])):
            if inputDICT[i][j]["pos"] not in ignorance:
                dictLen += 1
                #if inputDICT[i][j]["text"] in otherpronounDICT["others"]:
                    #others += 1
    msg = "[代名詞 使用情況]\n"
    msg += ("第一人稱:" + str(first_person) + '\n')
    #msg += ("其他人稱:" + str(others) + '\n')
    if first_person > 1:
        msg += ("第一人稱占比:" + str(Decimal(str((first_person/dictLen)*100)).quantize(Decimal('.00'), ROUND_HALF_UP)) + "%\n")
    else:
        # NOTE(review): counts of 0 or 1 are floored to 1 before computing
        # the share -- confirm this is intentional.
        first_person = 1
        msg += ("第一人稱占比:" + str(Decimal(str((first_person/dictLen)*100)).quantize(Decimal('.00'), ROUND_HALF_UP)) + "%\n")
    # First-person share contributes up to 25 points to the overall score.
    index += Decimal(str((first_person/dictLen)*25)).quantize(Decimal('.00'), ROUND_HALF_UP)
    return msg
def MakeAbsolution(inputDICT):
    """Count "absolute" vocabulary and add its share to the global score.

    :param inputDICT: raw Articut parse result (dict with "result_obj").
    :return: a report string (in Chinese) describing absolute-word usage.
    """
    global index
    absolute = 0
    dictLen = 0
    inputDICT = inputDICT["result_obj"]
    for i in range(len(inputDICT)):
        for j in range(len(inputDICT[i])):
            if inputDICT[i][j]["pos"] not in ignorance:
                dictLen += 1
                if inputDICT[i][j]["text"] in absolutionDICT["absolution"]:
                    absolute += 1
    msg = "\n[絕對性詞彙 使用情況]\n"
    msg += ("絕對性詞彙:" + str(absolute) + '\n')
    msg += ("絕對性詞彙占比:" + str(Decimal(str((absolute/dictLen)*100)).quantize(Decimal('.00'), ROUND_HALF_UP))+ "%\n")
    # Absolute-word share contributes up to 54 points to the overall score.
    index += Decimal(str((absolute/dictLen)*54)).quantize(Decimal('.00'), ROUND_HALF_UP)
    return msg
def MakeDepression(inputDICT):
    """Count negative vocabulary and add its share to the global score.

    Generic negative words count once; death/medicine/disease terms count
    double.

    :param inputDICT: raw Articut parse result (dict with "result_obj").
    :return: a report string (in Chinese) describing negative-word usage.
    """
    global index
    depress = 0
    encourage = 0
    dictLen = 0
    inputDICT = inputDICT["result_obj"]
    for i in range(len(inputDICT)):
        for j in range(len(inputDICT[i])):
            if inputDICT[i][j]["pos"] not in ignorance:
                dictLen += 1
                if inputDICT[i][j]["text"] in negativeDICT["negative"]:
                    depress += 1
                elif inputDICT[i][j]["text"] in negativeDICT["death"]:
                    depress += 2
                elif inputDICT[i][j]["text"] in negativeDICT["medicine"]:
                    depress += 2
                elif inputDICT[i][j]["text"] in negativeDICT["disease"]:
                    depress += 2
                #elif inputDICT[i][j]["text"] in positiveDICT["positive"]:
                    #encourage += 1
    msg = "\n[負向詞彙 使用情況]\n"
    msg += ("負向詞彙:" + str(depress) + '\n')
    #msg += ("正向詞彙:" + str(encourage) + '\n')
    msg += ("負向詞彙占比:" + str(Decimal(str((depress/dictLen)*100)).quantize(Decimal('.00'), ROUND_HALF_UP))+ "%\n")
    #msg += ("正向詞彙占比:" + str(Decimal(str((encourage/dictLen)*100)).quantize(Decimal('.00'), ROUND_HALF_UP))+ "%")
    # Negative-word share contributes up to 21 points to the overall score.
    index += Decimal(str((depress/dictLen)*21)).quantize(Decimal('.00'), ROUND_HALF_UP)
    return msg
def MakeIndex():
    """Format the accumulated global depression score as a report string.

    Must be called after MakePronoun/MakeAbsolution/MakeDepression, which
    build up the global `index`.
    """
    global index
    msg = "\n[憂鬱文本分析]\n"
    msg += ("憂鬱指數:" + str(index) + '\n')
    msg += ("提醒您:此工具的用途為分析有潛在憂鬱傾向的文本。若您的文本之憂鬱指數高於5.5,代表此文本與其他憂鬱文本的相似度較高。")
    return msg
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Analyze an incoming LINE text message and reply with the report.

    Merges the custom dictionaries into mixedDICT.json, parses the text with
    Articut using that user dictionary, runs the three scoring passes and
    replies with the concatenated report.
    """
    inputSTR = event.message.text
    # Merge the custom vocabularies into one user-defined dictionary file.
    mixedDICT = {**absolutionDICT, **negativeDICT, **positiveDICT, **otherpronounDICT}
    with open("mixedDICT.json", mode="w", encoding="utf-8") as f:
        json.dump(mixedDICT, f, ensure_ascii=False)
    # Parse with the user-defined dictionary just written.
    inputDICT = articut.parse(inputSTR, userDefinedDictFILE="./mixedDICT.json")
    inputLIST = articut.getPersonLIST(inputDICT)
    inputLIST = wordExtractor(inputLIST, unify=False)
    # MakePronoun resets the global score; MakeIndex reads the final value.
    PronounMsg = MakePronoun(inputLIST, inputDICT)
    AbsolutionMsg = MakeAbsolution(inputDICT)
    DepressionMsg = MakeDepression(inputDICT)
    IndexMsg = MakeIndex()
    ResultMsg = PronounMsg + AbsolutionMsg + DepressionMsg + IndexMsg
    SendMsg=[TextSendMessage(text=ResultMsg)]
    line_bot_api.reply_message(event.reply_token, SendMsg)
import os
if __name__ == "__main__":
    # Bind to the platform-assigned port (e.g. on Heroku); default 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| 6,766 | 2,794 |
# Generated by Django 3.1.4 on 2021-01-08 02:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: Badge, Challenge, Charity, Prize, User and Issue.

    Auto-generated by Django 3.1.4 on 2021-01-08; edit with care, since
    later migrations depend on this state.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Badge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('description', models.CharField(blank=True, max_length=60)),
                ('picture_link', models.CharField(blank=True, max_length=1000)),
            ],
        ),
        migrations.CreateModel(
            name='Challenge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('city', models.CharField(default='Unknown', max_length=60)),
                ('organizer', models.CharField(default='Anonymous', max_length=60)),
                ('duration', models.PositiveIntegerField(default=30)),
                ('reward', models.PositiveIntegerField(default=100)),
            ],
        ),
        migrations.CreateModel(
            name='Charity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('picture_link', models.CharField(blank=True, max_length=1000, null=True)),
                ('amount', models.PositiveIntegerField(default=5)),
                ('cost', models.PositiveIntegerField(default=1000)),
            ],
        ),
        migrations.CreateModel(
            name='Prize',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('picture_link', models.CharField(blank=True, max_length=1000, null=True)),
                ('amount', models.PositiveIntegerField(default=5)),
                ('cost', models.PositiveIntegerField(default=1000)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='Guest', max_length=60)),
                ('deeds', models.PositiveIntegerField(default=0)),
                ('score', models.PositiveIntegerField(default=0)),
                ('rank', models.PositiveIntegerField(default=2109)),
                ('badges', models.ManyToManyField(to='core.Badge')),
                ('friends', models.ManyToManyField(related_name='_user_friends_+', to='core.User')),
                ('goals', models.ManyToManyField(to='core.Challenge')),
            ],
        ),
        migrations.CreateModel(
            name='Issue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('description', models.CharField(blank=True, max_length=500, null=True)),
                ('challenge', models.ManyToManyField(to='core.Challenge')),
                ('charity', models.ManyToManyField(to='core.Charity')),
            ],
        ),
    ]
| 3,505 | 969 |
# Taxi fare: R$0.50/km up to 200 km, R$0.45/km beyond that.
d = int(input('Digite a distância percorrida: Km '))
# Bug fix: the original had "p = float" (assigning the *type* to p -- dead
# code) and duplicated the print in both branches.
if d <= 200:
    p = d * 0.50
else:
    p = d * 0.45
print('Total a pagar pelos {}Km rodados é R${:.2f}'.format(d, p))
| 256 | 118 |
import pytest
import os
import sys
from polyglotdb.io.types.parsing import (SegmentTier, OrthographyTier,
GroupingTier, TextOrthographyTier,
TranscriptionTier,
TextTranscriptionTier, TextMorphemeTier,
MorphemeTier)
from polyglotdb.io.parsers.base import BaseParser
from polyglotdb.io import (inspect_textgrid, inspect_fave, inspect_mfa, inspect_partitur)
from polyglotdb.corpus import CorpusContext
from polyglotdb.structure import Hierarchy
from polyglotdb.config import CorpusConfig
def pytest_addoption(parser):
    """Register the --skipacoustics command-line flag."""
    parser.addoption(
        "--skipacoustics",
        action="store_true",
        help="skip acoustic tests",
    )
@pytest.fixture(scope='session')
def test_dir():
    """Session-wide path to tests/data, creating data/generated if missing."""
    base = os.path.dirname(os.path.abspath(__file__))
    generated = os.path.join(base, 'data', 'generated')
    # exist_ok replaces the explicit existence check (consistent with
    # results_test_dir below).
    os.makedirs(generated, exist_ok=True)
    return os.path.join(base, 'data')  # was tests/data
@pytest.fixture(scope='session')
def buckeye_test_dir(test_dir):
    """Directory with Buckeye-format test data."""
    return os.path.join(test_dir, 'buckeye')
@pytest.fixture(scope='session')
def results_test_dir(test_dir):
    """Scratch directory for generated results (created on demand)."""
    target = os.path.join(test_dir, 'generated', 'results')
    os.makedirs(target, exist_ok=True)
    return target
@pytest.fixture(scope='session')
def timit_test_dir(test_dir):
    """Directory with TIMIT-format test data."""
    return os.path.join(test_dir, 'timit')
@pytest.fixture(scope='session')
def textgrid_test_dir(test_dir):
    """Directory containing Praat TextGrid test files."""
    return os.path.join(test_dir, 'textgrids')
@pytest.fixture(scope='session')
def praatscript_test_dir(test_dir):
    """Path to the Praat scripts used by acoustic tests.

    Bug fix: this fixture was defined twice, byte-for-byte identical; the
    redundant duplicate (which merely shadowed the first) was removed.
    """
    return os.path.join(test_dir, 'praat_scripts')
@pytest.fixture(scope='session')
def fave_test_dir(textgrid_test_dir):
    """FAVE-aligned TextGrids."""
    return os.path.join(textgrid_test_dir, 'fave')
@pytest.fixture(scope='session')
def mfa_test_dir(textgrid_test_dir):
    """MFA-aligned TextGrids."""
    return os.path.join(textgrid_test_dir, 'mfa')
@pytest.fixture(scope='session')
def maus_test_dir(textgrid_test_dir):
    """MAUS-aligned TextGrids."""
    return os.path.join(textgrid_test_dir, 'maus')
@pytest.fixture(scope='session')
def labbcat_test_dir(textgrid_test_dir):
    """LaBB-CAT exported TextGrids."""
    return os.path.join(textgrid_test_dir, 'labbcat')
@pytest.fixture(scope='session')
def partitur_test_dir(test_dir):
    """BAS Partitur format test data."""
    return os.path.join(test_dir, 'partitur')
@pytest.fixture(scope='session')
def text_transcription_test_dir(test_dir):
    """Plain-text transcription corpora."""
    return os.path.join(test_dir, 'text_transcription')
@pytest.fixture(scope='session')
def text_spelling_test_dir(test_dir):
    """Plain-text spelling corpora."""
    return os.path.join(test_dir, 'text_spelling')
@pytest.fixture(scope='session')
def ilg_test_dir(test_dir):
    """Interlinear-gloss corpora."""
    return os.path.join(test_dir, 'ilg')
@pytest.fixture(scope='session')
def csv_test_dir(test_dir):
    """CSV-format corpora."""
    return os.path.join(test_dir, 'csv')
@pytest.fixture(scope='session')
def features_test_dir(test_dir):
    """Feature-specification files."""
    return os.path.join(test_dir, 'features')
@pytest.fixture(scope='session')
def export_test_dir(test_dir):
    """Scratch directory for exports (created on demand)."""
    path = os.path.join(test_dir, 'export')
    os.makedirs(path, exist_ok=True)
    return path
@pytest.fixture(scope='session')
def corpus_data_timed():
    """Parsed toy discourse "cats are cute / dogs are too / i guess" with
    time-aligned phone, word and line tiers."""
    levels = [SegmentTier('label', 'phone'),
              OrthographyTier('label', 'word'),
              GroupingTier('line', 'line')]
    # (label, start, end) in seconds.
    phones = [('k', 0.0, 0.1), ('ae', 0.1, 0.2), ('t', 0.2, 0.3), ('s', 0.3, 0.4),
              ('aa', 0.5, 0.6), ('r', 0.6, 0.7),
              ('k', 0.8, 0.9), ('uw', 0.9, 1.0), ('t', 1.0, 1.1),
              ('d', 2.0, 2.1), ('aa', 2.1, 2.2), ('g', 2.2, 2.3), ('z', 2.3, 2.4),
              ('aa', 2.4, 2.5), ('r', 2.5, 2.6),
              ('t', 2.6, 2.7), ('uw', 2.7, 2.8),
              ('ay', 3.0, 3.1),
              ('g', 3.3, 3.4), ('eh', 3.4, 3.5), ('s', 3.5, 3.6)]
    words = [('cats', 0.0, 0.4), ('are', 0.5, 0.7), ('cute', 0.8, 1.1),
             ('dogs', 2.0, 2.4), ('are', 2.4, 2.6), ('too', 2.6, 2.8),
             ('i', 3.0, 3.1), ('guess', 3.3, 3.6)]
    lines = [(0.0, 1.1), (2.0, 2.8), (3.0, 3.6)]
    levels[0].add(phones)
    levels[1].add(words)
    levels[2].add(lines)
    hierarchy = Hierarchy({'phone': 'word', 'word': 'line', 'line': None})
    parser = BaseParser(levels, hierarchy)
    data = parser.parse_discourse('test_timed')
    return data
@pytest.fixture(scope='session')
def subannotation_data():
    """Toy discourse with a subannotation tier (stop bursts/VOT/closures)
    attached to the phone level."""
    levels = [SegmentTier('label', 'phone'),
              OrthographyTier('label', 'word'),
              OrthographyTier('stop_information', 'phone')]
    # Mark the third tier as subannotations of 'phone' rather than a level.
    levels[2].subannotation = True
    phones = [('k', 0.0, 0.1), ('ae', 0.1, 0.2), ('t', 0.2, 0.3), ('s', 0.3, 0.4),
              ('aa', 0.5, 0.6), ('r', 0.6, 0.7),
              ('k', 0.8, 0.9), ('u', 0.9, 1.0), ('t', 1.0, 1.1),
              ('d', 2.0, 2.1), ('aa', 2.1, 2.2), ('g', 2.2, 2.3), ('z', 2.3, 2.4),
              ('aa', 2.4, 2.5), ('r', 2.5, 2.6),
              ('t', 2.6, 2.7), ('uw', 2.7, 2.8),
              ('ay', 3.0, 3.1),
              ('g', 3.3, 3.4), ('eh', 3.4, 3.5), ('s', 3.5, 3.6)]
    words = [('cats', 0.0, 0.4), ('are', 0.5, 0.7), ('cute', 0.8, 1.1),
             ('dogs', 2.0, 2.4), ('are', 2.4, 2.6), ('too', 2.6, 2.8),
             ('i', 3.0, 3.1), ('guess', 3.3, 3.6)]
    # Subannotation spans fall inside the stops they annotate.
    info = [('burst', 0, 0.05), ('vot', 0.05, 0.1), ('closure', 0.2, 0.25),
            ('burst', 0.25, 0.26), ('vot', 0.26, 0.3), ('closure', 2.2, 2.25),
            ('burst', 2.25, 2.26), ('vot', 2.26, 2.3),
            ('voicing_during_closure', 2.2, 2.23), ('voicing_during_closure', 2.24, 2.25)]
    levels[0].add(phones)
    levels[1].add(words)
    levels[2].add(info)
    hierarchy = Hierarchy({'phone': 'word', 'word': None})
    parser = BaseParser(levels, hierarchy)
    data = parser.parse_discourse('test_sub')
    return data
@pytest.fixture(scope='session')
def corpus_data_onespeaker(corpus_data_timed):
    """The timed corpus with every discourse stamped with one speaker.

    Note: mutates (and returns) the shared corpus_data_timed fixture.
    """
    # Iterate the values directly instead of indexing by key (PERF102).
    for discourse in corpus_data_timed.data.values():
        discourse.speaker = 'some_speaker'
    return corpus_data_timed
@pytest.fixture(scope='session')
def corpus_data_untimed():
    """The toy discourse again, but index-aligned (no times): transcription,
    spelling and morpheme tiers over words, grouped into lines."""
    levels = [TextTranscriptionTier('transcription', 'word'),
              TextOrthographyTier('spelling', 'word'),
              TextMorphemeTier('morpheme', 'word'),
              GroupingTier('line', 'line')]
    # (value, word index); '.' separates segments, '-' separates morphemes.
    transcriptions = [('k.ae.t-s', 0), ('aa.r', 1), ('k.y.uw.t', 2),
                      ('d.aa.g-z', 3), ('aa.r', 4), ('t.uw', 5),
                      ('ay', 6), ('g.eh.s', 7)]
    morphemes = [('cat-PL', 0), ('are', 1), ('cute', 2),
                 ('dog-PL', 3), ('are', 4), ('too', 5),
                 ('i', 6), ('guess', 7)]
    words = [('cats', 0), ('are', 1), ('cute', 2),
             ('dogs', 3), ('are', 4), ('too', 5),
             ('i', 6), ('guess', 7)]
    # (first word index, last word index) per line.
    lines = [(0, 2), (3, 5), (6, 7)]
    levels[0].add(transcriptions)
    levels[1].add(words)
    levels[2].add(morphemes)
    levels[3].add(lines)
    hierarchy = Hierarchy({'word': 'line', 'line': None})
    parser = BaseParser(levels, hierarchy)
    data = parser.parse_discourse('test_untimed')
    return data
@pytest.fixture(scope='session')
def corpus_data_ur_sr():
    """Toy discourse with both surface (sr) and underlying (ur) forms, for
    testing deletion/reduction alignment."""
    levels = [SegmentTier('sr', 'phone'),
              OrthographyTier('word', 'word'),
              TranscriptionTier('ur', 'word')]
    # Surface realizations: (label, start, end) in seconds.
    srs = [('k', 0.0, 0.1), ('ae', 0.1, 0.2), ('s', 0.2, 0.4),
           ('aa', 0.5, 0.6), ('r', 0.6, 0.7),
           ('k', 0.8, 0.9), ('u', 0.9, 1.1),
           ('d', 2.0, 2.1), ('aa', 2.1, 2.2), ('g', 2.2, 2.25),
           ('ah', 2.25, 2.3), ('z', 2.3, 2.4),
           ('aa', 2.4, 2.5), ('r', 2.5, 2.6),
           ('t', 2.6, 2.7), ('uw', 2.7, 2.8),
           ('ay', 3.0, 3.1),
           ('g', 3.3, 3.4), ('eh', 3.4, 3.5), ('s', 3.5, 3.6)]
    words = [('cats', 0.0, 0.4), ('are', 0.5, 0.7), ('cute', 0.8, 1.1),
             ('dogs', 2.0, 2.4), ('are', 2.4, 2.6), ('too', 2.6, 2.8),
             ('i', 3.0, 3.1), ('guess', 3.3, 3.6)]
    # Bug fix: ('t.uw', .6, 2.8) started at 0.6, overlapping earlier words;
    # every other ur span matches its word span, so 'too' starts at 2.6.
    urs = [('k.ae.t.s', 0.0, 0.4), ('aa.r', 0.5, 0.7), ('k.y.uw.t', 0.8, 1.1),
           ('d.aa.g.z', 2.0, 2.4), ('aa.r', 2.4, 2.6), ('t.uw', 2.6, 2.8),
           ('ay', 3.0, 3.1), ('g.eh.s', 3.3, 3.6)]
    levels[0].add(srs)
    levels[1].add(words)
    levels[2].add(urs)
    hierarchy = Hierarchy({'phone': 'word', 'word': None})
    parser = BaseParser(levels, hierarchy)
    data = parser.parse_discourse('test_ursr')
    return data
@pytest.fixture(scope='session')
def lexicon_data():
    """Toy lexicon: spelling, IPA transcription and token frequency per word."""
    corpus_data = [{'spelling': 'atema', 'transcription': ['ɑ', 't', 'e', 'm', 'ɑ'], 'frequency': 11.0},
                   {'spelling': 'enuta', 'transcription': ['e', 'n', 'u', 't', 'ɑ'], 'frequency': 11.0},
                   {'spelling': 'mashomisi', 'transcription': ['m', 'ɑ', 'ʃ', 'o', 'm', 'i', 's', 'i'],
                    'frequency': 5.0},
                   {'spelling': 'mata', 'transcription': ['m', 'ɑ', 't', 'ɑ'], 'frequency': 2.0},
                   {'spelling': 'nata', 'transcription': ['n', 'ɑ', 't', 'ɑ'], 'frequency': 2.0},
                   {'spelling': 'sasi', 'transcription': ['s', 'ɑ', 's', 'i'], 'frequency': 139.0},
                   {'spelling': 'shashi', 'transcription': ['ʃ', 'ɑ', 'ʃ', 'i'], 'frequency': 43.0},
                   {'spelling': 'shisata', 'transcription': ['ʃ', 'i', 's', 'ɑ', 't', 'ɑ'], 'frequency': 3.0},
                   {'spelling': 'shushoma', 'transcription': ['ʃ', 'u', 'ʃ', 'o', 'm', 'ɑ'], 'frequency': 126.0},
                   {'spelling': 'ta', 'transcription': ['t', 'ɑ'], 'frequency': 67.0},
                   {'spelling': 'tatomi', 'transcription': ['t', 'ɑ', 't', 'o', 'm', 'i'], 'frequency': 7.0},
                   {'spelling': 'tishenishu', 'transcription': ['t', 'i', 'ʃ', 'e', 'n', 'i', 'ʃ', 'u'],
                    'frequency': 96.0},
                   {'spelling': 'toni', 'transcription': ['t', 'o', 'n', 'i'], 'frequency': 33.0},
                   {'spelling': 'tusa', 'transcription': ['t', 'u', 's', 'ɑ'], 'frequency': 32.0},
                   {'spelling': 'ʃi', 'transcription': ['ʃ', 'i'], 'frequency': 2.0}]
    return corpus_data
@pytest.fixture(scope='session')
def corpus_data_syllable_morpheme_srur():
    """"boxes are for packing" with six tiers: surface phones, underlying
    forms, syllables, morphemes, words and one line."""
    levels = [SegmentTier('sr', 'phone', label=True),
              TranscriptionTier('ur', 'word'),
              GroupingTier('syllable', 'syllable'),
              MorphemeTier('morphemes', 'word'),
              OrthographyTier('word', 'word'),
              GroupingTier('line', 'line')]
    # Surface realizations: (label, start, end) in seconds.
    srs = [('b', 0, 0.1), ('aa', 0.1, 0.2), ('k', 0.2, 0.3), ('s', 0.3, 0.4),
           ('ah', 0.4, 0.5), ('s', 0.5, 0.6),
           ('er', 0.7, 0.8),
           ('f', 0.9, 1.0), ('er', 1.0, 1.1),
           ('p', 1.2, 1.3), ('ae', 1.3, 1.4), ('k', 1.4, 1.5), ('eng', 1.5, 1.6)]
    # Underlying forms: '.' separates segments, '-' separates morphemes.
    urs = [('b.aa.k.s-ah.z', 0, 0.6), ('aa.r', 0.7, 0.8),
           ('f.ao.r', 0.9, 1.1), ('p.ae.k-ih.ng', 1.2, 1.6)]
    syllables = [(0, 0.3), (0.3, 0.6), (0.7, 0.8), (0.9, 1.1),
                 (1.2, 1.5), (1.5, 1.6)]
    morphemes = [('box-PL', 0, 0.6), ('are', 0.7, 0.8),
                 ('for', 0.9, 1.1), ('pack-PROG', 1.2, 1.6)]
    words = [('boxes', 0, 0.6), ('are', 0.7, 0.8),
             ('for', 0.9, 1.1), ('packing', 1.2, 1.6)]
    lines = [(0, 1.6)]
    levels[0].add(srs)
    levels[1].add(urs)
    levels[2].add(syllables)
    levels[3].add(morphemes)
    levels[4].add(words)
    levels[5].add(lines)
    hierarchy = Hierarchy({'phone': 'syllable', 'syllable': 'word',
                           'word': 'line', 'line': None})
    parser = BaseParser(levels, hierarchy)
    data = parser.parse_discourse('test_syllable_morpheme')
    return data
@pytest.fixture(scope='session')
def graph_db():
    """Connection settings for the local graph and acoustic databases."""
    return {
        'host': 'localhost',
        'graph_http_port': 7474,
        'graph_bolt_port': 7687,
        'acoustic_http_port': 8086,
    }
@pytest.fixture(scope='session')
def untimed_config(graph_db, corpus_data_untimed):
    """Import the untimed corpus into a fresh 'untimed' database; return its config."""
    config = CorpusConfig('untimed', **graph_db)
    with CorpusContext(config) as c:
        c.reset()  # wipe any leftover state from previous runs
        c.add_types(*corpus_data_untimed.types('untimed'))
        c.initialize_import(corpus_data_untimed.speakers,
                            corpus_data_untimed.token_headers,
                            corpus_data_untimed.hierarchy.subannotations)
        c.add_discourse(corpus_data_untimed)
        c.finalize_import(corpus_data_untimed)
    return config
@pytest.fixture(scope='session')
def timed_config(graph_db, corpus_data_timed):
    """Import the timed corpus into a fresh 'timed' database; return its config."""
    config = CorpusConfig('timed', **graph_db)
    with CorpusContext(config) as c:
        c.reset()  # wipe any leftover state from previous runs
        c.add_types(*corpus_data_timed.types('timed'))
        c.initialize_import(corpus_data_timed.speakers,
                            corpus_data_timed.token_headers,
                            corpus_data_timed.hierarchy.subannotations)
        c.add_discourse(corpus_data_timed)
        c.finalize_import(corpus_data_timed)
    return config
@pytest.fixture(scope='session')
def syllable_morpheme_config(graph_db, corpus_data_syllable_morpheme_srur):
    """Import the syllable/morpheme corpus into a fresh database; return its config."""
    config = CorpusConfig('syllable_morpheme', **graph_db)
    with CorpusContext(config) as c:
        c.reset()  # wipe any leftover state from previous runs
        c.add_types(*corpus_data_syllable_morpheme_srur.types('syllable_morpheme'))
        c.initialize_import(corpus_data_syllable_morpheme_srur.speakers,
                            corpus_data_syllable_morpheme_srur.token_headers,
                            corpus_data_syllable_morpheme_srur.hierarchy.subannotations)
        c.add_discourse(corpus_data_syllable_morpheme_srur)
        c.finalize_import(corpus_data_syllable_morpheme_srur)
    return config
@pytest.fixture(scope='session')
def ursr_config(graph_db, corpus_data_ur_sr):
    """CorpusConfig for the UR/SR corpus, freshly imported into the graph."""
    data = corpus_data_ur_sr
    config = CorpusConfig('ur_sr', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.add_types(*data.types('ur_sr'))
        ctx.initialize_import(data.speakers, data.token_headers,
                              data.hierarchy.subannotations)
        ctx.add_discourse(data)
        ctx.finalize_import(data)
    return config
@pytest.fixture(scope='session')
def subannotation_config(graph_db, subannotation_data):
    """CorpusConfig for the subannotation corpus, freshly imported."""
    data = subannotation_data
    config = CorpusConfig('subannotations', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.add_types(*data.types('subannotations'))
        ctx.initialize_import(data.speakers, data.token_headers,
                              data.hierarchy.subannotations)
        ctx.add_discourse(data)
        ctx.finalize_import(data)
    return config
@pytest.fixture(scope='session')
def lexicon_test_data():
    """Word -> {'POS': tag} lookup used by the lexicon-enrichment tests."""
    pos_tags = {'cats': 'NNS', 'are': 'VB', 'cute': 'JJ', 'dogs': 'NNS',
                'too': 'IN', 'i': 'PRP', 'guess': 'VB'}
    return {word: {'POS': tag} for word, tag in pos_tags.items()}
@pytest.fixture(scope='session')
def acoustic_config(graph_db, textgrid_test_dir):
    """Acoustic TextGrid corpus with acousticsim pitch/formant sources."""
    path = os.path.join(textgrid_test_dir, 'acoustic_corpus.TextGrid')
    config = CorpusConfig('acoustic', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.load(inspect_textgrid(path), path)
    config.pitch_algorithm = 'acousticsim'
    config.formant_source = 'acousticsim'
    return config
@pytest.fixture(scope='session')
def acoustic_syllabics():
    """Syllabic (vowel) segment labels present in the acoustic test corpus."""
    return ['ae', 'aa', 'uw', 'ay', 'eh', 'ih', 'aw',
            'ey', 'iy', 'uh', 'ah', 'ao', 'er', 'ow']
@pytest.fixture(scope='session')
def acoustic_utt_config(graph_db, textgrid_test_dir, acoustic_syllabics):
    """Acoustic corpus enriched with pauses, utterances and syllables."""
    path = os.path.join(textgrid_test_dir, 'acoustic_corpus.TextGrid')
    config = CorpusConfig('acoustic_utt', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.load(inspect_textgrid(path), path)
        ctx.encode_pauses(['sil'])
        ctx.encode_utterances(min_pause_length=0)
        ctx.encode_syllabic_segments(acoustic_syllabics)
        ctx.encode_syllables()
    config.pitch_algorithm = 'acousticsim'
    config.formant_source = 'acousticsim'
    return config
@pytest.fixture(scope='session')
def overlapped_config(graph_db, textgrid_test_dir, acoustic_syllabics):
    """MFA-aligned overlapped-speech corpus with utterances and syllables."""
    path = os.path.join(textgrid_test_dir, 'overlapped_speech')
    config = CorpusConfig('overlapped', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.load(inspect_mfa(path), path)
        ctx.encode_pauses(['sil'])
        ctx.encode_utterances(min_pause_length=0)
        ctx.encode_syllabic_segments(acoustic_syllabics)
        ctx.encode_syllables()
    config.pitch_algorithm = 'acousticsim'
    config.formant_source = 'acousticsim'
    return config
@pytest.fixture(scope='session')
def french_config(graph_db, textgrid_test_dir):
    """French TextGrid corpus with pauses and utterances encoded."""
    path = os.path.join(textgrid_test_dir, 'FR001_5.TextGrid')
    config = CorpusConfig('french', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.load(inspect_textgrid(path), path)
        ctx.encode_pauses(['sil', '<SIL>'])
        ctx.encode_utterances(min_pause_length=.15)
    return config
@pytest.fixture(scope='session')
def fave_corpus_config(graph_db, fave_test_dir):
    """Corpus imported from FAVE-format alignments."""
    config = CorpusConfig('fave_test_corpus', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.load(inspect_fave(fave_test_dir), fave_test_dir)
    return config
@pytest.fixture(scope='session')
def summarized_config(graph_db, textgrid_test_dir):
    """Plain acoustic corpus used by the summarization tests."""
    path = os.path.join(textgrid_test_dir, 'acoustic_corpus.TextGrid')
    config = CorpusConfig('summarized', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.load(inspect_textgrid(path), path)
    return config
@pytest.fixture(scope='session')
def stressed_config(graph_db, textgrid_test_dir):
    """MFA-aligned corpus with stress-marked phone labels."""
    path = os.path.join(textgrid_test_dir, 'stressed_corpus.TextGrid')
    config = CorpusConfig('stressed', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.load(inspect_mfa(path), path)
    return config
@pytest.fixture(scope='session')
def partitur_corpus_config(graph_db, partitur_test_dir):
    """Corpus imported from a BAS Partitur file."""
    # The trailing ',2' is part of the actual test file name.
    path = os.path.join(partitur_test_dir, 'partitur_test.par,2')
    config = CorpusConfig('partitur', **graph_db)
    with CorpusContext(config) as ctx:
        ctx.reset()
        ctx.load(inspect_partitur(path), path)
    return config
@pytest.fixture(scope='session')
def praat_path():
    """Locate the Praat executable for the current platform / CI environment."""
    if sys.platform == 'win32':
        return 'praatcon.exe'
    if os.environ.get('TRAVIS', False):
        # Travis CI installs Praat under $HOME/tools.
        return os.path.join(os.environ.get('HOME'), 'tools', 'praat')
    return 'praat'
@pytest.fixture(scope='session')
def reaper_path():
    """Locate the REAPER pitch-tracker executable."""
    if os.environ.get('TRAVIS', False):
        # Travis CI installs reaper under $HOME/tools.
        return os.path.join(os.environ.get('HOME'), 'tools', 'reaper')
    return 'reaper'
@pytest.fixture(scope='session')
def vot_classifier_path(test_dir):
    """Path to the pre-trained SOTC voiceless-stop VOT classifier."""
    classifier_dir = os.path.join(test_dir, 'classifier', 'sotc_classifiers')
    return os.path.join(classifier_dir, 'sotc_voiceless.classifier')
@pytest.fixture(scope='session')
def localhost():
    """Base URL of the locally running test HTTP service."""
    return 'http://localhost:8080'
@pytest.fixture(scope='session')
def stress_pattern_file(test_dir):
    """Path to the stress-pattern lexicon used for enrichment tests."""
    lexicon_dir = os.path.join(test_dir, 'lexicons')
    return os.path.join(lexicon_dir, 'stress_pattern_lex.txt')
@pytest.fixture(scope='session')
def timed_lexicon_enrich_file(test_dir):
    """CSV file enriching the timed corpus lexicon."""
    csv_dir = os.path.join(test_dir, 'csv')
    return os.path.join(csv_dir, 'timed_enrichment.txt')
@pytest.fixture(scope='session')
def acoustic_speaker_enrich_file(test_dir):
    """CSV file with per-speaker metadata for the acoustic corpus."""
    csv_dir = os.path.join(test_dir, 'csv')
    return os.path.join(csv_dir, 'acoustic_speaker_enrichment.txt')
@pytest.fixture(scope='session')
def acoustic_discourse_enrich_file(test_dir):
    """CSV file with per-discourse metadata for the acoustic corpus."""
    csv_dir = os.path.join(test_dir, 'csv')
    return os.path.join(csv_dir, 'acoustic_discourse_enrichment.txt')
@pytest.fixture(scope='session')
def acoustic_inventory_enrich_file(test_dir):
    """Feature file enriching the phone inventory."""
    features_dir = os.path.join(test_dir, 'features')
    return os.path.join(features_dir, 'basic.txt')
#:: 0-.| .-| .|||-| ::-| .||-| ||
 # NOTE(review): Python 2 code-golf (print statement, file() builtin).
 # The expression below re-reads THIS source file via file(__file__) and
 # decodes every line whose FIRST character is '#' (the two '#'-prefixed
 # lines above/below are its input data).  These explanatory comments are
 # deliberately indented one space so x[0] == ' ' and they are ignored by
 # the decoder -- do not add column-0 '#' comments near this block, and do
 # not edit the two data lines: that changes the program's output.
print "".join(("".join((y for y in x)) for x in
( (chr (sum( (x if y != 0 else x*20 for y, x in
enumerate ( len(x) if x != "0" else 0 for x in
(x.replace('|',':.:').replace(':','..')for x in
y))))) for y in x) for x in((x.split() for x in
y)for y in (x.split('-') for x in(x[1:] for x in
file(__file__, 'r') if x[0] == '#'))))))
#. :||-| ::||-| :|||-| .:|-| .-.| :-. .:||
| 408 | 199 |
import re
import sys
from pydantic import ValidationError
from bin.contentctl_project.contentctl_core.application.builder.investigation_builder import InvestigationBuilder
from bin.contentctl_project.contentctl_core.domain.entities.investigation import Investigation
from bin.contentctl_project.contentctl_infrastructure.builder.yml_reader import YmlReader
from bin.contentctl_project.contentctl_core.domain.entities.enums.enums import SecurityContentType
class SecurityContentInvestigationBuilder(InvestigationBuilder):
    """Concrete InvestigationBuilder that loads Investigation entities from YAML.

    Usage: call setObject() to parse a file, then the add*() enrichment
    steps, then getObject() to retrieve the finished entity.
    """
    investigation: Investigation

    def setObject(self, path: str) -> None:
        """Parse the YAML file at *path* into self.investigation.

        On pydantic validation failure, prints the error and exits the
        process with status 1 (CLI-style error handling).
        """
        yml_dict = YmlReader.load_file(path)
        try:
            self.investigation = Investigation.parse_obj(yml_dict)
        except ValidationError as e:
            print('Validation Error for file ' + path)
            print(e)
            sys.exit(1)

    def reset(self) -> None:
        """Drop any previously built investigation."""
        self.investigation = None

    def getObject(self) -> Investigation:
        """Return the investigation built so far."""
        return self.investigation

    def addInputs(self) -> None:
        """Collect $token$ placeholder names from the search string as inputs."""
        # $...$-delimited tokens containing no whitespace and no dots.
        # re.findall with a single capture group already returns the list of
        # group strings, so no append loop (which also shadowed `input`) is needed.
        pattern = r"\$([^\s.]*)\$"
        self.investigation.inputs = re.findall(pattern, self.investigation.search)

    def addLowercaseName(self) -> None:
        """Derive a filesystem-safe, lowercase identifier from the name.

        Bug fix: the original applied the identical replace/lower chain
        twice back-to-back; a single pass is sufficient because the
        transformation is idempotent.
        """
        self.investigation.lowercase_name = (
            self.investigation.name
            .replace(' ', '_')
            .replace('-', '_')
            .replace('.', '_')
            .replace('/', '_')
            .lower()
        )
import numpy as np
from unittest import TestCase
from PIL import Image
from d2vs.ocr import OCR
class OCRTestCases(TestCase):
    """Smoke tests for d2vs OCR on single-line screenshots."""

    def setUp(self):
        self.ocr = OCR()

    def _img_to_np(self, path):
        """Load the image at *path* as a uint8 numpy array."""
        img = Image.open(path)
        return np.asarray(img, dtype='uint8')

    def _check_scan(self, path, expected_text, expected_item_type=None):
        """Assert the image yields exactly one reading with the expected text.

        The item type is only checked when the caller actually supplies an
        expectation for it.
        """
        readings = self.ocr.read(self._img_to_np(path))
        assert len(readings) == 1
        _, text, item_type = readings[0]
        assert text == expected_text
        # Bug fix: guard on the *expected* value, not the detected one.
        # The old `if item_type:` skipped the comparison whenever detection
        # returned a falsy type, and failed spuriously when a type was
        # detected but the caller passed no expectation (None).
        if expected_item_type is not None:
            assert item_type == expected_item_type
| 626 | 222 |
import awacs
import awacs.aws
import awacs.awslambda
import awacs.codecommit
import awacs.ec2
import awacs.iam
import awacs.logs
import awacs.s3
import awacs.sts
import awacs.kms
import troposphere
from troposphere import codepipeline, Ref, iam
from troposphere.s3 import Bucket, VersioningConfiguration
import cumulus.steps.dev_tools
from cumulus.chain import step
class Pipeline(step.Step):
    """Chain step that creates the skeleton of an AWS CodePipeline:
    the artifact S3 bucket (optionally), managed policies for bucket and
    pipeline access, and an empty pipeline resource.  The next step in the
    chain is expected to add a source stage (see handle()).
    """

    def __init__(self,
                 name,
                 bucket_name,
                 pipeline_service_role_arn=None,
                 create_bucket=True,
                 pipeline_policies=None,
                 bucket_policy_statements=None,
                 bucket_kms_key_arn=None,
                 ):
        """
        :type pipeline_service_role_arn: basestring Override the pipeline service role. If you pass this
            the pipeline_policies is not used.
        :type create_bucket: bool if False, will not create the bucket. Will attach policies either way.
        :type bucket_name: the name of the bucket that will be created suffixed with the chaincontext instance name
        :type bucket_policy_statements: [awacs.aws.Statement]
        :type pipeline_policies: [troposphere.iam.Policy]
        :type bucket_kms_key_arn: ARN used to decrypt the pipeline artifacts
        """
        step.Step.__init__(self)
        self.name = name
        self.bucket_name = bucket_name
        self.create_bucket = create_bucket
        self.pipeline_service_role_arn = pipeline_service_role_arn
        self.bucket_policy_statements = bucket_policy_statements
        # Avoid a mutable default: fall back to a fresh empty list.
        self.pipeline_policies = pipeline_policies or []
        self.bucket_kms_key_arn = bucket_kms_key_arn

    def handle(self, chain_context):
        """
        This step adds in the shell of a pipeline.
         * s3 bucket
         * policies for the bucket and pipeline
         * your next step in the chain MUST be a source stage
        :param chain_context:
        :return:
        """
        if self.create_bucket:
            # Versioning is required for CodePipeline artifact buckets.
            pipeline_bucket = Bucket(
                "PipelineBucket%s" % self.name,
                BucketName=self.bucket_name,
                VersioningConfiguration=VersioningConfiguration(
                    Status="Enabled"
                )
            )
            chain_context.template.add_resource(pipeline_bucket)

        default_bucket_policies = self.get_default_bucket_policy_statements(self.bucket_name)

        # Caller-supplied statements become a separate S3 BucketPolicy resource.
        if self.bucket_policy_statements:
            bucket_access_policy = self.get_bucket_policy(
                pipeline_bucket=self.bucket_name,
                bucket_policy_statements=self.bucket_policy_statements,
            )
            chain_context.template.add_resource(bucket_access_policy)

        # Managed IAM policy granting the default bucket access; added to the
        # template further below, and exposed to later steps via metadata.
        pipeline_bucket_access_policy = iam.ManagedPolicy(
            "PipelineBucketAccessPolicy",
            Path='/managed/',
            PolicyDocument=awacs.aws.PolicyDocument(
                Version="2012-10-17",
                Id="bucket-access-policy%s" % chain_context.instance_name,
                Statement=default_bucket_policies
            )
        )

        # Publish bucket name and policy ref so downstream steps can use them.
        chain_context.metadata[cumulus.steps.dev_tools.META_PIPELINE_BUCKET_NAME] = self.bucket_name
        chain_context.metadata[cumulus.steps.dev_tools.META_PIPELINE_BUCKET_POLICY_REF] = Ref(
            pipeline_bucket_access_policy)

        default_pipeline_role = self.get_default_pipeline_role()
        # Use the caller-provided role ARN when given, else the default role's.
        pipeline_service_role_arn = self.pipeline_service_role_arn or troposphere.GetAtt(default_pipeline_role, "Arn")

        # Stages intentionally empty: the next chain step must add a source stage.
        generic_pipeline = codepipeline.Pipeline(
            "Pipeline",
            RoleArn=pipeline_service_role_arn,
            Stages=[],
            ArtifactStore=codepipeline.ArtifactStore(
                Type="S3",
                Location=self.bucket_name,
            )
        )

        if self.bucket_kms_key_arn:
            encryption_config = codepipeline.EncryptionKey(
                "ArtifactBucketKmsKey",
                Id=self.bucket_kms_key_arn,
                Type='KMS',
            )
            generic_pipeline.ArtifactStore.EncryptionKey = encryption_config

        pipeline_output = troposphere.Output(
            "PipelineName",
            Description="Code Pipeline",
            Value=Ref(generic_pipeline),
        )
        pipeline_bucket_output = troposphere.Output(
            "PipelineBucket",
            Description="Name of the input artifact bucket for the pipeline",
            Value=self.bucket_name,
        )

        # Only create the default role resource when no override ARN was given.
        if not self.pipeline_service_role_arn:
            chain_context.template.add_resource(default_pipeline_role)
        chain_context.template.add_resource(pipeline_bucket_access_policy)
        chain_context.template.add_resource(generic_pipeline)
        chain_context.template.add_output(pipeline_output)
        chain_context.template.add_output(pipeline_bucket_output)

    def get_default_pipeline_role(self):
        """Build the default CodePipeline service role with a broad inline
        policy (S3/KMS/CloudFormation/CodeBuild/CodeCommit/Lambda access),
        merged with any caller-supplied pipeline_policies."""
        # TODO: this can be cleaned up by using a policytype and passing in the pipeline role it should add itself to.
        pipeline_policy = iam.Policy(
            PolicyName="%sPolicy" % self.name,
            PolicyDocument=awacs.aws.PolicyDocument(
                Version="2012-10-17",
                Id="PipelinePolicy",
                Statement=[
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        # TODO: actions here could be limited more
                        Action=[awacs.aws.Action("s3", "*")],
                        Resource=[
                            troposphere.Join('', [
                                awacs.s3.ARN(),
                                self.bucket_name,
                                "/*"
                            ]),
                            troposphere.Join('', [
                                awacs.s3.ARN(),
                                self.bucket_name,
                            ]),
                        ],
                    ),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[awacs.aws.Action("kms", "*")],
                        Resource=['*'],
                    ),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[
                            awacs.aws.Action("cloudformation", "*"),
                            awacs.aws.Action("codebuild", "*"),
                        ],
                        # TODO: restrict more accurately
                        Resource=["*"]
                    ),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[
                            awacs.codecommit.GetBranch,
                            awacs.codecommit.GetCommit,
                            awacs.codecommit.UploadArchive,
                            awacs.codecommit.GetUploadArchiveStatus,
                            awacs.codecommit.CancelUploadArchive
                        ],
                        Resource=["*"]
                    ),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[
                            awacs.iam.PassRole
                        ],
                        Resource=["*"]
                    ),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[
                            awacs.aws.Action("lambda", "*")
                        ],
                        Resource=["*"]
                    ),
                ],
            )
        )
        # Trust policy: only CodePipeline may assume this role.
        pipeline_service_role = iam.Role(
            "PipelineServiceRole",
            Path="/",
            AssumeRolePolicyDocument=awacs.aws.Policy(
                Statement=[
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[awacs.sts.AssumeRole],
                        Principal=awacs.aws.Principal(
                            'Service',
                            "codepipeline.amazonaws.com"
                        )
                    )]
            ),
            Policies=[pipeline_policy] + self.pipeline_policies
        )
        return pipeline_service_role

    def get_default_bucket_policy_statements(self, pipeline_bucket):
        """Return the IAM statements granting read/write access to the
        artifact bucket (bucket-level list/versioning, object-level get/put)."""
        bucket_policy_statements = [
            awacs.aws.Statement(
                Effect=awacs.aws.Allow,
                Action=[
                    awacs.s3.ListBucket,
                    awacs.s3.GetBucketVersioning,
                ],
                Resource=[
                    troposphere.Join('', [
                        awacs.s3.ARN(),
                        pipeline_bucket,
                    ]),
                ],
            ),
            awacs.aws.Statement(
                Effect=awacs.aws.Allow,
                Action=[
                    awacs.s3.HeadBucket,
                ],
                Resource=[
                    '*'
                ]
            ),
            awacs.aws.Statement(
                Effect=awacs.aws.Allow,
                Action=[
                    awacs.s3.GetObject,
                    awacs.s3.GetObjectVersion,
                    awacs.s3.PutObject,
                    awacs.s3.ListObjects,
                    awacs.s3.ListBucketMultipartUploads,
                    awacs.s3.AbortMultipartUpload,
                    awacs.s3.ListMultipartUploadParts,
                    awacs.aws.Action("s3", "Get*"),
                ],
                Resource=[
                    troposphere.Join('', [
                        awacs.s3.ARN(),
                        pipeline_bucket,
                        '/*'
                    ]),
                ],
            )
        ]
        return bucket_policy_statements

    def get_bucket_policy(self, pipeline_bucket, bucket_policy_statements):
        """Wrap caller-supplied statements in an S3 BucketPolicy resource."""
        policy = troposphere.s3.BucketPolicy(
            "PipelineBucketPolicy",
            Bucket=pipeline_bucket,
            PolicyDocument=awacs.aws.Policy(
                Statement=bucket_policy_statements,
            ),
        )
        return policy
| 10,285 | 2,629 |
"""
GrooveWalrus: Flash Player
Copyright (C) 2009, 2010
11y3y3y3y43@gmail.com
http://groove-walrus.turnip-town.net
-----
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
"""
#import urllib
#import urllib2
import wx
import wx.xrc as xrc
import os
from main_utils.read_write_xml import xml_utils
from main_utils import system_files
from main_utils import jiwa
#FLASH_ENABLED = True
try:
from main_utils import player_flash
except Exception, expt:
print "Flash plugin: "+ str(Exception) + str(expt)
#FLASH_ENABLED = False
#from wx.lib.flashwin import FlashWindow
#SYSLOC = os.path.abspath(os.path.dirname(sys.argv[0]))
#DIZZLER_SETTINGS = os.path.join(os.getcwd(), 'plugins','flash') + os.sep + "settings_flash.xml"
#DIZZLER = os.path.join(os.getcwd(), 'plugins','flash') + os.sep
DIZZLER_URL = 'http://www.dizzler.com/player/podmini.swf?m='
GROOVESHARK_URL ="http://listen.grooveshark.com/songWidget.swf?hostname=cowbell.grooveshark.com&style=metal&p=1&songID="
RESFILE = os.path.join(os.getcwd(), 'plugins','flash') + os.sep + "layout_flash.xml"
#http://www.dizzler.com/player/podmini.swf?m=chairlift-bruises
#http://www.boostermp3.com
#http://www.jiwa.fm
#http://www.jiwa.fm/res/widget/monotitle.swf?trackId=369589&skin=round
#http://www.jiwa.fr/track/search/q=u2%20one&noRestricted=true
class MainPanel(wx.Dialog):
    """Floating flash-player dialog (Python 2 / wxPython).

    Embeds an IE flash control (main_utils.player_flash.Player), lets the
    user pick the streaming service (GrooveShark / Dizzler / Jiwa), and
    persists window position, size and options to settings_flash.xml.
    """
    def __init__(self, parent, pathToPlugins=None):
        # NOTE(review): assigning RESFILE below makes it a *local* name for
        # the whole function, so if pathToPlugins is None the
        # xrc.XmlResource(RESFILE) call raises UnboundLocalError -- confirm
        # callers always pass pathToPlugins.
        if(not pathToPlugins==None):
            RESFILE = os.path.join(pathToPlugins,'flash') + os.sep + "layout_flash.xml"
        wx.Dialog.__init__(self, parent, -1, "Flash", size=(475,310), style=wx.FRAME_SHAPED|wx.RESIZE_BORDER) #STAY_ON_TOP)
        self.parent = parent
        # Directory where settings_flash.xml is read/written (trailing os.sep).
        self.FLASH_SETTINGS = system_files.GetDirectories(self).MakeDataDirectory('plugins') + os.sep
        # XML Resources can be loaded from a file like this:
        res = xrc.XmlResource(RESFILE)
        # Now create a panel from the resource data
        panel = res.LoadPanel(self, "m_pa_plugin_flash")
        # control references --------------------
        self.pa_flash_player = xrc.XRCCTRL(self, 'm_pa_flash_player')
        #header for dragging and moving
        self.st_flash_header = xrc.XRCCTRL(self, 'm_st_flash_header')
        self.bm_flash_close = xrc.XRCCTRL(self, 'm_bm_flash_close')
        self.bm_flash_tab = xrc.XRCCTRL(self, 'm_bm_flash_tab')
        self.cb_flash_autoload = xrc.XRCCTRL(self, 'm_cb_flash_autoload')
        self.rx_flash_service = xrc.XRCCTRL(self, 'm_rx_flash_service')
        self.bm_flash_tab.Show(False)
        # bindings ----------------
        self.bm_flash_close.Bind(wx.EVT_LEFT_UP, self.CloseMe)
        #self.bm_flash_tab.Bind(wx.EVT_LEFT_UP, self.OnMakeTabClick)
        self.Bind(wx.EVT_CHECKBOX, self.SaveOptions, self.cb_flash_autoload)
        self.Bind(wx.EVT_RADIOBOX, self.SetService, self.rx_flash_service)
        # Mouse events are bound on both the dialog and the header so the
        # window can be dragged by either (borderless frame, see handlers below).
        self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
        self.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        self.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
        self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
        self.st_flash_header.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
        self.st_flash_header.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        self.st_flash_header.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
        self.st_flash_header.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
        #self.st_flash_using.SetLabel('Using: ' + self.parent.web_music_type)
        #self.bu_update_restart.Enable(False)
        # set layout --------------
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(panel, 1, wx.EXPAND|wx.ALL, 5)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)
        #self.LoadSetings()
        #*** if FLASH_ENABLED:
        # Stop any playback in the host app before taking over as backend.
        try:
            self.parent.StopAll()
        except Exception, expt:
            print "Flash plug-in: " + str(Exception) + str(expt)
        #flash windows
        # Create the embedded flash control and register it as the host
        # application's player/backend; warn the user if creation fails.
        try:
            self.flash_window = player_flash.Player(self) #.mediaPlayer #FlashWindow(self.pa_flash_player, style=wx.NO_BORDER, size=wx.Size(500,140))#, size=(400, 120))
            self.parent.player = self.flash_window
            #self.flash.Show(True)
            flash_sizer = wx.BoxSizer(wx.VERTICAL)
            flash_sizer.Add(self.flash_window.mediaPlayer, 1, wx.EXPAND|wx.ALL, 5)
            self.pa_flash_player.SetSizer(flash_sizer)
            self.parent.use_web_music = True
            self.parent.flash = self.flash_window
            self.parent.use_backend = 'flash'
        except Exception, expt:
            print "Flash plug-in: " + str(Exception) + str(expt)
            dlg = wx.MessageDialog(self, "Flash for Internet Explorer must be installed for this plug-in to work.", 'Alert', wx.OK | wx.ICON_WARNING)
            if (dlg.ShowModal() == wx.ID_OK):
                dlg.Destroy()
        ##self.parent.web_music_url = DIZZLER_URL
        ##self.parent.web_music_type = "Dizzler"
        ##self.MakeModal(False)
        self.LoadSettings()
        #self.SetService(None)
        #set a reciever to catch new song events
        self.parent.SetReceiver(self, 'main.playback.load')
        # ***else:
        # ***    dlg = wx.MessageDialog(self, "Flash for Internet Explorer must be installed for this plug-in to work.", 'Alert', wx.OK | wx.ICON_WARNING)
        # ***    if (dlg.ShowModal() == wx.ID_OK):
        # ***        dlg.Destroy()

    def GenericReceiverAction(self, message):
        """Sets the pubsub receiver action."""
        # Recompute the stream URL for the newly loaded song.
        self.GetService(None)

    def CloseMe(self, event=None):
        """Persist options, detach from the host player and destroy the dialog."""
        self.SaveOptions(None)
        self.parent.use_web_music = False
        self.parent.OnStopClick(None)
        self.parent.SetBackend(None)
        self.Destroy()

    def OnMakeTabClick(self, event=None):
        # Intentionally disabled; see OnMakeTabClick2 for the old behavior.
        pass

    def OnMakeTabClick2(self, event=None):
        """Dock the flash player as a tab in the main player (legacy)."""
        # NOTE(review): FlashWindow is not imported (its import is commented
        # out at module top), so this path would raise NameError if invoked.
        # transfer plug-in to tab in main player
        # make a new page
        page1 = PageOne(self.parent.nb_main)
        # add the pages to the notebook
        self.parent.nb_main.AddPage(page1, "Flash")
        #flash windows
        flash_window = FlashWindow(page1, style=wx.NO_BORDER, size=wx.Size(500,140))#, size=(400, 120))
        #self.flash.Show(True)
        flash_sizer = wx.BoxSizer(wx.VERTICAL)
        flash_sizer.Add(flash_window, 1, wx.EXPAND|wx.ALL, 5)
        page1.SetSizer(flash_sizer)
        self.parent.use_web_music = True
        #self.parent.flash = flash_window
        self.Destroy()

    #def LoadFlashSong(self, artist, song):
        #start playback
        #self.flash_window.movie = DIZZLER_URL + artist + "-" + song
    #def StopFlashSong(self):
        #stop playback
        #self.flash_window.movie = 'temp.swf'
    #def SetDizzler(self, event):
        #stop playback
        #self.parent.web_music_url = DIZZLER_URL
        #self.parent.web_music_type = "Dizzler"
        #self.st_flash_using.SetLabel('Using Dizzler')
    #def SetGrooveShark(self, event):
        #stop playback
        #self.parent.web_music_url = DIZZLER_URL
        #self.parent.web_music_type = "GrooveShark"
        #self.st_flash_using.SetLabel('Using GrooveShark')

    def GetService(self, event):
        """Build the stream URL for the current song from the selected service
        radio box (0=GrooveShark, 1=Dizzler, otherwise Jiwa)."""
        service = self.rx_flash_service.GetSelection()
        #print service
        if service == 0:
            #self.parent.web_music_url =''
            self.parent.current_song.song_url = GROOVESHARK_URL + str(self.parent.current_song.song_id)
            self.parent.web_music_type = "GrooveShark"
            print "GROOVESHARK"
        elif service == 1:
            #self.parent.web_music_url = DIZZLER_URL
            self.parent.current_song.song_url = DIZZLER_URL + self.parent.current_song.artist + "-" + self.parent.current_song.song
            self.parent.web_music_type = "Dizzler"
            print "DIZZLER"
        else:
            # Jiwa needs a search round-trip to resolve the stream URL.
            artist = self.parent.current_song.artist
            song = self.parent.current_song.song
            self.parent.current_song.song_url = jiwa.JiwaMusic().GetFlashUrlFirstResult(artist, song)
            self.parent.web_music_type = "Jiwa"
            print "JIWA"

    def SetService(self, event):
        """Radio-box handler: persist the newly selected service."""
        self.SaveOptions(None)
        #MouseClicker(25, 220)

    def LoadSettings(self):
        """Restore autoload/service/window geometry from settings_flash.xml."""
        #load the setting from settings_falsh.xml if it exists
        settings_dict = xml_utils().get_generic_settings(self.FLASH_SETTINGS + "settings_flash.xml")
        #print settings_dict
        if len(settings_dict) >= 1:
            autoload=0
            if settings_dict.has_key('autoload'):
                autoload = int(settings_dict['autoload'])
            self.cb_flash_autoload.SetValue(autoload)
            service=0
            if settings_dict.has_key('service'):
                service = int(settings_dict['service'])
            self.rx_flash_service.SetSelection(service)
            if settings_dict.has_key('window_position'):
                # not good, replace eval
                self.SetPosition(eval(settings_dict['window_position']))
            if settings_dict.has_key('window_size'):
                self.SetSize(eval(settings_dict['window_size']))

    def SaveOptions(self, event):
        """Write autoload/service/window geometry to settings_flash.xml."""
        # save value to options.xml
        window_dict = {}
        window_dict['autoload'] = str(int(self.cb_flash_autoload.GetValue()))
        window_dict['service'] = str(int(self.rx_flash_service.GetSelection()))
        window_dict['window_position'] = str(self.GetScreenPosition())
        window_dict['window_size'] = str(self.GetSize())#[0], self.GetSize()[1]))
        xml_utils().save_generic_settings(self.FLASH_SETTINGS, "settings_flash.xml", window_dict)

    # ---------------------------------------------------------
    # titlebar-like move and drag
    def OnMouseLeftDown(self, evt):
        """Begin a window drag: remember click and window origins, grab mouse."""
        self.Refresh()
        self.ldPos = evt.GetEventObject().ClientToScreen(evt.GetPosition())
        self.wPos = self.ClientToScreen((0,0))
        self.CaptureMouse()

    def OnMouseMotion(self, evt):
        """While dragging, move the window by the mouse delta since click."""
        #print evt.GetPosition()
        #print self.GetScreenPosition()
        if evt.Dragging() and evt.LeftIsDown():
            dPos = evt.GetEventObject().ClientToScreen(evt.GetPosition())
            #nPos = (self.wPos.x + (dPos.x - self.ldPos.x), -2)
            try:
                nPos = (self.wPos.x + (dPos.x - self.ldPos.x), self.wPos.y + (dPos.y - self.ldPos.y))
                self.Move(nPos)
            except Exception, expt:
                pass

    def OnMouseLeftUp(self, evt):
        """End a drag; ignore the assertion if the mouse was never captured."""
        try:
            self.ReleaseMouse()
        except wx._core.PyAssertionError:
            pass

    def OnRightUp(self, evt):
        # Right-click intentionally does nothing (hide/destroy left disabled).
        #self.hide_me()
        #self..Destroy()
        pass
class PageOne(wx.Panel):
    """Bare notebook page used when docking the flash player as a tab
    (see MainPanel.OnMakeTabClick2)."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
# ===================================================================
charset = 'utf-8'  # encoding used by url_quote for unicode <-> str conversion
def url_quote(s, safe='/', want_unicode=False):
    """Coerce *s* to a byte string, encoding unicode via the module charset.

    Historically a wrapper around urllib.quote; the quoting call is
    currently commented out, so only the encode/str conversions remain.
    *safe* is kept for interface compatibility with urllib.quote.
    With want_unicode=True the result is decoded back to unicode.
    """
    if isinstance(s, unicode):
        converted = s.encode(charset)
    elif isinstance(s, str):
        converted = s
    else:
        converted = str(s)
    #converted = urllib.quote(converted, safe)
    if want_unicode:
        return converted.decode(charset)  # ascii would also work
    return converted
# ===================================================================
#import win32api
#import win32con
#win32api.keybd_event(win32con.VK_F3, 0) # this will press F3 key
#def MouseClicker(position_x, position_y):
# print position_x
# print win32api.GetFocus() # this will return you the handle of the window which has focus
# win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, position_x, position_y, 0, 0) # this will press mouse left button
#win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 20, 20, 0, 0) # this will raise mouse left button
#win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, 20, 20, 0, 0) # this will raise mouse left button
#win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, position_x, position_y) # this will press mouse left button
#win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, position_x, position_y) # this will press mouse left button
# print "clicky"
| 13,539 | 4,542 |
import logging
import urllib2
logger = logging.getLogger('sitetest')  # module-level logger for the sitetest package
def reload_url(url, user_agent_string):
    """Fetch *url* once with the given User-Agent and log the response code.

    The response body is discarded; only the HTTP status is of interest.
    """
    request = urllib2.Request(url)
    request.add_header('User-agent', user_agent_string)
    response = urllib2.urlopen(request)
    try:
        logger.info("Response: %s: %s" % (response.code, response))
    finally:
        # Bug fix: close the response so the underlying socket is not leaked.
        response.close()
| 308 | 105 |
from unittest.mock import patch, Mock, call
from federation.fetchers import retrieve_remote_profile, retrieve_remote_content
class TestRetrieveRemoteContent:
    """retrieve_remote_content routes to the protocol utils module."""

    @patch("federation.fetchers.importlib.import_module")
    def test_calls_activitypub_retrieve_and_parse_content(self, mock_import):
        """A plain URL id is fetched via ActivityPub with default arguments."""
        utils_module = Mock()
        mock_import.return_value = utils_module
        retrieve_remote_content("https://example.com/foobar")
        utils_module.retrieve_and_parse_content.assert_called_once_with(
            id="https://example.com/foobar", guid=None, handle=None, entity_type=None, sender_key_fetcher=None,
        )

    @patch("federation.fetchers.importlib.import_module")
    def test_calls_diaspora_retrieve_and_parse_content(self, mock_import):
        """A handle triggers a Diaspora fetch; the id doubles as the guid."""
        utils_module = Mock()
        mock_import.return_value = utils_module
        retrieve_remote_content("1234", handle="user@example.com", entity_type="post", sender_key_fetcher=sum)
        utils_module.retrieve_and_parse_content.assert_called_once_with(
            id="1234", guid="1234", handle="user@example.com", entity_type="post", sender_key_fetcher=sum,
        )
class TestRetrieveRemoteProfile:
    """retrieve_remote_profile dispatches on id type (URL vs handle)."""

    @patch("federation.fetchers.importlib.import_module", autospec=True)
    @patch("federation.fetchers.validate_handle", autospec=True, return_value=False)
    @patch("federation.fetchers.identify_protocol_by_id", autospec=True, return_value=Mock(PROTOCOL_NAME='activitypub'))
    def test_retrieve_remote_profile__url_calls_activitypub_retrieve(self, mock_identify, mock_validate, mock_import):
        """A URL id is resolved once, via the ActivityPub utils module only."""
        utils_module = Mock()
        mock_import.return_value = utils_module
        retrieve_remote_profile("https://example.com/foo")
        mock_import.assert_called_once_with("federation.utils.activitypub")
        utils_module.retrieve_and_parse_profile.assert_called_once_with("https://example.com/foo")

    @patch("federation.fetchers.importlib.import_module", autospec=True)
    @patch("federation.fetchers.validate_handle", autospec=True, return_value=True)
    @patch("federation.fetchers.identify_protocol_by_id", autospec=True)
    def test_retrieve_remote_profile__handle_calls_both_activitypub_and_diaspora_retrieve(
            self, mock_identify, mock_validate, mock_import,
    ):
        """A handle tries ActivityPub first, then falls back to Diaspora."""
        utils_module = Mock(retrieve_and_parse_profile=Mock(return_value=None))
        mock_import.return_value = utils_module
        retrieve_remote_profile("user@example.com")
        expected_imports = [
            call("federation.utils.activitypub"),
            call("federation.utils.diaspora"),
        ]
        assert mock_import.call_args_list == expected_imports
        expected_lookups = [call("user@example.com")] * 2
        assert utils_module.retrieve_and_parse_profile.call_args_list == expected_lookups
| 2,813 | 903 |
from django import template
from blog.models import Category
register = template.Library()


@register.inclusion_tag('inc/menu_tpl.html')
def show_menu(menu_class='menu'):
    """Render the site menu from all blog categories.

    *menu_class* is passed through to the template so callers can vary the
    CSS class of the rendered menu.
    """
    return {
        'categories': Category.objects.all(),
        'menu_class': menu_class,
    }
| 278 | 81 |
from __future__ import division, print_function, absolute_import
import numpy as np
from highway_env import utils
from highway_env.envs.abstract import AbstractEnv
from highway_env.road.lane import LineType, StraightLane, SineLane, LanesConcatenation
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.control import ControlledVehicle, MDPVehicle, CarSim, FreeControl
from highway_env.vehicle.behavior import IDMVehicle
from highway_env.vehicle.dynamics import RedLight
import time
import random
class MergeEnvOut(AbstractEnv):
"""
A highway merge negotiation environment.
The ego-vehicle is driving on a highway and approached a merge, with some vehicles incoming on the access ramp.
It is rewarded for maintaining a high velocity and avoiding collisions, but also making room for merging
vehicles.
"""
COLLISION_REWARD = -1
RIGHT_LANE_REWARD = 0.1
HIGH_VELOCITY_REWARD = 0.2
MERGING_VELOCITY_REWARD = -0.5
LANE_CHANGE_REWARD = -0.05
DEFAULT_CONFIG = {"other_vehicles_type": "highway_env.vehicle.behavior.IDMVehicle",
"incoming_vehicle_destination": None,
"other_vehicles_destination": None}
def __init__(self):
super(MergeEnvOut, self).__init__()
self.config = self.DEFAULT_CONFIG.copy()
self.steps = 0
# self.make_road()
# self.reset()
# self.double_merge()
# self.make_vehicles()
def configure(self, config):
self.config.update(config)
    def _observation(self):
        # Delegates to AbstractEnv's observation; no merge-specific features.
        return super(MergeEnvOut, self)._observation()
def _reward(self, action):
"""
The vehicle is rewarded for driving with high velocity on lanes to the right and avoiding collisions, but
an additional altruistic penalty is also suffered if any vehicle on the merging lane has a low velocity.
:param action: the action performed
:return: the reward of the state-action transition
"""
action_reward = {0: self.LANE_CHANGE_REWARD,
1: 0,
2: self.LANE_CHANGE_REWARD,
3: 0,
4: 0}
reward = self.COLLISION_REWARD * self.vehicle.crashed \
+ self.RIGHT_LANE_REWARD * self.vehicle.lane_index / (len(self.road.lanes) - 2) \
+ self.HIGH_VELOCITY_REWARD * self.vehicle.velocity_index / (self.vehicle.SPEED_COUNT - 1)
# Altruistic penalty
for vehicle in self.road.vehicles:
if vehicle.lane_index == len(self.road.lanes) - 1 and isinstance(vehicle, ControlledVehicle):
reward += self.MERGING_VELOCITY_REWARD * \
(vehicle.target_velocity - vehicle.velocity) / vehicle.target_velocity
return reward + action_reward[action]
def _is_terminal(self):
"""
The episode is over when a collision occurs or when the access ramp has been passed.
"""
return self.vehicle.crashed or self.vehicle.position[0] > 300
def reset(self):
    """Start a new episode: rebuild the road network, repopulate the
    vehicles (including the ego vehicle), and return the initial observation.

    :return: the initial observation for the new episode
    """
    # Removed leftover debug print("enter reset") that polluted stdout on
    # every episode reset.
    self.make_roads()
    self.make_vehicles()
    return self._observation()
def make_roads(self):
    """Build the road network: a 4-lane straight highway (lanes at y = 0, 4,
    8, 12) from x=0 to x=300, plus an exit ramp peeling off the bottom lane
    at x=200 via a sine transition toward "x2"."""
    net = RoadNetwork()
    n, c, s = LineType.NONE, LineType.CONTINUOUS, LineType.STRIPED
    # Lane 0 (top, y=0): continuous left edge, striped right edge.
    net.add_lane("s1", "inter1", StraightLane(np.array([0, 0]), np.array([100, 0]), line_types=[c, s]))
    net.add_lane("inter1", "inter2", StraightLane(np.array([100, 0]), np.array([150, 0]), line_types=[c, s]))
    net.add_lane("inter2", "inter3", StraightLane(np.array([150, 0]), np.array([200, 0]), line_types=[c, s]))
    net.add_lane("inter3", "x1", StraightLane(np.array([200, 0]), np.array([300, 0]), line_types=[c, s]))
    # Lane 1 (y=4): striped on both sides.
    net.add_lane("s1", "inter1", StraightLane(np.array([0, 4]), np.array([100, 4]), line_types=[s, s]))
    net.add_lane("inter1", "inter2", StraightLane(np.array([100, 4]), np.array([150, 4]), line_types=[s, s]))
    net.add_lane("inter2", "inter3", StraightLane(np.array([150, 4]), np.array([200, 4]), line_types=[s, s]))
    net.add_lane("inter3", "x1", StraightLane(np.array([200, 4]), np.array([300, 4]), line_types=[s, s]))
    # Lane 2 (y=8): becomes the rightmost through-lane past x=150.
    net.add_lane("s1", "inter1", StraightLane(np.array([0, 8]), np.array([100, 8]), line_types=[s, s]))
    net.add_lane("inter1", "inter2", StraightLane(np.array([100, 8]), np.array([150, 8]), line_types=[s, s]))
    net.add_lane("inter2", "inter3", StraightLane(np.array([150, 8]), np.array([200, 8]), line_types=[s, c]))
    net.add_lane("inter3", "x1", StraightLane(np.array([200, 8]), np.array([300, 8]), line_types=[s, c]))
    # Lane 3 (y=12) feeds the exit ramp: a sine lane drops it by `amplitude`
    # over 50m, then a straight segment continues to the "x2" exit.
    amplitude = 4.5
    net.add_lane("s1", "inter1", StraightLane(np.array([0, 12]), np.array([100, 12]), line_types=[s, c]))
    net.add_lane("inter1", "inter2", StraightLane(np.array([100, 12]), np.array([150, 12]), line_types=[s, c]))
    net.add_lane("inter2", "ee", StraightLane(np.array([150, 12]), np.array([200, 12]), line_types=[s, c], forbidden=True))
    net.add_lane("ee", "ex",
                 SineLane(np.array([200, 12 + amplitude]), np.array([250, 12 + amplitude]), -amplitude,
                          2 * np.pi / (2 * 50), np.pi / 2, line_types=[c, c], forbidden=True))
    net.add_lane("ex", "x2",
                 StraightLane(np.array([250, 17 + amplitude]), np.array([300, 17 + amplitude]), line_types=[c, c],
                              forbidden=True))
    road = Road(network=net, np_random=self.np_random)
    # road.vehicles.append(RedLight(road, [150, 0]))
    # road.vehicles.append(RedLight(road, [150, 4]))
    # road.vehicles.append(RedLight(road, [150, 8]))
    self.road = road
def make_vehicles(self):
    """
    Populate a road with several vehicles on the highway and on the merging lane, as well as an ego-vehicle.
    :return: the ego-vehicle
    """
    max_l = 300  # NOTE(review): unused in the current body -- kept from an older variant.
    road = self.road
    # Class of the non-ego traffic, resolved from the config dotted path.
    other_vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
    car_number_each_lane = 2
    # reset_position_range = (30, 40)
    # reset_lane = random.choice(road.lanes)
    # The ego vehicle is spawned somewhere on lane ("s1", "inter1", 1).
    reset_lane = ("s1", "inter1", 1)
    ego_vehicle = None
    # Candidate spawn lanes and reachable exits for the traffic.
    birth_place = [("s1", "inter1", 0), ("s1", "inter1", 1), ("s1", "inter1", 2), ("s1", "inter1", 3)]
    destinations = ["x1", "x2"]
    position_deviation = 10
    velocity_deviation = 2
    # print("graph:", self.road.network.graph, "\n")
    for l in self.road.network.LANES:
        lane = road.network.get_lane(l)
        cars_on_lane = car_number_each_lane
        reset_position = None
        if l == reset_lane:
            # Reserve one extra slot on the ego's lane and pick which slot
            # becomes the ego vehicle.
            cars_on_lane += 1
            reset_position = random.choice(range(1, car_number_each_lane))
            # reset_position = 2
        for i in range(cars_on_lane):
            if i == reset_position and not ego_vehicle:
                # Spawn the ego vehicle at the start of its lane, routed to exit "x2".
                ego_lane = self.road.network.get_lane(("s1", "inter1", 1))
                ego_vehicle = IDMVehicle(self.road,
                                         ego_lane.position(0, 1),
                                         velocity=10,
                                         heading=ego_lane.heading_at(0)).plan_route_to("x2")
                # print("ego_route:", ego_vehicle.route, "\n")
                # print("ego_relative_offset:",ego_vehicle.lane.local_coordinates(ego_vehicle.position)[1])
                ego_vehicle.id = 0  # id 0 marks the ego vehicle
                road.vehicles.append(ego_vehicle)
                self.vehicle = ego_vehicle
            else:
                # Spawn a background vehicle on a random lane with randomized
                # longitudinal position and velocity.
                car = other_vehicles_type.make_on_lane(road, birth_place[np.random.randint(0, 4)],
                                                       longitudinal=5 + np.random.randint(1,
                                                                                          10) * position_deviation,
                                                       velocity=5 + np.random.randint(1, 5) * velocity_deviation)
                # Destination is either fixed by config or chosen at random.
                if self.config["other_vehicles_destination"] is not None:
                    destination = destinations[self.config["other_vehicles_destination"]]
                else:
                    destination = destinations[np.random.randint(0, 2)]
                # print("destination:",destination)
                car.plan_route_to(destination)
                car.randomize_behavior()
                road.vehicles.append(car)
                lane.vehicles.append(car)
    # road.vehicles.append(
    #     other_vehicles_type(road, l.position((i + 1) * np.random.randint(*reset_position_range), 0),
    #                         velocity=np.random.randint(18, 25), dst=3, max_length=max_l))
    # for l in road.lanes[3:]:
    #     cars_on_lane = car_number_each_lane
    #     reset_position = None
    #     if l is reset_lane:
    #         cars_on_lane+=1
    #         reset_position = random.choice(range(1,car_number_each_lane))
    #     for i in range(cars_on_lane):
    #         if i == reset_position:
    #             ego_vehicle = ControlledVehicle(road, l.position((i+1) * np.random.randint(*reset_position_range), 0), velocity=20,max_length=max_l)
    #             road.vehicles.append(ego_vehicle)
    #             self.vehicle = ego_vehicle
    #         else:
    #             road.vehicles.append(other_vehicles_type(road, l.position((i+1) * np.random.randint(*reset_position_range), 0), velocity=np.random.randint(18,25),dst=2,rever=True,max_length=max_l))
    # Sort each lane's vehicles front-to-back and record each vehicle's
    # index within its lane (used elsewhere as queue position).
    for i in range(self.road.network.LANES_NUMBER):
        lane = road.network.get_lane(self.road.network.LANES[i])
        # print("lane:", lane.LANEINDEX, "\n")
        lane.vehicles = sorted(lane.vehicles, key=lambda x: lane.local_coordinates(x.position)[0])
        # print("len of lane.vehicles:", len(lane.vehicles), "\n")
        for j, v in enumerate(lane.vehicles):
            # print("i:",i,"\n")
            v.vehicle_index_in_line = j
def fake_step(self):
    """Advance the simulation one policy step without applying an ego action,
    extract features from the resulting state, and inject two new incoming
    vehicles.

    :return: tuple (terminal flag, extracted features)
    """
    # Run the simulation at SIMULATION_FREQUENCY for one policy period.
    for k in range(int(self.SIMULATION_FREQUENCY // self.POLICY_FREQUENCY)):
        self.road.act()
        self.road.step(1 / self.SIMULATION_FREQUENCY)
        # Automatically render intermediate simulation steps if a viewer has been launched
        self._automatic_rendering()
        # Stop at terminal states
        if self.done or self._is_terminal():
            break
    self.enable_auto_render = False
    self.steps += 1
    # Local import avoids a circular import at module load time --
    # presumably highway_env.extractors imports this module; verify.
    from highway_env.extractors import Extractor
    extractor = Extractor()
    extractor_features = extractor.FeatureExtractor(self.road.vehicles, 0, 1)
    # Inject two fresh vehicles at the lane entrances each fake step.
    for i in range(2):
        birth_place = [("s1", "inter1", 0), ("s1", "inter1", 1), ("s1", "inter1", 2), ("s1", "inter1", 3)]
        destinations = ["x1", "x2"]
        # position_deviation = 5
        velocity_deviation = 1.5
        other_vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
        birth = birth_place[np.random.randint(0, 4)]
        lane = self.road.network.get_lane(birth)
        car = other_vehicles_type.make_on_lane(self.road, birth,
                                               longitudinal=0,
                                               velocity=5 + np.random.randint(1, 10) * velocity_deviation)
        # Destination fixed by config or chosen at random.
        if self.config["incoming_vehicle_destination"] is not None:
            destination = destinations[self.config["incoming_vehicle_destination"]]
        else:
            destination = destinations[np.random.randint(0, 2)]
        car.plan_route_to(destination)
        car.randomize_behavior()
        self.road.vehicles.append(car)
        lane.vehicles.append(car)
    # obs = self._observation()
    # reward = self._reward(action)
    terminal = self._is_terminal()
    info = {}  # NOTE(review): unused; kept for gym step-API symmetry.
    return terminal,extractor_features
# Entry-point guard: this module only defines the environment; running it
# directly does nothing.
if __name__ == '__main__':
    pass
| 12,366 | 4,105 |
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.listitems.listitem import ListItem
class ListItemCollection(BaseEntityCollection):
    """List Item collection"""
    def __init__(self, context, resource_path=None):
        """
        :param context: client runtime context used to issue requests
        :param resource_path: optional resource path of this collection on the server
        """
        # The collection is typed to produce ListItem entities.
        super(ListItemCollection, self).__init__(context, ListItem, resource_path)
| 355 | 95 |
# pylint: disable=C0114
# Computes the current sprint day and posts it as a Slack channel topic.
# TODO: Convert this into a function XD
from datetime import date
from datetime import timedelta as delta
import os, requests, sys

# Every sprint runs for 100 days from its start date.
length = delta(days = 100)
today = date.today()
# Sprint attributes:
# - start dates are 1/1, 5/1, and 9/1 (year doesn't matter)
# - end dates are 100 days after the start date
sprints = {
    1:{'start':date(today.year, 1, 1), 'end':date(today.year, 1, 1) + length},
    2:{'start':date(today.year, 5, 1), 'end':date(today.year, 5, 1) + length},
    3:{'start':date(today.year, 9, 1), 'end':date(today.year, 9, 1) + length},
    # Sentinel: next year's first sprint, used only by the lookahead below.
    # It deliberately has no 'end'.
    4:{'start':date(today.year+1, 1, 1)}
}
print("Date : {}".format(today))
for sprint in sprints:
    # Skip entries without an 'end' (the sentinel sprint 4). Previously this
    # loop raised KeyError whenever today fell outside sprints 1-3.
    if 'end' not in sprints[sprint]:
        continue
    if sprints[sprint]['start'] <= today <= sprints[sprint]['end']:
        # Day counts are 1-based (the start date is "Day 1").
        current = today - sprints[sprint]['start'] + delta(days = 1)
        remaining = sprints[sprint]['end'] - today - delta(days = 1)
        print("Sprint : {}".format(sprint))
        print("Day : {}".format(current.days))
        print("Remaining : {}".format(remaining.days))
        topic = "Sprint {}: Day {} ({} days remaining)".format(sprint, current.days, remaining.days)
        # TODO: Convert this into a function; arg would be the topic
        # Update the Slack channel topic via the Web API.
        token = os.getenv('SLACK_AUTH_TOKEN')
        channel = os.getenv('SLACK_CHANNEL_ID')
        url = 'https://slack.com/api/conversations.setTopic'
        payload = {'channel':channel, 'topic': topic}
        headers = {'Authorization': "Bearer {}".format(token)}
        response = requests.request("POST", url, headers=headers, data=payload)
        print(response.text.encode('utf8'))
        sys.exit(0)
print("No sprint in progress")
# Between sprints: report how long until the next one starts.
for sprint in range(1,4):
    if sprints[sprint]['end'] <= today <= sprints[sprint+1]['start']:
        next_sprint = sprints[sprint+1]['start'] - today
        print("Next sprint starts in {} days".format(next_sprint.days))
        break
| 1,947 | 683 |
from uuid import uuid4
from boto3.dynamodb.conditions import Key
DEFAULT_USERNAME = "default"


class TodoDB(object):
    """Abstract interface for a per-user todo-item store.

    Every backend keys items by (username, uid); the base methods now carry
    the username parameter all implementations require, and raise instead of
    silently returning None when a backend forgets to override one.
    """

    def list_items(self, username=DEFAULT_USERNAME):
        """Return all items belonging to *username*."""
        raise NotImplementedError

    def add_item(self, description, metadata=None, username=DEFAULT_USERNAME):
        """Create a new unstarted item and return its uid."""
        raise NotImplementedError

    def get_item(self, uid, username=DEFAULT_USERNAME):
        """Return the item identified by *uid*."""
        raise NotImplementedError

    def delete_item(self, uid, username=DEFAULT_USERNAME):
        """Remove the item identified by *uid*."""
        raise NotImplementedError

    def update_item(self, uid, description=None, state=None, metadata=None, username=DEFAULT_USERNAME):
        """Update only the provided fields of an existing item."""
        raise NotImplementedError


class InMemoryTodoDB(TodoDB):
    """TodoDB backed by a nested dict: {username: {uid: item_dict}}."""

    def __init__(self, state=None):
        # Allow injecting a pre-built state (e.g. for tests); default empty.
        if state is None:
            state = {}
        self._state = state

    def list_all_items(self):
        """Return the items of every known user, flattened into one list."""
        all_items = []
        for username in self._state:
            all_items.extend(self.list_items(username))
        return all_items

    def list_items(self, username=DEFAULT_USERNAME):
        # Unknown users simply have no items.
        return self._state.get(username, {}).values()

    def add_item(self, description, metadata=None, username=DEFAULT_USERNAME):
        """Create a new unstarted item for *username* and return its uid."""
        if username not in self._state:
            self._state[username] = {}
        uid = str(uuid4())
        self._state[username][uid] = {
            "uid": uid,
            "description": description,
            "state": "unstarted",
            "metadata": metadata if metadata is not None else {},
            "username": username,
        }
        return uid

    def get_item(self, uid, username=DEFAULT_USERNAME):
        # Raises KeyError for unknown user/uid, matching dict semantics.
        return self._state[username][uid]

    def delete_item(self, uid, username=DEFAULT_USERNAME):
        del self._state[username][uid]

    def update_item(self, uid, description=None, state=None, metadata=None, username=DEFAULT_USERNAME):
        """Update only the fields that were explicitly provided."""
        item = self._state[username][uid]
        if description is not None:
            item["description"] = description
        if state is not None:
            item["state"] = state
        if metadata is not None:
            item["metadata"] = metadata
class DynamoDBTodo(TodoDB):
    """TodoDB backed by a DynamoDB table keyed on (username, uid)."""
    def __init__(self, table_resource):
        # A boto3 DynamoDB Table resource (or any compatible stub).
        self._table = table_resource
    def list_all_items(self):
        # Full table scan -- fine for small tables, expensive at scale.
        response = self._table.scan()
        return response["Items"]
    def list_items(self, username=DEFAULT_USERNAME):
        # Query by partition key to fetch one user's items only.
        response = self._table.query(KeyConditionExpression=Key("username").eq(username))
        return response["Items"]
    def add_item(self, description, metadata=None, username=DEFAULT_USERNAME):
        """Create a new unstarted item for *username* and return its uid."""
        uid = str(uuid4())
        self._table.put_item(Item={"username": username, "uid": uid, "description": description, "state": "unstarted", "metadata": metadata if metadata is not None else {}})
        return uid
    def get_item(self, uid, username=DEFAULT_USERNAME):
        # KeyError propagates if the item does not exist ("Item" missing).
        response = self._table.get_item(Key={"username": username, "uid": uid})
        return response["Item"]
    def delete_item(self, uid, username=DEFAULT_USERNAME):
        self._table.delete_item(Key={"username": username, "uid": uid})
    def update_item(self, uid, description=None, state=None, metadata=None, username=DEFAULT_USERNAME):
        """Read-modify-write update of the provided fields.

        NOTE(review): this is not atomic -- concurrent writers can clobber
        each other; a DynamoDB UpdateExpression would avoid the race.
        """
        item = self.get_item(uid, username)
        if description is not None:
            item["description"] = description
        if state is not None:
            item["state"] = state
        if metadata is not None:
            item["metadata"] = metadata
        self._table.put_item(Item=item)
import sqlite3

# Build (or rebuild) a small demo table, then run a few sanity queries on it.
sl_conn = sqlite3.connect('demo_data.sqlite3')
sl_cur = sl_conn.cursor()
# Creating table demo
table = """
CREATE TABLE demo(
    s VARCHAR (10),
    x INT,
    y INT
);
"""
# IF EXISTS keeps the first run from failing and makes the script re-runnable.
sl_cur.execute('DROP TABLE IF EXISTS demo')
sl_cur.execute(table)
# Checking for table creation accuracy
sl_cur.execute('PRAGMA table_info(demo);').fetchall()
demo_insert = """
INSERT INTO demo (s, x, y)
VALUES ('g', 3, 9), ('v', 5, 7), ('f', 8, 7);
"""
sl_cur.execute(demo_insert)
sl_cur.close()
sl_conn.commit()
sl_conn.close()

# Testing demo file
sl_conn = sqlite3.connect('demo_data.sqlite3')
sl_cur = sl_conn.cursor()
# Number of rows (fetch the scalar instead of printing a raw row-tuple list).
sl_cur.execute('SELECT COUNT(*) FROM demo')
row_count = sl_cur.fetchone()[0]
print(f'There are {row_count} rows.\n')
# How many rows are there where both x and y are at least 5?
sl_cur.execute("""
    SELECT COUNT(*)
    FROM demo
    WHERE x >= 5
      AND y >= 5;
""")
both_at_least_5 = sl_cur.fetchone()[0]
print(f'There are {both_at_least_5} rows with values of at least 5.\n')
# How many unique values of y are there?
sl_cur.execute("""
    SELECT COUNT(DISTINCT y)
    FROM demo
""")
unique_y = sl_cur.fetchone()[0]
print(f"There are {unique_y} unique values of 'y'.")
# Close the read connection as well (previously left open).
sl_cur.close()
sl_conn.close()
import re
import helper
from telebot import types
def run(m, bot):
    """Entry point of the edit flow: show the user's expense history as a
    one-time keyboard so they can pick the record to edit.

    :param m: incoming telebot message
    :param bot: telebot instance
    """
    chat_id = m.chat.id
    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
    markup.row_width = 2
    # Each history record is "date,category,amount"; render it as one
    # keyboard button per expense.
    for c in helper.getUserHistory(chat_id):
        expense_data = c.split(',')
        str_date = "Date=" + expense_data[0]
        str_category = ",\t\tCategory=" + expense_data[1]
        str_amount = ",\t\tAmount=$" + expense_data[2]
        markup.add(str_date + str_category + str_amount)
    info = bot.reply_to(m, "Select expense to be edited:", reply_markup=markup)
    # Continue the conversation with the field-selection step.
    bot.register_next_step_handler(info, select_category_to_be_updated, bot)
def select_category_to_be_updated(m, bot):
    """Offer the fields of the chosen expense (Date / Category / Amount) as
    buttons so the user can pick which one to change.

    :param m: message whose text is the selected "Date=..,Category=..,Amount=.." record
    :param bot: telebot instance
    """
    info = m.text
    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
    markup.row_width = 2
    # Guard against an empty message (e.g. a non-text reply).
    selected_data = [] if info is None else info.split(',')
    for c in selected_data:
        markup.add(c.strip())
    choice = bot.reply_to(m, "What do you want to update?", reply_markup=markup)
    # Pass the full selected record along so the edit step can locate it.
    bot.register_next_step_handler(choice, enter_updated_data, bot, selected_data)
def enter_updated_data(m, bot, selected_data):
    """Route the user's field choice (Date / Category / Amount) to the
    matching edit handler.

    :param m: message whose text names the field to update
    :param bot: telebot instance
    :param selected_data: the originally selected expense record, split on ','
    """
    choice1 = "" if m.text is None else m.text
    if 'Date' in choice1:
        # Fixed user-facing typo: the expected format is dd-mmm-yyyy
        # (matching the 4-digit year enforced by edit_date's regex).
        new_date = bot.reply_to(m, "Please enter the new date (in dd-mmm-yyyy format)")
        bot.register_next_step_handler(new_date, edit_date, bot, selected_data)
    if 'Category' in choice1:
        # Only the category flow needs the spend-category keyboard, so it is
        # built here instead of unconditionally on every call.
        markup = types.ReplyKeyboardMarkup(one_time_keyboard=True)
        markup.row_width = 2
        for cat in helper.getSpendCategories():
            markup.add(cat)
        new_cat = bot.reply_to(m, "Please select the new category", reply_markup=markup)
        bot.register_next_step_handler(new_cat, edit_cat, bot, selected_data)
    if 'Amount' in choice1:
        new_cost = bot.reply_to(m, "Please type the new cost")
        bot.register_next_step_handler(new_cost, edit_cost, bot, selected_data)
def edit_date(m, bot, selected_data):
    """Validate the new date and rewrite the matching history record with it.

    :param m: message containing the new date (dd-Mmm-yyyy)
    :param bot: telebot instance
    :param selected_data: ["Date=..", "Category=..", "Amount=$.."] of the chosen record
    """
    user_list = helper.read_json()
    new_date = "" if m.text is None else m.text
    # dd-Mmm-yyyy, e.g. 05-Jan-2023; day 01-31, month from the fixed list.
    date_format = r'^(([0][1-9])|([1-2][0-9])|([3][0-1]))\-(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\-\d{4}$'
    x1 = re.search(date_format, new_date)
    if x1 is None:
        bot.reply_to(m, "The date is incorrect")
        return
    chat_id = m.chat.id
    data_edit = helper.getUserHistory(chat_id)
    # Find the stored record matching the selected one and swap its date.
    for i in range(len(data_edit)):
        user_data = data_edit[i].split(',')
        selected_date = selected_data[0].split('=')[1]
        selected_category = selected_data[1].split('=')[1]
        selected_amount = selected_data[2].split('=')[1]
        # selected_amount[1:] drops the leading '$' added for display.
        if user_data[0] == selected_date and user_data[1] == selected_category and user_data[2] == selected_amount[1:]:
            data_edit[i] = new_date + ',' + selected_category + ',' + selected_amount[1:]
            break
    user_list[str(chat_id)]['data'] = data_edit
    helper.write_json(user_list)
    bot.reply_to(m, "Date is updated")
def edit_cat(m, bot, selected_data):
    """Rewrite the matching history record with the newly chosen category.

    :param m: message containing the new category
    :param bot: telebot instance
    :param selected_data: ["Date=..", "Category=..", "Amount=$.."] of the chosen record
    """
    user_list = helper.read_json()
    chat_id = m.chat.id
    history = helper.getUserHistory(chat_id)
    new_cat = "" if m.text is None else m.text
    # Unpack the "key=value" fields of the record the user picked.
    sel_date = selected_data[0].split('=')[1]
    sel_category = selected_data[1].split('=')[1]
    sel_amount = selected_data[2].split('=')[1]
    for idx, record in enumerate(history):
        fields = record.split(',')
        # The stored amount has no leading '$', hence sel_amount[1:].
        if fields[0] == sel_date and fields[1] == sel_category and fields[2] == sel_amount[1:]:
            history[idx] = sel_date + ',' + new_cat + ',' + sel_amount[1:]
            break
    user_list[str(chat_id)]['data'] = history
    helper.write_json(user_list)
    bot.reply_to(m, "Category is updated")
def edit_cost(m, bot, selected_data):
    """Validate the new amount and rewrite the matching history record with it.

    :param m: message containing the new cost
    :param bot: telebot instance
    :param selected_data: ["Date=..", "Category=..", "Amount=$.."] of the chosen record
    """
    user_list = helper.read_json()
    new_cost = "" if m.text is None else m.text
    chat_id = m.chat.id
    data_edit = helper.getUserHistory(chat_id)
    # validate_entered_amount returns 0 for invalid input.
    if helper.validate_entered_amount(new_cost) != 0:
        # Find the stored record matching the selected one and swap its amount.
        for i in range(len(data_edit)):
            user_data = data_edit[i].split(',')
            selected_date = selected_data[0].split('=')[1]
            selected_category = selected_data[1].split('=')[1]
            selected_amount = selected_data[2].split('=')[1]
            # selected_amount[1:] drops the leading '$' added for display.
            if user_data[0] == selected_date and user_data[1] == selected_category and user_data[2] == selected_amount[1:]:
                data_edit[i] = selected_date + ',' + selected_category + ',' + new_cost
                break
        user_list[str(chat_id)]['data'] = data_edit
        helper.write_json(user_list)
        bot.reply_to(m, "Expense amount is updated")
    else:
        bot.reply_to(m, "The cost is invalid")
        return  # NOTE(review): redundant trailing return; kept as-is.
| 4,679 | 1,637 |
# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
password supervisor service
"""
import re
import secrets
import string
from crypt import crypt, mksalt, METHOD_SHA512 # pylint: disable=no-name-in-module
from hashlib import sha512
from hmac import compare_digest
class PasswordSupervisorResult():
    """Outcome of a password-policy check: a verdict plus a human-readable message."""

    def __init__(self, result, message):
        self._result = result
        self._message = message

    @property
    def is_strong(self):
        """True when the checked password satisfied the policy."""
        return self._result

    @property
    def message(self):
        """Explanation of the verdict."""
        return self._message


class PasswordSupervisor():
    """Password policy checks, random generators and hashing helpers."""

    MIN_LENGTH = 10   # minimum accepted password length
    MIN_CLASSES = 3   # minimum distinct character classes required

    @classmethod
    def check_strength(cls, password):
        """Check *password* against the policy (length and character diversity)."""
        if len(password) < cls.MIN_LENGTH:
            return PasswordSupervisorResult(False, f'Password too short. At least {cls.MIN_LENGTH} characters required.')
        # Count how many character classes appear: lower, upper, digit, other.
        class_patterns = ('[a-z]', '[A-Z]', '[0-9]', '[^a-zA-Z0-9]')
        classes = sum(1 for pattern in class_patterns if re.search(pattern, password))
        if classes < cls.MIN_CLASSES:
            return PasswordSupervisorResult(
                False,
                f'Only {classes} character classes found. At least {cls.MIN_CLASSES} classes required (lowercase, uppercase, digits, other).'
            )
        return PasswordSupervisorResult(True, 'Password is according to policy.')

    @classmethod
    def generate(cls, length=40):
        """Generate a random alphanumeric password that passes check_strength."""
        alphabet = string.ascii_letters + string.digits
        candidate = ''.join(secrets.choice(alphabet) for _ in range(length))
        # Redraw until the candidate satisfies the policy.
        while not cls.check_strength(candidate).is_strong:
            candidate = ''.join(secrets.choice(alphabet) for _ in range(length))
        return candidate

    @staticmethod
    def generate_apikey():
        """Generate a fresh random API key (64 hex characters)."""
        return secrets.token_hex(32)

    @staticmethod
    def hash(value, salt=None):
        """Hash *value* with SHA-512 crypt; a new salt is created when none is given."""
        return crypt(value, salt if salt else mksalt(METHOD_SHA512))

    @staticmethod
    def get_salt(value):
        """Extract the salt part of a crypt hash (everything before the last '$')."""
        return value[:value.rfind('$')] if value else None

    @staticmethod
    def compare(value1, value2):
        """Timing-safe comparison of two hashes; non-string inputs compare unequal."""
        if isinstance(value1, str) and isinstance(value2, str):
            return compare_digest(value1, value2)
        return False

    @staticmethod
    def hash_simple(value):
        """Unsalted SHA-512 hex digest of *value*."""
        return sha512(value.encode('utf-8')).hexdigest()
| 2,910 | 861 |
import requests
from bs4 import BeautifulSoup
def get_rating(handle):
    """Fetch a Codeforces user's rating via the user.info API endpoint.

    :param handle: Codeforces handle (coerced to str)
    :return: dict with 'status' and, on success, 'rating' (0 when unrated)
    """
    handle = str(handle)
    url = 'http://codeforces.com/api/user.info?handles=' + handle
    # The API returns JSON; decode it directly. The previous code eval()'d
    # the response text, which is unsafe on remote data and breaks on JSON
    # literals such as true/false/null.
    results = requests.get(url).json()
    if results['status'] != 'OK':
        results['comment'] = 'handle: ' + handle + ' 不存在'  # "does not exist"
        return results
    info = results['result'][0]
    # Unrated users carry no 'rating' field; report them as 0.
    if 'rating' not in info:
        info['rating'] = 0
    res = {'status': 'OK', 'rating': info['rating']}
    return res
def get_rating_change(handle):
    """Fetch a Codeforces user's rating-change history via the user.rating API.

    :param handle: Codeforces handle (coerced to str)
    :return: the decoded API response dict
    """
    # Decode the JSON payload directly; the previous eval()/BeautifulSoup
    # round-trip was unsafe and unnecessary. The leftover debug print of the
    # handle was removed as well.
    url = 'http://codeforces.com/api/user.rating?handle=' + str(handle)
    return requests.get(url).json()
| 802 | 270 |
#!/usr/bin/env python
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import multiprocessing
import os
from tempfile import mkdtemp
from shutil import rmtree
from time import sleep
import json
from lunr.common.config import LunrConfig
from lunr.common.lock import JsonLockFile
from lunr.storage.helper.utils import get_conn
from lunr.storage.helper.utils.client.memory import ClientException, reset
from lunr.storage.helper.utils.manifest import Manifest, save_manifest
from lunr.storage.helper.utils.worker import Worker, SaveProcess,\
StatsSaveProcess, RestoreProcess, StatsRestoreProcess, Block
class MockCinder(object):
    """Test double for a cinder client; records how often each method is called."""
    def __init__(self):
        # Call counters inspected by the tests below.
        self.snapshot_progress_called = 0
        self.update_volume_metadata_called = 0
    def snapshot_progress(self, *args, **kwargs):
        # Accept any arguments; only the call count matters.
        self.snapshot_progress_called += 1
    def update_volume_metadata(self, *args, **kwargs):
        # Accept any arguments; only the call count matters.
        self.update_volume_metadata_called += 1
class TestStatsRestoreProcess(unittest.TestCase):
    """Tests for StatsRestoreProcess: it drains a stat queue and writes
    JSON progress stats to a lock-protected file.

    NOTE: uses xrange, so this module targets Python 2.
    """
    def setUp(self):
        self.cinder = MockCinder()
        self.scratch = mkdtemp()
        self.stats_path = os.path.join(self.scratch, 'stats')
        self.stat_queue = multiprocessing.Queue()
        # Create the lock file up front; the lock object is handed to the process.
        with JsonLockFile(self.stats_path) as lock:
            self.stats_lock = lock
        self.volume_id = 'volume_id'
        self.block_count = 10
        self.process = StatsRestoreProcess(
            self.cinder, self.volume_id, self.stat_queue,
            self.block_count, self.stats_lock, update_interval=1)
        self.process.start()
    def tearDown(self):
        rmtree(self.scratch)
        # The process must have exited by the end of every test.
        self.assertFalse(self.process.is_alive())
    def test_restored(self):
        """Feeding N 'restored' ticks yields progress = N / block_count."""
        blocks_restored = 3
        for i in xrange(blocks_restored):
            task = ('restored', 1)
            self.stat_queue.put(task)
        # None is the sentinel that tells the process to finish.
        self.stat_queue.put(None)
        while self.process.is_alive():
            sleep(0.1)
        with open(self.stats_path) as f:
            stats = json.loads(f.read())
        self.assertEqual(stats['block_count'], self.block_count)
        self.assertEqual(stats['blocks_restored'], blocks_restored)
        percent = 3 * 100.0 / 10
        self.assertEqual(stats['progress'], percent)
class TestStatsSaveProcess(unittest.TestCase):
    """Tests for StatsSaveProcess: save progress counts both reads and
    uploads, so progress = (read + uploaded) / (block_count + upload_count).

    NOTE: uses xrange, so this module targets Python 2.
    """
    def setUp(self):
        self.cinder = MockCinder()
        self.scratch = mkdtemp()
        self.stats_path = os.path.join(self.scratch, 'stats')
        self.stat_queue = multiprocessing.Queue()
        # Create the lock file up front; the lock object is handed to the process.
        with JsonLockFile(self.stats_path) as lock:
            self.stats_lock = lock
        self.backup_id = 'backup_id'
        self.block_count = 10
        self.process = StatsSaveProcess(
            self.cinder, self.backup_id, self.stat_queue,
            self.block_count, self.stats_lock, update_interval=1)
        self.process.start()
    def tearDown(self):
        rmtree(self.scratch)
        # The process must have exited by the end of every test.
        self.assertFalse(self.process.is_alive())
    def test_read(self):
        """'read' ticks advance the read half of the progress fraction."""
        blocks_read = 8
        for i in xrange(blocks_read):
            task = ('read', 1)
            self.stat_queue.put(task)
        # None is the sentinel that tells the process to finish.
        self.stat_queue.put(None)
        while self.process.is_alive():
            sleep(0.1)
        with open(self.stats_path) as f:
            stats = json.loads(f.read())
        self.assertEqual(stats['blocks_read'], blocks_read)
        self.assertEqual(stats['block_count'], self.block_count)
        # upload_count defaults to block_count until overridden.
        self.assertEqual(stats['upload_count'], self.block_count)
        self.assertEqual(stats['blocks_uploaded'], 0)
        percent = (8 + 0) * 100.0 / (10 + 10)
        self.assertEqual(stats['progress'], percent)
    def test_uploaded(self):
        """'uploaded' ticks advance the upload half of the progress fraction."""
        blocks_uploaded = 3
        for i in xrange(blocks_uploaded):
            task = ('uploaded', 1)
            self.stat_queue.put(task)
        self.stat_queue.put(None)
        while self.process.is_alive():
            sleep(0.1)
        with open(self.stats_path) as f:
            stats = json.loads(f.read())
        self.assertEqual(stats['blocks_read'], 0)
        self.assertEqual(stats['block_count'], self.block_count)
        self.assertEqual(stats['upload_count'], self.block_count)
        self.assertEqual(stats['blocks_uploaded'], blocks_uploaded)
        percent = (0 + 3) * 100.0 / (10 + 10)
        self.assertEqual(stats['progress'], percent)
    def test_upload_count(self):
        """An 'upload_count' task overrides the denominator's upload part."""
        upload_count = 7
        task = ('upload_count', upload_count)
        self.stat_queue.put(task)
        blocks_uploaded = 3
        for i in xrange(blocks_uploaded):
            task = ('uploaded', 1)
            self.stat_queue.put(task)
        self.stat_queue.put(None)
        while self.process.is_alive():
            sleep(0.1)
        with open(self.stats_path) as f:
            stats = json.loads(f.read())
        self.assertEqual(stats['blocks_read'], 0)
        self.assertEqual(stats['block_count'], self.block_count)
        self.assertEqual(stats['upload_count'], upload_count)
        self.assertEqual(stats['blocks_uploaded'], 3)
        percent = (0 + 3) * 100.0 / (10 + 7)
        self.assertEqual(stats['progress'], percent)
class TestSaveProcess(unittest.TestCase):
    """Tests for SaveProcess uploading blocks to the disk-backed client.

    NOTE: uses xrange, so this module targets Python 2.
    """
    def setUp(self):
        self.block_queue = multiprocessing.JoinableQueue()
        self.result_queue = multiprocessing.Queue()
        self.stat_queue = multiprocessing.Queue()
        self.volume_id = 'volume_id'
        self.scratch = mkdtemp()
        backup_path = os.path.join(self.scratch, 'backups')
        # Use the on-disk backup client rooted in the scratch directory.
        self.conf = LunrConfig({
            'backup': {'client': 'disk'},
            'disk': {'path': backup_path},
        })
        self.conn = get_conn(self.conf)
        self.conn.put_container(self.volume_id)
        self.process = SaveProcess(self.conf, self.volume_id,
                                   self.block_queue, self.result_queue,
                                   self.stat_queue)
        self.process.start()
    def tearDown(self):
        rmtree(self.scratch)
        # The process must have exited by the end of every test.
        self.assertFalse(self.process.is_alive())
    def test_upload(self):
        """Queued blocks end up as one object each in the volume container."""
        dev = '/dev/zero'
        salt = 'salt'
        block_count = 3
        for i in xrange(block_count):
            block = Block(dev, i, salt)
            # Lie about the hash.
            block._hydrate()
            hash_ = "hash_%s" % i
            block._hash = hash_
            self.block_queue.put(block)
        # None is the sentinel that tells the process to finish.
        self.block_queue.put(None)
        while self.process.is_alive():
            sleep(0.1)
        stats, errors = self.result_queue.get()
        self.assertEquals(stats['uploaded'], block_count)
        self.assertEquals(len(errors.keys()), 0)
        headers, listing = self.conn.get_container(self.volume_id)
        self.assertEquals(len(listing), block_count)
class TestWorker(unittest.TestCase):
    """Tests for Worker against the in-memory backup client.

    NOTE: uses Python 2 idioms (assert_, assertEquals); module targets Python 2.
    """
    def setUp(self):
        # Reset the shared in-memory client state between tests.
        reset()
        self.scratch = mkdtemp()
    def tearDown(self):
        rmtree(self.scratch)
    def test_salt_empty_blocks(self):
        """Each manifest gets a unique salt, so empty-block hashes differ per
        worker even though the empty-block contents are identical."""
        vol1 = 'vol1'
        vol2 = 'vol2'  # NOTE(review): unused; both workers use vol1.
        manifest1 = Manifest()
        manifest2 = Manifest()
        conf = LunrConfig({'backup': {'client': 'memory'}})
        worker1 = Worker(vol1, conf, manifest1)
        worker2 = Worker(vol1, conf, manifest2)
        self.assert_(worker1.manifest.salt != worker2.manifest.salt)
        self.assert_(worker1.empty_block_hash != worker2.empty_block_hash)
        self.assertEquals(worker1.empty_block, worker2.empty_block)
    def test_delete_with_missing_blocks(self):
        """delete() must tolerate referenced blocks that were never uploaded
        (404s from the backend) and still delete the manifest."""
        stats_path = os.path.join(self.scratch, 'stats')  # NOTE(review): unused here.
        manifest = Manifest.blank(2)
        worker = Worker('foo',
                        LunrConfig({
                            'backup': {'client': 'memory'},
                            'storage': {'run_dir': self.scratch}
                        }),
                        manifest=manifest)
        conn = worker.conn
        conn.put_container('foo')
        backup = manifest.create_backup('bak1')
        backup[0] = worker.empty_block_hash
        backup[1] = 'some_random_block_that_isnt_uploaded'
        save_manifest(manifest, conn, worker.id, worker._lock_path())
        obj = conn.get_object('foo', 'manifest', newest=True)
        # Neither referenced block object actually exists in the backend.
        self.assertRaises(ClientException, conn.get_object,
                          'foo', backup[0], newest=True)
        self.assertRaises(ClientException, conn.get_object,
                          'foo', backup[1], newest=True)
        # Shouldn't blow up on 404.
        worker.delete('bak1')
        # Manifest should still be nicely deleted.
        self.assertRaises(ClientException, conn.get_object,
                          'foo', 'manifest', newest=True)
    def test_audit(self):
        """audit() deletes container objects not referenced by the manifest."""
        manifest = Manifest.blank(2)
        worker = Worker('foo',
                        LunrConfig({
                            'backup': {'client': 'memory'},
                            'storage': {'run_dir': self.scratch}
                        }),
                        manifest=manifest)
        conn = worker.conn
        conn.put_container('foo')
        backup = manifest.create_backup('bak1')
        backup[0] = worker.empty_block_hash
        conn.put_object('foo', backup[0], 'zeroes')
        backup[1] = 'some_block_hash'
        conn.put_object('foo', backup[1], ' more stuff')
        save_manifest(manifest, conn, worker.id, worker._lock_path())
        # Add some non referenced blocks.
        conn.put_object('foo', 'stuff1', 'unreferenced stuff1')
        conn.put_object('foo', 'stuff2', 'unreferenced stuff2')
        conn.put_object('foo', 'stuff3', 'unreferenced stuff3')
        _headers, original_list = conn.get_container('foo')
        # Manifest, 2 blocks, 3 stuffs.
        self.assertEquals(len(original_list), 6)
        worker.audit()
        _headers, new_list = conn.get_container('foo')
        # Manifest, 2 blocks.
        self.assertEquals(len(new_list), 3)
    def test_save_stats(self):
        """save() writes valid JSON progress stats to stats_path."""
        manifest = Manifest.blank(2)
        stats_path = os.path.join(self.scratch, 'statsfile')
        worker = Worker('foo',
                        LunrConfig({
                            'backup': {'client': 'memory'},
                            'storage': {'run_dir': self.scratch}
                        }),
                        manifest=manifest,
                        stats_path=stats_path)
        conn = worker.conn
        conn.put_container('foo')
        worker.save('/dev/zero', 'backup_id', timestamp=1)
        try:
            with open(stats_path) as f:
                json.loads(f.read())
        except ValueError:
            self.fail("stats path does not contain valid json")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 11,320 | 3,465 |
import os
from unittest.mock import patch
import requests
import subprocess
import tempfile
import time
import unittest
from contextlib import contextmanager
from server.common.config.app_config import AppConfig
from server.tests import PROJECT_ROOT, FIXTURES_ROOT
@contextmanager
def run_eb_app(tempdirname):
    """Start the packaged Elastic Beanstalk application and yield its base URL.

    Polls /health for up to ~10 seconds so the caller only proceeds once the
    server accepts connections; always terminates (and reaps) the child on exit.

    :param tempdirname: directory containing the built artifact.dir/application.py
    """
    ps = subprocess.Popen(["python", "artifact.dir/application.py"], cwd=tempdirname)
    server = "http://localhost:5000"
    # Wait for the server to come up: at most 10 attempts, 1s apart.
    # NOTE(review): if the server never answers, we fall through and let the
    # caller's first request fail with a clearer error.
    for _ in range(10):
        try:
            requests.get(f"{server}/health")
            break
        except requests.exceptions.ConnectionError:
            time.sleep(1)
    try:
        yield server
    finally:
        try:
            ps.terminate()
            # Reap the child so it does not linger as a zombie process.
            ps.wait()
        except ProcessLookupError:
            pass
class Elastic_Beanstalk_Test(unittest.TestCase):
    """End-to-end checks of the Elastic Beanstalk packaging of the server."""
    def test_run(self):
        """Build the EB artifact in a temp dir, boot it, and hit a dataset API."""
        # NOTE(review): tempdir cleanup relies on the TemporaryDirectory
        # object's lifetime (no context manager here) -- confirm intended.
        tempdir = tempfile.TemporaryDirectory(dir=f"{PROJECT_ROOT}/server")
        tempdirname = tempdir.name
        config = AppConfig()
        # test that eb works
        config.update_server_config(multi_dataset__dataroot=f"{FIXTURES_ROOT}", app__flask_secret_key="open sesame")
        config.complete_config()
        config.write_config(f"{tempdirname}/config.yaml")
        # Copy only git-tracked files of server/eb into the temp build dir.
        subprocess.check_call(f"git ls-files . | cpio -pdm {tempdirname}", cwd=f"{PROJECT_ROOT}/server/eb", shell=True)
        subprocess.check_call(["make", "build"], cwd=tempdirname)
        with run_eb_app(tempdirname) as server:
            session = requests.Session()
            response = session.get(f"{server}/d/pbmc3k.cxg/api/v0.2/config")
            data_config = response.json()
            assert data_config["config"]["displayNames"]["dataset"] == "pbmc3k"
    def test_config(self):
        """check_config.py must fail without a secret key and pass with one."""
        check_config_script = os.path.join(PROJECT_ROOT, "server", "eb", "check_config.py")
        with tempfile.TemporaryDirectory() as tempdir:
            configfile = os.path.join(tempdir, "config.yaml")
            app_config = AppConfig()
            app_config.update_server_config(multi_dataset__dataroot=f"{FIXTURES_ROOT}")
            app_config.write_config(configfile)
            command = ["python", check_config_script, configfile]
            # test failure mode (flask_secret_key not set)
            env = os.environ.copy()
            env.pop("CXG_SECRET_KEY", None)
            env["PYTHONPATH"] = PROJECT_ROOT
            with self.assertRaises(subprocess.CalledProcessError) as exception_context:
                subprocess.check_output(command, env=env)
            output = str(exception_context.exception.stdout, "utf-8")
            self.assertTrue(
                output.startswith(
                    "Error: Invalid type for attribute: app__flask_secret_key, expected type str, got NoneType"
                ),
                f"Actual: {output}",
            )
            self.assertEqual(exception_context.exception.returncode, 1)
            # test passing case
            env["CXG_SECRET_KEY"] = "secret"
            output = subprocess.check_output(command, env=env)
            output = str(output, "utf-8")
            self.assertTrue(output.startswith("PASS"))
| 3,112 | 922 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiConnectivityConnectionRef(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, connection_uuid=None):  # noqa: E501
        """TapiConnectivityConnectionRef - a model defined in OpenAPI
        :param connection_uuid: The connection_uuid of this TapiConnectivityConnectionRef.  # noqa: E501
        :type connection_uuid: str
        """
        # Python attribute name -> declared OpenAPI type.
        self.openapi_types = {
            'connection_uuid': str
        }
        # Python attribute name -> JSON field name used on the wire.
        self.attribute_map = {
            'connection_uuid': 'connection-uuid'
        }
        self._connection_uuid = connection_uuid
    @classmethod
    def from_dict(cls, dikt) -> 'TapiConnectivityConnectionRef':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The tapi.connectivity.ConnectionRef of this TapiConnectivityConnectionRef.  # noqa: E501
        :rtype: TapiConnectivityConnectionRef
        """
        return util.deserialize_model(dikt, cls)
    @property
    def connection_uuid(self):
        """Gets the connection_uuid of this TapiConnectivityConnectionRef.
        none  # noqa: E501
        :return: The connection_uuid of this TapiConnectivityConnectionRef.
        :rtype: str
        """
        return self._connection_uuid
    @connection_uuid.setter
    def connection_uuid(self, connection_uuid):
        """Sets the connection_uuid of this TapiConnectivityConnectionRef.
        none  # noqa: E501
        :param connection_uuid: The connection_uuid of this TapiConnectivityConnectionRef.
        :type connection_uuid: str
        """
        self._connection_uuid = connection_uuid
| 1,966 | 570 |
from typing import List
from enum import Enum
from .units import Process, Unit, Track
# Input-format selector for Reader.read; built with the Enum functional API.
# Member names and values are identical to the original class-style enum.
Mode = Enum("Mode", [("PROCESS", 0), ("DISK", 1), ("PAGE", 2)])
class Reader():
    """Reads a text file and parses each line into a scheduling unit.

    The parser applied to each line is selected by *mode*: PROCESS lines
    are parsed with Process.parse, DISK lines with Track.parse.
    """

    def __init__(self) -> None:
        pass

    def read(self, mode: Mode, path: str) -> List[Unit]:
        """Parse every line of *path* according to *mode*.

        :param mode: input format; must be Mode.PROCESS or Mode.DISK
        :param path: path of the file to read
        :return: one parsed unit per input line
        :raises ValueError: if *mode* has no associated parser (the
            original code crashed with ``TypeError: 'NoneType' object is
            not callable`` for Mode.PAGE)
        """
        # Dispatch table instead of an if/elif chain; the original also
        # wrapped the classmethods in lambdas that shadowed builtin `str`.
        parsers = {
            Mode.PROCESS: Process.parse,
            Mode.DISK: Track.parse,
        }
        try:
            parse = parsers[mode]
        except KeyError:
            raise ValueError(f"no parser available for mode {mode!r}") from None
        with open(path, "r") as f:
            return [parse(line) for line in f]
| 649 | 196 |
"""
Script to create a transaction.
"""
from hashlib import sha256
from pylisk.transaction import BalanceTransferTransaction
from pylisk.account import Account
def main():
    """Build, sign and print a serialized balance-transfer transaction."""
    sender = Account.from_info(
        {"address": "lskjks9w7v7wd6kg5gkt9eq5tvzu2w5vwfdc3ptkw"}
    )

    transfer = BalanceTransferTransaction(
        nonce=sender.nonce,
        sender_public_key=sender.public_key,
        recipient_bin_add=sender.bin_address,
        amount=100000000,
    )

    # Network identifiers keyed by network name.
    net_ids = {
        "testnet": bytes.fromhex(
            "15f0dacc1060e91818224a94286b13aa04279c640bd5d6f193182031d133df7c"
        ),
    }

    # The signing seed is the SHA-256 digest of the seed phrase.
    phrase = (
        "slight decline reward exist rib zebra multiply anger display alpha raccoon sing"
    )
    transfer.sign(seed=sha256(phrase.encode()).digest(), net_id=net_ids["testnet"])

    # `hex_trs` kept as the variable name: f"{hex_trs=}" prints it verbatim.
    hex_trs = transfer.serialize().hex()
    print(f"{hex_trs=}")


if __name__ == "__main__":
    main()
| 957 | 400 |
import pytest
import torch
import gpytorch
from torch_ssge import SSGE
# Kernels the estimator is tested against; each base kernel (default
# hyperparameters) is wrapped in a ScaleKernel, as in the original list.
KERNELS = [
    gpytorch.kernels.ScaleKernel(base_kernel())
    for base_kernel in (gpytorch.kernels.RBFKernel, gpytorch.kernels.MaternKernel)
]
# Each example is a pair: (<distribution class>, <list of constructor kwargs>).
# The distribution class must provide '.sample' and '.log_prob' methods.
# FIX: the original list was missing the commas after the MultivariateNormal
# and Multinomial entries, so adjacent `][` tokens were parsed as list
# indexing and the module failed to import with a TypeError.
EXAMPLES = [
    [
        torch.distributions.uniform.Uniform,
        [
            {"low": torch.tensor([0.0]), "high": torch.tensor([1.0])},
            {"low": torch.tensor([-1.0]), "high": torch.tensor([1.0])},
            {"low": torch.tensor([-10.0]), "high": torch.tensor([10.0])},
        ],
    ],
    [
        torch.distributions.laplace.Laplace,
        [
            {"loc": torch.tensor([0.0]), "scale": torch.tensor([1.0])},
            {"loc": torch.tensor([1.0]), "scale": torch.tensor([0.5])},
            {"loc": torch.tensor([-1.0]), "scale": torch.tensor([2])},
        ],
    ],
    [
        torch.distributions.normal.Normal,
        [
            {"loc": torch.tensor([0.0]), "scale": torch.tensor([1.0])},
            {"loc": torch.tensor([1.0]), "scale": torch.tensor([0.5])},
            {"loc": torch.tensor([-1.0]), "scale": torch.tensor([2])},
        ],
    ],
    [
        torch.distributions.poisson.Poisson,
        [
            {"rate": torch.tensor([1])},
            {"rate": torch.tensor([0.5])},
            {"rate": torch.tensor([4])},
        ],
    ],
    [
        torch.distributions.studentT.StudentT,
        [
            {"df": torch.tensor([2.0])},
            {"df": torch.tensor([4.0]), "loc": torch.tensor([1.0]), "scale": torch.tensor([2.0])},
        ],
    ],
    [
        torch.distributions.gamma.Gamma,
        [
            {"concentration": torch.tensor([1.0]), "rate": torch.tensor([1.0])},
            {"concentration": torch.tensor([2.0]), "rate": torch.tensor([2.0])},
        ],
    ],
    [
        torch.distributions.gumbel.Gumbel,
        [
            {"loc": torch.tensor([1.0]), "scale": torch.tensor([2.0])},
        ],
    ],
    [
        torch.distributions.dirichlet.Dirichlet,
        [
            {"concentration": torch.tensor([0.5, 0.5])},
            {"concentration": torch.tensor([0.9, 0.1])},
        ],
    ],
    [
        torch.distributions.multivariate_normal.MultivariateNormal,
        [
            {"loc": torch.randn(10), "covariance_matrix": torch.eye(10)},
        ],
    ],
    [
        torch.distributions.multinomial.Multinomial,
        [
            {"probs": torch.tensor([1., 1., 1., 1.])},
        ],
    ],
]
@pytest.mark.parametrize("gpytorch_kernel", KERNELS)
@pytest.mark.parametrize("dist_example", EXAMPLES)  # was "torch_dist": must match the parameter name below
def test_exponential_family(gpytorch_kernel, dist_example):
    """Compare SSGE score estimates against autograd log-prob gradients."""
    estimator = SSGE(
        gpytorch_kernel,
        noise=1e-3
    )
    dist_module, params = dist_example
    for param in params:
        # `param` is a kwargs dict; it must be unpacked into the constructor
        # (the original `dist_module(param)` passed the dict positionally).
        dist = dist_module(**param)
        sample = dist.sample((100, 1))
        estimator.fit(sample)

        # torch distributions expose `.variance`, not `.var`.
        # NOTE(review): `.item()` assumes a univariate distribution; the
        # multivariate entries in EXAMPLES (Dirichlet, MultivariateNormal,
        # Multinomial) have non-scalar means and would need their own grid.
        mean = dist.mean.item()
        sd = dist.variance.sqrt().item()
        # torch.linspace requires scalar endpoints, hence the .item() above.
        test_points = torch.linspace(mean - 3 * sd, mean + 3 * sd, 500)
        test_points.requires_grad_()

        grad_estimate = estimator(test_points)
        # log_prob yields one value per test point; sum to a scalar so
        # autograd.grad returns the per-point gradient.
        grad_analytical = torch.autograd.grad(
            dist.log_prob(test_points).sum(),
            test_points
        )[0]
        assert torch.allclose(grad_estimate, grad_analytical, atol=0.1, rtol=0.)