blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6083b266f2a8b47adca2da5524d7109d84630dcb | 14308c0c13bd736acba2abb23c7c63fe411b00cb | /synergyWayUsers/app/serializers.py | aac3f37738423289196e79b3e2c1ef9016cc1d0b | [] | no_license | vladaoleynik/SynergyWayUsers | 5e2b1ccbe3f5e3919d8c6e6a8a8c445a494de0fc | e3d92403f7ec44d69514ceee9b453cf114368578 | refs/heads/dev | 2023-01-07T00:24:07.422328 | 2016-03-15T19:30:57 | 2016-03-15T19:30:57 | 53,443,652 | 0 | 0 | null | 2022-12-26T20:22:33 | 2016-03-08T20:40:54 | HTML | UTF-8 | Python | false | false | 1,150 | py | class UserSerializer(object):
def __init__(self, data):
    # `data` is the raw query result: a list of row dicts in which user
    # columns are joined with course columns (course_id/course_name/...).
    self.data = data
def serialize_object(self):
"""
Method to populate data for convenience on FE.
Formats single object.
:return: JSON. Formatted data.
"""
user = self.data[0]
result = {}
for name, value in user.iteritems():
if 'course_' not in name:
result[name] = value
result['courses'] = [
{
'course_id': obj['course_id'],
'name': obj['course_name'],
'code': obj['course_code']
} for obj in self.data if obj['course_id']
]
return result
def serialize_list(self):
"""
Method to populate data for convenience on FE.
Formats list of objects.
:return: JSON. Formatted data.
"""
if not self.data:
return {
'count': 0,
'data': []
}
single_user = self.data[0]
return {
'count': single_user.get('full_count', 0),
'data': self.data
}
| [
"voleynik3221@gmail.com"
] | voleynik3221@gmail.com |
ae33e5d72485a8c2af5218c8c928af920fd4a784 | b07428c4bc62779b6a067b7f2c7803e230a0ebea | /myapp/mypages/models.py | 887c64382cd995718864d37dfe7ee519f764194b | [] | no_license | deceptikon/djangoproject | ee9150302f5b160754778ddf22363e892fa7f278 | 0f9a7dc50a60a63a60929f8006ad17def66be9a3 | refs/heads/master | 2020-09-11T15:40:18.390292 | 2019-11-30T15:18:50 | 2019-11-30T15:18:50 | 222,114,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from django.db import models
# Create your models here.
# https://docs.djangoproject.com/en/2.2/ref/models/fields/
class Product(models.Model):
    """Catalog product shown on the site."""
    # Human-readable product name.
    name = models.CharField(max_length=150)
    # Whole-number price (no decimal/currency handling).
    price = models.IntegerField()
    # Whether the product is currently discounted.
    discount = models.BooleanField()
    # NOTE(review): default=None without null=True will fail at the database
    # level when no description is given — confirm whether null=True/blank=True
    # was intended here.
    description = models.TextField(default=None)

    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.name
# python manage.py makemigrations mypages
# python manage.py migrate mypages
| [
"lexx.kg@gmail.com"
] | lexx.kg@gmail.com |
a060092d67625dbb41dface109bd6ddf81522409 | 974671bcbf93e78030e559e0914c8a9f8f419051 | /projects/urls.py | 7555b0f1356292497c4341bf4544b0c9c1d287f9 | [
"MIT"
] | permissive | TheDim0n/ProjectManager | f84d2b8488a6d6535d6b91f6b210c6d50a3c91b3 | 50d36e7e3fc71655aa5a82bb19eacc07172ba5e4 | refs/heads/master | 2022-12-06T13:43:19.801667 | 2020-09-01T13:18:41 | 2020-09-01T13:18:41 | 279,873,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | from django.urls import path
from . import views
# URL namespace: reverse these routes as 'projects:<name>'.
app_name = 'projects'

urlpatterns = [
    # Project CRUD.
    path('', views.ProjectListView.as_view(), name='index'),
    path('<int:pk>', views.ProjectDetailView.as_view(), name="project_details"),
    path('create_project', views.ProjectCreateView.as_view(success_url='/projects/'), name='create_project'),
    path('status/<str:status_name>', views.projects_status_ordered, name="status_order"),
    path('update_project/<int:pk>', views.ProjectUpdateView.as_view(), name='update_project'),
    path('<int:pk>/delete', views.ProjectDeleteView.as_view(success_url='/projects/'), name='delete_project'),
    # Nested task/level objects of a project (pk = project, lpk = level).
    path('<int:pk>/delete_task', views.ProjectTaskDeleteView.as_view(), name='delete_task'),
    path('<int:pk>/delete_level', views.ProjectLevelDeleteView.as_view(), name='delete_level'),
    path('<int:pk>/<int:lpk>/create_level', views.ProjectLevelCreateView.as_view(), name='create_level'),
    path('<int:pk>/<int:lpk>/create_task', views.ProjectTaskCreateView.as_view(), name='create_task'),
    path('task_details/<int:pk>', views.ProjectTaskUpdateView.as_view(), name="task_details"),
    path('level_details/<int:pk>', views.ProjectLevelUpdateView.as_view(), name="level_details"),
]
| [
"dim0n2023@yandex.ru"
] | dim0n2023@yandex.ru |
484b36d95ccf1122a18ef55f269dda7d400b80d3 | 1e19cab9c19562477cf561a88949faeee3731015 | /quanbenxiaoshuo/novels/apps.py | 19579c3e8a0d90b30a3869db99775e9dc90b0c58 | [] | no_license | sugyli/a_dou | 62f5c3090f4001b68613a0b7c30526a58f512aa7 | 4c3121495416361d7f4bfe97e3ed15c61c28f1e3 | refs/heads/master | 2021-06-24T12:30:44.018193 | 2019-12-02T05:27:41 | 2019-12-02T05:27:41 | 205,197,259 | 0 | 0 | null | 2021-02-08T20:36:17 | 2019-08-29T15:45:23 | JavaScript | UTF-8 | Python | false | false | 120 | py | from django.apps import AppConfig
class NovelsConfig(AppConfig):
    """Django app configuration for the novels app."""
    name = 'novels'
    # Admin display name ("novel management").
    verbose_name=u'小说管理'
| [
"“3101967255@qq.com”"
] | “3101967255@qq.com” |
64b3e520641e62179bf0226098f2410138702d70 | 760258f9eb5915d4bdf1c34d732770372f58c893 | /lib/sms.py | 0191b8eb9ade5533533e14cf6afa4a0fc8f03816 | [] | no_license | atlpatchin/django3template | 0da0c73df1e5b9b5f9924701f84a5c69e56ce461 | 7e931eab7bf6740009e3462a8c56cf9184b1fb5e | refs/heads/master | 2021-09-27T20:57:02.599606 | 2020-04-02T12:05:56 | 2020-04-02T12:05:56 | 252,442,378 | 1 | 0 | null | 2021-09-22T18:49:46 | 2020-04-02T11:58:24 | Python | UTF-8 | Python | false | false | 142 | py | # coding: utf-8
"""短信验证码"""
import requests
class SMS(object):
    """SMS verification-code sender — placeholder, not implemented yet.

    (Original docstring: 短信类.) The module imports `requests`, so the
    implementation is presumably meant to call an HTTP SMS gateway — TODO.
    """
    pass


if __name__ == '__main__':
    # No demo/self-test yet.
    pass
"atlpat@163.com"
] | atlpat@163.com |
fc52ed52791feaa1eb11f8171d6f65b4744c1571 | 706a59a5bf96d6951e92b9b77dccc7e05c8cde1a | /account/migrations/0006_alter_customuser_password.py | 30e7148c1feed89153a325a898afa08b14fd0ae1 | [] | no_license | kimyou1102/DREAM | f86428da4ed247f6bba75d6cd2dcd78bc7300b8e | e057918f18695fe0eb21434170451015bd6c31c9 | refs/heads/master | 2023-06-14T07:53:32.786447 | 2021-07-03T11:37:09 | 2021-07-03T11:37:09 | 459,999,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Generated by Django 3.2.5 on 2021-07-01 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0005_custombaseuser'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='password',
field=models.CharField(max_length=128, verbose_name='password'),
),
]
| [
"tkrhk2836@naver.com"
] | tkrhk2836@naver.com |
286e74bfecad4b0a3ad17401140825ee5bbc630d | 2c2678375480992f6a7b678f2568d2ea713c86d3 | /EcalTools/python/__init__.py | 32b37b5b8f0328b3f4800169acfbcb28d99ff177 | [] | no_license | emanueledimarco/EcalReconstruction | f1dc7977d649477efba993ba95b5b4412d4871b0 | fd7908be8ffeced00ef92c3bedb9e46d81665e4b | refs/heads/master | 2022-09-05T02:12:08.284331 | 2020-05-10T23:33:13 | 2020-05-10T23:33:13 | 43,128,921 | 0 | 2 | null | 2022-07-12T21:35:59 | 2015-09-25T09:44:29 | C++ | UTF-8 | Python | false | false | 208 | py | #Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/EcalReconstruction/EcalTools/',1)[0])+'/cfipython/slc6_amd64_gcc491/EcalReconstruction/EcalTools')
| [
"emanuele.dimarco@gmail.com"
] | emanuele.dimarco@gmail.com |
b7d0763f0b232f7949bb124cfb7259ed467eff4b | 52d5f7dead5c8572a67f63a006d843fdf6bff2ed | /venv/Scripts/pip3-script.py | 6b285cad085f50948276613a6821d349300f1a4c | [] | no_license | doprinhas/HackHash | 684281b8746445b8acabb7cceea4f51e3679c80d | 0ceb35732188c175db8039686c9d946a11fec797 | refs/heads/master | 2020-12-05T12:44:11.989626 | 2020-01-08T19:57:17 | 2020-01-08T19:57:17 | 232,113,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!C:\Users\Dor\PycharmProjects\HackHash\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/"-script.pyw"/".exe" suffix so pip sees its
    # canonical program name in sys.argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Exit with whatever status the pip3 console-script entry point returns.
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"dorpinhas94@gmail.com"
] | dorpinhas94@gmail.com |
f452c2dd8a6951453600e481311f716a0a0636bb | b7fcb8153dc565b50c2d1bfe6fc8dc62c77b343f | /src/upgeo/demo/ssa/plot/demo_magdist_region.py | 68abaceffaac2a4f3ec0bf98d5e438dc16129342 | [] | no_license | grenouille82/pygp | 25f6ed2ff6f456231a233a26d063949a8716d44d | 1e629a68309398fc7ff89fc0f0b2b9cea7850041 | refs/heads/master | 2021-01-23T03:16:17.767216 | 2017-03-24T14:49:34 | 2017-03-24T14:49:34 | 86,063,095 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,686 | py | '''
Created on Mar 28, 2013
@author: marcel
'''
import numpy as np
import upgeo.util.metric as metric
from upgeo.base.kernel import GroupNoiseKernel, HiddenKernel,\
MaskedFeatureKernel, ARDSEKernel, NoiseKernel, DiracConvolvedKernel,\
FixedParameterKernel, SEKernel, SqConstantKernel, LinearKernel,\
ARDSELinKernel, ExpGaussianKernel, ExpARDGaussianKernel,\
MaskedFeatureConvolvedKernel
from upgeo.util.filter import MeanShiftFilter, MinMaxFilter, FunctionFilter,\
CompositeFilter
from upgeo.util.array import unique
from upgeo.demo.util import loadmat_mtl_data
from upgeo.base.selector import KMeansSelector, FixedSelector
from upgeo.mtl.kernel import ConvolvedMTLKernel
from upgeo.mtl.gp import SparseCMOGPRegression, STLGPRegression,\
PooledGPRegression
from upgeo.mtl.infer import SparseCMOGPExactInference
from upgeo.util.glob import APPROX_TYPE
from upgeo.base.infer import ExactInference
def create_mtlgp_model(train, test, task_ids):
Xtrain = train[0]
Ytrain = train[1]
Gtrain = train[2]
_,itask = unique(Gtrain,True)
Xtest = test[0]
Ytest = test[1]
Gtest = test[2]
k = len(task_ids)
mse = np.zeros(k)
nmse = np.zeros(k)
mll = np.zeros(k)
nmll = np.zeros(k)
Yfit = np.zeros(n)
Var = np.zeros(n)
gp.fit(Xtrain, Ytrain, itask)
print 'opthyperparams={0}'.format(np.exp(gp.hyperparams))
for i in xrange(k):
#norm_period = (periods[i]-min_periop)/(max_period-min_period)
#m = np.sum(~Ytest_nan[:,i])
train_ids = Gtrain == task_ids[i]
test_ids = Gtest == task_ids[i]
yfit, var = gp.predict_task(Xtest[test_ids], q=i, ret_var=True)
print 'yfit={0}'.format(yfit)
print 'var={0}'.format(var)
Yfit[test_ids] = yfit
Var[test_ids] = var
mse[i] = metric.mspe(Ytest[test_ids], yfit)
nmse[i] = mse[i]/np.var(Ytest[test_ids])
mll[i] = metric.nlp(Ytest[test_ids], yfit, var)
nmll[i] = mll[i]-metric.nlpp(Ytest[test_ids], np.mean(Ytrain[train_ids]), np.var(Ytrain[train_ids]))
return mse, nmse, mll, nmll, Yfit, Var
def create_noise_kernel(grp_idx, s, kernel=None, mask=None):
    """
    Build a group-specific noise kernel.

    grp_idx -- index of the feature column holding the group/task id
    s       -- (log) noise scale handed to GroupNoiseKernel
    kernel  -- optional kernel multiplied onto the group noise; when given,
               the group noise is first wrapped in HiddenKernel (presumably
               to keep its hyperparameters out of the optimisation — confirm
               against the upgeo kernel API)
    mask    -- optional feature mask wrapped around the final kernel
    """
    noise_kernel = GroupNoiseKernel(grp_idx, s)
    if kernel != None:
        noise_kernel = HiddenKernel(noise_kernel)
        noise_kernel = noise_kernel*kernel
    if mask != None:
        noise_kernel = MaskedFeatureKernel(noise_kernel, mask)
    return noise_kernel
def create_testset(mag_idx, dist_idx, values):
    """Build a dense magnitude/distance prediction grid.

    Magnitude runs 4.0..8.0 in 0.1 steps and distance 0..200 in unit steps;
    every other covariate is held fixed at `values`.  Columns are inserted so
    that magnitude ends up at column `mag_idx` and distance at column
    `dist_idx` of the returned matrix.
    """
    grid_mag, grid_dist = np.mgrid[4:8.1:0.1, 0:201]
    col_mag = grid_mag.ravel()
    col_dist = grid_dist.ravel()

    base = np.tile(values, (col_mag.size, 1))

    # Insert the lower-indexed column first so the second insertion lands at
    # its final position in the already-widened matrix.
    if mag_idx < dist_idx:
        first_idx, first_col = mag_idx, col_mag
        second_idx, second_col = dist_idx, col_dist
    else:
        first_idx, first_col = dist_idx, col_dist
        second_idx, second_col = mag_idx, col_mag

    base = np.c_[base[:, :first_idx], first_col, base[:, first_idx:]]
    base = np.c_[base[:, :second_idx], second_col, base[:, second_idx:]]
    return base
if __name__ == '__main__':
    # Experiment driver (Python 2): fits a per-region GP on European strong
    # motion data and predicts over a magnitude/distance grid for plotting.
    # The many commented-out lines are alternative kernel/model configurations
    # kept for reference.
    filename = '/home/mhermkes/datasets/multilevel/nga/ssa/transfer/viz_mtl_eudata_big.mat'
    #filename = '/home/mhermkes/datasets/multilevel/nga/ssa/transfer/viz_mtl_eudata_big_eq.mat'
    mag_idx = 0
    dist_idx = 5
    X,y,tasks = loadmat_mtl_data(filename)
    task_ids, itask = unique(tasks, True)
    k = len(task_ids)
    Xt = create_testset(mag_idx, dist_idx, [1,0,0,10,760])
    print X
    # Distance transform: log of sqrt(d^2 + 12^2), with its inverse for
    # mapping predictions back.
    jbd_trans_fun = lambda x: np.log(np.sqrt(x**2 + 12**2))
    jbd_inv_fun = lambda x: np.sqrt(np.exp(x)**2 - 12**2)
    #event_idx = 0 #index of the event id row
    #site_idx = 1 #index of the site id row
    #event_mask = [0,1] #mask of the event features, which should be normalized
    #site_mask = [6] #mask of the site features, which should be normalized
    #record_mask = [5] #mask of the record features, which should be normalized
    norm_mask = [0,4,5,6]
    dist_mask = [5]
    #norm_mask = [1,5,6,7]
    #dist_mask = [6]
    fmask = np.r_[0, np.ones(7)]
    fmask = np.array(fmask, dtype=np.bool)
    # Preprocessing: distance transform + min/max scaling of selected
    # covariates; targets are mean-shifted.
    dist_filter = FunctionFilter(jbd_trans_fun, jbd_inv_fun, dist_mask)
    cov_filter = MinMaxFilter(norm_mask)
    cov_filter = CompositeFilter([dist_filter, MinMaxFilter(norm_mask)])
    target_filter = MeanShiftFilter()
    #norm
    Xtrain = cov_filter.process(X)
    ytrain = np.squeeze(target_filter.process(y[:,np.newaxis]))
    Xtest = cov_filter.process(Xt, reuse_stats=True)
    #learn GP
    #l = (np.max(X,0)-np.min(X,0))/2
    #l[l == 0] = 1e-4
    #kernel = SEKernel(np.log(1), np.log(1)) + SqConstantKernel(np.log(1)) * LinearKernel()# + NoiseKernel(np.log(0.5))
    #kernel = SEKernel(np.log(1), np.log(1)) + SqConstantKernel(np.log(0.001)) + SqConstantKernel(np.log(1)) * LinearKernel() + NoiseKernel(np.log(0.5))
    #kernel = SEKernel(np.log(1), np.log(1))# + NoiseKernel(np.log(0.5))
    #kernel = RBFKernel(np.log(1), np.log(1)) + NoiseKernel(np.log(0.5))
    #kernel = RBFKernel(np.log(1), np.log(1)) + SqConstantKernel(np.log(1)) * LinearKernel() + NoiseKernel(np.log(0.5))
    #kernel = ARDSEKernel(np.log(1)*np.ones(7), np.log(1)) #+ NoiseKernel(np.log(0.5))
    # Active kernel: ARD squared-exponential plus a scaled linear term.
    kernel = ARDSEKernel(np.log(1)*np.ones(7), np.log(1)) + SqConstantKernel(np.log(1)) * LinearKernel() #+ NoiseKernel(np.log(0.5))
    #kernel = ARDSEKernel(np.log(l), np.log(1)) + ARDLinearKernel(np.log(1)*np.ones(len(l)), np.log(1)) + NoiseKernel(np.log(0.5))
    #kernel = ARDSELinKernel(np.log(l), np.log(1), np.log(1)) + NoiseKernel(np.log(0.5))
    #kernel = ARDRBFKernel(np.log(l), np.log(1)) + NoiseKernel(np.log(0.5))
    #kernel = ARDRBFKernel(np.log(l), np.log(1)) + SqConstantKernel(np.log(1)) * LinearKernel() + NoiseKernel(np.log(0.5))
    #selector = KMeansSelector(30, False)
    #kernel = MaskedFeatureKernel(SEKernel(np.log(1), np.log(1)) + SqConstantKernel(np.log(1)) * LinearKernel(), fmask) + CorrelatedNoiseKernel(0, np.log(0.1), np.log(0.5))
    #kernel = MaskedFeatureKernel(ARDSEKernel(np.log(l), np.log(1)) + SqConstantKernel(np.log(1)) * LinearKernel(), fmask) + CorrelatedNoiseKernel(0, np.log(0.1), np.log(0.5))
    #meanfunctions for standard data
    #meanfct = create_meanfct(7, data=None, mask=None) #mean
    #meanfct = create_meanfct(7, data=(Xtrain,ytrain), mask=None) #fixmean
    #meanfunctions for different parameters in the meanfct and covfct
    #meanfct = create_meanfct(10, data=None, mask=None) #mean
    #meanfct = create_meanfct(10, data=data_train, mask=None) #fixmean
    #kernel = MaskedFeatureKernel(kernel, fmask)
    #create complex noise model
    #noise_kernel = create_noise_kernel(0, np.log(1)) + NoiseKernel(np.log(0.5))
    noise_kernel = NoiseKernel(np.log(0.5))
    kernel = kernel + noise_kernel
    #noise_kernel = create_noise_kernel(0, np.log(1), MaskedFeatureKernel(SEKernel(np.log(1), np.log(1)), np.array(np.r_[np.zeros(6), np.ones(2)], dtype=np.bool)))
    #noise_kernel = create_noise_kernel(0, np.log(1), MaskedFeatureKernel(ARDSEKernel(np.log(l[6:7]), np.log(1)), np.array(np.r_[np.zeros(6), np.ones(2)], dtype=np.bool)))
    #kernel = MaskedFeatureKernel(kernel, fmask) + noise_kernel
    #mtl kernel
    #noise_kernel = NoiseKernel(np.log(0.5)) #+ TaskNoiseKernel(X[train,0], 0, np.log(0.001))
    #mtl_kernel = MaskedFeatureKernel(SEKernel(np.log(1), np.log(1)), np.array(np.r_[0, np.ones(5), np.zeros(2)] ,dtype=bool))*MaskedFeatureKernel(SEKernel(np.log(1), np.log(1)), np.array(np.r_[0, np.zeros(5), np.ones(2)] ,dtype=bool))
    #mtl_kernel = MaskedFeatureKernel(SEKernel(np.log(1), np.log(1)), np.array(np.r_[0, np.ones(5), np.zeros(2)] ,dtype=bool))*MaskedFeatureKernel(SEKernel(np.log(1), np.log(1)), np.array(np.r_[0, np.ones(7)] ,dtype=bool))
    #mtl_kernel = mtl_kernel + MaskedFeatureKernel(SqConstantKernel(np.log(1)) * LinearKernel(), fmask)
    #kernel = FixedParameterKernel(mtl_kernel+noise_kernel, [3])
    #algo = SparseGPRegression(kernel, infer_method=FITCExactInference, selector=selector, fix_inducing=False)
    #algo = GPRegression(kernel, meanfct=meanfct, infer_method=ExactInference)
    #create kernel
    #kernel = SEKernel(np.log(np.mean(ll)), np.log(1)) + NoiseKernel(np.log(0.1))
    # Active model: independent single-task GPs per region.
    gp = STLGPRegression(kernel, infer_method=ExactInference)
    #gp = PooledGPRegression(kernel, infer_method=ExactInference)
    #selector = RandomSubsetSelector(15)
    selector = KMeansSelector(30, False)
    Xu = selector.apply(Xtrain, ytrain)
    selector = FixedSelector(Xu)
    #
    #latent_kernel = ExpGaussianKernel(np.log(0.1))
    latent_kernel = ExpARDGaussianKernel(np.ones(7)*np.log(0.1))
    #latent_kernel = CompoundKernel([ExpGaussianKernel(np.log(0.1)), ExpGaussianKernel(np.log(0.2))])
    #latent_kernel = DiracConvolvedKernel(FixedParameterKernel(SEKernel(np.log(0.1),np.log(1)), [1]))
    #latent_kernel = DiracConvolvedKernel(FixedParameterKernel(SEKernel(np.log(0.01),np.log(1))+SqConstantKernel(np.log(1)) * LinearKernel(), [1]))
    #latent_kernel = DiracConvolvedKernel(FixedParameterKernel(ARDSEKernel(np.ones(7)*np.log(0.1),np.log(1))+ SqConstantKernel(np.log(1)) * LinearKernel(), [7]))
    #latent_kernel = CompoundKernel([DiracConvolvedKernel(FixedParameterKernel(ARDSEKernel(np.ones(7)*np.log(0.1),np.log(1)), [7])), DiracConvolvedKernel(FixedParameterKernel(ARDSEKernel(np.ones(7)*np.log(0.25),np.log(1)), [7]))])
    #latent_kernel = CompoundKernel([ExpARDGaussianKernel(np.ones(7)*np.log(0.1)), ExpARDGaussianKernel(np.log(np.random.random(7)+0.0001))])
    #latent_kernel = CompoundKernel([ExpARDGaussianKernel(np.ones(7)*np.log(0.1)), ExpARDGaussianKernel(np.ones(7)*np.log(0.2))])
    #latent_Kernel = DiracConvolvedKernel(GaussianKernel(np.log(1)))
    #noise_kernel = SEKernel(np.log(0.1), np.log(1)) + SqConstantKernel(np.log(1)) * LinearKernel() #+ NoiseKernel(np.log(0.5))
    noise_kernel = ARDSEKernel(np.ones(7)*np.log(0.1),np.log(1))+ SqConstantKernel(np.log(1)) * LinearKernel()# + NoiseKernel(np.log(0.5))
    #noise_kernel = ARDSEKernel(np.ones(7)*np.log(0.1),np.log(1))#+ NoiseKernel(np.log(0.5))
    #noise_kernel = SEKernel(np.log(0.1), np.log(1)) + NoiseKernel(np.log(0.5))
    #noise_kernel = TaskNoiseKernel((periods-np.min(periods))/(np.max(periods)-np.min(periods)), 7, np.log(0.5))
    #noise_kernel = TaskNoiseKernel((periods-np.min(periods))/(np.max(periods)-np.min(periods)), 7, np.log(0.5))
    noise_kernel = noise_kernel + NoiseKernel(np.log(0.5))
    #noise_kernel = MaskedFeatureKernel(noise_kernel, fmask) + create_noise_kernel(0, np.log(1)) + NoiseKernel(np.log(0.5))
    #latent_kernel = MaskedFeatureConvolvedKernel(latent_kernel, fmask)
    #theta = [np.log(0.1), np.log(1)]
    #theta = [np.log(0.1), np.log(1), np.log(0.2), np.log(1)]
    theta = np.r_[np.ones(7)*np.log(0.1), np.log(1)]
    #theta = np.r_[np.ones(7)*np.log(0.1), np.log(1), np.ones(7)*np.log(0.2), np.log(1)]
    #theta = [np.log(1)]
    #theta = [np.log(1),np.log(1)]
    #theta = [np.log(1), np.log(1)]
    #theta = [np.log(0.01), np.log(1)]
    #kernel = ConvolvedMTLKernel(latent_kernel, theta, k, noise_kernel)
    #idx = [7,15]
    #kernel._theta[:,idx] = np.log(np.random.rand(k,len(idx)))
    #gp = SparseCMOGPRegression(kernel, beta=100, infer_method=SparseCMOGPExactInference, approx_type=APPROX_TYPE.PITC, selector=selector, fix_inducing=True)
    #gp = SparseCMOGPRegression(kernel, infer_method=SparseCMOGPExactInference, approx_type=APPROX_TYPE.PITC, selector=selector, fix_inducing=True)
    print 'X={0}'.format(X)
    print 'Xtest={0}'.format(Xtest)
    gp.fit(Xtrain,ytrain,itask)
    k = len(task_ids)
    yhat = np.zeros(len(X))
    # Per task: predict the plotting grid and the training rows of that task.
    for i in xrange(k):
        yfit, var = gp.predict_task(Xtest, q=i, ret_var=True)
        #print 'yfit={0}'.format(yfit)
        yhat[tasks==task_ids[i]] = gp.predict_task(Xtrain[tasks==task_ids[i]], q=i, ret_var=False)
        yfit = np.squeeze(target_filter.invprocess(yfit[:,np.newaxis]))
        #np.savetxt('/home/mhermkes/datasets/multilevel/nga/ssa/transfer/viz/region_model/ardselin/stl_region{0}.csv'.format(task_ids[i]), np.c_[Xt[:,[mag_idx, dist_idx]], yfit,var], delimiter=',')
    # Training residuals (in the mean-shifted target space).
    resid = yhat - ytrain
    #np.savetxt('/home/mhermkes/datasets/multilevel/nga/ssa/transfer/viz/region_model/resid/ardselin/stl_resid.csv', np.c_[tasks, X[:,[mag_idx, dist_idx]], resid], delimiter=',')
    print 'likel: {0}'.format(gp.log_likel)
    print 'train error: {0}'.format(metric.mspe(ytrain, yhat))
    print 'hyper params: {0}'.format(np.exp(gp.hyperparams))
| [
"Marcel.Hermkes@webtrekk.com"
] | Marcel.Hermkes@webtrekk.com |
bd63b8e1ecf45c334724bc34debf628114b3047e | f734a39a0c37186e90caea597f13000823c9e67a | /leetcode/Hash Table/1213. Intersection of Three Sorted Arrays.py | 658d6de9e6d97a5ad69bbe7071633e6fde37a8e0 | [
"MIT"
] | permissive | yanshengjia/algorithm | 681746e0371a82860e64a279bfe4c83545469641 | 46caaf74aeab8af74861fb5b249eb4169baf8493 | refs/heads/master | 2022-08-02T20:15:57.927418 | 2022-07-17T14:43:51 | 2022-07-17T14:43:51 | 192,160,418 | 69 | 32 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | """
Given three integer arrays arr1, arr2 and arr3 sorted in strictly increasing order, return a sorted array of only the integers that appeared in all three arrays.
Example 1:
Input: arr1 = [1,2,3,4,5], arr2 = [1,2,5,7,9], arr3 = [1,3,4,5,8]
Output: [1,5]
Explanation: Only 1 and 5 appeared in the three arrays.
Solution:
Use Hashtable to record the frequency of numbers, a number in intersection should have the frequency of 3
"""
# Time: O(m+n+q), m n q is the length of 3 arrays
# Space: O(x), x it the size of intersection
class Solution:
    def arraysIntersection(self, arr1: List[int], arr2: List[int], arr3: List[int]) -> List[int]:
        """Return, in sorted order, the values present in all three arrays.

        Each input is strictly increasing, so no array repeats a value; a
        total frequency of 3 across the three arrays therefore means the
        value appears in every one of them.
        """
        freq = {}
        for arr in (arr1, arr2, arr3):
            for num in arr:
                freq[num] = freq.get(num, 0) + 1
        return sorted(num for num, cnt in freq.items() if cnt == 3)
| [
"i@yanshengjia.com"
] | i@yanshengjia.com |
d7ccabafc3937cc5321c684ced89702c10f836ce | b087978eb569d3c68aec6ee3bc4f10dd8c1ceb5a | /music_library/music-player/bin/mid3iconv | b6795144ecafb90490638bd59f2ed08f809e5b65 | [] | no_license | mariopetrov9/Programming-with-python-101 | 93b3e4a0e52ddcfcdaf1d16deeee1dca87abe41c | 18b0d3b040131d9eab39f935fb100064ece34829 | refs/heads/master | 2023-02-22T19:09:36.964343 | 2016-06-06T21:15:53 | 2016-06-06T21:15:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,301 | #!/home/krasi_b2/HackBulgaria/week07/music-player/bin/python3
# ID3iconv is a Java based ID3 encoding convertor, here's the Python version.
# Copyright 2006 Emfox Zhou <EmfoxZhou@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
import sys
import locale
import mutagen
import mutagen.id3
from mutagen._compat import PY3, text_type
from mutagen._toolsutil import SignalHandler, get_win32_unicode_argv, print_, \
fsnative as fsn, OptionParser
VERSION = (0, 3)
_sig = SignalHandler()
def getpreferredencoding():
    """Return the locale's preferred encoding, defaulting to utf-8."""
    preferred = locale.getpreferredencoding()
    if not preferred:
        preferred = "utf-8"
    return preferred
def isascii(string):
    """Checks whether a unicode string is non-empty and contains only ASCII
    characters.
    """
    if not string:
        return False
    return all(ord(ch) < 128 for ch in string)
class ID3OptionParser(OptionParser):
    """Option parser preconfigured with mid3iconv's usage, description and a
    version string that also reports the mutagen version in use."""

    def __init__(self):
        mutagen_version = ".".join(map(str, mutagen.version))
        my_version = ".".join(map(str, VERSION))
        version = "mid3iconv %s\nUses Mutagen %s" % (
            my_version, mutagen_version)
        return OptionParser.__init__(
            self, version=version,
            usage="%prog [OPTION] [FILE]...",
            description=("Mutagen-based replacement the id3iconv utility, "
                         "which converts ID3 tags from legacy encodings "
                         "to Unicode and stores them using the ID3v2 format."))

    def format_help(self, *args, **kwargs):
        # Append an in-place-update warning after the standard optparse help.
        text = OptionParser.format_help(self, *args, **kwargs)
        return text + "\nFiles are updated in-place, so use --dry-run first.\n"
def update(options, filenames):
    """Re-encode the text frames of each file's ID3 tag in place.

    Frame text that mutagen decoded as latin-1 is reinterpreted in the
    user-chosen source encoding; frames are then stored as encoding 3
    (UTF-8) when the text is empty or pure ASCII, otherwise as encoding 1
    (UTF-16).  Honors the dry-run / force-v1 / remove-v1 / verbosity options.
    """
    encoding = options.encoding or getpreferredencoding()
    verbose = options.verbose
    noupdate = options.noupdate
    force_v1 = options.force_v1
    remove_v1 = options.remove_v1

    def conv(uni):
        # Undo the mojibake: the bytes decoded as latin-1 are really in the
        # legacy source encoding.
        return uni.encode('iso-8859-1').decode(encoding)

    for filename in filenames:
        with _sig.block():
            if verbose != "quiet":
                print_(u"Updating", filename)

            if has_id3v1(filename) and not noupdate and force_v1:
                mutagen.id3.delete(filename, False, True)

            try:
                id3 = mutagen.id3.ID3(filename)
            except mutagen.id3.ID3NoHeaderError:
                if verbose != "quiet":
                    print_(u"No ID3 header found; skipping...")
                continue
            except Exception as err:
                print_(text_type(err), file=sys.stderr)
                continue

            # Only text frames ("T***") and comment frames ("COMM") carry
            # re-encodable text.
            for tag in filter(lambda t: t.startswith(("T", "COMM")), id3):
                frame = id3[tag]
                if isinstance(frame, mutagen.id3.TimeStampTextFrame):
                    # non-unicode fields
                    continue
                try:
                    text = frame.text
                except AttributeError:
                    continue
                try:
                    text = [conv(x) for x in frame.text]
                except (UnicodeError, LookupError):
                    # Not valid in the source encoding (or unknown codec):
                    # leave the frame untouched.
                    continue
                else:
                    frame.text = text
                    if not text or min(map(isascii, text)):
                        frame.encoding = 3
                    else:
                        frame.encoding = 1

            if verbose == "debug":
                print_(id3.pprint())

            if not noupdate:
                if remove_v1:
                    id3.save(filename, v1=False)
                else:
                    id3.save(filename)
def has_id3v1(filename):
    """Return True when the file ends with an ID3v1 tag.

    An ID3v1 tag is the last 128 bytes of the file, starting with b"TAG".
    Any I/O problem (missing file, file shorter than 128 bytes, ...) simply
    yields False.
    """
    try:
        with open(filename, 'rb+') as fileobj:
            fileobj.seek(-128, 2)
            marker = fileobj.read(3)
    except IOError:
        return False
    return marker == b"TAG"
def main(argv):
    """Build the option parser, normalise legacy flag spellings and either
    run the conversion over the given files or print the help text."""
    parser = ID3OptionParser()
    parser.add_option(
        "-e", "--encoding", metavar="ENCODING", action="store",
        type="string", dest="encoding",
        help=("Specify original tag encoding (default is %s)" % (
            getpreferredencoding())))
    parser.add_option(
        "-p", "--dry-run", action="store_true", dest="noupdate",
        help="Do not actually modify files")
    parser.add_option(
        "--force-v1", action="store_true", dest="force_v1",
        help="Use an ID3v1 tag even if an ID3v2 tag is present")
    parser.add_option(
        "--remove-v1", action="store_true", dest="remove_v1",
        help="Remove v1 tag after processing the files")
    parser.add_option(
        "-q", "--quiet", action="store_const", dest="verbose",
        const="quiet", help="Only output errors")
    parser.add_option(
        "-d", "--debug", action="store_const", dest="verbose",
        const="debug", help="Output updated tags")

    # Accept the flag spellings of the original Java id3iconv tool.
    for i, arg in enumerate(argv):
        if arg == "-v1":
            argv[i] = fsn(u"--force-v1")
        elif arg == "-removev1":
            argv[i] = fsn(u"--remove-v1")

    (options, args) = parser.parse_args(argv[1:])
    if args:
        update(options, args)
    else:
        parser.print_help()


if __name__ == "__main__":
    # get_win32_unicode_argv works around byte-mangled argv on Windows.
    argv = get_win32_unicode_argv()
    _sig.init()
    main(argv)
| [
"mariopetrov9@gmail.com"
] | mariopetrov9@gmail.com | |
7b6313a1b37a49a859f18c49c5e3defd72bf4e92 | 474470e5edd4ea1c44c7b9ca63ae03c776096891 | /codewars/ValidateCreditCard/solution.py | f97f4e16f7d2b6948b59ebdbe3d5c79a0744e35e | [] | no_license | jaabberwocky/leetcode | 5de0541b7cd3892cedea9c9bcd44c8e4d876cccd | a65131f28f8a160f899606114411133933f2893f | refs/heads/master | 2021-06-06T03:32:31.609968 | 2021-05-16T13:49:32 | 2021-05-16T13:49:32 | 143,909,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | def validate(n):
digits = [int(d) for d in str(n)[::-1]]
ctr = 1
for ind, digit in enumerate(digits):
if ind == 0:
continue
if ind == ctr:
digit *= 2
if digit > 9:
digit -= 9
digits[ind] = digit
ctr += 2
return sum(digits) % 10 == 0
if __name__ == "__main__":
t1 = 2121
rs = validate(2121)
try:
assert rs==True
except AssertionError:
print("Code is incorrect!") | [
"tobiasleongzhunmun@gmail.com"
] | tobiasleongzhunmun@gmail.com |
315318c95f31de93fcfacce751b179734c20fbf7 | b2daa16d26445d7ed5269d0a0dd513594dddd896 | /config/settings/base.py | 5ed19a76350b55d642410bbbf6fb9d24563e7631 | [] | no_license | Santiago-Otero-Figueredo/finanzas_personales | d952169e8be70ea7d7c6e1e219605c9d3480f2e9 | 2ecb7ea6b60fa4dc68fa124ab53d09ea4016836e | refs/heads/master | 2023-01-22T14:07:42.094851 | 2020-12-04T01:46:54 | 2020-12-04T01:46:54 | 283,634,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,690 | py | """
Django settings for finanzas_personales project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import json
from sys import platform
from django.core.exceptions import ImproperlyConfigured
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# STATIC_SERVER_DIR points four directory levels above this settings file.
STATIC_SERVER_DIR = environ.Path(__file__) - 4
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Deployment secrets live in a JSON file one level above BASE_DIR, outside
# version control.
with open(os.path.join(os.path.dirname(BASE_DIR), "secrets.json")) as f:
    secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
    """Return the secret `setting`; raise ImproperlyConfigured when missing."""
    if setting not in secrets:
        error_msg = "Definir la variable de ambiente {0}".format(setting)
        raise ImproperlyConfigured(error_msg)
    return secrets[setting]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_secret("SECRET_KEY")

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): this comes straight from secrets.json — confirm it is a real
# JSON boolean and not the string "True".
DEBUG = get_secret("DEBUG")

AUTH_USER_MODEL = 'usuarios.Usuario'

# NOTE(review): the ngrok host is a temporary tunnel endpoint left in place.
ALLOWED_HOSTS = ['91336fb43965.ngrok.io', 'localhost', '127.0.0.1']

# Application definition

INSTALLED_APPS = [
    # Django
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party packages
    'bootstrap4',
    'tempus_dominus',
    'django.contrib.humanize',
    # Project apps
    'finanzas_personales.apps.usuarios',
    'finanzas_personales.apps.movimientos',
    'finanzas_personales.apps.funcionalidades',
    'finanzas_personales.apps.template_tags',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Exposes the current request user globally (django-currentuser).
    'django_currentuser.middleware.ThreadLocalUserMiddleware',
]

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(os.path.dirname(BASE_DIR), "finanzas_personales", "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Full connection dict is kept in secrets.json.

DATABASES = {
    'default': get_secret("DATABASE_DEFAULT")
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_ROOT = str(STATIC_SERVER_DIR('static_collected'))
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(os.path.dirname(BASE_DIR), "finanzas_personales", "static"),
]

MEDIA_URL = '/media/'
# Media root: server-style path on Linux, in-project folder elsewhere (dev).
if platform == 'linux' or platform == 'linux2':
    SERVER_MEDIA_DIR = environ.Path(__file__) - 3
    MEDIA_ROOT = str(SERVER_MEDIA_DIR('media'))
else:
    MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "finanzas_personales", "media")

LIB_VERSION = '4.0.5'
CACHE_BACKEND = 'default'

LOGIN_REDIRECT_URL = '/movimientos/registrar'
LOGIN_URL = 'inicio_sesion'
"santiago.otero.figueredo@gmail.com"
] | santiago.otero.figueredo@gmail.com |
7c2d99114b3aafbeb624eb534da25400a8ae4e87 | 06c1d6bcd099bf1c25abb52ba07351b068d1ab16 | /Unidad_3/leccion_3.py | 7c26b82dce0e0e918ab604fafd4e3dc5a427c8aa | [] | no_license | dafnemus/python-curso-udemy | 1105e5f51980d6f5ec32dac338ebc340250c6384 | 493717fb321b24bd5abcadb8e27d25d68b4f12f8 | refs/heads/main | 2023-03-09T12:27:41.934087 | 2021-02-24T18:34:56 | 2021-02-24T18:34:56 | 337,728,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | # pylint: disable=missing-docstring
# 1. Aplica un incremento de sueldo del 8% al salario de un trabajador.
# Para ello, recuerda que primero debes solicitar el monto base del salario.
def incrementar_sueldo(sueldo):
incremento = 0.08
valor_incrementado = sueldo * incremento
sueldo_incrementado = sueldo + valor_incrementado
print(f'Total sueldo:{sueldo_incrementado}', end=' ')
print(f'incremento: {valor_incrementado}')
incrementar_sueldo(2000)
print()
# 2. Aplica un incremento de sueldo del 8% al salario de un trabajador,
# solo si este gana menos que el salario mínimo
# (escoge cualquier valor para el salario mínimo, porejemplo 1000).
# Si el trabajador gana más que el salario mínimo, el incremento es del 5%
def incrementar_sueldo_2(sueldo):
sueldo_minimo = 1000
incremento_1 = 0.08
incremento_2 = 0.05
sueldo_incrementado = 0
valor_incrementado = 0
if sueldo <= sueldo_minimo:
valor_incrementado = sueldo * incremento_1
elif sueldo > sueldo_minimo:
valor_incrementado = sueldo * incremento_2
sueldo_incrementado = sueldo + valor_incrementado
print(f'Total sueldo:{sueldo_incrementado}', end=' ')
print(f'incremento: {valor_incrementado}')
incrementar_sueldo_2(800)
incrementar_sueldo_2(2000)
print()
# 3. Dado un valor que representa una cantidad en segundos,
# indica su equivalente en minutos, horas y días.
def convertir_segundos(segundos):
un_minuto = 60
hora = 3600
dias = 86400
resultado_min = segundos / un_minuto
resultado_hr = segundos / hora
resultado_dia = segundos / dias
print(f'segundos {segundos}')
print(f'segundos a hora: {resultado_hr}')
print(f'segundos a minutos: {resultado_min}')
print(f'segundosa dias: {resultado_dia}')
convertir_segundos(87600)
print()
# 4. Determinar el mínimo de 3 valores solicitados. Ahora, con 4 valores.
lista_valores = []
def agregar_valor(valor):
lista_valores.append(valor)
def minimo():
print(f'valores: {lista_valores}')
if len(lista_valores) <= 4:
print(f'valor minimo: {min(lista_valores)}')
agregar_valor(2)
agregar_valor(8)
agregar_valor(3)
minimo()
print()
# 5. Solicita al usuario, un número mayor que cero y menor a un millón,
# determina si el número de dígitos de dicho valor.
# Así, si el valor ingresado es 3, entonces el resultado será 1.
# Del mismo modo, si el valor ingresado es 768590, el resultado será 6
def contar_digitos(numero):
if 0 < numero < 1000000:
digitos = len(str(numero))
print(f'el numero {numero} tiene {digitos} digitos')
contar_digitos(22)
| [
"dafnemus@gmail.com"
] | dafnemus@gmail.com |
5bfe02e3fdc1ef7f383a3e3cbdb80a77861e7187 | 1f34608b9c050735ab49df9c37af77445e5c506d | /inventory/migrations/0003_auto__add_monthlyweatherbycity.py | 1a558dff1917e25152f06d83d120c6ef8d6d954b | [] | no_license | MiguelGervassi/django-inventory | e7830b3c2a5128764d93fe29290d64f4afff21ad | 835ab2aaf337f5aa43d7da724accbe0ac867b587 | refs/heads/master | 2021-05-26T14:35:30.535408 | 2013-11-18T11:37:20 | 2013-11-18T11:37:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,428 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MonthlyWeatherByCity'
db.create_table(u'inventory_monthlyweatherbycity', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('month', self.gf('django.db.models.fields.IntegerField')()),
('boston_temp', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=1)),
('houston_temp', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=1)),
))
db.send_create_signal(u'inventory', ['MonthlyWeatherByCity'])
def backwards(self, orm):
# Deleting model 'MonthlyWeatherByCity'
db.delete_table(u'inventory_monthlyweatherbycity')
models = {
u'inventory.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_category': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'inventory.inventory': {
'Meta': {'object_name': 'Inventory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'inventory.inventoryproduct': {
'Meta': {'object_name': 'InventoryProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['inventory.Inventory']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['inventory.Product']"}),
'quantity_on_hand': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'quantity_sold': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'inventory.monthlyweatherbycity': {
'Meta': {'object_name': 'MonthlyWeatherByCity'},
'boston_temp': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'}),
'houston_temp': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'month': ('django.db.models.fields.IntegerField', [], {})
},
u'inventory.product': {
'Meta': {'object_name': 'Product'},
'product_category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['inventory.Category']", 'db_column': "'product_category'"}),
'product_description': ('django.db.models.fields.TextField', [], {'default': "'None'"}),
'product_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'product_sell_price': ('inventory.fields.CurrencyField', [], {'max_digits': '10', 'decimal_places': '2'}),
'product_unit_price': ('inventory.fields.CurrencyField', [], {'max_digits': '10', 'decimal_places': '2'})
},
u'inventory.product_reports': {
'Meta': {'object_name': 'Product_Reports'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['inventory.InventoryProduct']"}),
'report_date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'total_profit_earned': ('inventory.fields.CurrencyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'total_quantity_sold': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_sell_amt_earned': ('inventory.fields.CurrencyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'total_unit_amt_earned': ('inventory.fields.CurrencyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['inventory'] | [
"Miguel.Gervasi@gmail.com"
] | Miguel.Gervasi@gmail.com |
9765c834cc9e5d16a0e3967295cb69af240d6325 | 556347a38988f5df368de98296ba55be23a5db85 | /utils/fourier_transform.py | ddc454269ac6794e13f1dbb3a6674908053287e5 | [] | no_license | tchewik/pfur_aommt | c8d88b331be10011320212c6931b0864849ac0f5 | dc285a247cb35d529a52c09eef4a263d62066395 | refs/heads/master | 2020-04-29T18:46:00.258295 | 2019-05-18T22:09:10 | 2019-05-18T22:09:10 | 176,332,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | import numpy as np
from functools import partial
from multiprocessing import Pool
def _idft_calc(u, data):
return sum([data[x] * (np.cos(2 * np.pi * u * x / len(data)) + np.sin(2 * np.pi * u * x / len(data)) * 1j) for x in range(len(data))])
def _hanna_func(n, data):
return .5 * (1 - np.cos(2. * np.pi * data[n] / len(data)))
class Fourier:
@staticmethod
def dft(data):
""" discrete fourier transform """
data = np.array(data).astype(float)
N = len(data)
n = np.arange(N)
k = n.reshape((N, 1))
M = np.exp(-2j * np.pi * k * n / N)
return np.dot(M, data)
@staticmethod
def idft(data):
""" inversed discrete fourier transform """
with Pool() as pool:
result = pool.map(partial(_idft_calc, data=data), range(len(data)))
return result
@staticmethod
def _hanna_window(data):
""" applies hanna window function """
with Pool() as pool:
result = pool.map(partial(_hanna_func, data=data), range(len(data)))
return result
| [
"elenachistov@gmail.com"
] | elenachistov@gmail.com |
19c1083ddebaae8a8cafbbfcbc4f663167f858b0 | 79fa6f3a9c0c07b2768b5c67d48cd2d3ada921c7 | /kikimr/public/api/grpc/ydb_export_v1_pb2.py | 8b1ed589a3769c3321e6a8c3913604b83594a9b6 | [
"Apache-2.0"
] | permissive | clumpytuna/ydb-python-sdk | 8dd951a532045587fcba1d541b3fb8798c358318 | f09d8db19f62032738ed77dabb3672c3e0f86cc3 | refs/heads/master | 2023-06-09T22:38:29.747969 | 2021-06-30T08:09:14 | 2021-06-30T08:09:14 | 319,103,389 | 0 | 0 | NOASSERTION | 2020-12-06T18:32:35 | 2020-12-06T18:32:34 | null | UTF-8 | Python | false | true | 2,581 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kikimr/public/api/grpc/ydb_export_v1.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from kikimr.public.api.protos import ydb_export_pb2 as kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='kikimr/public/api/grpc/ydb_export_v1.proto',
package='Ydb.Export.V1',
syntax='proto3',
serialized_pb=_b('\n*kikimr/public/api/grpc/ydb_export_v1.proto\x12\rYdb.Export.V1\x1a)kikimr/public/api/protos/ydb_export.proto2\xa9\x01\n\rExportService\x12K\n\nExportToYt\x12\x1d.Ydb.Export.ExportToYtRequest\x1a\x1e.Ydb.Export.ExportToYtResponse\x12K\n\nExportToS3\x12\x1d.Ydb.Export.ExportToS3Request\x1a\x1e.Ydb.Export.ExportToS3ResponseB\x1a\n\x18\x63om.yandex.ydb.export.v1b\x06proto3')
,
dependencies=[kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.yandex.ydb.export.v1'))
_EXPORTSERVICE = _descriptor.ServiceDescriptor(
name='ExportService',
full_name='Ydb.Export.V1.ExportService',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=105,
serialized_end=274,
methods=[
_descriptor.MethodDescriptor(
name='ExportToYt',
full_name='Ydb.Export.V1.ExportService.ExportToYt',
index=0,
containing_service=None,
input_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOYTREQUEST,
output_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOYTRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='ExportToS3',
full_name='Ydb.Export.V1.ExportService.ExportToS3',
index=1,
containing_service=None,
input_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOS3REQUEST,
output_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOS3RESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_EXPORTSERVICE)
DESCRIPTOR.services_by_name['ExportService'] = _EXPORTSERVICE
# @@protoc_insertion_point(module_scope)
| [
"arcadia-devtools@yandex-team.ru"
] | arcadia-devtools@yandex-team.ru |
dd362f074593582e8d1cff300c32f36d8363e0e1 | 3120d8b22cc0b6755da6341434165baf0a855e9d | /Day2_Assignments/qn8.py | 7f7412c69962024cfaaa570c9f8403267924cb54 | [] | no_license | karthika-onebill/python_basics_assignments | 6033c8f442d452b463e81ba8bc70a6d1ed87b14f | 793bd0205d2f3eab47bf939aa0c0e002728805dd | refs/heads/master | 2023-05-24T14:18:46.603229 | 2021-06-20T02:17:02 | 2021-06-20T02:17:02 | 376,065,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | '''
8) the user enters a string and a substring. You have to print the number of times that the substring occurs in the given string
'''
# way 1 : using count() function
s = input("Enter the string : ")
substring = input("Enter the substring : ")
print(s.count(substring))
# way 2 : without using count()
cnt = 0
for i in range(len(s)):
if(substring == s[i:i+len(substring)]):
cnt += 1
print(cnt)
| [
"karthikavel2000@gmail.com"
] | karthikavel2000@gmail.com |
be1e735af83e692b35403ebe733bb449ff5aef36 | b9a54e1aeb517285c0d84506e615e709fdab8c1f | /movies/migrations/0003_auto_20200424_0022.py | fe6581f4dcee518ec61652d4a675ed46ffc1bbe2 | [] | no_license | ziad-elnaggar/Movies-Games | a827b8ff00e02b444e68f17e3d97cc376c3bb026 | b2fd788e18ac1c6d359e7be028961666e62df8f3 | refs/heads/master | 2023-01-12T14:33:45.092098 | 2020-11-16T06:04:48 | 2020-11-16T06:04:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | # Generated by Django 3.0.5 on 2020-04-23 22:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('movies', '0002_auto_20200420_2150'),
]
operations = [
migrations.CreateModel(
name='Usersmovies',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('movieid', models.CharField(max_length=100)),
('title', models.CharField(max_length=100)),
('year', models.CharField(max_length=100)),
('poster', models.CharField(max_length=1000)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.DeleteModel(
name='Usersmovie',
),
]
| [
"ziad.a.elnaggar@gmail.com"
] | ziad.a.elnaggar@gmail.com |
2ef2d309811ea4d2f3a0e53f43ae916c66ce51f3 | ae9f11f7078515b8ef87da6c4c56346bab5dd36d | /mailer/tasks.py | 1eca9c5d17cd43a78383deb51dbfe2f747864ca2 | [] | no_license | feedcase/CeleryProject | 26cc1caeeeca1a8d1f6ef38e15c88447acf4d606 | 86dfe4103d532a8540b0d56e1077b424f5a500b3 | refs/heads/master | 2023-02-24T06:48:10.996342 | 2021-02-01T17:09:11 | 2021-02-01T17:09:11 | 335,017,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | from django.core.mail import send_mail
from mailer.celery import app
from .services import send
from .models import Contacts
@app.task
def send_spam_email(user_email):
send(user_email)
| [
"seva1502@gmail.com"
] | seva1502@gmail.com |
e1cc86ce3d6ec88a63cf4bfe101118fa87b5c487 | bf1c74cae00d409b60889e0577716f0f0f17724a | /bomb.py | 039169381cfa28f09e387234af82af0fdc5137e9 | [] | no_license | wangpeilin/pygame- | e349399a4f0a2a03a1c76cc3a00439aa2a4e17ef | 3cad2a1920e0330968db7295960c1192615cf61b | refs/heads/master | 2020-08-02T09:50:54.834108 | 2019-09-27T12:22:48 | 2019-09-27T12:22:48 | 211,308,406 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | # 爆炸特效
import pygame
class Bomb(pygame.sprite.Sprite):
def __init__(self, screen):
super(Bomb, self).__init__()
self.screen = screen
self.image = [pygame.image.load("images/bomb-" + str(i) + ".png") for i in range(1, 8)]
self.index = 0
self.interval = 20
self.interval_index = 0
self.position = [0, 0]
self.visible = False
def set_pos(self, x, y):
self.position[0] = x
self.position[1] = y
def action(self):
if self.visible:
self.interval_index += 1
if self.interval_index < self.interval:
return
else:
self.interval_index = 0
self.index += 1
if self.index >= len(self.image):
self.index = 0
self.visible = False
def draw(self):
if self.visible:
self.screen.blit(self.image[self.index], self.position)
| [
"1163942544@qq.com"
] | 1163942544@qq.com |
5b4f5181977f8ac6e3c3156745081e3cce07a39e | 059f80a4a3d27a949d4e86578e09f07b47c7084b | /trajectory_manager_CORONA.py | 949a885f9c145efed37adb30165b545b0bd05d1b | [] | no_license | sozenoid/XYLENE_probing | ccd593201ca84273e5f0c736567b61b7d29c47df | b08ddded75c8b44a003ffc84646f37d3a5e3ffd3 | refs/heads/master | 2021-06-28T18:48:00.471783 | 2020-10-05T14:33:25 | 2020-10-05T14:33:25 | 166,769,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 09:08:16 2019
@author: macenrola
"""
import multiprocessing
import subprocess
import sys
import glob
import os
def launch_trajectory(traj_name):
cmd = "cp2k.sopt -i {0} -o {0}.out"
return subprocess.call(cmd.format(traj_name).split(), shell=False)
def print_trajectory(traj_name):
cmd = "cp2k.sopt -i {0} -o {0}.out"
print cmd.format(traj_name)
def make_inputs(pattern_to_target, node_size=40):
"""
PRE: Will take a suffix, collect all the matching files and generate inputs files for this script
POSE: Will print a nfile/node_size files
"""
# This is the Thomas pattern
pattern="""#!/bin/bash
#PBS -N 350-adam
#PBS -l select=1:ncpus=40:mpiprocs=40:ompthreads=1
#PBS -l walltime=48:0:00
#PBS -o 350-adam.out
#PBS -e 350-adam.err
#PBS -l software="CP2K"
cd {}
export OMP_NUM_THREADS=1
# Now run the program
export CP2K_DATA_DIR=/home/users/app/common_apps/chemistry/cp2k/dev/8.0_Jan2020_gcc9_mkl/data/
# module load knl-software intel-mpi cp2k/8.0-dev
module load .legacy-stratus-software knl-software intel-mkl cp2k/8.0-dev
/home/users/astar/ihpc/stuhlamb/XYLENE_probing/trajectory_manager.py {}
"""
flist = glob.glob("*{}".format(pattern_to_target))
ftowrite = []
for i, f in enumerate(sorted(flist)):
ftowrite.append(f)
if (i+1)%node_size==0 and i!=0:
with open("traj_launcher_{}.pbs".format(i/node_size), "wt") as w:
w.write(pattern.format(os.getcwd()," ".join(ftowrite)))
ftowrite=[]
if ftowrite!=[]:
with open("traj_launcher_{}.pbs".format(i/node_size), "wt") as w:
w.write(pattern.format(os.getcwd()," ".join(ftowrite)))
if __name__ == '__main__':
pool = multiprocessing.Pool(None)
if len(sys.argv)==1:
print """You need to provide arguments
Use `trajectory_manager.py suffix inp` to generate input files for all files in the current directory that have the suffix "inp"
"""
elif sys.argv[1]=="suffix" and len(sys.argv)==3:
make_inputs(sys.argv[2])
else:
# pool = multiprocessing.Pool(len(sys.argv[1:]))
tasks = sys.argv[1:]
results = []
r = pool.map_async(print_trajectory, tasks, callback=results.append)
r.wait() # Wait on the results
print results
r = pool.map(launch_trajectory, tasks)
print r
| [
"stuhlamb@corona.cm.cluster"
] | stuhlamb@corona.cm.cluster |
f18aa97b5ffc96f15248cad15ddee3ba1135c971 | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /0.22/_downloads/aaf6e18611e50c34953a2674b6489a9c/plot_30_info.py | 6f27946faf6e543cadc3b69272928b6c607cd2ee | [] | permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 8,689 | py | # -*- coding: utf-8 -*-
"""
.. _tut-info-class:
The Info data structure
=======================
This tutorial describes the :class:`mne.Info` data structure, which keeps track
of various recording details, and is attached to :class:`~mne.io.Raw`,
:class:`~mne.Epochs`, and :class:`~mne.Evoked` objects.
.. contents:: Page contents
:local:
:depth: 2
We'll begin by loading the Python modules we need, and loading the same
:ref:`example data <sample-dataset>` we used in the :ref:`introductory tutorial
<tut-overview>`:
"""
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
###############################################################################
# As seen in the :ref:`introductory tutorial <tut-overview>`, when a
# :class:`~mne.io.Raw` object is loaded, an :class:`~mne.Info` object is
# created automatically, and stored in the ``raw.info`` attribute:
print(raw.info)
###############################################################################
# However, it is not strictly necessary to load the :class:`~mne.io.Raw` object
# in order to view or edit the :class:`~mne.Info` object; you can extract all
# the relevant information into a stand-alone :class:`~mne.Info` object using
# :func:`mne.io.read_info`:
info = mne.io.read_info(sample_data_raw_file)
print(info)
###############################################################################
# As you can see, the :class:`~mne.Info` object keeps track of a lot of
# information about:
#
# - the recording system (gantry angle, HPI details, sensor digitizations,
# channel names, ...)
# - the experiment (project name and ID, subject information, recording date,
# experimenter name or ID, ...)
# - the data (sampling frequency, applied filter frequencies, bad channels,
# projectors, ...)
#
# The complete list of fields is given in :class:`the API documentation
# <mne.Info>`.
#
#
# Querying the ``Info`` object
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The fields in a :class:`~mne.Info` object act like Python :class:`dictionary
# <dict>` keys, using square brackets and strings to access the contents of a
# field:
print(info.keys())
print() # insert a blank line
print(info['ch_names'])
###############################################################################
# Most of the fields contain :class:`int`, :class:`float`, or :class:`list`
# data, but the ``chs`` field bears special mention: it contains a list of
# dictionaries (one :class:`dict` per channel) containing everything there is
# to know about a channel other than the data it recorded. Normally it is not
# necessary to dig into the details of the ``chs`` field — various MNE-Python
# functions can extract the information more cleanly than iterating over the
# list of dicts yourself — but it can be helpful to know what is in there. Here
# we show the keys for the first channel's :class:`dict`:
print(info['chs'][0].keys())
###############################################################################
# .. _picking_channels:
#
# Obtaining subsets of channels
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# It is often useful to convert between channel names and the integer indices
# identifying rows of the data array where those channels' measurements are
# stored. The :class:`~mne.Info` object is useful for this task; two
# convenience functions that rely on the :class:`mne.Info` object for picking
# channels are :func:`mne.pick_channels` and :func:`mne.pick_types`.
# :func:`~mne.pick_channels` minimally takes a list of all channel names and a
# list of channel names to include; it is also possible to provide an empty
# list to ``include`` and specify which channels to ``exclude`` instead:
print(mne.pick_channels(info['ch_names'], include=['MEG 0312', 'EEG 005']))
print(mne.pick_channels(info['ch_names'], include=[],
exclude=['MEG 0312', 'EEG 005']))
###############################################################################
# :func:`~mne.pick_types` works differently, since channel type cannot always
# be reliably determined from channel name alone. Consequently,
# :func:`~mne.pick_types` needs an :class:`~mne.Info` object instead of just a
# list of channel names, and has boolean keyword arguments for each channel
# type. Default behavior is to pick only MEG channels (and MEG reference
# channels if present) and exclude any channels already marked as "bad" in the
# ``bads`` field of the :class:`~mne.Info` object. Therefore, to get *all* and
# *only* the EEG channel indices (including the "bad" EEG channels) we must
# pass ``meg=False`` and ``exclude=[]``:
print(mne.pick_types(info, meg=False, eeg=True, exclude=[]))
###############################################################################
# Note that the ``meg`` and ``fnirs`` parameters of :func:`~mne.pick_types`
# accept strings as well as boolean values, to allow selecting only
# magnetometer or gradiometer channels (via ``meg='mag'`` or ``meg='grad'``) or
# to pick only oxyhemoglobin or deoxyhemoglobin channels (via ``fnirs='hbo'``
# or ``fnirs='hbr'``, respectively).
#
# A third way to pick channels from an :class:`~mne.Info` object is to apply
# `regular expression`_ matching to the channel names using
# :func:`mne.pick_channels_regexp`. Here the ``^`` represents the beginning of
# the string and ``.`` character matches any single character, so both EEG and
# EOG channels will be selected:
print(mne.pick_channels_regexp(info['ch_names'], '^E.G'))
###############################################################################
# :func:`~mne.pick_channels_regexp` can be especially useful for channels named
# according to the `10-20 <ten-twenty_>`_ system (e.g., to select all channels
# ending in "z" to get the midline, or all channels beginning with "O" to get
# the occipital channels). Note that :func:`~mne.pick_channels_regexp` uses the
# Python standard module :mod:`re` to perform regular expression matching; see
# the documentation of the :mod:`re` module for implementation details.
#
# .. warning::
# Both :func:`~mne.pick_channels` and :func:`~mne.pick_channels_regexp`
# operate on lists of channel names, so they are unaware of which channels
# (if any) have been marked as "bad" in ``info['bads']``. Use caution to
# avoid accidentally selecting bad channels.
#
#
# Obtaining channel type information
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Sometimes it can be useful to know channel type based on its index in the
# data array. For this case, use :func:`mne.channel_type`, which takes
# an :class:`~mne.Info` object and a single integer channel index:
print(mne.channel_type(info, 25))
###############################################################################
# To obtain several channel types at once, you could embed
# :func:`~mne.channel_type` in a :term:`list comprehension`, or use the
# :meth:`~mne.io.Raw.get_channel_types` method of a :class:`~mne.io.Raw`,
# :class:`~mne.Epochs`, or :class:`~mne.Evoked` instance:
picks = (25, 76, 77, 319)
print([mne.channel_type(info, x) for x in picks])
print(raw.get_channel_types(picks=picks))
###############################################################################
# Alternatively, you can get the indices of all channels of *all* channel types
# present in the data, using :func:`~mne.channel_indices_by_type`,
# which returns a :class:`dict` with channel types as keys, and lists of
# channel indices as values:
ch_idx_by_type = mne.channel_indices_by_type(info)
print(ch_idx_by_type.keys())
print(ch_idx_by_type['eog'])
###############################################################################
# Dropping channels from an ``Info`` object
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If you want to modify an :class:`~mne.Info` object by eliminating some of the
# channels in it, you can use the :func:`mne.pick_info` function to pick the
# channels you want to keep and omit the rest:
print(info['nchan'])
eeg_indices = mne.pick_types(info, meg=False, eeg=True)
print(mne.pick_info(info, eeg_indices)['nchan'])
###############################################################################
# By default, :func:`~mne.pick_info` will make a copy of the original
# :class:`~mne.Info` object before modifying it; if you want to modify it
# in-place, include the parameter ``copy=False``.
#
#
# .. LINKS
#
# .. _`regular expression`: https://en.wikipedia.org/wiki/Regular_expression
# .. _`ten-twenty`: https://en.wikipedia.org/wiki/10%E2%80%9320_system_(EEG)
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
b460c3a97a846a6135ef38b86c0ca6c1c5edc1d9 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/doctor/migrations/0231_doctor_rating_data.py | a7aa55dce7e42ba73a952ade46e91fef58e6585e | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # Generated by Django 2.0.5 on 2019-03-27 07:09
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('doctor', '0230_merge_20190320_1712'),
]
operations = [
migrations.AddField(
model_name='doctor',
name='rating_data',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
]
| [
"root@PBMAC518.local"
] | root@PBMAC518.local |
1692e595b877b44d05dbf5b3b8052e97d5d06780 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/matplotlib/2019/8/figure.py | 4e0cc02f9b055f7b8ac7ab105f45c733614451a0 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 101,029 | py | """
The figure module provides the top-level
:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which
contains all the plot elements. The following classes are defined
:class:`SubplotParams`
control the default spacing of the subplots
:class:`Figure`
Top level container for all plot elements.
"""
import logging
from numbers import Integral
import numpy as np
from matplotlib import rcParams
from matplotlib import backends, docstring, projections
from matplotlib import __version__ as _mpl_version
from matplotlib import get_backend
import matplotlib.artist as martist
from matplotlib.artist import Artist, allow_rasterization
from matplotlib.backend_bases import FigureCanvasBase
import matplotlib.cbook as cbook
import matplotlib.colorbar as cbar
import matplotlib.image as mimage
from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
from matplotlib.gridspec import GridSpec
import matplotlib.legend as mlegend
from matplotlib.patches import Rectangle
from matplotlib.projections import (get_projection_names,
process_projection_requirements)
from matplotlib.text import Text, TextWithDash
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
TransformedBbox)
import matplotlib._layoutbox as layoutbox
from matplotlib.backend_bases import NonGuiException
_log = logging.getLogger(__name__)
docstring.interpd.update(projection_names=get_projection_names())
def _stale_figure_callback(self, val):
if self.figure:
self.figure.stale = val
class _AxesStack(cbook.Stack):
    """
    Specialization of the `.Stack` to handle all tracking of
    `~matplotlib.axes.Axes` in a `.Figure`.
    This stack stores ``key, (ind, axes)`` pairs, where:
    * **key** should be a hash of the args and kwargs
      used in generating the Axes.
    * **ind** is a serial number for tracking the order
      in which axes were added.
    The AxesStack is a callable, where ``ax_stack()`` returns
    the current axes. Alternatively the :meth:`current_key_axes` will
    return the current key and associated axes.
    """
    def __init__(self):
        super().__init__()
        # Monotonic counter stamped onto each entry so that as_list()
        # can recover insertion order regardless of stack position.
        self._ind = 0
    def as_list(self):
        """
        Return a list of the Axes instances that have been added to the figure.
        """
        # Entries are (key, (ind, axes)); sorting the (ind, axes) pairs
        # orders the axes by insertion serial number.
        ia_list = [a for k, a in self._elements]
        ia_list.sort()
        return [a for i, a in ia_list]
    def get(self, key):
        """
        Return the Axes instance that was added with *key*.
        If it is not present, return *None*.
        """
        item = dict(self._elements).get(key)
        if item is None:
            return None
        # A hit means the caller re-used the args/kwargs of an existing
        # axes; this reuse behavior is deprecated.
        cbook.warn_deprecated(
            "2.1",
            message="Adding an axes using the same arguments as a previous "
            "axes currently reuses the earlier instance. In a future "
            "version, a new instance will always be created and returned. "
            "Meanwhile, this warning can be suppressed, and the future "
            "behavior ensured, by passing a unique label to each axes "
            "instance.")
        return item[1]
    def _entry_from_axes(self, e):
        # Invert the (key, (ind, axes)) entries to recover the full stack
        # entry for a given Axes instance.
        ind, k = {a: (ind, k) for k, (ind, a) in self._elements}[e]
        return (k, (ind, e))
    def remove(self, a):
        """Remove the axes from the stack."""
        super().remove(self._entry_from_axes(a))
    def bubble(self, a):
        """
        Move the given axes, which must already exist in the
        stack, to the top.
        """
        return super().bubble(self._entry_from_axes(a))
    def add(self, key, a):
        """
        Add Axes *a*, with key *key*, to the stack, and return the stack.
        If *key* is unhashable, replace it by a unique, arbitrary object.
        If *a* is already on the stack, don't add it again, but
        return *None*.
        """
        # All the error checking may be unnecessary; but this method
        # is called so seldom that the overhead is negligible.
        cbook._check_isinstance(Axes, a=a)
        try:
            hash(key)
        except TypeError:
            key = object()
        a_existing = self.get(key)
        if a_existing is not None:
            super().remove((key, a_existing))
            cbook._warn_external(
                "key {!r} already existed; Axes is being replaced".format(key))
            # I don't think the above should ever happen.
        if a in self:
            return None
        self._ind += 1
        return super().push((key, (self._ind, a)))
    def current_key_axes(self):
        """
        Return a tuple of ``(key, axes)`` for the active axes.
        If no axes exists on the stack, then returns ``(None, None)``.
        """
        if not len(self._elements):
            return self._default, self._default
        else:
            key, (index, axes) = self._elements[self._pos]
            return key, axes
    def __call__(self):
        # Calling the stack returns the current axes (or None).
        return self.current_key_axes()[1]
    def __contains__(self, a):
        return a in self.as_list()
@cbook.deprecated("3.2")
class AxesStack(_AxesStack):
    # Deprecated public alias kept only for backward compatibility;
    # use the private _AxesStack instead.
    pass
class SubplotParams:
    """
    A class to hold the parameters for a subplot.
    """
    def __init__(self, left=None, bottom=None, right=None, top=None,
                 wspace=None, hspace=None):
        """
        All dimensions are fractions of the figure width or height.
        Defaults are given by :rc:`figure.subplot.[name]`.
        Parameters
        ----------
        left : float
            The left side of the subplots of the figure.
        right : float
            The right side of the subplots of the figure.
        bottom : float
            The bottom of the subplots of the figure.
        top : float
            The top of the subplots of the figure.
        wspace : float
            The amount of width reserved for space between subplots,
            expressed as a fraction of the average axis width.
        hspace : float
            The amount of height reserved for space between subplots,
            expressed as a fraction of the average axis height.
        """
        self.validate = True
        self.update(left, bottom, right, top, wspace, hspace)
    def update(self, left=None, bottom=None, right=None, top=None,
               wspace=None, hspace=None):
        """
        Update the dimensions of the passed parameters. *None* means unchanged.
        """
        # Snapshot the current values so a failed validation can roll back.
        previous = {name: getattr(self, name, None)
                    for name in ('left', 'right', 'top', 'bottom',
                                 'wspace', 'hspace')}
        for name, value in zip(
                ('left', 'right', 'bottom', 'top', 'wspace', 'hspace'),
                (left, right, bottom, top, wspace, hspace)):
            self._update_this(name, value)
        def rollback():
            # Restore the pre-update values before raising.
            for name, value in previous.items():
                setattr(self, name, value)
        if self.validate:
            if self.left >= self.right:
                rollback()
                raise ValueError('left cannot be >= right')
            if self.bottom >= self.top:
                rollback()
                raise ValueError('bottom cannot be >= top')
    def _update_this(self, s, val):
        # Resolution order: explicit value, then the existing attribute,
        # then the rcParams default 'figure.subplot.<name>'.
        if val is None:
            val = getattr(self, s, None)
            if val is None:
                key = 'figure.subplot.' + s
                val = rcParams[key]
        setattr(self, s, val)
class Figure(Artist):
"""
The top level container for all the plot elements.
The Figure instance supports callbacks through a *callbacks* attribute
which is a `.CallbackRegistry` instance. The events you can connect to
are 'dpi_changed', and the callback will be called with ``func(fig)`` where
fig is the `Figure` instance.
Attributes
----------
patch
The `.Rectangle` instance representing the figure background patch.
suppressComposite
For multiple figure images, the figure will make composite images
depending on the renderer option_image_nocomposite function. If
*suppressComposite* is a boolean, this will override the renderer.
"""
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __repr__(self):
return "<{clsname} size {h:g}x{w:g} with {naxes} Axes>".format(
clsname=self.__class__.__name__,
h=self.bbox.size[0], w=self.bbox.size[1],
naxes=len(self.axes),
)
    def __init__(self,
                 figsize=None,
                 dpi=None,
                 facecolor=None,
                 edgecolor=None,
                 linewidth=0.0,
                 frameon=None,
                 subplotpars=None,  # default to rc
                 tight_layout=None,  # default to rc figure.autolayout
                 constrained_layout=None,  # default to rc
                                           #figure.constrained_layout.use
                 ):
        """
        Parameters
        ----------
        figsize : 2-tuple of floats, default: :rc:`figure.figsize`
            Figure dimension ``(width, height)`` in inches.
        dpi : float, default: :rc:`figure.dpi`
            Dots per inch.
        facecolor : default: :rc:`figure.facecolor`
            The figure patch facecolor.
        edgecolor : default: :rc:`figure.edgecolor`
            The figure patch edge color.
        linewidth : float
            The linewidth of the frame (i.e. the edge linewidth of the figure
            patch).
        frameon : bool, default: :rc:`figure.frameon`
            If ``False``, suppress drawing the figure background patch.
        subplotpars : :class:`SubplotParams`
            Subplot parameters. If not given, the default subplot
            parameters :rc:`figure.subplot.*` are used.
        tight_layout : bool or dict, default: :rc:`figure.autolayout`
            If ``False`` use *subplotpars*. If ``True`` adjust subplot
            parameters using `.tight_layout` with default padding.
            When providing a dict containing the keys ``pad``, ``w_pad``,
            ``h_pad``, and ``rect``, the default `.tight_layout` paddings
            will be overridden.
        constrained_layout : bool
            If ``True`` use constrained layout to adjust positioning of plot
            elements. Like ``tight_layout``, but designed to be more
            flexible. See
            :doc:`/tutorials/intermediate/constrainedlayout_guide`
            for examples. (Note: does not work with :meth:`.subplot` or
            :meth:`.subplot2grid`.)
            Defaults to :rc:`figure.constrained_layout.use`.
        """
        super().__init__()
        # remove the non-figure artist _axes property
        # as it makes no sense for a figure to be _in_ an axes
        # this is used by the property methods in the artist base class
        # which are over-ridden in this class
        del self._axes
        self.callbacks = cbook.CallbackRegistry()
        # Resolve every rc-dependent default up front so the rest of the
        # constructor works with concrete values.
        if figsize is None:
            figsize = rcParams['figure.figsize']
        if dpi is None:
            dpi = rcParams['figure.dpi']
        if facecolor is None:
            facecolor = rcParams['figure.facecolor']
        if edgecolor is None:
            edgecolor = rcParams['figure.edgecolor']
        if frameon is None:
            frameon = rcParams['figure.frameon']
        if not np.isfinite(figsize).all() or (np.array(figsize) <= 0).any():
            raise ValueError('figure size must be positive finite not '
                             f'{figsize}')
        self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
        self.dpi_scale_trans = Affine2D().scale(dpi)
        # do not use property as it will trigger
        self._dpi = dpi
        # Pixel-space bbox: the inches bbox scaled by dpi.
        self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
        self.transFigure = BboxTransformTo(self.bbox)
        self.patch = Rectangle(
            xy=(0, 0), width=1, height=1, visible=frameon,
            facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth,
            # Don't let the figure patch influence bbox calculation.
            in_layout=False)
        self._set_artist_props(self.patch)
        self.patch.set_antialiased(False)
        FigureCanvasBase(self)  # Set self.canvas.
        self._suptitle = None
        if subplotpars is None:
            subplotpars = SubplotParams()
        self.subplotpars = subplotpars
        # constrained_layout:
        self._layoutbox = None
        # set in set_constrained_layout_pads()
        self.set_constrained_layout(constrained_layout)
        self.set_tight_layout(tight_layout)
        self._axstack = _AxesStack()  # track all figure axes and current axes
        self.clf()
        self._cachedRenderer = None
        # groupers to keep track of x and y labels we want to align.
        # see self.align_xlabels and self.align_ylabels and
        # axis._get_tick_boxes_siblings
        self._align_xlabel_grp = cbook.Grouper()
        self._align_ylabel_grp = cbook.Grouper()
        # list of child gridspecs for this figure
        self._gridspecs = []
    # TODO: I'd like to dynamically add the _repr_html_ method
    # to the figure in the right context, but then IPython doesn't
    # use it, for some reason.
    def _repr_html_(self):
        """Return inline HTML for IPython rich display (WebAgg only).

        Returns None implicitly for all other canvas types, which tells
        IPython to fall back to the plain repr.
        """
        # We can't use "isinstance" here, because then we'd end up importing
        # webagg unconditionally.
        if 'WebAgg' in type(self.canvas).__name__:
            from matplotlib.backends import backend_webagg
            return backend_webagg.ipython_inline_display(self)
    def show(self, warn=True):
        """
        If using a GUI backend with pyplot, display the figure window.
        If the figure was not created using
        :func:`~matplotlib.pyplot.figure`, it will lack a
        :class:`~matplotlib.backend_bases.FigureManagerBase`, and
        will raise an AttributeError.
        .. warning::
            This does not manage an GUI event loop. Consequently, the figure
            may only be shown briefly or not shown at all if you or your
            environment are not managing an event loop.
            Proper use cases for `.Figure.show` include running this from a
            GUI application or an IPython shell.
            If you're running a pure python shell or executing a non-GUI
            python script, you should use `matplotlib.pyplot.show` instead,
            which takes care of managing the event loop for you.
        Parameters
        ----------
        warn : bool
            If ``True`` and we are not running headless (i.e. on Linux with an
            unset DISPLAY), issue warning when called on a non-GUI backend.
        """
        try:
            manager = getattr(self.canvas, 'manager')
        except AttributeError as err:
            raise AttributeError("%s\n"
                                 "Figure.show works only "
                                 "for figures managed by pyplot, normally "
                                 "created by pyplot.figure()." % err)
        if manager is not None:
            try:
                manager.show()
                return
            except NonGuiException:
                pass
        # Either there is no manager, or the manager cannot actually show a
        # window (NonGuiException): warn unless running headless or the
        # caller opted out.
        if (backends._get_running_interactive_framework() != "headless"
                and warn):
            cbook._warn_external('Matplotlib is currently using %s, which is '
                                 'a non-GUI backend, so cannot show the '
                                 'figure.' % get_backend())
    def _get_axes(self):
        # Backing getter for the ``axes`` property below; the list is a
        # fresh snapshot built from the axes stack on every access.
        return self._axstack.as_list()
    axes = property(fget=_get_axes,
                    doc="List of axes in the Figure. You can access the "
                        "axes in the Figure through this list. "
                        "Do not modify the list itself. Instead, use "
                        "`~Figure.add_axes`, `~.Figure.subplot` or "
                        "`~.Figure.delaxes` to add or remove an axes.")
    def _get_dpi(self):
        # Backing getter for the ``dpi`` property.
        return self._dpi
    def _set_dpi(self, dpi, forward=True):
        """
        Parameters
        ----------
        dpi : float
        forward : bool
            Passed on to `~.Figure.set_size_inches`
        """
        self._dpi = dpi
        self.dpi_scale_trans.clear().scale(dpi)
        # Keep the size in inches fixed; re-setting it updates the pixel
        # (canvas) size for the new dpi and marks the figure stale.
        w, h = self.get_size_inches()
        self.set_size_inches(w, h, forward=forward)
        self.callbacks.process('dpi_changed', self)
    dpi = property(_get_dpi, _set_dpi, doc="The resolution in dots per inch.")
    def get_tight_layout(self):
        """Return whether `.tight_layout` is called when drawing."""
        # ``_tight`` is a plain bool maintained by set_tight_layout().
        return self._tight
def set_tight_layout(self, tight):
"""
Set whether and how `.tight_layout` is called when drawing.
Parameters
----------
tight : bool or dict with keys "pad", "w_pad", "h_pad", "rect" or None
If a bool, sets whether to call `.tight_layout` upon drawing.
If ``None``, use the ``figure.autolayout`` rcparam instead.
If a dict, pass it as kwargs to `.tight_layout`, overriding the
default paddings.
"""
if tight is None:
tight = rcParams['figure.autolayout']
self._tight = bool(tight)
self._tight_parameters = tight if isinstance(tight, dict) else {}
self.stale = True
    def get_constrained_layout(self):
        """
        Return a boolean: True means constrained layout is being used.
        See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
        """
        # ``_constrained`` is a plain bool maintained by
        # set_constrained_layout().
        return self._constrained
def set_constrained_layout(self, constrained):
"""
Set whether ``constrained_layout`` is used upon drawing. If None,
the rcParams['figure.constrained_layout.use'] value will be used.
When providing a dict containing the keys `w_pad`, `h_pad`
the default ``constrained_layout`` paddings will be
overridden. These pads are in inches and default to 3.0/72.0.
``w_pad`` is the width padding and ``h_pad`` is the height padding.
See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
Parameters
----------
constrained : bool or dict or None
"""
self._constrained_layout_pads = dict()
self._constrained_layout_pads['w_pad'] = None
self._constrained_layout_pads['h_pad'] = None
self._constrained_layout_pads['wspace'] = None
self._constrained_layout_pads['hspace'] = None
if constrained is None:
constrained = rcParams['figure.constrained_layout.use']
self._constrained = bool(constrained)
if isinstance(constrained, dict):
self.set_constrained_layout_pads(**constrained)
else:
self.set_constrained_layout_pads()
self.stale = True
def set_constrained_layout_pads(self, **kwargs):
"""
Set padding for ``constrained_layout``. Note the kwargs can be passed
as a dictionary ``fig.set_constrained_layout(**paddict)``.
See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
Parameters
----------
w_pad : scalar
Width padding in inches. This is the pad around axes
and is meant to make sure there is enough room for fonts to
look good. Defaults to 3 pts = 0.04167 inches
h_pad : scalar
Height padding in inches. Defaults to 3 pts.
wspace : scalar
Width padding between subplots, expressed as a fraction of the
subplot width. The total padding ends up being w_pad + wspace.
hspace : scalar
Height padding between subplots, expressed as a fraction of the
subplot width. The total padding ends up being h_pad + hspace.
"""
todo = ['w_pad', 'h_pad', 'wspace', 'hspace']
for td in todo:
if td in kwargs and kwargs[td] is not None:
self._constrained_layout_pads[td] = kwargs[td]
else:
self._constrained_layout_pads[td] = (
rcParams['figure.constrained_layout.' + td])
def get_constrained_layout_pads(self, relative=False):
"""
Get padding for ``constrained_layout``.
Returns a list of `w_pad, h_pad` in inches and
`wspace` and `hspace` as fractions of the subplot.
See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
Parameters
----------
relative : boolean
If `True`, then convert from inches to figure relative.
"""
w_pad = self._constrained_layout_pads['w_pad']
h_pad = self._constrained_layout_pads['h_pad']
wspace = self._constrained_layout_pads['wspace']
hspace = self._constrained_layout_pads['hspace']
if relative and (w_pad is not None or h_pad is not None):
renderer0 = layoutbox.get_renderer(self)
dpi = renderer0.dpi
w_pad = w_pad * dpi / renderer0.width
h_pad = h_pad * dpi / renderer0.height
return w_pad, h_pad, wspace, hspace
    def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right', which=None):
        """
        Date ticklabels often overlap, so it is useful to rotate them
        and right align them. Also, a common use case is a number of
        subplots with shared xaxes where the x-axis is date data. The
        ticklabels are often long, and it helps to rotate them on the
        bottom subplot and turn them off on other subplots, as well as
        turn off xlabels.
        Parameters
        ----------
        bottom : scalar
            The bottom of the subplots for :meth:`subplots_adjust`.
        rotation : angle in degrees
            The rotation of the xtick labels.
        ha : str
            The horizontal alignment of the xticklabels.
        which : {None, 'major', 'minor', 'both'}
            Selects which ticklabels to rotate. Default is None which works
            the same as major.
        """
        # Only subplots (axes with is_last_row) can be told apart by row,
        # so the hide-labels logic is limited to all-subplot figures.
        allsubplots = all(hasattr(ax, 'is_last_row') for ax in self.axes)
        if len(self.axes) == 1:
            # Single axes: just rotate/align its labels.
            for label in self.axes[0].get_xticklabels(which=which):
                label.set_ha(ha)
                label.set_rotation(rotation)
        else:
            if allsubplots:
                for ax in self.get_axes():
                    if ax.is_last_row():
                        # Bottom-row subplots keep (rotated) labels.
                        for label in ax.get_xticklabels(which=which):
                            label.set_ha(ha)
                            label.set_rotation(rotation)
                    else:
                        # Other rows hide their tick labels and xlabel.
                        for label in ax.get_xticklabels(which=which):
                            label.set_visible(False)
                        ax.set_xlabel('')
        if allsubplots:
            self.subplots_adjust(bottom=bottom)
        self.stale = True
def get_children(self):
"""Get a list of artists contained in the figure."""
return [self.patch,
*self.artists,
*self.axes,
*self.lines,
*self.patches,
*self.texts,
*self.images,
*self.legends]
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the figure.
Returns
-------
bool, {}
"""
inside, info = self._default_contains(mouseevent, figure=self)
if inside is not None:
return inside, info
inside = self.bbox.contains(mouseevent.x, mouseevent.y)
return inside, {}
    def get_window_extent(self, *args, **kwargs):
        """
        Return the figure bounding box in display space. Arguments are ignored.
        """
        # *args/**kwargs are accepted but unused — presumably to match the
        # Artist.get_window_extent(renderer) signature; verify against the
        # base class before changing.
        return self.bbox
    def suptitle(self, t, **kwargs):
        """
        Add a centered title to the figure.
        Parameters
        ----------
        t : str
            The title text.
        x : float, default 0.5
            The x location of the text in figure coordinates.
        y : float, default 0.98
            The y location of the text in figure coordinates.
        horizontalalignment, ha : {'center', 'left', right'}, default: 'center'
            The horizontal alignment of the text relative to (*x*, *y*).
        verticalalignment, va : {'top', 'center', 'bottom', 'baseline'}, \
default: 'top'
            The vertical alignment of the text relative to (*x*, *y*).
        fontsize, size : default: :rc:`figure.titlesize`
            The font size of the text. See `.Text.set_size` for possible
            values.
        fontweight, weight : default: :rc:`figure.titleweight`
            The font weight of the text. See `.Text.set_weight` for possible
            values.
        Returns
        -------
        text
            The `.Text` instance of the title.
        Other Parameters
        ----------------
        fontproperties : None or dict, optional
            A dict of font properties. If *fontproperties* is given the
            default values for font size and weight are taken from the
            `FontProperties` defaults. :rc:`figure.titlesize` and
            :rc:`figure.titleweight` are ignored in this case.
        **kwargs
            Additional kwargs are :class:`matplotlib.text.Text` properties.
        Examples
        --------
        >>> fig.suptitle('This is the figure title', fontsize=12)
        """
        # An explicit x/y disables constrained-layout positioning below.
        manual_position = ('x' in kwargs or 'y' in kwargs)
        x = kwargs.pop('x', 0.5)
        y = kwargs.pop('y', 0.98)
        if 'horizontalalignment' not in kwargs and 'ha' not in kwargs:
            kwargs['horizontalalignment'] = 'center'
        if 'verticalalignment' not in kwargs and 'va' not in kwargs:
            kwargs['verticalalignment'] = 'top'
        if 'fontproperties' not in kwargs:
            if 'fontsize' not in kwargs and 'size' not in kwargs:
                kwargs['size'] = rcParams['figure.titlesize']
            if 'fontweight' not in kwargs and 'weight' not in kwargs:
                kwargs['weight'] = rcParams['figure.titleweight']
        sup = self.text(x, y, t, **kwargs)
        if self._suptitle is not None:
            # Reuse the existing Text instance; copy the new properties
            # over and discard the freshly created one.
            self._suptitle.set_text(t)
            self._suptitle.set_position((x, y))
            self._suptitle.update_from(sup)
            sup.remove()
        else:
            self._suptitle = sup
            self._suptitle._layoutbox = None
            if self._layoutbox is not None and not manual_position:
                w_pad, h_pad, wspace, hspace = \
                    self.get_constrained_layout_pads(relative=True)
                figlb = self._layoutbox
                self._suptitle._layoutbox = layoutbox.LayoutBox(
                    parent=figlb, artist=self._suptitle,
                    name=figlb.name+'.suptitle')
                # stack the suptitle on top of all the children.
                # Some day this should be on top of all the children in the
                # gridspec only.
                for child in figlb.children:
                    if child is not self._suptitle._layoutbox:
                        layoutbox.vstack([self._suptitle._layoutbox,
                                          child],
                                         padding=h_pad*2., strength='required')
        self.stale = True
        return self._suptitle
    def set_canvas(self, canvas):
        """
        Set the canvas that contains the figure
        Parameters
        ----------
        canvas : FigureCanvas
        """
        # No validation is performed; the caller is responsible for
        # providing a compatible canvas.
        self.canvas = canvas
    def figimage(self, X, xo=0, yo=0, alpha=None, norm=None, cmap=None,
                 vmin=None, vmax=None, origin=None, resize=False, **kwargs):
        """
        Add a non-resampled image to the figure.
        The image is attached to the lower or upper left corner depending on
        *origin*.
        Parameters
        ----------
        X
            The image data. This is an array of one of the following shapes:
            - MxN: luminance (grayscale) values
            - MxNx3: RGB values
            - MxNx4: RGBA values
        xo, yo : int
            The *x*/*y* image offset in pixels.
        alpha : None or float
            The alpha blending value.
        norm : :class:`matplotlib.colors.Normalize`
            A :class:`.Normalize` instance to map the luminance to the
            interval [0, 1].
        cmap : str or :class:`matplotlib.colors.Colormap`
            The colormap to use. Default: :rc:`image.cmap`.
        vmin, vmax : scalar
            If *norm* is not given, these values set the data limits for the
            colormap.
        origin : {'upper', 'lower'}
            Indicates where the [0, 0] index of the array is in the upper left
            or lower left corner of the axes. Defaults to :rc:`image.origin`.
        resize : bool
            If *True*, resize the figure to match the given image size.
        Returns
        -------
        :class:`matplotlib.image.FigureImage`
        Other Parameters
        ----------------
        **kwargs
            Additional kwargs are `.Artist` kwargs passed on to `.FigureImage`.
        Notes
        -----
        figimage complements the axes image
        (:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
        to fit the current axes. If you want a resampled image to
        fill the entire figure, you can define an
        :class:`~matplotlib.axes.Axes` with extent [0, 0, 1, 1].
        Examples::
            f = plt.figure()
            nx = int(f.get_figwidth() * f.dpi)
            ny = int(f.get_figheight() * f.dpi)
            data = np.random.random((ny, nx))
            f.figimage(data)
            plt.show()
        """
        if resize:
            # Match the figure size (in inches) to the image's pixel size.
            dpi = self.get_dpi()
            figsize = [x / dpi for x in (X.shape[1], X.shape[0])]
            self.set_size_inches(figsize, forward=True)
        im = mimage.FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
        im.stale_callback = _stale_figure_callback
        im.set_array(X)
        im.set_alpha(alpha)
        if norm is None:
            # Without a norm, vmin/vmax define the color limits directly.
            im.set_clim(vmin, vmax)
        self.images.append(im)
        im._remove_method = self.images.remove
        self.stale = True
        return im
def set_size_inches(self, w, h=None, forward=True):
"""
Set the figure size in inches.
Call signatures::
fig.set_size_inches(w, h) # OR
fig.set_size_inches((w, h))
Parameters
----------
w : (float, float) or float
Width and height in inches (if height not specified as a separate
argument) or width.
h : float
Height in inches.
forward : bool, default: True
If ``True``, the canvas size is automatically updated, e.g.,
you can resize the figure window from the shell.
See Also
--------
matplotlib.Figure.get_size_inches
"""
if h is None: # Got called with a single pair as argument.
w, h = w
size = np.array([w, h])
if not np.isfinite(size).all() or (size <= 0).any():
raise ValueError(f'figure size must be positive finite not {size}')
self.bbox_inches.p1 = size
if forward:
canvas = getattr(self, 'canvas')
if canvas is not None:
dpi_ratio = getattr(canvas, '_dpi_ratio', 1)
manager = getattr(canvas, 'manager', None)
if manager is not None:
manager.resize(*(size * self.dpi / dpi_ratio).astype(int))
self.stale = True
    def get_size_inches(self):
        """
        Returns the current size of the figure in inches.
        Returns
        -------
        size : ndarray
            The size (width, height) of the figure in inches.
        See Also
        --------
        matplotlib.Figure.set_size_inches
        """
        # np.array() returns a copy, so callers cannot mutate bbox_inches.
        return np.array(self.bbox_inches.p1)
    def get_edgecolor(self):
        """Get the edge color of the Figure rectangle."""
        # Delegates to the background patch.
        return self.patch.get_edgecolor()
    def get_facecolor(self):
        """Get the face color of the Figure rectangle."""
        # Delegates to the background patch.
        return self.patch.get_facecolor()
    def get_figwidth(self):
        """Return the figure width as a float."""
        # Width in inches, taken from the inches-space bounding box.
        return self.bbox_inches.width
    def get_figheight(self):
        """Return the figure height as a float."""
        # Height in inches, taken from the inches-space bounding box.
        return self.bbox_inches.height
    def get_dpi(self):
        """Return the resolution in dots per inch as a float."""
        # Goes through the ``dpi`` property (backed by _dpi).
        return self.dpi
    def get_frameon(self):
        """
        Return the figure's background patch visibility, i.e.
        whether the figure background will be drawn. Equivalent to
        ``Figure.patch.get_visible()``.
        """
        return self.patch.get_visible()
    def set_edgecolor(self, color):
        """
        Set the edge color of the Figure rectangle.
        Parameters
        ----------
        color : color
        """
        # Delegates to the background patch.
        self.patch.set_edgecolor(color)
    def set_facecolor(self, color):
        """
        Set the face color of the Figure rectangle.
        Parameters
        ----------
        color : color
        """
        # Delegates to the background patch.
        self.patch.set_facecolor(color)
    def set_dpi(self, val):
        """
        Set the resolution of the figure in dots-per-inch.
        Parameters
        ----------
        val : float
        """
        # Assigning the ``dpi`` property runs _set_dpi, which rescales the
        # canvas and fires the 'dpi_changed' callback.
        self.dpi = val
        self.stale = True
    def set_figwidth(self, val, forward=True):
        """
        Set the width of the figure in inches.
        Parameters
        ----------
        val : float
        forward : bool
        """
        # Delegates to set_size_inches, preserving the current height.
        self.set_size_inches(val, self.get_figheight(), forward=forward)
    def set_figheight(self, val, forward=True):
        """
        Set the height of the figure in inches.
        Parameters
        ----------
        val : float
        forward : bool
        """
        # Delegates to set_size_inches, preserving the current width.
        self.set_size_inches(self.get_figwidth(), val, forward=forward)
    def set_frameon(self, b):
        """
        Set the figure's background patch visibility, i.e.
        whether the figure background will be drawn. Equivalent to
        ``Figure.patch.set_visible()``.
        Parameters
        ----------
        b : bool
        """
        self.patch.set_visible(b)
        self.stale = True
    # Property alias so ``fig.frameon`` reads/writes the patch visibility.
    frameon = property(get_frameon, set_frameon)
    def delaxes(self, ax):
        """
        Remove the `~matplotlib.axes.Axes` *ax* from the figure and update the
        current axes.
        """
        self._axstack.remove(ax)
        # Notify registered axes-change observers.
        for func in self._axobservers:
            func(self)
        self.stale = True
    def add_artist(self, artist, clip=False):
        """
        Add any :class:`~matplotlib.artist.Artist` to the figure.
        Usually artists are added to axes objects using
        :meth:`matplotlib.axes.Axes.add_artist`, but use this method in the
        rare cases that adding directly to the figure is necessary.
        Parameters
        ----------
        artist : `~matplotlib.artist.Artist`
            The artist to add to the figure. If the added artist has no
            transform previously set, its transform will be set to
            ``figure.transFigure``.
        clip : bool, optional, default ``False``
            An optional parameter ``clip`` determines whether the added artist
            should be clipped by the figure patch. Default is *False*,
            i.e. no clipping.
        Returns
        -------
        artist : The added `~matplotlib.artist.Artist`
        """
        artist.set_figure(self)
        self.artists.append(artist)
        artist._remove_method = self.artists.remove
        # Only install the figure transform if the caller has not already
        # set one on the artist.
        if not artist.is_transform_set():
            artist.set_transform(self.transFigure)
        if clip:
            artist.set_clip_path(self.patch)
        self.stale = True
        return artist
def _make_key(self, *args, **kwargs):
"""Make a hashable key out of args and kwargs."""
def fixitems(items):
# items may have arrays and lists in them, so convert them
# to tuples for the key
ret = []
for k, v in items:
# some objects can define __getitem__ without being
# iterable and in those cases the conversion to tuples
# will fail. So instead of using the np.iterable(v) function
# we simply try and convert to a tuple, and proceed if not.
try:
v = tuple(v)
except Exception:
pass
ret.append((k, v))
return tuple(ret)
def fixlist(args):
ret = []
for a in args:
if np.iterable(a):
a = tuple(a)
ret.append(a)
return tuple(ret)
key = fixlist(args), fixitems(kwargs.items())
return key
    def _process_projection_requirements(
            self, *args, polar=False, projection=None, **kwargs):
        """
        Handle the args/kwargs to add_axes/add_subplot/gca, returning::
            (axes_proj_class, proj_class_kwargs, proj_stack_key)
        which can be used for new axes initialization/identification.
        """
        if polar:
            # polar=True is shorthand for projection='polar'; reject
            # contradictory combinations.
            if projection is not None and projection != 'polar':
                raise ValueError(
                    "polar=True, yet projection=%r. "
                    "Only one of these arguments should be supplied." %
                    projection)
            projection = 'polar'
        if isinstance(projection, str) or projection is None:
            projection_class = projections.get_projection_class(projection)
        elif hasattr(projection, '_as_mpl_axes'):
            # Third-party projection objects provide their axes class and
            # extra constructor kwargs via the _as_mpl_axes protocol.
            projection_class, extra_kwargs = projection._as_mpl_axes()
            kwargs.update(**extra_kwargs)
        else:
            raise TypeError('projection must be a string, None or implement a '
                            '_as_mpl_axes method. Got %r' % projection)
        # Make the key without projection kwargs, this is used as a unique
        # lookup for axes instances
        key = self._make_key(*args, **kwargs)
        return projection_class, kwargs, key
    @docstring.dedent_interpd
    def add_axes(self, *args, **kwargs):
        """
        Add an axes to the figure.
        Call signatures::
            add_axes(rect, projection=None, polar=False, **kwargs)
            add_axes(ax)
        Parameters
        ----------
        rect : sequence of float
            The dimensions [left, bottom, width, height] of the new axes. All
            quantities are in fractions of figure width and height.
        projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
            The projection type of the `~.axes.Axes`. *str* is the name of
            a custom projection, see `~matplotlib.projections`. The default
            None results in a 'rectilinear' projection.
        polar : boolean, optional
            If True, equivalent to projection='polar'.
        sharex, sharey : `~.axes.Axes`, optional
            Share the x or y `~matplotlib.axis` with sharex and/or sharey.
            The axis will have the same limits, ticks, and scale as the axis
            of the shared axes.
        label : str
            A label for the returned axes.
        Other Parameters
        ----------------
        **kwargs
            This method also takes the keyword arguments for
            the returned axes class. The keyword arguments for the
            rectilinear axes class `~.axes.Axes` can be found in
            the following table but there might also be other keyword
            arguments if another projection is used, see the actual axes
            class.
            %(Axes)s
        Returns
        -------
        axes : `~.axes.Axes` (or a subclass of `~.axes.Axes`)
            The returned axes class depends on the projection used. It is
            `~.axes.Axes` if rectilinear projection are used and
            `.projections.polar.PolarAxes` if polar projection
            are used.
        Notes
        -----
        If the figure already has an axes with key (*args*,
        *kwargs*) then it will simply make that axes current and
        return it. This behavior is deprecated. Meanwhile, if you do
        not want this behavior (i.e., you want to force the creation of a
        new axes), you must use a unique set of args and kwargs. The axes
        *label* attribute has been exposed for this purpose: if you want
        two axes that are otherwise identical to be added to the figure,
        make sure you give them unique labels.
        In rare circumstances, `.add_axes` may be called with a single
        argument, a axes instance already created in the present figure but
        not in the figure's list of axes.
        See Also
        --------
        .Figure.add_subplot
        .pyplot.subplot
        .pyplot.axes
        .Figure.subplots
        .pyplot.subplots
        Examples
        --------
        Some simple examples::
            rect = l, b, w, h
            fig = plt.figure()
            fig.add_axes(rect, label=label1)
            fig.add_axes(rect, label=label2)
            fig.add_axes(rect, frameon=False, facecolor='g')
            fig.add_axes(rect, polar=True)
            ax = fig.add_axes(rect, projection='polar')
            fig.delaxes(ax)
            fig.add_axes(ax)
        """
        if not len(args):
            return
        # shortcut the projection "key" modifications later on, if an axes
        # with the exact args/kwargs exists, return it immediately.
        key = self._make_key(*args, **kwargs)
        ax = self._axstack.get(key)
        if ax is not None:
            self.sca(ax)
            return ax
        if isinstance(args[0], Axes):
            # Re-adding an existing Axes (e.g. after fig.delaxes(ax)).
            a = args[0]
            if a.get_figure() is not self:
                raise ValueError(
                    "The Axes must have been created in the present figure")
        else:
            rect = args[0]
            if not np.isfinite(rect).all():
                raise ValueError('all entries in rect must be finite '
                                 'not {}'.format(rect))
            projection_class, kwargs, key = \
                self._process_projection_requirements(*args, **kwargs)
            # check that an axes of this type doesn't already exist, if it
            # does, set it as active and return it
            ax = self._axstack.get(key)
            if isinstance(ax, projection_class):
                self.sca(ax)
                return ax
            # create the new axes using the axes class given
            a = projection_class(self, rect, **kwargs)
        return self._add_axes_internal(key, a)
    @docstring.dedent_interpd
    def add_subplot(self, *args, **kwargs):
        """
        Add an `~.axes.Axes` to the figure as part of a subplot arrangement.
        Call signatures::
           add_subplot(nrows, ncols, index, **kwargs)
           add_subplot(pos, **kwargs)
           add_subplot(ax)
           add_subplot()
        Parameters
        ----------
        *args
            Either a 3-digit integer or three separate integers
            describing the position of the subplot. If the three
            integers are *nrows*, *ncols*, and *index* in order, the
            subplot will take the *index* position on a grid with *nrows*
            rows and *ncols* columns. *index* starts at 1 in the upper left
            corner and increases to the right.
            *pos* is a three digit integer, where the first digit is the
            number of rows, the second the number of columns, and the third
            the index of the subplot. i.e. fig.add_subplot(235) is the same as
            fig.add_subplot(2, 3, 5). Note that all integers must be less than
            10 for this form to work.
            If no positional arguments are passed, defaults to (1, 1, 1).
            In rare circumstances, `.add_subplot` may be called with a single
            argument, a subplot axes instance already created in the
            present figure but not in the figure's list of axes.
        projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
            The projection type of the subplot (`~.axes.Axes`). *str* is the
            name of a custom projection, see `~matplotlib.projections`. The
            default None results in a 'rectilinear' projection.
        polar : boolean, optional
            If True, equivalent to projection='polar'.
        sharex, sharey : `~.axes.Axes`, optional
            Share the x or y `~matplotlib.axis` with sharex and/or sharey.
            The axis will have the same limits, ticks, and scale as the axis
            of the shared axes.
        label : str
            A label for the returned axes.
        Other Parameters
        ----------------
        **kwargs
            This method also takes the keyword arguments for the returned axes
            base class; except for the *figure* argument. The keyword arguments
            for the rectilinear base class `~.axes.Axes` can be found in
            the following table but there might also be other keyword
            arguments if another projection is used.
        %(Axes)s
        Returns
        -------
        axes : `.axes.SubplotBase`, or another subclass of `~.axes.Axes`
            The axes of the subplot. The returned axes base class depends on
            the projection used. It is `~.axes.Axes` if rectilinear projection
            are used and `.projections.polar.PolarAxes` if polar projection
            are used. The returned axes is then a subplot subclass of the
            base class.
        Notes
        -----
        If the figure already has a subplot with key (*args*,
        *kwargs*) then it will simply make that subplot current and
        return it. This behavior is deprecated. Meanwhile, if you do
        not want this behavior (i.e., you want to force the creation of a
        new subplot), you must use a unique set of args and kwargs. The axes
        *label* attribute has been exposed for this purpose: if you want
        two subplots that are otherwise identical to be added to the figure,
        make sure you give them unique labels.
        See Also
        --------
        .Figure.add_axes
        .pyplot.subplot
        .pyplot.axes
        .Figure.subplots
        .pyplot.subplots
        Examples
        --------
        ::
            fig = plt.figure()
            fig.add_subplot(221)
            # equivalent but more general
            ax1 = fig.add_subplot(2, 2, 1)
            # add a subplot with no frame
            ax2 = fig.add_subplot(222, frameon=False)
            # add a polar subplot
            fig.add_subplot(223, projection='polar')
            # add a red subplot that share the x-axis with ax1
            fig.add_subplot(224, sharex=ax1, facecolor='red')
            #delete x2 from the figure
            fig.delaxes(ax2)
            #add x2 to the figure again
            fig.add_subplot(ax2)
        """
        # No positional args: default to a single full-figure subplot.
        if not len(args):
            args = (1, 1, 1)
        # Expand the three-digit shorthand, e.g. 235 -> (2, 3, 5).
        if len(args) == 1 and isinstance(args[0], Integral):
            if not 100 <= args[0] <= 999:
                raise ValueError("Integer subplot specification must be a "
                                 "three-digit number, not {}".format(args[0]))
            args = tuple(map(int, str(args[0])))
        if 'figure' in kwargs:
            # Axes itself allows for a 'figure' kwarg, but since we want to
            # bind the created Axes to self, it is not allowed here.
            raise TypeError(
                "add_subplot() got an unexpected keyword argument 'figure'")
        if isinstance(args[0], SubplotBase):
            # Re-adding an existing subplot instance (rare; see docstring).
            a = args[0]
            if a.get_figure() is not self:
                raise ValueError(
                    "The Subplot must have been created in the present figure")
            # make a key for the subplot (which includes the axes object id
            # in the hash)
            key = self._make_key(*args, **kwargs)
        else:
            projection_class, kwargs, key = \
                self._process_projection_requirements(*args, **kwargs)
            # try to find the axes with this key in the stack
            ax = self._axstack.get(key)
            if ax is not None:
                if isinstance(ax, projection_class):
                    # the axes already existed, so set it as active & return
                    self.sca(ax)
                    return ax
                else:
                    # Undocumented convenience behavior:
                    # subplot(111); subplot(111, projection='polar')
                    # will replace the first with the second.
                    # Without this, add_subplot would be simpler and
                    # more similar to add_axes.
                    self._axstack.remove(ax)
        a = subplot_class_factory(projection_class)(self, *args, **kwargs)
        return self._add_axes_internal(key, a)
def _add_axes_internal(self, key, ax):
"""Private helper for `add_axes` and `add_subplot`."""
self._axstack.add(key, ax)
self.sca(ax)
ax._remove_method = self._remove_ax
self.stale = True
ax.stale_callback = _stale_figure_callback
return ax
    def subplots(self, nrows=1, ncols=1, sharex=False, sharey=False,
                 squeeze=True, subplot_kw=None, gridspec_kw=None):
        """
        Add a set of subplots to this figure.
        This utility wrapper makes it convenient to create common layouts of
        subplots in a single call.
        Parameters
        ----------
        nrows, ncols : int, optional, default: 1
            Number of rows/columns of the subplot grid.
        sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
            Controls sharing of properties among x (`sharex`) or y (`sharey`)
            axes:
            - True or 'all': x- or y-axis will be shared among all subplots.
            - False or 'none': each subplot x- or y-axis will be independent.
            - 'row': each subplot row will share an x- or y-axis.
            - 'col': each subplot column will share an x- or y-axis.
            When subplots have a shared x-axis along a column, only the x tick
            labels of the bottom subplot are created. Similarly, when subplots
            have a shared y-axis along a row, only the y tick labels of the
            first column subplot are created. To later turn other subplots'
            ticklabels on, use `~matplotlib.axes.Axes.tick_params`.
        squeeze : bool, optional, default: True
            - If True, extra dimensions are squeezed out from the returned
              array of Axes:
              - if only one subplot is constructed (nrows=ncols=1), the
                resulting single Axes object is returned as a scalar.
              - for Nx1 or 1xM subplots, the returned object is a 1D numpy
                object array of Axes objects.
              - for NxM, subplots with N>1 and M>1 are returned as a 2D array.
            - If False, no squeezing at all is done: the returned Axes object
              is always a 2D array containing Axes instances, even if it ends
              up being 1x1.
        subplot_kw : dict, optional
            Dict with keywords passed to the
            :meth:`~matplotlib.figure.Figure.add_subplot` call used to create
            each subplot.
        gridspec_kw : dict, optional
            Dict with keywords passed to the
            `~matplotlib.gridspec.GridSpec` constructor used to create
            the grid the subplots are placed on.
        Returns
        -------
        ax : `~.axes.Axes` object or array of Axes objects.
            *ax* can be either a single `~matplotlib.axes.Axes` object or
            an array of Axes objects if more than one subplot was created. The
            dimensions of the resulting array can be controlled with the
            squeeze keyword, see above.
        Examples
        --------
        ::
            # First create some toy data:
            x = np.linspace(0, 2*np.pi, 400)
            y = np.sin(x**2)
            # Create a figure
            fig = plt.figure()
            # Create a subplot
            ax = fig.subplots()
            ax.plot(x, y)
            ax.set_title('Simple plot')
            # Create two subplots and unpack the output array immediately
            ax1, ax2 = fig.subplots(1, 2, sharey=True)
            ax1.plot(x, y)
            ax1.set_title('Sharing Y axis')
            ax2.scatter(x, y)
            # Create four polar axes and access them through the returned array
            axes = fig.subplots(2, 2, subplot_kw=dict(polar=True))
            axes[0, 0].plot(x, y)
            axes[1, 1].scatter(x, y)
            # Share a X axis with each column of subplots
            fig.subplots(2, 2, sharex='col')
            # Share a Y axis with each row of subplots
            fig.subplots(2, 2, sharey='row')
            # Share both X and Y axes with all subplots
            fig.subplots(2, 2, sharex='all', sharey='all')
            # Note that this is the same as
            fig.subplots(2, 2, sharex=True, sharey=True)
        See Also
        --------
        .pyplot.subplots
        .Figure.add_subplot
        .pyplot.subplot
        """
        # Normalize bool values to their string equivalents.
        if isinstance(sharex, bool):
            sharex = "all" if sharex else "none"
        if isinstance(sharey, bool):
            sharey = "all" if sharey else "none"
        # This check was added because it is very easy to type
        # `subplots(1, 2, 1)` when `subplot(1, 2, 1)` was intended.
        # In most cases, no error will ever occur, but mysterious behavior
        # will result because what was intended to be the subplot index is
        # instead treated as a bool for sharex.
        if isinstance(sharex, Integral):
            cbook._warn_external(
                "sharex argument to subplots() was an integer. Did you "
                "intend to use subplot() (without 's')?")
        cbook._check_in_list(["all", "row", "col", "none"],
                             sharex=sharex, sharey=sharey)
        if subplot_kw is None:
            subplot_kw = {}
        if gridspec_kw is None:
            gridspec_kw = {}
        # don't mutate kwargs passed by user...
        subplot_kw = subplot_kw.copy()
        gridspec_kw = gridspec_kw.copy()
        if self.get_constrained_layout():
            gs = GridSpec(nrows, ncols, figure=self, **gridspec_kw)
        else:
            # this should turn constrained_layout off if we don't want it
            gs = GridSpec(nrows, ncols, figure=None, **gridspec_kw)
        self._gridspecs.append(gs)
        # Create array to hold all axes.
        axarr = np.empty((nrows, ncols), dtype=object)
        for row in range(nrows):
            for col in range(ncols):
                # NOTE: for the first axes created (row 0, col 0) every
                # entry looked up here is still None, so it is unshared;
                # later axes share with the already-created reference axes.
                shared_with = {"none": None, "all": axarr[0, 0],
                               "row": axarr[row, 0], "col": axarr[0, col]}
                subplot_kw["sharex"] = shared_with[sharex]
                subplot_kw["sharey"] = shared_with[sharey]
                axarr[row, col] = self.add_subplot(gs[row, col], **subplot_kw)
        # turn off redundant tick labeling
        if sharex in ["col", "all"]:
            # turn off all but the bottom row
            for ax in axarr[:-1, :].flat:
                ax.xaxis.set_tick_params(which='both',
                                         labelbottom=False, labeltop=False)
                ax.xaxis.offsetText.set_visible(False)
        if sharey in ["row", "all"]:
            # turn off all but the first column
            for ax in axarr[:, 1:].flat:
                ax.yaxis.set_tick_params(which='both',
                                         labelleft=False, labelright=False)
                ax.yaxis.offsetText.set_visible(False)
        if squeeze:
            # Discarding unneeded dimensions that equal 1. If we only have one
            # subplot, just return it instead of a 1-element array.
            return axarr.item() if axarr.size == 1 else axarr.squeeze()
        else:
            # Returned axis array will be always 2-d, even if nrows=ncols=1.
            return axarr
def _remove_ax(self, ax):
def _reset_locators_and_formatters(axis):
# Set the formatters and locators to be associated with axis
# (where previously they may have been associated with another
# Axis isntance)
#
# Because set_major_formatter() etc. force isDefault_* to be False,
# we have to manually check if the original formatter was a
# default and manually set isDefault_* if that was the case.
majfmt = axis.get_major_formatter()
isDefault = majfmt.axis.isDefault_majfmt
axis.set_major_formatter(majfmt)
if isDefault:
majfmt.axis.isDefault_majfmt = True
majloc = axis.get_major_locator()
isDefault = majloc.axis.isDefault_majloc
axis.set_major_locator(majloc)
if isDefault:
majloc.axis.isDefault_majloc = True
minfmt = axis.get_minor_formatter()
isDefault = majloc.axis.isDefault_minfmt
axis.set_minor_formatter(minfmt)
if isDefault:
minfmt.axis.isDefault_minfmt = True
minloc = axis.get_minor_locator()
isDefault = majloc.axis.isDefault_minloc
axis.set_minor_locator(minloc)
if isDefault:
minloc.axis.isDefault_minloc = True
def _break_share_link(ax, grouper):
siblings = grouper.get_siblings(ax)
if len(siblings) > 1:
grouper.remove(ax)
for last_ax in siblings:
if ax is not last_ax:
return last_ax
return None
self.delaxes(ax)
last_ax = _break_share_link(ax, ax._shared_y_axes)
if last_ax is not None:
_reset_locators_and_formatters(last_ax.yaxis)
last_ax = _break_share_link(ax, ax._shared_x_axes)
if last_ax is not None:
_reset_locators_and_formatters(last_ax.xaxis)
    def clf(self, keep_observers=False):
        """
        Clear the figure.
        Set *keep_observers* to True if, for example,
        a gui widget is tracking the axes in the figure.
        """
        self.suppressComposite = None
        # Drop all registered callbacks by replacing the registry.
        self.callbacks = cbook.CallbackRegistry()
        # Clear each axes, then detach it from the figure.
        for ax in tuple(self.axes):  # Iterate over the copy.
            ax.cla()
            self.delaxes(ax)         # removes ax from self._axstack
        toolbar = getattr(self.canvas, 'toolbar', None)
        if toolbar is not None:
            toolbar.update()
        self._axstack.clear()
        # Reset all figure-level artist containers.
        self.artists = []
        self.lines = []
        self.patches = []
        self.texts = []
        self.images = []
        self.legends = []
        if not keep_observers:
            self._axobservers = []
        self._suptitle = None
        if self.get_constrained_layout():
            # Tear down the constrained_layout tree rooted at this figure.
            layoutbox.nonetree(self._layoutbox)
        self.stale = True
def clear(self, keep_observers=False):
"""
Clear the figure -- synonym for :meth:`clf`.
"""
self.clf(keep_observers=keep_observers)
    @allow_rasterization
    def draw(self, renderer):
        """
        Render the figure using :class:`matplotlib.backend_bases.RendererBase`
        instance *renderer*.
        """
        # draw the figure bounding box, perhaps none for white figure
        if not self.get_visible():
            return
        # Children other than the background patch, sorted by zorder;
        # animated artists are excluded from this pass.
        artists = self.get_children()
        artists.remove(self.patch)
        artists = sorted(
            (artist for artist in artists if not artist.get_animated()),
            key=lambda artist: artist.get_zorder())
        # Let every axes (and any child exposing apply_aspect) recompute
        # its position before anything is drawn.
        for ax in self.axes:
            locator = ax.get_axes_locator()
            if locator:
                pos = locator(ax, renderer)
                ax.apply_aspect(pos)
            else:
                ax.apply_aspect()
            for child in ax.get_children():
                if hasattr(child, 'apply_aspect'):
                    locator = child.get_axes_locator()
                    if locator:
                        pos = locator(child, renderer)
                        child.apply_aspect(pos)
                    else:
                        child.apply_aspect()
        try:
            renderer.open_group('figure', gid=self.get_gid())
            # Run whichever layout engine is active before compositing.
            if self.get_constrained_layout() and self.axes:
                self.execute_constrained_layout(renderer)
            if self.get_tight_layout() and self.axes:
                try:
                    self.tight_layout(renderer,
                                      **self._tight_parameters)
                except ValueError:
                    pass
                    # ValueError can occur when resizing a window.
            self.patch.draw(renderer)
            mimage._draw_list_compositing_images(
                renderer, self, artists, self.suppressComposite)
            renderer.close_group('figure')
        finally:
            self.stale = False
        # Cache the renderer for draw_artist() and fire the draw event.
        self._cachedRenderer = renderer
        self.canvas.draw_event(renderer)
def draw_artist(self, a):
"""
Draw :class:`matplotlib.artist.Artist` instance *a* only.
This is available only after the figure is drawn.
"""
if self._cachedRenderer is None:
raise AttributeError("draw_artist can only be used after an "
"initial draw which caches the renderer")
a.draw(self._cachedRenderer)
def get_axes(self):
"""
Return a list of axes in the Figure. You can access and modify the
axes in the Figure through this list.
Do not modify the list itself. Instead, use `~Figure.add_axes`,
`~.Figure.subplot` or `~.Figure.delaxes` to add or remove an axes.
Note: This is equivalent to the property `~.Figure.axes`.
"""
return self.axes
    # Note: in the docstring below, the newlines in the examples after the
    # calls to legend() allow replacing it with figlegend() to generate the
    # docstring of pyplot.figlegend.
    @docstring.dedent_interpd
    def legend(self, *args, **kwargs):
        """
        Place a legend on the figure.
        To make a legend from existing artists on every axes::
          legend()
        To make a legend for a list of lines and labels::
          legend(
              (line1, line2, line3),
              ('label1', 'label2', 'label3'),
              loc='upper right')
        These can also be specified by keyword::
          legend(
              handles=(line1, line2, line3),
              labels=('label1', 'label2', 'label3'),
              loc='upper right')
        Parameters
        ----------
        handles : list of `.Artist`, optional
            A list of Artists (lines, patches) to be added to the legend.
            Use this together with *labels*, if you need full control on what
            is shown in the legend and the automatic mechanism described above
            is not sufficient.
            The length of handles and labels should be the same in this
            case. If they are not, they are truncated to the smaller length.
        labels : list of str, optional
            A list of labels to show next to the artists.
            Use this together with *handles*, if you need full control on what
            is shown in the legend and the automatic mechanism described above
            is not sufficient.
        Other Parameters
        ----------------
        %(_legend_kw_doc)s
        Returns
        -------
        :class:`matplotlib.legend.Legend` instance
        Notes
        -----
        Not all kinds of artist are supported by the legend command. See
        :doc:`/tutorials/intermediate/legend_guide` for details.
        """
        handles, labels, extra_args, kwargs = mlegend._parse_legend_args(
                self.axes,
                *args,
                **kwargs)
        # check for third arg
        if len(extra_args):
            # Deliberately a no-op for now: the deprecation of extra
            # positional arguments is deferred (see commented-out code).
            # cbook.warn_deprecated(
            #     "2.1",
            #     message="Figure.legend will accept no more than two "
            #     "positional arguments in the future. Use "
            #     "'fig.legend(handles, labels, loc=location)' "
            #     "instead.")
            # kwargs['loc'] = extra_args[0]
            # extra_args = extra_args[1:]
            pass
        l = mlegend.Legend(self, handles, labels, *extra_args, **kwargs)
        # Track the legend so the figure draws it and can remove it later.
        self.legends.append(l)
        l._remove_method = self.legends.remove
        self.stale = True
        return l
    @cbook._delete_parameter("3.1", "withdash")
    @docstring.dedent_interpd
    def text(self, x, y, s, fontdict=None, withdash=False, **kwargs):
        """
        Add text to figure.
        Parameters
        ----------
        x, y : float
            The position to place the text. By default, this is in figure
            coordinates, floats in [0, 1]. The coordinate system can be changed
            using the *transform* keyword.
        s : str
            The text string.
        fontdict : dictionary, optional, default: None
            A dictionary to override the default text properties. If fontdict
            is None, the defaults are determined by your rc parameters. A
            property in *kwargs* overrides the same property in fontdict.
        withdash : boolean, optional, default: False
            Creates a `~matplotlib.text.TextWithDash` instance instead of a
            `~matplotlib.text.Text` instance.
        Other Parameters
        ----------------
        **kwargs : `~matplotlib.text.Text` properties
            Other miscellaneous text parameters.
            %(Text)s
        Returns
        -------
        text : `~.text.Text`
        See Also
        --------
        .Axes.text
        .pyplot.text
        """
        # Place text in figure coordinates unless overridden by kwargs.
        default = dict(transform=self.transFigure)
        # Take the deprecated TextWithDash path only when the caller passed
        # a truthy *withdash* explicitly (i.e. not the deprecation sentinel).
        if (withdash
                and withdash is not cbook.deprecation._deprecated_parameter):
            text = TextWithDash(x=x, y=y, text=s)
        else:
            text = Text(x=x, y=y, text=s)
        # Apply defaults first so fontdict and then kwargs can override them.
        text.update(default)
        if fontdict is not None:
            text.update(fontdict)
        text.update(kwargs)
        text.set_figure(self)
        text.stale_callback = _stale_figure_callback
        self.texts.append(text)
        text._remove_method = self.texts.remove
        self.stale = True
        return text
    def _set_artist_props(self, a):
        # Attach figure-level bookkeeping to artist *a*: parent figure,
        # staleness propagation, and the figure coordinate transform.
        if a != self:
            a.set_figure(self)
            a.stale_callback = _stale_figure_callback
        a.set_transform(self.transFigure)
    @docstring.dedent_interpd
    def gca(self, **kwargs):
        """
        Get the current axes, creating one if necessary.
        The following kwargs are supported for ensuring the returned axes
        adheres to the given projection etc., and for axes creation if
        the active axes does not exist:
        %(Axes)s
        """
        ckey, cax = self._axstack.current_key_axes()
        # if there exists an axes on the stack see if it matches
        # the desired axes configuration
        if cax is not None:
            # if no kwargs are given just return the current axes
            # this is a convenience for gca() on axes such as polar etc.
            if not kwargs:
                return cax
            # if the user has specified particular projection detail
            # then build up a key which can represent this
            else:
                projection_class, _, key = \
                    self._process_projection_requirements(**kwargs)
                # let the returned axes have any gridspec by removing it from
                # the key
                ckey = ckey[1:]
                key = key[1:]
                # if the cax matches this key then return the axes, otherwise
                # continue and a new axes will be created
                if key == ckey and isinstance(cax, projection_class):
                    return cax
                else:
                    cbook._warn_external('Requested projection is different '
                                         'from current axis projection, '
                                         'creating new axis with requested '
                                         'projection.')
        # Either the stack is empty or the current axes does not match the
        # requested projection: create a fresh full-figure subplot.
        # no axes found, so create one which spans the figure
        return self.add_subplot(1, 1, 1, **kwargs)
def sca(self, a):
"""Set the current axes to be a and return a."""
self._axstack.bubble(a)
for func in self._axobservers:
func(self)
return a
def _gci(self):
"""
Helper for :func:`~matplotlib.pyplot.gci`. Do not use elsewhere.
"""
# Look first for an image in the current Axes:
cax = self._axstack.current_key_axes()[1]
if cax is None:
return None
im = cax._gci()
if im is not None:
return im
# If there is no image in the current Axes, search for
# one in a previously created Axes. Whether this makes
# sense is debatable, but it is the documented behavior.
for ax in reversed(self.axes):
im = ax._gci()
if im is not None:
return im
return None
    def __getstate__(self):
        """Return the pickled state, dropping unpicklable attributes."""
        state = super().__getstate__()
        # the axobservers cannot currently be pickled.
        # Additionally, the canvas cannot currently be pickled, but this has
        # the benefit of meaning that a figure can be detached from one canvas,
        # and re-attached to another.
        for attr_to_pop in ('_axobservers', 'show',
                            'canvas', '_cachedRenderer'):
            state.pop(attr_to_pop, None)
        # add version information to the state
        state['__mpl_version__'] = _mpl_version
        # check whether the figure manager (if any) is registered with pyplot
        from matplotlib import _pylab_helpers
        if getattr(self.canvas, 'manager', None) \
                in _pylab_helpers.Gcf.figs.values():
            state['_restore_to_pylab'] = True
        # set all the layoutbox information to None. kiwisolver objects can't
        # be pickled, so we lose the layout options at this point.
        state.pop('_layoutbox', None)
        # suptitle:
        if self._suptitle is not None:
            self._suptitle._layoutbox = None
        return state
    def __setstate__(self, state):
        """Restore from pickled state, re-creating what __getstate__ dropped."""
        version = state.pop('__mpl_version__')
        restore_to_pylab = state.pop('_restore_to_pylab', False)
        if version != _mpl_version:
            cbook._warn_external(
                f"This figure was saved with matplotlib version {version} and "
                f"is unlikely to function correctly.")
        self.__dict__ = state
        # re-initialise some of the unstored state information
        self._axobservers = []
        self.canvas = None
        self._layoutbox = None
        if restore_to_pylab:
            # The figure was registered with a pyplot figure manager when
            # pickled; re-register it under a fresh figure number.
            # lazy import to avoid circularity
            import matplotlib.pyplot as plt
            import matplotlib._pylab_helpers as pylab_helpers
            allnums = plt.get_fignums()
            num = max(allnums) + 1 if allnums else 1
            mgr = plt._backend_mod.new_figure_manager_given_figure(num, self)
            # XXX The following is a copy and paste from pyplot. Consider
            # factoring to pylab_helpers
            if self.get_label():
                mgr.set_window_title(self.get_label())
            # make this figure current on button press event
            def make_active(event):
                pylab_helpers.Gcf.set_active(mgr)
            mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event',
                                                 make_active)
            pylab_helpers.Gcf.set_active(mgr)
            self.number = num
            plt.draw_if_interactive()
        self.stale = True
    def add_axobserver(self, func):
        """Whenever the axes state change, ``func(self)`` will be called."""
        # Observers are fired by sca() and friends; see self._axobservers.
        self._axobservers.append(func)
    def savefig(self, fname, *, transparent=None, **kwargs):
        """
        Save the current figure.
        Call signature::
          savefig(fname, dpi=None, facecolor='w', edgecolor='w',
                  orientation='portrait', papertype=None, format=None,
                  transparent=False, bbox_inches=None, pad_inches=0.1,
                  frameon=None, metadata=None)
        The output formats available depend on the backend being used.
        Parameters
        ----------
        fname : str or PathLike or file-like object
            A path, or a Python file-like object, or
            possibly some backend-dependent object such as
            `matplotlib.backends.backend_pdf.PdfPages`.
            If *format* is not set, then the output format is inferred from
            the extension of *fname*, if any, and from :rc:`savefig.format`
            otherwise. If *format* is set, it determines the output format.
            Hence, if *fname* is not a path or has no extension, remember to
            specify *format* to ensure that the correct backend is used.
        Other Parameters
        ----------------
        dpi : [ *None* | scalar > 0 | 'figure' ]
            The resolution in dots per inch. If *None*, defaults to
            :rc:`savefig.dpi`. If 'figure', uses the figure's dpi value.
        quality : [ *None* | 1 <= scalar <= 100 ]
            The image quality, on a scale from 1 (worst) to 95 (best).
            Applicable only if *format* is jpg or jpeg, ignored otherwise.
            If *None*, defaults to :rc:`savefig.jpeg_quality` (95 by default).
            Values above 95 should be avoided; 100 completely disables the
            JPEG quantization stage.
        optimize : bool
            If *True*, indicates that the JPEG encoder should make an extra
            pass over the image in order to select optimal encoder settings.
            Applicable only if *format* is jpg or jpeg, ignored otherwise.
            Is *False* by default.
        progressive : bool
            If *True*, indicates that this image should be stored as a
            progressive JPEG file. Applicable only if *format* is jpg or
            jpeg, ignored otherwise. Is *False* by default.
        facecolor : color or None, optional
            The facecolor of the figure; if *None*, defaults to
            :rc:`savefig.facecolor`.
        edgecolor : color or None, optional
            The edgecolor of the figure; if *None*, defaults to
            :rc:`savefig.edgecolor`
        orientation : {'landscape', 'portrait'}
            Currently only supported by the postscript backend.
        papertype : str
            One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
            'a10', 'b0' through 'b10'. Only supported for postscript
            output.
        format : str
            The file format, e.g. 'png', 'pdf', 'svg', ... The behavior when
            this is unset is documented under *fname*.
        transparent : bool
            If *True*, the axes patches will all be transparent; the
            figure patch will also be transparent unless facecolor
            and/or edgecolor are specified via kwargs.
            This is useful, for example, for displaying
            a plot on top of a colored background on a web page. The
            transparency of these patches will be restored to their
            original values upon exit of this function.
        bbox_inches : str or `~matplotlib.transforms.Bbox`, optional
            Bbox in inches. Only the given portion of the figure is
            saved. If 'tight', try to figure out the tight bbox of
            the figure. If None, use savefig.bbox
        pad_inches : scalar, optional
            Amount of padding around the figure when bbox_inches is
            'tight'. If None, use savefig.pad_inches
        bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional
            A list of extra artists that will be considered when the
            tight bbox is calculated.
        metadata : dict, optional
            Key/value pairs to store in the image metadata. The supported keys
            and defaults depend on the image format and backend:
            - 'png' with Agg backend: See the parameter ``metadata`` of
              `~.FigureCanvasAgg.print_png`.
            - 'pdf' with pdf backend: See the parameter ``metadata`` of
              `~.backend_pdf.PdfPages`.
            - 'eps' and 'ps' with PS backend: Only 'Creator' is supported.
        pil_kwargs : dict, optional
            Additional keyword arguments that are passed to `PIL.Image.save`
            when saving the figure. Only applicable for formats that are saved
            using Pillow, i.e. JPEG, TIFF, and (if the keyword is set to a
            non-None value) PNG.
        """
        kwargs.setdefault('dpi', rcParams['savefig.dpi'])
        # *frameon* is deprecated (3.1): translate it into patch visibility.
        if "frameon" in kwargs:
            cbook.warn_deprecated("3.1", name="frameon", obj_type="kwarg",
                                  alternative="facecolor")
            frameon = kwargs.pop("frameon")
            if frameon is None:
                # NOTE(review): dict.__getitem__ bypasses rcParams' own
                # __getitem__ — presumably to avoid its handling of this
                # deprecated key; confirm against the RcParams class.
                frameon = dict.__getitem__(rcParams, 'savefig.frameon')
        else:
            frameon = False  # Won't pass "if frameon:" below.
        if transparent is None:
            transparent = rcParams['savefig.transparent']
        if transparent:
            # Temporarily blank out patch colors, recording the originals so
            # they can be restored after printing.
            kwargs.setdefault('facecolor', 'none')
            kwargs.setdefault('edgecolor', 'none')
            original_axes_colors = []
            for ax in self.axes:
                patch = ax.patch
                original_axes_colors.append((patch.get_facecolor(),
                                             patch.get_edgecolor()))
                patch.set_facecolor('none')
                patch.set_edgecolor('none')
        else:
            kwargs.setdefault('facecolor', rcParams['savefig.facecolor'])
            kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor'])
        if frameon:
            original_frameon = self.patch.get_visible()
            self.patch.set_visible(frameon)
        self.canvas.print_figure(fname, **kwargs)
        if frameon:
            self.patch.set_visible(original_frameon)
        if transparent:
            # Restore the recorded axes patch colors.
            for ax, cc in zip(self.axes, original_axes_colors):
                ax.patch.set_facecolor(cc[0])
                ax.patch.set_edgecolor(cc[1])
    @docstring.dedent_interpd
    def colorbar(self, mappable, cax=None, ax=None, use_gridspec=True, **kw):
        """
        Create a colorbar for a ScalarMappable instance, *mappable*.
        Documentation for the pyplot thin wrapper:
        %(colorbar_doc)s
        """
        if ax is None:
            ax = self.gca()
        # Store the value of gca so that we can set it back later on.
        current_ax = self.gca()
        if cax is None:
            # No explicit colorbar axes given: make one next to *ax*, using
            # the gridspec machinery when possible.
            if use_gridspec and isinstance(ax, SubplotBase)  \
                    and (not self.get_constrained_layout()):
                cax, kw = cbar.make_axes_gridspec(ax, **kw)
            else:
                cax, kw = cbar.make_axes(ax, **kw)
        # need to remove kws that cannot be passed to Colorbar
        NON_COLORBAR_KEYS = ['fraction', 'pad', 'shrink', 'aspect', 'anchor',
                             'panchor']
        cb_kw = {k: v for k, v in kw.items() if k not in NON_COLORBAR_KEYS}
        cb = cbar.colorbar_factory(cax, mappable, **cb_kw)
        # make_axes* changed the current axes; restore it.
        self.sca(current_ax)
        self.stale = True
        return cb
    def subplots_adjust(self, left=None, bottom=None, right=None, top=None,
                        wspace=None, hspace=None):
        """
        Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when
        *None*) and update the subplot locations.
        Note that calling this disables constrained_layout (with a warning)
        if it was enabled, since the two mechanisms are incompatible.
        """
        if self.get_constrained_layout():
            self.set_constrained_layout(False)
            cbook._warn_external("This figure was using "
                                 "constrained_layout==True, but that is "
                                 "incompatible with subplots_adjust and or "
                                 "tight_layout: setting "
                                 "constrained_layout==False. ")
        self.subplotpars.update(left, bottom, right, top, wspace, hspace)
        # Re-position every axes affected by the new subplot parameters.
        for ax in self.axes:
            if not isinstance(ax, SubplotBase):
                # Check if sharing a subplots axis
                if isinstance(ax._sharex, SubplotBase):
                    ax._sharex.update_params()
                    ax.set_position(ax._sharex.figbox)
                elif isinstance(ax._sharey, SubplotBase):
                    ax._sharey.update_params()
                    ax.set_position(ax._sharey.figbox)
            else:
                ax.update_params()
                ax.set_position(ax.figbox)
        self.stale = True
def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1,
mouse_pop=3, mouse_stop=2):
"""
Blocking call to interact with a figure.
Wait until the user clicks *n* times on the figure, and return the
coordinates of each click in a list.
There are three possible interactions:
- Add a point.
- Remove the most recently added point.
- Stop the interaction and return the points added so far.
The actions are assigned to mouse buttons via the arguments
*mouse_add*, *mouse_pop* and *mouse_stop*. Mouse buttons are defined
by the numbers:
- 1: left mouse button
- 2: middle mouse button
- 3: right mouse button
- None: no mouse button
Parameters
----------
n : int, optional, default: 1
Number of mouse clicks to accumulate. If negative, accumulate
clicks until the input is terminated manually.
timeout : scalar, optional, default: 30
Number of seconds to wait before timing out. If zero or negative
will never timeout.
show_clicks : bool, optional, default: True
If True, show a red cross at the location of each click.
mouse_add : {1, 2, 3, None}, optional, default: 1 (left click)
Mouse button used to add points.
mouse_pop : {1, 2, 3, None}, optional, default: 3 (right click)
Mouse button used to remove the most recently added point.
mouse_stop : {1, 2, 3, None}, optional, default: 2 (middle click)
Mouse button used to stop input.
Returns
-------
points : list of tuples
A list of the clicked (x, y) coordinates.
Notes
-----
The keyboard can also be used to select points in case your mouse
does not have one or more of the buttons. The delete and backspace
keys act like right clicking (i.e., remove last point), the enter key
terminates input and any other key (not already used by the window
manager) selects a point.
"""
blocking_mouse_input = BlockingMouseInput(self,
mouse_add=mouse_add,
mouse_pop=mouse_pop,
mouse_stop=mouse_stop)
return blocking_mouse_input(n=n, timeout=timeout,
show_clicks=show_clicks)
def waitforbuttonpress(self, timeout=-1):
"""
Blocking call to interact with the figure.
This will return True is a key was pressed, False if a mouse
button was pressed and None if *timeout* was reached without
either being pressed.
If *timeout* is negative, does not timeout.
"""
blocking_input = BlockingKeyMouseInput(self)
return blocking_input(timeout=timeout)
def get_default_bbox_extra_artists(self):
bbox_artists = [artist for artist in self.get_children()
if (artist.get_visible() and artist.get_in_layout())]
for ax in self.axes:
if ax.get_visible():
bbox_artists.extend(ax.get_default_bbox_extra_artists())
return bbox_artists
def get_tightbbox(self, renderer, bbox_extra_artists=None):
"""
Return a (tight) bounding box of the figure in inches.
Artists that have ``artist.set_in_layout(False)`` are not included
in the bbox.
Parameters
----------
renderer : `.RendererBase` instance
renderer that will be used to draw the figures (i.e.
``fig.canvas.get_renderer()``)
bbox_extra_artists : list of `.Artist` or ``None``
List of artists to include in the tight bounding box. If
``None`` (default), then all artist children of each axes are
included in the tight bounding box.
Returns
-------
bbox : `.BboxBase`
containing the bounding box (in figure inches).
"""
bb = []
if bbox_extra_artists is None:
artists = self.get_default_bbox_extra_artists()
else:
artists = bbox_extra_artists
for a in artists:
bbox = a.get_tightbbox(renderer)
if bbox is not None and (bbox.width != 0 or bbox.height != 0):
bb.append(bbox)
for ax in self.axes:
if ax.get_visible():
# some axes don't take the bbox_extra_artists kwarg so we
# need this conditional....
try:
bbox = ax.get_tightbbox(renderer,
bbox_extra_artists=bbox_extra_artists)
except TypeError:
bbox = ax.get_tightbbox(renderer)
bb.append(bbox)
bb = [b for b in bb
if (np.isfinite(b.width) and np.isfinite(b.height)
and (b.width != 0 or b.height != 0))]
if len(bb) == 0:
return self.bbox_inches
_bbox = Bbox.union(bb)
bbox_inches = TransformedBbox(_bbox, Affine2D().scale(1 / self.dpi))
return bbox_inches
def init_layoutbox(self):
"""Initialize the layoutbox for use in constrained_layout."""
if self._layoutbox is None:
self._layoutbox = layoutbox.LayoutBox(parent=None,
name='figlb',
artist=self)
self._layoutbox.constrain_geometry(0., 0., 1., 1.)
def execute_constrained_layout(self, renderer=None):
"""
Use ``layoutbox`` to determine pos positions within axes.
See also `.set_constrained_layout_pads`.
"""
from matplotlib._constrained_layout import do_constrained_layout
_log.debug('Executing constrainedlayout')
if self._layoutbox is None:
cbook._warn_external("Calling figure.constrained_layout, but "
"figure not setup to do constrained layout. "
" You either called GridSpec without the "
"fig keyword, you are using plt.subplot, "
"or you need to call figure or subplots "
"with the constrained_layout=True kwarg.")
return
w_pad, h_pad, wspace, hspace = self.get_constrained_layout_pads()
# convert to unit-relative lengths
fig = self
width, height = fig.get_size_inches()
w_pad = w_pad / width
h_pad = h_pad / height
if renderer is None:
renderer = layoutbox.get_renderer(fig)
do_constrained_layout(fig, renderer, h_pad, w_pad, hspace, wspace)
def tight_layout(self, renderer=None, pad=1.08, h_pad=None, w_pad=None,
rect=None):
"""
Automatically adjust subplot parameters to give specified padding.
To exclude an artist on the axes from the bounding box calculation
that determines the subplot parameters (i.e. legend, or annotation),
then set `a.set_in_layout(False)` for that artist.
Parameters
----------
renderer : subclass of `~.backend_bases.RendererBase`, optional
Defaults to the renderer for the figure.
pad : float, optional
Padding between the figure edge and the edges of subplots,
as a fraction of the font size.
h_pad, w_pad : float, optional
Padding (height/width) between edges of adjacent subplots,
as a fraction of the font size. Defaults to *pad*.
rect : tuple (left, bottom, right, top), optional
A rectangle (left, bottom, right, top) in the normalized
figure coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
See Also
--------
.Figure.set_tight_layout
.pyplot.tight_layout
"""
from .tight_layout import (
get_renderer, get_subplotspec_list, get_tight_layout_figure)
subplotspec_list = get_subplotspec_list(self.axes)
if None in subplotspec_list:
cbook._warn_external("This figure includes Axes that are not "
"compatible with tight_layout, so results "
"might be incorrect.")
if renderer is None:
renderer = get_renderer(self)
kwargs = get_tight_layout_figure(
self, self.axes, subplotspec_list, renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
if kwargs:
self.subplots_adjust(**kwargs)
def align_xlabels(self, axs=None):
"""
Align the ylabels of subplots in the same subplot column if label
alignment is being done automatically (i.e. the label position is
not manually set).
Alignment persists for draw events after this is called.
If a label is on the bottom, it is aligned with labels on axes that
also have their label on the bottom and that have the same
bottom-most subplot row. If the label is on the top,
it is aligned with labels on axes with the same top-most row.
Parameters
----------
axs : list of `~matplotlib.axes.Axes`
Optional list of (or ndarray) `~matplotlib.axes.Axes`
to align the xlabels.
Default is to align all axes on the figure.
See Also
--------
matplotlib.figure.Figure.align_ylabels
matplotlib.figure.Figure.align_labels
Notes
-----
This assumes that ``axs`` are from the same `.GridSpec`, so that
their `.SubplotSpec` positions correspond to figure positions.
Examples
--------
Example with rotated xtick labels::
fig, axs = plt.subplots(1, 2)
for tick in axs[0].get_xticklabels():
tick.set_rotation(55)
axs[0].set_xlabel('XLabel 0')
axs[1].set_xlabel('XLabel 1')
fig.align_xlabels()
"""
if axs is None:
axs = self.axes
axs = np.asarray(axs).ravel()
for ax in axs:
_log.debug(' Working on: %s', ax.get_xlabel())
ss = ax.get_subplotspec()
nrows, ncols, row0, row1, col0, col1 = ss.get_rows_columns()
labpo = ax.xaxis.get_label_position() # top or bottom
# loop through other axes, and search for label positions
# that are same as this one, and that share the appropriate
# row number.
# Add to a grouper associated with each axes of sibblings.
# This list is inspected in `axis.draw` by
# `axis._update_label_position`.
for axc in axs:
if axc.xaxis.get_label_position() == labpo:
ss = axc.get_subplotspec()
nrows, ncols, rowc0, rowc1, colc, col1 = \
ss.get_rows_columns()
if (labpo == 'bottom' and rowc1 == row1 or
labpo == 'top' and rowc0 == row0):
# grouper for groups of xlabels to align
self._align_xlabel_grp.join(ax, axc)
def align_ylabels(self, axs=None):
"""
Align the ylabels of subplots in the same subplot column if label
alignment is being done automatically (i.e. the label position is
not manually set).
Alignment persists for draw events after this is called.
If a label is on the left, it is aligned with labels on axes that
also have their label on the left and that have the same
left-most subplot column. If the label is on the right,
it is aligned with labels on axes with the same right-most column.
Parameters
----------
axs : list of `~matplotlib.axes.Axes`
Optional list (or ndarray) of `~matplotlib.axes.Axes`
to align the ylabels.
Default is to align all axes on the figure.
See Also
--------
matplotlib.figure.Figure.align_xlabels
matplotlib.figure.Figure.align_labels
Notes
-----
This assumes that ``axs`` are from the same `.GridSpec`, so that
their `.SubplotSpec` positions correspond to figure positions.
Examples
--------
Example with large yticks labels::
fig, axs = plt.subplots(2, 1)
axs[0].plot(np.arange(0, 1000, 50))
axs[0].set_ylabel('YLabel 0')
axs[1].set_ylabel('YLabel 1')
fig.align_ylabels()
"""
if axs is None:
axs = self.axes
axs = np.asarray(axs).ravel()
for ax in axs:
_log.debug(' Working on: %s', ax.get_ylabel())
ss = ax.get_subplotspec()
nrows, ncols, row0, row1, col0, col1 = ss.get_rows_columns()
labpo = ax.yaxis.get_label_position() # left or right
# loop through other axes, and search for label positions
# that are same as this one, and that share the appropriate
# column number.
# Add to a list associated with each axes of sibblings.
# This list is inspected in `axis.draw` by
# `axis._update_label_position`.
for axc in axs:
if axc != ax:
if axc.yaxis.get_label_position() == labpo:
ss = axc.get_subplotspec()
nrows, ncols, row0, row1, colc0, colc1 = \
ss.get_rows_columns()
if (labpo == 'left' and colc0 == col0 or
labpo == 'right' and colc1 == col1):
# grouper for groups of ylabels to align
self._align_ylabel_grp.join(ax, axc)
    def align_labels(self, axs=None):
        """
        Align the xlabels and ylabels of subplots with the same subplots
        row or column (respectively) if label alignment is being
        done automatically (i.e. the label position is not manually set).

        Alignment persists for draw events after this is called.

        Parameters
        ----------
        axs : list of `~matplotlib.axes.Axes`
            Optional list (or ndarray) of `~matplotlib.axes.Axes`
            to align the labels.
            Default is to align all axes on the figure.

        See Also
        --------
        matplotlib.figure.Figure.align_xlabels
        matplotlib.figure.Figure.align_ylabels
        """
        # Convenience wrapper: delegate to the per-direction aligners.
        self.align_xlabels(axs=axs)
        self.align_ylabels(axs=axs)
def add_gridspec(self, nrows, ncols, **kwargs):
"""
Return a `.GridSpec` that has this figure as a parent. This allows
complex layout of axes in the figure.
Parameters
----------
nrows : int
Number of rows in grid.
ncols : int
Number or columns in grid.
Returns
-------
gridspec : `.GridSpec`
Other Parameters
----------------
**kwargs
Keyword arguments are passed to `.GridSpec`.
See Also
--------
matplotlib.pyplot.subplots
Examples
--------
Adding a subplot that spans two rows::
fig = plt.figure()
gs = fig.add_gridspec(2, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[1, 0])
# spans two rows:
ax3 = fig.add_subplot(gs[:, 1])
"""
_ = kwargs.pop('figure', None) # pop in case user has added this...
gs = GridSpec(nrows=nrows, ncols=ncols, figure=self, **kwargs)
self._gridspecs.append(gs)
return gs
def figaspect(arg):
    """
    Calculate the width and height for a figure with a specified aspect ratio.

    While the height is taken from :rc:`figure.figsize`, the width is
    adjusted to match the desired aspect ratio. Additionally, it is ensured
    that the width is in the range [4., 16.] and the height is in the range
    [2., 16.]. If necessary, the default height is adjusted to ensure this.

    Parameters
    ----------
    arg : scalar or 2d array
        If a scalar, this defines the aspect ratio (i.e. the ratio height /
        width).
        In case of an array the aspect ratio is number of rows / number of
        columns, so that the array could be fitted in the figure undistorted.

    Returns
    -------
    width, height
        The figure size in inches.

    Notes
    -----
    If you want to create an axes within the figure, that still preserves the
    aspect ratio, be sure to create it with equal width and height. See
    examples below.

    Thanks to Fernando Perez for this function.

    Examples
    --------
    Make a figure twice as tall as it is wide::

        w, h = figaspect(2.)
        fig = Figure(figsize=(w, h))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
        ax.imshow(A, **kwargs)

    Make a figure with the proper aspect for an array::

        A = rand(5, 3)
        w, h = figaspect(A)
        fig = Figure(figsize=(w, h))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
        ax.imshow(A, **kwargs)
    """
    # Derive the aspect ratio: rows/cols for an array, the value itself
    # for a scalar argument.
    if hasattr(arg, 'shape') and not np.isscalar(arg):
        nrows, ncols = arg.shape[:2]
        aspect = nrows / ncols
    else:
        aspect = arg
    # Min/max sizes to respect when autoscaling; currently hardwired.
    size_min = np.array((4.0, 2.0))    # min length for width/height
    size_max = np.array((16.0, 16.0))  # max length for width/height
    # Start from the default figure height and derive the width from the
    # requested aspect ratio.
    default_height = rcParams['figure.figsize'][1]
    newsize = np.array((default_height / aspect, default_height))
    # Scale up so neither dimension drops below the minimum ...
    newsize /= min(1.0, *(newsize / size_min))
    # ... and scale down so neither dimension exceeds the maximum.
    newsize /= max(1.0, *(newsize / size_max))
    # For really funky aspect ratios, break the ratio but respect the
    # min/max dimensions (we don't want figures 10 feet tall!).
    newsize = np.clip(newsize, size_min, size_max)
    return newsize
# Register the Figure kwdoc so docstrings using %(Figure)s interpolation
# pick up the full keyword documentation.
docstring.interpd.update(Figure=martist.kwdoc(Figure))
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
c4cfcfe5af13c6bba69de8261120099d274a6277 | 4fbdf94ee280515df7f285b80ab0590c8c753dd0 | /image_gradients.py | 08a726e515a5d1d7bb433b6bc2444b046a9ddfdb | [] | no_license | hxh-dhruv-hxh/Some-OpenCV-Codes | 97d48e9aaf9f0029a4cfd6f4a94100dd4770a938 | b77902ea5da233809bbf3260c744c53fdb9c0184 | refs/heads/main | 2023-04-11T11:57:11.988679 | 2021-04-24T13:49:10 | 2021-04-24T13:49:10 | 361,171,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load the sample sudoku image as grayscale and compare several edge /
# gradient operators side by side.
img = cv2.imread('opencv-master/samples/data/sudoku.png', cv2.IMREAD_GRAYSCALE)
# Laplacian: second-derivative operator; take |.| and convert back to uint8.
lap = np.uint8(np.absolute(cv2.Laplacian(img, cv2.CV_64F, ksize=3)))
# Sobel x-derivative highlights vertical edges, y-derivative horizontal ones.
sobel_x = cv2.Sobel(img, cv2.CV_64F, 1, 0)
sobel_y = cv2.Sobel(img, cv2.CV_64F, 0, 1)
sobel_x = np.uint8(np.absolute(sobel_x))
sobel_y = np.uint8(np.absolute(sobel_y))
# Merge both Sobel responses into a single edge map.
sobel_combined = cv2.bitwise_or(sobel_x, sobel_y)
# Canny with hysteresis thresholds 150/250.
edges = cv2.Canny(img, 150, 250)
titles = ['image', 'lap', 'SobelX', 'SobelY', 'sobelCombined', 'Canny']
images = [img, lap, sobel_x, sobel_y, sobel_combined, edges]
for position, (picture, caption) in enumerate(zip(images, titles), start=1):
    plt.subplot(2, 3, position)
    plt.imshow(picture, 'gray')
    plt.title(caption)
    plt.xticks([])
    plt.yticks([])
plt.show() | [
"55949575+dhrv04@users.noreply.github.com"
] | 55949575+dhrv04@users.noreply.github.com |
768abaf1511810961280fb83757f380b00ffe82d | e099b5691a78eca8022fdeaa8d0efb75ecb8c0f6 | /day-19-turtle-sketch/main.py | 427af5fa3d7b53721b2cabda19262404a5a257e9 | [] | no_license | kpgabriel/PyCharmProjectsUdemy | b1cfd16ce017aff1b5ad94ba45a4d205fa97b4ef | 4440fac3d2a12603b37bdb67047a429f77f1985c | refs/heads/master | 2023-06-04T09:08:52.853580 | 2021-06-22T00:29:58 | 2021-06-22T00:29:58 | 373,293,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | import random
from turtle import Turtle, Screen
# Turtle race: six colored turtles line up on the left edge, the user bets
# on a color, and the turtles advance by random steps until one crosses the
# finish line at x > 230.
# Fix: removed the dead counter variable `i`, which was initialized and
# incremented but never read.
screen = Screen()
screen.setup(height=400, width=500)
is_race_on = False
colors = ["red", "orange", "yellow", "green", "blue", "purple"]
all_turtles = []
y = -100  # starting y for the first lane; lanes are 25 px apart
for color in colors:
    new_turtle = Turtle(shape="turtle")
    new_turtle.penup()
    new_turtle.color(color)
    new_turtle.goto(x=-230, y=y)
    all_turtles.append(new_turtle)
    y += 25
user_bet = screen.textinput("Bet", "Which turtle do you think will win? ")
if user_bet:
    is_race_on = True
while is_race_on:
    for turtle in all_turtles:
        # A turtle past x=230 has crossed the finish line.
        if turtle.xcor() > 230:
            winning_color = turtle.pencolor()
            is_race_on = False
            if user_bet.lower() == winning_color:
                print(f"{winning_color} Wins!!! Good Bet!")
            else:
                print(f"Sorry the winning turtle was {winning_color}")
        rand_distance = random.randint(0, 10)
        turtle.forward(rand_distance)
screen.exitonclick()
| [
"kpgabriel17@gmail.com"
] | kpgabriel17@gmail.com |
6733aab9ea53e9cbe7a36f8c18521ad328708815 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /pytorch/source/PIL/ImageQt.py | b747781c50bd2eede24eb9145a6224a4a90712ff | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 6,558 | py | #
# The Python Imaging Library.
# $Id$
#
# a simple Qt image interface.
#
# history:
# 2006-06-03 fl: created
# 2006-06-04 fl: inherit from QImage instead of wrapping it
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com)
#
# Copyright (c) 2006 by Secret Labs AB
# Copyright (c) 2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isPath, py3
from io import BytesIO
import sys
# Candidate Qt bindings, in preference order: [short version tag, module].
qt_versions = [
    ['5', 'PyQt5'],
    ['side2', 'PySide2'],
    ['4', 'PyQt4'],
    ['side', 'PySide']
]
# If a version has already been imported, attempt it first
qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules,
                 reverse=True)
# Try each binding in turn; the first one that imports wins.  RuntimeError
# is caught as well because mixing Qt bindings can raise it at import time.
for qt_version, qt_module in qt_versions:
    try:
        if qt_module == 'PyQt5':
            from PyQt5.QtGui import QImage, qRgba, QPixmap
            from PyQt5.QtCore import QBuffer, QIODevice
        elif qt_module == 'PySide2':
            from PySide2.QtGui import QImage, qRgba, QPixmap
            from PySide2.QtCore import QBuffer, QIODevice
        elif qt_module == 'PyQt4':
            from PyQt4.QtGui import QImage, qRgba, QPixmap
            from PyQt4.QtCore import QBuffer, QIODevice
        elif qt_module == 'PySide':
            from PySide.QtGui import QImage, qRgba, QPixmap
            from PySide.QtCore import QBuffer, QIODevice
    except (ImportError, RuntimeError):
        continue
    qt_is_installed = True
    break
else:
    # for/else: no binding imported successfully.
    qt_is_installed = False
    qt_version = None
def rgb(r, g, b, a=255):
    """(Internal) Turn an RGB(A) color into a Qt-compatible color integer.

    :param r, g, b: Channel values in 0..255.
    :param a: Alpha channel, default fully opaque (255).
    """
    # qRgba packs the channels; the mask keeps the result within the
    # unsigned 32-bit range regardless of the binding's return type.
    return (qRgba(r, g, b, a) & 0xffffffff)
def fromqimage(im):
    """
    Convert a QImage to a PIL Image by round-tripping through an
    in-memory buffer.

    :param im: A ``QImage`` (or object with the same save interface).
    :returns: A `PIL.Image.Image` opened from the serialized data.
    """
    qt_buffer = QBuffer()
    qt_buffer.open(QIODevice.ReadWrite)
    # PNG round-trips the alpha channel; PPM is friendlier to Image.open
    # when there is no alpha.
    if im.hasAlphaChannel():
        im.save(qt_buffer, 'png')
    else:
        im.save(qt_buffer, 'ppm')
    raw = BytesIO()
    try:
        raw.write(qt_buffer.data())
    except TypeError:
        # workaround for Python 2
        raw.write(str(qt_buffer.data()))
    qt_buffer.close()
    raw.seek(0)
    return Image.open(raw)
def fromqpixmap(im):
    """Convert a QPixmap to a PIL Image (delegates to `fromqimage`)."""
    return fromqimage(im)
def align8to32(bytes, width, mode):
    """
    Pad each scanline of *bytes* so that every row starts on a 32-bit
    boundary, as required by QImage.

    :param bytes: Raw pixel data, rows packed back to back.
    :param width: Image width in pixels.
    :param mode: PIL mode; one of ``"1"``, ``"L"`` or ``"P"``.
    :returns: The data with zero-padding appended to each row, or the
        input unchanged when rows are already aligned.
    """
    bits_per_pixel = {'1': 1, 'L': 8, 'P': 8}[mode]
    # Bytes per row, rounding partial bytes up.
    row_bytes = (bits_per_pixel * width + 7) // 8
    # Zero bytes needed to reach the next multiple of four.
    padding = -row_bytes % 4
    if not padding:
        # already 32 bit aligned by luck
        return bytes
    pad = b'\x00' * padding
    row_count = len(bytes) // row_bytes
    return b''.join(
        bytes[row * row_bytes:(row + 1) * row_bytes] + pad
        for row in range(row_count))
def _toqclass_helper(im):
    """(Internal) Normalize *im* and compute the pieces a QImage needs.

    Returns a dict with the raw ``data``, the (possibly converted) ``im``,
    the QImage ``format`` constant and an optional ``colortable``.
    Raises ValueError for unsupported PIL modes.
    """
    data = None
    colortable = None
    # handle filename, if given instead of image name
    if hasattr(im, "toUtf8"):
        # FIXME - is this really the best way to do this?
        if py3:
            im = str(im.toUtf8(), "utf-8")
        else:
            im = unicode(im.toUtf8(), "utf-8")  # noqa: F821
    if isPath(im):
        im = Image.open(im)
    if im.mode == "1":
        # 1-bit bilevel image.
        format = QImage.Format_Mono
    elif im.mode == "L":
        # 8-bit grayscale: indexed format with a gray color table.
        format = QImage.Format_Indexed8
        colortable = []
        for i in range(256):
            colortable.append(rgb(i, i, i))
    elif im.mode == "P":
        # Palette image: build the color table from the PIL palette.
        format = QImage.Format_Indexed8
        colortable = []
        palette = im.getpalette()
        for i in range(0, len(palette), 3):
            colortable.append(rgb(*palette[i:i+3]))
    elif im.mode == "RGB":
        # Qt expects BGRX byte order for RGB32.
        data = im.tobytes("raw", "BGRX")
        format = QImage.Format_RGB32
    elif im.mode == "RGBA":
        try:
            data = im.tobytes("raw", "BGRA")
        except SystemError:
            # workaround for earlier versions: swap channels manually
            r, g, b, a = im.split()
            im = Image.merge("RGBA", (b, g, r, a))
        format = QImage.Format_ARGB32
    else:
        raise ValueError("unsupported image mode %r" % im.mode)
    # Indexed modes need their scanlines padded to 32-bit boundaries.
    __data = data or align8to32(im.tobytes(), im.size[0], im.mode)
    return {
        'data': __data, 'im': im, 'format': format, 'colortable': colortable
    }
# Only define the QImage wrapper when a Qt binding was found above.
if qt_is_installed:
    class ImageQt(QImage):

        def __init__(self, im):
            """
            A PIL image wrapper for Qt.  This is a subclass of PyQt's
            QImage class.

            :param im: A PIL Image object, or a file name (given either as
                Python string or a PyQt string object).
            """
            im_data = _toqclass_helper(im)
            # must keep a reference, or Qt will crash!
            # All QImage constructors that take data operate on an existing
            # buffer, so this buffer has to hang on for the life of the image.
            # Fixes https://github.com/python-pillow/Pillow/issues/1370
            self.__data = im_data['data']
            QImage.__init__(self,
                            self.__data, im_data['im'].size[0],
                            im_data['im'].size[1], im_data['format'])
            if im_data['colortable']:
                self.setColorTable(im_data['colortable'])
def toqimage(im):
    """Convert a PIL Image to an `ImageQt` (QImage subclass)."""
    return ImageQt(im)
def toqpixmap(im):
    """Convert a PIL Image to a QPixmap via an intermediate QImage."""
    # NOTE(review): RGB images are promoted to RGBA before conversion,
    # apparently to work around a rendering bug in the direct RGB path —
    # confirm whether this is still required with current Qt versions.
    if im.mode == 'RGB':
        im = im.convert('RGBA')
    return QPixmap.fromImage(toqimage(im))
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
b7174ad5e70aad83997120f3f26a0af8c31902f4 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20210501/get_dscp_configuration.py | 1dde2dd4b7042811e658cde1bbc7524c32f1811b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,569 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetDscpConfigurationResult',
    'AwaitableGetDscpConfigurationResult',
    'get_dscp_configuration',
    'get_dscp_configuration_output',
]
@pulumi.output_type
class GetDscpConfigurationResult:
    """
    Differentiated Services Code Point configuration for any given network interface
    """
    def __init__(__self__, associated_network_interfaces=None, destination_ip_ranges=None, destination_port_ranges=None, etag=None, id=None, location=None, markings=None, name=None, protocol=None, provisioning_state=None, qos_collection_id=None, qos_definition_collection=None, resource_guid=None, source_ip_ranges=None, source_port_ranges=None, tags=None, type=None):
        # Validate and store every field with a single table-driven pass.
        # Order matters: fields are checked and set in declaration order,
        # matching the generated per-field code this replaces.
        _fields = (
            ('associated_network_interfaces', associated_network_interfaces, list),
            ('destination_ip_ranges', destination_ip_ranges, list),
            ('destination_port_ranges', destination_port_ranges, list),
            ('etag', etag, str),
            ('id', id, str),
            ('location', location, str),
            ('markings', markings, list),
            ('name', name, str),
            ('protocol', protocol, str),
            ('provisioning_state', provisioning_state, str),
            ('qos_collection_id', qos_collection_id, str),
            ('qos_definition_collection', qos_definition_collection, list),
            ('resource_guid', resource_guid, str),
            ('source_ip_ranges', source_ip_ranges, list),
            ('source_port_ranges', source_port_ranges, list),
            ('tags', tags, dict),
            ('type', type, str),
        )
        for _key, _value, _expected in _fields:
            # Only truthy values are type-checked, matching the generated
            # per-field guards this loop replaces.
            if _value and not isinstance(_value, _expected):
                raise TypeError(
                    "Expected argument '%s' to be a %s"
                    % (_key, _expected.__name__))
            pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="associatedNetworkInterfaces")
    def associated_network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
        """
        Associated Network Interfaces to the DSCP Configuration.
        """
        return pulumi.get(self, "associated_network_interfaces")

    @property
    @pulumi.getter(name="destinationIpRanges")
    def destination_ip_ranges(self) -> Optional[Sequence['outputs.QosIpRangeResponse']]:
        """
        Destination IP ranges.
        """
        return pulumi.get(self, "destination_ip_ranges")

    @property
    @pulumi.getter(name="destinationPortRanges")
    def destination_port_ranges(self) -> Optional[Sequence['outputs.QosPortRangeResponse']]:
        """
        Destination port ranges.
        """
        return pulumi.get(self, "destination_port_ranges")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def markings(self) -> Optional[Sequence[int]]:
        """
        List of markings to be used in the configuration.
        """
        return pulumi.get(self, "markings")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """
        RNM supported protocol types.
        """
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the DSCP Configuration resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="qosCollectionId")
    def qos_collection_id(self) -> str:
        """
        Qos Collection ID generated by RNM.
        """
        return pulumi.get(self, "qos_collection_id")

    @property
    @pulumi.getter(name="qosDefinitionCollection")
    def qos_definition_collection(self) -> Optional[Sequence['outputs.QosDefinitionResponse']]:
        """
        QoS object definitions
        """
        return pulumi.get(self, "qos_definition_collection")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The resource GUID property of the DSCP Configuration resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter(name="sourceIpRanges")
    def source_ip_ranges(self) -> Optional[Sequence['outputs.QosIpRangeResponse']]:
        """
        Source IP ranges.
        """
        return pulumi.get(self, "source_ip_ranges")

    @property
    @pulumi.getter(name="sourcePortRanges")
    def source_port_ranges(self) -> Optional[Sequence['outputs.QosPortRangeResponse']]:
        """
        Sources port ranges.
        """
        return pulumi.get(self, "source_port_ranges")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetDscpConfigurationResult(GetDscpConfigurationResult):
    """Awaitable wrapper so the result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` marks this method as a generator, which
        # is what makes the object awaitable; the real value is produced
        # by the `return` below.
        if False:
            yield self
        return GetDscpConfigurationResult(
            associated_network_interfaces=self.associated_network_interfaces,
            destination_ip_ranges=self.destination_ip_ranges,
            destination_port_ranges=self.destination_port_ranges,
            etag=self.etag,
            id=self.id,
            location=self.location,
            markings=self.markings,
            name=self.name,
            protocol=self.protocol,
            provisioning_state=self.provisioning_state,
            qos_collection_id=self.qos_collection_id,
            qos_definition_collection=self.qos_definition_collection,
            resource_guid=self.resource_guid,
            source_ip_ranges=self.source_ip_ranges,
            source_port_ranges=self.source_port_ranges,
            tags=self.tags,
            type=self.type)
def get_dscp_configuration(dscp_configuration_name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDscpConfigurationResult:
    """
    Look up the Differentiated Services Code Point configuration for a
    network interface.

    :param str dscp_configuration_name: The name of the resource.
    :param str resource_group_name: The name of the resource group.
    :param pulumi.InvokeOptions opts: Options controlling the invoke.
    """
    __args__ = {
        'dscpConfigurationName': dscp_configuration_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20210501:getDscpConfiguration', __args__, opts=opts, typ=GetDscpConfigurationResult).value
    # Re-wrap the plain result in its awaitable counterpart, copying every
    # output field by name.
    _field_names = (
        'associated_network_interfaces', 'destination_ip_ranges',
        'destination_port_ranges', 'etag', 'id', 'location', 'markings',
        'name', 'protocol', 'provisioning_state', 'qos_collection_id',
        'qos_definition_collection', 'resource_guid', 'source_ip_ranges',
        'source_port_ranges', 'tags', 'type')
    return AwaitableGetDscpConfigurationResult(
        **{field: getattr(__ret__, field) for field in _field_names})
# The decorator synthesizes the Output-returning implementation from
# `get_dscp_configuration`; the `...` body is intentional.
@_utilities.lift_output_func(get_dscp_configuration)
def get_dscp_configuration_output(dscp_configuration_name: Optional[pulumi.Input[str]] = None,
                                  resource_group_name: Optional[pulumi.Input[str]] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDscpConfigurationResult]:
    """
    Differentiated Services Code Point configuration for any given network interface

    :param str dscp_configuration_name: The name of the resource.
    :param str resource_group_name: The name of the resource group.
    """
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
04236783dd857f37f9aed820802a70a89a16edfa | e7951f82f195e94b6791247b80b0e6f20030579c | /examinations/settings.py | b15fb7724b1d445f8e6b443179e2d00453c87014 | [] | no_license | pkula/examination | 84becf8f973c2b2ce8a7799f078da2903532fc94 | 767dcf51fbdbd72e0722640d24802c3d28b023fe | refs/heads/master | 2020-04-26T04:00:46.584545 | 2019-03-22T00:11:21 | 2019-03-22T00:11:21 | 173,286,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,367 | py | """
Django settings for examinations project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The key is read from the environment when available; the hard-coded value
# is only a development fallback and must never be used in production.
SECRET_KEY = os.environ.get(
    'SECRET_KEY', 'j51vrf3*0yww)_a51q!80%45+9ulwkoso_z%ncjepr1vwwqeq*')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# The last three entries enable Django REST Framework with token auth plus
# the local exam-sheets API app.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'examinations.examSheetsApi',
    'rest_framework.authtoken',
]
# Default Django middleware stack (order matters: security first, then
# session/CSRF/auth, clickjacking protection last).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'examinations.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'examinations.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Local SQLite file; fine for development, replace for production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# All DRF endpoints require authentication by default; the commented line
# documents how to open them up during local experimentation.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
        #'rest_framework.permissions.AllowAny',
    )
} | [
"pxkula@gmail.com"
] | pxkula@gmail.com |
6a926b6a082e80870569878b3faf26af11142290 | d48b735d608d00393a80893060d287d113cded28 | /scrapy_redis/scheduler.py | eb5f508bc31bc59049ec5e5222f93bd4c7459b3b | [
"Apache-2.0"
] | permissive | gavinliu4011/housespider | 8053259eaeb0a3fb75c36b3d7294f759dff96400 | 3e0f3ae319e7ba3006b0a0bf25de7e12f91c03cc | refs/heads/master | 2020-03-22T12:32:40.784965 | 2018-07-07T05:53:16 | 2018-07-07T05:53:16 | 140,046,370 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,460 | py | import importlib
import six
from scrapy.utils.misc import load_object
from . import connection, defaults
from .defaults import BLOOMFILTER_BIT, BLOOMFILTER_HASH_NUMBER
# TODO: add SCRAPY_JOB support.
class Scheduler(object):
    """Redis-based scheduler

    Settings
    --------
    SCHEDULER_PERSIST : bool (default: False)
        Whether to persist or clear redis queue.
    SCHEDULER_FLUSH_ON_START : bool (default: False)
        Whether to flush redis queue on start.
    SCHEDULER_IDLE_BEFORE_CLOSE : int (default: 0)
        How many seconds to wait before closing if no message is received.
    SCHEDULER_QUEUE_KEY : str
        Scheduler redis key.
    SCHEDULER_QUEUE_CLASS : str
        Scheduler queue class.
    SCHEDULER_DUPEFILTER_KEY : str
        Scheduler dupefilter redis key.
    SCHEDULER_DUPEFILTER_CLASS : str
        Scheduler dupefilter class.
    SCHEDULER_SERIALIZER : str
        Scheduler serializer.
    """

    def __init__(self, server,
                 persist=False,
                 flush_on_start=False,
                 queue_key=defaults.SCHEDULER_QUEUE_KEY,
                 queue_cls=defaults.SCHEDULER_QUEUE_CLASS,
                 dupefilter_key=defaults.SCHEDULER_DUPEFILTER_KEY,
                 dupefilter_cls=defaults.SCHEDULER_DUPEFILTER_CLASS,
                 idle_before_close=0,
                 serializer=None):
        """Initialize scheduler.

        Parameters
        ----------
        server : Redis
            The redis server instance.
        persist : bool
            Whether to flush requests when closing. Default is False.
        flush_on_start : bool
            Whether to flush requests on start. Default is False.
        queue_key : str
            Requests queue key.
        queue_cls : str
            Importable path to the queue class.
        dupefilter_key : str
            Duplicates filter key.
        dupefilter_cls : str
            Importable path to the dupefilter class.
        idle_before_close : int
            Timeout before giving up.
        """
        if idle_before_close < 0:
            raise TypeError("idle_before_close cannot be negative")
        self.server = server
        self.persist = persist
        self.flush_on_start = flush_on_start
        self.queue_key = queue_key
        self.queue_cls = queue_cls
        self.dupefilter_cls = dupefilter_cls
        self.dupefilter_key = dupefilter_key
        self.idle_before_close = idle_before_close
        self.serializer = serializer
        self.stats = None

    def __len__(self):
        # Number of requests currently pending in the redis queue.
        return len(self.queue)

    @classmethod
    def from_settings(cls, settings):
        """Build a scheduler from a Scrapy settings object."""
        kwargs = {
            'persist': settings.getbool('SCHEDULER_PERSIST'),
            'flush_on_start': settings.getbool('SCHEDULER_FLUSH_ON_START'),
            'idle_before_close': settings.getint('SCHEDULER_IDLE_BEFORE_CLOSE'),
        }
        # If these values are missing, it means we want to use the defaults.
        optional = {
            # TODO: Use custom prefixes for this settings to note that are
            # specific to scrapy-redis.
            'queue_key': 'SCHEDULER_QUEUE_KEY',
            'queue_cls': 'SCHEDULER_QUEUE_CLASS',
            'dupefilter_key': 'SCHEDULER_DUPEFILTER_KEY',
            # We use the default setting name to keep compatibility.
            'dupefilter_cls': 'DUPEFILTER_CLASS',
            'serializer': 'SCHEDULER_SERIALIZER',
        }
        for name, setting_name in optional.items():
            val = settings.get(setting_name)
            if val:
                kwargs[name] = val
        # Support serializer as a path to a module.
        if isinstance(kwargs.get('serializer'), six.string_types):
            kwargs['serializer'] = importlib.import_module(kwargs['serializer'])
        server = connection.from_settings(settings)
        # Ensure the connection is working.
        server.ping()
        return cls(server=server, **kwargs)

    @classmethod
    def from_crawler(cls, crawler):
        instance = cls.from_settings(crawler.settings)
        # FIXME: for now, stats are only supported from this constructor
        instance.stats = crawler.stats
        return instance

    def open(self, spider):
        """Instantiate the queue and dupefilter when the spider opens."""
        self.spider = spider
        try:
            self.queue = load_object(self.queue_cls)(
                server=self.server,
                spider=spider,
                key=self.queue_key % {'spider': spider.name},
                serializer=self.serializer,
            )
        except TypeError as e:
            # Bug fix: the message used to be passed logging-style
            # (ValueError("fmt %s", args...)), so it was never interpolated
            # and the exception printed the raw tuple instead.
            raise ValueError("Failed to instantiate queue class '%s': %s"
                             % (self.queue_cls, e))
        try:
            self.df = load_object(self.dupefilter_cls)(
                server=self.server,
                key=self.dupefilter_key % {'spider': spider.name},
                debug=spider.settings.getbool('DUPEFILTER_DEBUG'),
                bit=spider.settings.getint('BLOOMFILTER_BIT', BLOOMFILTER_BIT),
                hash_number=spider.settings.getint('BLOOMFILTER_HASH_NUMBER', BLOOMFILTER_HASH_NUMBER)
            )
        except TypeError as e:
            # Same interpolation bug fix as for the queue class above.
            raise ValueError("Failed to instantiate dupefilter class '%s': %s"
                             % (self.dupefilter_cls, e))
        if self.flush_on_start:
            self.flush()
        # notice if there are requests already in the queue to resume the crawl
        if len(self.queue):
            spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))

    def close(self, reason):
        # Drop pending state unless persistence across runs was requested.
        if not self.persist:
            self.flush()

    def flush(self):
        """Clear both the dupefilter and the request queue."""
        self.df.clear()
        self.queue.clear()

    def enqueue_request(self, request):
        """Push a request unless the dupefilter has already seen it."""
        if not request.dont_filter and self.df.request_seen(request):
            self.df.log(request, self.spider)
            return False
        if self.stats:
            self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
        self.queue.push(request)
        return True

    def next_request(self):
        """Pop the next request, blocking up to ``idle_before_close`` seconds."""
        block_pop_timeout = self.idle_before_close
        request = self.queue.pop(block_pop_timeout)
        if request and self.stats:
            self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
        return request

    def has_pending_requests(self):
        return len(self) > 0
| [
"gavinliu4011@163.com"
] | gavinliu4011@163.com |
d6c933feb40555e72159079ffea381c447710cfc | e49ee66bc574f76b05248d9484e3cb8628aed60a | /tests/scripts/verifiers.py | 72cc68d00fa53e2cb9093d904c31e6621220696b | [] | no_license | mgoszcz/articles | 8399ba78069c36eecdd787747aa2875c8aceb9ba | b68577daf552bdd1e61636ef974b3713092dff67 | refs/heads/master | 2022-11-07T00:04:24.703911 | 2020-07-02T21:06:46 | 2020-07-02T21:06:46 | 257,048,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,695 | py | import unittest
from lib.article import Article
from lib.article_dict import ArticleDict
from tests.test_data.article_test_data import ArticleDictTestData, ArticleTestData
class Verifiers(unittest.TestCase):
    """Reusable assertion helpers shared by the article test suites."""
    def verify_articles_in_dictionary(self, dictionary: ArticleDict, test_data: ArticleDictTestData):
        # The dictionary must hold exactly the articles described by the
        # test data: same count, and every expected article present.
        self.assertEqual(len(dictionary), len(test_data.articles), 'Verify count of articles is correct')
        for expected in test_data.articles:
            matched = any(
                expected.title == stored.title
                and expected.page == stored.page
                and expected.description == stored.description
                and expected.binder == stored.binder
                and expected.tags == stored.tags
                for stored in dictionary.values()
            )
            self.assertTrue(matched,
                            f'Verify article is present: title: {expected.title}, '
                            f'\n\tdescription: {expected.description}, \n\tpage: {expected.page}, '
                            f'\n\tbinder: {expected.binder}, \n\ttags: {expected.tags}')
    def verify_article_with_title_only(self, article: Article, reference_article: ArticleTestData):
        # Only the title should be populated; all other fields stay empty.
        expectations = (
            (article.title, reference_article.title, 'Verify proper title is created'),
            (article.description, '', 'Verify description is empty string'),
            (article.binder, '', 'Verify binder is empty string'),
            (article.page, '', 'Verify page is empty string'),
            (article.tags, [], 'Verify tags is empty list'),
        )
        for actual, expected, message in expectations:
            self.assertEqual(actual, expected, message)
        self.assertNotEqual(article.uuid, None, 'Verify uuid is created')
    def verify_article_with_all_fields(self, article: Article, reference_article: ArticleTestData):
        # Every field must match the reference article exactly.
        expectations = (
            (article.title, reference_article.title, 'Verify proper title is created'),
            (article.description, reference_article.description, 'Verify proper description is created'),
            (article.binder, reference_article.binder, 'Verify proper binder is created'),
            (article.page, reference_article.page, 'Verify proper page is created'),
            (article.tags, reference_article.tags, 'Verify proper tags are created'),
        )
        for actual, expected, message in expectations:
            self.assertEqual(actual, expected, message)
self.assertNotEqual(article.uuid, None, 'Verify uuid is created') | [
"marcin.goszczynski88@gmail.com"
] | marcin.goszczynski88@gmail.com |
f2b7a5d3182b111a6c16fa52895980a61ec2dc88 | 46d4afa2ebf0b04541766291ec238271a6b01f4b | /dicerollv2.py | 5316f764eb7046464ea604d222de6a84d6be10ec | [] | no_license | Kurolox/python-learning | 77f1732e03bf8d4a58de86d5fc860931720ab816 | b3621453069ca5a3f07718d0f05e332372f52801 | refs/heads/master | 2021-01-17T16:19:57.479866 | 2016-08-11T15:38:22 | 2016-08-11T15:38:22 | 65,402,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | import random
import re
def get_input():
    """Prompt until the user types a dice expression such as ``2d6+1``.

    Returns a dict with the keys ``nodices``, ``dnumber`` and ``modifier``.
    """
    print("Insert the number of dices you want to roll (format xdY+Z.)")
    # The pattern is constant, so compile it once outside the input loop.
    pattern = re.compile(r"(\d+)(d|D)(\d+)((\+|-)(\d+))?")
    while True:
        raw = input()
        match = pattern.search(raw)
        if match is None:
            # No xdY(+Z) pattern anywhere in the input: ask again.
            print("That's not a correct input. Try again.")
            continue
        # Group 4 holds the optional signed modifier ("+3"/"-2"); int()
        # accepts the leading sign directly.
        modifier = 0 if match.group(4) is None else int(match.group(4))
        return {
            "nodices": int(match.group(1)),
            "dnumber": int(match.group(3)),
            "modifier": modifier,
        }
def rolling_dices(dicedict):
    """Roll the dices described by ``dicedict`` and print the results.

    ``dicedict`` must contain ``nodices`` (number of dices) and ``dnumber``
    (sides per dice); ``modifier`` is used for the header print and defaults
    to 0 in the final total.  Returns the total including the modifier so
    callers can reuse the result (previously the function returned None).
    """
    print("Rolling " + str(dicedict["nodices"]) +
          " " + str(dicedict["dnumber"]) + "-sided dices with a"
          " modifier of " + str(dicedict["modifier"]) + ".\n")
    totaldiceroll = 0
    for _ in range(dicedict["nodices"]):
        roll = random.randint(1, dicedict["dnumber"])
        totaldiceroll += roll
        print(roll, end=" ")
    print("\n\nTotal without modifier: " + str(totaldiceroll))
    total_with_modifier = totaldiceroll + dicedict.get("modifier", 0)
    print("Total with modifier: " + str(total_with_modifier))
    return total_with_modifier
# Starting the program. I need to learn methods, man.
# Module-level entry point: runs immediately when the file is executed or
# imported (there is no ``if __name__ == "__main__"`` guard).
rolling_dices(get_input())
| [
"kurolox@gmail.com"
] | kurolox@gmail.com |
4de5d342f5f6db3ec70d35c5b46c60132fe5dbc6 | fae0af723a5d2b41fa57e5cc0bec700974440069 | /tencentcloud/faceid/v20180301/models.py | a078bf16f7239b59287d4ff2c20a108960d6620c | [
"Apache-2.0"
] | permissive | simiaoxiaoseng/tencentcloud-sdk-python | dc319b492967044bf08756a7591e06d70f6d1e4b | e93b2291526946fd2381fc9e40f7f4c7f34c7c42 | refs/heads/master | 2020-04-12T19:11:46.876644 | 2018-12-20T13:39:13 | 2018-12-20T13:39:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,143 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class DetectAuthRequest(AbstractModel):
    """DetectAuth request structure.
    """
    def __init__(self):
        """
        :param RuleId: Rule ID used to segment customer use cases, assigned offline by Tencent.
        :type RuleId: str
        :param TerminalType: This API does not require this parameter.
        :type TerminalType: str
        :param IdCard: Identity identifier (must be the national ID number when comparing
        against the authoritative public-security database).
        Rules: combination of a-zA-Z0-9, at most 32 characters.
        :type IdCard: str
        :param Name: Name, at most 32 characters.
        :type Name: str
        :param RedirectUrl: Callback URL redirected to after verification finishes, at most 1024 characters.
        :type RedirectUrl: str
        :param Extra: Pass-through field, returned when fetching the verification result.
        :type Extra: str
        :param ImageBase64: Base64 value of the photo used for face comparison;
        the Base64-encoded image must not exceed 3 MB, and only jpg and png are supported.
        :type ImageBase64: str
        """
        self.RuleId = None
        self.TerminalType = None
        self.IdCard = None
        self.Name = None
        self.RedirectUrl = None
        self.Extra = None
        self.ImageBase64 = None
    def _deserialize(self, params):
        # Copy each known field from the raw API parameter dict.
        self.RuleId = params.get("RuleId")
        self.TerminalType = params.get("TerminalType")
        self.IdCard = params.get("IdCard")
        self.Name = params.get("Name")
        self.RedirectUrl = params.get("RedirectUrl")
        self.Extra = params.get("Extra")
        self.ImageBase64 = params.get("ImageBase64")
class DetectAuthResponse(AbstractModel):
    """DetectAuth response structure.
    """
    def __init__(self):
        """
        :param Url: URL used to start the verification flow; WeChat H5 scenarios only.
        :type Url: str
        :param BizToken: Identifier of one verification flow, valid for 7,200 seconds;
        after verification completes it can be used to fetch the result.
        :type BizToken: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Url = None
        self.BizToken = None
        self.RequestId = None
    def _deserialize(self, params):
        self.Url = params.get("Url")
        self.BizToken = params.get("BizToken")
        self.RequestId = params.get("RequestId")
class GetActionSequenceRequest(AbstractModel):
    """GetActionSequence request structure (takes no parameters).
    """
class GetActionSequenceResponse(AbstractModel):
    """GetActionSequence response structure.
    """
    def __init__(self):
        """
        :param ActionSequence: Action order ("2,1" or "1,2"); 1 means open mouth, 2 means close eyes.
        :type ActionSequence: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.ActionSequence = None
        self.RequestId = None
    def _deserialize(self, params):
        self.ActionSequence = params.get("ActionSequence")
        self.RequestId = params.get("RequestId")
class GetDetectInfoRequest(AbstractModel):
    """GetDetectInfo request structure.
    """
    def __init__(self):
        """
        :param BizToken: Identifier of the verification flow, generated when calling the DetectAuth API.
        :type BizToken: str
        :param RuleId: Rule ID used to segment customer use cases, assigned offline by Tencent.
        :type RuleId: str
        :param InfoType: Which result info to fetch (0: everything; 1: text; 2: ID-card front/back;
        3: best video frame; 4: video). E.g. "134" fetches the text info, best frame and video.
        :type InfoType: str
        """
        self.BizToken = None
        self.RuleId = None
        self.InfoType = None
    def _deserialize(self, params):
        self.BizToken = params.get("BizToken")
        self.RuleId = params.get("RuleId")
        self.InfoType = params.get("InfoType")
class GetDetectInfoResponse(AbstractModel):
    """GetDetectInfo response structure.
    """
    def __init__(self):
        """
        :param DetectInfo: JSON string shaped as follows.
        {
          // Text information
          "Text": {
            "ErrCode": null,       // final result code of this verification; 0 means success
            "ErrMsg": null,        // error message of this verification
            "IdCard": "",          // ID number finally obtained by this verification
            "Name": "",            // name finally obtained by this verification
            "OcrNation": null,     // ethnicity read in the OCR stage
            "OcrAddress": null,    // address read in the OCR stage
            "OcrBirth": null,      // birth info read in the OCR stage
            "OcrAuthority": null,  // issuing authority read in the OCR stage
            "OcrValidDate": null,  // validity period read in the OCR stage
            "OcrName": null,       // name read in the OCR stage
            "OcrIdCard": null,     // ID number read in the OCR stage
            "OcrGender": null,     // gender read in the OCR stage
            "LiveStatus": null,    // liveness-detection stage error code; 0 means success
            "LiveMsg": null,       // liveness-detection stage error message
            "Comparestatus": null, // 1:1 comparison stage error code; 0 means success
            "Comparemsg": null,    // 1:1 comparison stage error message
            "Extra": "",           // Extra info passed in through DetectAuth
            "Detail": {            // liveness / 1:1 comparison details
              "LivenessData": []
            }
          },
          // Base64 of the ID-card front/back photos
          "IdCardData": {
            "OcrFront": null,
            "OcrBack": null
          },
          // Base64 of the best video frame
          "BestFrame": {
            "BestFrame": null
          },
          // Base64 of the liveness video
          "VideoData": {
            "LivenessVideo": null
          }
        }
        :type DetectInfo: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.DetectInfo = None
        self.RequestId = None
    def _deserialize(self, params):
        self.DetectInfo = params.get("DetectInfo")
        self.RequestId = params.get("RequestId")
class GetLiveCodeRequest(AbstractModel):
    """GetLiveCode request structure (takes no parameters).
    """
class GetLiveCodeResponse(AbstractModel):
    """GetLiveCode response structure.
    """
    def __init__(self):
        """
        :param LiveCode: Numeric passcode, e.g. "1234".
        :type LiveCode: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.LiveCode = None
        self.RequestId = None
    def _deserialize(self, params):
        self.LiveCode = params.get("LiveCode")
        self.RequestId = params.get("RequestId")
class ImageRecognitionRequest(AbstractModel):
    """ImageRecognition request structure.
    """
    def __init__(self):
        """
        :param IdCard: National ID number.
        :type IdCard: str
        :param Name: Name.
        :type Name: str
        :param ImageBase64: Base64 value of the photo used for face comparison;
        the Base64-encoded image must not exceed 3 MB, and only jpg and png are supported.
        :type ImageBase64: str
        :param Optional: This API does not require this parameter.
        :type Optional: str
        """
        self.IdCard = None
        self.Name = None
        self.ImageBase64 = None
        self.Optional = None
    def _deserialize(self, params):
        self.IdCard = params.get("IdCard")
        self.Name = params.get("Name")
        self.ImageBase64 = params.get("ImageBase64")
        self.Optional = params.get("Optional")
class ImageRecognitionResponse(AbstractModel):
    """ImageRecognition response structure.
    """
    def __init__(self):
        """
        :param Sim: Similarity in [0.00, 100.00]. A score >= 70 is the recommended threshold
        for judging the same person (at threshold 70 the false-accept rate is about 1/1000;
        at 80 about 1/10000); tune the threshold for your scenario.
        :type Sim: float
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Sim = None
        self.RequestId = None
    def _deserialize(self, params):
        self.Sim = params.get("Sim")
        self.RequestId = params.get("RequestId")
class LivenessCompareRequest(AbstractModel):
    """LivenessCompare request structure.
    """
    def __init__(self):
        """
        :param ImageBase64: Base64 value of the photo used for face comparison;
        the Base64-encoded image must not exceed 3 MB, and only jpg and png are supported.
        :type ImageBase64: str
        :param VideoBase64: Base64 value of the video used for liveness detection;
        the Base64-encoded data must not exceed 5 MB; mp4, avi and flv are supported.
        :type VideoBase64: str
        :param LivenessType: Liveness detection type: LIP/ACTION/SILENT.
        LIP is the digit (lip-sync) mode, ACTION the action mode, SILENT the silent mode; pass exactly one.
        :type LivenessType: str
        :param ValidateData: Digit mode: the lip-sync passcode (e.g. 1234), fetched beforehand;
        action mode: the action order (e.g. 12 or 21), fetched beforehand;
        silent mode: empty.
        :type ValidateData: str
        :param Optional: This API does not require this parameter.
        :type Optional: str
        """
        self.ImageBase64 = None
        self.VideoBase64 = None
        self.LivenessType = None
        self.ValidateData = None
        self.Optional = None
    def _deserialize(self, params):
        self.ImageBase64 = params.get("ImageBase64")
        self.VideoBase64 = params.get("VideoBase64")
        self.LivenessType = params.get("LivenessType")
        self.ValidateData = params.get("ValidateData")
        self.Optional = params.get("Optional")
class LivenessCompareResponse(AbstractModel):
    """LivenessCompare response structure.
    """
    def __init__(self):
        """
        :param BestFrameBase64: Best video frame after verification passes, Base64-encoded, jpg format.
        :type BestFrameBase64: str
        :param Sim: Similarity in [0.00, 100.00]. A score >= 70 is the recommended threshold
        for judging the same person (false-accept rate ~1/1000 at 70, ~1/10000 at 80); tune per scenario.
        :type Sim: float
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.BestFrameBase64 = None
        self.Sim = None
        self.RequestId = None
    def _deserialize(self, params):
        self.BestFrameBase64 = params.get("BestFrameBase64")
        self.Sim = params.get("Sim")
        self.RequestId = params.get("RequestId")
class LivenessRecognitionRequest(AbstractModel):
    """LivenessRecognition request structure.
    """
    def __init__(self):
        """
        :param IdCard: National ID number.
        :type IdCard: str
        :param Name: Name.
        :type Name: str
        :param VideoBase64: Base64 value of the video used for liveness detection;
        the Base64-encoded data must not exceed 5 MB; mp4, avi and flv are supported.
        :type VideoBase64: str
        :param LivenessType: Liveness detection type: LIP/ACTION/SILENT.
        LIP is the digit (lip-sync) mode, ACTION the action mode, SILENT the silent mode; pass exactly one.
        :type LivenessType: str
        :param ValidateData: Digit mode: the lip-sync passcode (e.g. 1234), fetched beforehand;
        action mode: the action order (e.g. 12 or 21), fetched beforehand;
        silent mode: empty.
        :type ValidateData: str
        :param Optional: This API does not require this parameter.
        :type Optional: str
        """
        self.IdCard = None
        self.Name = None
        self.VideoBase64 = None
        self.LivenessType = None
        self.ValidateData = None
        self.Optional = None
    def _deserialize(self, params):
        self.IdCard = params.get("IdCard")
        self.Name = params.get("Name")
        self.VideoBase64 = params.get("VideoBase64")
        self.LivenessType = params.get("LivenessType")
        self.ValidateData = params.get("ValidateData")
        self.Optional = params.get("Optional")
class LivenessRecognitionResponse(AbstractModel):
    """LivenessRecognition response structure.
    """
    def __init__(self):
        """
        :param BestFrameBase64: Best video frame after verification passes, Base64-encoded, jpg format.
        :type BestFrameBase64: str
        :param Sim: Similarity in [0.00, 100.00]. A score >= 70 is the recommended threshold
        for judging the same person (false-accept rate ~1/1000 at 70, ~1/10000 at 80); tune per scenario.
        :type Sim: float
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.BestFrameBase64 = None
        self.Sim = None
        self.RequestId = None
    def _deserialize(self, params):
        self.BestFrameBase64 = params.get("BestFrameBase64")
        self.Sim = params.get("Sim")
self.RequestId = params.get("RequestId") | [
"tencentcloudapi@tencent.com"
] | tencentcloudapi@tencent.com |
dd00b17559362e528e8945974b31d50d495d3ca3 | 473deae70ce35c63a9e01481a18268ac0fef56e4 | /DJANGO-DEPLOYMENT-MINDFIRE/BLOGGING/BLOGGING/asgi.py | e4f7b0b81d17c959dc01cb0ffdc0593b632047ac | [] | no_license | subhamMishra14/DJANGO-DEPLOYMENT-MINDFIRE | 8fbbd7b6837f17bc4fa506bb46c764aa0acd0322 | 35b8fe79ea9281c1c51de808507c5982ec9502c7 | refs/heads/master | 2020-08-31T22:12:03.376067 | 2019-12-18T14:00:28 | 2019-12-18T14:00:28 | 218,798,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for BLOGGING project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the settings module before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BLOGGING.settings')
# The ASGI callable that servers such as uvicorn/daphne import and serve.
application = get_asgi_application()
| [
"subham.mishra.14@gmail.com"
] | subham.mishra.14@gmail.com |
c8469ead17f2bf6575e6cf1a25391b4db6c88303 | 2cc9b2d7d99af939beca70e1c4f4994aec0e95e1 | /services/scores/project/config.py | 0b9579b70f9e187783c39036490f4d6670c80eae | [] | no_license | kelleyrw/testdriven-app | 5ad1da46216fc3b2a8f38e0041191772fe116a55 | 8f661c7a7efd6811319206996959c359cedefda5 | refs/heads/master | 2023-01-07T01:00:54.876432 | 2019-07-20T19:27:09 | 2019-07-20T19:27:09 | 161,355,306 | 0 | 0 | null | 2023-01-04T16:35:18 | 2018-12-11T15:34:54 | Python | UTF-8 | Python | false | false | 921 | py | # project/config.py
import os
class BaseConfig:
    """Base configuration"""
    # Debug/testing switches, disabled by default; subclasses override them.
    DEBUG = False
    TESTING = False
    # DEBUG_TB_* look like Flask-DebugToolbar settings — off outside development.
    DEBUG_TB_ENABLED = False
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    # Secret key is injected via the environment (None if unset).
    SECRET_KEY = os.environ.get("SECRET_KEY")
    # Disable SQLAlchemy's modification-tracking overhead.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Base URL of the companion users service, from the environment.
    USERS_SERVICE_URL = os.environ.get("USERS_SERVICE_URL")
class DevelopmentConfig(BaseConfig):
    """Development configuration"""
    # Enable the debug toolbar and point at the dev database from the env.
    DEBUG_TB_ENABLED = True
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestingConfig(BaseConfig):
    """Testing configuration"""
    # Tests run against the dedicated test database URL.
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_TEST_URL")
class StagingConfig(BaseConfig):
    """Staging configuration"""
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class ProductionConfig(BaseConfig):
    """Production configuration"""
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
| [
"kelleyrw@users.noreply.github.com"
] | kelleyrw@users.noreply.github.com |
d41da186fe71beeba5d6a5db47eb2df882f9a820 | 44221bc0507955c1e62d256182291ac95514c4f6 | /automatron_notify/__init__.py | e4ef215bc2aaa375436f09977691bf480f1315f1 | [
"MIT"
] | permissive | automatron/automatron-notify | 8c14ee5d8025ebefc7e9b7788e5414230c269676 | 4dcacfb3a56a51a7d1a7521f2ab9f7a895493f1a | refs/heads/master | 2021-01-17T14:31:31.323071 | 2014-03-25T08:18:46 | 2014-03-25T08:18:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from automatron.core.event import IAutomatronEventHandler
class IAutomatronNotifyHandler(IAutomatronEventHandler):
    # NOTE(review): the method below deliberately omits ``self``; this looks
    # like a zope.interface-style declaration (common in Twisted plugins) —
    # confirm against IAutomatronEventHandler before "fixing" the signature.
    def on_notify(server, username, title, body, body_as_html=None):
        """
        Called when a notification is triggered.

        ``body_as_html`` is an optional HTML rendering of ``body``
        (defaults to None).
        """
| [
"iksteen@gmail.com"
] | iksteen@gmail.com |
321adce537d7842bc56ed5889f848d7433663330 | 4b8d6d0c057049beabdc7a516bd0653af94894a6 | /DRF_nextjs/asgi.py | c3274d19c1591f6d6331af69cbe01c1a6e03c5b4 | [] | no_license | felipefoc/DRF-Next.Js | 71a4d35cd2f69ffe84fb76b37a7094cc2950a71f | f8a904ec17d21e88590719ba98202d9fbcccf11e | refs/heads/main | 2023-03-14T18:51:55.521287 | 2021-03-22T04:15:32 | 2021-03-22T04:15:32 | 350,203,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for DRF_nextjs project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the settings module before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DRF_nextjs.settings')
# The ASGI callable that servers such as uvicorn/daphne import and serve.
application = get_asgi_application()
| [
"felipemfmayer@gmail.com"
] | felipemfmayer@gmail.com |
141c85f367df5664a2789b37bc7d83c97dc4a197 | b5a29700c3516cf12f837e2284e3844546205d09 | /plugins/vipread_generic_plugin.py | 2771bd40386bf812df6f131de4bd2ab09fe0bf1a | [] | no_license | p1g3/Collect-Info-Research | f609823486f36460186cfde27f4be7c9c5a058ae | e8e7366677a8642c3bcf4b103e43378762e6673c | refs/heads/master | 2020-12-24T03:59:01.190032 | 2020-01-31T06:47:35 | 2020-01-31T06:47:35 | 237,374,792 | 37 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py |
import asyncio
import feedparser
import ssl
import pymongo
from loguru import logger
import datetime
from dateutil import parser
class vipread_generic_plugin:
    """Fetches the vipread RSS feed and stores today's articles in MongoDB."""

    def __init__(self, loop, collection, lock):
        """
        :param loop: asyncio event loop used to run the blocking feed parse.
        :param collection: pymongo collection the articles are written to.
        :param lock: asyncio lock serialising access to the collection.
        """
        # The feed host serves an untrusted certificate; verification is
        # disabled globally (NOTE: affects every HTTPS request in the process).
        ssl._create_default_https_context = ssl._create_unverified_context
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
        self.loop = loop
        self.rss = 'http://vipread.com/feed'
        self.collection = collection
        self.type = 'generic'
        self.lock = lock

    async def return_result(self):
        """Parse the feed and insert today's entries that are not stored yet."""
        logger.info("{} is running.", self.__class__.__name__)
        # feedparser.parse is blocking, so run it in the default executor.
        future = self.loop.run_in_executor(None, feedparser.parse, self.rss)
        try:
            # Fix: the deprecated ``loop=`` argument was dropped — wait_for
            # uses the running loop, and the kwarg is a TypeError on 3.10+.
            parse_result = await asyncio.wait_for(future, 10)
        except Exception:
            # Was a bare ``except:``; catching Exception keeps the original
            # best-effort behaviour but no longer swallows task cancellation.
            logger.warning("{} parse time out".format(self.rss))
            return
        if 'entries' not in parse_result:
            # ``has_key`` is Python 2 only; a membership test works on py3.
            logger.error('[Error Parse] {}', self.rss)
            return
        today = datetime.date.today()
        for entry in parse_result['entries']:
            article_time = parser.parse(entry['updated'])
            # Only keep entries published today.
            if (article_time.year, article_time.month, article_time.day) != (today.year, today.month, today.day):
                continue
            add_dict = {'type': self.type, 'title': entry['title'], 'link': entry['link'], 'is_send': 0}
            # ``async with`` replaces the deprecated ``await lock`` /
            # ``lock.release()`` pair and still releases on any error.
            async with self.lock:
                if self.collection.count_documents({'link': entry['link']}) < 1:
                    self.collection.insert_one(add_dict)
                    logger.info('[Generic] {} {}'.format(entry['title'], entry['link']))
if __name__ == '__main__':
    # Manual smoke test: connect to a local MongoDB and run one fetch cycle.
    client = pymongo.MongoClient(host='localhost', port=27017)
    db = client.info_collect
    collection = db['infos']
    lock = asyncio.Lock()
    loop = asyncio.get_event_loop()
    class_name = vipread_generic_plugin(loop,collection,lock)
    loop.run_until_complete(class_name.return_result())
| [
"p1g3cyx@gmail.com"
] | p1g3cyx@gmail.com |
1fb219910dbc733d206df189140aba037582bb5d | 462e68b21feb4aab5bf89519a36088b2aa5efdb7 | /Decision Tree ID3/Introduction to Decision Trees-137.py | f8ef9b91511c3f98a2431f58eca3f6ee4615f9aa | [] | no_license | JKChang2015/Data-Analysis-Python | 19458bc0aa7ea9dbd2a34866a798548a9a18d93f | 9c5da4c0d17a3768f3f853bc09c7700fbb2840b9 | refs/heads/master | 2020-03-09T06:44:26.515614 | 2016-07-17T01:39:07 | 2016-07-17T01:39:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,501 | py | ## 3. Converting categorical variables ##
# Convert a single column from text categories into numbers.
# NOTE(review): ``income`` is the DataFrame provided by the exercise
# environment; ``pandas.Categorical.from_array`` is deprecated in modern
# pandas (use ``pandas.Categorical(...)`` instead).
for name in ["workclass","education","marital_status","occupation","relationship","race","sex","native_country","high_income"]:
    col = pandas.Categorical.from_array(income[name])
    income[name] = col.codes
## 5. Performing a split ##
# Enter your code here.
# Split the rows on the encoded workclass value 4 vs everything else.
private_incomes = income[income["workclass"] == 4]
public_incomes = income[income["workclass"] != 4]
## 8. Entropy ##
import math
# We'll do the same calculation we did above, but in Python.
# Passing 2 as the second parameter to math.log will take a base 2 log.
entropy = -(2/5 * math.log(2/5, 2) + 3/5 * math.log(3/5, 2))
print(entropy)
# Entropy of the real high_income column, from its two class probabilities.
income_entropy = -((len(income[income["high_income"] == 0]) / income.shape[0] ) * math.log((len(income[income["high_income"] == 0]) / income.shape[0] ), 2) +(len(income[income["high_income"] == 1]) / income.shape[0] ) * math.log((len(income[income["high_income"] == 1]) / income.shape[0] ), 2) )
## 9. Information gain ##
import numpy
def calc_entropy(column):
    """
    Calculate entropy given a pandas Series, list, or numpy array.
    """
    # Occurrence counts of each value, turned into probabilities.
    probabilities = numpy.bincount(column) / len(column)
    # Shannon entropy: -sum(p * log2(p)) over the non-zero probabilities.
    return -sum(p * math.log(p, 2) for p in probabilities if p > 0)
# Verify our function matches our answer from earlier.
entropy = calc_entropy([1,1,0,0,1])
print(entropy)
# Information gain of a hypothetical 80/20 split of the same 5-element column.
information_gain = entropy - ((.8 * calc_entropy([1,1,0,0])) + (.2 * calc_entropy([1])))
print(information_gain)
# Information gain of splitting high_income at the median age.
entropy = calc_entropy(income["high_income"])
med = numpy.median(income["age"])
left = income[income["age"] <= med]["high_income"]
right = income[income["age"] > med]["high_income"]
age_information_gain = entropy - ((left.shape[0] / income.shape[0]) * calc_entropy(left) + ((right.shape[0] / income.shape[0]) * calc_entropy(right)))
## 10. Finding the best split ##
def calc_information_gain(data, split_name, target_name):
    """
    Calculate information gain given a dataset, column to split on, and target.
    """
    # Entropy of the target column before splitting.
    parent_entropy = calc_entropy(data[target_name])

    # Split the data into two halves at the median of the chosen column.
    split_column = data[split_name]
    split_point = split_column.median()
    subsets = (data[split_column <= split_point],
               data[split_column > split_point])

    # Weighted average entropy of the two halves.
    weighted_child_entropy = sum(
        (len(subset) / len(data)) * calc_entropy(subset[target_name])
        for subset in subsets)

    # Information gain = reduction in entropy from making the split.
    return parent_entropy - weighted_child_entropy
# Verify that our answer is the same as in the last screen.
print(calc_information_gain(income, "age", "high_income"))

columns = ["age", "workclass", "education_num", "marital_status", "occupation",
           "relationship", "race", "sex", "hours_per_week", "native_country"]
# Information gain of a median split on each candidate column
# (list comprehension instead of a manual append loop).
information_gains = [
    calc_information_gain(income, column, "high_income") for column in columns
]
# Column whose split yields the highest information gain.
highest_gain = columns[information_gains.index(max(information_gains))]
"noreply@github.com"
] | JKChang2015.noreply@github.com |
80939f748aac5f3242ea0bc5610644cacf4f8ba9 | d31d744f62c09cb298022f42bcaf9de03ad9791c | /lingvo/lingvo/tasks/car/input_preprocessors.py | 5848311b990c04f1afc36ede62048283bad93104 | [
"Apache-2.0"
] | permissive | yuhuofei/TensorFlow-1 | b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0 | 36eb6994d36674604973a06159e73187087f51c6 | refs/heads/master | 2023-02-22T13:57:28.886086 | 2021-01-26T14:18:18 | 2021-01-26T14:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136,426 | py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input preprocessors."""
from lingvo import compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.tasks.car import car_lib
from lingvo.tasks.car import detection_3d_lib
from lingvo.tasks.car import geometry
from lingvo.tasks.car import ops
import numpy as np
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops
# pylint:enable=g-direct-tensorflow-import
def _ConsistentShuffle(tensors, seed):
  """Shuffle multiple tensors with the same shuffle order."""
  # One shared permutation of the first tensor's leading dimension, applied
  # to every tensor so rows stay aligned across them.
  permutation = tf.random.shuffle(
      tf.range(tf.shape(tensors[0])[0]), seed=seed)
  return tuple(tf.gather(t, permutation) for t in tensors)
def _GetApplyPointMaskFn(points_mask):
  """Returns a function that applies a mask to one of our points tensors."""

  def _ApplyPointMaskFn(points_tensor):
    """Applies a mask to the points tensor."""
    # Pass None through untouched; mask everything else.
    if points_tensor is None:
      return None
    return tf.boolean_mask(points_tensor, points_mask)

  return _ApplyPointMaskFn
def _Dense(sparse):
  """Converts a SparseTensor into a dense tensor, filling empties with 0.

  NOTE(review): tf.sparse_to_dense is deprecated in TF2 in favor of
  tf.sparse.to_dense — confirm the lingvo compat layer's behavior (in
  particular index-ordering validation) before migrating.
  """
  return tf.sparse_to_dense(
      sparse_indices=sparse.indices,
      output_shape=sparse.dense_shape,
      sparse_values=sparse.values,
      default_value=0)
class Preprocessor(base_layer.BaseLayer):
  """Abstract base class for input preprocessors.

  A preprocessor receives the merged output of all extractors — a `NestedMap`
  of features for a single example (no batch dimension) — and rewrites it.
  Entries may be added, modified, or removed; entries the preprocessor does
  not care about must be passed through explicitly. For example, a data
  augmentation preprocessor modifies only the fields it augments and then
  returns the whole features NestedMap.

  Subclasses implement three parallel transformations:

  1) TransformFeatures(features): transform the feature tensors themselves.
  2) TransformShapes(shapes): produce the `NestedMap` of TensorShapes that
     matches what TransformFeatures outputs.
  3) TransformDTypes(dtypes): produce the `NestedMap` of dtypes that
     matches what TransformFeatures outputs.
  """

  @classmethod
  def Params(cls):
    """Default params."""
    p = super().Params()
    # Default the layer name to the subclass name.
    p.name = cls.__name__
    return p

  def FProp(self, theta, features):
    """Performs TransformFeatures."""
    del theta  # unused
    return self.TransformFeatures(features)

  def TransformFeatures(self, features):
    """Transforms the features for one example.

    Args:
      features: A `NestedMap` of tensors.

    Returns:
      A `NestedMap` of transformed tensors.
    """
    raise NotImplementedError()

  def TransformShapes(self, shapes):
    """Returns the shapes corresponding to TransformFeatures.

    Args:
      shapes: A `NestedMap` of TensorShapes for the pre-transformed features.

    Returns:
      A `NestedMap` of TensorShapes for the transformed features.
    """
    raise NotImplementedError()

  def TransformDTypes(self, dtypes):
    """Returns the dtypes corresponding to TransformFeatures.

    Args:
      dtypes: A `NestedMap` of DTypes for the pre-transformed features.

    Returns:
      A `NestedMap` of DTypes for the transformed features.
    """
    raise NotImplementedError()
class EntryPreprocessor(Preprocessor):
  """A Preprocessor that transforms a NestedMap sub-structure.

  Some preprocessors want to apply the same function to every NestedMap
  sub-structure whose key matches one of a set of prefixes. For example,
  with inputs such as::

    - lasers.front.xyz
                  .features
    - lasers.side.xyz
                 .features

  a transform of the .xyz fields on both structures can be expressed by
  implementing::

    UpdateEntry(entry):
    UpdateEntryShape(shapes):
    UpdateEntryDType(dtypes):

  and setting self.params.prefixes = ['lasers.front', 'lasers.side'],
  where each prefix names a fully-qualified NestedMap sub-structure. The
  functions receive just that sub-structure and may add, modify, or delete
  its entries in place.

  Example:

    def UpdateEntry(self, entry):
       # entry is a NestedMap.
       assert 'xyz' in entry
       entry.xyz = self._ApplyFn(entry.xyz)
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('prefixes', ['pseudo_ri'], 'List of keys to apply to.')
    return p

  def _ApplyToMatchingStructure(self, nested_map, fn):
    """Apply fn to any NestedMap sub-structure whose prefix is in p.prefixes."""
    p = self.params
    # Work on a copy so the caller's structure is not mutated.
    result = nested_map.DeepCopy()
    touched = []
    for prefix in p.prefixes:
      sub = result.GetItem(prefix)
      if not isinstance(sub, py_utils.NestedMap):
        raise TypeError('Prefix key {} selected a {}, not a NestedMap!'.format(
            prefix, type(sub)))
      # fn mutates the selected sub-structure in place.
      fn(sub)
      touched.append(sub)
    return result, touched

  def UpdateEntry(self, entry):
    """Update the Tensors in a NestedMap entry.

    Args:
      entry: A NestedMap of Tensors.
    """
    raise NotImplementedError()

  def UpdateEntryShape(self, shapes):
    """Update the shapes in a NestedMap entry.

    Args:
      shapes: A NestedMap of TensorShapes.
    """
    raise NotImplementedError()

  def UpdateEntryDType(self, dtypes):
    """Transform the dtypes in a NestedMap entry.

    Args:
      dtypes: A NestedMap of dtypes.
    """
    raise NotImplementedError()

  def TransformFeatures(self, features):
    updated, _ = self._ApplyToMatchingStructure(features, self.UpdateEntry)
    return updated

  def TransformShapes(self, shapes):
    updated, _ = self._ApplyToMatchingStructure(shapes, self.UpdateEntryShape)
    return updated

  def TransformDTypes(self, dtypes):
    updated, _ = self._ApplyToMatchingStructure(dtypes, self.UpdateEntryDType)
    return updated
class CreateDecoderCopy(Preprocessor):
  """Creates references to current lasers, images, and labels.

  Useful when the data is transformed further downstream and the decoder
  needs the originals. The set of copied keys can be customized by
  overriding the default `keys` param.

  This preprocessor expects features to optionally contain the following
  keys:

  - lasers - a NestedMap of tensors
  - images - a NestedMap of tensors
  - labels - a NestedMap of tensors

  Adds the following features (for each key that existed):

  - decoder_copy.lasers - a copy of the lasers NestedMap
  - decoder_copy.images - a copy of the images NestedMap
  - decoder_copy.labels - a copy of the labels NestedMap

  By default the copied laser features are also padded; set the
  `pad_lasers` param to None to disable that.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keys', ['lasers', 'labels', 'images'],
             'Keys to look for and copy if exists.')
    p.Define('parent_key', 'decoder_copy', 'The key to nest the copies under.')
    p.Define('pad_lasers', PadLaserFeatures.Params(),
             'Params for a layer that pads the laser features.')
    p.name = 'create_decoder_copy'
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.pad_lasers is not None:
      self.CreateChild('pad_lasers', p.pad_lasers)

  def _DeepCopyIfExists(self, keys, nested_map, parent_key):
    """Deep copy a specific key to a parent key if it exists."""
    for key in keys:
      if key not in nested_map:
        continue
      # Lazily create the parent entry on the first copied key.
      if parent_key not in nested_map:
        nested_map[parent_key] = py_utils.NestedMap()
      nested_map[parent_key][key] = nested_map[key].DeepCopy()
    return nested_map

  def TransformFeatures(self, features):
    p = self.params
    features = self._DeepCopyIfExists(p.keys, features, p.parent_key)
    if p.pad_lasers is None:
      return features
    features[p.parent_key] = self.pad_lasers.TransformFeatures(
        features[p.parent_key])
    return features

  def TransformShapes(self, shapes):
    p = self.params
    shapes = self._DeepCopyIfExists(p.keys, shapes, p.parent_key)
    if p.pad_lasers is None:
      return shapes
    shapes[p.parent_key] = self.pad_lasers.TransformShapes(
        shapes[p.parent_key])
    return shapes

  def TransformDTypes(self, dtypes):
    p = self.params
    dtypes = self._DeepCopyIfExists(p.keys, dtypes, p.parent_key)
    if p.pad_lasers is None:
      return dtypes
    dtypes[p.parent_key] = self.pad_lasers.TransformDTypes(
        dtypes[p.parent_key])
    return dtypes
class FilterByKey(Preprocessor):
  """Keeps only the feature entries whose keys match a configured prefix.

  Dropping unused fields reduces the amount of data carried through the
  pipeline. For example, during training a pillars-based model whose
  preprocessor already maps points to a grid has no need for the raw
  laser points.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'keep_key_prefixes', [''], 'Prefixes of keys to keep. If this '
        'contains the empty string, then it will keep all the keys.')
    return p

  def _FilterFn(self, key, entry):
    """Returns True iff `key` starts with one of the configured prefixes."""
    del entry  # unused
    return any(
        key.startswith(prefix) for prefix in self.params.keep_key_prefixes)

  def TransformFeatures(self, features):
    return features.FilterKeyVal(self._FilterFn)

  def TransformShapes(self, shapes):
    return shapes.FilterKeyVal(self._FilterFn)

  def TransformDTypes(self, dtypes):
    return dtypes.FilterKeyVal(self._FilterFn)
class FilterGroundTruthByNumPoints(Preprocessor):
  """Removes ground truth boxes with less than params.min_num_points points.

  This preprocessor expects features to contain the following keys::

    labels.labels of shape [..., L]
    labels.bboxes_3d of shape [..., L, 7]
    labels.bboxes_3d_mask of shape [..., L]
    labels.unfiltered_bboxes_3d_mask of shape [..., L]
    labels.bboxes_3d_num_points of shape [..., L].

  Boxes containing fewer than params.min_num_points points are turned off:
  their label is set to params.background_id (default 0) and their entry in
  labels.bboxes_3d_mask is zeroed.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'min_num_points', 1, 'The minimum number of points allowed before '
        'the associated ground truth box is turned off. Defaults to 1.')
    p.Define(
        'background_id', 0, 'The ID of the background class we set '
        'filtered boxes to. Defaults to 0.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    labels = features.labels
    # Keep boxes that contain at least min_num_points points.
    keep = tf.greater_equal(labels.bboxes_3d_num_points, p.min_num_points)
    background = p.background_id * tf.ones_like(labels.labels)
    labels.labels = tf.where(keep, labels.labels, background)
    labels.bboxes_3d_mask *= tf.cast(keep, tf.float32)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class FilterGroundTruthByDifficulty(Preprocessor):
  """Removes groundtruth boxes based on detection difficulty.

  This preprocessor expects features to contain the following keys::

    labels.single_frame_detection_difficulties of shape [..., L]
    labels.labels of shape [..., L]
    labels.bboxes_3d_mask of shape [..., L]
    labels.unfiltered_bboxes_3d_mask of shape [..., L]

  Boxes whose single_frame_detection_difficulties exceeds
  p.difficulty_threshold have their label set to p.background_id and their
  entry in labels.bboxes_3d_mask zeroed.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'background_id', 0, 'The ID of the background class we set '
        'filtered boxes to. Defaults to 0.')
    p.Define(
        'difficulty_threshold', 1,
        'Filter groundtruth bounding boxes whose detection difficulty is '
        'greater than `difficulty_threshold`')
    return p

  def TransformFeatures(self, features):
    p = self.params
    labels = features.labels
    # Keep boxes whose difficulty does not exceed the threshold.
    keep = tf.less_equal(labels.single_frame_detection_difficulties,
                         p.difficulty_threshold)
    background = p.background_id * tf.ones_like(labels.labels)
    labels.labels = tf.where(keep, labels.labels, background)
    labels.bboxes_3d_mask *= tf.cast(keep, tf.float32)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class CountNumberOfPointsInBoxes3D(Preprocessor):
  """Computes bboxes_3d_num_points.

  This preprocessor expects features to contain the following keys:

  - lasers.points_xyz of shape [P, 3]
  - labels.bboxes_3d of shape [L, 7]
  - labels.bboxes_3d_mask of shape [L]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Adds the following features:

    labels.bboxes_3d_num_points: [L] - integer tensor containing the number
      of laser points for each corresponding bbox.
  """

  def TransformFeatures(self, features):
    xyz = features.lasers.points_xyz
    # Drop padded points so they are not counted.
    if 'points_padding' in features.lasers:
      valid_mask = 1 - features.lasers.points_padding
      xyz = tf.boolean_mask(xyz, valid_mask)

    # [P, L] membership of each point in each box, summed over points.
    membership = geometry.IsWithinBBox3D(xyz, features.labels.bboxes_3d)
    num_points_per_box = tf.reduce_sum(
        tf.cast(membership, tf.int32), axis=0, keepdims=False)
    # Zero out the counts of invalid (masked-out) boxes.
    num_points_per_box *= tf.cast(features.labels.bboxes_3d_mask, tf.int32)
    features.labels.bboxes_3d_num_points = num_points_per_box
    return features

  def TransformShapes(self, shapes):
    num_bboxes = shapes.labels.bboxes_3d[0]
    shapes.labels.bboxes_3d_num_points = tf.TensorShape([num_bboxes])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.labels.bboxes_3d_num_points = tf.int32
    return dtypes
class AddPerPointLabels(Preprocessor):
  """Computes the class and bbox id of each point.

  This preprocessor expects features to contain the following keys:

  - lasers.points_xyz of shape [P, 3]
  - labels.bboxes_3d of shape [L, 7]
  - labels.labels of shape [L]

  This makes an assumption that each point is only in 1 box, which should
  almost always true in 3D. In cases where this is not true, the largest
  label integer and largest bbox_id will be assigned.

  NOTE: Be very careful that this is performed after any modifications
  to the semantic labels of each point in the pointcloud. Examples of this
  would be operators like GroundTruthAugmentation, or DropBoxesOutOfRange.

  Adds the following features:
    lasers.points_label: [P] - integer tensor containing the class id of each
      point.
    lasers.points_bbox_id: [P] - integer tensor containing box id of each
      point from 0 to num_bboxes, where an id of num_bboxes indicates a
      background point.
    lasers.points_bbox_3d: [P, 7] - float tensor containing bounding box of
      each point.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'per_dimension_adjustment', None,
        'A list of len 3 of floats with the amount (in meters) to add to '
        'each dimension of the box before using it to select points. '
        'If enabled, this is designed to protect against overly tight box '
        'annotations that appear in KITTI.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    points_xyz = features.lasers.points_xyz
    bboxes_3d = features.labels.bboxes_3d
    num_points, _ = py_utils.GetShape(points_xyz)
    num_bboxes, _ = py_utils.GetShape(bboxes_3d)

    # Optionally inflate each box's (dx, dy, dz) dims before the membership
    # test; the bbox layout here is [x, y, z, dx, dy, dz, heading], so the
    # adjustment vector pads the three center and one heading entry with 0.
    if p.per_dimension_adjustment:
      if len(p.per_dimension_adjustment) != 3:
        raise ValueError(
            'param `per_dimension_adjustment` expected to be len 3.')
      dims_adjustment = tf.constant([0, 0, 0] + p.per_dimension_adjustment +
                                    [0])
      bboxes_3d = bboxes_3d + dims_adjustment

    # Find which points are in each box and what class each box is.
    points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz, bboxes_3d)
    points_in_bboxes_mask = tf.cast(points_in_bboxes_mask, tf.int32)
    points_in_bboxes_mask = py_utils.HasShape(points_in_bboxes_mask,
                                              [num_points, num_bboxes])

    # points_in_bboxes_mask is a [num_points, num_bboxes] 0/1 tensor
    # indicating whether that point is in a given box.
    # Each point should only be in one box, so after broadcasting the label
    # across the binary mask, we do a reduce_max to get the max label id
    # for each point. Since each point only belongs to one box, it will be
    # the only non-zero (background) label in that box.
    # Note: We assume background to be class_id == 0
    points_label = tf.reduce_max(
        points_in_bboxes_mask * features.labels.labels, axis=1)
    points_bbox_id = tf.argmax(
        points_in_bboxes_mask, axis=1, output_type=tf.int32)
    # If the class is background, make its id == num_bboxes
    points_bbox_id = tf.where(points_label > 0, points_bbox_id,
                              tf.broadcast_to(num_bboxes, [num_points]))

    # For each point, get the bbox_3d data. A dummy all-zero box is appended
    # so that background points (id == num_bboxes) gather a zero box.
    dummy_bbox = tf.constant([[0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)
    bboxes_3d = tf.concat([bboxes_3d, dummy_bbox], axis=0)
    points_bbox_3d = tf.gather(bboxes_3d, points_bbox_id)

    points_label = tf.reshape(points_label, [num_points])
    points_bbox_id = tf.reshape(points_bbox_id, [num_points])
    features.lasers.points_label = points_label
    features.lasers.points_bbox_id = points_bbox_id
    features.lasers.points_bbox_3d = points_bbox_3d
    return features

  def TransformShapes(self, shapes):
    num_points = shapes.lasers.points_xyz[0]
    shapes.lasers.points_label = tf.TensorShape([num_points])
    shapes.lasers.points_bbox_id = tf.TensorShape([num_points])
    shapes.lasers.points_bbox_3d = tf.TensorShape([num_points, 7])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.lasers.points_label = tf.int32
    dtypes.lasers.points_bbox_id = tf.int32
    dtypes.lasers.points_bbox_3d = tf.float32
    return dtypes
class PointsToGrid(Preprocessor):
  """Bins points to a 3D-grid using custom op: ops.point_to_grid.

  Expects features to have keys:
  - lasers.points_xyz of shape [P, 3]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  If normalizing the labels is enabled, then also expects:
  - labels.weights
  - labels.bboxes_td
  - labels.bboxes_td_mask
  - labels.bboxes_3d_mask

  Let:
    gx, gy, gz = p.grid_size
    F = 3 + num_laser_features

  Adds the following features:
    grid_centers: [gx, gy, gz, 3]: For each grid cell, the (x,y,z)
      floating point coordinate of its center.
    grid_num_points: [gx, gy, gz]: The number of points in each grid
      cell (integer).
    laser_grid: [gx, gy, gz, num_points_per_cell, F] - A 5D floating
      point Tensor containing the laser data placed into a fixed grid.

  Modifies the bboxes in labels to also be within the grid range x/y by
  default.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_points_per_cell', 100,
             'The maximum number of points per cell.')
    p.Define('grid_size', (40, 40, 1), 'Grid size along x,y,z axis.')

    # The max range of x and y is [-80, 80].
    p.Define('grid_range_x', (-80, 80), 'The X-axis Range covered by the grid')
    p.Define('grid_range_y', (-80, 80), 'The Y-axis Range covered by the grid')
    p.Define('grid_range_z', (-2, 4), 'The Z-axis Range covered by the grid')

    p.Define('normalize_td_labels', True,
             'Whether to clip the labels to the grid limits.')
    return p

  def _NormalizeLabels(self, ymin, xmin, ymax, xmax, x_range, y_range):
    """Normalizes the bboxes within a given range.

    Each coordinate is rescaled to [0, 1] relative to its axis range.
    """
    assert x_range, 'Must specify x_range if clipping.'
    assert y_range, 'Must specify y_range if clipping.'
    assert len(x_range) == 2, 'x_range %s must be 2 elements.' % x_range
    assert len(y_range) == 2, 'y_range %s must be 2 elements.' % y_range

    x_range_min = x_range[0]
    x_range_len = x_range[1] - x_range[0]
    y_range_min = y_range[0]
    y_range_len = y_range[1] - y_range[0]

    xmin = tf.cast(xmin - x_range_min, tf.float32) / tf.cast(
        x_range_len, tf.float32)
    xmax = tf.cast(xmax - x_range_min, tf.float32) / tf.cast(
        x_range_len, tf.float32)
    ymin = tf.cast(ymin - y_range_min, tf.float32) / tf.cast(
        y_range_len, tf.float32)
    ymax = tf.cast(ymax - y_range_min, tf.float32) / tf.cast(
        y_range_len, tf.float32)

    return ymin, xmin, ymax, xmax

  def TransformFeatures(self, features):
    p = self.params

    points_xyz = features.lasers.points_xyz
    points_feature = features.lasers.points_feature
    # Keep only real (unpadded) points before gridding.
    if ('points_padding' in features.lasers and
        features.lasers.points_padding is not None):
      points_mask = 1 - features.lasers.points_padding
      points_xyz = tf.boolean_mask(points_xyz, points_mask)
      points_feature = tf.boolean_mask(points_feature, points_mask)

    # Concatenate xyz with per-point features: [P, 3 + F'].
    points_full = tf.concat([points_xyz, points_feature], axis=-1)
    points_grid_full, grid_centers, num_points = ops.point_to_grid(
        points_full, p.num_points_per_cell, p.grid_size[0], p.grid_size[1],
        p.grid_size[2], p.grid_range_x, p.grid_range_y, p.grid_range_z)

    features.laser_grid = points_grid_full
    features.grid_centers = grid_centers
    features.grid_num_points = num_points

    if p.normalize_td_labels:
      # Normalize bboxes_td w.r.t grid range.
      obb = features.labels
      x_range = p.grid_range_x
      y_range = p.grid_range_y
      ymin, xmin, ymax, xmax = tf.unstack(obb.bboxes_td[..., :4], axis=-1)
      ymin, xmin, ymax, xmax = self._NormalizeLabels(
          ymin, xmin, ymax, xmax, x_range=x_range, y_range=y_range)
      obb.bboxes_td = tf.concat(
          [tf.stack([ymin, xmin, ymax, xmax], axis=-1), obb.bboxes_td[..., 4:]],
          axis=-1)

    return features

  def TransformShapes(self, shapes):
    p = self.params
    shapes.grid_centers = tf.TensorShape(list(p.grid_size) + [3])
    shapes.grid_num_points = tf.TensorShape(list(p.grid_size))
    shapes.laser_grid = tf.TensorShape(
        list(p.grid_size) +
        [p.num_points_per_cell, 3 + shapes.lasers.points_feature[-1]])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.grid_centers = tf.float32
    dtypes.grid_num_points = tf.int32
    dtypes.laser_grid = tf.float32
    return dtypes
class _PointPillarGridSettings:
  """Settings for PointPillars model defined in paper.

  https://arxiv.org/abs/1812.05784
  """
  # Chooses grid sizes that are a multiple of 16 to support point pillars
  # model requirements. These also happen to match the values
  # in the PointPillars paper (voxel width of 0.16m in x, y)
  GRID_X = 432
  GRID_Y = 496
  GRID_Z = 1

  # These fields are set in the subclasses.
  GRID_X_RANGE = None
  GRID_Y_RANGE = None
  GRID_Z_RANGE = None

  @classmethod
  def UpdateGridParams(cls, grid_params):
    """Apply PointPillars settings to grid_params."""
    grid_params.grid_size = (cls.GRID_X, cls.GRID_Y, cls.GRID_Z)
    grid_params.grid_range_x = cls.GRID_X_RANGE
    grid_params.grid_range_y = cls.GRID_Y_RANGE
    grid_params.grid_range_z = cls.GRID_Z_RANGE

  @classmethod
  def UpdateAnchorGridParams(cls, anchor_params, output_stride=2):
    """Apply PointPillars settings to anchor_params.

    Args:
      anchor_params: params object to update in place.
      output_stride: downsampling factor of the backbone; the anchor grid
        is the pillar grid divided by this factor in x and y.
    """
    # Set anchor settings to match grid settings.
    # Grid size for anchors is half the resolution.
    anchor_params.grid_size = (cls.GRID_X // output_stride,
                               cls.GRID_Y // output_stride, cls.GRID_Z)
    anchor_params.grid_range_x = cls.GRID_X_RANGE
    anchor_params.grid_range_y = cls.GRID_Y_RANGE
    # Grid along z axis should be pinned to 0.
    anchor_params.grid_range_z = (0, 0)
def MakeGridSettings(grid_x_range, grid_y_range, grid_z_range, grid_x, grid_y,
                     grid_z):
  """Returns configured class for PointPillar grid settings."""
  # Build the subclass dynamically with the requested overrides; this is
  # equivalent to declaring a nested class with these attributes.
  overrides = {
      'GRID_X_RANGE': grid_x_range,
      'GRID_Y_RANGE': grid_y_range,
      'GRID_Z_RANGE': grid_z_range,
      'GRID_X': grid_x,
      'GRID_Y': grid_y,
      'GRID_Z': grid_z,
  }
  return type('GridSettings', (_PointPillarGridSettings,), overrides)
# Car detection grid from the PointPillars paper: x in [0, 69.12)m,
# y in [-39.68, 39.68)m, z in [-3, 1)m over a 432x496x1 grid.
PointPillarGridCarSettings = MakeGridSettings(
    grid_x_range=(0, 69.12),
    grid_y_range=(-39.68, 39.68),
    grid_z_range=(-3, 1),
    grid_x=432,
    grid_y=496,
    grid_z=1)

# Pedestrian/cyclist grid: smaller spatial range than the car grid.
# NOTE(review): the same 432x496 cell counts are reused here over the
# smaller range, yielding finer voxels than the paper's 0.16m — confirm
# this is intentional rather than a copy of the car grid size.
PointPillarGridPedCycSettings = MakeGridSettings(
    grid_x_range=(0, 47.36),
    grid_y_range=(-19.84, 19.84),
    grid_z_range=(-2.5, 0.5),
    grid_x=432,
    grid_y=496,
    grid_z=1)
class GridToPillars(Preprocessor):
  """Create pillars from a grid of points.

  Expects features to have keys:
    grid_centers: [gx, gy, gz, 3]

    grid_num_points: [gx, gy, gz]

    laser_grid: [gx, gy, gz, num_points_per_cell, F]

  Adds the following features:
    point_count: [num_pillars]. The number of points in the pillar.

    point_locations: [num_pillars, 3]. The grid location of each pillar.

    pillar_points: [num_pillars, num_points_per_cell, F]. Points of each
      pillar.

  Drops the following features by default:
    laser_grid
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_points_per_cell', 100,
             'The maximum number of points per cell.')
    p.Define('num_pillars', 12000, 'The maximum number of pillars to produce.')
    p.Define('drop_laser_grid', True, 'Whether to drop the laser_grid feature.')
    # The density based sampler is more expensive.
    p.Define('use_density_sampler', False,
             'Use a density based sampler during pillar selection.')
    return p

  def _GumbelTransform(self, probs):
    """Adds gumbel noise to log probabilities for multinomial sampling.

    This enables fast sampling from a multinomial distribution without
    replacement. See https://arxiv.org/abs/1611.01144 for details.
    A colab that demonstrates this in practice is here:
    http://colab/drive/1iuMt2n_r7dKPQG9T0UVMuK3fkbBayKjd

    Args:
      probs: A 1-D float tensor containing probabilities, summing to 1.

    Returns:
      A 1-D float tensor of the same size of probs, with gumbel noise added to
      log probabilities. Taking the top k elements from this provides a
      multinomial sample without replacement.
    """
    p = self.params
    log_prob = tf.math.log(probs)
    probs_shape = tf.shape(probs)
    uniform_samples = tf.random.uniform(
        shape=probs_shape,
        dtype=probs.dtype,
        seed=p.random_seed,
        name='uniform_samples')
    # Standard Gumbel(0, 1) noise: -log(-log(U)), U ~ Uniform(0, 1).
    gumbel_noise = -tf.math.log(-tf.math.log(uniform_samples))
    return gumbel_noise + log_prob

  def _DensitySample(self, num_points):
    """Samples up to p.num_pillars grid cells, weighted by point density.

    Args:
      num_points: [gx, gy, gz] integer tensor of per-cell point counts.

    Returns:
      [num_locations, 3] integer tensor of sampled grid coordinates.
    """
    p = self.params

    # Flatten to [nx * ny * nz] for convenience during sampling.
    num_grid_points = np.prod(p.grid_size)
    flattened_num_points = tf.reshape(num_points, [num_grid_points])

    # Normalize flattened_num_points to sum to 1.
    flattened_num_points = tf.cast(flattened_num_points, tf.float32)
    flattened_num_points /= tf.reduce_sum(flattened_num_points)

    # TODO(jngiam): Consider generalizing this to enable other methods of
    # sampling: e.g., use largest deviation in z-axis. The gumbel transform
    # can still be applied regardless.

    # Add gumbel noise for multinomial sampling.
    sampling_logits = self._GumbelTransform(flattened_num_points)
    _, locations = tf.nn.top_k(
        sampling_logits, k=min(p.num_pillars, num_grid_points))

    # Unravel coordinates back to grid locations.
    locations = tf.unravel_index(locations, p.grid_size)

    # Unravel index will return a 3 x num_locations tensor, this needs to be
    # transposed so that we have it as num_locations x 3.
    locations = py_utils.HasShape(locations, [3, -1])
    locations = tf.transpose(locations)

    return locations

  def TransformFeatures(self, features):
    p = self.params

    num_points = features.grid_num_points
    if p.use_density_sampler:
      locations = self._DensitySample(num_points)
    else:
      # Select non-empty cells uniformly at random.
      locations = tf.random.shuffle(tf.cast(tf.where(num_points > 0), tf.int32))

    num_features = py_utils.GetShape(features.laser_grid)[-1]

    # [nx, ny, nz, np, 4] (x, y, z, f)
    points = features.laser_grid
    # [K, np, 4] (x, y, z, f)
    points = tf.gather_nd(points, locations)
    # [nx, ny, nz, 1, 3] (cx, cy, cz)
    centers = features.grid_centers[..., tf.newaxis, :]
    # [K, 1, 3] (cx, cy, cz)
    centers = tf.gather_nd(centers, locations)

    # NOTE: If there are fewer pillars than p.num_pillars, the following
    # padding creates many 'fake' pillars at grid cell (0, 0, 0) with
    # an all-zero pillar. Hopefully, the model can learn to ignore these.
    #
    # pillar_points[i, :, :] is the pillar located at pillar_locations[i, :3],
    # and pillar_points[i, :, :] == points_grid_full[pillar_locations[i, :3]].
    # for 0 <= i < pillar_count;
    # pillar_locations[i, :3] are zero-ed, for i >= pillar_count.
    features.pillar_count = tf.shape(locations)[0]
    features.pillar_locations = py_utils.PadOrTrimTo(locations,
                                                     [p.num_pillars, 3])
    features.pillar_points = py_utils.PadOrTrimTo(
        points, [p.num_pillars, p.num_points_per_cell, num_features])
    features.pillar_centers = py_utils.PadOrTrimTo(centers,
                                                   [p.num_pillars, 1, 3])

    if p.drop_laser_grid:
      del features['laser_grid']

    return features

  def TransformShapes(self, shapes):
    p = self.params
    num_features = shapes.laser_grid[-1]
    shapes.pillar_count = tf.TensorShape([])
    shapes.pillar_locations = tf.TensorShape([p.num_pillars, 3])
    shapes.pillar_points = tf.TensorShape(
        [p.num_pillars, p.num_points_per_cell, num_features])
    shapes.pillar_centers = tf.TensorShape([p.num_pillars, 1, 3])
    if p.drop_laser_grid:
      del shapes['laser_grid']

    return shapes

  def TransformDTypes(self, dtypes):
    p = self.params
    dtypes.pillar_count = tf.int32
    dtypes.pillar_locations = tf.int32
    dtypes.pillar_points = tf.float32
    dtypes.pillar_centers = tf.float32
    if p.drop_laser_grid:
      del dtypes['laser_grid']

    return dtypes
class GridAnchorCenters(Preprocessor):
  """Create anchor centers on a grid.

  Anchors are placed in the middle of each grid cell. For example, on a 2D grid
  range (0 -> 10, 0 -> 10) with a 10 x 5 grid size, the anchors will be placed
  at [(0.5, 1), (0.5, 3), ... , (9.5, 7), (9.5, 9)].

  Adds the following features:
    anchor_centers: [num_locations, 3] - Floating point output containing the
      center (x, y, z) locations for tiling anchor boxes.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'grid_size', (20, 20, 1), 'Grid size along x,y,z axis. This will '
        'be used to generate the anchor center locations. Note that this '
        'would likely be different from the grid_* parameters in '
        'LaserGridExtractor: the grid extractor may choose to extract '
        'points more densely. Instead, this should correspond to the '
        'model\'s prediction layer: the predicted anchor box residuals '
        'should match this grid.')
    p.Define('grid_range_x', (-25, 25), 'The x-axis range covered by the grid.')
    p.Define('grid_range_y', (-25, 25), 'The y-axis range covered by the grid.')
    p.Define('grid_range_z', (0, 0), 'The z-axis range covered by the grid.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    utils_3d = detection_3d_lib.Utils3D()

    # Compute the grid cell size and adjust the range sent to dense coordinates
    # by half a cell size so as to ensure that the anchors are placed in the
    # center of each grid cell.
    grid_size_x, grid_size_y, grid_size_z = p.grid_size
    grid_cell_sizes = [
        float(p.grid_range_x[1] - p.grid_range_x[0]) / grid_size_x,
        float(p.grid_range_y[1] - p.grid_range_y[0]) / grid_size_y,
        float(p.grid_range_z[1] - p.grid_range_z[0]) / grid_size_z,
    ]
    half_size_x, half_size_y, half_size_z = np.asarray(grid_cell_sizes) / 2.0

    grid_shape = list(p.grid_size) + [3]
    # Dense (x, y, z) coordinates of every cell center, reshaped to the grid.
    anchor_centers = utils_3d.CreateDenseCoordinates([
        [
            p.grid_range_x[0] + half_size_x,
            p.grid_range_x[1] - half_size_x,
            grid_size_x
        ],
        [
            p.grid_range_y[0] + half_size_y,
            p.grid_range_y[1] - half_size_y,
            grid_size_y
        ],
        [
            p.grid_range_z[0] + half_size_z,
            p.grid_range_z[1] - half_size_z,
            grid_size_z
        ],
    ])  # pyformat: disable
    features.anchor_centers = tf.reshape(anchor_centers, grid_shape)

    return features

  def TransformShapes(self, shapes):
    p = self.params
    shapes.anchor_centers = tf.TensorShape(list(p.grid_size) + [3])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.anchor_centers = tf.float32
    return dtypes
class SparseCenterSelector(Preprocessor):
  """Select centers for anchors and cells.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  If lasers.num_seeded_points of shape [] is provided, it indicates that the
  first num_seeded_points of lasers.points_xyz should be used as seeds for
  farthest point sampling (e.g., always chosen). Currently the concept
  of seeding is not implemented for anything but farthest point sampling.

  Adds the following features:
    anchor_centers: [num_cell_centers, 3] - Floating point output containing the
      center (x, y, z) locations for tiling anchor boxes.
    cell_center_xyz: [num_cell_centers, 3] - Floating point output containing
      the center (x, y, z) locations for each cell to featurize.
  """

  # Supported values for p.sampling_method; validated in __init__.
  _SAMPLING_METHODS = ['farthest_point', 'random_uniform']

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_cell_centers', 256, 'Number of centers.')
    p.Define(
        'features_preparation_layers', [],
        'A list of Params for layers to run on the features before '
        'performing farthest point sampling. For example, one may wish to '
        'drop points out of frustum for KITTI before selecting centers. '
        'Note that these layers will not mutate the original features, '
        'instead, a copy will be made.')
    p.Define(
        'sampling_method', 'farthest_point',
        'Which sampling method to use. One of {}'.format(cls._SAMPLING_METHODS))
    p.Define(
        'fix_z_to_zero', True, 'Whether to fix z to 0 when retrieving the '
        'center xyz coordinates.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params

    # Fail fast on an unsupported sampling method rather than at graph build.
    if p.sampling_method not in self._SAMPLING_METHODS:
      raise ValueError('Param `sampling_method` must be one of {}.'.format(
          self._SAMPLING_METHODS))
    if p.features_preparation_layers is not None:
      self.CreateChildren('features_preparation_layers',
                          p.features_preparation_layers)

  def _FarthestPointSampleCenters(self, points_xyz, num_seeded_points):
    """Samples centers with Farthest Point Sampling.

    Args:
      points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point
        (x, y, z) locations. We expect any padded points to be removed before
        this function is called.
      num_seeded_points: integer indicating how many of the first
        num_seeded_points points in points_xyz should be considered
        as seeds for FPS (always chosen).

    Returns:
      A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers
      to use as anchors.
    """
    p = self.params
    num_points = tf.shape(points_xyz)[0]
    # All incoming points are real (unpadded), so the padding vector is zeros.
    points_padding = tf.zeros((num_points,), dtype=tf.float32)

    padded_num_points = tf.maximum(num_points, p.num_cell_centers)

    # Pad both the points and padding if for some reason the input pointcloud
    # has less points than p.num_cell_centers.  Padded entries get pad_val=1.0
    # so the sampler treats them as invalid.
    # NOTE: sampling operates on x-y only; z is dropped here.
    points_xy = py_utils.PadOrTrimTo(points_xyz[:, :2], [padded_num_points, 2])
    points_padding = py_utils.PadOrTrimTo(
        points_padding, [padded_num_points], pad_val=1.0)

    # FarthestPointSampler expects a leading batch dimension; add and strip it.
    sampled_idx, _ = car_lib.FarthestPointSampler(
        points_xy[tf.newaxis, ...],
        points_padding[tf.newaxis, ...],
        p.num_cell_centers,
        num_seeded_points=num_seeded_points,
        random_seed=p.random_seed)
    sampled_idx = sampled_idx[0, :]

    # Gather centers.
    if p.fix_z_to_zero:
      # Centers keep their sampled x-y but are pinned to z = 0.
      centers = tf.concat([
          tf.gather(points_xy, sampled_idx),
          tf.zeros((p.num_cell_centers, 1)),
      ], axis=-1)  # pyformat: disable
    else:
      centers = tf.gather(points_xyz, sampled_idx)
    return centers

  def _RandomUniformSampleCenters(self, points_xyz):
    """Samples centers with Random Uniform Sampling.

    Args:
      points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point
        (x, y, z) locations. We expect any padded points to be removed before
        this function is called.

    Returns:
      A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers
      to use as anchors.
    """
    p = self.params
    # We want the center Z value to be 0 so just exclude it.
    # Shuffling the points then taking a fixed-size prefix is equivalent to
    # uniform sampling without replacement; PadOrTrimTo zero-pads when there
    # are fewer points than centers.
    centers_xy = tf.random.shuffle(points_xyz[:, :2], seed=p.random_seed)
    selected_centers_xy = py_utils.PadOrTrimTo(centers_xy,
                                               [p.num_cell_centers, 2])
    return tf.concat([selected_centers_xy,
                      tf.zeros((p.num_cell_centers, 1))],
                     axis=-1)

  def _SampleCenters(self, points_xyz, num_seeded_points):
    """Dispatches to the configured sampling method."""
    p = self.params
    if p.sampling_method == 'farthest_point':
      return self._FarthestPointSampleCenters(points_xyz, num_seeded_points)
    elif p.sampling_method == 'random_uniform':
      if num_seeded_points > 0:
        raise NotImplementedError(
            'Random sampling with seeded points not yet implemented.')
      return self._RandomUniformSampleCenters(points_xyz)
    else:
      # Unreachable if __init__ validation ran, but kept as a guard.
      raise ValueError('Param `sampling_method` must be one of {}.'.format(
          self._SAMPLING_METHODS))

  def TransformFeatures(self, features):
    p = self.params

    # Run the preparation layers on a deep copy so the original features
    # are not mutated; only the sampled centers feed back into `features`.
    prepared_features = features.DeepCopy()
    for prep_layer in self.features_preparation_layers:
      prepared_features = prep_layer.FPropDefaultTheta(prepared_features)

    num_seeded_points = prepared_features.lasers.get('num_seeded_points', 0)
    points_data = prepared_features.lasers

    points_xyz = points_data.points_xyz
    if 'points_padding' in points_data:
      # Strip padded points before sampling; samplers expect unpadded input.
      points_padding = points_data.points_padding
      points_mask = 1 - points_padding
      points_xyz = tf.boolean_mask(points_xyz, points_mask)

    centers = self._SampleCenters(points_xyz, num_seeded_points)
    centers = py_utils.HasShape(centers, [p.num_cell_centers, 3])

    # The same tensor serves as both anchor centers and cell centers.
    features.anchor_centers = centers
    features.cell_center_xyz = centers

    return features

  def TransformShapes(self, shapes):
    p = self.params
    shapes.anchor_centers = tf.TensorShape([p.num_cell_centers, 3])
    shapes.cell_center_xyz = tf.TensorShape([p.num_cell_centers, 3])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.anchor_centers = tf.float32
    dtypes.cell_center_xyz = tf.float32
    return dtypes
class SparseCellGatherFeatures(Preprocessor):
  """Select local features for each cell.

  This preprocessor expects features to contain:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  - cell_center_xyz of shape [C, 3]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Adds the following features:
    cell_points_xyz: [num_centers, num_points_per_cell, 3] - Floating point
      output containing the (x, y, z) locations for each point for a given
      center.
    cell_feature: [num_centers, num_points_per_cell, F] - Floating point output
      containing the features for each point for a given center.
    cell_points_padding: [num_centers, num_points_per_cell] - 0/1 padding
      for the points in each cell.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_points_per_cell', 128, 'The number of points per cell.')
    p.Define('max_distance', 3.0, 'Max distance of point to cell center.')
    p.Define(
        'sample_neighbors_uniformly', False,
        'Whether to sample the neighbor points for every cell center '
        'uniformly at random. If False, this will default to selecting by '
        'distance.')
    return p

  def TransformFeatures(self, features):
    p = self.params

    num_centers = py_utils.GetShape(features.cell_center_xyz, 1)[0]
    num_features = py_utils.GetShape(features.lasers.points_feature)[-1]

    points_xyz = features.lasers.points_xyz
    points_feature = features.lasers.points_feature
    if 'points_padding' in features.lasers:
      # Drop padded points up front so the kNN below only sees real points.
      points_mask = 1 - features.lasers.points_padding
      points_xyz = tf.boolean_mask(points_xyz, points_mask)
      points_feature = tf.boolean_mask(points_feature, points_mask)

    # Note: points_xyz and points_feature must be unpadded as we pass
    # padding=None to neighborhood indices. Ensuring that it is unpadded
    # helps improve performance.

    # Get nearby points using kNN.
    sample_indices, sample_indices_padding = car_lib.NeighborhoodIndices(
        tf.expand_dims(points_xyz, 0),
        tf.expand_dims(features.cell_center_xyz, 0),
        p.num_points_per_cell,
        points_padding=None,
        max_distance=p.max_distance,
        sample_neighbors_uniformly=p.sample_neighbors_uniformly)

    # Take first example since NeighborhoodIndices expects batch dimension.
    sample_indices = sample_indices[0, :, :]
    sample_indices_padding = sample_indices_padding[0, :, :]

    sample_indices = py_utils.HasShape(sample_indices,
                                       [num_centers, p.num_points_per_cell])

    # Gather the xyz and feature rows for every selected neighbor index.
    cell_points_xyz = tf.gather(points_xyz, sample_indices)
    cell_points_xyz = py_utils.HasShape(cell_points_xyz,
                                        [num_centers, p.num_points_per_cell, 3])

    cell_feature = tf.gather(points_feature, sample_indices)
    cell_feature = py_utils.HasShape(
        cell_feature, [num_centers, p.num_points_per_cell, num_features])

    cell_points_padding = py_utils.HasShape(
        sample_indices_padding, [num_centers, p.num_points_per_cell])

    features.update({
        'cell_points_xyz': cell_points_xyz,
        'cell_feature': cell_feature,
        'cell_points_padding': cell_points_padding,
    })
    return features

  def TransformShapes(self, shapes):
    p = self.params
    num_centers = shapes.cell_center_xyz[0]
    base_shape = [num_centers, p.num_points_per_cell]
    num_features = shapes.lasers.points_feature[-1]
    shapes.cell_points_xyz = tf.TensorShape(base_shape + [3])
    shapes.cell_feature = tf.TensorShape(base_shape + [num_features])
    shapes.cell_points_padding = tf.TensorShape(base_shape)
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.cell_points_xyz = tf.float32
    dtypes.cell_feature = tf.float32
    dtypes.cell_points_padding = tf.float32
    return dtypes
class SparseCellCentersTopK(Preprocessor):
  """Given selected centers and gathered points/features, apply a filter.

  This preprocessor expects features to contain `cell_center_xyz` and all
  entries in params.features_to_modify, and that the leading dimension should
  all be the same (num_cell_centers from SparseCenterSelector).

  All values listed in params.features_to_modify are reordered by the sort
  function named by params.sort_by (applied to features.cell_center_xyz), and
  the first params.num_cell_centers entries along the leading dimension are
  kept.
  """

  # Supported values for p.sort_by; validated in __init__.
  _REGISTERED_SORT_FUNCTIONS = ['distance']

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_cell_centers', 512, 'The number of centers after filtering.')
    p.Define(
        'sort_by', 'distance', 'A string specifying which sort function '
        'to use. Currently we just support `distance`.')
    p.Define('features_to_modify', [
        'cell_center_xyz', 'anchor_centers', 'cell_points_xyz', 'cell_feature',
        'cell_points_padding'
    ], 'A list of keys from the features dict to modify.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # Validate configuration eagerly so errors surface at construction time.
    if p.sort_by not in self._REGISTERED_SORT_FUNCTIONS:
      raise ValueError('{} not supported. We only support {}.'.format(
          p.sort_by, self._REGISTERED_SORT_FUNCTIONS))
    if len(p.features_to_modify) < 1:
      raise ValueError('Need to modify at least one feature.')

  def _SortByDistance(self, features):
    """Returns indices ordering centers by ascending distance from origin."""
    center_norms = tf.linalg.norm(features.cell_center_xyz, axis=-1)
    return tf.argsort(center_norms, axis=-1, direction='ASCENDING')

  def _Sort(self, features):
    """Dispatches to the sort function selected by p.sort_by."""
    if self.params.sort_by == 'distance':
      return self._SortByDistance(features)
    raise ValueError('Unsupported sort function: {}.'.format(
        self.params.sort_by))

  def TransformFeatures(self, features):
    p = self.params
    # Keep only the first num_cell_centers entries of the sorted order.
    keep_indices = self._Sort(features)[:p.num_cell_centers, ...]
    # Gather each of the relevant items.
    for key in p.features_to_modify:
      original_shape = py_utils.GetShape(features[key])
      gathered = tf.gather(features[key], keep_indices)
      features[key] = py_utils.PadOrTrimTo(
          gathered, [p.num_cell_centers] + original_shape[1:])
    return features

  def TransformShapes(self, shapes):
    p = self.params
    for key in p.features_to_modify:
      trailing_dims = shapes[key][1:]
      shapes[key] = tf.TensorShape([p.num_cell_centers] + trailing_dims)
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class TileAnchorBBoxes(Preprocessor):
  """Creates anchor_bboxes given anchor_centers.

  This preprocessor expects features to contain the following keys:
  - anchor_centers of shape [...base shape..., 3]

  Adds the following features:
    anchor_bboxes: base_shape + [7] - Floating point anchor box
      output containing the anchor boxes and the 7 floating point
      values for each box that define the box (x, y, z, dx, dy, dz, phi).
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('anchor_box_dimensions', [],
             'List of anchor box sizes per center.')
    p.Define('anchor_box_offsets', [], 'List of anchor box offsets per center.')
    p.Define('anchor_box_rotations', [],
             'List of anchor box rotations per center.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    # All three anchor configuration lists must be populated.
    assert p.anchor_box_dimensions
    assert p.anchor_box_offsets
    assert p.anchor_box_rotations
    utils_3d = detection_3d_lib.Utils3D()

    # Flatten the centers so the anchor boxes can be built in one call,
    # then restore the leading shape with one extra per-center box axis.
    leading_shape = py_utils.GetShape(features.anchor_centers)[:-1]
    flat_centers = tf.reshape(features.anchor_centers, [-1, 3])
    flat_bboxes = utils_3d.MakeAnchorBoxes(
        flat_centers, tf.identity(p.anchor_box_dimensions),
        tf.identity(p.anchor_box_offsets), tf.identity(p.anchor_box_rotations))
    boxes_per_center = len(p.anchor_box_dimensions)
    features.anchor_bboxes = tf.reshape(
        flat_bboxes, leading_shape + [boxes_per_center, 7])
    return features

  def TransformShapes(self, shapes):
    boxes_per_center = len(self.params.anchor_box_dimensions)
    shapes.anchor_bboxes = shapes.anchor_centers[:-1].concatenate(
        [boxes_per_center, 7])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.anchor_bboxes = tf.float32
    return dtypes
class _AnchorBoxSettings:
  """Helper class to parameterize and update anchor box settings."""

  # Implementations should fill out the following class members.
  DIMENSION_PRIORS = []
  ROTATIONS = []
  CENTER_X_OFFSETS = []
  CENTER_Y_OFFSETS = []
  CENTER_Z_OFFSETS = []

  @classmethod
  def NumAnchors(cls):
    """Total number of anchors: the product of all setting list lengths."""
    counts = [
        len(cls.DIMENSION_PRIORS),
        len(cls.ROTATIONS),
        len(cls.CENTER_X_OFFSETS),
        len(cls.CENTER_Y_OFFSETS),
        len(cls.CENTER_Z_OFFSETS),
    ]
    return np.prod(counts)

  @classmethod
  def GenerateAnchorSettings(cls):
    """Generate anchor settings.

    Returns:
      A `NestedMap` containing three lists of the same length:
      - anchor_box_dimensions
      - anchor_box_rotations
      - anchor_box_offsets

      These can be used with the TileAnchorBBoxes preprocessor.
    """
    dimensions = []
    rotations = []
    offsets = []

    # The nested loops below enumerate the cartesian product explicitly
    # (equivalent to itertools.product) for readability.
    # *Please note*: The ordering is important for ModelV2, which makes
    # assumptions that the offset dimensions come first.
    for offset_x in cls.CENTER_X_OFFSETS:
      for offset_y in cls.CENTER_Y_OFFSETS:
        for offset_z in cls.CENTER_Z_OFFSETS:
          for rotation in cls.ROTATIONS:
            for dimension_prior in cls.DIMENSION_PRIORS:
              dimensions.append(dimension_prior)
              rotations.append(rotation)
              offsets.append((offset_x, offset_y, offset_z))

    # Check one of the lists has entries.
    assert dimensions

    return py_utils.NestedMap(
        anchor_box_dimensions=dimensions,
        anchor_box_rotations=rotations,
        anchor_box_offsets=offsets)

  @classmethod
  def Update(cls, params):
    """Updates anchor box settings from input configuration lists.

    Given dimensions priors, rotations, and offsets, computes the cartesian
    product of the settings.

    Args:
      params: The KITTIAnchorExtractorBase.Params() object to update.

    Returns:
      Params updated with the anchor settings.

      In total there are N combinations, where each (anchor_box_dimensions[i],
      anchor_box_rotations[i], anchor_box_offsets[i]) for i in range(N) is an
      option.
    """
    settings = cls.GenerateAnchorSettings()
    params.anchor_box_dimensions = settings.anchor_box_dimensions
    params.anchor_box_rotations = settings.anchor_box_rotations
    params.anchor_box_offsets = settings.anchor_box_offsets
    return params
def MakeAnchorBoxSettings(dimension_priors, rotations, center_x_offsets,
                          center_y_offsets, center_z_offsets):
  """Returns a configured class for setting anchor box settings.

  Args:
    dimension_priors: List of anchor box dimension priors.
    rotations: List of anchor box rotations.
    center_x_offsets: List of x offsets applied to each anchor center.
    center_y_offsets: List of y offsets applied to each anchor center.
    center_z_offsets: List of z offsets applied to each anchor center.

  Returns:
    A subclass of _AnchorBoxSettings with the given lists bound as its
    class-level configuration.
  """

  class CustomAnchorBoxSettings(_AnchorBoxSettings):
    # Bind the caller-provided configuration lists as class members so that
    # _AnchorBoxSettings classmethods (NumAnchors, GenerateAnchorSettings,
    # Update) operate on them.
    DIMENSION_PRIORS = dimension_priors
    ROTATIONS = rotations
    CENTER_X_OFFSETS = center_x_offsets
    CENTER_Y_OFFSETS = center_y_offsets
    CENTER_Z_OFFSETS = center_z_offsets

  return CustomAnchorBoxSettings
class SparseCarV1AnchorBoxSettings(_AnchorBoxSettings):
  """Anchor box settings for training on Cars for Sparse models."""

  # Borrowed from PointPillar dimension prior for cars.
  # NOTE(review): presumably ordered (width, length, height) in meters —
  # confirm against Utils3D.MakeAnchorBoxes.
  DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]

  # 4 Rotations with axis aligned and both diagonals.
  ROTATIONS = [0, np.pi / 2, np.pi / 4, 3 * np.pi / 4]

  # 25 offsets per anchor box (5 x 5 xy grid) with fixed z offset at -1.
  CENTER_X_OFFSETS = np.linspace(-1.5, 1.5, 5)
  CENTER_Y_OFFSETS = np.linspace(-1.5, 1.5, 5)
  CENTER_Z_OFFSETS = [-1.]
class PointPillarAnchorBoxSettingsCar(_AnchorBoxSettings):
  """PointPillars-style anchor box settings for cars."""

  # Single car-sized dimension prior.
  DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]
  # Two axis-aligned rotations (0 and 90 degrees).
  ROTATIONS = [0, np.pi / 2]
  # Fixed offset for every anchor box, based on a reading of the paper / code
  # 0 offsets for x and y, and -1 for z.
  CENTER_X_OFFSETS = [0.]
  CENTER_Y_OFFSETS = [0.]
  CENTER_Z_OFFSETS = [-1.]
class PointPillarAnchorBoxSettingsPed(PointPillarAnchorBoxSettingsCar):
  """PointPillars-style anchor box settings for pedestrians.

  Inherits rotations and x/y offsets from the car settings; only the
  dimension prior and z offset differ.
  """

  DIMENSION_PRIORS = [(0.6, 0.8, 1.73)]
  CENTER_Z_OFFSETS = [-0.6]
class PointPillarAnchorBoxSettingsCyc(PointPillarAnchorBoxSettingsCar):
  """PointPillars-style anchor box settings for cyclists.

  Inherits rotations and x/y offsets from the car settings; only the
  dimension prior and z offset differ.
  """

  DIMENSION_PRIORS = [(0.6, 1.76, 1.73)]
  CENTER_Z_OFFSETS = [-0.6]
class PointPillarAnchorBoxSettingsPedCyc(PointPillarAnchorBoxSettingsCar):
  """PointPillars-style anchor settings covering pedestrians and cyclists.

  Uses one dimension prior per class; rotations and x/y offsets come from
  the car settings.
  """

  DIMENSION_PRIORS = [(0.6, 0.8, 1.7), (0.6, 1.76, 1.73)]
  CENTER_Z_OFFSETS = [-0.6]
class AnchorAssignment(Preprocessor):
  """Perform anchor assignment on the features.

  This preprocessor expects features to contain the following keys:
  - anchor_bboxes of shape [...base shape..., 7]
  - labels.bboxes_3d
  - labels.labels
  - labels.bboxes_3d_mask

  Adds the following features:

    anchor_localization_residuals: base_shape + [7] floating point tensor of
      residuals. The model is expected to regress against these residuals as
      targets. The residuals can be converted back into bboxes using
      detection_3d_lib.Utils3D.ResidualsToBBoxes.
    assigned_gt_idx: base_shape - The corresponding index of the ground
      truth bounding box for each anchor box in anchor_bboxes, anchors not
      assigned will have idx be set to -1.
    assigned_gt_bbox: base_shape + [7] - The corresponding ground
      truth bounding box for each anchor box in anchor_bboxes.
    assigned_gt_labels: base_shape - The assigned groundtruth label
      for each anchor box.
    assigned_gt_similarity_score: base_shape - The similarity score
      for each assigned anchor box.
    assigned_cls_mask: base_shape mask for classification loss per anchor.
      This should be 1.0 if the anchor has a foreground or background
      assignment; otherwise, it will be assigned to 0.0.
    assigned_reg_mask: base_shape mask for regression loss per anchor.
      This should be 1.0 if the anchor has a foreground assignment;
      otherwise, it will be assigned to 0.0.
      Note: background anchors do not have regression targets.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'foreground_assignment_threshold', 0.5,
        'Score (usually IOU) threshold for assigning a box as foreground.')
    p.Define(
        'background_assignment_threshold', 0.35,
        'Score (usually IOU) threshold for assigning a box as background.')
    return p

  def TransformFeatures(self, features):
    p = self.params

    utils_3d = detection_3d_lib.Utils3D()

    # anchor_bboxes will be returned with shape [#centers, #boxes_per_center, 7]
    # flatten boxes here for matching.
    base_shape = py_utils.GetShape(features.anchor_bboxes)[:-1]
    anchor_bboxes = tf.reshape(features.anchor_bboxes, [-1, 7])

    # Anchors above the foreground threshold get a groundtruth assignment;
    # those below the background threshold become background; the rest are
    # ignored by both loss masks.
    assigned_anchors = utils_3d.AssignAnchors(
        anchor_bboxes,
        features.labels.bboxes_3d,
        features.labels.labels,
        features.labels.bboxes_3d_mask,
        foreground_assignment_threshold=p.foreground_assignment_threshold,
        background_assignment_threshold=p.background_assignment_threshold)

    # Add new features; each flat assignment output is reshaped back onto
    # the original [#centers, #boxes_per_center] base shape.
    features.assigned_gt_idx = tf.reshape(assigned_anchors.assigned_gt_idx,
                                          base_shape)
    features.assigned_gt_bbox = tf.reshape(assigned_anchors.assigned_gt_bbox,
                                           base_shape + [7])
    features.assigned_gt_labels = tf.reshape(
        assigned_anchors.assigned_gt_labels, base_shape)

    features.assigned_gt_similarity_score = tf.reshape(
        assigned_anchors.assigned_gt_similarity_score, base_shape)
    features.assigned_cls_mask = tf.reshape(assigned_anchors.assigned_cls_mask,
                                            base_shape)
    features.assigned_reg_mask = tf.reshape(assigned_anchors.assigned_reg_mask,
                                            base_shape)

    # Compute residuals.
    features.anchor_localization_residuals = utils_3d.LocalizationResiduals(
        features.anchor_bboxes, features.assigned_gt_bbox)

    return features

  def TransformShapes(self, shapes):
    base_shape = shapes.anchor_bboxes[:-1]
    box_shape = base_shape.concatenate([7])

    shapes.anchor_localization_residuals = box_shape
    shapes.assigned_gt_idx = base_shape
    shapes.assigned_gt_bbox = box_shape
    shapes.assigned_gt_labels = base_shape
    shapes.assigned_gt_similarity_score = base_shape
    shapes.assigned_cls_mask = base_shape
    shapes.assigned_reg_mask = base_shape
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.anchor_localization_residuals = tf.float32
    dtypes.assigned_gt_idx = tf.int32
    dtypes.assigned_gt_bbox = tf.float32
    dtypes.assigned_gt_labels = tf.int32
    dtypes.assigned_gt_similarity_score = tf.float32
    dtypes.assigned_cls_mask = tf.float32
    dtypes.assigned_reg_mask = tf.float32
    return dtypes
class DropLaserPointsOutOfRange(Preprocessor):
  """Drops laser points that are out of pre-defined x/y/z ranges.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Modifies the following features:
    Removes or sets padding to 1 for all points outside a given range. Modifies
    all items in the lasers subdictionary like lasers.points_xyz,
    lasers.points_feature, lasers.points_padding, and optionally
    lasers.points_label, lasers.points_bbox_id.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_x_range', (-np.inf, np.inf),
             'Only points that have x coordinates within this range are kept.')
    p.Define('keep_y_range', (-np.inf, np.inf),
             'Only points that have y coordinates within this range are kept.')
    p.Define(
        'keep_z_range', (-np.inf, np.inf),
        'Only points that have z coordinates within this range are kept. '
        'Approximate ground-removal can be performed by specifying a '
        'lower-bound on the z-range.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    points_xyz = features.lasers.points_xyz
    if 'points_padding' in features.lasers:
      # Start from the existing validity mask so already-padded points
      # stay invalid.
      points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
    else:
      # All points are real, we keep points unpadded by applying boolean_mask
      # on points_mask later.
      points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)

    min_x, max_x = p.keep_x_range
    min_y, max_y = p.keep_y_range
    min_z, max_z = p.keep_z_range

    # Short-circuit if all ranges are set to -inf, inf.
    if (np.all(np.isneginf([min_x, min_y, min_z])) and
        np.all(np.isposinf([max_x, max_y, max_z]))):
      return features

    # Only emit a comparison op for bounds that are actually finite;
    # infinite bounds are no-ops and would only add graph nodes.
    if min_x != -np.inf:
      points_mask &= points_xyz[:, 0] >= min_x
    if min_y != -np.inf:
      points_mask &= points_xyz[:, 1] >= min_y
    if min_z != -np.inf:
      points_mask &= points_xyz[:, 2] >= min_z

    if max_x != np.inf:
      points_mask &= points_xyz[:, 0] <= max_x
    if max_y != np.inf:
      points_mask &= points_xyz[:, 1] <= max_y
    if max_z != np.inf:
      points_mask &= points_xyz[:, 2] <= max_z

    if 'points_padding' in features.lasers:
      # Suffices to just update the padding.
      features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)
    else:
      # No padding tensor: physically remove out-of-range points from every
      # per-point tensor in the lasers subdictionary.
      features.lasers = features.lasers.Transform(
          _GetApplyPointMaskFn(points_mask))

    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class KITTIDropPointsOutOfFrustum(Preprocessor):
  """Drops laser points that are outside of the camera frustum.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  - images.velo_to_image_plane of shape [3, 4]
  - images.width of shape [1]
  - images.height of shape [1]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Modifies the following features:
    lasers.points_xyz, lasers.points_feature, lasers.points_padding, and
    optionally lasers.points_label, lasers.points_bbox_id so that
    points outside the frustum have padding set to 1 or are removed.
  """

  def TransformFeatures(self, features):
    # Drop points behind the car (behind x-axis = 0).
    images = features.images
    front_indices = features.lasers.points_xyz[:, 0] >= 0

    if 'points_padding' not in features.lasers:
      # Keep tensors unpadded and small using boolean_mask.
      # NOTE: in this branch the behind-car points are removed *before*
      # projecting to the image plane; the padded branch instead folds
      # front_indices into the mask below.
      features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,
                                                   front_indices)
      features.lasers.points_feature = tf.boolean_mask(
          features.lasers.points_feature, front_indices)

    # Drop those points outside the image plane.
    points_image = geometry.PointsToImagePlane(features.lasers.points_xyz,
                                               images.velo_to_image_plane)
    in_image_plane = (
        (points_image[:, 0] >= 0) &
        (points_image[:, 0] <= tf.cast(images.width, tf.float32)) &
        (points_image[:, 1] >= 0) &
        (points_image[:, 1] <= tf.cast(images.height, tf.float32)))

    if 'points_padding' in features.lasers:
      # Update padding to only include front indices and in image plane.
      points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
      points_mask &= front_indices
      points_mask &= in_image_plane
      features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)
    else:
      # Remove the out-of-image points from every per-point lasers tensor.
      features.lasers = features.lasers.Transform(
          _GetApplyPointMaskFn(in_image_plane))
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class RandomWorldRotationAboutZAxis(Preprocessor):
  """Rotates the world randomly as a form of data augmentation.

  Rotations are performed around the *z-axis*. This assumes that the car is
  always level. In general, we'd like to instead rotate the car on the spot,
  this would then make sense for cases where the car is on a slope.

  When there are leading dimensions, this will rotate the boxes with the same
  transformation across all the frames. This is useful when the input is a
  sequence of frames from the same run segment.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [..., 3]
  - labels.bboxes_3d of shape [..., 7]

  Modifies the following features:
    lasers.points_xyz, labels.bboxes_3d with the same rotation applied to both.

  Adds the following features:
    world_rot_z which contains the rotation applied to the example.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'max_rotation', None,
        'The rotation amount will be randomly picked from '
        '[-max_rotation, max_rotation).')
    p.Define(
        'include_world_rot_z', True,
        'Whether to include the applied rotation as an additional tensor. '
        'It can be helpful to disable this when using the preprocessor in a '
        'way that expects the structure of the features to be the same '
        '(e.g., as a branch in tf.cond).')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # max_rotation has no sensible default; require the user to choose one.
    if p.max_rotation is None:
      raise ValueError('max_rotation needs to be specified, instead of None.')

  def TransformFeatures(self, features):
    p = self.params
    # Draw a single scalar rotation; the same value rotates both the points
    # and the boxes so they stay consistent.
    rot = tf.random.uniform((),
                            minval=-p.max_rotation,
                            maxval=p.max_rotation,
                            seed=p.random_seed)

    # Rotating about the z-axis is equal to experiencing yaw.
    pose = [0., 0., 0., rot, 0., 0.]

    # Rotate points.
    features.lasers.points_xyz = geometry.CoordinateTransform(
        features.lasers.points_xyz, pose)

    # Rotate bboxes, note that heading has a special case.
    bboxes_xyz = features.labels.bboxes_3d[..., :3]
    bboxes_dims = features.labels.bboxes_3d[..., 3:6]
    bboxes_rot = features.labels.bboxes_3d[..., 6:]

    bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)

    # The heading correction should subtract rot from the bboxes rotations.
    bboxes_rot = geometry.WrapAngleRad(bboxes_rot - rot)

    features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
                                          axis=-1)
    if p.include_world_rot_z:
      features.world_rot_z = rot
    return features

  def TransformShapes(self, shapes):
    if self.params.include_world_rot_z:
      shapes.world_rot_z = tf.TensorShape([])
    return shapes

  def TransformDTypes(self, dtypes):
    if self.params.include_world_rot_z:
      dtypes.world_rot_z = tf.float32
    return dtypes
class DropPointsOutOfFrustum(Preprocessor):
"""Drops points outside of pre-defined theta / phi ranges.
Note that the ranges for keep_phi_range can be negative, this is because the
phi values wrap around 2*pi. Thus, a valid range that filters the 90 deg
frontal field of view of the car can be specified as [-pi/4, pi/4].
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
Modifies the following features:
- lasers.points_xyz removing any points out of frustum.
- lasers.points_feature removing any points out of frustum.
Note: We expect a downstream processor that filters out boxes with few points
to drop the corresponding bboxes.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keep_theta_range', (0., np.pi),
'Only points that have theta coordinates within this range.')
p.Define('keep_phi_range', (0., 2. * np.pi),
'Only points that have phi coordinates within this range.')
return p
def TransformFeatures(self, features):
p = self.params
if 'points_padding' in features.lasers:
raise ValueError('DropPointsOutOfFrustum preprocessor does not support '
'padded lasers.')
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
min_theta, max_theta = p.keep_theta_range
if (min_theta < 0. or min_theta > np.pi or max_theta < 0. or
max_theta > np.pi):
raise ValueError('Valid values for theta are between 0 and pi, '
'keep_theta_range={}'.format(p.keep_theta_range))
if min_theta > max_theta:
raise ValueError('min_theta must be <= max_theta, '
'keep_theta_range={}'.format(p.keep_theta_range))
min_phi, max_phi = p.keep_phi_range
if (min_phi < -2. * np.pi or min_phi > 2. * np.pi or
max_phi < -2. * np.pi or max_phi > 2. * np.pi):
raise ValueError('Valid values for phi are between -2*pi and 2*pi,'
'keep_phi_range={}'.format(p.keep_phi_range))
if min_phi > max_phi:
raise ValueError('min_phi must be <= max_phi, '
'keep_phi_range={}'.format(p.keep_phi_range))
_, theta, phi = tf.unstack(
geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)
# phi is returned in range [-pi, pi], we shift the values which are between
# [-pi, 0] to be [pi, 2pi] instead to make the logic below easier to follow.
# Hence, all phi values after this will be [0, 2pi].
phi = tf.where(phi >= 0., phi, 2. * np.pi + phi)
# Theta does not have circular boundary conditions, a simple check suffices.
points_mask = (theta >= min_theta) & (theta <= max_theta)
if min_phi < 0. and max_phi < 0.:
# Both are less than zero, we just just add 2pi and will use the regular
# check.
min_phi += 2. * np.pi
max_phi += 2. * np.pi
if min_phi < 0.:
# The minimum threshold is below 0, so we split into checking between
# (0 to min_phi) and (0 to max_phi). Note that min_phi is negative, but
# phi is always positive, so we take 2*pi + min_phi to get the range of
# appropriate values.
points_mask &= (phi >= (2. * np.pi + min_phi)) | (phi <= max_phi)
else:
# Both must be greater than 0 if we get to this condition.
assert min_phi >= 0.
assert max_phi >= 0.
points_mask &= (phi >= min_phi) & (phi <= max_phi)
features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
features.lasers.points_feature = tf.boolean_mask(points_feature,
points_mask)
return features
def TransformShapes(self, shapes):
  """Identity: this preprocessor does not change the declared shapes."""
  return shapes
def TransformDTypes(self, dtypes):
  """Identity: this preprocessor does not change the declared dtypes."""
  return dtypes
class DropBoxesOutOfRange(Preprocessor):
  """Masks out bounding boxes that extend beyond fixed x/y/z limits.

  Range boundaries are inclusive. This preprocessor expects features to
  contain the following keys:
  - labels.bboxes_3d of shape [N, 7]
  - labels.bboxes_3d_mask of shape [N]

  Modifies labels.bboxes_3d_mask so that any box with a corner outside the
  configured ranges is masked out.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_x_range', (-np.inf, np.inf),
             'Only boxes that have x coordinates within this range are kept.')
    p.Define('keep_y_range', (-np.inf, np.inf),
             'Only boxes that have y coordinates within this range are kept.')
    p.Define('keep_z_range', (-np.inf, np.inf),
             'Only boxes that have z coordinates within this range are kept.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    x_lo, x_hi = p.keep_x_range
    y_lo, y_hi = p.keep_y_range
    z_lo, z_hi = p.keep_z_range
    lo = [x_lo, y_lo, z_lo]
    hi = [x_hi, y_hi, z_hi]

    # Every range unbounded: nothing can possibly be dropped.
    if np.all(np.isneginf(lo)) and np.all(np.isposinf(hi)):
      return features

    # A box survives only if all eight of its corners lie inside the region
    # on every axis.
    corners = geometry.BBoxCorners(
        features.labels.bboxes_3d[tf.newaxis, ...])[0]
    corners = py_utils.HasShape(corners, [-1, 8, 3])
    keep = None
    for axis in range(3):
      lowest = tf.reduce_min(corners[:, :, axis], axis=-1)
      highest = tf.reduce_max(corners[:, :, axis], axis=-1)
      in_range = (lowest >= lo[axis]) & (highest <= hi[axis])
      keep = in_range if keep is None else keep & in_range

    max_num_boxes = py_utils.GetShape(features.labels.bboxes_3d_mask)
    keep = py_utils.HasShape(keep, max_num_boxes)
    features.labels.bboxes_3d_mask *= tf.cast(keep, tf.float32)
    return features

  def TransformShapes(self, shapes):
    """Masking boxes does not change any declared shapes."""
    return shapes

  def TransformDTypes(self, dtypes):
    """Masking boxes does not change any declared dtypes."""
    return dtypes
class PadLaserFeatures(Preprocessor):
  """Pads or trims laser tensors so their leading dimension is fixed.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  and optionally points_padding of shape [P]; when absent, every point is
  treated as valid.

  Modifies every tensor under lasers (points_xyz, points_feature, and any
  others such as points_label / points_bbox_id when present) to have exactly
  max_num_points rows, and writes lasers.points_padding of shape
  [max_num_points] marking the padded rows.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('max_num_points', 128500,
             'Max number of points to pad the points to.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    if 'points_padding' in features.lasers:
      # Drop previously padded rows first; padding is rebuilt from scratch.
      valid_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
      features.lasers = features.lasers.Transform(
          _GetApplyPointMaskFn(valid_mask))

    num_valid = tf.shape(features.lasers.points_xyz)[0]
    # Start with a "valid" marker for each surviving point; it is inverted
    # into a padding indicator at the end.
    features.lasers.points_padding = tf.ones([num_valid])
    perm = tf.random.shuffle(tf.range(num_valid), seed=p.random_seed)

    def _ShuffleAndFixSize(t):
      # Shuffling before trimming makes any trim a uniform random subsample.
      t = tf.gather(t, perm)
      return py_utils.PadOrTrimTo(t,
                                  [p.max_num_points] + t.shape[1:].as_list())

    features.lasers = features.lasers.Transform(_ShuffleAndFixSize)
    features.lasers.points_padding = 1.0 - features.lasers.points_padding
    return features

  def TransformShapes(self, shapes):
    p = self.params

    def _FixLeadingDim(points_shape):
      return tf.TensorShape([p.max_num_points] + points_shape[1:].as_list())

    shapes.lasers = shapes.lasers.Transform(_FixLeadingDim)
    shapes.lasers.points_padding = tf.TensorShape([p.max_num_points])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.lasers.points_padding = tf.float32
    return dtypes
class WorldScaling(Preprocessor):
  """Uniformly rescales the whole scene as data augmentation.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - labels.bboxes_3d of shape [L, 7]

  Modifies lasers.points_xyz and labels.bboxes_3d, applying the identical
  random scale factor to both (box rotations are left untouched).
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('scaling', None, 'The scaling range.')
    return p

  def __init__(self, params):
    """Checks that a valid two-element scaling range was supplied."""
    super().__init__(params)
    p = self.params
    if p.scaling is None:
      raise ValueError('scaling needs to be specified, instead of None.')
    if len(p.scaling) != 2:
      raise ValueError('scaling needs to be a list of two elements.')

  def TransformFeatures(self, features):
    p = self.params
    points_xyz = features.lasers.points_xyz
    factor = tf.random.uniform((),
                               minval=p.scaling[0],
                               maxval=p.scaling[1],
                               seed=p.random_seed,
                               dtype=points_xyz.dtype)
    # Points, box centers and box dimensions all scale; headings do not.
    features.lasers.points_xyz = points_xyz * factor
    bboxes_3d = features.labels.bboxes_3d
    features.labels.bboxes_3d = tf.concat([
        bboxes_3d[..., :3] * factor, bboxes_3d[..., 3:6] * factor,
        bboxes_3d[..., 6:]
    ],
                                          axis=-1)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class RandomDropLaserPoints(Preprocessor):
  """Randomly discards laser points and their corresponding features.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]

  Modifies lasers.points_xyz and lasers.points_feature; when a
  points_padding tensor exists, dropped points are marked as padding
  instead of being removed.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_prob', 0.95, 'Probability for keeping points.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    num_points, _ = py_utils.GetShape(features.lasers.points_xyz)
    keep_mask = tf.random.uniform([num_points],
                                  minval=0,
                                  maxval=1,
                                  seed=p.random_seed) < p.keep_prob
    if 'points_padding' in features.lasers:
      # Record dropped points as padding so tensor shapes stay fixed: a
      # point remains valid only if it was valid before AND is kept now.
      valid = 1 - features.lasers.points_padding
      valid *= tf.cast(keep_mask, tf.float32)
      features.lasers.points_padding = 1 - valid
    else:
      features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,
                                                   keep_mask)
      features.lasers.points_feature = tf.boolean_mask(
          features.lasers.points_feature, keep_mask)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class RandomFlipY(Preprocessor):
  """Randomly mirrors the scene across the X-Z plane (negating Y).

  When there are leading dimensions, this will flip the boxes with the same
  transformation across all the frames. This is useful when the input is a
  sequence of frames from the same run segment.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [..., 3]
  - labels.bboxes_3d of shape [..., 7]

  Modifies lasers.points_xyz and labels.bboxes_3d, applying the identical
  flip decision to both.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('flip_probability', 0.5, 'Probability of flipping.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    # A single coin flip governs the whole example (points and every box).
    flip = tf.random.uniform(
        (), minval=0.0, maxval=1.0, seed=p.random_seed) >= (
            1. - p.flip_probability)

    def _MaybeNegateY(xyz):
      y = tf.where(flip, -xyz[..., 1:2], xyz[..., 1:2])
      return tf.concat([xyz[..., 0:1], y, xyz[..., 2:3]], axis=-1)

    features.lasers.points_xyz = _MaybeNegateY(features.lasers.points_xyz)

    bboxes_3d = features.labels.bboxes_3d
    # Mirroring Y also negates the heading angle (re-wrapped into range).
    rot = bboxes_3d[..., 6:]
    rot = tf.where(flip, geometry.WrapAngleRad(-rot), rot)
    features.labels.bboxes_3d = tf.concat(
        [_MaybeNegateY(bboxes_3d[..., :3]), bboxes_3d[..., 3:6], rot], axis=-1)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class GlobalTranslateNoise(Preprocessor):
  """Applies one random xyz translation to both points and boxes.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - labels.bboxes_3d of shape [L, 7]

  Modifies lasers.points_xyz and labels.bboxes_3d, shifting both by the
  identical random offset.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('noise_std', [0.2, 0.2, 0.2],
             'Standard deviation of translation noise per axis.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    # One seed per axis, derived from the same base seed, so the three
    # offsets differ from each other but stay reproducible.
    base_seed = p.random_seed
    offsets = []
    for axis in range(3):
      axis_seed = None if base_seed is None else base_seed + axis
      offsets.append(
          tf.random.normal((),
                           mean=0.0,
                           stddev=p.noise_std[axis],
                           seed=axis_seed))
    # Pose is [tx, ty, tz, 0, 0, 0]: pure translation, no rotation.
    pose = tf.stack(offsets + [0.0, 0.0, 0.0], axis=0)

    features.lasers.points_xyz = geometry.CoordinateTransform(
        features.lasers.points_xyz, pose)

    bboxes_3d = features.labels.bboxes_3d
    shifted_centers = geometry.CoordinateTransform(bboxes_3d[..., :3], pose)
    features.labels.bboxes_3d = tf.concat(
        [shifted_centers, bboxes_3d[..., 3:]], axis=-1)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class RandomBBoxTransform(Preprocessor):
  """Randomly transform bounding boxes and the points inside them.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  - lasers.points_padding of shape [P]
  - labels.bboxes_3d of shape [L, 7]
  - labels.bboxes_3d_mask of shape [L]

  Modifies the following features:
    lasers.points_{xyz,feature,padding}, labels.bboxes_3d with the
    transformed bounding boxes and points.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'max_rotation', None,
        'The rotation amount will be randomly picked from '
        '[-max_rotation, max_rotation).')
    # At the moment we don't use this because it can cause boxes to collide with
    # each other. We need to compute box intersections when deciding whether to
    # apply the translation jitter. Theoretically we should also do this for
    # rotation.
    p.Define('noise_std', [0.0, 0.0, 0.0],
             'Standard deviation of translation noise per axis.')
    p.Define(
        'max_scaling', None,
        'An optional float list of length 3. When max_scaling is not none, '
        'delta parameters s_x, s_y, s_z are drawn from '
        '[-max_scaling[i], max_scaling[i]] where i is in [0, 2].')
    p.Define(
        'max_shearing', None,
        'An optional float list of length 6. When max_shearing is not none, '
        'shearing parameters sh_x^y, sh_x^z, sh_y^x, sh_y^z, sh_z^x, sh_z^y are'
        'drawn from [-max_shearing[i], max_shearing[i]], where i is in [0, 5].')
    p.Define(
        'max_num_points_per_bbox', 16384,
        'The maximum number of points that fall within a bounding box. '
        'Bounding boxes with more points than this value will '
        'have some points droppped.')
    return p

  def __init__(self, params):
    """Validates the augmentation parameters.

    Raises:
      ValueError: if max_rotation is None, or if max_scaling / max_shearing
        are set but do not have length 3 / 6 respectively.
    """
    super().__init__(params)
    p = self.params
    if p.max_rotation is None:
      raise ValueError('max_rotation needs to be specified, instead of None.')
    if p.max_scaling is not None:
      if len(p.max_scaling) != 3:
        raise ValueError('max_scaling needs to be specified as either None or '
                         'list of 3 floating point numbers, instead of {}.'
                         ''.format(p.max_scaling))
    if p.max_shearing is not None:
      if len(p.max_shearing) != 6:
        raise ValueError('max_shearing needs to be specified as either None or '
                         'list of 6 floating point numbers, instead of {}.'
                         ''.format(p.max_shearing))

  def _Foreground(self, features, points_xyz, points_feature, real_bboxes_3d,
                  points_in_bbox_mask, rotation, translate_pose, transform_fn):
    """Extract and transform foreground points and features.

    Args:
      features: input NestedMap; used for the box mask and feature width.
      points_xyz: [P, 3] coordinates of the valid (unpadded) points.
      points_feature: [P, F] features of the valid points.
      real_bboxes_3d: [B, 7] boxes after applying bboxes_3d_mask.
      points_in_bbox_mask: per-point/per-box membership mask; column i is
        indexed by box i in the loop body.
      rotation: [B] per-box random rotation angles.
      translate_pose: [B, 3] per-box random translation offsets.
      transform_fn: loop body run once per box (see Transform in
        TransformFeatures).

    Returns:
      Tuple (fg_xyz, fg_feature) of the transformed in-box points and their
      features, flattened across boxes.
    """
    out_bbox_xyz, out_bbox_feature, out_bbox_mask = self._ForLoopBuffers(
        features)
    # Only iterate over the actual number of boxes in the scene.
    actual_num_bboxes = tf.reduce_sum(
        tf.cast(features.labels.bboxes_3d_mask, tf.int32))
    ret = py_utils.ForLoop(
        body=transform_fn,
        start=0,
        limit=actual_num_bboxes,
        delta=1,
        loop_state=py_utils.NestedMap(
            points_xyz=points_xyz,
            points_feature=points_feature,
            bboxes_3d=real_bboxes_3d,
            points_in_bbox_mask=points_in_bbox_mask,
            rotation=rotation,
            translate_pose=translate_pose,
            out_bbox_points=out_bbox_xyz,
            out_bbox_feature=out_bbox_feature,
            out_bbox_mask=out_bbox_mask))
    # Gather all of the transformed points and features
    out_bbox_xyz = tf.reshape(ret.out_bbox_points, [-1, 3])
    num_features = features.lasers.points_feature.shape[-1]
    out_bbox_feature = tf.reshape(ret.out_bbox_feature, [-1, num_features])
    out_bbox_mask = tf.cast(tf.reshape(ret.out_bbox_mask, [-1]), tf.bool)
    # The mask drops the zero-initialized buffer rows that were never filled.
    fg_xyz = tf.boolean_mask(out_bbox_xyz, out_bbox_mask)
    fg_feature = tf.boolean_mask(out_bbox_feature, out_bbox_mask)
    return fg_xyz, fg_feature

  def _Background(self, points_xyz, points_feature, points_in_bbox_mask):
    """Returns the points and features that fall inside no bounding box."""
    # If a point is in any bounding box, it is a foreground point.
    foreground_points_mask = tf.reduce_any(points_in_bbox_mask, axis=-1)
    # All others are background. We rotate all of the foreground points to
    # final_points_* and keep the background points unchanged
    background_points_mask = tf.math.logical_not(foreground_points_mask)
    background_points_xyz = tf.boolean_mask(points_xyz, background_points_mask)
    background_points_feature = tf.boolean_mask(points_feature,
                                                background_points_mask)
    return background_points_xyz, background_points_feature

  def _ForLoopBuffers(self, features):
    """Create and return the buffers for the for loop."""
    p = self.params
    bboxes_3d = features.labels.bboxes_3d
    # Compute the shapes and create the buffers for the For loop.
    max_num_bboxes = tf.shape(bboxes_3d)[0]
    per_box_shape = [max_num_bboxes, p.max_num_points_per_bbox, 3]
    out_bbox_points = inplace_ops.empty(
        per_box_shape, dtype=tf.float32, init=True)
    num_features = features.lasers.points_feature.shape[-1]
    bbox_feature_shape = [
        max_num_bboxes, p.max_num_points_per_bbox, num_features
    ]
    out_bbox_feature = inplace_ops.empty(
        bbox_feature_shape, dtype=tf.float32, init=True)
    per_box_mask_shape = [max_num_bboxes, p.max_num_points_per_bbox]
    out_bbox_mask = inplace_ops.empty(
        per_box_mask_shape, dtype=tf.float32, init=True)
    return out_bbox_points, out_bbox_feature, out_bbox_mask

  def TransformFeatures(self, features):
    """Draws per-box random transforms and applies them to points and boxes.

    Foreground points (those inside a real box) are rotated/jittered/scaled/
    sheared with their box; background points pass through unchanged.
    """
    p = self.params
    num_features = features.lasers.points_feature.shape[-1]

    def Transform(i, state):
      """Transform the points in bounding box `i`."""
      state.points_xyz = tf.reshape(state.points_xyz, [-1, 3])
      bbox_mask = tf.reshape(state.points_in_bbox_mask[:, i], [-1])
      # Fetch only the points in the bounding box.
      points_xyz_masked = tf.boolean_mask(state.points_xyz, bbox_mask)
      points_feature_masked = tf.boolean_mask(state.points_feature, bbox_mask)
      num_points = tf.shape(points_xyz_masked)[0]
      # TODO(vrv): Fold the following into a single transformation
      # matrix.
      #
      # Translate the box to the origin, then rotate the desired
      # rotation angle.
      translation_vec = state.bboxes_3d[i, 0:3]
      # NOTE(review): rotation is placed in the first slot of the
      # rotation triple — presumably yaw; confirm against the
      # geometry.CoordinateTransform convention.
      rotation_vec = [state.rotation[i], 0., 0.]
      pose = tf.concat([-translation_vec, rotation_vec], axis=0)
      points_xyz_adj = geometry.CoordinateTransform(points_xyz_masked, pose)
      if p.max_scaling is not None or p.max_shearing is not None:
        # Translate the points in the bounding box by moving dz/2 so that the
        # bottom of the bounding box is at Z = 0 when any of the two
        # (max_scaling or max_shearing) is not None
        translation_scale_or_shear = tf.stack(
            [0., 0., state.bboxes_3d[i, 5] / 2], axis=0)
        pose1 = tf.concat([translation_scale_or_shear, [0., 0., 0.]], axis=0)
        points_xyz_adj = geometry.CoordinateTransform(points_xyz_adj, pose1)
      else:
        translation_scale_or_shear = tf.stack([0., 0., 0.], axis=0)
      if p.max_scaling is not None:
        # Perform scaling to the point cloud
        # Scaling matrix
        # [[s_x+1    0      0]
        #  [   0  s_y+1     0]
        #  [   0     0  s_z+1]]
        # NOTE(review): sx/sy/sz (and the shear draws below) all reuse
        # p.random_seed for each draw; with a fixed seed these may be
        # correlated — confirm this is intended.
        sx = tf.random.uniform([],
                               minval=-p.max_scaling[0],
                               maxval=p.max_scaling[0],
                               seed=p.random_seed)
        sy = tf.random.uniform([],
                               minval=-p.max_scaling[1],
                               maxval=p.max_scaling[1],
                               seed=p.random_seed)
        sz = tf.random.uniform([],
                               minval=-p.max_scaling[2],
                               maxval=p.max_scaling[2],
                               seed=p.random_seed)
        scaling_matrix = tf.stack(
            [[sx + 1., 0., 0.], [0., sy + 1., 0.], [0., 0., sz + 1.]], axis=0)
        points_xyz_adj = tf.einsum('ij,kj->ki', scaling_matrix, points_xyz_adj)
      if p.max_shearing is not None:
        # Perform shearing to the point cloud
        # Shearing matrix
        # [[1       sh_x^y  sh_x^z]
        #  [sh_y^x  1       sh_y^z]
        #  [sh_z^x  sh_z^y  1     ]]
        sxy = tf.random.uniform([],
                                minval=-p.max_shearing[0],
                                maxval=p.max_shearing[0],
                                seed=p.random_seed)
        sxz = tf.random.uniform([],
                                minval=-p.max_shearing[1],
                                maxval=p.max_shearing[1],
                                seed=p.random_seed)
        syx = tf.random.uniform([],
                                minval=-p.max_shearing[2],
                                maxval=p.max_shearing[2],
                                seed=p.random_seed)
        syz = tf.random.uniform([],
                                minval=-p.max_shearing[3],
                                maxval=p.max_shearing[3],
                                seed=p.random_seed)
        szx = tf.random.uniform([],
                                minval=-p.max_shearing[4],
                                maxval=p.max_shearing[4],
                                seed=p.random_seed)
        szy = tf.random.uniform([],
                                minval=-p.max_shearing[5],
                                maxval=p.max_shearing[5],
                                seed=p.random_seed)
        shearing_matrix = tf.stack(
            [[1., sxy, sxz], [syx, 1., syz], [szx, szy, 1.]], axis=0)
        points_xyz_adj = tf.einsum('ij,kj->ki', shearing_matrix, points_xyz_adj)
      # Translate the points back, adding noise if needed.
      translation_with_noise = (
          translation_vec - translation_scale_or_shear +
          state.translate_pose[i])
      pose2 = tf.concat([translation_with_noise, [0., 0., 0.]], axis=0)
      final_points_xyz = geometry.CoordinateTransform(points_xyz_adj, pose2)
      # final_points_xyz is an [M, 3] Tensor where M is the number of points in
      # the box.
      points_mask = tf.ones([num_points], dtype=tf.float32)
      final_points_xyz = py_utils.PadOrTrimTo(final_points_xyz,
                                              [p.max_num_points_per_bbox, 3])
      final_points_feature = py_utils.PadOrTrimTo(
          points_feature_masked, [p.max_num_points_per_bbox, num_features])
      points_mask = py_utils.PadOrTrimTo(points_mask,
                                         [p.max_num_points_per_bbox])
      # Write this box's slice of the pre-allocated output buffers.
      state.out_bbox_points = inplace_ops.alias_inplace_update(
          state.out_bbox_points, [i], tf.expand_dims(final_points_xyz, 0))
      state.out_bbox_feature = inplace_ops.alias_inplace_update(
          state.out_bbox_feature, [i], tf.expand_dims(final_points_feature, 0))
      state.out_bbox_mask = inplace_ops.alias_inplace_update(
          state.out_bbox_mask, [i], tf.expand_dims(points_mask, 0))
      return state

    # Get the points and features that reside in boxes.
    if 'points_padding' in features.lasers:
      points_mask = 1 - features.lasers.points_padding
      points_xyz = tf.boolean_mask(features.lasers.points_xyz, points_mask)
      points_feature = tf.boolean_mask(features.lasers.points_feature,
                                       points_mask)
    else:
      points_xyz = features.lasers.points_xyz
      points_feature = features.lasers.points_feature

    # Fetch real bounding boxes and compute point mask.
    real_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d,
                                     features.labels.bboxes_3d_mask)
    points_in_bbox_mask = geometry.IsWithinBBox3D(points_xyz, real_bboxes_3d)

    # Choose a random rotation for every real box.
    num_boxes = tf.shape(real_bboxes_3d)[0]
    rotation = tf.random.uniform([num_boxes],
                                 minval=-p.max_rotation,
                                 maxval=p.max_rotation,
                                 seed=p.random_seed)

    # Per-axis seeds are derived from the same base seed so the three
    # translation components are drawn with distinct seeds.
    base_seed = p.random_seed
    x_seed = base_seed
    y_seed = None if base_seed is None else base_seed + 1
    z_seed = None if base_seed is None else base_seed + 2
    random_translate_x = tf.random.normal([num_boxes],
                                          mean=0.0,
                                          stddev=p.noise_std[0],
                                          seed=x_seed)
    random_translate_y = tf.random.normal([num_boxes],
                                          mean=0.0,
                                          stddev=p.noise_std[1],
                                          seed=y_seed)
    random_translate_z = tf.random.normal([num_boxes],
                                          mean=0.0,
                                          stddev=p.noise_std[2],
                                          seed=z_seed)
    translate_pose = tf.stack(
        [random_translate_x, random_translate_y, random_translate_z], axis=1)

    fg_xyz, fg_feature = self._Foreground(features, points_xyz, points_feature,
                                          real_bboxes_3d, points_in_bbox_mask,
                                          rotation, translate_pose, Transform)

    # Concatenate them with the background points and features.
    bg_xyz, bg_feature = self._Background(points_xyz, points_feature,
                                          points_in_bbox_mask)
    all_points = tf.concat([bg_xyz, fg_xyz], axis=0)
    all_features = tf.concat([bg_feature, fg_feature], axis=0)

    # Shuffle the points/features randomly.
    all_points, all_features = _ConsistentShuffle((all_points, all_features),
                                                  p.random_seed)

    # Padding should technically be unnecessary: the number of points before and
    # after should be the same, but in practice we sometimes seem to drop a few
    # points, and so we pad to make the shape fixed.
    #
    # TODO(vrv): Identify the source of this problem and then assert a shape
    # matching check.
    if 'points_padding' in features.lasers:
      features.lasers.points_xyz = py_utils.PadOrTrimTo(
          all_points, tf.shape(features.lasers.points_xyz))
      features.lasers.points_feature = py_utils.PadOrTrimTo(
          all_features, tf.shape(features.lasers.points_feature))
      total_points = tf.shape(all_points)[0]
      features.lasers.points_padding = 1.0 - py_utils.PadOrTrimTo(
          tf.ones([total_points]), tf.shape(features.lasers.points_padding))
    else:
      features.lasers.points_xyz = all_points
      features.lasers.points_feature = all_features

    # Translate noise.
    bboxes_xyz = real_bboxes_3d[..., :3]
    bboxes_xyz += translate_pose[..., :3]

    bboxes_dim = real_bboxes_3d[..., 3:6]
    # Rotate bboxes by their corresponding rotation.
    bboxes_rot = real_bboxes_3d[..., 6:]
    bboxes_rot -= rotation[:, tf.newaxis]
    features.labels.bboxes_3d = py_utils.PadOrTrimTo(
        tf.concat([bboxes_xyz, bboxes_dim, bboxes_rot], axis=-1),
        tf.shape(features.labels.bboxes_3d))
    features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
        tf.ones(tf.shape(real_bboxes_3d)[0]),
        tf.shape(features.labels.bboxes_3d_mask))
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class GroundTruthAugmentor(Preprocessor):
"""Augment bounding box labels and points from a database.
This preprocessor expects features to contain the following keys:
lasers.points_xyz of shape [P, 3]
lasers.points_feature of shape [P, F]
lasers.points_padding of shape [P]
labels.bboxes_3d of shape [L, 7]
labels.bboxes_3d_mask of shape [L]
labels.labels of shape [L]
Modifies the above features so that additional objects from
a groundtruth database are added.
"""
  @classmethod
  def Params(cls):
    """Defaults for ground truth database augmentation."""
    p = super().Params()
    p.Define(
        'groundtruth_database', None,
        'If not None, loads groundtruths from this database and adds '
        'them to the current scene. Groundtruth database is expected '
        'to be a TFRecord of KITTI or Waymo crops.')
    p.Define(
        'num_db_objects', None,
        'Number of objects in the database. Because we use TFRecord '
        'we cannot easily query the number of objects efficiencly.')
    p.Define('max_num_points_per_bbox', 2048,
             'Maximum number of points in each bbox to augment with.')
    p.Define(
        'filter_min_points', 0,
        'Minimum number of points each database object must have '
        'to be included in an example.')
    p.Define(
        'filter_max_points', None,
        'Maximum number of points each database object must have '
        'to be included in an example.')
    p.Define(
        'difficulty_sampling_probability', None,
        'Probability for sampling ground truth example whose difficulty '
        'equals {0, 1, 2, 3, ...}. Example: [1.0, 1.0, 1.0, 1.0] for '
        'uniform sampling 4 different difficulties. Default value is '
        'None = uniform sampling for all difficulties.')
    p.Define(
        'class_sampling_probability', None,
        'Probability for sampling ground truth example based on its class index'
        ' Example: For KITTI classes are [Background, Car, Van, Truck, '
        'Pedestrian, Person_sitting, Cyclist, Tram, Misc, DontCare], using '
        'probability vector [0., 1.0, 1.0, 0., 0., 0., 0.,0., 0., 0.], we '
        'uniformly sampling Car and Van. Default value is None: Uses '
        'label_filter flag and does not sample based on class.')
    p.Define('filter_min_difficulty', 0,
             'Filter ground truth boxes whose difficulty is < this value.')
    p.Define('max_augmented_bboxes', 15,
             'Maximum number of augmented bounding boxes per scene.')
    p.Define(
        'label_filter', [],
        'A list where if specified, only examples of these label integers will '
        'be included in an example.')
    p.Define(
        'batch_mode', False, 'Bool value to control whether the whole'
        'groundtruth database is loaded or partially loaded to save memory'
        'usage. Setting to False loads the whole ground truth database into '
        'memory. Otherwise, only a fraction of the data will be loaded into '
        'the memory.')
    return p
  def _ReadDB(self, file_patterns):
    """Read the groundtruth database and return as a NestedMap of Tensors.

    Args:
      file_patterns: file pattern(s) for the TFRecord object database.

    Returns:
      NestedMap with points_xyz, points_feature, points_mask, bboxes_3d,
      labels and difficulties, batched over database objects.
    """
    p = self.params

    def Process(record):
      """Process a groundtruth record."""
      feature_map = {
          'num_points': tf.io.FixedLenFeature((), tf.int64, 0),
          'points': tf.io.VarLenFeature(dtype=tf.float32),
          'points_feature': tf.io.VarLenFeature(dtype=tf.float32),
          'bbox_3d': tf.io.VarLenFeature(dtype=tf.float32),
          'label': tf.io.FixedLenFeature((), tf.int64, 0),
          'difficulty': tf.io.FixedLenFeature((), tf.int64, 0),
          'text': tf.io.VarLenFeature(dtype=tf.string),
      }
      example_data = tf.io.parse_single_example(record, feature_map)
      num_points = example_data['num_points']
      points = tf.reshape(_Dense(example_data['points']), [num_points, 3])
      features = tf.reshape(
          _Dense(example_data['points_feature']), [num_points, 1])
      points_mask = tf.ones(num_points, dtype=tf.bool)
      # TODO(vrv): Use random selection instead of first N points.
      points = py_utils.PadOrTrimTo(points, [p.max_num_points_per_bbox, 3])
      features = py_utils.PadOrTrimTo(features, [p.max_num_points_per_bbox, 1])
      points_mask = py_utils.PadOrTrimTo(points_mask,
                                         [p.max_num_points_per_bbox])
      bboxes_3d = tf.reshape(_Dense(example_data['bbox_3d']), [7])
      label = tf.cast(example_data['label'], tf.int32)
      difficulty = tf.cast(example_data['difficulty'], tf.int32)
      return (points, features, points_mask, bboxes_3d, label, difficulty)

    # NOTE(review): tf.stateless_list_files / tf.stateless_cache_dataset /
    # tf.stateless_shuffle_dataset below look like project-level wrappers
    # around tf.data, not standard TF API — confirm against the tf module
    # imported by this file.
    if p.batch_mode:
      # Prepare dataset for ground truth bounding boxes. Randomly shuffle the
      # file patterns.
      file_count = len(tf.io.gfile.glob(file_patterns))
      dataset = tf.stateless_list_files(file_patterns)
      dataset = dataset.apply(tf.stateless_cache_dataset())
      dataset = dataset.apply(
          tf.stateless_shuffle_dataset(
              buffer_size=file_count, reshuffle_each_iteration=True))
      dataset = dataset.interleave(
          tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)
      dataset = dataset.repeat()
      # Only prefetch a few objects from the database to reduce memory
      # consumption.
      dataset = dataset.map(Process, num_parallel_calls=10)
      # We need more bboxes than max_augmented_bboxes in a batch, because some
      # of the boxes are filtered out.
      dataset = dataset.batch(p.max_augmented_bboxes * 10)
      dataset = dataset.apply(tf.stateless_cache_dataset()).prefetch(
          p.max_augmented_bboxes * 30)
    else:
      # Prepare dataset for ground truth bounding boxes.
      dataset = tf.stateless_list_files(file_patterns)
      dataset = dataset.interleave(
          tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)
      # Read the entire dataset into memory.
      dataset = dataset.take(p.num_db_objects)
      dataset = dataset.map(Process, num_parallel_calls=10)
      # We batch the output of the dataset into a very large Tensor, then cache
      # it in memory.
      dataset = dataset.batch(p.num_db_objects)
      dataset = dataset.apply(tf.stateless_cache_dataset()).repeat()

    # TF1-style one-shot iterator over the (cached) database.
    iterator = dataset.make_one_shot_iterator()
    input_batch = iterator.get_next()
    (db_points_xyz, db_points_feature, db_points_mask, db_bboxes, db_labels,
     db_difficulties) = input_batch
    return py_utils.NestedMap(
        points_xyz=db_points_xyz,
        points_feature=db_points_feature,
        points_mask=db_points_mask,
        bboxes_3d=db_bboxes,
        labels=db_labels,
        difficulties=db_difficulties)
  def _CreateExampleFilter(self, db):
    """Construct db example filter.

    Args:
      db: NestedMap of the following Tensors: points_mask - [N, P] - The points
        mask for every object in the database, where N is the number of objects
        and P is the maximum number of points per object. labels - [N] - int32
        Label for each object in the database. difficulties - [N] - int32
        Difficulty for each label in the database.

    Returns:
      A [N] boolean Tensor for each object in the database, True if
      that corresponding object passes the filter.
    """
    p = self.params
    db_points_mask = db.points_mask
    db_label = db.labels
    db_difficulty = db.difficulties

    num_objects_in_database = tf.shape(db_points_mask)[0]

    # Filter number of objects.
    # Valid (unmasked) points per object determine the min/max-points filter.
    points_per_object = tf.reduce_sum(tf.cast(db_points_mask, tf.int32), axis=1)
    example_filter = points_per_object >= p.filter_min_points
    if p.filter_max_points:
      example_filter = tf.math.logical_and(
          example_filter, points_per_object <= p.filter_max_points)

    if p.difficulty_sampling_probability is not None:
      # Sample db based on difficulity of each example.
      # Rejection sampling: each object is kept with the probability
      # configured for its difficulty bucket.
      sampling_prob = p.difficulty_sampling_probability
      db_difficulty_probability = tf.zeros_like(db_difficulty, dtype=tf.float32)
      for difficulty_idx, difficulty_prob in enumerate(sampling_prob):
        db_difficulty_probability += (
            tf.cast(tf.equal(db_difficulty, difficulty_idx), tf.float32) *
            difficulty_prob)

      sampled_filter = tf.random.uniform(
          tf.shape(example_filter),
          minval=0,
          maxval=1,
          dtype=tf.float32,
          seed=p.random_seed)
      sampled_filter = sampled_filter < db_difficulty_probability
      example_filter &= sampled_filter
    else:
      # Filter out db examples below min difficulty
      example_filter = tf.math.logical_and(
          example_filter, db_difficulty >= p.filter_min_difficulty)

    example_filter = tf.reshape(example_filter, [num_objects_in_database])
    db_label = tf.reshape(db_label, [num_objects_in_database])
    if p.class_sampling_probability is not None:
      # Sample example based on its class probability.
      sampling_prob = p.class_sampling_probability
      db_class_probability = tf.zeros_like(db_label, dtype=tf.float32)

      for class_idx, class_prob in enumerate(sampling_prob):
        db_class_probability += (
            tf.cast(tf.equal(db_label, class_idx), tf.float32) * class_prob)

      sampled_filter = tf.random.uniform(
          tf.shape(example_filter),
          minval=0,
          maxval=1,
          dtype=tf.float32,
          seed=p.random_seed)
      sampled_filter = sampled_filter < db_class_probability
      example_filter &= sampled_filter
    elif p.label_filter:
      # Filter based on labels.
      # Create a label filter where all is false
      valid_labels = tf.constant(p.label_filter)
      label_mask = tf.reduce_any(
          tf.equal(db_label[..., tf.newaxis], valid_labels), axis=1)
      example_filter = tf.math.logical_and(example_filter, label_mask)
    return example_filter
# TODO(vrv): Create an overlap filter that also ensures that boxes don't
# overlap with groundtruth points, so that the scenes are more plausible.
  def _FilterIndices(self, gt_bboxes_3d, db_bboxes, db_idx):
    """Identify database boxes that don't overlap with other boxes.

    Args:
      gt_bboxes_3d: [G, 7] groundtruth boxes already present in the scene.
      db_bboxes: [N, 7] all database boxes.
      db_idx: [K] candidate indices into db_bboxes.

    Returns:
      The subset of db_idx whose boxes overlap neither a scene box nor an
      earlier-indexed database box.
    """
    # We accomplish overlap filtering by first computing the pairwise 3D IoU of
    # all boxes (concatenated) as a way of computing pairwise box overlaps.
    num_gt_bboxes = tf.shape(gt_bboxes_3d)[0]
    filtered_bboxes = tf.gather(db_bboxes, db_idx)
    all_bboxes = tf.concat([gt_bboxes_3d, filtered_bboxes], axis=0)
    pairwise_overlap = ops.pairwise_iou3d(all_bboxes, all_bboxes)

    # We now have an M x M matrix with 1s on the diagonal and non-zero entries
    # whenever a box collides with another.
    #
    # To increase the number of boxes selected, we filter the upper triangular
    # entries so that the boxes are chosen greedily: boxes with smaller indices
    # will be selected before later boxes, because earlier boxes will not appear
    # to collide with later boxes, but later boxes may collide with earlier
    # ones.
    pairwise_overlap = tf.linalg.band_part(pairwise_overlap, -1, 0)

    # We compute the sum of the IoU overlaps for all database boxes.
    db_overlap_sums = tf.reduce_sum(pairwise_overlap[num_gt_bboxes:], axis=1)

    # Those boxes that don't overlap with any other boxes will only have
    # a 1.0 IoU with itself.
    non_overlapping_boxes = tf.reshape(db_overlap_sums <= 1., [-1])

    # Filter to select only those object ids that pass this filter.
    db_idx = tf.boolean_mask(db_idx, non_overlapping_boxes)
    return db_idx
  def TransformFeatures(self, features):
    """Augments the scene with objects sampled from the groundtruth database.

    Args:
      features: NestedMap with lasers.points_xyz / lasers.points_feature
        (optionally lasers.points_padding) and labels.bboxes_3d /
        labels.bboxes_3d_mask / labels.labels.

    Returns:
      The same NestedMap with points, features, boxes, masks and labels
      extended by the sampled database objects.
    """
    p = self.params
    tf.logging.info('Loading groundtruth database at %s' %
                    (p.groundtruth_database))
    db = p.groundtruth_database.Instantiate().BuildDataSource(self._ReadDB).data
    original_features_shape = tf.shape(features.lasers.points_feature)
    # Compute the number of bboxes to augment.
    num_bboxes_in_scene = tf.reduce_sum(
        tf.cast(features.labels.bboxes_3d_mask, tf.int32))
    max_bboxes = tf.shape(features.labels.bboxes_3d_mask)[0]
    num_augmented_bboxes = tf.minimum(max_bboxes - num_bboxes_in_scene,
                                      p.max_augmented_bboxes)
    # Compute an object index over all objects in the database.
    num_objects_in_database = tf.shape(db.points_xyz)[0]
    db_idx = tf.range(num_objects_in_database)
    # Find those indices whose examples pass the filters, and select only those
    # indices.
    example_filter = self._CreateExampleFilter(db)
    db_idx = tf.boolean_mask(db_idx, example_filter)
    # At this point, we might still have a large number of object candidates,
    # from which we only need a sample.
    # To reduce the amount of computation, we randomly subsample to slightly
    # more than we want to augment.
    db_idx = tf.random.shuffle(
        db_idx, seed=p.random_seed)[0:num_augmented_bboxes * 5]
    # After filtering, further filter out the db boxes that would occlude with
    # other boxes (including other database boxes).
    #
    # Gather the filtered ground truth bounding boxes according to the mask, so
    # we can compute overlaps below.
    gt_bboxes_3d_mask = tf.cast(features.labels.bboxes_3d_mask, tf.bool)
    gt_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d, gt_bboxes_3d_mask)
    gt_bboxes_3d = py_utils.HasShape(gt_bboxes_3d, [num_bboxes_in_scene, 7])
    db_idx = self._FilterIndices(gt_bboxes_3d, db.bboxes_3d, db_idx)
    # From the filtered object ids, select only as many boxes as we need.
    shuffled_idx = db_idx[0:num_augmented_bboxes]
    # Update to the actual number selected, which may be fewer than requested
    # after all the filtering above.
    num_augmented_bboxes = tf.shape(shuffled_idx)[0]
    # Gather based off the indices.
    sampled_points_xyz = tf.gather(db.points_xyz, shuffled_idx)
    sampled_points_feature = tf.gather(db.points_feature, shuffled_idx)
    sampled_mask = tf.reshape(
        tf.gather(db.points_mask, shuffled_idx),
        [num_augmented_bboxes, p.max_num_points_per_bbox])
    sampled_bboxes = tf.gather(db.bboxes_3d, shuffled_idx)
    sampled_labels = tf.gather(db.labels, shuffled_idx)
    # Mask points/features.
    sampled_points_xyz = tf.boolean_mask(sampled_points_xyz, sampled_mask)
    sampled_points_feature = tf.boolean_mask(sampled_points_feature,
                                             sampled_mask)
    # Flatten before concatenation with ground truths.
    sampled_points_xyz = tf.reshape(sampled_points_xyz, [-1, 3])
    sampled_points_feature = tf.reshape(sampled_points_feature,
                                        [-1, original_features_shape[-1]])
    sampled_bboxes = tf.reshape(sampled_bboxes, [-1, 7])
    # Concatenate the samples with the ground truths.
    if 'points_padding' in features.lasers:
      points_mask = tf.cast(1. - features.lasers.points_padding, tf.bool)
      # Densify the original points.
      dense_points_xyz = tf.boolean_mask(features.lasers.points_xyz,
                                         points_mask)
      dense_points_feature = tf.boolean_mask(features.lasers.points_feature,
                                             points_mask)
      # Concatenate the dense original points with our new sampled points.
      points_xyz = tf.concat([dense_points_xyz, sampled_points_xyz], axis=0)
      points_feature = tf.concat([dense_points_feature, sampled_points_feature],
                                 axis=0)
      # Pad / trim back to the original fixed sizes so downstream shape
      # expectations are preserved.
      original_points_shape = tf.shape(features.lasers.points_xyz)
      features.lasers.points_xyz = py_utils.PadOrTrimTo(points_xyz,
                                                        original_points_shape)
      features.lasers.points_feature = py_utils.PadOrTrimTo(
          points_feature, original_features_shape)
      # Compute the modified mask / padding.
      final_points_mask = py_utils.PadOrTrimTo(
          tf.ones(tf.shape(points_xyz)[0]),
          tf.shape(features.lasers.points_padding))
      features.lasers.points_padding = 1. - final_points_mask
    else:
      points_xyz = tf.concat([features.lasers.points_xyz, sampled_points_xyz],
                             axis=0)
      points_feature = tf.concat(
          [features.lasers.points_feature, sampled_points_feature], axis=0)
      features.lasers.points_xyz = points_xyz
      features.lasers.points_feature = points_feature
    # Reconstruct a new, dense, bboxes_3d vector that includes the filtered
    # groundtruth bounding boxes followed by the database augmented boxes.
    bboxes_3d = tf.concat([gt_bboxes_3d, sampled_bboxes], axis=0)
    bboxes_3d = py_utils.PadOrTrimTo(bboxes_3d, [max_bboxes, 7])
    features.labels.bboxes_3d = bboxes_3d
    bboxes_3d_mask = tf.ones(
        num_bboxes_in_scene + num_augmented_bboxes, dtype=tf.float32)
    features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
        bboxes_3d_mask, [max_bboxes])
    gt_labels = tf.boolean_mask(features.labels.labels, gt_bboxes_3d_mask)
    gt_labels = py_utils.HasShape(gt_labels, [num_bboxes_in_scene])
    labels = tf.concat([gt_labels, sampled_labels], axis=0)
    features.labels.labels = py_utils.PadOrTrimTo(labels, [max_bboxes])
    return features
  def TransformShapes(self, shapes):
    """Returns shapes unchanged; this preprocessor does not alter shapes."""
    return shapes
  def TransformDTypes(self, dtypes):
    """Returns dtypes unchanged; this preprocessor does not alter dtypes."""
    return dtypes
class FrustumDropout(Preprocessor):
  """Randomly drops out points in a frustum.

  All points are first converted to spherical coordinates, and then a point
  is randomly selected. All points in the frustum around that point within
  a given phi, theta angle width and distance to the original greater than
  a given value are dropped with probability = 1 - keep_prob.

  Here, we can specify whether the dropped frustum is the union or intersection
  of the phi and theta angle filters.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]

  Optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Modifies the following features:
    lasers.points_xyz, lasers.points_feature, lasers.points_padding with points
    randomly dropped out.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('theta_width', 0.03, 'Theta angle width for dropping points.')
    p.Define('phi_width', 0.0, 'Phi angle width for dropping points.')
    p.Define(
        'distance', 0.0, 'Drop points that have larger distance to the'
        'origin than the value given here.')
    p.Define(
        'keep_prob', 0.0, 'keep_prob: 1. = drop no points in the Frustum,'
        '0 = drop all points, between 0 and 1 = down sample the points.')
    p.Define(
        'drop_type', 'union', 'Drop either the union or intersection of '
        'phi width and theta width.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    # Validate hyperparameters eagerly so misconfiguration fails at
    # construction time rather than deep inside the input pipeline.
    if p.phi_width < 0:
      raise ValueError('phi_width must be >= 0, phi_width={}'.format(
          p.phi_width))
    if p.theta_width < 0:
      raise ValueError('theta_width must be >= 0, theta_width={}'.format(
          p.theta_width))
    if p.distance < 0:
      raise ValueError('distance must be >= 0, distance={}'.format(p.distance))
    if p.keep_prob < 0 or p.keep_prob > 1:
      raise ValueError('keep_prob must be >= 0 and <=1, keep_prob={}'.format(
          p.keep_prob))
    if p.drop_type not in ['union', 'intersection']:
      raise ValueError('drop_type must be union or intersection ,'
                       'drop_type={}'.format(p.drop_type))

  def TransformFeatures(self, features):
    """Drops points that fall inside a randomly selected frustum.

    Args:
      features: NestedMap with lasers.points_xyz, lasers.points_feature and
        optionally lasers.points_padding.

    Returns:
      The features NestedMap with dropped points removed (or marked as
      padded, when points_padding is present).
    """
    p = self.params
    points_xyz = features.lasers.points_xyz
    points_feature = features.lasers.points_feature
    if 'points_padding' in features.lasers:
      points_padding = features.lasers.points_padding
    else:
      points_padding = None
    if points_padding is not None:
      # With padding, only real (unpadded) points may seed the frustum.
      points_mask = tf.cast(1 - points_padding, tf.bool)
      num_total_points = py_utils.GetShape(points_mask)[0]
      real_points_idx = tf.boolean_mask(
          tf.range(0, num_total_points, dtype=tf.int32), points_mask)
      num_points = py_utils.GetShape(real_points_idx)[0]
    else:
      points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)
      num_total_points = py_utils.GetShape(points_mask)[0]
      num_points = py_utils.GetShape(points_xyz)[0]
    r, theta, phi = tf.unstack(
        geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)

    def _PickRandomPoint():
      # Select a random valid point to be the frustum center.
      # NOTE(review): this draw does not pass p.random_seed, unlike other
      # preprocessors in this file -- confirm unseeded sampling is intended.
      point_idx = tf.random.uniform((),
                                    minval=0,
                                    maxval=num_points,
                                    dtype=tf.int32)
      if points_padding is not None:
        point_idx = real_points_idx[point_idx]
      return point_idx

    # Pick a point at random and drop all points that are near that point in the
    # frustum for distance larger than r; repeat this for both theta and phi.
    if p.theta_width > 0:
      theta_half_width = p.theta_width / 2.
      point_idx = _PickRandomPoint()
      # Points within theta width and further than distance will be dropped.
      theta_drop_filter = ((theta < (theta[point_idx] + theta_half_width)) &
                           (theta > (theta[point_idx] - theta_half_width)) &
                           (r > p.distance))
    else:
      theta_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)
    if p.phi_width > 0:
      phi_half_width = p.phi_width / 2.
      point_idx = _PickRandomPoint()
      # Points within phi width and further than distance will be dropped.
      phi_drop_filter = ((phi < (phi[point_idx] + phi_half_width)) &
                         (phi >
                          (phi[point_idx] - phi_half_width)) & (r > p.distance))
    else:
      phi_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)
    # Create drop_filter by combining filters. This contains a filter for the
    # points to be removed. One can use the intersection method to limit the
    # dropped points be within both phi and theta ranges.
    if p.drop_type == 'union':
      drop_filter = theta_drop_filter | phi_drop_filter
    elif p.drop_type == 'intersection':
      drop_filter = theta_drop_filter & phi_drop_filter
    if p.keep_prob == 0:
      # Drop all points in drop_filter.
      down_sampling_filter = drop_filter
    else:
      # Randomly drop points in drop_filter based on keep_prob.
      sampling_drop_filter = tf.random.uniform([num_total_points],
                                               minval=0,
                                               maxval=1,
                                               dtype=tf.float32)
      # Points greater than the threshold (keep_prob) will be dropped.
      sampling_drop_filter = sampling_drop_filter > p.keep_prob
      # Instead of dropping all points in the frustum, we drop out points
      # that are in the selected frustum (drop_filter).
      down_sampling_filter = drop_filter & sampling_drop_filter
    points_mask &= ~down_sampling_filter
    if points_padding is not None:
      # Padded representation: keep tensors fixed-size and update padding.
      features.lasers.points_padding = 1 - tf.cast(points_mask, tf.float32)
    else:
      # Unpadded representation: physically remove the dropped points.
      features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
      features.lasers.points_feature = tf.boolean_mask(points_feature,
                                                       points_mask)
    return features

  def TransformShapes(self, shapes):
    """Returns shapes unchanged."""
    return shapes

  def TransformDTypes(self, dtypes):
    """Returns dtypes unchanged."""
    return dtypes
class RepeatPreprocessor(Preprocessor):
  """Applies a single subprocessor to the input a fixed number of times.

  The configured subprocessor runs sequentially ``repeat_count`` times; each
  run consumes the output of the previous one.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('repeat_count', 1, 'Number of times the subprocessor is applied to'
             ' features.')
    p.Define('subprocessor', None, 'One of the input preprocessors.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.subprocessor is None:
      raise ValueError('No subprocessor was specified for RepeatPreprocessor.')
    if p.repeat_count < 0 or not isinstance(p.repeat_count, int):
      raise ValueError(
          'repeat_count must be >= 0 and int, repeat_count={}'.format(
              p.repeat_count))
    self.CreateChild('subprocessor', p.subprocessor)

  def TransformFeatures(self, features):
    """Feeds features through the subprocessor repeat_count times."""
    for _ in range(self.params.repeat_count):
      features = self.subprocessor.FPropDefaultTheta(features)
    return features

  def TransformShapes(self, shapes):
    """Feeds shapes through the subprocessor repeat_count times."""
    for _ in range(self.params.repeat_count):
      shapes = self.subprocessor.TransformShapes(shapes)
    return shapes

  def TransformDTypes(self, dtypes):
    """Feeds dtypes through the subprocessor repeat_count times."""
    for _ in range(self.params.repeat_count):
      dtypes = self.subprocessor.TransformDTypes(dtypes)
    return dtypes
class RandomApplyPreprocessor(Preprocessor):
  """Randomly apply a preprocessor with certain probability.

  This preprocessor takes a preprocessor as a subprocessor and apply the
  subprocessor to features with certain probability.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('prob', 1.0, 'The probability the subprocessor being executed.')
    p.Define('subprocessor', None, 'Params for an input preprocessor.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.subprocessor is None:
      # Fixed: this error message previously named RepeatPreprocessor
      # (copy-paste from that class), which made failures misleading.
      raise ValueError(
          'No subprocessor was specified for RandomApplyPreprocessor.')
    if p.prob < 0 or p.prob > 1 or not isinstance(p.prob, float):
      raise ValueError(
          'prob must be >= 0 and <=1 and float type, prob={}'.format(p.prob))
    self.CreateChild('subprocessor', p.subprocessor)

  def TransformFeatures(self, features):
    """Applies the subprocessor to features with probability p.prob."""
    p = self.params
    choice = tf.random.uniform(
        (), minval=0.0, maxval=1.0, seed=p.random_seed) <= p.prob
    # Features is passed downstream and may be modified, we make deep copies
    # here to use with tf.cond to avoid having tf.cond access updated
    # versions. Note that we need one copy for each branch in case the branches
    # further modify features.
    features_0, features_1 = features.DeepCopy(), features.DeepCopy()
    features = tf.cond(choice,
                       lambda: self.subprocessor.TransformFeatures(features_0),
                       lambda: features_1)
    return features

  def TransformShapes(self, shapes):
    """Verifies the subprocessor preserves shapes; returns them unchanged."""
    shapes_transformed = self.subprocessor.TransformShapes(shapes)
    if not shapes.IsCompatible(shapes_transformed):
      # Fixed: the concatenated message previously read "transformedshapes"
      # because the implicit string join lacked a separating space.
      raise ValueError(
          'NestedMap structures are different between shapes and transformed '
          'shapes. Original shapes: {}. Transformed shapes: {}'.format(
              shapes, shapes_transformed))

    def IsCompatibleWith(a, b):
      return a.is_compatible_with(b)

    if not all(
        py_utils.Flatten(
            py_utils.Transform(IsCompatibleWith, shapes, shapes_transformed))):
      raise ValueError(
          'Shapes after transformation - {} are different from original '
          'shapes - {}.'.format(shapes_transformed, shapes))
    return shapes

  def TransformDTypes(self, dtypes):
    """Verifies the subprocessor preserves dtypes; returns them unchanged."""
    transformed_dtypes = self.subprocessor.TransformDTypes(dtypes)
    if transformed_dtypes != dtypes:
      raise ValueError(
          'DTypes after transformation of preprocessor - {} should be '
          'the same as {}, but get {}.'.format(self.params.subprocessor, dtypes,
                                               transformed_dtypes))
    return dtypes
class ConstantPreprocessor(Preprocessor):
  """Preprocessor that produces specified constant values in a nested output."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'constants', py_utils.NestedMap(),
        'Map of key names to numpy arrays of constant values to use. '
        'Must be a NestedMap or dict convertible to NestedMap.')
    return p

  def _Constants(self):
    """Returns the configured constants wrapped in a NestedMap."""
    return py_utils.NestedMap(self.params.constants)

  def TransformFeatures(self, features):
    """Adds a tf.constant for each configured value to features."""
    features.update(self._Constants().Transform(tf.constant))
    return features

  def TransformShapes(self, shapes):
    """Adds the static shape of each configured value to shapes."""
    static_shapes = self._Constants().Transform(
        lambda x: tf.TensorShape(np.array(x).shape))
    shapes.update(static_shapes)
    return shapes

  def TransformDTypes(self, dtypes):
    """Adds the dtype of each configured value to dtypes."""
    value_dtypes = self._Constants().Transform(
        lambda x: tf.as_dtype(np.array(x).dtype))
    dtypes.update(value_dtypes)
    return dtypes
class IdentityPreprocessor(Preprocessor):
  """Preprocessor that passes all inputs through.

  This may be useful for situations where one wants a 'no-op' preprocessor, such
  as being able to randomly choose to do nothing among a set of preprocessor
  choices.
  """

  def TransformFeatures(self, features):
    """Returns features unchanged."""
    return features

  def TransformShapes(self, shapes):
    """Returns shapes unchanged."""
    return shapes

  def TransformDTypes(self, dtypes):
    """Returns dtypes unchanged."""
    return dtypes
class RandomChoicePreprocessor(Preprocessor):
  """Randomly applies one of several subprocessors, with scheduled weights.

  Each entry of p.subprocessors pairs a preprocessor with a schedule.  At each
  step, the schedules' current values are used as relative (unnormalized)
  selection weights.

  For example, if p.subprocessors = [preprocessor1, preprocessor2] and the
  weights are [1., 2.], then preprocessor1 will be applied with probability 1/3,
  and preprocessor2 will be applied with probability 2/3.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'subprocessors', [],
        'Params for preprocessors. Each value should be a tuple of '
        '(Preprocessor.Params(), BaseSchedule.Params()), where the schedule '
        'defines the weights to use over time.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if not p.subprocessors:
      raise ValueError('No subprocessors were specified.')
    subprocessors, schedules = zip(*p.subprocessors)

    def _FilterNonSchedules(v):
      # Returns True for values that are NOT schedule Params.
      # NOTE(review): if v has no `cls` attribute, issubclass(False, ...)
      # raises TypeError instead of returning True -- confirm intended.
      return not issubclass(getattr(v, 'cls', False), schedule.BaseSchedule)

    invalid_values = [_FilterNonSchedules(s) for s in schedules]
    if any(invalid_values):
      raise TypeError('Not all schedule values were schedules: '
                      f'{invalid_values}')
    self.CreateChildren('subprocessors', list(subprocessors))
    self.CreateChildren('schedules', list(schedules))

  def TransformFeatures(self, features):
    """Applies one randomly selected subprocessor to features."""
    p = self.params
    choice_list = []
    weight_list = []
    # Pass a unique copy of the input to each branch, in case the
    # subprocessor destructively modifies the features in unexpected ways.
    for subp, sched in zip(self.subprocessors, self.schedules):
      choice_list.append(
          lambda subp=subp: subp.TransformFeatures(features.DeepCopy()))
      weight_list.append(sched.Value())
    weight_tensor = tf.stack(weight_list)
    # Sample one branch index with probability proportional to the weights.
    chosen_bin = tf.random.categorical(
        tf.math.log(weight_tensor[tf.newaxis]),
        1,
        seed=p.random_seed,
        dtype=tf.int32)[0, 0]
    features = tf.switch_case(chosen_bin, branch_fns=choice_list)
    return features

  def TransformShapes(self, shapes):
    """Checks all subprocessors agree on output shapes and returns them."""
    transformed_shapes = [
        subp.TransformShapes(shapes.DeepCopy()) for subp in self.subprocessors
    ]
    if not all(transformed_shapes[0] == curr for curr in transformed_shapes):
      raise ValueError('Shapes after transformations were not identical: '
                       f'{transformed_shapes}')
    return transformed_shapes[0]

  def TransformDTypes(self, dtypes):
    """Checks all subprocessors agree on output dtypes and returns them."""
    transformed_dtypes = [
        subp.TransformDTypes(dtypes.DeepCopy()) for subp in self.subprocessors
    ]
    if not all(transformed_dtypes[0] == curr for curr in transformed_dtypes):
      raise ValueError('DTypes after transformations were not identical: '
                       f'{transformed_dtypes}')
    return transformed_dtypes[0]
class SparseSampler(Preprocessor):
  """Fused SparseCenterSelector and SparseCellGatherFeatures.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]

  Adds the following features:
    anchor_centers - [num_centers, 3] - Floating point output containing the
      center (x, y, z) locations for tiling anchor boxes.

    cell_center_xyz - [num_centers, 3] - Floating point output containing
      the center (x, y, z) locations for each cell to featurize.

    cell_center_padding - [num_centers] - 0/1 padding for each center.

    cell_points_xyz - [num_centers, num_neighbors, 3] - Floating point
      output containing the (x, y, z) locations for each point for a given
      center.

    cell_feature - [num_centers, num_neighbors, F] - Floating point output
      containing the features for each point for a given center.

    cell_points_padding - [num_centers, num_neighbors] - 0/1 padding
      for the points in each cell.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('center_selector', 'farthest', 'Method to sample centers. '
             'Valid options - uniform, farthest.')
    p.Define('neighbor_sampler', 'uniform', 'Method to select neighbors. '
             'Valid options - uniform, closest.')
    p.Define('num_centers', 16, 'The number of centers to sample.')
    p.Define(
        'features_preparation_layers', [],
        'A list of Params for layers to run on the features before '
        'performing farthest point sampling. For example, one may wish to '
        'drop points out of frustum for KITTI before selecting centers. '
        'Note that these layers will not mutate the original features, '
        'instead, a copy will be made.')
    p.Define(
        'keep_z_range', (-np.inf, np.inf),
        'Only points that have z coordinates within this range are kept. '
        'Approximate ground-removal can be performed by specifying a '
        'lower-bound on the z-range.')
    p.Define('num_neighbors', 64, 'Sample these many points within the '
             'neighorhood.')
    p.Define(
        'max_distance', 1.0, 'Points with L2 distances from a center '
        'larger than this threshold are not considered to be in the '
        'neighborhood.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.features_preparation_layers:
      self.CreateChildren('features_preparation_layers',
                          p.features_preparation_layers)

  def TransformFeatures(self, features):
    """Samples cell centers and gathers each center's point neighborhood.

    Args:
      features: NestedMap with lasers.points_xyz and lasers.points_feature
        (optionally lasers.points_padding).

    Returns:
      The features NestedMap with the cell/center keys documented on the
      class added.
    """
    p = self.params
    n, m = p.num_centers, p.num_neighbors
    # Preparation layers run on a copy so the original features (used for
    # gathering below) are not mutated.
    prepared_features = features.DeepCopy()
    if p.features_preparation_layers:
      for prep_layer in self.features_preparation_layers:
        prepared_features = prep_layer.FPropDefaultTheta(prepared_features)
    points_data = prepared_features.lasers
    points = py_utils.HasShape(points_data.points_xyz, [-1, 3])
    if 'points_padding' in points_data:
      points_mask = 1 - points_data.points_padding
      points = tf.boolean_mask(points, points_mask)
    # If num_points < num_centers, pad points to have at least num_centers
    # points.
    num_points = tf.shape(points)[0]
    required_num_points = tf.maximum(num_points, p.num_centers)
    zeros = tf.zeros([required_num_points - num_points, 3])
    points = tf.concat([points, zeros], axis=0)
    num_seeded_points = points_data.get('num_seeded_points', 0)
    neighbor_algorithm = 'auto'
    # Based on benchmarks, the hash solution works better when the number of
    # centers is >= 16 and there are at least 10k points per point cloud.
    if p.num_centers >= 16:
      neighbor_algorithm = 'hash'
    centers, center_paddings, indices, indices_paddings = ops.sample_points(
        points=tf.expand_dims(points, 0),
        points_padding=tf.zeros([1, required_num_points], tf.float32),
        num_seeded_points=num_seeded_points,
        center_selector=p.center_selector,
        neighbor_sampler=p.neighbor_sampler,
        neighbor_algorithm=neighbor_algorithm,
        num_centers=p.num_centers,
        center_z_min=p.keep_z_range[0],
        center_z_max=p.keep_z_range[1],
        num_neighbors=p.num_neighbors,
        max_distance=p.max_distance,
        random_seed=p.random_seed if p.random_seed else -1)
    # ops.sample_points operates on a batch; strip the singleton batch dim.
    centers = py_utils.HasShape(centers, [1, n])[0, :]
    center_paddings = py_utils.HasShape(center_paddings, [1, n])[0, :]
    indices = py_utils.HasShape(indices, [1, n, m])[0, :]
    indices_paddings = py_utils.HasShape(indices_paddings, [1, n, m])[0, :]
    features.cell_center_padding = center_paddings
    features.cell_center_xyz = py_utils.HasShape(
        tf.gather(points, centers), [n, 3])
    # Anchor boxes are tiled at the cell centers.
    features.anchor_centers = features.cell_center_xyz
    features.cell_points_xyz = py_utils.HasShape(
        tf.gather(points, indices), [n, m, 3])
    features.cell_feature = tf.gather(points_data.points_feature, indices)
    features.cell_points_padding = indices_paddings
    return features

  def TransformShapes(self, shapes):
    """Adds static shapes for the keys produced by TransformFeatures."""
    p = self.params
    n, m, f = p.num_centers, p.num_neighbors, shapes.lasers.points_feature[-1]
    shapes.anchor_centers = tf.TensorShape([n, 3])
    shapes.cell_center_padding = tf.TensorShape([n])
    shapes.cell_center_xyz = tf.TensorShape([n, 3])
    shapes.cell_points_xyz = tf.TensorShape([n, m, 3])
    shapes.cell_feature = tf.TensorShape([n, m, f])
    shapes.cell_points_padding = tf.TensorShape([n, m])
    return shapes

  def TransformDTypes(self, dtypes):
    """Adds dtypes for the keys produced by TransformFeatures."""
    dtypes.anchor_centers = tf.float32
    dtypes.cell_center_padding = tf.float32
    dtypes.cell_center_xyz = tf.float32
    dtypes.cell_points_xyz = tf.float32
    dtypes.cell_feature = tf.float32
    dtypes.cell_points_padding = tf.float32
    return dtypes
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
68c3277a9fe9cd3efe646288a0c0b687daeb5f40 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_continua.py | 1d4f1175f6f6eee08a5947b834b37af45e65325d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _CONTINUA():
def __init__(self,):
self.name = "CONTINUA"
self.definitions = continuum
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['continuum']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
144e5a7d1b97218faf780fe0706e3cee01e48160 | 37fdc797f0060a67c1e9318032bc7102d4fd9ecd | /spider/beautifulsoup_test/lib/python3.7/site-packages/twisted/names/test/test_server.py | 1378cd4196e91a2ddb3a28c59f527bcdbe43cc1f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | Change0224/PycharmProjects | 8fa3d23b399c5fb55661a79ca059f3da79847feb | 818ba4fd5dd8bcdaacae490ed106ffda868b6ca4 | refs/heads/master | 2021-02-06T15:37:16.653849 | 2020-03-03T14:30:44 | 2020-03-03T14:30:44 | 243,927,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,264 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.names.server}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyClass
from twisted.internet import defer
from twisted.internet.interfaces import IProtocolFactory
from twisted.names import dns, error, resolve, server
from twisted.python import failure, log
from twisted.trial import unittest
class RaisedArguments(Exception):
    """
    An exception which records the positional and keyword arguments that
    L{raiser} was invoked with.

    @ivar args: the recorded positional arguments, as a L{tuple}.
    @ivar kwargs: the recorded keyword arguments, as a L{dict}.
    """
    def __init__(self, args, kwargs):
        self.args, self.kwargs = args, kwargs
def raiser(*args, **kwargs):
    """
    Unconditionally raise L{RaisedArguments} recording the supplied positional
    and keyword arguments.

    Used as a fake when testing the call signatures of methods and functions.
    """
    raise RaisedArguments(args, kwargs)
class NoResponseDNSServerFactory(server.DNSServerFactory):
    """
    A L{server.DNSServerFactory} subclass which never replies to a received
    message.

    This allows tests to inspect the log output of C{messageReceived} without
    faking or patching the response-generating code which would otherwise run.
    """
    def allowQuery(self, message, protocol, address):
        """
        Deny every query.

        @param message: See L{server.DNSServerFactory.allowQuery}
        @param protocol: See L{server.DNSServerFactory.allowQuery}
        @param address: See L{server.DNSServerFactory.allowQuery}

        @return: L{False}
        @rtype: L{bool}
        """
        return False

    def sendReply(self, protocol, message, address):
        """
        Do nothing instead of sending a reply.

        @param protocol: See L{server.DNSServerFactory.sendReply}
        @param message: See L{server.DNSServerFactory.sendReply}
        @param address: See L{server.DNSServerFactory.sendReply}
        """
class RaisingDNSServerFactory(server.DNSServerFactory):
    """
    A L{server.DNSServerFactory} subclass whose L{allowQuery} raises an
    exception carrying the arguments it received.

    Used for stopping L{messageReceived} early and for asserting on the
    arguments supplied to L{allowQuery}.
    """
    class AllowQueryArguments(Exception):
        """
        Contains positional and keyword arguments in C{args}.
        """

    def allowQuery(self, *args, **kwargs):
        """
        Raise L{AllowQueryArguments} wrapping the supplied arguments.

        @param args: Positional arguments which will be recorded in the raised
            exception.
        @type args: L{tuple}

        @param kwargs: Keyword args which will be recorded in the raised
            exception.
        @type kwargs: L{dict}
        """
        raise self.AllowQueryArguments(args, kwargs)
class RaisingProtocol(object):
    """
    A partial fake L{IProtocol} whose L{writeMessage} raises an exception
    recording whatever it was called with.
    """
    class WriteMessageArguments(Exception):
        """
        Contains positional and keyword arguments in C{args}.
        """

    def writeMessage(self, *args, **kwargs):
        """
        Raise L{WriteMessageArguments} wrapping the supplied arguments.

        @param args: Positional arguments
        @type args: L{tuple}

        @param kwargs: Keyword args
        @type kwargs: L{dict}
        """
        raise self.WriteMessageArguments(args, kwargs)
class NoopProtocol(object):
    """
    A partial fake L{dns.DNSProtocolMixin} whose L{writeMessage} silently
    accepts and discards all arguments.
    """
    def writeMessage(self, *args, **kwargs):
        """
        Accept any arguments and do nothing.

        @param args: Positional arguments
        @type args: L{tuple}

        @param kwargs: Keyword args
        @type kwargs: L{dict}
        """
class RaisingResolver(object):
    """
    A partial fake L{IResolver} whose L{query} raises an exception recording
    whatever it was called with.
    """
    class QueryArguments(Exception):
        """
        Contains positional and keyword arguments in C{args}.
        """

    def query(self, *args, **kwargs):
        """
        Raise L{QueryArguments} wrapping the supplied arguments.

        @param args: Positional arguments
        @type args: L{tuple}

        @param kwargs: Keyword args
        @type kwargs: L{dict}
        """
        raise self.QueryArguments(args, kwargs)
class RaisingCache(object):
    """
    A partial fake L{twisted.names.cache.Cache} whose L{cacheResult} raises an
    exception recording whatever it was called with.
    """
    class CacheResultArguments(Exception):
        """
        Contains positional and keyword arguments in C{args}.
        """

    def cacheResult(self, *args, **kwargs):
        """
        Raise L{CacheResultArguments} wrapping the supplied arguments.

        @param args: Positional arguments
        @type args: L{tuple}

        @param kwargs: Keyword args
        @type kwargs: L{dict}
        """
        raise self.CacheResultArguments(args, kwargs)
def assertLogMessage(testCase, expectedMessages, callable, *args, **kwargs):
    """
    Assert that invoking C{callable} emits exactly C{expectedMessages} to the
    Twisted log.

    XXX: Put this somewhere where it can be re-used elsewhere. See #6677.

    @param testCase: The test case controlling the test which triggers the
        logged messages and on which assertions will be called.
    @type testCase: L{unittest.SynchronousTestCase}

    @param expectedMessages: A L{list} of the expected log messages.
    @type expectedMessages: L{list}

    @param callable: The function which is expected to produce the
        C{expectedMessages} when called.
    @type callable: L{callable}

    @param args: Positional arguments to be passed to C{callable}.
    @type args: L{list}

    @param kwargs: Keyword arguments to be passed to C{callable}.
    @type kwargs: L{dict}
    """
    events = []
    observer = events.append
    log.addObserver(observer)
    testCase.addCleanup(log.removeObserver, observer)
    callable(*args, **kwargs)
    actualMessages = [event['message'][0] for event in events]
    testCase.assertEqual(actualMessages, expectedMessages)
class DNSServerFactoryTests(unittest.TestCase):
"""
Tests for L{server.DNSServerFactory}.
"""
def test_resolverType(self):
"""
L{server.DNSServerFactory.resolver} is a L{resolve.ResolverChain}
instance
"""
self.assertIsInstance(
server.DNSServerFactory().resolver,
resolve.ResolverChain)
def test_resolverDefaultEmpty(self):
"""
L{server.DNSServerFactory.resolver} is an empty L{resolve.ResolverChain}
by default.
"""
self.assertEqual(
server.DNSServerFactory().resolver.resolvers,
[])
def test_authorities(self):
"""
L{server.DNSServerFactory.__init__} accepts an C{authorities}
argument. The value of this argument is a list and is used to extend the
C{resolver} L{resolve.ResolverChain}.
"""
dummyResolver = object()
self.assertEqual(
server.DNSServerFactory(
authorities=[dummyResolver]).resolver.resolvers,
[dummyResolver])
def test_caches(self):
"""
L{server.DNSServerFactory.__init__} accepts a C{caches} argument. The
value of this argument is a list and is used to extend the C{resolver}
L{resolve.ResolverChain}.
"""
dummyResolver = object()
self.assertEqual(
server.DNSServerFactory(
caches=[dummyResolver]).resolver.resolvers,
[dummyResolver])
def test_clients(self):
"""
L{server.DNSServerFactory.__init__} accepts a C{clients} argument. The
value of this argument is a list and is used to extend the C{resolver}
L{resolve.ResolverChain}.
"""
dummyResolver = object()
self.assertEqual(
server.DNSServerFactory(
clients=[dummyResolver]).resolver.resolvers,
[dummyResolver])
def test_resolverOrder(self):
"""
L{server.DNSServerFactory.resolver} contains an ordered list of
authorities, caches and clients.
"""
# Use classes here so that we can see meaningful names in threading_test results
class DummyAuthority(object):
pass
class DummyCache(object):
pass
class DummyClient(object):
pass
self.assertEqual(
server.DNSServerFactory(
authorities=[DummyAuthority],
caches=[DummyCache],
clients=[DummyClient]).resolver.resolvers,
[DummyAuthority, DummyCache, DummyClient])
def test_cacheDefault(self):
"""
L{server.DNSServerFactory.cache} is L{None} by default.
"""
self.assertIsNone(server.DNSServerFactory().cache)
def test_cacheOverride(self):
"""
L{server.DNSServerFactory.__init__} assigns the last object in the
C{caches} list to L{server.DNSServerFactory.cache}.
"""
dummyResolver = object()
self.assertEqual(
server.DNSServerFactory(caches=[object(), dummyResolver]).cache,
dummyResolver)
def test_canRecurseDefault(self):
"""
L{server.DNSServerFactory.canRecurse} is a flag indicating that this
server is capable of performing recursive DNS lookups. It defaults to
L{False}.
"""
self.assertFalse(server.DNSServerFactory().canRecurse)
def test_canRecurseOverride(self):
"""
L{server.DNSServerFactory.__init__} sets C{canRecurse} to L{True} if it
is supplied with C{clients}.
"""
self.assertEqual(
server.DNSServerFactory(clients=[None]).canRecurse, True)
def test_verboseDefault(self):
"""
L{server.DNSServerFactory.verbose} defaults to L{False}.
"""
self.assertFalse(server.DNSServerFactory().verbose)
def test_verboseOverride(self):
"""
L{server.DNSServerFactory.__init__} accepts a C{verbose} argument which
overrides L{server.DNSServerFactory.verbose}.
"""
self.assertTrue(server.DNSServerFactory(verbose=True).verbose)
def test_interface(self):
"""
L{server.DNSServerFactory} implements L{IProtocolFactory}.
"""
self.assertTrue(verifyClass(IProtocolFactory, server.DNSServerFactory))
def test_defaultProtocol(self):
"""
L{server.DNSServerFactory.protocol} defaults to L{dns.DNSProtocol}.
"""
self.assertIs(server.DNSServerFactory.protocol, dns.DNSProtocol)
def test_buildProtocolProtocolOverride(self):
"""
L{server.DNSServerFactory.buildProtocol} builds a protocol by calling
L{server.DNSServerFactory.protocol} with its self as a positional
argument.
"""
class FakeProtocol(object):
factory = None
args = None
kwargs = None
stubProtocol = FakeProtocol()
def fakeProtocolFactory(*args, **kwargs):
stubProtocol.args = args
stubProtocol.kwargs = kwargs
return stubProtocol
f = server.DNSServerFactory()
f.protocol = fakeProtocolFactory
p = f.buildProtocol(addr=None)
self.assertEqual(
(stubProtocol, (f,), {}),
(p, p.args, p.kwargs)
)
def test_verboseLogQuiet(self):
"""
L{server.DNSServerFactory._verboseLog} does not log messages unless
C{verbose > 0}.
"""
f = server.DNSServerFactory()
assertLogMessage(
self,
[],
f._verboseLog,
'Foo Bar'
)
def test_verboseLogVerbose(self):
"""
L{server.DNSServerFactory._verboseLog} logs a message if C{verbose > 0}.
"""
f = server.DNSServerFactory(verbose=1)
assertLogMessage(
self,
['Foo Bar'],
f._verboseLog,
'Foo Bar'
)
def test_messageReceivedLoggingNoQuery(self):
"""
L{server.DNSServerFactory.messageReceived} logs about an empty query if
the message had no queries and C{verbose} is C{>0}.
"""
m = dns.Message()
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Empty query from ('192.0.2.100', 53)"],
f.messageReceived,
message=m, proto=None, address=('192.0.2.100', 53))
def test_messageReceivedLogging1(self):
"""
L{server.DNSServerFactory.messageReceived} logs the query types of all
queries in the message if C{verbose} is set to C{1}.
"""
m = dns.Message()
m.addQuery(name='example.com', type=dns.MX)
m.addQuery(name='example.com', type=dns.AAAA)
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["MX AAAA query from ('192.0.2.100', 53)"],
f.messageReceived,
message=m, proto=None, address=('192.0.2.100', 53))
def test_messageReceivedLogging2(self):
"""
L{server.DNSServerFactory.messageReceived} logs the repr of all queries
in the message if C{verbose} is set to C{2}.
"""
m = dns.Message()
m.addQuery(name='example.com', type=dns.MX)
m.addQuery(name='example.com', type=dns.AAAA)
f = NoResponseDNSServerFactory(verbose=2)
assertLogMessage(
self,
["<Query example.com MX IN> "
"<Query example.com AAAA IN> query from ('192.0.2.100', 53)"],
f.messageReceived,
message=m, proto=None, address=('192.0.2.100', 53))
def test_messageReceivedTimestamp(self):
"""
L{server.DNSServerFactory.messageReceived} assigns a unix timestamp to
the received message.
"""
m = dns.Message()
f = NoResponseDNSServerFactory()
t = object()
self.patch(server.time, 'time', lambda: t)
f.messageReceived(message=m, proto=None, address=None)
self.assertEqual(m.timeReceived, t)
def test_messageReceivedAllowQuery(self):
"""
L{server.DNSServerFactory.messageReceived} passes all messages to
L{server.DNSServerFactory.allowQuery} along with the receiving protocol
and origin address.
"""
message = dns.Message()
dummyProtocol = object()
dummyAddress = object()
f = RaisingDNSServerFactory()
e = self.assertRaises(
RaisingDNSServerFactory.AllowQueryArguments,
f.messageReceived,
message=message, proto=dummyProtocol, address=dummyAddress)
args, kwargs = e.args
self.assertEqual(args, (message, dummyProtocol, dummyAddress))
self.assertEqual(kwargs, {})
def test_allowQueryFalse(self):
"""
If C{allowQuery} returns C{False},
L{server.DNSServerFactory.messageReceived} calls L{server.sendReply}
with a message whose C{rCode} is L{dns.EREFUSED}.
"""
class SendReplyException(Exception):
pass
class RaisingDNSServerFactory(server.DNSServerFactory):
def allowQuery(self, *args, **kwargs):
return False
def sendReply(self, *args, **kwargs):
raise SendReplyException(args, kwargs)
f = RaisingDNSServerFactory()
e = self.assertRaises(
SendReplyException,
f.messageReceived,
message=dns.Message(), proto=None, address=None)
(proto, message, address), kwargs = e.args
self.assertEqual(message.rCode, dns.EREFUSED)
    def _messageReceivedTest(self, methodName, message):
        """
        Assert that the named method is called with the given message when it is
        passed to L{DNSServerFactory.messageReceived}.
        @param methodName: The name of the method which is expected to be
            called.
        @type methodName: L{str}
        @param message: The message which is expected to be passed to the
            C{methodName} method.
        @type message: L{dns.Message}
        """
        # Make it appear to have some queries so that
        # DNSServerFactory.allowQuery allows it.
        message.queries = [None]
        receivedMessages = []
        def fakeHandler(message, protocol, address):
            receivedMessages.append((message, protocol, address))
        protocol = NoopProtocol()
        factory = server.DNSServerFactory(None)
        setattr(factory, methodName, fakeHandler)
        # address is deliberately omitted here; the assertion below shows it
        # is expected to reach the handler as None.
        factory.messageReceived(message, protocol)
        self.assertEqual(receivedMessages, [(message, protocol, None)])
def test_queryMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_QUERY} on to L{DNSServerFactory.handleQuery}.
"""
self._messageReceivedTest(
'handleQuery', dns.Message(opCode=dns.OP_QUERY))
def test_inverseQueryMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_INVERSE} on to L{DNSServerFactory.handleInverseQuery}.
"""
self._messageReceivedTest(
'handleInverseQuery', dns.Message(opCode=dns.OP_INVERSE))
def test_statusMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_STATUS} on to L{DNSServerFactory.handleStatus}.
"""
self._messageReceivedTest(
'handleStatus', dns.Message(opCode=dns.OP_STATUS))
def test_notifyMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_NOTIFY} on to L{DNSServerFactory.handleNotify}.
"""
self._messageReceivedTest(
'handleNotify', dns.Message(opCode=dns.OP_NOTIFY))
def test_updateMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_UPDATE} on to L{DNSServerFactory.handleOther}.
This may change if the implementation ever covers update messages.
"""
self._messageReceivedTest(
'handleOther', dns.Message(opCode=dns.OP_UPDATE))
def test_connectionTracking(self):
"""
The C{connectionMade} and C{connectionLost} methods of
L{DNSServerFactory} cooperate to keep track of all L{DNSProtocol}
objects created by a factory which are connected.
"""
protoA, protoB = object(), object()
factory = server.DNSServerFactory()
factory.connectionMade(protoA)
self.assertEqual(factory.connections, [protoA])
factory.connectionMade(protoB)
self.assertEqual(factory.connections, [protoA, protoB])
factory.connectionLost(protoA)
self.assertEqual(factory.connections, [protoB])
factory.connectionLost(protoB)
self.assertEqual(factory.connections, [])
def test_handleQuery(self):
"""
L{server.DNSServerFactory.handleQuery} takes the first query from the
supplied message and dispatches it to
L{server.DNSServerFactory.resolver.query}.
"""
m = dns.Message()
m.addQuery(b'one.example.com')
m.addQuery(b'two.example.com')
f = server.DNSServerFactory()
f.resolver = RaisingResolver()
e = self.assertRaises(
RaisingResolver.QueryArguments,
f.handleQuery,
message=m, protocol=NoopProtocol(), address=None)
(query,), kwargs = e.args
self.assertEqual(query, m.queries[0])
def test_handleQueryCallback(self):
"""
L{server.DNSServerFactory.handleQuery} adds
L{server.DNSServerFactory.resolver.gotResolverResponse} as a callback to
the deferred returned by L{server.DNSServerFactory.resolver.query}. It
is called with the query response, the original protocol, message and
origin address.
"""
f = server.DNSServerFactory()
d = defer.Deferred()
class FakeResolver(object):
def query(self, *args, **kwargs):
return d
f.resolver = FakeResolver()
gotResolverResponseArgs = []
def fakeGotResolverResponse(*args, **kwargs):
gotResolverResponseArgs.append((args, kwargs))
f.gotResolverResponse = fakeGotResolverResponse
m = dns.Message()
m.addQuery(b'one.example.com')
stubProtocol = NoopProtocol()
dummyAddress = object()
f.handleQuery(message=m, protocol=stubProtocol, address=dummyAddress)
dummyResponse = object()
d.callback(dummyResponse)
self.assertEqual(
gotResolverResponseArgs,
[((dummyResponse, stubProtocol, m, dummyAddress), {})])
def test_handleQueryErrback(self):
"""
L{server.DNSServerFactory.handleQuery} adds
L{server.DNSServerFactory.resolver.gotResolverError} as an errback to
the deferred returned by L{server.DNSServerFactory.resolver.query}. It
is called with the query failure, the original protocol, message and
origin address.
"""
f = server.DNSServerFactory()
d = defer.Deferred()
class FakeResolver(object):
def query(self, *args, **kwargs):
return d
f.resolver = FakeResolver()
gotResolverErrorArgs = []
def fakeGotResolverError(*args, **kwargs):
gotResolverErrorArgs.append((args, kwargs))
f.gotResolverError = fakeGotResolverError
m = dns.Message()
m.addQuery(b'one.example.com')
stubProtocol = NoopProtocol()
dummyAddress = object()
f.handleQuery(message=m, protocol=stubProtocol, address=dummyAddress)
stubFailure = failure.Failure(Exception())
d.errback(stubFailure)
self.assertEqual(
gotResolverErrorArgs,
[((stubFailure, stubProtocol, m, dummyAddress), {})])
def test_gotResolverResponse(self):
"""
L{server.DNSServerFactory.gotResolverResponse} accepts a tuple of
resource record lists and triggers a response message containing those
resource record lists.
"""
f = server.DNSServerFactory()
answers = []
authority = []
additional = []
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.gotResolverResponse,
(answers, authority, additional),
protocol=RaisingProtocol(), message=dns.Message(), address=None)
(message,), kwargs = e.args
self.assertIs(message.answers, answers)
self.assertIs(message.authority, authority)
self.assertIs(message.additional, additional)
def test_gotResolverResponseCallsResponseFromMessage(self):
"""
L{server.DNSServerFactory.gotResolverResponse} calls
L{server.DNSServerFactory._responseFromMessage} to generate a response.
"""
factory = NoResponseDNSServerFactory()
factory._responseFromMessage = raiser
request = dns.Message()
request.timeReceived = 1
e = self.assertRaises(
RaisedArguments,
factory.gotResolverResponse,
([], [], []),
protocol=None, message=request, address=None
)
self.assertEqual(
((), dict(message=request, rCode=dns.OK,
answers=[], authority=[], additional=[])),
(e.args, e.kwargs)
)
def test_responseFromMessageNewMessage(self):
"""
L{server.DNSServerFactory._responseFromMessage} generates a response
message which is a copy of the request message.
"""
factory = server.DNSServerFactory()
request = dns.Message(answer=False, recAv=False)
response = factory._responseFromMessage(message=request),
self.assertIsNot(request, response)
def test_responseFromMessageRecursionAvailable(self):
"""
L{server.DNSServerFactory._responseFromMessage} generates a response
message whose C{recAV} attribute is L{True} if
L{server.DNSServerFactory.canRecurse} is L{True}.
"""
factory = server.DNSServerFactory()
factory.canRecurse = True
response1 = factory._responseFromMessage(
message=dns.Message(recAv=False))
factory.canRecurse = False
response2 = factory._responseFromMessage(
message=dns.Message(recAv=True))
self.assertEqual(
(True, False),
(response1.recAv, response2.recAv))
def test_responseFromMessageTimeReceived(self):
"""
L{server.DNSServerFactory._responseFromMessage} generates a response
message whose C{timeReceived} attribute has the same value as that found
on the request.
"""
factory = server.DNSServerFactory()
request = dns.Message()
request.timeReceived = 1234
response = factory._responseFromMessage(message=request)
self.assertEqual(request.timeReceived, response.timeReceived)
def test_responseFromMessageMaxSize(self):
"""
L{server.DNSServerFactory._responseFromMessage} generates a response
message whose C{maxSize} attribute has the same value as that found
on the request.
"""
factory = server.DNSServerFactory()
request = dns.Message()
request.maxSize = 0
response = factory._responseFromMessage(message=request)
self.assertEqual(request.maxSize, response.maxSize)
def test_messageFactory(self):
"""
L{server.DNSServerFactory} has a C{_messageFactory} attribute which is
L{dns.Message} by default.
"""
self.assertIs(dns.Message, server.DNSServerFactory._messageFactory)
def test_responseFromMessageCallsMessageFactory(self):
"""
L{server.DNSServerFactory._responseFromMessage} calls
C{dns._responseFromMessage} to generate a response
message from the request message. It supplies the request message and
other keyword arguments which should be passed to the response message
initialiser.
"""
factory = server.DNSServerFactory()
self.patch(dns, '_responseFromMessage', raiser)
request = dns.Message()
e = self.assertRaises(
RaisedArguments,
factory._responseFromMessage,
message=request, rCode=dns.OK
)
self.assertEqual(
((), dict(responseConstructor=factory._messageFactory,
message=request, rCode=dns.OK, recAv=factory.canRecurse,
auth=False)),
(e.args, e.kwargs)
)
def test_responseFromMessageAuthoritativeMessage(self):
"""
L{server.DNSServerFactory._responseFromMessage} marks the response
message as authoritative if any of the answer records are authoritative.
"""
factory = server.DNSServerFactory()
response1 = factory._responseFromMessage(
message=dns.Message(), answers=[dns.RRHeader(auth=True)])
response2 = factory._responseFromMessage(
message=dns.Message(), answers=[dns.RRHeader(auth=False)])
self.assertEqual(
(True, False),
(response1.auth, response2.auth),
)
def test_gotResolverResponseLogging(self):
"""
L{server.DNSServerFactory.gotResolverResponse} logs the total number of
records in the response if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
answers = [dns.RRHeader()]
authority = [dns.RRHeader()]
additional = [dns.RRHeader()]
assertLogMessage(
self,
["Lookup found 3 records"],
f.gotResolverResponse,
(answers, authority, additional),
protocol=NoopProtocol(), message=dns.Message(), address=None)
def test_gotResolverResponseCaching(self):
"""
L{server.DNSServerFactory.gotResolverResponse} caches the response if at
least one cache was provided in the constructor.
"""
f = NoResponseDNSServerFactory(caches=[RaisingCache()])
m = dns.Message()
m.addQuery(b'example.com')
expectedAnswers = [dns.RRHeader()]
expectedAuthority = []
expectedAdditional = []
e = self.assertRaises(
RaisingCache.CacheResultArguments,
f.gotResolverResponse,
(expectedAnswers, expectedAuthority, expectedAdditional),
protocol=NoopProtocol(), message=m, address=None)
(query, (answers, authority, additional)), kwargs = e.args
self.assertEqual(query.name.name, b'example.com')
self.assertIs(answers, expectedAnswers)
self.assertIs(authority, expectedAuthority)
self.assertIs(additional, expectedAdditional)
def test_gotResolverErrorCallsResponseFromMessage(self):
"""
L{server.DNSServerFactory.gotResolverError} calls
L{server.DNSServerFactory._responseFromMessage} to generate a response.
"""
factory = NoResponseDNSServerFactory()
factory._responseFromMessage = raiser
request = dns.Message()
request.timeReceived = 1
e = self.assertRaises(
RaisedArguments,
factory.gotResolverError,
failure.Failure(error.DomainError()),
protocol=None, message=request, address=None
)
self.assertEqual(
((), dict(message=request, rCode=dns.ENAME)),
(e.args, e.kwargs)
)
def _assertMessageRcodeForError(self, responseError, expectedMessageCode):
"""
L{server.DNSServerFactory.gotResolver} accepts a L{failure.Failure} and
triggers a response message whose rCode corresponds to the DNS error
contained in the C{Failure}.
@param responseError: The L{Exception} instance which is expected to
trigger C{expectedMessageCode} when it is supplied to
C{gotResolverError}
@type responseError: L{Exception}
@param expectedMessageCode: The C{rCode} which is expected in the
message returned by C{gotResolverError} in response to
C{responseError}.
@type expectedMessageCode: L{int}
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.gotResolverError,
failure.Failure(responseError),
protocol=RaisingProtocol(), message=dns.Message(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, expectedMessageCode)
def test_gotResolverErrorDomainError(self):
"""
L{server.DNSServerFactory.gotResolver} triggers a response message with
an C{rCode} of L{dns.ENAME} if supplied with a L{error.DomainError}.
"""
self._assertMessageRcodeForError(error.DomainError(), dns.ENAME)
def test_gotResolverErrorAuthoritativeDomainError(self):
"""
L{server.DNSServerFactory.gotResolver} triggers a response message with
an C{rCode} of L{dns.ENAME} if supplied with a
L{error.AuthoritativeDomainError}.
"""
self._assertMessageRcodeForError(
error.AuthoritativeDomainError(), dns.ENAME)
def test_gotResolverErrorOtherError(self):
"""
L{server.DNSServerFactory.gotResolver} triggers a response message with
an C{rCode} of L{dns.ESERVER} if supplied with another type of error and
logs the error.
"""
self._assertMessageRcodeForError(KeyError(), dns.ESERVER)
e = self.flushLoggedErrors(KeyError)
self.assertEqual(len(e), 1)
def test_gotResolverErrorLogging(self):
"""
L{server.DNSServerFactory.gotResolver} logs a message if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Lookup failed"],
f.gotResolverError,
failure.Failure(error.DomainError()),
protocol=NoopProtocol(), message=dns.Message(), address=None)
def test_gotResolverErrorResetsResponseAttributes(self):
"""
L{server.DNSServerFactory.gotResolverError} does not allow request
attributes to leak into the response ie it sends a response with AD, CD
set to 0 and empty response record sections.
"""
factory = server.DNSServerFactory()
responses = []
factory.sendReply = (
lambda protocol, response, address: responses.append(response)
)
request = dns.Message(authenticData=True, checkingDisabled=True)
request.answers = [object(), object()]
request.authority = [object(), object()]
request.additional = [object(), object()]
factory.gotResolverError(
failure.Failure(error.DomainError()),
protocol=None, message=request, address=None
)
self.assertEqual([dns.Message(rCode=3, answer=True)], responses)
def test_gotResolverResponseResetsResponseAttributes(self):
"""
L{server.DNSServerFactory.gotResolverResponse} does not allow request
attributes to leak into the response ie it sends a response with AD, CD
set to 0 and none of the records in the request answer sections are
copied to the response.
"""
factory = server.DNSServerFactory()
responses = []
factory.sendReply = (
lambda protocol, response, address: responses.append(response)
)
request = dns.Message(authenticData=True, checkingDisabled=True)
request.answers = [object(), object()]
request.authority = [object(), object()]
request.additional = [object(), object()]
factory.gotResolverResponse(
([], [], []),
protocol=None, message=request, address=None
)
self.assertEqual([dns.Message(rCode=0, answer=True)], responses)
def test_sendReplyWithAddress(self):
"""
If L{server.DNSServerFactory.sendReply} is supplied with a protocol
*and* an address tuple it will supply that address to
C{protocol.writeMessage}.
"""
m = dns.Message()
dummyAddress = object()
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.sendReply,
protocol=RaisingProtocol(),
message=m,
address=dummyAddress)
args, kwargs = e.args
self.assertEqual(args, (m, dummyAddress))
self.assertEqual(kwargs, {})
def test_sendReplyWithoutAddress(self):
"""
If L{server.DNSServerFactory.sendReply} is supplied with a protocol but
no address tuple it will supply only a message to
C{protocol.writeMessage}.
"""
m = dns.Message()
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.sendReply,
protocol=RaisingProtocol(),
message=m,
address=None)
args, kwargs = e.args
self.assertEqual(args, (m,))
self.assertEqual(kwargs, {})
def test_sendReplyLoggingNoAnswers(self):
"""
If L{server.DNSServerFactory.sendReply} logs a "no answers" message if
the supplied message has no answers.
"""
self.patch(server.time, 'time', lambda: 86402)
m = dns.Message()
m.timeReceived = 86401
f = server.DNSServerFactory(verbose=2)
assertLogMessage(
self,
["Replying with no answers", "Processed query in 1.000 seconds"],
f.sendReply,
protocol=NoopProtocol(),
message=m,
address=None)
def test_sendReplyLoggingWithAnswers(self):
"""
If L{server.DNSServerFactory.sendReply} logs a message for answers,
authority, additional if the supplied a message has records in any of
those sections.
"""
self.patch(server.time, 'time', lambda: 86402)
m = dns.Message()
m.answers.append(dns.RRHeader(payload=dns.Record_A('127.0.0.1')))
m.authority.append(dns.RRHeader(payload=dns.Record_A('127.0.0.1')))
m.additional.append(dns.RRHeader(payload=dns.Record_A('127.0.0.1')))
m.timeReceived = 86401
f = server.DNSServerFactory(verbose=2)
assertLogMessage(
self,
['Answers are <A address=127.0.0.1 ttl=None>',
'Authority is <A address=127.0.0.1 ttl=None>',
'Additional is <A address=127.0.0.1 ttl=None>',
'Processed query in 1.000 seconds'],
f.sendReply,
protocol=NoopProtocol(),
message=m,
address=None)
def test_handleInverseQuery(self):
"""
L{server.DNSServerFactory.handleInverseQuery} triggers the sending of a
response message with C{rCode} set to L{dns.ENOTIMP}.
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.handleInverseQuery,
message=dns.Message(), protocol=RaisingProtocol(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, dns.ENOTIMP)
def test_handleInverseQueryLogging(self):
"""
L{server.DNSServerFactory.handleInverseQuery} logs the message origin
address if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Inverse query from ('::1', 53)"],
f.handleInverseQuery,
message=dns.Message(),
protocol=NoopProtocol(),
address=('::1', 53))
def test_handleStatus(self):
"""
L{server.DNSServerFactory.handleStatus} triggers the sending of a
response message with C{rCode} set to L{dns.ENOTIMP}.
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.handleStatus,
message=dns.Message(), protocol=RaisingProtocol(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, dns.ENOTIMP)
def test_handleStatusLogging(self):
"""
L{server.DNSServerFactory.handleStatus} logs the message origin address
if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Status request from ('::1', 53)"],
f.handleStatus,
message=dns.Message(),
protocol=NoopProtocol(),
address=('::1', 53))
def test_handleNotify(self):
"""
L{server.DNSServerFactory.handleNotify} triggers the sending of a
response message with C{rCode} set to L{dns.ENOTIMP}.
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.handleNotify,
message=dns.Message(), protocol=RaisingProtocol(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, dns.ENOTIMP)
def test_handleNotifyLogging(self):
"""
L{server.DNSServerFactory.handleNotify} logs the message origin address
if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Notify message from ('::1', 53)"],
f.handleNotify,
message=dns.Message(),
protocol=NoopProtocol(),
address=('::1', 53))
def test_handleOther(self):
"""
L{server.DNSServerFactory.handleOther} triggers the sending of a
response message with C{rCode} set to L{dns.ENOTIMP}.
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.handleOther,
message=dns.Message(), protocol=RaisingProtocol(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, dns.ENOTIMP)
def test_handleOtherLogging(self):
"""
L{server.DNSServerFactory.handleOther} logs the message origin address
if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Unknown op code (0) from ('::1', 53)"],
f.handleOther,
message=dns.Message(),
protocol=NoopProtocol(),
address=('::1', 53))
| [
"lijj0224@163.com"
] | lijj0224@163.com |
a446f3bcb4ed5b343d63a75ac1a60b160a3d9408 | 2dd433fa5a90a61c3a9d2762849e27f78542677a | /comicnamer/utils.py | d942331d8f98ee7c2488a6b8e437681957fcb99a | [] | no_license | fredsherbet/comicnamer | 6d1e52cb9e3e3a10e5705fbb08378d0b60f227b6 | 6bb0f985afca2f544e709d85330c42447aa8bb67 | refs/heads/master | 2021-01-16T19:41:26.904067 | 2010-09-02T13:32:36 | 2010-09-02T13:32:36 | 3,166,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,589 | py | #!/usr/bin/env python
#encoding:utf-8
#author:Samus
#project:comicnamer
#repository:http://github.com/dbr/comicnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Utilities for comicnamer, including filename parsing
Modified from http://github.com/dbr/tvnamer
"""
import datetime
import os
import re
import sys
import shutil
import logging
import platform
from comicvine_api import (comicvine_error, comicvine_seriesnotfound,
comicvine_issuenotfound, comicvine_attributenotfound, comicvine_userabort)
from unicode_helper import p
from config import Config
from comicnamer_exceptions import (InvalidPath, InvalidFilename,
SeriesNotFound, DataRetrievalError, IssueNotFound,
IssueNameNotFound, ConfigValueError, UserAbort)
def log():
    """Returns the logger instance for this module.
    """
    module_logger = logging.getLogger(__name__)
    return module_logger
def warn(text):
    """Displays message to sys.stderr (not stdout, despite what the old
    docstring claimed - the code below has always written to stderr).
    """
    p(text, file = sys.stderr)
def getIssueName(comicvine_instance, issue):
    """Queries the comicvine_api.Comicvine instance for issue name and corrected
    series name.
    If series cannot be found, it will warn the user. If the issue is not
    found, it will use the corrected series name and not set an issue name.
    If the site is unreachable, it will warn the user. If the user aborts
    it will catch comicvine_api's user abort error and raise comicnamer's

    Returns a tuple of (corrected series name, list of issue names), with
    one name per issue number on the parsed file.
    """
    try:
        series = comicvine_instance[issue.seriesname]
    except comicvine_error, errormsg:
        # Site unreachable or returned garbage
        raise DataRetrievalError("Error contacting www.comicvine.com: %s" % errormsg)
    except comicvine_seriesnotfound:
        # No such series found.
        raise SeriesNotFound("Series %s not found on www.comicvine.com" % issue.seriesname)
    except comicvine_userabort, error:
        # Re-raise as comicnamer's own abort exception
        raise UserAbort(unicode(error))
    else:
        # Series was found, use corrected series name
        correctedSeriesName = series['seriesname']
    # A file may span multiple issues; collect one name per issue number
    issnames = []
    for cissno in issue.issuenumbers:
        try:
            issueinfo = series[cissno]
        except comicvine_issuenotfound:
            raise IssueNotFound(
                "Issue %s of series %s could not be found" % (
                    cissno,
                    issue.seriesname))
        except comicvine_attributenotfound:
            raise IssueNameNotFound(
                "Could not find issue name for %s" % issue)
        else:
            issnames.append(issueinfo['issuename'])
    return correctedSeriesName, issnames
def _applyReplacements(cfile, replacements):
"""Applies custom replacements.
Argument cfile is string.
Argument replacements is a list of dicts, with keys "match",
"replacement", and (optional) "is_regex"
"""
for rep in replacements:
if 'is_regex' in rep and rep['is_regex']:
cfile = re.sub(rep['match'], rep['replacement'], cfile)
else:
cfile = cfile.replace(rep['match'], rep['replacement'])
return cfile
def applyCustomInputReplacements(cfile):
    """Applies custom input filename replacements, wraps _applyReplacements
    """
    rules = Config['input_filename_replacements']
    return _applyReplacements(cfile, rules)
def applyCustomOutputReplacements(cfile):
    """Applies custom output filename replacements, wraps _applyReplacements
    """
    rules = Config['output_filename_replacements']
    return _applyReplacements(cfile, rules)
def applyCustomFullpathReplacements(cfile):
    """Applies custom replacements to full path, wraps _applyReplacements
    """
    rules = Config['move_files_fullpath_replacements']
    return _applyReplacements(cfile, rules)
def cleanRegexedSeriesName(seriesname):
    """Cleans up series name by removing any . and _
    characters, along with any trailing hyphens.

    Is basically equivalent to replacing all _ and . with a
    space, but handles decimal numbers in string, for example:

    >>> cleanRegexedSeriesName("an.example.1.0.test")
    'an example 1.0 test'
    >>> cleanRegexedSeriesName("an_example_1.0_test")
    'an example 1.0 test'
    """
    # Turn dots into spaces, but only when at least one neighbour is a
    # non-digit - this protects decimal numbers such as "1.0".
    dot_rules = (
        ("(\\D)[.](\\D)", "\\1 \\2"),
        ("(\\D)[.]", "\\1 "),
        ("[.](\\D)", " \\1"),
    )
    for pattern, replacement in dot_rules:
        seriesname = re.sub(pattern, replacement, seriesname)
    seriesname = seriesname.replace("_", " ")
    # Drop a single trailing hyphen (left over from "Series - 01" patterns)
    seriesname = re.sub("-$", "", seriesname)
    return seriesname.strip()
class FileFinder(object):
    """Given a file, it will verify it exists. Given a folder it will descend
    one level into it and return a list of files, unless the recursive argument
    is True, in which case it finds all files contained within the path.

    The with_extension argument is a list of valid extensions, without leading
    spaces. If an empty list (or None) is supplied, no extension checking is
    performed.
    """

    def __init__(self, path, with_extension = None, recursive = False):
        self.path = path
        if with_extension is None:
            self.with_extension = []
        else:
            self.with_extension = with_extension
        self.recursive = recursive

    def findFiles(self):
        """Returns list of files found at path
        """
        if os.path.isfile(self.path):
            if self._checkExtension(self.path):
                return [os.path.abspath(self.path)]
            else:
                return []
        elif os.path.isdir(self.path):
            return self._findFilesInPath(self.path)
        else:
            raise InvalidPath("%s is not a valid file/directory" % self.path)

    def _checkExtension(self, fname):
        """Returns True if fname's extension is in with_extension, or if
        with_extension is empty (meaning no filtering is performed).
        """
        if len(self.with_extension) == 0:
            return True
        _, extension = os.path.splitext(fname)
        for cext in self.with_extension:
            cext = ".%s" % cext
            if extension == cext:
                return True
        else:
            return False

    def _findFilesInPath(self, startpath):
        """Finds files from startpath, could be called recursively
        """
        allfiles = []
        for subf in os.listdir(unicode(startpath)):
            newpath = os.path.join(startpath, subf)
            newpath = os.path.abspath(newpath)
            if os.path.isfile(newpath):
                # Apply the extension filter to files only. Filtering before
                # the isfile check (as this used to) skipped every
                # subdirectory, which silently broke recursive descent.
                if self._checkExtension(subf):
                    allfiles.append(newpath)
            else:
                if self.recursive:
                    allfiles.extend(self._findFilesInPath(newpath))
            #end if isfile
        #end for sf
        return allfiles
class FileParser(object):
    """Deals with parsing of filenames

    Compiles the regexes from Config['filename_patterns'] and matches
    them against the basename of a path to extract the series name and
    issue number(s).
    """

    def __init__(self, path):
        self.path = path
        self.compiled_regexs = []
        self._compileRegexs()

    def _compileRegexs(self):
        """Takes issue_patterns from config, compiles them all
        into self.compiled_regexs

        Invalid patterns are reported via warn() and skipped.
        """
        for cpattern in Config['filename_patterns']:
            try:
                cregex = re.compile(cpattern, re.VERBOSE)
            except re.error as errormsg:
                # BUGFIX: on a compile failure `cregex` is unbound (or
                # stale from a previous iteration); report the pattern
                # string that actually failed to compile.
                warn("WARNING: Invalid issue_pattern, %s. %s" % (
                    errormsg, cpattern))
            else:
                self.compiled_regexs.append(cregex)

    def parse(self):
        """Runs path via configured regex, extracting data from groups.
        Returns an IssueInfo instance containing extracted data.

        Raises InvalidFilename when no pattern matches, and
        ConfigValueError when a matching pattern lacks required groups.
        """
        _, filename = os.path.split(self.path)
        filename = applyCustomInputReplacements(filename)

        for cmatcher in self.compiled_regexs:
            match = cmatcher.match(filename)
            if match is None:
                continue
            namedgroups = match.groupdict().keys()

            if 'issuenumber1' in namedgroups:
                # Multiple issues, have issuenumber1 or 2 etc
                issnos = []
                for cur in namedgroups:
                    issnomatch = re.match(r'issuenumber(\d+)', cur)
                    if issnomatch:
                        issnos.append(int(match.group(cur)))
                issnos.sort()
                issuenumbers = issnos
            elif 'issuenumberstart' in namedgroups:
                # Multiple issues, regex specifies start and end number
                start = int(match.group('issuenumberstart'))
                end = int(match.group('issuenumberend'))
                if start > end:
                    # Swap start and end
                    start, end = end, start
                issuenumbers = range(start, end + 1)
            elif 'issuenumber' in namedgroups:
                issuenumbers = [int(match.group('issuenumber')), ]
            elif 'year' in namedgroups or 'month' in namedgroups or 'day' in namedgroups:
                if not all(['year' in namedgroups, 'month' in namedgroups, 'day' in namedgroups]):
                    raise ConfigValueError(
                        "Date-based regex must contain groups 'year', 'month' and 'day'")
                # Date-based numbering: the "issue number" is a date object.
                issuenumbers = [datetime.date(int(match.group('year')),
                                              int(match.group('month')),
                                              int(match.group('day')))]
            else:
                # BUGFIX: the concatenated message parts were missing
                # separating spaces ("shouldcontain...").
                raise ConfigValueError(
                    "Regex does not contain issue number group, should "
                    "contain issuenumber, issuenumber1-9, or "
                    "issuenumberstart and issuenumberend\n\nPattern "
                    "was:\n" + cmatcher.pattern)

            if 'seriesname' in namedgroups:
                seriesname = match.group('seriesname')
            else:
                raise ConfigValueError(
                    "Regex must contain seriesname. Pattern was:\n" + cmatcher.pattern)

            if seriesname is not None:
                seriesname = cleanRegexedSeriesName(seriesname)

            return IssueInfo(
                seriesname = seriesname,
                issuenumbers = issuenumbers,
                filename = self.path)

        raise InvalidFilename(self.path)
def formatIssueName(names, join_with):
    """Takes a list of issue names, formats them into a string.

    If two names are supplied, such as "Pilot (1)" and "Pilot (2)", the
    returned string will be "Pilot (1-2)".

    If two different issue names are found, such as "The first", and
    "Something else" it will return "The first, Something else" (joined
    with join_with). Any name without a trailing "(number)" suffix also
    falls back to the plain join.
    """
    if len(names) == 1:
        return names[0]

    suffix_re = re.compile(r"(.*) \(([0-9]+)\)$")
    base_names = []
    issue_nums = []
    for cname in names:
        m = suffix_re.match(cname)
        if m is None:
            # An issue didn't match the "(number)" form - plain join.
            return join_with.join(names)
        base, num = m.group(1), m.group(2)
        if base_names and base not in base_names:
            # Mixed base names cannot be collapsed into one range.
            return join_with.join(names)
        base_names.append(base)
        issue_nums.append(int(num))

    return "%s (%d-%d)" % (base_names[0], min(issue_nums), max(issue_nums))
def makeValidFilename(value, normalize_unicode = False, windows_safe = False, custom_blacklist = None, replace_with = "_"):
    """
    Takes a string and makes it into a valid filename.

    normalize_unicode replaces accented characters with ASCII equivalent, and
    removes characters that cannot be converted sensibly to ASCII.

    windows_safe forces Windows-safe filenames, regardless of current platform

    custom_blacklist specifies additional characters that will removed. This
    will not touch the extension separator:

        >>> makeValidFilename("T.est.cbr", custom_blacklist=".")
        'T_est.cbr'
    """
    # Either the user forces Windows rules, or we follow the platform.
    # platform.system may also report "Java"; that falls through to the
    # strict (Windows) character set below.
    sysname = "Windows" if windows_safe else platform.system()

    # A leading "." would make the file hidden; shield it with an
    # underscore. Done before splitext so a bare "." is handled the same
    # way across python 2.5/2.6 splitext behaviour.
    if value.startswith("."):
        value = "_" + value

    # Sanitise the stem only, so the extension separator survives.
    value, extension = os.path.splitext(value)

    # Null bytes are never legal in a filename.
    value = value.replace("\0", "")

    # Per-platform forbidden characters. ":" is technically allowed on
    # Darwin but Finder treats it as "/", so it is banned there too.
    if sysname == 'Darwin':
        forbidden = r"/:"
    elif sysname in ['Linux', 'FreeBSD']:
        forbidden = r"/"
    else:
        # Windows, Java, or anything unknown: use the strict set.
        forbidden = r"\/:*?\"<>|"

    # Append custom blacklisted characters
    if custom_blacklist is not None:
        forbidden += custom_blacklist

    # Replace every blacklisted character, then drop stray whitespace.
    value = re.sub("[%s]" % re.escape(forbidden), replace_with, value)
    value = value.strip()

    # Windows refuses a set of legacy device names outright; as with the
    # character set, treat non Darwin/Linux platforms as Windows.
    if sysname not in ['Darwin', 'Linux']:
        reserved_names = ["CON", "PRN", "AUX", "NUL", "COM1", "COM2",
        "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1",
        "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"]
        if value in reserved_names:
            value = "_" + value

    # Replace accented characters with ASCII equivalent
    if normalize_unicode:
        import unicodedata
        value = unicode(value) # cast data to unicode
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')

    # Keep the total length within 254 characters (NTFS allows 255,
    # FAT32 allows 254); trim the extension instead when it is the
    # longer part, since no sane extension is that long.
    max_len = 254
    if len(value + extension) > max_len:
        if len(extension) > len(value):
            extension = extension[:max_len - len(value)]
        else:
            value = value[:max_len - len(extension)]

    return value + extension
def formatIssueNumbers(issuenumbers):
    """Format issue number(s) into string, using configured values

    A single number is formatted with Config['issue_single']; several
    numbers are each formatted the same way and then joined with
    Config['issue_separator'].
    """
    single_fmt = Config['issue_single']
    if len(issuenumbers) == 1:
        return single_fmt % issuenumbers[0]
    return Config['issue_separator'].join(
        single_fmt % number for number in issuenumbers)
class IssueInfo(object):
    """Stores information (issue number, issue name), and contains
    logic to generate new name

    Assigning `fullpath` (also done from __init__ via the `filename`
    argument) derives `filepath`, `filename` (stem) and `extension`
    (without the dot).
    """

    def __init__(self,
        seriesname = None,
        issuenumbers= None,
        issuename = None,
        filename = None):

        self.seriesname = seriesname
        self.issuenumbers = issuenumbers
        self.issuename = issuename
        # Goes through the fullpath property setter below.
        self.fullpath = filename

    def fullpath_get(self):
        return self._fullpath

    def fullpath_set(self, value):
        self._fullpath = value
        if value is None:
            self.filename = None
            self.extension = None
        else:
            self.filepath = os.path.dirname(value)
            basename = os.path.basename(value)
            self.filename, self.extension = os.path.splitext(basename)
            # Keep the extension without its leading dot.
            self.extension = self.extension.replace(".", "")

    fullpath = property(fullpath_get, fullpath_set)

    @property
    def fullfilename(self):
        return u"%s.%s" % (self.filename, self.extension)

    def generateFilename(self):
        """
        Uses the following config options:
        filename_with_issue # Filename when issue name is found
        filename_without_issue # Filename when no issue can be found
        issue_single # formatting for a single issue number
        issue_separator # used to join multiple issue numbers
        """
        # Only the first issue number is used in the generated name.
        issue_str = Config['issue_single'] % self.issuenumbers[0]

        ext_str = '' if self.extension is None else '.%s' % self.extension

        # Data made available to config'd output file format.
        fields = {
            'seriesname': self.seriesname,
            'issue': issue_str,
            'issuename': self.issuename,
            'ext': ext_str}

        has_no_name = self.issuename is None or (
            isinstance(self.issuename, list) and self.issuename[0] is None)
        if has_no_name:
            fname = Config['filename_without_issue'] % fields
        else:
            if isinstance(self.issuename, list):
                fields['issuename'] = formatIssueName(
                    self.issuename,
                    join_with = Config['multiiss_join_name_with'])
            fname = Config['filename_with_issue'] % fields

        return makeValidFilename(
            fname,
            normalize_unicode = Config['normalize_unicode_filenames'],
            windows_safe = Config['windows_safe_filenames'],
            replace_with = Config['replace_invalid_characters_with'])

    def __repr__(self):
        return "<%s: %s>" % (
            self.__class__.__name__,
            self.generateFilename())
def same_partition(f1, f2):
    """Returns True if both files or directories are on the same partition

    Compares the device numbers reported by os.stat for both paths.
    """
    dev1 = os.stat(f1).st_dev
    dev2 = os.stat(f2).st_dev
    return dev1 == dev2
def delete_file(fpath):
    """Delete the given file.

    Not yet implemented: always raises NotImplementedError.
    """
    # BUGFIX: message typo "implimented" -> "implemented".
    raise NotImplementedError("delete_file not yet implemented")
class Renamer(object):
    """Deals with renaming of files
    """

    def __init__(self, filename):
        self.filename = os.path.abspath(filename)

    def newName(self, newName, force = False):
        """Renames a file, keeping the path the same.

        Raises OSError when the destination already exists, unless
        force is True. Updates self.filename on success.
        """
        # BUGFIX: removed a dead `os.path.splitext` call whose result
        # was never used (and shadowed the local filename).
        filepath, _ = os.path.split(self.filename)
        newpath = os.path.join(filepath, newName)

        if os.path.isfile(newpath):
            # If the destination exists, raise exception unless force is True
            if not force:
                raise OSError("File %s already exists, not forcefully renaming %s" % (
                    newpath, self.filename))

        os.rename(self.filename, newpath)
        self.filename = newpath

    def newPath(self, new_path, force = False, always_copy = False, always_move = False, create_dirs = True, getPathPreview = False):
        """Moves the file to a new path.

        If it is on the same partition, it will be moved (unless always_copy is True)
        If it is on a different partition, it will be copied.
        If the target file already exists, it will raise OSError unless force is True.
        When getPathPreview is True, returns the computed destination
        path without touching the filesystem.
        """
        if always_copy and always_move:
            raise ValueError("Both always_copy and always_move cannot be specified")

        old_dir, old_filename = os.path.split(self.filename)

        # Join new filepath to old one (to handle relative dirs)
        new_dir = os.path.abspath(os.path.join(old_dir, new_path))

        # Join new filename onto new filepath
        new_fullpath = os.path.join(new_dir, old_filename)

        if len(Config['move_files_fullpath_replacements']) > 0:
            p("Before custom full path replacements: %s" % (new_fullpath))
            new_fullpath = applyCustomFullpathReplacements(new_fullpath)
            new_dir = os.path.dirname(new_fullpath)

        p("New path: %s" % new_fullpath)

        if getPathPreview:
            return new_fullpath

        if create_dirs:
            p("Creating %s" % new_dir)
            try:
                os.makedirs(new_dir)
            except OSError as e:
                # errno 17 == EEXIST: the directory already existing is fine.
                if e.errno != 17:
                    raise

        if os.path.isfile(new_fullpath):
            # If the destination exists, raise exception unless force is True
            if not force:
                raise OSError("File %s already exists, not forcefully moving %s" % (
                    new_fullpath, self.filename))

        if same_partition(self.filename, new_dir):
            if always_copy:
                # Same partition, but forced to copy
                p("copy %s to %s" % (self.filename, new_fullpath))
                shutil.copyfile(self.filename, new_fullpath)
            else:
                # Same partition, just rename the file to move it
                p("move %s to %s" % (self.filename, new_fullpath))
                os.rename(self.filename, new_fullpath)
        else:
            # File is on different partition (different disc), copy it
            p("copy %s to %s" % (self.filename, new_fullpath))
            shutil.copyfile(self.filename, new_fullpath)
            if always_move:
                # Forced to move file, we just trash old file
                p("Deleting %s" % (self.filename))
                delete_file(self.filename)

        self.filename = new_fullpath
| [
"iam@attractive.com"
] | iam@attractive.com |
8498fc35377d666c8beda3737b4569c2b0bef667 | de2a0871ab99080664f532da8cccb909ebfcddef | /merge_evalres.py | 014320eae68f6475abdb0231df42d2387dc45396 | [
"MIT"
] | permissive | jchazalon/smartdoc15-ch1-eval | 0d8abc31276532e8e163f24ec5e8387e61e0f97d | c2a5ef7fb04e7aa5ecc02d365be08345d435031f | refs/heads/master | 2021-01-18T20:05:58.939875 | 2018-01-19T18:51:07 | 2018-01-19T18:51:07 | 69,476,202 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,411 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# Imports
import logging
import argparse
import os
import os.path
import sys
import fileinput
import itertools # chain
from collections import namedtuple
from dexml import ParseError
# ==============================================================================
# SegEval Tools suite imports
from utils.args import *
from utils.log import *
from models.models import *
# ==============================================================================
logger = logging.getLogger(__name__)
# ==============================================================================
# Constants
PROG_VERSION = "0.4"
PROG_NAME = "Segmentation Evaluation Result Merger"
PROG_NAME_SHORT = "SegEval"
XML_VERSION_MIN = 0.3
XML_VERSION_MAX = 0.3
ERRCODE_OK = 0
ERRCODE_NOFILE = 10
# ==============================================================================
# Lightweight structure to store results and merge them in a convenient way
# evalres is a lightweight accumulator for evaluation figures. "Mean" fields
# use None to mark "undefined" (no supporting frames), which is distinct from
# a measured value of 0. Counters are kept as floats at this level so that
# weighted averaging during merges uses true division.
evalres = namedtuple("evalres", ["mean_segmentation_precision", # None means undefined
                                 "mean_segmentation_recall", # None means undefined
                                 "mean_detection_precision", # None means undefined / redundant
                                 "mean_detection_recall", # None means undefined / redundant
                                 "mean_jaccard_index_smartdoc", # None means undefined
                                 "mean_jaccard_index_segonly", # None means undefined
                                 "count_total_frames", # float at this level
                                 "count_true_accepted_frames", # float at this level
                                 "count_true_rejected_frames", # float at this level
                                 "count_false_accepted_frames", # float at this level
                                 "count_false_rejected_frames"])# float at this level
# Neutral starting value: no frames seen yet, every metric undefined.
res_init = evalres(None, None, None, None, None, None, 0.0, 0.0, 0.0, 0.0, 0.0)
# ==============================================================================
def read_results_from_file(eval_file):
    """Load the global results stored in an evaluation XML file.

    Tries to parse eval_file first as an EvalResult document, then as
    an EvalSummary document. Returns the model's `global_results`
    element. Logs and re-raises the underlying error when neither
    format matches.
    """
    current_mdl = None
    try:
        try:
            current_mdl = EvalResult.loadFromFile(eval_file)
            logger.debug("Got EvalResult file.")
        except ParseError:
            # BUGFIX: the module only does `from dexml import ParseError`,
            # so the previous `except dexml.ParseError` raised a NameError
            # instead of falling back to the EvalSummary format.
            current_mdl = EvalSummary.loadFromFile(eval_file)
            logger.debug("Got EvalSummary file.")
    except Exception as e:
        logger.error("File '%s' is not a valid segmentation evaluation file." % eval_file)
        logger.error("\t Is it a '*.segeval.xml' or a '*.evalsummary.xml' file?")
        raise e
    return current_mdl.global_results
# ==============================================================================
def res_model_to_tuple(result_model):
    """Convert a parsed global-results model into an `evalres` tuple.

    Metrics whose supporting frame count is zero are mapped to None
    (undefined) so later merges can distinguish "no data" from a
    measured 0. Counters are cast to float for weighted merging.
    """
    accepted = result_model.count_true_accepted_frames
    total = result_model.count_total_frames
    retrieved = (result_model.count_true_accepted_frames
                 + result_model.count_false_accepted_frames)
    return evalres(
        result_model.mean_segmentation_precision if accepted > 0 else None,
        result_model.mean_segmentation_recall if accepted > 0 else None,
        result_model.detection_precision if total > 0 else None,
        result_model.detection_recall if total > 0 else None,
        result_model.mean_jaccard_index_smartdoc if total > 0 else None,
        result_model.mean_jaccard_index_segonly if retrieved > 0 else None,
        float(result_model.count_total_frames),
        float(result_model.count_true_accepted_frames),
        float(result_model.count_true_rejected_frames),
        float(result_model.count_false_accepted_frames),
        float(result_model.count_false_rejected_frames))
def getOrDefault(value, default):
    """Return value, or default when value is None.

    Note that falsy-but-not-None values (0, "", False) are returned
    unchanged.
    """
    # TODO add warning if using default
    if value is None:
        return default
    return value
def res_tuple_to_model(result_tuple):
    """Build an EvalSummary XML model from an `evalres` tuple.

    Undefined (None) metrics are forced to 0.0 only at this final
    export step, so no information is lost earlier in the pipeline.
    Counters are cast back to int for serialization.
    """
    mdl = EvalSummary(
        version="0.3",
        software_used=Software(name=PROG_NAME_SHORT, version=PROG_VERSION))
    mdl.global_results = GlobalEvalResults()
    results = mdl.global_results

    # Force to 0.0 only at the end of the process, so as not to loose information.
    results.mean_segmentation_precision = getOrDefault(result_tuple.mean_segmentation_precision, 0.0)
    results.mean_segmentation_recall = getOrDefault(result_tuple.mean_segmentation_recall, 0.0)
    results.detection_precision = getOrDefault(result_tuple.mean_detection_precision, 0.0)
    results.detection_recall = getOrDefault(result_tuple.mean_detection_recall, 0.0)
    results.mean_jaccard_index_smartdoc = getOrDefault(result_tuple.mean_jaccard_index_smartdoc, 0.0)
    results.mean_jaccard_index_segonly = getOrDefault(result_tuple.mean_jaccard_index_segonly, 0.0)

    results.count_total_frames = int(result_tuple.count_total_frames)
    results.count_true_accepted_frames = int(result_tuple.count_true_accepted_frames)
    results.count_true_rejected_frames = int(result_tuple.count_true_rejected_frames)
    results.count_false_accepted_frames = int(result_tuple.count_false_accepted_frames)
    results.count_false_rejected_frames = int(result_tuple.count_false_rejected_frames)
    return mdl
# ==============================================================================
def merge_res_tuples(res1, res2):
    '''evalres x evalres ---> evalres

    Merge two evaluation result tuples into one aggregate: counters are
    summed, "mean" metrics are recomputed as frame-weighted averages,
    and detection precision/recall are recomputed from the merged
    counters. Metrics with no supporting frames stay None (undefined).
    Counters are floats (see res_model_to_tuple), so the divisions
    below are true divisions even on Python 2.
    '''
    # If any of two contains zero frames, return the other
    if res1.count_total_frames == 0:
        return res2
    if res2.count_total_frames == 0:
        return res1
    # Now both res contain frames
    # First merge counters
    count_total_frames = res1.count_total_frames + res2.count_total_frames
    count_true_accepted_frames = res1.count_true_accepted_frames + res2.count_true_accepted_frames
    count_true_rejected_frames = res1.count_true_rejected_frames + res2.count_true_rejected_frames
    count_false_accepted_frames = res1.count_false_accepted_frames + res2.count_false_accepted_frames
    count_false_rejected_frames = res1.count_false_rejected_frames + res2.count_false_rejected_frames
    # Segmentation precision and recall
    # (weighted by each side's true-accepted frame count)
    mean_segmentation_precision = None
    mean_segmentation_recall = None
    if count_true_accepted_frames > 0:
        mean_segmentation_precision = (
            ( getOrDefault(res1.mean_segmentation_precision, 0.0) * res1.count_true_accepted_frames
            + getOrDefault(res2.mean_segmentation_precision, 0.0) * res2.count_true_accepted_frames)
            / count_true_accepted_frames)
        mean_segmentation_recall = (
            ( getOrDefault(res1.mean_segmentation_recall, 0.0) * res1.count_true_accepted_frames
            + getOrDefault(res2.mean_segmentation_recall, 0.0) * res2.count_true_accepted_frames)
            / count_true_accepted_frames)
    else:
        logger.warn("No frame accepted while merging. Mean segmentation precision and recall left undefined.")
    # Detection precision and recall (adapted from eval_seg)
    # These are recomputed from the merged counters, not averaged.
    count_expected = count_true_accepted_frames + count_false_rejected_frames
    count_retrieved = count_true_accepted_frames + count_false_accepted_frames
    mean_detection_precision = None
    if count_retrieved > 0:
        mean_detection_precision = count_true_accepted_frames / count_retrieved
    else:
        logger.warn("No frame accepted while merging. Mean detection precision left undefined.")
    mean_detection_recall = None
    if count_expected > 0:
        mean_detection_recall = count_true_accepted_frames / count_expected
    else:
        logger.error("Cannot compute full sample recall if nothing is expected! Mean detection recall left undefined.")
    # Jaccard index
    # smartdoc variant: weighted by total frames; segonly variant:
    # weighted by retrieved (true-accepted + false-accepted) frames.
    mean_jaccard_index_smartdoc = None
    if count_total_frames > 0:
        mean_jaccard_index_smartdoc = (
            ( getOrDefault(res1.mean_jaccard_index_smartdoc, 0.0) * res1.count_total_frames
            + getOrDefault(res2.mean_jaccard_index_smartdoc, 0.0) * res2.count_total_frames)
            / count_total_frames)
    else:
        logger.error("No frame in sample. Mean Jaccard index (smartdoc variant) left undefined.")
    mean_jaccard_index_segonly = None
    if count_retrieved > 0:
        mean_jaccard_index_segonly = (
            ( getOrDefault(res1.mean_jaccard_index_segonly, 0.0)
              * (res1.count_true_accepted_frames + res1.count_false_accepted_frames)
            + getOrDefault(res2.mean_jaccard_index_segonly, 0.0)
              * (res2.count_true_accepted_frames + res2.count_false_accepted_frames) )
            / count_retrieved)
    else:
        logger.error("No retreived frame in sample. Mean Jaccard index (segonly variant) left undefined.")
    # Prepare result
    res_agg = evalres(
        mean_segmentation_precision,
        mean_segmentation_recall,
        mean_detection_precision,
        mean_detection_recall,
        mean_jaccard_index_smartdoc,
        mean_jaccard_index_segonly,
        count_total_frames,
        count_true_accepted_frames,
        count_true_rejected_frames,
        count_false_accepted_frames,
        count_false_rejected_frames)
    # All done
    return res_agg
# ==============================================================================
def main(argv=None):
    """Program entry point.

    Parses command line options, reads every listed evaluation result
    file (from --files-from and/or positional arguments), merges their
    global results, logs the final figures and optionally exports the
    merged summary to --output-file.

    Returns ERRCODE_OK on success, ERRCODE_NOFILE when no input file
    was processed.

    NOTE(review): the `argv` parameter is currently unused; argparse
    reads sys.argv directly via parse_args().
    """
    # Option parsing
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Merge page segmentation evaluation results.',
        version=PROG_VERSION)
    parser.add_argument('-d', '--debug',
        action="store_true",
        help="Activate debug output.")
    parser.add_argument('-o', '--output-file',
        help="Optional path to output file.")
    parser.add_argument('-f', '--files-from', metavar="FILE_LIST",
        action=StoreValidFilePathOrStdin,
        help="File containing the list of files to merge, or '-' to use standard input. \
              Will be read BEFORE files specified on command line.")
    parser.add_argument('files',
        action=StoreValidFilePaths,
        metavar='result_file',
        nargs='*',
        help='EvalSummary or SegEval files containing global results to merge.')
    args = parser.parse_args()
    # -----------------------------------------------------------------------------
    # Logger activation
    initLogger(logger)
    output_prettyprint = False
    if args.debug:
        logger.setLevel(logging.DEBUG)
        output_prettyprint = True
    # -----------------------------------------------------------------------------
    # Output log header
    programHeader(logger, PROG_NAME, PROG_VERSION)
    logger.debug(DBGSEP)
    dumpArgs(args, logger)
    logger.debug(DBGSEP)
    # -----------------------------------------------------------------------------
    logger.debug("Starting up")
    # Create file name generator: --files-from entries first, then the
    # files given on the command line.
    file_iter = None
    files_in_list = []
    if args.files_from:
        files_in_list = (line.rstrip("\n") for line in fileinput.input([args.files_from]))
    file_iter = itertools.chain(files_in_list, args.files)
    # --------------------------------------------------------------------------
    logger.debug("--- Process started. ---")
    # Init variables
    res_agg = res_init
    # Loop over files, folding each file's results into the aggregate.
    file_count = 0
    for eval_file in file_iter:
        logger.debug("Processing file '%s'" % eval_file)
        # Try to read either EvalResult or EvalSummary
        res_cur = res_model_to_tuple(read_results_from_file(eval_file))
        # Merge evaluation results
        res_agg = merge_res_tuples(res_cur, res_agg)
        # Logging
        logger.debug(
            "\t %d new frames (total is %d)",
            res_cur.count_total_frames,
            res_agg.count_total_frames)
        logger.debug(
            "\t AFTER: mean_segmentation_precision=%f ; mean_segmentation_recall =%f",
            getOrDefault(res_agg.mean_segmentation_precision, 0.0),
            getOrDefault(res_agg.mean_segmentation_recall, 0.0))
        logger.debug(
            "\t mean_detection_precision =%f ; mean_detection_recall =%f",
            getOrDefault(res_agg.mean_detection_precision, 0.0),
            getOrDefault(res_agg.mean_detection_recall, 0.0))
        logger.debug(
            "\t mean_jaccard_index_smartdoc=%f ; mean_jaccard_index_segonly=%f",
            getOrDefault(res_agg.mean_jaccard_index_smartdoc, 0.0),
            getOrDefault(res_agg.mean_jaccard_index_segonly, 0.0))
        # Stats
        file_count += 1
    logger.debug("--- Process complete. ---")
    # --------------------------------------------------------------------------
    # Test for empty task and trap
    if file_count == 0:
        logger.error("No file processed. Output file will be useless so it is deactivated.")
        logger.error("\t Use '-h' option to review program synopsis.")
        return ERRCODE_NOFILE
    # else
    # Final output
    aggreg_mdl = res_tuple_to_model(res_agg)
    gr_mdl = aggreg_mdl.global_results
    logger.debug("------------------------------")
    logger.debug("Final results")
    logger.debug("------------------------------")
    logger.debug("Segmentation quality:")
    logger.info("\tmean segmentation precision = %f", getOrDefault(gr_mdl.mean_segmentation_precision, 0.0))
    logger.info("\tmean segmentation recall = %f", getOrDefault(gr_mdl.mean_segmentation_recall, 0.0))
    logger.debug("------------------------------")
    logger.debug("Detection quality:")
    logger.info("\tmean detection precision = %f", getOrDefault(gr_mdl.detection_precision, 0.0))
    logger.info("\tmean detection recall = %f", getOrDefault(gr_mdl.detection_recall, 0.0))
    logger.debug("------------------------------")
    logger.debug("Jaccard index:")
    logger.info("\tmean ji smartdoc = %f", getOrDefault(gr_mdl.mean_jaccard_index_smartdoc, 0.0))
    logger.info("\tmean ji seg only = %f", getOrDefault(gr_mdl.mean_jaccard_index_segonly, 0.0))
    logger.debug("------------------------------")
    logger.debug("Frame counts:")
    logger.info("\ttotal_frames = %d", gr_mdl.count_total_frames)
    logger.info("\ttrue_accepted = %d", gr_mdl.count_true_accepted_frames)
    logger.info("\ttrue_rejected = %d", gr_mdl.count_true_rejected_frames)
    logger.info("\tfalse_accepted = %d", gr_mdl.count_false_accepted_frames)
    logger.info("\tfalse_rejected = %d", gr_mdl.count_false_rejected_frames)
    logger.debug("- - - - - - - - - - - - - - - ")
    logger.debug("Note:")
    logger.debug("\texpected = true_accept + false_reject = %d", (gr_mdl.count_true_accepted_frames + gr_mdl.count_false_rejected_frames))
    logger.debug("\tretrieved = true_accept + false_accept = %d", (gr_mdl.count_true_accepted_frames + gr_mdl.count_false_accepted_frames))
    logger.debug("------------------------------")
    logger.debug("")
    # Export the XML structure to file if needed
    if args.output_file is not None:
        aggreg_mdl.exportToFile(args.output_file, pretty_print=output_prettyprint)
    logger.debug("Clean exit.")
    logger.debug(DBGSEP)
    return ERRCODE_OK
# --------------------------------------------------------------------------
# Script entry point: exit with the status code returned by main().
if __name__ == "__main__":
    sys.exit(main())
| [
"joseph.chazalon@univ-lr.fr"
] | joseph.chazalon@univ-lr.fr |
5c1ba44c812cddbba0d7d69d8007ae070698c369 | 123216cb332c60431a15580f9f730bd0c23a2d42 | /rango/migrations/0002_auto_20210730_0302.py | 2682e896ab00980610c27865844e809abcd59565 | [] | no_license | zzh2471437/tango_with_django_project | d052d430a1442ef5337b88f59a0a05eb00b55168 | 10505fd81a948f9027b539c7abc89957ba7b9e4a | refs/heads/master | 2023-06-25T09:47:39.579944 | 2021-07-30T15:04:04 | 2021-07-30T15:04:04 | 389,978,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | # Generated by Django 2.1.5 on 2021-07-30 03:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'categories'},
),
migrations.AddField(
model_name='category',
name='likes',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='category',
name='views',
field=models.IntegerField(default=0),
),
]
| [
"“2471437Z@student.gla.ac.uk”"
] | “2471437Z@student.gla.ac.uk” |
d604b39ae0f8e7002cb175fae59528062f11a466 | 5da988c176252fca1b558190eff74ef3b89afc9f | /instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py | d225e6bd069b0db9f870fc1da037a9f0be6aaf31 | [
"Apache-2.0"
] | permissive | kinvolk/opentelemetry-python | 3801376ee6bdb46d85d8876a97713e698e1241ce | 47483865854c7adae7455f8441dab7f814f4ce2a | refs/heads/master | 2023-05-25T19:36:05.130267 | 2020-11-02T17:29:59 | 2020-11-02T17:29:59 | 201,488,070 | 1 | 2 | Apache-2.0 | 2023-05-16T18:48:46 | 2019-08-09T14:56:28 | Python | UTF-8 | Python | false | false | 8,741 | py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Instrument `celery`_ to trace Celery applications.
.. _celery: https://pypi.org/project/celery/
Usage
-----
* Start broker backend
.. code::
docker run -p 5672:5672 rabbitmq
* Run instrumented task
.. code:: python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchExportSpanProcessor, ConsoleSpanExporter
from opentelemetry.instrumentation.celery import CeleryInstrumentor
from celery import Celery
from celery.signals import worker_process_init
@worker_process_init.connect(weak=False)
def init_celery_tracing(*args, **kwargs):
trace.set_tracer_provider(TracerProvider())
span_processor = BatchExportSpanProcessor(ConsoleSpanExporter())
trace.get_tracer_provider().add_span_processor(span_processor)
CeleryInstrumentor().instrument()
app = Celery("tasks", broker="amqp://localhost")
@app.task
def add(x, y):
return x + y
add.delay(42, 50)
API
---
"""
import logging
import signal
from collections.abc import Iterable
from celery import signals # pylint: disable=no-name-in-module
from opentelemetry import propagators, trace
from opentelemetry.instrumentation.celery import utils
from opentelemetry.instrumentation.celery.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.trace.propagation.textmap import DictGetter
from opentelemetry.trace.status import Status, StatusCode
logger = logging.getLogger(__name__)
# Task operations
# Span attribute keys and values used by the signal handlers below to
# annotate publisher/consumer spans.
_TASK_TAG_KEY = "celery.action"
_TASK_APPLY_ASYNC = "apply_async"
_TASK_RUN = "run"
_TASK_RETRY_REASON_KEY = "celery.retry.reason"
_TASK_REVOKED_REASON_KEY = "celery.revoked.reason"
_TASK_REVOKED_TERMINATED_SIGNAL_KEY = "celery.terminated.signal"
_TASK_NAME_KEY = "celery.task_name"
_MESSAGE_ID_ATTRIBUTE_NAME = "messaging.message_id"
class CarrierGetter(DictGetter):
    """Getter that reads propagation values off a Celery request object.

    Values are looked up as attributes and always returned as an
    iterable of values, as the propagator API expects.
    """

    def get(self, carrier, key):
        value = getattr(carrier, key, [])
        # Normalise scalars (strings included) to a one-element tuple.
        if isinstance(value, str) or not isinstance(value, Iterable):
            return (value,)
        return value

    def keys(self, carrier):
        # Enumerating carrier keys is not supported for request objects.
        return []


carrier_getter = CarrierGetter()
class CeleryInstrumentor(BaseInstrumentor):
    def _instrument(self, **kwargs):
        """Connect handlers to the Celery signals so tasks get traced.

        Accepts an optional ``tracer_provider`` keyword argument used
        to build the tracer; the global provider is used otherwise.
        """
        tracer_provider = kwargs.get("tracer_provider")

        # pylint: disable=attribute-defined-outside-init
        self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)

        # Worker-side execution signals.
        signals.task_prerun.connect(self._trace_prerun, weak=False)
        signals.task_postrun.connect(self._trace_postrun, weak=False)
        # Client-side publication signals.
        signals.before_task_publish.connect(
            self._trace_before_publish, weak=False
        )
        signals.after_task_publish.connect(
            self._trace_after_publish, weak=False
        )
        signals.task_failure.connect(self._trace_failure, weak=False)
        signals.task_retry.connect(self._trace_retry, weak=False)
def _uninstrument(self, **kwargs):
signals.task_prerun.disconnect(self._trace_prerun)
signals.task_postrun.disconnect(self._trace_postrun)
signals.before_task_publish.disconnect(self._trace_before_publish)
signals.after_task_publish.disconnect(self._trace_after_publish)
signals.task_failure.disconnect(self._trace_failure)
signals.task_retry.disconnect(self._trace_retry)
    def _trace_prerun(self, *args, **kwargs):
        """task_prerun handler: start a CONSUMER span for the task run.

        The remote context is extracted from the task request (injected
        by _trace_before_publish on the client side), so the execution
        span joins the publisher's trace. The span and its activation
        are attached to the task for _trace_postrun to close.
        """
        task = utils.retrieve_task(kwargs)
        task_id = utils.retrieve_task_id(kwargs)

        if task is None or task_id is None:
            return

        request = task.request
        tracectx = propagators.extract(carrier_getter, request) or None

        logger.debug("prerun signal start task_id=%s", task_id)

        operation_name = "{0}/{1}".format(_TASK_RUN, task.name)
        span = self._tracer.start_span(
            operation_name, context=tracectx, kind=trace.SpanKind.CONSUMER
        )

        # Enter the activation manually; it is exited in _trace_postrun.
        activation = self._tracer.use_span(span, end_on_exit=True)
        activation.__enter__()
        utils.attach_span(task, task_id, (span, activation))
    @staticmethod
    def _trace_postrun(*args, **kwargs):
        """task_postrun handler: finish the span started by _trace_prerun.

        Attributes are set here (rather than at prerun) because most of
        the request data is only complete after the task ran.
        """
        task = utils.retrieve_task(kwargs)
        task_id = utils.retrieve_task_id(kwargs)

        if task is None or task_id is None:
            return

        logger.debug("postrun signal task_id=%s", task_id)

        # retrieve and finish the Span
        span, activation = utils.retrieve_span(task, task_id)
        if span is None:
            logger.warning("no existing span found for task_id=%s", task_id)
            return

        # request context tags
        if span.is_recording():
            span.set_attribute(_TASK_TAG_KEY, _TASK_RUN)
            utils.set_attributes_from_context(span, kwargs)
            utils.set_attributes_from_context(span, task.request)
            span.set_attribute(_TASK_NAME_KEY, task.name)

        # Exiting the activation ends the span (end_on_exit=True).
        activation.__exit__(None, None, None)
        utils.detach_span(task, task_id)
def _trace_before_publish(self, *args, **kwargs):
task = utils.retrieve_task_from_sender(kwargs)
task_id = utils.retrieve_task_id_from_message(kwargs)
if task is None or task_id is None:
return
operation_name = "{0}/{1}".format(_TASK_APPLY_ASYNC, task.name)
span = self._tracer.start_span(
operation_name, kind=trace.SpanKind.PRODUCER
)
# apply some attributes here because most of the data is not available
if span.is_recording():
span.set_attribute(_TASK_TAG_KEY, _TASK_APPLY_ASYNC)
span.set_attribute(_MESSAGE_ID_ATTRIBUTE_NAME, task_id)
span.set_attribute(_TASK_NAME_KEY, task.name)
utils.set_attributes_from_context(span, kwargs)
activation = self._tracer.use_span(span, end_on_exit=True)
activation.__enter__()
utils.attach_span(task, task_id, (span, activation), is_publish=True)
headers = kwargs.get("headers")
if headers:
propagators.inject(type(headers).__setitem__, headers)
@staticmethod
def _trace_after_publish(*args, **kwargs):
task = utils.retrieve_task_from_sender(kwargs)
task_id = utils.retrieve_task_id_from_message(kwargs)
if task is None or task_id is None:
return
# retrieve and finish the Span
_, activation = utils.retrieve_span(task, task_id, is_publish=True)
if activation is None:
logger.warning("no existing span found for task_id=%s", task_id)
return
activation.__exit__(None, None, None)
utils.detach_span(task, task_id, is_publish=True)
@staticmethod
def _trace_failure(*args, **kwargs):
task = utils.retrieve_task_from_sender(kwargs)
task_id = utils.retrieve_task_id(kwargs)
if task is None or task_id is None:
return
# retrieve and pass exception info to activation
span, _ = utils.retrieve_span(task, task_id)
if span is None or not span.is_recording():
return
status_kwargs = {"status_code": StatusCode.ERROR}
ex = kwargs.get("einfo")
if (
hasattr(task, "throws")
and ex is not None
and isinstance(ex.exception, task.throws)
):
return
if ex is not None:
status_kwargs["description"] = str(ex)
span.set_status(Status(**status_kwargs))
@staticmethod
def _trace_retry(*args, **kwargs):
task = utils.retrieve_task_from_sender(kwargs)
task_id = utils.retrieve_task_id_from_request(kwargs)
reason = utils.retrieve_reason(kwargs)
if task is None or task_id is None or reason is None:
return
span, _ = utils.retrieve_span(task, task_id)
if span is None or not span.is_recording():
return
# Add retry reason metadata to span
# Use `str(reason)` instead of `reason.message` in case we get
# something that isn't an `Exception`
span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason))
| [
"noreply@github.com"
] | kinvolk.noreply@github.com |
c7c7e5c1f3818d56efd5758696f9e9cdb33b4d45 | b4828cf9403fedde5dd346b3338a5f4bf0f1eb96 | /hackerrank_sols/Python/input.py | 891fc4c290f538166cca4e06196f203faf1156d1 | [] | no_license | Masters-Akt/CS_codes | 9ab3d87ca384ebd364c7b87c8da94b753082a7e3 | 1aaa107439f2e208bb67b0bcca676f90b6bc6a11 | refs/heads/master | 2023-01-24T00:11:05.151592 | 2023-01-21T18:45:57 | 2023-01-21T18:45:57 | 292,529,160 | 6 | 7 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | #Kumar Ankit
x,k=(input().split(" "))
x=int(x)
k=int(k)
print(eval(input())==k)
| [
"64123046+Masters-Akt@users.noreply.github.com"
] | 64123046+Masters-Akt@users.noreply.github.com |
a1a59786acc50a3bcfc44d678b26a02c420f6cd1 | d7df6e3a7aafd8316f71b46ab6e1b2d4741318f6 | /non_optimal_solutions/productExceptSelf.py | 62eb747d2826160ef5da98e492bc02a10c24d678 | [] | no_license | echrisinger/Blind-75 | 72b01be6ad71103eb378e91295089a9e56747ff7 | b17d53619c7b2cc5851cd2a02fa3e81f676914de | refs/heads/master | 2022-10-13T01:32:42.698432 | 2020-05-26T03:22:06 | 2020-05-26T03:22:06 | 260,078,456 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
before, after = [1]*len(nums), [1]*len(nums)
for i in range(len(nums)-1):
before[i+1] = before[i]*nums[i]
rev_i = len(nums) - 1 - i
after[rev_i-1] = after[rev_i] * nums[rev_i]
res = [0] * len(nums)
for i in range(len(nums)):
res[i] = before[i] * after[i]
return res
# O(n) space
| [
"echrisinger@gmail.com"
] | echrisinger@gmail.com |
18fcdcdba81100a0f2df2ed2fb80b682d2c8d32d | 98c42b6722dbdd1774bb89ea76fc8dd585fa2a92 | /SoftUni/SimpleConditions/Company.py | 6bcd73327705a8029c6b0ca31b015b83ed2ba690 | [] | no_license | Putzmeister/PythonProjects | 19ee45ca576596243b062f12d4161cff80b573e2 | 97a7f682b808c0ea536042c5890c113b07fdde67 | refs/heads/master | 2021-08-22T07:20:31.002695 | 2017-11-29T15:53:58 | 2017-11-29T15:53:58 | 112,492,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | import math
neededhours = int(input())
days = int(input())
overtimeWorkers = int(input())
if 0 <= neededhours <= 200000 and 0 <= days <= 20000 and 0 <= overtimeWorkers <= 200:
workingDays = 0.9 * days
workingHours = workingDays * 8
overtime = overtimeWorkers * 2 * days
totalhours = math.floor(workingHours + overtime)
if totalhours >= neededhours:
lefHours = totalhours - neededhours
print('Yes!' + str(lefHours) + ' hours left.')
else:
lefHours = neededhours - totalhours
print('Not enough time!' + str(lefHours) + ' hours needed.')
| [
"putzmeister@users.noreply.github.com"
] | putzmeister@users.noreply.github.com |
26fc8b49fcc85ffb16820963727e86ecec723ae3 | abccdbf9b0849b47960c3c352870793405debfed | /0x02-python-import_modules/3-infinite_add.py | 319d74896baaa8ff2b1e4ae09a0a2729223fdf4b | [] | no_license | hunterxx0/holbertonschool-higher_level_programming | 88b1b0f31b536c6940f2e64a6924a06ba9cbf193 | 44064cf0722cd20d93f58b64ab185d2898770d73 | refs/heads/master | 2022-12-20T12:14:15.877147 | 2020-09-24T21:25:54 | 2020-09-24T21:25:54 | 259,276,369 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | #!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
x = len(argv)
if x == 2:
print("{}".format(argv[1]))
elif x == 1:
print("0")
else:
s = 0
for i in range(1, x):
s += int(argv[i])
print("{}".format(s))
| [
"azouzimhamed@gmail.com"
] | azouzimhamed@gmail.com |
5ad0df8d9e33195deba111bc3a3458f03e70e9d1 | 37635cea6ee5fdfffcdd113d3e5deb24e3258365 | /blog/views.py | 3651f9ef0c91b87d1f3124db83a2ecbe6f01e823 | [] | no_license | shubhambhatia92/portfolio | 680b185b3e7596bab2a17f176b621a5a278fb75f | d793d2ba2fed97b80a7b7c11bb1539f058b9db59 | refs/heads/master | 2020-04-02T03:58:31.035311 | 2018-10-23T07:13:16 | 2018-10-23T07:13:16 | 153,993,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | from django.shortcuts import render,get_object_or_404
from .models import blog
# Create your views here.
def allblogs(request):
blogs=blog.objects
return render(request,'blog/allblogs.html',{'blogs' :blogs})
def detail(request, blog_id):
detailblog=get_object_or_404(blog,pk=blog_id)
return render(request,'blog/detail.html',{'blog':detailblog}) | [
"shubhambhatia92@gmail.com"
] | shubhambhatia92@gmail.com |
3a53d7bc4cc348fe37afcba294869c5a3c482088 | 875b93935c054c1650ec43b86f54ffe257d5c56a | /src/DataAcquisition/RetrieveTweets.py | fd68e9c1e46e74580a8021da08ecf127e435c6c7 | [] | no_license | FelixDSantos/SarcasmDetection | 600ae72b9a04eb37bf1c39276fc546c8031d4a07 | 38e3bb27c404b53b5cc7ddf355089c3810dc7a34 | refs/heads/master | 2021-06-22T10:35:56.622283 | 2017-07-14T17:07:13 | 2017-07-14T17:07:13 | 80,047,964 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
import tweepy
import os
import itertools
#consumer key, consumer secret, access token, access secret.
ckey = 'zc7f3iKjDkeJYCdbEhfKQJ7bU'
csecret = 'pQKhuzZkRJ0sJ1bHevnkR42qh4UGW4dxLw3FGzgoVSSPXUzmGQ'
atoken ='333587045-PRmu0YPeMFoEBYCQi9gk4OGRGr9MkLx4aLs45rHj'
asecret ='vmmuJ0KjEQ6nsARCm8zjcfNCbRN9YKRr9at2edD8OWKBB'
#
def getTweet(id):
try:
tweet = api.get_status(id)
return tweet.text
except tweepy.TweepError as e:
print('Failed to retrieve tweet with ID: ',id,' ' ,e.reason)
if(e.reason.__contains__('Rate limit exceeded')):
return 'Sleep'
Sarcasmset='/Users/FelixDSantos/LeCode/DeepLearning/fyp/Data/sarcasm_tweets.txt'
# TweetOnly='/Users/FelixDSantos/LeCode/DeepLearning/fyp/Data/Cleaned/TweetOnly.txt'
TweetOnly='/Users/FelixDSantos/LeCode/DeepLearning/fyp/Data/Cleaned/test.txt'
auth = tweepy.AppAuthHandler(ckey, csecret)
# auth.set_access_token(atoken, asecret)
api = tweepy.API(auth,wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
def tweetIDsToTweettxt(idtext,tweetoutputtext):
with open(Sarcasmset, 'r') as f:
# header= next(f)
with open(TweetOnly, 'a') as newappend:
if (os.path.getsize(TweetOnly) == 0):
newappend.write("Tweet\t\tSarcasm")
newappend.write("\n")
for line in f:
words = line.split(",")
tweetid=words[1].replace("\n","")
result = getTweet(tweetid)
while(result=='Sleep'):
time.sleep(60)
result=getTweet(tweetid)
if(result!= None):
tweet = result
label= words[0]
newappend.write(tweet+'\t\t'+label)
newappend.write("\n")
newappend.close()
f.close()
def streamHashtag(hashtag,label,amount):
Tweets = tweepy.Cursor(api.search, q=hashtag,languages=["en"]).items(amount)
# listoftweets=[]
for tweet in Tweets:
if (not tweet.retweeted) and ('RT @' not in tweet.text) and ('@' not in tweet.text) and ('http' not in tweet.text) and(tweet.lang=='en'):
yield([tweet.text,label])
def streamtweets(path)
sarcasmtweets=streamHashtag("#sarcasm",1,1000000)
# sarcasmtweets+=streamHashtag("#not",1,200000)
# lensarcasmtweets=sum(1 for x in sarcasmtweets)
print("Successfully retrieved {} tweets".format('#sarcasm'))
sarcasmtweets=itertools.chain(sarcasmtweets,streamHashtag("#not",1,200000))
# lensarcasmtweets=sum(1 for x in sarcasmtweets)
print("Successfully retrieved {} tweets".format('#sarcasm and #not'))
nonsarcasm=streamHashtag("a",0,0)
alltweets=itertools.chain(sarcasmtweets,nonsarcasm)
tweetstream=path
print("Writing to file {}".format(tweetstream))
with open(tweetstream, 'a') as newappend:
if (os.path.getsize(tweetstream) == 0):
newappend.write("Tweet\t\tSarcasm")
newappend.write("\n")
for tweet in alltweets:
newappend.write(tweet[0] + '\t\t' + str(tweet[1]))
newappend.write("\n")
print("Tweets writting to file {}".format(tweetstream))
| [
"f.delossantosiii1@nuigalway.ie"
] | f.delossantosiii1@nuigalway.ie |
ead86ff3ce709ffe0865987335eb19c8dcab3987 | 8a3c1c66828008941dffad983ad79936830045d7 | /abc172/b.py | 084cbc4ece4e6e4b1bae05f8ff60e9956d5934a1 | [
"MIT"
] | permissive | nishio/atcoder | 71130c7923f557b5269ffd8063dab1f7e2732a30 | 8db36537b5d8580745d5f98312162506ad7d7ab4 | refs/heads/master | 2023-04-15T07:41:00.322297 | 2021-04-25T09:00:26 | 2021-04-25T09:00:26 | 273,831,891 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | S = input()
T = input()
print(sum(S[i] != T[i] for i in range(len(S))))
| [
"nishio.hirokazu@gmail.com"
] | nishio.hirokazu@gmail.com |
293ff48845ca5dffe641254523ab8dda7d9ca0dc | 28d368fda86c41c62fedad60274f012b545408fe | /Q_16.py | 366a715ca14e76c38487bbbbe39b3425b1cdf304 | [] | no_license | adi1201239/b | e46b7149142a9131008ad40ca7a3330cf6d583da | d5f0132dc067ee3b14dee6f971bb5acd3cbef248 | refs/heads/master | 2020-04-17T05:03:56.253819 | 2019-01-22T18:14:04 | 2019-01-22T18:14:04 | 166,260,348 | 0 | 0 | null | 2019-01-17T16:48:51 | 2019-01-17T16:41:31 | Java | UTF-8 | Python | false | false | 264 | py | i = 1
x = int(input("Enter the number:"))
for k in range (1, (x+1), 1):
c = 0;
for j in range(1, (i + 1), 1):
a = i % j
if (a == 0):
c = c + 1
if (c == 2):
print(i)
else:
k = k - 1
i = i+1 | [
"noreply@github.com"
] | adi1201239.noreply@github.com |
3cad8bd54419850ca2db1e342c3d3452f6f847f5 | 3b4b188514c33a1f4568baa59a2a385a2d7b6205 | /config/urls.py | b7d78a9010e1d399cb8c68101fcb8d15635d4acf | [] | no_license | amyth/django-starter | 5d74a7a5654611f966748523982d9d4591f1e43d | 8a629cd717c038677488fd1860cc6001baf8c542 | refs/heads/master | 2020-05-17T17:32:46.993614 | 2014-09-24T07:15:17 | 2014-09-24T07:15:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | """ Main project url confuguration module. Other url modules
to be included in this module.
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Custom apps' urls
url(r'^', include('candidates.urls')),
url(r'^', include('recruiters.urls')),
# Third party apps' urls
url(r'^', include('social_auth.urls')),
url(r'^api', include('rest_framework.urls', namespace='rest_framework')),
# Admin urls
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| [
"aroras.official@gmail.com"
] | aroras.official@gmail.com |
087bc3914f01d56c5b118f5446be99dce12b524f | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Backtracking/restore_ip_addresses.py | 9f2f7ded2404852ca3a967a2eb84096a1fa29da3 | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | class Solution:
def restoreIpAddresses(self, s: str) -> List[str]:
def dfs(idx, path):
if len(path) == 4 or idx == len(s):
if len(path) == 4 and idx == len(s):
output.append(".".join(path))
return
for i in range(idx, min(idx + 3, len(s))):
ip = s[idx : i + 1]
if i == idx or (i > idx and s[idx] != "0" and int(ip) < 256):
dfs(i + 1, path + [ip])
output = []
dfs(0, [])
return output | [
"iamharshvirani7@gmail.com"
] | iamharshvirani7@gmail.com |
46643a2e72ac2cd8d0b60bac0865c11aea33f5a6 | 51bcde2fff5b47b18d2a3ecf6352bde0e4847a32 | /accounts/views.py | ed129c2fe36ee7ea1d5484d3094f22de9736264f | [] | no_license | hello-im-yj/dstagram | 2c16a0c3f18cdb783918cc4653a7cb702c9b7159 | 93fd9934f9e1b6d81305a702934eee91d30a48cf | refs/heads/master | 2023-01-19T19:53:17.503184 | 2020-11-23T10:25:50 | 2020-11-23T10:25:50 | 312,838,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from django.shortcuts import render
from django.contrib.auth.models import User
from django.views.generic.base import TemplateView
from django.views.generic import CreateView
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
#User creation
class UserCreateView(CreateView) :
template_name = 'accounts/register.html'
form_class = UserCreationForm
success_url = reverse_lazy('accounts:register_done')
class UserCreateDoneTV(TemplateView) :
template_name = 'accounts/register_done.html'
| [
"sandwich17yj@likelion.org"
] | sandwich17yj@likelion.org |
04e4fd79673db814b97dd67d4af811840db67123 | 3eee6855254e8efc6a90eac380bbd9f854f1355b | /classroom/migrations/0003_rename_syllabus_curriculum.py | abaa6507ebe9610727b54edc4ebf645cd3fd6b8f | [] | no_license | bhaskerath/major_project | 4cd745311c7378fa211134b0f90230f9f8e55b1e | 971fee2f067d5877d6d3c339285d033b3111ab35 | refs/heads/main | 2023-08-10T22:31:49.024250 | 2021-09-12T11:52:05 | 2021-09-12T11:52:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # Generated by Django 3.2.6 on 2021-08-28 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('classroom', '0002_syllabus_is_complete'),
]
operations = [
migrations.RenameModel(
old_name='Syllabus',
new_name='Curriculum',
),
]
| [
"71547800+ashyshyadav@users.noreply.github.com"
] | 71547800+ashyshyadav@users.noreply.github.com |
60607d0470e14ff9502fce42008287242814b7d8 | 2bd395c1bc738951d7b113d2feeecd4a253b1bcd | /xm_smach/smach_lib/xm_smach/pick_turn.py | b944635ece6301612642d5991991d7ec167ad145 | [] | no_license | xm-project/xm_2019 | 9752e8baacd67a56d7c56b828981dbbc863c7bdb | 2ead11b1415612d2d9be0898ba0c971bcde46943 | refs/heads/master | 2020-08-03T07:14:53.716356 | 2019-09-29T13:12:41 | 2019-09-29T13:12:41 | 211,664,411 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,800 | py | #! /usr/bin/env python
# encoding:utf8
import rospy
from smach import *
from smach_ros import *
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from geometry_msgs.msg import *
from math import pi,atan
import tf
# FGM: This State is expected to solve the problem that xm backs up outwards the door
class PickTurn(State):
def __init__(self):
State.__init__(self, outcomes=[
'succeeded', 'aborted'],input_keys=['turn_pose'], output_keys=['turn_pose'])
self.tf_Listener = tf.TransformListener()
def execute(self, userdata):
try:
now = rospy.Time(0)
self.tf_Listener.waitForTransform('map' , 'base_link' , now , rospy.Duration(1.0))
turn_pose= self.tf_Listener.lookupTransform('map', 'base_link', now)
rospy.logwarn('position-------')
rospy.logerr(turn_pose[1])
rospy.logwarn(turn_pose)
quaternion1 = turn_pose[1][0]
quaternion2 = turn_pose[1][1]
quaternion3 = turn_pose[1][2]
quaternion4 = turn_pose[1][3]
angular = euler_from_quaternion([quaternion1, quaternion2, quaternion3, quaternion4])
rospy.logwarn(angular)
angle = angular[2] + pi*2/3
rospy.logwarn(angle)
quaternion = quaternion_from_euler(0, 0, angle)
rospy.logwarn(quaternion)
userdata.turn_pose.orientation = Quaternion(quaternion[0],quaternion[1],quaternion[2],quaternion[3])
userdata.turn_pose.position = Point(turn_pose[0][0],turn_pose[0][1],turn_pose[0][2])
rospy.logwarn(userdata.turn_pose)
return 'succeeded'
except Exception , e:
rospy.logerr(e)
return 'aborted'
# FGM : This is a State to judge if we need turn
class IsTurn(State):
def __init__(self):
State.__init__(self , outcomes = ['yes' , 'no' , 'error'])
self.tf_Listener = tf.TransformListener()
def execute(self , userdata):
try:
now = rospy.Time(0)
self.tf_Listener.waitForTransform('map' , 'base_link' , now , rospy.Duration(2.0))
(point,orientation) = self.tf_Listener.lookupTransform('base_link' , 'map' , now)
rospy.logerr(point)
while not is_shutdown():
now = rospy.Time(0)
self.tf_Listener.waitForTransform('map' , 'base_link' , now , rospy.Duration(1.0))
now_velocity = self.tf_Listener.lookupTwist('base_link' , 'map' , now,rospy.Duration(2.0))
rospy.logwarn(now_velocity)
if abs(now_velocity[0][1])+abs(now_velocity[0][0]) <= 0 :
continue
quaternion1 = orientation[0]
quaternion2 = orientation[1]
quaternion3 = orientation[2]
quaternion4 = orientation[3]
angular_xm = euler_from_quaternion([quaternion1,quaternion2,quaternion3,quaternion4])
rospy.logwarn(angular_xm)
velocity_an = atan(now_velocity[0][1]/now_velocity[0][0])
rospy.logwarn(velocity_an)
if(now_velocity[0][1]>0 and now_velocity[0][0]<0):
velocity_an += pi
elif(now_velocity[0][1]<0 and now_velocity[0][0]<0):
velocity_an -= pi
rospy.logwarn(velocity_an)
deta = angular_xm[2] - velocity_an
rospy.logwarn(deta)
if( deta > pi/2 or deta < -pi/2):
return 'yes'
else:
return 'no'
except Exception , e:
rospy.logerr(e)
return 'error'
class NewNav():
def __init__(self):
self.new_nav = Concurrence(outcomes = ['succeeded','aboretd','error'],
input_keys=['pos_xm'],
default_outcome = 'succeeded',
outcome_cb = self.nav_outcome_cb,
child_termination_cb = self.nav_child_termination_cb)
with self.new_nav:
self.turn_back = StateMachine(outcomes = ['succeeded','aborted','error'])
with self.turn_back:
self.turn_back.userdata.nav_pos = Pose()
StateMachine.add('ISTURN',
IsTurn(),
transitions={'yes':'PICKTURN' , 'no':'ISTURN','error':'error'},
remapping={'pos_xm':'pos_xm'})
StateMachine.add('PICKTURN',
PickTurn(),
transitions={'succeeded':'TURNGO','aborted':'ISTURN'},
remapping={'xm_pos':'pos_xm',
'turn_pos':'nav_pos'} )
StateMachine.add('TURNGO',NavStack(),
transitions={'succeeded':'ISTURN','aborted':'TURNGO','error':'error'},
remapping={'pos_xm':'turn_pos'})
Concurrence.add('TURNBACK',self.turn_back,
remapping={'pos_xm':'pos_xm'})
Concurrence.add('NAV',NavStack(),
remapping={'pos_xm':'pos_xm'})
def nav_outcome_cb(self,outcome_map):
if(outcome_map['NAV']=='succeeded'):
return 'succeeded'
elif(outcome_map['NAV'] == 'aborted'):
return 'aborted'
elif(outcome_map['TURNBACK']=='error'):
return 'error'
def nav_child_termination_cb(self,outcome_map):
if(outcome_map['NAV'] == 'succeeded'):
return True
| [
"2595858788@qq.com"
] | 2595858788@qq.com |
e0f6234d333f704a58a6ab9f101c42dd0a2db339 | a6884b99ff43422597a2c8eb57acb1e0a474178b | /converter.py | 1e942b5f7404b145bc630c096d1536a0aaeacd6f | [] | no_license | alroman/prime-graph-loop | 7b788523cefd6ab84dcaf5d2ed532931f3a4b2f0 | 82150529f350d15327435da629db6ac095aea917 | refs/heads/master | 2021-01-02T09:15:14.539948 | 2012-09-19T02:06:03 | 2012-09-19T02:06:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | '''
Read a file with list of primes, output primes per line
'''
def foo(fil):
f = open(fil, 'r')
w = open('prime_list.txt', 'w')
# advance the pointer
for i in range(4):
f.readline()
for line in f:
# split by ' '
s = line.split(' ')
# only save clean info
for l in s:
if(len(l.strip())):
w.write(l + '\n')
w.close()
f.close()
if __name__ == "__main__":
print "[.] Running converter program"
foo('10000.txt')
| [
"alromanb@gmail.com"
] | alromanb@gmail.com |
64fb2ef450cee3527d782c33dc9e0c7c0cdb864e | acdf43c3b2f415c759937493180f1e24b3262063 | /G_twoLayersNN.py | afeb0a2abf0ec1c2e8ca531d3292515dce9c38c7 | [] | no_license | gorbi/cse691_homework4 | 6e2d70b67c292e9c0f554d3f7121361db1876177 | 693171b1252665a68239fd0dbf4b225dec662f7c | refs/heads/master | 2021-07-25T21:10:33.581807 | 2017-11-01T02:29:53 | 2017-11-01T02:29:53 | 108,135,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,263 | py | import numpy as np
class TwoLayersNN (object):
"""" TwoLayersNN classifier """
def __init__ (self, inputDim, hiddenDim, outputDim, update=0):
self.params = dict()
self.update = update
self.params['w1'] = 0.0001 * np.random.randn(inputDim, hiddenDim)
self.params['b1'] = np.zeros(hiddenDim)
self.params['w2'] = 0.0001 * np.random.randn(hiddenDim, outputDim)
self.params['b2'] = np.zeros(outputDim)
def calLoss (self, x, y, reg):
grads = dict()
# Forward pass to calculate loss
tmp = x.dot(self.params['w1']) + self.params['b1']
hOutput = np.maximum(0.01 * tmp, tmp)
scores = hOutput.dot(self.params['w2']) + self.params['b2']
scores = np.maximum(0.01 * scores, scores)
scores -= np.max(scores, axis=1, keepdims=True)
scores = np.exp(scores)
scoresProbs = scores/np.sum(scores, axis=1, keepdims=True)
logProbs = -np.log(scoresProbs[np.arange(x.shape[0]), y])
loss = np.sum(logProbs) / x.shape[0]
loss += 0.5 * reg * np.sum(self.params['w1'] * self.params['w1']) + 0.5 * reg * np.sum(self.params['w2'] * self.params['w2'])
# Backward pass to calculate each gradient
dScoresProbs = scoresProbs
dScoresProbs[range(x.shape[0]), list(y)] -= 1
dScoresProbs /= x.shape[0]
grads['w2'] = hOutput.T.dot(dScoresProbs) + reg * self.params['w2']
grads['b2'] = np.sum(dScoresProbs, axis=0)
dhOutput = dScoresProbs.dot(self.params['w2'].T)
dhOutputAct = (hOutput >= 0) * dhOutput + (hOutput < 0) * dhOutput * 0.01
grads['w1'] = x.T.dot(dhOutputAct) + reg * self.params['w1']
grads['b1'] = np.sum(dhOutputAct, axis=0)
return loss, grads
def train (self, x, y, lr=1e-3, reg=1e-5, iterations=100, batchSize=200, decay=0.95, verbose=False):
"""
Train this linear classifier using stochastic gradient descent.
D: Input dimension.
C: Number of Classes.
N: Number of example.
Inputs:
- x: training data of shape (N, D)
- y: output data of shape (N, ) where value < C
- lr: (float) learning rate for optimization.
- reg: (float) regularization strength.
- iter: (integer) total number of iterations.
- batchSize: (integer) number of example in each batch running.
- verbose: (boolean) Print log of loss and training accuracy.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
# Run stochastic gradient descent to optimize W.
lossHistory = []
# Initialize value for each update optimizer
self.params['VW2'] = 0
self.params['VW1'] = 0
self.params['cacheW2'] = 0
self.params['cacheW1'] = 0
for i in range(iterations):
batchID = np.random.choice(x.shape[0], batchSize, replace=True)
xBatch = x[batchID]
yBatch = y[batchID]
loss, grads = self.calLoss(xBatch, yBatch, reg)
lossHistory.append(loss)
if self.update == 0:
#########################################################################
# TODO: 10 points #
# - Use Naive Update to update weight parameter #
#########################################################################
self.params['w1'] += -lr * grads['w1']
self.params['w2'] += -lr * grads['w2']
elif self.update == 1:
#########################################################################
# TODO: 10 points #
# - Use Momentum Update to update weight parameter #
# - Momentum = 0.9 #
#########################################################################
mu = 0.9
self.params['VW1'] = mu * self.params['VW1'] - lr * grads['w1']
self.params['w1'] += self.params['VW1']
self.params['VW2'] = mu * self.params['VW2'] - lr * grads['w2']
self.params['w2'] += self.params['VW2']
elif self.update == 2:
#########################################################################
# TODO: 20 points #
# - Use Nesterov Update to update weight parameter #
# - Momentum = 0.9 #
# - Hint #
# v_prev = v #
# v = mu * v - lr * dw #
# w += -mu * v_prev + (1 + mu) * v #
#########################################################################
mu = 0.9
VW1_prev = self.params['VW1']
self.params['VW1'] = mu * self.params['VW1'] - lr * grads['w1']
self.params['w1'] += -mu * VW1_prev + (1 + mu) * self.params['VW1']
VW2_prev = self.params['VW2']
self.params['VW2'] = mu * self.params['VW2'] - lr * grads['w2']
self.params['w2'] += -mu * VW2_prev + (1 + mu) * self.params['VW2']
elif self.update == 3:
#########################################################################
# TODO: 20 points #
# - Use AdaGrad Update to update weight parameter #
#########################################################################
self.params['cacheW1'] += (grads['w1'] * grads['w1'])
self.params['w1'] += -lr * grads['w1']/(np.sqrt(self.params['cacheW1'])+1e-7)
self.params['cacheW2'] += (grads['w2'] * grads['w2'])
self.params['w2'] += -lr * grads['w2']/(np.sqrt(self.params['cacheW2'])+1e-7)
elif self.update == 4:
#########################################################################
# TODO: 20 points #
# - Use RMSProp Update to update weight parameter #
#########################################################################
self.params['cacheW1'] = decay * self.params['cacheW1'] + (1 - decay) * (grads['w1'] * grads['w1'])
self.params['w1'] += -lr * grads['w1']/(np.sqrt(self.params['cacheW1'])+1e-7)
self.params['cacheW2'] = decay * self.params['cacheW2'] + (1 - decay) * (grads['w2'] * grads['w2'])
self.params['w2'] += -lr * grads['w2']/(np.sqrt(self.params['cacheW2'])+1e-7)
else:
#########################################################################
# TODO: 20 points #
# - Use Adam Update to update weight parameter #
# - B1 = 0.9, B2 = 0.999 #
#########################################################################
B1, B2 = 0.9, 0.999
self.params['VW1'] = B1 * self.params['VW1'] + (1 - B1) * grads['w1']
self.params['cacheW1'] = B2 * self.params['cacheW1'] + (1 - B2) * (grads['w1'] * grads['w1'])
VW1b = self.params['VW1'] / (1 - (B1 ** (i + 1)))
cacheW1b = self.params['cacheW1'] / (1 - (B2 ** (i + 1)))
self.params['w1'] += -lr * VW1b / (np.sqrt(cacheW1b) + 1e-7)
self.params['VW2'] = B1 * self.params['VW2'] + (1 - B1) * grads['w2']
self.params['cacheW2'] = B2 * self.params['cacheW2'] + (1 - B2) * (grads['w2'] * grads['w2'])
VW2b = self.params['VW2'] / (1 - (B1 ** (i + 1)))
cacheW2b = self.params['cacheW2'] / (1 - (B2 ** (i + 1)))
self.params['w2'] += -lr * VW2b / (np.sqrt(cacheW2b) + 1e-7)
self.params['b2'] += -lr * grads['b2']
self.params['b1'] += -lr * grads['b1']
lr *= decay
if verbose and i % 100 == 0 and len(lossHistory) is not 0:
print ('Loop {0} loss {1}'.format(i, lossHistory[i]))
return lossHistory
def predict (self, x,):
tmp = x.dot(self.params['w1']) + self.params['b1']
hOutput = np.maximum(0.01 * tmp, tmp)
scores = hOutput.dot(self.params['w2']) + self.params['b2']
yPred = np.argmax(scores, axis=1)
return yPred
def calAccuracy (self, x, y):
acc = 100.0 * (np.sum(self.predict(x) == y) / float(x.shape[0]))
return acc
| [
"nagaprasad@outlook.in"
] | nagaprasad@outlook.in |
ef4a126562505db34aa836430078148dcbfd71a4 | a462a24ff937e151e8151f3a1bdc9c3714b12c0e | /2021EJOR/scripts/mebb/mebb_11_51.py | 17f1585137674da26b982b1f87cdbfac36fdc275 | [] | no_license | noeliarico/kemeny | b4cbcac57203237769252de2c50ce959aa4ca50e | 50819f8bf0d19fb29a0b5c6d2ee031e8a811497d | refs/heads/main | 2023-03-29T14:36:37.931286 | 2023-03-16T09:04:12 | 2023-03-16T09:04:12 | 330,797,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188,718 | py |
import numpy as np
import pandas as pd
import time
from kemeny import algorithms as alg
# Number of repeated timing runs per problem instance (median is reported).
rep = 3
# Empty result table: 7 summary columns plus one raw-time column per repetition.
results = np.empty((0, 7 + rep))
##############################################################
# Pairwise-preference (outranking) matrix for instance 1: om[i][j] counts the
# voters ranking alternative i above alternative j (11 alternatives, 51 voters;
# entries at (i, j) and (j, i) sum to 51).
om = np.array([
[0,32,14,21,25,27,30,23,22,16,21],
[19,0,15,18,16,21,21,18,18,17,17],
[37,36,0,32,28,30,31,22,19,25,23],
[30,33,19,0,33,25,27,29,27,19,23],
[26,35,23,18,0,24,24,21,26,24,20],
[24,30,21,26,27,0,27,20,29,26,20],
[21,30,20,24,27,24,0,22,22,24,22],
[28,33,29,22,30,31,29,0,26,28,24],
[29,33,32,24,25,22,29,25,0,22,21],
[35,34,26,32,27,25,27,23,29,0,28],
[30,34,28,28,31,31,29,27,30,23,0]])
times = np.zeros(rep)
for i in range(rep):
    # ME-BB: Azzini-Munda branch-and-bound exact Kemeny solver; float("inf")
    # means no initial upper bound on the tentative solution.
    algorithm = alg.AzziniMunda5(om, float("inf"))
    # perf_counter() is the monotonic, high-resolution benchmark clock;
    # time.time() is wall-clock and can jump with system clock adjustments.
    start_time = time.perf_counter()
    sol = algorithm.execute()
    times[i] = time.perf_counter() - start_time
exec_time = np.median(times)
# Summary row: 11 alternatives, 51 voters, instance 1, method name, median
# runtime, number of optimal rankings found, solver node count, then raw times.
result = np.append(np.array([11, 51, 1, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
# Pairwise-preference (outranking) matrix for instance 2: om[i][j] counts the
# voters ranking alternative i above alternative j (11 alternatives, 51 voters;
# entries at (i, j) and (j, i) sum to 51).
om = np.array([
[0,29,31,29,29,24,22,25,26,28,22],
[22,0,27,25,22,23,23,26,19,23,22],
[20,24,0,25,22,21,22,22,21,27,21],
[22,26,26,0,25,25,26,27,24,27,23],
[22,29,29,26,0,27,26,29,20,31,26],
[27,28,30,26,24,0,22,28,23,33,23],
[29,28,29,25,25,29,0,26,22,30,25],
[26,25,29,24,22,23,25,0,20,27,25],
[25,32,30,27,31,28,29,31,0,32,20],
[23,28,24,24,20,18,21,24,19,0,24],
[29,29,30,28,25,28,26,26,31,27,0]])
times = np.zeros(rep)
for i in range(rep):
    # ME-BB: Azzini-Munda branch-and-bound exact Kemeny solver; float("inf")
    # means no initial upper bound on the tentative solution.
    algorithm = alg.AzziniMunda5(om, float("inf"))
    # perf_counter() is the monotonic, high-resolution benchmark clock;
    # time.time() is wall-clock and can jump with system clock adjustments.
    start_time = time.perf_counter()
    sol = algorithm.execute()
    times[i] = time.perf_counter() - start_time
exec_time = np.median(times)
# Summary row: 11 alternatives, 51 voters, instance 2, method name, median
# runtime, number of optimal rankings found, solver node count, then raw times.
result = np.append(np.array([11, 51, 2, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
# Pairwise-preference (outranking) matrix for instance 3: om[i][j] counts the
# voters ranking alternative i above alternative j (11 alternatives, 51 voters;
# entries at (i, j) and (j, i) sum to 51).
om = np.array([
[0,14,19,23,21,18,15,26,26,30,23],
[37,0,24,24,23,31,29,29,29,32,32],
[32,27,0,29,23,31,24,22,26,26,27],
[28,27,22,0,25,31,22,26,25,33,30],
[30,28,28,26,0,30,22,28,24,37,27],
[33,20,20,20,21,0,19,28,23,31,29],
[36,22,27,29,29,32,0,32,34,33,31],
[25,22,29,25,23,23,19,0,24,29,23],
[25,22,25,26,27,28,17,27,0,27,22],
[21,19,25,18,14,20,18,22,24,0,22],
[28,19,24,21,24,22,20,28,29,29,0]])
times = np.zeros(rep)
for i in range(rep):
    # ME-BB: Azzini-Munda branch-and-bound exact Kemeny solver; float("inf")
    # means no initial upper bound on the tentative solution.
    algorithm = alg.AzziniMunda5(om, float("inf"))
    # perf_counter() is the monotonic, high-resolution benchmark clock;
    # time.time() is wall-clock and can jump with system clock adjustments.
    start_time = time.perf_counter()
    sol = algorithm.execute()
    times[i] = time.perf_counter() - start_time
exec_time = np.median(times)
# Summary row: 11 alternatives, 51 voters, instance 3, method name, median
# runtime, number of optimal rankings found, solver node count, then raw times.
result = np.append(np.array([11, 51, 3, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
# Pairwise-preference (outranking) matrix for instance 4: om[i][j] counts the
# voters ranking alternative i above alternative j (11 alternatives, 51 voters;
# entries at (i, j) and (j, i) sum to 51 — note several unanimous 51/0 pairs).
om = np.array([
[0,30,51,30,20,30,20,30,30,20,30],
[21,0,31,51,41,31,21,51,31,21,41],
[0,20,0,30,20,0,0,30,0,0,20],
[21,0,21,0,21,21,21,31,21,21,41],
[31,10,31,30,0,31,31,51,31,31,51],
[21,20,51,30,20,0,41,51,21,41,41],
[31,30,51,30,20,10,0,51,31,21,51],
[21,0,21,20,0,0,0,0,0,0,41],
[21,20,51,30,20,30,20,51,0,20,41],
[31,30,51,30,20,10,30,51,31,0,51],
[21,10,31,10,0,10,0,10,10,0,0]])
times = np.zeros(rep)
for i in range(rep):
    # ME-BB: Azzini-Munda branch-and-bound exact Kemeny solver; float("inf")
    # means no initial upper bound on the tentative solution.
    algorithm = alg.AzziniMunda5(om, float("inf"))
    # perf_counter() is the monotonic, high-resolution benchmark clock;
    # time.time() is wall-clock and can jump with system clock adjustments.
    start_time = time.perf_counter()
    sol = algorithm.execute()
    times[i] = time.perf_counter() - start_time
exec_time = np.median(times)
# Summary row: 11 alternatives, 51 voters, instance 4, method name, median
# runtime, number of optimal rankings found, solver node count, then raw times.
result = np.append(np.array([11, 51, 4, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,31,32,29,33,31,21,19,28,24,24],
[20,0,22,22,20,25,25,16,21,24,17],
[19,29,0,20,31,28,27,20,24,25,21],
[22,29,31,0,32,32,25,25,27,25,28],
[18,31,20,19,0,25,24,16,25,20,24],
[20,26,23,19,26,0,26,17,21,27,17],
[30,26,24,26,27,25,0,24,21,16,20],
[32,35,31,26,35,34,27,0,34,22,30],
[23,30,27,24,26,30,30,17,0,23,18],
[27,27,26,26,31,24,35,29,28,0,22],
[27,34,30,23,27,34,31,21,33,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 5, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,31,25,33,25,28,30,25,29,33],
[23,0,20,19,36,26,29,34,25,36,24],
[20,31,0,22,28,24,31,27,20,23,37],
[26,32,29,0,29,25,24,27,27,31,36],
[18,15,23,22,0,28,26,22,23,21,20],
[26,25,27,26,23,0,28,20,22,22,26],
[23,22,20,27,25,23,0,22,29,21,18],
[21,17,24,24,29,31,29,0,29,29,22],
[26,26,31,24,28,29,22,22,0,24,28],
[22,15,28,20,30,29,30,22,27,0,22],
[18,27,14,15,31,25,33,29,23,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 6, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,28,29,27,22,24,26,27,28,23],
[22,0,26,29,25,24,18,22,16,23,16],
[23,25,0,26,24,20,21,22,22,25,25],
[22,22,25,0,26,23,19,24,19,21,20],
[24,26,27,25,0,26,22,25,22,22,21],
[29,27,31,28,25,0,23,25,25,25,27],
[27,33,30,32,29,28,0,29,24,27,28],
[25,29,29,27,26,26,22,0,20,24,22],
[24,35,29,32,29,26,27,31,0,27,26],
[23,28,26,30,29,26,24,27,24,0,25],
[28,35,26,31,30,24,23,29,25,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 7, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,27,34,27,26,28,24,30,31,29],
[22,0,24,22,24,20,22,19,25,22,18],
[24,27,0,28,25,27,29,22,25,30,29],
[17,29,23,0,25,30,27,23,27,24,16],
[24,27,26,26,0,25,34,25,29,25,17],
[25,31,24,21,26,0,22,20,26,28,23],
[23,29,22,24,17,29,0,24,23,32,19],
[27,32,29,28,26,31,27,0,28,28,24],
[21,26,26,24,22,25,28,23,0,26,21],
[20,29,21,27,26,23,19,23,25,0,16],
[22,33,22,35,34,28,32,27,30,35,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 8, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,28,28,34,28,25,25,28,27,28],
[22,0,22,14,26,22,21,18,23,26,17],
[23,29,0,23,27,24,27,22,27,26,22],
[23,37,28,0,37,34,25,26,32,29,25],
[17,25,24,14,0,21,19,14,21,22,20],
[23,29,27,17,30,0,22,24,26,26,25],
[26,30,24,26,32,29,0,28,30,24,27],
[26,33,29,25,37,27,23,0,29,26,26],
[23,28,24,19,30,25,21,22,0,24,22],
[24,25,25,22,29,25,27,25,27,0,23],
[23,34,29,26,31,26,24,25,29,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 9, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,24,47,47,47,47,34,51,34,47],
[27,0,40,36,36,47,51,23,40,27,51],
[27,11,0,23,36,47,47,34,27,23,34],
[4,15,28,0,51,51,28,11,15,27,15],
[4,15,15,0,0,28,15,11,4,27,15],
[4,4,4,0,23,0,15,0,4,27,4],
[4,0,4,23,36,36,0,23,4,27,23],
[17,28,17,40,40,51,28,0,17,27,51],
[0,11,24,36,47,47,47,34,0,23,47],
[17,24,28,24,24,24,24,24,28,0,24],
[4,0,17,36,36,47,28,0,4,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 10, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,16,26,26,31,25,20,28,19,23,32],
[35,0,32,23,32,27,26,31,23,28,28],
[25,19,0,28,38,30,24,29,38,25,27],
[25,28,23,0,38,32,21,25,30,24,27],
[20,19,13,13,0,15,20,15,22,18,21],
[26,24,21,19,36,0,21,23,26,20,27],
[31,25,27,30,31,30,0,27,26,25,28],
[23,20,22,26,36,28,24,0,23,25,38],
[32,28,13,21,29,25,25,28,0,30,30],
[28,23,26,27,33,31,26,26,21,0,28],
[19,23,24,24,30,24,23,13,21,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 11, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,39,44,44,45,33,38,34,23,20],
[21,0,28,30,39,28,23,34,28,22,18],
[12,23,0,22,44,39,27,26,22,17,18],
[7,21,29,0,34,25,24,27,21,17,12],
[7,12,7,17,0,28,7,11,21,12,19],
[6,23,12,26,23,0,17,17,11,16,11],
[18,28,24,27,44,34,0,29,22,16,12],
[13,17,25,24,40,34,22,0,22,18,19],
[17,23,29,30,30,40,29,29,0,22,30],
[28,29,34,34,39,35,35,33,29,0,19],
[31,33,33,39,32,40,39,32,21,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 12, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,23,36,31,27,20,33,27,29,17],
[24,0,20,34,29,28,25,28,23,29,19],
[28,31,0,38,30,29,17,25,29,30,22],
[15,17,13,0,15,21,7,7,12,20,15],
[20,22,21,36,0,23,19,22,7,29,18],
[24,23,22,30,28,0,21,26,22,29,26],
[31,26,34,44,32,30,0,36,25,40,31],
[18,23,26,44,29,25,15,0,15,25,17],
[24,28,22,39,44,29,26,36,0,33,24],
[22,22,21,31,22,22,11,26,18,0,17],
[34,32,29,36,33,25,20,34,27,34,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 13, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,21,25,27,25,25,21,23,18,23],
[28,0,21,29,31,23,26,26,32,28,32],
[30,30,0,32,37,27,28,30,29,24,28],
[26,22,19,0,29,22,24,24,26,19,20],
[24,20,14,22,0,23,25,23,25,17,16],
[26,28,24,29,28,0,26,28,25,19,24],
[26,25,23,27,26,25,0,23,25,22,17],
[30,25,21,27,28,23,28,0,27,27,28],
[28,19,22,25,26,26,26,24,0,18,22],
[33,23,27,32,34,32,29,24,33,0,27],
[28,19,23,31,35,27,34,23,29,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 14, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,31,30,24,25,24,26,29,28,20,28],
[20,0,23,23,20,26,20,22,22,21,28],
[21,28,0,22,18,29,21,21,26,15,30],
[27,28,29,0,26,30,17,18,26,21,29],
[26,31,33,25,0,32,25,22,28,24,26],
[27,25,22,21,19,0,17,21,24,21,27],
[25,31,30,34,26,34,0,27,28,30,33],
[22,29,30,33,29,30,24,0,28,23,31],
[23,29,25,25,23,27,23,23,0,21,33],
[31,30,36,30,27,30,21,28,30,0,29],
[23,23,21,22,25,24,18,20,18,22,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 15, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,20,19,25,13,18,18,22,23,21],
[28,0,17,26,27,19,17,26,19,22,21],
[31,34,0,32,29,22,26,27,23,29,27],
[32,25,19,0,24,21,22,24,26,26,24],
[26,24,22,27,0,22,22,26,22,24,24],
[38,32,29,30,29,0,29,31,33,30,24],
[33,34,25,29,29,22,0,37,32,30,25],
[33,25,24,27,25,20,14,0,23,26,21],
[29,32,28,25,29,18,19,28,0,26,31],
[28,29,22,25,27,21,21,25,25,0,25],
[30,30,24,27,27,27,26,30,20,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 16, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,27,24,29,22,34,34,24,33,30],
[21,0,23,25,29,24,32,25,23,26,26],
[24,28,0,27,34,25,31,25,17,31,29],
[27,26,24,0,30,30,33,26,24,30,29],
[22,22,17,21,0,22,22,26,18,20,22],
[29,27,26,21,29,0,29,30,28,31,27],
[17,19,20,18,29,22,0,24,23,20,22],
[17,26,26,25,25,21,27,0,21,25,33],
[27,28,34,27,33,23,28,30,0,29,27],
[18,25,20,21,31,20,31,26,22,0,26],
[21,25,22,22,29,24,29,18,24,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 17, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,20,25,27,19,20,27,22,19,18,29],
[31,0,31,30,26,26,30,25,22,28,28],
[26,20,0,26,23,19,25,21,19,23,25],
[24,21,25,0,20,22,27,18,17,23,27],
[32,25,28,31,0,30,29,31,28,29,30],
[31,25,32,29,21,0,28,28,23,23,28],
[24,21,26,24,22,23,0,19,21,23,22],
[29,26,30,33,20,23,32,0,24,26,29],
[32,29,32,34,23,28,30,27,0,27,33],
[33,23,28,28,22,28,28,25,24,0,30],
[22,23,26,24,21,23,29,22,18,21,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 18, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,21,29,26,31,20,27,31,27,21],
[23,0,19,26,21,33,21,28,27,29,20],
[30,32,0,25,32,28,26,23,29,31,20],
[22,25,26,0,23,24,20,20,27,21,24],
[25,30,19,28,0,27,21,25,24,25,19],
[20,18,23,27,24,0,20,19,26,21,17],
[31,30,25,31,30,31,0,30,32,33,29],
[24,23,28,31,26,32,21,0,28,25,23],
[20,24,22,24,27,25,19,23,0,23,22],
[24,22,20,30,26,30,18,26,28,0,18],
[30,31,31,27,32,34,22,28,29,33,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 19, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,28,25,32,17,23,31,28,25,26],
[23,0,21,26,32,19,26,22,26,27,23],
[23,30,0,25,41,26,24,22,28,26,23],
[26,25,26,0,31,22,33,32,29,22,26],
[19,19,10,20,0,11,23,23,19,19,23],
[34,32,25,29,40,0,28,32,30,32,27],
[28,25,27,18,28,23,0,27,24,25,23],
[20,29,29,19,28,19,24,0,24,19,26],
[23,25,23,22,32,21,27,27,0,27,30],
[26,24,25,29,32,19,26,32,24,0,19],
[25,28,28,25,28,24,28,25,21,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 20, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,20,33,34,25,36,31,35,27,21,32],
[31,0,25,37,33,34,30,43,33,34,34],
[18,26,0,27,29,23,22,34,18,22,18],
[17,14,24,0,21,25,31,32,10,21,22],
[26,18,22,30,0,26,24,35,32,27,25],
[15,17,28,26,25,0,30,31,15,30,20],
[20,21,29,20,27,21,0,34,23,21,21],
[16,8,17,19,16,20,17,0,25,22,11],
[24,18,33,41,19,36,28,26,0,19,28],
[30,17,29,30,24,21,30,29,32,0,27],
[19,17,33,29,26,31,30,40,23,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 21, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,20,31,19,21,16,25,33,26,16],
[30,0,24,27,31,25,26,26,31,23,31],
[31,27,0,25,25,28,17,34,30,27,30],
[20,24,26,0,21,21,16,34,32,21,22],
[32,20,26,30,0,30,19,27,29,28,23],
[30,26,23,30,21,0,16,32,36,28,21],
[35,25,34,35,32,35,0,31,35,28,30],
[26,25,17,17,24,19,20,0,29,26,20],
[18,20,21,19,22,15,16,22,0,19,24],
[25,28,24,30,23,23,23,25,32,0,22],
[35,20,21,29,28,30,21,31,27,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 22, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,16,18,25,19,17,18,20,20,21],
[32,0,24,20,25,28,20,24,24,26,22],
[35,27,0,26,32,27,25,31,26,28,28],
[33,31,25,0,32,25,21,24,29,29,27],
[26,26,19,19,0,24,18,23,22,22,26],
[32,23,24,26,27,0,27,33,22,25,31],
[34,31,26,30,33,24,0,25,34,28,28],
[33,27,20,27,28,18,26,0,25,20,25],
[31,27,25,22,29,29,17,26,0,27,27],
[31,25,23,22,29,26,23,31,24,0,24],
[30,29,23,24,25,20,23,26,24,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 23, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,22,18,24,20,18,18,21,24,22],
[28,0,26,28,28,29,27,29,33,22,31],
[29,25,0,25,31,26,27,25,26,28,25],
[33,23,26,0,33,26,27,23,27,31,28],
[27,23,20,18,0,24,19,20,25,22,20],
[31,22,25,25,27,0,20,25,26,24,26],
[33,24,24,24,32,31,0,27,24,25,29],
[33,22,26,28,31,26,24,0,29,25,22],
[30,18,25,24,26,25,27,22,0,22,24],
[27,29,23,20,29,27,26,26,29,0,25],
[29,20,26,23,31,25,22,29,27,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 24, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,40,31,30,36,40,30,24,51,32,30],
[11,0,32,29,36,20,20,19,34,22,17],
[20,19,0,19,13,21,14,0,35,6,14],
[21,22,32,0,34,32,30,13,34,10,29],
[15,15,38,17,0,17,15,10,30,16,19],
[11,31,30,19,34,0,14,26,35,21,18],
[21,31,37,21,36,37,0,16,42,20,20],
[27,32,51,38,41,25,35,0,39,34,29],
[0,17,16,17,21,16,9,12,0,20,4],
[19,29,45,41,35,30,31,17,31,0,30],
[21,34,37,22,32,33,31,22,47,21,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 25, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,32,24,21,24,19,30,25,20,27],
[24,0,28,27,21,25,24,28,27,18,31],
[19,23,0,21,20,24,17,27,23,19,28],
[27,24,30,0,28,28,24,24,25,22,31],
[30,30,31,23,0,31,23,34,32,27,39],
[27,26,27,23,20,0,23,28,29,18,29],
[32,27,34,27,28,28,0,31,31,24,29],
[21,23,24,27,17,23,20,0,23,27,27],
[26,24,28,26,19,22,20,28,0,17,26],
[31,33,32,29,24,33,27,24,34,0,35],
[24,20,23,20,12,22,22,24,25,16,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 26, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,26,28,29,23,29,29,28,26,26],
[26,0,25,25,24,24,21,26,24,25,24],
[25,26,0,31,28,31,26,32,28,32,30],
[23,26,20,0,29,23,22,24,26,27,26],
[22,27,23,22,0,25,19,28,23,24,23],
[28,27,20,28,26,0,21,29,23,29,24],
[22,30,25,29,32,30,0,24,30,30,27],
[22,25,19,27,23,22,27,0,29,24,30],
[23,27,23,25,28,28,21,22,0,29,23],
[25,26,19,24,27,22,21,27,22,0,25],
[25,27,21,25,28,27,24,21,28,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 27, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,26,26,23,33,30,27,31,24,28],
[21,0,23,20,26,32,26,24,34,19,27],
[25,28,0,22,28,32,26,24,33,28,29],
[25,31,29,0,24,31,29,28,33,28,28],
[28,25,23,27,0,31,25,29,33,25,25],
[18,19,19,20,20,0,15,23,27,19,24],
[21,25,25,22,26,36,0,28,33,24,25],
[24,27,27,23,22,28,23,0,32,25,33],
[20,17,18,18,18,24,18,19,0,17,17],
[27,32,23,23,26,32,27,26,34,0,26],
[23,24,22,23,26,27,26,18,34,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 28, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,27,24,35,29,28,31,36,25,30],
[26,0,22,28,29,22,28,24,32,25,25],
[24,29,0,28,30,30,24,26,29,24,30],
[27,23,23,0,30,25,26,27,34,22,24],
[16,22,21,21,0,23,17,15,25,19,21],
[22,29,21,26,28,0,28,26,30,28,29],
[23,23,27,25,34,23,0,25,28,24,28],
[20,27,25,24,36,25,26,0,29,22,24],
[15,19,22,17,26,21,23,22,0,16,19],
[26,26,27,29,32,23,27,29,35,0,27],
[21,26,21,27,30,22,23,27,32,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 29, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,31,23,27,27,30,25,24,24,23],
[21,0,21,25,21,20,22,27,20,16,22],
[20,30,0,27,23,25,22,29,22,25,24],
[28,26,24,0,25,28,24,30,29,24,29],
[24,30,28,26,0,32,27,30,30,29,29],
[24,31,26,23,19,0,27,28,24,27,24],
[21,29,29,27,24,24,0,28,27,26,30],
[26,24,22,21,21,23,23,0,25,19,26],
[27,31,29,22,21,27,24,26,0,24,24],
[27,35,26,27,22,24,25,32,27,0,29],
[28,29,27,22,22,27,21,25,27,22,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 30, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,23,21,28,24,29,25,22,24,21],
[30,0,24,25,28,27,26,28,24,24,22],
[28,27,0,26,28,29,30,26,26,31,25],
[30,26,25,0,28,27,33,27,25,29,29],
[23,23,23,23,0,24,29,24,22,27,25],
[27,24,22,24,27,0,30,26,26,29,27],
[22,25,21,18,22,21,0,17,23,26,20],
[26,23,25,24,27,25,34,0,20,29,22],
[29,27,25,26,29,25,28,31,0,29,25],
[27,27,20,22,24,22,25,22,22,0,21],
[30,29,26,22,26,24,31,29,26,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 31, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,27,25,27,34,25,17,23,26,25],
[25,0,37,26,33,42,42,28,33,43,29],
[24,14,0,39,35,34,37,23,15,26,28],
[26,25,12,0,27,33,25,22,23,27,33],
[24,18,16,24,0,24,48,15,19,32,31],
[17,9,17,18,27,0,36,18,9,24,29],
[26,9,14,26,3,15,0,9,2,23,22],
[34,23,28,29,36,33,42,0,13,34,25],
[28,18,36,28,32,42,49,38,0,41,40],
[25,8,25,24,19,27,28,17,10,0,22],
[26,22,23,18,20,22,29,26,11,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 32, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,32,31,29,27,27,28,29,21,21,23],
[19,0,24,27,24,18,21,28,27,20,23],
[20,27,0,27,22,25,31,22,26,23,22],
[22,24,24,0,25,18,26,29,26,20,20],
[24,27,29,26,0,27,28,27,22,23,21],
[24,33,26,33,24,0,26,27,25,23,24],
[23,30,20,25,23,25,0,25,19,21,21],
[22,23,29,22,24,24,26,0,24,24,19],
[30,24,25,25,29,26,32,27,0,28,23],
[30,31,28,31,28,28,30,27,23,0,27],
[28,28,29,31,30,27,30,32,28,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 33, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,28,14,19,34,23,21,20,19,26],
[28,0,28,22,18,33,18,28,27,27,30],
[23,23,0,20,27,30,25,17,26,23,28],
[37,29,31,0,25,38,31,30,32,32,37],
[32,33,24,26,0,33,21,27,37,24,32],
[17,18,21,13,18,0,16,21,15,19,25],
[28,33,26,20,30,35,0,37,31,20,32],
[30,23,34,21,24,30,14,0,24,20,25],
[31,24,25,19,14,36,20,27,0,27,28],
[32,24,28,19,27,32,31,31,24,0,27],
[25,21,23,14,19,26,19,26,23,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 34, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,12,18,12,18,21,18,21,12,30,33],
[39,0,18,39,18,39,18,39,18,18,51],
[33,33,0,33,30,33,12,33,12,12,33],
[39,12,18,0,18,39,18,21,18,18,51],
[33,33,21,33,0,33,21,33,12,33,33],
[30,12,18,12,18,0,18,33,12,30,12],
[33,33,39,33,30,33,0,33,12,12,33],
[30,12,18,30,18,18,18,0,30,30,30],
[39,33,39,33,39,39,39,21,0,51,51],
[21,33,39,33,18,21,39,21,0,0,33],
[18,0,18,0,18,39,18,21,0,18,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 35, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,26,25,32,27,31,30,31,26,27],
[25,0,27,21,25,25,22,21,22,20,24],
[25,24,0,27,26,27,28,23,26,19,28],
[26,30,24,0,31,23,23,19,26,24,34],
[19,26,25,20,0,24,19,22,23,17,24],
[24,26,24,28,27,0,27,21,24,23,23],
[20,29,23,28,32,24,0,25,26,22,27],
[21,30,28,32,29,30,26,0,28,31,30],
[20,29,25,25,28,27,25,23,0,25,26],
[25,31,32,27,34,28,29,20,26,0,30],
[24,27,23,17,27,28,24,21,25,21,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 36, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,24,32,27,20,23,25,29,22,31],
[27,0,17,26,21,20,27,27,21,24,21],
[27,34,0,33,32,23,31,26,36,26,29],
[19,25,18,0,27,23,22,24,27,16,19],
[24,30,19,24,0,24,23,27,31,22,23],
[31,31,28,28,27,0,27,26,30,25,28],
[28,24,20,29,28,24,0,27,31,22,29],
[26,24,25,27,24,25,24,0,33,27,25],
[22,30,15,24,20,21,20,18,0,18,23],
[29,27,25,35,29,26,29,24,33,0,29],
[20,30,22,32,28,23,22,26,28,22,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 37, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
# NOTE(review): the code below was originally 38 verbatim copies of the
# same measurement boilerplate, one per matrix.  It is folded into a
# single data-driven loop; the execution order, the printed output and
# the rows appended to `results` are identical to the original, and the
# loop leaves `om`, `times`, `i`, `t`, `sol`, `algorithm`, `exec_time`
# and `result` bound to the values of the last instance, exactly as the
# unrolled version did.
#
# Each entry is (instance_id, om) where `om` is the 11x11 pairwise
# outranking matrix of an 11-alternative / 51-voter benchmark instance
# (instances 38-75 of this problem-size batch).
_me_bb_cases = [
    (38, np.array([
        [0,25,17,24,24,19,15,22,8,14,17],
        [26,0,21,30,25,22,21,22,18,25,28],
        [34,30,0,34,24,20,28,25,13,27,22],
        [27,21,17,0,17,18,22,29,7,20,16],
        [27,26,27,34,0,31,30,28,15,30,28],
        [32,29,31,33,20,0,36,23,27,32,29],
        [36,30,23,29,21,15,0,24,11,22,33],
        [29,29,26,22,23,28,27,0,17,26,25],
        [43,33,38,44,36,24,40,34,0,42,45],
        [37,26,24,31,21,19,29,25,9,0,22],
        [34,23,29,35,23,22,18,26,6,29,0]])),
    (39, np.array([
        [0,17,25,27,29,26,20,27,26,32,24],
        [34,0,23,32,39,32,28,40,33,33,33],
        [26,28,0,26,27,25,34,32,22,36,29],
        [24,19,25,0,30,22,31,29,31,32,30],
        [22,12,24,21,0,16,16,23,18,26,16],
        [25,19,26,29,35,0,28,27,32,29,29],
        [31,23,17,20,35,23,0,25,30,23,25],
        [24,11,19,22,28,24,26,0,29,28,30],
        [25,18,29,20,33,19,21,22,0,27,15],
        [19,18,15,19,25,22,28,23,24,0,18],
        [27,18,22,21,35,22,26,21,36,33,0]])),
    (40, np.array([
        [0,17,17,30,36,17,30,17,17,26,38],
        [34,0,21,40,45,15,28,34,32,15,38],
        [34,30,0,51,30,30,24,45,24,39,45],
        [21,11,0,0,17,0,11,26,11,15,32],
        [15,6,21,34,0,15,34,21,6,15,38],
        [34,36,21,51,36,0,45,45,30,28,51],
        [21,23,27,40,17,6,0,38,17,21,38],
        [34,17,6,25,30,6,13,0,11,15,32],
        [34,19,27,40,45,21,34,40,0,21,32],
        [25,36,12,36,36,23,30,36,30,0,36],
        [13,13,6,19,13,0,13,19,19,15,0]])),
    (41, np.array([
        [0,26,14,11,18,20,17,17,23,20,30],
        [25,0,23,11,13,23,33,39,30,33,27],
        [37,28,0,19,22,27,39,36,31,13,25],
        [40,40,32,0,25,42,39,42,39,31,34],
        [33,38,29,26,0,32,29,35,26,23,42],
        [31,28,24,9,19,0,28,33,25,25,25],
        [34,18,12,12,22,23,0,24,9,16,19],
        [34,12,15,9,16,18,27,0,21,18,19],
        [28,21,20,12,25,26,42,30,0,16,25],
        [31,18,38,20,28,26,35,33,35,0,36],
        [21,24,26,17,9,26,32,32,26,15,0]])),
    (42, np.array([
        [0,35,32,30,29,24,30,35,33,21,31],
        [16,0,24,23,15,26,18,29,15,14,14],
        [19,27,0,19,17,20,19,21,18,17,19],
        [21,28,32,0,25,23,21,23,28,26,28],
        [22,36,34,26,0,28,29,35,31,24,22],
        [27,25,31,28,23,0,32,34,25,24,25],
        [21,33,32,30,22,19,0,32,30,23,26],
        [16,22,30,28,16,17,19,0,28,17,13],
        [18,36,33,23,20,26,21,23,0,13,26],
        [30,37,34,25,27,27,28,34,38,0,23],
        [20,37,32,23,29,26,25,38,25,28,0]])),
    (43, np.array([
        [0,26,26,17,32,34,38,39,24,17,32],
        [25,0,18,20,39,29,19,33,13,28,35],
        [25,33,0,26,26,35,35,32,30,32,33],
        [34,31,25,0,29,31,42,25,18,25,19],
        [19,12,25,22,0,37,14,10,10,26,19],
        [17,22,16,20,14,0,23,7,12,23,9],
        [13,32,16,9,37,28,0,33,18,17,26],
        [12,18,19,26,41,44,18,0,13,28,35],
        [27,38,21,33,41,39,33,38,0,32,26],
        [34,23,19,26,25,28,34,23,19,0,26],
        [19,16,18,32,32,42,25,16,25,25,0]])),
    (44, np.array([
        [0,18,17,17,16,20,14,10,20,15,12],
        [33,0,31,28,29,35,32,23,30,32,26],
        [34,20,0,26,32,31,32,31,26,34,28],
        [34,23,25,0,24,37,21,21,23,26,27],
        [35,22,19,27,0,36,21,24,25,28,29],
        [31,16,20,14,15,0,14,20,17,26,18],
        [37,19,19,30,30,37,0,28,25,31,25],
        [41,28,20,30,27,31,23,0,29,30,26],
        [31,21,25,28,26,34,26,22,0,37,32],
        [36,19,17,25,23,25,20,21,14,0,22],
        [39,25,23,24,22,33,26,25,19,29,0]])),
    (45, np.array([
        [0,28,17,24,35,9,27,23,13,29,39],
        [23,0,17,12,20,21,26,20,22,25,34],
        [34,34,0,30,49,31,24,31,31,29,39],
        [27,39,21,0,47,30,29,25,17,32,30],
        [16,31,2,4,0,14,10,10,7,27,24],
        [42,30,20,21,37,0,35,35,20,29,44],
        [24,25,27,22,41,16,0,30,19,30,31],
        [28,31,20,26,41,16,21,0,18,29,22],
        [38,29,20,34,44,31,32,33,0,21,34],
        [22,26,22,19,24,22,21,22,30,0,28],
        [12,17,12,21,27,7,20,29,17,23,0]])),
    (46, np.array([
        [0,39,13,50,27,24,39,38,36,15,51],
        [12,0,12,36,26,24,38,12,24,14,36],
        [38,39,0,50,27,24,39,38,36,27,51],
        [1,15,1,0,15,12,27,26,12,15,39],
        [24,25,24,36,0,24,24,12,36,27,36],
        [27,27,27,39,27,0,27,26,25,15,39],
        [12,13,12,24,27,24,0,12,24,15,25],
        [13,39,13,25,39,25,39,0,25,15,25],
        [15,27,15,39,15,26,27,26,0,15,39],
        [36,37,24,36,24,36,36,36,36,0,36],
        [0,15,0,12,15,12,26,26,12,15,0]])),
    (47, np.array([
        [0,26,25,30,32,30,31,26,31,33,24],
        [25,0,24,33,23,28,23,20,19,25,24],
        [26,27,0,25,26,25,23,22,20,31,26],
        [21,18,26,0,28,22,23,19,19,23,24],
        [19,28,25,23,0,22,26,28,22,29,28],
        [21,23,26,29,29,0,21,24,16,27,17],
        [20,28,28,28,25,30,0,25,22,31,22],
        [25,31,29,32,23,27,26,0,17,32,28],
        [20,32,31,32,29,35,29,34,0,36,30],
        [18,26,20,28,22,24,20,19,15,0,22],
        [27,27,25,27,23,34,29,23,21,29,0]])),
    (48, np.array([
        [0,23,23,28,20,21,26,24,26,24,23],
        [28,0,26,25,19,26,26,29,31,26,30],
        [28,25,0,27,22,22,27,24,29,26,27],
        [23,26,24,0,19,21,30,23,25,23,23],
        [31,32,29,32,0,23,30,29,28,25,33],
        [30,25,29,30,28,0,27,27,31,26,30],
        [25,25,24,21,21,24,0,19,29,20,23],
        [27,22,27,28,22,24,32,0,27,24,26],
        [25,20,22,26,23,20,22,24,0,21,23],
        [27,25,25,28,26,25,31,27,30,0,26],
        [28,21,24,28,18,21,28,25,28,25,0]])),
    (49, np.array([
        [0,27,32,19,17,18,18,22,15,16,23],
        [24,0,35,30,26,21,24,23,18,26,29],
        [19,16,0,26,16,13,15,21,14,16,19],
        [32,21,25,0,23,27,25,27,27,24,25],
        [34,25,35,28,0,26,29,29,24,23,27],
        [33,30,38,24,25,0,29,24,24,23,17],
        [33,27,36,26,22,22,0,23,26,29,33],
        [29,28,30,24,22,27,28,0,19,24,26],
        [36,33,37,24,27,27,25,32,0,31,28],
        [35,25,35,27,28,28,22,27,20,0,22],
        [28,22,32,26,24,34,18,25,23,29,0]])),
    (50, np.array([
        [0,31,18,31,14,23,23,20,23,29,14],
        [20,0,15,15,17,22,33,21,18,18,6],
        [33,36,0,25,25,35,32,32,31,36,27],
        [20,36,26,0,16,26,28,19,25,25,18],
        [37,34,26,35,0,34,26,22,36,30,20],
        [28,29,16,25,17,0,23,19,20,16,17],
        [28,18,19,23,25,28,0,20,24,27,12],
        [31,30,19,32,29,32,31,0,34,26,13],
        [28,33,20,26,15,31,27,17,0,24,15],
        [22,33,15,26,21,35,24,25,27,0,18],
        [37,45,24,33,31,34,39,38,36,33,0]])),
    (51, np.array([
        [0,20,23,31,23,20,27,23,30,19,26],
        [31,0,21,32,30,26,24,22,40,28,33],
        [28,30,0,27,26,29,23,23,35,27,29],
        [20,19,24,0,18,15,19,22,34,25,17],
        [28,21,25,33,0,20,20,21,31,25,22],
        [31,25,22,36,31,0,27,29,40,23,27],
        [24,27,28,32,31,24,0,25,34,28,25],
        [28,29,28,29,30,22,26,0,28,26,25],
        [21,11,16,17,20,11,17,23,0,14,19],
        [32,23,24,26,26,28,23,25,37,0,26],
        [25,18,22,34,29,24,26,26,32,25,0]])),
    (52, np.array([
        [0,26,23,25,22,23,24,24,20,21,22],
        [25,0,23,24,22,25,26,21,22,21,24],
        [28,28,0,24,22,20,23,25,22,16,26],
        [26,27,27,0,17,27,29,31,24,27,28],
        [29,29,29,34,0,23,28,26,25,26,33],
        [28,26,31,24,28,0,29,28,28,25,32],
        [27,25,28,22,23,22,0,22,18,23,29],
        [27,30,26,20,25,23,29,0,20,26,33],
        [31,29,29,27,26,23,33,31,0,24,32],
        [30,30,35,24,25,26,28,25,27,0,32],
        [29,27,25,23,18,19,22,18,19,19,0]])),
    (53, np.array([
        [0,30,24,26,30,23,26,27,22,22,18],
        [21,0,16,16,17,13,18,19,18,17,20],
        [27,35,0,24,32,21,28,26,29,19,26],
        [25,35,27,0,35,20,22,25,23,22,27],
        [21,34,19,16,0,22,22,23,22,16,21],
        [28,38,30,31,29,0,26,27,24,22,26],
        [25,33,23,29,29,25,0,21,20,24,19],
        [24,32,25,26,28,24,30,0,20,24,17],
        [29,33,22,28,29,27,31,31,0,18,23],
        [29,34,32,29,35,29,27,27,33,0,18],
        [33,31,25,24,30,25,32,34,28,33,0]])),
    (54, np.array([
        [0,36,31,24,42,11,32,29,32,41,25],
        [15,0,13,32,23,9,28,25,22,36,29],
        [20,38,0,28,36,30,29,35,31,41,33],
        [27,19,23,0,25,17,26,31,27,37,24],
        [9,28,15,26,0,3,15,18,11,30,24],
        [40,42,21,34,48,0,27,33,37,44,31],
        [19,23,22,25,36,24,0,33,32,39,26],
        [22,26,16,20,33,18,18,0,20,33,16],
        [19,29,20,24,40,14,19,31,0,34,18],
        [10,15,10,14,21,7,12,18,17,0,14],
        [26,22,18,27,27,20,25,35,33,37,0]])),
    (55, np.array([
        [0,27,24,30,24,26,23,28,26,27,27],
        [24,0,26,31,29,23,26,32,24,27,29],
        [27,25,0,29,29,29,25,34,24,22,27],
        [21,20,22,0,24,24,16,24,21,24,21],
        [27,22,22,27,0,24,22,26,25,28,23],
        [25,28,22,27,27,0,20,30,25,25,28],
        [28,25,26,35,29,31,0,29,27,29,27],
        [23,19,17,27,25,21,22,0,23,24,29],
        [25,27,27,30,26,26,24,28,0,29,27],
        [24,24,29,27,23,26,22,27,22,0,23],
        [24,22,24,30,28,23,24,22,24,28,0]])),
    (56, np.array([
        [0,32,22,27,24,22,36,26,28,25,29],
        [19,0,15,25,26,22,32,22,31,23,26],
        [29,36,0,34,24,21,37,38,34,33,31],
        [24,26,17,0,23,23,30,24,25,23,31],
        [27,25,27,28,0,28,29,25,31,27,27],
        [29,29,30,28,23,0,39,29,31,26,25],
        [15,19,14,21,22,12,0,16,24,22,20],
        [25,29,13,27,26,22,35,0,26,24,29],
        [23,20,17,26,20,20,27,25,0,24,24],
        [26,28,18,28,24,25,29,27,27,0,24],
        [22,25,20,20,24,26,31,22,27,27,0]])),
    (57, np.array([
        [0,27,22,21,32,25,31,26,25,29,27],
        [24,0,27,24,30,25,31,27,27,26,24],
        [29,24,0,25,38,31,32,22,25,25,27],
        [30,27,26,0,33,24,25,28,26,22,22],
        [19,21,13,18,0,22,25,16,15,23,14],
        [26,26,20,27,29,0,23,20,24,22,20],
        [20,20,19,26,26,28,0,21,25,26,19],
        [25,24,29,23,35,31,30,0,30,28,24],
        [26,24,26,25,36,27,26,21,0,26,20],
        [22,25,26,29,28,29,25,23,25,0,25],
        [24,27,24,29,37,31,32,27,31,26,0]])),
    (58, np.array([
        [0,24,24,41,41,41,24,31,24,24,27],
        [27,0,34,24,27,24,24,7,31,51,3],
        [27,17,0,24,27,24,17,7,31,51,3],
        [10,27,27,0,44,41,27,34,27,27,27],
        [10,24,24,7,0,41,24,31,24,27,3],
        [10,27,27,10,10,0,3,34,27,34,3],
        [27,27,34,24,27,48,0,34,31,34,3],
        [20,44,44,17,20,17,17,0,41,44,20],
        [27,20,20,24,27,24,20,10,0,27,3],
        [27,0,0,24,24,17,17,7,24,0,3],
        [24,48,48,24,48,48,48,31,48,48,0]])),
    (59, np.array([
        [0,17,22,9,13,25,10,23,14,20,33],
        [34,0,34,16,22,26,29,34,29,35,28],
        [29,17,0,11,12,18,10,42,19,18,29],
        [42,35,40,0,30,30,22,31,27,31,42],
        [38,29,39,21,0,29,21,32,26,24,32],
        [26,25,33,21,22,0,20,31,25,26,31],
        [41,22,41,29,30,31,0,41,24,36,43],
        [28,17,9,20,19,20,10,0,13,17,29],
        [37,22,32,24,25,26,27,38,0,26,31],
        [31,16,33,20,27,25,15,34,25,0,31],
        [18,23,22,9,19,20,8,22,20,20,0]])),
    (60, np.array([
        [0,31,31,30,34,28,16,23,28,21,23],
        [20,0,29,27,18,27,25,35,26,20,25],
        [20,22,0,28,30,28,22,32,23,22,19],
        [21,24,23,0,25,22,20,26,26,23,25],
        [17,33,21,26,0,23,16,27,23,14,21],
        [23,24,23,29,28,0,31,28,26,25,29],
        [35,26,29,31,35,20,0,29,24,15,31],
        [28,16,19,25,24,23,22,0,28,22,30],
        [23,25,28,25,28,25,27,23,0,27,29],
        [30,31,29,28,37,26,36,29,24,0,36],
        [28,26,32,26,30,22,20,21,22,15,0]])),
    (61, np.array([
        [0,17,20,14,40,36,25,20,28,22,17],
        [34,0,23,30,40,32,26,39,26,22,33],
        [31,28,0,33,37,35,29,32,29,25,30],
        [37,21,18,0,39,27,24,32,22,23,23],
        [11,11,14,12,0,23,5,15,14,8,10],
        [15,19,16,24,28,0,20,19,13,3,14],
        [26,25,22,27,46,31,0,32,26,12,17],
        [31,12,19,19,36,32,19,0,26,27,21],
        [23,25,22,29,37,38,25,25,0,15,25],
        [29,29,26,28,43,48,39,24,36,0,31],
        [34,18,21,28,41,37,34,30,26,20,0]])),
    (62, np.array([
        [0,25,27,22,26,17,26,27,22,21,16],
        [26,0,27,25,26,24,32,34,24,30,25],
        [24,24,0,25,29,27,32,33,28,28,28],
        [29,26,26,0,27,22,26,28,24,23,28],
        [25,25,22,24,0,24,27,31,21,19,26],
        [34,27,24,29,27,0,26,32,28,25,30],
        [25,19,19,25,24,25,0,30,27,23,23],
        [24,17,18,23,20,19,21,0,24,19,23],
        [29,27,23,27,30,23,24,27,0,28,19],
        [30,21,23,28,32,26,28,32,23,0,24],
        [35,26,23,23,25,21,28,28,32,27,0]])),
    (63, np.array([
        [0,36,48,22,41,33,25,51,48,51,48],
        [15,0,26,15,26,26,40,18,37,29,41],
        [3,25,0,15,15,29,25,18,37,44,26],
        [29,36,36,0,41,14,36,29,48,36,41],
        [10,25,36,10,0,21,25,25,40,36,51],
        [18,25,22,37,30,0,40,40,37,51,41],
        [26,11,26,15,26,11,0,26,48,29,41],
        [0,33,33,22,26,11,25,0,37,51,41],
        [3,14,14,3,11,14,3,14,0,14,14],
        [0,22,7,15,15,0,22,0,37,0,15],
        [3,10,25,10,0,10,10,10,37,36,0]])),
    (64, np.array([
        [0,26,23,29,24,22,22,29,30,26,24],
        [25,0,21,29,30,27,24,31,31,31,31],
        [28,30,0,26,20,17,28,34,26,30,25],
        [22,22,25,0,19,19,24,25,25,24,24],
        [27,21,31,32,0,29,23,28,26,28,31],
        [29,24,34,32,22,0,27,30,30,30,32],
        [29,27,23,27,28,24,0,30,28,29,28],
        [22,20,17,26,23,21,21,0,35,31,27],
        [21,20,25,26,25,21,23,16,0,25,25],
        [25,20,21,27,23,21,22,20,26,0,22],
        [27,20,26,27,20,19,23,24,26,29,0]])),
    (65, np.array([
        [0,33,37,28,32,33,25,34,28,35,27],
        [18,0,36,30,27,32,15,21,17,27,22],
        [14,15,0,19,29,24,14,24,21,24,17],
        [23,21,32,0,24,28,17,30,20,33,22],
        [19,24,22,27,0,26,25,28,28,25,20],
        [18,19,27,23,25,0,14,22,13,29,12],
        [26,36,37,34,26,37,0,28,24,27,23],
        [17,30,27,21,23,29,23,0,19,26,19],
        [23,34,30,31,23,38,27,32,0,34,24],
        [16,24,27,18,26,22,24,25,17,0,26],
        [24,29,34,29,31,39,28,32,27,25,0]])),
    (66, np.array([
        [0,19,11,23,19,10,21,21,21,23,22],
        [32,0,28,29,25,23,23,19,22,24,27],
        [40,23,0,32,31,26,27,29,23,25,28],
        [28,22,19,0,18,18,20,23,16,24,23],
        [32,26,20,33,0,17,21,25,21,22,27],
        [41,28,25,33,34,0,38,35,23,26,29],
        [30,28,24,31,30,13,0,24,23,20,25],
        [30,32,22,28,26,16,27,0,23,23,23],
        [30,29,28,35,30,28,28,28,0,25,27],
        [28,27,26,27,29,25,31,28,26,0,25],
        [29,24,23,28,24,22,26,28,24,26,0]])),
    (67, np.array([
        [0,28,31,29,18,28,33,25,19,15,32],
        [23,0,23,25,14,25,30,20,8,18,27],
        [20,28,0,27,20,28,29,24,17,17,25],
        [22,26,24,0,14,23,25,25,15,14,31],
        [33,37,31,37,0,40,38,31,31,24,39],
        [23,26,23,28,11,0,21,20,22,19,18],
        [18,21,22,26,13,30,0,13,13,20,31],
        [26,31,27,26,20,31,38,0,19,19,33],
        [32,43,34,36,20,29,38,32,0,30,38],
        [36,33,34,37,27,32,31,32,21,0,40],
        [19,24,26,20,12,33,20,18,13,11,0]])),
    (68, np.array([
        [0,10,0,0,0,31,21,39,10,10,21],
        [41,0,29,33,29,39,21,39,31,31,29],
        [51,22,0,12,20,39,31,51,22,22,33],
        [51,18,39,0,8,39,39,39,10,10,29],
        [51,22,31,43,0,39,31,51,31,43,21],
        [20,12,12,12,12,0,21,30,22,22,33],
        [30,30,20,12,20,30,0,30,22,22,41],
        [12,12,0,12,0,21,21,0,10,10,21],
        [41,20,29,41,20,29,29,41,0,51,29],
        [41,20,29,41,8,29,29,41,0,0,29],
        [30,22,18,22,30,18,10,30,22,22,0]])),
    (69, np.array([
        [0,26,24,16,21,26,21,21,25,17,18],
        [25,0,23,20,24,25,24,20,24,16,20],
        [27,28,0,22,25,28,21,23,27,18,20],
        [35,31,29,0,27,32,28,29,31,25,30],
        [30,27,26,24,0,30,22,25,26,20,25],
        [25,26,23,19,21,0,18,18,26,17,21],
        [30,27,30,23,29,33,0,25,29,26,23],
        [30,31,28,22,26,33,26,0,31,19,23],
        [26,27,24,20,25,25,22,20,0,15,23],
        [34,35,33,26,31,34,25,32,36,0,29],
        [33,31,31,21,26,30,28,28,28,22,0]])),
    (70, np.array([
        [0,35,28,31,25,28,23,21,27,35,32],
        [16,0,26,27,29,29,21,28,24,36,32],
        [23,25,0,24,28,27,26,22,22,30,30],
        [20,24,27,0,28,22,22,22,19,30,33],
        [26,22,23,23,0,23,25,27,26,31,31],
        [23,22,24,29,28,0,24,32,23,33,30],
        [28,30,25,29,26,27,0,35,30,33,31],
        [30,23,29,29,24,19,16,0,24,29,34],
        [24,27,29,32,25,28,21,27,0,28,35],
        [16,15,21,21,20,18,18,22,23,0,28],
        [19,19,21,18,20,21,20,17,16,23,0]])),
    (71, np.array([
        [0,36,21,26,31,20,24,29,18,34,30],
        [15,0,21,16,19,9,17,24,13,24,23],
        [30,30,0,24,33,17,30,29,29,28,33],
        [25,35,27,0,32,29,23,31,26,27,35],
        [20,32,18,19,0,22,15,19,14,22,31],
        [31,42,34,22,29,0,31,23,24,26,40],
        [27,34,21,28,36,20,0,37,27,27,27],
        [22,27,22,20,32,28,14,0,20,21,27],
        [33,38,22,25,37,27,24,31,0,31,38],
        [17,27,23,24,29,25,24,30,20,0,29],
        [21,28,18,16,20,11,24,24,13,22,0]])),
    (72, np.array([
        [0,19,27,19,26,22,20,21,25,17,28],
        [32,0,35,33,41,23,34,26,34,30,37],
        [24,16,0,23,23,8,23,13,17,14,24],
        [32,18,28,0,31,19,32,21,24,18,28],
        [25,10,28,20,0,17,21,21,23,22,32],
        [29,28,43,32,34,0,28,32,24,30,38],
        [31,17,28,19,30,23,0,22,20,18,30],
        [30,25,38,30,30,19,29,0,23,17,36],
        [26,17,34,27,28,27,31,28,0,23,32],
        [34,21,37,33,29,21,33,34,28,0,32],
        [23,14,27,23,19,13,21,15,19,19,0]])),
    (73, np.array([
        [0,27,33,38,26,23,29,33,27,23,30],
        [24,0,29,31,24,22,22,27,24,25,34],
        [18,22,0,29,22,21,26,26,23,18,28],
        [13,20,22,0,22,13,14,24,12,15,18],
        [25,27,29,29,0,22,24,27,22,26,32],
        [28,29,30,38,29,0,31,30,24,22,32],
        [22,29,25,37,27,20,0,27,24,19,28],
        [18,24,25,27,24,21,24,0,23,27,28],
        [24,27,28,39,29,27,27,28,0,24,28],
        [28,26,33,36,25,29,32,24,27,0,33],
        [21,17,23,33,19,19,23,23,23,18,0]])),
    (74, np.array([
        [0,23,25,26,22,27,29,27,24,31,26],
        [28,0,32,25,31,28,33,29,29,31,31],
        [26,19,0,25,25,23,31,29,24,31,27],
        [25,26,26,0,26,24,25,29,25,33,24],
        [29,20,26,25,0,30,28,30,28,33,28],
        [24,23,28,27,21,0,23,26,25,31,24],
        [22,18,20,26,23,28,0,25,26,31,28],
        [24,22,22,22,21,25,26,0,20,23,25],
        [27,22,27,26,23,26,25,31,0,28,24],
        [20,20,20,18,18,20,20,28,23,0,23],
        [25,20,24,27,23,27,23,26,27,28,0]])),
    (75, np.array([
        [0,25,26,37,34,27,26,32,32,30,26],
        [26,0,26,25,29,31,25,36,34,24,34],
        [25,25,0,34,40,24,24,33,32,30,28],
        [14,26,17,0,34,31,24,37,40,29,31],
        [17,22,11,17,0,17,22,20,24,25,7],
        [24,20,27,20,34,0,17,31,23,22,28],
        [25,26,27,27,29,34,0,35,28,20,28],
        [19,15,18,14,31,20,16,0,23,22,15],
        [19,17,19,11,27,28,23,28,0,18,27],
        [21,27,21,22,26,29,31,29,33,0,27],
        [25,17,23,20,44,23,23,36,24,24,0]])),
]
for _case_id, om in _me_bb_cases:
    # Time `rep` independent runs of the exact branch-and-bound median
    # ranking algorithm ("ME-BB"); the reported time is the median of
    # the per-run wall-clock times.
    times = np.zeros(rep)
    for i in range(rep):
        # Algorithm with Condorcet winner (original note); no time limit.
        algorithm = alg.AzziniMunda5(om, float("inf"))
        start_time = time.time()
        sol = algorithm.execute()
        t = (time.time() - start_time)
        times[i] = t
    exec_time = np.median(times)
    # Result row layout: [n_alternatives, n_voters, instance id, method
    # label, median runtime, number of solutions found, number of
    # tentatives] followed by the raw per-repetition timings.
    result = np.append(np.array([11, 51, _case_id, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
    print(result[:7])
    results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,26,23,31,17,18,24,19,19,17],
[28,0,27,24,29,15,24,24,23,29,13],
[25,24,0,18,25,14,20,20,17,25,14],
[28,27,33,0,27,17,21,23,18,32,22],
[20,22,26,24,0,18,18,18,17,19,14],
[34,36,37,34,33,0,23,29,25,30,21],
[33,27,31,30,33,28,0,32,25,33,30],
[27,27,31,28,33,22,19,0,24,26,23],
[32,28,34,33,34,26,26,27,0,34,23],
[32,22,26,19,32,21,18,25,17,0,18],
[34,38,37,29,37,30,21,28,28,33,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 76, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,30,25,23,20,22,23,30,30,23],
[32,0,24,35,25,33,22,17,25,31,31],
[21,27,0,26,21,21,31,31,26,35,18],
[26,16,25,0,19,13,13,17,24,31,25],
[28,26,30,32,0,19,24,21,26,34,18],
[31,18,30,38,32,0,21,22,26,31,26],
[29,29,20,38,27,30,0,15,30,28,24],
[28,34,20,34,30,29,36,0,34,31,30],
[21,26,25,27,25,25,21,17,0,20,20],
[21,20,16,20,17,20,23,20,31,0,17],
[28,20,33,26,33,25,27,21,31,34,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 77, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,19,34,29,20,30,28,26,28,13],
[24,0,23,33,27,27,28,34,18,28,20],
[32,28,0,29,26,25,24,38,20,24,29],
[17,18,22,0,28,19,23,22,22,34,22],
[22,24,25,23,0,20,28,31,21,30,14],
[31,24,26,32,31,0,26,28,21,32,29],
[21,23,27,28,23,25,0,23,22,27,22],
[23,17,13,29,20,23,28,0,21,28,20],
[25,33,31,29,30,30,29,30,0,31,25],
[23,23,27,17,21,19,24,23,20,0,21],
[38,31,22,29,37,22,29,31,26,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 78, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,16,12,15,10,27,24,25,26,22],
[22,0,20,20,19,15,30,21,17,32,28],
[35,31,0,29,27,30,30,25,30,32,38],
[39,31,22,0,28,28,26,23,28,30,39],
[36,32,24,23,0,25,32,24,23,37,35],
[41,36,21,23,26,0,33,29,31,36,33],
[24,21,21,25,19,18,0,27,25,29,30],
[27,30,26,28,27,22,24,0,25,26,30],
[26,34,21,23,28,20,26,26,0,32,27],
[25,19,19,21,14,15,22,25,19,0,31],
[29,23,13,12,16,18,21,21,24,20,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 79, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,17,24,26,27,20,22,28,30,23],
[23,0,17,24,28,26,27,17,31,24,17],
[34,34,0,35,32,30,26,25,37,37,29],
[27,27,16,0,30,26,23,21,28,30,17],
[25,23,19,21,0,28,21,20,25,31,16],
[24,25,21,25,23,0,21,23,30,32,23],
[31,24,25,28,30,30,0,26,32,31,22],
[29,34,26,30,31,28,25,0,34,34,27],
[23,20,14,23,26,21,19,17,0,24,19],
[21,27,14,21,20,19,20,17,27,0,19],
[28,34,22,34,35,28,29,24,32,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 80, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,30,21,30,33,24,23,30,18,25],
[24,0,18,21,32,23,36,21,20,14,32],
[21,33,0,22,37,25,35,32,26,26,32],
[30,30,29,0,36,32,34,29,27,18,33],
[21,19,14,15,0,23,20,15,19,13,23],
[18,28,26,19,28,0,22,18,28,15,27],
[27,15,16,17,31,29,0,14,22,14,26],
[28,30,19,22,36,33,37,0,34,24,25],
[21,31,25,24,32,23,29,17,0,23,27],
[33,37,25,33,38,36,37,27,28,0,34],
[26,19,19,18,28,24,25,26,24,17,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 81, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,40,28,40,23,37,21,29,34,30,42],
[11,0,13,25,12,32,6,14,25,26,31],
[23,38,0,36,30,40,26,36,35,30,34],
[11,26,15,0,16,29,10,23,27,17,38],
[28,39,21,35,0,36,15,28,30,37,32],
[14,19,11,22,15,0,7,21,27,21,28],
[30,45,25,41,36,44,0,42,45,41,44],
[22,37,15,28,23,30,9,0,23,30,39],
[17,26,16,24,21,24,6,28,0,21,39],
[21,25,21,34,14,30,10,21,30,0,32],
[9,20,17,13,19,23,7,12,12,19,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 82, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,37,29,28,31,29,35,34,37,31],
[26,0,35,23,28,30,20,30,30,19,34],
[14,16,0,20,27,18,12,28,23,17,29],
[22,28,31,0,33,32,21,36,25,25,35],
[23,23,24,18,0,20,19,27,23,25,21],
[20,21,33,19,31,0,15,27,23,13,32],
[22,31,39,30,32,36,0,27,31,25,35],
[16,21,23,15,24,24,24,0,17,17,18],
[17,21,28,26,28,28,20,34,0,24,33],
[14,32,34,26,26,38,26,34,27,0,39],
[20,17,22,16,30,19,16,33,18,12,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 83, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,30,30,27,20,14,32,19,20,27],
[27,0,20,20,28,26,19,23,22,27,21],
[21,31,0,22,27,26,16,30,16,28,23],
[21,31,29,0,25,20,27,39,28,31,19],
[24,23,24,26,0,23,20,38,20,23,15],
[31,25,25,31,28,0,20,36,20,26,21],
[37,32,35,24,31,31,0,33,33,25,26],
[19,28,21,12,13,15,18,0,13,18,19],
[32,29,35,23,31,31,18,38,0,19,20],
[31,24,23,20,28,25,26,33,32,0,22],
[24,30,28,32,36,30,25,32,31,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 84, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,22,28,18,25,16,16,16,28,18,18],
[29,0,22,14,23,16,35,19,28,30,21],
[23,29,0,13,28,32,22,16,34,27,9],
[33,37,38,0,32,32,32,26,45,32,7],
[26,28,23,19,0,19,22,31,17,39,14],
[35,35,19,19,32,0,19,28,19,31,25],
[35,16,29,19,29,32,0,25,19,32,7],
[35,32,35,25,20,23,26,0,25,31,32],
[23,23,17,6,34,32,32,26,0,27,9],
[33,21,24,19,12,20,19,20,24,0,25],
[33,30,42,44,37,26,44,19,42,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 85, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,37,33,42,22,32,29,30,35,24],
[21,0,27,26,31,14,30,21,29,26,19],
[14,24,0,33,36,27,29,19,33,32,27],
[18,25,18,0,37,19,25,26,29,19,21],
[9,20,15,14,0,17,23,22,23,10,19],
[29,37,24,32,34,0,36,24,26,22,24],
[19,21,22,26,28,15,0,17,26,23,18],
[22,30,32,25,29,27,34,0,36,30,28],
[21,22,18,22,28,25,25,15,0,17,13],
[16,25,19,32,41,29,28,21,34,0,28],
[27,32,24,30,32,27,33,23,38,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 86, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,12,30,21,20,15,8,18,18,21,16],
[39,0,33,29,19,26,17,19,18,31,21],
[21,18,0,26,20,18,20,12,18,19,15],
[30,22,25,0,27,23,17,8,27,32,19],
[31,32,31,24,0,27,27,27,25,31,18],
[36,25,33,28,24,0,27,24,18,27,29],
[43,34,31,34,24,24,0,23,29,41,20],
[33,32,39,43,24,27,28,0,35,33,28],
[33,33,33,24,26,33,22,16,0,37,22],
[30,20,32,19,20,24,10,18,14,0,21],
[35,30,36,32,33,22,31,23,29,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 87, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,29,22,23,26,31,30,27,29,31],
[24,0,23,20,24,31,22,30,27,26,31],
[22,28,0,21,26,31,23,32,28,32,30],
[29,31,30,0,28,35,23,33,33,36,32],
[28,27,25,23,0,32,24,34,27,35,27],
[25,20,20,16,19,0,17,31,20,28,25],
[20,29,28,28,27,34,0,34,32,26,32],
[21,21,19,18,17,20,17,0,27,26,32],
[24,24,23,18,24,31,19,24,0,28,28],
[22,25,19,15,16,23,25,25,23,0,24],
[20,20,21,19,24,26,19,19,23,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 88, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,22,29,27,21,25,16,19,21,27,45],
[29,0,32,19,16,22,23,29,30,27,27],
[22,19,0,14,16,15,30,8,26,17,21],
[24,32,37,0,18,27,30,16,22,26,45],
[30,35,35,33,0,26,22,30,35,25,34],
[26,29,36,24,25,0,30,21,38,30,36],
[35,28,21,21,29,21,0,19,21,25,34],
[32,22,43,35,21,30,32,0,25,29,43],
[30,21,25,29,16,13,30,26,0,26,36],
[24,24,34,25,26,21,26,22,25,0,28],
[6,24,30,6,17,15,17,8,15,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 89, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,25,25,27,31,30,31,27,24,34],
[25,0,32,29,32,30,31,31,27,28,34],
[26,19,0,27,24,24,22,21,25,23,24],
[26,22,24,0,25,25,24,18,25,21,25],
[24,19,27,26,0,28,27,25,23,24,27],
[20,21,27,26,23,0,27,23,20,20,26],
[21,20,29,27,24,24,0,26,30,23,26],
[20,20,30,33,26,28,25,0,22,24,29],
[24,24,26,26,28,31,21,29,0,24,29],
[27,23,28,30,27,31,28,27,27,0,31],
[17,17,27,26,24,25,25,22,22,20,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 90, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,27,28,30,29,29,23,34,38,27],
[25,0,25,24,24,29,24,27,32,29,25],
[24,26,0,29,36,31,29,25,33,31,32],
[23,27,22,0,33,31,30,33,35,27,25],
[21,27,15,18,0,25,24,20,35,32,28],
[22,22,20,20,26,0,27,30,29,25,25],
[22,27,22,21,27,24,0,22,29,31,30],
[28,24,26,18,31,21,29,0,31,32,28],
[17,19,18,16,16,22,22,20,0,19,15],
[13,22,20,24,19,26,20,19,32,0,23],
[24,26,19,26,23,26,21,23,36,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 91, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,16,20,27,28,24,24,21,17,17],
[25,0,13,23,22,21,22,24,18,21,18],
[35,38,0,28,27,32,30,28,28,23,20],
[31,28,23,0,24,27,29,27,22,15,25],
[24,29,24,27,0,25,27,22,22,11,18],
[23,30,19,24,26,0,23,22,17,15,21],
[27,29,21,22,24,28,0,25,21,20,24],
[27,27,23,24,29,29,26,0,26,22,27],
[30,33,23,29,29,34,30,25,0,27,27],
[34,30,28,36,40,36,31,29,24,0,23],
[34,33,31,26,33,30,27,24,24,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 92, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,14,29,27,10,36,20,12,19,5,22],
[37,0,41,39,12,36,15,19,22,25,22],
[22,10,0,39,12,34,20,19,10,10,27],
[24,12,12,0,12,26,10,22,3,15,17],
[41,39,39,39,0,38,32,25,39,32,30],
[15,15,17,25,13,0,20,20,20,13,30],
[31,36,31,41,19,31,0,33,24,29,24],
[39,32,32,29,26,31,18,0,30,32,29],
[32,29,41,48,12,31,27,21,0,20,24],
[46,26,41,36,19,38,22,19,31,0,22],
[29,29,24,34,21,21,27,22,27,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 93, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,25,35,19,35,29,32,36,25,26],
[27,0,22,27,19,26,28,22,27,28,33],
[26,29,0,37,20,39,21,31,35,25,27],
[16,24,14,0,15,34,13,25,26,19,15],
[32,32,31,36,0,36,20,31,38,21,33],
[16,25,12,17,15,0,14,19,27,19,20],
[22,23,30,38,31,37,0,35,41,22,26],
[19,29,20,26,20,32,16,0,33,27,28],
[15,24,16,25,13,24,10,18,0,16,21],
[26,23,26,32,30,32,29,24,35,0,37],
[25,18,24,36,18,31,25,23,30,14,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 94, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,13,18,12,19,19,18,22,18,16,22],
[38,0,32,23,30,32,28,35,33,31,31],
[33,19,0,24,23,27,27,24,25,26,30],
[39,28,27,0,29,30,20,25,31,25,31],
[32,21,28,22,0,25,26,20,22,25,25],
[32,19,24,21,26,0,27,25,22,24,26],
[33,23,24,31,25,24,0,23,27,27,30],
[29,16,27,26,31,26,28,0,29,29,29],
[33,18,26,20,29,29,24,22,0,29,27],
[35,20,25,26,26,27,24,22,22,0,27],
[29,20,21,20,26,25,21,22,24,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 95, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,32,35,32,29,29,30,34,24,27],
[25,0,28,28,33,32,26,25,34,25,23],
[19,23,0,28,28,22,19,18,26,26,27],
[16,23,23,0,25,25,20,21,25,20,18],
[19,18,23,26,0,23,21,26,26,24,29],
[22,19,29,26,28,0,21,24,25,22,25],
[22,25,32,31,30,30,0,23,29,25,25],
[21,26,33,30,25,27,28,0,31,25,28],
[17,17,25,26,25,26,22,20,0,23,25],
[27,26,25,31,27,29,26,26,28,0,28],
[24,28,24,33,22,26,26,23,26,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 96, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,28,23,25,32,20,27,18,18,34],
[32,0,31,24,30,28,33,23,27,24,30],
[23,20,0,25,23,24,27,22,19,16,40],
[28,27,26,0,26,25,30,23,21,22,27],
[26,21,28,25,0,24,28,27,30,26,22],
[19,23,27,26,27,0,25,33,25,19,36],
[31,18,24,21,23,26,0,27,24,18,28],
[24,28,29,28,24,18,24,0,22,20,25],
[33,24,32,30,21,26,27,29,0,20,32],
[33,27,35,29,25,32,33,31,31,0,38],
[17,21,11,24,29,15,23,26,19,13,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 97, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,26,19,21,25,23,23,20,23,20],
[25,0,25,25,27,18,20,22,19,22,17],
[25,26,0,25,29,20,25,25,25,24,23],
[32,26,26,0,31,25,31,25,24,26,26],
[30,24,22,20,0,25,25,25,19,24,20],
[26,33,31,26,26,0,28,28,26,27,25],
[28,31,26,20,26,23,0,22,22,26,20],
[28,29,26,26,26,23,29,0,25,25,21],
[31,32,26,27,32,25,29,26,0,30,22],
[28,29,27,25,27,24,25,26,21,0,20],
[31,34,28,25,31,26,31,30,29,31,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 98, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,32,26,31,30,28,31,25,24,25,20],
[19,0,22,27,27,22,27,23,17,26,18],
[25,29,0,30,32,25,29,27,24,25,21],
[20,24,21,0,23,21,25,23,18,23,15],
[21,24,19,28,0,22,27,23,22,23,22],
[23,29,26,30,29,0,27,30,23,29,26],
[20,24,22,26,24,24,0,27,21,22,21],
[26,28,24,28,28,21,24,0,21,30,23],
[27,34,27,33,29,28,30,30,0,29,25],
[26,25,26,28,28,22,29,21,22,0,18],
[31,33,30,36,29,25,30,28,26,33,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 99, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,14,18,12,17,8,19,13,9,8,13],
[37,0,36,30,30,19,26,29,33,22,22],
[33,15,0,19,24,15,12,20,26,25,10],
[39,21,32,0,32,27,17,28,28,11,16],
[34,21,27,19,0,21,27,25,25,23,15],
[43,32,36,24,30,0,25,37,34,28,27],
[32,25,39,34,24,26,0,24,25,22,24],
[38,22,31,23,26,14,27,0,29,17,22],
[42,18,25,23,26,17,26,22,0,17,23],
[43,29,26,40,28,23,29,34,34,0,24],
[38,29,41,35,36,24,27,29,28,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 100, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,26,25,19,23,26,25,25,22,20],
[30,0,35,26,29,25,31,33,30,30,28],
[25,16,0,20,22,19,26,27,27,23,20],
[26,25,31,0,27,27,30,27,33,28,27],
[32,22,29,24,0,25,29,29,24,22,28],
[28,26,32,24,26,0,32,26,26,23,24],
[25,20,25,21,22,19,0,26,22,22,21],
[26,18,24,24,22,25,25,0,23,23,25],
[26,21,24,18,27,25,29,28,0,24,24],
[29,21,28,23,29,28,29,28,27,0,28],
[31,23,31,24,23,27,30,26,27,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 101, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,27,28,23,29,19,32,26,27,25],
[26,0,27,27,25,30,27,30,27,31,28],
[24,24,0,22,24,28,22,28,24,29,24],
[23,24,29,0,20,28,26,32,22,30,29],
[28,26,27,31,0,27,28,31,25,34,29],
[22,21,23,23,24,0,21,26,25,28,21],
[32,24,29,25,23,30,0,31,27,32,30],
[19,21,23,19,20,25,20,0,22,22,17],
[25,24,27,29,26,26,24,29,0,31,24],
[24,20,22,21,17,23,19,29,20,0,19],
[26,23,27,22,22,30,21,34,27,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 102, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,22,31,19,32,28,28,38,18,37],
[27,0,32,32,28,29,23,16,26,18,30],
[29,19,0,20,32,38,34,14,45,32,23],
[20,19,31,0,35,32,34,16,39,24,24],
[32,23,19,16,0,33,33,18,39,17,22],
[19,22,13,19,18,0,24,19,38,18,21],
[23,28,17,17,18,27,0,16,35,19,23],
[23,35,37,35,33,32,35,0,39,36,23],
[13,25,6,12,12,13,16,12,0,12,23],
[33,33,19,27,34,33,32,15,39,0,21],
[14,21,28,27,29,30,28,28,28,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 103, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,3,12,28,14,16,20,23,24,12],
[27,0,25,28,23,13,18,32,34,20,23],
[48,26,0,25,25,32,26,32,38,30,14],
[39,23,26,0,39,25,23,29,39,28,24],
[23,28,26,12,0,20,23,26,24,23,22],
[37,38,19,26,31,0,21,38,40,16,27],
[35,33,25,28,28,30,0,29,45,34,27],
[31,19,19,22,25,13,22,0,16,16,9],
[28,17,13,12,27,11,6,35,0,17,14],
[27,31,21,23,28,35,17,35,34,0,26],
[39,28,37,27,29,24,24,42,37,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 104, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,26,29,28,21,25,21,23,28,23],
[22,0,22,20,21,21,19,23,22,25,25],
[25,29,0,27,27,20,26,18,21,32,23],
[22,31,24,0,20,17,21,22,19,26,23],
[23,30,24,31,0,21,24,22,24,36,29],
[30,30,31,34,30,0,31,26,22,35,29],
[26,32,25,30,27,20,0,25,29,29,29],
[30,28,33,29,29,25,26,0,28,31,24],
[28,29,30,32,27,29,22,23,0,34,26],
[23,26,19,25,15,16,22,20,17,0,27],
[28,26,28,28,22,22,22,27,25,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 105, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,29,30,28,29,21,34,29,17,26],
[28,0,26,25,28,30,26,30,28,26,23],
[22,25,0,25,28,24,20,32,31,20,26],
[21,26,26,0,23,22,27,32,29,28,24],
[23,23,23,28,0,27,20,32,27,24,26],
[22,21,27,29,24,0,28,32,32,21,22],
[30,25,31,24,31,23,0,33,33,25,28],
[17,21,19,19,19,19,18,0,26,19,21],
[22,23,20,22,24,19,18,25,0,17,25],
[34,25,31,23,27,30,26,32,34,0,27],
[25,28,25,27,25,29,23,30,26,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 106, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,31,27,22,29,31,29,24,24,21,20],
[20,0,25,22,28,26,22,23,20,23,18],
[24,26,0,23,33,28,25,27,23,22,24],
[29,29,28,0,27,24,27,28,26,26,29],
[22,23,18,24,0,23,21,22,22,18,14],
[20,25,23,27,28,0,24,21,24,25,17],
[22,29,26,24,30,27,0,25,25,24,24],
[27,28,24,23,29,30,26,0,25,25,21],
[27,31,28,25,29,27,26,26,0,24,26],
[30,28,29,25,33,26,27,26,27,0,31],
[31,33,27,22,37,34,27,30,25,20,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 107, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
# Benchmark cases 108-145 (11 alternatives, 51 voters) with ME-BB.
#
# The original file repeated ~25 lines of identical timing boilerplate
# once per outranking matrix.  The matrices are kept as plain data here
# and one driver loop runs them all, performing exactly the same
# computations, console output and `results` appends in the same order.
#
# Each entry: (case_id, 11x11 outranking matrix as nested lists).
_ME_BB_CASES_11_51 = [
    (108, [
        [0,28,25,23,31,25,27,30,20,29,20],
        [23,0,22,25,31,21,20,22,28,34,25],
        [26,29,0,21,23,21,30,27,21,29,20],
        [28,26,30,0,25,17,22,21,19,25,28],
        [20,20,28,26,0,14,21,28,24,23,19],
        [26,30,30,34,37,0,25,33,27,34,33],
        [24,31,21,29,30,26,0,30,30,28,29],
        [21,29,24,30,23,18,21,0,27,32,29],
        [31,23,30,32,27,24,21,24,0,22,26],
        [22,17,22,26,28,17,23,19,29,0,22],
        [31,26,31,23,32,18,22,22,25,29,0]]),
    (109, [
        [0,27,27,21,23,22,20,26,21,25,21],
        [24,0,30,22,23,26,22,28,27,26,24],
        [24,21,0,22,18,24,16,23,25,26,24],
        [30,29,29,0,30,26,23,30,32,29,23],
        [28,28,33,21,0,26,25,37,30,27,27],
        [29,25,27,25,25,0,28,29,33,32,23],
        [31,29,35,28,26,23,0,30,28,28,24],
        [25,23,28,21,14,22,21,0,22,27,17],
        [30,24,26,19,21,18,23,29,0,27,22],
        [26,25,25,22,24,19,23,24,24,0,25],
        [30,27,27,28,24,28,27,34,29,26,0]]),
    (110, [
        [0,15,23,21,24,13,31,26,21,32,16],
        [36,0,35,30,35,26,36,30,31,40,23],
        [28,16,0,31,30,16,31,24,29,37,28],
        [30,21,20,0,23,23,37,30,29,38,21],
        [27,16,21,28,0,13,26,23,22,30,18],
        [38,25,35,28,38,0,37,30,29,37,21],
        [20,15,20,14,25,14,0,20,18,32,11],
        [25,21,27,21,28,21,31,0,24,36,24],
        [30,20,22,22,29,22,33,27,0,36,22],
        [19,11,14,13,21,14,19,15,15,0,5],
        [35,28,23,30,33,30,40,27,29,46,0]]),
    (111, [
        [0,25,33,31,24,26,30,28,28,29,26],
        [26,0,28,30,26,28,25,29,23,28,22],
        [18,23,0,24,21,25,23,23,21,24,22],
        [20,21,27,0,25,28,23,24,25,22,20],
        [27,25,30,26,0,29,26,25,24,27,23],
        [25,23,26,23,22,0,22,30,24,25,19],
        [21,26,28,28,25,29,0,30,27,29,26],
        [23,22,28,27,26,21,21,0,21,25,21],
        [23,28,30,26,27,27,24,30,0,26,25],
        [22,23,27,29,24,26,22,26,25,0,21],
        [25,29,29,31,28,32,25,30,26,30,0]]),
    (112, [
        [0,28,32,25,33,22,23,42,27,37,34],
        [23,0,24,26,28,15,25,32,25,27,27],
        [19,27,0,17,24,13,20,34,21,27,27],
        [26,25,34,0,27,26,27,36,19,32,34],
        [18,23,27,24,0,14,20,30,19,26,31],
        [29,36,38,25,37,0,34,45,31,34,32],
        [28,26,31,24,31,17,0,41,24,36,37],
        [9,19,17,15,21,6,10,0,13,19,28],
        [24,26,30,32,32,20,27,38,0,41,40],
        [14,24,24,19,25,17,15,32,10,0,31],
        [17,24,24,17,20,19,14,23,11,20,0]]),
    (113, [
        [0,25,26,23,31,36,35,33,36,33,36],
        [26,0,32,31,22,34,34,24,28,26,30],
        [25,19,0,25,21,35,37,29,21,25,37],
        [28,20,26,0,28,36,22,12,28,20,36],
        [20,29,30,23,0,24,26,22,24,32,24],
        [15,17,16,15,27,0,37,19,24,25,28],
        [16,17,14,29,25,14,0,12,20,16,26],
        [18,27,22,39,29,32,39,0,32,37,30],
        [15,23,30,23,27,27,31,19,0,27,28],
        [18,25,26,31,19,26,35,14,24,0,20],
        [15,21,14,15,27,23,25,21,23,31,0]]),
    (114, [
        [0,27,21,30,22,22,31,25,26,26,34],
        [24,0,25,26,23,19,28,27,22,23,26],
        [30,26,0,25,26,22,28,21,25,27,29],
        [21,25,26,0,26,28,33,27,23,28,34],
        [29,28,25,25,0,24,28,20,26,29,30],
        [29,32,29,23,27,0,34,30,20,27,32],
        [20,23,23,18,23,17,0,24,20,25,23],
        [26,24,30,24,31,21,27,0,24,32,29],
        [25,29,26,28,25,31,31,27,0,26,31],
        [25,28,24,23,22,24,26,19,25,0,26],
        [17,25,22,17,21,19,28,22,20,25,0]]),
    (115, [
        [0,29,23,27,20,24,22,25,22,26,28],
        [22,0,22,24,26,23,23,17,23,24,22],
        [28,29,0,29,27,27,28,28,24,30,33],
        [24,27,22,0,29,24,25,23,21,29,30],
        [31,25,24,22,0,23,19,24,25,24,24],
        [27,28,24,27,28,0,26,18,20,23,30],
        [29,28,23,26,32,25,0,22,31,28,32],
        [26,34,23,28,27,33,29,0,31,28,31],
        [29,28,27,30,26,31,20,20,0,25,32],
        [25,27,21,22,27,28,23,23,26,0,32],
        [23,29,18,21,27,21,19,20,19,19,0]]),
    (116, [
        [0,28,16,27,23,27,17,25,23,26,29],
        [23,0,25,30,37,29,24,28,26,25,28],
        [35,26,0,27,26,28,26,23,25,32,25],
        [24,21,24,0,32,29,19,27,19,27,24],
        [28,14,25,19,0,22,9,24,18,19,22],
        [24,22,23,22,29,0,17,21,22,24,21],
        [34,27,25,32,42,34,0,29,28,37,32],
        [26,23,28,24,27,30,22,0,20,27,19],
        [28,25,26,32,33,29,23,31,0,27,23],
        [25,26,19,24,32,27,14,24,24,0,19],
        [22,23,26,27,29,30,19,32,28,32,0]]),
    (117, [
        [0,26,21,19,21,21,29,28,26,24,22],
        [25,0,20,19,20,19,25,21,21,17,17],
        [30,31,0,27,22,21,29,32,30,26,24],
        [32,32,24,0,30,26,30,28,34,29,28],
        [30,31,29,21,0,23,29,24,25,29,27],
        [30,32,30,25,28,0,31,33,29,30,25],
        [22,26,22,21,22,20,0,26,26,27,25],
        [23,30,19,23,27,18,25,0,25,24,22],
        [25,30,21,17,26,22,25,26,0,25,26],
        [27,34,25,22,22,21,24,27,26,0,21],
        [29,34,27,23,24,26,26,29,25,30,0]]),
    (118, [
        [0,18,30,18,26,25,7,21,16,47,12],
        [33,0,34,22,30,33,29,15,24,29,22],
        [21,17,0,16,30,27,17,32,27,41,26],
        [33,29,35,0,30,39,29,25,35,35,22],
        [25,21,21,21,0,25,21,16,21,21,22],
        [26,18,24,12,26,0,21,21,42,47,12],
        [44,22,34,22,30,30,0,25,39,51,21],
        [30,36,19,26,35,30,26,0,42,42,31],
        [35,27,24,16,30,9,12,9,0,31,12],
        [4,22,10,16,30,4,0,9,20,0,16],
        [39,29,25,29,29,39,30,20,39,35,0]]),
    (119, [
        [0,13,38,47,21,29,41,38,41,41,46],
        [38,0,42,47,39,21,45,43,34,46,50],
        [13,9,0,47,1,25,25,31,42,30,41],
        [4,4,4,0,4,20,12,26,20,12,37],
        [30,12,50,47,0,33,45,34,46,29,45],
        [22,30,26,31,18,0,29,27,42,30,45],
        [10,6,26,39,6,22,0,27,34,22,38],
        [13,8,20,25,17,24,24,0,25,13,41],
        [10,17,9,31,5,9,17,26,0,17,50],
        [10,5,21,39,22,21,29,38,34,0,38],
        [5,1,10,14,6,6,13,10,1,13,0]]),
    (120, [
        [0,24,19,22,24,25,26,25,23,25,30],
        [27,0,20,21,17,25,23,21,21,26,21],
        [32,31,0,26,28,26,29,23,26,28,29],
        [29,30,25,0,24,27,30,25,25,26,30],
        [27,34,23,27,0,25,23,23,21,29,29],
        [26,26,25,24,26,0,24,22,20,27,31],
        [25,28,22,21,28,27,0,28,25,26,27],
        [26,30,28,26,28,29,23,0,24,27,30],
        [28,30,25,26,30,31,26,27,0,26,28],
        [26,25,23,25,22,24,25,24,25,0,23],
        [21,30,22,21,22,20,24,21,23,28,0]]),
    (121, [
        [0,31,25,38,28,37,27,31,34,40,30],
        [20,0,28,29,25,17,24,22,25,31,30],
        [26,23,0,27,17,25,20,26,17,30,28],
        [13,22,24,0,14,19,22,30,31,32,24],
        [23,26,34,37,0,26,24,30,36,31,22],
        [14,34,26,32,25,0,27,18,28,33,25],
        [24,27,31,29,27,24,0,28,23,34,33],
        [20,29,25,21,21,33,23,0,23,29,21],
        [17,26,34,20,15,23,28,28,0,27,32],
        [11,20,21,19,20,18,17,22,24,0,32],
        [21,21,23,27,29,26,18,30,19,19,0]]),
    (122, [
        [0,24,18,8,8,12,16,17,10,8,26],
        [27,0,21,18,18,28,18,14,25,19,28],
        [33,30,0,19,21,23,21,25,22,21,34],
        [43,33,32,0,19,25,25,24,16,27,30],
        [43,33,30,32,0,31,21,28,22,32,27],
        [39,23,28,26,20,0,22,27,26,14,29],
        [35,33,30,26,30,29,0,24,27,26,27],
        [34,37,26,27,23,24,27,0,22,23,32],
        [41,26,29,35,29,25,24,29,0,18,36],
        [43,32,30,24,19,37,25,28,33,0,33],
        [25,23,17,21,24,22,24,19,15,18,0]]),
    (123, [
        [0,36,24,20,29,23,32,33,8,27,22],
        [15,0,22,14,29,19,12,29,17,19,19],
        [27,29,0,13,24,37,29,29,22,32,26],
        [31,37,38,0,30,29,30,29,38,37,19],
        [22,22,27,21,0,32,24,29,22,32,27],
        [28,32,14,22,19,0,25,20,22,26,27],
        [19,39,22,21,27,26,0,27,22,30,27],
        [18,22,22,22,22,31,24,0,17,16,17],
        [43,34,29,13,29,29,29,34,0,37,31],
        [24,32,19,14,19,25,21,35,14,0,19],
        [29,32,25,32,24,24,24,34,20,32,0]]),
    (124, [
        [0,27,23,35,25,30,39,32,26,25,28],
        [24,0,29,37,30,36,37,25,28,30,33],
        [28,22,0,34,32,32,34,36,33,33,33],
        [16,14,17,0,17,19,30,20,23,24,19],
        [26,21,19,34,0,28,32,25,26,24,26],
        [21,15,19,32,23,0,29,23,27,25,25],
        [12,14,17,21,19,22,0,21,26,18,17],
        [19,26,15,31,26,28,30,0,28,21,24],
        [25,23,18,28,25,24,25,23,0,25,27],
        [26,21,18,27,27,26,33,30,26,0,27],
        [23,18,18,32,25,26,34,27,24,24,0]]),
    (125, [
        [0,19,22,32,27,18,21,26,35,32,32],
        [32,0,27,40,26,20,27,29,32,29,31],
        [29,24,0,38,26,29,25,28,35,27,33],
        [19,11,13,0,26,14,18,16,27,25,17],
        [24,25,25,25,0,16,23,25,29,30,22],
        [33,31,22,37,35,0,29,32,29,33,27],
        [30,24,26,33,28,22,0,25,35,28,31],
        [25,22,23,35,26,19,26,0,26,31,23],
        [16,19,16,24,22,22,16,25,0,23,19],
        [19,22,24,26,21,18,23,20,28,0,22],
        [19,20,18,34,29,24,20,28,32,29,0]]),
    (126, [
        [0,23,23,27,29,25,26,21,21,30,26],
        [28,0,28,29,30,25,31,25,29,34,28],
        [28,23,0,26,27,23,26,20,21,30,22],
        [24,22,25,0,29,28,23,26,27,26,25],
        [22,21,24,22,0,27,31,24,24,28,24],
        [26,26,28,23,24,0,30,30,23,32,29],
        [25,20,25,28,20,21,0,24,26,28,23],
        [30,26,31,25,27,21,27,0,23,28,24],
        [30,22,30,24,27,28,25,28,0,27,30],
        [21,17,21,25,23,19,23,23,24,0,22],
        [25,23,29,26,27,22,28,27,21,29,0]]),
    (127, [
        [0,23,27,24,27,31,30,28,27,30,28],
        [28,0,24,27,28,35,34,31,26,31,30],
        [24,27,0,23,27,28,29,30,26,31,23],
        [27,24,28,0,26,29,40,33,28,31,31],
        [24,23,24,25,0,28,31,27,24,28,24],
        [20,16,23,22,23,0,29,26,21,28,25],
        [21,17,22,11,20,22,0,23,21,26,22],
        [23,20,21,18,24,25,28,0,21,26,24],
        [24,25,25,23,27,30,30,30,0,30,31],
        [21,20,20,20,23,23,25,25,21,0,22],
        [23,21,28,20,27,26,29,27,20,29,0]]),
    (128, [
        [0,32,27,27,27,31,27,20,29,23,21],
        [19,0,14,18,21,22,16,16,19,14,24],
        [24,37,0,22,27,32,31,27,29,32,16],
        [24,33,29,0,26,29,27,29,28,33,29],
        [24,30,24,25,0,26,23,20,24,23,25],
        [20,29,19,22,25,0,25,13,17,16,21],
        [24,35,20,24,28,26,0,24,27,27,22],
        [31,35,24,22,31,38,27,0,30,28,23],
        [22,32,22,23,27,34,24,21,0,24,21],
        [28,37,19,18,28,35,24,23,27,0,23],
        [30,27,35,22,26,30,29,28,30,28,0]]),
    (129, [
        [0,31,16,22,23,12,35,16,22,16,32],
        [20,0,10,20,11,4,33,16,22,27,27],
        [35,41,0,35,21,11,40,21,14,35,49],
        [29,31,16,0,23,13,24,5,17,16,29],
        [28,40,30,28,0,11,33,14,20,35,35],
        [39,47,40,38,40,0,30,24,28,27,40],
        [16,18,11,27,18,21,0,14,11,17,18],
        [35,35,30,46,37,27,37,0,41,21,35],
        [29,29,37,34,31,23,40,10,0,23,37],
        [35,24,16,35,16,24,34,30,28,0,34],
        [19,24,2,22,16,11,33,16,14,17,0]]),
    (130, [
        [0,26,30,18,22,31,19,23,29,28,25],
        [25,0,30,22,20,30,24,26,28,28,23],
        [21,21,0,14,21,27,18,19,17,22,23],
        [33,29,37,0,26,35,20,25,30,35,31],
        [29,31,30,25,0,36,25,25,28,32,29],
        [20,21,24,16,15,0,17,15,17,26,19],
        [32,27,33,31,26,34,0,26,25,27,32],
        [28,25,32,26,26,36,25,0,24,29,31],
        [22,23,34,21,23,34,26,27,0,31,26],
        [23,23,29,16,19,25,24,22,20,0,23],
        [26,28,28,20,22,32,19,20,25,28,0]]),
    (131, [
        [0,22,23,22,25,29,28,27,32,31,27],
        [29,0,28,26,21,34,25,23,30,31,25],
        [28,23,0,23,20,29,29,25,23,30,27],
        [29,25,28,0,27,29,27,28,30,29,27],
        [26,30,31,24,0,34,31,27,26,36,28],
        [22,17,22,22,17,0,20,18,26,26,24],
        [23,26,22,24,20,31,0,20,30,23,28],
        [24,28,26,23,24,33,31,0,29,31,25],
        [19,21,28,21,25,25,21,22,0,30,18],
        [20,20,21,22,15,25,28,20,21,0,22],
        [24,26,24,24,23,27,23,26,33,29,0]]),
    (132, [
        [0,21,18,22,20,31,13,15,24,24,24],
        [30,0,18,21,28,34,19,27,19,28,30],
        [33,33,0,24,31,39,29,33,34,36,34],
        [29,30,27,0,28,30,12,26,26,33,28],
        [31,23,20,23,0,24,21,17,25,23,26],
        [20,17,12,21,27,0,17,19,27,25,24],
        [38,32,22,39,30,34,0,31,35,36,26],
        [36,24,18,25,34,32,20,0,27,34,22],
        [27,32,17,25,26,24,16,24,0,30,28],
        [27,23,15,18,28,26,15,17,21,0,24],
        [27,21,17,23,25,27,25,29,23,27,0]]),
    (133, [
        [0,31,28,22,27,21,23,30,19,21,18],
        [20,0,27,17,25,15,19,40,17,26,25],
        [23,24,0,17,35,16,18,31,21,29,17],
        [29,34,34,0,34,19,25,35,19,35,26],
        [24,26,16,17,0,20,24,25,22,26,22],
        [30,36,35,32,31,0,25,34,25,33,26],
        [28,32,33,26,27,26,0,33,30,36,24],
        [21,11,20,16,26,17,18,0,17,21,11],
        [32,34,30,32,29,26,21,34,0,29,23],
        [30,25,22,16,25,18,15,30,22,0,17],
        [33,26,34,25,29,25,27,40,28,34,0]]),
    (134, [
        [0,42,40,26,42,31,20,29,24,26,30],
        [9,0,25,5,18,9,16,27,18,20,18],
        [11,26,0,13,33,27,31,21,22,24,18],
        [25,46,38,0,30,32,22,38,31,29,34],
        [9,33,18,21,0,10,9,25,24,18,21],
        [20,42,24,19,41,0,21,24,35,23,31],
        [31,35,20,29,42,30,0,29,32,23,38],
        [22,24,30,13,26,27,22,0,29,24,29],
        [27,33,29,20,27,16,19,22,0,19,21],
        [25,31,27,22,33,28,28,27,32,0,25],
        [21,33,33,17,30,20,13,22,30,26,0]]),
    (135, [
        [0,34,25,29,33,37,35,40,23,40,24],
        [17,0,19,25,24,30,34,36,12,33,27],
        [26,32,0,25,34,36,31,42,26,35,24],
        [22,26,26,0,22,36,31,37,25,30,21],
        [18,27,17,29,0,33,38,33,20,31,18],
        [14,21,15,15,18,0,20,28,19,25,17],
        [16,17,20,20,13,31,0,21,15,29,15],
        [11,15,9,14,18,23,30,0,9,23,10],
        [28,39,25,26,31,32,36,42,0,42,32],
        [11,18,16,21,20,26,22,28,9,0,16],
        [27,24,27,30,33,34,36,41,19,35,0]]),
    (136, [
        [0,25,23,27,23,27,24,28,20,22,20],
        [26,0,17,24,24,24,23,28,17,20,15],
        [28,34,0,29,24,29,28,32,26,25,23],
        [24,27,22,0,21,30,28,31,23,25,26],
        [28,27,27,30,0,30,29,30,25,29,26],
        [24,27,22,21,21,0,26,27,24,21,21],
        [27,28,23,23,22,25,0,27,23,22,20],
        [23,23,19,20,21,24,24,0,19,16,14],
        [31,34,25,28,26,27,28,32,0,29,25],
        [29,31,26,26,22,30,29,35,22,0,21],
        [31,36,28,25,25,30,31,37,26,30,0]]),
    (137, [
        [0,22,29,29,41,28,33,28,20,32,32],
        [29,0,19,29,29,24,29,24,29,37,27],
        [22,32,0,47,51,24,28,38,28,22,18],
        [22,22,4,0,12,18,22,18,28,22,12],
        [10,22,0,39,0,18,10,18,16,18,12],
        [23,27,27,33,33,0,33,41,33,21,19],
        [18,22,23,29,41,18,0,28,28,8,12],
        [23,27,13,33,33,10,23,0,19,21,13],
        [31,22,23,23,35,18,23,32,0,12,12],
        [19,14,29,29,33,30,43,30,39,0,33],
        [19,24,33,39,39,32,39,38,39,18,0]]),
    (138, [
        [0,16,10,11,26,25,19,16,21,21,26],
        [35,0,25,36,36,25,26,16,36,36,18],
        [41,26,0,33,40,25,23,16,15,30,26],
        [40,15,18,0,19,26,23,16,18,26,23],
        [25,15,11,32,0,19,21,12,22,22,16],
        [26,26,26,25,32,0,26,15,15,15,16],
        [32,25,28,28,30,25,0,26,18,18,33],
        [35,35,35,35,39,36,25,0,36,35,26],
        [30,15,36,33,29,36,33,15,0,39,33],
        [30,15,21,25,29,36,33,16,12,0,26],
        [25,33,25,28,35,35,18,25,18,25,0]]),
    (139, [
        [0,21,25,21,23,21,25,21,22,24,22],
        [30,0,32,27,28,26,31,28,24,29,29],
        [26,19,0,28,25,23,25,19,20,22,24],
        [30,24,23,0,27,25,26,24,27,22,25],
        [28,23,26,24,0,25,28,30,22,22,25],
        [30,25,28,26,26,0,29,18,27,28,21],
        [26,20,26,25,23,22,0,24,22,25,26],
        [30,23,32,27,21,33,27,0,22,31,29],
        [29,27,31,24,29,24,29,29,0,24,28],
        [27,22,29,29,29,23,26,20,27,0,25],
        [29,22,27,26,26,30,25,22,23,26,0]]),
    (140, [
        [0,26,24,25,16,29,22,22,25,19,30],
        [25,0,31,26,22,33,26,28,28,18,24],
        [27,20,0,30,19,34,31,28,35,28,36],
        [26,25,21,0,17,26,16,17,28,12,18],
        [35,29,32,34,0,36,27,27,39,19,36],
        [22,18,17,25,15,0,11,24,26,14,24],
        [29,25,20,35,24,40,0,23,34,24,33],
        [29,23,23,34,24,27,28,0,33,23,34],
        [26,23,16,23,12,25,17,18,0,17,24],
        [32,33,23,39,32,37,27,28,34,0,29],
        [21,27,15,33,15,27,18,17,27,22,0]]),
    (141, [
        [0,19,26,20,30,27,22,23,24,29,33],
        [32,0,32,23,34,30,22,26,28,28,30],
        [25,19,0,17,29,30,21,22,18,24,32],
        [31,28,34,0,32,29,33,27,21,30,33],
        [21,17,22,19,0,30,19,19,16,26,30],
        [24,21,21,22,21,0,25,18,26,23,27],
        [29,29,30,18,32,26,0,28,26,27,31],
        [28,25,29,24,32,33,23,0,23,27,31],
        [27,23,33,30,35,25,25,28,0,34,29],
        [22,23,27,21,25,28,24,24,17,0,34],
        [18,21,19,18,21,24,20,20,22,17,0]]),
    (142, [
        [0,31,26,20,30,23,26,25,17,24,24],
        [20,0,26,18,25,25,27,29,22,28,22],
        [25,25,0,25,22,21,23,29,24,27,27],
        [31,33,26,0,32,25,32,30,26,31,26],
        [21,26,29,19,0,23,30,26,17,29,26],
        [28,26,30,26,28,0,23,28,23,23,19],
        [25,24,28,19,21,28,0,27,22,25,24],
        [26,22,22,21,25,23,24,0,17,20,27],
        [34,29,27,25,34,28,29,34,0,29,24],
        [27,23,24,20,22,28,26,31,22,0,22],
        [27,29,24,25,25,32,27,24,27,29,0]]),
    (143, [
        [0,27,32,25,19,25,25,22,24,28,26],
        [24,0,22,24,22,26,23,24,25,30,26],
        [19,29,0,23,24,25,33,27,23,28,26],
        [26,27,28,0,20,28,26,23,28,25,29],
        [32,29,27,31,0,26,29,24,23,32,32],
        [26,25,26,23,25,0,26,31,22,26,27],
        [26,28,18,25,22,25,0,24,25,29,29],
        [29,27,24,28,27,20,27,0,26,26,27],
        [27,26,28,23,28,29,26,25,0,27,29],
        [23,21,23,26,19,25,22,25,24,0,23],
        [25,25,25,22,19,24,22,24,22,28,0]]),
    (144, [
        [0,26,30,25,32,27,26,33,32,26,25],
        [25,0,25,21,27,25,22,31,37,22,22],
        [21,26,0,24,33,28,25,28,31,24,27],
        [26,30,27,0,33,27,27,37,35,25,29],
        [19,24,18,18,0,15,15,30,26,21,16],
        [24,26,23,24,36,0,24,28,33,25,28],
        [25,29,26,24,36,27,0,28,32,25,28],
        [18,20,23,14,21,23,23,0,29,20,17],
        [19,14,20,16,25,18,19,22,0,19,20],
        [25,29,27,26,30,26,26,31,32,0,27],
        [26,29,24,22,35,23,23,34,31,24,0]]),
    (145, [
        [0,36,19,30,32,21,23,38,30,23,25],
        [15,0,21,23,25,24,20,33,29,22,23],
        [32,30,0,27,29,31,29,39,25,30,22],
        [21,28,24,0,37,28,27,42,30,25,28],
        [19,26,22,14,0,30,31,36,31,18,28],
        [30,27,20,23,21,0,26,33,28,23,21],
        [28,31,22,24,20,25,0,27,27,23,35],
        [13,18,12,9,15,18,24,0,15,11,20],
        [21,22,26,21,20,23,24,36,0,13,25],
        [28,29,21,26,33,28,28,40,38,0,31],
        [26,28,29,23,23,30,16,31,26,20,0]]),
]


def _run_me_bb(om):
    """Time `rep` executions of ME-BB (alg.AzziniMunda5) on matrix `om`.

    Returns (times, sol, algorithm): per-repetition wall-clock seconds,
    the solution returned by the last repetition, and the last algorithm
    object (its `ntentative` counter is reported in the result row).
    """
    times = np.zeros(rep)
    for i in range(rep):
        # Algorithm with Condorcet winner
        algorithm = alg.AzziniMunda5(om, float("inf"))
        start_time = time.time()
        sol = algorithm.execute()
        times[i] = time.time() - start_time
    return times, sol, algorithm


for _case_id, _om_rows in _ME_BB_CASES_11_51:
    om = np.array(_om_rows)
    times, sol, algorithm = _run_me_bb(om)
    # Median across repetitions is robust to occasional timing outliers.
    exec_time = np.median(times)
    # Row layout: n_alternatives, n_voters, case id, method name, median
    # time, sol.shape[0] (rows in the solution), tentatives explored,
    # followed by the raw per-repetition times.
    result = np.append(
        np.array(
            [11, 51, _case_id, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative],
            dtype=np.dtype(object),
        ),
        times,
    )
    print(result[:7])
    results = np.vstack((results, result))
##############################################################
om = np.array([
[0,38,30,32,29,29,31,27,21,22,31],
[13,0,15,29,22,25,23,25,19,13,14],
[21,36,0,33,29,21,21,28,30,25,14],
[19,22,18,0,23,17,18,13,26,20,11],
[22,29,22,28,0,19,20,24,31,18,22],
[22,26,30,34,32,0,30,31,35,23,28],
[20,28,30,33,31,21,0,24,30,26,13],
[24,26,23,38,27,20,27,0,25,22,19],
[30,32,21,25,20,16,21,26,0,19,17],
[29,38,26,31,33,28,25,29,32,0,26],
[20,37,37,40,29,23,38,32,34,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 146, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,32,20,26,27,20,24,18,31,29,30],
[19,0,20,29,25,18,29,26,31,25,41],
[31,31,0,35,23,18,33,24,36,31,36],
[25,22,16,0,21,21,14,23,25,21,29],
[24,26,28,30,0,28,22,30,29,31,37],
[31,33,33,30,23,0,23,26,24,25,31],
[27,22,18,37,29,28,0,21,22,32,37],
[33,25,27,28,21,25,30,0,33,28,29],
[20,20,15,26,22,27,29,18,0,18,30],
[22,26,20,30,20,26,19,23,33,0,33],
[21,10,15,22,14,20,14,22,21,18,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 147, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,25,30,27,26,31,26,29,27,30],
[30,0,33,31,30,29,35,34,23,31,34],
[26,18,0,27,31,28,35,30,26,26,30],
[21,20,24,0,22,23,28,22,16,20,22],
[24,21,20,29,0,27,29,28,26,22,27],
[25,22,23,28,24,0,33,26,28,23,25],
[20,16,16,23,22,18,0,22,24,22,21],
[25,17,21,29,23,25,29,0,22,26,24],
[22,28,25,35,25,23,27,29,0,27,26],
[24,20,25,31,29,28,29,25,24,0,25],
[21,17,21,29,24,26,30,27,25,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 148, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,31,31,24,34,30,24,22,25,27,27],
[20,0,25,22,24,25,22,18,24,21,25],
[20,26,0,27,27,24,26,23,23,21,20],
[27,29,24,0,31,28,29,20,30,30,24],
[17,27,24,20,0,23,20,20,23,18,21],
[21,26,27,23,28,0,27,22,23,23,22],
[27,29,25,22,31,24,0,23,21,24,25],
[29,33,28,31,31,29,28,0,29,27,24],
[26,27,28,21,28,28,30,22,0,24,23],
[24,30,30,21,33,28,27,24,27,0,23],
[24,26,31,27,30,29,26,27,28,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 149, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,22,26,20,31,25,19,24,25,24],
[25,0,20,23,21,27,19,21,28,21,27],
[29,31,0,29,23,32,31,23,28,28,32],
[25,28,22,0,22,28,22,28,26,26,31],
[31,30,28,29,0,29,31,25,32,26,33],
[20,24,19,23,22,0,19,21,25,20,24],
[26,32,20,29,20,32,0,24,30,25,31],
[32,30,28,23,26,30,27,0,31,28,33],
[27,23,23,25,19,26,21,20,0,23,25],
[26,30,23,25,25,31,26,23,28,0,26],
[27,24,19,20,18,27,20,18,26,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 150, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,20,23,17,14,8,22,15,29,20],
[25,0,25,20,23,25,10,16,14,28,22],
[31,26,0,35,16,31,8,22,15,29,17],
[28,31,16,0,27,17,14,14,10,23,15],
[34,28,35,24,0,31,28,22,24,30,28],
[37,26,20,34,20,0,19,28,28,37,25],
[43,41,43,37,23,32,0,24,19,36,26],
[29,35,29,37,29,23,27,0,32,42,27],
[36,37,36,41,27,23,32,19,0,24,22],
[22,23,22,28,21,14,15,9,27,0,14],
[31,29,34,36,23,26,25,24,29,37,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 151, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,22,25,28,23,26,25,30,25,29,21],
[29,0,25,31,25,26,19,29,24,29,22],
[26,26,0,22,28,31,24,28,26,30,22],
[23,20,29,0,25,26,20,31,28,25,26],
[28,26,23,26,0,23,23,29,27,25,18],
[25,25,20,25,28,0,24,27,22,23,16],
[26,32,27,31,28,27,0,26,25,35,23],
[21,22,23,20,22,24,25,0,27,24,24],
[26,27,25,23,24,29,26,24,0,28,18],
[22,22,21,26,26,28,16,27,23,0,19],
[30,29,29,25,33,35,28,27,33,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 152, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,22,26,28,20,31,24,31,29,34],
[30,0,26,23,33,31,31,31,30,34,35],
[29,25,0,23,32,27,29,25,25,31,32],
[25,28,28,0,27,29,31,28,30,35,31],
[23,18,19,24,0,21,31,27,26,29,29],
[31,20,24,22,30,0,31,27,28,34,33],
[20,20,22,20,20,20,0,28,28,25,26],
[27,20,26,23,24,24,23,0,32,26,29],
[20,21,26,21,25,23,23,19,0,23,27],
[22,17,20,16,22,17,26,25,28,0,29],
[17,16,19,20,22,18,25,22,24,22,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 153, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,30,32,28,29,28,20,29,24,26],
[26,0,28,31,31,27,23,29,27,25,24],
[21,23,0,28,26,30,24,23,29,29,29],
[19,20,23,0,24,28,24,21,28,21,27],
[23,20,25,27,0,21,21,22,26,29,20],
[22,24,21,23,30,0,25,27,27,25,25],
[23,28,27,27,30,26,0,22,27,24,25],
[31,22,28,30,29,24,29,0,32,21,22],
[22,24,22,23,25,24,24,19,0,21,20],
[27,26,22,30,22,26,27,30,30,0,27],
[25,27,22,24,31,26,26,29,31,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 154, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,17,20,18,20,16,23,23,20,23,21],
[34,0,27,24,24,25,29,32,27,28,26],
[31,24,0,22,23,21,21,25,23,25,28],
[33,27,29,0,27,23,26,29,28,31,26],
[31,27,28,24,0,20,30,31,25,30,27],
[35,26,30,28,31,0,30,28,25,29,29],
[28,22,30,25,21,21,0,33,26,22,30],
[28,19,26,22,20,23,18,0,23,23,28],
[31,24,28,23,26,26,25,28,0,27,28],
[28,23,26,20,21,22,29,28,24,0,28],
[30,25,23,25,24,22,21,23,23,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 155, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,36,35,33,27,25,33,28,34,34,26],
[15,0,20,18,21,18,19,15,27,21,20],
[16,31,0,23,27,26,23,17,29,25,18],
[18,33,28,0,24,22,22,27,28,29,18],
[24,30,24,27,0,26,27,30,34,32,25],
[26,33,25,29,25,0,28,34,35,32,28],
[18,32,28,29,24,23,0,26,29,35,24],
[23,36,34,24,21,17,25,0,32,30,19],
[17,24,22,23,17,16,22,19,0,24,17],
[17,30,26,22,19,19,16,21,27,0,22],
[25,31,33,33,26,23,27,32,34,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 156, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,32,26,26,29,33,28,24,20,21],
[24,0,25,25,28,20,25,26,25,23,23],
[19,26,0,25,23,18,28,24,20,20,21],
[25,26,26,0,25,20,35,30,30,24,25],
[25,23,28,26,0,22,26,28,20,25,17],
[22,31,33,31,29,0,33,35,28,26,26],
[18,26,23,16,25,18,0,25,21,15,20],
[23,25,27,21,23,16,26,0,19,20,17],
[27,26,31,21,31,23,30,32,0,22,26],
[31,28,31,27,26,25,36,31,29,0,24],
[30,28,30,26,34,25,31,34,25,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 157, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,30,28,24,24,22,25,19,21,25],
[21,0,24,21,21,19,18,24,16,21,23],
[21,27,0,22,19,18,25,25,17,27,22],
[23,30,29,0,18,27,25,24,22,26,26],
[27,30,32,33,0,29,28,23,28,26,23],
[27,32,33,24,22,0,27,25,20,25,27],
[29,33,26,26,23,24,0,29,24,30,24],
[26,27,26,27,28,26,22,0,17,24,24],
[32,35,34,29,23,31,27,34,0,35,35],
[30,30,24,25,25,26,21,27,16,0,27],
[26,28,29,25,28,24,27,27,16,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 158, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,20,21,28,28,24,25,32,22,22,30],
[31,0,31,30,36,29,38,38,23,25,23],
[30,20,0,28,29,18,27,36,21,32,21],
[23,21,23,0,30,25,29,31,20,24,22],
[23,15,22,21,0,15,19,30,14,20,18],
[27,22,33,26,36,0,28,38,24,26,22],
[26,13,24,22,32,23,0,26,18,30,24],
[19,13,15,20,21,13,25,0,9,17,13],
[29,28,30,31,37,27,33,42,0,31,23],
[29,26,19,27,31,25,21,34,20,0,23],
[21,28,30,29,33,29,27,38,28,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 159, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,18,24,22,37,35,28,26,32,25,27],
[33,0,26,28,39,26,21,28,36,31,33],
[27,25,0,20,22,33,22,30,32,27,25],
[29,23,31,0,27,30,19,27,34,28,34],
[14,12,29,24,0,25,10,19,30,20,27],
[16,25,18,21,26,0,17,24,30,25,24],
[23,30,29,32,41,34,0,29,33,34,35],
[25,23,21,24,32,27,22,0,29,24,33],
[19,15,19,17,21,21,18,22,0,24,22],
[26,20,24,23,31,26,17,27,27,0,34],
[24,18,26,17,24,27,16,18,29,17,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 160, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,30,30,26,26,24,30,28,28,23],
[27,0,32,28,25,28,25,26,26,32,23],
[21,19,0,24,23,20,15,23,18,24,18],
[21,23,27,0,26,24,23,26,22,29,22],
[25,26,28,25,0,24,26,25,25,32,24],
[25,23,31,27,27,0,23,29,25,29,22],
[27,26,36,28,25,28,0,29,26,35,20],
[21,25,28,25,26,22,22,0,23,29,27],
[23,25,33,29,26,26,25,28,0,31,24],
[23,19,27,22,19,22,16,22,20,0,19],
[28,28,33,29,27,29,31,24,27,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 161, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,37,27,28,23,33,28,22,29,33],
[22,0,22,12,22,16,27,26,24,25,23],
[14,29,0,23,28,24,23,31,19,24,21],
[24,39,28,0,34,29,29,41,25,30,26],
[23,29,23,17,0,21,22,30,25,21,32],
[28,35,27,22,30,0,33,34,26,26,34],
[18,24,28,22,29,18,0,29,26,24,24],
[23,25,20,10,21,17,22,0,27,25,20],
[29,27,32,26,26,25,25,24,0,28,26],
[22,26,27,21,30,25,27,26,23,0,26],
[18,28,30,25,19,17,27,31,25,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 162, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,8,23,20,26,27,18,27,8,16,17],
[43,0,43,21,43,27,37,34,18,35,18],
[28,8,0,28,25,19,26,27,18,23,10],
[31,30,23,0,38,32,32,32,30,38,33],
[25,8,26,13,0,27,19,27,11,10,10],
[24,24,32,19,24,0,34,25,9,17,33],
[33,14,25,19,32,17,0,34,16,15,8],
[24,17,24,19,24,26,17,0,17,24,17],
[43,33,33,21,40,42,35,34,0,25,25],
[35,16,28,13,41,34,36,27,26,0,25],
[34,33,41,18,41,18,43,34,26,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 163, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,14,20,17,8,20,21,25,18,29],
[30,0,30,31,20,22,20,29,33,29,40],
[37,21,0,34,22,19,31,10,30,23,34],
[31,20,17,0,23,14,23,10,25,18,16],
[34,31,29,28,0,34,23,29,25,34,37],
[43,29,32,37,17,0,23,29,25,29,37],
[31,31,20,28,28,28,0,27,17,18,21],
[30,22,41,41,22,22,24,0,38,25,41],
[26,18,21,26,26,26,34,13,0,20,18],
[33,22,28,33,17,22,33,26,31,0,33],
[22,11,17,35,14,14,30,10,33,18,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 164, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,17,34,25,26,10,23,31,8,9],
[23,0,20,41,22,27,13,35,34,24,11],
[34,31,0,41,18,31,28,45,32,36,40],
[17,10,10,0,1,13,8,30,20,10,9],
[26,29,33,50,0,28,13,34,34,33,32],
[25,24,20,38,23,0,10,33,30,21,24],
[41,38,23,43,38,41,0,43,30,37,47],
[28,16,6,21,17,18,8,0,32,6,22],
[20,17,19,31,17,21,21,19,0,19,18],
[43,27,15,41,18,30,14,45,32,0,32],
[42,40,11,42,19,27,4,29,33,19,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 165, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,20,16,25,21,22,24,26,23,26],
[32,0,24,27,28,26,25,31,25,28,23],
[31,27,0,25,26,20,24,26,23,27,26],
[35,24,26,0,22,22,26,30,26,29,30],
[26,23,25,29,0,24,27,24,25,26,26],
[30,25,31,29,27,0,26,28,27,32,25],
[29,26,27,25,24,25,0,27,24,24,29],
[27,20,25,21,27,23,24,0,25,24,21],
[25,26,28,25,26,24,27,26,0,23,26],
[28,23,24,22,25,19,27,27,28,0,26],
[25,28,25,21,25,26,22,30,25,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 166, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,22,20,20,21,19,18,27,19,26,16],
[29,0,21,19,23,22,25,24,21,27,21],
[31,30,0,27,29,27,19,27,20,34,27],
[31,32,24,0,23,28,24,28,24,31,25],
[30,28,22,28,0,23,19,27,19,34,22],
[32,29,24,23,28,0,28,29,30,32,27],
[33,26,32,27,32,23,0,32,23,32,25],
[24,27,24,23,24,22,19,0,23,27,18],
[32,30,31,27,32,21,28,28,0,33,27],
[25,24,17,20,17,19,19,24,18,0,18],
[35,30,24,26,29,24,26,33,24,33,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 167, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,21,17,26,22,19,35,21,24,26],
[30,0,23,25,24,28,28,37,27,28,29],
[30,28,0,26,22,22,28,26,28,24,22],
[34,26,25,0,26,21,31,33,30,32,28],
[25,27,29,25,0,15,22,30,22,22,18],
[29,23,29,30,36,0,28,40,25,27,32],
[32,23,23,20,29,23,0,27,29,24,20],
[16,14,25,18,21,11,24,0,17,27,17],
[30,24,23,21,29,26,22,34,0,27,24],
[27,23,27,19,29,24,27,24,24,0,24],
[25,22,29,23,33,19,31,34,27,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 168, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,33,21,27,25,30,36,34,51,32],
[32,0,33,24,47,30,30,41,30,47,28],
[18,18,0,21,27,10,19,27,10,31,19],
[30,27,30,0,40,32,27,32,23,36,40],
[24,4,24,11,0,34,17,20,25,30,23],
[26,21,41,19,17,0,23,26,17,30,32],
[21,21,32,24,34,28,0,30,17,21,23],
[15,10,24,19,31,25,21,0,25,38,14],
[17,21,41,28,26,34,34,26,0,27,36],
[0,4,20,15,21,21,30,13,24,0,19],
[19,23,32,11,28,19,28,37,15,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 169, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,19,30,27,30,26,30,25,27,30],
[24,0,25,28,24,27,26,31,26,33,28],
[32,26,0,24,23,32,30,34,28,25,31],
[21,23,27,0,23,27,26,31,25,26,29],
[24,27,28,28,0,29,25,32,26,30,35],
[21,24,19,24,22,0,24,28,23,28,28],
[25,25,21,25,26,27,0,26,27,28,27],
[21,20,17,20,19,23,25,0,22,26,25],
[26,25,23,26,25,28,24,29,0,25,33],
[24,18,26,25,21,23,23,25,26,0,28],
[21,23,20,22,16,23,24,26,18,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 170, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,24,27,28,34,32,34,31,29,22],
[22,0,24,26,28,28,29,27,30,28,23],
[27,27,0,28,27,32,24,28,37,28,28],
[24,25,23,0,23,31,24,32,27,26,25],
[23,23,24,28,0,26,27,31,27,20,17],
[17,23,19,20,25,0,21,30,27,22,20],
[19,22,27,27,24,30,0,34,30,25,23],
[17,24,23,19,20,21,17,0,24,17,15],
[20,21,14,24,24,24,21,27,0,22,19],
[22,23,23,25,31,29,26,34,29,0,25],
[29,28,23,26,34,31,28,36,32,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 171, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,38,28,22,29,30,30,31,26,30],
[28,0,29,29,23,31,24,29,31,27,34],
[13,22,0,19,11,19,26,15,14,11,24],
[23,22,32,0,23,25,28,23,24,19,26],
[29,28,40,28,0,25,28,33,25,24,32],
[22,20,32,26,26,0,30,31,25,19,31],
[21,27,25,23,23,21,0,22,24,21,22],
[21,22,36,28,18,20,29,0,24,22,23],
[20,20,37,27,26,26,27,27,0,24,28],
[25,24,40,32,27,32,30,29,27,0,36],
[21,17,27,25,19,20,29,28,23,15,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 172, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,35,24,35,35,27,27,51,27,35],
[28,0,51,24,51,23,27,35,39,35,23],
[16,0,0,0,11,0,12,11,24,12,11],
[27,27,51,0,39,31,23,39,39,39,31],
[16,0,40,12,0,4,12,11,28,12,15],
[16,28,51,20,47,0,12,39,51,39,27],
[24,24,39,28,39,39,0,39,39,51,39],
[24,16,40,12,40,12,12,0,28,28,23],
[0,12,27,12,23,0,12,23,0,12,11],
[24,16,39,12,39,12,0,23,39,0,23],
[16,28,40,20,36,24,12,28,40,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 173, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,15,17,24,20,22,14,20,26,23,17],
[36,0,20,29,28,23,25,24,30,30,28],
[34,31,0,34,17,28,27,26,30,33,32],
[27,22,17,0,17,19,19,24,27,21,22],
[31,23,34,34,0,27,23,29,34,30,29],
[29,28,23,32,24,0,19,24,32,24,22],
[37,26,24,32,28,32,0,31,35,32,28],
[31,27,25,27,22,27,20,0,31,19,22],
[25,21,21,24,17,19,16,20,0,21,22],
[28,21,18,30,21,27,19,32,30,0,23],
[34,23,19,29,22,29,23,29,29,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 174, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,17,17,17,17,9,26,11,16,18,20],
[34,0,28,26,20,30,41,24,27,24,32],
[34,23,0,24,25,22,24,20,25,32,23],
[34,25,27,0,24,24,32,21,28,32,30],
[34,31,26,27,0,22,35,21,20,29,29],
[42,21,29,27,29,0,26,27,19,28,30],
[25,10,27,19,16,25,0,22,18,22,26],
[40,27,31,30,30,24,29,0,21,25,30],
[35,24,26,23,31,32,33,30,0,28,29],
[33,27,19,19,22,23,29,26,23,0,25],
[31,19,28,21,22,21,25,21,22,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 175, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,34,33,27,29,32,34,30,31,24,26],
[17,0,22,15,19,23,28,23,23,21,22],
[18,29,0,18,18,20,24,21,24,19,21],
[24,36,33,0,31,30,25,31,28,26,28],
[22,32,33,20,0,28,27,25,27,25,27],
[19,28,31,21,23,0,27,18,25,23,24],
[17,23,27,26,24,24,0,26,20,26,24],
[21,28,30,20,26,33,25,0,26,23,27],
[20,28,27,23,24,26,31,25,0,25,32],
[27,30,32,25,26,28,25,28,26,0,25],
[25,29,30,23,24,27,27,24,19,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 176, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,25,35,22,31,31,33,36,24,29],
[26,0,33,34,28,26,31,18,33,30,32],
[26,18,0,29,22,18,26,18,24,18,21],
[16,17,22,0,28,20,19,14,21,17,19],
[29,23,29,23,0,32,37,21,36,20,27],
[20,25,33,31,19,0,27,16,35,27,21],
[20,20,25,32,14,24,0,12,22,15,17],
[18,33,33,37,30,35,39,0,37,33,27],
[15,18,27,30,15,16,29,14,0,27,21],
[27,21,33,34,31,24,36,18,24,0,27],
[22,19,30,32,24,30,34,24,30,24,0]])
times = np.zeros(rep)
# ---------------------------------------------------------------------------
# ME-BB timing experiments on 11-alternative / 51-voter outranking matrices.
#
# The original script repeated the same measure-and-record stanza once per
# matrix; here the matrices are collected in a list and one loop runs every
# experiment, preserving the exact order of prints and result rows.
#
# Each experiment:
#   * runs alg.AzziniMunda5 (branch and bound with Condorcet-winner pruning,
#     unbounded cost: float("inf")) `rep` times on the matrix `om`,
#   * records each wall-clock duration and summarises with the median,
#   * appends to the global `results` array one object-dtype row:
#       [11, 51, matrix_id, "ME-BB", median_time, n_optimal_rankings,
#        n_tentative_rankings, *individual_times]
#
# The first experiment (matrix id 177) reuses the `om` defined immediately
# before this section; matrices 178-200 are given literally below.
experiments = [(177, om)]
experiments += [
    (178, np.array([
        [0,27,31,29,30,31,28,25,26,27,25],
        [24,0,29,29,28,23,28,28,27,29,18],
        [20,22,0,22,23,23,21,23,14,19,18],
        [22,22,29,0,26,28,26,27,24,26,22],
        [21,23,28,25,0,32,26,24,19,23,25],
        [20,28,28,23,19,0,25,24,22,22,21],
        [23,23,30,25,25,26,0,22,26,25,22],
        [26,23,28,24,27,27,29,0,25,23,25],
        [25,24,37,27,32,29,25,26,0,30,26],
        [24,22,32,25,28,29,26,28,21,0,27],
        [26,33,33,29,26,30,29,26,25,24,0]])),
    (179, np.array([
        [0,19,23,24,27,26,29,30,20,22,26],
        [32,0,26,31,36,26,25,29,24,31,26],
        [28,25,0,26,27,25,22,24,24,31,27],
        [27,20,25,0,22,22,22,26,28,29,24],
        [24,15,24,29,0,26,23,28,20,30,20],
        [25,25,26,29,25,0,25,27,29,27,28],
        [22,26,29,29,28,26,0,30,28,33,28],
        [21,22,27,25,23,24,21,0,22,30,24],
        [31,27,27,23,31,22,23,29,0,28,24],
        [29,20,20,22,21,24,18,21,23,0,21],
        [25,25,24,27,31,23,23,27,27,30,0]])),
    (180, np.array([
        [0,33,42,33,34,31,18,20,40,33,39],
        [18,0,20,26,32,35,18,26,26,35,24],
        [9,31,0,19,29,37,18,26,26,28,17],
        [18,25,32,0,23,23,18,29,35,31,20],
        [17,19,22,28,0,26,18,15,15,8,17],
        [20,16,14,28,25,0,18,26,37,30,28],
        [33,33,33,33,33,33,0,20,31,33,39],
        [31,25,25,22,36,25,31,0,40,25,31],
        [11,25,25,16,36,14,20,11,0,20,31],
        [18,16,23,20,43,21,18,26,31,0,29],
        [12,27,34,31,34,23,12,20,20,22,0]])),
    (181, np.array([
        [0,16,29,21,33,19,24,33,21,15,18],
        [35,0,36,28,44,38,26,37,23,28,26],
        [22,15,0,16,21,20,16,21,14,17,14],
        [30,23,35,0,30,34,32,30,29,29,25],
        [18,7,30,21,0,28,19,30,21,17,24],
        [32,13,31,17,23,0,17,22,23,20,17],
        [27,25,35,19,32,34,0,30,19,19,18],
        [18,14,30,21,21,29,21,0,26,21,19],
        [30,28,37,22,30,28,32,25,0,25,24],
        [36,23,34,22,34,31,32,30,26,0,20],
        [33,25,37,26,27,34,33,32,27,31,0]])),
    (182, np.array([
        [0,24,17,20,19,23,16,30,28,22,16],
        [27,0,20,20,26,27,24,30,25,26,23],
        [34,31,0,26,28,31,25,34,36,28,26],
        [31,31,25,0,24,22,21,27,31,25,20],
        [32,25,23,27,0,30,22,27,32,27,25],
        [28,24,20,29,21,0,25,28,27,26,19],
        [35,27,26,30,29,26,0,29,28,27,20],
        [21,21,17,24,24,23,22,0,27,23,14],
        [23,26,15,20,19,24,23,24,0,23,14],
        [29,25,23,26,24,25,24,28,28,0,21],
        [35,28,25,31,26,32,31,37,37,30,0]])),
    (183, np.array([
        [0,24,17,11,18,24,12,24,21,18,18],
        [27,0,27,14,23,29,22,26,16,22,21],
        [34,24,0,19,27,24,22,29,22,22,26],
        [40,37,32,0,30,32,26,35,22,33,31],
        [33,28,24,21,0,28,27,34,21,24,26],
        [27,22,27,19,23,0,16,23,19,20,22],
        [39,29,29,25,24,35,0,28,25,25,25],
        [27,25,22,16,17,28,23,0,21,22,21],
        [30,35,29,29,30,32,26,30,0,29,24],
        [33,29,29,18,27,31,26,29,22,0,26],
        [33,30,25,20,25,29,26,30,27,25,0]])),
    (184, np.array([
        [0,23,26,28,27,25,24,33,22,20,22],
        [28,0,28,28,30,28,24,36,28,28,25],
        [25,23,0,28,25,20,19,29,22,22,18],
        [23,23,23,0,24,25,24,29,21,17,23],
        [24,21,26,27,0,24,28,24,25,27,24],
        [26,23,31,26,27,0,24,29,24,29,28],
        [27,27,32,27,23,27,0,29,24,23,25],
        [18,15,22,22,27,22,22,0,21,20,14],
        [29,23,29,30,26,27,27,30,0,27,24],
        [31,23,29,34,24,22,28,31,24,0,28],
        [29,26,33,28,27,23,26,37,27,23,0]])),
    (185, np.array([
        [0,25,24,27,22,25,21,27,24,30,31],
        [26,0,27,24,23,31,21,24,25,28,32],
        [27,24,0,25,26,28,27,24,25,29,31],
        [24,27,26,0,28,30,23,29,27,28,31],
        [29,28,25,23,0,34,25,27,23,28,29],
        [26,20,23,21,17,0,22,26,20,26,25],
        [30,30,24,28,26,29,0,25,28,27,30],
        [24,27,27,22,24,25,26,0,23,26,29],
        [27,26,26,24,28,31,23,28,0,27,27],
        [21,23,22,23,23,25,24,25,24,0,25],
        [20,19,20,20,22,26,21,22,24,26,0]])),
    (186, np.array([
        [0,29,22,25,29,23,26,29,25,17,27],
        [22,0,20,20,22,23,20,27,25,18,23],
        [29,31,0,26,33,28,24,31,26,18,31],
        [26,31,25,0,31,25,24,26,26,22,27],
        [22,29,18,20,0,20,22,19,20,21,27],
        [28,28,23,26,31,0,29,28,23,22,22],
        [25,31,27,27,29,22,0,26,28,25,27],
        [22,24,20,25,32,23,25,0,25,26,28],
        [26,26,25,25,31,28,23,26,0,24,25],
        [34,33,33,29,30,29,26,25,27,0,27],
        [24,28,20,24,24,29,24,23,26,24,0]])),
    (187, np.array([
        [0,28,28,28,27,26,21,23,19,25,26],
        [23,0,24,19,27,20,21,16,18,24,19],
        [23,27,0,24,30,19,21,17,24,24,21],
        [23,32,27,0,32,22,23,17,25,25,22],
        [24,24,21,19,0,20,20,18,19,24,17],
        [25,31,32,29,31,0,21,21,21,25,18],
        [30,30,30,28,31,30,0,25,24,28,28],
        [28,35,34,34,33,30,26,0,25,30,26],
        [32,33,27,26,32,30,27,26,0,26,22],
        [26,27,27,26,27,26,23,21,25,0,19],
        [25,32,30,29,34,33,23,25,29,32,0]])),
    (188, np.array([
        [0,28,33,25,32,34,31,28,23,28,31],
        [23,0,32,28,27,31,31,25,25,18,29],
        [18,19,0,21,28,26,28,31,23,25,24],
        [26,23,30,0,29,31,23,35,19,17,30],
        [19,24,23,22,0,14,23,18,19,19,19],
        [17,20,25,20,37,0,31,32,29,21,27],
        [20,20,23,28,28,20,0,24,23,20,25],
        [23,26,20,16,33,19,27,0,24,25,25],
        [28,26,28,32,32,22,28,27,0,31,32],
        [23,33,26,34,32,30,31,26,20,0,28],
        [20,22,27,21,32,24,26,26,19,23,0]])),
    (189, np.array([
        [0,23,28,23,24,23,29,25,30,26,27],
        [28,0,31,31,33,28,23,27,29,24,35],
        [23,20,0,17,20,18,18,12,15,21,25],
        [28,20,34,0,26,28,26,25,25,29,31],
        [27,18,31,25,0,26,29,19,22,24,30],
        [28,23,33,23,25,0,29,20,23,20,32],
        [22,28,33,25,22,22,0,20,21,27,35],
        [26,24,39,26,32,31,31,0,25,27,30],
        [21,22,36,26,29,28,30,26,0,26,30],
        [25,27,30,22,27,31,24,24,25,0,31],
        [24,16,26,20,21,19,16,21,21,20,0]])),
    (190, np.array([
        [0,25,26,28,31,35,29,24,28,27,26],
        [26,0,25,27,31,34,25,28,30,29,28],
        [25,26,0,27,30,30,26,33,27,27,28],
        [23,24,24,0,33,33,28,29,31,26,30],
        [20,20,21,18,0,25,21,22,19,22,20],
        [16,17,21,18,26,0,24,25,24,21,19],
        [22,26,25,23,30,27,0,24,28,21,25],
        [27,23,18,22,29,26,27,0,26,26,26],
        [23,21,24,20,32,27,23,25,0,29,18],
        [24,22,24,25,29,30,30,25,22,0,25],
        [25,23,23,21,31,32,26,25,33,26,0]])),
    (191, np.array([
        [0,27,31,30,28,33,27,30,25,30,29],
        [24,0,25,26,22,24,26,24,28,28,24],
        [20,26,0,26,25,23,24,23,26,26,20],
        [21,25,25,0,25,28,25,22,26,27,26],
        [23,29,26,26,0,27,25,24,25,28,28],
        [18,27,28,23,24,0,27,21,24,26,29],
        [24,25,27,26,26,24,0,26,25,28,24],
        [21,27,28,29,27,30,25,0,27,30,27],
        [26,23,25,25,26,27,26,24,0,27,28],
        [21,23,25,24,23,25,23,21,24,0,20],
        [22,27,31,25,23,22,27,24,23,31,0]])),
    (192, np.array([
        [0,25,23,27,25,24,26,25,25,23,23],
        [26,0,22,26,25,24,25,31,29,27,24],
        [28,29,0,27,24,25,27,29,26,26,24],
        [24,25,24,0,21,25,21,26,24,23,24],
        [26,26,27,30,0,24,29,25,28,23,29],
        [27,27,26,26,27,0,24,30,30,29,28],
        [25,26,24,30,22,27,0,28,25,27,22],
        [26,20,22,25,26,21,23,0,26,23,22],
        [26,22,25,27,23,21,26,25,0,19,24],
        [28,24,25,28,28,22,24,28,32,0,26],
        [28,27,27,27,22,23,29,29,27,25,0]])),
    (193, np.array([
        [0,31,29,25,28,38,39,33,32,33,30],
        [20,0,26,36,27,33,39,22,24,25,16],
        [22,25,0,23,22,33,36,22,32,33,24],
        [26,15,28,0,24,33,39,25,22,33,20],
        [23,24,29,27,0,30,32,25,20,23,19],
        [13,18,18,18,21,0,25,23,18,20,16],
        [12,12,15,12,19,26,0,18,17,17,19],
        [18,29,29,26,26,28,33,0,28,32,20],
        [19,27,19,29,31,33,34,23,0,23,19],
        [18,26,18,18,28,31,34,19,28,0,17],
        [21,35,27,31,32,35,32,31,32,34,0]])),
    (194, np.array([
        [0,27,27,26,26,24,28,28,29,29,28],
        [24,0,20,22,25,24,25,23,28,30,23],
        [24,31,0,28,32,34,31,28,32,31,26],
        [25,29,23,0,27,24,30,26,22,29,26],
        [25,26,19,24,0,27,29,25,19,28,20],
        [27,27,17,27,24,0,28,29,19,30,23],
        [23,26,20,21,22,23,0,24,19,24,22],
        [23,28,23,25,26,22,27,0,25,22,24],
        [22,23,19,29,32,32,32,26,0,34,27],
        [22,21,20,22,23,21,27,29,17,0,22],
        [23,28,25,25,31,28,29,27,24,29,0]])),
    (195, np.array([
        [0,20,26,23,22,25,22,31,23,24,29],
        [31,0,31,23,29,28,29,27,29,29,25],
        [25,20,0,20,23,25,26,25,24,21,26],
        [28,28,31,0,29,26,23,31,25,27,24],
        [29,22,28,22,0,29,28,31,27,28,26],
        [26,23,26,25,22,0,21,25,24,21,22],
        [29,22,25,28,23,30,0,28,28,24,26],
        [20,24,26,20,20,26,23,0,22,20,27],
        [28,22,27,26,24,27,23,29,0,23,23],
        [27,22,30,24,23,30,27,31,28,0,31],
        [22,26,25,27,25,29,25,24,28,20,0]])),
    (196, np.array([
        [0,28,31,26,27,33,23,24,28,24,27],
        [23,0,28,25,28,25,23,25,27,28,30],
        [20,23,0,25,24,25,20,24,22,23,22],
        [25,26,26,0,28,28,20,29,27,26,26],
        [24,23,27,23,0,27,21,22,27,24,21],
        [18,26,26,23,24,0,26,22,20,22,26],
        [28,28,31,31,30,25,0,30,28,29,27],
        [27,26,27,22,29,29,21,0,24,27,27],
        [23,24,29,24,24,31,23,27,0,23,24],
        [27,23,28,25,27,29,22,24,28,0,26],
        [24,21,29,25,30,25,24,24,27,25,0]])),
    (197, np.array([
        [0,26,19,29,27,28,21,23,28,26,25],
        [25,0,19,26,22,26,23,25,28,19,25],
        [32,32,0,30,34,26,26,25,29,26,29],
        [22,25,21,0,26,21,27,23,28,22,19],
        [24,29,17,25,0,22,19,22,24,20,22],
        [23,25,25,30,29,0,27,24,27,28,31],
        [30,28,25,24,32,24,0,24,26,26,24],
        [28,26,26,28,29,27,27,0,29,24,31],
        [23,23,22,23,27,24,25,22,0,24,22],
        [25,32,25,29,31,23,25,27,27,0,28],
        [26,26,22,32,29,20,27,20,29,23,0]])),
    (198, np.array([
        [0,32,19,31,24,25,24,25,26,28,31],
        [19,0,17,26,29,29,30,23,16,27,25],
        [32,34,0,41,35,28,38,25,24,31,47],
        [20,25,10,0,17,22,21,29,19,24,17],
        [27,22,16,34,0,22,22,26,19,21,24],
        [26,22,23,29,29,0,35,24,20,29,36],
        [27,21,13,30,29,16,0,20,18,24,28],
        [26,28,26,22,25,27,31,0,23,26,31],
        [25,35,27,32,32,31,33,28,0,28,35],
        [23,24,20,27,30,22,27,25,23,0,25],
        [20,26,4,34,27,15,23,20,16,26,0]])),
    (199, np.array([
        [0,25,30,27,29,25,26,30,31,26,30],
        [26,0,30,24,33,28,25,29,29,27,27],
        [21,21,0,23,23,18,18,23,24,21,23],
        [24,27,28,0,26,23,30,29,27,28,26],
        [22,18,28,25,0,22,21,26,23,23,22],
        [26,23,33,28,29,0,30,32,30,28,31],
        [25,26,33,21,30,21,0,30,28,25,26],
        [21,22,28,22,25,19,21,0,23,22,24],
        [20,22,27,24,28,21,23,28,0,24,27],
        [25,24,30,23,28,23,26,29,27,0,27],
        [21,24,28,25,29,20,25,27,24,24,0]])),
    (200, np.array([
        [0,24,25,21,20,23,22,24,23,22,22],
        [27,0,25,17,23,22,20,25,26,28,24],
        [26,26,0,22,24,27,24,24,29,31,26],
        [30,34,29,0,26,31,23,27,30,28,26],
        [31,28,27,25,0,22,25,23,26,20,31],
        [28,29,24,20,29,0,24,23,29,28,26],
        [29,31,27,28,26,27,0,28,25,26,27],
        [27,26,27,24,28,28,23,0,24,27,24],
        [28,25,22,21,25,22,26,27,0,26,29],
        [29,23,20,23,31,23,25,24,25,0,21],
        [29,27,25,25,20,25,24,27,22,30,0]])),
]

for matrix_id, om in experiments:
    times = np.zeros(rep)
    for i in range(rep):
        # Algorithm with Condorcet winner
        algorithm = alg.AzziniMunda5(om, float("inf"))
        start_time = time.time()
        sol = algorithm.execute()
        times[i] = time.time() - start_time
    # Median damps outlier repetitions (GC pauses, scheduler noise).
    exec_time = np.median(times)
    result = np.append(
        np.array([11, 51, matrix_id, "ME-BB", exec_time, sol.shape[0],
                  algorithm.ntentative], dtype=np.dtype(object)),
        times)
    print(result[:7])  # progress line: summary columns only, without raw times
    results = np.vstack((results, result))

pd.DataFrame(results).to_csv("/Users/noeliarico/Desktop/folder-kemeny/2021EJOR/results/mebb/mebb_11_51.csv", index=False, header=False)
"noeliarico@uniovi.es"
] | noeliarico@uniovi.es |
78b74da7f2aebedbe38658a2a3381a3fe4a7698a | 81e12e3d86ccf7491b5dad29161bfc3ce5d9080c | /withoutrestm2/urls.py | 6500c7f62f3af208cab517270725271af28e7534 | [] | no_license | hack000025/CRUD_WITHOUT_REST_USING_PYTHON | b4ad5388a702510373b93810ef80e820aa24808c | ba817e1cf315477ccf676fca101f5c4d93ddba6c | refs/heads/main | 2023-08-13T21:03:22.187086 | 2021-10-02T11:35:43 | 2021-10-02T11:35:43 | 412,777,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """withoutrestm2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('wrestm.urls')),
]
| [
"pradipnishad67@gmail.com"
] | pradipnishad67@gmail.com |
223706274a7f1956fe2337f2225c386d7e53bd30 | 743481909ae50170f76b5a8ff9526ae97942d1ac | /tests/ut/python/attacks/black/test_pso_attack.py | 1763580d64ed803866130a357c5bec31b5dfb730 | [
"Apache-2.0"
] | permissive | zengchen1024/mindarmour | 1a888f51aefd25ad3ddb53673033482df221a5ad | eed59453cf048da92fe15f57dbe3ca7de8b7adcb | refs/heads/master | 2021-05-20T20:53:36.777515 | 2020-04-02T09:49:34 | 2020-04-02T09:49:34 | 252,413,321 | 0 | 0 | Apache-2.0 | 2020-04-02T09:38:24 | 2020-04-02T09:38:24 | null | UTF-8 | Python | false | false | 4,515 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PSO-Attack test.
"""
import numpy as np
import pytest
from mindspore import Tensor
import mindspore.nn as nn
from mindspore.nn import Cell
from mindspore import context
from mindarmour.attacks.black.pso_attack import PSOAttack
from mindarmour.attacks.black.black_model import BlackModel
# for user
class ModelToBeAttacked(BlackModel):
"""model to be attack"""
def __init__(self, network):
super(ModelToBeAttacked, self).__init__()
self._network = network
def predict(self, inputs):
"""predict"""
result = self._network(Tensor(inputs.astype(np.float32)))
return result.asnumpy()
class SimpleNet(Cell):
"""
Construct the network of target model.
Examples:
>>> net = SimpleNet()
"""
def __init__(self):
"""
Introduce the layers used for network construction.
"""
super(SimpleNet, self).__init__()
self._relu = nn.ReLU()
def construct(self, inputs):
"""
Construct network.
Args:
inputs (Tensor): Input data.
"""
out = self._relu(inputs)
return out
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pso_attack():
"""
PSO_Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
batch_size = 6
net = SimpleNet()
inputs = np.random.rand(batch_size, 10)
model = ModelToBeAttacked(net)
labels = np.random.randint(low=0, high=10, size=batch_size)
labels = np.eye(10)[labels]
labels = labels.astype(np.float32)
attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
_, adv_data, _ = attack.generate(inputs, labels)
assert np.any(inputs != adv_data)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pso_attack_targeted():
"""
PSO_Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
batch_size = 6
net = SimpleNet()
inputs = np.random.rand(batch_size, 10)
model = ModelToBeAttacked(net)
labels = np.random.randint(low=0, high=10, size=batch_size)
labels = np.eye(10)[labels]
labels = labels.astype(np.float32)
attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, targeted=True,
sparse=False)
_, adv_data, _ = attack.generate(inputs, labels)
assert np.any(inputs != adv_data)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_inference
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pso_attack_gpu():
"""
PSO_Attack test
"""
context.set_context(device_target="GPU")
batch_size = 6
net = SimpleNet()
inputs = np.random.rand(batch_size, 10)
model = ModelToBeAttacked(net)
labels = np.random.randint(low=0, high=10, size=batch_size)
labels = np.eye(10)[labels]
labels = labels.astype(np.float32)
attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
_, adv_data, _ = attack.generate(inputs, labels)
assert np.any(inputs != adv_data)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_pso_attack_cpu():
"""
PSO_Attack test
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
batch_size = 6
net = SimpleNet()
inputs = np.random.rand(batch_size, 10)
model = ModelToBeAttacked(net)
labels = np.random.randint(low=0, high=10, size=batch_size)
labels = np.eye(10)[labels]
labels = labels.astype(np.float32)
attack = PSOAttack(model, bounds=(0.0, 1.0), pm=0.5, sparse=False)
_, adv_data, _ = attack.generate(inputs, labels)
assert np.any(inputs != adv_data)
| [
"zhenghuanhuan5@huawei.com"
] | zhenghuanhuan5@huawei.com |
811e78d3cba821de4ff51a45377afe327baeb169 | 9fbafc49eeadc0df5882c9d3b244df447d70ad44 | /Homework/Week_2/eda.py | 77b15cb0f22e8b59e4c291ef6268aa996ba627e4 | [] | no_license | TulaKaptein/DataProcessing | 84f8be4b2a5c0a21210298f5f75968326ead510a | c4ad2dec18ee5ab8be7d8c17ceb8deb2e9118a4f | refs/heads/master | 2020-04-04T19:12:11.781653 | 2018-12-17T14:14:27 | 2018-12-17T14:14:27 | 156,196,173 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | #!/usr/bin/env python
# Name: Tula Kaptein
# Student number: 11013478
"""
This script improves data from an input file and writes it to a JSON file.
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pandas.api.types import is_numeric_dtype
# a function to remove outliers from
# https://gist.github.com/ariffyasri/70f1e9139da770cb8514998124560281
def remove_outlier(df):
low = .05
high = .95
quant_df = df.quantile([low, high])
for name in list(df.columns):
if is_numeric_dtype(df[name]):
df = df[(df[name] > quant_df.loc[low, name]) & (df[name] <
quant_df.loc[high, name])]
return df
# hard coding input and output
INPUT_CSV = "input.csv"
OUTPUT_CSV = "data.csv"
OUTPUT = "data.json"
# make a dataframe with the important columns.
df = pd.read_csv(INPUT_CSV, na_values=['unknown', ''], usecols=['Country',
'Region', 'Pop. Density (per sq. mi.)',
'Infant mortality (per 1000 births)',
'GDP ($ per capita) dollars'])
# preprocess the data
df['Region'] = df['Region'].str.strip()
df['Pop. Density (per sq. mi.)'] = df['Pop. Density (per sq.\
mi.)'].str.replace(',', '.').astype('float64')
df['GDP ($ per capita) dollars'] = df['GDP ($ per capita) dollars'].str.strip('\
dollars').astype('float64')
df['Infant mortality (per 1000 births)'] = df['Infant mortality\
(per 1000 births)'].str.replace(',', '.').astype('float64')
# delete outliers using a function provided by
df = remove_outlier(df)
# calculate mean, median, mode and std
mean = round(df['GDP ($ per capita) dollars'].mean(), 2)
median = df['GDP ($ per capita) dollars'].median()
mode = df['GDP ($ per capita) dollars'].mode().iloc[0]
std = df['GDP ($ per capita) dollars'].std()
# produce a histogram of the 'GDP ($ per capita) dollars' column
hist = df['GDP ($ per capita) dollars'].hist()
hist.plot()
plt.show()
# calculate the Five Number Summary of the 'Infant mortality' column
data = df['Infant mortality (per 1000 births)'].tolist()
data_min = min(data)
first_quart = np.nanpercentile(data, 25)
median = np.nanpercentile(data, 50)
third_quart = np.nanpercentile(data, 75)
data_max = max(data)
# produce a boxplot of the 'Infant mortality' column
box = df[['Infant mortality (per 1000 births)']].boxplot()
box.plot()
plt.show()
df.to_csv(OUTPUT_CSV)
# write a .JSON file
df.set_index('Country', inplace=True)
df.to_json(OUTPUT, orient='index')
| [
"tula.kaptein@gmail.com"
] | tula.kaptein@gmail.com |
c4c8dcbdff9aa6c4b969d59f1be6baaeb9cae7e9 | 5ef4f200b9f3a9727a17157c0631d9a69268bee6 | /src/config/settings/main/local.py | 64b984a713649799d952611f1704d26d7544d3fa | [] | no_license | Lost-tail/EducationPortal | 11002a663c86c62c84d6987c95aaee1713df7971 | a80e7267cefd501f1867d740c6e2bed0d5d810be | refs/heads/master | 2023-07-16T16:10:32.726335 | 2021-08-11T19:56:29 | 2021-08-11T19:56:29 | 395,100,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | from .base import *
DEBUG = True
ALLOWED_HOSTS = ['*'] | [
"dead43rus@gmail.com"
] | dead43rus@gmail.com |
6ae2af63c360ac6ce8e469d4ef399d5bd20040d2 | 6e4e6b64c035881f1cff39db616b0a80e1568c51 | /JOI7Qual/q1.py | 360741c86f3ad98b0fc70d4bc433923644dfa0f2 | [] | no_license | Lischero/Atcoder | f7471a85ee553e3ae791e3e5670468aea1fa53cc | f674d6a20a56eebdafa6d50d5d2d0f4030e5eace | refs/heads/master | 2020-05-21T16:23:36.095929 | 2018-10-18T04:27:55 | 2018-10-18T04:27:55 | 60,671,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | # -*- coding:utf-8 -*-
N = int(input())
change = 1000 - N
factors = [500, 100, 50, 10, 5, 1]
ans = 0
for factor in factors:
while change >= factor:
change -= factor
ans += 1
print(ans)
| [
"vermouth.lischero@gmail.com"
] | vermouth.lischero@gmail.com |
3964fcceaa73f6a56c27b58493d62552be76a1fb | 89ade40b52968d3ca1ac2a3725d53425f18fa203 | /Intermediate Python/Add column (1).py | 1d2971b461fd7c3c766ee8975ef0ebb7daf26b48 | [] | no_license | Diganta-droid/Data-Camp-Exercise | bdc796abc476d1d7ab201f6911ce56580c335b2b | 4bfd2e3bb02b382f5876e4010ed04e5e1aa147c7 | refs/heads/master | 2022-09-17T14:31:26.619462 | 2020-06-03T07:45:16 | 2020-06-03T07:45:16 | 266,725,467 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | Add column (1)
In the video, Hugo showed you how to add the length of the country names of the brics DataFrame in a new column:
for lab, row in brics.iterrows() :
brics.loc[lab, "name_length"] = len(row["country"])
You can do similar things on the cars DataFrame.
Instructions
100 XP
Use a for loop to add a new column, named COUNTRY, that contains a uppercase version of the country names in the "country" column. You can use the string method upper() for this.
To see if your code worked, print out cars. Don't indent this code, so that it's not part of the for loop.
Code::
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Code for loop that adds COUNTRY column
for lab,row in cars.iterrows():
cars.loc[lab,"COUNTRY"] = row['country'].upper()
# Print cars
print(cars)
| [
"noreply@github.com"
] | Diganta-droid.noreply@github.com |
0ea3d016199d5fb419605b44d6498d4c67bd7528 | 1a2636bb831c727e26a9995fc2b6f535465905f9 | /summerfield/chapter1/bigdigits.py | b28f83c8651e7d0ef012d9012683ae097d8ee372 | [] | no_license | gsaronni/showcase | a7965135f855c9ad6b8963cec091e81b2c88aa9c | e931e4591fb1351ff775965ec7bf7cb1ca6ac10c | refs/heads/master | 2023-03-03T03:47:45.446720 | 2021-02-13T09:23:00 | 2021-02-13T09:23:00 | 293,840,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | '''
Copyright 2010 Pearson Education, Inc.
This program is free software:
you can redistribute it and/or modify it under the terms of the GNU General
Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import sys
zero = [" *** ", " * * ", "* *", "* *", "* *", " * * ", " *** "]
one = [" * ", " * * ", "* * ", " * ", " * ", " * ", " ***"]
two = [" *** ", "* *", "* * ", " * ", "* ", "* ", "*****"]
three = [" *** ", "* *", " *", " **", " *", "* *", " *** "]
four = [" * ", " ** ", " * * ", "* * ", "*****", " * ", " * "]
five = ["*****", "* ", "* ", " *** ", " *", " *", " *** "]
six = [" *** ", "* ", "* ", " *** ", "* *", "* *", " *** "]
seven = ["*****", " *", " *", " * ", " * ", " * ", "* "]
eight = [" *** ", "* *", "* *", " *** ", "* *", "* *", " *** "]
nine = [" *** ", "* *", "* *", " *** ", " *", " *", " *** "]
Digits = [zero, one, two, three, four, five, six, seven, eight, nine]
try:
digits = sys.argv[1]
row = 0
while row < 7:
line = ""
column = 0
while column < len(digits):
number = int(digits[column])
digit = Digits[number]
line += digit[row] + " "
column += 1
print(line)
row += 1
except IndexError:
print("usage: bigdigits.py <number>")
except ValueError as err:
print(err, "in", digits)
| [
"garloni@protonmail.com"
] | garloni@protonmail.com |
3c88dfea19732bd183b7564c1bd2e335aa20e557 | a8311e351ae9ba0e929daa757187b3dd5dc6bc83 | /UDM/Nudm_UEAU/__init__.py | d6fde40e4e4860149d95ccc7233f3d482ff4dbbd | [] | no_license | carloshtobar/A5GCoreNetworkPrototype | 5172cebdc80a27bd9ca4f4568215aa0f4a83dfdb | fb182049b735526419c3635825dd15eb68c65c74 | refs/heads/master | 2020-05-18T07:24:34.306171 | 2019-05-13T14:31:10 | 2019-05-13T14:31:10 | 184,263,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # -*- coding: utf-8 -*-
from flask import Flask
from flask import Blueprint
import flask_restful as restful
from v1.api.AuthDataGeneration import AuthDataGeneration
routes = [
dict(resource=AuthDataGeneration, urls=['/AuthDataGeneration'], endpoint='AuthDataGeneration')
]
def create_app():
app = Flask(__name__, static_folder='static')
bp = Blueprint('v1',__name__,static_folder='static')
api = restful.Api(bp,catch_all_404s=True)
for route in routes:
api.add_resource(route.pop('resource'), *route.pop('urls'), **route)
app.register_blueprint(bp,url_prefix='/nudm-ueau/v1')
return app
if __name__ == '__main__':
print("Creating UDM")
create_app().run(host='127.0.0.1',port=5031,debug=True) | [
"noreply@github.com"
] | carloshtobar.noreply@github.com |
6226c2da30b3f1bfc231a556d691699f08397741 | 835b99cf3284926bc4fe36f5b67404a3626617be | /pypeline/entities.py | 05b4f88091438ed055d24f95d421e3b784ab75c8 | [] | no_license | carloszanella/pypeline | 1a016ca1291b84653d766608a2e7f3bbdf346deb | 1409993df853551a839eaae0bbf162166c2b896a | refs/heads/master | 2022-07-03T14:17:53.223101 | 2020-05-17T07:27:41 | 2020-05-17T07:27:41 | 264,604,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,210 | py | from dataclasses import dataclass
from logging import getLogger, DEBUG
from pathlib import Path
from typing import List
import dask.dataframe as dd
import h5py
import dask.array as da
import pandas as pd
import numpy as np
from pypeline.structure import structure
from pypeline.training.models import Model
log = getLogger(__name__)
log.setLevel(DEBUG)
@dataclass
class SubjectFMRI:
id: int
set_id: str = "train"
fmri_map: da.array = None
def load_data(self, fmri_path: str):
f = h5py.File(fmri_path, "r")
self.fmri_map = da.array(f["SM_feature"])
def compute(self):
return self.fmri_map.compute()
@dataclass
class RawData:
ids: np.ndarray
set_id: str = "train"
correlations: dd.DataFrame = None
fmri_maps: List[SubjectFMRI] = None
loadings: dd.DataFrame = None
icn: pd.Series = None
y: pd.DataFrame = None
def load_data_in_memory(
self,
correlations_path: Path = None,
y_path: Path = structure.raw.y_train,
fmri_path: Path = None,
loadings_path: Path = None,
icn_path: Path = None,
):
# load y
self.load_y(y_path)
# maybe load correlations
if correlations_path:
self.load_correlations(correlations_path)
# maybe load fmri data
if fmri_path:
self.load_fmri(fmri_path)
# maybe load loading data
if loadings_path:
self.load_loading_data(loadings_path)
# maybe load ICN
if icn_path:
self.load_icn(icn_path)
def load_y(self, path: Path):
y_train = pd.read_csv(path, index_col=0)
self.y = y_train.loc[self.ids]
def load_correlations(self, path: Path):
corr_ddf = dd.read_csv(path).set_index("Id")
self.correlations = corr_ddf.loc[self.ids]
def load_fmri(self, path: Path):
subjects_fmri = [SubjectFMRI(id, self.set_id) for id in self.ids]
self.fmri_maps = subjects_fmri
_ = [
subj.load_data(str(path).format(set_id=self.set_id, id=subj.id))
for subj in self.fmri_maps
]
def load_loading_data(self, path: Path):
loading_ddf = dd.read_csv(path).set_index("Id")
self.loadings = loading_ddf.loc[self.ids]
def load_icn(self, path: Path):
icn = pd.read_csv(path)
self.icn = icn.values
@dataclass
class TrainingResults:
model_version: str = None
dataset_version: str = None
model: Model = None
model_params: dict = None
train_mae: List[float] = None
train_weighted_mae: float = None
validation_mae: List[float] = None
validation_weighted_mae: float = None
model_path: Path = None
train_ids: np.ndarray = None
val_ids: np.ndarray = None
def print_score_results(self):
print(f"Scores for model {self.model_version} - {self.dataset_version}")
print("---------------------------------------------------\n")
print("Train MAE: ", self.train_mae)
print("Train Weighted MAE: ", self.train_weighted_mae)
print("Validation MAE: ", self.validation_mae)
print("Validation Weighted MAE: ", self.validation_weighted_mae)
| [
"cfszanella@gmail.com"
] | cfszanella@gmail.com |
e2e6ae133a3c7d5e2a67478e807b2afbce460c4e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02921/s327676216.py | 8d79966a0d9b41817f7a2c90ca060bbf016f3e46 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | # -*- coding: utf-8 -*-
## Library
import sys
from fractions import gcd
import math
from math import ceil,floor
import collections
from collections import Counter
import itertools
import copy
## input
# N=int(input())
# A,B,C,D=map(int, input().split())
# S = input()
# yoko = list(map(int, input().split()))
# tate = [int(input()) for _ in range(N)]
# N, M = map(int,input().split())
# P = [list(map(int,input().split())) for i in range(M)]
# S = []
# for _ in range(N):
# S.append(list(input()))
S = input()
T = input()
ans = 0
for i in range(3):
if S[i] == T[i]:
ans += 1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e2c4da8a50b9ac32f6024c7d70fd94d3bb2a17d8 | 8e012df5165be2559d2950e69b39d41e7c6945f1 | /blog_project/blog/admin.py | 27004ca877029e90c1568f36410b60a8e290fc30 | [] | no_license | Satya-mac/blogproject | 1a1ce6e0c123fe68c08568f3ca7bef806befe42a | e91df9dfb6657a3f2516cfeac918841dc527ec68 | refs/heads/master | 2023-06-17T07:21:47.090183 | 2021-07-13T14:51:20 | 2021-07-13T14:51:20 | 385,636,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | from django.contrib import admin
from blog.models import Post,Comment
# Register your models here.
class PostAdmin(admin.ModelAdmin):
list_display = ['title','slug','author','body','publish','created','updated','status']
list_filter = ('author','status','publish')
search_fields = ('title','body')
raw_id_fields = ('author',)
date_hierarchy = 'publish'
ordering = ['status','publish']
prepopulated_fields = {'slug':('title',)}
class CommentAdmin(admin.ModelAdmin):
list_display = ['name','email','post','body','created','updated','active']
list_filter = ('active','created','updated')
search_fields = ('name','email','body')
admin.site.register(Post,PostAdmin)
admin.site.register(Comment,CommentAdmin)
| [
"psatyajit185@gmail.com"
] | psatyajit185@gmail.com |
21fdd49fc1fb76a3cc03f725d4b487201b4c2880 | b9a4efbcf48e52a1333f6a548338e2f62aed30e3 | /forms/migrations/0002_alter_medical_receipt_line_inheritance.py | 5ea2a585642bb2db4cff443c9dea7f988e09f6fa | [] | no_license | Rabin5/formcollection | 0747639d9a2ff291457aacce874eb5a6428dea73 | 38c0bf763ae0a15c301c020d76ff0596c561da14 | refs/heads/main | 2023-08-10T18:48:26.736876 | 2021-09-26T06:19:09 | 2021-09-26T06:19:09 | 410,467,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | # Generated by Django 3.1.4 on 2021-01-28 08:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forms', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='medicalreceiptline',
name='create_user',
),
]
| [
"jenish.acharya@infodevelopers.com.np"
] | jenish.acharya@infodevelopers.com.np |
1edcf8a3dd8960ba01f77009fe807a84eda11bc0 | 6cf2467285a128987b438a12081ce5a50c3a3104 | /.ipynb_checkpoints/cleaning-checkpoint.py | 9d9af0b76b0e88cc9cf53279164cc46b25152d86 | [] | no_license | Carterbouley/mod_5_project | 5a8cef973ca140ada4d98bc6c01c1d4e5cb21b42 | 1de498a8ef8589a64a5f92eeb9e69940d91ab860 | refs/heads/master | 2020-09-29T19:30:05.537296 | 2020-07-21T14:28:04 | 2020-07-21T14:28:04 | 227,105,569 | 0 | 0 | null | 2019-12-13T13:04:54 | 2019-12-10T11:40:44 | Jupyter Notebook | UTF-8 | Python | false | false | 2,644 | py | def FixEducation(df):
to_drop = (df.loc[(df.Education > 4 )|(df.Education == 0) ]).index
to_drop_again = (df.loc[df.MaritalStatus == 0]).index
df = df.drop(to_drop)
df = df.drop(to_drop_again)
return df
def AddAverages(df):
df['average_bill'] = (df['BillApr'] + df['BillMay'] + df['BillJun'] + df['BillJul'] + df['BillAug'] + df['BillSep'])/6
df['average_payment'] = (df['PrevPaymentSep'] + df['PrevPaymentAug'] + df['PrevPaymentJul'] + df['PrevPaymentJun'] + df['PrevPaymentMay'] + df['PrevPaymentApr'])/6
df['total_payment'] = (df['PrevPaymentSep'] + df['PrevPaymentAug'] + df['PrevPaymentJul'] + df['PrevPaymentJun'] + df['PrevPaymentMay'] + df['PrevPaymentApr'])
df['average_percentage_of_bill_paid'] = (df['average_payment']/df['average_bill'])*100
df['bill_paid/credit_limit'] = (df['total_payment']/df['CreditLimit'])*100
df['average_bill_paid/credit_limit'] = (df['average_payment']/df['CreditLimit'])*100
return df
def AddStrings(df):
gender_dict ={1:'male', 2:'female'}
education_dict = {1: 'graduate school', 2: 'university', 3: 'high school', 4: 'others'}
marriage_dict = {1 : 'married', 2 : 'single', 3 : 'others'}
df['Gender'] = df['Gender'].map(gender_dict)
df['Education'] = df['Education'].map(education_dict)
df['MaritalStatus'] = df['MaritalStatus'].map(marriage_dict)
return df
def DropNonUsers(df):
drop_non_users =( df.loc[(df.average_bill == 0) & (df.total_payment == 0)]).index
df = df.drop(drop_non_users)
df_test = df.loc[df.average_percentage_of_bill_paid == np.inf].index
df = df.drop(df_test)
return df
def FixNegativestats(df):
fil = (df.RepayStatApr == -2) | (df.RepayStatApr == -1) | (df.RepayStatApr == 0)
df.loc[fil, 'RepayStatApr'] = 0
fil = (df.RepayStatMay == -2) | (df.RepayStatMay == -1) | (df.RepayStatMay == 0)
df.loc[fil, 'RepayStatMay'] = 0
fil = (df.RepayStatJun == -2) | (df.RepayStatJun == -1) | (df.RepayStatJun == 0)
df.loc[fil, 'RepayStatJun'] = 0
fil = (df.RepayStatJul == -2) | (df.RepayStatJul == -1) | (df.RepayStatJul == 0)
df.loc[fil, 'RepayStatJul'] = 0
fil = (df.RepayStatAug == -2) | (df.RepayStatAug == -1) | (df.RepayStatAug == 0)
df.loc[fil, 'RepayStatAug'] = 0
fil = (df.RepayStatSep == -2) | (df.RepayStatSep == -1) | (df.RepayStatSep == 0)
df.loc[fil, 'RepayStatSep'] = 0
return df
def TheUltimateCleaner(df):
df = FixEducation(df)
| [
"zarialevi@gmail.com"
] | zarialevi@gmail.com |
01a126faa7b657053d60373cfecb5713580793a1 | 2cf28bd139a041935b1c6d65b4fbfd8b5e3b0998 | /Code/DataCreate.py | f2b4e33be8efce85d405ae9beb4ce0b4715aadfb | [] | no_license | TrickyJustice/Movement-Command-Recognition-Classifier | a978f59a454735c86e2397df6599d04e2fd91a0b | acd324e7802e0efb61f7d2419a86751ca37bd00b | refs/heads/main | 2023-07-29T18:06:31.873554 | 2021-09-07T20:34:00 | 2021-09-07T20:34:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | import pyaudio
import time
from playsound import playsound
from datetime import datetime
import wave
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print('Hi, please enter initials- ')
print("Current Time =", current_time)
ini=input()
options=['Left','Right','Forward','Backward','Left','Right','Up','Down','Stop','Select']
for i,option in enumerate(options):
print('Recording number'+str(i))
filename=str(ini)+'-'+ option +'-'+ current_time+'.wav'
chunk = 1024
FORMAT = pyaudio.paInt16
channels = 1
sample_rate = 44100
record_seconds = 2
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,channels=channels,rate=sample_rate,input=True,output=True,frames_per_buffer=chunk)
frames = []
print('Say * '+ option +' * After the beep ends')
time.sleep(1)
playsound("beep-01a.wav")
time.sleep(0.15)
print("Recording...")
for i in range(int(44100 / chunk * record_seconds)):
data = stream.read(chunk,exception_on_overflow = False)
# stream.write(data)
frames.append(data)
print('Recorded, press control c to quit. Wait 3 seconds then i will save')
stream.stop_stream()
stream.close()
p.terminate()
time.sleep(3)
wf = wave.open(filename, "wb")
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(sample_rate)
wf.writeframes(b"".join(frames))
wf.close()
print('Session over.')
time.sleep(4)
| [
"noreply@github.com"
] | TrickyJustice.noreply@github.com |
40a70af1e5e64883ea8462a87e6a8fade31fcd78 | 9b492088dee2c276346558dc6c9e637ea018e061 | /mutability_and_immutability.py | 54acd8aa7fd13957e6acdf7135f4c86b01cdff22 | [] | no_license | KaustubhDhokte/python-code-snippets | 8ba8bf2feab93e1acdf402481b5f327285664cf4 | a86895ccb8413ad329c9da780521629edb3cbf07 | refs/heads/master | 2020-12-06T08:53:17.748974 | 2018-05-22T11:58:44 | 2018-05-22T11:58:44 | 66,947,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | a=b=4
b=5
print a # 4
print b # 5
a = [1,2]; b = a; b[0] = 4;
print a # [4, 2]
print b # [4, 2]
# Shallow copy
a = [1,2,3]; b = a[:]; b[0] = 8;
print a
print b
c = [4,5,6]; d = list(c); d[0] = 8;
print c
print d
p = [1,2,[3,4]];
q = p[:];
q[2][1] = 99
print p
print q | [
"kaustubh.dhokte@gmail.com"
] | kaustubh.dhokte@gmail.com |
be464077a5b6a83c4e5e8f5e3d3dca5c80b13cb5 | b5b060b715d560c0534c111b1315043605a9df41 | /tools/rolldown_matrix.py | b6030a7a135ad3babde21e0a233e6da3313e4283 | [] | no_license | DominicHong/FXIncome | d354a812b6dc494da75245558a1814b5dab43131 | dfa3d091534e964c431226b673c211971a4cf73a | refs/heads/master | 2023-08-17T05:58:25.144684 | 2023-08-15T07:57:24 | 2023-08-15T07:57:24 | 370,220,289 | 4 | 4 | null | 2022-12-03T08:25:37 | 2021-05-24T03:50:53 | Python | UTF-8 | Python | false | false | 9,858 | py | from fxincome.asset import Bond
from fxincome.utils import get_curve
import datetime
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
from tqdm import tqdm
from dateutil.relativedelta import relativedelta
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_rows', None)
if __name__ == '__main__':
# init_date=datetime.datetime(2022,5,20)
# end_date=datetime.datetime(2022,7,29)
# init_ytm=3.4345
# end_ytm=5
#
# bond=Bond(code='190210',
# initial_date=datetime.datetime(2021,11,19),
# end_date=datetime.datetime(2051,11,19),
# issue_price=100,
# coupon_rate=3.56,
# coupon_type='附息',
# coupon_frequency=1)
# print(bond.get_profit(init_date,end_date,init_ytm,end_ytm))
# import sys
# sys.exit()
address = './rolldown_matrix.xlsx'
bond_type_need = ['政策银行债', '国债', '地方政府债']
asset_df = pd.read_excel(address, header=3, sheet_name='asset')
parameter_df = pd.read_excel(address, sheet_name='parameter').set_index('参数')
# print(parameter_df)
date = parameter_df.at['基准日', '数值']
asset_df['initial_date'] = pd.to_datetime(asset_df['initial_date'])
asset_df['end_date'] = pd.to_datetime(asset_df['end_date'])
asset_df = asset_df[(asset_df['bond_type'].isin(bond_type_need)) &
(asset_df['end_date'] > date) &
(asset_df['code'].str.contains('IB'))].copy()
asset_df['period'] = asset_df['end_date'].apply(lambda x: round((x - date).days / 365))
asset_df['period2'] = asset_df['end_date'].apply(lambda x: round((x - date).days / 365, 2))
def maxx(x, i):
i = len(x) if len(x) < i else i
sort_x = sorted(x)[-i]
return sort_x
asset_df['ranking'] = asset_df[['trading', 'period']].groupby('period').transform(lambda x: x >= maxx(x, 2))
asset_df = asset_df[(asset_df['ranking']) & (asset_df['trading'] > 0)].sort_values(['period2'], ignore_index=True)
asset_df = asset_df.iloc[:, 10:].set_index('code')
curve_dot = asset_df[['period2', 'ytm']].to_numpy()
curve = get_curve(curve_dot, 'HERMIT')
# plt.figure()
# x=np.linspace(0,30,10000)
# plt.plot(x,[curve(i) for i in x] )
# plt.scatter(curve_dot[:,0],curve_dot[:,1],marker='*')
# plt.grid(True)
# plt.xticks(range(0,31))
# # plt.show()
#
# address=r'.\result\rm_result_{}.jpg'.format(123)
# plt.savefig(address,dpi=600)
# sys.exit()
specail_period = parameter_df.at['特殊参考期限', '数值'].split(',')
for spi in specail_period:
spi_code = 'STD.{}Y'.format(spi)
spi_bond_name = '标准券{}Y'.format(spi)
spi_end_date = date + relativedelta(years=int(spi))
spi_rate = curve(float(spi))
asset_df.loc[spi_code] = [spi_bond_name, date, spi_end_date, 100, spi_rate, '附息', 1, '标准券', 1, spi_rate,
float(spi), float(spi), True]
asset_df = asset_df.sort_values(['period2'])
# print(asset_df)
asset_dic = {}
for i, j in asset_df.iterrows():
bond_i = Bond(code=i,
initial_date=j['initial_date'],
end_date=j['end_date'],
issue_price=j['issue_price'],
coupon_rate=j['coupon_rate'],
coupon_type=j['coupon_type'],
coupon_frequency=j['coupon_frequency'])
asset_dic[i] = bond_i
result_columns = [[i, j] for i in asset_dic.keys() for j in asset_dic.keys() if
asset_df.at[i, 'end_date'] <= asset_df.at[j, 'end_date']]
result_df = pd.DataFrame(result_columns, columns=['code_holding', 'code_rolldown'])
# print(asset_df)
with tqdm(total=len(result_df)) as step:
for i, j in result_df.iterrows():
result_df.at[i, 'code_holding_period'] = asset_df.at[j['code_holding'], 'period2']
result_df.at[i, 'code_holding_ytm'] = asset_df.at[j['code_holding'], 'ytm']
result_df.at[i, 'code_rolldown_period'] = asset_df.at[j['code_rolldown'], 'period2']
result_df.at[i, 'code_rolldown_ytm'] = asset_df.at[j['code_rolldown'], 'ytm']
# print(j['code_holding'],date,
# asset_df.at[j['code_holding'],'end_date'],
# asset_df.at[j['code_holding'],'ytm'],
# asset_df.at[j['code_holding'],'ytm'])
result_df.at[i, 'holding_yeild'] = asset_dic[j['code_holding']].get_profit(date,
asset_df.at[j[
'code_holding'], 'end_date'],
asset_df.at[
j['code_holding'], 'ytm'],
asset_df.at[
j['code_holding'], 'ytm'])[1]
rolldown_end_period = (asset_df.at[j['code_rolldown'], 'end_date'] - asset_df.at[
j['code_holding'], 'end_date']).days / 365
rolldown_end_ytm = curve(rolldown_end_period)
# print(j['code_rolldown'],date,
# asset_df.at[j['code_holding'],'end_date'],
# asset_df.at[j['code_rolldown'],'ytm'],
# rolldown_end_ytm)
result_df.at[i, 'yeild'] = asset_dic[j['code_rolldown']].get_profit(date,
asset_df.at[
j['code_holding'], 'end_date'],
asset_df.at[j['code_rolldown'], 'ytm'],
rolldown_end_ytm)[1]
if j['code_holding'] == j['code_rolldown']:
y = result_df.at[i, 'code_rolldown_ytm']
else:
y1 = -5
y2 = 10
while True:
# print(j['code_rolldown'],date,
# asset_df.at[j['code_holding'],'end_date'],
# asset_df.at[j['code_rolldown'],'ytm'],
# y1)
# yeild1=asset_dic[j['code_rolldown']].get_profit(date,
# asset_df.at[j['code_holding'],'end_date'],
# asset_df.at[j['code_rolldown'],'ytm'],
# y1)[1]
y = (y1 + y2) / 2
# print(j['code_rolldown'],date,
# asset_df.at[j['code_holding'],'end_date'],
# asset_df.at[j['code_rolldown'],'ytm'],
# y)
yeild = asset_dic[j['code_rolldown']].get_profit(date,
asset_df.at[j['code_holding'], 'end_date'],
asset_df.at[j['code_rolldown'], 'ytm'],
y)[1]
# print(yeild,result_df.at[i,'holding_yeild'],y)
if abs(yeild - result_df.at[i, 'holding_yeild']) < 0.01:
break
if yeild < result_df.at[i, 'holding_yeild']:
y2 = y
else:
y1 = y
result_df.at[i, 'balance_ytm'] = y
result_df.at[i, 'bp'] = (y - result_df.at[i, 'code_rolldown_ytm']) * 100
step.update(1)
# print(result_df.iloc[:i+1,:])
# print(result_df)
result_df['holding'] = result_df.apply(
lambda x: '{}\n({:.2f}Y,{:.2f}%)'.format(x['code_holding'], x['code_holding_period'], x['code_holding_ytm']),
axis=1)
result_df['rolldown'] = result_df.apply(
lambda x: '{}\n({:.2f}Y,{:.2f}%)'.format(x['code_rolldown'], x['code_rolldown_period'], x['code_rolldown_ytm']),
axis=1)
rank_type = CategoricalDtype(list(result_df['holding'].drop_duplicates()[::-1]), ordered=True)
columns = pd.MultiIndex.from_product([list(result_df['holding'].drop_duplicates()[::-1]), ['yeild', 'bp']])
result_df['holding'] = result_df['holding'].astype(rank_type)
result_df['rolldown'] = result_df['rolldown'].astype(rank_type)
result_df = pd.pivot_table(result_df, index='holding', columns='rolldown', values=['yeild', 'bp'], aggfunc='sum')
result_df.columns = result_df.columns.swaplevel()
result_df = result_df[columns]
result_df = result_df.applymap(lambda x: round(x, 2) if pd.notnull(x) else x)
# print(result_df.columns)
# print(result_df[columns])
time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
address = r'.\result\rm_result_{}.xlsx'.format(time)
wirter = pd.ExcelWriter(address)
result_df.to_excel(wirter, sheet_name='result')
wirter.save()
plt.figure()
x = np.linspace(0, 30, 10000)
plt.plot(x, [curve(i) for i in x])
plt.scatter(curve_dot[:, 0], curve_dot[:, 1], marker='*')
plt.grid(True)
plt.xticks(range(0, 31))
address = r'.\result\rm_result_{}.jpg'.format(time)
plt.savefig(address, dpi=600)
| [
"panda@vip.qq.com"
] | panda@vip.qq.com |
8a1420991c7365f09dd23479368f9c23d3c181f4 | 485cf3c70fcaa68689a2b690b6465f1d6bcf21bd | /Python_Coding_Tips/Code_py/Code(实例源码及使用说明)/01/11/2.列表拼接的4种方法/demo04.py | 9c2228030fefdd2ff56cc3049a75ad004b1c1f83 | [] | no_license | lxz0503/study_20190608 | 5ffe08c4704bb00ad8d1980baf16b8f5e7135ff4 | 47c37798140883b8d6dc21ec5da5bc7a20988ce9 | refs/heads/master | 2022-12-23T17:23:45.039015 | 2021-06-23T14:50:19 | 2021-06-23T14:50:19 | 190,884,812 | 1 | 3 | null | 2022-12-15T23:17:33 | 2019-06-08T12:22:56 | Python | UTF-8 | Python | false | false | 1,015 | py | # *_* coding : UTF-8 *_*
# 开发团队 :明日科技
# 开发人员 :Administrator
# 开发时间 :2019/7/1 15:32
# 文件名称 :demo04.py
# 开发工具 :PyCharm
gem = [["大众",643518],["奔驰",319163],["宝马",265051],["福特",252323],["雪铁龙",227967],["奥迪",255300]]
fra = [["雪铁龙", 698985],["雷诺",547704],["大众",259268],["福特",82633],["宝马",84931],["奔驰",73254]]
eng = [["福特",254082],["大众",203150],["雪铁龙",177298],["奔驰",172238],["宝马",172048],["奥迪",143739]]
for item1, item2, item3 in zip(gem, fra, eng):
print(item1[0], item1[1], " ", item2[0], item2[1], " ", item3[0], item3[1])
for item1, item2, item3 in zip(gem, fra, eng):
item11 = item1[0].ljust(8)
item12 = str(item1[1]).ljust(8)
item21 = item2[0].ljust(8)
item22 = str(item2[1]).ljust(8)
item31 = item1[0].ljust(8)
item32 = str(item3[1]).ljust(8)
print(item11+"\t", item12+"\t", " ", item21+"\t", item22+"\t", " ", item31+"\t", item32)
| [
"lxz_20081025@163.com"
] | lxz_20081025@163.com |
a79f7bf22650c258ef1a7b4b3c06fc6b29264df0 | 4a8085c1a18bc25941af4f45be12640efba28ce1 | /Python Scripts/data_processing.py | 4b2a8f358383bfa9b88e026e001c41fad66bf530 | [] | no_license | Wernerpede/Coursera-Capstone | 2cea18f8c479ca6335fb3463296ad601f40943b1 | e6c214d0ee76a6ff6406ebb8da93cd9166022c47 | refs/heads/master | 2022-12-26T08:23:08.473818 | 2020-09-13T21:23:24 | 2020-09-13T21:23:24 | 290,603,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py |
import pandas as pd
import numpy as np
print('Hello Capstone Project Course!')
df = pd.read_csv('Data-Collisions.csv')
columns = df.columns
unknown = df['ROADCOND'].isnull().sum()
correlation = df.corr()
| [
"gui.werner.007@gmail.com"
] | gui.werner.007@gmail.com |
34a357874eb041b8a0e731878be4b0c4285e7f06 | 4ddb0aeb6e568abb5ea11dafb2ac36c67f02dc63 | /src/ui/web/register_images.py | aa863da8e8722f639984e30f105990b60741c678 | [
"BSD-2-Clause"
] | permissive | longamu/vise | 409175074f85d3daddfd6bb095242400ef2033e8 | 1a8bf5e97cbcdad302cd8d8532fe818b8272382c | refs/heads/master | 2022-12-15T12:07:27.666756 | 2020-09-02T09:37:21 | 2020-09-02T09:37:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,727 | py | #
# ==== Author:
#
# Relja Arandjelovic (relja@robots.ox.ac.uk)
# Visual Geometry Group,
# Department of Engineering Science
# University of Oxford
#
# ==== Copyright:
#
# The library belongs to Relja Arandjelovic and the University of Oxford.
# No usage or redistribution is allowed without explicit permission.
#
import os;
import get_scriptroot;
scriptroot= get_scriptroot.getScriptroot();
tmpDir= os.path.join( scriptroot, 'tmp/' );
import cherrypy;
from PIL import Image;
try:
import PngImagePlugin, JpegImagePlugin, TiffImagePlugin, GifImagePlugin, BmpImagePlugin, PpmImagePlugin; # all this stuff for cx_freeze
except:
pass;
import StringIO;
from dynamic_image import dynamicImage;
from upload import savedTemp;
class registerImages:
def __init__(self, pageTemplate, API_obj):
self.pT= pageTemplate;
self.API_obj= API_obj;
self.def_dsetname= self.API_obj.keys()[0];
@cherrypy.expose
def index(self, docID1= None, uploadID1= None, docID2= None, xl= None, xu= None, yl= None, yu= None, dsetname= None):
if dsetname==None: dsetname= self.def_dsetname;
if docID1!=None:
docID1= int(docID1);
if docID2!=None:
docID2= int(docID2);
if xl!=None: xl= float(xl);
if xu!=None: xu= float(xu);
if yl!=None: yl= float(yl);
if yu!=None: yu= float(yu);
if uploadID1==None:
registerID= self.API_obj[dsetname].register( docID1= docID1, docID2= docID2, xl= xl, xu= xu, yl= yl, yu= yu );
else:
st= savedTemp.load(uploadID1);
registerID= self.API_obj[dsetname].registerExternal( st['compDataFilename'], uploadID1, docID2= docID2, xl= xl, xu= xu, yl= yl, yu= yu );
del st;
outFnPrefix= os.path.join( scriptroot, 'tmp' );
width1= Image.open( os.path.join( outFnPrefix, '%s_%s.jpg' % (registerID,"im1") ) ).size[0];
title= "Image comparison";
headExtra= """
<script language="javascript">
var isIE = document.all ? true : false;
document.onmousemove = getMousePosition;
jsIm1 = new Image();
jsIm2t= new Image();
jsIm1.src ="tmpImage?registerID=%s&imName=im1";
jsIm2t.src="tmpImage?registerID=%s&imName=im2t";
var currentImage= 1;
function getMousePosition(e){
if (!isIE) {
posX= e.pageX; posY= e.pageY;
}
if (isIE) {
posX= event.clientX + document.body.scrollLeft;
posY= event.clientY + document.body.scrollTop;
}
}
function changeTo1(){
document['image'].src= jsIm1.src;
currentImage= 1;
}
function changeTo2(){
document['image'].src= jsIm2t.src;
currentImage= 2;
}
function swapImage(){
if (currentImage==1){
changeTo2();
} else {
changeTo1();
}
}
function findPosX( obj ){
x= 0;
if (obj.offsetParent){
while (1) {
x+= obj.offsetLeft;
if (!obj.offsetParent) break;
obj= obj.offsetParent;
}
}
return x;
}
function mouseMove( obj, e ){
clickX= posX - findPosX(obj);
if (clickX > (obj.width)/2){
changeTo2();
} else {
changeTo1();
}
}
</script>
""" % (registerID, registerID);
body= """
<center>
<table>
<tr>
<td align="center">
<center>Image 1</center>
</td>
<td align="center">
Flip between images by moving the mouse to the left (image 1) or right (image 2) part of the image.
</td>
<td align="center">
<center>Image 2</center>
</td>
</tr>
<tr>
<td align="center">
<img name="im1" onmouseover="javascript:changeTo1();" onmouseclick="javascript:changeTo1();">
<script language="javascript">
document['im1'].src= jsIm1.src
</script>
</td>
<td align="center">
<img name="image" onmousemove="javascript:mouseMove(this);" onmouseclick="javascript:swapImage();">
<script language="javascript">
changeTo1();
</script>
</td>
<td align="center">
<img name="im2" src="tmpImage?registerID=%s&imName=im2&width=%d" onmouseover="javascript:changeTo2();" onmouseclick="javascript:changeTo2();">
</td>
</tr>
<tr>
<td align="center">
<a href="getImageFull?%s">High resolution full image</a><br>
</td>
<td></td>
<td align="center">
<a href="getImageFull?docID=%s">High resolution full image</a><br>
</td>
</tr>
<tr>
<td align="center">
<a href="search?%s">Search on full image</a><br>
</td>
<td></td>
<td align="center">
<a href="search?docID=%s">Search on full image</a><br>
</td>
</tr>
</table>
</center>
""" % ( registerID, width1, \
("docID=%s" % docID1) if uploadID1==None else ("uploadID=%s" % uploadID1), docID2, \
("docID=%s" % docID1) if uploadID1==None else ("uploadID=%s" % uploadID1), docID2 );
return self.pT.get(title= title, headExtra= headExtra, body= body, outOfContainer= True);
@cherrypy.expose
def tmpImage(self, registerID, imName, width= None):
outFnPrefix= os.path.join( scriptroot, 'tmp' );
fn= os.path.join( outFnPrefix, '%s_%s.jpg' % (registerID,imName) );
# for security check filename - !!TODO
cherrypy.response.headers['Content-Type'] = 'image/jpeg';
return dynamicImage.getImageFromFile( fn, width= width );
| [
"thelinuxmaniac@gmail.com"
] | thelinuxmaniac@gmail.com |
c7c5b0151c352832384a07e85f6e49c5f966ec94 | a0947c2778742aec26b1c0600ceca17df42326cd | /Python/PythonInADay2/CSV-Files-Drill/37of79-87.py | c6d72c705eb76b99aaf1d8f9ab163131ca821099 | [] | no_license | JohnCDunn/Course-Work-TTA | 5758319d4607114914ba9723328658bed8fb2024 | 8c4f60d51007dac2ac4cceb84b0f9666e143c0d7 | refs/heads/master | 2021-01-10T16:37:02.609879 | 2016-02-01T18:05:38 | 2016-02-01T18:05:38 | 49,983,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import wx
class Frame(wx.Frame):
def __init__(self, title):
wx.Frame.__init__(self, None,\
title=title, size=(300,250))
panel = wx.Panel(self)
wx.SpinCtrl(panel, value='0', pos=(130, 50), size=(70, 25))
app = wx.App()
frame = Frame("wxPython Widgets!")
frame.Show()
app.MainLoop()
| [
"JohnClydeDunn@Gmail.com"
] | JohnClydeDunn@Gmail.com |
3cacda28f5023df250d156ab5a4eff4b61274f2e | dc77896138400114f6770310591fbfb02e36d3cd | /{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/common/utils.py | cf5bc6fc70109d2f501aa0fa00154039301d810c | [
"MIT"
] | permissive | drgarcia1986/cookiecutter-muffin | 97163a66a57d83dc802223ccbd5307bd1896429d | 7aa861787b4280477a726da99cf9de4047b01d91 | refs/heads/master | 2021-01-01T16:34:08.043952 | 2015-08-27T22:19:35 | 2015-08-27T22:31:22 | 40,458,394 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | import muffin
from .. import app
@app.ps.jinja2.context_processor
def current_user_context():
local = muffin.local(app.loop)
current_user = getattr(local, 'current_user')
return {'user': current_user}
| [
"drgarcia1986@gmail.com"
] | drgarcia1986@gmail.com |
e2b090d8dd6aa936d6da61a45b0266a9f18fbee8 | a53453e2290e7a0f3ed5e885dd212c9601a9220d | /bidimensional.py | 093d424fb51f471093147006de2ad0cd622450ea | [] | no_license | matteog23/Mean-fieldPMP-NeurODE-training | acf8a290a9201b1936c7a41d009234b2c5d40404 | cac5cc0131155488214f04e3a71e925166a8e872 | refs/heads/main | 2023-06-21T11:42:03.585022 | 2021-07-20T08:58:07 | 2021-07-20T08:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,994 | py | import argparse
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
#%matplotlib inline
import time
from IPython import display
from scipy import stats
from scipy import interpolate
from sklearn.neighbors import KernelDensity
from modules.training_nobias_2D import MFOC as MFOC_nobias
from modules.training_bias_2D import MFOC as MFOC_bias
parser = argparse.ArgumentParser(description='Description of all the parameters below')
parser.add_argument("--mu_0",
choices=["bigaussian", "gaussian"],
required=True, type=str,
help="This decides if the initial distirbution mu_0 is a bimodal or unimodal gaussian")
parser.add_argument("--bias", default = False,
help="This decides if the activation function contains a bias or not")
parser.add_argument("--dt", default=0.1, help="This is time-discretization dt")
parser.add_argument("--Lambda", default=0.1, help="This is regularization parameter lambda")
parser.add_argument("--iterations", default=10, help="This is the number of outer iterations (of the shooting method)")
args = parser.parse_args()
mu_0 = args.mu_0
bias = args.bias
dt = args.dt
lbd = args.Lambda
num_iterations = args.iterations
# Setting the right format
dt = np.float(dt)
lbd = np.float(lbd)
num_iterations = np.int(num_iterations)
#Other parameters
N_points = 100
d = 2
T = 1
dt = 0.1
Nt = int(round(T/float(dt)))
print("dt is %s, hence the networks has %s layers" %(dt, Nt))
xmin = -3
xmax = 3
grid_points = 61
# Initial distribution
R = 0.2
if mu_0 == "bigaussian":
center_left = np.array([-1, -1])
center_right = np.array([1, 1])
mid_point = 0
y_left = np.array([-2, -2])
y_right = np.array([2, 2])
else:
center_left = np.array([0, 0])
center_right = np.array([0, 0])
mid_point = 0
y_left = np.array([-1, -1])
y_right = np.array([1, 1])
#Activation functions
def F_nobias(x, theta):
return np.tanh(theta @ x)
def F_bias(x, theta):
return np.tanh(theta[:,:d] @ x + theta[:,d])
if bias == False:
# Setting the parameters needed for the case without bias
theta = np.ones((Nt-1,d,d))
F = F_nobias
Lambda = lbd*np.ones((d,d))
# Running the algorithm
theta, theta_trace = MFOC_nobias(N_points, d, T, dt, R, mu_0, center_left, center_right, y_left, y_right, xmin, xmax, grid_points, theta, F, mid_point, Lambda, num_iterations)
# Plotting the evolution of theta and saving it in the current directory
fig, axs = plt.subplots(theta.shape[1], theta.shape[2], figsize=(15,10))
for k in range(theta_trace.shape[0]):
axs[0,0].scatter(range(Nt-1), theta_trace[k,:,0,0], label="Iteration %s" %k)
axs[0,0].plot(range(Nt-1), theta_trace[k,:,0,0])
axs[0,0].set_xlabel("time")
axs[0,0].legend()
axs[0,0].set_title("Evolution of theta[0,0]")
axs[0,1].scatter(range(Nt-1), theta_trace[k,:,0,1], label="Iteration %s" %k)
axs[0,1].plot(range(Nt-1), theta_trace[k,:,0,1])
axs[0,1].set_xlabel("time")
axs[0,1].legend()
axs[0,1].set_title("Evolution of theta[0,1]")
axs[1,0].scatter(range(Nt-1), theta_trace[k,:,1,0], label="Iteration %s" %k)
axs[1,0].plot(range(Nt-1), theta_trace[k,:,1,0])
axs[1,0].set_xlabel("time")
axs[1,0].legend()
axs[1,0].set_title("Evolution of theta[1,0]")
axs[1,1].scatter(range(Nt-1), theta_trace[k,:,1,1], label="Iteration %s" %k)
axs[1,1].plot(range(Nt-1), theta_trace[k,:,1,1])
axs[1,1].set_xlabel("time")
axs[1,1].legend()
axs[1,1].set_title("Evolution of theta[1,1]")
fig.savefig("theta_evolution.png")
#fig.show()
else:
# Setting the parameters needed for the case with bias
theta = np.ones((Nt-1,d,d+1))
F = F_bias
Lambda = lbd*np.ones((d,d+1))
Lambda[:, d] = 0.1*np.ones(d)
# Running the algorithm
theta, theta_trace = MFOC_bias(N_points, d, T, dt, R, mu_0, center_left, center_right, y_left, y_right, xmin, xmax, grid_points, theta, F, mid_point, Lambda, num_iterations)
# Plotting the evolution of theta and saving it in the current directory
fig, axs = plt.subplots(theta.shape[1], theta.shape[2], figsize=(15,10))
for k in range(theta_trace.shape[0]):
axs[0,0].scatter(range(Nt-1), theta_trace[k,:,0,0], label="Iteration %s" %k)
axs[0,0].plot(range(Nt-1), theta_trace[k,:,0,0])
axs[0,0].set_xlabel("time")
axs[0,0].legend()
axs[0,0].set_title("Evolution of W[0,0]")
axs[0,1].scatter(range(Nt-1), theta_trace[k,:,0,1], label="Iteration %s" %k)
axs[0,1].plot(range(Nt-1), theta_trace[k,:,0,1])
axs[0,1].set_xlabel("time")
axs[0,1].legend()
axs[0,1].set_title("Evolution of W[0,1]")
axs[1,0].scatter(range(Nt-1), theta_trace[k,:,1,0], label="Iteration %s" %k)
axs[1,0].plot(range(Nt-1), theta_trace[k,:,1,0])
axs[1,0].set_xlabel("time")
axs[1,0].legend()
axs[1,0].set_title("Evolution of W[1,0]")
axs[1,1].scatter(range(Nt-1), theta_trace[k,:,1,1], label="Iteration %s" %k)
axs[1,1].plot(range(Nt-1), theta_trace[k,:,1,1])
axs[1,1].set_xlabel("time")
axs[1,1].legend()
axs[1,1].set_title("Evolution of W[1,1]")
axs[0,2].scatter(range(Nt-1), theta_trace[k,:,0,2], label="Iteration %s" %k)
axs[0,2].plot(range(Nt-1), theta_trace[k,:,0,2])
axs[0,2].set_xlabel("time")
axs[0,2].legend()
axs[0,2].set_title("Evolution of tau[0]")
axs[1,2].scatter(range(Nt-1), theta_trace[k,:,1,2], label="Iteration %s" %k)
axs[1,2].plot(range(Nt-1), theta_trace[k,:,1,2])
axs[1,2].set_xlabel("time")
axs[1,2].legend()
axs[1,2].set_title("Evolution of tau[1]")
fig.savefig("theta_evolution.png")
fig.show()
print("End of training, two images have been saved in the current directory")
| [
"81622069+CristinaCipriani@users.noreply.github.com"
] | 81622069+CristinaCipriani@users.noreply.github.com |
d9619bb15f9b45b02de8603b80c741cced2ef501 | f1a461a36df64117a16c3afe7c9b2beb1c1b96cd | /hw2/code/A4_b.py | dd9b055eb19a3b136966d6bd222ee1dc97016163 | [
"MIT"
] | permissive | bobbydyr/CSE546-Machine-Learning | c368f3124d598fcc2e6b248a922e60ef78190c4a | c3f7e487b60506acfa7886d7cc64dfa61550ee4b | refs/heads/master | 2022-12-10T13:53:33.440188 | 2020-09-10T18:05:06 | 2020-09-10T18:05:06 | 269,527,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | from A4_A5_starter import *
if __name__ == '__main__':
n = 500
d = 1000
k = 100
X_train = generate_x(n, d)
y_train, W_init = generate_y(n, d, k, X_train)
lam = compute_initial_lamb(X_train, y_train)[0]
# lam_list = []
number_of_nonezero_feature = []
FDR_list = []
TPR_list = []
lam_list = lam * (1/1.5) ** np.arange(0, 40)
for lam in lam_list:
print("lam", lam)
lasso = LASSO(lam, delta=0.001)
lasso.coordinate_descent(X_train, y_train, np.zeros((d,1)))
last_w = lasso.last_w
print("Number of coe > 0:", sum(abs(last_w) > 0))
number_nonezero = sum(last_w != 0)
number_of_nonezero_feature.append(number_nonezero)
incorrect_none_zero = sum(last_w[W_init == 0] != 0)
number_correct_none_zero = sum(last_w[W_init != 0] != 0)
if incorrect_none_zero == 0:
FDR = 0
FDR_list.append(0)
else:
FDR = incorrect_none_zero / number_nonezero
FDR_list.append(FDR)
TPR = number_correct_none_zero / k
TPR_list.append(TPR)
print("FDR: ", FDR, " TPR: ", TPR)
plt.plot(FDR_list, TPR_list)
plt.xlabel("FDR")
plt.ylabel("TPR")
plt.show()
| [
"bobbydyr@gmail.com"
] | bobbydyr@gmail.com |
1cf474edeb8af5d436ca5e0746c5e49a06c27da6 | 77484e7e53da51690d611b4c208b680e3ffb8bd7 | /5338.py | 60680995be6c06e858e6e8321b678fc4ee792007 | [] | no_license | n-agi/acm-icpc-study | 82355bdfa2d3ab1169065a34c736d46533f72ae8 | 6702b745733b9f0520e719bea0a7bd8456c73e15 | refs/heads/master | 2021-01-17T11:16:15.714638 | 2016-04-17T18:13:31 | 2016-04-17T18:13:31 | 34,612,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | print """ _.-;;-._
'-..-'| || |
'-..-'|_.-;;-._|
'-..-'| || |
'-..-'|_.-''-._|"""
| [
"akanagi95@gmail.com"
] | akanagi95@gmail.com |
c843fa0f28a644a7ffdac1bbce2db916708168ce | 7324209db425ceb226e7d5d429c473d9687b6e79 | /library/api/pgoapi/utilities.py | ae3e9d32802e6183f201aca9421f7a4bb35f81c2 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | infinitewarp/poketrainer | 937be072892e61ecbe90e0264bec9bce4b4ba2f4 | 1b93fea488553ea7ce16103913a0940c22d3f24a | refs/heads/master | 2021-01-14T11:29:54.934138 | 2016-08-17T00:01:11 | 2016-08-17T00:01:11 | 64,431,149 | 1 | 0 | NOASSERTION | 2021-10-10T17:14:44 | 2016-07-28T22:05:26 | Python | UTF-8 | Python | false | false | 6,012 | py | """
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import re
import time
import struct
import ctypes
import xxhash
import logging
from json import JSONEncoder
from binascii import unhexlify
# other stuff
from google.protobuf.internal import encoder
from geopy.geocoders import GoogleV3
from s2sphere import LatLng, Angle, Cap, RegionCoverer, math
log = logging.getLogger(__name__)
def f2i(float):
return struct.unpack('<Q', struct.pack('<d', float))[0]
def f2h(float):
return hex(struct.unpack('<Q', struct.pack('<d', float))[0])
def h2f(hex):
return struct.unpack('<d', struct.pack('<Q', int(hex,16)))[0]
def to_camel_case(value):
return ''.join(word.capitalize() if word else '_' for word in value.split('_'))
# JSON Encoder to handle bytes
class JSONByteEncoder(JSONEncoder):
def default(self, o):
return o.decode('utf-8')
def get_pos_by_name(location_name):
geolocator = GoogleV3()
loc = geolocator.geocode(location_name, timeout=10)
if not loc:
return None
log.info("Location for '%s' found: %s", location_name, loc.address)
log.info('Coordinates (lat/long/alt) for location: %s %s %s', loc.latitude, loc.longitude, loc.altitude)
return (loc.latitude, loc.longitude, loc.altitude)
EARTH_RADIUS = 6371 * 1000
def get_cell_ids(lat, long, radius=1000):
# Max values allowed by server according to this comment:
# https://github.com/AeonLucid/POGOProtos/issues/83#issuecomment-235612285
if radius > 1500:
radius = 1500 # radius = 1500 is max allowed by the server
region = Cap.from_axis_angle(LatLng.from_degrees(lat, long).to_point(), Angle.from_degrees(360*radius/(2*math.pi*EARTH_RADIUS)))
coverer = RegionCoverer()
coverer.min_level = 15
coverer.max_level = 15
cells = coverer.get_covering(region)
cells = cells[:100] # len(cells) = 100 is max allowed by the server
return sorted([x.id() for x in cells])
def get_time(ms = False):
if ms:
return int(round(time.time() * 1000))
else:
return int(round(time.time()))
def get_format_time_diff(low, high, ms = True):
diff = (high - low)
if ms:
m, s = divmod(diff / 1000, 60)
else:
m, s = divmod(diff, 60)
h, m = divmod(m, 60)
return (h, m, s)
def parse_api_endpoint(api_url):
if not api_url.startswith("https"):
api_url = 'https://{}/rpc'.format(api_url)
return api_url
class Rand48(object):
def __init__(self, seed):
self.n = seed
def seed(self, seed):
self.n = seed
def srand(self, seed):
self.n = (seed << 16) + 0x330e
def next(self):
self.n = (25214903917 * self.n + 11) & (2**48 - 1)
return self.n
def drand(self):
return self.next() / 2**48
def lrand(self):
return self.next() >> 17
def mrand(self):
n = self.next() >> 16
if n & (1 << 31):
n -= 1 << 32
return n
def long_to_bytes (val, endianness='big'):
"""
Use :ref:`string formatting` and :func:`~binascii.unhexlify` to
convert ``val``, a :func:`long`, to a byte :func:`str`.
:param long val: The value to pack
:param str endianness: The endianness of the result. ``'big'`` for
big-endian, ``'little'`` for little-endian.
If you want byte- and word-ordering to differ, you're on your own.
Using :ref:`string formatting` lets us use Python's C innards.
"""
# one (1) hex digit per four (4) bits
width = val.bit_length()
# unhexlify wants an even multiple of eight (8) bits, but we don't
# want more digits than we need (hence the ternary-ish 'or')
width += 8 - ((width % 8) or 8)
# format width specifier: four (4) bits per hex digit
fmt = '%%0%dx' % (width // 4)
# prepend zero (0) to the width, to zero-pad the output
s = unhexlify(fmt % val)
if endianness == 'little':
# see http://stackoverflow.com/a/931095/309233
s = s[::-1]
return s
def generateLocation1(authticket, lat, lng, alt):
firstHash = xxhash.xxh32(authticket, seed=0x1B845238).intdigest()
locationBytes = d2h(lat) + d2h(lng) + d2h(alt)
if not alt:
alt = "\x00\x00\x00\x00\x00\x00\x00\x00"
return xxhash.xxh32(locationBytes, seed=firstHash).intdigest()
def generateLocation2(lat, lng, alt):
locationBytes = d2h(lat) + d2h(lng) + d2h(alt)
if not alt:
alt = "\x00\x00\x00\x00\x00\x00\x00\x00"
return xxhash.xxh32(locationBytes, seed=0x1B845238).intdigest() #Hash of location using static seed 0x1B845238
def generateRequestHash(authticket, request):
firstHash = xxhash.xxh64(authticket, seed=0x1B845238).intdigest()
return xxhash.xxh64(request, seed=firstHash).intdigest()
def d2h(f):
hex_str = f2h(f)[2:].replace('L','')
hex_str = ("0" * (len(hex_str) % 2)) + hex_str
return unhexlify(hex_str)
| [
"m.hofer117@gmail.com"
] | m.hofer117@gmail.com |
bbc418a42973b051de3e9c10d573895219af86b0 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/web/v20200901/get_web_app_slot.py | dae31f66a42b428754b1c8f79c1670fe27468c36 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,519 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWebAppSlotResult',
'AwaitableGetWebAppSlotResult',
'get_web_app_slot',
'get_web_app_slot_output',
]
@pulumi.output_type
class GetWebAppSlotResult:
"""
A web app, a mobile app backend, or an API app.
"""
def __init__(__self__, availability_state=None, client_affinity_enabled=None, client_cert_enabled=None, client_cert_exclusion_paths=None, client_cert_mode=None, container_size=None, custom_domain_verification_id=None, daily_memory_time_quota=None, default_host_name=None, enabled=None, enabled_host_names=None, host_name_ssl_states=None, host_names=None, host_names_disabled=None, hosting_environment_profile=None, https_only=None, hyper_v=None, id=None, identity=None, in_progress_operation_id=None, is_default_container=None, is_xenon=None, kind=None, last_modified_time_utc=None, location=None, max_number_of_workers=None, name=None, outbound_ip_addresses=None, possible_outbound_ip_addresses=None, redundancy_mode=None, repository_site_name=None, reserved=None, resource_group=None, scm_site_also_stopped=None, server_farm_id=None, site_config=None, slot_swap_status=None, state=None, suspended_till=None, system_data=None, tags=None, target_swap_slot=None, traffic_manager_host_names=None, type=None, usage_state=None):
if availability_state and not isinstance(availability_state, str):
raise TypeError("Expected argument 'availability_state' to be a str")
pulumi.set(__self__, "availability_state", availability_state)
if client_affinity_enabled and not isinstance(client_affinity_enabled, bool):
raise TypeError("Expected argument 'client_affinity_enabled' to be a bool")
pulumi.set(__self__, "client_affinity_enabled", client_affinity_enabled)
if client_cert_enabled and not isinstance(client_cert_enabled, bool):
raise TypeError("Expected argument 'client_cert_enabled' to be a bool")
pulumi.set(__self__, "client_cert_enabled", client_cert_enabled)
if client_cert_exclusion_paths and not isinstance(client_cert_exclusion_paths, str):
raise TypeError("Expected argument 'client_cert_exclusion_paths' to be a str")
pulumi.set(__self__, "client_cert_exclusion_paths", client_cert_exclusion_paths)
if client_cert_mode and not isinstance(client_cert_mode, str):
raise TypeError("Expected argument 'client_cert_mode' to be a str")
pulumi.set(__self__, "client_cert_mode", client_cert_mode)
if container_size and not isinstance(container_size, int):
raise TypeError("Expected argument 'container_size' to be a int")
pulumi.set(__self__, "container_size", container_size)
if custom_domain_verification_id and not isinstance(custom_domain_verification_id, str):
raise TypeError("Expected argument 'custom_domain_verification_id' to be a str")
pulumi.set(__self__, "custom_domain_verification_id", custom_domain_verification_id)
if daily_memory_time_quota and not isinstance(daily_memory_time_quota, int):
raise TypeError("Expected argument 'daily_memory_time_quota' to be a int")
pulumi.set(__self__, "daily_memory_time_quota", daily_memory_time_quota)
if default_host_name and not isinstance(default_host_name, str):
raise TypeError("Expected argument 'default_host_name' to be a str")
pulumi.set(__self__, "default_host_name", default_host_name)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if enabled_host_names and not isinstance(enabled_host_names, list):
raise TypeError("Expected argument 'enabled_host_names' to be a list")
pulumi.set(__self__, "enabled_host_names", enabled_host_names)
if host_name_ssl_states and not isinstance(host_name_ssl_states, list):
raise TypeError("Expected argument 'host_name_ssl_states' to be a list")
pulumi.set(__self__, "host_name_ssl_states", host_name_ssl_states)
if host_names and not isinstance(host_names, list):
raise TypeError("Expected argument 'host_names' to be a list")
pulumi.set(__self__, "host_names", host_names)
if host_names_disabled and not isinstance(host_names_disabled, bool):
raise TypeError("Expected argument 'host_names_disabled' to be a bool")
pulumi.set(__self__, "host_names_disabled", host_names_disabled)
if hosting_environment_profile and not isinstance(hosting_environment_profile, dict):
raise TypeError("Expected argument 'hosting_environment_profile' to be a dict")
pulumi.set(__self__, "hosting_environment_profile", hosting_environment_profile)
if https_only and not isinstance(https_only, bool):
raise TypeError("Expected argument 'https_only' to be a bool")
pulumi.set(__self__, "https_only", https_only)
if hyper_v and not isinstance(hyper_v, bool):
raise TypeError("Expected argument 'hyper_v' to be a bool")
pulumi.set(__self__, "hyper_v", hyper_v)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if in_progress_operation_id and not isinstance(in_progress_operation_id, str):
raise TypeError("Expected argument 'in_progress_operation_id' to be a str")
pulumi.set(__self__, "in_progress_operation_id", in_progress_operation_id)
if is_default_container and not isinstance(is_default_container, bool):
raise TypeError("Expected argument 'is_default_container' to be a bool")
pulumi.set(__self__, "is_default_container", is_default_container)
if is_xenon and not isinstance(is_xenon, bool):
raise TypeError("Expected argument 'is_xenon' to be a bool")
pulumi.set(__self__, "is_xenon", is_xenon)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if last_modified_time_utc and not isinstance(last_modified_time_utc, str):
raise TypeError("Expected argument 'last_modified_time_utc' to be a str")
pulumi.set(__self__, "last_modified_time_utc", last_modified_time_utc)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if max_number_of_workers and not isinstance(max_number_of_workers, int):
raise TypeError("Expected argument 'max_number_of_workers' to be a int")
pulumi.set(__self__, "max_number_of_workers", max_number_of_workers)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if outbound_ip_addresses and not isinstance(outbound_ip_addresses, str):
raise TypeError("Expected argument 'outbound_ip_addresses' to be a str")
pulumi.set(__self__, "outbound_ip_addresses", outbound_ip_addresses)
if possible_outbound_ip_addresses and not isinstance(possible_outbound_ip_addresses, str):
raise TypeError("Expected argument 'possible_outbound_ip_addresses' to be a str")
pulumi.set(__self__, "possible_outbound_ip_addresses", possible_outbound_ip_addresses)
if redundancy_mode and not isinstance(redundancy_mode, str):
raise TypeError("Expected argument 'redundancy_mode' to be a str")
pulumi.set(__self__, "redundancy_mode", redundancy_mode)
if repository_site_name and not isinstance(repository_site_name, str):
raise TypeError("Expected argument 'repository_site_name' to be a str")
pulumi.set(__self__, "repository_site_name", repository_site_name)
if reserved and not isinstance(reserved, bool):
raise TypeError("Expected argument 'reserved' to be a bool")
pulumi.set(__self__, "reserved", reserved)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if scm_site_also_stopped and not isinstance(scm_site_also_stopped, bool):
raise TypeError("Expected argument 'scm_site_also_stopped' to be a bool")
pulumi.set(__self__, "scm_site_also_stopped", scm_site_also_stopped)
if server_farm_id and not isinstance(server_farm_id, str):
raise TypeError("Expected argument 'server_farm_id' to be a str")
pulumi.set(__self__, "server_farm_id", server_farm_id)
if site_config and not isinstance(site_config, dict):
raise TypeError("Expected argument 'site_config' to be a dict")
pulumi.set(__self__, "site_config", site_config)
if slot_swap_status and not isinstance(slot_swap_status, dict):
raise TypeError("Expected argument 'slot_swap_status' to be a dict")
pulumi.set(__self__, "slot_swap_status", slot_swap_status)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if suspended_till and not isinstance(suspended_till, str):
raise TypeError("Expected argument 'suspended_till' to be a str")
pulumi.set(__self__, "suspended_till", suspended_till)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if target_swap_slot and not isinstance(target_swap_slot, str):
raise TypeError("Expected argument 'target_swap_slot' to be a str")
pulumi.set(__self__, "target_swap_slot", target_swap_slot)
if traffic_manager_host_names and not isinstance(traffic_manager_host_names, list):
raise TypeError("Expected argument 'traffic_manager_host_names' to be a list")
pulumi.set(__self__, "traffic_manager_host_names", traffic_manager_host_names)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if usage_state and not isinstance(usage_state, str):
raise TypeError("Expected argument 'usage_state' to be a str")
pulumi.set(__self__, "usage_state", usage_state)
@property
@pulumi.getter(name="availabilityState")
def availability_state(self) -> str:
"""
Management information availability state for the app.
"""
return pulumi.get(self, "availability_state")
@property
@pulumi.getter(name="clientAffinityEnabled")
def client_affinity_enabled(self) -> Optional[bool]:
"""
<code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
"""
return pulumi.get(self, "client_affinity_enabled")
@property
@pulumi.getter(name="clientCertEnabled")
def client_cert_enabled(self) -> Optional[bool]:
"""
<code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
"""
return pulumi.get(self, "client_cert_enabled")
@property
@pulumi.getter(name="clientCertExclusionPaths")
def client_cert_exclusion_paths(self) -> Optional[str]:
"""
client certificate authentication comma-separated exclusion paths
"""
return pulumi.get(self, "client_cert_exclusion_paths")
@property
@pulumi.getter(name="clientCertMode")
def client_cert_mode(self) -> Optional[str]:
"""
This composes with ClientCertEnabled setting.
- ClientCertEnabled: false means ClientCert is ignored.
- ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
- ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or accepted.
"""
return pulumi.get(self, "client_cert_mode")
@property
@pulumi.getter(name="containerSize")
def container_size(self) -> Optional[int]:
"""
Size of the function container.
"""
return pulumi.get(self, "container_size")
@property
@pulumi.getter(name="customDomainVerificationId")
def custom_domain_verification_id(self) -> Optional[str]:
"""
Unique identifier that verifies the custom domains assigned to the app. Customer will add this id to a txt record for verification.
"""
return pulumi.get(self, "custom_domain_verification_id")
@property
@pulumi.getter(name="dailyMemoryTimeQuota")
def daily_memory_time_quota(self) -> Optional[int]:
"""
Maximum allowed daily memory-time quota (applicable on dynamic apps only).
"""
return pulumi.get(self, "daily_memory_time_quota")
@property
@pulumi.getter(name="defaultHostName")
def default_host_name(self) -> str:
"""
Default hostname of the app. Read-only.
"""
return pulumi.get(self, "default_host_name")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
<code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="enabledHostNames")
def enabled_host_names(self) -> Sequence[str]:
"""
Enabled hostnames for the app.Hostnames need to be assigned (see HostNames) AND enabled. Otherwise,
the app is not served on those hostnames.
"""
return pulumi.get(self, "enabled_host_names")
@property
@pulumi.getter(name="hostNameSslStates")
def host_name_ssl_states(self) -> Optional[Sequence['outputs.HostNameSslStateResponse']]:
"""
Hostname SSL states are used to manage the SSL bindings for app's hostnames.
"""
return pulumi.get(self, "host_name_ssl_states")
@property
@pulumi.getter(name="hostNames")
def host_names(self) -> Sequence[str]:
"""
Hostnames associated with the app.
"""
return pulumi.get(self, "host_names")
@property
@pulumi.getter(name="hostNamesDisabled")
def host_names_disabled(self) -> Optional[bool]:
"""
<code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
If <code>true</code>, the app is only accessible via API management process.
"""
return pulumi.get(self, "host_names_disabled")
@property
@pulumi.getter(name="hostingEnvironmentProfile")
def hosting_environment_profile(self) -> Optional['outputs.HostingEnvironmentProfileResponse']:
"""
App Service Environment to use for the app.
"""
return pulumi.get(self, "hosting_environment_profile")
@property
@pulumi.getter(name="httpsOnly")
def https_only(self) -> Optional[bool]:
"""
HttpsOnly: configures a web site to accept only https requests. Issues redirect for
http requests
"""
return pulumi.get(self, "https_only")
@property
@pulumi.getter(name="hyperV")
def hyper_v(self) -> Optional[bool]:
"""
Hyper-V sandbox.
"""
return pulumi.get(self, "hyper_v")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
Managed service identity.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="inProgressOperationId")
def in_progress_operation_id(self) -> str:
"""
Specifies an operation id if this site has a pending operation.
"""
return pulumi.get(self, "in_progress_operation_id")
@property
@pulumi.getter(name="isDefaultContainer")
def is_default_container(self) -> bool:
"""
<code>true</code> if the app is a default container; otherwise, <code>false</code>.
"""
return pulumi.get(self, "is_default_container")
@property
@pulumi.getter(name="isXenon")
def is_xenon(self) -> Optional[bool]:
"""
Obsolete: Hyper-V sandbox.
"""
return pulumi.get(self, "is_xenon")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="lastModifiedTimeUtc")
def last_modified_time_utc(self) -> str:
"""
Last time the app was modified, in UTC. Read-only.
"""
return pulumi.get(self, "last_modified_time_utc")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxNumberOfWorkers")
def max_number_of_workers(self) -> int:
"""
Maximum number of workers.
This only applies to Functions container.
"""
return pulumi.get(self, "max_number_of_workers")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundIpAddresses")
def outbound_ip_addresses(self) -> str:
"""
List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from tenants that site can be hosted with current settings. Read-only.
"""
return pulumi.get(self, "outbound_ip_addresses")
    # Read-only accessors: each property surfaces one field of the resolved
    # getWebAppSlot invoke result via pulumi.get; the docstrings mirror the
    # Azure Web Apps REST API field descriptions.
    @property
    @pulumi.getter(name="possibleOutboundIpAddresses")
    def possible_outbound_ip_addresses(self) -> str:
        """
        List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from all tenants except dataComponent. Read-only.
        """
        return pulumi.get(self, "possible_outbound_ip_addresses")
    @property
    @pulumi.getter(name="redundancyMode")
    def redundancy_mode(self) -> Optional[str]:
        """
        Site redundancy mode
        """
        return pulumi.get(self, "redundancy_mode")
    @property
    @pulumi.getter(name="repositorySiteName")
    def repository_site_name(self) -> str:
        """
        Name of the repository site.
        """
        return pulumi.get(self, "repository_site_name")
    @property
    @pulumi.getter
    def reserved(self) -> Optional[bool]:
        """
        <code>true</code> if reserved; otherwise, <code>false</code>.
        """
        return pulumi.get(self, "reserved")
    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> str:
        """
        Name of the resource group the app belongs to. Read-only.
        """
        return pulumi.get(self, "resource_group")
    @property
    @pulumi.getter(name="scmSiteAlsoStopped")
    def scm_site_also_stopped(self) -> Optional[bool]:
        """
        <code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
        """
        return pulumi.get(self, "scm_site_also_stopped")
    @property
    @pulumi.getter(name="serverFarmId")
    def server_farm_id(self) -> Optional[str]:
        """
        Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
        """
        return pulumi.get(self, "server_farm_id")
    @property
    @pulumi.getter(name="siteConfig")
    def site_config(self) -> Optional['outputs.SiteConfigResponse']:
        """
        Configuration of the app.
        """
        return pulumi.get(self, "site_config")
    @property
    @pulumi.getter(name="slotSwapStatus")
    def slot_swap_status(self) -> 'outputs.SlotSwapStatusResponse':
        """
        Status of the last deployment slot swap operation.
        """
        return pulumi.get(self, "slot_swap_status")
    @property
    @pulumi.getter
    def state(self) -> str:
        """
        Current state of the app.
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="suspendedTill")
    def suspended_till(self) -> str:
        """
        App suspended till in case memory-time quota is exceeded.
        """
        return pulumi.get(self, "suspended_till")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system metadata relating to this resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="targetSwapSlot")
    def target_swap_slot(self) -> str:
        """
        Specifies which deployment slot this app will swap into. Read-only.
        """
        return pulumi.get(self, "target_swap_slot")
    @property
    @pulumi.getter(name="trafficManagerHostNames")
    def traffic_manager_host_names(self) -> Sequence[str]:
        """
        Azure Traffic Manager hostnames associated with the app. Read-only.
        """
        return pulumi.get(self, "traffic_manager_host_names")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="usageState")
    def usage_state(self) -> str:
        """
        State indicating whether the app has exceeded its quota usage. Read-only.
        """
        return pulumi.get(self, "usage_state")
class AwaitableGetWebAppSlotResult(GetWebAppSlotResult):
    """Awaitable view of `GetWebAppSlotResult`.
    The values are already resolved when this object is constructed, so
    awaiting it simply returns a plain `GetWebAppSlotResult` copied field
    by field from this instance.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # `if False: yield self` makes this function a generator (required
        # for __await__) without ever actually yielding.
        if False:
            yield self
        return GetWebAppSlotResult(
            availability_state=self.availability_state,
            client_affinity_enabled=self.client_affinity_enabled,
            client_cert_enabled=self.client_cert_enabled,
            client_cert_exclusion_paths=self.client_cert_exclusion_paths,
            client_cert_mode=self.client_cert_mode,
            container_size=self.container_size,
            custom_domain_verification_id=self.custom_domain_verification_id,
            daily_memory_time_quota=self.daily_memory_time_quota,
            default_host_name=self.default_host_name,
            enabled=self.enabled,
            enabled_host_names=self.enabled_host_names,
            host_name_ssl_states=self.host_name_ssl_states,
            host_names=self.host_names,
            host_names_disabled=self.host_names_disabled,
            hosting_environment_profile=self.hosting_environment_profile,
            https_only=self.https_only,
            hyper_v=self.hyper_v,
            id=self.id,
            identity=self.identity,
            in_progress_operation_id=self.in_progress_operation_id,
            is_default_container=self.is_default_container,
            is_xenon=self.is_xenon,
            kind=self.kind,
            last_modified_time_utc=self.last_modified_time_utc,
            location=self.location,
            max_number_of_workers=self.max_number_of_workers,
            name=self.name,
            outbound_ip_addresses=self.outbound_ip_addresses,
            possible_outbound_ip_addresses=self.possible_outbound_ip_addresses,
            redundancy_mode=self.redundancy_mode,
            repository_site_name=self.repository_site_name,
            reserved=self.reserved,
            resource_group=self.resource_group,
            scm_site_also_stopped=self.scm_site_also_stopped,
            server_farm_id=self.server_farm_id,
            site_config=self.site_config,
            slot_swap_status=self.slot_swap_status,
            state=self.state,
            suspended_till=self.suspended_till,
            system_data=self.system_data,
            tags=self.tags,
            target_swap_slot=self.target_swap_slot,
            traffic_manager_host_names=self.traffic_manager_host_names,
            type=self.type,
            usage_state=self.usage_state)
def get_web_app_slot(name: Optional[str] = None,
                     resource_group_name: Optional[str] = None,
                     slot: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSlotResult:
    """
    A web app, a mobile app backend, or an API app.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. By default, this API returns the production slot.
    :return: An awaitable result describing the requested deployment slot.
    """
    # Marshal arguments into the camelCase names the ARM invoke expects.
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['slot'] = slot
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the provider version when the caller did not set one.
        opts.version = _utilities.get_version()
    # Invoke the 2020-09-01 Web API endpoint; .value is the resolved result.
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20200901:getWebAppSlot', __args__, opts=opts, typ=GetWebAppSlotResult).value
    # Re-wrap every field in the awaitable subclass for async callers.
    return AwaitableGetWebAppSlotResult(
        availability_state=__ret__.availability_state,
        client_affinity_enabled=__ret__.client_affinity_enabled,
        client_cert_enabled=__ret__.client_cert_enabled,
        client_cert_exclusion_paths=__ret__.client_cert_exclusion_paths,
        client_cert_mode=__ret__.client_cert_mode,
        container_size=__ret__.container_size,
        custom_domain_verification_id=__ret__.custom_domain_verification_id,
        daily_memory_time_quota=__ret__.daily_memory_time_quota,
        default_host_name=__ret__.default_host_name,
        enabled=__ret__.enabled,
        enabled_host_names=__ret__.enabled_host_names,
        host_name_ssl_states=__ret__.host_name_ssl_states,
        host_names=__ret__.host_names,
        host_names_disabled=__ret__.host_names_disabled,
        hosting_environment_profile=__ret__.hosting_environment_profile,
        https_only=__ret__.https_only,
        hyper_v=__ret__.hyper_v,
        id=__ret__.id,
        identity=__ret__.identity,
        in_progress_operation_id=__ret__.in_progress_operation_id,
        is_default_container=__ret__.is_default_container,
        is_xenon=__ret__.is_xenon,
        kind=__ret__.kind,
        last_modified_time_utc=__ret__.last_modified_time_utc,
        location=__ret__.location,
        max_number_of_workers=__ret__.max_number_of_workers,
        name=__ret__.name,
        outbound_ip_addresses=__ret__.outbound_ip_addresses,
        possible_outbound_ip_addresses=__ret__.possible_outbound_ip_addresses,
        redundancy_mode=__ret__.redundancy_mode,
        repository_site_name=__ret__.repository_site_name,
        reserved=__ret__.reserved,
        resource_group=__ret__.resource_group,
        scm_site_also_stopped=__ret__.scm_site_also_stopped,
        server_farm_id=__ret__.server_farm_id,
        site_config=__ret__.site_config,
        slot_swap_status=__ret__.slot_swap_status,
        state=__ret__.state,
        suspended_till=__ret__.suspended_till,
        system_data=__ret__.system_data,
        tags=__ret__.tags,
        target_swap_slot=__ret__.target_swap_slot,
        traffic_manager_host_names=__ret__.traffic_manager_host_names,
        type=__ret__.type,
        usage_state=__ret__.usage_state)
@_utilities.lift_output_func(get_web_app_slot)
def get_web_app_slot_output(name: Optional[pulumi.Input[str]] = None,
                            resource_group_name: Optional[pulumi.Input[str]] = None,
                            slot: Optional[pulumi.Input[str]] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebAppSlotResult]:
    """
    A web app, a mobile app backend, or an API app.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. By default, this API returns the production slot.
    """
    # Body is intentionally `...`: lift_output_func supplies the real
    # implementation by lifting get_web_app_slot to accept/return Outputs.
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
312531a60ba1aa5b50498679bb38f3a2ac0b3c92 | a2542199b6895ac931897445fb5f3e20a1f3e161 | /cogs/background_tasks.py | 49c43c99ad3352ca3e3d30f06b45294d0061aad4 | [
"MIT"
] | permissive | rasceta/minigames-bot | e069b1fcf4247681aa06a0c4ec2896ba3a783e34 | 18c1b82af8707d92b2efdcb2f7f7b7de31769607 | refs/heads/master | 2023-02-05T06:22:26.112210 | 2020-12-19T04:27:02 | 2020-12-19T04:27:02 | 273,402,649 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,092 | py | import discord
import asyncio
import random
import datetime
import util
from discord.ext import commands, tasks
class BackgroundTasks(commands.Cog):
    """Cog with a periodic "free coins" giveaway task plus admin commands
    to start/stop it. Uses the bot's DB connection (``self.bot.conn``,
    %s-style placeholders — presumably a MySQL-like DB-API driver; confirm).
    """
    def __init__(self, bot):
        self.bot = bot
    @tasks.loop(minutes=10)
    async def free_coins(self):
        """Every 10 minutes: post a giveaway embed in each configured
        channel, record the message id and a 30s reaction deadline in the
        DB, then after 30s edit every posted embed to a goodbye message."""
        conn = self.bot.conn
        cursor = conn.cursor()
        query = "SELECT free_coins_channel_id from servers"
        cursor.execute(query)
        result = cursor.fetchall()
        free_coins_channel_id_list = [e[0] for e in result]
        img_url = "https://i.imgur.com/egt7kT0.png"
        free_coins_amount = 50
        for channel_id in free_coins_channel_id_list:
            channel = self.bot.get_channel(channel_id)
            if channel is None:
                # Channel no longer visible to the bot; skip this server.
                continue
            embed = discord.Embed(title="Free Coins",
                description=f"Hello, hello! The mysterious coin creature's here. It has returned for all to see! It's here to give you all free coins! Yes! You heard that right! Free coins!",
                color=discord.Color.dark_gold())
            embed.set_thumbnail(url=img_url)
            embed.set_footer(text="React with 💰 quickly!")
            new_message = await channel.send(embed=embed)
            await new_message.add_reaction("💰")
            max_reaction_time = datetime.datetime.now() + datetime.timedelta(seconds=30)
            # Persist the giveaway so reaction handlers elsewhere can
            # validate message id / deadline / amount.
            query = "UPDATE servers SET last_free_coins_message_id = %s, max_free_coins_reaction_time = %s, free_coins_amount = %s WHERE free_coins_channel_id = %s"
            data = (new_message.id, max_reaction_time, free_coins_amount, channel.id)
            cursor.execute(query,data)
            conn.commit()
        # Let the 30-second reaction window elapse.
        await asyncio.sleep(30)
        cursor = conn.cursor()
        query = "SELECT free_coins_channel_id, last_free_coins_message_id from servers"
        cursor.execute(query)
        result_list = cursor.fetchall()
        free_coins_channel_id_list = [e[0] for e in result_list]
        free_coins_message_id_list = [e[1] for e in result_list]
        new_embed = discord.Embed(title="Free Coins",
            description="I must go now! I'll be back whenever!")
        new_embed.set_thumbnail(url="https://i.imgur.com/egt7kT0.png")
        for idx, channel_id in enumerate(free_coins_channel_id_list):
            channel = self.bot.get_channel(channel_id)
            if channel is None:
                continue
            message_id = free_coins_message_id_list[idx]
            message = await channel.fetch_message(message_id)
            # NOTE(review): this check is redundant — `channel is None`
            # already hit `continue` above.
            if channel is not None:
                await message.edit(embed=new_embed)
    @free_coins.before_loop
    async def free_coins_before(self):
        # Don't run the first loop iteration until the bot is connected.
        await self.bot.wait_until_ready()
    @commands.command(name='start')
    async def start(self, ctx, task_name:str=None):
        """Admin-only command: start a named background task."""
        if ctx.author.guild_permissions.administrator:
            task_list = ["freecoins"]
            if task_name is None:
                return
            if task_name in task_list:
                if task_name == "freecoins":
                    self.free_coins.start()
                embed = util.log_embed(f"{task_name} task has been started", "success")
                await ctx.send(embed=embed)
            else:
                await ctx.send(f"There's only {' and '.join(task_list)} task")
            # Remove the invoking command message to keep the channel tidy.
            await ctx.message.delete()
    @commands.command(name='stop')
    async def stop(self, ctx, task_name:str=None):
        """Admin-only command: stop a named background task."""
        if ctx.author.guild_permissions.administrator:
            task_list = ["freecoins"]
            if task_name is None:
                return
            if task_name in task_list:
                if task_name == "freecoins":
                    self.free_coins.stop()
                embed = util.log_embed(f"{task_name} task has been stopped", "success")
                await ctx.send(embed=embed)
            else:
                await ctx.send(f"There's only {' and '.join(task_list)} task")
            await ctx.message.delete()
def setup(bot):
    """discord.py extension entry point: register the BackgroundTasks cog."""
    bot.add_cog(BackgroundTasks(bot))
"rio.sufilin@gmail.com"
] | rio.sufilin@gmail.com |
d41156456c1a71e84a5cdbde4dfaaa83d4cdfa56 | 64b9c531b7a55ebe13706d150dc2ad0152285f08 | /pylotVenv/bin/pip2 | c3403704d8a47139fa2c1aed2620b6eed6544903 | [] | no_license | AdamAly831/course | b69428e057aed2b226280e0f4f93e58246ff5a64 | e0e19e9717370314762885a36a669ed234873124 | refs/heads/master | 2020-05-29T09:16:55.668541 | 2016-09-23T17:45:23 | 2016-09-23T17:45:23 | 69,044,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/Adam/Documents/CodingDdojoPylot/Pylot/pylotVenv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"Adam@Adams-MacBook-Pro-5.local"
] | Adam@Adams-MacBook-Pro-5.local | |
41032164b3bc13db8bd6caa6f95d09bf89111680 | 05c894ee753e3b0610bec82890ac178dc4810dd9 | /week6/tiaozhan24/apple_analysis.py | d7154213968c4f9da2bf3607b85f2ad6bbbc9ede | [] | no_license | monster-shang/shiyanlou | b3d2c1a566508f1ae93bdbbd6383ee8689edad83 | 84a323ae91693b24f67271a82df43a369a0de13f | refs/heads/master | 2021-04-06T16:53:20.822186 | 2018-05-08T14:18:33 | 2018-05-08T14:18:33 | 125,403,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import pandas as pd
def quarter_volume(csv_path='apple.csv'):
    """Return the total trading volume of the second-busiest calendar quarter.

    Improvements over the original: the input path is a parameter (default
    keeps backward compatibility), dates are parsed directly by read_csv
    instead of a separate to_datetime/drop pass, and the runner-up quarter
    is picked with nlargest(2) instead of sorting the whole frame.

    Args:
        csv_path: Path to a CSV with a parseable ``Date`` column and a
            numeric ``Volume`` column. Defaults to ``'apple.csv'``.

    Returns:
        The summed ``Volume`` of the quarter with the second-highest total.
    """
    data = pd.read_csv(csv_path, header=0, parse_dates=['Date'], index_col='Date')
    # Aggregate all numeric columns to calendar quarters.
    quarterly = data.resample('Q').sum()
    # Top-2 quarters by volume, descending; the last of the two is the runner-up.
    second_volume = quarterly['Volume'].nlargest(2).iloc[-1]
    return second_volume
"jshang@live.cn"
] | jshang@live.cn |
cdc2803ddd5193d2deda25b5ec5ccc5bc7cff350 | c13734d2bbe9803293a43edae5b62bc12c05af8e | /Computer Science Workshop/exp15.py | 948a7d07ce0f9c307df718220522740a3bcb9cc5 | [] | no_license | abinjosephjosegiri/KtuCseLab | d2ee9ac870c581ef406ca4b8b7f62d604ce52084 | 3dc7828f46deff6b316dee61a784d328d3899f58 | refs/heads/master | 2021-07-20T14:35:41.905282 | 2019-10-17T16:18:26 | 2019-10-17T16:18:26 | 218,273,487 | 0 | 1 | null | 2020-09-29T13:14:15 | 2019-10-29T11:42:00 | null | UTF-8 | Python | false | false | 254 | py | 15.Diplay a pyramid
# NOTE(review): Python 2 exercise code (statement-form `print`); it will not
# run under Python 3. Documented as-is; no behavior changed.
#a=10
for i in range(1,11):
    # Presumably meant to print leading padding, but range(11, -i) is empty
    # for every i in 1..10, so this loop never executes — TODO confirm the
    # intended range.
    for j in range(11,-i):
        print " ",
    # Print i-1 stars on row i.
    for j in range(1,i):
        print "*",
    # WARNING: this inner loop rebinds the outer loop variable `i`.
    for i in range(i,0,-1):
        print "&",
    # Earlier attempt kept as a (no-op) string-literal statement.
    """for s in range(a,1,-1):
        print " ",
        print "*",
        print "\n"
        a=a-1"""
    print "\n"
"jaseemckclt@gmail.com"
] | jaseemckclt@gmail.com |
7b7fd334b67b1727da4bdc482d2cdcaadfa4dab1 | 0403dcc7cdf0e8174300569969914e885ebc4a9b | /tests/test_scriptdata_longstring.py | e12af73e657048fee3f976929a27d7d4d20b3bfb | [
"BSD-2-Clause"
] | permissive | chrippa/python-flashmedia | 03ea9029ef51871872c87d26384bf8433d8b165c | f5df4987d6d6661a240756435bb8729f82d8d31f | refs/heads/master | 2021-01-19T19:36:09.256165 | 2013-04-29T10:30:07 | 2013-04-29T10:30:07 | 5,651,549 | 15 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py | # vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from . import with_fd
from flashmedia.types import ScriptDataLongString
# Test vectors for the ScriptDataLongString type under test: each is a
# 4-byte big-endian length prefix followed by the encoded payload.
ASCII = b"\x00\x00\x00\x03ABC"
ASCII_SIZE = len(ASCII)
UTF8 = b"\x00\x00\x00\t\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e"
UTF8_SIZE = len(UTF8)
# Length prefix says 8 bytes but the payload ends mid-character:
# 0xe8 0xaa is an incomplete 3-byte UTF-8 sequence.
BROKEN_UTF8 = b"\x00\x00\x00\x08\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
BROKEN_UTF8_SIZE = len(BROKEN_UTF8)
def test_pack_ascii():
    # Packing with an explicit "ascii" codec.
    assert ScriptDataLongString("ABC", "ascii") == ASCII
def test_pack_utf8():
    # With no codec argument the string packs as UTF-8.
    assert ScriptDataLongString("日本語") == UTF8
def test_pack_into():
    # pack_into writes consecutively into a caller-provided buffer and
    # returns the advanced offset each time.
    size = ASCII_SIZE + UTF8_SIZE
    buf = bytearray(size)
    offset = 0
    offset = ScriptDataLongString.pack_into(buf, offset, "ABC", "ascii")
    offset = ScriptDataLongString.pack_into(buf, offset, "日本語")
    assert buf == (ASCII + UTF8)
    assert offset == size
def test_size_ascii():
    assert ScriptDataLongString.size("ABC", "ascii") == ASCII_SIZE
def test_size_utf8():
    assert ScriptDataLongString.size("日本語") == UTF8_SIZE
@with_fd(ASCII)
def test_read_ascii(fd):
    # read() consumes prefix + payload, leaving fd at the end of the value.
    assert ScriptDataLongString.read(fd, "ascii") == "ABC"
    assert fd.tell() == ASCII_SIZE
@with_fd(UTF8)
def test_read_utf8(fd):
    assert ScriptDataLongString.read(fd) == "日本語"
    assert fd.tell() == UTF8_SIZE
@with_fd(BROKEN_UTF8)
def test_read_broken_utf8(fd):
    # The truncated trailing sequence is dropped rather than raising.
    assert ScriptDataLongString.read(fd) == "日本"
    assert fd.tell() == BROKEN_UTF8_SIZE
def test_unpack_from():
    # unpack_from walks one flat buffer, returning (value, new_offset).
    buf = ASCII + UTF8 + BROKEN_UTF8
    offset = 0
    val, offset = ScriptDataLongString.unpack_from(buf, offset)
    assert val == "ABC"
    val, offset = ScriptDataLongString.unpack_from(buf, offset)
    assert val == "日本語"
    val, offset = ScriptDataLongString.unpack_from(buf, offset)
    assert val == "日本"
"chrippa@tanuki.se"
] | chrippa@tanuki.se |
09ce422a599985115f743d7053d33f256b48c224 | 5eb98f99c54db6977522b270267ba2bceba3ab00 | /ImageNet.py | 925f5ea74a28c4ed4c7bb6ac84fd2898998deb58 | [] | no_license | hmaciej/robustness_score | f88d5efd6dc89bf36d25e9782b69b844f8dd1747 | 14e3179b2e419f299810fdf1546e46e1089bfa46 | refs/heads/main | 2023-06-23T03:52:46.726595 | 2021-07-21T17:31:19 | 2021-07-21T17:31:19 | 381,945,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,313 | py | CLASS_TO_CALCULATE = 1000 # reduce it for quick test
PATH_LABEL_TO_WORDNET = '/home/projects/RobutnessScore/imagenet_label_to_wordnet_synset.txt'
PATH_LABEL_TO_WORDNET = '/home/projects/RobutnessScore/imagenet_label_to_wordnet_synset.txt'
PATH_IMAGENET_CLASS = '/home/datasets/imagenet_2012/val/{}/'
PATH_IMAGENET_BBOX = '/home/datasets/imagenet_2012/val/xml/'
PATH_OUT = './'
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import xml.etree.ElementTree as ET
import os
import gc
import sys
import json
import random
import warnings
from PIL import Image
from torchvision import models
from sklearn.metrics import accuracy_score
from cam import CAM, GradCAMpp, SmoothGradCAMpp, ScoreCAM
from efficientnet_pytorch import EfficientNet
np.random.seed(0)
torch.manual_seed(0)
random.seed(0)
os.environ["CUDA_VISIBLE_DEVICES"]=str(0);
device = torch.device("cuda:0")
###
def class_id_to_name(class_id):
    """Return the human-readable ImageNet label for *class_id*, with spaces
    replaced by '_' and commas by '__' (loaded from the label->wordnet
    mapping JSON at PATH_LABEL_TO_WORDNET)."""
    with open(PATH_LABEL_TO_WORDNET) as fh:
        mapping = json.load(fh)
    label = mapping[str(class_id)]['label']
    return label.replace(" ", "_").replace(",", "__")
def class_id_to_code(class_id):
    """Return the WordNet synset code (e.g. 'n01440764') for *class_id*,
    taken from the mapping JSON at PATH_LABEL_TO_WORDNET."""
    with open(PATH_LABEL_TO_WORDNET) as fh:
        mapping = json.load(fh)
    synset = mapping[str(class_id)]['id'].split("-")[0]
    return "n{}".format(synset)
def openXML(path):
    """Parse a PASCAL-VOC style annotation XML file.

    Fixes over the original: the file handle is no longer leaked (the old
    open()/read()/close() skipped close() if fromstring() raised, and the
    handle variable shadowed a builtin name); ET.parse manages the file.

    Args:
        path: Path to an XML file whose root contains ``<object><bndbox>``
            entries with integer ``xmin``/``ymin``/``xmax``/``ymax``.

    Returns:
        List of ``(xmin, ymin, xmax, ymax)`` tuples, one per ``<object>``
        (empty list if the file has none).
    """
    root = ET.parse(path).getroot()
    bbox = []
    for box in root.findall('object'):
        bnd = box.find('bndbox')
        xmin = int(bnd.find('xmin').text)
        ymin = int(bnd.find('ymin').text)
        xmax = int(bnd.find('xmax').text)
        ymax = int(bnd.find('ymax').text)
        bbox.append((xmin, ymin, xmax, ymax))
    return bbox
def getData(class_code):
    """Load every validation image of *class_code* with its bounding boxes.
    Pairs each JPEG under PATH_IMAGENET_CLASS with the same-named XML under
    PATH_IMAGENET_BBOX; entries missing either file are skipped.
    Returns a list of (PIL RGB image, [(x1, y1, x2, y2), ...]) tuples.
    """
    image_class_path = PATH_IMAGENET_CLASS.format(class_code)
    # NOTE(review): PATH_IMAGENET_BBOX contains no '{}' placeholder, so
    # .format() returns it unchanged — presumably all XMLs live in one dir.
    bbox_class_path = PATH_IMAGENET_BBOX.format(class_code)
    results = []
    for name in os.listdir(image_class_path):
        jpg_file = os.path.join(image_class_path, name)
        xml_file = os.path.join(bbox_class_path, name).replace("JPEG", "xml")
        if not (os.path.isfile(jpg_file) and os.path.isfile(xml_file)):
            continue
        img = Image.open(jpg_file).convert('RGB')
        bbox = openXML(xml_file)
        results.append((img, bbox))
    return results
def tansform_bbox(bbox, img, image_size):
    """Map a bounding box from original-image coordinates to the coordinates
    after Resize(image_size) + CenterCrop(image_size).

    (The name keeps the original 'tansform' typo because other code in this
    module calls it by that name.) Cleanups over the original: dead
    intermediate ``bbox = (...)`` assignments removed and the repetitive
    clamping collapsed.

    Args:
        bbox: (x1, y1, x2, y2) box in the original image's pixels.
        img: Object exposing ``width`` and ``height`` (e.g. a PIL image).
        image_size: Side length of the square crop fed to the model.

    Returns:
        (x1, y1, x2, y2) in crop coordinates, clamped at 0. The upper bound
        is intentionally not clamped, matching the original behaviour.
    """
    x1, y1, x2, y2 = bbox
    width, height = img.width, img.height
    # Resize scales the *shorter* side to image_size, keeping aspect ratio.
    if height > width:
        new_w, new_h = image_size, int(image_size * height / width)
    else:
        new_w, new_h = int(image_size * width / height), image_size
    # Scale the box into the resized image.
    x1 = int(x1 * new_w / width)
    x2 = int(x2 * new_w / width)
    y1 = int(y1 * new_h / height)
    y2 = int(y2 * new_h / height)
    # CenterCrop removes equal margins from the longer side; shift the box.
    if new_w > image_size:
        dx = (new_w - image_size) // 2
        x1 -= dx
        x2 -= dx
    if new_h > image_size:
        dy = (new_h - image_size) // 2
        y1 -= dy
        y2 -= dy
    # Clamp coordinates that fell off the left/top edge of the crop.
    return (max(0, x1), max(0, y1), max(0, x2), max(0, y2))
def get_transforms():
    """Build the evaluation preprocessing pipeline.
    Depends on the module-global ``image_size`` set per model in the
    __main__ block. NOTE(review): the mean/std values are the well-known
    CIFAR-10 statistics rather than the usual ImageNet ones — presumably
    deliberate, but worth confirming.
    """
    return transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
def calculate_rs(img, bbox, id_class):
    """Compute the robustness score (RS) of the saliency map for *img*.
    RS is the fraction of (clamped) saliency mass falling inside the union
    of the ground-truth boxes. Uses the module globals ``saliency_maps``,
    ``transform_net``, ``image_size`` and ``device``.
    Returns (rs, mask_in, mask_out): the score plus the saliency map
    restricted to the inside/outside of the boxes.
    """
    orginal_mask, _= saliency_maps(transform_net(img).to(device).unsqueeze(0), id_class)
    # Upsample the CAM to the network input resolution.
    orginal_mask = torch.nn.functional.upsample(orginal_mask, size=(image_size, image_size), mode='bilinear', align_corners=False)
    orginal_mask = orginal_mask.to(device)
    # mask ends up 1 outside every box, 0 inside (built box by box below).
    mask = torch.ones(orginal_mask.shape).to(device)
    for box in bbox:
        x1, y1, x2, y2 = tansform_bbox(box, img, image_size)
        sub_mask_x = torch.ones((image_size)).to(device)
        sub_mask_x[int(x1):int(x2)] = 0
        sub_mask_y = torch.zeros((image_size))
        sub_mask_y[int(y1):int(y2)] = 1
        # Zero the box's columns, but only on the box's rows.
        mask[0][0][sub_mask_y.nonzero()] *= sub_mask_x
    mask_in = orginal_mask.detach().clone().mul(1-mask).clamp(max = 1)
    mask_out = orginal_mask.detach().clone().mul(mask).clamp(max = 1)
    orginal_mask = orginal_mask.clamp(max = 1)
    # Share of saliency inside the boxes; 0 when the map is all zeros.
    rs = mask_in.sum().item() / orginal_mask.sum().item() if orginal_mask.sum().item() > 0 else 0
    return rs, mask_in, mask_out
def get_top_5_classes(img):
    """Return the model's top-5 class ids for *img* (highest score first).
    Uses the module globals ``net``, ``transform_net`` and ``device``.
    """
    out = net(transform_net(img).unsqueeze(0).to(device))
    # argsort is ascending, so reverse and keep the first five.
    return out.argsort().detach().cpu().numpy()[0][::-1][:5]
def calclulate_class(data, id_class):
    """Evaluate one ImageNet class: top-5 accuracy and mean robustness score.
    (Name keeps the original 'calclulate' typo; ``run`` calls it by this name.)
    Args:
        data: list of (PIL image, bounding boxes) pairs from ``getData``.
        id_class: integer ImageNet class id.
    Returns:
        (id_class, top5_accuracy, mean_rs, class_name).
    """
    result = []
    result_rs = []
    for index, (img, bbox) in enumerate(data):
        is_in_top_5 = id_class in get_top_5_classes(img)
        rs, _, _ = calculate_rs(img, bbox, id_class)
        result.append(is_in_top_5)
        result_rs.append(rs)
    # Top-5 accuracy: fraction of images whose top-5 contains the true class.
    ground_true = [True]*len(result)
    acc = accuracy_score(result, ground_true)
    # NOTE(review): raises ZeroDivisionError when `data` is empty.
    rs_mean = np.array(result_rs).sum()/len(result_rs)
    print("id:{} acc:{:.4f} rs:{:.4f} name:{}".format(id_class, acc, rs_mean, class_id_to_name(id_class)))
    return (id_class, acc, rs_mean, class_id_to_name(id_class))
def run(name):
    """Evaluate all classes and write ``id;acc;crs;name`` CSV rows to *name*
    under PATH_OUT.
    Skips the whole run if the output file already exists (cheap resume
    marker). Warnings are suppressed for the duration of the evaluation.
    """
    path = os.path.join(PATH_OUT, name)
    if os.path.isfile(path):
        return
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        with open(path, 'w') as csv:
            csv.write("id;acc;crs;name\r\n")
            for i in range(CLASS_TO_CALCULATE):
                data = getData(class_id_to_code(i))
                results = calclulate_class(data, i)
                csv.write("{};{:.4f};{:.4f};{}\r\n".format(results[0], results[1], results[2], results[3]))
###
if __name__ == "__main__":
    # Entry point: for each (model, CAM variant) pair, set the module
    # globals (net / image_size / saliency_maps / transform_net) that the
    # helpers above read, then write one results CSV via run().
    net = models.resnet152(pretrained=True)
    net = net.to(device)
    net.eval()
    image_size = 224
    # ImageNet + ResNet-152 + GradCAM++
    print(">>> ImageNet + ResNet-152 + GradCAM++")
    saliency_maps = GradCAMpp(net, net.layer4[2].conv3)
    transform_net = get_transforms()
    run("ImageNet__ResNet_152__GradCAMpp.csv")
    # ImageNet + ResNet-152 + SmoothGrad-Cam++
    print(">>> ImageNet + ResNet-152 + SmoothGrad-Cam++")
    saliency_maps = SmoothGradCAMpp(net, net.layer4[2].conv3)
    transform_net = get_transforms()
    run("ImageNet__ResNet_152__SmoothGradCAMpp.csv")
    # ImageNet + ResNet-152 + ScoreCAM
    print(">>> ImageNet + ResNet-152 + ScoreCAM")
    saliency_maps = ScoreCAM(net, net.layer4[2].conv3)
    transform_net = get_transforms()
    run("ImageNet__ResNet_152__ScoreCAM.csv")
    ###
    # ImageNet + AlexNet + GradCAM++
    net = models.alexnet(pretrained=True)
    net = net.to(device)
    net.eval()
    image_size = 224
    print(">>> ImageNet + AlexNet + GradCAM++")
    saliency_maps = GradCAMpp(net, net._modules['avgpool'])
    transform_net = get_transforms()
    run("ImageNet__AlexNet__GradCAMpp.csv")
    ###
    # ImageNet + EfficientNet-B0 + GradCAM++
    net = EfficientNet.from_pretrained('efficientnet-b0')
    net = net.to(device)
    net.eval()
    image_size = 224
    print(">>> ImageNet + EfficientNet-B0 + GradCAM++")
    saliency_maps = GradCAMpp(net, net._modules['_conv_head'])
    transform_net = get_transforms()
    run("ImageNet__EfficientNet-B0__GradCAMpp.csv")
    # ImageNet + EfficientNet-B3 + GradCAM++
    # Note: EfficientNet variants use their native input resolutions below.
    net = EfficientNet.from_pretrained('efficientnet-b3')
    net = net.to(device)
    net.eval()
    image_size = 300
    print(">>> ImageNet + EfficientNet-B3 + GradCAM++")
    saliency_maps = GradCAMpp(net, net._modules['_conv_head'])
    transform_net = get_transforms()
    run("ImageNet__EfficientNet-B3__GradCAMpp.csv")
    # ImageNet + EfficientNet-B7 + GradCAM++
    net = EfficientNet.from_pretrained('efficientnet-b7')
    net = net.to(device)
    net.eval()
    image_size = 600
    print(">>> ImageNet + EfficientNet-B7 + GradCAM++")
    saliency_maps = GradCAMpp(net, net._modules['_conv_head'])
    transform_net = get_transforms()
    run("ImageNet__EfficientNet-B7__GradCAMpp.csv")
"kamil@szyc.org"
] | kamil@szyc.org |
d8008a32e7bb7e5c99b26969c80a158e3039a4bb | 408ffc3d540db66a44565c27b7b99985874fe2e6 | /www/markdown2.py | cd07e4bdf3980e4cf6ba5f8a416f6e0520559ce3 | [
"Apache-2.0"
] | permissive | cocomilk2012/awesome-python3-webapp-github | 665b1c4c5b9fe163c1f3dbba059a8520f8a0234c | 2c1dd28f3dbcf1d72045e710703aba18a9310309 | refs/heads/master | 2020-03-26T07:38:38.670697 | 2018-08-22T06:00:27 | 2018-08-22T06:00:27 | 144,663,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97,649 | py | #!/usr/bin/env python
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
from __future__ import generators
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extra syntax options (see -x|--extras option below and
see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
* code-friendly: Disable _ and __ for em and strong.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* fenced-code-blocks: Allows a code block to not have to be indented
by fencing it with '```' on a line before and after. Based on
<http://github.github.com/github-flavored-markdown/> with support for
syntax highlighting.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports
"pre" and "code" tags. Add an issue if you require this for other tags.
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
have markdown processing be done on its contents. Similar to
<http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
some limitations.
* metadata: Extract metadata from a leading '---'-fenced block.
See <https://github.com/trentm/python-markdown2/issues/77> for details.
* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
<http://en.wikipedia.org/wiki/Nofollow>.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* smarty-pants: Replaces ' and " with curly quotation marks or curly
apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
and ellipses.
* toc: The returned HTML string gets a new "toc_html" attribute which is
a Table of Contents for the document. (experimental)
* xml: Passes one-liner processing instructions and namespaced XML tags.
* tables: Tables using the same format as GFM
<https://help.github.com/articles/github-flavored-markdown#tables> and
PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
* wiki-tables: Google Code Wiki-style tables. See
<http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
"""
# Dev Notes:
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
__version_info__ = (2, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
import sys
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
#---- Python version compat
try:
from urllib.parse import quote # python3
except ImportError:
from urllib import quote # python2
if sys.version_info[:2] < (2,4):
def reversed(sequence):
for i in sequence[::-1]:
yield i
# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
py3 = False
try:
bytes
except NameError:
bytes = str
base_string_type = basestring
elif sys.version_info[0] >= 3:
py3 = True
unicode = str
base_string_type = str
#---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
SECRET_SALT = bytes(randint(0, 1000000))
def _hash_text(s):
    """Return a salted md5 placeholder token for *s*, prefixed with 'md5-'."""
    digest = md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
    return 'md5-' + digest
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_text(ch))
for ch in '\\`*_{}[]()>#+-.!'])
#---- exceptions
class MarkdownError(Exception):
    """Exception type for errors raised by this module."""
    pass
#---- public api
def markdown_path(path, encoding="utf-8",
                  html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
                  safe_mode=None, extras=None, link_patterns=None,
                  use_file_vars=False):
    """Read the file at *path* (decoded with *encoding*) and convert its
    contents from Markdown to HTML. All other options are passed straight
    through to the `Markdown` converter.
    """
    # `with` guarantees the handle closes even if read() raises; the
    # original open()/read()/close() leaked the handle on error.
    with codecs.open(path, 'r', encoding) as fp:
        text = fp.read()
    return Markdown(html4tags=html4tags, tab_width=tab_width,
                    safe_mode=safe_mode, extras=extras,
                    link_patterns=link_patterns,
                    use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
             safe_mode=None, extras=None, link_patterns=None,
             use_file_vars=False):
    """Convert Markdown *text* to HTML; options mirror the `Markdown` class."""
    converter = Markdown(html4tags=html4tags, tab_width=tab_width,
                         safe_mode=safe_mode, extras=extras,
                         link_patterns=link_patterns,
                         use_file_vars=use_file_vars)
    return converter.convert(text)
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
    def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
                 extras=None, link_patterns=None, use_file_vars=False):
        """Configure the converter.
        `extras` given here are merged on top of any class-level `extras`
        (which subclasses may set); `safe_mode=True` is normalized to the
        string "replace" for backward compatibility.
        """
        if html4tags:
            self.empty_element_suffix = ">"
        else:
            # XHTML-style self-closing tags by default.
            self.empty_element_suffix = " />"
        self.tab_width = tab_width
        # For compatibility with earlier markdown2.py and with
        # markdown.py's safe_mode being a boolean,
        # safe_mode == True -> "replace"
        if safe_mode is True:
            self.safe_mode = "replace"
        else:
            self.safe_mode = safe_mode
        # Massaging and building the "extras" info.
        # Normalize both class-level and argument extras to dicts mapping
        # extra name -> optional argument (None when none given).
        if self.extras is None:
            self.extras = {}
        elif not isinstance(self.extras, dict):
            self.extras = dict([(e, None) for e in self.extras])
        if extras:
            if not isinstance(extras, dict):
                extras = dict([(e, None) for e in extras])
            self.extras.update(extras)
        assert isinstance(self.extras, dict)
        if "toc" in self.extras and not "header-ids" in self.extras:
            self.extras["header-ids"] = None # "toc" implies "header-ids"
        # Snapshot so reset() can restore the configured extras per convert().
        self._instance_extras = self.extras.copy()
        self.link_patterns = link_patterns
        self.use_file_vars = use_file_vars
        self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
        self._escape_table = g_escape_table.copy()
        if "smarty-pants" in self.extras:
            # Quotes must be hashed too so smart-quote processing can
            # distinguish literal from markup quotes.
            self._escape_table['"'] = _hash_text('"')
            self._escape_table["'"] = _hash_text("'")
    def reset(self):
        """Clear per-document state so the instance can convert another
        document without leaking link definitions, hashed HTML or footnotes
        from the previous one. Called at the start of every convert().
        """
        self.urls = {}
        self.titles = {}
        self.html_blocks = {}
        self.html_spans = {}
        self.list_level = 0
        # Restore the extras configured at construction time (convert() may
        # mutate self.extras, e.g. via emacs-style file variables).
        self.extras = self._instance_extras.copy()
        if "footnotes" in self.extras:
            self.footnotes = {}
            self.footnote_ids = []
        if "header-ids" in self.extras:
            self._count_from_header_id = {} # no `defaultdict` in Python 2.4
        if "metadata" in self.extras:
            self.metadata = {}
# Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
# should only be used in <a> tags with an "href" attribute.
_a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
    def convert(self, text):
        """Convert the given Markdown `text` to HTML.

        Returns a `UnicodeWithAttrs` instance which may additionally carry
        a `_toc` attribute ("toc" extra) and a `metadata` attribute
        ("metadata" extra).
        """
        # Main function. The order in which other subs are called here is
        # essential. Link and image substitutions need to happen before
        # _EscapeSpecialChars(), so that any *'s or _'s in the <a>
        # and <img> tags get encoded.

        # Clear the global hashes. If we don't clear these, you get conflicts
        # from other articles when generating a page which contains more than
        # one article (e.g. an index page that shows the N most recent
        # articles):
        self.reset()

        # NOTE(review): Python 2 API -- `unicode` does not exist on Python 3.
        if not isinstance(text, unicode):
            text = unicode(text, 'utf-8')

        if self.use_file_vars:
            # Look for emacs-style file variable hints.
            emacs_vars = self._get_emacs_vars(text)
            if "markdown-extras" in emacs_vars:
                splitter = re.compile("[ ,]+")
                for e in splitter.split(emacs_vars["markdown-extras"]):
                    if '=' in e:
                        ename, earg = e.split('=', 1)
                        try:
                            earg = int(earg)
                        except ValueError:
                            pass
                    else:
                        ename, earg = e, None
                    self.extras[ename] = earg

        # Standardize line endings:
        text = re.sub("\r\n|\r", "\n", text)

        # Make sure $text ends with a couple of newlines:
        text += "\n\n"

        # Convert all tabs to spaces.
        text = self._detab(text)

        # Strip any lines consisting only of spaces and tabs.
        # This makes subsequent regexen easier to write, because we can
        # match consecutive blank lines with /\n+/ instead of something
        # contorted like /[ \t]*\n+/ .
        text = self._ws_only_line_re.sub("", text)

        # strip metadata from head and extract
        if "metadata" in self.extras:
            text = self._extract_metadata(text)

        text = self.preprocess(text)

        # Fenced code blocks must be hashed before safe-mode span hashing
        # (and after it when safe mode is on) so their contents survive.
        if "fenced-code-blocks" in self.extras and not self.safe_mode:
            text = self._do_fenced_code_blocks(text)

        if self.safe_mode:
            text = self._hash_html_spans(text)

        # Turn block-level HTML blocks into hash entries
        text = self._hash_html_blocks(text, raw=True)

        if "fenced-code-blocks" in self.extras and self.safe_mode:
            text = self._do_fenced_code_blocks(text)

        # Strip link definitions, store in hashes.
        if "footnotes" in self.extras:
            # Must do footnotes first because an unlucky footnote defn
            # looks like a link defn:
            #   [^4]: this "looks like a link defn"
            text = self._strip_footnote_definitions(text)
        text = self._strip_link_definitions(text)

        text = self._run_block_gamut(text)

        if "footnotes" in self.extras:
            text = self._add_footnotes(text)

        text = self.postprocess(text)

        text = self._unescape_special_chars(text)

        if self.safe_mode:
            text = self._unhash_html_spans(text)

        if "nofollow" in self.extras:
            text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)

        text += "\n"

        rv = UnicodeWithAttrs(text)
        if "toc" in self.extras:
            rv._toc = self._toc
        if "metadata" in self.extras:
            rv.metadata = self.metadata
        return rv
def postprocess(self, text):
"""A hook for subclasses to do some postprocessing of the html, if
desired. This is called before unescaping of special chars and
unhashing of raw HTML spans.
"""
return text
def preprocess(self, text):
"""A hook for subclasses to do some preprocessing of the Markdown, if
desired. This is called after basic formatting of the text, but prior
to any extras, safe mode, etc. processing.
"""
return text
# Is metadata if the content starts with '---'-fenced `key: value`
# pairs. E.g. (indented for presentation):
# ---
# foo: bar
# another-var: blah blah
# ---
_metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
def _extract_metadata(self, text):
# fast test
if not text.startswith("---"):
return text
match = self._metadata_pat.match(text)
if not match:
return text
tail = text[len(match.group(0)):]
metadata_str = match.group(1).strip()
for line in metadata_str.split('\n'):
key, value = line.split(':', 1)
self.metadata[key.strip()] = value.strip()
return tail
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith('"') and val.endswith('"')
or val.startswith('"') and val.endswith('"')):
emacs_vars[var] = val[1:-1]
return emacs_vars
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
_html_markdown_attr_re = re.compile(
r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
elif 'markdown-in-html' in self.extras and 'markdown=' in html:
first_line = html.split('\n', 1)[0]
m = self._html_markdown_attr_re.search(first_line)
if m:
lines = html.split('\n')
middle = '\n'.join(lines[1:-1])
last_line = lines[-1]
first_line = first_line[:m.start()] + first_line[m.end():]
f_key = _hash_text(first_line)
self.html_blocks[f_key] = first_line
l_key = _hash_text(last_line)
self.html_blocks[l_key] = last_line
return ''.join(["\n\n", f_key,
"\n\n", middle, "\n\n",
l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
    def _hash_html_blocks(self, text, raw=False):
        """Hashify HTML blocks

        We only want to do this for block-level HTML tags, such as headers,
        lists, and tables. That's because we still want to wrap <p>s around
        "paragraphs" that are wrapped in non-block-level tags, such as anchors,
        phrase emphasis, and spans. The list of tags we're looking for is
        hard-coded.

        @param raw {boolean} indicates if these are raw HTML blocks in
            the original source. It makes a difference in "safe" mode.
        """
        if '<' not in text:
            return text

        # Pass `raw` value into our calls to self._hash_html_block_sub.
        hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)

        # First, look for nested blocks, e.g.:
        #   <div>
        #       <div>
        #       tags for inner block must be indented.
        #       </div>
        #   </div>
        #
        # The outermost tags must start at the left margin for this to match, and
        # the inner nested divs must be indented.
        # We need to do this before the next, more liberal match, because the next
        # match will start at the first `<div>` and stop at the first `</div>`.
        text = self._strict_tag_block_re.sub(hash_html_block_sub, text)

        # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
        text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)

        # Special case just for <hr />. It was easier to make a special
        # case than to make the other regex more complicated.
        if "<hr" in text:
            _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
            text = _hr_tag_re.sub(hash_html_block_sub, text)

        # Special case for standalone HTML comments: scan the text for
        # "<!-- ... -->" spans that sit on their own "paragraph" (blank
        # lines, or document start/end, on both sides) and hash them too.
        if "<!--" in text:
            start = 0
            while True:
                # Delimiters for next comment block.
                try:
                    start_idx = text.index("<!--", start)
                except ValueError:
                    break
                try:
                    end_idx = text.index("-->", start_idx) + 3
                except ValueError:
                    break

                # Start position for next comment block search.
                start = end_idx

                # Validate whitespace before comment.
                if start_idx:
                    # - Up to `tab_width - 1` spaces before start_idx.
                    for i in range(self.tab_width - 1):
                        if text[start_idx - 1] != ' ':
                            break
                        start_idx -= 1
                        if start_idx == 0:
                            break
                    # - Must be preceded by 2 newlines or hit the start of
                    #   the document.
                    if start_idx == 0:
                        pass
                    elif start_idx == 1 and text[0] == '\n':
                        start_idx = 0  # to match minute detail of Markdown.pl regex
                    elif text[start_idx-2:start_idx] == '\n\n':
                        pass
                    else:
                        break

                # Validate whitespace after comment.
                # - Any number of spaces and tabs.
                while end_idx < len(text):
                    if text[end_idx] not in ' \t':
                        break
                    end_idx += 1
                # - Must be following by 2 newlines or hit end of text.
                if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
                    continue

                # Escape and hash (must match `_hash_html_block_sub`).
                html = text[start_idx:end_idx]
                if raw and self.safe_mode:
                    html = self._sanitize_html(html)
                key = _hash_text(html)
                self.html_blocks[key] = html
                text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]

        if "xml" in self.extras:
            # Treat XML processing instructions and namespaced one-liner
            # tags as if they were block HTML tags. E.g., if standalone
            # (i.e. are their own paragraph), the following do not get
            # wrapped in a <p> tag:
            #    <?foo bar?>
            #
            #    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
            _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
            text = _xml_oneliner_re.sub(hash_html_block_sub, text)

        return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
    def _run_block_gamut(self, text):
        """Run all block-level transformations (headers, horizontal rules,
        lists, tables, code blocks, block quotes, paragraphs) over `text`.

        The order of the passes below is significant; do not reorder
        casually.
        """
        # These are all the transformations that form block-level
        # tags like paragraphs, headers, and list items.

        if "fenced-code-blocks" in self.extras:
            text = self._do_fenced_code_blocks(text)

        text = self._do_headers(text)

        # Do Horizontal Rules:
        # On the number of spaces in horizontal rules: The spec is fuzzy: "If
        # you wish, you may use spaces between the hyphens or asterisks."
        # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
        # hr chars to one or two. We'll reproduce that limit here.
        hr = "\n<hr"+self.empty_element_suffix+"\n"
        text = re.sub(self._hr_re, hr, text)

        text = self._do_lists(text)

        if "pyshell" in self.extras:
            text = self._prepare_pyshell_blocks(text)
        if "wiki-tables" in self.extras:
            text = self._do_wiki_tables(text)
        if "tables" in self.extras:
            text = self._do_tables(text)

        text = self._do_code_blocks(text)

        text = self._do_block_quotes(text)

        # We already ran _HashHTMLBlocks() before, in Markdown(), but that
        # was to escape raw HTML in the original Markdown source. This time,
        # we're escaping the markup we've just created, so that we don't wrap
        # <p> tags around block-level tags.
        text = self._hash_html_blocks(text)

        text = self._form_paragraphs(text)

        return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
def _table_sub(self, match):
head, underline, body = match.groups()
# Determine aligns for columns.
cols = [cell.strip() for cell in underline.strip('| \t\n').split('|')]
align_from_col_idx = {}
for col_idx, col in enumerate(cols):
if col[0] == ':' and col[-1] == ':':
align_from_col_idx[col_idx] = ' align="center"'
elif col[0] == ':':
align_from_col_idx[col_idx] = ' align="left"'
elif col[-1] == ':':
align_from_col_idx[col_idx] = ' align="right"'
# thead
hlines = ['<table>', '<thead>', '<tr>']
cols = [cell.strip() for cell in head.strip('| \t\n').split('|')]
for col_idx, col in enumerate(cols):
hlines.append(' <th%s>%s</th>' % (
align_from_col_idx.get(col_idx, ''),
self._run_span_gamut(col)
))
hlines.append('</tr>')
hlines.append('</thead>')
# tbody
hlines.append('<tbody>')
for line in body.strip('\n').split('\n'):
hlines.append('<tr>')
cols = [cell.strip() for cell in line.strip('| \t\n').split('|')]
for col_idx, col in enumerate(cols):
hlines.append(' <td%s>%s</td>' % (
align_from_col_idx.get(col_idx, ''),
self._run_span_gamut(col)
))
hlines.append('</tr>')
hlines.append('</tbody>')
hlines.append('</table>')
return '\n'.join(hlines) + '\n'
def _do_tables(self, text):
"""Copying PHP-Markdown and GFM table syntax. Some regex borrowed from
https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538
"""
less_than_tab = self.tab_width - 1
table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^[ ]{0,%d} # allowed whitespace
(.*[|].*) \n # $1: header row (at least one pipe)
^[ ]{0,%d} # allowed whitespace
( # $2: underline row
# underline row with leading bar
(?: \|\ *:?-+:?\ * )+ \|? \n
|
# or, underline row without leading bar
(?: \ *:?-+:?\ *\| )+ (?: \ *:?-+:?\ * )? \n
)
( # $3: data rows
(?:
^[ ]{0,%d}(?!\ ) # ensure line begins with 0 to less_than_tab spaces
.*\|.* \n
)+
)
''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X)
return table_re.sub(self._table_sub, text)
def _wiki_table_sub(self, match):
ttext = match.group(0).strip()
#print 'wiki table: %r' % match.group(0)
rows = []
for line in ttext.splitlines(0):
line = line.strip()[2:-2].strip()
row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
rows.append(row)
#pprint(rows)
hlines = ['<table>', '<tbody>']
for row in rows:
hrow = ['<tr>']
for cell in row:
hrow.append('<td>')
hrow.append(self._run_span_gamut(cell))
hrow.append('</td>')
hrow.append('</tr>')
hlines.append(''.join(hrow))
hlines += ['</tbody>', '</table>']
return '\n'.join(hlines) + '\n'
def _do_wiki_tables(self, text):
# Optimization.
if "||" not in text:
return text
less_than_tab = self.tab_width - 1
wiki_table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line
(^\1\|\|.+?\|\|\n)* # any number of subsequent lines
''' % less_than_tab, re.M | re.X)
return wiki_table_re.sub(self._wiki_table_sub, text)
    def _run_span_gamut(self, text):
        """Run all span-level transformations (code spans, links, auto
        links, emphasis, hard breaks) over `text`.

        The pass order below matters; do not reorder casually.
        """
        # These are all the transformations that occur *within* block-level
        # tags like paragraphs, headers, and list items.

        text = self._do_code_spans(text)

        text = self._escape_special_chars(text)

        # Process anchor and image tags.
        text = self._do_links(text)

        # Make links out of things like `<http://example.com/>`
        # Must come after _do_links(), because you can use < and >
        # delimiters in inline links like [this](<url>).
        text = self._do_auto_links(text)

        if "link-patterns" in self.extras:
            text = self._do_link_patterns(text)

        text = self._encode_amps_and_angles(text)

        text = self._do_italics_and_bold(text)

        if "smarty-pants" in self.extras:
            text = self._do_smart_punctuation(text)

        # Do hard breaks:
        if "break-on-newline" in self.extras:
            text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
        else:
            text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)

        return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in list(self.html_spans.items()):
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
('&', '&'),
('<', '<'),
('>', '>'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_inline_link_title = re.compile(r'''
( # \1
[ \t]+
(['"]) # quote char = \2
(?P<title>.*?)
\2
)? # title is optional
\)$
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
_whitespace = re.compile(r'\s*')
_strip_anglebrackets = re.compile(r'<(.*)>.*')
def _find_non_whitespace(self, text, start):
"""Returns the index of the first non-whitespace character in text
after (and including) start
"""
match = self._whitespace.match(text, start)
return match.end()
def _find_balanced(self, text, start, open_c, close_c):
"""Returns the index where the open_c and close_c characters balance
out - the same number of open_c and close_c are encountered - or the
end of string if it's reached before the balance point is found.
"""
i = start
l = len(text)
count = 1
while count > 0 and i < l:
if text[i] == open_c:
count += 1
elif text[i] == close_c:
count -= 1
i += 1
return i
def _extract_url_and_title(self, text, start):
"""Extracts the url and (optional) title from the tail of a link"""
# text[start] equals the opening parenthesis
idx = self._find_non_whitespace(text, start+1)
if idx == len(text):
return None, None, None
end_idx = idx
has_anglebrackets = text[idx] == "<"
if has_anglebrackets:
end_idx = self._find_balanced(text, end_idx+1, "<", ">")
end_idx = self._find_balanced(text, end_idx, "(", ")")
match = self._inline_link_title.search(text, idx, end_idx)
if not match:
return None, None, None
url, title = text[idx:match.start()], match.group("title")
if has_anglebrackets:
url = self._strip_anglebrackets.sub(r'\1', url)
return url, title, end_idx
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
url, title, url_end_idx = self._extract_url_and_title(text, p)
if url is not None:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
if title:
title_str = ' title="%s"' % (
_xml_escape_attr(title)
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (url.replace('"', '"'),
_xml_escape_attr(link_text),
title_str, img_class_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title = self.titles.get(link_id)
if title:
before = title
title = _xml_escape_attr(title) \
.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (url.replace('"', '"'),
link_text.replace('"', '"'),
title_str, img_class_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result = '<a href="%s"%s>%s</a>' \
% (url, title_str, link_text)
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, base_string_type):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
    # Accumulated (level, id, name) tuples for the "toc" extra, or None
    # when no entries have been recorded yet.
    _toc = None
    def _toc_add_entry(self, level, id, name):
        # Record one header for the table of contents; the display name is
        # unescaped so hashed special chars read correctly in the TOC.
        if self._toc is None:
            self._toc = []
        self._toc.append((level, id, self._unescape_special_chars(name)))
_h_re_base = r'''
(^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
|
(^(\#{1,6}) # \1 = string of #'s
[ \t]%s
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
)
'''
_h_re = re.compile(_h_re_base % '*', re.X | re.M)
_h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
def _h_sub(self, match):
if match.group(1) is not None:
# Setext header
n = {"=": 1, "-": 2}[match.group(3)[0]]
header_group = match.group(2)
else:
# atx header
n = len(match.group(5))
header_group = match.group(6)
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(header_group,
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(header_group)
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
if 'tag-friendly' in self.extras:
return self._h_re_tag_friendly.sub(self._h_sub, text)
return self._h_re.sub(self._h_sub, text)
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
    def _do_lists(self, text):
        """Convert Markdown ordered and unordered lists to <ol>/<ul> HTML."""
        # Form HTML ordered (numbered) and unordered (bulleted) lists.
        # Iterate over each *non-overlapping* list match.
        pos = 0
        while True:
            # Find the *first* hit for either list style (ul or ol). We
            # match ul and ol separately to avoid adjacent lists of different
            # types running into each other (see issue #16).
            hits = []
            for marker_pat in (self._marker_ul, self._marker_ol):
                less_than_tab = self.tab_width - 1
                whole_list = r'''
                (                   # \1 = whole list
                  (                 # \2
                    [ ]{0,%d}
                    (%s)            # \3 = first list item marker
                    [ \t]+
                    (?!\ *\3\ )     # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
                  )
                  (?:.+?)
                  (                 # \4
                      \Z
                    |
                      \n{2,}
                      (?=\S)
                      (?!           # Negative lookahead for another list item marker
                        [ \t]*
                        %s[ \t]+
                      )
                  )
                )
                ''' % (less_than_tab, marker_pat, marker_pat)
                if self.list_level:  # sub-list
                    list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
                else:
                    list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
                                         re.X | re.M | re.S)
                match = list_re.search(text, pos)
                if match:
                    hits.append((match.start(), match))
            if not hits:
                break
            # The earliest match of either style wins this round.
            hits.sort()
            match = hits[0][1]
            start, end = match.span()
            middle = self._list_sub(match)
            text = text[:start] + middle + text[end:]
            pos = start + len(middle)   # start pos for next attempted match
        return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
    def _list_item_sub(self, match):
        """Render one matched list item as an <li> element."""
        item = match.group(4)
        leading_line = match.group(1)
        leading_space = match.group(2)
        # "Loose" items (separated by blank lines) get full block processing
        # (paragraph wrapping); "tight" items only get span processing.
        if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
            item = self._run_block_gamut(self._outdent(item))
        else:
            # Recursion for sub-lists:
            item = self._do_lists(self._outdent(item))
            if item.endswith('\n'):
                item = item[:-1]
            item = self._run_span_gamut(item)
        self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
        return "<li>%s</li>\n" % item
    def _process_list_items(self, list_str):
        """Split one list's raw text into a run of rendered <li> items."""
        # Process the contents of a single ordered or unordered list,
        # splitting it into individual list items.
        # The $g_list_level global keeps track of when we're inside a list.
        # Each time we enter a list, we increment it; when we leave a list,
        # we decrement. If it's zero, we're not in a list anymore.
        #
        # We do this because when we're not inside a list, we want to treat
        # something like this:
        #
        #       I recommend upgrading to version
        #       8. Oops, now this line is treated
        #       as a sub-list.
        #
        # As a single paragraph, despite the fact that the second line starts
        # with a digit-period-space sequence.
        #
        # Whereas when we're inside a list (or sub-list), that line will be
        # treated as the start of a sub-list. What a kludge, huh? This is
        # an aspect of Markdown's syntax that's hard to parse perfectly
        # without resorting to mind-reading. Perhaps the solution is to
        # change the syntax rules such that sub-lists must start with a
        # starting cardinal number; e.g. "1." or "a.".
        self.list_level += 1
        self._last_li_endswith_two_eols = False
        list_str = list_str.rstrip('\n') + '\n'
        list_str = self._list_item_re.sub(self._list_item_sub, list_str)
        self.list_level -= 1
        return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
    def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
        """Syntax-highlight *codeblock* with Pygments using *lexer*.

        Requires the third-party 'pygments' package (imported lazily).
        Extra keyword args are passed to the HTML formatter; the CSS class
        defaults to "codehilite".
        """
        import pygments.formatters
        class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
            def _wrap_code(self, inner):
                """A function for use in a Pygments Formatter which
                wraps in <code> tags.
                """
                yield 0, "<code>"
                for tup in inner:
                    yield tup
                yield 0, "</code>"
            def wrap(self, source, outfile):
                """Return the source with a code, pre, and div."""
                return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
        formatter_opts.setdefault("cssclass", "codehilite")
        formatter = HtmlCodeFormatter(**formatter_opts)
        return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match, is_fenced_code_block=False):
lexer_name = None
if is_fenced_code_block:
lexer_name = match.group(1)
if lexer_name:
formatter_opts = self.extras['fenced-code-blocks'] or {}
codeblock = match.group(2)
codeblock = codeblock[:-1] # drop one trailing newline
else:
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
# Note: "code-color" extra is DEPRECATED.
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
formatter_opts = self.extras['code-color'] or {}
if lexer_name:
def unhash_code( codeblock ):
for key, sanitized in list(self.html_spans.items()):
codeblock = codeblock.replace(key, sanitized)
replacements = [
("&", "&"),
("<", "<"),
(">", ">")
]
for old, new in replacements:
codeblock = codeblock.replace(old, new)
return codeblock
lexer = self._get_pygments_lexer(lexer_name)
if lexer:
codeblock = unhash_code( codeblock )
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
    def _do_code_blocks(self, text):
        """Process Markdown `<pre><code>` blocks."""
        # A code block is one or more lines each indented by at least one
        # tab (or a tab-width of spaces), ended by a less-indented line.
        code_block_re = re.compile(r'''
            (?:\n\n|\A\n?)
            (               # $1 = the code block -- one or more lines, starting with a space/tab
              (?:
                (?:[ ]{%d} | \t)  # Lines must start with a tab or a tab-width of spaces
                .*\n+
              )+
            )
            ((?=^[ ]{0,%d}\S)|\Z)   # Lookahead for non-space at line-start, or end of doc
            # Lookahead to make sure this block isn't already in a code block.
            # Needed when syntax highlighting is being used.
            (?![^<]*\</code\>)
            ''' % (self.tab_width, self.tab_width),
            re.M | re.X)
        return code_block_re.sub(self._code_block_sub, text)
_fenced_code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
(.*?) # $2 = code block content
^```[ \t]*\n # closing fence
''', re.M | re.X | re.S)
def _fenced_code_block_sub(self, match):
return self._code_block_sub(match, is_fenced_code_block=True);
def _do_fenced_code_blocks(self, text):
"""Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
# - to include one or or a run of more backticks the delimiters must
# be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
(.+?) # \2 = The code block
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
# can use as delimters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
('&', '&'),
# Do the angle bracket song and dance:
('<', '<'),
('>', '>'),
]
for before, after in replacements:
text = text.replace(before, after)
hashed = _hash_text(text)
self._escape_table[text] = hashed
return hashed
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"’\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "’%s" % c)
text = text.replace("'%s" % c.capitalize(),
"’%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
    def _do_smart_punctuation(self, text):
        """Fancifies 'single quotes', "double quotes", and apostrophes.
        Converts --, ---, and ... into en dashes, em dashes, and ellipses.
        Inspiration is: <http://daringfireball.net/projects/smartypants/>
        See "test/tm-cases/smarty_pants.text" for a full discussion of the
        support here and
        <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
        discussion of some diversion from the original SmartyPants.

        NOTE(review): the replacements emit literal Unicode punctuation
        characters rather than numeric entities -- confirm that this
        matches the intended output encoding.
        """
        if "'" in text:  # guard for perf
            text = self._do_smart_contractions(text)
            text = self._opening_single_quote_re.sub("‘", text)
            text = self._closing_single_quote_re.sub("’", text)
        if '"' in text:  # guard for perf
            text = self._opening_double_quote_re.sub("“", text)
            text = self._closing_double_quote_re.sub("”", text)
        # '---' must be handled before '--'.
        text = text.replace("---", "—")
        text = text.replace("--", "–")
        text = text.replace("...", "…")
        text = text.replace(" . . . ", "…")
        text = text.replace(". . .", "…")
        return text
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
_bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M);
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
    def _dedent_two_spaces_sub(self, match):
        # Undo the two-space indent added by `_block_quote_sub` inside
        # <pre> blocks, where leading whitespace is significant.
        return re.sub(r'(?m)^  ', '', match.group(1))
    def _block_quote_sub(self, match):
        # Render one matched '>'-quoted region as a <blockquote>.
        bq = match.group(1)
        bq = self._bq_one_level_re.sub('', bq)  # trim one level of quoting
        bq = self._ws_only_line_re.sub('', bq)  # trim whitespace-only lines
        bq = self._run_block_gamut(bq)          # recurse
        bq = re.sub('(?m)^', '  ', bq)
        # These leading spaces screw with <pre> content, so we need to fix that:
        bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
        return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
    def _form_paragraphs(self, text):
        """Wrap remaining blank-line-separated chunks of *text* in <p> tags,
        un-hashing stored HTML blocks and handling "cuddled" lists."""
        # Strip leading and trailing lines:
        text = text.strip('\n')
        # Wrap <p> tags.
        grafs = []
        for i, graf in enumerate(re.split(r"\n{2,}", text)):
            if graf in self.html_blocks:
                # Unhashify HTML blocks
                grafs.append(self.html_blocks[graf])
            else:
                cuddled_list = None
                if "cuddled-lists" in self.extras:
                    # Need to put back trailing '\n' for `_list_item_re`
                    # match at the end of the paragraph.
                    li = self._list_item_re.search(graf + '\n')
                    # Two of the same list marker in this paragraph: a likely
                    # candidate for a list cuddled to preceding paragraph
                    # text (issue 33). Note the `[-1]` is a quick way to
                    # consider numeric bullets (e.g. "1." and "2.") to be
                    # equal.
                    if (li and len(li.group(2)) <= 3 and li.group("next_marker")
                        and li.group("marker")[-1] == li.group("next_marker")[-1]):
                        start = li.start()
                        cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
                        assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
                        graf = graf[:start]
                # Wrap <p> tags.
                graf = self._run_span_gamut(graf)
                grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
                if cuddled_list:
                    grafs.append(cuddled_list)
        return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
'↩</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&', text)
# Encode naked <'s
text = self._naked_lt_re.sub('<', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
text = self._naked_gt_re.sub('>', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "foo@example.com"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <a href="mailto:foo@e
# xample.com">foo
# @example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in list(self._escape_table.items()):
text = text.replace(hash, ch)
return text
    def _outdent(self, text):
        # Remove one level of line-leading tabs or spaces
        # (`_outdent_re` strips a tab or tab-width of spaces per line).
        return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
    """A markdowner class that enables most extras:
    - footnotes
    - code-color (only has effect if 'pygments' Python module on path)
    These are not included:
    - pyshell (specific to Python-related documenting)
    - code-friendly (because it *disables* part of the syntax)
    - link-patterns (because you need to specify some actual
      link-patterns anyway)
    """
    # NOTE: "code-color" is deprecated (see `_code_block_sub`).
    extras = ["footnotes", "code-color"]
#---- internal support functions
class UnicodeWithAttrs(unicode):
    """A subclass of unicode used for the return value of conversion to
    possibly attach some attributes. E.g. the "toc_html" attribute when
    the "toc" extra is used.
    """
    # NOTE(review): `unicode` is the Python 2 name; presumably aliased to
    # `str` for Python 3 earlier in this module -- confirm at file top.
    metadata = None
    _toc = None
    def toc_html(self):
        """Return the HTML for the current TOC.
        This expects the `_toc` attribute to have been set on this instance.
        """
        if self._toc is None:
            return None
        def indent():
            # Two spaces of indent per currently-open <ul> level.
            return '  ' * (len(h_stack) - 1)
        lines = []
        h_stack = [0]   # stack of header-level numbers
        for level, id, name in self._toc:
            if level > h_stack[-1]:
                # Deeper header: open a new nested <ul>.
                lines.append("%s<ul>" % indent())
                h_stack.append(level)
            elif level == h_stack[-1]:
                lines[-1] += "</li>"
            else:
                # Shallower header: close the open lists back up to its level.
                while level < h_stack[-1]:
                    h_stack.pop()
                    if not lines[-1].endswith("</li>"):
                        lines[-1] += "</li>"
                    lines.append("%s</ul></li>" % indent())
            lines.append('%s<li><a href="#%s">%s</a>' % (
                indent(), id, name))
        while len(h_stack) > 1:
            h_stack.pop()
            if not lines[-1].endswith("</li>"):
                lines[-1] += "</li>"
            lines.append("%s</ul>" % indent())
        return '\n'.join(lines) + '\n'
    toc_html = property(toc_html)
## {{{ http://code.activestate.com/recipes/577257/ (r1)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value)
## end of http://code.activestate.com/recipes/577257/ }}}
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line))
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print("dedent: margin=%r" % margin)
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print("dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin))
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
    "text" is the text to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
    be skipped for calculating the indent width and for dedenting.
    This is sometimes useful for docstrings and similar.
    textwrap.dedent(s), but don't expand tabs to spaces
    """
    # Keep line endings so the lines can simply be rejoined afterwards.
    split = text.splitlines(True)
    _dedentlines(split, tabsize=tabsize, skip_first_line=skip_first_line)
    return ''.join(split)
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def _xml_oneliner_re_from_tab_width(tab_width):
    """Standalone XML processing instruction regex."""
    # Matches a single-line XML PI or namespaced empty tag standing alone
    # between blank lines (or at the start/end of the document), indented
    # less than one tab width.
    return re.compile(r"""
        (?:
            (?<=\n\n)       # Starting after a blank line
            |               # or
            \A\n?           # the beginning of the doc
        )
        (                           # save in $1
            [ ]{0,%d}
            (?:
                <\?\w+\b\s+.*?\?>   # XML processing instruction
                |
                <\w+:\w+\b\s+.*?/>  # namespaced single tag
            )
            [ \t]*
            (?=\n{2,}|\Z)       # followed by a blank line or end of document
        )
        """ % (tab_width - 1), re.X)
# Memoize: one compiled regex per tab width.
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
    # Matches a standalone <hr> tag between blank lines (or at the start/end
    # of the document), indented less than one tab width.
    return re.compile(r"""
        (?:
            (?<=\n\n)       # Starting after a blank line
            |               # or
            \A\n?           # the beginning of the doc
        )
        (                       # save in \1
            [ ]{0,%d}
            <(hr)               # start tag = \2
            \b                  # word break
            ([^<>])*?           #
            /?>                 # the matching end tag
            [ \t]*
            (?=\n{2,}|\Z)       # followed by a blank line or end of document
        )
        """ % (tab_width - 1), re.X)
# Memoize: one compiled regex per tab width.
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&')
.replace('"', '"')
.replace('<', '<')
.replace('>', '>'))
if not skip_single_quote:
escaped = escaped.replace("'", "'")
return escaped
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
    """Command-line driver: render Markdown PATHS (or stdin, '-') to HTML.

    Supports --self-test (run doctests) and --compare (also run
    Markdown.pl from the test directory, for development use).
    """
    if argv is None:
        argv = sys.argv
    if not logging.root.handlers:
        logging.basicConfig()
    # --- Option parsing.
    usage = "usage: %prog [PATHS...]"
    version = "%prog "+__version__
    parser = optparse.OptionParser(prog="markdown2", usage=usage,
        version=version, description=cmdln_desc,
        formatter=_NoReflowFormatter())
    parser.add_option("-v", "--verbose", dest="log_level",
                      action="store_const", const=logging.DEBUG,
                      help="more verbose output")
    parser.add_option("--encoding",
                      help="specify encoding of text content")
    parser.add_option("--html4tags", action="store_true", default=False,
                      help="use HTML 4 style for empty element tags")
    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
                      help="sanitize literal HTML: 'escape' escapes "
                           "HTML meta chars, 'replace' replaces with an "
                           "[HTML_REMOVED] note")
    parser.add_option("-x", "--extras", action="append",
                      help="Turn on specific extra features (not part of "
                           "the core Markdown spec). See above.")
    parser.add_option("--use-file-vars",
                      help="Look for and use Emacs-style 'markdown-extras' "
                           "file var to turn on extras. See "
                           "<https://github.com/trentm/python-markdown2/wiki/Extras>")
    parser.add_option("--link-patterns-file",
                      help="path to a link pattern file")
    parser.add_option("--self-test", action="store_true",
                      help="run internal self-tests (some doctests)")
    parser.add_option("--compare", action="store_true",
                      help="run against Markdown.pl as well (for testing)")
    parser.set_defaults(log_level=logging.INFO, compare=False,
                        encoding="utf-8", safe_mode=None, use_file_vars=False)
    opts, paths = parser.parse_args()
    log.setLevel(opts.log_level)
    if opts.self_test:
        return _test()
    # --- Parse "-x name[=arg]" extras into a {name: arg} mapping.
    if opts.extras:
        extras = {}
        for s in opts.extras:
            splitter = re.compile("[,;: ]+")
            for e in splitter.split(s):
                if '=' in e:
                    ename, earg = e.split('=', 1)
                    try:
                        earg = int(earg)
                    except ValueError:
                        pass
                else:
                    ename, earg = e, None
                extras[ename] = earg
    else:
        extras = None
    # --- Load "PATTERN HREF" lines from the link-patterns file, if given.
    if opts.link_patterns_file:
        link_patterns = []
        f = open(opts.link_patterns_file)
        try:
            for i, line in enumerate(f.readlines()):
                if not line.strip(): continue
                if line.lstrip().startswith("#"): continue
                try:
                    pat, href = line.rstrip().rsplit(None, 1)
                except ValueError:
                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
                                        % (opts.link_patterns_file, i+1, line))
                link_patterns.append(
                    (_regex_from_encoded_pattern(pat), href))
        finally:
            f.close()
    else:
        link_patterns = None
    from os.path import join, dirname, abspath, exists
    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
        "Markdown.pl")
    if not paths:
        paths = ['-']
    for path in paths:
        if path == '-':
            text = sys.stdin.read()
        else:
            fp = codecs.open(path, 'r', opts.encoding)
            text = fp.read()
            fp.close()
        if opts.compare:
            from subprocess import Popen, PIPE
            print("==== Markdown.pl ====")
            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
            p.stdin.write(text.encode('utf-8'))
            p.stdin.close()
            perl_html = p.stdout.read().decode('utf-8')
            if py3:
                sys.stdout.write(perl_html)
            else:
                sys.stdout.write(perl_html.encode(
                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
            print("==== markdown2.py ====")
        html = markdown(text,
            html4tags=opts.html4tags,
            safe_mode=opts.safe_mode,
            extras=extras, link_patterns=link_patterns,
            use_file_vars=opts.use_file_vars)
        if py3:
            sys.stdout.write(html)
        else:
            sys.stdout.write(html.encode(
                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        if extras and "toc" in extras:
            log.debug("toc_html: " +
                html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        # --- Compare our output against Markdown.pl's (normalized if the
        #     test helpers are available).
        if opts.compare:
            test_dir = join(dirname(dirname(abspath(__file__))), "test")
            if exists(join(test_dir, "test_markdown2.py")):
                sys.path.insert(0, test_dir)
                from test_markdown2 import norm_html_from_html
                norm_html = norm_html_from_html(html)
                norm_perl_html = norm_html_from_html(perl_html)
            else:
                norm_html = html
                norm_perl_html = perl_html
            print("==== match? %r ====" % (norm_perl_html == norm_html))
if __name__ == "__main__":
    # Bug fix: repository-metadata residue ('| [ "..." ] |') had been fused
    # onto this line, making the file a syntax error; removed.
    sys.exit(main(sys.argv))
import torchvision as tv
from PIL import Image
import requests
import numpy as np
from configuration import Config
# Project-wide settings; config.test_size below is the square input resolution.
config = Config()

# Preprocessing pipeline applied to every input image:
# resize -> tensor -> per-channel normalization.
# The mean/std constants are the standard ImageNet statistics — presumably
# the model backbone was pretrained on ImageNet; TODO confirm.
transform = tv.transforms.Compose([
    tv.transforms.Resize((config.test_size, config.test_size)),
    tv.transforms.ToTensor(),
    tv.transforms.Normalize([0.485, 0.456, 0.406],
                            [0.229, 0.224, 0.225])])
def prepare_image(image_url, psuedo_url):
    """Download the input image and its pseudo-label image and preprocess both.

    Each URL is fetched over HTTP, converted to RGB if necessary, pushed
    through the module-level ``transform`` pipeline, and given a leading
    batch dimension of 1.

    Returns:
        Tuple ``(image, psuedo_image)`` of 4-D tensors of shape (1, C, H, W).
    """
    def _download_and_preprocess(url):
        # Stream the HTTP body straight into PIL without an intermediate file.
        img = Image.open(requests.get(url, stream=True).raw)
        if img.mode != "RGB":
            img = img.convert("RGB")
        return transform(img).unsqueeze(0)

    # Preserve the original request order: pseudo-label first, then the image.
    psuedo_image = _download_and_preprocess(psuedo_url)
    image = _download_and_preprocess(image_url)
    return image, psuedo_image
def split_class(path, w, h):
    """Split a combined grayscale segmentation mask into two binary masks.

    The mask at *path* encodes background as gray level 0 and the two
    infection classes as two other gray levels.  Returns a pair of 1-bit
    PIL images resized with ``size=(h, w)``:

      * class_one -- ground-glass opacities
      * class_two -- consolidation

    NOTE(review): PIL's ``Image.resize`` takes (width, height); passing
    (h, w) here looks transposed relative to the parameter names — confirm.
    NOTE(review): ``np.unique(...)[1]`` assumes at least two distinct gray
    levels exist; an all-background mask raises IndexError here.
    """
    im = Image.open(path).convert('L')
    im_array_red = np.array(im)  # working copy for the class-1 mask
    im_array_green = np.array(im)  # working copy for the class-2 mask
    # `mid` is the second-smallest gray level present — presumably the
    # marker value of the consolidation class; TODO confirm against labels.
    uniquemidfinder = np.unique(im_array_red)
    mid = uniquemidfinder[1]
    print(np.unique(im_array_red))  # debug trace of the gray levels found
    # Class-1 mask, built in place.  The order of the next three assignments
    # matters: non-zero pixels are first collapsed to the sentinel value 1 so
    # they cannot be confused with the 255s written in the second step.
    # Result: background (0) -> 255, any labelled pixel -> 0.
    # NOTE(review): that leaves lesions black and background white — confirm
    # the intended mask polarity.
    im_array_red[im_array_red != 0] = 1
    im_array_red[im_array_red == 0] = 255
    im_array_red[im_array_red == 1] = 0
    # Class-2 mask: only pixels exactly equal to `mid` survive as 255.
    im_array_green[im_array_green != mid] = 0
    im_array_green[im_array_green == mid] = 255
    # Class1 = GroundGlassOpacities
    # Class2 = Consolidation
    class_one = Image.fromarray(im_array_red).convert('1').resize(size=(h, w))
    class_two = Image.fromarray(im_array_green).convert('1').resize(size=(h, w))
    return class_one, class_two
| [
"sahiluppal2k@gmail.com"
] | sahiluppal2k@gmail.com |
def classify(features_train, labels_train, random_state=0):
    """Train a decision-tree classifier and return the fitted model.

    Generalization: the previously hard-coded ``random_state=0`` is now a
    keyword parameter (default unchanged, so existing callers behave
    identically).

    Args:
        features_train: training feature matrix, shape (n_samples, n_features).
        labels_train: training labels, shape (n_samples,).
        random_state: seed controlling randomness in tree construction;
            defaults to 0 for reproducibility.

    Returns:
        The fitted ``sklearn.tree.DecisionTreeClassifier``.
    """
    from sklearn.tree import DecisionTreeClassifier

    clf = DecisionTreeClassifier(random_state=random_state)
    # fit() returns the estimator itself, so this is the trained model.
    return clf.fit(features_train, labels_train)
"skoerbitz@gmail.com"
] | skoerbitz@gmail.com |
676dd03f80716276969d1d992ae87e6a3525f8db | 515cbb646c3646e74a49aa607023a4325ee8b3a2 | /app.py | bc43068a76b52deb29af549e3d8d980b0701a560 | [] | no_license | YouMellouki/Flask | b88999724d63cecbcf5235e4170f3d4dc6c9b8cb | 6422150061ffe0d73cbb2f190573177bb3aaab5c | refs/heads/master | 2022-12-17T00:34:56.734554 | 2020-09-19T12:32:42 | 2020-09-19T12:32:42 | 296,863,350 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | from flask import Flask,render_template,url_for,request
import numpy as np
# ML Packages
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import joblib
# WSGI application object; __name__ lets Flask locate templates and static files.
app = Flask(__name__)
# Prediction
def predict_gender(x):
    """Predict the gender label for a single raw name string *x*.

    Bug fix: the original body referenced an undefined local ``data`` and
    never used the parameter ``x``, so every call raised NameError; the
    input is now vectorized from ``x`` itself.

    NOTE(review): ``gender_cv`` and ``gender_clf`` are expected to be
    module-level vectorizer/model globals, but they are never defined in
    this file; this helper is also shadowed by the ``predict_gender`` route
    defined later, so it is currently dead code — confirm intent.
    """
    # CountVectorizer.transform expects an iterable of documents.
    vect = gender_cv.transform([x]).toarray()
    result = gender_clf.predict(vect)
    return result
# Prediction
def predict_nationality(x):
    """Predict the nationality label for a single raw name string *x*.

    Bug fix: the original body referenced an undefined local ``data`` and
    never used the parameter ``x``, so every call raised NameError; the
    input is now vectorized from ``x`` itself.

    NOTE(review): ``nationality_cv`` and ``nationality_clf`` are expected
    to be module-level vectorizer/model globals, but they are never defined
    in this file (the ``/predict`` route loads its own copies) — confirm
    where they are supposed to be initialized.
    """
    # CountVectorizer.transform expects an iterable of documents.
    vect = nationality_cv.transform([x]).toarray()
    result = nationality_clf.predict(vect)
    return result
@app.route('/')
def index():
    """Render the landing page containing the nationality-prediction form."""
    return render_template('index.html')
@app.route('/gender')
def gender():
    """Render the gender-prediction page."""
    return render_template('gender.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Handle the nationality-prediction form POST and re-render index.html.

    Bug fix: the pickle files were opened but never closed, leaking a file
    handle per request; they are now opened with context managers
    (``joblib.load`` accepts an open file object, as before).

    NOTE(review): reloading the vectorizer and model from disk on every
    request is slow — consider loading them once at import time.
    """
    # Load the fitted CountVectorizer for nationality features.
    with open("models/nationality_vectorizer.pkl", "rb") as f:
        cv_nationality = joblib.load(f)
    # Load the trained Naive Bayes nationality model.
    with open("models/nationality_nv_model.pkl", "rb") as f:
        nationality_clf = joblib.load(f)

    # Redundant with methods=['POST'], kept for parity with the original flow.
    if request.method == 'POST':
        namequery = request.form['namequery']
        data = [namequery]
        vect = cv_nationality.transform(data).toarray()
        result = nationality_clf.predict(vect)
        return render_template('index.html', prediction=result, name=namequery.upper())
@app.route('/predict_gender', methods=['POST'])
def predict_gender():
    """Handle the gender-prediction form POST and re-render gender.html.

    Bug fix: the pickle files were opened but never closed, leaking a file
    handle per request; they are now opened with context managers
    (``joblib.load`` accepts an open file object, as before).

    NOTE(review): this definition shadows the module-level helper
    ``predict_gender(x)`` above, making the helper unreachable — one of the
    two should be renamed.
    """
    # Load the fitted CountVectorizer for gender features.
    with open("models/gender_vectorizer.pkl", "rb") as f:
        cv_gender = joblib.load(f)
    # Load the trained Naive Bayes gender model.
    with open("models/naivebayesgendermodel.pkl", "rb") as f:
        gender_clf = joblib.load(f)

    # Redundant with methods=['POST'], kept for parity with the original flow.
    if request.method == 'POST':
        namequery = request.form['namequery']
        data = [namequery]
        vect = cv_gender.transform(data).toarray()
        result = gender_clf.predict(vect)
        return render_template('gender.html', prediction=result, name=namequery.upper())
# Development entry point. debug=True enables Flask's interactive debugger
# and auto-reload — must not be enabled in production deployments.
if __name__ == '__main__':
    app.run(debug=True) | [
"noreply@github.com"
] | YouMellouki.noreply@github.com |
3468f78680d2c6fa3b3616f9121f4dae00214184 | ce55c319f5a78b69fefc63595d433864a2e531b5 | /爬虫知识/爬虫/04day/04-爬取音乐.py | 66b60b9b5ade7ecbd06ebc3bde5dd9fae6443f39 | [] | no_license | Suijng/1809_data | a072c875e8746190e3b715e53f1afe3323f4666b | 45f8a57089f5c30ccc1a3cddb03b76dc95355417 | refs/heads/master | 2022-12-21T12:38:30.458291 | 2019-09-27T01:14:41 | 2019-09-27T01:14:41 | 211,207,071 | 0 | 0 | null | 2022-11-22T03:16:18 | 2019-09-27T00:55:21 | HTML | UTF-8 | Python | false | false | 360 | py | import urllib.request
# Fetch a page through hard-coded HTTP/HTTPS proxies and print its body.
# NOTE(review): the proxy addresses and target URL are hard-coded and
# likely stale — confirm before relying on this script.
proxy = {
    'http': '61.176.223.7:58822',
    'https': '119.102.132.60:31325'
}

handler = urllib.request.ProxyHandler(proxies=proxy)
opener = urllib.request.build_opener(handler)

request = urllib.request.Request(url='http://www.xicidaili.com/')
# Bug fix: close the HTTP response deterministically instead of leaking the
# socket — http.client.HTTPResponse supports the context-manager protocol.
with opener.open(request) as response:
    content = response.read().decode()
print(content) | [
"1627765913@qq.com"
] | 1627765913@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.