content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import sys

# Extract a single sequence record from a FASTA file by its exact record ID.
# Usage: getSeq.py <.fasta> <seqID>
if len(sys.argv) != 3:
    sys.exit("Wrong argument. getSeq.py <.fasta> <seqID>")
targetid = str(sys.argv[2])
# Flag: True while we are inside the target record and should echo lines.
seq2print = False
with open(sys.argv[1], "r") as f:
    for line in f:
        if not seq2print:
            # Looking for the header line ">seqID" that starts the record.
            # Strip the ">" prefix and trailing newline before comparing.
            if line.startswith(">") and line.rstrip().lstrip(">") == targetid:
                print(line.rstrip())
                seq2print = True
        else:  # seq2print == True: inside the target record
            if line.startswith(">"):
                # Next record's header reached -> target fully printed.
                break
            print(line.rstrip())
|
nilq/baby-python
|
python
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
import mcl.object.MclTime
PARAMS_QUERY_TYPE_ALL = 0
PARAMS_QUERY_TYPE_IP_ONLY = 1
PARAMS_QUERY_TYPE_TCP_ONLY = 2
PARAMS_QUERY_TYPE_UDP_ONLY = 3
PARAMS_QUERY_TYPE_PIPES_ONLY = 4
class Params:
    """Parameter bag marshalled to and from MCL messages.

    Exposes exactly four attributes -- monitor, delay, queryType and
    maximum -- and raises AttributeError for anything else, on both
    read and write.
    """

    def __init__(self):
        # Write through __dict__ directly so __setattr__ is bypassed
        # during construction.
        d = self.__dict__
        d['monitor'] = False
        d['delay'] = mcl.object.MclTime.MclTime()
        d['queryType'] = PARAMS_QUERY_TYPE_IP_ONLY
        d['maximum'] = 1000

    def __getattr__(self, name):
        # Only invoked when normal lookup fails; restrict to known fields.
        if name in ('monitor', 'delay', 'queryType', 'maximum'):
            return self.__dict__[name]
        raise AttributeError("Attribute '%s' not found" % name)

    def __setattr__(self, name, value):
        # Reject writes to anything but the four declared fields.
        if name in ('monitor', 'delay', 'queryType', 'maximum'):
            self.__dict__[name] = value
        else:
            raise AttributeError("Attribute '%s' not found" % name)

    def Marshal(self, mmsg):
        """Serialize all fields into a sub-message and append it to *mmsg*."""
        from mcl.object.Message import MarshalMessage
        submsg = MarshalMessage()
        submsg.AddBool(MSG_KEY_PARAMS_MONITOR, self.__dict__['monitor'])
        submsg.AddTime(MSG_KEY_PARAMS_DELAY, self.__dict__['delay'])
        submsg.AddU8(MSG_KEY_PARAMS_QUERY_TYPE, self.__dict__['queryType'])
        submsg.AddU32(MSG_KEY_PARAMS_MAXIMUM, self.__dict__['maximum'])
        mmsg.AddMessage(MSG_KEY_PARAMS, submsg)

    def Demarshal(self, dmsg, instance=-1):
        """Populate fields from a demarshalled message.

        Each field is best-effort: a key missing from the message leaves
        the current value untouched (hence the bare excepts, preserved
        from the decompiled original).
        """
        import mcl.object.Message
        msgData = dmsg.FindData(MSG_KEY_PARAMS, mcl.object.Message.MSG_TYPE_MSG, instance)
        submsg = mcl.object.Message.DemarshalMessage(msgData)
        try:
            self.__dict__['monitor'] = submsg.FindBool(MSG_KEY_PARAMS_MONITOR)
        except:
            pass
        try:
            self.__dict__['delay'] = submsg.FindTime(MSG_KEY_PARAMS_DELAY)
        except:
            pass
        try:
            self.__dict__['queryType'] = submsg.FindU8(MSG_KEY_PARAMS_QUERY_TYPE)
        except:
            pass
        try:
            self.__dict__['maximum'] = submsg.FindU32(MSG_KEY_PARAMS_MAXIMUM)
        except:
            pass
|
nilq/baby-python
|
python
|
from typing import Any, Dict
from . import State
from app import app
from models import User
class AssetState(State[User]):
    """Connection state for asset handling, keyed by ``User``.

    Extends the generic ``State`` base with a cache for file uploads
    that are still in flight.
    """

    def __init__(self) -> None:
        super().__init__()
        # Maps an upload key to its pending payload; exact value shape is
        # determined by callers elsewhere — TODO confirm.
        self.pending_file_upload_cache: Dict[str, Any] = {}

    def get_user(self, sid: str) -> User:
        # Resolve a session/socket id to its User. Presumably raises
        # KeyError for unknown sids (assuming _sid_map is dict-like —
        # defined in the State base class; verify there).
        return self._sid_map[sid]


# Module-level singleton, registered in the app's global state registry
# under the "asset" key at import time.
asset_state = AssetState()
app["state"]["asset"] = asset_state
|
nilq/baby-python
|
python
|
from .dijkstras_algorithm import DijkstraNode, DijkstraEdge, DijkstraGraph
from .a_star import AStarNode, AStarEdge, AStarGraph
from .custom_dijkstras_algorithm import CDijkstraNode, CDijkstraEdge, CDijkstraGraph
|
nilq/baby-python
|
python
|
from .test_case import TestCase
from infi.unittest.parameters import iterate
class IsolatedPythonVersion(TestCase):
    """Exercise the projector CLI's isolated-python version get/set flow."""

    def test(self):
        # Run inside a throwaway directory so repository files don't leak.
        with self.temporary_directory_context():
            self.projector("repository init a.b.c none short long")
            self.projector("isolated-python python-version get")
            # Set a pinned version and persist it with --commit-changes.
            self.projector("isolated-python python-version set v2.7.5.5 --commit-changes")
|
nilq/baby-python
|
python
|
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import xml
from xml.dom.minidom import parse, parseString
def send_email(to, server, subj, body, attachments):
    """Send an email with the given information.

    Args:
        to: a String, the email address to send the email to
        server: a String, the mail server to send from
        subj: a String, the subject line of the message
        body: a String, the body of the message
        attachments: a listof_pathto_File, the attachments to include
    """
    msg = MIMEMultipart()
    msg['Subject'] = subj
    # me == the sender's email address
    # family = the list of all recipients' email addresses
    msg['From'] = 'AutopsyTest'
    msg['To'] = to
    msg.preamble = 'This is a test'
    # Body goes in first as a plain-text part, attachments follow.
    container = MIMEText(body, 'plain')
    msg.attach(container)
    Build_email(msg, attachments)
    s = smtplib.SMTP(server)
    try:
        print('Sending Email')
        s.sendmail(msg['From'], msg['To'], msg.as_string())
    except Exception as e:
        print(str(e))
    finally:
        # Close the SMTP connection no matter what sendmail did.
        s.quit()
def Build_email(msg, attachments):
    """Attach each file in *attachments* to *msg* as a base64 octet-stream part.

    Args:
        msg: the MIMEMultipart message being built (mutated in place)
        attachments: iterable of file paths; the attachment filename is the
            component after the last backslash (Windows-style paths)
    """
    for file in attachments:
        part = MIMEBase('application', "octet-stream")
        # Context manager closes the handle even on error; the original
        # opened the file and never closed it.
        with open(file, "rb") as atach:
            part.set_payload(atach.read())
        encoders.encode_base64(part)
        # Keep only the last backslash-separated path component as the name.
        nom = file.split("\\")[-1]
        part.add_header('Content-Disposition', 'attachment; filename="' + nom + '"')
        msg.attach(part)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Pulsar period graph (Python 2 script): folds a raw byte stream of samples
# at a known pulsar period, averages a window of periods, measures the pulse
# width at half maximum and saves the folded profile as a PNG.
import time,serial,math,sys,numpy as np,matplotlib.pyplot as plt
print '*** Graf periode pulzarja - 11.05.2017 ***'
povpstolp=20        # number of samples averaged per column (integer)
perioda=7145.117    # pulsar period in number of samples (float)
odmik=1000          # start offset (integer), 0 < odmik <= perioda
zacetek=8000        # first period of the averaging window (integer), zacetek < konec
konec=15000         # last period of the averaging window (integer)
print 'Zacetek racunanja ',time.ctime() # record the computation start time
k=0.0               # fractional cursor within the period (float)
v=0                 # sample counter within the current column (integer)
p=0                 # column cursor (integer)
m=0                 # period (averaging) counter (integer)
z=0                 # number of source bytes read (integer), informational only
dolzina=konec-zacetek               # number of periods in the averaging window
sirina=int(perioda/povpstolp)       # plot width as a number of columns
A=np.zeros([sirina],dtype=float)    # empty accumulator for the folded period
datoteka=str(sys.argv[1])           # read program argument: program.py <file>
f1=open(datoteka, 'r')              # open the source file
string=f1.read(odmik)               # skip <odmik> bytes
z=z+odmik
si=int(perioda)                     # skip the first <zacetek> whole periods
i=zacetek
while i>0:                          # integer part of each period, discarded
    string=f1.read(si)
    z=z+si
    i=i-1
i=int(zacetek*(perioda-si))         # then skip the accumulated fractional remainder
string=f1.read(i)
z=z+i
print 'Izvor ',datoteka
print 'Stevilo povprecenj stolpcev ',povpstolp
print 'Perioda pulzarja ',perioda,' vzorcev'
print 'Odmik zacetka ',odmik,' vzorcev'
print 'Povprecenje ',zacetek,' ... ',konec,' period'
print 'Sirina grafa ',sirina,' tock'
while string!="":                   # loop until the read returns empty (EOF)
    string=f1.read(1)               # NOTE: one byte per iteration — slow but simple
    z=z+1
    if string!="":                  # end of source data?
        if p<sirina:                # drop the final, incomplete column
            A[p]=A[p]+float(ord(string))  # accumulate the sample into the column average
            v=v+1
            if v>=povpstolp:        # column full -> advance to the next column
                v=0
                p=p+1
        k=k+1
        if k>=perioda:              # one whole pulsar period completed?
            v=0
            p=0
            k=k-perioda             # keep the fractional remainder for phase coherence
            print m,' period ',z/1024, ' kByte',chr(13),
            m=m+1
            if m>=dolzina:          # averaging window finished?
                string=""
f1.close()                          # close the source file
A=(A-(np.sum(A)/float(sirina)))/float(dolzina)  # normalize: subtract mean, divide by period count
print chr(10),'Konec racunanja ',time.ctime()   # end of file processing
spik=np.amax(A)                     # compute the pulse width at half maximum
mspik=np.argmax(A)
meja=spik/2.0                       # chosen threshold for the width (half max)
w=0.0
varna=sirina-1                      # safe index bound for the search !!!
if mspik>1 and mspik<varna-1:
    p=mspik                         # add the width left of the maximum
    while p>1 and A[p-1]>meja:
        w=w+1.0
        p=p-1
    if p>0:
        # Linear interpolation of the crossing point below the threshold.
        w=w+(A[p]-meja)/(A[p]-A[p-1])
    p=mspik                         # add the width right of the maximum
    while p<varna-1 and A[p+1]>meja:
        w=w+1.0
        p=p+1
    if p<varna:
        w=w+(A[p]-meja)/(A[p]-A[p+1])
w=w*float(povpstolp)                # convert from columns back to samples
print 'Sirina impulza ',w,' vzorcev'
fig=plt.figure()                    # render the plot into an image
plt.plot([0,sirina],[meja,meja],'y-')  # draw the half-height threshold line
plt.plot(A,'b-')                    # draw the folded pulse profile
plt.title('Izvor: '+datoteka+'\nOdmik: '+str(odmik)+' vzorcev @ Perioda: '+str(perioda)+' vzorcev')
plt.xlabel('Povprecenje: '+str(povpstolp)+' vzorcev/stolpec Sirina impulza: '+str(w)+' vzorcev')
plt.ylabel('Povprecenje: '+str(zacetek)+'...'+str(konec)+' period')
fig.savefig(datoteka+'-pulz.png')   # write the figure to a file
# end of program
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import *
from django import forms
from ckeditor_uploader.widgets import CKEditorUploadingWidget
# Admin list views: each ModelAdmin only customizes the changelist columns.
class ServiceAdmin(admin.ModelAdmin):
    list_display = ['title','status']
class CategoryAdmin(admin.ModelAdmin):
    list_display = ['title','parent','slug']
class BrandAdmin(admin.ModelAdmin):
    list_display = ['name','status']
class GalleryAdmin(admin.ModelAdmin):
    list_display = ['name','category','gallerytype','status']
class ContactAdmin(admin.ModelAdmin):
    list_display = ['name','phone','email','subject','status']
class CmsAdmin(admin.ModelAdmin):
    list_display = ['title','slug','type','short_desc','status']
# Register models with the admin site; those without a ModelAdmin use the default.
admin.site.register(Banner)
admin.site.register(Service, ServiceAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Team)
admin.site.register(Gallery, GalleryAdmin)
admin.site.register(Contact, ContactAdmin)
admin.site.register(ContactUs)
admin.site.register(FAQ)
admin.site.register(PrivacyAndPolicy)
admin.site.register(Brand, BrandAdmin)
admin.site.register(Cms, CmsAdmin)
|
nilq/baby-python
|
python
|
"""
FIFO
Queue = []
Queue = [1,2,3,4] push
[2,3,4] pop
[3,4] pop
[4] pop
[] pop
empty stack
"""
class Queue(object):
    """A simple FIFO queue backed by a Python list.

    ``enque`` appends at the tail, ``deque`` removes from the head and
    returns ``None`` when the queue is empty.
    """

    def __init__(self):
        self.queue = []    # items in arrival order; head is index 0
        self.length = 0    # number of items currently stored

    def enque(self, data):
        """Add *data* at the tail of the queue."""
        self.queue.append(data)
        self.length += 1

    def deque(self):
        """Remove and return the head item, or None if the queue is empty."""
        if self.length < 1:
            return None
        self.length -= 1
        # pop(0) removes the head in place — clearer than the original's
        # full re-slice (self.queue[1:self.length + 1]); both are O(n)
        # for a list (collections.deque would be O(1), but this keeps
        # the class dependency-free).
        return self.queue.pop(0)
def main():
    """Demo: enqueue four items, then dequeue until empty (None afterwards)."""
    new_queue = Queue()
    new_queue.enque(1)
    new_queue.enque(2)
    new_queue.enque(3)
    new_queue.enque(4)
    # FIFO order: items come back out in the order they went in.
    print(new_queue.deque()) # 1
    print(new_queue.deque()) # 2
    print(new_queue.deque()) # 3
    print(new_queue.deque()) # 4
    # Dequeuing an empty queue returns None rather than raising.
    print(new_queue.deque()) # None
    print(new_queue.deque()) # None

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
def test_canary():
    """Canary test: proves the test runner itself is wired up and executing."""
    assert True
|
nilq/baby-python
|
python
|
# Multiple linear regression on random data: fit OLS, run a global F-test,
# then drop insignificant predictors and refit, printing R^2/F before and after.
import numpy as np
import random
import sys
from scipy.stats import f
from scipy.stats import norm

param= int(sys.argv[1])     # CLI argument: RNG seed
np.random.seed(param)
n=500   # number of observations
p=100   # number of measured variables (predictors)
mu=0.0
sigma=1.0
# Both X and Y are pure noise, so any "significant" fit is spurious by design.
X=np.random.normal(mu,sigma,size=(n,p))
Y=np.random.normal(mu,sigma,size=(n,1))
XT=X.T
YT=Y.T
# OLS: beta = (X'X)^-1 X'Y ; Hhat = X (X'X)^-1 X' is the hat matrix.
Inv=np.linalg.inv(np.matmul(XT,X))
beta1=np.matmul(Inv,XT)
beta=np.matmul(beta1,Y)
Hhat=np.matmul(X,beta1)
Yideal=np.matmul(X,beta)
# Sums of squares: total, regression and error.
SST1=np.matmul(np.identity(n)-(1.0/n)*np.ones((n,n)),Y)
SST=np.matmul(YT,SST1)
SSR1=np.matmul(Hhat-(1.0/n)*np.ones((n,n)),Y)
SSR=np.matmul(YT,SSR1)
SSE1=np.matmul(np.identity(n)-Hhat,Y)
SSE=np.matmul(YT,SSE1)
Rsq=SSR[0,0]/SST[0,0]       # coefficient of determination
sigma2=SSE[0,0]/(n-1.)      # residual variance estimate
sigmamatrix=sigma2*Inv      # covariance matrix of the coefficients
# Standard error of each coefficient = sqrt of the diagonal.
sigma_i=np.zeros(p)
for i in range(p):
    sigma_i[i]=sigmamatrix[i,i]
sigma_i=np.sqrt(sigma_i)
MSE=SSE[0,0]/(n-p-1)
# Compute the MSR
MSR=SSR[0,0]/p
# Compute the MST
MST=SST[0,0]/(n-1)
F=(Rsq*(n-p-1))/((1-Rsq)*p)     # global F statistic from R^2
Rango=0.9   # confidence level, i.e. how much of the distribution's mass to cover
Ftest=f.ppf(Rango,p,n-(p+1))    # critical F value
P_i=np.zeros(p)                 # indicator: 1 = predictor kept, 0 = dropped
if F > Ftest:
    tzeros=beta[:,0]/sigma_i    # per-coefficient t-like statistics
    # Two-sided tail probability via the normal approximation.
    # NOTE(review): for negative tzeros this exceeds 1 — a two-sided p-value
    # is usually 2*(1-norm.cdf(abs(tzeros))); confirm the intent.
    P_value=2*(1-norm.cdf(tzeros))
    for i in range(p):
        if P_value[i]<0.5:
            P_i[i]=1
        else:
            P_i[i]=0
else:
    quit()      # global test failed: nothing significant, stop here
# Rebuild the design matrix with only the selected predictors.
p_prime=np.sum(P_i)
X_new=np.zeros((n,int(p_prime)))
aux=0
for i in range(p):
    if P_i[i]==1:
        X_new[:,aux]=X[:,i]
        aux+=1
p=X_new.shape[1]
X=X_new
# Refit OLS on the reduced model (same pipeline as above).
XT=X.T
YT=Y.T
Inv=np.linalg.inv(np.matmul(XT,X))
beta1=np.matmul(Inv,XT)
beta=np.matmul(beta1,Y)
Hhat=np.matmul(X,beta1)
Yideal=np.matmul(X,beta)
SST1=np.matmul(np.identity(n)-(1.0/n)*np.ones((n,n)),Y)
SST=np.matmul(YT,SST1)
SSR1=np.matmul(Hhat-(1.0/n)*np.ones((n,n)),Y)
SSR=np.matmul(YT,SSR1)
SSE1=np.matmul(np.identity(n)-Hhat,Y)
SSE=np.matmul(YT,SSE1)
Rnuevo= SSR[0,0]/SST[0,0]
Fnuevo= (Rnuevo*(n-p-1))/((1-Rnuevo)*p)
# Report: original R^2 and F, reduced-model R^2 and F.
print(str(Rsq), str(F), str(Rnuevo), str(Fnuevo))
|
nilq/baby-python
|
python
|
# Permission mixins to override default django-guardian behaviour
from guardian.mixins import PermissionRequiredMixin
class SetChildPermissionObjectMixin:
    """
    Sets child object as the focus of the permission check in the view.
    """

    def get_permission_object(self):
        # django-guardian hook: check permissions against self.child instead
        # of the view's default object (assumes the view defines .child —
        # provided elsewhere; verify in consuming views).
        return self.child


class PermissionRequired403Mixin(PermissionRequiredMixin):
    """
    Basic PermissionRequired mixin to use in views.
    Forces 403 http error on failed permission check.
    """
    # guardian option: respond 403 instead of redirecting to login.
    return_403 = True


class PermissionRequired403GlobalMixin(PermissionRequiredMixin):
    """
    Basic Global PermissionRequired mixin to use in views.
    Forces 403 http error on failed permission check. Disables permission object (only global check is made for User
    instance)
    """
    return_403 = True
    # Accept globally-assigned (non-object) permissions...
    accept_global_perms = True
    # ...and skip the per-object check entirely.
    permission_object = None


class PermissionRequiredSetChild403Mixin(SetChildPermissionObjectMixin, PermissionRequired403Mixin):
    """
    PermissionRequired mixin to be used in views when we have to provide child object as the one for which we want to
    check the permission for (i.e. AddSmiley / EditChild where the view object is a Smiley / User but check has to be
    made for Child.
    """
    pass
|
nilq/baby-python
|
python
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Get score by given metric."""
from .ppl_score import ngram_ppl
from .rouge_score import rouge
def get_ppl_score(result):
    """
    Calculate Perplexity(PPL) score.

    Args:
        List[Dict], prediction, each example has 4 keys, "source",
        "target", "log_prob" and "length".

    Returns:
        Float, ppl score.
    """
    all_log_probs = []
    length_sum = 0
    # Flatten every example's token log-probs and accumulate total length.
    for example in result:
        example_probs = example['log_prob']
        example_len = example['length']
        all_log_probs.extend(example_probs)
        length_sum += example_len
        print(f" | log_prob:{example_probs}")
        print(f" | length:{example_len}")
    ppl = ngram_ppl(all_log_probs, length_sum, log_softmax=True)
    print(f" | final PPL={ppl}.")
    return ppl
def get_rouge_score(result, vocab):
    """
    Calculate ROUGE score.

    Args:
        List[Dict], prediction, each example has 4 keys, "source",
        "target", "prediction" and "prediction_prob".
        Dictionary, dict instance.

    return:
        Str, rouge score.
    """
    predictions = []
    targets = []
    for sample in result:
        # Detokenize ids back to space-joined text via the vocab.
        pred_text = ' '.join(vocab[t] for t in sample['prediction'])
        tgt_text = ' '.join(vocab[t] for t in sample['target'])
        predictions.append(pred_text)
        targets.append(tgt_text)
        print(f" | source: {' '.join([vocab[t] for t in sample['source']])}")
        print(f" | target: {targets[-1]}")
    return rouge(predictions, targets)
def get_score(result, vocab=None, metric='rouge'):
    """
    Get eval score.

    Args:
        List[Dict], prediction.
        Dictionary, dict instance.
        Str, metric function, default is rouge.

    Return:
        Str, Score.
    """
    # Dispatch on the metric name; unknown metrics print a notice and
    # yield None, matching the original behaviour.
    if metric == 'rouge':
        return get_rouge_score(result, vocab)
    if metric == 'ppl':
        return get_ppl_score(result)
    print(f" |metric not in (rouge, ppl)")
    return None
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
from ttk.corpus.CategorizedDatedCorpusReader import CategorizedDatedCorpusReader
class CategorizedDatedCorpusReporter(object):
    """ Reporting utility for CategorizedDatedCorpusReporter corpora. """

    def __init__(self):
        # Output modes accepted by every public report method.
        self._output_formats = ['list', 'str', 'dataframe']

    def summary(self, corpus, categories=None, dates=None, output='str', verbose=False):
        """Overall corpus counts (categories, dates, sents, words, files).

        Returns a str, list or DataFrame depending on *output*;
        None on an unsupported output mode.
        """
        if not self._is_valid_output(output, verbose=verbose):
            return None
        # get summary data
        num_categories = len(corpus.categories(categories=categories, dates=dates))
        num_dates = len(corpus.dates(categories=categories, dates=dates))
        num_uniq_words = len(set(corpus.words(categories=categories, dates=dates)))
        num_sents = len(corpus.sents(categories=categories, dates=dates))
        num_words = len(corpus.words(categories=categories, dates=dates))
        num_files = len(corpus.fileids(categories=categories, dates=dates))
        # store in dict for consistency
        summary = {
            'categories':num_categories,
            'dates':num_dates,
            'sentences':num_sents,
            'words':num_words,
            'uniq_words':num_uniq_words,
            'files':num_files,
        }
        # convert to output
        if output == 'str' or output == 'list':
            summary = self._get_summary_formatted_list(summary)
            if output == 'str':
                summary = '\n'.join(summary)
        elif output == 'dataframe':
            summary = pd.DataFrame([summary])
        else:
            print ('Output mode %s is not supported by %s, use one of the following:\n%s'
                   % (output, 'summary', self._output_formats))
            return None
        return summary

    def date_summary(self, corpus, categories=None, dates=None, output='str', display_zeros=True, verbose=False):
        """Per-date counts, one summary row per date in the corpus."""
        if not self._is_valid_output(output, verbose=verbose):
            return None
        # generate a list of summary dictionaries
        summaries = (s for s in self._iter_date_summaries(
            corpus, dates=dates, categories=categories, display_zeros=display_zeros, verbose=verbose))
        # convert to output type
        if output == 'str':
            summaries = self._get_formatted_date_summary_string(summaries)
        elif output == 'dataframe':
            summaries = pd.DataFrame(summaries)
        elif output == 'list':
            summaries = list(summaries)
        else:
            print ('Output mode %s is not supported by %s, use one of the following:\n%s'
                   % (output, 'date_summary', self._output_formats))
            return None
        return summaries

    def category_summary(self, corpus, categories=None, dates=None, output='str', display_zeros=True, verbose=False):
        """Per-category counts, one summary row per category in the corpus."""
        if not self._is_valid_output(output, verbose=verbose):
            return None
        # generate category summaries
        summaries = (s for s in self._iter_category_summaries(corpus,
                                                              categories=categories,
                                                              dates=dates,
                                                              display_zeros=display_zeros,
                                                              verbose=verbose))
        # convert to output type
        if output == 'str':
            summaries = self._get_formatted_category_summary_string(summaries)
        elif output == 'dataframe':
            summaries = pd.DataFrame(summaries)
        elif output == 'list':
            summaries = list(summaries)
        else:
            print ('Output mode %s is not supported by %s, use one of the following:\n%s'
                   % (output, 'category_summary', self._output_formats))
            return None
        return summaries

    def sample(self, corpus, categories=None, dates=None):
        # Not implemented yet.
        pass

    def to_data_frame(self, corpus, categories=None, dates=None, content_scope='sents', verbose=False):
        """Delegate DataFrame conversion straight to the corpus object."""
        return corpus.to_data_frame(categories=categories, dates=dates, content_scope=content_scope, verbose=verbose)

    """
    Iterators
    """
    def _iter_date_summaries(self, corpus, dates=None, categories=None, display_zeros=True, verbose=False):
        """Yield one summary dict per date (optionally including empty dates)."""
        # don't filter categories to display dates with 0 records
        if display_zeros:
            cat_filter = None
        else:
            cat_filter = categories
        for date in corpus.dates(dates=dates, categories=cat_filter):
            # get date summary data
            words = corpus.words(categories=categories, dates=[date])
            num_words = len(words)
            num_uniq_words = len(set(words))
            num_categories = len(corpus.categories(categories=categories, dates=[date]))
            num_sents = len(corpus.sents(categories=categories, dates=[date]))
            num_files = len(corpus.fileids(categories=categories, dates=[date]))
            # yield dictionary of summary data
            summary = {'date':date,
                       'categories':num_categories,
                       'sentences':num_sents,
                       'words':num_words,
                       'uniq_words':num_uniq_words,
                       'files':num_files,
                       }
            yield summary

    def _iter_category_summaries(self, corpus, categories=None, dates=None, display_zeros=True, verbose=False):
        """Yield one summary dict per category (optionally including empty ones)."""
        # don't filter dates to display categories with 0 records
        if display_zeros:
            date_filter = None
        else:
            date_filter = dates
        for cat in corpus.categories(categories=categories, dates=date_filter):
            # get category summary data
            words = corpus.words(categories=[cat], dates=dates)
            num_words = len(words)
            num_uniq_words = len(set(words))
            num_date = len(corpus.dates(categories=[cat], dates=dates))
            num_sents = len(corpus.sents(categories=[cat], dates=dates))
            num_files = len(corpus.fileids(categories=[cat], dates=dates))
            # yield dictionary of summary data
            summary = {'category':cat,
                       'dates':num_date,
                       'sentences':num_sents,
                       'words':num_words,
                       'uniq_words':num_uniq_words,
                       'files':num_files,
                       }
            yield summary

    """
    Formatting
    """
    def _get_summary_formatted_list(self, summary):
        """Render a summary dict as a list of aligned display lines."""
        formatted = []
        formatted.append('Summary for %i categories and %i dates'
                         % (summary['categories'], summary['dates']))
        formatted.append('{:8} sentences'.format(summary['sentences']))
        formatted.append('{:8} total words'.format(summary['words']))
        formatted.append('{:8} unique words'.format(summary['uniq_words']))
        formatted.append('{:8} files'.format(summary['files']))
        return formatted

    def _get_formatted_date_summary_string(self, summaries):
        """Join per-date summary dicts into one newline-separated string."""
        formatted = []
        for s in summaries:
            date_summary = str(
                '{}: {:2} categories {:4} sentences {:5} words {:5} unique words {:3} files'
                .format(s['date'], s['categories'], s['sentences'], s['words'], s['uniq_words'], s['files']))
            formatted.append(date_summary)
        summaries = '\n'.join(formatted)
        return summaries

    def _get_formatted_category_summary_string(self, summaries):
        """Join per-category summary dicts into one newline-separated string."""
        formatted = []
        for s in summaries:
            category_summary = str(
                "{:20} {:3} dates {:6} sentences {:7} words {:6} unique words {:3} files"
                .format(s['category'], s['dates'], s['sentences'], s['words'], s['uniq_words'], s['files']))
            formatted.append(category_summary)
        return '\n'.join(formatted)

    """
    Private helpers
    """
    def _is_valid_output(self, output, verbose=False):
        """Return True if *output* is a supported mode; print a hint otherwise."""
        if output in self._output_formats:
            return True
        else:
            print ('Output mode %s is not supported, use one of the following:\n%s'
                   % (output, self._output_formats))
            return False
|
nilq/baby-python
|
python
|
import os
import math
import sys
import datetime
import re
import numpy as np
import traceback
import pprint
import json
from rafiki.model import BaseModel, InvalidModelParamsException, test_model_class
from rafiki.constants import TaskType
# Min numeric value
MIN_VALUE = -9999999999
class BigramHmm(BaseModel):
    '''
    Implements Bigram Hidden Markov Model (HMM) for POS tagging
    '''

    def get_knob_config(self):
        # No tunable hyperparameters for this model.
        return {
            'knobs': {}
        }

    def init(self, knobs):
        pass

    def train(self, dataset_uri):
        """Estimate transition/emission log-probabilities from a tagged corpus."""
        dataset = self.utils.load_dataset_of_corpus(dataset_uri)
        # Unzip [(token, tag), ...] sentences into parallel token/tag sequences.
        (sents_tokens, sents_tags) = zip(*[zip(*sent) for sent in dataset])
        self._num_tags = dataset.tag_num_classes[0]
        (self._trans_probs, self._emiss_probs) = self._compute_probs(self._num_tags, sents_tokens, sents_tags)
        self.utils.log('No. of tags: {}'.format(self._num_tags))

    def evaluate(self, dataset_uri):
        """Return token-level tagging accuracy on a labelled dataset."""
        dataset = self.utils.load_dataset_of_corpus(dataset_uri)
        (sents_tokens, sents_tags) = zip(*[zip(*sent) for sent in dataset])
        (sents_pred_tags) = self._tag_sents(self._num_tags, sents_tokens, self._trans_probs, self._emiss_probs)
        acc = self._compute_accuracy(sents_tags, sents_pred_tags)
        return acc

    def predict(self, queries):
        """Tag each query sentence (a list of token strings) with tag indices."""
        sents_tokens = queries
        (sents_tags) = self._tag_sents(self._num_tags, sents_tokens, self._trans_probs, self._emiss_probs)
        return sents_tags

    def destroy(self):
        pass

    def dump_parameters(self):
        # All model state is plain lists/dicts, so it serializes directly.
        params = {}
        params['emiss_probs'] = self._emiss_probs
        params['trans_probs'] = self._trans_probs
        params['num_tags'] = self._num_tags
        return params

    def load_parameters(self, params):
        self._emiss_probs = params['emiss_probs']
        self._trans_probs = params['trans_probs']
        self._num_tags = params['num_tags']

    def _compute_accuracy(self, sents_tags, sents_pred_tags):
        """Fraction of tokens whose predicted tag matches the gold tag."""
        total = 0
        correct = 0
        for (tags, pred_tags) in zip(sents_tags, sents_pred_tags):
            for (tag, pred_tag) in zip(tags, pred_tags):
                total += 1
                if tag == pred_tag: correct += 1
        return correct / total

    def _compute_probs(self, num_tags, sents_tokens, sents_tags):
        """Count unigrams/bigrams/emissions and convert to log-probabilities.

        Returns (trans_probs, emiss_probs) where unseen transitions get
        MIN_VALUE as a floor instead of -inf.
        """
        # Total number of states in HMM as tags
        T = num_tags + 2 # Last 2 for START & END tags
        START = num_tags # <s>
        END = num_tags + 1 # </s>
        # Unigram (tag i) counts
        uni_counts = [0 for i in range(T)]
        # Bigram (tag i, tag j) counts
        bi_counts = [[0 for j in range(T)] for i in range(T)]
        # Counts for (tag i, word w) as [{ w -> count }]
        word_counts = [{} for i in range(T)]
        # For each sentence
        for (tokens, tags) in zip(sents_tokens, sents_tags):
            uni_counts[START] += 1
            # Run through sentence and update counts
            prev_tag = None
            for (word, tag) in zip(tokens, tags):
                if prev_tag is None:
                    bi_counts[START][tag] += 1
                else:
                    bi_counts[prev_tag][tag] += 1
                uni_counts[tag] += 1
                word_counts[tag][word] = word_counts[tag].get(word, 0) + 1
                prev_tag = tag
            uni_counts[END] += 1
            # Account for last bigram with </s>
            if len(tokens) > 0:
                last_tag = tags[-1]
                bi_counts[last_tag][END] += 1
        # Transition function (tag i, tag j) -> <log prob of transition from state i to j>
        trans_probs = [[0 for j in range(T)] for i in range(T)]
        for i in range(T):
            for j in range(T):
                if bi_counts[i][j] == 0:
                    trans_probs[i][j] = MIN_VALUE
                else:
                    trans_probs[i][j] = math.log(bi_counts[i][j] / uni_counts[i])
        # Emission function as (tag i, word w) -> <log prob of emitting word w at state i>
        emiss_probs = [{} for i in range(T)]
        for i in range(T):
            for w in word_counts[i]:
                emiss_probs[i][w] = math.log(word_counts[i][w] / uni_counts[i])
        return (trans_probs, emiss_probs)

    def _tag_sents(self, num_tags, sents_tokens, trans_probs, emiss_probs):
        """Viterbi-decode the most probable tag sequence for each sentence.

        NOTE(review): empty sentences are skipped entirely (continue), so the
        output list can be shorter than the input and lose alignment with
        queries — confirm whether callers ever pass empty sentences.
        """
        sents_tags = []
        T = num_tags + 2 # Last 2 for START & END tags
        START = num_tags # <s>
        END = num_tags + 1 # </s>
        for tokens in sents_tokens:
            if len(tokens) == 0:
                continue
            # Maximum log probabilities for sentence up to word w, where the last word's tag is i
            log_probs = [[None for i in range(T)] for w in range(len(tokens))]
            # Backpointers to previous best tags for log probabilities
            backpointers = [[None for i in log_probs[0]] for w in log_probs]
            # Process 1st word that is conditioned on <s>
            for i in range(T):
                trans = trans_probs[START][i]
                # Unknown words fall back to MIN_VALUE emission probability.
                emiss = emiss_probs[i].get(tokens[0], MIN_VALUE)
                log_probs[0][i] = trans + emiss
            # For each word w after the 1st word
            for w in range(1, len(tokens)):
                # For each tag i
                for i in range(T):
                    # For each prev tag j
                    for j in range(T):
                        # Compute probability for (tag j, tag i) for sentence up to word w
                        trans = trans_probs[j][i]
                        emiss = emiss_probs[i].get(tokens[w], MIN_VALUE)
                        prob = log_probs[w - 1][j] + trans + emiss
                        if log_probs[w][i] is None or prob > log_probs[w][i]:
                            log_probs[w][i] = prob
                            backpointers[w][i] = j
            # Compare probabilities with </s> across all tags of last word
            backpointer = None
            best_prob = None
            for i in range(T):
                trans = trans_probs[i][END]
                prob = log_probs[-1][i] + trans
                if best_prob is None or prob > best_prob:
                    best_prob = prob
                    backpointer = i
            # Traverse backpointers to get most probable tags
            cur = backpointer
            w = len(tokens) - 1
            sent_tags = []
            while cur is not None:
                sent_tags.append(cur)
                cur = backpointers[w][cur]
                w -= 1
            sent_tags.reverse()
            sents_tags.append(sent_tags)
        return sents_tags
if __name__ == '__main__':
    # Smoke-test the model class against local PTB POS-tagging datasets
    # using the rafiki test harness.
    test_model_class(
        model_file_path=__file__,
        model_class='BigramHmm',
        task=TaskType.POS_TAGGING,
        dependencies={},
        train_dataset_uri='data/ptb_for_pos_tagging_train.zip',
        test_dataset_uri='data/ptb_for_pos_tagging_test.zip',
        queries=[
            ['Ms.', 'Haag', 'plays', 'Elianti', '18', '.'],
            ['The', 'luxury', 'auto', 'maker', 'last', 'year', 'sold', '1,214', 'cars', 'in', 'the', 'U.S.']
        ]
    )
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 26 09:03:17 2022
@author: apauron
"""
import os
import get_files_cluster
import pandas as pd
from numpy import genfromtxt
### Get the parent folder of the working directory. Change it if you modify the name of the folders
path_parent = os.path.dirname(os.getcwd())
path_SB1 = os.path.join(path_parent,"Results_SB1_intra") #location of SB1 intrachromosomal results to convert
folder_results = "Results_Intra"
path_SB3 = os.path.join(path_parent,folder_results) #location of SB3 intrachromosomal results to convert
# Chromosome names are taken from the HUVEC 25kb folder layout on disk.
list_chr = os.listdir(os.path.join(path_parent,folder_results,"HUVEC","25kb_resolution_intrachromosomal")) ## All the chromosomes
###################################################Convert SB1 results to SB3 results##########################################
def SB1toSB3(path):
    """
    A pipeline to convert SB1 generated compartments files into SB3 format.

    Keyword arguments :
    path -- the path containing the folder in which there are the files containing SB1 results

    Returns :
    all the converted SB1 results in SB3 format in the "SB1_converted_SB3" folder
    """
    filestoconvert = get_files_cluster.getfiles(path,"") #get all the files in the path
    for file in filestoconvert :
        # Cell type is encoded as the first "_"-separated token of the filename.
        cell_type = file.split("/")[-1].split("_")[0]
        for resolution in ["25kb","100kb"] :
            if resolution in file :
                df_file = pd.read_csv(file,sep = ' ',header = None) #get the SB1 file
                df_file["chrname"] = df_file[0] + df_file[1].astype(str) #transform chr x to chrx
                df_file["comp"] = df_file[4] #get the comp number
                df_file = df_file[["chrname","comp"]] #because SB3 type file is only chrname and comp
                chr_values = pd.unique(df_file.chrname) #get the chr values
                grouped = df_file.groupby(df_file.chrname) #to split according to chr name
                for chr in chr_values :
                    split_df = grouped.get_group(chr)
                    # Swap the encodings: SB1's -1 (filtered) <-> SB3's 0, and vice versa.
                    split_df.comp = split_df.comp.replace([-1.0,0.0],[0.0,-1.0]) ## Change the format of filtered and B compartment bins
                    if not os.path.exists(os.path.join(path_parent,"SB1_converted_SB3",cell_type,resolution)): #Create folder if not exists
                        os.makedirs(os.path.join(path_parent,"SB1_converted_SB3",cell_type,resolution))
                    filename = os.path.join(path_parent,"SB1_converted_SB3",cell_type,resolution,chr + "_" + resolution + "_comp.txt")
                    split_df.comp.to_csv(filename,header = False, index = False) #create the files corresponding to our metric
###################################################Convert SB3 results to SB1 results##########################################
def SB3toSB1(path):
    """
    A pipeline to convert SB3 generated compartments files into SB1 format.

    Keyword arguments :
    path -- the path containing the folder in which there are the files containing SB1 results

    Returns :
    all the converted SB3 results in SB1 format in the "SB3_converted_SB1" folder
    """
    files_results = get_files_cluster.getfiles(path,"comp") #get files inside the path given
    for resolution in ["25kb","100kb"] : ## Because those are intrachromosomal results
        for cell_type in os.listdir(os.path.join(path_parent,folder_results)): ## adapt if not all cell types are present
            if os.path.isdir(os.path.join(path_parent,folder_results,cell_type)):
                list_df = []
                for chr in list_chr : ## List all the chromosomes
                    for file_results in files_results :
                        # find the good corresponding file to chr,cell_type and results
                        if chr in file_results and cell_type in file_results and resolution in file_results :
                            file_df = pd.DataFrame()
                            # Transformation into a SB1 type file : chr x start end comp
                            lresults = genfromtxt(file_results, delimiter='\n')
                            file_df["comp"] = lresults
                            file_df["chromosome"] = ["chr" for i in range(len(lresults))]
                            file_df["chrnum"] = [chr.replace("chr","") for i in range(len(lresults))]
                            #According to resolution, create the start and end bins
                            if resolution == "100kb" :
                                file_df["start"] = [100000.0*x for x in file_df.index.tolist()]
                            else :
                                file_df["start"] = [25000.0*x for x in file_df.index.tolist()]
                            if resolution == "100kb" :
                                file_df["end"] = [100000.0*(x+1) for x in file_df.index.tolist()]
                            else :
                                file_df["end"] = [25000.0*(x+1) for x in file_df.index.tolist()]
                            #Append to a list the dataframe corresponding to the chromosome
                            file_df_copy = file_df.copy()
                            file_df_copy = file_df_copy[["chromosome","chrnum","start","end","comp"]]
                            # Swap encodings 0 <-> -1; masks read the untouched
                            # original file_df so the two writes don't collide.
                            file_df_copy.comp[file_df.comp == 0.0] = -1.0
                            file_df_copy.comp[file_df.comp == -1.0] = 0.0
                            list_df.append(file_df_copy)
                #Concatenate all the dataframes with chromosomes of the same cell type
                res_df = pd.concat(list_df)
                res_df = res_df.sort_values(by = ["chrnum","start"])
                filename = os.path.join(path_parent,"SB3_converted_SB1",cell_type + "_" + resolution + "_COMPARTMENT" )
                res_df.to_csv(filename,header = False, index = False, sep = " ")
|
nilq/baby-python
|
python
|
# Exercice 3.3 : Nombres premiers
## Question 1
def divise(n : int, p : int) -> bool:
    """Precondition: n > 0 and p >= 0
    Return True if and only if n evenly divides p.
    """
    # n divides p exactly when dividing p by n leaves no remainder.
    remainder : int = p % n
    return remainder == 0
# Test suite
assert divise(1, 4)
assert divise(2, 4)
assert not divise(3, 4)
assert divise(4, 4)
assert not divise(4, 2)
assert not divise(17, 123)
assert divise(17, 357)
assert divise(21, 357)
## Question 2
## Réponse
### Sans sortie anticipée :
def est_premier(n : int) -> bool:
    """Precondition: n >= 0
    Return True if and only if n is prime.
    (Flag-based search over candidate divisors, no early exit.)
    """
    if n < 2:
        return False
    # No divisor found so far?
    prime : bool = True
    # Next candidate divisor to try.
    candidate : int = 2
    while prime and (candidate < n):
        # Inlined `divise(candidate, n)`: does candidate divide n?
        if n % candidate == 0:
            prime = False
        else:
            candidate = candidate + 1
    return prime
# Test suite
assert not est_premier(0)
assert not est_premier(1)
assert est_premier(2)
assert est_premier(17)
assert not est_premier(357)
### Avec sortie anticipée :
def est_premier2(n : int) -> bool:
    """Precondition: n >= 0
    Return True if and only if n is prime (early-exit variant).
    """
    if n < 2:
        return False
    # Scan candidate divisors 2 .. n-1, bailing out at the first hit.
    for candidate in range(2, n):
        if n % candidate == 0:  # inlined `divise(candidate, n)`
            return False
    return True
# Test suite
assert not est_premier2(0)
assert not est_premier2(1)
assert est_premier2(2)
assert est_premier2(17)
assert not est_premier2(357)
|
nilq/baby-python
|
python
|
import os
import re
import subprocess
import time
import urllib
import glanceclient
import keystoneauth1
import keystoneauth1.identity.v2 as keystoneauth1_v2
import keystoneauth1.session as keystoneauth1_session
import keystoneclient.v2_0.client as keystoneclient_v2
import keystoneclient.v3.client as keystoneclient_v3
import keystoneclient.auth.identity.v3 as keystone_id_v3
import keystoneclient.session as session
import neutronclient.v2_0.client as neutronclient
import novaclient.client as novaclient_client
import charms_openstack.charm as charm
import charms_openstack.adapters as adapters
import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.host as host
import charmhelpers.fetch as fetch
def install():
    """Install this charm's packages on the unit, delegating to the
    TempestCharm singleton.
    """
    charm_instance = TempestCharm.singleton
    charm_instance.install()
def render_configs(interfaces_list):
    """Using a list of interfaces, render the configs and, if they have
    changes, restart the services on the unit.

    :param interfaces_list: list of relation interface instances used to
        render the charm's templates.
    """
    # exist_ok avoids the check-then-create race of the previous
    # isdir()/makedirs() pair (still raises if the path exists as a file).
    os.makedirs(TempestCharm.TEMPEST_LOGDIR, exist_ok=True)
    TempestCharm.singleton.render_with_interfaces(interfaces_list)
    TempestCharm.singleton.assess_status()
def run_test(tox_target):
    """Run the given tox target (a tempest test run) via the TempestCharm
    singleton.

    :param tox_target: name of the tox environment to execute.
    """
    TempestCharm.singleton.run_test(tox_target)
def assess_status():
    """Re-assess and publish the unit's workload status via the
    TempestCharm singleton.
    """
    TempestCharm.singleton.assess_status()
class TempestAdminAdapter(adapters.OpenStackRelationAdapter):
    """Inspect relations and provide properties that can be used when
    rendering templates"""
    # Relation interface this adapter consumes.
    interface_type = "identity-admin"
    def __init__(self, relation):
        """Initialise a keystone client and collect user defined config"""
        # Client/session are created by init_keystone_client(); api_version
        # records which identity API the client ended up speaking ('2'/'3').
        self.kc = None
        self.keystone_session = None
        self.api_version = '2'
        super(TempestAdminAdapter, self).__init__(relation)
        self.init_keystone_client()
        self.uconfig = hookenv.config()
    @property
    def keystone_info(self):
        """Collection keystone information from keystone relation"""
        ks_info = self.relation.credentials()
        ks_info['default_credentials_domain_name'] = 'default'
        # Fall back to the adapter's current API version when the relation
        # does not advertise one.
        if ks_info.get('api_version'):
            ks_info['api_version'] = ks_info.get('api_version')
        else:
            ks_info['api_version'] = self.api_version
        if not ks_info.get('service_user_domain_name'):
            ks_info['service_user_domain_name'] = 'admin_domain'
        return ks_info
    @property
    def ks_client(self):
        """Lazily-initialised keystone client."""
        if not self.kc:
            self.init_keystone_client()
        return self.kc
    def keystone_auth_url(self, api_version=None):
        """Return the keystone auth URL for the given API version.

        :param api_version: '2' or '3'; defaults to the version advertised
            by the identity-admin relation.
        """
        if not api_version:
            api_version = self.keystone_info.get('api_version', '2')
        ep_suffix = {
            '2': 'v2.0',
            '3': 'v3'}[api_version]
        return '{}://{}:{}/{}'.format(
            'http',
            self.keystone_info['service_hostname'],
            self.keystone_info['service_port'],
            ep_suffix,
        )
    def resolve_endpoint(self, service_type, interface):
        """Look up a service endpoint URL in the keystone catalogue.

        :param service_type: catalogue service type, e.g. 'image'
        :param interface: endpoint interface, e.g. 'public'
        """
        if self.api_version == '2':
            ep = self.ks_client.service_catalog.url_for(
                service_type=service_type,
                endpoint_type='{}URL'.format(interface)
            )
        else:
            svc_id = self.ks_client.services.find(type=service_type).id
            ep = self.ks_client.endpoints.find(
                service_id=svc_id,
                interface=interface).url
        return ep
    def set_keystone_v2_client(self):
        """Point self.kc at a sessionless keystone v2 client."""
        self.keystone_session = None
        self.kc = keystoneclient_v2.Client(**self.admin_creds_v2)
    def set_keystone_v3_client(self):
        """Point self.kc at a session-backed keystone v3 client."""
        auth = keystone_id_v3.Password(**self.admin_creds_v3)
        self.keystone_session = session.Session(auth=auth)
        self.kc = keystoneclient_v3.Client(session=self.keystone_session)
    def init_keystone_client(self):
        """Initialise keystone client"""
        if self.kc:
            return
        # NOTE(review): lexicographic comparison of version strings; fine
        # for '2' vs '3' but would misbehave for multi-digit versions.
        if self.keystone_info.get('api_version', '2') > '2':
            self.set_keystone_v3_client()
            self.api_version = '3'
        else:
            # XXX Temporarily catching the Unauthorized exception to deal with
            # the case (pre-17.02) where the keystone charm maybe in v3 mode
            # without telling charms via the identity-admin relation
            try:
                self.set_keystone_v2_client()
                self.api_version = '2'
            except keystoneauth1.exceptions.http.Unauthorized:
                self.set_keystone_v3_client()
                self.api_version = '3'
        # Smoke-test the client; raises if the credentials are unusable.
        self.kc.services.list()
    def admin_creds_base(self, api_version):
        """Credentials common to both identity API versions."""
        return {
            'username': self.keystone_info['service_username'],
            'password': self.keystone_info['service_password'],
            'auth_url': self.keystone_auth_url(api_version=api_version)}
    @property
    def admin_creds_v2(self):
        """Keyword arguments for a keystone v2 client."""
        creds = self.admin_creds_base(api_version='2')
        creds['tenant_name'] = self.keystone_info['service_tenant_name']
        creds['region_name'] = self.keystone_info['service_region']
        return creds
    @property
    def admin_creds_v3(self):
        """Keyword arguments for a keystone v3 Password auth plugin."""
        creds = self.admin_creds_base(api_version='3')
        creds['project_name'] = self.keystone_info.get(
            'service_project_name',
            'admin')
        creds['user_domain_name'] = self.keystone_info.get(
            'service_user_domain_name',
            'admin_domain')
        creds['project_domain_name'] = self.keystone_info.get(
            'service_project_domain_name',
            'Default')
        return creds
    @property
    def ec2_creds(self):
        """Generate EC2 style tokens or return existing EC2 tokens
        @returns {'access_token' token1, 'secret_token': token2}
        """
        # NOTE(review): only populated in v2 mode; returns {} under v3.
        _ec2creds = {}
        if self.api_version == '2':
            current_creds = self.ks_client.ec2.list(self.ks_client.user_id)
            if current_creds:
                _ec2creds = current_creds[0]
            else:
                creds = self.ks_client.ec2.create(
                    self.ks_client.user_id,
                    self.ks_client.tenant_id)
                _ec2creds = {
                    'access_token': creds.access,
                    'secret_token': creds.secret}
        return _ec2creds
    @property
    def image_info(self):
        """Return image ids for the user-defined image names
        @returns {'image_id' id1, 'image_alt_id': id2}
        """
        image_info = {}
        if self.service_present('glance'):
            if self.keystone_session:
                glance_client = glanceclient.Client(
                    '2', session=self.keystone_session)
            else:
                glance_ep = self.resolve_endpoint('image', 'public')
                glance_client = glanceclient.Client(
                    '2', glance_ep, token=self.ks_client.auth_token)
            # Match configured image names against what glance actually has.
            for image in glance_client.images.list():
                if self.uconfig.get('glance-image-name') == image.name:
                    image_info['image_id'] = image.id
                    if self.uconfig.get('image-ssh-user'):
                        image_info['image_ssh_user'] = \
                            self.uconfig.get('image-ssh-user')
                if self.uconfig.get('glance-alt-image-name') == image.name:
                    image_info['image_alt_id'] = image.id
                    if self.uconfig.get('image-alt-ssh-user'):
                        image_info['image_alt_ssh_user'] = \
                            self.uconfig.get('image-alt-ssh-user')
        return image_info
    @property
    def network_info(self):
        """Return public network and router ids for user-defined router and
        network names
        @returns {'public_network_id' id1, 'router_id': id2}
        """
        network_info = {}
        if self.service_present('neutron'):
            if self.keystone_session:
                neutron_client = neutronclient.Client(
                    session=self.keystone_session)
            else:
                neutron_ep = self.ks_client.service_catalog.url_for(
                    service_type='network',
                    endpoint_type='publicURL')
                neutron_client = neutronclient.Client(
                    endpoint_url=neutron_ep,
                    token=self.ks_client.auth_token)
            # Missing resources are logged, not fatal: tempest.conf is
            # rendered with whatever could be resolved.
            routers = neutron_client.list_routers(
                name=self.uconfig['router-name'])
            if len(routers['routers']) == 0:
                hookenv.log("Router not found")
            else:
                router = routers['routers'][0]
                network_info['router_id'] = router['id']
            networks = neutron_client.list_networks(
                name=self.uconfig['network-name'])
            if len(networks['networks']) == 0:
                hookenv.log("network not found")
            else:
                network = networks['networks'][0]
                network_info['public_network_id'] = network['id']
            networks = neutron_client.list_networks(
                name=self.uconfig['floating-network-name'])
            if len(networks['networks']) == 0:
                hookenv.log("Floating network name not found")
            else:
                network_info['floating_network_name'] = \
                    self.uconfig['floating-network-name']
        return network_info
    def service_present(self, service):
        """Check if a given service type is registered in the catalogue
        :params service: string Service type
        @returns Boolean: True if service is registered
        """
        return service in self.get_present_services()
    def get_nova_client(self):
        """Return a nova client, creating a keystone v2 session first if
        one is not already available."""
        if not self.keystone_session:
            auth = keystoneauth1_v2.Password(
                auth_url=self.keystone_auth_url(),
                username=self.keystone_info['service_username'],
                password=self.keystone_info['service_password'],
                tenant_name=self.keystone_info['service_tenant_name'])
            self.keystone_session = keystoneauth1_session.Session(auth=auth)
        return novaclient_client.Client(
            2, session=self.keystone_session)
    @property
    def compute_info(self):
        """Return flavor ids for user-defined flavors
        @returns {'flavor_id' id1, 'flavor_alt_id': id2}
        """
        compute_info = {}
        if self.service_present('nova'):
            nova_client = self.get_nova_client()
            nova_ep = self.resolve_endpoint('compute', 'public')
            url = urllib.parse.urlparse(nova_ep)
            # Scheme + host only, e.g. 'http://nova.example.com' (port dropped).
            compute_info['nova_base'] = '{}://{}'.format(
                url.scheme,
                url.netloc.split(':')[0])
            for flavor in nova_client.flavors.list():
                if self.uconfig['flavor-name'] == flavor.name:
                    compute_info['flavor_id'] = flavor.id
                if self.uconfig['flavor-alt-name'] == flavor.name:
                    compute_info['flavor_alt_id'] = flavor.id
        return compute_info
    def get_present_services(self):
        """Query keystone catalogue for a list for registered services
        @returns [svc1, svc2, ...]: List of registered services
        """
        services = [svc.name
                    for svc in self.ks_client.services.list()
                    if svc.enabled]
        return services
    @property
    def service_info(self):
        """Assemble a list of services tempest should tests
        Compare the list of keystone registered services with the services the
        user has requested be tested. If in 'auto' mode test all services
        registered in keystone.
        @returns [svc1, svc2, ...]: List of services to test
        """
        service_info = {}
        # NOTE(review): 'neutron' appears twice in this candidate list;
        # harmless (membership tests only) but probably unintended.
        tempest_candidates = ['ceilometer', 'cinder', 'glance', 'heat',
                              'horizon', 'ironic', 'neutron', 'nova',
                              'sahara', 'swift', 'trove', 'zaqar', 'neutron']
        present_svcs = self.get_present_services()
        # If not running in an action context asssume auto mode
        try:
            action_args = hookenv.action_get()
        except Exception:
            action_args = {'service-whitelist': 'auto'}
        if action_args['service-whitelist'] == 'auto':
            white_list = []
            for svc in present_svcs:
                if svc in tempest_candidates:
                    white_list.append(svc)
        else:
            white_list = action_args['service-whitelist']
        # Render every candidate as the strings 'true'/'false' for the
        # tempest.conf template.
        for svc in tempest_candidates:
            if svc in white_list:
                service_info[svc] = 'true'
            else:
                service_info[svc] = 'false'
        return service_info
class TempestAdapters(adapters.OpenStackRelationAdapters):
    """
    Adapters class for the Tempest charm.
    """
    # Map the identity-admin relation onto the custom adapter above so
    # templates can use its keystone-derived properties.
    relation_adapters = {
        'identity_admin': TempestAdminAdapter,
    }
    def __init__(self, relations):
        """Wire relations up with the Tempest configuration adapter."""
        super(TempestAdapters, self).__init__(
            relations,
            options=TempestConfigurationAdapter)
class TempestConfigurationAdapter(adapters.ConfigurationAdapter):
    """
    Manipulate user supplied config as needed
    """
    def __init__(self):
        # No extra processing currently needed; defer to the base adapter.
        super(TempestConfigurationAdapter, self).__init__()
class TempestCharm(charm.OpenStackCharm):
    """Charm class driving tempest runs against a deployed cloud."""
    release = 'liberty'
    name = 'tempest'
    required_relations = ['identity-admin']
    """Directories and files used for running tempest"""
    TEMPEST_ROOT = '/var/lib/tempest'
    TEMPEST_LOGDIR = TEMPEST_ROOT + '/logs'
    TEMPEST_CONF = TEMPEST_ROOT + '/tempest.conf'
    """pip.conf for proxy settings etc"""
    PIP_CONF = '/root/.pip/pip.conf'
    """List of packages charm should install
    XXX The install hook is currently installing most packages ahead of
    this because modules like keystoneclient are needed at load time
    """
    packages = [
        'git', 'testrepository', 'subunit', 'python-nose', 'python-lxml',
        'python-boto', 'python-junitxml', 'python-subunit',
        'python-testresources', 'python-oslotest', 'python-stevedore',
        'python-cinderclient', 'python-glanceclient', 'python-heatclient',
        'python-keystoneclient', 'python-neutronclient', 'python-novaclient',
        'python-swiftclient', 'python-ceilometerclient', 'openvswitch-test',
        'python3-cinderclient', 'python3-glanceclient', 'python3-heatclient',
        'python3-keystoneclient', 'python3-neutronclient',
        'python3-novaclient', 'python3-swiftclient',
        'python3-ceilometerclient', 'openvswitch-common', 'libffi-dev',
        'libssl-dev', 'python-dev', 'python-cffi'
    ]
    """Use the Tempest specific adapters"""
    adapters_class = TempestAdapters
    """Tempest has no running services so no services need restarting on
    config file change
    """
    restart_map = {
        TEMPEST_CONF: [],
        PIP_CONF: [],
    }
    @property
    def all_packages(self):
        """Return packages plus the release-appropriate tox package.

        NOTE(review): lexicographic version comparison; adequate for the
        Ubuntu releases this charm targets.
        """
        _packages = self.packages[:]
        if host.lsb_release()['DISTRIB_RELEASE'] > '14.04':
            _packages.append('tox')
        else:
            _packages.append('python-tox')
        return _packages
    def setup_directories(self):
        """Ensure the tempest root and log directories exist."""
        for tempest_dir in [self.TEMPEST_ROOT, self.TEMPEST_LOGDIR]:
            if not os.path.exists(tempest_dir):
                os.mkdir(tempest_dir)
    def setup_git(self, branch, git_dir):
        """Clone tempest and symlink in rendered tempest.conf"""
        conf = hookenv.config()
        if not os.path.exists(git_dir):
            git_url = conf['tempest-source']
            fetch.install_remote(str(git_url), dest=str(git_dir),
                                 branch=str(branch), depth=str(1))
        conf_symlink = git_dir + '/tempest/etc/tempest.conf'
        if not os.path.exists(conf_symlink):
            os.symlink(self.TEMPEST_CONF, conf_symlink)
    def execute_tox(self, run_dir, logfile, tox_target):
        """Trigger tempest run through tox setting proxies if needed"""
        env = os.environ.copy()
        conf = hookenv.config()
        if conf.get('http-proxy'):
            env['http_proxy'] = conf['http-proxy']
        if conf.get('https-proxy'):
            env['https_proxy'] = conf['https-proxy']
        cmd = ['tox', '-e', tox_target]
        # 'with' guarantees the log handle is flushed and closed even if
        # the subprocess call raises (the original leaked the handle).
        with open(logfile, "w") as f:
            subprocess.call(cmd, cwd=run_dir, stdout=f, stderr=f, env=env)
    def get_tempest_files(self, branch_name):
        """Prepare tempest files and directories
        @return git_dir, logfile, run_dir
        """
        log_time_str = time.strftime("%Y%m%d%H%M%S", time.gmtime())
        git_dir = '{}/tempest-{}'.format(self.TEMPEST_ROOT, branch_name)
        logfile = '{}/run_{}.log'.format(self.TEMPEST_LOGDIR, log_time_str)
        run_dir = '{}/tempest'.format(git_dir)
        return git_dir, logfile, run_dir
    def parse_tempest_log(self, logfile):
        """Read tempest logfile and return summary as dict
        @return dict: Dictionary of summary data
        """
        summary = {}
        with open(logfile, 'r') as tempest_log:
            # Only lines between "Totals" and "Worker Balance" belong to
            # the summary section.
            summary_line = False
            for line in tempest_log:
                if line.strip() == "Totals":
                    summary_line = True
                if line.strip() == "Worker Balance":
                    summary_line = False
                if summary_line:
                    # Match lines like: ' - Unexpected Success: 0'
                    matchObj = re.match(
                        r'(.*)- (.*?):\s+(.*)', line, re.M | re.I)
                    if matchObj:
                        key = matchObj.group(2)
                        key = key.replace(' ', '-').replace(':', '').lower()
                        summary[key] = matchObj.group(3)
        return summary
    def run_test(self, tox_target):
        """Run smoke tests"""
        action_args = hookenv.action_get()
        branch_name = action_args['branch']
        git_dir, logfile, run_dir = self.get_tempest_files(branch_name)
        self.setup_directories()
        self.setup_git(branch_name, git_dir)
        self.execute_tox(run_dir, logfile, tox_target)
        action_info = self.parse_tempest_log(logfile)
        action_info['tempest-logfile'] = logfile
        hookenv.action_set(action_info)
class TempestCharmRocky(TempestCharm):
    # From Rocky onwards only the Python 3 client libraries are installed
    # and the Python 2 equivalents are purged.
    release = 'rocky'
    packages = [
        'git', 'testrepository', 'subunit', 'python3-nose', 'python3-lxml',
        'python3-boto', 'python3-junitxml', 'python3-subunit',
        'python3-testresources', 'python3-oslotest', 'python3-stevedore',
        'python3-cinderclient', 'python3-glanceclient', 'python3-heatclient',
        'python3-keystoneclient', 'python3-neutronclient',
        'python3-novaclient', 'python3-swiftclient',
        'python3-ceilometerclient', 'openvswitch-test', 'openvswitch-common',
        'libffi-dev', 'libssl-dev', 'python3-dev', 'python3-cffi'
    ]
    # Python 2 packages removed on upgrade to this release.
    purge_packages = [
        'python-nose', 'python-lxml', 'python-boto', 'python-junitxml',
        'python-subunit', 'python-testresources', 'python-oslotest',
        'python-stevedore', 'python-cinderclient', 'python-glanceclient',
        'python-heatclient', 'python-keystoneclient', 'python-neutronclient',
        'python-novaclient', 'python-swiftclient', 'python-ceilometerclient',
        'python-dev', 'python-cffi'
    ]
    python_version = 3
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from flask import Flask, abort
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from config import basedir, UPLOAD_FOLDER
#from flask.ext.mail import Mail
# Create the Flask application and load its settings from config.py.
theapp = Flask(__name__)
theapp.config.from_object('config')
#mail = Mail(theapp)
# Flask extensions bound to the application instance.
bootstrap = Bootstrap(theapp)
db = SQLAlchemy(theapp)
# Imported last so views/models can import `theapp` and `db` without a
# circular-import failure.
from app import views, models
|
nilq/baby-python
|
python
|
import Sofa
import SofaPython.Tools
import SofaTest
def createScene(node):
    """Scene entry point called by Sofa: attach this file's VerifController
    to *node* as a PythonScriptController.
    """
    node.createObject('PythonScriptController', filename=__file__, classname='VerifController')
class VerifController(SofaTest.Controller):
    """Regression check: creating then removing a child node during
    initGraph must not leave a dangling node for later init."""
    def initGraph(self, node):
        """Create and immediately remove a temporary child of *node*."""
        Sofa.msg_info("initGraph ENTER")
        child = node.createChild("temporary_node")
        # FROM HERE, 'child' was added to the nodes to init in ScriptEnvironment, but it is not anymore
        node.removeChild( child )
        # 'child' is no longer in the scene graph but still was in ScriptEnvironment, but it is not anymore
        Sofa.msg_info("initGraph EXIT")
        # Coming back to SofaPython:
        # Nobody is no longer pointing to 'child', it will be deleted (smart pointer).
        # ScriptEnvironment was calling 'init' to an invalid pointer or
        # at least to a node detached from the scene graph,
        # but it does not anymore.
        # This could bring tons of potential troubles (including crashes).
    def onEndAnimationStep(self, dt):
        """Report success once the first animation step completes."""
        Sofa.msg_info("onEndAnimationStep")
        self.sendSuccess()
|
nilq/baby-python
|
python
|
"""
This application demonstrates how to create a Tag Template in Data Catalog,
loading its information from Google Sheets.
"""
import argparse
import logging
import re
import stringcase
import unicodedata
from google.api_core import exceptions
from google.cloud import datacatalog
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client import service_account
# Location used for all Data Catalog resources created by this script.
_CLOUD_PLATFORM_REGION = 'us-central1'
# Sentinel "type" marking a spreadsheet field as custom/multivalued.
_CUSTOM_MULTIVALUED_TYPE = 'MULTI'
_DATA_CATALOG_BOOL_TYPE = 'BOOL'
_DATA_CATALOG_ENUM_TYPE = 'ENUM'
# Field types Data Catalog supports natively.
_DATA_CATALOG_NATIVE_TYPES = ['BOOL', 'DOUBLE', 'ENUM', 'STRING', 'TIMESTAMP']
# Log template: sheet type, spreadsheet id, sheet name.
_LOOKING_FOR_SHEET_LOG_FORMAT = 'Looking for {} sheet {} | {}...'
class TemplateMaker:
    """Create Data Catalog Tag Templates from a Google Sheets definition."""
    def __init__(self):
        self.__sheets_reader = GoogleSheetsReader()
        self.__datacatalog_facade = DataCatalogFacade()
    def run(self, spreadsheet_id, project_id, template_id, display_name, delete_existing=False):
        """Read the master sheet and create the native and multivalued
        templates it describes.

        :param delete_existing: when True, drop pre-existing templates and
            recreate them from the spreadsheet.
        """
        master_template_fields = self.__sheets_reader.read_master(
            spreadsheet_id, stringcase.spinalcase(template_id))
        self.__process_native_fields(spreadsheet_id, project_id, template_id, display_name,
                                     master_template_fields, delete_existing)
        self.__process_custom_multivalued_fields(spreadsheet_id, project_id, template_id,
                                                 display_name, master_template_fields,
                                                 delete_existing)
    def __process_native_fields(self, spreadsheet_id, project_id, template_id, display_name,
                                master_template_fields, delete_existing_template):
        # Fields whose type Data Catalog supports directly become one
        # template; ENUM fields pull their allowed values from a helper sheet.
        native_fields = self.__filter_fields_by_types(master_template_fields,
                                                      _DATA_CATALOG_NATIVE_TYPES)
        StringFormatter.format_elements_to_snakecase(native_fields, 0)
        enums_names = {}
        for field in native_fields:
            if not field[2] == _DATA_CATALOG_ENUM_TYPE:
                continue
            names_from_sheet = self.__sheets_reader.read_helper(spreadsheet_id,
                                                                stringcase.spinalcase(field[0]))
            enums_names[field[0]] = [name[0] for name in names_from_sheet]
        template_name = datacatalog.DataCatalogClient.tag_template_path(
            project_id, _CLOUD_PLATFORM_REGION, template_id)
        if delete_existing_template:
            self.__datacatalog_facade.delete_tag_template(template_name)
        if not self.__datacatalog_facade.tag_template_exists(template_name):
            self.__datacatalog_facade.create_tag_template(project_id, template_id, display_name,
                                                          native_fields, enums_names)
    def __process_custom_multivalued_fields(self, spreadsheet_id, project_id, template_id,
                                            display_name, master_template_fields,
                                            delete_existing_template):
        # Each MULTI field becomes its own template whose BOOL fields are
        # the values listed in the field's helper sheet.
        multivalued_fields = self.__filter_fields_by_types(master_template_fields,
                                                           [_CUSTOM_MULTIVALUED_TYPE])
        StringFormatter.format_elements_to_snakecase(multivalued_fields, 0)
        for field in multivalued_fields:
            try:
                values_from_sheet = self.__sheets_reader.read_helper(
                    spreadsheet_id, stringcase.spinalcase(field[0]))
                fields = [(StringFormatter.format_to_snakecase(value[0]), value[0],
                           _DATA_CATALOG_BOOL_TYPE) for value in values_from_sheet]
            except errors.HttpError as err:
                # HTTP 400 here means the helper sheet does not exist.
                if err.resp.status in [400]:
                    logging.info('NOT FOUND. Ignoring...')
                    continue  # Ignore creating a new template representing the multivalued field
                else:
                    raise
            custom_template_id = f'{template_id}_{field[0]}'
            custom_display_name = f'{display_name} - {field[1]}'
            template_name = datacatalog.DataCatalogClient.tag_template_path(
                project_id, _CLOUD_PLATFORM_REGION, custom_template_id)
            if delete_existing_template:
                self.__datacatalog_facade.delete_tag_template(template_name)
            if not self.__datacatalog_facade.tag_template_exists(template_name):
                self.__datacatalog_facade.create_tag_template(project_id, custom_template_id,
                                                              custom_display_name, fields)
    @classmethod
    def __filter_fields_by_types(cls, fields, valid_types):
        # Each field is a (id, display_name, type) triple; keep matching types.
        return [field for field in fields if field[2] in valid_types]
"""
Input reader
========================================
"""
class GoogleSheetsReader:
    """Read Tag Template definitions out of Google Sheets."""
    def __init__(self):
        self.__sheets_facade = GoogleSheetsFacade()
    def read_master(self, spreadsheet_id, sheet_name, values_per_line=3):
        """Read a master sheet (three values per line by default)."""
        return self.__read(spreadsheet_id, sheet_name, 'master', values_per_line)
    def read_helper(self, spreadsheet_id, sheet_name, values_per_line=1):
        """Read a helper sheet (one value per line by default)."""
        return self.__read(spreadsheet_id, sheet_name, 'helper', values_per_line)
    def __read(self, spreadsheet_id, sheet_name, sheet_type, values_per_line):
        """
        Read the requested values from each line and store them into a list.
        :param spreadsheet_id: Spreadsheet ID.
        :param sheet_name: Sheet name.
        :param sheet_type: Sheet type {'master', 'helper'}.
        :param values_per_line: Number of consecutive values to be read from each line.
        """
        logging.info(_LOOKING_FOR_SHEET_LOG_FORMAT.format(sheet_type, spreadsheet_id, sheet_name))
        sheet_data = self.__sheets_facade.read_sheet(spreadsheet_id, sheet_name, values_per_line)
        logging.info(f'Reading spreadsheet {spreadsheet_id} | {sheet_name}...')
        rows = sheet_data.get('valueRanges')[0].get('values')
        parsed = [[cells[position].strip() for position in range(values_per_line)]
                  for cells in rows]
        logging.info('DONE')
        # The first line is usually used for headers, so it's discarded.
        return parsed[1:]
"""
API communication classes
========================================
"""
class DataCatalogFacade:
    """
    Manage Templates by communicating to Data Catalog's API.
    """
    def __init__(self):
        # Initialize the API client.
        self.__datacatalog = datacatalog.DataCatalogClient()
    def create_tag_template(self,
                            project_id,
                            template_id,
                            display_name,
                            fields_descriptors,
                            enums_names=None):
        """Create a Tag Template.

        :param fields_descriptors: sequence of (field_id, display_name,
            field_type) triples.
        :param enums_names: dict mapping ENUM field ids to their allowed
            display names; only consulted for ENUM fields.
        """
        location = datacatalog.DataCatalogClient.common_location_path(
            project_id, _CLOUD_PLATFORM_REGION)
        tag_template = datacatalog.TagTemplate()
        tag_template.display_name = display_name
        for descriptor in fields_descriptors:
            field = datacatalog.TagTemplateField()
            field.display_name = descriptor[1]
            field_id = descriptor[0]
            field_type = descriptor[2]
            if not field_type == _DATA_CATALOG_ENUM_TYPE:
                field.type_.primitive_type = datacatalog.FieldType.PrimitiveType[field_type]
            else:
                # ENUM fields carry allowed values instead of a primitive type.
                for enum_name in enums_names[field_id]:
                    enum_value = datacatalog.FieldType.EnumType.EnumValue()
                    enum_value.display_name = enum_name
                    field.type_.enum_type.allowed_values.append(enum_value)
            tag_template.fields[field_id] = field
        created_tag_template = self.__datacatalog.create_tag_template(parent=location,
                                                                      tag_template_id=template_id,
                                                                      tag_template=tag_template)
        logging.info(f'===> Template created: {created_tag_template.name}')
    def delete_tag_template(self, name):
        """Delete a Tag Template."""
        try:
            self.__datacatalog.delete_tag_template(name=name, force=True)
            logging.info(f'===> Template deleted: {name}')
        except exceptions.PermissionDenied:
            # Deletion is best-effort: missing templates surface here too.
            pass
    def tag_template_exists(self, name):
        """Check if a Tag Template with the provided name already exists."""
        try:
            self.__datacatalog.get_tag_template(name=name)
            return True
        except exceptions.PermissionDenied:
            return False
class GoogleSheetsFacade:
    """
    Access spreadsheets data by communicating to the Google Sheets API.
    """
    def __init__(self):
        # Initialize the API client.
        self.__service = discovery.build(
            serviceName='sheets',
            version='v4',
            credentials=service_account.ServiceAccountCredentials.get_application_default(),
            cache_discovery=False)
    def read_sheet(self, spreadsheet_id, sheet_name, values_per_line):
        """Fetch columns A..N of *sheet_name*, where N is derived from
        values_per_line (1 -> 'A', 2 -> 'B', ...)."""
        return self.__service.spreadsheets().values().batchGet(
            spreadsheetId=spreadsheet_id,
            ranges=f'{sheet_name}!A:{chr(ord("@") + values_per_line)}').execute()
"""
Tools & utilities
========================================
"""
class StringFormatter:
    """Helpers to normalise spreadsheet strings into snake_case ids."""
    @classmethod
    def format_elements_to_snakecase(cls, a_list, internal_index=None):
        """Snake-case list elements in place; when *internal_index* is given,
        each element is itself indexable and only that position is rewritten."""
        if internal_index is None:
            for position, element in enumerate(a_list):
                a_list[position] = cls.format_to_snakecase(element)
        else:
            for element in a_list:
                element[internal_index] = cls.format_to_snakecase(element[internal_index])
    @classmethod
    def format_to_snakecase(cls, string):
        """Strip accents/punctuation from *string* and return it snake_cased."""
        ascii_only = unicodedata.normalize('NFKD', string).encode('ASCII', 'ignore').decode()
        ascii_only = re.sub(r'[^a-zA-Z0-9]+', ' ', ascii_only)
        ascii_only = ascii_only.strip()
        if (' ' in ascii_only) or ascii_only.isupper():
            ascii_only = ascii_only.lower()
        else:
            ascii_only = stringcase.camelcase(ascii_only)  # FooBarBaz => fooBarBaz
        return stringcase.snakecase(ascii_only)  # foo-bar-baz => foo_bar_baz
"""
Main program entry point
========================================
"""
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Quieten the noisy Google client libraries.
    logging.getLogger('googleapiclient.discovery').setLevel(logging.ERROR)
    logging.getLogger('oauth2client.client').setLevel(logging.ERROR)
    logging.getLogger('oauth2client.transport').setLevel(logging.ERROR)
    # Command-line interface.
    parser = argparse.ArgumentParser(description='Load Tag Template from CSV')
    parser.add_argument('--template-id', help='the template ID', required=True)
    parser.add_argument('--display-name', help='template\'s Display Name', required=True)
    parser.add_argument('--project-id',
                        help='GCP Project in which the Template will be created',
                        required=True)
    parser.add_argument('--spreadsheet-id', help='Google Spreadsheet ID', required=True)
    parser.add_argument(
        '--delete-existing',
        action='store_true',
        help='delete existing Templates and recreate them with the provided metadata')
    args = parser.parse_args()
    TemplateMaker().run(args.spreadsheet_id, args.project_id, args.template_id, args.display_name,
                        args.delete_existing)
|
nilq/baby-python
|
python
|
import cloudpassage
import sys
import os
import pytest
import datetime
import time
import platform
sys.path.append(os.path.join(os.path.dirname(__file__), '../../', ''))
import lib.validate as validate
class TestUnitValidate:
    """Unit tests for the lib.validate helper functions."""
    def test_validate_valid_time(self):
        """A well-formed ISO date passes validate_time."""
        accepted = True
        try:
            validate.validate_time("2016-08-20")
        except ValueError:
            accepted = False
        assert accepted
    def test_validate_invalid_time(self):
        """A malformed date string raises ValueError."""
        accepted = False
        try:
            validate.validate_time("foobar")
        except ValueError:
            accepted = True
        assert accepted
    def test_validate_valid_time_range(self):
        """Today's date is inside the allowed range."""
        accepted = True
        today = datetime.datetime.now().strftime("%Y-%m-%d")
        try:
            validate.validate_time_range(today)
        except ValueError:
            accepted = False
        assert accepted
    def test_validate_invalid_time_range(self):
        """A date 90 days back is outside the allowed range."""
        accepted = False
        today = datetime.datetime.now()
        date = (today - datetime.timedelta(days=90)).strftime("%Y-%m-%d")
        try:
            validate.validate_time_range(date)
        except ValueError:
            accepted = True
        assert accepted
    def test_validate_valid_batchsize(self):
        """A batch size of 10 is accepted."""
        accepted = True
        size = 10
        try:
            validate.batchsize(size)
        except ValueError:
            accepted = False
        assert accepted
    def test_validate_invalid_batchsize(self):
        """A batch size of 100 is rejected."""
        accepted = False
        size = 100
        try:
            validate.batchsize(size)
        except ValueError:
            accepted = True
        assert accepted
    def test_validate_valid_thread(self):
        """A thread count of 1 is accepted."""
        accepted = True
        thread = 1
        try:
            validate.thread(thread)
        except ValueError:
            accepted = False
        assert accepted
    def test_validate_invalid_str_thread(self):
        """A non-numeric thread value is rejected."""
        accepted = False
        thread = 'foobar'
        try:
            validate.thread(thread)
        except ValueError:
            accepted = True
        assert accepted
    def test_validate_invalid_count_thread(self):
        """An excessive thread count is rejected."""
        accepted = False
        thread = 10
        try:
            validate.thread(thread)
        except ValueError:
            accepted = True
        assert accepted
    def test_validate_operating_system(self):
        """operating_system() reports the current platform.

        Fixed: compare strings with ==/!= instead of the original
        `is`/`is not` identity checks -- identity of string literals is a
        CPython interning detail and emits SyntaxWarning on modern Python.
        """
        current_platform = platform.system()
        if current_platform != 'Windows':
            current_platform = 'linux'
        actual = validate.operating_system()
        assert current_platform == actual
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Evolve life in a landscape.
Life evolves alongside landscapes by biotic and abiotic processes under complex
dynamics at Earth's surface. Researchers who wish to explore these dynamics can
use this component as a tool for them to build landscape-life evolution models.
Landlab components, including SpeciesEvolver are designed to work with a shared
model grid. Researchers can build novel models using plug-and-play surface
process components to evolve the grid's landscape alongside the life tracked by
SpeciesEvolver. The simulated life evolves following customizable processes.
Component written by Nathan Lyons beginning August 2017.
"""
from collections import OrderedDict
import numpy as np
from pandas import DataFrame
from landlab import Component
from .record import Record
class SpeciesEvolver(Component):
    """Evolve life in a landscape.

    This component tracks ``Taxon`` objects as they evolve in a landscape. The
    component calls the evolutionary process methods of tracked ``Taxon``
    objects. ``Taxon`` are intended to be subclassed for unique behavior,
    attributes, and model approaches, including different implementations of
    evolutionary processes.

    The general workflow to use this component in a model is

    1. Instantiate the component.
    2. Instantiate taxa.
    3. Introduce taxa to SpeciesEvolver using the ``track_taxa`` method.
    4. Advance the component instance in time using ``run_one_step`` method.

    Taxa can be introduced at model onset and later time steps. Multiple types
    can be tracked by the same SpeciesEvolver instance.

    The taxon type, ``ZoneTaxon`` is distributed with SpeciesEvolver. The
    spatial aspect of ``ZoneTaxon`` macroevolutionary processes is determined
    using ``Zone`` objects. A ``ZoneController`` is used to create and manage
    zones as well as efficiently create multiple ZoneTaxon objects. See the
    documentation of ``ZoneController`` and ``ZoneTaxon`` for more information.
    SpeciesEvolver knows nothing about zones and their controller, meaning the
    concept of zones are not required for other taxon types.

    Model time and other variables can be viewed with the class attribute,
    ``record_data_frame``. Time is recorded to track the history of taxa
    lineages. The unit of time is not considered within the component other
    than the record, and can be thought of as in years or whatever unit is
    needed. Time is advanced with the ``dt`` parameter of the ``run_one_step``
    method.

    The geographic ranges of the taxa at the current model time are evaluated
    during the ``run_one_step`` method. Each taxon object determines if it
    persists or becomes extinct, and if it creates child ``Taxon`` objects.
    Metadata of all taxa introduced to the component can be viewed with the
    attribute, ``taxa_data_frame``.

    Taxa are automatically assigned unique taxon identifiers, ``tid``.
    Identifiers are used to reference and retrieve taxon objects. Identifiers
    are assigned in the order taxa are introduced to SpeciesEvolver.

    Examples
    --------
    The evolution of a lowland taxa lineage in response to mountain range
    formation is simulated using ZoneTaxon managed by ZoneController. Mountain
    range formation is forced without processes for simplicity in this example.

    Import modules used in the following examples.

    >>> from landlab import RasterModelGrid
    >>> from landlab.components import SpeciesEvolver
    >>> from landlab.components.species_evolution import ZoneController

    Create a model grid with mountain scale resolution. The elevation is
    equally low throughout the grid at model onset.

    >>> mg = RasterModelGrid((3, 7), 1000)
    >>> z = mg.add_ones('topographic__elevation', at='node')
    >>> z.reshape(mg.shape)
    array([[ 1., 1., 1., 1., 1., 1., 1.],
    [ 1., 1., 1., 1., 1., 1., 1.],
    [ 1., 1., 1., 1., 1., 1., 1.]])

    Instantiate the component with the grid as the first parameter.

    >>> se = SpeciesEvolver(mg)

    ZoneController requires a function that returns a mask of the total extent
    of taxa habitat. The mask is a boolean array where `True` values represent
    nodes that satisfy habitat conditions. Zone objects are not created here.
    The mask only maps the extent where taxa can exist. This function returns
    `True` where elevation is below 100, which is where the simulated lowland
    taxa of this model can inhabit.

    >>> def zone_func(grid):
    ...     return grid.at_node['topographic__elevation'] < 100

    Instantiate ZoneController with the grid and zone function. The initial
    zones are created at controller instantiation. In this example, one zone is
    created because all nodes of the zone mask are adjacent to each other.

    >>> zc = ZoneController(mg, zone_func)
    >>> len(zc.zones) == 1
    True

    Additional examples of controller usage are provided in ``ZoneController``
    documentation.

    The ``mask`` of the zone is True where the conditions of the zone function
    are met. All nodes of the grid are included because the elevation of each
    node is below 100. The ``zones`` attribute of ``ZoneController`` returns a
    list of the zones that currently exist in the model. Below we return the
    mask of the single zone by indexing this list.

    >>> zc.zones[0].mask
    array([ True, True, True, True, True, True, True, True, True,
    True, True, True, True, True, True, True, True, True,
    True, True, True], dtype=bool)

    Populate a taxon to the zone.

    >>> taxon = zc.populate_zones_uniformly(1)
    >>> se.track_taxa(taxon)

    The attribute, ``taxa_data_frame`` indicates only the one taxon exists
    because we populated each zone with one taxon, and only the one zone
    exists.

    >>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE
    pid type t_first t_final
    tid
    0 <NA> ZoneTaxon 0 <NA>

    The identifier of the taxon, ``tid`` is 0. The identifier of the taxon's
    parent, ``pid``, is '<NA>' because it does not have a parent taxon given
    that it was manually introduced using the ``track_taxa`` method. The taxon
    was introduced at time, ``t_first`` and time, ``t_final`` is '<NA>'
    because the taxon remains extant. See the documentation of this attribute
    for further explanation of data frame columns.

    Force a change in the zone mask to demonstrate component functionality.
    Here we begin a new time step where topography is uplifted by 200 that
    forms a ridge trending north-south in the center of the grid.

    >>> z[[3, 10, 17]] = 200
    >>> z.reshape(mg.shape)
    array([[ 1., 1., 1., 200., 1., 1., 1.],
    [ 1., 1., 1., 200., 1., 1., 1.],
    [ 1., 1., 1., 200., 1., 1., 1.]])

    The current elevation, the elevation following uplift, is represented here.
    ::

        - - - ^ - - -       elevation:  - 1
        - - - ^ - - -                   ^ 200
        - - - ^ - - -

    The updated zone mask is below.
    ::

        . . . x . . .       key:    . node in zone mask
        . . . x . . .               x node outside of zone mask
        . . . x . . .

    Run a step of both the ZoneController and SpeciesEvolver. Both are run to
    keep time in sync between the ``ZoneController`` and ``SpeciesEvolver``
    instances.

    >>> delta_time = 1000
    >>> zc.run_one_step(delta_time)
    >>> se.run_one_step(delta_time)

    Two zones exist following this time step.

    >>> len(zc.zones) == 2
    True

    An additional zone was created because the zone mask was not continuous.
    ::

        . . . ^ * * *       key:    . a zone
        . . . ^ * * *               * another zone
        . . . ^ * * *               ^ mountain range

    The split of the initial zone triggered speciation of taxon 1 by taxon 0.

    >>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE
    pid type t_first t_final
    tid
    0 <NA> ZoneTaxon 0 <NA>
    1 0 ZoneTaxon 1000 <NA>

    The phylogenetic tree of the simulated taxa is represented below. The
    number at the line tips are the taxa identifiers.
    ::

        0 ──────┬── 0
                │
                └── 1
        _________
        0    1000
          time

    The split of the initial zone into two zones at time 1000 triggered taxon 0
    to speciate. Taxon 0 occupies a zone on one side of the mountain range, and
    the child, taxon 1 occupies a zone on the other side. This outcome is the
    result of the evolutionary processes programmed within ``ZoneTaxon`` as
    well as the parameters used in this example (default values were used
    as optional parameters were not set). Different behavior can be achieved by
    subclassing ``ZoneTaxon`` or ``Taxon``.

    References
    ----------
    **Required Software Citation(s) Specific to this Component**

    Lyons, N.J., Albert, J.S., Gasparini, N.M. (2020). SpeciesEvolver: A
    Landlab component to evolve life in simulated landscapes. Journal of Open
    Source Software 5(46), 2066, https://doi.org/10.21105/joss.02066

    **Additional References**

    Albert, J.S., Schoolmaster Jr, D.R., Tagliacollo, V., Duke-Sylvester, S.M.
    (2016). Barrier displacement on a neutral landscape: Toward a theory of
    continental biogeography. Systematic Biology 66(2), 167–182.

    Lyons, N.J., Val, P., Albert, J.S., Willenbring, J.K., Gasparini, N.M., in
    review. Topographic controls on divide migration, stream capture, and
    diversification in riverine life. Earth Surface Dynamics.
    """

    _name = "SpeciesEvolver"

    _unit_agnostic = True

    # Landlab component field specification: this component produces a single
    # output field, the per-node count of extant taxa.
    _info = {
        "taxa__richness": {
            "dtype": int,
            "intent": "out",
            "optional": False,
            "units": "-",
            "mapping": "node",
            "doc": "The number of taxa at each node",
        }
    }

    # BibTeX entry consumed by Landlab's citation machinery; the string value
    # is used at runtime and must not be altered.
    _cite_as = """@article{lyons2020species,
    author = {Lyons, N.J. and Albert, J.S. and Gasparini, N.M.},
    title = {SpeciesEvolver: A Landlab component to evolve life in simulated landscapes},
    year = {2020},
    journal = {Journal of Open Source Software},
    volume = {5},
    number = {46},
    doi = {10.21105/joss.02066},
    url = {https://doi.org/10.21105/joss.02066}
    }"""
def __init__(self, grid, initial_time=0):
    """Instantiate SpeciesEvolver.

    Parameters
    ----------
    grid : ModelGrid
        A Landlab ModelGrid.
    initial_time : float, int, optional
        The starting model time, logged in the component record. The time
        unit is not interpreted by the component. Defaults to 0.
    """
    super().__init__(grid)

    # Time series of component variables; seeded with a zero taxa count.
    self._record = Record(initial_time)
    self._record.set_value("taxa", 0)

    # Tabular metadata for every taxon ever introduced. Each column gets its
    # own (distinct) list.
    self._taxa_data = OrderedDict(
        (column, []) for column in ("tid", "pid", "type", "t_first", "t_final")
    )

    # Taxon objects that are currently extant and tracked.
    self._taxon_objs = []

    # Output grid field: per-node count of extant taxa.
    _ = grid.add_zeros("taxa__richness", at="node", dtype=int, clobber=True)
@property
def record_data_frame(self):
    """A Pandas DataFrame of SpeciesEvolver variables over time.

    Each row is data of a model time step. The time of the step is recorded
    in the `time` column. `taxa` is the count of taxa extant at a time.
    Additional columns can be added and updated by SpeciesEvolver objects
    during the component ``run_one_step`` method. See documentation of Taxon
    objects for an explanation of these columns.

    The DataFrame is created from a dictionary associated with a
    SpeciesEvolver ``Record`` object. nan values in Pandas DataFrame force
    the column to become float values even when data are integers. The
    original value type is retained in the ``Record`` object.
    """
    return self._record.data_frame
@property
def taxa_data_frame(self):
    """A Pandas DataFrame of taxa metadata.

    Each row is the metadata of one taxon. ``tid`` (the index) is the taxon
    identifier assigned when SpeciesEvolver begins tracking the taxon.
    ``pid`` is the tid of the taxon's parent; `<NA>` indicates no parent.
    ``type`` is the ``Taxon`` subclass name. ``t_first`` is the model time
    the taxon was added to SpeciesEvolver, and ``t_final`` is the model time
    the taxon was recognized as extinct (`<NA>` while extant). Some taxon
    types append further columns; see their documentation.

    The DataFrame is built fresh from the component's internal data
    structure on every access.
    """
    metadata = self._taxa_data
    value_columns = [key for key in metadata if key != "tid"]

    df = DataFrame(metadata, columns=value_columns, index=metadata["tid"])
    df.index.name = "tid"

    # pandas promotes integer columns containing nan to float; restore
    # nullable integer dtype so missing values display as <NA>.
    df["pid"] = df["pid"].astype("Int64")
    final_times = metadata["t_final"]
    if all(isinstance(t, int) for t in final_times if not np.isnan(t)):
        df["t_final"] = df["t_final"].astype("Int64")

    return df
def run_one_step(self, dt):
    """Update the taxa for a single time step.

    This method advances the model time in the component record, calls the
    evolve method of taxa extant at the current time, and updates the
    variables in the record and taxa dataframes.

    Parameters
    ----------
    dt : float
        The model time step duration. Time in the record is advanced by the
        value of this parameter.
    """
    record = self._record
    record.advance_time(dt)

    # Create a dictionary of the taxa to update at the current model time.
    # Keys are objects of extant taxa. Values are booleans indicating if
    # stages remain for respective taxa.
    time_dict = OrderedDict.fromkeys(self._taxon_objs, True)

    # Iteratively call taxa ``_evolve`` method until all stages of all taxa
    # have run.
    stage = 0
    while any(time_dict.values()):
        # Run evolution stage.
        stage_dict = OrderedDict([])
        # Only taxa whose flag is still True evolve in this stage.
        evolving_taxa = filter(time_dict.get, time_dict)
        for taxon in evolving_taxa:
            # Run evolution stage of taxon with remaining stages.
            stages_remain, taxon_children = taxon._evolve(dt, stage, record)
            if taxon_children:
                # Children join the update set and inherit the parent's
                # remaining-stages flag for subsequent stages of this step.
                stage_dict.update(
                    OrderedDict.fromkeys(taxon_children, stages_remain)
                )
            # A taxon continues evolving only while stages remain AND it is
            # still extant.
            stage_dict[taxon] = stages_remain and taxon.extant
        time_dict.update(stage_dict)
        stage += 1

    # Record extinctions and newly created children in the taxa metadata.
    self._update_taxa_data(time_dict.keys())
def track_taxa(self, taxa):
    """Add taxa to be tracked over time by SpeciesEvolver.

    The taxon/taxa are introduced at the latest time in the record and are
    tracked during subsequent model times. Each taxon is assigned an
    identifier and can then be viewed in ``taxa_data_frame``.

    Parameters
    ----------
    taxa : Taxon or list of Taxon
        The taxa to introduce.

    Examples
    --------
    ZoneTaxon are used to demonstrate this method.

    Import modules used in the following examples.

    >>> from landlab import RasterModelGrid
    >>> from landlab.components import SpeciesEvolver
    >>> from landlab.components.species_evolution import ZoneController

    Create a model grid with flat topography.

    >>> mg = RasterModelGrid((3, 7), 1000)
    >>> z = mg.add_ones('topographic__elevation', at='node')

    Instantiate SpeciesEvolver and a ZoneController. Instantiate the
    latter with a function that masks the low elevation zone extent. Only
    one zone is created.

    >>> se = SpeciesEvolver(mg)
    >>> def zone_func(grid):
    ...     return grid.at_node['topographic__elevation'] < 100
    >>> zc = ZoneController(mg, zone_func)
    >>> len(zc.zones) == 1
    True

    Track the taxon of the one zone.

    >>> taxon = zc.populate_zones_uniformly(1)
    >>> se.track_taxa(taxon)

    The one taxon is now tracked by SpeciesEvolver as indicated by the taxa
    DataFrame.

    >>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE
    pid type t_first t_final
    tid
    0 <NA> ZoneTaxon 0 <NA>
    """
    # Normalize a single taxon to a list, then delegate to the shared
    # updater, which assigns identifiers and records metadata.
    introduced = taxa if isinstance(taxa, list) else [taxa]
    self._update_taxa_data(introduced)
def _update_taxa_data(self, taxa_at_time):
    """Update the taxa data structure, set identifiers, and taxa statistics.

    This method sets identifiers and metadata for the newly introduced
    taxa. For the previously introduced, this method updates the
    ``t_final`` value of the taxa metadata when a taxon is recognized as
    extinct.

    Parameters
    ----------
    taxa_at_time : list of Taxon
        The taxa at the current model time.
    """
    time = self._record.latest_time
    data = self._taxa_data
    objs = self._taxon_objs

    # Partition the input into taxa already tracked and newly introduced.
    t_recorded = self._taxon_objs
    t_introduced = [taxon for taxon in taxa_at_time if taxon in t_recorded]
    t_new = [taxon for taxon in taxa_at_time if taxon not in t_recorded]

    # Update previously introduced taxa.
    for taxon in t_introduced:
        if not taxon.extant:
            # Taxon became extinct this step: stamp its final time and stop
            # tracking its object.
            idx = data["tid"].index(taxon.tid)
            data["t_final"][idx] = time
            objs.remove(taxon)

    # Set the data of new taxa.
    for taxon in t_new:
        # Set identifier: one greater than the largest assigned so far.
        if data["tid"]:
            taxon._tid = max(data["tid"]) + 1
        else:
            taxon._tid = 0

        # Append taxon data.
        data["tid"].append(taxon.tid)
        if taxon.parent is not None:
            data["pid"].append(taxon.parent.tid)
        else:
            # nan marks "no parent"; shown as <NA> in ``taxa_data_frame``.
            data["pid"].append(np.nan)
        data["type"].append(type(taxon).__name__)
        data["t_first"].append(time)
        if taxon.extant:
            # nan marks "still extant" until extinction is recorded.
            data["t_final"].append(np.nan)
            objs.append(taxon)
        else:
            data["t_final"].append(time)

    # Update taxa stats in the record and the grid richness field.
    self._record.set_value("taxa", len(objs))
    self._grid.at_node["taxa__richness"] = self._get_taxa_richness_map()
def get_extant_taxon_objects(self, tids=np.nan, ancestor=np.nan, time=np.nan):
    """Get extant taxon objects filtered by parameters.

    This method returns all taxon objects tracked by the component when no
    optional parameters are included. The objects returned can be limited
    using one or more parameters.

    Parameters
    ----------
    tids : list of int, optional
        The taxa with these identifiers will be returned. A list is
        returned even if only one object is contained within the list. By
        default, when `tids` is not specified, extant taxa with any
        identifier can be returned.
    ancestor : int, optional
        Limit the taxa returned to those descending from the taxon
        designated as the ancestor. The ancestor is designated using its
        ``tid``. By default, taxa with any or no ancestors are returned.
    time : float, int, optional
        Limit the taxa returned to those that were extant at the time
        designated by this parameter as well as extant at the current model
        time. By default, extant taxa at all of the times listed in the
        component record can be returned.

    Returns
    -------
    taxa : a list of Taxon
        The Taxon objects that pass through the filter. The list is sorted
        by ``tid``. An empty list is returned if no taxa pass through the
        filter.

    Examples
    --------
    ZoneTaxon are used to demonstrate this method.

    Import modules used in the following examples.

    >>> from landlab import RasterModelGrid
    >>> from landlab.components import SpeciesEvolver
    >>> from landlab.components.species_evolution import ZoneController

    Create a model grid.

    >>> mg = RasterModelGrid((3, 7), 1000)
    >>> z = mg.add_ones('topographic__elevation', at='node')

    Instantiate SpeciesEvolver and a ZoneController. Instantiate the latter
    with a function that masks the low elevation zone extent. Only one zone
    is created.

    >>> se = SpeciesEvolver(mg)
    >>> def zone_func(grid):
    ...     return grid.at_node['topographic__elevation'] < 100
    >>> zc = ZoneController(mg, zone_func)
    >>> len(zc.zones) == 1
    True

    Introduce two taxa to the zone.

    >>> taxa = zc.populate_zones_uniformly(2)
    >>> se.track_taxa(taxa)

    Force north-south mountain ranges over two time steps that drives taxa
    evolution.

    >>> z[mg.x_of_node == 2000] = 200
    >>> zc.run_one_step(1000)
    >>> se.run_one_step(1000)
    >>> z[mg.x_of_node == 4000] = 200
    >>> zc.run_one_step(1000)
    >>> se.run_one_step(1000)

    Display taxa metadata.

    >>> se.taxa_data_frame # doctest: +NORMALIZE_WHITESPACE
    pid type t_first t_final
    tid
    0 <NA> ZoneTaxon 0 <NA>
    1 <NA> ZoneTaxon 0 <NA>
    2 0 ZoneTaxon 1000 <NA>
    3 1 ZoneTaxon 1000 <NA>
    4 0 ZoneTaxon 2000 <NA>
    5 1 ZoneTaxon 2000 <NA>

    Objects of all extant taxon are returned when no parameters are
    inputted.

    >>> se.get_extant_taxon_objects() # doctest: +NORMALIZE_WHITESPACE
    [<ZoneTaxon, tid=0>,
    <ZoneTaxon, tid=1>,
    <ZoneTaxon, tid=2>,
    <ZoneTaxon, tid=3>,
    <ZoneTaxon, tid=4>,
    <ZoneTaxon, tid=5>]

    The returned objects of extant species can be limited using parameters.
    Here, get the taxon objects with identifiers, 4 and 5.

    >>> se.get_extant_taxon_objects(tids=[4, 5])
    [<ZoneTaxon, tid=4>, <ZoneTaxon, tid=5>]

    Extant taxon objects descending from a taxon can be obtained using the
    ``ancestor`` property. Here, get the taxa that descended from taxon 0.

    >>> se.get_extant_taxon_objects(ancestor=0)
    [<ZoneTaxon, tid=2>, <ZoneTaxon, tid=4>]

    Taxa can be limited to those that were extant ``time``.

    >>> se.get_extant_taxon_objects(time=1000) # doctest: +NORMALIZE_WHITESPACE
    [<ZoneTaxon, tid=0>,
    <ZoneTaxon, tid=1>,
    <ZoneTaxon, tid=2>,
    <ZoneTaxon, tid=3>]

    The returned taxa can be further limited by including multiple
    method properties.

    >>> se.get_extant_taxon_objects(ancestor=0, time=1000)
    [<ZoneTaxon, tid=2>]

    An empty list is returned when no extant taxa match parameter criteria.

    >>> se.get_extant_taxon_objects(tids=[11])
    []
    """
    # Start from the identifiers of all extant taxa; each filter below
    # narrows this set.
    extant_tids = [taxon.tid for taxon in self._taxon_objs]
    results = set(extant_tids)

    data = self._taxa_data

    # Query by identifiers.
    if isinstance(tids, list):
        results = results.intersection(tids)

    # Query by ancestor.
    if not np.isnan(ancestor):
        if ancestor in data["tid"]:
            df = self.taxa_data_frame
            # Replace <NA> parent ids so the equality comparison is valid.
            df["pid"] = df["pid"].fillna(-1)

            # Depth-first collection of all descendants of ``ancestor``.
            descendants = []
            stack = [ancestor]
            while stack:
                parent = stack.pop()
                children = df.index[df["pid"] == parent].tolist()
                descendants.extend(children)
                stack.extend(children)

            results = results.intersection(descendants)
        else:
            # Unknown ancestor: nothing can match. Use an empty *set* here;
            # the original assigned a list, which broke the set operations
            # of any subsequent filter (e.g. combined with ``time``).
            results = set()

    # Query by time.
    if not np.isnan(time):
        t_first = np.array(data["t_first"])
        # Taxa still extant (t_final is nan) are treated as surviving to
        # the latest recorded time.
        t_latest = np.nan_to_num(data["t_final"], nan=self._record.latest_time)
        mask = np.all([time >= t_first, time <= t_latest], 0)
        results = results.intersection(np.array(data["tid"])[mask].tolist())

    # Get the Taxon objects that match all parameter query results.
    taxa = [taxon for taxon in self._taxon_objs if taxon.tid in results]
    taxa.sort(key=lambda taxon: taxon.tid)

    return taxa
def _get_taxa_richness_map(self):
    """Return a per-node array counting the extant taxa at each node."""
    taxa = self._taxon_objs

    # No extant taxa: richness is zero everywhere.
    if not taxa:
        return np.zeros(self._grid.number_of_nodes, dtype=int)

    # Sum the boolean range masks node-wise, yielding integer counts.
    stacked_masks = np.stack([taxon.range_mask for taxon in taxa])
    return stacked_masks.sum(axis=0).astype(int)
|
nilq/baby-python
|
python
|
# Standard library
import binascii
import hashlib
import hmac
import json
import math
import random
import secrets
import sys
import timeit
import warnings
from random import randint, seed

# Third-party
import requests
import sympy
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Hash import SHA3_256, SHA256, HMAC
from Crypto.Util.Padding import pad
from Crypto.Util.Padding import unpad
from ecpy.curves import Curve, Point
# Server endpoint and student identifiers for the key-exchange exercise.
API_URL = 'http://cryptlygos.pythonanywhere.com'
stuID = 24775  # this student's ID
stuID_B = 18007  # peer student's ID (message recipient)
def key_generation(n, P):
    """Generate an elliptic-curve key pair for a group of order *n*.

    Parameters
    ----------
    n : int
        Order of the curve group.
    P : Point
        Generator point of the group.

    Returns
    -------
    tuple
        ``(sA, QA)`` — private scalar ``sA`` in [1, n-1] and the public
        point ``QA = sA * P``.
    """
    # Use a cryptographically secure RNG: ``random`` is a Mersenne-Twister
    # PRNG whose output is predictable, which is unsafe for key material.
    # randbelow(n - 1) yields [0, n-2]; +1 shifts to the valid private-key
    # range [1, n-1] (0 is not a valid key, and the original range also
    # wrongly excluded n-1).
    sA = secrets.randbelow(n - 1) + 1
    QA = sA * P
    return sA, QA
def signature_generation(n, m, P, sA):
    """Produce a Schnorr-style signature ``(h, s)`` over message bytes *m*.

    Parameters
    ----------
    n : int
        Order of the curve group.
    m : bytes
        Message to sign.
    P : Point
        Generator point of the group.
    sA : int
        Signer's private key.

    Returns
    -------
    tuple
        ``(h, s)`` where ``h = SHA3-256(m || r) mod n`` and
        ``s = (sA * h + k) mod n``.
    """
    # The nonce k must be secret and uniformly random on [1, n-1]: a biased
    # or predictable k (as produced by ``random.randrange``) leaks the
    # private key. The original range [1, n-3] was also off by two.
    k = secrets.randbelow(n - 1) + 1
    R = k * P
    r = R.x % n
    # Hash input is m || r with r serialized big-endian in minimal length.
    digest_input = m + r.to_bytes((r.bit_length() + 7) // 8, byteorder='big')
    h = SHA3_256.new(digest_input)
    h = int.from_bytes(h.digest(), byteorder='big') % n
    s = (sA * h + k) % n
    return (h, s)
#testarray for id 18007
# Messages that will be signed and sent to student B below.
test=["The world is full of lonely people afraid to make the first move.",
"I don’t like sand. It’s all coarse, and rough, and irritating. And it gets everywhere.",
"Hate is baggage. Life’s too short to be pissed off all the time. It’s just not worth it.",
"Well, sir, it’s this rug I have, it really tied the room together.",
"Love is like taking a dump, Butters. Sometimes it works itself out. But sometimes, you need to give it a nice hard slimy push."]

#create a long term key
curve = Curve.get_curve('secp256k1')
n = curve.order      # group order
P = curve.generator  # base point
#sA_l,QA_l=key_generation(n, P);
# NOTE(review): the long-term private key is hard-coded (and therefore
# exposed in this file); it should be regenerated for any real use.
sA_l = 47739507727097583103574014533029612368096643715089728534014772436197620809295 #long term key
QA_l = sA_l*P
lkey=QA_l   # long-term public key (point)
lpkey=sA_l  # long-term private key (scalar)
print('sA_l:',sA_l)
print('QA_l:',QA_l)

# Sign this student's ID with the long-term key for registration.
m = str(stuID)
m = str.encode(m)
h,s = signature_generation(n, m, P, sA_l)

####Register Long Term Key
#s, h = SignGen(str(stuID).encode(), curve, sCli_long)
mes = {'ID':stuID, 'H': h, 'S': s, 'LKEY.X': lkey.x, 'LKEY.Y': lkey.y}
response = requests.put('{}/{}'.format(API_URL, "RegLongRqst"), json = mes)
print(response.json())

# Complete registration with the one-time code delivered out of band.
print("Please enter your code:")
#code is 466773
code = int(input())
mes = {'ID':stuID, 'CODE': code}
response = requests.put('{}/{}'.format(API_URL, "RegLong"), json = mes)
print(response.json())

#Check Status
mes = {'ID_A':stuID, 'H': h, 'S': s}
response = requests.get('{}/{}'.format(API_URL, "Status"), json = mes)
print("Status ", response.json())

# Pre-generated ephemeral private keys and the coordinates of the matching
# public points (QA_i = sA_i * P), one per key index 0..9.
arraysA = [112184962276357808309568989833684271625049885675934630372866963801085964072493, 33584358211224784840202436168184815276628420769928064070743091943999268712786, 40726025470150288236659679056057720728221590797096143441172221355007043466450, 101381661083810846279577444932520014627629962066304212964928490092267766004985, 100594443061284668939798088235910436479618491421149817836807710501808402577492, 103568589245276105481949302052504652358633223871875756153798369465269147623829, 100051855146607783942326414928800209257532033065159727699014006828344258666423, 105040970101702829213395228783284792201809442061444673860747455870055614779455, 90156357612886126425473480757697158114559706965764952860166983492293539640483, 635398702918226938233284394615151078835074431754073593651417155565356312859]
arrayQAx = [82130022647859882453134084051369598210823951790545515364285068773611035505062, 51140706821905322921805595227209017018799214209971934540801379729473715539128, 49432472692951339492988178726505370500642699982361951313863393202596519914764, 36018325104317080292711623581486586963933141534504250517355266336334872881775, 76692236566180327558062509272400907882990103538569245665502423108051513335006, 69244633031946208542865994268283620303066389860002324026838412654858935857089, 60912054363237728725479112436389557995283036613828053875989391141033721671154, 9777050861158665235931399469284756599748691198285459487242387650264524106086, 71550389124668400681353157799625559428935445146334133779133788925648770731366, 95236147036073182418341514866602126427742987229922482216352098259662503571995]
arrayQAy = [99978483075519520341321215838600373635619019340293769668813125239291817052190, 109176970583477354468613775592241268156447296020122447619846616252849971527226, 41332704556124677749576587514370939479369122228554359024467723589101993498497, 111937169526343050247843961739629074374183481131752761679441414840787470387010, 31521753310428267762560716570334134560699001095409851645874368613812130826067, 83285583670825079302361649195684356772932386767124262353380806840970324007896, 66326982281265332508620837991901241925785044086964866582111351960359855191393, 5717418184376653044842346172847011511487124169152806246338268537374033277405, 34439977629883966899844059601494016249411403363018406998878545235430372004112, 45115106056023629667663131952612957462385127590246861803653084571856409210418]
# Sign each pre-generated ephemeral public key with the long-term key and
# upload it to the server under its key index.
for i in range(0,10):
    #sA,QA = key_generation(n, P)
    QA = arraysA[i]*P
    # The signed payload is the concatenation of QA's decimal coordinates.
    mes = (str(QA.x)+str(QA.y)).encode()
    # arraysA.append(sA)
    # arrayQAx.append(QA.x)
    # arrayQAy.append(QA.y)
    hx, sx = signature_generation(n,mes,P,sA_l)
    #Send Ephemeral keys
    mes = {'ID': stuID, 'KEYID': i , 'QAI.X': QA.x, 'QAI.Y': QA.y, 'Si': sx, 'Hi': hx}
    response = requests.put('{}/{}'.format(API_URL, "SendKey"), json = mes)
    print(response.json())
### Get key of the Student B
# Request one of B's ephemeral public keys; the request is authenticated by
# signing B's ID with our long-term key.
m = str(stuID_B)
m = str.encode(m)
h1,s1 = signature_generation(n, m, P, sA_l)
mes = {'ID_A': stuID, 'ID_B':stuID_B, 'S': s1, 'H': h1}
response = requests.get('{}/{}'.format(API_URL, "ReqKey"), json = mes)
res = response.json()
print(res)
# i: server-selected index of this student's ephemeral key;
# j: index of B's ephemeral key; QBj: B's ephemeral public point.
i = int(res['i'])
j = res['j']
QBj = Point(res['QBJ.x'] , res['QBJ.y'], curve)

#mesg to send
#mesg = "You can dance, you can jive"
#print("This is my message:", mesg)
# NOTE(review): this loop overwrites the server-provided key index ``i``
# with the message index. It still appears consistent because the 'I'
# field sent below matches the arraysA[i] key actually used — confirm this
# is the intended protocol behavior.
for i in range(len(test)):
    mesg = test[i]
    print("This is my message:", mesg)
    #calculations from pdf
    # ECDH shared point T, then key derivation:
    # K_ENC = SHA3-256(T.x || T.y || salt), K_MAC = SHA3-256(K_ENC).
    T = arraysA[i]*QBj
    U = str(T.x)+str(T.y)+"NoNeedToRunAndHide"
    U = str.encode(U)
    K_ENC = SHA3_256.new(U)
    K_ENC = K_ENC.digest()
    K_MAC = SHA3_256.new(K_ENC)
    K_MAC = K_MAC.digest()
    # Encyption
    cipher = AES.new(K_ENC, AES.MODE_CTR)
    ctext=str.encode(mesg)
    ctext=cipher.encrypt(ctext)
    #hmac calculation
    hmac=HMAC.new(K_MAC,digestmod=SHA256)
    hmac=hmac.update(ctext)
    hmac=hmac.digest()
    # Wire format: CTR nonce || ciphertext || HMAC tag, sent as one big
    # integer.
    msg = cipher.nonce + ctext + hmac
    msg = int.from_bytes(msg, byteorder="big")
    ### Send message to student B
    mes = {'ID_A': stuID, 'ID_B':stuID_B, 'I': i, 'J':j, 'MSG': msg}
    response = requests.put('{}/{}'.format(API_URL, "SendMsg"), json = mes)
    print(response.json())
# The triple-quoted blocks below are intentionally disabled reference code:
# (1) retrieving/decrypting an incoming message, (2) resetting ephemeral
# keys, and (3) resetting the long-term key.
'''
## Get your message
mes = {'ID_A': stuID, 'S': s, 'H': h}
response = requests.get('{}/{}'.format(API_URL, "ReqMsg_PH3"), json = mes)
print(response.json())
if(response.ok): ## Decrypt message
res = response.json()
mes = res['MSG']
i = res['KEYID']
print("KEYID:",i)
QBj = Point(res['QBJ.X'] , res['QBJ.Y'], curve)
sa_m = arraysA[i]
print("sA for this message:",sa_m)
mes = mes.to_bytes((mes.bit_length()+7)//8, byteorder='big')
print("msg:", mes)
T = sa_m * QBj
print("T:",T)
U = str(T.x)+str(T.y)+"NoNeedToRunAndHide"
print("U:",U)
U = str.encode(U)
print("U_encode:",U)
K_ENC = SHA3_256.new(U)
K_ENC = K_ENC.digest()
print("kenc:",K_ENC)
K_MAC = SHA3_256.new(K_ENC)
K_MAC = K_MAC.digest()
print("k_mac:",K_MAC)
#decrypted msg
print("message:",mes)
cipher = AES.new(K_ENC, AES.MODE_CTR, nonce=mes[0:8])
dtext = cipher.decrypt(mes[8:-32]).decode()
#dtext = str(dtext)
print("ciphertext:", dtext)
#hmac calculation
temp = mes[8:len(mes)-32]
hmac2=HMAC.new(K_MAC,digestmod=SHA256)
hmac2=hmac2.update(temp)
hmac2=hmac2.digest()
print("hmac:",hmac2)
'''
#####Reset Ephemeral Keys
'''
#s, h = SignGen("18007".encode(), curve, sCli_long)
mes = {'ID': stuID, 'S': s, 'H': h}
print(mes)
response = requests.get('{}/{}'.format(API_URL, "RstEKey"), json = mes)
print(response.json())
'''
'''
#####Reset Long Term Key
mes = {'ID': stuID}
response = requests.get('{}/{}'.format(API_URL, "RstLongRqst"), json = mes)
print(response.json())
code = int(input())
mes = {'ID': stuID ,'CODE': code}
response = requests.get('{}/{}'.format(API_URL, "RstLong"), json = mes)
print(response.json())
'''
|
nilq/baby-python
|
python
|
from singly_linked_lists.remove_nth_node_from_list import remove_nth_from_end
from data_structures.singly_linked_list_node import SinglyLinkedListNode
def test_remove_nth_from_end():
    """Exercise remove_nth_from_end over single, double and longer lists."""
    # Removing the only node yields an empty list.
    head = SinglyLinkedListNode(1)
    assert remove_nth_from_end(head, 1) is None

    # [1, 2]: removing the 2nd from the end removes the head, leaving [2].
    head = SinglyLinkedListNode(1)
    head.next = SinglyLinkedListNode(2)
    assert remove_nth_from_end(head, 2).data == 2

    # Rebuild the list: the previous call mutated it, so reusing `head`
    # would exercise a corrupted structure rather than a fresh [1, 2].
    head = SinglyLinkedListNode(1)
    head.next = SinglyLinkedListNode(2)
    # Removing the last node leaves [1].
    assert remove_nth_from_end(head, 1).data == 1

    # [1, 2, 3, 4]: removing the 2nd from the end (3) leaves [1, 2, 4].
    head = SinglyLinkedListNode(1)
    head.next = SinglyLinkedListNode(2)
    head.next.next = SinglyLinkedListNode(3)
    head.next.next.next = SinglyLinkedListNode(4)
    assert remove_nth_from_end(head, 2).next.next.data == 4
|
nilq/baby-python
|
python
|
# Copyright (c) 2009 Alexandre Quessy, Arjan Scherpenisse
# See LICENSE for details.
"""
Tests for txosc/osc.py
Maintainer: Arjan Scherpenisse
"""
from twisted.trial import unittest
from twisted.internet import reactor, defer, task
from txosc import osc
from txosc import async
from txosc import dispatch
class TestGetAddressParts(unittest.TestCase):
    """
    Test the getAddressParts function.
    """
    def testGetAddressParts(self):
        # Each case pairs an OSC address with its expected path segments.
        cases = [
            ("/foo", ["foo"]),
            ("/foo/bar", ["foo", "bar"]),
            ("/foo/bar/ham", ["foo", "bar", "ham"]),
            ("/egg/[1-2]", ["egg", "[1-2]"]),
            ("/egg/*", ["egg", "*"]),
            ("/egg/?", ["egg", "?"]),
        ]
        for address, expected in cases:
            self.failUnlessEqual(osc.getAddressParts(address), expected)
class TestArgumentCreation(unittest.TestCase):
    """
    Test the L{osc.createArgument} factory.
    """
    def testCreateFromValue(self):
        # Python values map onto argument classes by type.
        expectations = [
            (True, osc.BooleanArgument),
            (False, osc.BooleanArgument),
            (None, osc.NullArgument),
            (123, osc.IntArgument),
            (3.14156, osc.FloatArgument),
        ]
        for value, klass in expectations:
            self.assertEquals(type(osc.createArgument(value)), klass)
        # Unicode is not supported.
        self.assertRaises(osc.OscError, osc.createArgument, u'test')

    def testCreateFromTypeTag(self):
        # An explicit OSC type tag overrides the value's own type.
        tag_expectations = [
            ("T", osc.BooleanArgument),
            ("F", osc.BooleanArgument),
            ("N", osc.NullArgument),
            ("I", osc.ImpulseArgument),
            ("i", osc.IntArgument),
            ("f", osc.FloatArgument),
        ]
        for tag, klass in tag_expectations:
            self.assertEquals(type(osc.createArgument(123, tag)), klass)
        # Unknown tags are rejected.
        self.assertRaises(osc.OscError, osc.createArgument, 123, "?")
class TestArgument(unittest.TestCase):
    """
    The abstract Argument base class leaves serialization unimplemented.
    """
    def testAbstractArgument(self):
        argument = osc.Argument(None)
        self.assertRaises(NotImplementedError, argument.toBinary)
        self.assertRaises(NotImplementedError, argument.fromBinary, "")
class TestBlobArgument(unittest.TestCase):
    """
    Encoding and decoding of a blob (binary data) argument.
    """
    def testToBinary(self):
        # OSC blobs are a 4-byte big-endian length prefix followed by the
        # data, zero-padded to a multiple of 4 bytes.
        self.assertEquals(osc.BlobArgument("").toBinary(), "\0\0\0\0\0\0\0\0")
        self.assertEquals(osc.BlobArgument("a").toBinary(), "\0\0\0\1a\0\0\0")
        self.assertEquals(osc.BlobArgument("hi").toBinary(), "\0\0\0\2hi\0\0")
        self.assertEquals(osc.BlobArgument("hello").toBinary(), "\0\0\0\5hello\0\0\0")

    def testFromBinary(self):
        # Two consecutive blobs: "hi" (padded) then "hello" (padded).
        data = "\0\0\0\2hi\0\0\0\0\0\5hello\0\0\0"
        first, leftover = osc.BlobArgument.fromBinary(data)
        self.assertEquals(first.value, "hi")
        self.assertEquals(leftover, "\0\0\0\5hello\0\0\0")

        second, leftover = osc.BlobArgument.fromBinary(leftover)
        self.assertEquals(second.value, "hello")
        self.assertEquals(leftover, "")

        # invalid formatted
        self.assertRaises(osc.OscError, osc.BlobArgument.fromBinary, "\0\0\0") # invalid length packet
        self.assertRaises(osc.OscError, osc.BlobArgument.fromBinary, "\0\0\0\99")
class TestStringArgument(unittest.TestCase):
    """
    Encoding and decoding of a string argument.
    """
    def testToBinary(self):
        # OSC strings are null-terminated, zero-padded to a multiple of 4.
        self.assertEquals(osc.StringArgument("").toBinary(), "\0\0\0\0")
        self.assertEquals(osc.StringArgument("OSC").toBinary(), "OSC\0")
        self.assertEquals(osc.StringArgument("Hello").toBinary(), "Hello\0\0\0")

    def testFromBinary(self):
        data = "aaa\0bb\0\0c\0\0\0dddd"
        first, leftover = osc.StringArgument.fromBinary(data)
        #padding with 0 to make strings length multiples of 4 chars
        self.assertEquals(first.value, "aaa")
        self.assertEquals(leftover, "bb\0\0c\0\0\0dddd")

        second, leftover = osc.StringArgument.fromBinary(leftover)
        self.assertEquals(second.value, "bb")
        self.assertEquals(leftover, "c\0\0\0dddd")

        third, leftover = osc.StringArgument.fromBinary(leftover)
        self.assertEquals(third.value, "c")
        # "dddd" has no null terminator and remains unparsed here.
        self.assertEquals(leftover, "dddd")
class TestFloatArgument(unittest.TestCase):
    """
    Encoding, decoding, and casting of float arguments.
    """
    def testToAndFromBinary(self):
        binary = osc.FloatArgument(3.14159).toBinary()
        float_arg = osc.FloatArgument.fromBinary(binary)[0]
        # A 32-bit OSC float cannot represent 3.14159 exactly; compare to
        # 4 decimal places (resolves the former FIXME about how to compare
        # floats) instead of hand-rolled upper/lower bound checks.
        self.assertAlmostEqual(float_arg.value, 3.14159, places=4)
        # Truncated data is rejected.
        self.assertRaises(osc.OscError, osc.FloatArgument.fromBinary, "\0\0\0") # invalid value

    def testCasting(self):
        # Casting the argument to float yields its (approximate) value.
        value = 3.14159
        float_arg = osc.FloatArgument(value)
        self.assertAlmostEqual(float(float_arg), value, places=4)
class TestIntArgument(unittest.TestCase):
    """
    Encoding and decoding of a 32-bit signed integer argument.
    """
    def testToAndFromBinary(self):
        def test(value):
            int_arg = osc.IntArgument.fromBinary(osc.IntArgument(value).toBinary())[0]
            self.assertEquals(int_arg.value, value)
        test(0)
        test(1)
        test(-1)
        # BUG FIX: '1<<31-1' parsed as 1 << (31-1) == 2**30 because '-'
        # binds tighter than '<<'; the intended int32 maximum is (1<<31)-1.
        test((1 << 31) - 1)
        test(-1 << 31)  # int32 minimum (unary minus binds tighter than '<<')
        self.assertRaises(osc.OscError, osc.IntArgument.fromBinary, "\0\0\0") # invalid value

    def testIntOverflow(self):
        # One past either int32 bound must refuse to encode.
        self.assertRaises(OverflowError, osc.IntArgument(1 << 31).toBinary)
        self.assertRaises(OverflowError, osc.IntArgument((-1 << 31) - 1).toBinary)
class TestColorArgument(unittest.TestCase):
    """
    Encoding and decoding of an RGBA color argument (4-tuple of 0-255 ints).
    """
    def testToAndFromBinary(self):
        def _test(value):
            color_arg = osc.ColorArgument.fromBinary(osc.ColorArgument(value).toBinary())[0]
            self.assertEquals(color_arg.value, value)
        _test((255, 255, 255, 255))
        _test((0, 0, 0, 0))
        self.assertRaises(osc.OscError, osc.ColorArgument.fromBinary, "\0\0\0") # invalid value
        self.assertRaises(TypeError, osc.ColorArgument.toBinary, (-244, 0, 0, 0)) # invalid value
        self.assertRaises(TypeError, osc.ColorArgument.toBinary, ()) # invalid value
class TestMidiArgument(unittest.TestCase):
    """
    Encoding and decoding of a 4-byte MIDI message argument.
    """
    def testToAndFromBinary(self):
        def _test(value):
            midi_arg = osc.MidiArgument.fromBinary(osc.MidiArgument(value).toBinary())[0]
            self.assertEquals(midi_arg.value, value)
        _test((255, 255, 255, 255))
        _test((0, 0, 0, 0))
        self.assertRaises(osc.OscError, osc.MidiArgument.fromBinary, "\0\0\0") # invalid value
        self.assertRaises(TypeError, osc.MidiArgument.toBinary, (-244, 0, 0, 0)) # invalid value
        self.assertRaises(TypeError, osc.MidiArgument.toBinary, ()) # invalid value
class TestTimeTagArgument(unittest.TestCase):
    """
    Encoding and decoding of an OSC time tag (NTP-style 64-bit fixed point).
    """
    def testToBinary(self):
        # 1 second since Jan 1, 1900
        arg = osc.TimeTagArgument(1)
        binary = arg.toBinary()
        self.assertEquals(binary, "\0\0\0\1\0\0\0\0")

    def testFromBinary(self):
        # 1 second since Jan 1, 1900
        self.assertEquals(1.0, osc.TimeTagArgument.fromBinary("\0\0\0\1\0\0\0\0")[0].value)
        # immediately
        self.assertEquals(True, osc.TimeTagArgument.fromBinary("\0\0\0\0\0\0\0\1")[0].value)
        # error: fewer than 8 bytes
        self.assertRaises(osc.OscError, osc.TimeTagArgument.fromBinary, "\0\0\0\0\0\0")

    def testToAndFromBinary(self):
        # 1 second since Jan 1, 1900
        def test(value):
            timetag_arg, leftover = osc.TimeTagArgument.fromBinary(osc.TimeTagArgument(value).toBinary())
            self.assertEquals(leftover, "")
            # fractional part is quantized, so compare with a tolerance
            self.assertTrue(abs(timetag_arg.value - value) < 1e-6)
        test(1.0)
        test(1.1331)
class TestMessage(unittest.TestCase):
    """
    Construction, equality, serialization and inspection of osc.Message.
    """
    def testComparisons(self):
        # NOTE(review): no assertion here — this only checks that comparing
        # a Message against None does not raise. Consider asserting the result.
        osc.Message('/foo') == None

    def testMessageStringRepresentation(self):
        self.assertEquals("/hello", str(osc.Message("/hello")))
        self.assertEquals("/hello ,i i:1 ", str(osc.Message("/hello", 1)))
        self.assertEquals("/hello ,T T:True ", str(osc.Message("/hello", True)))

    def testAddMessageArguments(self):
        """
        Test adding arguments to a message
        """
        # explicit argument objects
        m = osc.Message("/example", osc.IntArgument(33), osc.BooleanArgument(True))
        self.assertEquals(m.arguments[0].value, 33)
        self.assertEquals(m.arguments[1].value, True)
        # bare Python values are wrapped automatically
        m = osc.Message("/example", 33, True)
        self.assertEquals(m.arguments[0].value, 33)
        self.assertEquals(m.arguments[1].value, True)
        # incremental add() also infers the type tag
        m = osc.Message("/example")
        m.add(33)
        self.assertEquals(m.arguments[0].value, 33)
        self.assertEquals(m.arguments[0].typeTag, "i")
        m.add(True)
        self.assertEquals(m.arguments[1].typeTag, "T")

    def testEquality(self):
        # equality covers address, argument values, and argument types
        self.assertEquals(osc.Message("/example"),
                          osc.Message("/example"))
        self.assertNotEqual(osc.Message("/example"),
                            osc.Message("/example2"))
        self.assertEquals(osc.Message("/example", 33),
                          osc.Message("/example", 33))
        self.assertNotEqual(osc.Message("/example", 33),
                            osc.Message("/example", 34))
        self.assertNotEqual(osc.Message("/example", 33),
                            osc.Message("/example", 33.0))
        self.assertNotEqual(osc.Message("/example", 33),
                            osc.Message("/example", 33, True))
        self.assertEquals(osc.Message("/example", 33, True),
                          osc.Message("/example", 33, True))

    def testGetTypeTag(self):
        m = osc.Message("/example")
        self.assertEquals(m.getTypeTags(), "")
        m.arguments.append(osc.StringArgument("egg"))
        self.assertEquals(m.getTypeTags(), "s")
        m.arguments.append(osc.StringArgument("spam"))
        self.assertEquals(m.getTypeTags(), "ss")

    def testToAndFromBinary(self):
        # malformed inputs must raise OscError
        self.assertRaises(osc.OscError, osc.Message.fromBinary, "invalidbinarydata..")
        self.assertRaises(osc.OscError, osc.Message.fromBinary, "/example,invalidbinarydata..")
        self.assertRaises(osc.OscError, osc.Message.fromBinary, "/hello\0\0,xxx\0")
        # round-trip: serialize then parse and compare
        def test(m):
            binary = m.toBinary()
            m2, leftover = osc.Message.fromBinary(binary)
            self.assertEquals(leftover, "")
            self.assertEquals(m, m2)
        test(osc.Message("/example"))
        test(osc.Message("/example", osc.StringArgument("hello")))
        test(osc.Message("/example", osc.IntArgument(1), osc.IntArgument(2), osc.IntArgument(-1)))
        test(osc.Message("/example", osc.BooleanArgument(True)))
        test(osc.Message("/example", osc.BooleanArgument(False), osc.NullArgument(), osc.StringArgument("hello")))
        test(osc.Message("/example", osc.ImpulseArgument()))

    def testGetValues(self):
        # tests calling txosc.osc.Message.getValues()
        message = osc.Message("/foo", 2, True, 3.14159)
        values = message.getValues()
        self.failUnlessEqual(values[0], 2)
        self.failUnlessEqual(values[1], True)
        self.failUnlessEqual(values[2], 3.14159)
class TestBundle(unittest.TestCase):
    """
    Equality, serialization and message extraction for osc.Bundle.
    """
    def testEquality(self):
        self.assertEquals(osc.Bundle(), osc.Bundle())
        self.assertNotEqual(osc.Bundle(), None)
        self.assertNotEqual(osc.Bundle([osc.Message("/hello")]),
                            osc.Bundle())
        self.assertEquals(osc.Bundle([osc.Message("/hello")]),
                          osc.Bundle([osc.Message("/hello")]))
        self.assertNotEqual(osc.Bundle([osc.Message("/hello")]),
                            osc.Bundle([osc.Message("/hello2")]))

    def testToAndFromBinary(self):
        # malformed bundles must raise OscError
        self.assertRaises(osc.OscError, osc.Bundle.fromBinary, "invalidbinarydata..")
        self.assertRaises(osc.OscError, osc.Bundle.fromBinary, "#bundle|invalidbinarydata..")
        self.assertRaises(osc.OscError, osc.Bundle.fromBinary, "#bundle\0\0\0\0\1\0\0\0\0hello")
        self.assertRaises(osc.OscError, osc.Bundle.fromBinary, "#bundle\0\0\0\0\1\0\0\0\0\0\0\0\5hellofdsfds")
        # round-trip: serialize then parse and compare
        def test(b):
            binary = b.toBinary()
            b2, leftover = osc.Bundle.fromBinary(binary)
            self.assertEquals(leftover, "")
            self.assertEquals(b, b2)
        test(osc.Bundle())
        test(osc.Bundle([osc.Message("/foo")]))
        test(osc.Bundle([osc.Message("/foo"), osc.Message("/bar")]))
        test(osc.Bundle([osc.Message("/foo"), osc.Message("/bar", osc.StringArgument("hello"))]))
        nested = osc.Bundle([osc.Message("/hello")])
        test(osc.Bundle([nested, osc.Message("/foo")]))

    def testGetMessages(self):
        # getMessages() flattens nested bundles into a set of messages
        m1 = osc.Message("/foo")
        m2 = osc.Message("/bar")
        m3 = osc.Message("/foo/baz")
        b = osc.Bundle()
        b.add(m1)
        self.assertEquals(b.getMessages(), set([m1]))
        b = osc.Bundle()
        b.add(m1)
        b.add(m2)
        self.assertEquals(b.getMessages(), set([m1, m2]))
        b = osc.Bundle()
        b.add(m1)
        b.add(osc.Bundle([m2]))
        b.add(osc.Bundle([m3]))
        self.assertEquals(b.getMessages(), set([m1, m2, m3]))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from __future__ import absolute_import, division, print_function, unicode_literals
import yaml
import struct
import hexdump
import math
import re
import time
import os
import layout.mapped_keycodes as mapped_keycodes
from layout.common import *
from layout.scan_mode import *
from layout.rf_settings import *
from layout.device import *
from layout.ekc_data import EKCDataMain
RF_INFO_SIZE = 64
class Layout:
    """One named keyboard layout parsed from the yaml layout file.

    Validates that every layer has the same number of devices and that
    each device has the same number of keys on every layer.
    """
    def __init__(self, layout, layout_id, layout_name):
        """
        Args:
            layout: dict for this layout from the yaml 'layouts' section.
            layout_id: integer id assigned to this layout.
            layout_name: name of the layout, used in error messages.
        Raises:
            ParseError: if the layer/device/key structure is inconsistent.
        """
        self.id = layout_id
        self.layers = try_get(layout, "layers", layout_name, val_type=list)
        self.sub_matrix_sizes = []
        self.name = layout_name
        # optional default layer
        if "default_layer" in layout:
            self.default_layer = try_get(layout, "default_layer", layout_name, val_type=int)
        else:
            self.default_layer = 0
        # first determine the layer structure for the keyboard
        try:
            self.layer_count = len(self.layers)
        except:
            raise ParseError("Expected at least one layer in {}".format(layout_name))
        # check for at least one keyboard
        try:
            self.keyboard_count = len(self.layers[0])
        except:
            raise ParseError("Expected at least one keyboard device in 'layers' field of {}".format(layout_name))
        # number of keys in keyboards
        try:
            self.sub_matrix_sizes = [len(kb) for kb in self.layers[0]]
        except:
            raise ParseError("Couldn't get keyboard sizes {}".format(layout_name))
        # check that all the layers have the same dimensions
        for layer_i in range(self.layer_count):
            device_count_i = len(self.layers[layer_i])
            if device_count_i != self.keyboard_count:
                raise ParseError("Unbalanced layer structure in layout '{}'. "
                    " The first layer has '{}' devices, but the {} layer has '{}' devices."
                    .format(layout_name, self.keyboard_count,
                            num_to_ordinal_str(layer_i+1), device_count_i)
                )
            for device_i in range(self.keyboard_count):
                expected_size = self.sub_matrix_sizes[device_i]
                actual_size = len(self.layers[layer_i][device_i])
                if actual_size != expected_size:
                    raise ParseError("Mismatching devices in layout '{}'. "
                        "The {} device has '{}' keycodes in the first layer, but "
                        "in the {} layer the same device has '{}' keycodes."
                        .format(
                            layout_name,
                            num_to_ordinal_str(device_i+1), expected_size,
                            num_to_ordinal_str(layer_i+1), actual_size
                        )
                    )
        # total matrix size of layout
        self.matrix_size = self.calc_total_matrix_size()
        # which matrix_maps are used (if any) for the layout
        if "matrix_maps" in layout:
            self.matrix_maps = try_get(layout, "matrix_maps", layout_name, val_type=list)
            if len(self.matrix_maps) != self.keyboard_count:
                # BUG FIX: was `.found(` — AttributeError instead of the
                # intended ParseError message.
                raise ParseError("In layout '{}', found '{}' maps in 'matrix_maps', "
                    "but found {} devices in its 'layers' list".format(
                        self.name, len(self.matrix_maps), self.keyboard_count))
        else:
            self.matrix_maps = None

    def calc_total_matrix_size(self):
        """Return total bytes needed for the key matrices in this layout
        (each sub-matrix is packed as ceil(keys/8) bytes)."""
        matrix_size = 0
        for size in self.sub_matrix_sizes:
            matrix_size += int(math.ceil(size / 8))
        if matrix_size > MAX_MATRIX_SIZE:
            # BUG FIX: referenced undefined name 'layout_name' here
            raise ParseError("Too many keys in layout '{}'".format(self.name))
        return matrix_size

    def check_layers(self, layout, debug_hint, layout_id):
        """Re-validate that all layers have the same number of keyboards and
        all keyboards have the same number of keys."""
        for (l_i, layer) in enumerate(self.layers):
            # BUG FIX: len(layer) is the number of keyboards in the layer,
            # so compare against keyboard_count, not layer_count.
            if len(layer) != self.keyboard_count:
                raise ParseError("'{}' has a mismatched number of keyboards "
                    "in its layers starting at layer '{}'".format(debug_hint, l_i))
            for (kb_i, kb) in enumerate(layer):
                if len(kb) != self.sub_matrix_sizes[kb_i]:
                    raise ParseError("'{}' has a mismatched number of keys "
                        "starting at keyboard '{}' of layer '{}'".format(debug_hint, kb_i, l_i))
class SettingsGenerator:
    """Builds the binary settings and layout sections that are flashed to a
    keyboard device, from the parsed yaml layout file and RF settings."""
    def __init__(self, layout_data, rf_settings):
        self.layout = layout_data
        self.rf = rf_settings
        self.ekc_data = EKCDataMain()
        self.build_device_data()
    def gen_single_layout(self, layout):
        """Pack every layer of `layout` as little-endian uint16 keycodes,
        each sub-matrix padded to a multiple of 8 entries."""
        result = bytearray(0)
        for layer in layout.layers:
            for (kb_i, kb) in enumerate(layer):
                kc_map = None
                size = 0
                # # Check for 'matrix_maps'. It is a list of device names with
                # # one for each sub-matrix in the layout. The matrix_map is
                # # used to map the keys from how they are "visually arranged" to
                # # to how they are physically wired.
                # # The matrix_maps is optional. If it is not given, then the
                # # list of keys in the matrix will match how they are physically
                # # wired.
                # if layout.matrix_maps != None:
                #     map_name = layout.matrix_maps[kb_i]
                #     try:
                #         map_device = self.get_device_by_name(map_name)
                #         kc_map = map_device.scan_mode.matrix_map
                #         sm = map_device.scan_mode
                #         size = sm.rows * sm.cols
                #         size = int(math.ceil(len(kb)/8))*8 # pad to multiple of 8
                #     except:
                #         raise ParseError("Couldn't find matrix_map for '{}' in"
                #             " layout '{}'".format(map_name, layout.name))
                #     if len(kc_map) != len(kb):
                #         raise ParseError("The matrix_map for '{}' has '{}' "
                #             "keys, but the corresponding matrix in the layout "
                #             "'{}' has '{}' keys".format(
                #                 map_name, len(kc_map),
                #                 layout.name, len(kb)))
                # else:
                #     # no map given, so generate a list that is a 1-to-1 mapping
                #     kc_map = list(range(len(kb)))
                #     size = int(math.ceil(len(kb)/8))*8 # pad to multiple of 8
                size = int(math.ceil(len(kb)/8))*8 # pad to multiple of 8
                keycodes = [0] * size
                for (kc_i, kc_str) in enumerate(kb):
                    kc = mapped_keycodes.interpret_keycode(kc_str)
                    keycodes[kc_i] = kc
                # pack all the keycodes as uint16_t
                for kc in keycodes:
                    result += struct.pack('<H', kc)
        return result
    def gen_layout_section(self, dev_id):
        """Build the layout section for device `dev_id`."""
        # Layout section has the format
        # matrix_keynumber_map for this specific device[rows * cols]
        # layouts for all devices
        self.build_device_data()
        result = bytearray(0)
        dev_data = self.get_device_by_id(dev_id)
        if dev_data.scan_mode.mode != ScanMode.NO_MATRIX:
            # Add matrix map to the layout section
            for key_num in dev_data.scan_mode.inverse_map:
                result += struct.pack('<B', key_num)
        # Add ekc data to the layout section
        result += self.ekc_data.to_bytes()
        # then every layout, in id order
        for layout_id in range(self.number_layouts):
            layout = self.get_layout_by_id(layout_id)
            result += self.gen_single_layout(layout)
        return result
    def gen_settings_section(self, device_id):
        """Build the settings section: global settings + layout table + RF."""
        result = bytearray(0);
        result += self.gen_global_settings(device_id)
        result += self.gen_layout_settings()
        result += self.gen_rf_settings()
        return result
    def gen_global_settings(self, device_id):
        """Pack the per-device global settings header (96 bytes total)."""
        # uint8_t device_id;
        # char device_name[32];
        # uint8_t timestamp[8]; // utc time stamp of last update
        # uint8_t default_report_mode;
        # uint8_t scan_mode;
        # uint8_t row_count;
        # uint8_t col_count;
        # uint8_t _reserved[51]; // total size == 96
        result = bytearray(0)
        device = self.get_device_by_id(device_id)
        # device_id
        result += struct.pack('<B', device.id)
        # device_name, fixed 32 bytes
        result += struct.pack('<32s', device.name.encode('utf-8'))
        # build timestamp, 64 bit UTC
        result += struct.pack('<q', int(time.time()) )
        # default_report_mode
        result += struct.pack('<B', self.get_report_mode())
        # scan mode information
        result += self.gen_scan_mode_info(device_id)
        # reserved padding to reach 96 bytes
        result += bytearray(51)
        return result
    def parse_layouts(self):
        """Parse the 'layouts' section into Layout objects, assigning ids in
        iteration order."""
        self.layout_data = {}
        layout_id = 0
        for (layout_name, layout) in try_get(self.layout, 'layouts').items():
            self.layout_data[layout_name] = Layout(layout, layout_id, layout_name)
            layout_id += 1
        self.number_layouts = layout_id
    def get_layout_by_id(self, layout_id):
        """Return the Layout with the given id, or raise ParseError."""
        for (_, layout) in self.layout_data.items():
            if layout.id == layout_id:
                return layout
        raise ParseError("Couldn't find layout with id: {}".format(layout_id))
    def get_layout_by_name(self, layout_name):
        """Return the Layout with the given name, or raise ParseError."""
        if layout_name in self.layout_data:
            return self.layout_data[layout_name]
        raise ParseError("Couldn't find layout with name: {}".format(layout_name))
    def get_device_by_name(self, device_name):
        """Return the Device with the given name, or raise ParseError."""
        if device_name in self.device_name_map:
            dev_id = self.device_name_map[device_name]
            return self.device_data[dev_id]
        else:
            raise ParseError("Couldn't find device named: {}".format(device_name))
    def get_device_by_id(self, dev_id):
        """Return the Device with the given id, or raise ParseError."""
        if dev_id in self.device_data:
            return self.device_data[dev_id]
        else:
            raise ParseError("Couldn't find device with id: {}".format(dev_id))
    def parse_devices(self):
        """Parse the 'devices' section, validating each device as it is added."""
        self.device_data = {}
        self.device_name_map = {}
        self.largest_device_id = 0
        for (device_name, device_data) in try_get(self.layout, 'devices').items():
            dev = Device.from_json_obj(device_data, device_name)
            self.assert_validate_device(dev, device_name)
            self.device_data[dev.id] = dev
            self.device_name_map[device_name] = dev.id
            self.largest_device_id = max(self.largest_device_id, dev.id)
    def assert_validate_device(self, dev, device_name):
        """Raise ParseError if `dev` has an invalid id, unknown layout,
        duplicate id, or out-of-range layout_offset."""
        if dev.scan_mode.mode == ScanMode.NO_MATRIX:
            return
        if not dev.id < MAX_DEVICE_ID:
            raise ParseError("Device id '{}' too large. Max allowed value is {}"
                .format(dev.id, MAX_DEVICE_ID))
        # if not dev.id in self.device_data:
        #     raise ParseError("Tried to build layout for device id '{}', but no"
        #             " matching device was found in the layout file."
        #             .format(dev.id))
        # check layout identifier
        if not dev.layout_name in self.layout_data:
            raise ParseError("Couldn't find layout with name '{}' for "
                "keyboard '{}'".format(dev.layout_name, device_name))
        if (dev.id in self.device_data):
            raise ParseError("Duplicate device id '{}' used in both "
                "'{}' and '{}'".format(dev.id, device_name, self.device_data[dev.id].name))
        # check layout offset
        offset_max = self.layout_data[dev.layout_name].keyboard_count
        if not dev.layout_offset < offset_max:
            raise ParseError("'layout_offset' too large. Got '{}' but "
                "'{}' only has {} device in its layout".format(dev.layout_offset, dev.layout_name, offset_max))
    def build_device_data(self):
        # layouts must be parsed first: device validation needs layout_data
        self.parse_layouts()
        self.parse_devices()
    def gen_layout_settings(self):
        """Pack the layout/device info tables shared by all devices."""
        # uint8_t number_layouts;
        # uint8_t number_devices;
        # uint8_t _reserved[30]; // 32
        # keyboard_info_t layouts[64];
        # device_info_t devices[64];
        result = bytearray(0)
        result += struct.pack('<B', self.number_layouts)
        result += struct.pack('<B', self.largest_device_id)
        result += bytearray(30)
        # layout_info_t {
        #   uint8_t matrix_size;
        #   uint8_t layer_count;
        # }[64]
        for layout_id in range(MAX_LAYOUT_ID):
            if layout_id >= self.number_layouts:
                # unused slot: zero fill
                result += bytearray(2)
                continue
            layout = self.get_layout_by_id(layout_id)
            layout_name = layout.name
            # calculate how many bytes are needed for the matrix.
            # each keyboard in the layout needs ceil(kb_size/8)
            result += struct.pack('<B', layout.matrix_size)
            result += struct.pack('<B', layout.layer_count)
        # typedef struct device_info_t {
        #     uint8_t keyboard_id; // the keyboard layout that this device maps to
        #     uint8_t matrix_offset; // the component byte offset into the given keyboard
        #     uint8_t matrix_size; // the size of this component == ceil(rows*cols/8)
        # } [64]
        for device_id in range(MAX_DEVICE_ID):
            if not device_id in self.device_data or \
                    self.device_data[device_id].scan_mode.mode == ScanMode.NO_MATRIX:
                # unused or matrix-less slot: zero fill
                result += bytearray(3)
                continue
            device = self.device_data[device_id]
            layout = self.layout_data[device.layout_name]
            layout_id = layout.id
            # TODO: validate this value
            matrix_size = device.scan_mode.calc_matrix_size()
            keyboard_offset = device.layout_offset
            # byte offset of this device's sub-matrix within the layout
            matrix_offset = 0
            for (i, size) in enumerate(layout.sub_matrix_sizes):
                if not i < keyboard_offset:
                    break;
                matrix_offset += int(math.ceil(size / 8))
            if matrix_offset + matrix_size > layout.matrix_size:
                raise ParseError("The matrix for device '{}' doesn't fit in "
                    "layout '{}'".format(device.name, layout.name))
            result += struct.pack('<B', layout_id)
            result += struct.pack('<B', matrix_offset)
            result += struct.pack('<B', matrix_size)
        return result
    def gen_rf_settings(self):
        """Pack RF settings, or 0xff filler when no RF config was given."""
        if self.rf == None:
            return bytearray([0xff] * RF_INFO_SIZE)
        else:
            rf_settings = RFSettings.from_json_obj(self.rf)
            return rf_settings.to_bytes()
    def get_report_mode(self):
        """Map the yaml 'report_mode' string to its numeric constant."""
        mode = try_get(self.layout, 'report_mode')
        # KEYBOARD_REPORT_MODE_AUTO = 0, // 6kro -> nkro if more than 6 keys pressed
        # KEYBOARD_REPORT_MODE_NKRO = 1, // nkro
        # KEYBOARD_REPORT_MODE_6KRO = 2, // 6kro
        if mode == "auto_nkro":
            return KEYBOARD_REPORT_MODE_AUTO
        elif mode == "6kro":
            return KEYBOARD_REPORT_MODE_6KRO
        elif mode == "nkro":
            return KEYBOARD_REPORT_MODE_NKRO
        else:
            raise ParseError("Unknown report mode {}".format(mode))
    def get_scan_mode(self, device_id):
        """Return the yaml 'scan_mode' dict for the device with `device_id`."""
        for (kb_name, kb) in try_get(self.layout, 'devices').items():
            if (try_get(kb, 'id', kb_name) == device_id):
                return try_get(kb, 'scan_mode', kb_name)
        raise ParseError("No device defined for id={}".format(device_id))
    def gen_scan_mode_info(self, device_id):
        """Pack (mode, rows, cols) for the device's matrix scanner."""
        scan_mode = self.get_scan_mode(device_id)
        mode = try_get(scan_mode, 'mode', 'scan_mode')
        if mode == 'none' or mode == 'no_matrix':
            return struct.pack('<BBB', MATRIX_SCANNER_MODE_NONE, 0, 0)
        elif mode == 'col_row':
            rows = try_get(scan_mode, 'rows', 'scan_mode')
            cols = try_get(scan_mode, 'cols', 'scan_mode')
            return struct.pack('<BBB', MATRIX_SCANNER_MODE_COL_ROW, rows, cols)
        elif mode == 'pins':
            # count = scan_mode['pin_count']
            # return struct.pack('<BBB', MATRIX_SCANNER_MODE_PINS, count, 0)
            raise ParseError("TODO: 'pins' scan mode not implemented yet")
        else:
            raise ParseError("Unsupported scan mode {}".format(mode))
if __name__ == "__main__":
    # Load the test fixtures for the layout and the RF configuration.
    with open("test_layout.yaml") as fin:
        layout = yaml.safe_load(fin.read())
    with open("test_rf_config.yaml") as fin:
        rf = yaml.safe_load(fin.read())

    gen = SettingsGenerator(layout_data=layout, rf_settings=rf)
    target_layout_id = 0x30

    # Dump the generated settings section, bailing out on parse errors.
    print("settings:")
    try:
        hexdump.hexdump(bytes(gen.gen_settings_section(target_layout_id)))
    except ParseError as e:
        print(e)
        # print(e.with_traceback())
        exit(1)

    # Dump the generated layout section.
    print("layout:")
    try:
        hexdump.hexdump(bytes(gen.gen_layout_section(target_layout_id)))
    except ParseError as e:
        print(e)
        exit(1)

    print()
    print()

    # Also exercise random RF settings generation.
    rand_rf = RFSettings.from_rand()
    print(rand_rf.to_json_obj())
    print(rand_rf.to_yaml())
    hexdump.hexdump(rand_rf.to_bytes())
|
nilq/baby-python
|
python
|
# This file adds code completion to the auto-generated pressuresense_pb2 file.
from .pressuresense_pb2 import PressureQuanta, PressureLog
from .common_proto import _TimeStamp
from typing import List, Callable, Union
class _PressureProfile( object ):
    """IDE-completion stub mirroring the generated PressureProfile message."""
    # presumably pressure in MPa -- TODO confirm units against the .proto
    mpa = 0
class _PressureQuanta( object ):
    """IDE-completion stub mirroring the generated PressureQuanta message."""
    profiles = _PressureProfile() # type: _PressureProfile
    time = _TimeStamp() # type: _TimeStamp
# Re-export the real generated class, annotated so IDEs offer the stub's
# attributes on constructed instances.
PressureQuanta = PressureQuanta # type: Callable[[],_PressureQuanta]
class _PressureLog( object ):
    """IDE-completion stub mirroring the generated PressureLog message."""
    class QuantasList(list):
        # mimics the repeated-field .add() API of protobuf containers
        def add(self): # type: (...)->_PressureQuanta
            return self[0]
    quantas = QuantasList() # type: Union[List[_PressureQuanta],QuantasList]
    def ParseFromString(self, string):
        # stub: real implementation comes from the generated class
        return self
    def SerializeToString(self):
        # stub: real implementation comes from the generated class
        return ""
# Re-export the real generated class, annotated so IDEs offer the stub's
# attributes on constructed instances.
PressureLog = PressureLog # type: Callable[[],_PressureLog]
|
nilq/baby-python
|
python
|
import sys
import logging
import argparse
from pprint import pprint
from . import *
def dumpSubject(cert):
    """Pretty-print the subject information extracted from a certificate file."""
    pprint(getSubjectFromCertFile(cert), indent=2)
def main():
    """Entry point: parse CLI arguments, then either show an existing
    certificate's subject or log in and download a new certificate."""
    services = ",".join(LOGIN_SERVICE.keys())
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--log_level',
                        action='count',
                        default=0,
                        help='Set logging level, multiples for more detailed.')
    parser.add_argument('-C', '--cert',
                        default=None,
                        help='Show information for existing certificate')
    parser.add_argument('-s', '--service',
                        default='production',
                        help='Service to login, one of ({})'.format(services))
    parser.add_argument('-j', '--jnlp',
                        default=None,
                        help='Process specified JNLP file')
    parser.add_argument('-t', '--ttl',
                        type=int,  # BUG FIX: was parsed as str
                        default=None,
                        help='Certificate lifetime in seconds, use JNLP default if not set')
    args = parser.parse_args()
    # Setup logging verbosity
    levels = [logging.WARNING, logging.INFO, logging.DEBUG]
    level = levels[min(len(levels) - 1, args.log_level)]
    logging.basicConfig(level=level,
                        format="%(asctime)s %(levelname)s %(message)s")
    if args.cert is not None:
        cert_file = args.cert
        if cert_file == "default":
            # BUG FIX: the resolved default path was assigned to an unused
            # variable and the literal "default" was dumped instead.
            cert_file = getDefaultCertificatePath()
        dumpSubject(cert_file)
        sys.exit(0)
    if args.service not in LOGIN_SERVICE.keys():
        logging.error("Unknown service: %s", args.service)  # typo fix: "Uknown"
        sys.exit(1)
    cert_file = None
    if args.jnlp is not None:
        # retrieve a certificate using an explicit JNLP file
        cert_file = grid_shib.retrieveCertificate(args.jnlp,
                                                  getDefaultCertificatePath(),
                                                  lifetime_seconds=args.ttl)
    else:
        # interactive login against the selected service
        cert_file = login(overwrite=True,
                          service=LOGIN_SERVICE[args.service],
                          lifetime_seconds=args.ttl)
    print("Certificate downloaded to: {}\n".format(cert_file))
    print("Certificate info:")
    dumpSubject(cert_file)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
'''
knowyourmeme.com image crawler:
-------------------------------------------
Script designed to specifically crawl meme templates to be used in ml(and self enjoyment).
url: https://knowyourmeme.com/photos/templates/page/<page_number>
So, as you can see, we are lucky enough that knowyoumeme has pagination here
IMPORTANT: check robots.txt
* http://www.useragentstring.com/pages/useragentstring.php
* https://knowyourmeme.com/robots.txt
Also, check that the folder where you are going to save the images already exists...
too lazy to write something that creates the folder
'''
from bs4 import BeautifulSoup as bs
import requests
import shutil
import json
import time
import sys
import os
# Crawler configuration.
url = 'https://knowyourmeme.com'
img_save_path = 'templates/'  # folder must already exist (see module docstring)
json_save_path = 'data.json'
paging_path = '/photos/templates/page/'
headers = {'User-Agent':'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'}
pages = 47 #remember to check number of pages beforehand
ids = 1        # running image id, used as filename and JSON key
structure = {} # id -> {site_url, template_url, tags}, dumped as JSON at the end
def img_crawls(template_path, headers):
    """Crawl one template page: return its url, image url and tag list.

    Sleeps for 10x the measured response delay afterwards to be polite
    to the server.
    """
    site_url = url + template_path
    started = time.time()
    response = requests.get(site_url, headers = headers)
    response_delay = time.time() - started

    page = bs(response.text, 'lxml')
    content = page.body.find(id='content')
    main_col = content.find(id='maru')
    sidebar = content.find(class_='right').select('.sidebar_box')[0]

    template_url = main_col.select('div#photo_wrapper a')[0]['href']
    tags = [str(anchor.string) for anchor in sidebar.select('p#tag_list a')]

    # crawl delay proportional to how slowly the server responded
    time.sleep(10 * response_delay)
    return {'site_url': site_url,
            'template_url': template_url,
            'tags': tags}
# Walk every listing page, crawl each template found, download its image,
# and finally dump the collected metadata to JSON.
# BUG FIX: range(1, pages) skipped the final page; pages are 1-indexed.
for i in range(1, pages + 1):
    page_url = url + paging_path + str(i)
    r = requests.get(page_url, headers = headers)
    data = r.text
    soup = bs(data,'lxml')
    section = soup.body.find(id='content').find(id='maru').find(id="infinite-scroll-wrapper")
    urls = section.select("div.item a")
    for template in urls:
        template_path = template['href']
        info = img_crawls(template_path, headers)
        print(info['site_url']) #### DEBUG
        # store
        structure[ids]=info
        # derive the file extension from the image url, defaulting to .jpeg
        img_type = '.' + info['template_url'].split('.')[-1]
        if not img_type in ['.jpg','.png','.jpeg'] :
            img_type='.jpeg'
        # stream the image straight to disk
        img_get = requests.get(info['template_url'], stream = True)
        with open(img_save_path + str(ids) + img_type, 'wb') as out_file:
            shutil.copyfileobj(img_get.raw, out_file)
        print('Image '+str(ids)+' crawled...') #### DEBUG
        del img_get
        ids+=1
        time.sleep(5)
with open(json_save_path,'w') as out_file:
    json.dump(structure,out_file)
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.1 on 2019-06-03 04:58
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the Estudiante table."""

    initial = True

    dependencies = [
        # requires the Profesor app's initial migration (FK target below)
        ('Profesor', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Estudiante',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('apellido', models.CharField(max_length=100)),
                ('edad', models.IntegerField()),
                ('sexo', models.CharField(max_length=100)),
                ('direccion', models.CharField(max_length=250)),
                ('matricula', models.IntegerField()),
                ('numeroTelefonico', models.IntegerField()),
                ('fechaNacimiento', models.DateField(default=django.utils.timezone.now)),
                ('materia', models.CharField(max_length=100)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                # on professor deletion the FK is set to the sentinel value -1
                ('profesor', models.ForeignKey(on_delete=models.SET(-1), to='Profesor.Profesor')),
            ],
            options={
                'db_table': 'Estudiante',
            },
        ),
    ]
|
nilq/baby-python
|
python
|
import sys
sys.path.append(".")
import numpy as np
from DDPG import *
from main import *
import os.path
import argparse
from Environment import Environment
from shield import Shield
def carplatoon(learning_method, number_of_rollouts, simulation_steps, learning_eposides, actor_structure, critic_structure, train_dir, \
        nn_test=False, retrain_shield=False, shield_test=False, test_episodes=100, retrain_nn=False):
    """Train and/or test a DDPG controller plus a linear shield for an
    8-input car-platoon model with a 15-dimensional state.

    NOTE(review): parameter name 'learning_eposides' is a typo for
    'learning_episodes' but is part of the public signature, so it is kept.
    """
    # Continuous-time dynamics x' = Ax + Bu of the platoon model.
    A = np.matrix([
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,1, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,1, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,1, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,1, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,1, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,1, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,1],
        [0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0]
    ])
    B = np.matrix([
        [1, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [1, -1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, -1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, -1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, -1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, -1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, -1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, -1],
    ])
    #intial state space
    s_min = np.array([[ 19.9],[ 0.9], [-0.1], [ 0.9],[-0.1], [ 0.9], [-0.1], [ 0.9], [-0.1], [ 0.9],[-0.1], [ 0.9], [-0.1], [ 0.9], [-0.1]])
    s_max = np.array([[ 20.1],[ 1.1], [ 0.1], [ 1.1],[ 0.1], [ 1.1], [ 0.1], [ 1.1], [ 0.1], [ 1.1],[ 0.1], [ 1.1], [ 0.1], [ 1.1], [ 0.1]])
    # safe state bounds and actuation limits
    x_min = np.array([[18],[0.1],[-1],[0.5],[-1],[0.5],[-1],[0.5],[-1],[0.5],[-1],[0.5],[-1],[0.5],[-1]])
    x_max = np.array([[22],[1.5], [1],[1.5],[ 1],[1.5],[ 1],[1.5], [1],[1.5],[ 1],[1.5],[ 1],[1.5],[ 1]])
    u_min = np.array([[-10.], [-10.], [-10.], [-10.], [-10.], [-10.], [-10.], [-10.]])
    u_max = np.array([[ 10.], [ 10.], [ 10.], [ 10.], [ 10.], [ 10.], [ 10.], [ 10.]])
    # shift coordinates so the target state becomes the origin
    target = np.array([[20],[1], [0], [1], [0], [1], [0], [1], [0], [1], [0], [1], [0], [1], [0]])
    s_min -= target
    s_max -= target
    x_min -= target
    x_max -= target
    # identity quadratic cost weights on state and input
    Q = np.zeros((15, 15), float)
    np.fill_diagonal(Q, 1)
    R = np.zeros((8,8), float)
    np.fill_diagonal(R, 1)
    env = Environment(A, B, u_min, u_max, s_min, s_max, x_min, x_max, Q, R, continuous=True, bad_reward=-1000)
    # DDPG hyperparameters; the only differences between the branches are
    # max_episodes and the checkpoint filename.
    if retrain_nn:
        args = { 'actor_lr': 0.000001,
                 'critic_lr': 0.00001,
                 'actor_structure': actor_structure,
                 'critic_structure': critic_structure,
                 'buffer_size': 1000000,
                 'gamma': 0.999,
                 'max_episode_len': 400,
                 'max_episodes': 1000,
                 'minibatch_size': 64,
                 'random_seed': 122,
                 'tau': 0.005,
                 'model_path': train_dir+"retrained_model.chkp",
                 'enable_test': nn_test,
                 'test_episodes': test_episodes,
                 'test_episodes_len': 1200}
    else:
        args = { 'actor_lr': 0.000001,
                 'critic_lr': 0.00001,
                 'actor_structure': actor_structure,
                 'critic_structure': critic_structure,
                 'buffer_size': 1000000,
                 'gamma': 0.999,
                 'max_episode_len': 400,
                 'max_episodes': learning_eposides,
                 'minibatch_size': 64,
                 'random_seed': 122,
                 'tau': 0.005,
                 'model_path': train_dir+"model.chkp",
                 'enable_test': nn_test,
                 'test_episodes': test_episodes,
                 'test_episodes_len': 1200}
    actor = DDPG(env, args)

    #################### Shield #################
    # The shield's linear controller is stored next to the DDPG checkpoint.
    model_path = os.path.split(args['model_path'])[0]+'/'
    linear_func_model_name = 'K.model'
    model_path = model_path+linear_func_model_name+'.npy'

    def rewardf(x, Q, u, R):
        # delegate to the environment's reward (Q, R unused here)
        return env.reward(x, u)

    names = {0:"x0", 1:"x1", 2:"x2", 3:"x3", 4:"x4", 5:"x5", 6:"x6", 7:"x7", 8:"x8", 9:"x9", 10:"x10", 11:"x11", 12:"x12", 13:"x13", 14:"x14"}
    shield = Shield(env, actor, model_path, force_learning=retrain_shield)
    shield.train_shield(learning_method, number_of_rollouts, simulation_steps, rewardf=rewardf, names=names, explore_mag = 0.1, step_size = 0.1, enable_jit=True)
    if shield_test:
        shield.test_shield(test_episodes, 1200)

    actor.sess.close()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Running Options')
    parser.add_argument('--nn_test', action="store_true", dest="nn_test")
    parser.add_argument('--retrain_shield', action="store_true", dest="retrain_shield")
    parser.add_argument('--shield_test', action="store_true", dest="shield_test")
    # default=100 replaces the previous post-hoc "if None" fallback
    parser.add_argument('--test_episodes', action="store", dest="test_episodes", type=int, default=100)
    parser.add_argument('--retrain_nn', action="store_true", dest="retrain_nn")
    parser_res = parser.parse_args()

    carplatoon("random_search", 500, 2000, 0, [400, 300, 200], [500, 400, 300, 200],
        "ddpg_chkp/car-platoon/continuous/8/400300200500400300200/",
        nn_test=parser_res.nn_test,
        retrain_shield=parser_res.retrain_shield,
        shield_test=parser_res.shield_test,
        test_episodes=parser_res.test_episodes,
        retrain_nn=parser_res.retrain_nn)
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
#
# Scratchpad for working with raw U2F messages, useful for creating raw
# messages as test data.
# Example keys from section 8.2 of
# https://fidoalliance.org/specs/fido-u2f-v1.0-nfc-bt-amendment-20150514/fido-u2f-raw-message-formats.html#authentication-response-message-success
from binascii import hexlify, unhexlify
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec

# ECDSA over P-256 with SHA-256, as mandated by the U2F spec.
sig_alg = ec.ECDSA(hashes.SHA256())

# Spec example key pair: 32-byte private scalar and an uncompressed
# public point encoded as 04 || X || Y.
private_key_hex = 'ffa1e110dde5a2f8d93c4df71e2d4337b7bf5ddb60c75dc2b6b81433b54dd3c0'
public_key_hex = '04d368f1b665bade3c33a20f1e429c7750d5033660c019119d29aa4ba7abc04aa7c80a46bbe11ca8cb5674d74f31f8a903f6bad105fb6ab74aefef4db8b0025e1d'
example_payload_hex = '4b0be934baebb5d12d26011b69227fa5e86df94e7d94aa2949a89f2d493992ca0100000001ccd6ee2e47baef244d49a222db496bad0ef5b6f93aa7cc4d30c4821b3b9dbc57'
example_signature_hex = '304402204b5f0cd17534cedd8c34ee09570ef542a353df4436030ce43d406de870b847780220267bb998fac9b7266eb60e7cb0b5eabdfd5ba9614f53c7b22272ec10047a923f'

# Rebuild the key object from its raw numbers (hex slices: skip the
# leading "04" marker, then 64 hex chars of X and 64 of Y).
s = int(private_key_hex, 16)
x = int(public_key_hex[2:66], 16)
y = int(public_key_hex[66:], 16)
keynums = ec.EllipticCurvePrivateNumbers(s, ec.EllipticCurvePublicNumbers(x, y, ec.SECP256R1()))
private_key = keynums.private_key(default_backend())
public_key = private_key.public_key()

# Just ensure that we can successfully verify the example signature against
# the example key (verify() raises InvalidSignature on mismatch).
public_key.verify(unhexlify(example_signature_hex), unhexlify(example_payload_hex), sig_alg)

# Successful authentication message, but with invalid user presence byte
payload_hex = '4b0be934baebb5d12d26011b69227fa5e86df94e7d94aa2949a89f2d493992ca0000000001ccd6ee2e47baef244d49a222db496bad0ef5b6f93aa7cc4d30c4821b3b9dbc57'
payload_signature = private_key.sign(unhexlify(payload_hex), sig_alg)

print("Private key:", private_key_hex)
print("Public key:", public_key_hex)
print("Signing payload:", payload_hex)
print("Signature:", hexlify(payload_signature))
|
nilq/baby-python
|
python
|
# encoding: utf-8
from workflow import web, Workflow, PasswordNotFound
def get_saved_searches(api_key, url):
    """
    Fetch all pages of Kibana saved searches
    (original docstring said "projects" — copy/paste leftover)
    :return: list
    """
    return get_saved_searches_page(api_key, url, 1, [])
def get_dashboards(api_key, url):
    """
    Fetch all pages of Kibana dashboards
    (original docstring said "projects" — copy/paste leftover)
    :return: list
    """
    return get_dashboard_page(api_key, url, 1, [])
def get_saved_searches_page(api_key, url, page, results):
    """Fetch one page of saved searches and recurse onto the next page.

    :param api_key: unused here; kept for call-site compatibility
    :param url: base Kibana URL
    :param page: page number to request
    :param results: accumulator of saved objects collected so far
        (renamed from ``list``, which shadowed the builtin)
    :return: list of saved-object dicts
    """
    log.info("Calling searches API page {page}".format(page=page))
    params = dict(type='search', per_page=100, page=page, search_fields='title')
    headers = {'accept-encoding': 'gzip'}
    r = web.get(url + '/api/saved_objects/', params, headers)
    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()
    # Parse the JSON returned by Kibana and extract the saved objects
    result = results + r.json()['saved_objects']
    # The X-Next-Page header signals more pages; recurse until it is absent.
    nextpage = r.headers.get('X-Next-Page')
    if nextpage:
        result = get_saved_searches_page(api_key, url, nextpage, result)
    return result
def get_dashboard_page(api_key, url, page, results):
    """Fetch one page of dashboards and recurse onto the next page.

    :param api_key: unused here; kept for call-site compatibility
    :param url: base Kibana URL
    :param page: page number to request
    :param results: accumulator of saved objects collected so far
        (renamed from ``list``, which shadowed the builtin)
    :return: list of saved-object dicts
    """
    log.info("Calling dashboards API page {page}".format(page=page))
    params = dict(type='dashboard', per_page=100, page=page, search_fields='title')
    headers = {'accept-encoding': 'gzip'}
    r = web.get(url + '/api/saved_objects/', params, headers)
    # throw an error if request failed
    # Workflow will catch this and show it to the user
    r.raise_for_status()
    # Parse the JSON returned by Kibana and extract the saved objects
    result = results + r.json()['saved_objects']
    # The X-Next-Page header signals more pages; recurse until it is absent.
    nextpage = r.headers.get('X-Next-Page')
    if nextpage:
        result = get_dashboard_page(api_key, url, nextpage, result)
    return result
def main(wf):
    """Refresh the workflow's caches of Kibana saved searches and dashboards."""
    try:
        api_url = wf.settings.get('api_url')

        # Closures so cached_data can lazily refetch when the cache expires.
        def fetch_searches():
            return get_saved_searches('', api_url)

        def fetch_dashboards():
            return get_dashboards('', api_url)

        saved_searches = wf.cached_data('saved_searches', fetch_searches, max_age=3600)
        dashboards = wf.cached_data('dashboards', fetch_dashboards, max_age=3600)

        # Record our progress in the log file
        log.debug('{} kibana searches cached'.format(len(saved_searches)))
        log.debug('{} kibana dashboards cached'.format(len(dashboards)))
    except PasswordNotFound:  # API key has not yet been set
        # Nothing we can do about this, so just log it
        wf.logger.error('No API key saved')
if __name__ == u"__main__":
    # Instantiate the Alfred workflow and run main() under its error handling.
    wf = Workflow()
    log = wf.logger  # module-level logger used by the fetch helpers above
    wf.run(main)
|
nilq/baby-python
|
python
|
##############################################################################
# Written by: Cachen Chen <cachen@novell.com>
# Date: 08/05/2008
# Description: hscrollbar.py wrapper script
# Used by the hscrollbar-*.py tests
##############################################################################
import sys
import os
import actions
import states
from strongwind import *
from hscrollbar import *
# class to represent the main window.
class HScrollBarFrame(accessibles.Frame):
    """Strongwind wrapper for the hscrollbar sample's main window.

    Exposes the horizontal scroll bar and its value label, and provides the
    assertion helpers used by the hscrollbar-*.py tests.
    """

    # constants
    # the available widgets on the window
    LABEL = "Value:"
    # Expected accessible-value properties of the scroll bar.
    MAXVAL = 100
    MINVAL = 0
    MININCREMENT = 10

    def __init__(self, accessible):
        super(HScrollBarFrame, self).__init__(accessible)
        self.label = self.findLabel(self.LABEL)
        self.hscrollbar = self.findScrollBar(None)
        # Cache the scroll bar's accessible value-interface properties.
        self.maximumValue = \
            self.hscrollbar._accessible.queryValue().maximumValue
        self.minimumValue = \
            self.hscrollbar._accessible.queryValue().minimumValue
        self.minimumIncrement = \
            self.hscrollbar._accessible.queryValue().minimumIncrement
        # BUG499883 - Accessible maximum value of a scroll bar is 119
        #assert self.maximumValue == self.MAXVAL, \
        #       "maximum value was %s, expected %s" % \
        #       (self.maximumValue, self.MAXVAL)
        assert self.minimumValue == self.MINVAL, \
            "minimum value was %s, expected %s" % \
            (self.minimumValue, self.MINVAL)
        assert self.minimumIncrement == self.MININCREMENT, \
            "minimum increment value was %s, expected %s" % \
            (self.minimumIncrement, self.MININCREMENT)

    # change hscrollbar's value
    def assignScrollBar(self, new_value):
        """Set the scroll bar to new_value (logged as a procedure action)."""
        procedurelogger.action('set scrollbar value to "%s"' % new_value)
        self.hscrollbar.value = new_value

    def assertLabel(self, value):
        """Assert that the label text reflects the given value."""
        procedurelogger.expectedResult('label\'s value changed to "%s"' % value)
        expected_label = "Value: %s" % value
        assert self.label.text == expected_label, \
            'Label reads "%s", expected "%s"' % (self.label, expected_label)

    def assertMaximumValue(self):
        """Assert the scroll bar's maximum value equals MAXVAL."""
        procedurelogger.action("Ensure that %s's maximum value is what we expect" % self.hscrollbar)
        procedurelogger.expectedResult("%s's maximum value is %s" % \
                                       (self.hscrollbar, self.MAXVAL))
        self.maximumValue = \
            self.hscrollbar._accessible.queryValue().maximumValue
        assert self.maximumValue == self.MAXVAL, \
            "Maximum value is %s, expected %s" % \
            (self.maximumValue, self.MAXVAL)

    def assertMinimumValue(self):
        """Assert the scroll bar's minimum value equals MINVAL."""
        procedurelogger.action("Ensure that %s's minimum value is what we expect" % self.hscrollbar)
        procedurelogger.expectedResult("%s's minimum value is %s" % \
                                       (self.hscrollbar, self.MINVAL))
        self.minimumValue = \
            self.hscrollbar._accessible.queryValue().minimumValue
        assert self.minimumValue == self.MINVAL, \
            "Minimum value is %s, expected %s" % \
            (self.minimumValue, self.MINVAL)

    def assertMinimumIncrement(self):
        """Assert the scroll bar's minimum increment equals MININCREMENT."""
        procedurelogger.action("Ensure that %s's minimum increment is what we expect" % self.hscrollbar)
        # BUG FIX: the expected-result log previously printed MINVAL (0)
        # instead of MININCREMENT (10).
        procedurelogger.expectedResult("%s's minimum increment is %s" % \
                                       (self.hscrollbar, self.MININCREMENT))
        self.minimumIncrement = \
            self.hscrollbar._accessible.queryValue().minimumIncrement
        assert self.minimumIncrement == self.MININCREMENT, \
            "minimum increment value was %s, expected %s" % \
            (self.minimumIncrement, self.MININCREMENT)

    def assertScrollBar(self, expected_value):
        """Assert the scroll bar currently holds expected_value."""
        procedurelogger.expectedResult('the scrollbar\'s current value is %s' % expected_value)
        assert self.hscrollbar.value == expected_value, \
            "scrollbar's current value is %s, expected %s" % \
            (self.hscrollbar.value, expected_value)

    # close application window
    def quit(self):
        """Close the application window via Alt+F4."""
        self.altF4()
|
nilq/baby-python
|
python
|
from dotenv import load_dotenv
import os

# Populate os.environ from a local .env file, reporting what was loaded.
load_dotenv(verbose=True)

# Discord bot credential; None when the variable is not set anywhere.
DISCORD_BOT_TOKEN = os.environ.get('DISCORD_BOT_TOKEN')
|
nilq/baby-python
|
python
|
# Time: O(log n)
# Space: O(log n) recursion (call-stack) depth
class Solution:
    """Find the first and last index of *target* in sorted *nums* (recursive)."""

    def searchRange(self, nums, target):
        """Return [first, last] positions of target, or [-1, -1] when absent."""
        lo, hi = 0, len(nums) - 1
        return [self.binarySearch(nums, lo, hi, target, True),
                self.binarySearch(nums, lo, hi, target, False)]

    def binarySearch(self, nums, low, high, target, findFirst):
        """Recursive binary search for the first (findFirst=True) or last
        occurrence of target within nums[low:high+1]; -1 when not found."""
        if low > high:
            return -1
        mid = (low + high) // 2
        # A boundary hit: the value matches and there is no equal element
        # immediately on the side we care about.
        if findFirst:
            if nums[mid] == target and (mid == 0 or nums[mid - 1] < target):
                return mid
            if nums[mid] < target:
                return self.binarySearch(nums, mid + 1, high, target, findFirst)
            return self.binarySearch(nums, low, mid - 1, target, findFirst)
        if nums[mid] == target and (mid == len(nums) - 1 or nums[mid + 1] > target):
            return mid
        if nums[mid] > target:
            return self.binarySearch(nums, low, mid - 1, target, findFirst)
        return self.binarySearch(nums, mid + 1, high, target, findFirst)
# Time: O(log n)
# Space: O(1)
class SolutionIterative:
    """Find the first and last index of target in a sorted list, iteratively.

    Time: O(log n); Space: O(1).

    BUG FIX: the original annotated the parameters with ``List[int]``
    without importing ``typing.List``, which raises NameError the moment
    the class body is executed; the annotations are dropped and the types
    documented here instead (nums: list[int], target: int -> list[int]).
    """

    def searchRange(self, nums, target):
        """Return [first, last] indices of target in nums, or [-1, -1]."""
        first = self.binarySearch(nums, 0, len(nums) - 1, target, True)
        last = self.binarySearch(nums, 0, len(nums) - 1, target, False)
        return [first, last]

    def binarySearch(self, nums, low, high, target, findFirst):
        """Iterative binary search for the first/last occurrence of target."""
        while low <= high:
            mid = low + (high - low) // 2
            if findFirst:
                # First occurrence: match with no equal element to the left.
                if (mid == 0 or target > nums[mid - 1]) and nums[mid] == target:
                    return mid
                elif target > nums[mid]:
                    low = mid + 1
                else:
                    high = mid - 1
            else:
                # Last occurrence: match with no equal element to the right.
                if (mid == len(nums) - 1 or target < nums[mid + 1]) and nums[mid] == target:
                    return mid
                elif target < nums[mid]:
                    high = mid - 1
                else:
                    low = mid + 1
        return -1
# Demo: both implementations should print the same [first, last] range
# for target x in the sorted sample array ([5, 6] here).
arr = [1, 3, 3, 5, 7, 9, 9, 10, 12]
x = 9
solution = Solution()
print(solution.searchRange(arr, x))
solution_iterative = SolutionIterative()
print(solution_iterative.searchRange(arr, x))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# import model interface
from . import models
# import constraints
from . import constraints
# import tasks
from . import tasks
# import solvers
from . import solvers
|
nilq/baby-python
|
python
|
import csv
from django.db import models
import reversion
from django.core.exceptions import ObjectDoesNotExist
@reversion.register()
class FileTemplate(models.Model):
    """Template describing the expected columns of a CSV file used as task
    input, equipment, or output, plus how to read/write such files."""

    # Where in a task's lifecycle this template's file is used.
    FILE_FOR_CHOICES = (
        ('input', 'Input'),
        ('equip', 'Equipment'),
        ('output', 'Output'),
    )
    name = models.CharField(max_length=200, db_index=True, unique=True)
    file_for = models.CharField(max_length=6, choices=FILE_FOR_CHOICES)
    # Task specific options
    # Output each input item (excluding labware) by line rather than product
    use_inputs = models.BooleanField(default=False)
    # Collate inputs, only provide total amounts from task
    # By default each input is broken down per product
    total_inputs_only = models.BooleanField(default=False)

    class Meta:
        ordering = ['-id']

    def field_name(self):
        # Normalised snake_case form of the template name.
        return self.name.lower().replace(' ', '_')

    def _get_field_key(self, field):
        # A field may map to a differently named model/DB attribute.
        if field.map_to:
            return field.map_to
        return field.name

    def _validate_headers(self, header_list):
        # True only when every required template field appears in the CSV header.
        if header_list is None:
            return False
        for field in self.fields.all():
            if field.required and field.name not in header_list:
                return False
        return True

    def read(self, input_file, as_list=False):
        """Parse *input_file* (a CSV stream) against this template.

        Returns a dict keyed by a frozenset of the identifier-field values
        (or a plain list when as_list=True), or False when the headers are
        invalid or identifiers are combined with as_list.
        NOTE(review): identifier fields and as_list are mutually exclusive —
        the method returns False for that combination, by design.
        """
        csv_file = csv.DictReader(input_file)
        try:
            identifier_fields = self.fields.filter(is_identifier=True)
        except ObjectDoesNotExist:
            return False
        else:
            if as_list:
                indexed = []
            else:
                indexed = {}
            if self._validate_headers(csv_file.fieldnames):
                for line in csv_file:
                    # Drop blank/whitespace-only cells before processing.
                    line = dict([(k, v) for k, v in line.items() if v.strip()])
                    if any(line):
                        # Get the identifier fields from the file
                        identifier = frozenset(line[n.name] for n in identifier_fields)
                        # Get a list of identifiers and remove from line
                        ifn = [i.name for i in identifier_fields]
                        # We don't want to used identifiers if it's a list as they'll be
                        # discarded.
                        if as_list and len(ifn) > 0:
                            return False
                        generated_line = {}
                        # TODO: Currently we discard extra fields in CSV that are not in
                        # filetemplate. Change this?
                        for field in self.fields.all():
                            # Don't add identifier fields
                            if field.name not in ifn and field.name in line:
                                field_value = line[field.name]
                                # May map to different DB field
                                field_key = self._get_field_key(field)
                                if field.is_property:
                                    # Property fields accumulate under a
                                    # 'properties' list of name/value dicts.
                                    if 'properties' not in generated_line:
                                        generated_line['properties'] = []
                                    prop = {
                                        'name': field_key,
                                        'value': field_value
                                    }
                                    generated_line['properties'].append(prop)
                                else:
                                    generated_line[field_key] = field_value
                        if as_list:
                            indexed.append(generated_line)
                        else:
                            indexed[identifier] = generated_line
                return indexed
            return False

    def write(self, output_file, data, column_order='name'):
        """Write *data* (an iterable of dicts) to *output_file* as CSV,
        using this template's field names as the header, ordered by
        *column_order*; extra keys in the dicts are ignored."""
        fieldnames = [item.name for item in self.fields.all().order_by(column_order)]
        csv_output = csv.DictWriter(output_file, fieldnames=fieldnames,
                                    extrasaction='ignore', lineterminator='\n')
        csv_output.writeheader()
        csv_output.writerows(data)
        return output_file

    def __str__(self):
        return self.name
@reversion.register()
class FileTemplateField(models.Model):
    """A single column definition belonging to a FileTemplate."""

    # Name of the field in the file
    name = models.CharField(max_length=50)
    # Name of the field in the DB (if different to file header)
    map_to = models.CharField(max_length=50, null=True, blank=True)
    # Must be present in the CSV header for the file to validate.
    required = models.BooleanField(default=False)
    # Identifier fields together form the lookup key when reading to a dict.
    is_identifier = models.BooleanField(default=False)
    # Is to be used as/read from a property not a field
    # Ignore on anything that does not support reading/writing
    # properties on objects.
    is_property = models.BooleanField(default=False)
    template = models.ForeignKey(FileTemplate, related_name='fields')

    def get_key(self):
        # Prefer the mapped DB name over the file header name.
        if self.map_to:
            return self.map_to
        return self.name

    def key_to_path(self):
        # Dotted keys address nested attributes; split into path segments.
        key = self.get_key()
        return key.split('.')

    def __str__(self):
        return self.name
|
nilq/baby-python
|
python
|
import daisy
import unittest
class TestMetaCollection(unittest.TestCase):
    """Integration tests for how MongoDbGraphProvider persists and validates
    its meta values (directedness, total ROI) across write/read sessions."""

    def get_mongo_graph_provider(self, mode, directed, total_roi):
        # Helper: open the shared 'test_daisy_graph' DB in the given mode.
        return daisy.persistence.MongoDbGraphProvider(
            'test_daisy_graph',
            directed=directed,
            total_roi=total_roi,
            mode=mode)

    def test_graph_read_meta_values(self):
        """Meta values written in 'w' mode are visible to a later 'r' session."""
        roi = daisy.Roi((0, 0, 0),
                        (10, 10, 10))
        # Write session stores directed=True and the ROI...
        self.get_mongo_graph_provider(
            'w', True, roi)
        # ...and a read session with no explicit values picks them up.
        graph_provider = self.get_mongo_graph_provider(
            'r', None, None)
        self.assertEqual(True, graph_provider.directed)
        self.assertEqual(roi, graph_provider.total_roi)

    def test_graph_default_meta_values(self):
        """Unspecified meta values default to undirected with no ROI."""
        provider = self.get_mongo_graph_provider(
            'w', None, None)
        self.assertEqual(False, provider.directed)
        self.assertIsNone(provider.total_roi)
        graph_provider = self.get_mongo_graph_provider(
            'r', None, None)
        self.assertEqual(False, graph_provider.directed)
        self.assertIsNone(graph_provider.total_roi)

    def test_graph_nonmatching_meta_values(self):
        """Reading with meta values that contradict the stored ones raises."""
        roi = daisy.Roi((0, 0, 0),
                        (10, 10, 10))
        roi2 = daisy.Roi((1, 0, 0),
                         (10, 10, 10))
        # Stored directed=True vs requested directed=False -> ValueError.
        self.get_mongo_graph_provider(
            'w', True, None)
        with self.assertRaises(ValueError):
            self.get_mongo_graph_provider(
                'r', False, None)
        # Stored ROI vs a differently-offset ROI -> ValueError.
        self.get_mongo_graph_provider(
            'w', None, roi)
        with self.assertRaises(ValueError):
            self.get_mongo_graph_provider(
                'r', None, roi2)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 29 08:40:49 2018
@author: user
"""
import numpy as np
np.random.seed(1)
from matplotlib import pyplot as plt
import skimage.data
from skimage.color import rgb2gray
from skimage.filters import threshold_mean
from skimage.transform import resize
import network
# Utils
def get_corrupted_input(input, corruption_level):
    """Return a copy of *input* with each unit's sign flipped independently
    with probability *corruption_level* (Bernoulli noise)."""
    flips = np.random.binomial(n=1, p=corruption_level, size=len(input))
    noisy = np.copy(input)
    for idx, value in enumerate(input):
        if flips[idx]:
            noisy[idx] = -value
    return noisy
def reshape(data):
    """View a flat length-n*n vector as an (n, n) square array."""
    side = int(np.sqrt(len(data)))
    return np.reshape(data, (side, side))
def plot(data, test, predicted, figsize=(5, 6)):
    """Render one row per pattern with columns train/input/output,
    save the figure to result.png and show it."""
    data = [reshape(d) for d in data]
    test = [reshape(d) for d in test]
    predicted = [reshape(d) for d in predicted]

    fig, axarr = plt.subplots(len(data), 3, figsize=figsize)
    column_titles = ('Train data', "Input data", 'Output data')
    for row in range(len(data)):
        # Column headers only on the first row.
        if row == 0:
            for col, title in enumerate(column_titles):
                axarr[row, col].set_title(title)
        for col, image in enumerate((data[row], test[row], predicted[row])):
            axarr[row, col].imshow(image)
            axarr[row, col].axis('off')

    plt.tight_layout()
    plt.savefig("result.png")
    plt.show()
def preprocessing(img, w=128, h=128):
    """Resize an image to (w, h), binarize at its mean intensity, map the
    booleans to bipolar {-1, +1} units, and flatten to a length w*h vector."""
    # Resize image
    resized = resize(img, (w, h), mode='reflect')
    # Mean-threshold to a boolean image, then map {False, True} -> {-1, +1}.
    binary = resized > threshold_mean(resized)
    bipolar = 2 * (binary * 1) - 1
    # Flatten for the Hopfield network.
    return np.reshape(bipolar, (w * h))
def main():
    """Train a Hopfield net on four stock images and test recall from noisy input."""
    # Load the four demo images, converting the RGB ones to grayscale.
    camera = skimage.data.camera()
    astronaut = rgb2gray(skimage.data.astronaut())
    horse = skimage.data.horse()
    coffee = rgb2gray(skimage.data.coffee())

    print("Start to data preprocessing...")
    data = [preprocessing(img) for img in (camera, astronaut, horse, coffee)]

    # Train the Hopfield network on the clean bipolar patterns.
    model = network.HopfieldNetwork()
    model.train_weights(data)

    # Corrupt 30% of each pattern's units, then ask the net to restore them.
    test = [get_corrupted_input(pattern, 0.3) for pattern in data]
    predicted = model.predict(test, threshold=0, asyn=False)

    print("Show prediction results...")
    plot(data, test, predicted)
    print("Show network weights matrix...")
    #model.plot_weights()

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
def main() -> None:
    """Validate the input format: a header line "N K" with 1 <= K <= N <= 100,
    then N rows of exactly three integers, each in [0, 300]."""
    N, K = map(int, input().split())
    assert 1 <= K <= N <= 100
    for _ in range(N):
        row = tuple(map(int, input().split()))
        assert len(row) == 3
        assert all(0 <= value <= 300 for value in row)

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
# Python 2 script: query a trained rasa NLU model directly through its
# Python API and pretty-print the intent/entity parse for one sentence.
import sys,io,os
from mitie import *
from collections import defaultdict
reload(sys)
sys.setdefaultencoding('utf-8')
# Reference: https://nlu.rasa.com/python.html
# Test script: fetch rasa NLU intent and entity recognition results
# directly through the Python API (translated from the original Chinese).
sys.path.append('../MITIE/mitielib')
from rasa_nlu.model import Metadata, Interpreter
def print_beatuiful(obj):
    """Pretty-print a dict, unescaping unicode so CJK text is readable.

    NOTE(review): name keeps the original typo ("beatuiful") in case other
    scripts import it.
    """
    if isinstance(obj,dict):
        for k,v in obj.items():
            print "\t",
            print str(k).decode("unicode-escape"),
            print " = " ,
            print str(v).decode("unicode-escape")
# where `model_directory points to the folder the model is persisted in
interpreter = Interpreter.load("../model/default/latest/")
sentence = u"我 的 手机号 是 xxxxxxx"
result = interpreter.parse(sentence)
print sentence
print "预测结果为:"
import json
print type(result)
print json.dumps(result, indent=4, sort_keys=True).decode("unicode-escape")
# print print_beatuiful(result)
|
nilq/baby-python
|
python
|
from tir import Webapp
import unittest
class GTPA107(unittest.TestCase):
    """TIR UI test suite for Protheus routine GTPA107 (remittance lots):
    view, cancel, transfer, return, protocol write-off and its reversal."""

    @classmethod
    def setUpClass(inst):
        # Start the web app once for the whole suite and open routine GTPA107.
        inst.oHelper = Webapp()
        inst.oHelper.Setup("SIGAGTP", "20/04/2020", "T1", "D MG 01 ")
        inst.oHelper.Program('GTPA107')

    def test_GTPA107_CT001(self):
        """CT001: open a remittance lot in view mode and close it."""
        self.oHelper.SearchBrowse("D MG 000033", "Filial+lote Remessa")
        self.oHelper.SetButton("Visualizar")
        self.oHelper.SetButton("Fechar")
        self.oHelper.AssertTrue()

    def test_GTPA107_CT002(self):
        """CT002: cancel a remittance for document range 000010-000010."""
        self.oHelper.SearchBrowse("D MG 000033", "Filial+lote Remessa")
        self.oHelper.SetButton("Outras Ações", "Cancelar Remessa")
        self.oHelper.SetButton("OK")
        self.oHelper.SetValue('GQG_NUMINI', '000010')
        self.oHelper.SetValue('GQG_NUMFIM', '000010')
        self.oHelper.SetButton("Confirmar")
        self.oHelper.SetButton("Fechar")
        self.oHelper.AssertTrue()

    def test_GTPA107_CT003(self):
        """CT003: transfer a remittance range to agency AGREM5."""
        self.oHelper.SearchBrowse("D MG 000034", "Filial+lote Remessa")
        self.oHelper.SetButton("Outras Ações", "Transferir Remessa")
        self.oHelper.SetButton("OK")
        self.oHelper.SetValue('GQG_NUMINI', '000010')
        self.oHelper.SetValue('GQG_NUMFIM', '000010')
        self.oHelper.SetValue('GQG_AGENCI', 'AGREM5')
        self.oHelper.SetButton("Confirmar")
        self.oHelper.SetButton("Fechar")
        self.oHelper.AssertTrue()

    def test_GTPA107_CT004(self):
        """CT004: register the return (devolução) of a remittance range."""
        self.oHelper.SearchBrowse("D MG 000035", "Filial+lote Remessa")
        self.oHelper.SetButton("Outras Ações", "Devolução de Remessa")
        self.oHelper.SetButton("OK")
        self.oHelper.SetValue('GQG_NUMINI', '000010')
        self.oHelper.SetValue('GQG_NUMFIM', '000010')
        self.oHelper.SetButton("Confirmar")
        self.oHelper.SetButton("Fechar")
        self.oHelper.AssertTrue()

    def test_GTPA107_CT006(self):
        """CT006: write off (baixa) the lot's protocol."""
        self.oHelper.SearchBrowse("D MG 000042", "Filial+lote Remessa")
        self.oHelper.SetButton("Outras Ações", "Baixa Protocolo")
        self.oHelper.SetButton("Sim")
        self.oHelper.SetButton("Fechar")
        self.oHelper.AssertTrue()

    def test_GTPA107_CT007(self):
        """CT007: revert (estorno) a protocol write-off."""
        self.oHelper.SearchBrowse("D MG 000043", "Filial+lote Remessa")
        self.oHelper.SetButton("Outras Ações", "Estorno Baixa Protocolo")
        self.oHelper.SetButton("Sim")
        self.oHelper.SetButton("Fechar")
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        # Shut the browser/app session down after the whole suite.
        inst.oHelper.TearDown()

if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import unittest
import os
import time
from bones.utils import *
class TestUtils(unittest.TestCase):
    """Unit tests for the filename helpers in bones.utils."""

    def test_temp_filename_collision(self):
        """Two consecutive calls must yield distinct names."""
        fn1 = temp_filename()
        fn2 = temp_filename()
        self.assertNotEqual(fn1, fn2)

    def test_temp_filename_kwargs(self):
        """prefix/postfix/ext keywords shape the generated name."""
        fn = temp_filename(prefix="temp_")
        self.assertTrue(fn.startswith("temp_"))
        fn = temp_filename(postfix="_temp")
        self.assertTrue(fn.endswith("_temp"))
        fn = temp_filename(ext="dat")
        self.assertTrue(fn.endswith(".dat"))
        fn = temp_filename(prefix="/usr/local/", postfix="_temp", ext="dat")
        self.assertTrue(fn.startswith("/usr/local/"))
        self.assertTrue(fn.endswith("_temp.dat"))

    def test_is_stale(self):
        """Exercise is_stale() with two files whose mtimes differ by 100s.

        NOTE(review): the variable names look inverted relative to the
        mtimes assigned below (younger_fn gets the older mtime); the
        assertions encode the current behaviour of bones.utils.is_stale —
        confirm the intended semantics there.
        """
        younger_fn = temp_filename(prefix="/tmp/")
        older_fn = temp_filename(prefix="/tmp/")
        ts = time.time()
        touch(older_fn, mtime=ts)
        touch(younger_fn, mtime=ts - 100)
        try:
            self.assertFalse(is_stale(younger_fn, older_fn))
            self.assertTrue(is_stale(older_fn, younger_fn))
        finally:
            # Always remove the temp files, even when an assertion fails.
            os.unlink(younger_fn)
            os.unlink(older_fn)

    def test_common_filename(self):
        """common_filename returns the longest shared prefix of its arguments."""
        fn1 = "/this/is/common/filename_elephant"
        fn2 = "/this/is/common/filename_rhino"
        fn3 = "/this/is/common/filename_cat"
        cfn = common_filename(fn1, fn2, fn3)
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is the supported spelling.
        self.assertEqual(cfn, "/this/is/common/filename_")
        # nothing similar
        fn4 = "not like the others"
        cfn = common_filename(fn1, fn2, fn3, fn4)
        self.assertEqual(cfn, "")
        # short match
        fn5 = "/this/is/common/filename_"
        cfn = common_filename(fn1, fn2, fn3, fn5)
        self.assertEqual(cfn, "/this/is/common/filename_")

if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
from django.http import HttpRequest
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from project_core.tests import database_population
class CallListTest(TestCase):
    """View tests for the logged (management) funding-instrument pages.

    NOTE(review): the class name says "CallListTest" but every test targets
    funding-instrument views — possibly a copy/paste leftover.
    """

    def setUp(self):
        # Management user, a funding-instrument fixture, and a client that
        # is already logged in with management permissions.
        self._user = database_population.create_management_user()
        self._funding_instrument = database_population.create_funding_instrument()
        self._client_management = database_population.create_management_logged_client()

    def test_load_funding_instrument_add(self):
        """The add form loads (200) for a logged-in management user."""
        login = self._client_management.login(username='unittest_management', password='12345', request=HttpRequest())
        self.assertTrue(login)
        response = self._client_management.get(reverse('logged-funding-instrument-add'))
        self.assertEqual(response.status_code, 200)

    def test_load_funding_instruments_list(self):
        """The list view loads (200)."""
        response = self._client_management.get(reverse('logged-funding-instrument-list'))
        self.assertEqual(response.status_code, 200)

    def test_load_funding_instrument_update_get(self):
        """The update form loads and shows the instrument's long name."""
        response = self._client_management.get(reverse('logged-funding-instrument-update', kwargs={'pk': self._funding_instrument.id}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self._funding_instrument.long_name)

    def test_load_funding_instrument_detail(self):
        """The detail view loads and shows the instrument's long name."""
        response = self._client_management.get(reverse('logged-funding-instrument-detail', kwargs={'pk': self._funding_instrument.id}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self._funding_instrument.long_name)
|
nilq/baby-python
|
python
|
from .base import Base
class Ls(Base):
    """Show List"""

    def run(self):
        """Dispatch to Base.show according to the <ctgr>/<done> CLI options."""
        category = self.options['<ctgr>']
        if category == "done":
            # "ls done": completed items across all categories.
            self.show(None, 1)
        elif category == "all":
            # "ls all": everything, regardless of completion status.
            self.show(None, None)
        else:
            # "ls <ctgr> [done]": one category, filtered by completion flag.
            done_flag = 1 if self.options['<done>'] == "done" else 0
            self.show(category, done_flag)
|
nilq/baby-python
|
python
|
import io
import os
import sys

from setuptools import setup

# setup.py for bouncing_objects_generator.

if sys.version_info < (3, 6):
    sys.exit('Sorry, Python < 3.6.0 is not supported')

DESCRIPTION = 'Images Generator for bouncing objects movie'

here = os.path.abspath(os.path.dirname(__file__))

# Use README.md as the long description when available; fall back to the
# short description so the script still works from an sdist without it.
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION

# Load __version__ from the package's _version.py without importing the
# package (whose runtime dependencies may be unmet at install time).
# FIX: the original exec(open(...).read()) never closed the file handle.
with io.open(os.path.join(here, 'bouncing_objects_generator', '_version.py'),
             encoding='utf-8') as f:
    exec(f.read())

setup(
    name='bouncing_objects_generator',
    version=__version__,
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Kazuhiro Serizawa',
    author_email='nserihiro@gmail.com',
    url='https://github.com/serihiro/bouncing_objects_generator',
    license='MIT',
    packages=['bouncing_objects_generator'],
    install_requires=['numpy>=1.15', 'pillow>=5.0'],
    entry_points={
        'console_scripts': ['bouncing_objects_generator=bouncing_objects_generator.cli:main']
    }
)
nilq/baby-python
|
python
|
import os

from tqdm import tqdm
from PIL import Image, UnidentifiedImageError

if __name__ == '__main__':
    # Scan every file in jpg_path; files Pillow cannot identify as images
    # are quarantined into broken_jpg_path.
    jpg_path = '../shufa_pic/shufa'
    broken_jpg_path = '../shufa_pic/broken_img'
    for jpg_file in tqdm(os.listdir(jpg_path)):
        src = os.path.join(jpg_path, jpg_file)
        try:
            # Image.open is lazy, so opening is enough to detect unreadable
            # files. FIX: use a context manager so the handle is closed —
            # the original leaked one file descriptor per image.
            with Image.open(src):
                pass
        except UnidentifiedImageError:
            trg = os.path.join(broken_jpg_path, jpg_file)
            os.rename(src, trg)
nilq/baby-python
|
python
|
#!/usr/bin/env python
# this just calculates the roots, it doesn't generate the heat map
# see https://thoughtstreams.io/jtauber/littlewood-fractals/
import itertools
import sys
import time
import numpy
DEGREE = 16
INNER_ONLY = False
print "generating roots for degree={}".format(DEGREE,)
start = time.time()
count = 0
click = 2 ** DEGREE / 10
next = click
if INNER_ONLY:
filename = "roots_{}b.txt".format(DEGREE)
else:
filename = "roots_{}.txt".format(DEGREE)
with open(filename, "wb") as f:
for poly in itertools.product(*([[-1, 1]] * DEGREE)):
count += 1
if count == next:
print >> sys.stderr, count
next += click
for root in numpy.roots((1,) + poly):
if root.real >= 0 and root.imag >= 0:
if not INNER_ONLY or abs(root) <= 1:
print >> f, root.real, root.imag
print >> sys.stderr, "wrote out {} in {} seconds".format(filename, time.time() - start)
|
nilq/baby-python
|
python
|
from aiohttp.test_utils import TestClient
from server.serializer import JSendSchema, JSendStatus
from server.serializer.fields import Many
from server.serializer.models import RentalSchema
class TestRentalsView:
    """Tests for the rentals collection endpoint (/api/v1/rentals)."""

    async def test_get_rentals(self, client: TestClient, random_admin, random_bike):
        """Assert that you can get a list of all rentals."""
        # Seed one rental so the list is non-empty.
        await client.app["rental_manager"].create(random_admin, random_bike)
        response = await client.get('/api/v1/rentals', headers={"Authorization": f"Bearer {random_admin.firebase_id}"})
        # Validate the JSend envelope and its rentals payload.
        response_schema = JSendSchema.of(rentals=Many(RentalSchema()))
        response_data = response_schema.load(await response.json())
        assert response_data["status"] == JSendStatus.SUCCESS
        assert len(response_data["data"]["rentals"]) == 1
        rental = response_data["data"]["rentals"][0]
        assert rental["bike_identifier"] == random_bike.identifier
        # The embedded bike URL must resolve to a real resource.
        assert (await client.get(rental["bike_url"])).status != 404
class TestRentalView:
    """Tests for the single-rental endpoint (/api/v1/rentals/{id})."""

    async def test_get_rental(self, client: TestClient, random_admin, random_bike):
        """Assert that you get gets a single rental from the system."""
        rental, location = await client.app["rental_manager"].create(random_admin, random_bike)
        response = await client.get(f'/api/v1/rentals/{rental.id}',
                                    headers={"Authorization": f"Bearer {random_admin.firebase_id}"})
        # Validate the JSend envelope and the single-rental payload.
        response_schema = JSendSchema.of(rental=RentalSchema())
        response_data = response_schema.load(await response.json())
        assert response_data["status"] == JSendStatus.SUCCESS
        assert response_data["data"]["rental"]["id"] == rental.id
        assert response_data["data"]["rental"]["bike_identifier"] == random_bike.identifier
        # The embedded bike URL must resolve to a real resource.
        assert (await client.get(response_data["data"]["rental"]["bike_url"])).status != 404
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Timer."""
import time
class Timer(object):
    """A simple timer (adapted from Detectron)."""

    def __init__(self):
        # Declare the attributes up front; reset() assigns their real values.
        self.total_time = None
        self.calls = None
        self.start_time = None
        self.diff = None
        self.average_time = None
        self.reset()

    def tic(self):
        """Start (or restart) the stopwatch."""
        # time.time instead of time.clock: clock does not normalize across threads.
        self.start_time = time.time()

    def toc(self):
        """Stop the stopwatch and fold the elapsed interval into the stats."""
        now = time.time()
        self.diff = now - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls

    def reset(self):
        """Zero all accumulated timing statistics."""
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import optparse
import os,sys
#from optparse import OptionParser
import glob
import subprocess
import linecache
import struct
import shutil
def setupParserOptions():
    """Build the command-line parser, validate argv, and return the parsed
    options as a plain {dest: value} dict."""
    parser = optparse.OptionParser()
    parser.set_usage("%prog -f <stack> -p <parameter> -c <ctf> -s")
    parser.add_option("-f",dest="stack",type="string",metavar="FILE",
        help="raw, IMAGIC particle stack (black particles) - if not specified, only parameter files will be created, no new stack")
    parser.add_option("-p",dest="param",type="string",metavar="FILE",
        help="EMAN2 output parameter file")
    parser.add_option("-c",dest="ctf",type="string",metavar="FILE",
        help="per-particle CTF information file from APPION (optional)")
    parser.add_option("--mag",dest="mag",type="float", metavar="FLOAT", default=10000,
        help="actual magnification of images (default=10000)")
    parser.add_option("--norm", action="store_true",dest="norm",default=False,
        help="Normalize particles")
    parser.add_option("-m",dest="onlymodel",type="int",metavar="#",
        help="only convert this model (optional, starts with 0)")
    parser.add_option("-d", action="store_true",dest="debug",default=False,
        help="debug")
    options,args = parser.parse_args()
    # Reject any positional arguments; everything must be a named option.
    if len(args) > 0:
        parser.error("Unknown commandline options: " +str(args))
    # No arguments at all: show usage and quit.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit()
    # Flatten the optparse result into a {dest: value} dict.
    params={}
    for i in parser.option_list:
        if isinstance(i.dest,str):
            params[i.dest] = getattr(options,i.dest)
    return params
#=========================
def checkConflicts(params):
    """Sanity-check the parsed options; exit on missing/invalid files.

    A missing stack is only a warning (parameter files can still be made);
    missing or nonexistent parameter/CTF files are fatal.
    """
    if not params['stack']:
        print "\nWarning: no stack specified\n"
    elif not os.path.exists(params['stack']):
        print "\nError: stack file '%s' does not exist\n" % params['stack']
        sys.exit()
    if not params['param']:
        print "\nError: no EMAN2 parameter file specified"
        sys.exit()
    if not os.path.isfile(params['param']):
        print "\nError: EMAN2 parameter file '%s' does not exist\n" % params['param']
        sys.exit()
    if not params['ctf']:
        print "\nError: no CTF parameter file specified"
        sys.exit()
    elif not os.path.isfile(params['ctf']):
        print "\nError: Appion CTF parameter file '%s' does not exist\n" % params['ctf']
        sys.exit()
#=========================
def getEMANPath():
    """Return the EMAN2 install directory from $EMAN2DIR, or exit if unset.

    NOTE(review): shells out to `env | grep EMAN2DIR`; os.environ.get would
    be simpler and avoid the subprocess.
    """
    ### get the imagicroot directory
    emanpath = subprocess.Popen("env | grep EMAN2DIR", shell=True, stdout=subprocess.PIPE).stdout.read().strip()
    if emanpath:
        emanpath = emanpath.replace("EMAN2DIR=","")
    if os.path.exists(emanpath):
        return emanpath
    print "EMAN2 was not found, make sure it is in your path"
    sys.exit()
#=========================
def getNumModels(params):
    """Count the distinct model numbers referenced in the EMAN2 parameter file.

    The model number is the last column of each line. NOTE(review): values
    in the open interval (99, 889) are skipped — they appear to mark
    excluded particles; confirm against the EMAN2 parameter format.
    """
    ## find number of models included in reconstruction
    f=open(params['param'])
    mods = []
    for line in f:
        l = line.split()
        model=float(l[-1])
        if 889 > model > 99:
            continue
        if model not in mods:
            mods.append(model)
    f.close()
    return len(mods)
#=========================
def Eman2Freali(az,alt,phi):
    """Convert EMAN Euler angles (az, alt, phi) to FREALIGN (psi, theta, phi).

    Relies on EMAN2's Transform class (imported elsewhere). The +90/-90
    degree offsets encode the convention difference between the two
    packages — presumably per the FREALIGN definition; confirm before
    changing.
    """
    t1 = Transform({"type":"eman","az":az,"alt":alt,"phi":phi,"mirror":False})
    #t_conv = Transform({"type":"eman","alt":31.717474411458415,"az":90,"phi":-90,"mirror":False})
    #t2 = t1*t_conv.inverse()
    d = t1.get_params("eman")
    psi = d["phi"]+90
    # Wrap psi back into [0, 360).
    if psi >360:
        psi = psi-360
    theta= d["alt"]
    phi = d["az"]-90
    return psi,theta,phi
#=========================
def createFiles(params):
    """Convert the EMAN2 parameter file into one Frealign parameter file and
    one particle-index list per model, then (optionally) split the particle
    stack into a separate MRC stack per model.
    """
    parm=params['param']
    numMods = params['num']
    mag = params['mag']
    stack = params['stack']
    debug = params['debug']
    # open EMAN2 param file
    f=open(parm,'r')
    # for each model, create an output file
    mout=[]
    mtxt=[]
    count=[]
    for m in range(numMods):
        mout.append(open("%s_%02i_frealign"%(parm,m),'w'))
        mtxt.append(open("%s_%02i.txt"%(parm,m),'w'))
        count.append(1)
    print "Calculating euler angle conversion..."
    pcount=1
    for line in f:
        # EMAN2 row layout: psi, theta, phi, shift-x, shift-y, model id.
        l = line.split()
        parmPSI = float(l[0])
        parmTHETA = float(l[1])
        parmPHI = float(l[2])
        sx =(float(l[3]))
        sy =(float(l[4]))
        model = int(float(l[5]))
        psi,theta,phi = Eman2Freali(parmPSI,parmTHETA,parmPHI)
        # IDs in (99, 889) mark excluded particles; IDs > 889 are remapped to
        # model 0.  NOTE(review): confirm this convention matches upstream.
        if model < 99 or model > 889:
            if debug is True:
                print 'Particle %s is included' %(pcount-1)
            if model > 889:
                model = 0
            # Record the zero-based particle index for this model.
            mtxt[model].write("%s\n" %(pcount-1))
            # CTF file rows are 1-indexed and parallel to the particle list.
            ctf = linecache.getline(params['ctf'],pcount)
            if debug is True:
                print 'Reading line %s in ctf file' %(pcount)
                print ctf
            c = ctf.split()
            micro = float(c[7])
            df1 = float(c[8])
            df2 = float(c[9])
            astig = float(c[10])
            # Fixed-width Frealign parameter row.
            mout[model].write("%7d%8.3f%8.3f%8.3f%8.3f%8.3f%8.1f%6d%9.1f%9.1f%8.2f%7.2f%6.2f\n" %(count[model],psi,theta,phi,sx,sy,mag,micro,df1,df2,astig,0,0))
            count[model] += 1
        pcount+=1
    # close files
    f.close()
    for m in range(numMods):
        mout[m].close()
        mtxt[m].close()
    # exit if not converting stack
    if stack is None:
        return
    # get box size
    im=EMData.read_images(stack,[0])
    nx = im[0].get_xsize()
    del im
    # from EMAN2PAR import EMTaskCustomer
    # if params['nproc'] > 1:
    #     etc = EMTaskCustomer("thread:%i"%params['nproc'])
    # else:
    #     etc = EMTaskCustomer("thread:1")
    for m in range(numMods):
        # Optionally restrict conversion to a single model.
        if params['onlymodel'] is not None:
            if m!=params['onlymodel']: continue
        text='%s_%02i.txt' %(parm,m)
        parts = open(text).readlines()
        nimg = len(parts)
        imstack = "%s_model%02i"%(os.path.splitext(stack)[0],m)
        print "\nAllocating space for Model %i stack..."%m
        # Pre-allocate the full output volume on disk before slice writes.
        img = EMData(nx,nx,nimg)
        img.write_image(imstack+'.mrc')
        print "Generating %i particle stack for Model %i..."%(nimg,m)
        for i in xrange(nimg):
            p = int(float(parts[i]))
            d = EMData()
            d.read_image(stack, p)
            if params['norm'] is True:
                d.process_inplace("normalize")
            # Write this particle into slice i of the pre-allocated stack.
            region = Region(0, 0, i, nx, nx, 1)
            d.write_image(imstack+".mrc",0,EMUtil.get_image_ext_type("mrc"), False, region, EMUtil.EMDataType.EM_FLOAT, True)
            progress = int(float(i)/nimg*100)
            if progress%2==0:
                print "%3i%% complete\t\r"%progress,
        print "100% complete\t"
        os.remove(text)
#=========================
#=========================
if __name__ == "__main__":
params=setupParserOptions()
getEMANPath()
from EMAN2 import *
from sparx import *
checkConflicts(params)
params['num']=getNumModels(params)
print "EMAN2 parameter file contains %s models"%params['num']
createFiles(params)
|
nilq/baby-python
|
python
|
from itertools import cycle
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.cache import cache
from django.http import Http404
from django.shortcuts import render
from django.views.generic import TemplateView
from django.views.generic.base import View
import requests
from requests.exceptions import ConnectionError
from .google_analytics import get_access_token
from .uptime_robot import UptimeRobot
from .models import Service
from .models import GoogleAnalyticsSite
class HomeView(LoginRequiredMixin, TemplateView):
    """Dashboard landing page (login required); the template does all the work."""
    template_name = 'home.html'
class SpotligthView(LoginRequiredMixin, View):
    """Rotate between a random Service and a random GoogleAnalyticsSite.

    NOTE(review): the original 'Spotligth' spelling is kept because URLconf
    entries elsewhere may reference this class by name.
    """

    SPOTLIGTH_CYCLE = cycle('AB')

    def get(self, request, *args, **kwargs):
        # Alternate A/B on successive requests (the cycle is per-process).
        case = next(self.SPOTLIGTH_CYCLE)
        if case == 'A':
            obj = Service.objects.all().order_by('?').first()
            if not obj:
                raise Http404('Create a Service first')
            context = {'obj': obj}
            template = 'service_detail.html'
        else:
            obj = GoogleAnalyticsSite.objects.all().order_by('?').first()
            if not obj:
                raise Http404('Create a GoogleAnalyticsSite first')
            context = {'ACCESS_TOKEN': get_access_token(), 'obj': obj}
            template = 'googleanalyticssite_detail.html'
        return render(request, template, context)
class TickerView(LoginRequiredMixin, View):
    """Aggregate ticker items from Zendesk, Sentry and Uptime Robot.

    Each backend is polled through a short-lived cache entry so the dashboard
    does not hammer the external APIs on every request.  The original method
    repeated the same fetch-with-cache pattern three times inline; it is now
    split into one private helper per backend (same behavior).
    """

    def get(self, request, *args, **kwargs):
        response_list = []
        zendesk_data = self._zendesk()
        if zendesk_data:
            response_list.append(zendesk_data)
        sentry_data = self._sentry()
        if sentry_data:
            response_list.append(sentry_data)
        monitor_list = self._uptime_robot()
        if monitor_list:
            response_list.extend(monitor_list)
        return render(request, 'ticker_detail.html', {
            'response_list': response_list,
        })

    def _zendesk(self):
        """Open-ticket count from Zendesk; cached 120s. None/falsy on failure."""
        data = cache.get('zendesk_data')
        if not data:
            try:
                req = requests.get(
                    settings.ZENDESK_URL,
                    auth=(settings.ZENDESK_EMAIL, settings.ZENDESK_API),
                )
                if req.ok:
                    data = {
                        'title': 'Tickets',
                        'label': 'Zendesk',
                        'value': req.json()['view_count']['value'],
                    }
                    cache.set('zendesk_data', data, 120)
            except ConnectionError:
                data = None
        return data

    def _sentry(self):
        """Total event count from Sentry; cached 60s. None/falsy on failure."""
        data = cache.get('sentry_data')
        if not data:
            try:
                req = requests.get(
                    settings.SENTRY_URL,
                    auth=(settings.SENTRY_KEY, ''),
                )
                if req.ok:
                    data = {
                        'title': 'Events',
                        'label': 'Sentry',
                        # Sentry returns (timestamp, count) pairs; sum counts.
                        'value': sum(x[1] for x in req.json()),
                    }
                    cache.set('sentry_data', data, 60)
            except ConnectionError:
                data = None
        return data

    def _uptime_robot(self):
        """Per-monitor uptime ratios from Uptime Robot; cached 90s."""
        monitor_list = cache.get('monitor_list')
        if not monitor_list:
            uptime_robot = UptimeRobot()
            success, response = uptime_robot.get_monitors()
            if success:
                monitor_list = []
                for monitor in response.get('monitors').get('monitor'):
                    monitor_list.append({
                        'title': monitor.get('friendlyname'),
                        'label': 'Uptime',
                        'value': '{0}%'.format(
                            monitor.get('customuptimeratio')
                        ),
                    })
                cache.set('monitor_list', monitor_list, 90)
        return monitor_list
|
nilq/baby-python
|
python
|
node = S(input, "application/json")
object = {
"name": "test",
"comment": "42!"
}
node.prop("comment", object)
propertyNode = node.prop("comment")
value = propertyNode.prop("comment").stringValue()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Example Google style docstrings.
This module demonstrates documentation as specified by the `Google Python
Style Guide`_. Docstrings may extend over multiple lines. Sections are created
with a section header and a colon followed by a block of indented text.
Example:
Examples can be given using either the ``Example`` or ``Examples``
sections. Sections support any reStructuredText formatting, including
literal blocks::
$ python example_google.py
Section breaks are created by resuming unindented text. Section breaks
are also implicitly created anytime a new section starts.
Attributes:
module level variable1 (int): Module level variables may be documented in
either the ``Attributes`` section of the module docstring, or in an
inline docstring immediately following the variable.
Either form is acceptable, but the two should not be mixed. Choose
one convention to document module level variables and be consistent
with it.
Todo:
* For module TODOs
* You have to also use ``sphinx.ext.todo`` extension
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
import logging
import {{cookiecutter.project_slug}}.send_notification # pylint: disable=import-error
import {{cookiecutter.project_slug}}.settings_accessor # pylint: disable=import-error
# Module-level wiring: a shared settings accessor plus a logger that routes
# every WARNING-or-higher record through the project's email handler.
_SETTINGS = {{cookiecutter.project_slug}}.settings_accessor.SettingsAccessor()
_LOGGER = logging.getLogger(__name__)
_HANDLER = {{cookiecutter.project_slug}}.send_notification.EmailHandler()
_LOGGER.addHandler(_HANDLER)
_LOGGER.setLevel(logging.WARNING)
def main():
    """Main function."""
    # Emits a WARNING-level record, which the EmailHandler forwards as email.
    _LOGGER.warning('here')

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
__author__ = "Nathan Ward"
import logging
from datetime import date, datetime
from pytz import timezone, utc
_LOGGER = logging.getLogger()
_LOGGER.setLevel(logging.INFO)
def get_market_open_close() -> dict:
    """Grab the market open and close settings and convert timezones.

    Lambdas run in UTC; market hours are defined in US/Eastern (NYSE).
    Returns UTC open/close datetimes for the regular and extended sessions,
    plus signed offsets in seconds relative to now.

    NOTE(review): 'time_to_close' is now - close (positive AFTER close) while
    'time_to_open' is open - now (positive BEFORE open).  The asymmetry is
    preserved because callers may rely on the existing signs.
    """
    today = date.today()
    current_time = datetime.now(utc)
    # New York timezone for NASDAQ/NYSE (same zone as us-east-1).
    market_tz = timezone('US/Eastern')
    date_format = '%H:%M:%S'

    def _eastern_to_utc(clock):
        """Convert an 'HH:MM:SS' Eastern wall-clock time today to a UTC datetime."""
        naive = datetime.combine(today, datetime.strptime(clock, date_format).time())
        return market_tz.localize(naive, is_dst=None).astimezone(utc)

    # Regular hours.
    open_utc = _eastern_to_utc('09:30:00')
    close_utc = _eastern_to_utc('16:00:00')
    # Extended hours for stock.
    extended_open_utc = _eastern_to_utc('09:00:00')
    extended_close_utc = _eastern_to_utc('18:00:00')
    return {
        'market_open': open_utc,
        'market_close': close_utc,
        'time_to_close': (current_time - close_utc).total_seconds(),
        'time_to_open': (open_utc - current_time).total_seconds(),
        'extended_market_open': extended_open_utc,
        'extended_market_close': extended_close_utc,
        'extended_time_to_close': (current_time - extended_close_utc).total_seconds(),
        'extended_time_to_open': (extended_open_utc - current_time).total_seconds(),
        'time_now': current_time
    }
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#---------------------------------------
# Import Libraries
#---------------------------------------
import sys
import io
import json
from os.path import isfile
import clr
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
from datetime import datetime
#---------------------------------------
# [Required] Script Information
#---------------------------------------
ScriptName = "OwRank"
Website = "https://github.com/lucarin91/overwatch-streamlabs"
Description = "Return the hoster rank on Overwatch."
Creator = "lucarin91"
Version = "2.0.0"
#---------------------------------------
# Set Variables
#---------------------------------------
_command_permission = "everyone"
_command_info = ""
_last_update = None
_responce = None
_battletag = []
_region = 'eu'
_message = "Rank:"
_command = "!owrank"
_cooldown = 10
#---------------------------------------
# [Required] Intialize Data (Only called on Load)
#---------------------------------------
def Init():
    """Load settings.json if present, then prime the cached rank message."""
    global _last_update, _responce
    settings = 'Services/Scripts/{}/settings.json'.format(ScriptName)
    if isfile(settings):
        with io.open(settings, mode='r', encoding='utf-8-sig') as f:
            raw = f.read()
        Parent.Log(ScriptName, 'Load json: {}'.format(raw))
        parse_conf(json.loads(raw))
    _responce = build_message()
    _last_update = datetime.today()
#---------------------------------------
# [Required] Execute Data / Process Messages
#---------------------------------------
def Execute(data):
    """Respond to the configured chat command, honoring cooldown and permission."""
    if not data.IsChatMessage():
        return
    if data.GetParam(0).lower() != _command:
        return
    if Parent.IsOnCooldown(ScriptName, _command):
        return
    if Parent.HasPermission(data.User, _command_permission, _command_info):
        Parent.SendTwitchMessage(_responce)
#---------------------------------------
# [Required] Tick Function
#---------------------------------------
def Tick():
    """Refresh the cached rank message once more than 30 seconds have passed."""
    global _responce, _last_update
    elapsed = (datetime.today() - _last_update).seconds
    if elapsed > 30:
        _responce = build_message()
        _last_update = datetime.today()
        Parent.Log(ScriptName, 'update rank! ({})'.format(_responce))
def Unload():
    """Called by Streamlabs Chatbot on script unload; nothing to clean up."""
    pass
def ReloadSettings(jsonData):
    """Called by the bot when the user saves settings; re-parse the config."""
    parse_conf(json.loads(jsonData))
#---------------------------------------
# My functions
#---------------------------------------
def get_rank(username, region='eu'):
    """Return the competitive rank of `username` from owapi.net.

    Returns the integer rank, or the string "not placed" when the request
    fails, the payload is missing the expected keys, or the rank is null.
    """
    url = 'https://owapi.net/api/v3/u/{}/stats'.format(username)
    res_raw = Parent.GetRequest(url, {"User-Agent":"Linux/generic"})
    res = json.loads(res_raw)
    status, data = res['status'], json.loads(res['response'])
    if status != 200:
        Parent.Log(ScriptName, 'Request status {}'.format(status))
        return "not placed"
    # Walk the nested payload defensively; any missing level means no rank.
    # (Idiom fix: `x not in y` instead of `not x in y`.)
    if not data\
       or region not in data\
       or 'stats' not in data[region]\
       or 'competitive' not in data[region]['stats']\
       or 'overall_stats' not in data[region]['stats']['competitive']\
       or 'comprank' not in data[region]['stats']['competitive']['overall_stats']:
        Parent.Log(ScriptName, 'Remote service error.')
        return "not placed"
    rank = data[region]['stats']['competitive']['overall_stats']['comprank']
    return rank if rank is not None else "not placed"
def parse_conf(conf):
    """Update the module-level configuration from a parsed settings dict."""
    global _battletag, _region, _message, _command, _cooldown
    _battletag = [tag.strip() for tag in conf['battletag'].split(',')]
    _region = conf['region']
    _message = conf['message']
    _command = conf['command']
    _cooldown = conf['cooldown']
    Parent.Log(ScriptName, 'Load conf: {}'.format((_battletag, _region, _message, _command, _cooldown)))
def build_message():
    """Compose the chat message listing each battletag's current rank."""
    pairs = []
    for user in _battletag:
        display_name = user.split('-')[0]
        pairs.append('{}->{}'.format(display_name, get_rank(user, _region)))
    return "{} {}".format(_message, ', '.join(pairs))
def ShowRank():
    """Build the current rank message and push it straight to chat."""
    Parent.Log(ScriptName, 'Send rank to chat!')
    Parent.SendTwitchMessage(build_message())
|
nilq/baby-python
|
python
|
import unittest
import sys
# Module under test: the last CLI argument with its ".py" suffix stripped.
module = sys.argv[-1].split(".py")[0]
class PublicTests(unittest.TestCase):
    """Public grading tests for the student-supplied `top_3` function."""

    @classmethod
    def setUpClass(cls):
        # Import the module named on the command line and publish its `top_3`
        # attribute as a global so the test methods can call it directly.
        global top_3
        undertest = __import__(module)
        top_3 = getattr(undertest, 'top_3', None)

    def test_exemplo(self):
        # top_3 must move the three largest values (descending) to the front,
        # in place, without changing the list length.
        l = [1,2,3,4,8,22,-3,5]
        top_3(l)
        assert l[0] == 22 and l[1] == 8 and l[2] == 5
        assert len(l) == 8
if __name__ == '__main__':
    # Run the suite manually (instead of unittest.main) so the extra module
    # argument in sys.argv is not consumed by unittest's own CLI parsing.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner()
    runner.run(loader.loadTestsFromModule(sys.modules[__name__]))
|
nilq/baby-python
|
python
|
import datetime
from .wordpress import WordPress
class CclawTranslations(WordPress):
    """Scraper definition for the CClaw Translations WordPress blog."""

    base_urls = [
        "https://cclawtranslations.home.blog/",
    ]
    last_updated = datetime.date(2021, 11, 3)

    def init(self):
        # Navigation footer text that must never leak into chapter content.
        self.blacklist_patterns += ["CONTENIDO | SIGUIENTE"]

    def parse_content(self, element) -> str:
        self.clean_contents(element)
        # Top-level <div> wrappers carry ads/navigation, not prose; drop them.
        for wrapper in element.find_all("div", recursive=False):
            wrapper.extract()
        return str(element)
|
nilq/baby-python
|
python
|
from discord.ext import commands
class Echo(commands.Cog):
    """Cog exposing a single !echo command that repeats the user's text."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def echo(self, ctx):
        # Strip the first 6 characters of the message before echoing.
        # NOTE(review): assumes the prefix+command is exactly "!echo " -- a
        # different bot prefix length would truncate or leak characters.
        await ctx.send(ctx.message.content[6:])
def setup(bot):
    """discord.py extension entry point; registers the Echo cog on the bot."""
    bot.add_cog(Echo(bot))
|
nilq/baby-python
|
python
|
import json
import logging
from . import BASE
from .oauth import Tokens
logger = logging.getLogger(__name__)
def store(client_id: str, tokens: Tokens) -> None:
    """Persist the non-null token fields to this client's JSON cache file."""
    cache = BASE / f"{client_id}_cache.json"
    # Create with owner-only permissions before writing secrets.
    # NOTE(review): touch() applies the mode only when creating the file; an
    # existing file keeps its current permissions.
    cache.touch(0o600, exist_ok=True)
    payload = {key: val for key, val in tokens._asdict().items() if val is not None}
    with cache.open("w") as fh:
        json.dump(payload, fh)
def exists(client_id: str) -> bool:
    """Return True if a token cache file exists for this client id."""
    return (BASE / f"{client_id}_cache.json").exists()
def retrieve(client_id: str) -> Tokens:
    """Load cached tokens for this client; raises FileNotFoundError if absent."""
    cache = BASE / f"{client_id}_cache.json"
    with cache.open() as fh:
        return Tokens(**json.load(fh))
|
nilq/baby-python
|
python
|
from .model import TreeNode
"""
BFS Solution
Space : O(n)
Time : O(n)
"""
class Solution:
    """LeetCode 1457: count pseudo-palindromic root-to-leaf paths.

    A path is pseudo-palindromic when at most one node value occurs an odd
    number of times along it (its values can be permuted into a palindrome).
    """

    def pseudoPalindromicPaths(self, root: "TreeNode") -> int:
        """Iterative DFS carrying the set of values seen an odd number of
        times so far.  The original stored every complete root-to-leaf path
        and re-counted it in a second pass (O(n*h) extra memory); carrying
        only the odd-parity set removes the per-path storage entirely.
        """
        if not root:
            return 0
        count = 0
        stack = [(root, frozenset())]
        while stack:
            node, odd = stack.pop()
            # Symmetric difference toggles this value's parity.
            odd = odd ^ {node.val}
            if not node.left and not node.right:
                # Leaf: pseudo-palindromic iff at most one value has odd count.
                if len(odd) <= 1:
                    count += 1
                continue
            if node.left:
                stack.append((node.left, odd))
            if node.right:
                stack.append((node.right, odd))
        return count
|
nilq/baby-python
|
python
|
import sys, os
from MySQLdb import Error as Error
from connect_db import read_connection
class ReaderBase(object):
    """Base class providing a read-only MySQL connection for subclasses."""

    def __init__(self):
        # Credentials file consumed by connect_db.read_connection.
        self._password_file = "/n/home00/cadams/mysqldb"

    def connect(self):
        """Open and return a read connection using the password file."""
        return read_connection(self._password_file)
|
nilq/baby-python
|
python
|
"""Geometric Brownian motion."""
import numpy as np
from stochastic.processes.base import BaseTimeProcess
from stochastic.processes.continuous.brownian_motion import BrownianMotion
from stochastic.utils import generate_times
from stochastic.utils.validation import check_numeric
from stochastic.utils.validation import check_positive_integer
from stochastic.utils.validation import check_positive_number
class GeometricBrownianMotion(BaseTimeProcess):
    r"""Geometric Brownian motion process.
    .. image:: _static/geometric_brownian_motion.png
        :scale: 50%
    A geometric Brownian motion :math:`S_t` is the analytic solution to the
    stochastic differential equation with Wiener process :math:`W_t`:
    .. math::
        dS_t = \mu S_t dt + \sigma S_t dW_t
    and can be represented with initial value :math:`S_0` in the form:
    .. math::
        S_t = S_0 \exp \left( \left( \mu - \frac{\sigma^2}{2} \right) t +
        \sigma W_t \right)
    :param float drift: the parameter :math:`\mu`
    :param float volatility: the parameter :math:`\sigma`
    :param float t: the right hand endpoint of the time interval :math:`[0,t]`
        for the process
    :param numpy.random.Generator rng: a custom random number generator
    """

    def __init__(self, drift=0, volatility=1, t=1, rng=None):
        super().__init__(t=t, rng=rng)
        self._brownian_motion = BrownianMotion(t=t)
        self.drift = drift
        self.volatility = volatility
        # Cache bookkeeping for the precomputed drift line used by
        # _sample_geometric_brownian_motion; key is (n, drift, volatility).
        self._n = None
        self._line_key = None

    def __str__(self):
        return "Geometric Brownian motion with drift {d} and volatility {v} on [0, {t}].".format(
            t=str(self.t), d=str(self.drift), v=str(self.volatility)
        )

    def __repr__(self):
        return "GeometricBrownianMotion(drift={d}, volatility={v}, t={t})".format(
            t=str(self.t), d=str(self.drift), v=str(self.volatility)
        )

    @property
    def drift(self):
        """Geometric Brownian motion drift parameter."""
        return self._drift

    @drift.setter
    def drift(self, value):
        check_numeric(value, "Drift")
        self._drift = value

    @property
    def volatility(self):
        """Geometric Brownian motion volatility parameter."""
        return self._volatility

    @volatility.setter
    def volatility(self, value):
        check_positive_number(value, "Volatility")
        self._volatility = value

    def _sample_geometric_brownian_motion(self, n, initial=1.0):
        """Generate a realization of geometric Brownian motion."""
        check_positive_integer(n)
        check_positive_number(initial, "Initial")
        # Reuse the precomputed drift line across calls, but only while n,
        # drift AND volatility are all unchanged.  (The original cached on n
        # alone, so changing drift/volatility after a sample silently reused
        # a stale line.)
        # NOTE(review): generate_times receives the drift term rather than
        # the horizon t -- confirm this matches generate_times' contract.
        key = (n, self.drift, self.volatility)
        if self._line_key != key:
            self._line_key = key
            self._n = n
            self._line = generate_times(self.drift - self.volatility ** 2 / 2.0, n)
        noise = self.volatility * self._brownian_motion.sample(n)
        return initial * np.exp(self._line + noise)

    def _sample_geometric_brownian_motion_at(self, times, initial=1.0):
        """Generate a realization of geometric Brownian motion."""
        line = [(self.drift - self.volatility ** 2 / 2.0) * t for t in times]
        noise = self.volatility * self._brownian_motion.sample_at(times)
        return initial * np.exp(line + noise)

    def sample(self, n, initial=1):
        """Generate a realization.
        :param int n: the number of increments to generate.
        :param float initial: the initial value of the process :math:`S_0`.
        """
        return self._sample_geometric_brownian_motion(n, initial)

    def sample_at(self, times, initial=1):
        """Generate a realization using specified times.
        :param times: a vector of increasing time values at which to generate
            the realization
        :param float initial: the initial value of the process :math:`S_0`.
        """
        return self._sample_geometric_brownian_motion_at(times, initial)
|
nilq/baby-python
|
python
|
#-----------------------------------------------------
# Make plots from matplotlib using data exported by
# DNSS.jl
# Soham M 05/2022
#-----------------------------------------------------
import numpy as np
import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import matplotlib
# Global matplotlib style: large publication fonts, 300 dpi, LaTeX text
# rendering, and a wide two-panel figure shape.
mpl.rcParams.update({
    "font.size": 34.0,
    "axes.titlesize": 34.0,
    "axes.labelsize": 34.0,
    "xtick.labelsize": 34.0,
    "ytick.labelsize": 34.0,
    "legend.fontsize": 34.0,
    "figure.figsize": (25, 10),
    "figure.dpi": 300,
    "savefig.dpi": 300,
    "text.usetex": True
})
def plot_solution():
    """Contour-plot the guv/grr constraint data exported by DNSS.jl.

    Loads every patch file, overlays all patches on two shared axes with a
    single global color scale per quantity, and saves the figure as
    minkowski_constraints.pdf.  Returns 0.
    """
    fguv = glob.glob("../data/minkowski/constraints/minkowski_guv*")
    fgrr = glob.glob("../data/minkowski/constraints/minkowski_grr*")
    # Global min/max across every patch so all patches share one color scale.
    guvmax = np.amax(list(map(lambda x: np.amax(np.load(x)["w"]), fguv)))
    guvmin = np.amin(list(map(lambda x: np.amin(np.load(x)["w"]), fguv)))
    grrmax = np.amax(list(map(lambda x: np.amax(np.load(x)["w"]), fgrr)))
    grrmin = np.amin(list(map(lambda x: np.amin(np.load(x)["w"]), fgrr)))
    guvlevels = np.linspace(guvmin, guvmax, 40)
    grrlevels = np.linspace(grrmin, grrmax, 40)
    fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False, sharex=True)
    # Overlay each (guv, grr) patch pair on the shared axes.
    for (_guv, _grr) in zip(fguv, fgrr):
        guv = np.load(_guv)
        grr = np.load(_grr)
        A1 = ax1.contourf(guv["v"], guv["u"], guv["w"], vmax=np.amax(guvlevels), vmin=np.amin(guvlevels), levels=guvlevels)
        A2 = ax2.contourf(grr["v"], grr["u"], grr["w"], vmax=np.amax(grrlevels), vmin=np.amin(grrlevels), levels=grrlevels)
    ax1.tick_params(axis='both', which='major', size=10)
    ax1.set_xlabel(r"$v$")
    ax1.set_ylabel(r"$u$")
    # Colorbars keyed to the last drawn contour set of each panel (valid
    # because every patch uses the same fixed levels).
    fig.colorbar(A1, ax=ax1)
    fig.colorbar(A2, ax=ax2)
    plt.tight_layout()
    fig.savefig("minkowski_constraints.pdf")
    return 0

plot_solution()
|
nilq/baby-python
|
python
|
import os
import torch
from torchinfo import summary
from torch.utils.data import DataLoader
import source.utils as utils
import source.arguments as arguments
from source.model import FusionNet, UNet
from source.dataset.dataset import NucleiCellDataset
def main(m_args):
    """Evaluate a trained segmentation model (FusionNet or UNet) on the
    validation and test splits, print Dice scores, and append a results row
    to output/results.csv.
    """
    # For reproducibility
    torch.manual_seed(123)
    # Get model name
    model_name = utils.get_model_name(m_args)
    # Device
    device = torch.device("cuda:" + m_args.gpu_ids) \
        if torch.cuda.is_available() else "cpu"
    # Model
    if m_args.model == "fusion":
        model = FusionNet(m_args, 1)
    else:
        model = UNet(m_args.num_kernel, m_args.kernel_size, 1, 2)
    print(list(model.parameters())[0].shape)
    summary(model)
    model = model.to(device)
    # Optimizer -- required by the checkpoint loader's signature even though
    # no training happens in this script.
    parameters = model.parameters()
    if m_args.optimizer == "adam":
        optimizer = torch.optim.Adam(parameters, m_args.lr)
    else:
        optimizer = torch.optim.SGD(parameters, m_args.lr)
    # Load model checkpoint; map to CPU when requested so GPU-saved weights
    # can still be restored.
    if m_args.device == "cpu":
        utils.load_checkpoint(
            torch.load(os.path.join("output/", m_args.experiment_name,
                                    model_name + ".pth.tar"),
                       map_location=torch.device("cpu")), model, optimizer)
    else:
        utils.load_checkpoint(
            torch.load(os.path.join("output/", m_args.experiment_name,
                                    model_name + ".pth.tar")),
            model, optimizer)
    # Load data -- evaluation only, so both loaders use shuffle=False.
    test_dataset = NucleiCellDataset(m_args.test_data,
                                     phase="test",
                                     transform=m_args.transform,
                                     image_size=m_args.image_size)
    validation_dataset = NucleiCellDataset(m_args.train_data,
                                           phase="validation",
                                           transform=m_args.transform,
                                           image_size=m_args.image_size)
    validation_dataloader = DataLoader(validation_dataset,
                                       batch_size=m_args.batch_size,
                                       shuffle=False,
                                       num_workers=m_args.num_workers,
                                       pin_memory=True)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=m_args.batch_size,
                                 shuffle=False,
                                 num_workers=m_args.num_workers,
                                 pin_memory=True)
    print("Total number of test examples", str(len(test_dataset)))
    print("Total number of validation examples", str(len(validation_dataset)))
    # Calculate dice and ious
    print("---- Validation metrics ----")
    dice_val = calculate_metrics(m_args, device, model, validation_dataloader)
    print("---- Test metrics ----")
    dice_test = calculate_metrics(m_args, device, model, test_dataloader)
    print("Total number of parameters")
    # De-duplicate shared storage via data_ptr so tied weights count once.
    params = sum(dict((p.data_ptr(), p.numel())
                      for p in model.parameters()).values())
    print(params)
    # Append one CSV row summarizing this run's configuration and scores.
    with open(os.path.join("output/results.csv"), "a") as file:
        file.write("{},{},{},{},{},{},{},{},{}\n"
                   .format(model_name,
                           str(m_args.target_type),
                           str(m_args.num_kernel),
                           str(m_args.image_size),
                           str(m_args.batch_size),
                           str(m_args.lr),
                           str(dice_val),
                           str(dice_test),
                           str(params)))
def calculate_metrics(f_args, device, model, loader):
    """Compute the Dice coefficient of `model` over every batch in `loader`.

    Selects nuclei or cell labels based on f_args.target_type, prints the
    score, and returns it as a float.
    """
    intersection_sum, union_sum = 0, 0
    model.eval()
    with torch.no_grad():
        for _, (x_val, y_nuclei_val, y_cell_val) in enumerate(loader):
            labels = y_nuclei_val if f_args.target_type == "nuclei" else y_cell_val
            # Scale raw pixel values to [0, 1] and move tensors to the device.
            inputs_dev = torch.div(x_val.to(device), 255)
            target = labels.to(device)
            # Forward pass; argmax over the class dimension gives the mask.
            logits = model(inputs_dev).squeeze(1)
            _, mask = torch.max(logits, dim=1)
            flat_pred = mask.view(-1)
            flat_target = target.view(-1)
            # Accumulate per-batch true positives and mask sizes.
            intersection_sum += (flat_pred * flat_target).sum()
            union_sum += flat_pred.sum() + flat_target.sum()
    dice = (2. * intersection_sum) / union_sum
    print("dice: ", dice.item())
    return dice.item()
if __name__ == "__main__":
args = arguments.get_arguments()
main(args)
|
nilq/baby-python
|
python
|
"""Objects representing regions in space."""
import math
import random
import itertools
import numpy
import scipy.spatial
import shapely.geometry
import shapely.ops
from scenic.core.distributions import Samplable, RejectionException, needsSampling
from scenic.core.lazy_eval import valueInContext
from scenic.core.vectors import Vector, OrientedVector, VectorDistribution
from scenic.core.geometry import RotatedRectangle
from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors
from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion
from scenic.core.type_support import toVector
from scenic.core.utils import cached, areEquivalent
def toPolygon(thing):
    """Extract a Shapely geometry from `thing`, or None if not possible."""
    if needsSampling(thing):
        return None
    # Probe the known geometry-carrying attributes in priority order.
    for attr in ('polygon', 'polygons', 'lineString'):
        if hasattr(thing, attr):
            return getattr(thing, attr)
    return None
def regionFromShapelyObject(obj, orientation=None):
    """Wrap a Shapely geometry in the matching Region subclass."""
    assert obj.is_valid, obj
    if obj.is_empty:
        return nowhere
    polygonal = (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)
    linear = (shapely.geometry.LineString, shapely.geometry.MultiLineString)
    if isinstance(obj, polygonal):
        return PolygonalRegion(polygon=obj, orientation=orientation)
    if isinstance(obj, linear):
        return PolylineRegion(polyline=obj, orientation=orientation)
    raise RuntimeError(f'unhandled type of Shapely geometry: {obj}')
class PointInRegionDistribution(VectorDistribution):
    """Uniform distribution over points in a Region"""

    def __init__(self, region):
        super().__init__(region)
        self.region = region

    def sampleGiven(self, value):
        # Delegate to the sampled region's own uniform point sampler.
        return value[self.region].uniformPointInner()

    def __str__(self):
        return f'PointIn({self.region})'
class Region(Samplable):
    """Abstract class for regions."""

    def __init__(self, name, *dependencies, orientation=None):
        super().__init__(dependencies)
        self.name = name
        self.orientation = orientation

    def sampleGiven(self, value):
        # A region with no random parameters samples to itself.
        return self

    def intersect(self, other, triedReversed=False):
        """Get a `Region` representing the intersection of this one with another."""
        if not triedReversed:
            # Give the other region a chance to implement the intersection.
            return other.intersect(self, triedReversed=True)
        return IntersectionRegion(self, other)

    @staticmethod
    def uniformPointIn(region):
        """Get a uniform `Distribution` over points in a `Region`."""
        return PointInRegionDistribution(region)

    def uniformPoint(self):
        """Sample a uniformly-random point in this `Region`.

        Can only be called on fixed Regions with no random parameters.
        """
        assert not needsSampling(self)
        return self.uniformPointInner()

    def uniformPointInner(self):
        """Do the actual random sampling. Implemented by subclasses."""
        raise NotImplementedError()

    def containsPoint(self, point):
        """Check if the `Region` contains a point. Implemented by subclasses."""
        raise NotImplementedError()

    def containsObject(self, obj):
        """Check if the `Region` contains an :obj:`~scenic.core.object_types.Object`.

        The default implementation assumes the `Region` is convex; subclasses
        must override the method if this is not the case.
        """
        return all(self.containsPoint(corner) for corner in obj.corners)

    def __contains__(self, thing):
        """Check if this `Region` contains an object or vector."""
        from scenic.core.object_types import Object
        if isinstance(thing, Object):
            return self.containsObject(thing)
        vec = toVector(thing, '"X in Y" with X not an Object or a vector')
        return self.containsPoint(vec)

    def getAABB(self):
        """Axis-aligned bounding box for this `Region`. Implemented by some subclasses."""
        raise NotImplementedError()

    def orient(self, vec):
        """Orient the given vector along the region's orientation, if any."""
        if self.orientation is None:
            return vec
        return OrientedVector(vec.x, vec.y, self.orientation[vec])

    def __str__(self):
        return f'<Region {self.name}>'
class AllRegion(Region):
    """Region consisting of all space."""

    def intersect(self, other, triedReversed=False):
        # Intersecting everything with X is just X.
        return other

    def containsPoint(self, point):
        return True

    def containsObject(self, obj):
        return True

    def __eq__(self, other):
        # All AllRegion instances are interchangeable.
        return type(other) is AllRegion

    def __hash__(self):
        return hash(AllRegion)
class EmptyRegion(Region):
    """Region containing no points."""

    def intersect(self, other, triedReversed=False):
        # Intersecting anything with nothing yields nothing.
        return self

    def uniformPointInner(self):
        # Sampling from an empty region forces scenario rejection.
        raise RejectionException(f'sampling empty Region')

    def containsPoint(self, point):
        return False

    def containsObject(self, obj):
        return False

    def show(self, plt, style=None):
        # Nothing to draw.
        pass

    def __eq__(self, other):
        # All EmptyRegion instances are interchangeable.
        return type(other) is EmptyRegion

    def __hash__(self):
        return hash(EmptyRegion)
# Shared singleton instances for the two trivial regions.
everywhere = AllRegion('everywhere')
nowhere = EmptyRegion('nowhere')
class CircularRegion(Region):
    """Disc of a given radius centered at a point."""

    def __init__(self, center, radius, resolution=32):
        super().__init__('Circle', center, radius)
        self.center = center.toVector()
        self.radius = radius
        self.circumcircle = (self.center, self.radius)
        # Precompute the Shapely polygon only when fully concrete.
        if not (needsSampling(self.center) or needsSampling(self.radius)):
            ctr = shapely.geometry.Point(self.center)
            self.polygon = ctr.buffer(self.radius, resolution=resolution)

    def sampleGiven(self, value):
        return CircularRegion(value[self.center], value[self.radius])

    def evaluateInner(self, context):
        sampled_center = valueInContext(self.center, context)
        sampled_radius = valueInContext(self.radius, context)
        return CircularRegion(sampled_center, sampled_radius)

    def containsPoint(self, point):
        return point.toVector().distanceTo(self.center) <= self.radius

    def uniformPointInner(self):
        cx, cy = self.center
        # Triangular-distributed radius makes the point uniform over the disc.
        rho = random.triangular(0, self.radius, self.radius)
        theta = random.uniform(-math.pi, math.pi)
        pt = Vector(cx + (rho * cos(theta)), cy + (rho * sin(theta)))
        return self.orient(pt)

    def getAABB(self):
        cx, cy = self.center
        rad = self.radius
        return ((cx - rad, cy - rad), (cx + rad, cy + rad))

    def isEquivalentTo(self, other):
        if type(other) is not CircularRegion:
            return False
        return (areEquivalent(other.center, self.center)
                and areEquivalent(other.radius, self.radius))

    def __str__(self):
        return f'CircularRegion({self.center}, {self.radius})'
class SectorRegion(Region):
    """Circular sector: the part of a disc spanning `angle` radians about `heading`."""

    def __init__(self, center, radius, heading, angle, resolution=32):
        super().__init__('Sector', center, radius, heading, angle)
        self.center = center.toVector()
        self.radius = radius
        self.heading = heading
        self.angle = angle
        # Bounding circle used for quick collision rejection.
        # NOTE(review): r = (radius/2)*cos(angle/2) with the center offset
        # along heading -- confirm this bounds the sector for wide angles.
        r = (radius / 2) * cos(angle / 2)
        self.circumcircle = (self.center.offsetRadially(r, heading), r)
        # Precompute the Shapely polygon only when fully concrete.
        if not any(needsSampling(x) for x in (self.center, radius, heading, angle)):
            ctr = shapely.geometry.Point(self.center)
            circle = ctr.buffer(self.radius, resolution=resolution)
            if angle >= math.tau - 0.001:
                # (Nearly) full circle: skip the angular mask entirely.
                self.polygon = circle
            else:
                # Quadrilateral mask covering the angular span; intersecting
                # it with the disc yields the sector polygon.
                mask = shapely.geometry.Polygon([
                    self.center,
                    self.center.offsetRadially(radius, heading + angle/2),
                    self.center.offsetRadially(2*radius, heading),
                    self.center.offsetRadially(radius, heading - angle/2)
                ])
                self.polygon = circle & mask

    def sampleGiven(self, value):
        return SectorRegion(value[self.center], value[self.radius],
                            value[self.heading], value[self.angle])

    def evaluateInner(self, context):
        center = valueInContext(self.center, context)
        radius = valueInContext(self.radius, context)
        heading = valueInContext(self.heading, context)
        angle = valueInContext(self.angle, context)
        return SectorRegion(center, radius, heading, angle)

    def containsPoint(self, point):
        # Must lie within the angular cone and within the radius.
        point = point.toVector()
        if not pointIsInCone(tuple(point), tuple(self.center), self.heading, self.angle):
            return False
        return point.distanceTo(self.center) <= self.radius

    def uniformPointInner(self):
        x, y = self.center
        heading, angle, maxDist = self.heading, self.angle, self.radius
        # Triangular-distributed radius => area-uniform sampling.
        r = random.triangular(0, maxDist, maxDist)
        ha = angle / 2.0
        # NOTE(review): the +pi/2 offset matches the convention that heading 0
        # points along +y -- confirm against scenic.core.vectors.
        t = random.uniform(-ha, ha) + (heading + (math.pi / 2))
        pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
        return self.orient(pt)

    def isEquivalentTo(self, other):
        if type(other) is not SectorRegion:
            return False
        return (areEquivalent(other.center, self.center)
                and areEquivalent(other.radius, self.radius)
                and areEquivalent(other.heading, self.heading)
                and areEquivalent(other.angle, self.angle))

    def __str__(self):
        return f'SectorRegion({self.center},{self.radius},{self.heading},{self.angle})'
class RectangularRegion(RotatedRectangle, Region):
    """Rotated rectangle given by center position, heading, width, and height."""
    def __init__(self, position, heading, width, height):
        super().__init__('Rectangle', position, heading, width, height)
        self.position = position.toVector()
        self.heading = heading
        self.width = width
        self.height = height
        self.hw = hw = width / 2    # half-width
        self.hh = hh = height / 2   # half-height
        self.radius = hypot(hw, hh)  # circumcircle; for collision detection
        # Four corners in a fixed cyclic order, rotated by heading about position.
        self.corners = tuple(position.offsetRotated(heading, Vector(*offset))
                             for offset in ((hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)))
        self.circumcircle = (self.position, self.radius)
    def sampleGiven(self, value):
        """Substitute sampled values for all defining parameters."""
        return RectangularRegion(value[self.position], value[self.heading],
                                 value[self.width], value[self.height])
    def evaluateInner(self, context):
        """Evaluate the defining parameters in the given context."""
        position = valueInContext(self.position, context)
        heading = valueInContext(self.heading, context)
        width = valueInContext(self.width, context)
        height = valueInContext(self.height, context)
        return RectangularRegion(position, heading, width, height)
    def uniformPointInner(self):
        """Sample uniformly in local rectangle coordinates, then rotate/translate."""
        hw, hh = self.hw, self.hh
        rx = random.uniform(-hw, hw)
        ry = random.uniform(-hh, hh)
        pt = self.position.offsetRotated(self.heading, Vector(rx, ry))
        return self.orient(pt)
    def getAABB(self):
        """Axis-aligned bounding box of the rotated corners."""
        x, y = zip(*self.corners)
        minx, maxx = findMinMax(x)
        miny, maxy = findMinMax(y)
        return ((minx, miny), (maxx, maxy))
    def isEquivalentTo(self, other):
        """Structural equivalence of all four defining parameters."""
        if type(other) is not RectangularRegion:
            return False
        return (areEquivalent(other.position, self.position)
                and areEquivalent(other.heading, self.heading)
                and areEquivalent(other.width, self.width)
                and areEquivalent(other.height, self.height))
    def __str__(self):
        return f'RectangularRegion({self.position},{self.heading},{self.width},{self.height})'
class PolylineRegion(Region):
    """Region given by one or more polylines (chain of line segments)"""
    def __init__(self, points=None, polyline=None, orientation=True):
        super().__init__('Polyline', orientation=orientation)
        if points is not None:
            points = tuple(points)
            if len(points) < 2:
                raise RuntimeError('tried to create PolylineRegion with < 2 points')
            self.points = points
            self.lineString = shapely.geometry.LineString(points)
        elif polyline is not None:
            if isinstance(polyline, shapely.geometry.LineString):
                if len(polyline.coords) < 2:
                    raise RuntimeError('tried to create PolylineRegion with <2-point LineString')
            elif isinstance(polyline, shapely.geometry.MultiLineString):
                if len(polyline) == 0:
                    raise RuntimeError('tried to create PolylineRegion from empty MultiLineString')
                for line in polyline:
                    assert len(line.coords) >= 2
            else:
                raise RuntimeError('tried to create PolylineRegion from non-LineString')
            self.lineString = polyline
        else:
            raise RuntimeError('must specify points or polyline for PolylineRegion')
        if not self.lineString.is_valid:
            raise RuntimeError('tried to create PolylineRegion with '
                               f'invalid LineString {self.lineString}')
        self.segments = self.segmentsOf(self.lineString)
        # Cumulative segment lengths, used as cum_weights below so sampling
        # chooses a segment with probability proportional to its length.
        cumulativeLengths = []
        total = 0
        for p, q in self.segments:
            dx, dy = p[0] - q[0], p[1] - q[1]
            total += math.hypot(dx, dy)
            cumulativeLengths.append(total)
        self.cumulativeLengths = cumulativeLengths
    @classmethod
    def segmentsOf(cls, lineString):
        """Return the list of 2-point segments making up a (Multi)LineString."""
        if isinstance(lineString, shapely.geometry.LineString):
            segments = []
            points = list(lineString.coords)
            if len(points) < 2:
                raise RuntimeError('LineString has fewer than 2 points')
            last = points[0]
            for point in points[1:]:
                segments.append((last, point))
                last = point
            return segments
        elif isinstance(lineString, shapely.geometry.MultiLineString):
            allSegments = []
            for line in lineString:
                allSegments.extend(cls.segmentsOf(line))
            return allSegments
        else:
            raise RuntimeError('called segmentsOf on non-linestring')
    def uniformPointInner(self):
        """Sample uniformly by length: pick a segment, then interpolate along it."""
        pointA, pointB = random.choices(self.segments,
                                        cum_weights=self.cumulativeLengths)[0]
        interpolation = random.random()
        x, y = averageVectors(pointA, pointB, weight=interpolation)
        if self.orientation is True:
            # Default orientation: align with the direction of the chosen segment.
            return OrientedVector(x, y, headingOfSegment(pointA, pointB))
        else:
            return self.orient(Vector(x, y))
    def intersect(self, other, triedReversed=False):
        """Intersect with a polygonal region; fall back to the generic machinery otherwise."""
        poly = toPolygon(other)
        if poly is not None:
            intersection = self.lineString & poly
            if (intersection.is_empty or
                not isinstance(intersection, (shapely.geometry.LineString,
                                              shapely.geometry.MultiLineString))):
                # TODO handle points!
                return nowhere
            return PolylineRegion(polyline=intersection)
        return super().intersect(other, triedReversed)
    def containsPoint(self, point):
        return self.lineString.intersects(shapely.geometry.Point(point))
    def containsObject(self, obj):
        # A polyline has zero area, so it can never contain an extended Object.
        return False
    def getAABB(self):
        """Axis-aligned bounding box as ((minx, miny), (maxx, maxy))."""
        xmin, ymin, xmax, ymax = self.lineString.bounds
        return ((xmin, ymin), (xmax, ymax))
    def show(self, plt, style='r-'):
        """Plot each segment with matplotlib."""
        for pointA, pointB in self.segments:
            plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], style)
    def __str__(self):
        return f'PolylineRegion({self.lineString})'
    def __eq__(self, other):
        if type(other) is not PolylineRegion:
            return NotImplemented
        return (other.lineString == self.lineString)
    @cached
    def __hash__(self):
        # Hash the WKT-ish string since Shapely geometries are mutable/unhashable.
        return hash(str(self.lineString))
class PolygonalRegion(Region):
    """Region given by one or more polygons (possibly with holes)"""
    def __init__(self, points=None, polygon=None, orientation=None):
        super().__init__('Polygon', orientation=orientation)
        if polygon is None and points is None:
            raise RuntimeError('must specify points or polygon for PolygonalRegion')
        if polygon is None:
            points = tuple(points)
            if len(points) == 0:
                raise RuntimeError('tried to create PolygonalRegion from empty point list!')
            for point in points:
                if needsSampling(point):
                    raise RuntimeError('only fixed PolygonalRegions are supported')
            self.points = points
            polygon = shapely.geometry.Polygon(points)
        # Normalize to a MultiPolygon internally.
        if isinstance(polygon, shapely.geometry.Polygon):
            self.polygons = shapely.geometry.MultiPolygon([polygon])
        elif isinstance(polygon, shapely.geometry.MultiPolygon):
            self.polygons = polygon
        else:
            raise RuntimeError(f'tried to create PolygonalRegion from non-polygon {polygon}')
        if not self.polygons.is_valid:
            raise RuntimeError('tried to create PolygonalRegion with '
                               f'invalid polygon {self.polygons}')
        if points is None and len(self.polygons) == 1 and len(self.polygons[0].interiors) == 0:
            # Single polygon without holes: expose its exterior ring as points
            # (dropping the duplicated closing coordinate).
            self.points = tuple(self.polygons[0].exterior.coords[:-1])
        if self.polygons.is_empty:
            raise RuntimeError('tried to create empty PolygonalRegion')
        # Triangulate so uniformPointInner can sample by area.
        triangles = []
        for polygon in self.polygons:
            triangles.extend(triangulatePolygon(polygon))
        assert len(triangles) > 0, self.polygons
        self.trianglesAndBounds = tuple((tri, tri.bounds) for tri in triangles)
        areas = (triangle.area for triangle in triangles)
        self.cumulativeTriangleAreas = tuple(itertools.accumulate(areas))
    def uniformPointInner(self):
        """Sample uniformly: choose a triangle by area, then rejection-sample inside it."""
        triangle, bounds = random.choices(
            self.trianglesAndBounds,
            cum_weights=self.cumulativeTriangleAreas)[0]
        minx, miny, maxx, maxy = bounds
        # TODO improve?
        while True:
            x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
            if triangle.intersects(shapely.geometry.Point(x, y)):
                return self.orient(Vector(x, y))
    def intersect(self, other, triedReversed=False):
        """Intersect with another polygonal region, preferring our orientation."""
        poly = toPolygon(other)
        orientation = other.orientation if self.orientation is None else self.orientation
        if poly is not None:
            intersection = self.polygons & poly
            if intersection.is_empty:
                return nowhere
            elif isinstance(intersection, (shapely.geometry.Polygon,
                                           shapely.geometry.MultiPolygon)):
                return PolygonalRegion(polygon=intersection, orientation=orientation)
            elif isinstance(intersection, shapely.geometry.GeometryCollection):
                # Keep only polygonal pieces of a mixed intersection.
                polys = []
                for geom in intersection:
                    if isinstance(geom, shapely.geometry.Polygon):
                        polys.append(geom)
                if len(polys) == 0:
                    # TODO handle points, lines
                    raise RuntimeError('unhandled type of polygon intersection')
                intersection = shapely.geometry.MultiPolygon(polys)
                return PolygonalRegion(polygon=intersection, orientation=orientation)
            else:
                # TODO handle points, lines
                raise RuntimeError('unhandled type of polygon intersection')
        return super().intersect(other, triedReversed)
    def union(self, other):
        """Union with another polygonal region."""
        poly = toPolygon(other)
        if not poly:
            raise RuntimeError(f'cannot take union of PolygonalRegion with {other}')
        union = polygonUnion((self.polygons, poly))
        return PolygonalRegion(polygon=union)
    def containsPoint(self, point):
        return self.polygons.intersects(shapely.geometry.Point(point))
    def containsObject(self, obj):
        objPoly = obj.polygon
        if objPoly is None:
            raise RuntimeError('tried to test containment of symbolic Object!')
        # TODO improve boundary handling?
        return self.polygons.contains(objPoly)
    def getAABB(self):
        """Axis-aligned bounding box as ((minx, miny), (maxx, maxy)).

        BUG FIX: shapely's `bounds` is (minx, miny, maxx, maxy); the previous
        unpacking order (xmin, xmax, ymin, ymax) swapped maxx and miny.
        """
        xmin, ymin, xmax, ymax = self.polygons.bounds
        return ((xmin, ymin), (xmax, ymax))
    def show(self, plt, style='r-'):
        """Plot the polygons with matplotlib."""
        plotPolygon(self.polygons, plt, style=style)
    def __str__(self):
        return '<PolygonalRegion>'
    def __eq__(self, other):
        if type(other) is not PolygonalRegion:
            return NotImplemented
        return (other.polygons == self.polygons
                and other.orientation == self.orientation)
    @cached
    def __hash__(self):
        # TODO better way to hash mutable Shapely geometries? (also for PolylineRegion)
        return hash((str(self.polygons), self.orientation))
class PointSetRegion(Region):
    """Region consisting of a set of discrete points.
    No :obj:`~scenic.core.object_types.Object` can be contained in a `PointSetRegion`,
    since the latter is discrete. (This may not be true for subclasses, e.g.
    `GridRegion`.)
    Args:
        name (str): name for debugging
        points (iterable): set of points comprising the region
        kdtree (:obj:`scipy.spatial.KDTree`, optional): k-D tree for the points (one will
            be computed if none is provided)
        orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation for
            the region
        tolerance (float, optional): distance tolerance for checking whether a point lies
            in the region
    """
    def __init__(self, name, points, kdTree=None, orientation=None, tolerance=1e-6):
        super().__init__(name, orientation=orientation)
        self.points = tuple(points)
        for point in self.points:
            if needsSampling(point):
                raise RuntimeError('only fixed PointSetRegions are supported')
        # k-D tree accelerates nearest-neighbor queries in containsPoint/intersect.
        self.kdTree = scipy.spatial.cKDTree(self.points) if kdTree is None else kdTree
        self.orientation = orientation
        self.tolerance = tolerance
    def uniformPointInner(self):
        """Sample one of the discrete points uniformly."""
        return self.orient(Vector(*random.choice(self.points)))
    def intersect(self, other, triedReversed=False):
        """Lazy intersection: sample our points falling inside the other region."""
        def sampler(intRegion):
            o = intRegion.regions[1]
            # Restrict candidates to points inside the other region's circumcircle.
            center, radius = o.circumcircle
            possibles = (Vector(*self.kdTree.data[i])
                         for i in self.kdTree.query_ball_point(center, radius))
            intersection = [p for p in possibles if o.containsPoint(p)]
            if len(intersection) == 0:
                raise RejectionException(f'empty intersection of Regions {self} and {o}')
            return self.orient(random.choice(intersection))
        return IntersectionRegion(self, other, sampler=sampler, orientation=self.orientation)
    def containsPoint(self, point):
        """True iff some point of the set lies within `tolerance` of the query point."""
        distance, location = self.kdTree.query(point)
        return (distance <= self.tolerance)
    def containsObject(self, obj):
        # A discrete point set cannot contain an extended Object.
        raise NotImplementedError()
    def __eq__(self, other):
        if type(other) is not PointSetRegion:
            return NotImplemented
        return (other.name == self.name
                and other.points == self.points
                and other.orientation == self.orientation)
    def __hash__(self):
        return hash((self.name, self.points, self.orientation))
class GridRegion(PointSetRegion):
    """A Region given by an obstacle grid.
    A point is considered to be in a `GridRegion` if the nearest grid point is
    not an obstacle.
    Args:
        name (str): name for debugging
        grid: 2D list, tuple, or NumPy array of 0s and 1s, where 1 indicates an obstacle
          and 0 indicates free space
        Ax (float): spacing between grid points along X axis
        Ay (float): spacing between grid points along Y axis
        Bx (float): X coordinate of leftmost grid column
        By (float): Y coordinate of lowest grid row
        orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation of region
    """
    def __init__(self, name, grid, Ax, Ay, Bx, By, orientation=None):
        self.grid = numpy.array(grid)
        # Grid is indexed [row, column] == [y, x].
        self.sizeY, self.sizeX = self.grid.shape
        self.Ax, self.Ay = Ax, Ay
        self.Bx, self.By = Bx, By
        # The underlying point set contains only the free (0) cells.
        y, x = numpy.where(self.grid == 0)
        points = [self.gridToPoint(point) for point in zip(x, y)]
        super().__init__(name, points, orientation=orientation)
    def gridToPoint(self, gp):
        """Map grid coordinates (x, y) to world coordinates."""
        x, y = gp
        return ((self.Ax * x) + self.Bx, (self.Ay * y) + self.By)
    def pointToGrid(self, point):
        """Map a world point to the nearest grid cell, or None if outside the grid."""
        x, y = point
        x = (x - self.Bx) / self.Ax
        y = (y - self.By) / self.Ay
        nx = int(round(x))
        if nx < 0 or nx >= self.sizeX:
            return None
        ny = int(round(y))
        if ny < 0 or ny >= self.sizeY:
            return None
        return (nx, ny)
    def containsPoint(self, point):
        """True iff the nearest grid cell exists and is free."""
        gp = self.pointToGrid(point)
        if gp is None:
            return False
        x, y = gp
        return (self.grid[y, x] == 0)
    def containsObject(self, obj):
        # TODO improve this procedure!
        # Fast check: all corners must lie in free cells.
        for c in obj.corners:
            if not self.containsPoint(c):
                return False
        # Slow check: no obstacle cell inside the corner bounding box may
        # fall within the object's footprint.
        gps = [self.pointToGrid(corner) for corner in obj.corners]
        x, y = zip(*gps)
        minx, maxx = findMinMax(x)
        miny, maxy = findMinMax(y)
        for x in range(minx, maxx+1):
            for y in range(miny, maxy+1):
                p = self.gridToPoint((x, y))
                if self.grid[y, x] == 1 and obj.containsPoint(p):
                    return False
        return True
class IntersectionRegion(Region):
    """Symbolic intersection of two or more regions, sampled lazily."""
    def __init__(self, *regions, orientation=None, sampler=None):
        self.regions = tuple(regions)
        if len(self.regions) < 2:
            raise RuntimeError('tried to take intersection of fewer than 2 regions')
        super().__init__('Intersection', *self.regions, orientation=orientation)
        if sampler is None:
            # Fall back to rejection sampling from the first region.
            sampler = self.genericSampler
        self.sampler = sampler
    def sampleGiven(self, value):
        """Substitute sampled sub-regions and retry a concrete intersection if possible."""
        regs = [value[reg] for reg in self.regions]
        # Now that regions have been sampled, attempt intersection again in the hopes
        # there is a specialized sampler to handle it (unless we already have one)
        if self.sampler is self.genericSampler:
            failed = False
            intersection = regs[0]
            for region in regs[1:]:
                intersection = intersection.intersect(region)
                if isinstance(intersection, IntersectionRegion):
                    # Still symbolic: no specialized intersection was available.
                    failed = True
                    break
            if not failed:
                intersection.orientation = value[self.orientation]
                return intersection
        return IntersectionRegion(*regs, orientation=value[self.orientation],
                                  sampler=self.sampler)
    def evaluateInner(self, context):
        """Evaluate all sub-regions and the orientation in the given context."""
        regs = (valueInContext(reg, context) for reg in self.regions)
        orientation = valueInContext(self.orientation, context)
        return IntersectionRegion(*regs, orientation=orientation, sampler=self.sampler)
    def containsPoint(self, point):
        """A point is in the intersection iff it is in every sub-region."""
        return all(region.containsPoint(point) for region in self.regions)
    def uniformPointInner(self):
        return self.orient(self.sampler(self))
    @staticmethod
    def genericSampler(intersection):
        """Rejection sampler: draw from the first region, reject if outside any other."""
        regs = intersection.regions
        point = regs[0].uniformPointInner()
        for region in regs[1:]:
            if not region.containsPoint(point):
                raise RejectionException(
                    f'sampling intersection of Regions {regs[0]} and {region}')
        return point
    def isEquivalentTo(self, other):
        """Structural equivalence: same sub-regions (order-insensitive) and orientation."""
        if type(other) is not IntersectionRegion:
            return False
        return (areEquivalent(set(other.regions), set(self.regions))
                and other.orientation == self.orientation)
    def __str__(self):
        return f'IntersectionRegion({self.regions})'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for C++ module twiss.
"""
import os
import IBSLib as ibslib
import numpy as np
import pandas as pd
import pytest
# (library constant, expected reference value) pairs; masses in GeV/c^2,
# lengths in meters, per the values the library is expected to expose.
constants = [
    (ibslib.clight, 299792458.0),
    (ibslib.hbarGeV, 6.582119569e-25),
    (ibslib.electron_mass, 0.51099895000e-3),
    (ibslib.proton_mass, 0.93827208816),
    (ibslib.neutron_mass, 0.93956542052),
    (ibslib.mu_mass, 0.1056583755),
    (ibslib.atomic_mass_unit, 0.93149410242),
    (ibslib.pi, 3.141592653589793),
    (ibslib.electric_charge, 1.602176634e-19),
    (ibslib.euler, 0.577215664901533),
    (ibslib.electron_radius, 2.8179403262e-15),
    (ibslib.proton_radius, 1.5346982671888944e-18),
]
@pytest.mark.parametrize("name, value", constants)
def test_constants(name, value):
    """Each exposed library constant must match its reference value exactly."""
    assert name == value
def test_cpp_sigefromsigs():
    """Energy spread computed from bunch length stays small.

    NOTE(review): one-sided bound — a large negative result would also pass;
    confirm whether abs() was intended.
    """
    assert (ibslib.sige_from_sigs(ibslib.pi * 2 * 1.25e6, 0.005, 5e-4, 3326.0, 37.0)) < 1e-2
def test_cpp_sigsfromsige():
    """Bunch length from energy spread: result must land in a tight two-sided window."""
    val = ibslib.sigs_from_sige(8.96628617341675e-05, 3326.0, 37.0, 5e-4 * ibslib.pi * 2 * 1.25e6)
    assert (val < 0.0051) & (val > 0.004999)
def test_cpp_eta():
    """Phase-slip factor eta(gamma, gamma_tr) against a reference value.

    Uses abs(): the original one-sided difference could never fail when the
    result was too small (large negative error still satisfied `< 1e-9`).
    """
    assert abs(ibslib.eta(3600.0, 37.0) - 0.00073046018996082) < 1e-9
def test_cpp_fmohl():
    """fmohl(a, b, q, n) against a reference value.

    Uses abs(): the original `expected - actual < 1e-9` passed for any
    actual larger than expected, however wrong.
    """
    a = 5.709563671168914e-04
    b = 2.329156389696222e-01
    q = 2.272866910079534e00
    npp = 1000
    actual = ibslib.fmohl(a, b, q, npp)
    expected = 6824.655537384558
    assert abs(expected - actual) < 1e-9
def test_cpp_particle_radius():
    """Classical particle radius for charge -1, A = 1.

    NOTE(review): exact float equality — brittle if the library changes its
    internal constants; consider a tolerance.
    """
    charge = -1
    aatom = 1
    actual = ibslib.particle_radius(charge, aatom)
    expected = 1.5346982671888944e-18
    assert actual == expected
def test_cpp_BetaRelativisticFromGamma():
    """gamma = 1 corresponds to a particle at rest, so beta must be 0."""
    gamma = 1
    expected = 0
    actual = ibslib.beta_relativistic_from_gamma(gamma)
    assert expected == actual
def test_cpp_rds():
    """Carlson elliptic integral helper rds(1, 2, 3) against a reference value.

    NOTE(review): exact float equality — confirm the library guarantees
    bit-identical results across platforms.
    """
    x, y, z = 1, 2, 3
    actual = ibslib.rds(x, y, z)
    expected = 0.29046028102188937
    assert actual == expected
# (voltages, harmonics, phi, expected) cases for rf_voltage_in_ev.
hvphi = [
    ([1.0], [1.0], 90, -1.0),
    ([1.0, 17.6, 20.0], [400.0, 1200.0, 1400.0], 90, 30.742135),
]
@pytest.mark.parametrize("voltages, harmonics, phi, expected", hvphi)
def test_cpp_rfvoltages(voltages, harmonics, phi, expected):
    """Total RF voltage (in eV) of one or more RF systems.

    Uses abs(): the original one-sided `actual - expected < 1e-6` could not
    fail for actual far below expected. Debug print removed.
    """
    actual = ibslib.rf_voltage_in_ev(phi, -1.0, harmonics, voltages)
    assert abs(actual - expected) < 1.0e-6
# (voltages, harmonics, phi, expected) cases for rf_voltage_in_ev_prime.
hvphip = [
    ([1.0], [1.0], 90, -6.123233995e-17),
    ([1.0, 17.6, 20.0], [400.0, 1200.0, 1400.0], 90, 30.742135),
]
@pytest.mark.parametrize("voltages, harmonics, phi, expected", hvphip)
def test_cpp_rfvoltages_prime(voltages, harmonics, phi, expected):
    """Derivative of the RF voltage with respect to phase.

    Uses abs(): the original one-sided check could not fail for
    actual far below expected. Debug print removed.
    """
    actual = ibslib.rf_voltage_in_ev_prime(phi, -1.0, harmonics, voltages)
    assert abs(actual - expected) < 1.0e-6
def test_cpp_rf_voltage_in_ev_with_rad_losses():
    """RF voltage including radiation losses against a reference value.

    Uses abs(): the original one-sided check could not fail for
    actual far below expected. Debug print removed.
    """
    actual = ibslib.rf_voltage_in_ev_with_rad_losses(180.0, 179e3, -1.0, [1.0], [1.0])
    expected = 1.0000000000219211
    assert abs(actual - expected) < 1.0e-9
# Twiss lattice file shipped alongside this test module.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
my_twiss_file = os.path.join(THIS_DIR, "b2_design_lattice_1996.twiss")
def test_cpp_updateTwiss():
    """updateTwiss must extend the raw Twiss table with derived optics columns
    (radiation integrals I1-I5, gamma functions, curvatures, etc.) while
    keeping the original columns."""
    twiss = ibslib.GetTwissTable(my_twiss_file)
    tw = ibslib.updateTwiss(twiss)
    print(twiss.keys())
    print(tw.keys())
    assert sorted(list(twiss.keys())) == sorted(
        ["ALFX", "ALFY", "ANGLE", "BETX", "BETY", "DPX", "DPY", "DX", "DY", "K1L", "K1SL", "L"]
    )
    assert sorted(list(tw.keys())) == sorted(
        [
            "ALFX",
            "ALFY",
            "ANGLE",
            "BETX",
            "BETY",
            "DPX",
            "DPY",
            "DX",
            "DY",
            "I1",
            "I2",
            "I3",
            "I4x",
            "I4y",
            "I5x",
            "I5y",
            "K1L",
            "K1SL",
            "L",
            "gammax",
            "gammay",
            "hx",
            "hy",
            "k",
            "rho",
        ]
    )
|
nilq/baby-python
|
python
|
import attr
from typing import Any, List, Optional
from tokopedia import TokopediaResponse
@attr.dataclass(slots=True)
class ActiveProductsShop:
    """Shop summary attached to an active-products listing."""
    id: int
    name: str
    uri: str
    location: str
@attr.dataclass(slots=True)
class ActiveProductShop:
    """Shop details embedded in a single active product."""
    id: int
    name: str
    url: str
    is_gold: bool
    location: str
    city: str
    reputation: str
    clover: str
@attr.dataclass(slots=True)
class ActiveProductBadge:
    """Badge (title + icon) shown on a product."""
    title: str
    image_url: str
@attr.dataclass(slots=True)
class ActiveProduct:
    """One active product as returned by the Tokopedia API."""
    id: int
    name: str
    childs: Optional[Any]  # presumably product variants; element type unknown from here
    url: str
    image_url: str
    image_url_700: str
    price: str
    shop: ActiveProductShop
    wholesale_price: List  # element type not specified by the API wrapper
    courier_count: int
    condition: int
    category_id: int
    category_name: str
    category_breadcrumb: str
    department_id: int
    labels: List  # element type not specified by the API wrapper
    badges: List[ActiveProductBadge]
    is_featured: int
    rating: int
    count_review: int
    original_price: str
    discount_expired: str
    discount_percentage: int
    sku: str
    stock: int
@attr.dataclass(slots=True)
class ActiveProducts:
    """Paged active-products payload: total count, shop summary, and products."""
    total_data: int
    shop: ActiveProductsShop
    products: List[ActiveProduct]
@attr.dataclass(slots=True)
class ResponseActiveProducts(TokopediaResponse):
    """API response wrapper; `data` is None when the request failed."""
    data: Optional[ActiveProducts] = None
|
nilq/baby-python
|
python
|
"""Tests for the models of the ``media_library`` app."""
from django.test import TestCase
from user_media.models import UserMediaImage
from user_media.tests.factories import UserMediaImageFactory
from . import factories
class MediaLibraryTestCase(TestCase):
    """Tests for the ``MediaLibrary`` model class."""
    longMessage = True

    def setUp(self):
        self.library = factories.MediaLibraryFactory()

    def test_instantiation(self):
        self.assertTrue(self.library.pk)

    def test_media_images(self):
        # One image item (video=None) and one default (video) item;
        # only the image item must be returned.
        image = factories.MediaItemFactory(
            image=UserMediaImageFactory(),
            library=self.library,
            video=None,
        )
        factories.MediaItemFactory(library=self.library)
        self.assertEqual(
            self.library.media_images().count(), 1, msg=(
                '``media_images`` should return only one item.'
            )
        )
        self.assertEqual(
            self.library.media_images()[0], image, msg=(
                '``media_images`` should return the created image.'
            )
        )

    def test_media_videos(self):
        # One image item and one video item; only the video must be returned.
        factories.MediaItemFactory(
            image=UserMediaImageFactory(),
            library=self.library,
            video=None,
        )
        video = factories.MediaItemFactory(library=self.library)
        self.assertEqual(
            self.library.media_videos().count(), 1, msg=(
                '``media_videos`` should return only one item.'
            )
        )
        self.assertEqual(
            self.library.media_videos()[0], video, msg=(
                '``media_videos`` should return the created video.'
            )
        )
class MediaItemTestCase(TestCase):
    """Tests for the ``MediaItem`` model class."""
    longMessage = True

    def assertNotRaises(self, func, *args, **kwargs):
        """Fail the test if calling ``func`` raises any exception."""
        try:
            func(*args, **kwargs)
        except Exception as ex:
            self.fail(msg=(
                '"{0}" should not have raised an exception, but raised'
                ' "{1}"'.format(repr(func), str(ex))
            ))

    def setUp(self):
        self.library = factories.MediaLibraryFactory()
        # NOTE(review): other tests pass ``library=``; here the factory gets
        # ``showreel=`` — confirm the factory accepts both spellings.
        self.mediaitem = factories.MediaItemFactory(
            showreel=self.library,
            video='https://youtube.com/watch?v=123456',
        )
        self.umedia_image = UserMediaImageFactory()
        self.mediaitemimage = factories.MediaItemFactory(
            video=None, image=self.umedia_image,
        )

    def test_delete(self):
        self.mediaitemimage.delete()
        self.assertEqual(UserMediaImage.objects.count(), 0, msg=(
            'The user media images should have been deleted as well.'
        ))

    def test_instantiation(self):
        self.assertTrue(self.mediaitem.pk)

    def test_video_id(self):
        self.assertEqual(self.mediaitem.video_id, '123456', msg=(
            'The property should have returned the correct video id.'
        ))

    def test_clean(self):
        # A sample of YouTube/Vimeo URL shapes that clean() must accept.
        linklist = [
            'http://www.youtube.com/watch?v=-JyZLS2IhkQ',
            'https://www.youtube.com/watch?v=-JyZLS2IhkQ',
            'http://www.youtube.de/watch?v=-JyZLS2IhkQ',
            'https://youtube.com/watch?v=-JyZLS2IhkQ',
            ('https://www.youtube.com/watch?v=PguLNvCcOHQ'
             '&list=RDPguLNvCcOHQ#t=0'),
            'http://youtu.be/PguLNvCcOHQ?list=RDPguLNvCcOHQ ',
            'http://vimeo.com/channels/staffpicks/110140870',
            'http://vimeo.com/59777392',
            'http://vimeo.com/video/59777392',
            ('http://vimeo.com/groups/thedirectorofphotography/'
             'videos/110016243'),
        ]
        for link in linklist:
            self.mediaitem.video = link
            self.assertNotRaises(self.mediaitem.clean)
nilq/baby-python
|
python
|
from slack import WebClient
class SlackApiWrapper(WebClient):
    """Thin convenience wrapper around slack's WebClient for common chat calls."""
    def __init__(self, api_token):
        super().__init__(api_token)
    def post_message(self, channel, message):
        """Post a plain-text message to a channel."""
        response = self.chat_postMessage(
            channel=channel,
            text=message)
        # NOTE(review): `assert` is stripped under `python -O`; consider
        # raising an explicit error on failure instead.
        assert response["ok"]
    def post_attachment_message(self, channel, blocks, attachments):
        """Post a message composed of Block Kit blocks plus attachments."""
        response = self.api_call(
            'chat.postMessage',
            json=dict(
                channel=channel,
                blocks=blocks,
                attachments=attachments
            )
        )
        assert response["ok"]
    def update_message(self, channel, ts, blocks, attachments):
        """Replace the blocks/attachments of the message identified by `ts`."""
        response = self.api_call(
            'chat.update',
            json=dict(
                channel=channel,
                ts=ts,
                blocks=blocks,
                attachments=attachments
            )
        )
        assert response["ok"]
|
nilq/baby-python
|
python
|
from sys import stdin, stdout
# Instant-runoff (preferential) voting: each ballot ranks all candidates;
# repeatedly eliminate the lowest-vote candidates until someone reaches a
# majority or all remaining candidates tie.
num_cases = int(stdin.readline())
stdin.readline()
for case in range(num_cases):
    n = int(stdin.readline().strip()) # num_candidates
    candidates = []
    for i in range(n):
        candidates.append(stdin.readline().strip())
    # Ballots are 1-based in the input; store them 0-based.
    votes = []
    line = stdin.readline().strip()
    while line != "":
        votes.append(list(map(lambda x: int(x) - 1, line.split())))
        line = stdin.readline().strip()
    # This set keeps track of candidates that are eliminated in vote counting cycle
    eliminated = set([])
    v = len(votes)
    # pointers[i]: index into ballot i of its highest-ranked surviving candidate.
    pointers = [0] * v
    is_decided = False
    if case > 0:
        stdout.write("\n")
    while not is_decided:
        # Re-count the votes
        total_votes = [0]*n
        for i in range(v):
            # advance pointers[i] till it points to a candidate still in the race
            p = pointers[i]
            while votes[i][p] in eliminated:
                p += 1
            pointers[i] = p
            # Pointers[i] point to a valid vote
            total_votes[votes[i][p]] += 1
        # Find max vote (no need to check if not eliminated)
        max_vote = max(total_votes)
        # Check if it is more than 50 percents
        if max_vote*2 >= v :
            is_decided = True
            for iv in range(len(total_votes)):
                if total_votes[iv] == max_vote:
                    stdout.write(candidates[iv] + "\n")
        else:
            # Minimum vote among surviving candidates only.
            min_vote = -1
            for vi, vv in enumerate(total_votes):
                if not vi in eliminated:
                    if min_vote == -1:
                        min_vote = vv
                    else:
                        min_vote = min(vv, min_vote)
            # make sure min is not already eliminated
            if min_vote == max_vote:
                # No one left to eliminate
                is_decided = True
                for i, guy in enumerate(candidates):
                    if not i in eliminated:
                        stdout.write(guy + "\n")
            else:
                # find everyone with min_vote and eliminate them
                for k in range(n):
                    if total_votes[k] == min_vote:
                        eliminated.add(k)
|
nilq/baby-python
|
python
|
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
# Build configuration for the CPU SyncBN C++ extension (importable as
# `syncbn_cpu`), compiled with PyTorch's C++ extension machinery.
setup(
    name='syncbn_cpu',
    ext_modules=[
        CppExtension('syncbn_cpu', [
            'operator.cpp',
            'syncbn_cpu.cpp',
        ]),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
|
nilq/baby-python
|
python
|
from typing import get_type_hints, TypeVar, Type
__all__ = ["Storage"]
T = TypeVar('T')
class Storage(dict):
    """
    A dict whose items are also reachable as attributes: `obj.foo` works
    wherever `obj['foo']` does.
    >>> o = Storage(a=1)
    >>> o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> o['a']
    2
    >>> del o.a
    >>> o.a
    Traceback (most recent call last):
    ...
    AttributeError: 'a'
    """
    def __getattr__(self, name):
        # Fall back to item lookup, translating the failure into the
        # conventional exception type for attribute access.
        try:
            return self[name]
        except KeyError as err:
            raise AttributeError(err)
    def __setattr__(self, name, value):
        # Attribute assignment writes straight into the mapping.
        self[name] = value
    def __delattr__(self, name):
        try:
            del self[name]
        except KeyError as err:
            raise AttributeError(err)
    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'
    def __sub__(self, other):
        # Remove a single key (string) or every key in an iterable, in place.
        if isinstance(other, str):
            self.pop(other, None)
        else:
            for item in other:
                self.__sub__(item)
        return self
    @staticmethod
    def type_hints(cls: Type) -> 'Storage':
        # Collect public type hints of *cls*; a hint on a private name is
        # exposed under its public name when a matching property exists.
        hints = Storage()
        for name, hint in get_type_hints(cls).items():
            if name[0] != '_':
                hints[name] = hint
                continue
            public = name[1:]
            if isinstance(getattr(cls, public, None), property):
                hints[public] = hint
        return hints
    @staticmethod
    def of(obj) -> 'Storage':
        # Snapshot the (publicly named) hinted attributes of *obj*.
        snapshot = Storage()
        for name in get_type_hints(obj).keys():
            public = name[1:] if name[0] == '_' else name
            if hasattr(obj, public):
                snapshot[public] = getattr(obj, public)
        return snapshot
    def to(self, cls: Type[T]) -> T:
        # Build a *cls* instance and copy every item onto it as attributes.
        target = cls()
        for key, value in self.items():
            setattr(target, key, value)
        return target
|
nilq/baby-python
|
python
|
from libarduino import pinMode,digitalWrite,analogRead
import time
class Actuator():
    """Digital output device (motor, valve, piston) on an Arduino pin."""
    def __init__(self, port):
        self.port = port
        pinMode(self.port, 'OUTPUT')
    def activate(self):
        # Drive the pin high.
        digitalWrite(self.port, 1)
    def deactivate(self):
        # Drive the pin low.
        digitalWrite(self.port, 0)
class Ranger():
    """Analog range sensor associated with a drink position."""
    def __init__(self, port, drink):
        self.port = port
        self.drink = drink  # drink position index this sensor belongs to
    def read(self):
        # Raw analog reading; callers treat values <= a threshold as
        # "cup in position" — NOTE(review): confirm sensor polarity.
        return analogRead(self.port)
class Mixer():
    """Drink mixer: a conveyor motor, a serving piston, and one ranger plus
    one valve per drink position, filling a cup of fixed capacity.

    Fix: the bare `print` statements were Python-2-only syntax; they are now
    parenthesized single-argument calls, valid under both Python 2 and 3.
    """
    def __init__(self, motor, piston, rangers, valves, capacity=250, drinks=2, dist=128):
        self.motor = motor        # conveyor Actuator
        self.piston = piston      # serving piston Actuator
        self.rangers = rangers    # per-position Ranger sensors
        self.valves = valves      # per-position valve Actuators
        self.capacity = capacity  # cup capacity (presumably ml — confirm)
        self.drinks = drinks      # number of drink positions
        self.dist = dist          # ranger threshold meaning "cup in position"
    def mix_drink(self, recipe):
        """Dispense each liquid; recipe[i] is the fraction of capacity for drink i."""
        use = [] # Use these liquids.
        for i in range(self.drinks):
            if recipe[i] > 0:
                use.append(i)
        for i in use:
            # Advance the conveyor until the cup sits under position i.
            while self.rangers[i].read() > self.dist:
                self.motor.activate()
                time.sleep(0.1)
            self.motor.deactivate()
            start_time = time.time()
            self.valves[i].activate()
            const = 1 # Const is the relation between time and how much liquid which gets through the valves. TODO find proper const.
            fill_time = recipe[i] * self.capacity * const
            while (time.time() - start_time) < fill_time:
                print('Standing still')  # busy-wait while the valve dispenses
            self.valves[i].deactivate()
    def serve(self, piston_time=7, ranger=0):
        """Move the cup to the serving position and actuate the piston."""
        # Get to piston position
        while self.rangers[ranger].read() > self.dist:
            self.motor.activate()
            time.sleep(0.1)
        # NOTE(review): unlike mix_drink, the motor is left active here —
        # confirm whether a motor.deactivate() is missing.
        start_time = time.time()
        self.piston.activate()
        while (time.time() - start_time) < piston_time:
            print('Serving drink')  # busy-wait during the pour
        self.piston.deactivate()
|
nilq/baby-python
|
python
|
from django import forms
class CartAddForm(forms.Form):
    """Form for adding an item to the cart; quantity is limited to 1-9."""
    quantity = forms.IntegerField(min_value=1, max_value=9)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a JSON file to a MATLAB .mat file.
Usage: json_to_mat.py foo.json
"""
import collections
import json
import os
import sys
import scipy.io
def _Sanitize(data):
"""Converts data to a format acceptable by scipy.io.savemat.
The scipy.io.savemat function cannot handle Booleans, NoneTypes, or
unicode strings.
Args:
data: Dictionary returned by json.load.
Returns:
Sanitized dictionary that is compatible with scipy.io.savemat.
"""
if isinstance(data, collections.OrderedDict):
return collections.OrderedDict([(str(k), _Sanitize(v))
for k, v in data.items()])
if isinstance(data, dict):
return {str(k): _Sanitize(v) for k, v in data.items()}
elif isinstance(data, list):
return [_Sanitize(x) for x in data]
elif data is None:
return []
elif isinstance(data, bool):
return 1 if data else 0
else:
return data
def _PrintUsage():
  """Prints usage instructions.

  Fix: the bare `print` statements were Python-2-only syntax; parenthesized
  single-argument print('...') calls behave identically under Python 2 and 3.
  """
  print('')
  print('Usage: json_to_mat.py foo.json')
  print('')
def main(argv):
  """Converts the JSON file named by argv[1] to a .mat file alongside it.

  Fix: Python-2-only `print '...'` statements converted to parenthesized
  single-argument calls, valid under both Python 2 and 3.
  """
  if len(argv) != 2:
    print('Error: Wrong number of arguments.')
    _PrintUsage()
    sys.exit(1)
  if not os.path.isfile(argv[1]):
    print('Error: File does not exist.')
    _PrintUsage()
    sys.exit(1)
  # OrderedDict preserves the JSON key order in the .mat struct fields.
  with open(argv[1], 'r') as f:
    data = _Sanitize(json.load(f, object_pairs_hook=collections.OrderedDict))
  filename, _ = os.path.splitext(argv[1])
  scipy.io.savemat(filename + '.mat', data, long_field_names=True)
|
nilq/baby-python
|
python
|
#-*- coding: utf-8 -*-
__all__ = ['LEA','ECB','CBC','CTR','CFB','OFB','CCM','GCM','CMAC']
from .LEA import LEA
from .ECB import ECB
from .CBC import CBC
from .CTR import CTR
from .CFB import CFB
from .OFB import OFB
from .CCM import CCM
from .GCM import GCM
from .CMAC import CMAC
from .CipherMode import CipherMode, ENCRYPT_MODE, DECRYPT_MODE
from .CipherMode import TagError
|
nilq/baby-python
|
python
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer which represents aggregation function.
See class level comment.
This layer applies the provided model to the ragged input tensor and aggregates
the results.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
class Aggregation(keras.layers.Layer):
  # pyformat: disable
  """Layer that averages a wrapped model's output over a ragged dimension.

  The wrapped Keras model is applied to the flat values of each ragged
  input, and the per-entry results are then averaged along the ragged axis.

  Input shape:
    A list or dictionary with num_input_dims Rank-2 ragged tensors with
    shape: (batch_size, ?)

  Output shape:
    Rank-2 tensor with shape: (batch_size, 1)

  Attributes:
    - All `__init__` arguments.

  Example:

  ```python
  model = tf.keras.Model(inputs=inputs, outputs=outputs)
  layer = tfl.layers.Aggregation(model)
  ```
  """
  # pyformat: enable

  def __init__(self, model, **kwargs):
    """Initializes an instance of `Aggregation`.

    Args:
      model: A tf.keras.Model instance.
      **kwargs: Other args passed to `tf.keras.layers.Layer` initializer.

    Raises:
      ValueError: If `model` is not a `tf.keras.Model` instance.
    """
    if not isinstance(model, tf.keras.Model):
      raise ValueError('Model must be a tf.keras.Model instance.')
    super(Aggregation, self).__init__(**kwargs)
    # Lets Keras route ragged tensors straight through to call().
    self._supports_ragged_inputs = True
    self.model = model

  def call(self, x):
    """Standard Keras call() method."""
    per_entry = tf.ragged.map_flat_values(self.model, x)
    return tf.reduce_mean(per_entry, axis=1)

  def get_config(self):
    """Standard Keras get_config() method."""
    config = super(Aggregation, self).get_config().copy()
    config['model'] = tf.keras.utils.serialize_keras_object(self.model)
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Standard Keras from_config() classmethod."""
    inner_model = tf.keras.utils.deserialize_keras_object(
        config.pop('model'), custom_objects=custom_objects)
    return cls(inner_model, **config)
|
nilq/baby-python
|
python
|
#This file contains a common EKF tracking code for both elevator and rover
#It checks variable from file config.npy to figure out its own type
import time
from datetime import datetime
import subprocess
import numpy as np
from numpy import linalg
from numpy.linalg import inv
import math
import cmath
import linalgfunc
import pdb
import os
import serial
import sys, glob
import random
import Adafruit_BBIO.GPIO as GPIO
import pickle
#Libraries made for convenience
from analog import Analog
from motion_tracking_socket3D import MotionTrackingSocket3D
from led import LED
from trigger_socket import TriggerSocket
from motor_system import MotorSystem
import my_functions as mf
def initialize():
    """Allocate the EKF model matrices and per-iteration logging arrays.

    Everything is published as module-level globals because the top-level
    script body and closing_setup() read them directly.

    BUG FIX: the `global x_I_hat_all` declaration was followed by an
    assignment to `x_hat_all`, so `x_I_hat_all` was never defined.
    """
    global num_iteration
    num_iteration = 200  # number of EKF iterations to run
    global A
    A = np.identity(3)  # state-transition matrix
    global I
    I = np.identity(3)
    global B
    B = np.matrix([[0,0],[1,0],[0,1]])  # control-input matrix
    global Q
    Q = np.matrix([[0.00001,0,0],[0,0.0005,0],[0,0,0.0005]])  # process noise
    global Q_scaling
    Q_scaling = 1000000  # scales Q in the covariance prediction step
    global R
    R = 1  # measurement-noise variance (scalar intensity measurement)
    global P_f
    P_f = np.matrix([[0.100,0,0],[0,0.50,0],[0,0,0.50]])  # predicted covariance
    global P
    P = P_f  # filtered covariance starts at the predicted one
    # Per-iteration history buffers (row i = iteration i).
    global scan_parameters_all
    scan_parameters_all = np.zeros((num_iteration,6))
    global x_hatf_all
    x_hatf_all = np.zeros((num_iteration,3))
    global x_hat_all
    x_hat_all = np.zeros((num_iteration,3))
    global x_I_hat_all
    x_I_hat_all = np.zeros((num_iteration,3))
    global y_hat_all
    y_hat_all = np.zeros(num_iteration)
    global y_all
    y_all = np.zeros(num_iteration)
    global eigP_all
    eigP_all = np.zeros(num_iteration)
    global Pf_all
    Pf_all = np.zeros((num_iteration,3,3))
    global P_all
    P_all = np.zeros((num_iteration,3,3))
    global C_all
    C_all = np.zeros((num_iteration,3))
    global K_all
    K_all = np.zeros((num_iteration,3))
    global u_all
    u_all = np.zeros((num_iteration,3))
    global motor_commands_all
    motor_commands_all = np.zeros((num_iteration,2))
    global x_ground_truth_all
    x_ground_truth_all = np.zeros((num_iteration,6))
    global time_all
    time_all = np.zeros(num_iteration)
def setup():
    """Initialize hardware, read robot configuration, and align the gimbal.

    Creates the analog receiver, gimbal motors, motion-tracking socket and
    (depending on the communication flag) either the transmit LED or the
    RX/TX routines, all as module-level globals. Raises if the robot's
    measured yaw disagrees with the motion-tracking system by more than 1
    degree.
    """
    global receiver
    receiver = Analog()  # photodiode / intensity sensor
    global Gimbal
    Gimbal = MotorSystem()
    Gimbal.TakeGroundPosition()
    global motion_socket
    motion_socket = MotionTrackingSocket3D()  # external ground-truth pose
    global MyRobotName
    MyRobotName = mf.read_file("my_type.txt").split()[0]
    global scan_alternation_flag
    global c
    # Per-robot initial pointing and scan parameters.
    if MyRobotName == 'Rover':
        initial_pitch = 7
        initial_yaw = 7
        scan_alternation_flag = 1
        c = 15
        from underlying_robot import Robot
        global myBot
        myBot = Robot(motion_socket,MyRobotName,3,0.6)
    elif MyRobotName == 'Elevator':
        initial_pitch = 6
        initial_yaw = -8
        scan_alternation_flag = 0
        c = 15
    MyRobotName2 = mf.read_file("my_name.txt").split()[0]
    local_config_file_name = MyRobotName2 + '_config.txt'
    s = mf.read_file(local_config_file_name)
    local_config = s.split(' ')
    global bias_angle
    # Field 8 of the per-robot config; 180 means the mount is flipped.
    bias_angle = float(local_config[8])
    global receiver_sum_angle
    global base_sum_angle
    # Running totals of commanded receiver (pitch) and base (yaw) angles.
    receiver_sum_angle = initial_pitch
    base_sum_angle = initial_yaw
    global communication_flag
    communication_flag = int(mf.read_file("communication_flag.txt"))
    if communication_flag == 0:
        # LED-only mode: just turn the transmit LED on.
        global txLED
        txLED = LED()
        txLED.on()
    else:
        # Full comms mode.
        # NOTE(review): `scan` is not defined anywhere in this file, and
        # TransmissionHandle is never imported — this branch looks broken;
        # confirm against the original project sources.
        from receiver_handle import ReceiverHandle
        global RxRoutine
        RxRoutine = ReceiverHandle(scan[1])
        global TxRoutine
        TxRoutine = TransmissionHandle()
    yaw1 = Gimbal.get_yaw()
    x = motion_socket.x
    # Map the motion-tracking yaw into the gimbal's frame when flipped.
    if bias_angle == 180:
        yaw2 = x[0]%360-180
    else:
        yaw2 = x[0]
    #pdb.set_trace()
    if abs(yaw1-yaw2)>1.0:
        # Misaligned: shut everything down and abort.
        # NOTE(review): txLED only exists when communication_flag == 0 —
        # this cleanup would itself raise in comms mode; confirm.
        motion_socket.stop()
        Gimbal.Deactivate()
        txLED.off()
        pdb.set_trace()
        raise Exception("Sorry, the robot is not aligned, please correct the orientation: ",yaw2)
    Gimbal.WriteAbsoluteAngles([initial_yaw,initial_pitch])
    x = motion_socket.x
    pitch = Gimbal.get_pitch()
    yaw = Gimbal.get_yaw()
    print('Reached absolute yaw at ',yaw,' degrees, and absolute pitch at ',pitch,' degrees')
    if bias_angle == 180:
        yaw = x[0]%360-180
    else:
        yaw = x[0]
    print('From Motion Tracking System yaw = ',yaw,' and pitch = ',x[1])
def trigger_setup():
    """Block until the shared experiment-start trigger arrives.

    Publishes `my_trigger` (the trigger socket), `t_START` (the agreed
    start timestamp) and `toc` (actual wake-up time) as globals; for the
    Rover it also hands the received duty/idle parameters to the robot.
    """
    current_time = time.time()
    print("Current time: %f" %(current_time))
    global my_trigger
    my_trigger = TriggerSocket()
    print("Waiting for the starting trigger on ", MyRobotName)
    global t_START
    t_START, duty, tIdle= my_trigger.waitForTrigger()
    # All robots wake 3 seconds after the shared start time.
    mf.wait_till(t_START+3)
    global toc
    toc = time.time()
    print("Process triggered at time ",datetime.fromtimestamp(toc).strftime('%Y %m %d_%I:%M:%S.%f %p'), ' on ', MyRobotName)
    if MyRobotName == 'Rover':
        # Start the rover's background motion with the received parameters.
        myBot.duty = duty
        myBot.idle_time = tIdle
        myBot.motion_state = True
def closing_setup():
    """Shut down hardware and persist all logged arrays to disk.

    Writes a .npz archive of the EKF history arrays plus (in comms mode)
    pickles of the received data, and records the generated filenames in a
    '<robot>_..._recent_files_name.txt' index file.
    """
    Gimbal.Deactivate()
    file_name = MyRobotName + '_3D_EKF_data'
    txt_file_name = file_name + '_recent_files_name.txt'
    # Timestamped output names derived from the trigger time `toc`.
    zip_name = file_name + datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.npz')
    received_data_pkl_file_name = file_name + '_received_data' + datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.pkl')
    iteration_num_pkl_file_name = file_name + '_iteration_nums'+ datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.pkl')
    file2write = open(txt_file_name,'w')
    file2write.write(zip_name + ' ')
    if communication_flag == 0:
        txLED.off()
    else:
        # Comms mode: stop RX/TX and pickle everything that was received.
        # NOTE(review): indentation of this branch reconstructed from
        # context (RxRoutine only exists in comms mode) — confirm against
        # the original file.
        RxRoutine.stop()
        TxRoutine.deactivate_transmission()
        file2write.write(received_data_pkl_file_name + ' ')
        file2write.write(iteration_num_pkl_file_name)
        iteration_nums = RxRoutine.iteration_nums
        received_data = RxRoutine.received_data
        #np.save('recent_file_name.npy',common_file_name)
        f = open(iteration_num_pkl_file_name,"wb")
        pickle.dump(iteration_nums,f)
        f.close()
        f = open(received_data_pkl_file_name,"wb")
        pickle.dump(received_data,f)
        f.close()
    file2write.close()
    # Persist every logged array; several names (x_hat, timer, interval,
    # scan_psi, scan_theta, theta) are top-level script globals.
    np.savez(zip_name, scan_parameters_all=scan_parameters_all, \
        x_hatf_all=x_hatf_all, x_hat_all=x_hat, Pf_all=Pf_all,\
        C_all=C_all, y_hat_all=y_hat_all,\
        y_all=y_all, P_all=P_all, K_all=K_all, timer=timer,interval = interval,\
        u_all=u_all, scan_psi_all=scan_psi,scan_theta_all=scan_theta, \
        motor_commands_all=motor_commands_all, x_ground_truth_all=x_ground_truth_all,theta_all = theta)
    message = MyRobotName+" is Done!"
    my_trigger.sendFinisherFlag(message.encode())
    my_trigger.Deactivate()
    if MyRobotName == 'Rover':
        myBot.takeGroundPosition()
    motion_socket.stop()
# ---- Top-level script: allocate state, set up hardware, wait for trigger ----
initialize()
setup()
x_ground_truth_all[0] = motion_socket.x
#Variables Initialization
diff_sum = 0
x_hat = np.zeros((num_iteration,3))  # filtered (a-posteriori) state history
comm_array = np.zeros(7)  # per-iteration status payload (i, t, state, y, y_hat)
x_hat[0,:] = [0.5,0,0]
x_hat_k_f = [0.5,0,0]  # current predicted (a-priori) state
x_I_hat = np.zeros((num_iteration,3))  # integral of the state estimate
x_I_hat[0,:] = x_hat[0,:]
x_hatf_all[0,:] = x_hat[0,:]
x_I_hat_k = x_hat_k_f
x_hat_k_p = x_hat_k_f
y_hat = 0
K = np.identity(3)  # Kalman gain
C = np.identity(3)  # output Jacobian
y = 0  # measured intensity
u2 = 0  # control inputs for the two angle states
u3 = 0
u = [0,u2,u3]
psi = np.zeros(num_iteration+1)
timer = np.zeros(num_iteration+1)
theta = np.zeros(num_iteration+1)  # measured gimbal pitch per iteration
scan_psi = np.zeros(num_iteration+1)
scan_theta = np.zeros(num_iteration+1)
difference = np.zeros(num_iteration+1)  # relative innovation |y - y_hat| / y
angle_bias = np.zeros(num_iteration+1)  # scan phase angle per iteration
difference[0] = 0.5
theta[0] = Gimbal.get_pitch()
scan_theta[0] = theta[0]
# ReceiverStepper.rotateMotor(-theta[0])
# receiver_sum_angle = receiver_sum_angle -theta[0]
interval = np.zeros(num_iteration)  # wall-clock duration of each iteration
disturbance = 1 #degree/second
T = 0.8
T_factor = 2 #assuming 2.5 seconds for the full circle
t_Iter = 0.5 #assigned time for 1 step
switch = 0
#scanning terms
phi = 120  # scan phase increment (degrees) per iteration
scan_radius = 4
radius = 4
bias = angle_bias[0]
k_factor = 360/phi  # iterations per full scan circle
scan_counter = (360/phi)*scan_alternation_flag-1
pause_flag = 0
active_phase = 0
alpha_bias = 0  # scan offsets applied to the pointing direction
beta_bias = 0
Motor_command_receiver = 0
Motor_command_base = 0
termination_flag =1
is_moving = 0  # 0 = static target experiment
if(is_moving == 0):
    min_radius = 2
else:
    min_radius = 4
max_radius = 6
Vmax = 0.0  # largest intensity seen so far
trigger_setup()
x_ground_truth_all[0] = motion_socket.x
set_time = t_START + t_Iter +3  # deadline for the first iteration
tdiff_min = 1000  # smallest observed slack when waiting on a deadline
# Main EKF loop: each iteration moves the gimbal, takes one intensity
# measurement, runs predict/update, logs everything, and schedules the
# next scan offset. Runs on a fixed t_Iter cadence anchored at t_START.
for i in range(1,num_iteration):
    #print 'i= %d' %(i)
    #u = [0,0,0]
    Gimbal.ApplyMotorCommandsSync([Motor_command_base, Motor_command_receiver])
    y = receiver.getIntensity()  # scalar measurement for this pose
    theta[i] = Gimbal.get_pitch()
    if y>Vmax:
        Vmax = y
    # Predict: previous estimate plus the applied control.
    x_hat_k_f = x_hat[i-1,:] + [0,u2,u3]
    y_hat,C = mf.get_output_and_jacobian(alpha_bias,beta_bias,x_hat_k_f,c)
    #pdb.set_trace()
    if(active_phase == 1 and termination_flag == 1):
        # Full 3-state EKF update.
        P_f = A*P*A + Q_scaling*Q
        #Filtering
        K = P_f*np.transpose(C)*linalg.inv(C*P_f*np.transpose(C) + R)
        x_hat_k_p = np.array(np.mat(x_hat_k_f).T+K*(y-y_hat)).T[0] #0 is added to make it a one dimensional array rather a 2D array
        if x_hat_k_p[0] < 0:
            x_hat_k_p[0] = 0
        x_I_hat_k = x_I_hat[i-1,:] + x_hat_k_p*interval[i-1]
        P = (np.identity(3) - K*C)*P_f
        difference[i] = abs((y-y_hat)/y)
        min_ind = max(i-2,0)
        # Mean relative innovation over the last 3 iterations.
        diff_sum = sum(difference[min_ind:i+1])/3
        if(diff_sum < 0.5):
            # Estimate trusted: enable P/I feedback (only while paused).
            G = 0.98*pause_flag
            Gi = 0.2*pause_flag
        else:
            G = 0
            Gi = 0
        u2 = -G*x_hat_k_p[1] - Gi*x_I_hat_k[1]
        u3 = -G*x_hat_k_p[2] - Gi*x_I_hat_k[2]
    else:
        # Scalar update of the first state only (intensity gain).
        P_f_partial = A[0,0]*P[0,0]*A[0,0] + Q_scaling*Q[0,0]
        P_f[0,0] = P_f_partial
        K = P_f_partial*(C[0,0])/(C[0,0]*P_f_partial*C[0,0] + R)
        x_hat_k_p[0] = x_hat_k_f[0]+K*(y-y_hat)
        x_I_hat_k = [0,0,0]
        x_I_hat_k[0] = x_I_hat[i-1,0] + x_hat_k_p[0]*interval[i-1]
        P[0,0] = (1 - K*C[0,0])*P_f_partial
        u2 = 0
        u3 = 0
    u = [0,u2,u3]
    #print 'normal_u2 %f, normal_u3 %f' %(normal_u2, normal_u3)
    # Log this iteration's filter quantities.
    P_all[i,:,:] = P
    x_hatf_all[i,:] = x_hat_k_f
    scan_parameters_all[i,:] = [beta_bias,alpha_bias, scan_counter, active_phase, pause_flag, scan_radius]
    C_all[i,:] = C
    Pf_all[i,:,:] = P_f
    y_all[i] = y
    y_hat_all[i] = y_hat
    K_all[i,:] = np.transpose(K)
    x_I_hat[i,:] = x_I_hat_k
    x_hat[i,:] = x_hat_k_p
    u_all[i,:] = u
    motor_commands_all[i] = [Motor_command_base,Motor_command_receiver]
    toc = time.time()
    timer[i] = toc-t_START
    interval[i] = timer[i] - timer[i-1]
    if(i>0):
        T = sum(interval[1:i+1])/i  # running mean iteration period
    # Status payload (previously shared with the peer robot).
    comm_array[0] = i
    comm_array[1] = timer[i]
    comm_array[2] = x_hat[i,0]
    comm_array[3] = x_hat[i,1]
    comm_array[4] = x_hat[i,2]
    comm_array[5] = y
    comm_array[6] = y_hat
    #np.save(npy_name,comm_array)
    #sftp.put(npy_name,remote_path + npy_name)
    # Remember the scan offset just used, for the motor-command delta.
    previous_alpha_bias = scan_radius*mf.sind(bias)
    previous_beta_bias = scan_radius*mf.cosd(bias)
    P_angles = P[1:3,1:3]
    V = np.linalg.eig(P_angles)[0] #Eigen vectors
    eigP_all[i] = max(V) #Max eigen vector
    # Advance the scan phase counter (period = 2*k_factor iterations).
    scan_counter = scan_counter%(2*k_factor) + 1
    if(scan_counter == 1):
        pause_flag = 1
        if(y < 0.5*Vmax):
            termination_flag = 1
    if(scan_counter == k_factor+1):
        pause_flag = 0
    if(scan_counter == 2*k_factor):
        active_phase = 1
    if(scan_counter == k_factor+1):
        active_phase = 0
    if(i>20): #After this it becomes adaptive
        # Adapt the scan radius to the recent covariance magnitude.
        min_ind = int(max(i-k_factor,0))
        e = sum(eigP_all[min_ind:i])/k_factor
        #radius = (min(20,max(min_radius, math.floor((e)/200)))+radius)/2
        radius = min(max_radius,max(min_radius, math.floor((e)/6000)))
        if((radius == 0) and (y > 7*Vmax)):
            print("Reached terminal condition!!!")
            termination_flag = 0 + is_moving #It will only be zero when is moving is false
    scan_radius = pause_flag*radius*termination_flag
    #Computing scanning parameters for the next iteration
    angle_bias[i+1] = (scan_counter-1)*phi
    bias = angle_bias[i+1]
    alpha_bias = scan_radius*mf.sind(bias)
    beta_bias = scan_radius*mf.cosd(bias)
    motor_commands =mf.generate_motor_commands_old(theta[i], previous_alpha_bias,previous_beta_bias, u, alpha_bias, beta_bias)
    Motor_command_base = motor_commands[0,0]
    Motor_command_receiver = motor_commands[0,1]
    base_sum_angle = base_sum_angle + Motor_command_base
    receiver_sum_angle = receiver_sum_angle + Motor_command_receiver
    #theta[i+1] = receiver_sum_angle
    time_all[i] = set_time-t_START
    # Sleep until this iteration's deadline, tracking the minimum slack.
    tDiff= mf.wait_till(set_time)
    if tDiff<tdiff_min:
        tdiff_min = tDiff
    #print "Iteration: %d, Scan_radius: %d, Angle %d" %(i,scan_radius,bias)
    x_ground_truth_all[i] = motion_socket.x
    set_time = set_time + t_Iter
    # sys.stdout.write("Iteration: %d / %d \r" % (i,num_iteration) )
    # #sys.stdout.write("Measurements: %f / %f \r" % (y,Vmax) )
    # sys.stdout.flush()
    print("Iteration: %d / %d \r" % (i,num_iteration) )
    if bias_angle == 180:
        yaw = x_ground_truth_all[i,0]%360-180
    else:
        yaw = x_ground_truth_all[i,0]
    print('From Motion Tracking System yaw = ',yaw,' and pitch = ',x_ground_truth_all[i,1], ' tDiff ',tDiff)
print('Minimum wait was: ',tdiff_min)
closing_setup()
print('Done!')
|
nilq/baby-python
|
python
|
from Jumpscale import j
class Nodes:
def __init__(self, session, url):
self._session = session
self._base_url = url
j.data.schema.add_from_path(
"/sandbox/code/github/threefoldtech/jumpscaleX_threebot/ThreeBotPackages/tfgrid/directory/models"
)
self._model = j.data.schema.get_from_url("tfgrid.directory.node.2")
def list(self, farm_id=None, country=None, city=None, cru=None, sru=None, mru=None, hru=None, proofs=False):
query = {}
if proofs:
query["proofs"] = "true"
args = {
"farm": farm_id,
"city": city,
"cru": cru,
"sru": sru,
"mru": mru,
"hru": hru,
}
for k, v in args.items():
if v is not None:
query[k] = v
resp = self._session.get(self._base_url + "/nodes", params=query)
nodes = []
for node_data in resp.json():
node = self._model.new(datadict=node_data)
nodes.append(node)
return nodes
def get(self, node_id, proofs=False):
params = {}
if proofs:
params["proofs"] = "true"
resp = self._session.get(self._base_url + f"/nodes/{node_id}", params=params)
return self._model.new(datadict=resp.json())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
For grabbing ggv arguments for checking
"""
import numpy as np
from env.numerics.npy.prism import Prism, Box
import argparse
def parse_args():
    """Parse command-line options for the ggv argument checker.

    Returns:
        argparse.Namespace with boolean save/test/torch flags, the string
        tag/testconfig/torchconfig options, and animtimemax (note: defaults
        to the *int* 100, but a value given on the command line arrives as
        a string — preserved as-is for backward compatibility).
    """
    # NOTE: the original redundantly re-imported argparse here even though
    # it is imported at module level; the local import has been removed.
    parser = argparse.ArgumentParser()
    parser.add_argument("--save", action="store_true", default=False)
    parser.add_argument("--test", action="store_true", default=False)
    parser.add_argument("--torch", action="store_true", default=False)
    parser.add_argument("--tag", default="")
    parser.add_argument("--testconfig", default="")
    parser.add_argument("--torchconfig", default="")
    parser.add_argument("--animtimemax", default=100)
    args = parser.parse_args()
    return args
kv_ = lambda s:map(lambda _:_.split("="),s.split("_"))
class Torch(object):
    """Torch (light-source) configuration parsed from a "k=v_k=v" string.

    Attributes:
        config: list of [key, value] pairs from the config string.
        source/target: numpy arrays parsed from "x,y,z" values (None if
            the corresponding key is absent).
        direction: target - source (raises TypeError if either is missing,
            as in the original).
    """

    def __init__(self, config):
        # Materialize as a list: under Python 3 kv_ could yield a lazy map
        # object that the loop below would exhaust, leaving __str__ with
        # nothing to iterate. (Also removes the original's dead `pass`es.)
        self.config = list(kv_(config))
        self.source = None
        self.target = None
        for k, v in self.config:
            if k == "source":
                self.source = np.fromstring(v, sep=",")
            elif k == "target":
                self.target = np.fromstring(v, sep=",")
        self.direction = self.target - self.source

    def __repr__(self):
        return "\n".join([
            "source %25s " % self.source,
            "target %25s " % self.target,
            "direction %25s " % self.direction
        ])

    def __str__(self):
        return "\n".join(["%20s : %s " % (k, v) for k, v in self.config])
class Test(object):
    """Test-geometry configuration: builds Box/Prism shapes from a config
    string of repeated shape/boundary/parameters entries."""

    def __init__(self, config):
        self.config = kv_(config)
        shapes = []
        boundaries = []
        parameters = []
        # Collect the three parallel attribute lists from the config pairs.
        for key, val in self.config:
            if key == "shape":
                shapes.append(val)
            elif key == "boundary":
                boundaries.append(val)
            elif key == "parameters":
                parameters.append(val)
        assert len(shapes) == len(boundaries) == len(parameters)
        # Instantiate one shape object per (name, parameters, boundary) triple.
        self.shapes = []
        for name, par, bnd in zip(shapes, parameters, boundaries):
            if name == "box":
                self.shapes.append(Box(par, bnd))
            elif name == "prism":
                self.shapes.append(Prism(par, bnd))
            else:
                assert 0

    def __str__(self):
        return "\n".join(map(str, self.shapes))

    def __repr__(self):
        return "\n".join(["%20s : %s " % (k, v) for k, v in self.config])
if __name__ == '__main__':
    #print "\n".join(sys.argv)
    # Parse the ggv-style CLI flags, then build the torch/test wrappers
    # from their config strings and dump them for manual inspection.
    args = parse_args()
    torch = Torch(args.torchconfig)
    test = Test(args.testconfig)
    # Last shape listed in the test config.
    sh = test.shapes[-1]
    # NOTE: Python 2 print statements — this script predates Python 3.
    print "torch:\n", torch
    print repr(torch)
    print "test:\n", test
    print "sh:\n", sh
|
nilq/baby-python
|
python
|
class Frame:
    """Base container for a single demuxed media frame."""

    def __init__(self, size):
        self.type = None        # audio/video discriminator
        self.data = None        # raw frame payload
        self.codec = None       # codec identifier
        self.time_stamp = 0     # timestamp of the frame
        self.size = size        # payload size in bytes
class AudioFrame(Frame):
    """Audio frame.

    BUG FIX: the original __init__ called super().__init__() without the
    required `size` argument, so constructing an AudioFrame always raised
    TypeError (and pointlessly returned the result of __init__). `size`
    now defaults to 0 so the old zero-argument call keeps working.
    """

    def __init__(self, size=0):
        super().__init__(size)
class VideoFrame(Frame):
    """Video frame whose payload is read from `reader` at construction."""

    def __init__(self, size, reader):
        super().__init__(size)
        # Pull the raw payload off the stream immediately.
        payload = reader.readn(size)
        self.data = payload
|
nilq/baby-python
|
python
|
import traceback
import math
import numpy as np
import pandas as pd
from .CostModule import CostModule
class CollectionCost(CostModule):
"""
Assumptions:
1. System contains central inverters of 1 MW rating each. The inverter being
considered is a containerized solution which includes a co-located LV/MV
transformer.
2. PV array is rectangular in design, with an aspect ratio of 1.5:1::L:W
3. Trench for buried cables from each string inverter runs along the perimeter
of the system, and up till the combiner box placed at one of the 4 corners of the
array.
Shown below is a crude visualization of solar farm floor-plan considered in
SolarBOSSE. As mentioned above, the aspect ratio of this solar farm is assumed
to be 1.5:1::L:W. This is a simple, over-generalization of-course, given that it
is the 1st version of SolarBOSSE (v.1.0.0). This model is being designed in such
a way that any future interest to allow the user design project layout will be
possible.
Key:
||| - 3 phase HV power cables (gen-tie)
|| - main project road; assumed to have 20+ ton bearing capacity. Also contains
trench along both sides of the road for output circuit cables (DC), as well
as MV power cables from each inverter station going all the way to the
substation.
=== - horizontal road running across the width of project land. Assumed to be of
lower quality than the main project road, and not meant to support cranes.
Smaller maintenance vehicles (like Ford F-150 permissible).
[gen-tie to utility substation/point of interconnection]
|||
|||
|||
|||
________ |||
_____________|inverter|__|||____
| ||-------| |
| || |substation|
| || | |
| || |__________|
| || |
| || |
| ||________ |
| ||inverter| |
|============||==================|
| || |
| || |
| || |
| || |
| || |
| || |
| ||________ |
| ||inverter| |
|============||==================|
| || |
| || |
| || |
| || |
| || |
| ||________ |
| ||inverter| |
|============||==================|
| || |
| || |
| || |
| || |
| || |
|____________||__________________|
Module to calculate:
1. Wiring requirements of system. This includes:
a. Source circuit cabling (from string to combiner box located at end of each
row). The combiner box capacity (number of strings per box) is a user input.
b. Output circuit; from each combiner box to that string's inverter station.
c. Power cable home run; from inverter/transformer station (where it is
transformed to MV) to the plant's substation which is located at the long end
of the plant.
"""
    def __init__(self, input_dict, output_dict, project_name):
        """Store the shared project dictionaries and unit-conversion constants.

        Args:
            input_dict: project inputs (sizes, module/inverter specs, costs).
            output_dict: shared dict this module writes intermediate results to.
            project_name: name of the project being costed.
        """
        super(CollectionCost, self).__init__(input_dict, output_dict, project_name)
        self.input_dict = input_dict
        self.output_dict = output_dict
        self.project_name = project_name

        # Unit conversions.
        self.m2_per_acre = 4046.86       # square meters per acre
        self.inch_to_m = 0.0254          # meters per inch
        self.m_to_lf = 3.28084           # linear feet per meter
        self._km_to_LF = 3.28084 * 1000  # linear feet per kilometer

        # Max allowable voltage drop (VD%) in circuits
        self.allowable_vd_percent = 3 / 100

        # Specific resistivity of copper between 25 and 50 deg C:
        self.Cu_specific_resistivity = 11
def land_dimensions(self):
"""
Given user defined project area, and assumed aspect ratio of 1.5:1, calculate
solar farm's length and width (in m)
"""
land_area_acres = self.input_dict['site_prep_area_acres']
land_area_m2 = land_area_acres * self.m2_per_acre
# Determine width & length of project land respectively:
land_width_m = (land_area_m2 / 1.5) ** 0.5
self.output_dict['land_width_m'] = land_width_m
land_length_m = 1.5 * land_width_m
return land_length_m, land_width_m
def get_quadrant_dimensions(self):
"""
1 inverter for every 1 MW_DC worth of panels. Super imposing the project layout
on a cartesian plane, the main project road (along the long edge of the land)
is at x = 0. And the souther most part of the project land is at y = 0. The
area covering each unit MW_DC worth of land will be referred to as a quadrant.
y
|
|
(-x) ------|----- x
|
|
(-y)
"""
# Get length and width of each quadrant:
land_area_acres = self.input_dict['site_prep_area_acres_mw_dc']
land_area_per_inverter_acres = land_area_acres * \
(self.input_dict['inverter_rating_kW'] / 1000)
land_area_m2 = land_area_per_inverter_acres * self.m2_per_acre
# Determine width & length of project land respectively:
land_width_m = self.output_dict['land_width_m']
subarray_width_m = land_width_m / 2
self.output_dict['subarray_width_m'] = subarray_width_m
land_length_m = land_area_m2 / land_width_m
return land_length_m, land_width_m
def inverter_list(self):
"""
Return a tuple of inverters in the project
"""
# Get number of inverters in the project
# dividing by 150 because that's the upper limit on the size of 1 region.
# Where 1 region is the max size of PV array that the collection module
# runs for. If the project size is greater than size of region,
# SolarBOSSE runs the collection cost module
# (floor(project_size / region) + 1) times.
if self.input_dict['system_size_MW_DC'] > 150:
number_of_inverters = 150
else:
number_of_inverters = self.input_dict['system_size_MW_DC']
inverter_list = [n for n in range(round(number_of_inverters))]
self.output_dict['inverter_list'] = inverter_list
return inverter_list
def number_panels_along_x(self):
"""
Assuming portrait orientation of modules, with 2 modules stacked end-to-end.
"""
subarray_width_m = self.output_dict['subarray_width_m']
# Adding 1 inch for mid clamp:
panel_width_m = self.input_dict['module_width_m'] + self.inch_to_m
number_panels_along_x = math.floor(subarray_width_m / panel_width_m)
return number_panels_along_x
def number_rows_per_subquadrant(self):
"""
2 sub-quadrants per quadrant; one sub-quadrant on either side of the main
project road. 2 sub arrays per quadrant; accordingly, 1 sub-array per
sub-quadrant. And each sub-quadrant is rated for half of quadrant's DC
rating.
"""
module_rating_W = self.input_dict['module_rating_W']
# multiplied by 2 since 2 modules end-to-end in portrait orientation
single_row_rating_W = 2 * self.number_panels_along_x() * module_rating_W
# Since each quadrant is sized according to inverter rating (DC)
inverter_rating_W = self.input_dict['inverter_rating_kW'] * 1000 * \
self.input_dict['dc_ac_ratio']
num_rows_sub_quadrant = math.floor((inverter_rating_W / 2) / single_row_rating_W)
return num_rows_sub_quadrant
def number_modules_per_string(self):
"""
Calculate number of modules per string based on module V_oc and inverter max
MPPT DC voltage
"""
number_modules_per_string = math.floor(self.input_dict['inverter_max_mppt_V_DC'] /
self.input_dict['module_V_oc'])
# string open circuit voltage (used later in VD% calculations):
self.output_dict['string_V_oc'] = number_modules_per_string * \
self.input_dict['module_V_oc']
return number_modules_per_string
def num_strings_per_row(self):
"""
Combined number of strings from both sub rows
"""
number_panels_along_x = self.number_panels_along_x()
# Multiplying by 2 since there are 2 sub rows per row
num_strings_per_row = 2 * math.floor(number_panels_along_x /
self.number_modules_per_string())
return num_strings_per_row
    def distance_to_combiner_box(self, number_of_strings):
        """
        Cumulative source-circuit length (m) to the combiner box for one
        sub-row.

        Sums, over all strings in ONE of a row's two sub-rows, the distance
        from each string back to the combiner box at the row's end. Each row
        has 2 panels in portrait orientation stacked end-to-end, so multiply
        the result by 2 to get the total for the entire row.

        Args:
            number_of_strings: strings in the whole row (both sub-rows).
        """
        distance_to_combiner_box = 0  # initialize running total (m)
        number_modules_per_string = self.number_modules_per_string()
        # Get module length (plus 1" width of mid clamp):
        module_width_m = self.input_dict['module_width_m'] + self.inch_to_m
        number_of_strings_per_sub_row = int(number_of_strings / 2)
        for i in range(number_of_strings_per_sub_row):
            if 0 == i:
                # Distance of terminal module in 1st string from combiner box:
                distance_to_combiner_box = (i + 1) * module_width_m * \
                                           number_modules_per_string
                # `adder` tracks the offset of the next string's first module.
                adder = distance_to_combiner_box + module_width_m
            else:
                # Where adder is the first module in subsequent strings
                distance_to_combiner_box += adder + ((i + 1) * module_width_m *
                                                     number_modules_per_string)
                adder = ((i + 1) * module_width_m * number_modules_per_string) + \
                        module_width_m
        return distance_to_combiner_box
def source_circuit_wire_length_lf(self,
num_strings_per_row,
number_rows_per_subquadrant):
"""
Determine total source circuit wire length for each quadrant
"""
distance_to_combiner_box_per_row = \
self.distance_to_combiner_box(num_strings_per_row)
# Multiply by 2 since there are 2 sets of rows in a quadrant:
source_circuit_wire_length_m = distance_to_combiner_box_per_row * \
number_rows_per_subquadrant * 2
source_circuit_wire_length_lf = source_circuit_wire_length_m * self.m_to_lf
return source_circuit_wire_length_lf
def source_circuit_wire_length_total_lf(self, source_circuit_wire_length_lf,
num_quadrants):
"""
Returns combined source circuit wire length for all quadrants combined. This
includes length of wire in each sub row of each sub quadrant.
Accordingly, length of wire for both sub rows of every row, and both sub
quadrants of a quadrant has been accounted for up till this point.
"""
source_circuit_wire_length_total_lf = \
source_circuit_wire_length_lf * num_quadrants
self.output_dict['source_circuit_wire_length_total_lf'] = \
source_circuit_wire_length_total_lf
return source_circuit_wire_length_total_lf
def pv_wire_cost(self, system_size_MW_DC, circuit_type, circuit_amps):
"""
Empirical curve fit of pv wire cost ($/LF) for AWG #10 wire or smaller.
"""
if system_size_MW_DC > 500:
volume_order_discount_multiplier = 0.50 # 25 % discount (volume pricing)
elif system_size_MW_DC > 300:
volume_order_discount_multiplier = 0.70 # 25 % discount (volume pricing)
elif system_size_MW_DC > 150:
volume_order_discount_multiplier = 0.75 # 25 % discount (volume pricing)
elif system_size_MW_DC > 50:
volume_order_discount_multiplier = 0.80 # 20 % discount (volume pricing)
elif system_size_MW_DC > 20:
volume_order_discount_multiplier = 0.90
else:
volume_order_discount_multiplier = 1
pv_wire_DC_specs = self.input_dict['pv_wire_DC_specs']
if circuit_type is 'source_circuit':
cost_usd_lf = pv_wire_DC_specs.loc[
pv_wire_DC_specs['Size (AWG or kcmil)'] == 10, 'Cost (USD/LF)']
cost_usd_lf = cost_usd_lf.iloc[0]
elif circuit_type is 'output_circuit':
if circuit_amps >= 175:
cost_usd_lf = \
pv_wire_DC_specs.loc[
pv_wire_DC_specs['Temperature Rating of Conductor at 75°C ' \
'(167°F) in Amps'] == 175, 'Cost (USD/LF)']
else:
cost_usd_lf = \
pv_wire_DC_specs.loc[
pv_wire_DC_specs['Temperature Rating of Conductor at 75°C ' \
'(167°F) in Amps'] == 150, 'Cost (USD/LF)']
cost_usd_lf = cost_usd_lf.iloc[0]
pv_wire_cost = cost_usd_lf * volume_order_discount_multiplier # $/LF
return pv_wire_cost
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><
# Output circuit calculations:
def number_strings_quadrant(self, num_strings_per_row, num_rows_per_subquadrant):
"""
Get number of strings in each quadrant
"""
number_strings_quadrant = num_strings_per_row * num_rows_per_subquadrant * 2
return number_strings_quadrant
def num_strings_parallel(self, num_strings_per_row):
"""
Starting with the highest allowable number of strings in parallel as possible.
This is to ensure highest possible output circuit ampacity, which would lead
to lowest possible max allowable circuit resistance.
"""
if num_strings_per_row > 24:
num_strings_parallel = 24
else:
num_strings_parallel = num_strings_per_row
return int(num_strings_parallel)
def output_circuit_ampacity(self, num_strings_in_parallel):
"""
"""
string_short_circuit_current = self.input_dict['module_I_SC_DC']
# Consider 25% safety factor for over irradiance / over-current scenarios
over_current_factor = 1.25
output_circuit_ampacity = over_current_factor * \
string_short_circuit_current * \
num_strings_in_parallel
return output_circuit_ampacity
def row_spacing_m(self, quadrant_length_m, number_rows_per_subquadrant):
"""
"""
row_spacing_m = quadrant_length_m / number_rows_per_subquadrant
return row_spacing_m
def voltage_drop_V(self):
"""
Returns maximum allowable Voltage drop (in V) in an output circuit based on
NEC guidelines.
"""
voltage_drop_V = self.allowable_vd_percent * self.output_dict['string_V_oc']
return voltage_drop_V
def VD_passes(self,
circuit_length_m,
wire_R_per_kft,
max_VD,
output_circuit_ampacity):
"""
Once the wire has been picked based on its ampacity, call this method to
check whether the VD from using this wire exceeds 3%
"""
R = wire_R_per_kft * (1 / 1000) * (circuit_length_m * self.m_to_lf)
VD = R * output_circuit_ampacity
if VD > max_VD:
return False
else:
return True
def circular_mils_area(self, circuit_length, current, VD):
"""
Calculates the wire's circ mils area. This will help in selecting wire
appropriate for wiring (based on its ampacity and ohms/kFT)
"""
circular_mills_area = (circuit_length * self.Cu_specific_resistivity *
current) / VD
return circular_mills_area
def estimate_construction_time(self):
    """
    Estimate collection-system construction time and per-operation costs.

    Reads the 'construction_estimator' sheet from input_dict, computes
    days/crews/costs for trenching, source-circuit wiring and
    output-circuit wiring, and stores intermediate results in output_dict.

    Returns
    -------
    pd.DataFrame
        The merged per-operation data (also stored as
        output_dict['operation_data_entire_farm']).
    """
    # assumes collection construction occurs for 45 % of project duration
    collection_construction_time = self.input_dict[
        'construction_time_months'] * 0.45
    throughput_operations = self.input_dict['construction_estimator']
    trench_length_km = self.output_dict['trench_length_km']
    # where() keeps the frame shape and NaNs non-matching rows;
    # dropna(thresh=4) then discards rows that are (almost) all NaN.
    operation_data = throughput_operations.where(
        throughput_operations['Module'] == 'Collection').dropna(thresh=4)
    source_wiring_operations = throughput_operations.where(
        throughput_operations['Module'] == 'Source circuit wiring').dropna(thresh=4)
    output_wiring_operations = throughput_operations.where(
        throughput_operations['Module'] == 'Output circuit wiring').dropna(thresh=4)
    # from construction_estimator data, only read in Collection related data and
    # filter out the rest:
    cable_trenching = throughput_operations[throughput_operations.Module == 'Collection']
    source_wiring = throughput_operations[throughput_operations.Module == 'Source circuit wiring']
    output_wiring = throughput_operations[throughput_operations.Module == 'Output circuit wiring']
    # Storing data with labor related inputs:
    # NOTE(review): `frame[frame.values == 'Labor']` masks on a 2-D boolean
    # array (cell-wise), not row-wise -- confirm the construction_estimator
    # sheet layout makes this select the intended rows.
    trenching_labor = cable_trenching[cable_trenching.values == 'Labor']
    trenching_labor_usd_per_hr = trenching_labor['Rate USD per unit'].sum()
    self.output_dict['trenching_labor_usd_per_hr'] = trenching_labor_usd_per_hr
    # Units: LF/day -> where LF = Linear Foot
    trenching_labor_daily_output = trenching_labor['Daily output'].values[0]
    # NOTE(review): computed but never used below.
    trenching_labor_num_workers = trenching_labor['Number of workers'].sum()
    # Get labor daily output for source circuit wiring:
    source_wiring_labor = source_wiring[source_wiring.Module == 'Source circuit wiring']
    source_circuit_daily_output = source_wiring_labor.loc[
        source_wiring_labor['Operation ID'] == 'Source circuit wiring', 'Daily output']
    source_circuit_daily_output = source_circuit_daily_output.iloc[0]
    self.output_dict['source_circuit_daily_output'] = source_circuit_daily_output
    # Get labor daily output for output circuit wiring:
    output_wiring_labor = output_wiring[output_wiring.Module == 'Output circuit wiring']
    output_circuit_daily_output = output_wiring_labor.loc[
        output_wiring_labor['Operation ID'] == 'Output circuit wiring', 'Daily output']
    output_circuit_daily_output = output_circuit_daily_output.iloc[0]
    self.output_dict['output_circuit_daily_output'] = output_circuit_daily_output
    # Storing data with equipment related inputs:
    trenching_equipment = cable_trenching[cable_trenching.values == 'Equipment']
    trenching_cable_equipment_usd_per_hr = trenching_equipment['Rate USD per unit'].sum()
    self.output_dict['trenching_cable_equipment_usd_per_hr'] = \
        trenching_cable_equipment_usd_per_hr
    # Units: LF/day -> where LF = Linear Foot
    trenching_equipment_daily_output = trenching_equipment['Daily output'].values[0]
    self.output_dict['trenching_labor_daily_output'] = trenching_labor_daily_output
    self.output_dict['trenching_equipment_daily_output'] = trenching_equipment_daily_output
    # Days for a single crew = total trench length (LF) / daily output (LF/day).
    operation_data['Number of days taken by single crew'] = \
        ((trench_length_km * self._km_to_LF) / trenching_labor_daily_output)
    operation_data['Number of crews'] = \
        np.ceil((operation_data['Number of days taken by single crew'] / 30) /
                collection_construction_time)
    operation_data['Cost USD without weather delays'] = \
        ((trench_length_km * self._km_to_LF) / trenching_labor_daily_output) * \
        (operation_data['Rate USD per unit'] * self.input_dict['hour_day'])
    # Repeat above steps, for cost of source circuit wiring
    source_wiring_operations['Number of days taken by single crew'] = \
        self.output_dict['source_circuit_wire_length_total_lf'] / source_circuit_daily_output
    source_wiring_operations['Number of crews'] = \
        np.ceil((source_wiring_operations['Number of days taken by single crew'] / 30) /
                collection_construction_time)
    source_wiring_operations['Cost USD without weather delays'] = \
        self.output_dict['source_circuit_wire_length_total_lf'] * \
        source_wiring_operations['Rate USD per unit']
    self.output_dict['source_wiring_USD_lf'] = \
        source_wiring_operations['Rate USD per unit'].iloc[0]
    # Repeat above steps, for cost of output circuit wiring
    output_wiring_operations['Number of days taken by single crew'] = \
        self.output_dict['output_circuit_wire_length_total_lf'] / output_circuit_daily_output
    output_wiring_operations['Number of crews'] = \
        np.ceil((output_wiring_operations['Number of days taken by single crew'] / 30) /
                collection_construction_time)
    output_wiring_operations['Cost USD without weather delays'] = \
        self.output_dict['output_circuit_wire_length_total_lf'] * \
        output_wiring_operations['Rate USD per unit']
    self.output_dict['output_wiring_USD_lf'] = \
        output_wiring_operations['Rate USD per unit'].iloc[0]
    # Keep only the labor rows' day/crew/worker columns for reporting.
    alpha = operation_data[operation_data['Type of cost'] == 'Labor']
    operation_data_id_days_crews_workers = alpha[['Operation ID',
                                                  'Number of days taken by single crew',
                                                  'Number of crews',
                                                  'Number of workers']]
    source_wiring_alpha = source_wiring_operations[source_wiring_operations['Type of cost'] == 'Labor']
    source_wiring_id_days_crews_workers = source_wiring_alpha[['Operation ID',
                                                               'Number of days taken by single crew',
                                                               'Number of crews',
                                                               'Number of workers']]
    output_wiring_alpha = output_wiring_operations[output_wiring_operations['Type of cost'] == 'Labor']
    output_wiring_id_days_crews_workers = output_wiring_alpha[['Operation ID',
                                                               'Number of days taken by single crew',
                                                               'Number of crews',
                                                               'Number of workers']]
    operation_data_id_days_crews_workers = pd.merge(operation_data_id_days_crews_workers,
                                                    source_wiring_id_days_crews_workers,
                                                    how='outer')
    operation_data_id_days_crews_workers = pd.merge(operation_data_id_days_crews_workers,
                                                    output_wiring_id_days_crews_workers,
                                                    how='outer')
    operation_data = pd.merge(operation_data, source_wiring_operations, how='outer')
    operation_data = pd.merge(operation_data, output_wiring_operations, how='outer')
    # if more than one crew needed to complete within construction duration then
    # assume that all construction happens within that window and use that timeframe
    # for weather delays;
    # if not, use the number of days calculated
    operation_data['time_construct_bool'] = \
        operation_data['Number of days taken by single crew'] > \
        (collection_construction_time * 30)
    boolean_dictionary = {True: collection_construction_time * 30, False: np.NAN}
    operation_data['time_construct_bool'] = \
        operation_data['time_construct_bool'].map(boolean_dictionary)
    operation_data['Time construct days'] = \
        operation_data[['time_construct_bool',
                        'Number of days taken by single crew']].min(axis=1)
    self.output_dict['num_days'] = operation_data['Time construct days'].max()
    # NOTE(review): key is (mis)spelled 'managament...' here and in
    # calculate_costs(); renaming it in one place only would break the other.
    self.output_dict['managament_crew_cost_before_wind_delay'] = 0
    self.output_dict['operation_data_id_days_crews_workers'] = \
        operation_data_id_days_crews_workers
    self.output_dict['operation_data_entire_farm'] = operation_data
    return self.output_dict['operation_data_entire_farm']
def calculate_costs(self):
    """
    Assemble the total collection-system cost for the project.

    Uses the per-operation data produced by estimate_construction_time()
    plus trenching/wiring rates from the IO dictionaries, and builds
    output_dict['total_collection_cost_df'] with equipment, labor,
    materials and mobilization line items.

    Returns
    -------
    float
        Total collection cost in USD (sum of the 'Cost USD' column),
        also stored as output_dict['total_collection_cost'].
    """
    operation_data = self.output_dict['operation_data_entire_farm']

    # Per diem: workers * crews * (work days + one extra day per week).
    per_diem = operation_data['Number of workers'] * \
               operation_data['Number of crews'] * \
               (operation_data['Time construct days'] +
                np.ceil(operation_data['Time construct days'] / 7)) * \
               self.input_dict['construction_estimator_per_diem']
    per_diem = per_diem.dropna()

    self.output_dict['time_construct_days'] = \
        (self.output_dict['trench_length_km'] * self._km_to_LF) / \
        self.output_dict['trenching_labor_daily_output']
    # weather based delays not yet implemented in SolarBOSSE
    self.output_dict['wind_multiplier'] = 1  # Placeholder

    # Calculating trenching (equipment) cost:
    self.output_dict['Days taken for trenching (equipment)'] = \
        (self.output_dict['trench_length_km'] * self._km_to_LF) / \
        self.output_dict['trenching_equipment_daily_output']
    self.output_dict['Equipment cost of trenching per day {usd/day)'] = \
        self.output_dict['trenching_cable_equipment_usd_per_hr'] * \
        self.input_dict['hour_day']
    self.output_dict['Equipment Cost USD without weather delays'] = \
        self.output_dict['Days taken for trenching (equipment)'] * \
        self.output_dict['Equipment cost of trenching per day {usd/day)']
    self.output_dict['Equipment Cost USD with weather delays'] = \
        self.output_dict['Equipment Cost USD without weather delays'] * \
        self.output_dict['wind_multiplier']
    trenching_equipment_rental_cost_df = \
        pd.DataFrame([['Equipment rental',
                       self.output_dict['Equipment Cost USD with weather delays'],
                       'Collection']],
                     columns=['Type of cost',
                              'Cost USD',
                              'Phase of construction'])

    # Calculating trenching labor cost:
    self.output_dict['Days taken for trenching (labor)'] = \
        ((self.output_dict['trench_length_km'] * self._km_to_LF) /
         self.output_dict['trenching_labor_daily_output'])
    self.output_dict['days_taken_source_wiring'] = \
        self.output_dict['source_circuit_wire_length_total_lf'] / \
        self.output_dict['source_circuit_daily_output']
    self.output_dict['days_taken_output_wiring'] = \
        self.output_dict['output_circuit_wire_length_total_lf'] / \
        self.output_dict['output_circuit_daily_output']
    self.output_dict['Labor cost of trenching per day (usd/day)'] = \
        (self.output_dict['trenching_labor_usd_per_hr'] *
         self.input_dict['hour_day'] *
         self.input_dict['overtime_multiplier'])
    self.output_dict['Labor cost of source wiring per day (usd/day)'] = \
        (self.output_dict['source_circuit_daily_output'] *
         self.output_dict['source_wiring_USD_lf'] *
         self.input_dict['overtime_multiplier'])
    self.output_dict['Labor cost of output wiring per day (usd/day)'] = \
        (self.output_dict['output_circuit_daily_output'] *
         self.output_dict['output_wiring_USD_lf'] *
         self.input_dict['overtime_multiplier'])
    self.output_dict['Total per diem costs (USD)'] = per_diem.sum()
    self.output_dict['Labor Cost USD without weather delays'] = \
        ((self.output_dict['Days taken for trenching (labor)'] *
          self.output_dict['Labor cost of trenching per day (usd/day)']
          ) +
         (self.output_dict['Labor cost of source wiring per day (usd/day)'] *
          self.output_dict['days_taken_source_wiring']
          ) +
         (self.output_dict['Labor cost of output wiring per day (usd/day)'] *
          self.output_dict['days_taken_output_wiring']
          ) +
         (self.output_dict['Total per diem costs (USD)'] +
          self.output_dict['managament_crew_cost_before_wind_delay']
          ))
    self.output_dict['Labor Cost USD with weather delays'] = \
        self.output_dict['Labor Cost USD without weather delays'] * \
        self.output_dict['wind_multiplier']
    trenching_labor_cost_df = pd.DataFrame([['Labor',
                                             self.output_dict['Labor Cost USD with weather delays'],
                                             'Collection']],
                                           columns=['Type of cost',
                                                    'Cost USD',
                                                    'Phase of construction'])

    # Calculate cable cost:
    cable_cost_usd_per_LF_df = pd.DataFrame([['Materials',
                                              self.output_dict['total_material_cost'],
                                              'Collection']],
                                            columns=['Type of cost',
                                                     'Cost USD',
                                                     'Phase of construction'])

    # Calculate Mobilization Cost: empirical power-law multipliers on
    # system size applied to materials, equipment and labor.
    equip_material_mobilization_multiplier = \
        0.16161 * (self.input_dict['system_size_MW_DC'] ** (-0.135))
    material_mobilization_USD = self.output_dict['total_material_cost'] * \
                                equip_material_mobilization_multiplier
    equipment_mobilization_USD = \
        self.output_dict['Equipment Cost USD with weather delays'] * \
        equip_material_mobilization_multiplier
    labor_mobilization_multiplier = \
        1.245 * (self.input_dict['system_size_MW_DC'] ** (-0.367))
    labor_mobilization_USD = \
        self.output_dict['Labor Cost USD with weather delays'] * \
        labor_mobilization_multiplier
    collection_mobilization_usd = material_mobilization_USD + \
                                  equipment_mobilization_USD + \
                                  labor_mobilization_USD
    mobilization_cost = pd.DataFrame([['Mobilization',
                                       collection_mobilization_usd,
                                       'Collection']],
                                     columns=['Type of cost',
                                              'Cost USD',
                                              'Phase of construction'])

    # Combine all calculated cost items into the 'collection_cost' frame.
    # DataFrame.append was removed in pandas 2.0; pd.concat preserves the
    # same row order (and, like append, the original per-frame indices).
    collection_cost = pd.concat([trenching_equipment_rental_cost_df,
                                 trenching_labor_cost_df,
                                 cable_cost_usd_per_LF_df,
                                 mobilization_cost])

    self.output_dict['total_collection_cost_df'] = collection_cost
    self.output_dict['total_collection_cost'] = collection_cost['Cost USD'].sum()
    return self.output_dict['total_collection_cost']
def run_module_for_150_MW(self):
    """
    Runs the CollectionCost module (for a region of at most 150 MW) and
    populates the IO dictionaries with calculated values: source/output
    circuit wire lengths, trench length, material cost, construction time
    and total collection cost.

    Parameters
    ----------
    <None>

    Returns
    -------
    <None> -- results are stored in self.output_dict.
    """
    # l = length ; w = width
    project_l_m, project_w_m = self.land_dimensions()
    l, w = self.get_quadrant_dimensions()
    num_quadrants = len(self.inverter_list())
    number_rows_per_subquadrant = self.number_rows_per_subquadrant()
    num_strings_per_row = self.num_strings_per_row()
    source_circuit_wire_length_lf =\
        self.source_circuit_wire_length_lf(num_strings_per_row,
                                           number_rows_per_subquadrant)
    source_circuit_wire_length_total_lf = \
        self.source_circuit_wire_length_total_lf(source_circuit_wire_length_lf,
                                                 num_quadrants)
    self.output_dict['source_circuit_wire_length_total_lf'] = \
        source_circuit_wire_length_total_lf
    # Begin output circuit calculations:
    num_strings_per_quadrant = \
        self.number_strings_quadrant(num_strings_per_row,
                                     number_rows_per_subquadrant)
    num_strings_parallel = self.num_strings_parallel(num_strings_per_row)
    row_spacing_m = self.row_spacing_m(l, number_rows_per_subquadrant)
    # make a list of rows in each quadrant:
    all_rows = [n for n in range(number_rows_per_subquadrant)]
    # NOTE(review): this aliases all_rows (no copy); the loop below
    # overwrites each entry, which also mutates all_rows. Harmless here
    # because the loop variable was already produced by range().
    row_out_circuit_length_m = all_rows
    # starting with the bottom-most row in a quadrant (which is also the
    # farthest row from the inverter.
    total_out_circuit_length_m = 0  # Initialize
    for row in all_rows:
        # Distance from this row to the inverter, times 2 for the round trip.
        row_inverter_distance_m = ((number_rows_per_subquadrant - 1) - row) * \
                                  row_spacing_m
        row_out_circuit_length_m[row] = row_inverter_distance_m * 2
        total_out_circuit_length_m += row_out_circuit_length_m[row]
    # total output circuit length for quadrant (2 sub quadrants per quadrant):
    TOC_length_quadrant_m = total_out_circuit_length_m * 2
    # Total output circuit length for entire farms (all quadrants combined):
    output_circuit_wire_length_total_lf = \
        TOC_length_quadrant_m * self.m_to_lf * num_quadrants
    self.output_dict[
        'output_circuit_wire_length_total_lf'] = output_circuit_wire_length_total_lf
    # Trench length for project (all quadrants combined):
    self.output_dict['trench_length_km'] = (project_l_m / 1000) * 2  # 2 trenches
    # Series of methods to select the right cable for output circuit:
    # Not using this set of implementations for now. That is, I'm assuming the
    # cable selected based solely on circuit ampacity also satisfies the 3 %
    # VD (max) requirement.
    # longest_output_circuit_m = row_out_circuit_length_m[0]
    # max_voltage_drop_V = self.voltage_drop_V()
    # self.VD_passes(longest_output_circuit_m, max_voltage_drop_V,
    #                output_circuit_ampacity)
    output_circuit_ampacity = self.output_circuit_ampacity(num_strings_parallel)
    # Material cost = wire length times per-LF wire cost for each circuit type.
    total_material_cost = source_circuit_wire_length_total_lf * \
        self.pv_wire_cost(self.input_dict['system_size_MW_DC'],
                          'source_circuit',
                          self.input_dict['module_I_SC_DC'])
    total_material_cost += TOC_length_quadrant_m * self.m_to_lf * num_quadrants * \
        self.pv_wire_cost(self.input_dict['system_size_MW_DC'],
                          'output_circuit',
                          output_circuit_ampacity)
    self.output_dict['total_material_cost'] = total_material_cost
    self.estimate_construction_time()
    self.output_dict['total_collection_cost'] = self.calculate_costs()
def run_module(self):
    """
    Module entry point. Projects larger than 150 MW are split into 150 MW
    regions (plus a fractional remainder region); run_module_for_150_MW()
    is run per region and the per-region collection costs are summed.

    Returns
    -------
    tuple
        First element of tuple contains a 0 or 1. 0 means no errors happened
        and 1 means an error happened and the module failed to run. The second
        element either returns a 0 if the module ran successfully, or it returns
        the error raised that caused the failure.
    """
    try:
        # Saved so the per-region mutation below can be undone afterwards.
        original_site_prep_area_acres = self.input_dict['site_prep_area_acres']
        regions_list = []
        region_iter = 0
        total_collection_cost = 0
        if self.input_dict['system_size_MW_DC'] > 150:
            site_prep_area_regions = self.input_dict['system_size_MW_DC'] / 150
            fraction_site_prep_area_regions = site_prep_area_regions - \
                math.floor(site_prep_area_regions)
            region_iter = math.floor(site_prep_area_regions)
            for i in range(region_iter):
                regions_list.append(150)  # Stores size (in MW) of the region
            if fraction_site_prep_area_regions > 0:
                regions_list.append(fraction_site_prep_area_regions * 150)
            for region in regions_list:
                # Should be site_prep_area_acres_mw_dc and not site_prep_area_acres_mw_ac
                self.input_dict['site_prep_area_acres'] = \
                    self.input_dict['site_prep_area_acres_mw_ac'] * region
                self.run_module_for_150_MW()
                total_collection_cost += self.output_dict['total_collection_cost']
        else:
            self.run_module_for_150_MW()
            total_collection_cost += self.output_dict['total_collection_cost']
        # Restore the caller-visible input mutated inside the loop above.
        self.input_dict['site_prep_area_acres'] = original_site_prep_area_acres
        self.output_dict['total_collection_cost'] = total_collection_cost
        # self.output_dict['total_collection_cost'] = 65153571
        return 0, 0  # module ran successfully
    except Exception as error:
        traceback.print_exc()
        print(f"Fail {self.project_name} CollectionCost")
        self.input_dict['error']['CollectionCost'] = error
        return 1, error  # module did not run successfully
|
nilq/baby-python
|
python
|
# Copyright (c) 2013 The SAYCBridge Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from z3b import enum
import core.suit as suit
import z3
# Honor cards tracked per suit, paired index-wise with their
# high-card-point values (A=4, K=3, Q=2, J=1, T=0).
_honor_names = ('ace', 'king', 'queen', 'jack', 'ten')
_honor_values = (4, 3, 2, 1, 0)
def _honor_vars(suit):
    """Return the five z3 Int honor-presence variables (ace..ten) for *suit*.

    Returns a list rather than a lazy ``map`` object so callers may iterate
    the result more than once -- constrain_honors_expr() does exactly that,
    and under Python 3 a ``map`` would be exhausted after the first pass.
    """
    name_format = ("{}_of_" + suit.name.lower()).format
    return [z3.Int(name_format(honor)) for honor in _honor_names]
def _suit_count_var(suit):
    """Return the z3 Int variable counting the cards held in *suit*."""
    var_name = suit.name.lower()
    return z3.Int(var_name)
# One z3 Int per suit holding the number of cards held in that suit.
clubs, diamonds, hearts, spades = map(_suit_count_var, suit.SUITS)
def expr_for_suit(suit):
    """Map *suit* to its card-count z3 variable (indexed by suit.index)."""
    count_vars = (clubs, diamonds, hearts, spades)
    return count_vars[suit.index]
# Per-suit honor presence variables (constrained to 0/1 by
# constrain_honors_expr() via the axioms below).
ace_of_spades, king_of_spades, queen_of_spades, jack_of_spades, ten_of_spades = _honor_vars(suit.SPADES)
ace_of_hearts, king_of_hearts, queen_of_hearts, jack_of_hearts, ten_of_hearts = _honor_vars(suit.HEARTS)
ace_of_diamonds, king_of_diamonds, queen_of_diamonds, jack_of_diamonds, ten_of_diamonds = _honor_vars(suit.DIAMONDS)
ace_of_clubs, king_of_clubs, queen_of_clubs, jack_of_clubs, ten_of_clubs = _honor_vars(suit.CLUBS)
# Hand-strength totals.
high_card_points, points, playing_points = z3.Ints('high_card_points points playing_points')
# Points counting shortness bonuses when supporting each trump suit
# (defined by the z3.Or support-point axioms below).
points_supporting_spades, points_supporting_hearts, points_supporting_diamonds, points_supporting_clubs = z3.Ints(
    'points_supporting_spades points_supporting_hearts points_supporting_diamonds points_supporting_clubs')
# 0/1 shortness indicators per suit, defined by named_count_expr().
void_in_spades, void_in_hearts, void_in_diamonds, void_in_clubs = z3.Ints(
    'void_in_spades void_in_hearts void_in_diamonds void_in_clubs')
singleton_in_spades, singleton_in_hearts, singleton_in_diamonds, singleton_in_clubs = z3.Ints(
    'singleton_in_spades singleton_in_hearts singleton_in_diamonds singleton_in_clubs')
doubleton_in_spades, doubleton_in_hearts, doubleton_in_diamonds, doubleton_in_clubs = z3.Ints(
    'doubleton_in_spades doubleton_in_hearts doubleton_in_diamonds doubleton_in_clubs')
# Totals of the indicators over the four suits.
voids, singletons, doubletons = z3.Ints('voids singletons doubletons')
def named_count_expr(count_name, count):
    """Define the 0/1 per-suit indicator vars for suits of length *count*
    (e.g. 'void'/0 -> void_in_spades ...) plus their total (e.g. 'voids').

    Returns a z3 conjunction of the defining constraints.
    """
    # Materialize so the pairing below cannot silently consume a lazy map.
    suit_count_vars = list(map(expr_for_suit, suit.SUITS))
    suit_matches_count_vars = [z3.Int("%s_in_%s" % (count_name, s.name.lower())) for s in suit.SUITS]  # void_in_spades, etc.
    # NOTE: the original had a dead `exprs = []` immediately overwritten
    # by this list comprehension; it has been removed.
    exprs = [
        # FIXME: Can z3 support writing this as "void_in_spades == (spades == 0)"?
        z3.Or(
            z3.And(suit_count == count, suit_matches_count == 1),
            z3.And(suit_count != count, suit_matches_count == 0),
        )
        for suit_count, suit_matches_count in zip(suit_count_vars, suit_matches_count_vars)
    ]
    exprs.append(z3.Int(count_name + "s") == sum(suit_matches_count_vars))
    return z3.And(*exprs)
def constrain_honors_expr():
    """Constrain every honor var to {0, 1} and cap the number of honors in
    each suit by the number of cards held in that suit."""
    exprs = []
    for honor_suit in suit.SUITS:
        # Materialize: honor_vars is iterated twice below. If _honor_vars()
        # returns a lazy map (as it historically did), the second pass under
        # Python 3 would see an exhausted iterator and silently constrain
        # sum([]) == 0 <= suit length instead of the honor total.
        honor_vars = list(_honor_vars(honor_suit))
        # The easiest way to have an Int var and constrain it to bool values is to just:
        # z3.And(0 <= ace_of_spades, ace_of_spades <= 1)
        exprs.extend([z3.And(0 <= honor_var, honor_var <= 1) for honor_var in honor_vars])
        # Also make sure that total number of honors is <= total number of cards
        exprs.append(sum(honor_vars) <= expr_for_suit(honor_suit))
    return z3.And(*exprs)
# Global axioms describing any legal 13-card hand: suit counts, point
# ranges, shortness-indicator definitions, honor bounds, and the
# support-point formulas for each potential trump suit.
axioms = [
    spades + hearts + diamonds + clubs == 13,
    spades >= 0,
    hearts >= 0,
    diamonds >= 0,
    clubs >= 0,
    0 <= high_card_points, high_card_points <= 37,
    points == high_card_points,
    high_card_points <= playing_points,
    playing_points <= 55,  # Just to make the model finite.
    named_count_expr('void', 0),
    named_count_expr('singleton', 1),
    named_count_expr('doubleton', 2),
    constrain_honors_expr(),
    # Support points: with exactly 3-card support, shortness adds 1/2/3
    # (doubleton/singleton/void); with 4+ support it adds 1/3/5; with
    # 2 or fewer cards, support points are just high-card points.
    z3.Or(
        z3.And(spades <= 2, points_supporting_spades == high_card_points),
        z3.And(spades == 3, points_supporting_spades == high_card_points + doubletons + 2 * singletons + 3 * voids),
        z3.And(spades >= 4, points_supporting_spades == high_card_points + doubletons + 3 * singletons + 5 * voids),
    ),
    z3.Or(
        z3.And(hearts <= 2, points_supporting_hearts == high_card_points),
        z3.And(hearts == 3, points_supporting_hearts == high_card_points + doubletons + 2 * singletons + 3 * voids),
        z3.And(hearts >= 4, points_supporting_hearts == high_card_points + doubletons + 3 * singletons + 5 * voids),
    ),
    z3.Or(
        z3.And(diamonds <= 2, points_supporting_diamonds == high_card_points),
        z3.And(diamonds == 3, points_supporting_diamonds == high_card_points + doubletons + 2 * singletons + 3 * voids),
        z3.And(diamonds >= 4, points_supporting_diamonds == high_card_points + doubletons + 3 * singletons + 5 * voids),
    ),
    z3.Or(
        z3.And(clubs <= 2, points_supporting_clubs == high_card_points),
        z3.And(clubs == 3, points_supporting_clubs == high_card_points + doubletons + 2 * singletons + 3 * voids),
        z3.And(clubs >= 4, points_supporting_clubs == high_card_points + doubletons + 3 * singletons + 5 * voids),
    ),
    sum([  # Sum the sums for all suits.
        sum([  # Sum the honors for a single suit
            a * b for a, b in zip(_honor_values, honor_vars)])
        for honor_vars in map(_honor_vars, suit.SUITS)
    ]) == high_card_points,  # The total is our hcp.
]
# Floor on high-card points used by the opening point-rule expressions below.
min_hcp_for_open = 8
def _expr_for_point_rule(count):
    """Opening 'rule of N': hcp plus the lengths of some pair of suits
    reaches *count*, subject to hcp and playing-point floors."""
    suit_lengths = (spades, hearts, diamonds, clubs)
    # Enumerate the six unordered suit pairs in the same order as the
    # original explicit listing.
    pair_tests = [
        suit_lengths[i] + suit_lengths[j] + high_card_points >= count
        for i in range(4)
        for j in range(i + 1, 4)
    ]
    return z3.And(
        high_card_points >= min_hcp_for_open,
        playing_points >= 12,
        z3.Or(*pair_tests),
    )
rule_of_twenty = _expr_for_point_rule(20)
rule_of_nineteen = _expr_for_point_rule(19)
# FIXME: This rule probably needs to consider min_hcp_for_open
rule_of_fifteen = z3.And(spades + high_card_points >= 15, high_card_points >= min_hcp_for_open, playing_points >= 12)
# Suit quality: at least two of the top three honors in the suit...
two_of_the_top_three_spades = ace_of_spades + king_of_spades + queen_of_spades >= 2
two_of_the_top_three_hearts = ace_of_hearts + king_of_hearts + queen_of_hearts >= 2
two_of_the_top_three_diamonds = ace_of_diamonds + king_of_diamonds + queen_of_diamonds >= 2
two_of_the_top_three_clubs = ace_of_clubs + king_of_clubs + queen_of_clubs >= 2
# ...or at least three of the top five.
three_of_the_top_five_spades = ace_of_spades + king_of_spades + queen_of_spades + jack_of_spades + ten_of_spades >= 3
three_of_the_top_five_hearts = ace_of_hearts + king_of_hearts + queen_of_hearts + jack_of_hearts + ten_of_hearts >= 3
three_of_the_top_five_diamonds = ace_of_diamonds + king_of_diamonds + queen_of_diamonds + jack_of_diamonds + ten_of_diamonds >= 3
three_of_the_top_five_clubs = ace_of_clubs + king_of_clubs + queen_of_clubs + jack_of_clubs + ten_of_clubs >= 3
three_of_the_top_five_spades_or_better = z3.Or(two_of_the_top_three_spades, three_of_the_top_five_spades)
three_of_the_top_five_hearts_or_better = z3.Or(two_of_the_top_three_hearts, three_of_the_top_five_hearts)
three_of_the_top_five_diamonds_or_better = z3.Or(two_of_the_top_three_diamonds, three_of_the_top_five_diamonds)
three_of_the_top_five_clubs_or_better = z3.Or(two_of_the_top_three_clubs, three_of_the_top_five_clubs)
# Third-round stopper: ace, or king with a guard, or queen doubly guarded.
third_round_stopper_spades = z3.Or(ace_of_spades == 1, z3.And(king_of_spades == 1, spades >= 2), z3.And(queen_of_spades == 1, spades >= 3))
third_round_stopper_hearts = z3.Or(ace_of_hearts == 1, z3.And(king_of_hearts == 1, hearts >= 2), z3.And(queen_of_hearts == 1, hearts >= 3))
third_round_stopper_diamonds = z3.Or(ace_of_diamonds == 1, z3.And(king_of_diamonds == 1, diamonds >= 2), z3.And(queen_of_diamonds == 1, diamonds >= 3))
third_round_stopper_clubs = z3.Or(ace_of_clubs == 1, z3.And(king_of_clubs == 1, clubs >= 2), z3.And(queen_of_clubs == 1, clubs >= 3))
number_of_aces = ace_of_spades + ace_of_hearts + ace_of_diamonds + ace_of_clubs
number_of_kings = king_of_spades + king_of_hearts + king_of_diamonds + king_of_clubs
# Balanced: at most one doubleton, no singletons or voids.
balanced = z3.And(doubletons <= 1, singletons == 0, voids == 0)
# Stopper: A, or Kx, or Qxx, or JTxx.
stopper_spades = z3.Or(ace_of_spades == 1, z3.And(king_of_spades == 1, spades >= 2), z3.And(queen_of_spades == 1, spades >= 3), z3.And(jack_of_spades == 1, ten_of_spades == 1, spades >= 4))
stopper_hearts = z3.Or(ace_of_hearts == 1, z3.And(king_of_hearts == 1, hearts >= 2), z3.And(queen_of_hearts == 1, hearts >= 3), z3.And(jack_of_hearts == 1, ten_of_hearts == 1, hearts >= 4))
stopper_diamonds = z3.Or(ace_of_diamonds == 1, z3.And(king_of_diamonds == 1, diamonds >= 2), z3.And(queen_of_diamonds == 1, diamonds >= 3), z3.And(jack_of_diamonds == 1, ten_of_diamonds == 1, diamonds >= 4))
stopper_clubs = z3.Or(ace_of_clubs == 1, z3.And(king_of_clubs == 1, clubs >= 2), z3.And(queen_of_clubs == 1, clubs >= 3), z3.And(jack_of_clubs == 1, ten_of_clubs == 1, clubs >= 4))
# Trivially-true constraint for rules that impose nothing.
NO_CONSTRAINTS = z3.BoolVal(True)
def stopper_expr_for_suit(suit):
    """Map *suit* to its stopper expression (indexed by suit.index)."""
    by_index = {
        0: stopper_clubs,
        1: stopper_diamonds,
        2: stopper_hearts,
        3: stopper_spades,
    }
    return by_index[suit.index]
def support_points_expr_for_suit(suit):
    """Map *suit* to its support-points variable (indexed by suit.index)."""
    by_index = {
        0: points_supporting_clubs,
        1: points_supporting_diamonds,
        2: points_supporting_hearts,
        3: points_supporting_spades,
    }
    return by_index[suit.index]
def expr_for_hand(hand):
    """Constrain the per-suit card counts and honor indicator variables to
    describe exactly *hand*."""
    exprs = []
    for hand_suit in suit.SUITS:
        held = hand.cards_in_suit(hand_suit)
        exprs.append(expr_for_suit(hand_suit) == len(held))
        # _honor_vars yields (ace, king, queen, jack, ten) in this order.
        for honor_var, rank_char in zip(_honor_vars(hand_suit),
                                        ('A', 'K', 'Q', 'J', 'T')):
            exprs.append(honor_var == int(rank_char in held))
    return z3.And(*exprs)
# Seats at the table relative to this player, in rotation order.
positions = enum.Enum(
    "RHO",
    "Partner",
    "LHO",
    "Me",
)
def is_certain(solver, expr):
    """Return True iff *expr* holds in every model of the solver's constraints
    (i.e. its negation is unsatisfiable).

    The push/pop pair is wrapped in try/finally so the pushed scope is
    popped even if add() or check() raises; the original leaked the scope
    on error.
    """
    solver.push()
    try:
        solver.add(z3.Not(expr))
        return solver.check() == z3.unsat
    finally:
        solver.pop()
def is_possible(solver, expr):
    """Return True iff *expr* is satisfiable together with the solver's
    current constraints.

    try/finally guarantees the pushed scope is popped even if add() or
    check() raises; the original leaked the scope on error.
    """
    solver.push()
    try:
        solver.add(expr)
        return solver.check() == z3.sat
    finally:
        solver.pop()
|
nilq/baby-python
|
python
|
# An XOR linked list is a more memory efficient doubly linked list.
# Instead of each node holding next and prev fields, it holds a field named both,
# which is an XOR of the next node and the previous node.
# Implement an XOR linked list; it has an add(element) which adds the
# element to the end, and a get(index) which returns the node at index.
# If using a language that has no pointers (such as Python),
# you can assume you have access to get_pointer and
# dereference_pointer functions that converts between nodes and memory addresses
# Note: not sure how to test the code since Python doesn't use
# pointers the same way as other languages.
class Node():
    """One node of an XOR linked list.

    `value` is the payload; `xor` is intended to hold the XOR of the
    addresses of the previous and next nodes (the problem statement calls
    this field `both`).
    """
    def __init__(self, value = None, xor = None):
        self.value = value
        self.xor = xor
def get_pointer(node):
    """Stub: per the problem statement, would return the memory address of
    *node* (Python has no real pointers)."""
    pass
def dereference_pointer(node):
    """Stub: per the problem statement, would return the node stored at the
    given memory address."""
    pass
class XORlist():
    """XOR linked list built on the get_pointer/dereference_pointer stubs.

    Fixes in this revision:
    - add() stored `new.val` (a nonexistent attribute) instead of the new
      node, and never stored the element itself; it also XOR'd against a
      possibly-None tail.xor (TypeError).
    - get() read `node.both`, but Node declares the field `xor`; it also
      returned None for the tail's own index.
    """
    def __init__(self):
        # Sentinel empty nodes; a head with value None means "empty list".
        self.head = Node()
        self.tail = Node()

    def add(self, element):
        """Append *element* at the end of the list."""
        new = Node(value=element)
        if self.head.value is None:
            # Empty list: the new node is both head and tail.
            self.head = self.tail = new
        else:
            # New tail links back to the old tail; old tail folds the new
            # node's address into its xor field (None treated as 0).
            new.xor = get_pointer(self.tail)
            self.tail.xor = (self.tail.xor or 0) ^ get_pointer(new)
            self.tail = new

    def get(self, index):
        """Return the node at *index* (0-based from the head), or None when
        the index is negative or past the end."""
        if index < 0:
            return None
        prev_ptr = 0
        node = self.head
        for _ in range(index):
            if node is self.tail:
                return None  # ran off the end of the list
            next_ptr = (node.xor or 0) ^ prev_ptr
            prev_ptr = get_pointer(node)
            node = dereference_pointer(next_ptr)
        return node
|
nilq/baby-python
|
python
|
from app import const
# Airtable base holding the map-by-geographic-area data.
BASE_ID = const.AIRTABLE_MAP_BY_GEOGRAPHIC_AREA_BASE_ID
# Table names within that base.
AREA_CONTACT_TABLE_NAME = "Area Contact"
AREA_TARGET_COMMUNITY_TABLE_NAME = "Area Target Community"
class AirtableGeographicAreaTypes:
    """String constants for the geographic-area 'type' values used in the
    map-by-geographic-area Airtable base."""
    AREA_TYPE_CITY = "City"
    AREA_TYPE_POLYGON = "Polygon"
    AREA_TYPE_REGION = "Region"
    AREA_TYPE_STATE = "State"
    AREA_TYPE_COUNTRY = "Country"
    # "Default" types -- presumably fallbacks when no specific area
    # matches (TODO confirm with the Airtable schema).
    AREA_TYPE_DEFAULT_US = "Default (US)"
    AREA_TYPE_DEFAULT_INTERNATIONAL = "Default (International)"
nilq/baby-python
|
python
|
from csv import DictReader
from scrapy import Item
from pyproj import Proj, transform
from jedeschule.spiders.nordrhein_westfalen_helper import NordRheinWestfalenHelper
from jedeschule.spiders.school_spider import SchoolSpider
from jedeschule.items import School
# for an overview of the data provided by the State of
# Nordrhein-Westfalen, check out the overview page here:
# https://www.schulministerium.nrw.de/ministerium/open-government/offene-daten
class NordrheinWestfalenSpider(SchoolSpider):
    """Spider for the open-data school list published by the State of
    Nordrhein-Westfalen (semicolon-separated CSV)."""
    name = 'nordrhein-westfalen'
    start_urls = [
        'https://www.schulministerium.nrw.de/BiPo/OpenData/Schuldaten/schuldaten.csv',
    ]

    def parse(self, response):
        """Yield one dict per data row of the downloaded CSV."""
        body = response.body.decode('utf-8').splitlines()
        # skip the first line which contains information about the separator
        reader = DictReader(body[1:], delimiter=';')
        for line in reader:
            yield line

    @staticmethod
    def normalize(item: Item) -> School:
        """Convert one raw CSV row into a School item.

        Joins the three name fragments, resolves coded fields through the
        NRW helper lookup tables, and reprojects the UTM coordinates
        (source CRS given per-row in the 'EPSG' column) to WGS84.
        """
        name = " ".join([item.get("Schulbezeichnung_1", ""),
                         item.get("Schulbezeichnung_2", ""),
                         item.get("Schulbezeichnung_3", "")]).strip()
        helper = NordRheinWestfalenHelper()
        right, high = item.get('UTMRechtswert'), item.get('UTMHochwert')
        this_projection = Proj(item.get('EPSG'))
        target_projection = Proj('epsg:4326')
        # NOTE(review): pyproj.transform is deprecated since pyproj 2.x in
        # favor of the Transformer API -- confirm the pinned pyproj version.
        lon, lat = transform(this_projection, target_projection, right, high)
        return School(name=name,
                      id='NW-{}'.format(item.get('Schulnummer')),
                      address=item.get('Strasse'),
                      zip=item.get("PLZ"),
                      city=item.get('Ort'),
                      website=item.get('Homepage'),
                      email=item.get('E-Mail'),
                      legal_status=helper.resolve('rechtsform', item.get('Rechtsform')),
                      school_type=helper.resolve('schulform', item.get('Schulform')),
                      provider=helper.resolve('provider', item.get('Traegernummer')),
                      fax=f"{item.get('Faxvorwahl')}{item.get('Fax')}",
                      phone=f"{item.get('Telefonvorwahl')}{item.get('Telefon')}",
                      latitude=lat,
                      longitude=lon,
                      )
|
nilq/baby-python
|
python
|
# Generated by Django 3.2 on 2022-02-12 21:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2) schema migration for the reservations app:
    redeclares two fields on the Reservation model."""

    dependencies = [
        ('reservations', '0001_initial'),
    ]

    operations = [
        # needed_capacity: FK to ReservationCapacity with CASCADE deletes.
        migrations.AlterField(
            model_name='reservation',
            name='needed_capacity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reservations.reservationcapacity'),
        ),
        # status: single-character coded choices, defaulting to Scheduled.
        migrations.AlterField(
            model_name='reservation',
            name='status',
            field=models.CharField(choices=[('S', 'Scheduled'), ('A', 'Active'), ('D', 'Done')], default='S', max_length=1),
        ),
    ]
|
nilq/baby-python
|
python
|
import hashlib
import os
import errno
def hashpasswd(passwd):
    """Return the hex-encoded SHA-512 digest of *passwd* (a str)."""
    digest = hashlib.sha512()
    digest.update(passwd.encode('utf-8'))
    return digest.hexdigest()
def create_path(path):
    """Ensure the parent directory of *path* exists, creating it if needed.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the old
    exists-check + errno guard, which closes the check-then-create race
    without hand-rolled EEXIST handling.  A bare filename (no directory
    component) is a no-op instead of raising.
    """
    directory = os.path.dirname(path)
    if directory:  # bare filenames have no parent to create
        os.makedirs(directory, exist_ok=True)
class EvalBuilder:
    """Accumulates boolean sub-expression strings and renders them as one
    ``and_(...)`` expression (e.g. for building SQLAlchemy filter strings).

    With no expressions appended the rendered string is simply ``"True"``.
    """

    def __init__(self):
        # Instance-level list (the old class-level ``_expressions = None``
        # attribute was a shadowing footgun and served no purpose).
        self._expressions = []

    def append(self, expression):
        """Add one sub-expression (a string) to the conjunction."""
        self._expressions.append(expression)

    def __str__(self):
        if not self._expressions:
            return "True"
        # "True" stays as the first operand so even a single expression
        # renders as a well-formed and_() call: and_(True,<expr>).
        # str.join replaces the original quadratic += concatenation loop.
        return "and_(True,{})".format(",".join(self._expressions))

    def getEvalStr(self):
        """Return the same string as ``str(self)`` (kept for API compatibility)."""
        return self.__str__()
|
nilq/baby-python
|
python
|
#!/usr/bin/untitled #created by Reyad
import smtplib
import json
# import datetime
# import mysql.connector
import pymysql
# import MySQLdb
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime
# import datetime
from email.mime.application import MIMEApplication
from dotenv import load_dotenv
import os
from pathlib import Path # python3 only
from dotenv import load_dotenv
# Load DB credentials from the local .env file into the process environment.
load_dotenv()
hostname = os.getenv("DB_HOST")
username = os.getenv("DB_USERNAME")
password = os.getenv("DB_PASSWORD")
database = os.getenv("DB_DATABASE")
# myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
# Single shared connection consumed by email_queue() below.
myConnection = pymysql.connect(host=hostname, user=username, passwd=password, db=database)
def email_queue(conn):
    """Send up to 5 queued emails using the first active SMTP configuration.

    Two phases:
      1. Quota rotation: configurations that reached the daily cap (300) and
         whose counter is at least a day old are archived to
         email_configuration_history and reset to 0.
      2. Dispatch: pick one configuration with quota left, pull up to 5
         pending rows from email_queue, send each via SMTP, and record
         per-message status plus the configuration's updated counters.
    """
    cur = conn.cursor()
    maximum_total_mail = 300
    # start email configuration setup
    # NOTE(review): SQL below is assembled by string concatenation.  The
    # interpolated values are internal ints here, but parameterized queries
    # would be safer if any of them can ever come from user input.
    sql = "SELECT id,from_email,server_details, sent_total_mail, last_updated_date " \
          "FROM email_configuration WHERE is_active=1 AND sent_total_mail >=" + str(maximum_total_mail) + ""
    cur.execute(sql)
    for id, from_email, server_details, sent_total_mail, last_updated_date in cur.fetchall():
        application_updated_date = last_updated_date
        current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        datetimeFormat = '%Y-%m-%d %H:%M:%S'
        date1 = str(application_updated_date)
        diff = datetime.strptime(current_date, datetimeFormat) \
            - datetime.strptime(date1, datetimeFormat)
        # checking one day 24 hours after clear total mail
        if diff.days >= 1:
            # Archive the exhausted counter, then zero it so this
            # configuration can send again today.
            query3 = "INSERT INTO email_configuration_history(from_email,sent_total_mail,server_details," \
                     "created_at,updated_at) VALUES(%s,%s,%s,%s,%s)"
            args = (from_email, sent_total_mail, server_details, current_date, current_date)
            cur.execute(query3, args)
            query4 = "UPDATE email_configuration SET sent_total_mail = %s, updated_at = %s, last_updated_date = %s" \
                     " where id= %s"
            data2 = (0, str(current_date), str(current_date), id)
            cur.execute(query4, data2)
            conn.commit()
    # end of email configuration setup
    # Pick the first configuration that still has quota left today.
    config = "SELECT server_details,sent_total_mail, id " \
             "FROM email_configuration WHERE is_active=1 AND sent_total_mail <="+str(maximum_total_mail)+" "
    cur.execute(config)
    row = cur.fetchone()
    if row:
        # server_details is a JSON blob holding the SMTP credentials.
        config_data = json.loads(row[0])
        MAIL_USERNAME = config_data["MAIL_USERNAME"]
        MAIL_PASSWORD = config_data["MAIL_PASSWORD"]
        MAIL_HOST = config_data["MAIL_HOST"]
        MAIL_PORT = config_data["MAIL_PORT"]
    else:
        print('Today Email Quata has been full. Please check email configuration table!')
        exit()
    # NOTE(review): the commented block below contains hard-coded credentials;
    # it should be removed from version control once confirmed unused.
    # # Default configuration
    # MAIL_USERNAME = 'ossbida@bidaquickserv.org'
    # MAIL_PASSWORD = 'mKFxxgf3'
    # MAIL_HOST = 'smtp.bidaquickserv.org'
    # # MAIL_HOST = 'smtp.gmail.com'
    # MAIL_PORT = 587 #tls
    # #MAIL_PORT = 465 #ssl
    # for details in cur.fetchall()
    query = "SELECT id,email_to,email_cc,email_content,no_of_try,attachment,email_subject,attachment_certificate_name,"\
            "app_id, service_id FROM email_queue WHERE email_status=0 AND email_to!='' ORDER BY id DESC LIMIT 5"
    result = cur.execute(query)
    count = 0
    is_active = 1
    smtp_response = ''
    attachments = ''
    if result > 0:
        for id, email_to, email_cc, email_content, no_of_try, attachment, email_subject, attachment_certificate_name, app_id, service_id in cur.fetchall():
            print("from: " + MAIL_USERNAME)
            print('to', email_to)
            # attachment certificate link
            # attachment_certificate_name encodes "<table>.<column>"; the
            # referenced cell replaces the {attachment} placeholder in the body.
            if attachment_certificate_name:
                cer_exp = attachment_certificate_name.split('.')
                if cer_exp[0] is not None:  # cer_exp[0] = TABLE NAME, cer_exp[1] = FILED NAME
                    sql2 = "SELECT "+str(cer_exp[1])+" FROM "+str(cer_exp[0])+" where id= " + str(app_id) + " AND "+str(cer_exp[1])+"!='' "
                    result2 = cur.execute(sql2)
                    if result2 == 0:
                        # Certificate not ready yet: skip this message for now.
                        continue
                    else:
                        certificate_link = cur.fetchone()
                        email_content = email_content.replace('{attachment}', certificate_link[0])
            html = email_content
            msg = MIMEMultipart('alternative')
            msg["Subject"] = email_subject
            msg["From"] = MAIL_USERNAME
            msg["To"] = email_to
            msg["Cc"] = email_cc
            if msg["Cc"] is not None:
                cc = msg["Cc"].split(",")
            else:
                cc = ['']
            if msg["To"]:
                # NOTE(review): this split result is discarded — To is still
                # passed to sendmail as a single string below; confirm intent.
                msg["To"].split(",")
            part2 = MIMEText(html, 'html')
            msg.attach(part2)
            # Attach pdf file to the email
            if attachment:
                attachment_file = MIMEApplication(open(attachment, "rb").read())
                attachment_file.add_header('Content-Disposition', 'attachment', filename=attachment)
                msg.attach(attachment_file)
            try:
                if MAIL_HOST == 'smtp.gmail.com':
                    server = smtplib.SMTP_SSL(host=MAIL_HOST, port=MAIL_PORT)
                else:
                    server = smtplib.SMTP(MAIL_HOST, MAIL_PORT)  # smtp tls premium server ossbida@bidaquickserv.org
                server.login(MAIL_USERNAME, MAIL_PASSWORD)
                server.sendmail(str(msg["From"]), [msg["To"]] + cc, msg.as_string())
                server.quit()
                # server.ehlo()
                status = 1
                mail_messages = "Email has been sent on " + datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                count += 1
                no_of_try += 1
            except smtplib.SMTPException as e:
                # Give up on a message after 10 failed attempts (-9), and
                # deactivate the configuration so it gets operator attention.
                no_of_try = no_of_try + 1
                if no_of_try > 10:
                    status = -9
                else:
                    status = 0
                mail_messages = 'Something went wrong...' + str(e)
                smtp_response = str(e)
                is_active = -9  # forcefully inactive
            # Persist per-message state, then refresh the configuration's
            # running total after every message.
            query1 = "UPDATE email_queue SET email_status = %s, no_of_try = %s where id= %s"
            data = (status, no_of_try, id)
            cur.execute(query1, data)
            query2 = "UPDATE email_configuration SET is_active=%s,sent_total_mail =%s,updated_at =%s," \
                     "smtp_response=%s where id=%s "
            data5 = (is_active, str(row[1]+count), datetime.now().strftime("%Y-%m-%d %H:%M:%S"), smtp_response, row[2])
            cur.execute(query2, data5)
            print(row[1]+count)
            conn.commit()
            print(mail_messages)
    if count == 0:
        print("No Email in queue to send! " + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# Entry point: drain the queue once, then close the shared connection.
print("Using MySQLdb…")
email_queue(myConnection)
myConnection.close()
|
nilq/baby-python
|
python
|
from is_wire.core import Channel, Message, Subscription
from google.protobuf.struct_pb2 import Struct
import socket
# Connect to the broker and open a subscription to receive the RPC reply on.
channel = Channel("amqp://guest:guest@10.10.2.7:30000")
subscription = Subscription(channel)
# Prepare request: a protobuf Struct carrying a single numeric field.
struct = Struct()
struct.fields["value"].number_value = 1.0
request = Message(content=struct, reply_to=subscription)
# Make request
channel.publish(request, topic="Tester.Increment")
# Wait for reply with 1.0 seconds timeout
try:
    reply = channel.consume(timeout=1.0)
    struct = reply.unpack(Struct)
    print('RPC Status:', reply.status, '\nReply:', struct)
except socket.timeout:
    # No reply arrived within the timeout window.
    print('No reply :(')
|
nilq/baby-python
|
python
|
import logging
from omega import __version__
from tests.interfaces.test_web_interfaces import TestWebInterfaces
logger = logging.getLogger(__name__)
class TestSys(TestWebInterfaces):
    """Integration tests for the server's /sys endpoints."""

    async def test_sever_version(self):
        # The version the server reports must match the installed package's.
        ver = await self.server_get("sys", "version", is_pickled=False)
        self.assertEqual(__version__, ver)
|
nilq/baby-python
|
python
|
from montague.ast import (
And,
Call,
ComplexType,
Exists,
ForAll,
IfAndOnlyIf,
IfThen,
Iota,
Lambda,
Not,
Or,
TYPE_ENTITY,
TYPE_EVENT,
TYPE_TRUTH_VALUE,
TYPE_WORLD,
Var,
)
# --- String rendering of logical formulas --------------------------------


def test_variable_to_str():
    assert str(Var("a")) == "a"


def test_and_to_str():
    assert str(And(Var("a"), Var("b"))) == "a & b"


def test_or_to_str():
    assert str(Or(Var("a"), Var("b"))) == "a | b"


def test_if_then_to_str():
    assert str(IfThen(Var("a"), Var("b"))) == "a -> b"


def test_if_and_only_if_to_str():
    assert str(IfAndOnlyIf(Var("a"), Var("b"))) == "a <-> b"


def test_lambda_to_str():
    tree = Lambda("x", And(Var("a"), Var("x")))
    assert str(tree) == "λx.a & x"
    # ascii_str() is the pure-ASCII fallback rendering (L for λ).
    assert tree.ascii_str() == "Lx.a & x"
    # This formula is semantically invalid but that doesn't matter.
    assert str(And(Lambda("x", Var("x")), Lambda("y", Var("y")))) == "[λx.x] & [λy.y]"


def test_call_to_str():
    assert (
        str(Call(Call(Var("P"), And(Var("a"), Var("b"))), Lambda("x", Var("x"))))
        == "P(a & b, λx.x)"
    )
    assert str(Call(Var("P"), Var("x"))) == "P(x)"


def test_for_all_to_str():
    tree = ForAll("x", Call(Var("P"), Var("x")))
    assert str(tree) == "∀ x.P(x)"
    assert tree.ascii_str() == "Ax.P(x)"


def test_exists_to_str():
    tree = Exists("x", Call(Var("P"), Var("x")))
    assert str(tree) == "∃ x.P(x)"
    assert tree.ascii_str() == "Ex.P(x)"


def test_not_to_str():
    assert str(Not(Var("x"))) == "~x"
    assert str(Not(Or(Var("x"), Var("y")))) == "~[x | y]"


def test_binary_operators_to_str():
    # Brackets appear only where operator precedence requires them.
    assert str(And(Or(Var("a"), Var("b")), Var("c"))) == "[a | b] & c"
    assert str(Or(And(Var("a"), Var("b")), Var("c"))) == "a & b | c"
    assert str(Or(Var("a"), Or(Var("b"), Var("c")))) == "a | b | c"
    assert str(And(Var("a"), And(Var("b"), Var("c")))) == "a & b & c"


def test_nested_exists_and_for_all_to_str():
    assert str(And(ForAll("x", Var("x")), Exists("x", Var("x")))) == "[∀ x.x] & [∃ x.x]"


def test_iota_to_str():
    tree = Iota("x", Var("x"))
    assert str(tree) == "ιx.x"
    assert tree.ascii_str() == "ix.x"
# --- String rendering of semantic types ----------------------------------


def test_entity_to_str():
    assert str(TYPE_ENTITY) == "e"


def test_event_to_str():
    assert str(TYPE_EVENT) == "v"


def test_truth_value_to_str():
    assert str(TYPE_TRUTH_VALUE) == "t"


def test_world_to_str():
    assert str(TYPE_WORLD) == "s"


def test_recursive_type_to_str():
    assert str(ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE)) == "<e, t>"


def test_deeply_recursive_type_to_str():
    assert (
        str(
            ComplexType(
                TYPE_EVENT,
                ComplexType(
                    ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE),
                    ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE),
                ),
            )
        )
        == "<v, <<e, t>, <e, t>>>"
    )


def test_recursive_type_to_concise_str():
    # concise_str() drops the brackets around simple one-level types.
    typ = ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE)
    assert typ.concise_str() == "et"


def test_deeply_recursive_type_to_concise_str():
    typ = ComplexType(
        TYPE_EVENT,
        ComplexType(
            ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE),
            ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE),
        ),
    )
    assert typ.concise_str() == "<v, <et, et>>"
# --- Variable substitution -----------------------------------------------


def test_simple_replace_variable():
    assert Var("x").replace_variable("x", Var("y")) == Var("y")


def test_replace_variable_in_and_or():
    tree = And(Or(Var("x"), Var("y")), Var("z"))
    assert tree.replace_variable("x", Var("x'")) == And(
        Or(Var("x'"), Var("y")), Var("z")
    )


def test_replace_predicate():
    # Predicate positions are substituted just like argument positions.
    tree = Call(Var("P"), Var("x"))
    assert tree.replace_variable("P", Var("Good")) == Call(Var("Good"), Var("x"))


def test_replace_variable_in_quantifiers():
    # Bound occurrences are left alone; only the free "b" is replaced.
    tree = ForAll(
        "x",
        Or(And(ForAll("b", Var("b")), Exists("b", Var("b"))), Exists("y", Var("b"))),
    )
    assert tree.replace_variable("b", Var("bbb")) == ForAll(
        "x",
        Or(And(ForAll("b", Var("b")), Exists("b", Var("b"))), Exists("y", Var("bbb"))),
    )


def test_recursive_replace_variable():
    # BFP(x, Lx.x, x & y)
    tree = Call(
        Call(
            Call(Var("BFP"), Var("x")),
            Lambda("x", Var("x")),  # This should not be replaced.
        ),
        And(Var("x"), Var("y")),
    )
    assert tree.replace_variable("x", Var("j")) == Call(
        Call(Call(Var("BFP"), Var("j")), Lambda("x", Var("x"))), And(Var("j"), Var("y"))
    )


def test_replace_variable_in_iota():
    # The iota-bound variable is protected, like quantifier-bound ones.
    tree = Iota("x", And(Var("x"), Var("y")))
    assert tree.replace_variable("x", Var("a")) == tree
    assert tree.replace_variable("y", Var("b")) == Iota("x", And(Var("x"), Var("b")))
|
nilq/baby-python
|
python
|
from recommendation.api.types.related_articles import candidate_finder
from recommendation.utils import configuration
import recommendation
# Wikidata items most similar to Q22686 with their expected cosine scores,
# used as the reference result in test_embedding below.
EXPECTED = [('Q22686', 1.0), ('Q3752663', 0.8853468379287844), ('Q2462124', 0.861691557168689),
            ('Q432473', 0.8481581254555062), ('Q242351', 0.8379904779822078), ('Q868772', 0.8087311692249578),
            ('Q21070387', 0.7956811552934058), ('Q239411', 0.7829732882093489), ('Q736223', 0.7760532537216831),
            ('Q3731533', 0.7474319215265643), ('Q699872', 0.6474165168034756), ('Q2597050', 0.6352709659245916),
            ('Q12071552', 0.6273134513051442), ('Q6294', 0.6132842610738145), ('Q13628723', 0.5921917468920406),
            ('Q359442', 0.5868018793427279), ('Q29468', 0.5696888764253161), ('Q76', 0.5616138355609682),
            ('Q2036942', 0.5538574999463601), ('Q324546', 0.5466022935973467), ('Q17092708', 0.5438881700622109),
            ('Q69319', 0.5400609632856112), ('Q846330', 0.5337995502586717), ('Q44430', 0.5300078863669737),
            ('Q816459', 0.5156321533144876), ('Q4496', 0.515222705930191), ('Q29552', 0.5072461049596773)]
def test_embedding():
    # Load the embedding without the optimization pass, then check both the
    # item ids and (approximately) the similarity scores against EXPECTED.
    candidate_finder.initialize_embedding(optimize=False)
    results = candidate_finder.get_embedding().most_similar('Q22686')
    for expected, actual in zip(EXPECTED, results):
        assert expected[0] == actual[0]
        assert isclose(expected[1], actual[1])
def test_configuration():
    # The configured embedding package must be this package itself.
    assert recommendation.__name__ == configuration.get_config_value('related_articles', 'embedding_package')
# math.isclose was added in 3.5
# https://www.python.org/dev/peps/pep-0485/#proposed-implementation
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Backport of math.isclose (PEP 485): symmetric relative/absolute check."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
|
nilq/baby-python
|
python
|
"""
Objects
Defining objects imitating the behavior of Python's built-in objects but linked to the database.
"""
from yuno.objects.dict import YunoDict
from yuno.objects.list import YunoList
|
nilq/baby-python
|
python
|
import ptypes
from ptypes import *
## string primitives

class LengthPrefixedAnsiString(pstruct.type):
    # A 32-bit character count followed by that many ANSI characters.
    _fields_ = [
        (pint.uint32_t, 'Length'),
        (lambda s: dyn.clone(pstr.string, length=s['Length'].li.int()), 'String'),
    ]

    def str(self):
        # Return only the decoded character payload (prefix excluded).
        return self['String'].li.str()


class LengthPrefixedUnicodeString(pstruct.type):
    # A 32-bit character count followed by that many wide characters.
    _fields_ = [
        (pint.uint32_t, 'Length'),
        (lambda s: dyn.clone(pstr.wstring, length=s['Length'].li.int()), 'String'),
    ]

    def str(self):
        return self['String'].li.str()
## PresentationObject Format

class PresentationObjectHeader(pstruct.type):
    def __ClassName(self):
        # FormatID 5 stores the class name as a length-prefixed ANSI string;
        # other ids fall back to a plain string.  TODO confirm against the
        # OLE 1.0 specification.
        fmt = self['FormatID'].li.int()
        if fmt == 5:
            return LengthPrefixedAnsiString
        return pstr.string

    _fields_ = [
        (pint.uint32_t, 'OLEVersion'),
        (pint.uint32_t, 'FormatID'),
        (__ClassName, 'ClassName'),
    ]


class PresentationObjectType(ptype.definition):
    # Registry mapping class-name strings to presentation-object layouts.
    cache = {}


@PresentationObjectType.define(type='METAFILEPICT')
@PresentationObjectType.define(type='BITMAP')
@PresentationObjectType.define(type='DIB')
class StandardPresentationObject(pstruct.type):
    class BitmapPresentationSize(pint.uint32_t): pass
    class MetaFilePresentationSize(pint.uint32_t): pass

    def __SizeType(self):
        # The Width/Height field type depends on which class name selected
        # this layout (bitmap-style vs metafile-style sizes).
        if self.type in ('BITMAP', 'DIB'):
            return self.BitmapPresentationSize
        if self.type in ('METAFILEPICT',):
            return self.MetaFilePresentationSize
        return pint.uint32_t

    _fields_ = [
        (__SizeType, 'Width'),
        (__SizeType, 'Height'),
        (pint.uint32_t, 'PresentationDataSize'),
        (lambda s: dyn.block(s['PresentationDataSize'].li.int()), 'PresentationData'),
    ]


class ClipboardFormatHeader(pstruct.type): pass


@PresentationObjectType.define
class GenericPresentationObject(pstruct.type):
    # Fallback layout when the class name is not a registered type: a
    # clipboard-format header followed by a format-specific payload.
    type = None

    def __ClipboardObject(self):
        fmt = self['Header'].li['ClipboardFormat'].int()
        return ClipboardFormatType.withdefault(fmt, type=fmt)

    _fields_ = [
        (ClipboardFormatHeader, 'Header'),
        (__ClipboardObject, 'Object'),
    ]

PresentationObjectType.default = GenericPresentationObject
## Clipboard Format (not be set to 0)
# Fields are assigned late because ClipboardFormatHeader is referenced by
# GenericPresentationObject above.
ClipboardFormatHeader._fields_ = [
    (pint.uint32_t, 'ClipboardFormat')
]


class ClipboardFormatType(ptype.definition):
    # Registry keyed by clipboard-format id.
    cache = {}


@ClipboardFormatType.define
class StandardClipboardFormatPresentationObject(pstruct.type):
    # Default layout for any non-zero clipboard format: sized opaque payload.
    type = None
    _fields_ = [
        (pint.uint32_t, 'PresentationDataSize'),
        (lambda s: dyn.block(s['PresentationDataSize'].li.int()), 'PresentationData'),
    ]

ClipboardFormatType.default = StandardClipboardFormatPresentationObject


@ClipboardFormatType.define
class RegisteredClipboardFormatPresentationObject(pstruct.type):
    # Format id 0 carries an explicit format-name blob before the payload.
    type = 0x00000000
    _fields_ = [
        (pint.uint32_t, 'StringFormatDataSize'),
        (lambda s: dyn.block(s['StringFormatDataSize'].li.int()), 'StringFormatData'),
        (pint.uint32_t, 'PresentationDataSize'),
        (lambda s: dyn.block(s['PresentationDataSize'].li.int()), 'PresentationData'),
    ]
## Object

class ObjectHeader(pstruct.type):
    def __ClassName(self):
        # As in PresentationObjectHeader: FormatID 5 means a length-prefixed
        # ANSI class name; otherwise no class name bytes are consumed.
        fmt = self['FormatID'].li.int()
        if fmt == 5:
            return LengthPrefixedAnsiString
        return ptype.type

    _fields_ = [
        (pint.uint32_t, 'OLEVersion'),
        (pint.uint32_t, 'FormatID'),
        (__ClassName, 'ClassName'),
        (LengthPrefixedAnsiString, 'TopicName'),
        (LengthPrefixedAnsiString, 'ItemName'),
    ]


class ObjectType(ptype.definition):
    # Registry keyed by FormatID (0x1 = linked object, 0x2 = embedded object).
    cache = {}


@ObjectType.define
class EmbeddedObject(pstruct.type):
    # The object's native data embedded inline as a sized opaque blob.
    type = 0x00000002
    _fields_ = [
        (pint.uint32_t, 'NativeDataSize'),
        (lambda s: dyn.block(s['NativeDataSize'].li.int()), 'NativeData'),
    ]


@ObjectType.define
class LinkedObject(pstruct.type):
    # A reference to data stored elsewhere (network path + update policy).
    type = 0x00000001
    _fields_ = [
        (LengthPrefixedAnsiString, 'NetworkName'),
        (pint.uint32_t, 'Reserved'),
        (pint.uint32_t, 'LinkUpdateOption'),
    ]
### OLE 1.0 Format Structures

class PresentationObject(pstruct.type):
    def __PresentationObject(self):
        # FormatID 0 means no presentation data follows; otherwise the class
        # name selects the concrete layout from the registry.
        fmt = self['Header'].li['FormatID'].int()
        if fmt != 0:
            clsname = self['Header']['ClassName'].str()
            return PresentationObjectType.withdefault(clsname, type=clsname)
        return ptype.type

    _fields_ = [
        (PresentationObjectHeader, 'Header'),
        (__PresentationObject, 'Object'),
    ]


# Ole v1.0
class Object(pstruct.type):
    # Top-level OLE 1.0 object: header, format-specific body (linked or
    # embedded), then the presentation object.
    def __Object(self):
        fmtid = self['Header'].li['FormatID'].int()
        return ObjectType.withdefault(fmtid, type=fmtid)

    _fields_ = [
        (ObjectHeader, 'Header'),
        (__Object, 'Object'),
        (PresentationObject, 'Presentation'),
    ]


if __name__ == '__main__':
    pass
|
nilq/baby-python
|
python
|
##############################################################################
# Copyright (c) 2016 ZTE Corporation
# feng.xiaowei@zte.com.cn
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import inspect
import json
import tornado.template
import tornado.web
from opnfv_testapi.tornado_swagger import settings
def json_dumps(obj, pretty=False):
    """Serialize *obj* to JSON; sorted and indented when *pretty* is truthy."""
    if pretty:
        return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    return json.dumps(obj)
class SwaggerUIHandler(tornado.web.RequestHandler):
    """Serves the swagger-ui index page pointed at the resource-listing URL."""

    def initialize(self, **kwargs):
        # static_path: directory holding the swagger-ui assets and templates.
        self.static_path = kwargs.get('static_path')
        self.base_url = kwargs.get('base_url')

    def get_template_path(self):
        # Tornado resolves render() template names relative to this path.
        return self.static_path

    def get(self):
        resource_url = self.reverse_url(settings.RESOURCE_LISTING_NAME)
        discovery_url = self.base_url + resource_url
        self.render('swagger/index.html', discovery_url=discovery_url)
class SwaggerResourcesHandler(tornado.web.RequestHandler):
    """Returns the Swagger 1.x resource listing as JSON."""

    def initialize(self, **kwargs):
        self.api_version = kwargs.get('api_version')
        self.swagger_version = kwargs.get('swagger_version')
        self.base_url = kwargs.get('base_url')
        self.exclude_namespaces = kwargs.get('exclude_namespaces')

    def get(self):
        self.set_header('content-type', 'application/json')
        resources = {
            'apiVersion': self.api_version,
            'swaggerVersion': self.swagger_version,
            'basePath': self.base_url,
            'apis': [{
                'path': self.reverse_url(settings.API_DECLARATION_NAME),
                'description': 'Restful APIs Specification'
            }]
        }
        # get_arguments() returns a list; any ?pretty=... argument makes it
        # non-empty (truthy) and enables pretty-printing.
        self.finish(json_dumps(resources, self.get_arguments('pretty')))
class SwaggerApiHandler(tornado.web.RequestHandler):
    """Builds the Swagger 1.x API declaration by introspecting the
    application's registered handlers for decorated REST operations."""

    def initialize(self, **kwargs):
        self.api_version = kwargs.get('api_version')
        self.swagger_version = kwargs.get('swagger_version')
        self.base_url = kwargs.get('base_url')

    def get(self):
        self.set_header('content-type', 'application/json')
        apis = self.find_api(self.application.handlers)
        if apis is None:
            raise tornado.web.HTTPError(404)
        specs = {
            'apiVersion': self.api_version,
            'swaggerVersion': self.swagger_version,
            'basePath': self.base_url,
            'resourcePath': '/',
            'produces': ["application/json"],
            'apis': [self.__get_api_spec__(path, spec, operations)
                     for path, spec, operations in apis],
            'models': self.__get_models_spec(settings.models)
        }
        self.finish(json_dumps(specs, self.get_arguments('pretty')))

    def __get_models_spec(self, models):
        # One spec entry per registered model, keyed by model id.
        models_spec = {}
        for model in models:
            models_spec.setdefault(model.id, self.__get_model_spec(model))
        return models_spec

    @staticmethod
    def __get_model_spec(model):
        return {
            'description': model.summary,
            'id': model.id,
            'notes': model.notes,
            'properties': model.properties,
            'required': model.required
        }

    @staticmethod
    def __get_api_spec__(path, spec, operations):
        # NOTE(review): api.params.values() is a list on Python 2; on
        # Python 3 a dict view is not JSON-serializable — confirm the
        # target runtime before porting.
        return {
            'path': path,
            'description': spec.handler_class.__doc__,
            'operations': [{
                'httpMethod': api.func.__name__.upper(),
                'nickname': api.nickname,
                'parameters': api.params.values(),
                'summary': api.summary,
                'notes': api.notes,
                'responseClass': api.responseClass,
                'responseMessages': api.responseMessages,
            } for api in operations]
        }

    @staticmethod
    def find_api(host_handlers):
        """Yield (url path, handler spec, rest operations) for every handler
        that has at least one method decorated with a rest_api attribute."""
        def get_path(url, args):
            # Substitute regex capture groups with {arg} placeholders.
            return url % tuple(['{%s}' % arg for arg in args])

        def get_operations(cls):
            return [member.rest_api
                    for (_, member) in inspect.getmembers(cls)
                    if hasattr(member, 'rest_api')]

        for host, handlers in host_handlers:
            for spec in handlers:
                # NOTE(review): inspect.ismethod matches unbound methods only
                # on Python 2; on Python 3 these are plain functions — verify
                # if this module is ever run under Python 3.
                for (_, mbr) in inspect.getmembers(spec.handler_class):
                    if inspect.ismethod(mbr) and hasattr(mbr, 'rest_api'):
                        path = get_path(spec._path, mbr.rest_api.func_args)
                        operations = get_operations(spec.handler_class)
                        yield path, spec, operations
                        break
|
nilq/baby-python
|
python
|
import sys


def raise_from(my_exception, other_exception):
    # Python 2 re-raise that preserves the current traceback; the
    # three-expression raise form is a SyntaxError on Python 3, hence the
    # E999 suppression. other_exception is intentionally unused (API shim).
    raise my_exception, None, sys.exc_info()[2]  # noqa: W602, E999
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.