max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
manager/views/employee.py | jordancarlson08/MyStuff | 0 | 6619851 | <filename>manager/views/employee.py
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as hmod
from account import models as amod
from . import templater
from base_app.user_util import user_check, my_account
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@my_account
def process_request(request):
'''Shows the stores'''
if request.urlparams[1] == 'delete':
u = amod.User.objects.get(id=request.urlparams[0])
u.is_active = False
u.save()
return HttpResponseRedirect('/index/')
e = ''
u = ''
try:
u = amod.User.objects.get(id=request.urlparams[0])
except:
return HttpResponseRedirect('/index/')
try:
e = amod.Employee.objects.get(user=u)
except:
return HttpResponseRedirect('/index/')
user = u
if u.is_active == False:
return Http404()
form = UserForm(initial={
'username' : u.username,
'first_name' : u.first_name,
'last_name' : u.last_name,
'email' : u.email,
'phone' : u.phone,
# 'security_question' : u.security_question,
# 'security_answer' : u.security_answer,
'is_staff' : u.is_staff,
'street1' : u.street1,
'street2' : u.street2,
'city' : u.city,
'state' : u.state,
'zipCode' : u.zipCode,
'hireDate':e.hireDate,
'salary':e.salary,
})
if request.method == 'POST':
form = UserForm(request.POST)
if form.is_valid():
#time to save the data
u.username = form.cleaned_data['username']
# u.set_password(form.cleaned_data['password'])
u.first_name = form.cleaned_data['first_name']
u.last_name = form.cleaned_data['last_name']
u.email = form.cleaned_data['email']
u.phone = form.cleaned_data['phone']
# u.security_question = form.cleaned_data['security_question']
# u.security_answer = form.cleaned_data['security_answer']
u.is_staff = form.cleaned_data['is_staff']
u.street1 = form.cleaned_data['street1']
u.street2 = form.cleaned_data['street2']
u.city = form.cleaned_data['city']
u.state = form.cleaned_data['state']
u.zipCode = form.cleaned_data['zipCode']
u.save()
e.hireDate = form.cleaned_data['hireDate']
e.salary = form.cleaned_data['salary']
e.save()
passwordForm = UserPasswordForm()
if request.method == 'POST':
passwordForm = UserPasswordForm(request.POST)
if passwordForm.is_valid():
#time to save the data
if u.check_password(passwordForm.cleaned_data['password']):
if passwordForm.cleaned_data['newpassword1'] == passwordForm.cleaned_data['newpassword2']:
u.set_password(passwordForm.cleaned_data['<PASSWORD>'])
u.save()
tvars = {
'user':user,
'form':form,
'passwordForm':passwordForm,
}
return templater.render_to_response(request, 'employee.html', tvars)
class UserForm(forms.Form):
username = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username',}))
first_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '<NAME>',}))
last_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '<NAME>',}))
email = forms.CharField(max_length=50, widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': '<EMAIL>',}))
phone = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '801-555-1234',}))
# security_question = forms.CharField(label='Security Question', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'What is your mother\'s maiden name?',}))
# security_answer = forms.CharField(label='Answer', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Smith',}))
is_staff = forms.BooleanField(label='Employee?', widget=forms.CheckboxInput(), required=False, )
street1 = forms.CharField(label = "Street 1", widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '123 Center St.',}))
street2 = forms.CharField(label = "Street 2", required = False, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '#242',}))
city = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Provo',}))
state = forms.CharField(max_length=2, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'UT',}))
zipCode = forms.IntegerField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '84601',}))
hireDate = forms.DateField(label='Hire Date', widget=forms.DateInput(attrs={'class': 'form-control', 'placeholder': 'Hire Date',}))
salary= forms.DecimalField(widget=forms.NumberInput(attrs={'class':'form-control', 'placeholder': 'Salary',}))
class UserPasswordForm(forms.Form):
password = forms.CharField(label='Current Password', max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
newpassword1 = forms.CharField(label='New Password',max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
newpassword2 = forms.CharField(label='Repeat Password',max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
def clean(self):
if (self.cleaned_data.get('newpassword1') !=
self.cleaned_data.get('newpassword2')):
raise forms.ValidationError("New password does not match.")
return self.cleaned_data | <filename>manager/views/employee.py
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as hmod
from account import models as amod
from . import templater
from base_app.user_util import user_check, my_account
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@my_account
def process_request(request):
'''Shows the stores'''
if request.urlparams[1] == 'delete':
u = amod.User.objects.get(id=request.urlparams[0])
u.is_active = False
u.save()
return HttpResponseRedirect('/index/')
e = ''
u = ''
try:
u = amod.User.objects.get(id=request.urlparams[0])
except:
return HttpResponseRedirect('/index/')
try:
e = amod.Employee.objects.get(user=u)
except:
return HttpResponseRedirect('/index/')
user = u
if u.is_active == False:
return Http404()
form = UserForm(initial={
'username' : u.username,
'first_name' : u.first_name,
'last_name' : u.last_name,
'email' : u.email,
'phone' : u.phone,
# 'security_question' : u.security_question,
# 'security_answer' : u.security_answer,
'is_staff' : u.is_staff,
'street1' : u.street1,
'street2' : u.street2,
'city' : u.city,
'state' : u.state,
'zipCode' : u.zipCode,
'hireDate':e.hireDate,
'salary':e.salary,
})
if request.method == 'POST':
form = UserForm(request.POST)
if form.is_valid():
#time to save the data
u.username = form.cleaned_data['username']
# u.set_password(form.cleaned_data['password'])
u.first_name = form.cleaned_data['first_name']
u.last_name = form.cleaned_data['last_name']
u.email = form.cleaned_data['email']
u.phone = form.cleaned_data['phone']
# u.security_question = form.cleaned_data['security_question']
# u.security_answer = form.cleaned_data['security_answer']
u.is_staff = form.cleaned_data['is_staff']
u.street1 = form.cleaned_data['street1']
u.street2 = form.cleaned_data['street2']
u.city = form.cleaned_data['city']
u.state = form.cleaned_data['state']
u.zipCode = form.cleaned_data['zipCode']
u.save()
e.hireDate = form.cleaned_data['hireDate']
e.salary = form.cleaned_data['salary']
e.save()
passwordForm = UserPasswordForm()
if request.method == 'POST':
passwordForm = UserPasswordForm(request.POST)
if passwordForm.is_valid():
#time to save the data
if u.check_password(passwordForm.cleaned_data['password']):
if passwordForm.cleaned_data['newpassword1'] == passwordForm.cleaned_data['newpassword2']:
u.set_password(passwordForm.cleaned_data['<PASSWORD>'])
u.save()
tvars = {
'user':user,
'form':form,
'passwordForm':passwordForm,
}
return templater.render_to_response(request, 'employee.html', tvars)
class UserForm(forms.Form):
username = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username',}))
first_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '<NAME>',}))
last_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '<NAME>',}))
email = forms.CharField(max_length=50, widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': '<EMAIL>',}))
phone = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '801-555-1234',}))
# security_question = forms.CharField(label='Security Question', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'What is your mother\'s maiden name?',}))
# security_answer = forms.CharField(label='Answer', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Smith',}))
is_staff = forms.BooleanField(label='Employee?', widget=forms.CheckboxInput(), required=False, )
street1 = forms.CharField(label = "Street 1", widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '123 Center St.',}))
street2 = forms.CharField(label = "Street 2", required = False, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '#242',}))
city = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Provo',}))
state = forms.CharField(max_length=2, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'UT',}))
zipCode = forms.IntegerField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': '84601',}))
hireDate = forms.DateField(label='Hire Date', widget=forms.DateInput(attrs={'class': 'form-control', 'placeholder': 'Hire Date',}))
salary= forms.DecimalField(widget=forms.NumberInput(attrs={'class':'form-control', 'placeholder': 'Salary',}))
class UserPasswordForm(forms.Form):
password = forms.CharField(label='Current Password', max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
newpassword1 = forms.CharField(label='New Password',max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
newpassword2 = forms.CharField(label='Repeat Password',max_length=25, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password',}))
def clean(self):
if (self.cleaned_data.get('newpassword1') !=
self.cleaned_data.get('newpassword2')):
raise forms.ValidationError("New password does not match.")
return self.cleaned_data | en | 0.553989 | Shows the stores # 'security_question' : u.security_question, # 'security_answer' : u.security_answer, #time to save the data # u.set_password(form.cleaned_data['password']) # u.security_question = form.cleaned_data['security_question'] # u.security_answer = form.cleaned_data['security_answer'] #time to save the data # security_question = forms.CharField(label='Security Question', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'What is your mother\'s maiden name?',})) # security_answer = forms.CharField(label='Answer', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Smith',})) | 2.206974 | 2 |
saleor/rest/serializers/task_result.py | Chaoslecion123/Diver | 0 | 6619852 | from django.apps import apps
from rest_flex_fields import FlexFieldsModelSerializer
__all__ = [
'TaskResultSerializer',
]
TaskResult = apps.get_model(*'django_celery_results.TaskResult'.split())
class TaskResultSerializer(FlexFieldsModelSerializer):
"""Serializer for :model:`django_celery_results.TaskResult`:
`**Fields:**`
01. `content_encoding` : `CharField`
02. `content_type` : `CharField`
03. `date_done` : `DateTimeField`
04. `hidden` : `BooleanField`
05. `id` : `AutoField`
06. `meta` : `TextField`
07. `result` : `TextField`
08. `status` : `CharField`
09. `task_args` : `TextField`
10. `task_id` : `CharField`
11. `task_kwargs` : `TextField`
12. `task_name` : `CharField`
13. `traceback` : `TextField`
`**Reverse Fields:**`
"""
class Meta:
model = TaskResult
fields = [
# Fields
'content_encoding',
'content_type',
'date_done',
'hidden',
'id',
'meta',
'result',
'status',
'task_args',
'task_id',
'task_kwargs',
'task_name',
'traceback',
# Reverse Fields
]
read_only_fields = []
# def create(self, validated_data):
# return super().create(validated_data)
# def update(self, instance, validated_data):
# return super().update(instance, validated_data)
| from django.apps import apps
from rest_flex_fields import FlexFieldsModelSerializer
__all__ = [
'TaskResultSerializer',
]
TaskResult = apps.get_model(*'django_celery_results.TaskResult'.split())
class TaskResultSerializer(FlexFieldsModelSerializer):
"""Serializer for :model:`django_celery_results.TaskResult`:
`**Fields:**`
01. `content_encoding` : `CharField`
02. `content_type` : `CharField`
03. `date_done` : `DateTimeField`
04. `hidden` : `BooleanField`
05. `id` : `AutoField`
06. `meta` : `TextField`
07. `result` : `TextField`
08. `status` : `CharField`
09. `task_args` : `TextField`
10. `task_id` : `CharField`
11. `task_kwargs` : `TextField`
12. `task_name` : `CharField`
13. `traceback` : `TextField`
`**Reverse Fields:**`
"""
class Meta:
model = TaskResult
fields = [
# Fields
'content_encoding',
'content_type',
'date_done',
'hidden',
'id',
'meta',
'result',
'status',
'task_args',
'task_id',
'task_kwargs',
'task_name',
'traceback',
# Reverse Fields
]
read_only_fields = []
# def create(self, validated_data):
# return super().create(validated_data)
# def update(self, instance, validated_data):
# return super().update(instance, validated_data)
| en | 0.079291 | Serializer for :model:`django_celery_results.TaskResult`: `**Fields:**` 01. `content_encoding` : `CharField` 02. `content_type` : `CharField` 03. `date_done` : `DateTimeField` 04. `hidden` : `BooleanField` 05. `id` : `AutoField` 06. `meta` : `TextField` 07. `result` : `TextField` 08. `status` : `CharField` 09. `task_args` : `TextField` 10. `task_id` : `CharField` 11. `task_kwargs` : `TextField` 12. `task_name` : `CharField` 13. `traceback` : `TextField` `**Reverse Fields:**` # Fields # Reverse Fields # def create(self, validated_data): # return super().create(validated_data) # def update(self, instance, validated_data): # return super().update(instance, validated_data) | 2.082102 | 2 |
importa_immagini.py | easybyte-software/konga_scripts | 0 | 6619853 | # -*- title: Utilità / Importazione immagini -*-
# -*- konga-version-min: 1.9.0-beta -*-
# -*- requires: Pillow -*-
# -*- py3k-safe -*-
# -*- coding: utf-8 -*-
import re
import os
import os.path
import uuid
import shutil
import tempfile
import io
import kongalib
import kongautil
import kongaui
from PIL import Image
TIPO_NORMALE = 1
TIPO_WEB = 2
TIPO_MINIATURA = 3
CODE_FIELD_INFO = [
( 'EB_Articoli.Codice', 'codice' ),
( 'EB_Articoli.CodiceAlternativo', 'codice alternativo' ),
( 'EB_Articoli.Barcode', 'barcode' ),
( 'EB_Articoli.CodiceArticoloFornitore', 'codice articolo fornitore' ),
]
FORM_FIELDS = [
{
'name': 'code_azienda',
'label': "Codice azienda di destinazione",
'type': 'company_code',
},
{
'name': 'fieldname',
'label': "Il nome file corrisponde al campo",
'type': 'choice',
'items': [ info[1].capitalize() for info in CODE_FIELD_INFO ],
'default': 0,
},
{
'name': 'path',
'label': "Percorso sorgente",
'type': 'dir',
}
]
def main():
params = kongaui.execute_form(FORM_FIELDS,
"Importazione immagini",
"Questo script importa tutte le immagini presenti in una cartella, qualora ad un nome file corrisponda il codice di un articolo. Le versioni web e miniatura verranno generate automaticamente dall'immagine normale.",
condition = "path and code_azienda")
if not params:
return
fieldname, fieldname_label = CODE_FIELD_INFO[params['fieldname']]
log = kongalib.Log()
client = kongautil.connect()
kongaui.open_progress('Importazione immagini in corso...')
def store(filename, type, data, code, id_art, code_art, code_azienda):
try:
client.store_binary('EB_Articoli', id_art, type, original_filename=filename, data=data, code_azienda=code_azienda)
if data is None:
name = {
TIPO_NORMALE: 'normale',
TIPO_WEB: 'web',
TIPO_MINIATURA: 'miniatura'
}[type]
if fieldname == 'EB_Articoli.Codice':
log.info("Cancellata l'immagine %s dall'articolo con codice %s" % (name, code_art))
else:
log.info("Cancellata l'immagine %s dall'articolo con codice %s e %s %s" % (name, code_art, fieldname_label, code))
else:
if fieldname == 'EB_Articoli.Codice':
log.info("Assegnata l'immagine \"%s\" all'articolo con codice %s" % (filename, code_art))
else:
log.info("Assegnata l'immagine \"%s\" all'articolo con codice %s e %s %s" % (filename, code_art, fieldname_label, code))
except Exception as e:
if data:
if fieldname == 'EB_Articoli.Codice':
log.error("Errore di assegnazione dell'immagine \"%s\" all'articolo con codice %s: %s" % (filename, code_art, str(e)))
else:
log.error("Errore di assegnazione dell'immagine \"%s\" all'articolo con codice %s e %s %s: %s" % (filename, code_art, fieldname_label, code, str(e)))
raise
client.begin_transaction()
try:
web_width, web_height = client.select_data('EB_StatoArchivi', ['EB_StatoArchivi.LarghezzaImgWeb', 'EB_StatoArchivi.AltezzaImgWeb'], kongalib.OperandEQ('EB_StatoArchivi.ref_Azienda.Codice', params['code_azienda']))[0]
files = os.listdir(params['path'])
num_files = len(files)
for index, name in enumerate(files):
filename = os.path.join(params['path'], name)
code, original_ext = os.path.splitext(name)
if kongaui.is_progress_aborted():
break
kongaui.set_progress((index * 100.0) / num_files, None, '%s (%d di %d)' % (name, index+1, num_files))
if code:
results = client.select_data('EB_Articoli', ['EB_Articoli.id', 'EB_Articoli.Codice', 'EB_Articoli.ref_Azienda.Codice'], kongalib.AND(kongalib.OperandEQ(fieldname, code), kongalib.OR(kongalib.OperandEQ('EB_Articoli.ref_Azienda.Codice', params['code_azienda']), kongalib.OperandIsNull('EB_Articoli.ref_Azienda'))))
if len(results) > 1:
codes = [ result[1] for result in results ]
log.warning(u"Il %s %s è associato a più di un articolo! (codici %s) L'immagine non verrà associata a nessun articolo" % (fieldname_label, code, ', '.join(codes)))
continue
if len(results) > 0:
id_art, code_art, code_azienda = results[0]
try:
with open(filename, 'rb') as f:
data = f.read()
bitmap = Image.open(io.BytesIO(data))
except Exception as e:
log.error('Errore di caricamento immagine da file "%s": %s' % (filename, str(e)))
continue
size = bitmap.size
if (size[0] > web_width) or (size[1] > web_height):
web_filename = None
thumb_filename = None
else:
if (size[0] > 48) or (size[1] > 48):
log.warning("L'immagine \"%s\" ha dimensioni inferiori a quelle impostate per le immagini web (%dx%d) pertanto verrà importata come immagine di tipo web (l'articolo non avrà un'immagine di tipo normale)" % (filename, web_width, web_height))
web_filename = filename
thumb_filename = None
else:
log.warning("L'immagine \"%s\" ha dimensioni inferiori alla dimensione delle miniature (48x48) pertanto verrà importata come immagine di tipo miniatura (l'articolo non avrà un'immagine di tipo normale nè una di tipo web)" % filename)
web_filename = None
thumb_filename = filename
if (size[0] > web_width) or (size[1] > web_height):
normal_data = data
temp = bitmap.copy()
temp.thumbnail((web_width, web_height))
buffer = io.BytesIO()
temp.convert('RGBA').save(buffer, 'PNG')
data = buffer.getvalue()
size = temp.size
else:
normal_data = None
if (size[0] > 48) or (size[1] > 48):
web_data = data
bitmap.thumbnail((48, 48))
temp = Image.new('RGBA', (48, 48))
temp.paste(bitmap, ((48 - bitmap.size[0]) // 2, (48 - bitmap.size[1]) // 2))
buffer = io.BytesIO()
temp.save(buffer, 'PNG')
data = buffer.getvalue()
else:
web_data = None
thumb_data = data
store(filename, TIPO_NORMALE, normal_data, code, id_art, code_art, code_azienda)
store(web_filename, TIPO_WEB, web_data, code, id_art, code_art, code_azienda)
store(thumb_filename, TIPO_MINIATURA, thumb_data, code, id_art, code_art, code_azienda)
finally:
if kongaui.is_progress_aborted():
client.rollback_transaction()
kongaui.close_progress()
else:
client.commit_transaction()
kongaui.close_progress()
kongautil.notify_data_changes('EB_Articoli')
kongautil.print_log(log, "Esito importazione immagini")
main()
| # -*- title: Utilità / Importazione immagini -*-
# -*- konga-version-min: 1.9.0-beta -*-
# -*- requires: Pillow -*-
# -*- py3k-safe -*-
# -*- coding: utf-8 -*-
import re
import os
import os.path
import uuid
import shutil
import tempfile
import io
import kongalib
import kongautil
import kongaui
from PIL import Image
TIPO_NORMALE = 1
TIPO_WEB = 2
TIPO_MINIATURA = 3
CODE_FIELD_INFO = [
( 'EB_Articoli.Codice', 'codice' ),
( 'EB_Articoli.CodiceAlternativo', 'codice alternativo' ),
( 'EB_Articoli.Barcode', 'barcode' ),
( 'EB_Articoli.CodiceArticoloFornitore', 'codice articolo fornitore' ),
]
FORM_FIELDS = [
{
'name': 'code_azienda',
'label': "Codice azienda di destinazione",
'type': 'company_code',
},
{
'name': 'fieldname',
'label': "Il nome file corrisponde al campo",
'type': 'choice',
'items': [ info[1].capitalize() for info in CODE_FIELD_INFO ],
'default': 0,
},
{
'name': 'path',
'label': "Percorso sorgente",
'type': 'dir',
}
]
def main():
params = kongaui.execute_form(FORM_FIELDS,
"Importazione immagini",
"Questo script importa tutte le immagini presenti in una cartella, qualora ad un nome file corrisponda il codice di un articolo. Le versioni web e miniatura verranno generate automaticamente dall'immagine normale.",
condition = "path and code_azienda")
if not params:
return
fieldname, fieldname_label = CODE_FIELD_INFO[params['fieldname']]
log = kongalib.Log()
client = kongautil.connect()
kongaui.open_progress('Importazione immagini in corso...')
def store(filename, type, data, code, id_art, code_art, code_azienda):
try:
client.store_binary('EB_Articoli', id_art, type, original_filename=filename, data=data, code_azienda=code_azienda)
if data is None:
name = {
TIPO_NORMALE: 'normale',
TIPO_WEB: 'web',
TIPO_MINIATURA: 'miniatura'
}[type]
if fieldname == 'EB_Articoli.Codice':
log.info("Cancellata l'immagine %s dall'articolo con codice %s" % (name, code_art))
else:
log.info("Cancellata l'immagine %s dall'articolo con codice %s e %s %s" % (name, code_art, fieldname_label, code))
else:
if fieldname == 'EB_Articoli.Codice':
log.info("Assegnata l'immagine \"%s\" all'articolo con codice %s" % (filename, code_art))
else:
log.info("Assegnata l'immagine \"%s\" all'articolo con codice %s e %s %s" % (filename, code_art, fieldname_label, code))
except Exception as e:
if data:
if fieldname == 'EB_Articoli.Codice':
log.error("Errore di assegnazione dell'immagine \"%s\" all'articolo con codice %s: %s" % (filename, code_art, str(e)))
else:
log.error("Errore di assegnazione dell'immagine \"%s\" all'articolo con codice %s e %s %s: %s" % (filename, code_art, fieldname_label, code, str(e)))
raise
client.begin_transaction()
try:
web_width, web_height = client.select_data('EB_StatoArchivi', ['EB_StatoArchivi.LarghezzaImgWeb', 'EB_StatoArchivi.AltezzaImgWeb'], kongalib.OperandEQ('EB_StatoArchivi.ref_Azienda.Codice', params['code_azienda']))[0]
files = os.listdir(params['path'])
num_files = len(files)
for index, name in enumerate(files):
filename = os.path.join(params['path'], name)
code, original_ext = os.path.splitext(name)
if kongaui.is_progress_aborted():
break
kongaui.set_progress((index * 100.0) / num_files, None, '%s (%d di %d)' % (name, index+1, num_files))
if code:
results = client.select_data('EB_Articoli', ['EB_Articoli.id', 'EB_Articoli.Codice', 'EB_Articoli.ref_Azienda.Codice'], kongalib.AND(kongalib.OperandEQ(fieldname, code), kongalib.OR(kongalib.OperandEQ('EB_Articoli.ref_Azienda.Codice', params['code_azienda']), kongalib.OperandIsNull('EB_Articoli.ref_Azienda'))))
if len(results) > 1:
codes = [ result[1] for result in results ]
log.warning(u"Il %s %s è associato a più di un articolo! (codici %s) L'immagine non verrà associata a nessun articolo" % (fieldname_label, code, ', '.join(codes)))
continue
if len(results) > 0:
id_art, code_art, code_azienda = results[0]
try:
with open(filename, 'rb') as f:
data = f.read()
bitmap = Image.open(io.BytesIO(data))
except Exception as e:
log.error('Errore di caricamento immagine da file "%s": %s' % (filename, str(e)))
continue
size = bitmap.size
if (size[0] > web_width) or (size[1] > web_height):
web_filename = None
thumb_filename = None
else:
if (size[0] > 48) or (size[1] > 48):
log.warning("L'immagine \"%s\" ha dimensioni inferiori a quelle impostate per le immagini web (%dx%d) pertanto verrà importata come immagine di tipo web (l'articolo non avrà un'immagine di tipo normale)" % (filename, web_width, web_height))
web_filename = filename
thumb_filename = None
else:
log.warning("L'immagine \"%s\" ha dimensioni inferiori alla dimensione delle miniature (48x48) pertanto verrà importata come immagine di tipo miniatura (l'articolo non avrà un'immagine di tipo normale nè una di tipo web)" % filename)
web_filename = None
thumb_filename = filename
if (size[0] > web_width) or (size[1] > web_height):
normal_data = data
temp = bitmap.copy()
temp.thumbnail((web_width, web_height))
buffer = io.BytesIO()
temp.convert('RGBA').save(buffer, 'PNG')
data = buffer.getvalue()
size = temp.size
else:
normal_data = None
if (size[0] > 48) or (size[1] > 48):
web_data = data
bitmap.thumbnail((48, 48))
temp = Image.new('RGBA', (48, 48))
temp.paste(bitmap, ((48 - bitmap.size[0]) // 2, (48 - bitmap.size[1]) // 2))
buffer = io.BytesIO()
temp.save(buffer, 'PNG')
data = buffer.getvalue()
else:
web_data = None
thumb_data = data
store(filename, TIPO_NORMALE, normal_data, code, id_art, code_art, code_azienda)
store(web_filename, TIPO_WEB, web_data, code, id_art, code_art, code_azienda)
store(thumb_filename, TIPO_MINIATURA, thumb_data, code, id_art, code_art, code_azienda)
finally:
if kongaui.is_progress_aborted():
client.rollback_transaction()
kongaui.close_progress()
else:
client.commit_transaction()
kongaui.close_progress()
kongautil.notify_data_changes('EB_Articoli')
kongautil.print_log(log, "Esito importazione immagini")
main()
| it | 0.524489 | # -*- title: Utilità / Importazione immagini -*- # -*- konga-version-min: 1.9.0-beta -*- # -*- requires: Pillow -*- # -*- py3k-safe -*- # -*- coding: utf-8 -*- | 2.050269 | 2 |
RegExemail.py | PradeepDongre/PythonCode | 0 | 6619854 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 00:22:40 2018
@author: dongrp2
"""
import re
emails = '''
<EMAIL>
<EMAIL>
<EMAIL>
'''
pattern = re.compile(r'[a-zA-Z]+@[a-zA-Z]+\.com') #1st
pattern = re.compile(r'[a-zA-Z.]+@[a-zA-Z]+\.(com|edu)') #2nd
pattern = re.compile(r'[a-zA-Z0-9.-]+@[a-zA-Z-]+\.(com|edu|net)') #2nd
pattern = re.compile(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
matches = pattern.finditer(emails)
for match in matches:
print(match) | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 00:22:40 2018
@author: dongrp2
"""
import re
emails = '''
<EMAIL>
<EMAIL>
<EMAIL>
'''
pattern = re.compile(r'[a-zA-Z]+@[a-zA-Z]+\.com') #1st
pattern = re.compile(r'[a-zA-Z.]+@[a-zA-Z]+\.(com|edu)') #2nd
pattern = re.compile(r'[a-zA-Z0-9.-]+@[a-zA-Z-]+\.(com|edu|net)') #2nd
pattern = re.compile(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
matches = pattern.finditer(emails)
for match in matches:
print(match) | en | 0.441505 | # -*- coding: utf-8 -*- Created on Mon Apr 23 00:22:40 2018
@author: dongrp2 <EMAIL>
<EMAIL>
<EMAIL> #1st #2nd #2nd | 3.342429 | 3 |
test_htmlgen/attribute.py | stungkit/python-htmlgen | 14 | 6619855 | import datetime
from enum import Enum
from unittest import TestCase
from asserts import assert_true, assert_false, assert_is_none, assert_equal, \
assert_raises
from htmlgen.attribute import (
html_attribute,
boolean_html_attribute,
int_html_attribute,
float_html_attribute,
time_html_attribute,
list_html_attribute,
data_attribute,
css_class_attribute,
enum_attribute,
)
from htmlgen.element import Element
class HTMLAttributeTest(TestCase):
def test_regular(self):
class MyElement(Element):
attr = html_attribute("data-attr")
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = "Foo"
assert_equal("Foo", element.attr)
assert_equal('<div data-attr="Foo"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
def test_regular_with_default(self):
class MyElement(Element):
attr = html_attribute("data-attr", default="Bar")
element = MyElement("div")
assert_equal("Bar", element.attr)
assert_equal("<div></div>", str(element))
element.attr = "Foo"
assert_equal("Foo", element.attr)
assert_equal('<div data-attr="Foo"></div>', str(element))
element.attr = "Bar"
assert_equal("Bar", element.attr)
assert_equal("<div></div>", str(element))
element.attr = None
assert_equal("Bar", element.attr)
assert_equal("<div></div>", str(element))
def test_boolean(self):
class MyElement(Element):
attr = boolean_html_attribute("data-attr")
element = MyElement("div")
assert_false(element.attr)
assert_equal("<div></div>", str(element))
element.attr = True
assert_true(element.attr)
assert_equal('<div data-attr="data-attr"></div>', str(element))
element.attr = False
assert_false(element.attr)
assert_equal("<div></div>", str(element))
def test_integer(self):
class MyElement(Element):
attr = int_html_attribute("data-attr")
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = 42
assert_equal(42, element.attr)
assert_equal('<div data-attr="42"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
def test_integer_with_default(self):
class MyElement(Element):
attr = int_html_attribute("data-attr", default=42)
element = MyElement("div")
assert_equal(42, element.attr)
assert_equal("<div></div>", str(element))
element.attr = 4711
assert_equal(4711, element.attr)
assert_equal('<div data-attr="4711"></div>', str(element))
element.attr = 42
assert_equal(42, element.attr)
assert_equal("<div></div>", str(element))
element.attr = None
assert_equal(42, element.attr)
assert_equal("<div></div>", str(element))
def test_float(self):
class MyElement(Element):
attr = float_html_attribute("data-attr")
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = 4.2
assert_equal(4.2, element.attr)
assert_equal('<div data-attr="4.2"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
def test_float_with_default(self):
class MyElement(Element):
attr = float_html_attribute("data-attr", default=4.2)
element = MyElement("div")
assert_equal(4.2, element.attr)
assert_equal("<div></div>", str(element))
element.attr = 47.11
assert_equal(47.11, element.attr)
assert_equal('<div data-attr="47.11"></div>', str(element))
element.attr = 4.2
assert_equal(4.2, element.attr)
assert_equal("<div></div>", str(element))
element.attr = None
assert_equal(4.2, element.attr)
assert_equal("<div></div>", str(element))
def test_time(self):
class MyElement(Element):
attr = time_html_attribute("data-time")
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = datetime.time(14, 13, 9)
assert_equal(datetime.time(14, 13, 9), element.attr)
assert_equal('<div data-time="14:13:09"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.set_attribute("data-time", "09:33:04")
assert_equal(datetime.time(9, 33, 4), element.attr)
def test_time_with_fraction(self):
class MyElement(Element):
attr = time_html_attribute("data-time")
element = MyElement("div")
element.attr = datetime.time(14, 13, 9, 123456)
assert_equal(datetime.time(14, 13, 9, 123456), element.attr)
assert_equal('<div data-time="14:13:09.123456"></div>', str(element))
def test_time__invalid_value(self):
class MyElement(Element):
attr = time_html_attribute("data-time")
element = MyElement("div")
element.set_attribute("data-time", "INVALID")
assert_is_none(element.attr)
def test_time_with_default(self):
class MyElement(Element):
attr = time_html_attribute(
"data-attr", default=datetime.time(12, 9, 34)
)
element = MyElement("div")
assert_equal(datetime.time(12, 9, 34), element.attr)
assert_equal("<div></div>", str(element))
element.attr = datetime.time(12, 9, 34)
assert_equal(datetime.time(12, 9, 34), element.attr)
assert_equal("<div></div>", str(element))
def test_list(self):
class MyElement(Element):
attr = list_html_attribute("data-attr")
element = MyElement("div")
assert_equal([], element.attr)
element.set_attribute("data-attr", "")
assert_equal([], element.attr)
element.set_attribute("data-attr", "foo,bar")
assert_equal(["foo", "bar"], element.attr)
element.attr = []
assert_equal("<div></div>", str(element))
element.attr = ["abc", "def"]
assert_equal(["abc", "def"], element.attr)
element.attr.append("ghi")
assert_equal(["abc", "def"], element.attr)
assert_equal("abc,def", element.get_attribute("data-attr"))
assert_equal('<div data-attr="abc,def"></div>', str(element))
def test_data(self):
class MyElement(Element):
attr = data_attribute("attr")
element = MyElement("div")
assert_is_none(element.get_attribute("data-attr"))
element.attr = "foo"
assert_equal("foo", element.get_attribute("data-attr"))
element.set_attribute("data-attr", "bar")
assert_equal("bar", element.attr)
def test_data_with_default(self):
class MyElement(Element):
attr = data_attribute("attr", "def")
element = MyElement("div")
element.attr = "def"
assert_is_none(element.get_attribute("data-attr"))
def test_css_class(self):
class MyElement(Element):
attr = css_class_attribute("my-class")
element = MyElement("div")
assert_false(element.attr)
element.add_css_classes("other-class")
assert_false(element.attr)
element.add_css_classes("my-class")
assert_true(element.attr)
element.attr = False
assert_false(element.has_css_class("my-class"))
element.attr = False
assert_false(element.has_css_class("my-class"))
element.attr = True
assert_true(element.has_css_class("my-class"))
element.attr = True
assert_true(element.has_css_class("my-class"))
class TestEnum(Enum):
FOO = "foo"
BAR = "bar"
class EnumAttributeTest(TestCase):
def test_enum(self):
class MyElement(Element):
attr = enum_attribute("attr", TestEnum)
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = TestEnum.BAR
assert_equal(TestEnum.BAR, element.attr)
assert_equal('<div attr="bar"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal('<div></div>', str(element))
def test_default(self):
class MyElement(Element):
attr = enum_attribute("attr", TestEnum, default=TestEnum.FOO)
element = MyElement("div")
assert_equal(TestEnum.FOO, element.attr)
assert_equal("<div></div>", str(element))
element.attr = TestEnum.BAR
assert_equal(TestEnum.BAR, element.attr)
assert_equal('<div attr="bar"></div>', str(element))
element.attr = None
assert_equal(TestEnum.FOO, element.attr)
assert_equal("<div></div>", str(element))
def test_not_an_enum(self):
with assert_raises(TypeError):
class MyElement(Element):
attr = enum_attribute("attr", "foo") # type: ignore
def test_invalid_value(self):
class MyElement(Element):
attr = enum_attribute("attr", TestEnum)
element = MyElement("div")
with assert_raises(TypeError):
element.attr = "foo" # type: ignore
| import datetime
from enum import Enum
from unittest import TestCase
from asserts import assert_true, assert_false, assert_is_none, assert_equal, \
assert_raises
from htmlgen.attribute import (
html_attribute,
boolean_html_attribute,
int_html_attribute,
float_html_attribute,
time_html_attribute,
list_html_attribute,
data_attribute,
css_class_attribute,
enum_attribute,
)
from htmlgen.element import Element
class HTMLAttributeTest(TestCase):
    """Exercise the HTML attribute descriptors from ``htmlgen.attribute``.

    Each test defines a throwaway ``Element`` subclass with one descriptor
    and checks both the Python-side value and the rendered HTML string.
    """

    def test_regular(self):
        """A plain string attribute renders only while it has a value."""
        class MyElement(Element):
            attr = html_attribute("data-attr")

        elem = MyElement("div")
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = "Foo"
        assert_equal("Foo", elem.attr)
        assert_equal('<div data-attr="Foo"></div>', str(elem))
        elem.attr = None
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_regular_with_default(self):
        """The default value is reported but never rendered into the HTML."""
        class MyElement(Element):
            attr = html_attribute("data-attr", default="Bar")

        elem = MyElement("div")
        assert_equal("Bar", elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = "Foo"
        assert_equal("Foo", elem.attr)
        assert_equal('<div data-attr="Foo"></div>', str(elem))
        elem.attr = "Bar"
        assert_equal("Bar", elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = None
        assert_equal("Bar", elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_boolean(self):
        """Boolean attributes render as ``name="name"`` when set."""
        class MyElement(Element):
            attr = boolean_html_attribute("data-attr")

        elem = MyElement("div")
        assert_false(elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = True
        assert_true(elem.attr)
        assert_equal('<div data-attr="data-attr"></div>', str(elem))
        elem.attr = False
        assert_false(elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_integer(self):
        """Integer attributes round-trip through their string rendering."""
        class MyElement(Element):
            attr = int_html_attribute("data-attr")

        elem = MyElement("div")
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = 42
        assert_equal(42, elem.attr)
        assert_equal('<div data-attr="42"></div>', str(elem))
        elem.attr = None
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_integer_with_default(self):
        """Setting the default (or None) suppresses the rendered attribute."""
        class MyElement(Element):
            attr = int_html_attribute("data-attr", default=42)

        elem = MyElement("div")
        assert_equal(42, elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = 4711
        assert_equal(4711, elem.attr)
        assert_equal('<div data-attr="4711"></div>', str(elem))
        elem.attr = 42
        assert_equal(42, elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = None
        assert_equal(42, elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_float(self):
        """Float attributes behave like the integer variant."""
        class MyElement(Element):
            attr = float_html_attribute("data-attr")

        elem = MyElement("div")
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = 4.2
        assert_equal(4.2, elem.attr)
        assert_equal('<div data-attr="4.2"></div>', str(elem))
        elem.attr = None
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_float_with_default(self):
        """A float default is reported but never rendered."""
        class MyElement(Element):
            attr = float_html_attribute("data-attr", default=4.2)

        elem = MyElement("div")
        assert_equal(4.2, elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = 47.11
        assert_equal(47.11, elem.attr)
        assert_equal('<div data-attr="47.11"></div>', str(elem))
        elem.attr = 4.2
        assert_equal(4.2, elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = None
        assert_equal(4.2, elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_time(self):
        """Time attributes serialize to HH:MM:SS and parse back."""
        class MyElement(Element):
            attr = time_html_attribute("data-time")

        elem = MyElement("div")
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = datetime.time(14, 13, 9)
        assert_equal(datetime.time(14, 13, 9), elem.attr)
        assert_equal('<div data-time="14:13:09"></div>', str(elem))
        elem.attr = None
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.set_attribute("data-time", "09:33:04")
        assert_equal(datetime.time(9, 33, 4), elem.attr)

    def test_time_with_fraction(self):
        """Microseconds are preserved in the serialized form."""
        class MyElement(Element):
            attr = time_html_attribute("data-time")

        elem = MyElement("div")
        elem.attr = datetime.time(14, 13, 9, 123456)
        assert_equal(datetime.time(14, 13, 9, 123456), elem.attr)
        assert_equal('<div data-time="14:13:09.123456"></div>', str(elem))

    def test_time__invalid_value(self):
        """An unparsable attribute string reads back as None."""
        class MyElement(Element):
            attr = time_html_attribute("data-time")

        elem = MyElement("div")
        elem.set_attribute("data-time", "INVALID")
        assert_is_none(elem.attr)

    def test_time_with_default(self):
        """A time default is reported but never rendered."""
        class MyElement(Element):
            attr = time_html_attribute(
                "data-attr", default=datetime.time(12, 9, 34)
            )

        elem = MyElement("div")
        assert_equal(datetime.time(12, 9, 34), elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = datetime.time(12, 9, 34)
        assert_equal(datetime.time(12, 9, 34), elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_list(self):
        """List attributes round-trip through a comma-separated string."""
        class MyElement(Element):
            attr = list_html_attribute("data-attr")

        elem = MyElement("div")
        assert_equal([], elem.attr)
        elem.set_attribute("data-attr", "")
        assert_equal([], elem.attr)
        elem.set_attribute("data-attr", "foo,bar")
        assert_equal(["foo", "bar"], elem.attr)
        elem.attr = []
        assert_equal("<div></div>", str(elem))
        elem.attr = ["abc", "def"]
        assert_equal(["abc", "def"], elem.attr)
        # Mutating the returned list must not affect the stored attribute.
        elem.attr.append("ghi")
        assert_equal(["abc", "def"], elem.attr)
        assert_equal("abc,def", elem.get_attribute("data-attr"))
        assert_equal('<div data-attr="abc,def"></div>', str(elem))

    def test_data(self):
        """``data_attribute`` prepends the ``data-`` prefix automatically."""
        class MyElement(Element):
            attr = data_attribute("attr")

        elem = MyElement("div")
        assert_is_none(elem.get_attribute("data-attr"))
        elem.attr = "foo"
        assert_equal("foo", elem.get_attribute("data-attr"))
        elem.set_attribute("data-attr", "bar")
        assert_equal("bar", elem.attr)

    def test_data_with_default(self):
        """Assigning the default value leaves the attribute unset."""
        class MyElement(Element):
            attr = data_attribute("attr", "def")

        elem = MyElement("div")
        elem.attr = "def"
        assert_is_none(elem.get_attribute("data-attr"))

    def test_css_class(self):
        """The descriptor mirrors presence of a single CSS class."""
        class MyElement(Element):
            attr = css_class_attribute("my-class")

        elem = MyElement("div")
        assert_false(elem.attr)
        elem.add_css_classes("other-class")
        assert_false(elem.attr)
        elem.add_css_classes("my-class")
        assert_true(elem.attr)
        # Assignments are idempotent in both directions.
        elem.attr = False
        assert_false(elem.has_css_class("my-class"))
        elem.attr = False
        assert_false(elem.has_css_class("my-class"))
        elem.attr = True
        assert_true(elem.has_css_class("my-class"))
        elem.attr = True
        assert_true(elem.has_css_class("my-class"))
class TestEnum(Enum):
    """Two-member enum used as a fixture by ``EnumAttributeTest``."""

    FOO = "foo"
    BAR = "bar"
class EnumAttributeTest(TestCase):
    """Exercise ``enum_attribute``, which stores Enum members as attributes."""

    def test_enum(self):
        """Members render via their ``value``; None clears the attribute."""
        class MyElement(Element):
            attr = enum_attribute("attr", TestEnum)

        elem = MyElement("div")
        assert_is_none(elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = TestEnum.BAR
        assert_equal(TestEnum.BAR, elem.attr)
        assert_equal('<div attr="bar"></div>', str(elem))
        elem.attr = None
        assert_is_none(elem.attr)
        assert_equal('<div></div>', str(elem))

    def test_default(self):
        """The default member is reported but never rendered."""
        class MyElement(Element):
            attr = enum_attribute("attr", TestEnum, default=TestEnum.FOO)

        elem = MyElement("div")
        assert_equal(TestEnum.FOO, elem.attr)
        assert_equal("<div></div>", str(elem))
        elem.attr = TestEnum.BAR
        assert_equal(TestEnum.BAR, elem.attr)
        assert_equal('<div attr="bar"></div>', str(elem))
        elem.attr = None
        assert_equal(TestEnum.FOO, elem.attr)
        assert_equal("<div></div>", str(elem))

    def test_not_an_enum(self):
        """Declaring the descriptor with a non-Enum type raises TypeError."""
        with assert_raises(TypeError):
            class MyElement(Element):
                attr = enum_attribute("attr", "foo")  # type: ignore

    def test_invalid_value(self):
        """Assigning a non-member value raises TypeError."""
        class MyElement(Element):
            attr = enum_attribute("attr", TestEnum)

        elem = MyElement("div")
        with assert_raises(TypeError):
            elem.attr = "foo"  # type: ignore
| it | 0.193911 | # type: ignore # type: ignore | 2.898955 | 3 |
segmentation_rt/mask2rs/__init__.py | BrouBoni/segmentation_RT | 6 | 6619856 | from .mask import Mask
from .rtstruct import RTStruct
__all__ = [
'Mask',
'RTStruct',
]
| from .mask import Mask
from .rtstruct import RTStruct
__all__ = [
'Mask',
'RTStruct',
]
| none | 1 | 1.17268 | 1 | |
loop/prime number from 1 to n.py | PraghadeshManivannan/Python | 0 | 6619857 | n = int(input("Enter the range:"))
def is_prime(num):
    """Return True when ``num`` is a prime number.

    A prime has exactly two divisors, 1 and itself, so numbers below 2
    are not prime.  The original loop used trial division but had no
    lower bound, which wrongly reported 1 as prime (its divisor range
    ``range(2, 1)`` is empty).
    """
    if num < 2:
        return False
    # Trial division: any proper divisor of num is at most num // 2.
    for divisor in range(2, num // 2 + 1):
        if num % divisor == 0:
            return False
    return True


def main():
    """Prompt for an upper bound n and classify 1..n as prime or not."""
    n = int(input("Enter the range:"))
    for i in range(1, n + 1):
        if is_prime(i):
            print("The number", i, "is prime")
        else:
            print("The number", i, "is not prime")


if __name__ == "__main__":
    # Guard so importing this module does not block on input().
    main()
| n = int(input("Enter the range:"))
for i in range(1,n+1):
m = (i//2)+1
c = 0
for b in range(2,m):
if i%b == 0:
c = 1
if c == 0:
print("The number",i,"is prime")
else:
print("The number",i,"is not prime")
| none | 1 | 3.980395 | 4 | |
brl_gym/envs/mujoco/hopper.py | gilwoolee/brl_gym | 2 | 6619858 | import numpy as np
import os
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.misc import autoargs
from rllab.misc import logger
from rllab.misc.overrides import overrides
from mujoco_py import MjViewer
from gym import utils
# states: [
# 0: z-coord,
# 1: x-coord (forward distance),
# 2: forward pitch along y-axis,
# 6: z-vel (up = +),
# 7: xvel (forward = +)
class HopperEnv(MujocoEnv, utils.EzPickle):
@autoargs.arg('alive_coeff', type=float,
help='reward coefficient for being alive')
@autoargs.arg('ctrl_cost_coeff', type=float,
help='cost coefficient for controls')
def __init__(
self,
alive_coeff=1,
ctrl_cost_coeff=0.01,
*args, **kwargs):
self.alive_coeff = alive_coeff
self.ctrl_cost_coeff = ctrl_cost_coeff
xml_string = kwargs.get("xml_string", "")
curr_dir = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(curr_dir, 'assets/hopper.xml')
MujocoEnv.__init__(self, model_path, frame_skip=5, xml_string=xml_string)
utils.EzPickle.__init__(self)
def _get_obs(self):
return np.concatenate([
self.data.qpos[0:1].flat,
self.data.qpos[2:].flat,
np.clip(self.data.qvel, -10, 10).flat,
np.clip(self.data.qfrc_constraint, -10, 10).flat,
self.data.body_xpos[0].flat,
])
def _step(self, action):
self.do_simulation(action, self.frame_skip)
next_obs = self._get_obs()
lb, ub = self.action_bounds
scaling = (ub - lb) * 0.5
vel = self.data.qvel[0]
ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(np.square(action / scaling))
reward = vel + self.alive_coeff - ctrl_cost
state = self.state_vector()
notdone = np.isfinite(state).all() and \
(np.abs(state[3:]) < 100).all() and (state[0] > .7) and \
(abs(state[2]) < .2)
done = not notdone
return next_obs, reward, done, dict(forward_reward=reward, ctrl_cost=ctrl_cost)
def reset_model(self, randomize=True):
nq = self.init_qpos.shape[0]
nv = self.init_qvel.shape[0]
if randomize:
qpos = self.init_qpos + self.np_random.uniform(size=nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(nv) * .1
else:
qpos = self.init_qpos
qvel = self.init_qvel
self.set_state(qpos, qvel)
return self._get_obs()
def mj_viewer_setup(self):
self.viewer = MjViewer(self.sim)
self.viewer.cam.trackbodyid = 1
self.viewer.cam.type = 1
self.sim.forward()
self.viewer.cam.distance = self.model.stat.extent*1.2
@overrides
def log_diagnostics(self, paths):
progs = [
path["observations"][-1][-3] - path["observations"][0][-3]
for path in paths
]
logger.record_tabular('AverageForwardProgress', np.mean(progs))
logger.record_tabular('MaxForwardProgress', np.max(progs))
logger.record_tabular('MinForwardProgress', np.min(progs))
logger.record_tabular('StdForwardProgress', np.std(progs))
| import numpy as np
import os
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab.misc import autoargs
from rllab.misc import logger
from rllab.misc.overrides import overrides
from mujoco_py import MjViewer
from gym import utils
# states: [
# 0: z-coord,
# 1: x-coord (forward distance),
# 2: forward pitch along y-axis,
# 6: z-vel (up = +),
# 7: xvel (forward = +)
class HopperEnv(MujocoEnv, utils.EzPickle):
    """One-legged hopper locomotion task on top of rllab's MujocoEnv.

    Observations concatenate qpos[0], qpos[2:], clipped joint velocities,
    clipped constraint forces, and the position of body 0 (see _get_obs).
    """

    @autoargs.arg('alive_coeff', type=float,
                  help='reward coefficient for being alive')
    @autoargs.arg('ctrl_cost_coeff', type=float,
                  help='cost coefficient for controls')
    def __init__(self, alive_coeff=1, ctrl_cost_coeff=0.01, *args, **kwargs):
        self.alive_coeff = alive_coeff
        self.ctrl_cost_coeff = ctrl_cost_coeff
        xml_string = kwargs.get("xml_string", "")
        # Model XML ships alongside this module.
        here = os.path.dirname(os.path.abspath(__file__))
        MujocoEnv.__init__(self, os.path.join(here, 'assets/hopper.xml'),
                           frame_skip=5, xml_string=xml_string)
        utils.EzPickle.__init__(self)

    def _get_obs(self):
        """Build the observation vector from the current simulator state."""
        return np.concatenate([
            self.data.qpos[0:1].flat,
            self.data.qpos[2:].flat,
            np.clip(self.data.qvel, -10, 10).flat,
            np.clip(self.data.qfrc_constraint, -10, 10).flat,
            self.data.body_xpos[0].flat,
        ])

    def _step(self, action):
        """Advance the simulation and return (obs, reward, done, info)."""
        self.do_simulation(action, self.frame_skip)
        next_obs = self._get_obs()
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        forward_vel = self.data.qvel[0]
        ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(
            np.square(action / scaling))
        reward = forward_vel + self.alive_coeff - ctrl_cost
        # Episode ends when the state diverges or the hopper falls/tilts.
        state = self.state_vector()
        healthy = (np.isfinite(state).all()
                   and (np.abs(state[3:]) < 100).all()
                   and state[0] > .7
                   and abs(state[2]) < .2)
        return next_obs, reward, not healthy, dict(forward_reward=reward,
                                                   ctrl_cost=ctrl_cost)

    def reset_model(self, randomize=True):
        """Reset joints to the initial pose, optionally with small noise."""
        if randomize:
            nq = self.init_qpos.shape[0]
            nv = self.init_qvel.shape[0]
            qpos = self.init_qpos + self.np_random.uniform(
                size=nq, low=-.1, high=.1)
            qvel = self.init_qvel + self.np_random.randn(nv) * .1
        else:
            qpos, qvel = self.init_qpos, self.init_qvel
        self.set_state(qpos, qvel)
        return self._get_obs()

    def mj_viewer_setup(self):
        """Configure the MuJoCo viewer to track the hopper body."""
        self.viewer = MjViewer(self.sim)
        self.viewer.cam.trackbodyid = 1
        self.viewer.cam.type = 1
        self.sim.forward()
        self.viewer.cam.distance = self.model.stat.extent * 1.2

    @overrides
    def log_diagnostics(self, paths):
        """Log statistics of forward progress over a batch of rollouts."""
        progs = [path["observations"][-1][-3] - path["observations"][0][-3]
                 for path in paths]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
| en | 0.858661 | # states: [ # 0: z-coord, # 1: x-coord (forward distance), # 2: forward pitch along y-axis, # 6: z-vel (up = +), # 7: xvel (forward = +) | 1.803697 | 2 |
safedelete/collector.py | AktisIntelligence/django-safedelete | 4 | 6619859 | from django.db import router
from django.db.models.deletion import Collector
def get_collector(objs):
"""
Create a collector for the given objects.
The collector contains all the objects related to the objects given as input that would need to be modified if we
deleted the input objects:
- if they need to be deleted by cascade they are in `collector.data` (ordered dictionary) and `collector.nested()`
(as a nested list)
- if they need to be updated (case of on_delete=models.SET_NULL for example) they are in `collector.field_updates`
When calling `collector.delete()`, it goes through both types and delete/update the related objects.
Note that by doing that it does not call the model delete/save methods.
Note that `collector.data` also contains the object itself.
"""
# Assume we have at least one object (which is fine since we control where this method is called)
collector = Collector(using=router.db_for_write(objs[0]))
collector.collect(objs)
collector.sort()
return collector
| from django.db import router
from django.db.models.deletion import Collector
def get_collector(objs):
    """Build a Django deletion ``Collector`` for ``objs`` without deleting.

    The returned collector enumerates everything that *would* change if the
    given objects were deleted:

    * cascade deletions appear in ``collector.data`` (ordered dict) and
      ``collector.nested()`` (nested list) -- including the input objects;
    * field updates (e.g. ``on_delete=models.SET_NULL``) appear in
      ``collector.field_updates``.

    Calling ``collector.delete()`` applies both kinds of change directly,
    bypassing the models' own ``delete()``/``save()`` methods.

    ``objs`` must be non-empty; the first object picks the write database.
    """
    write_db = router.db_for_write(objs[0])
    collector = Collector(using=write_db)
    collector.collect(objs)
    collector.sort()
    return collector
| en | 0.949703 | Create a collector for the given objects. The collector contains all the objects related to the objects given as input that would need to be modified if we deleted the input objects: - if they need to be deleted by cascade they are in `collector.data` (ordered dictionary) and `collector.nested()` (as a nested list) - if they need to be updated (case of on_delete=models.SET_NULL for example) they are in `collector.field_updates` When calling `collector.delete()`, it goes through both types and delete/update the related objects. Note that by doing that it does not call the model delete/save methods. Note that `collector.data` also contains the object itself. # Assume we have at least one object (which is fine since we control where this method is called) | 2.931283 | 3 |
csrv/model/cards/corp/card01065.py | mrroach/CentralServer | 0 | 6619860 | <reponame>mrroach/CentralServer
from csrv.model import actions
from csrv.model import cost
from csrv.model import errors
from csrv.model import events
from csrv.model import modifiers
from csrv.model import parameters
from csrv.model import timing_phases
from csrv.model.cards import card_info
from csrv.model.cards import upgrade
class BoostIceStrengthAction(actions.Action):
    """Pay X credits to give a piece of ice +X strength.

    The boost is applied via an ``IceStrengthModifier`` that lasts until
    the corp's discard phase.
    """

    REQUEST_CLASS = parameters.VariableCreditCostRequest

    def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
        if not response or response.credits < 0:
            raise errors.InvalidResponse('You must choose an amount to spend')
        spent = response.credits
        self.cost = cost.SimpleCost(self.game, self.player, credits=spent)
        actions.Action.resolve(self, response,
                               ignore_clicks=ignore_clicks,
                               ignore_all_costs=ignore_all_costs)
        modifiers.IceStrengthModifier(self.game, spent, card=self.card,
                                      until=events.CorpDiscardPhase)
class ChooseIceToBoost(timing_phases.BasePhase):
    """Choose a piece of rezzed ice protecting this server."""

    NULL_OK = False

    def __init__(self, game, player, server):
        timing_phases.BasePhase.__init__(self, game, player)
        self.server = server

    def choices(self, refresh=False):
        """Return one boost action per rezzed ice on this server (cached)."""
        if self._choices is None or refresh:
            self._choices = [
                BoostIceStrengthAction(self.game, self.player, ice)
                for ice in self.server.ice.cards
                if ice.is_rezzed
            ]
        return self._choices

    def resolve(self, choice, response):
        timing_phases.BasePhase.resolve(self, choice, response)
        # Any concrete choice finishes this phase.
        if choice:
            self.end_phase()
class Card01065Action(actions.Action):
    """Trash Card01065, then let the corp boost ice on its server."""

    DESCRIPTION = 'Trash card01065 and choose ice to boost'

    def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
        actions.Action.resolve(self, response,
                               ignore_clicks=ignore_clicks,
                               ignore_all_costs=ignore_all_costs)
        # Queue the ice-selection phase for the server hosting this card.
        hosting_server = self.card.location.parent
        self.game.insert_next_phase(
            ChooseIceToBoost(self.game, self.player, server=hosting_server))
        self.card.trash()
class Card01065(upgrade.Upgrade):
    """Corp upgrade that can be trashed to boost rezzed ice strength."""

    NAME = u'Card01065'
    SET = card_info.CORE
    NUMBER = 65
    SIDE = card_info.CORP
    FACTION = card_info.ROBOCORP
    INFLUENCE = 1
    UNIQUE = False
    KEYWORDS = {card_info.CONNECTION}
    COST = 0
    IMAGE_SRC = '01065.png'
    TRASH_COST = 2
    # While rezzed, offers its trash ability during the corp's action window.
    WHEN_REZZED_PROVIDES_CHOICES_FOR = {
        timing_phases.CorpUseAbilities: 'card01065_actions',
    }

    def build_actions(self):
        upgrade.Upgrade.build_actions(self)
        self._card01065_action = Card01065Action(
            self.game, self.player, card=self)

    def card01065_actions(self):
        """Return the ability choices this card contributes."""
        return [self._card01065_action]
| from csrv.model import actions
from csrv.model import cost
from csrv.model import errors
from csrv.model import events
from csrv.model import modifiers
from csrv.model import parameters
from csrv.model import timing_phases
from csrv.model.cards import card_info
from csrv.model.cards import upgrade
class BoostIceStrengthAction(actions.Action):
REQUEST_CLASS = parameters.VariableCreditCostRequest
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
if not response or response.credits < 0:
raise errors.InvalidResponse('You must choose an amount to spend')
self.cost = cost.SimpleCost(self.game, self.player,
credits=response.credits)
actions.Action.resolve(
self, response,
ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
modifiers.IceStrengthModifier(
self.game, response.credits,
card=self.card, until=events.CorpDiscardPhase)
class ChooseIceToBoost(timing_phases.BasePhase):
"""Choose a piece of rezzed ice protecting this server."""
NULL_OK = False
def __init__(self, game, player, server):
timing_phases.BasePhase.__init__(self, game, player)
self.server = server
def choices(self, refresh=False):
if self._choices is None or refresh:
self._choices = []
for ice in self.server.ice.cards:
if ice.is_rezzed:
self._choices.append(
BoostIceStrengthAction(self.game, self.player, ice))
return self._choices
def resolve(self, choice, response):
timing_phases.BasePhase.resolve(self, choice, response)
if choice:
self.end_phase()
class Card01065Action(actions.Action):
DESCRIPTION = 'Trash card01065 and choose ice to boost'
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
actions.Action.resolve(
self, response,
ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
self.game.insert_next_phase(
ChooseIceToBoost(self.game, self.player,
server=self.card.location.parent))
self.card.trash()
class Card01065(upgrade.Upgrade):
NAME = u'Card01065'
SET = card_info.CORE
NUMBER = 65
SIDE = card_info.CORP
FACTION = card_info.ROBOCORP
INFLUENCE = 1
UNIQUE = False
KEYWORDS = set([
card_info.CONNECTION,
])
COST = 0
IMAGE_SRC = '01065.png'
TRASH_COST = 2
WHEN_REZZED_PROVIDES_CHOICES_FOR = {
timing_phases.CorpUseAbilities: 'card01065_actions',
}
def build_actions(self):
upgrade.Upgrade.build_actions(self)
self._card01065_action = Card01065Action(
self.game, self.player, card=self)
def card01065_actions(self):
return [self._card01065_action] | en | 0.77313 | Choose a piece of rezzed ice protecting this server. | 2.340252 | 2 |
src/slack_request.py | suwa-sh/lambda-slack-notification | 0 | 6619861 | # -*- coding: utf-8 -*-
import json
import logging
logger = logging.getLogger()
class SlackRequest(object):
def __init__(self, event):
logger.debug("{} - event:{}".format(__name__, json.dumps(event)))
# validation
if 'text' not in event:
raise ValueError("event.text is required. attachment: {}".format(event))
self.text = event['text']
self.attachment = None
if 'attachment' in event:
self.attachment = Attachment(event['attachment'])
class Attachment(object):
def __init__(self, attachment):
logger.debug("{} - attachment:{}".format(__name__, json.dumps(attachment)))
# validation
if 'status' not in attachment:
raise ValueError("event.attachment.status is required. attachment: {}".format(attachment))
if 'fields' not in attachment:
raise ValueError("event.attachment.fields is required. attachment: {}".format(attachment))
self.color = self._parse_status(attachment['status'])
self.fields = self._parse_fields(attachment['fields'])
self.actions = None
if 'links' in attachment:
self.actions = self._parse_links(attachment['links'])
@staticmethod
def _parse_status(status):
if status == "success":
return "good"
if status == "warning":
return "warning"
if status == "error":
return "danger"
raise ValueError("status: {} is not supported.".format(status))
def _parse_fields(self, fields):
parsed_fields = []
for field in fields:
parsed_fields.append(self._parse_field(field))
return parsed_fields
def _parse_links(self, links):
actions = []
for link in links:
actions.append(self._parse_link(link))
return actions
def _parse_field(self, field):
# validation
if 'title' not in field:
raise ValueError("event.attachment.fields[].title is required. attachment: {}".format(field))
if 'value' not in field:
raise ValueError("event.attachment.fields[].value is required. attachment: {}".format(field))
# default value
is_short = True
if 'is_short' in field:
is_short = field['is_short']
# parse
return {"title": field['title'], "value": field['value'], "short": is_short}
def _parse_link(self, link):
# validation
if 'text' not in link:
raise ValueError("event.attachment.links[].text is required. attachment: {}".format(link))
if 'url' not in link:
raise ValueError("event.attachment.links[].url is required. attachment: {}".format(link))
# parse
return {"type": "button", "text": link['text'], "url": link['url']}
| # -*- coding: utf-8 -*-
import json
import logging
logger = logging.getLogger()
class SlackRequest(object):
    """Slack notification request parsed from a Lambda ``event`` dict.

    ``event['text']`` is mandatory; ``event['attachment']`` is optional and,
    when present, is parsed into an :class:`Attachment`.
    """

    def __init__(self, event):
        logger.debug("{} - event:{}".format(__name__, json.dumps(event)))
        # validation: 'text' is the only required key
        if 'text' not in event:
            raise ValueError("event.text is required. attachment: {}".format(event))
        self.text = event['text']
        self.attachment = (Attachment(event['attachment'])
                           if 'attachment' in event else None)
class Attachment(object):
    """Slack message attachment built from ``event['attachment']``.

    Requires ``status`` and ``fields``; ``links`` is optional and becomes
    Slack button actions.
    """

    def __init__(self, attachment):
        logger.debug("{} - attachment:{}".format(__name__, json.dumps(attachment)))
        # validation of the required keys
        if 'status' not in attachment:
            raise ValueError("event.attachment.status is required. attachment: {}".format(attachment))
        if 'fields' not in attachment:
            raise ValueError("event.attachment.fields is required. attachment: {}".format(attachment))
        self.color = self._parse_status(attachment['status'])
        self.fields = self._parse_fields(attachment['fields'])
        self.actions = (self._parse_links(attachment['links'])
                        if 'links' in attachment else None)

    @staticmethod
    def _parse_status(status):
        """Map an event status keyword to a Slack attachment color."""
        colors = {"success": "good", "warning": "warning", "error": "danger"}
        if status not in colors:
            raise ValueError("status: {} is not supported.".format(status))
        return colors[status]

    def _parse_fields(self, fields):
        """Convert every field entry to Slack's field format."""
        return [self._parse_field(field) for field in fields]

    def _parse_links(self, links):
        """Convert every link entry to a Slack button action."""
        return [self._parse_link(link) for link in links]

    def _parse_field(self, field):
        """Validate one field entry and convert it to Slack's field format."""
        if 'title' not in field:
            raise ValueError("event.attachment.fields[].title is required. attachment: {}".format(field))
        if 'value' not in field:
            raise ValueError("event.attachment.fields[].value is required. attachment: {}".format(field))
        # 'short' defaults to True when the event does not specify is_short.
        return {"title": field['title'],
                "value": field['value'],
                "short": field.get('is_short', True)}

    def _parse_link(self, link):
        """Validate one link entry and convert it to a Slack button action."""
        if 'text' not in link:
            raise ValueError("event.attachment.links[].text is required. attachment: {}".format(link))
        if 'url' not in link:
            raise ValueError("event.attachment.links[].url is required. attachment: {}".format(link))
        return {"type": "button", "text": link['text'], "url": link['url']}
| en | 0.283293 | # -*- coding: utf-8 -*- # validation # validation # validation # default value # parse # validation # parse | 2.680546 | 3 |
account.py | Kingsly62/Password-Locker | 0 | 6619862 | <gh_stars>0
class Accounts:
    """A user account stored in a shared in-memory registry."""

    # Class-level registry shared by all instances: every saved account
    # lives in this list.
    user_accounts = []

    def __init__(self, firstname, lastname, email, username):
        self.firstname = firstname
        self.lastname = lastname
        self.email = email
        self.username = username

    def save_user(self):
        """Add this account to the shared registry."""
        Accounts.user_accounts.append(self)

    def delete_user(self):
        """Remove this account from the shared registry."""
        Accounts.user_accounts.remove(self)

    @classmethod
    def find_by_username(cls, username):
        """Return the saved account matching ``username``, or None."""
        for account in cls.user_accounts:
            if account.username == username:
                return account
        return None

    @classmethod
    def account_exists(cls, username):
        """Return True when a saved account has the given ``username``."""
        return any(account.username == username
                   for account in cls.user_accounts)

    @classmethod
    def display_accounts(cls):
        """Return the list of all saved accounts."""
        return cls.user_accounts
| class Accounts:
def __init__(self, firstname, lastname, email, username):
self.firstname = firstname
self.lastname = lastname
self.email = email
self.username = username
user_accounts = []
def save_user(self):
Accounts.user_accounts.append(self)
'''
save_user saves new user
'''
def delete_user(self):
'''
used to delete new user account
'''
Accounts.user_accounts.remove(self)
@classmethod
def find_by_username(cls, username):
'''
used to check username provided whether its true and returns the accounts if its true
'''
for account in cls.user_accounts:
if account.username == username:
return account
@classmethod
def account_exists(cls, username):
'''
loops through the function to check whether the account provided is true
'''
for account in cls.user_accounts:
if account.username == username:
return True
return False
@classmethod
def display_accounts(cls):
return cls.user_accounts
'''
displays the account of the user
''' | en | 0.810062 | save_user saves new user used to delete new user account used to check username provided whether its true and returns the accounts if its true loops through the function to check whether the account provided is true displays the account of the user | 4.10257 | 4 |
sp_experiment/tests/test_define_payoff_settings.py | sappelhoff/sp_psychopy | 1 | 6619863 | """Testing the setup of the payoff distributions."""
from collections import OrderedDict
import pytest
import numpy as np
from sp_experiment.define_payoff_settings import (get_payoff_settings,
get_payoff_dict,
get_random_payoff_settings,
)
@pytest.mark.parametrize('ev_diff', [0.1, 0.9, 7.])
def test_get_payoff_settings(ev_diff):
    """Test the setup of payoff distributions."""
    settings = get_payoff_settings(ev_diff)
    # settings must be 2D: one row per candidate setting, eight columns
    assert settings.ndim == 2
    assert settings.shape[-1] == 8
    assert settings.shape[0] >= 1
    # columns 2, 3, 6, 7 hold probabilities in {0.1, 0.2, ..., 0.9}
    allowed_probs = np.round(np.arange(0.1, 1, 0.1), 1)
    for prob in settings[0, [2, 3, 6, 7]]:
        assert prob in allowed_probs
    # columns 0, 1, 4, 5 hold magnitudes in 1..9, all four distinct
    magnitudes = list(settings[0, [0, 1, 4, 5]])
    for magnitude in magnitudes:
        assert magnitude in range(1, 10)
    assert len(np.unique(magnitudes)) == 4
def test_get_payoff_dict():
    """Test getting a payoff_dict off a setup."""
    first_setting = get_payoff_settings(0.1)[0, :]
    payoff_dict = get_payoff_dict(first_setting)
    # the payoff dict must be ordered and hold ten outcomes per option
    assert isinstance(payoff_dict, OrderedDict)
    option_outcomes = list(payoff_dict.values())
    assert len(option_outcomes[0]) == 10
    assert len(option_outcomes[1]) == 10
def _simulate_run(rand_payoff_settings, n_samples=12, seed=None):
    """Simulate a participant with 50% 50% left right tendency."""
    rng = np.random.RandomState(seed)
    actions = []
    outcomes = []
    for setting in rand_payoff_settings:
        payoff_dict = get_payoff_dict(setting)
        for _ in range(n_samples):
            # pick a side at random, then sample an outcome for that side
            side = rng.choice((0, 1))
            actions.append(side)
            outcomes.append(rng.choice(payoff_dict[side]))
    # encode the chosen side in the sign of the outcome:
    # negative = "left" (action 0), positive = "right" (action 1)
    return np.asarray(outcomes) * (np.asarray(actions) * 2 - 1)
def _make_class_hist(stim_classes):
"""Turn stim_classes into hist."""
# Make a histogram of which stimulus_classes we have collected so far
bins = np.hstack((np.arange(-9, 0), np.arange(1, 11)))
stim_class_hist = np.histogram(stim_classes, bins)
# Make an array from the hist and sort it
stim_class_arr = np.vstack((stim_class_hist[0], stim_class_hist[1][:-1])).T
stim_class_arr_sorted = stim_class_arr[stim_class_arr[:, 0].argsort()]
return stim_class_arr_sorted
def test_balancing():
    """Test that we can get a balanced stimulus selection."""
    seed = 1
    max_ntrls = 100
    payoff_settings = get_payoff_settings(0.9)

    def _spread(settings):
        """Simulate a run and return the count spread of its class histogram."""
        classes = _simulate_run(settings, n_samples=12, seed=seed)
        hist = _make_class_hist(classes)
        return np.diff(hist[[0, -1], 0])

    # No balancing at all: a plain random permutation of the settings.
    # This will leave a few stim_classes never shown.
    rng = np.random.RandomState(seed)
    shuffled = payoff_settings.copy()
    shuffled = shuffled[rng.permutation(max_ntrls), :]
    diff_none = _spread(shuffled)

    # some balancing
    diff_some = _spread(get_random_payoff_settings(max_ntrls,
                                                   payoff_settings,
                                                   -1,
                                                   seed))

    # most balancing
    diff_most = _spread(get_random_payoff_settings(max_ntrls,
                                                   payoff_settings,
                                                   0.6,
                                                   seed))

    # stronger balancing must shrink the spread monotonically
    assert diff_none > diff_some
    assert diff_some > diff_most

    with pytest.raises(RuntimeError, match='We want to randomly pick 10'):
        get_random_payoff_settings(180, payoff_settings, 1)
| """Testing the setup of the payoff distributions."""
from collections import OrderedDict
import pytest
import numpy as np
from sp_experiment.define_payoff_settings import (get_payoff_settings,
get_payoff_dict,
get_random_payoff_settings,
)
@pytest.mark.parametrize('ev_diff', [0.1, 0.9, 7.])
def test_get_payoff_settings(ev_diff):
"""Test the setup of payoff distributions."""
payoff_settings = get_payoff_settings(ev_diff)
assert payoff_settings.ndim == 2
assert payoff_settings.shape[-1] == 8
assert payoff_settings.shape[0] >= 1
for probability in payoff_settings[0, [2, 3, 6, 7]]:
assert probability in np.round(np.arange(0.1, 1, 0.1), 1)
mags = list()
for magnitude in payoff_settings[0, [0, 1, 4, 5]]:
assert magnitude in range(1, 10)
mags.append(magnitude)
assert len(np.unique(mags)) == 4
def test_get_payoff_dict():
"""Test getting a payoff_dict off a setup."""
payoff_settings = get_payoff_settings(0.1)
setting = payoff_settings[0, :]
payoff_dict = get_payoff_dict(setting)
# Should be a dict
assert isinstance(payoff_dict, OrderedDict)
assert len(list(payoff_dict.values())[0]) == 10
assert len(list(payoff_dict.values())[1]) == 10
def _simulate_run(rand_payoff_settings, n_samples=12, seed=None):
"""Simulate a participant with 50% 50% left right tendency."""
rng = np.random.RandomState(seed)
actions = list()
outcomes = list()
for setting in rand_payoff_settings:
payoff_dict = get_payoff_dict(setting)
for sample in range(n_samples):
action = rng.choice((0, 1))
actions.append(action)
outcome = rng.choice(payoff_dict[action])
outcomes.append(outcome)
actions = np.array(actions)
outcomes = np.array(outcomes)
# combine actions and outcomes to code outcomes on the left with negative
# sign outcomes on the right with positive sign ... will end up with stim
# classes: - sign for "left", + sign for "right"
stim_classes = outcomes * (actions*2-1)
return stim_classes
def _make_class_hist(stim_classes):
"""Turn stim_classes into hist."""
# Make a histogram of which stimulus_classes we have collected so far
bins = np.hstack((np.arange(-9, 0), np.arange(1, 11)))
stim_class_hist = np.histogram(stim_classes, bins)
# Make an array from the hist and sort it
stim_class_arr = np.vstack((stim_class_hist[0], stim_class_hist[1][:-1])).T
stim_class_arr_sorted = stim_class_arr[stim_class_arr[:, 0].argsort()]
return stim_class_arr_sorted
def test_balancing():
"""Test that we can get a balanced stimulus selection."""
seed = 1
max_ntrls = 100
ev_diff = 0.9
payoff_settings = get_payoff_settings(ev_diff)
# No balancing at all, this will lead to a few stim_classes never
# being shown
rand_payoff_settings = payoff_settings.copy()
rng = np.random.RandomState(seed)
perm = rng.permutation(max_ntrls)
rand_payoff_settings = rand_payoff_settings[perm, :]
stim_classes = _simulate_run(rand_payoff_settings, n_samples=12, seed=seed)
hist = _make_class_hist(stim_classes)
diff1 = np.diff(hist[[0, -1], 0])
# some balancing
rand_payoff_settings = get_random_payoff_settings(max_ntrls,
payoff_settings,
-1,
seed)
stim_classes = _simulate_run(rand_payoff_settings, n_samples=12, seed=seed)
hist = _make_class_hist(stim_classes)
diff2 = np.diff(hist[[0, -1], 0])
# Most balancing
rand_payoff_settings = get_random_payoff_settings(max_ntrls,
payoff_settings,
0.6,
seed)
stim_classes = _simulate_run(rand_payoff_settings, n_samples=12, seed=seed)
hist = _make_class_hist(stim_classes)
diff3 = np.diff(hist[[0, -1], 0])
assert diff1 > diff2
assert diff2 > diff3
with pytest.raises(RuntimeError, match='We want to randomly pick 10'):
rand_payoff_settings = get_random_payoff_settings(180,
payoff_settings, 1)
| en | 0.8785 | Testing the setup of the payoff distributions. Test the setup of payoff distributions. Test getting a payoff_dict off a setup. # Should be a dict Simulate a participant with 50% 50% left right tendency. # combine actions and outcomes to code outcomes on the left with negative # sign outcomes on the right with positive sign ... will end up with stim # classes: - sign for "left", + sign for "right" Turn stim_classes into hist. # Make a histogram of which stimulus_classes we have collected so far # Make an array from the hist and sort it Test that we can get a balanced stimulus selection. # No balancing at all, this will lead to a few stim_classes never # being shown # some balancing # Most balancing | 2.472356 | 2 |
examples/drawing/sample15_stroke.py | chromia/wandplus | 0 | 6619864 | <reponame>chromia/wandplus<gh_stars>0
#!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
# http://www.imagemagick.org/Usage/draw/#stroke
# original imagemagick command:
# convert -size 380x70 xc:lightblue -pointsize 50 -font Chisel \
# -fill green -stroke black -draw 'text 10,55 "Black Border"' \
# stroke_font.jpg
#
# convert -size 320x420 xc:lightblue -pointsize 70 -font Vademecum \
# -fill red -stroke none -draw 'text 30,80 "Stroke -"' \
# -fill red -stroke black -strokewidth 0 -draw 'text 30,160 "Stroke 0"' \
# -fill red -stroke black -strokewidth 1 -draw 'text 30,240 "Stroke 1"' \
# -fill red -stroke black -strokewidth 2 -draw 'text 30,320 "Stroke 2"' \
# -fill red -stroke black -strokewidth 3 -draw 'text 30,400 "Stroke 3"' \
# stroke_table.jpg
#
# convert -size 320x100 xc:lightblue -font Candice -pointsize 72 -fill white \
# -stroke black -strokewidth 15 -draw "text 25,65 'Anthony'" \
# stroke_thick.jpg
#
# convert -size 320x100 xc:lightblue -font Candice -pointsize 72 -fill white \
# -stroke black -strokewidth 15 -draw "text 25,65 'Anthony'" \
# -stroke none -draw "text 25,65 'Anthony'" \
# stroke_outline.jpg
# Example 1: green text with a black outline ("Black Border")
with Image(width=380, height=70, background=Color('lightblue')) as canvas:
    with Drawing() as ctx:
        ctx.font = 'Chisel'
        ctx.font_size = 50
        ctx.fill_color = Color('green')
        ctx.stroke_color = Color('black')
        ctx.text(10, 55, 'Black Border')
        ctx(canvas)
    canvas.save(filename='sample15a.png')

# Example 2: the same red text at stroke widths none, 0, 1, 2 and 3
with Image(width=320, height=420, background=Color('lightblue')) as canvas:
    with Drawing() as ctx:
        ctx.font = 'Vademecum'
        ctx.font_size = 70
        ctx.fill_color = Color('red')
        # first line: no stroke at all
        ctx.stroke_color = Color('none')
        ctx.text(30, 80, 'Stroke -')
        # remaining lines: black stroke, widening from 0 to 3
        ctx.stroke_color = Color('black')
        for width in range(4):
            ctx.stroke_width = width
            ctx.text(30, 160 + 80 * width, 'Stroke {}'.format(width))
        ctx(canvas)
    canvas.save(filename='sample15b.png')

# Example 3: very thick (15px) black stroke around white text
with Image(width=320, height=100, background=Color('lightblue')) as canvas:
    with Drawing() as ctx:
        ctx.font = 'Candice'
        ctx.font_size = 72
        ctx.fill_color = Color('white')
        ctx.stroke_color = Color('black')
        ctx.stroke_width = 15
        ctx.text(25, 65, 'Anthony')
        ctx(canvas)
    canvas.save(filename='sample15c.png')

# Example 4: draw the thick-stroked text, then redraw it without a stroke
# on top, producing an outline effect
with Image(width=320, height=100, background=Color('lightblue')) as canvas:
    with Drawing() as ctx:
        ctx.font = 'Candice'
        ctx.font_size = 72
        ctx.fill_color = Color('white')
        ctx.stroke_color = Color('black')
        ctx.stroke_width = 15
        ctx.text(25, 65, 'Anthony')
        ctx.stroke_color = Color('none')
        ctx.text(25, 65, 'Anthony')
        ctx(canvas)
    canvas.save(filename='sample15d.png')
| #!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
# http://www.imagemagick.org/Usage/draw/#stroke
# original imagemagick command:
# convert -size 380x70 xc:lightblue -pointsize 50 -font Chisel \
# -fill green -stroke black -draw 'text 10,55 "Black Border"' \
# stroke_font.jpg
#
# convert -size 320x420 xc:lightblue -pointsize 70 -font Vademecum \
# -fill red -stroke none -draw 'text 30,80 "Stroke -"' \
# -fill red -stroke black -strokewidth 0 -draw 'text 30,160 "Stroke 0"' \
# -fill red -stroke black -strokewidth 1 -draw 'text 30,240 "Stroke 1"' \
# -fill red -stroke black -strokewidth 2 -draw 'text 30,320 "Stroke 2"' \
# -fill red -stroke black -strokewidth 3 -draw 'text 30,400 "Stroke 3"' \
# stroke_table.jpg
#
# convert -size 320x100 xc:lightblue -font Candice -pointsize 72 -fill white \
# -stroke black -strokewidth 15 -draw "text 25,65 'Anthony'" \
# stroke_thick.jpg
#
# convert -size 320x100 xc:lightblue -font Candice -pointsize 72 -fill white \
# -stroke black -strokewidth 15 -draw "text 25,65 'Anthony'" \
# -stroke none -draw "text 25,65 'Anthony'" \
# stroke_outline.jpg
with Image(width=380, height=70, background=Color('lightblue')) as img:
with Drawing() as draw:
draw.font = 'Chisel'
draw.font_size = 50
draw.fill_color = Color('green')
draw.stroke_color = Color('black')
draw.text(10, 55, 'Black Border')
draw(img)
img.save(filename='sample15a.png')
with Image(width=320, height=420, background=Color('lightblue')) as img:
with Drawing() as draw:
draw.font = 'Vademecum'
draw.font_size = 70
draw.fill_color = Color('red')
draw.stroke_color = Color('none')
draw.text(30, 80, 'Stroke -')
draw.stroke_color = Color('black')
draw.stroke_width = 0
draw.text(30, 160, 'Stroke 0')
draw.stroke_width = 1
draw.text(30, 240, 'Stroke 1')
draw.stroke_width = 2
draw.text(30, 320, 'Stroke 2')
draw.stroke_width = 3
draw.text(30, 400, 'Stroke 3')
draw(img)
img.save(filename='sample15b.png')
# convert -size 320x100 xc:lightblue -font Candice -pointsize 72 -fill white \
# -stroke black -strokewidth 15 -draw "text 25,65 'Anthony'" \
# stroke_thick.jpg
with Image(width=320, height=100, background=Color('lightblue')) as img:
with Drawing() as draw:
draw.font = 'Candice'
draw.font_size = 72
draw.fill_color = Color('white')
draw.stroke_color = Color('black')
draw.stroke_width = 15
draw.text(25, 65, 'Anthony')
draw(img)
img.save(filename='sample15c.png')
with Image(width=320, height=100, background=Color('lightblue')) as img:
with Drawing() as draw:
draw.font = 'Candice'
draw.font_size = 72
draw.fill_color = Color('white')
draw.stroke_color = Color('black')
draw.stroke_width = 15
draw.text(25, 65, 'Anthony')
draw.stroke_color = Color('none')
draw.text(25, 65, 'Anthony')
draw(img)
img.save(filename='sample15d.png') | en | 0.159541 | #!/usr/bin/env python # http://www.imagemagick.org/Usage/draw/#stroke # original imagemagick command: # convert -size 380x70 xc:lightblue -pointsize 50 -font Chisel \ # -fill green -stroke black -draw 'text 10,55 "Black Border"' \ # stroke_font.jpg # # convert -size 320x420 xc:lightblue -pointsize 70 -font Vademecum \ # -fill red -stroke none -draw 'text 30,80 "Stroke -"' \ # -fill red -stroke black -strokewidth 0 -draw 'text 30,160 "Stroke 0"' \ # -fill red -stroke black -strokewidth 1 -draw 'text 30,240 "Stroke 1"' \ # -fill red -stroke black -strokewidth 2 -draw 'text 30,320 "Stroke 2"' \ # -fill red -stroke black -strokewidth 3 -draw 'text 30,400 "Stroke 3"' \ # stroke_table.jpg # # convert -size 320x100 xc:lightblue -font Candice -pointsize 72 -fill white \ # -stroke black -strokewidth 15 -draw "text 25,65 'Anthony'" \ # stroke_thick.jpg # # convert -size 320x100 xc:lightblue -font Candice -pointsize 72 -fill white \ # -stroke black -strokewidth 15 -draw "text 25,65 'Anthony'" \ # -stroke none -draw "text 25,65 'Anthony'" \ # stroke_outline.jpg # convert -size 320x100 xc:lightblue -font Candice -pointsize 72 -fill white \ # -stroke black -strokewidth 15 -draw "text 25,65 'Anthony'" \ # stroke_thick.jpg | 2.733274 | 3 |
amplimap/merge_folders.py | koelling/amplimap | 11 | 6619865 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module provides the amplimap.merge_folders.main() function called by the ``amplimap_merge`` script.
This script merges coverage data and variant calls from different working directories together,
making it possible to merge samples sequenced in different runs into a single output file.
"""
# python 3 compat
# http://python-future.org/compatible_idioms.html
from __future__ import print_function
import sys
import os
import re
import logging
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import time
# use pandas to read the CSV file and write output files
import pandas as pd
# for defaultdict + sorting
import collections, itertools
import operator
import argparse
files_to_merge = ['variants_raw/variants_summary.csv', 'variants_raw/variants_summary_filtered.csv', 'bams/coverages/coverage_full.csv']
def join_nonempty(values):
    """Join all non-null, non-empty strings in the Series *values* with ';'."""
    keep = values.notnull() & (values.str.len() > 0)
    return ';'.join(values[keep])
def merge_folders(output_folder, folders, force, unique_sample_id_column, additional_coverage, enforce_integer_ids):
    """Merge amplimap summary tables from several analysis folders.

    Reads the variant-summary and coverage CSVs (``files_to_merge``) from
    each directory in *folders*, concatenates them while tagging each row
    with its source ``Folder``, optionally collapses duplicate samples into
    ``*.unique.csv`` tables, and writes everything to *output_folder*.

    Parameters
    ----------
    output_folder : str
        Directory the merged CSV files are written to.
    folders : list of str
        Analysis working directories to read the per-run tables from.
    force : bool
        Overwrite existing output files instead of raising an Exception.
    unique_sample_id_column : str or None
        Column holding the unique sample id; when given, duplicate rows per
        sample are collapsed into additional unique tables.
    additional_coverage : str or None
        Optional extra coverage CSV appended to the merged coverage table
        (e.g. to override coverage numbers with Sanger sequencing results).
    enforce_integer_ids : bool
        Strip a trailing ``.0`` from sample ids (float-formatted integers).

    Raises
    ------
    Exception
        If an output file already exists and *force* is False.
    """
    # maps relative file path (from files_to_merge) -> concatenated DataFrame
    data = {}
    # read in
    for folder in folders:
        log.info('Reading files from %s...', folder)
        for file in files_to_merge:
            fn = os.path.join(folder, file)
            try:
                # dtype='object' keeps every column as strings so runs with
                # different value formats concatenate cleanly
                folder_data = pd.read_csv(fn, index_col = False, dtype = 'object')
                log.info('%s: shape = %s', fn, folder_data.shape)
                # remember which run each row came from
                folder_data['Folder'] = folder
                if not 'Notes' in folder_data.columns:
                    folder_data['Notes'] = ''
                if file in data:
                    # reorder if we have to
                    if list(data[file].columns) != list(folder_data.columns):
                        log.warn('Inconsistent column names! Will proceed but column order may be changed')
                        log.warn('%s =\t%s', 'previous', ','.join(data[file].columns))
                        log.warn('%s =\t%s', 'this file', ','.join(folder_data.columns))
                    data[file] = pd.concat([data[file], folder_data], ignore_index = True)
                else:
                    data[file] = folder_data
            except pd.errors.EmptyDataError:
                # a run may legitimately have produced an empty table
                log.info('Skipping empty file: %s', fn)

    # add additional coverage data from another file -- to override coverage numbers with sanger sequencing
    if additional_coverage:
        fn = os.path.join(additional_coverage)
        try:
            folder_data = pd.read_csv(fn, index_col = False, dtype = 'object')
            log.info('%s: shape = %s', fn, folder_data.shape)
            folder_data['Folder'] = additional_coverage
            if not 'Notes' in folder_data.columns:
                folder_data['Notes'] = ''
            data['bams/coverages/coverage_full.csv'] = pd.concat([data['bams/coverages/coverage_full.csv'], folder_data], ignore_index = True)
        except pd.errors.EmptyDataError:
            # unlike run tables, an explicitly-requested coverage file must
            # not be empty -- re-raise after logging
            log.info('Empty additional coverage file: %s', additional_coverage)
            raise

    # handle duplicates
    if unique_sample_id_column:
        log.info('Combining duplicate values of %s into single row...', unique_sample_id_column)

        variants_null_sample_column = data['variants_raw/variants_summary_filtered.csv'][unique_sample_id_column].isnull()
        if enforce_integer_ids:
            # just remove the .0 from end of strings rather than trying to make them ints, which causes problems with NAs
            data['variants_raw/variants_summary_filtered.csv'][unique_sample_id_column] = data['variants_raw/variants_summary_filtered.csv'][unique_sample_id_column].replace(
                re.compile(r'\.0$'), '')
            log.info('Removing .0 from end of ID column!')
        if variants_null_sample_column.any():
            log.warn('Dropping %d variant rows without %s to make unique table', variants_null_sample_column.sum(), unique_sample_id_column)
            variants_with_sample = data['variants_raw/variants_summary_filtered.csv'][~variants_null_sample_column]
        else:
            variants_with_sample = data['variants_raw/variants_summary_filtered.csv']

        # merge variant table - don't need to fix dtype here since we just check for equality
        # in this case we can drop the previous index, since it's not meaningful (we may not even have to reset it at all, not sure...)
        data['variants_summary_filtered.unique.csv'] = variants_with_sample.drop_duplicates([unique_sample_id_column, 'Chr', 'Start', 'Ref', 'Alt']).reset_index(drop = True)
        log.info('Variants: Combined %d/%d rows into %d rows (first row of each sample kept)',
            len(variants_with_sample), len(data['variants_raw/variants_summary_filtered.csv']), len(data['variants_summary_filtered.unique.csv']))

        # merge coverage table: group rows per sample+target and take the
        # best value seen across runs for each numeric metric
        coverage_group_cols = [unique_sample_id_column, 'Target']
        coverage_ignore_cols = ['basepairs', 'Sample']
        coverage_aggregation_ops = {'min_coverage': max, 'cov_per_bp': max, 'sum_coverage': max, 'fraction_zero_coverage': min,
            'Notes': join_nonempty, 'Folder': join_nonempty}
        # detect additional columns to join
        for colname in data['bams/coverages/coverage_full.csv'].columns:
            if not colname in coverage_group_cols + coverage_ignore_cols + list(coverage_aggregation_ops.keys()):
                coverage_aggregation_ops[colname] = join_nonempty
                log.info('Coverage: Detected additional column to join: %s', colname)

        # fix dtypes for numeric columns (they were read as 'object' above),
        # so max/min compare numbers instead of strings
        for colname in ['min_coverage', 'sum_coverage']:
            data['bams/coverages/coverage_full.csv'][colname] = data['bams/coverages/coverage_full.csv'][colname].astype(int)
        for colname in ['cov_per_bp', 'fraction_zero_coverage']:
            data['bams/coverages/coverage_full.csv'][colname] = data['bams/coverages/coverage_full.csv'][colname].astype(float)
        log.info('Coverage: Fixed datatypes of numeric columns for min/max')

        coverage_null_sample_column = data['bams/coverages/coverage_full.csv'][unique_sample_id_column].isnull()
        if enforce_integer_ids:
            # just remove the .0 from end of strings rather than trying to make them ints, which causes problems with NAs
            data['bams/coverages/coverage_full.csv'][unique_sample_id_column] = data['bams/coverages/coverage_full.csv'][unique_sample_id_column].replace(
                re.compile(r'\.0$'), '')
            log.info('Removing .0 from end of ID column!')
        if coverage_null_sample_column.any():
            log.warn('Dropping %d coverage rows without %s to make unique table', coverage_null_sample_column.sum(), unique_sample_id_column)
            coverage_with_sample = data['bams/coverages/coverage_full.csv'][~coverage_null_sample_column]
        else:
            coverage_with_sample = data['bams/coverages/coverage_full.csv']

        # actually do the aggregation
        # in this case we want to keep the index, since this is coverage_group_cols, which is dnaid + target (I think?)
        data['coverage_full.unique.csv'] = coverage_with_sample.groupby(coverage_group_cols).aggregate(coverage_aggregation_ops).reset_index(drop = False)
        # reorder columns to match the input table (minus the ignored ones)
        coverage_unique_cols = [colname for colname in coverage_with_sample.columns if not colname in coverage_ignore_cols]
        assert pd.Series(data['coverage_full.unique.csv'].columns).isin(coverage_unique_cols).all()
        data['coverage_full.unique.csv'] = data['coverage_full.unique.csv'][coverage_unique_cols]
        # log
        log.info('Coverage: Combined %d/%d rows (mean min_coverage = %f) into %d rows (mean min_coverage = %f) taking best values per sample',
            len(coverage_with_sample), len(data['bams/coverages/coverage_full.csv']), coverage_with_sample['min_coverage'].mean(),
            len(data['coverage_full.unique.csv']), data['coverage_full.unique.csv']['min_coverage'].mean())
    else:
        log.info('Not combining rows since unique-sample-id-column not provided.')

    # write out: each table goes to output_folder under its basename
    log.info('Writing output files...')
    for file in data.keys():
        log.info('%s: final shape = %s', file, data[file].shape)
        bnfile = os.path.basename(file)
        fn = os.path.join(output_folder, bnfile)
        # handle existing files
        if os.path.exists(fn):
            if force:
                log.info('Overwriting file: %s', fn)
            else:
                raise Exception('Cannot write to file, exists: %s. Aborting, delete file or set --force to overwrite.' % fn)
        else:
            log.info('Creating new file: %s', fn)
        # add hash for first column name (marks the header line as a comment
        # for downstream tools)
        data[file].columns = ['#' + c if ix == 0 else c for ix, c in enumerate(data[file].columns)]
        # write to file
        data[file].to_csv(fn, index = False)
def main():
    """Entry point for ``amplimap_merge``: parse CLI arguments and merge folders."""
    log.info('Called with arguments: "%s"', '" "'.join(sys.argv))

    cli = argparse.ArgumentParser()
    cli.add_argument("-f", "--force", help="overwrite existing files", action='store_true')
    cli.add_argument("-c", "--unique-sample-id-column", help="column which contains the unique sample id (to remove duplicates)")
    cli.add_argument("-a", "--additional-coverage", help="file with additional coverage data, which will be added to the merged coverages")
    cli.add_argument("--enforce-integer-ids", help="force all sample id columns to be in integer format", action='store_true')
    cli.add_argument("OUTPUT_FOLDER", help="output folder")
    cli.add_argument("INPUT_FOLDER", help="input folders to merge (analysis directories)", nargs="+")
    opts = cli.parse_args()

    merge_folders(opts.OUTPUT_FOLDER, opts.INPUT_FOLDER, opts.force,
                  opts.unique_sample_id_column, opts.additional_coverage,
                  opts.enforce_integer_ids)
    return 0

if __name__ == '__main__':
    sys.exit(main())
| # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module provides the amplimap.merge_folders.main() function called by the ``amplimap_merge`` script.
This script merges coverage data and variant calls from different working directories together,
making it possible to merge samples sequenced in different runs into a single output file.
"""
# python 3 compat
# http://python-future.org/compatible_idioms.html
from __future__ import print_function
import sys
import os
import re
import logging
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import time
# use pandas to read the CSV file and write output files
import pandas as pd
# for defaultdict + sorting
import collections, itertools
import operator
import argparse
files_to_merge = ['variants_raw/variants_summary.csv', 'variants_raw/variants_summary_filtered.csv', 'bams/coverages/coverage_full.csv']
def join_nonempty(values):
return ';'.join(values[values.notnull() & (values.str.len() > 0)])
def merge_folders(output_folder, folders, force, unique_sample_id_column, additional_coverage, enforce_integer_ids):
data = {}
# read in
for folder in folders:
log.info('Reading files from %s...', folder)
for file in files_to_merge:
fn = os.path.join(folder, file)
try:
folder_data = pd.read_csv(fn, index_col = False, dtype = 'object')
log.info('%s: shape = %s', fn, folder_data.shape)
folder_data['Folder'] = folder
if not 'Notes' in folder_data.columns:
folder_data['Notes'] = ''
if file in data:
# reorder if we have to
if list(data[file].columns) != list(folder_data.columns):
log.warn('Inconsistent column names! Will proceed but column order may be changed')
log.warn('%s =\t%s', 'previous', ','.join(data[file].columns))
log.warn('%s =\t%s', 'this file', ','.join(folder_data.columns))
data[file] = pd.concat([data[file], folder_data], ignore_index = True)
else:
data[file] = folder_data
except pd.errors.EmptyDataError:
log.info('Skipping empty file: %s', fn)
# add additional coverage data from another file -- to override coverage numbers with sanger sequencing
if additional_coverage:
fn = os.path.join(additional_coverage)
try:
folder_data = pd.read_csv(fn, index_col = False, dtype = 'object')
log.info('%s: shape = %s', fn, folder_data.shape)
folder_data['Folder'] = additional_coverage
if not 'Notes' in folder_data.columns:
folder_data['Notes'] = ''
data['bams/coverages/coverage_full.csv'] = pd.concat([data['bams/coverages/coverage_full.csv'], folder_data], ignore_index = True)
except pd.errors.EmptyDataError:
log.info('Empty additional coverage file: %s', additional_coverage)
raise
# handle duplicates
if unique_sample_id_column:
log.info('Combining duplicate values of %s into single row...', unique_sample_id_column)
variants_null_sample_column = data['variants_raw/variants_summary_filtered.csv'][unique_sample_id_column].isnull()
if enforce_integer_ids:
# just remove the .0 from end of strings rather than trying to make them ints, which causes problems with NAs
data['variants_raw/variants_summary_filtered.csv'][unique_sample_id_column] = data['variants_raw/variants_summary_filtered.csv'][unique_sample_id_column].replace(
re.compile(r'\.0$'), '')
log.info('Removing .0 from end of ID column!')
if variants_null_sample_column.any():
log.warn('Dropping %d variant rows without %s to make unique table', variants_null_sample_column.sum(), unique_sample_id_column)
variants_with_sample = data['variants_raw/variants_summary_filtered.csv'][~variants_null_sample_column]
else:
variants_with_sample = data['variants_raw/variants_summary_filtered.csv']
# merge variant table - don't need to fix dtype here since we just check for equality
# in this case we can drop the previous index, since it's not meaningful (we may not even have to reset it at all, not sure...)
data['variants_summary_filtered.unique.csv'] = variants_with_sample.drop_duplicates([unique_sample_id_column, 'Chr', 'Start', 'Ref', 'Alt']).reset_index(drop = True)
log.info('Variants: Combined %d/%d rows into %d rows (first row of each sample kept)',
len(variants_with_sample), len(data['variants_raw/variants_summary_filtered.csv']), len(data['variants_summary_filtered.unique.csv']))
# merge coverage table
coverage_group_cols = [unique_sample_id_column, 'Target']
coverage_ignore_cols = ['basepairs', 'Sample']
coverage_aggregation_ops = {'min_coverage': max, 'cov_per_bp': max, 'sum_coverage': max, 'fraction_zero_coverage': min,
'Notes': join_nonempty, 'Folder': join_nonempty}
# detect additional columns to join
for colname in data['bams/coverages/coverage_full.csv'].columns:
if not colname in coverage_group_cols + coverage_ignore_cols + list(coverage_aggregation_ops.keys()):
coverage_aggregation_ops[colname] = join_nonempty
log.info('Coverage: Detected additional column to join: %s', colname)
# fix dtypes for numeric columns
for colname in ['min_coverage', 'sum_coverage']:
data['bams/coverages/coverage_full.csv'][colname] = data['bams/coverages/coverage_full.csv'][colname].astype(int)
for colname in ['cov_per_bp', 'fraction_zero_coverage']:
data['bams/coverages/coverage_full.csv'][colname] = data['bams/coverages/coverage_full.csv'][colname].astype(float)
log.info('Coverage: Fixed datatypes of numeric columns for min/max')
coverage_null_sample_column = data['bams/coverages/coverage_full.csv'][unique_sample_id_column].isnull()
if enforce_integer_ids:
# just remove the .0 from end of strings rather than trying to make them ints, which causes problems with NAs
data['bams/coverages/coverage_full.csv'][unique_sample_id_column] = data['bams/coverages/coverage_full.csv'][unique_sample_id_column].replace(
re.compile(r'\.0$'), '')
log.info('Removing .0 from end of ID column!')
if coverage_null_sample_column.any():
log.warn('Dropping %d coverage rows without %s to make unique table', coverage_null_sample_column.sum(), unique_sample_id_column)
coverage_with_sample = data['bams/coverages/coverage_full.csv'][~coverage_null_sample_column]
else:
coverage_with_sample = data['bams/coverages/coverage_full.csv']
# actually do the aggregation
# in this case we want to keep the index, since this is coverage_group_cols, which is dnaid + target (I think?)
data['coverage_full.unique.csv'] = coverage_with_sample.groupby(coverage_group_cols).aggregate(coverage_aggregation_ops).reset_index(drop = False)
# reorder columns
coverage_unique_cols = [colname for colname in coverage_with_sample.columns if not colname in coverage_ignore_cols]
assert pd.Series(data['coverage_full.unique.csv'].columns).isin(coverage_unique_cols).all()
data['coverage_full.unique.csv'] = data['coverage_full.unique.csv'][coverage_unique_cols]
# log
log.info('Coverage: Combined %d/%d rows (mean min_coverage = %f) into %d rows (mean min_coverage = %f) taking best values per sample',
len(coverage_with_sample), len(data['bams/coverages/coverage_full.csv']), coverage_with_sample['min_coverage'].mean(),
len(data['coverage_full.unique.csv']), data['coverage_full.unique.csv']['min_coverage'].mean())
else:
log.info('Not combining rows since unique-sample-id-column not provided.')
# write out
log.info('Writing output files...')
for file in data.keys():
log.info('%s: final shape = %s', file, data[file].shape)
bnfile = os.path.basename(file)
fn = os.path.join(output_folder, bnfile)
# handle existing files
if os.path.exists(fn):
if force:
log.info('Overwriting file: %s', fn)
else:
raise Exception('Cannot write to file, exists: %s. Aborting, delete file or set --force to overwrite.' % fn)
else:
log.info('Creating new file: %s', fn)
# add hash for first column name
data[file].columns = ['#' + c if ix == 0 else c for ix, c in enumerate(data[file].columns)]
# write to file
data[file].to_csv(fn, index = False)
def main():
    """Parse command-line arguments and run the folder merge.

    Returns 0 so the caller can pass the value to sys.exit() as the
    process exit code.
    """
    # Record the exact invocation up front to make log files reproducible.
    log.info('Called with arguments: "%s"', '" "'.join(sys.argv))

    arg_parser = argparse.ArgumentParser()
    # Flags first, then the required positional arguments.
    arg_parser.add_argument("-f", "--force", help="overwrite existing files", action='store_true')
    arg_parser.add_argument("-c", "--unique-sample-id-column", help="column which contains the unique sample id (to remove duplicates)")
    arg_parser.add_argument("-a", "--additional-coverage", help="file with additional coverage data, which will be added to the merged coverages")
    arg_parser.add_argument("--enforce-integer-ids", help="force all sample id columns to be in integer format", action='store_true')
    arg_parser.add_argument("OUTPUT_FOLDER", help="output folder")
    arg_parser.add_argument("INPUT_FOLDER", help="input folders to merge (analysis directories)", nargs="+")
    options = arg_parser.parse_args()

    merge_folders(
        options.OUTPUT_FOLDER,
        options.INPUT_FOLDER,
        options.force,
        options.unique_sample_id_column,
        options.additional_coverage,
        options.enforce_integer_ids,
    )
    return 0
# Script entry point: forward main()'s return value to sys.exit so it
# becomes the process exit code (0 on success).
if __name__ == '__main__':
    sys.exit(main())
| en | 0.898384 | # !/usr/bin/env python # -*- coding: utf-8 -*- This module provides the amplimap.merge_folders.main() function called by the ``amplimap_merge`` script. This script merges coverage data and variant calls from different working directories together, making it possible to merge samples sequenced in different runs into a single output file. # python 3 compat # http://python-future.org/compatible_idioms.html # use pandas to read the CSV file and write output files # for defaultdict + sorting # read in # reorder if we have to # add additional coverage data from another file -- to override coverage numbers with sanger sequencing # handle duplicates # just remove the .0 from end of strings rather than trying to make them ints, which causes problems with NAs # merge variant table - don't need to fix dtype here since we just check for equality # in this case we can drop the previous index, since it's not meaningful (we may not even have to reset it at all, not sure...) # merge coverage table # detect additional columns to join # fix dtypes for numeric columns # just remove the .0 from end of strings rather than trying to make them ints, which causes problems with NAs # actually do the aggregation # in this case we want to keep the index, since this is coverage_group_cols, which is dnaid + target (I think?) # reorder columns # log # write out # handle existing files # add hash for first column name # write to file | 2.466035 | 2 |
OS/Scheduling/script.py | prtx/What-I-learned-in-college | 0 | 6619866 | #!/usr/bin/python
import subprocess
for i in range(0,1000):
print i
subprocess.check_output( ['python graph.py '+str(i/200+1)+' > test/fcfs/fcfs_'+str(i)] ,shell = True )
| #!/usr/bin/python
import subprocess
for i in range(0,1000):
print i
subprocess.check_output( ['python graph.py '+str(i/200+1)+' > test/fcfs/fcfs_'+str(i)] ,shell = True )
| ru | 0.258958 | #!/usr/bin/python | 1.974883 | 2 |
Language Skills/Python/Unit 12 File Input and Output/02 The Devil's in the Details/7-The with and as Keywords.py | WarHatch/Codecademy-Exercise-Answers | 346 | 6619867 | <reponame>WarHatch/Codecademy-Exercise-Answers
with open("text.txt", "w") as textfile:
textfile.write("Success!")
| with open("text.txt", "w") as textfile:
textfile.write("Success!") | none | 1 | 2.370866 | 2 | |
Old/odlstats.py | eriksore/sdn | 0 | 6619868 | <gh_stars>0
import httplib2
import json
h = httplib2.Http(".cache")
h.add_credentials('admin', 'admin')
resp, content = h.request('http://192.168.231.246:8080/controller/nb/v2/statistics/default/flow', "GET")
allFlowStats = json.loads(content)
flowStats = allFlowStats['flowStatistics']
for fs in flowStats:
print "\nSwitch ID : " + fs['node']['id']
print '{0:8} {1:8} {2:5} {3:15}'.format('Count', 'Action', 'Port', 'DestIP')
for aFlow in fs['flowStatistic']:
count = aFlow['packetCount']
actions = aFlow['flow']['actions']
actionType = ''
actionPort = ''
#print actions
if(type(actions) == type(list())):
actionType = actions[0]['type']
actionPort = actions[0]['port']['id']
else:
actionType = actions['type']
actionPort = actions['port']['id']
dst = aFlow['flow']['match']['matchField'][0]['value']
print '{0:8} {1:8} {2:5} {3:15}'.format(count, actionType, actionPort, dst)
| import httplib2
import json
h = httplib2.Http(".cache")
h.add_credentials('admin', 'admin')
resp, content = h.request('http://192.168.231.246:8080/controller/nb/v2/statistics/default/flow', "GET")
allFlowStats = json.loads(content)
flowStats = allFlowStats['flowStatistics']
for fs in flowStats:
print "\nSwitch ID : " + fs['node']['id']
print '{0:8} {1:8} {2:5} {3:15}'.format('Count', 'Action', 'Port', 'DestIP')
for aFlow in fs['flowStatistic']:
count = aFlow['packetCount']
actions = aFlow['flow']['actions']
actionType = ''
actionPort = ''
#print actions
if(type(actions) == type(list())):
actionType = actions[0]['type']
actionPort = actions[0]['port']['id']
else:
actionType = actions['type']
actionPort = actions['port']['id']
dst = aFlow['flow']['match']['matchField'][0]['value']
print '{0:8} {1:8} {2:5} {3:15}'.format(count, actionType, actionPort, dst) | en | 0.25036 | #print actions | 2.463582 | 2 |
worker/tests/test_bake.py | mixslice/bake | 2 | 6619869 | from context import bake as bake # env of our project src
cake = {
'layers': [
{
'resource': '1.mp4',
'start': '10',
'end': '12',
'filters': [
{
'name': 'vflip',
'values': [],
'kwvalues': {},
},
],
},
{
'resource': '2.mp4',
'start': '2',
'end': '4',
'filters': [
{
'name': 'hue',
'values': [],
'kwvalues': {'s': '0'},
},
],
},
{
'resource': '3.mp4',
'start': '16',
'end': '18',
'filters': [
{
'name': 'hflip',
'values': [],
'kwvalues': {},
},
{
'name': 'crop',
'values': [],
'kwvalues': {'w': 100, 'h': 200, 'x': 300, 'y': 400},
},
],
},
],
'uid': '34165ddbb9f314b4c60a6f0121d6d498',
}
def test_filter_adapter():
assert bake.filter_adapter(cake['layers'][0]['filters'][0]) == 'vflip'
assert bake.filter_adapter(cake['layers'][1]['filters'][0]) == 'hue=s=0'
assert bake.filter_adapter(cake['layers'][2]['filters'][0]) == 'hflip'
assert 'w=100' in bake.filter_adapter(cake['layers'][2]['filters'][1])
assert 'h=200' in bake.filter_adapter(cake['layers'][2]['filters'][1])
assert 'x=300' in bake.filter_adapter(cake['layers'][2]['filters'][1])
assert 'y=400' in bake.filter_adapter(cake['layers'][2]['filters'][1])
assert 'crop' in bake.filter_adapter(cake['layers'][2]['filters'][1])
def trim_filter_adapter():
assert 'start_frame=10' in bake.trim_filter_adapter(cake['layers'][0])
assert 'end_frame=12' in bake.trim_filter_adapter(cake['layers'][0])
assert ':' in bake.trim_filter_adapter(cake['layers'][0])
def test_get_input_files():
assert '1.mp4' in bake._get_input_files(cake)
assert '2.mp4' in bake._get_input_files(cake)
assert '3.mp4' in bake._get_input_files(cake)
def test_generate_filtergraph():
assert len(bake.get_filtergraph_chains(cake, 'myoutput')) == len(cake['layers']) * 2
assert 'myoutput' in bake.get_filtergraph_chains(cake, 'myoutput')[-1]
def test_generate_full_command_line():
assert 'myresult.mp4' in bake.generate_cake_render_command(cake, 'myresult.mp4')
| from context import bake as bake # env of our project src
cake = {
'layers': [
{
'resource': '1.mp4',
'start': '10',
'end': '12',
'filters': [
{
'name': 'vflip',
'values': [],
'kwvalues': {},
},
],
},
{
'resource': '2.mp4',
'start': '2',
'end': '4',
'filters': [
{
'name': 'hue',
'values': [],
'kwvalues': {'s': '0'},
},
],
},
{
'resource': '3.mp4',
'start': '16',
'end': '18',
'filters': [
{
'name': 'hflip',
'values': [],
'kwvalues': {},
},
{
'name': 'crop',
'values': [],
'kwvalues': {'w': 100, 'h': 200, 'x': 300, 'y': 400},
},
],
},
],
'uid': '34165ddbb9f314b4c60a6f0121d6d498',
}
def test_filter_adapter():
assert bake.filter_adapter(cake['layers'][0]['filters'][0]) == 'vflip'
assert bake.filter_adapter(cake['layers'][1]['filters'][0]) == 'hue=s=0'
assert bake.filter_adapter(cake['layers'][2]['filters'][0]) == 'hflip'
assert 'w=100' in bake.filter_adapter(cake['layers'][2]['filters'][1])
assert 'h=200' in bake.filter_adapter(cake['layers'][2]['filters'][1])
assert 'x=300' in bake.filter_adapter(cake['layers'][2]['filters'][1])
assert 'y=400' in bake.filter_adapter(cake['layers'][2]['filters'][1])
assert 'crop' in bake.filter_adapter(cake['layers'][2]['filters'][1])
def trim_filter_adapter():
assert 'start_frame=10' in bake.trim_filter_adapter(cake['layers'][0])
assert 'end_frame=12' in bake.trim_filter_adapter(cake['layers'][0])
assert ':' in bake.trim_filter_adapter(cake['layers'][0])
def test_get_input_files():
assert '1.mp4' in bake._get_input_files(cake)
assert '2.mp4' in bake._get_input_files(cake)
assert '3.mp4' in bake._get_input_files(cake)
def test_generate_filtergraph():
assert len(bake.get_filtergraph_chains(cake, 'myoutput')) == len(cake['layers']) * 2
assert 'myoutput' in bake.get_filtergraph_chains(cake, 'myoutput')[-1]
def test_generate_full_command_line():
assert 'myresult.mp4' in bake.generate_cake_render_command(cake, 'myresult.mp4')
| en | 0.475563 | # env of our project src | 2.236357 | 2 |
Projects/Main/Tools/xmlbrushexport.py | dphrygian/zeta | 6 | 6619870 | #!BPY
"""
Name: 'Brush V1 (.brush)...'
Blender: 243
Group: 'Export'
Tooltip: 'Export selected mesh to Engine Brush V1 Format (.brush)'
"""
__bpydoc__ = """\
Intermediate output for Engine brushes.
"""
# This emits a .brush file (in config file format)
# and a .mesh for each object in the scene.
import sys
import Blender
from Blender import Mesh, Modifier, Mathutils
from Blender.Mathutils import *
import BPyMesh
from collections import defaultdict
def safeGetProp( object, name, type, default ):
try:
if not object:
return default
prop = object.getProperty( name )
if not prop:
return default
if prop.getType() != type:
return default
return prop.getData()
except:
return default
def exportMesh( subFilename, objects ):
subFile = open( subFilename, "wb" )
subFile.write( '<mesh>\n' )
# Only export the materials for the first object in the merge group
firstObject = objects[0]
firstMesh = firstObject.getData( False, True )
for mat in firstMesh.materials:
if mat:
for mtex in mat.getTextures():
if mtex and mtex.tex and mtex.tex.image:
subFile.write( '\t<tex file="%s" />\n' % mtex.tex.image.filename )
for object in objects:
# HACKHACK: Modified radial normals for foliage
radialNormals = safeGetProp( object, 'RadialNormals', 'BOOL', False )
radialOffsetZ = safeGetProp( object, 'RadialOffsetZ', 'FLOAT', 0.0 )
radialScalarZ = 1.0 / safeGetProp( object, 'RadialScalarZ', 'FLOAT', 1.0 ) # Divide here, since it's more intuitive to imagine scaling a sphere around the object
objLoc = Vector( object.loc )
objLoc.z += radialOffsetZ
directedNormals = safeGetProp( object, 'DirectedNormals', 'BOOL', False )
directedX = safeGetProp( object, 'DirectedX', 'FLOAT', 0.0 )
directedY = safeGetProp( object, 'DirectedY', 'FLOAT', 0.0 )
directedZ = safeGetProp( object, 'DirectedZ', 'FLOAT', 0.0 )
directedNormal = Vector( directedX, directedY, directedZ )
directedNormal.normalize()
normalsB = safeGetProp( object, 'NormalsB', 'BOOL', False )
mesh = object.getData( False, True )
mesh.transform( object.matrixWorld, True )
for face in mesh.faces:
subFile.write('\t<face>\n')
idx = 0;
for vert in face.verts:
subFile.write( '\t\t<vert>\n' )
subFile.write( '\t\t\t<pos x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.co ) )
if mesh.faceUV:
subFile.write( '\t\t\t<uv x="%.6f" y="%.6f" />\n' % tuple( face.uv[ idx ] ) )
elif mesh.vertexUV:
subFile.write( '\t\t\t<uv x="%.6f" y="%.6f" />\n' % tuple( vert.uvco ) )
if radialNormals:
vertLoc = vert.co
vertOffset = vertLoc - objLoc
vertOffset.z *= radialScalarZ
vertOffset.normalize()
# ZETA: Write the actual normals as norm and the bent normals as normb
subFile.write( '\t\t\t<norm x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.no ) )
subFile.write( '\t\t\t<normb x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vertOffset ) )
elif directedNormals:
# ZETA: Write the actual normals as norm and the bent normals as normb
subFile.write( '\t\t\t<norm x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.no ) )
subFile.write( '\t\t\t<normb x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( directedNormal ) )
else:
subFile.write( '\t\t\t<norm x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.no ) )
if normalsB:
# Also write the normals as a second channel, which we need for foliage
subFile.write( '\t\t\t<normb x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.no ) )
if mesh.vertexColors:
col = face.col[ idx ]
subFile.write( '\t\t\t<col r="%d" g="%d" b="%d" />\n' % ( col.r, col.g, col.b ) )
subFile.write( '\t\t</vert>\n' )
idx = idx + 1
subFile.write( '\t</face>\n' )
mesh.transform( object.getInverseMatrix(), True )
subFile.write('</mesh>\n')
subFile.close()
def write( brushFilename ):
startTime = Blender.sys.time()
if not brushFilename.lower().endswith('.brush'):
brushFilename += '.brush'
currentScene = Blender.Scene.GetCurrent()
meshes = []
materials = []
hulls = []
surfaces = []
blocksentities = []
blockstraces = []
blocksocclusions = []
blocksaudios = []
navignores = []
ambients = []
cubemaps = []
fogmeshes = []
fogmeshdefs = []
editormeshes = []
editorhulls = []
editorhiddens = []
castsshadows = []
mergegroups = defaultdict( list )
for object in currentScene.objects:
if safeGetProp( object, 'Ignore', 'BOOL', False ):
# Do nothing with this mesh!
continue
mergeGroupName = safeGetProp( object, 'MergeGroup', 'STRING', object.name )
subName = '-' + mergeGroupName + '.mesh'
subFilename = brushFilename.replace( '.brush', subName )
subFilename = subFilename.replace( 'Raw/', 'Intermediate/' ) # HACKHACK because Blender wants to output beside .blend file
mergegroups[ subFilename ].append( object )
# "SoftCover" is a shortcut for a hull which only blocks occlusion
if safeGetProp( object, 'SoftCover', 'BOOL', False ):
hulls.append( subFilename )
surfaces.append( '' )
blocksentities.append( False )
blockstraces.append( False )
blocksocclusions.append( True )
blocksaudios.append( False )
navignores.append( True )
# "Glass" is a shortcut for a hull which blocks everything but occlusion
elif safeGetProp( object, 'Glass', 'BOOL', False ):
hulls.append( subFilename )
surfaces.append( safeGetProp( object, 'Surface', 'STRING', '' ) )
blocksentities.append( True )
blockstraces.append( True )
blocksocclusions.append( False )
blocksaudios.append( True )
navignores.append( safeGetProp( object, 'NavIgnore', 'BOOL', False ) )
elif safeGetProp( object, 'Hull', 'BOOL', False ) or ( safeGetProp( object, 'Surface', 'STRING', '' ) and not safeGetProp( object, 'EditorHull', 'BOOL', False ) ):
hulls.append( subFilename )
surfaces.append( safeGetProp( object, 'Surface', 'STRING', '' ) )
blocksentities.append( safeGetProp( object, 'BlocksEntities', 'BOOL', True ) )
blockstraces.append( safeGetProp( object, 'BlocksTrace', 'BOOL', True ) )
blocksocclusions.append( safeGetProp( object, 'BlocksOcclusion', 'BOOL', True ) )
blocksaudios.append( safeGetProp( object, 'BlocksAudio', 'BOOL', True ) )
navignores.append( safeGetProp( object, 'NavIgnore', 'BOOL', False ) )
elif safeGetProp( object, 'Ambient', 'BOOL', False ) or safeGetProp( object, 'Cubemap', 'STRING', '' ):
ambients.append( subFilename )
cubemaps.append( safeGetProp( object, 'Cubemap', 'STRING', '' ) )
elif safeGetProp( object, 'FogMesh', 'BOOL', False ) or safeGetProp( object, 'FogMeshDef', 'STRING', '' ):
fogmeshes.append( subFilename )
fogmeshdefs.append( safeGetProp( object, 'FogMeshDef', 'STRING', '' ) )
elif safeGetProp( object, 'EditorMesh', 'BOOL', False ):
editormeshes.append( subFilename )
elif safeGetProp( object, 'EditorHull', 'BOOL', False ):
editorhulls.append( subFilename )
else:
if subFilename in meshes:
# We've already added this mesh! Don't add redundant meshes when using MergeGroup
pass
else:
meshes.append( subFilename )
materials.append( safeGetProp( object, 'Material', 'STRING', '' ) )
editorhiddens.append( safeGetProp( object, 'EditorHidden', 'BOOL', False ) )
castsshadows.append( safeGetProp( object, 'CastsShadows', 'BOOL', True ) )
for filename, objects in mergegroups.iteritems():
exportMesh( filename, objects )
brushFile = open( brushFilename, "wb" )
# HACKHACK: We shouldn't know anything about bake structure here either
relativeBrushFilename = brushFilename.lstrip( '../Raw/' ).replace( '.brush', '.cbr' ).replace( '\\', '/' )
brushFile.write( '[%s]\n' % relativeBrushFilename )
brushFile.write( 'NumMeshes = &\n' )
brushFile.write( '@ Mesh\n' )
for meshFilename, materialName, editorHidden, castsShadows in zip( meshes, materials, editorhiddens, castsshadows ):
# HACKHACK: We shouldn't know anything about bake structure here either
relativeMeshFilename = meshFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeMeshFilename )
if materialName != '':
brushFile.write( '@@^Material = "%s"\n' % materialName )
if editorHidden:
brushFile.write( '@@^EditorHidden = true\n' )
if not castsShadows:
brushFile.write( '@@^CastsShadows = false\n' )
brushFile.write( 'NumHulls = &\n' )
brushFile.write( '@ Hull\n' )
for hullFilename, surfaceName, blocksEntities, blocksTrace, blocksOcclusion, blocksAudio, navIgnore in zip( hulls, surfaces, blocksentities, blockstraces, blocksocclusions, blocksaudios, navignores ):
# HACKHACK: We shouldn't know anything about bake structure here either
relativeHullFilename = hullFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeHullFilename )
if surfaceName != '':
brushFile.write( '@@^Surface = "%s"\n' % surfaceName )
if not blocksEntities:
brushFile.write( '@@^BlocksEntities = false\n' )
if not blocksTrace:
brushFile.write( '@@^BlocksTrace = false\n' )
if not blocksOcclusion:
brushFile.write( '@@^BlocksOcclusion = false\n' )
if not blocksAudio:
brushFile.write( '@@^BlocksAudio = false\n' )
if navIgnore:
brushFile.write( '@@^NavIgnore = true\n' )
brushFile.write( 'NumAmbientLights = &\n' )
brushFile.write( '@ AmbientLight\n' )
for ambientFilename, cubemapName in zip( ambients, cubemaps ):
# HACKHACK: We shouldn't know anything about bake structure here either
relativeAmbientFilename = ambientFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeAmbientFilename )
if cubemapName != '':
brushFile.write( '@@^Cubemap = "%s"\n' % cubemapName )
brushFile.write( 'NumFogMeshes = &\n' )
brushFile.write( '@ FogMesh\n' )
for fogMeshFilename, fogMeshDefName in zip( fogmeshes, fogmeshdefs ):
# HACKHACK: We shouldn't know anything about bake structure here either
relativeFogMeshFilename = fogMeshFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeFogMeshFilename )
if fogMeshDefName != '':
brushFile.write( '@@^FogMeshDef = "%s"\n' % fogMeshDefName )
brushFile.write( 'NumEditorMeshes = &\n' )
brushFile.write( '@ EditorMesh\n' )
for meshFilename in editormeshes:
# HACKHACK: We shouldn't know anything about bake structure here either
relativeMeshFilename = meshFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeMeshFilename )
brushFile.write( 'NumEditorHulls = &\n' )
brushFile.write( '@ EditorHull\n' )
for hullFilename in editorhulls:
# HACKHACK: We shouldn't know anything about bake structure here either
relativeHullFilename = hullFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeHullFilename )
brushFile.close()
endTime = Blender.sys.time()
totalTime = endTime - startTime
message = 'Successfully exported "%s" in %.4f seconds' % ( Blender.sys.basename( brushFilename ), totalTime )
print message
def main():
if Blender.mode == 'interactive':
Blender.Window.FileSelector( write, 'Brush Export', Blender.sys.makename( ext = '.brush' ) )
else:
# Find the blend file argument
for arg in sys.argv:
if '.blend' in arg:
write( Blender.sys.makename( arg, '.brush' ) )
if __name__=='__main__':
main() | #!BPY
"""
Name: 'Brush V1 (.brush)...'
Blender: 243
Group: 'Export'
Tooltip: 'Export selected mesh to Engine Brush V1 Format (.brush)'
"""
__bpydoc__ = """\
Intermediate output for Engine brushes.
"""
# This emits a .brush file (in config file format)
# and a .mesh for each object in the scene.
import sys
import Blender
from Blender import Mesh, Modifier, Mathutils
from Blender.Mathutils import *
import BPyMesh
from collections import defaultdict
def safeGetProp( object, name, type, default ):
try:
if not object:
return default
prop = object.getProperty( name )
if not prop:
return default
if prop.getType() != type:
return default
return prop.getData()
except:
return default
def exportMesh( subFilename, objects ):
subFile = open( subFilename, "wb" )
subFile.write( '<mesh>\n' )
# Only export the materials for the first object in the merge group
firstObject = objects[0]
firstMesh = firstObject.getData( False, True )
for mat in firstMesh.materials:
if mat:
for mtex in mat.getTextures():
if mtex and mtex.tex and mtex.tex.image:
subFile.write( '\t<tex file="%s" />\n' % mtex.tex.image.filename )
for object in objects:
# HACKHACK: Modified radial normals for foliage
radialNormals = safeGetProp( object, 'RadialNormals', 'BOOL', False )
radialOffsetZ = safeGetProp( object, 'RadialOffsetZ', 'FLOAT', 0.0 )
radialScalarZ = 1.0 / safeGetProp( object, 'RadialScalarZ', 'FLOAT', 1.0 ) # Divide here, since it's more intuitive to imagine scaling a sphere around the object
objLoc = Vector( object.loc )
objLoc.z += radialOffsetZ
directedNormals = safeGetProp( object, 'DirectedNormals', 'BOOL', False )
directedX = safeGetProp( object, 'DirectedX', 'FLOAT', 0.0 )
directedY = safeGetProp( object, 'DirectedY', 'FLOAT', 0.0 )
directedZ = safeGetProp( object, 'DirectedZ', 'FLOAT', 0.0 )
directedNormal = Vector( directedX, directedY, directedZ )
directedNormal.normalize()
normalsB = safeGetProp( object, 'NormalsB', 'BOOL', False )
mesh = object.getData( False, True )
mesh.transform( object.matrixWorld, True )
for face in mesh.faces:
subFile.write('\t<face>\n')
idx = 0;
for vert in face.verts:
subFile.write( '\t\t<vert>\n' )
subFile.write( '\t\t\t<pos x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.co ) )
if mesh.faceUV:
subFile.write( '\t\t\t<uv x="%.6f" y="%.6f" />\n' % tuple( face.uv[ idx ] ) )
elif mesh.vertexUV:
subFile.write( '\t\t\t<uv x="%.6f" y="%.6f" />\n' % tuple( vert.uvco ) )
if radialNormals:
vertLoc = vert.co
vertOffset = vertLoc - objLoc
vertOffset.z *= radialScalarZ
vertOffset.normalize()
# ZETA: Write the actual normals as norm and the bent normals as normb
subFile.write( '\t\t\t<norm x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.no ) )
subFile.write( '\t\t\t<normb x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vertOffset ) )
elif directedNormals:
# ZETA: Write the actual normals as norm and the bent normals as normb
subFile.write( '\t\t\t<norm x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.no ) )
subFile.write( '\t\t\t<normb x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( directedNormal ) )
else:
subFile.write( '\t\t\t<norm x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.no ) )
if normalsB:
# Also write the normals as a second channel, which we need for foliage
subFile.write( '\t\t\t<normb x="%.6f" y="%.6f" z="%.6f" />\n' % tuple( vert.no ) )
if mesh.vertexColors:
col = face.col[ idx ]
subFile.write( '\t\t\t<col r="%d" g="%d" b="%d" />\n' % ( col.r, col.g, col.b ) )
subFile.write( '\t\t</vert>\n' )
idx = idx + 1
subFile.write( '\t</face>\n' )
mesh.transform( object.getInverseMatrix(), True )
subFile.write('</mesh>\n')
subFile.close()
def write( brushFilename ):
startTime = Blender.sys.time()
if not brushFilename.lower().endswith('.brush'):
brushFilename += '.brush'
currentScene = Blender.Scene.GetCurrent()
meshes = []
materials = []
hulls = []
surfaces = []
blocksentities = []
blockstraces = []
blocksocclusions = []
blocksaudios = []
navignores = []
ambients = []
cubemaps = []
fogmeshes = []
fogmeshdefs = []
editormeshes = []
editorhulls = []
editorhiddens = []
castsshadows = []
mergegroups = defaultdict( list )
for object in currentScene.objects:
if safeGetProp( object, 'Ignore', 'BOOL', False ):
# Do nothing with this mesh!
continue
mergeGroupName = safeGetProp( object, 'MergeGroup', 'STRING', object.name )
subName = '-' + mergeGroupName + '.mesh'
subFilename = brushFilename.replace( '.brush', subName )
subFilename = subFilename.replace( 'Raw/', 'Intermediate/' ) # HACKHACK because Blender wants to output beside .blend file
mergegroups[ subFilename ].append( object )
# "SoftCover" is a shortcut for a hull which only blocks occlusion
if safeGetProp( object, 'SoftCover', 'BOOL', False ):
hulls.append( subFilename )
surfaces.append( '' )
blocksentities.append( False )
blockstraces.append( False )
blocksocclusions.append( True )
blocksaudios.append( False )
navignores.append( True )
# "Glass" is a shortcut for a hull which blocks everything but occlusion
elif safeGetProp( object, 'Glass', 'BOOL', False ):
hulls.append( subFilename )
surfaces.append( safeGetProp( object, 'Surface', 'STRING', '' ) )
blocksentities.append( True )
blockstraces.append( True )
blocksocclusions.append( False )
blocksaudios.append( True )
navignores.append( safeGetProp( object, 'NavIgnore', 'BOOL', False ) )
elif safeGetProp( object, 'Hull', 'BOOL', False ) or ( safeGetProp( object, 'Surface', 'STRING', '' ) and not safeGetProp( object, 'EditorHull', 'BOOL', False ) ):
hulls.append( subFilename )
surfaces.append( safeGetProp( object, 'Surface', 'STRING', '' ) )
blocksentities.append( safeGetProp( object, 'BlocksEntities', 'BOOL', True ) )
blockstraces.append( safeGetProp( object, 'BlocksTrace', 'BOOL', True ) )
blocksocclusions.append( safeGetProp( object, 'BlocksOcclusion', 'BOOL', True ) )
blocksaudios.append( safeGetProp( object, 'BlocksAudio', 'BOOL', True ) )
navignores.append( safeGetProp( object, 'NavIgnore', 'BOOL', False ) )
elif safeGetProp( object, 'Ambient', 'BOOL', False ) or safeGetProp( object, 'Cubemap', 'STRING', '' ):
ambients.append( subFilename )
cubemaps.append( safeGetProp( object, 'Cubemap', 'STRING', '' ) )
elif safeGetProp( object, 'FogMesh', 'BOOL', False ) or safeGetProp( object, 'FogMeshDef', 'STRING', '' ):
fogmeshes.append( subFilename )
fogmeshdefs.append( safeGetProp( object, 'FogMeshDef', 'STRING', '' ) )
elif safeGetProp( object, 'EditorMesh', 'BOOL', False ):
editormeshes.append( subFilename )
elif safeGetProp( object, 'EditorHull', 'BOOL', False ):
editorhulls.append( subFilename )
else:
if subFilename in meshes:
# We've already added this mesh! Don't add redundant meshes when using MergeGroup
pass
else:
meshes.append( subFilename )
materials.append( safeGetProp( object, 'Material', 'STRING', '' ) )
editorhiddens.append( safeGetProp( object, 'EditorHidden', 'BOOL', False ) )
castsshadows.append( safeGetProp( object, 'CastsShadows', 'BOOL', True ) )
for filename, objects in mergegroups.iteritems():
exportMesh( filename, objects )
brushFile = open( brushFilename, "wb" )
# HACKHACK: We shouldn't know anything about bake structure here either
relativeBrushFilename = brushFilename.lstrip( '../Raw/' ).replace( '.brush', '.cbr' ).replace( '\\', '/' )
brushFile.write( '[%s]\n' % relativeBrushFilename )
brushFile.write( 'NumMeshes = &\n' )
brushFile.write( '@ Mesh\n' )
for meshFilename, materialName, editorHidden, castsShadows in zip( meshes, materials, editorhiddens, castsshadows ):
# HACKHACK: We shouldn't know anything about bake structure here either
relativeMeshFilename = meshFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeMeshFilename )
if materialName != '':
brushFile.write( '@@^Material = "%s"\n' % materialName )
if editorHidden:
brushFile.write( '@@^EditorHidden = true\n' )
if not castsShadows:
brushFile.write( '@@^CastsShadows = false\n' )
brushFile.write( 'NumHulls = &\n' )
brushFile.write( '@ Hull\n' )
for hullFilename, surfaceName, blocksEntities, blocksTrace, blocksOcclusion, blocksAudio, navIgnore in zip( hulls, surfaces, blocksentities, blockstraces, blocksocclusions, blocksaudios, navignores ):
# HACKHACK: We shouldn't know anything about bake structure here either
relativeHullFilename = hullFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeHullFilename )
if surfaceName != '':
brushFile.write( '@@^Surface = "%s"\n' % surfaceName )
if not blocksEntities:
brushFile.write( '@@^BlocksEntities = false\n' )
if not blocksTrace:
brushFile.write( '@@^BlocksTrace = false\n' )
if not blocksOcclusion:
brushFile.write( '@@^BlocksOcclusion = false\n' )
if not blocksAudio:
brushFile.write( '@@^BlocksAudio = false\n' )
if navIgnore:
brushFile.write( '@@^NavIgnore = true\n' )
brushFile.write( 'NumAmbientLights = &\n' )
brushFile.write( '@ AmbientLight\n' )
for ambientFilename, cubemapName in zip( ambients, cubemaps ):
# HACKHACK: We shouldn't know anything about bake structure here either
relativeAmbientFilename = ambientFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeAmbientFilename )
if cubemapName != '':
brushFile.write( '@@^Cubemap = "%s"\n' % cubemapName )
brushFile.write( 'NumFogMeshes = &\n' )
brushFile.write( '@ FogMesh\n' )
for fogMeshFilename, fogMeshDefName in zip( fogmeshes, fogmeshdefs ):
# HACKHACK: We shouldn't know anything about bake structure here either
relativeFogMeshFilename = fogMeshFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeFogMeshFilename )
if fogMeshDefName != '':
brushFile.write( '@@^FogMeshDef = "%s"\n' % fogMeshDefName )
brushFile.write( 'NumEditorMeshes = &\n' )
brushFile.write( '@ EditorMesh\n' )
for meshFilename in editormeshes:
# HACKHACK: We shouldn't know anything about bake structure here either
relativeMeshFilename = meshFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeMeshFilename )
brushFile.write( 'NumEditorHulls = &\n' )
brushFile.write( '@ EditorHull\n' )
for hullFilename in editorhulls:
# HACKHACK: We shouldn't know anything about bake structure here either
relativeHullFilename = hullFilename.lstrip( '../Intermediate/' ).replace( '.mesh', '.cms' ).replace( '\\', '/' )
brushFile.write( '@@& = "%s"\n' % relativeHullFilename )
brushFile.close()
endTime = Blender.sys.time()
totalTime = endTime - startTime
message = 'Successfully exported "%s" in %.4f seconds' % ( Blender.sys.basename( brushFilename ), totalTime )
print message
def main():
if Blender.mode == 'interactive':
Blender.Window.FileSelector( write, 'Brush Export', Blender.sys.makename( ext = '.brush' ) )
else:
# Find the blend file argument
for arg in sys.argv:
if '.blend' in arg:
write( Blender.sys.makename( arg, '.brush' ) )
if __name__=='__main__':
main() | en | 0.896376 | #!BPY Name: 'Brush V1 (.brush)...' Blender: 243 Group: 'Export' Tooltip: 'Export selected mesh to Engine Brush V1 Format (.brush)' \ Intermediate output for Engine brushes. # This emits a .brush file (in config file format) # and a .mesh for each object in the scene. # Only export the materials for the first object in the merge group # HACKHACK: Modified radial normals for foliage # Divide here, since it's more intuitive to imagine scaling a sphere around the object # ZETA: Write the actual normals as norm and the bent normals as normb # ZETA: Write the actual normals as norm and the bent normals as normb # Also write the normals as a second channel, which we need for foliage # Do nothing with this mesh! # HACKHACK because Blender wants to output beside .blend file # "SoftCover" is a shortcut for a hull which only blocks occlusion # "Glass" is a shortcut for a hull which blocks everything but occlusion # We've already added this mesh! Don't add redundant meshes when using MergeGroup # HACKHACK: We shouldn't know anything about bake structure here either # HACKHACK: We shouldn't know anything about bake structure here either # HACKHACK: We shouldn't know anything about bake structure here either # HACKHACK: We shouldn't know anything about bake structure here either # HACKHACK: We shouldn't know anything about bake structure here either # HACKHACK: We shouldn't know anything about bake structure here either # HACKHACK: We shouldn't know anything about bake structure here either # Find the blend file argument | 2.543667 | 3 |
boiler/conn.py | rudineirk/faculdade_boiler | 0 | 6619871 | <reponame>rudineirk/faculdade_boiler
#!/usr/bin/python3
from __future__ import print_function
import socket
from threading import Semaphore
__all__ = [
'BoilerConn',
]
MSG_AIR_TEMP = b"sta0"
MSG_WATER_INSIDE_TEMP = b"st-0"
MSG_WATER_IN_TEMP = b"sti0"
MSG_WATER_OUT_TEMP = b"sno0"
MSG_WATER_COLUMN = b"sh-0"
CMD_HEAT_FLUX = b"aq-"
CMD_WATER_FLUX = b"ani"
class BoilerConn(object):
def __init__(self, host="127.0.0.1", port=4545):
self.host = host
self.port = port
self.sock = None
self._heat_flux = 0.0
self._water_flux = 0.0
self._semaphore = Semaphore()
self.open()
def open(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def close(self):
self.sock.close()
def _lock(self):
self._semaphore.acquire()
def _unlock(self):
self._semaphore.release()
def _send(self, msg):
return self.sock.sendto(msg, (self.host, self.port))
def _read(self, size=10000):
return self.sock.recv(size)
def _set_cmd(self, cmd, value):
self._lock()
value = str(value).encode()
self._send(cmd + value + b"\r\n")
self._read()
self._unlock()
def _get_msg(self, msg):
self._lock()
self._send(msg)
data = self._read()
self._unlock()
data = data.decode("utf-8")
data = data[3:].replace(",", ".").strip()
try:
data = float(data)
except ValueError:
data = data.split('.')
new_data = ''.join(data[:-1]) + '.' + data[-1]
data = float(new_data)
return data
@property
def heat_flux(self):
return self._heat_flux
@heat_flux.setter
def heat_flux(self, value):
self._heat_flux = value
self._set_cmd(CMD_HEAT_FLUX, value)
@property
def water_flux(self):
return self._water_flux
@water_flux.setter
def water_flux(self, value):
self._water_flux = value
self._set_cmd(CMD_WATER_FLUX, value)
@property
def air_temp(self):
return self._get_msg(MSG_AIR_TEMP)
@property
def water_inside_temp(self):
return self._get_msg(MSG_WATER_INSIDE_TEMP)
@property
def water_in_temp(self):
return self._get_msg(MSG_WATER_IN_TEMP)
@property
def water_out_temp(self):
return self._get_msg(MSG_WATER_OUT_TEMP)
@property
def water_column(self):
return self._get_msg(MSG_WATER_COLUMN)
| #!/usr/bin/python3
from __future__ import print_function
import socket
from threading import Semaphore
__all__ = [
'BoilerConn',
]
MSG_AIR_TEMP = b"sta0"
MSG_WATER_INSIDE_TEMP = b"st-0"
MSG_WATER_IN_TEMP = b"sti0"
MSG_WATER_OUT_TEMP = b"sno0"
MSG_WATER_COLUMN = b"sh-0"
CMD_HEAT_FLUX = b"aq-"
CMD_WATER_FLUX = b"ani"
class BoilerConn(object):
def __init__(self, host="127.0.0.1", port=4545):
self.host = host
self.port = port
self.sock = None
self._heat_flux = 0.0
self._water_flux = 0.0
self._semaphore = Semaphore()
self.open()
def open(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def close(self):
self.sock.close()
def _lock(self):
self._semaphore.acquire()
def _unlock(self):
self._semaphore.release()
def _send(self, msg):
return self.sock.sendto(msg, (self.host, self.port))
def _read(self, size=10000):
return self.sock.recv(size)
def _set_cmd(self, cmd, value):
self._lock()
value = str(value).encode()
self._send(cmd + value + b"\r\n")
self._read()
self._unlock()
def _get_msg(self, msg):
self._lock()
self._send(msg)
data = self._read()
self._unlock()
data = data.decode("utf-8")
data = data[3:].replace(",", ".").strip()
try:
data = float(data)
except ValueError:
data = data.split('.')
new_data = ''.join(data[:-1]) + '.' + data[-1]
data = float(new_data)
return data
@property
def heat_flux(self):
return self._heat_flux
@heat_flux.setter
def heat_flux(self, value):
self._heat_flux = value
self._set_cmd(CMD_HEAT_FLUX, value)
@property
def water_flux(self):
return self._water_flux
@water_flux.setter
def water_flux(self, value):
self._water_flux = value
self._set_cmd(CMD_WATER_FLUX, value)
@property
def air_temp(self):
return self._get_msg(MSG_AIR_TEMP)
@property
def water_inside_temp(self):
return self._get_msg(MSG_WATER_INSIDE_TEMP)
@property
def water_in_temp(self):
return self._get_msg(MSG_WATER_IN_TEMP)
@property
def water_out_temp(self):
return self._get_msg(MSG_WATER_OUT_TEMP)
@property
def water_column(self):
return self._get_msg(MSG_WATER_COLUMN) | fr | 0.386793 | #!/usr/bin/python3 | 2.904512 | 3 |
DEPENDENCIES/utf/tests/ut_utftests_test_module.py | kevinkenzhao/Repy2 | 0 | 6619872 | <gh_stars>0
import subprocess
import sys
sub = subprocess.Popen([sys.executable, 'utf.py', '-m', 'stagedtest'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
(out, err) = sub.communicate()
#should cause test to fail if there's anything on stderr
if err != '':
print "FAIL: test produced on standard out"
if not "Testing module: stagedtest" in out:
print "FAIL: module test output incorrect"
if not "Running: ut_stagedtest_falsetestone.py" in out:
print "FAIL: ut_stagedtest_falsetestone.py did not show up in test list"
if not "Running: ut_stagedtest_falsetesttwo.py" in out:
print "FAIL: ut_stagedtest_falsetesttwo.py did not show up in test list"
| import subprocess
import sys
sub = subprocess.Popen([sys.executable, 'utf.py', '-m', 'stagedtest'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
(out, err) = sub.communicate()
#should cause test to fail if there's anything on stderr
if err != '':
print "FAIL: test produced on standard out"
if not "Testing module: stagedtest" in out:
print "FAIL: module test output incorrect"
if not "Running: ut_stagedtest_falsetestone.py" in out:
print "FAIL: ut_stagedtest_falsetestone.py did not show up in test list"
if not "Running: ut_stagedtest_falsetesttwo.py" in out:
print "FAIL: ut_stagedtest_falsetesttwo.py did not show up in test list" | en | 0.91687 | #should cause test to fail if there's anything on stderr | 2.262612 | 2 |
source/lambdas/shared/message.py | aws-samples/tabular-document-digitization | 2 | 6619873 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from shared.defines import *
from shared.helpers import GetCurrentStamp
from shared.document import (
StageMap,
AcquireMap,
ConvertMap,
ExtractMap,
ReshapeMap,
OperateMap,
AugmentMap,
CatalogMap,
)
from dataclasses import asdict, dataclass, field, fields
from decimal import Decimal
from json import JSONEncoder, dumps
from dotmap import DotMap
class MessageEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, Decimal):
return int(o)
return super(MessageEncoder, self).default(o)
@dataclass
class Message:
"""
Generic message object.
Serialized to SQS message body.
"""
DocumentID: str
ActorGrade: str = Grade.PASS
StartStamp: str = field(default_factory = GetCurrentStamp)
FinalStamp: str = ''
# MapUpdates: StageMap = field(default_factory = StageMap)
MapUpdates: DotMap = field(default_factory = DotMap)
def to_json(self):
return dumps(self.to_dict(), cls = MessageEncoder)
def to_dict(self):
d = asdict(self)
if type(d.get('MapUpdates', None)) == DotMap:
d['MapUpdates'] = d['MapUpdates'].toDict()
if type(d.get('MapUpdates', None)) == StageMap:
d['MapUpdates'] = d['MapUpdates'].to_dict()
return d
@classmethod
def from_dict(cls, d):
new_message = cls(**d)
MapUpdates_field = [f for f in fields(cls) if f.name == 'MapUpdates'][0]
new_message.MapUpdates = MapUpdates_field.default_factory(**d['MapUpdates'])
return new_message
# Messages Sent by Respective Stage Actors for Current Map Updates
class AcquireMapUpdates(Message):
MapUpdates: field(default_factory = AcquireMap)
class CatalogMapUpdates(Message):
MapUpdates: field(default_factory = CatalogMap)
class ExtractMapUpdates(Message):
MapUpdates: field(default_factory = ExtractMap)
class ReshapeMapUpdates(Message):
MapUpdates: field(default_factory = ReshapeMap)
class OperateMapUpdates(Message):
MapUpdates: field(default_factory = OperateMap)
class AugmentMapUpdates(Message):
MapUpdates: field(default_factory = AugmentMap)
class ConvertMapUpdates(Message):
MapUpdates: field(default_factory = ConvertMap)
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from shared.defines import *
from shared.helpers import GetCurrentStamp
from shared.document import (
StageMap,
AcquireMap,
ConvertMap,
ExtractMap,
ReshapeMap,
OperateMap,
AugmentMap,
CatalogMap,
)
from dataclasses import asdict, dataclass, field, fields
from decimal import Decimal
from json import JSONEncoder, dumps
from dotmap import DotMap
class MessageEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, Decimal):
return int(o)
return super(MessageEncoder, self).default(o)
@dataclass
class Message:
"""
Generic message object.
Serialized to SQS message body.
"""
DocumentID: str
ActorGrade: str = Grade.PASS
StartStamp: str = field(default_factory = GetCurrentStamp)
FinalStamp: str = ''
# MapUpdates: StageMap = field(default_factory = StageMap)
MapUpdates: DotMap = field(default_factory = DotMap)
def to_json(self):
return dumps(self.to_dict(), cls = MessageEncoder)
def to_dict(self):
d = asdict(self)
if type(d.get('MapUpdates', None)) == DotMap:
d['MapUpdates'] = d['MapUpdates'].toDict()
if type(d.get('MapUpdates', None)) == StageMap:
d['MapUpdates'] = d['MapUpdates'].to_dict()
return d
@classmethod
def from_dict(cls, d):
new_message = cls(**d)
MapUpdates_field = [f for f in fields(cls) if f.name == 'MapUpdates'][0]
new_message.MapUpdates = MapUpdates_field.default_factory(**d['MapUpdates'])
return new_message
# Messages Sent by Respective Stage Actors for Current Map Updates
class AcquireMapUpdates(Message):
MapUpdates: field(default_factory = AcquireMap)
class CatalogMapUpdates(Message):
MapUpdates: field(default_factory = CatalogMap)
class ExtractMapUpdates(Message):
MapUpdates: field(default_factory = ExtractMap)
class ReshapeMapUpdates(Message):
MapUpdates: field(default_factory = ReshapeMap)
class OperateMapUpdates(Message):
MapUpdates: field(default_factory = OperateMap)
class AugmentMapUpdates(Message):
MapUpdates: field(default_factory = AugmentMap)
class ConvertMapUpdates(Message):
MapUpdates: field(default_factory = ConvertMap)
| en | 0.684495 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 Generic message object. Serialized to SQS message body. # MapUpdates: StageMap = field(default_factory = StageMap) # Messages Sent by Respective Stage Actors for Current Map Updates | 1.907956 | 2 |
tests/test_property_value.py | data4knowledge/RdfOgm | 0 | 6619874 | <filename>tests/test_property_value.py
import pytest
from rdfogm.property_literal import PropertyLiteral
from rdfogm.property_uri import PropertyUri
from rdfogm.property_value import PropertyValue
def test_initial_state():
value = PropertyValue()
assert value.value == None
assert value.to_be_saved() == False
def test_initial_state_value():
value = PropertyValue(PropertyLiteral("New"))
assert value.value == PropertyLiteral("New")
assert value.to_be_saved() == True
def test_set():
value = PropertyValue()
value.value = PropertyLiteral("Set")
assert value.value == PropertyLiteral("Set")
assert value.to_be_saved() == True
def test_cleared():
value = PropertyValue()
value.value = "Set"
assert value.to_be_saved() == True
value.saved()
assert value.value == "Set"
assert value.to_be_saved() == False
def test_str_literal():
value = PropertyValue(PropertyLiteral("YYY"))
assert value.__str__() == 'YYY'
def test_str_uri():
uri = PropertyUri("http://example.com#A")
value = PropertyValue(uri)
assert value.__str__() == 'http://example.com#A'
| <filename>tests/test_property_value.py
import pytest
from rdfogm.property_literal import PropertyLiteral
from rdfogm.property_uri import PropertyUri
from rdfogm.property_value import PropertyValue
def test_initial_state():
value = PropertyValue()
assert value.value == None
assert value.to_be_saved() == False
def test_initial_state_value():
value = PropertyValue(PropertyLiteral("New"))
assert value.value == PropertyLiteral("New")
assert value.to_be_saved() == True
def test_set():
value = PropertyValue()
value.value = PropertyLiteral("Set")
assert value.value == PropertyLiteral("Set")
assert value.to_be_saved() == True
def test_cleared():
value = PropertyValue()
value.value = "Set"
assert value.to_be_saved() == True
value.saved()
assert value.value == "Set"
assert value.to_be_saved() == False
def test_str_literal():
value = PropertyValue(PropertyLiteral("YYY"))
assert value.__str__() == 'YYY'
def test_str_uri():
uri = PropertyUri("http://example.com#A")
value = PropertyValue(uri)
assert value.__str__() == 'http://example.com#A'
| none | 1 | 2.354023 | 2 | |
p888_fair_candy_swap.py | feigaochn/leetcode | 0 | 6619875 | """
Alice and Bob have candy bars of different sizes: A[i] is the size of the i-th bar of candy that Alice has, and B[j] is the size of the j-th bar of candy that Bob has.
Since they are friends, they would like to exchange one candy bar each so that after the exchange, they both have the same total amount of candy. (The total amount of candy a person has is the sum of the sizes of candy bars they have.)
Return an integer array ans where ans[0] is the size of the candy bar that Alice must exchange, and ans[1] is the size of the candy bar that Bob must exchange.
If there are multiple answers, you may return any one of them. It is guaranteed an answer exists.
Example 1:
Input: A = [1,1], B = [2,2]
Output: [1,2]
Example 2:
Input: A = [1,2], B = [2,3]
Output: [1,2]
Example 3:
Input: A = [2], B = [1,3]
Output: [2,3]
Example 4:
Input: A = [1,2,5], B = [2,4]
Output: [5,4]
Note:
1 <= A.length <= 10000
1 <= B.length <= 10000
1 <= A[i] <= 100000
1 <= B[i] <= 100000
It is guaranteed that Alice and Bob have different total amounts of candy.
It is guaranteed there exists an answer.
"""
class Solution:
def fairCandySwap(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
A.sort()
sa = sum(A)
B.sort()
sb = sum(B)
target = (sa + sb) // 2
diff = sa - target
# Alice give Bob a (x) and Bob give Alice a (x - diff)
ia = ib = 0
while True:
if A[ia] - B[ib] == diff:
return [A[ia], B[ib]]
elif A[ia] - B[ib] > diff:
ib += 1
else:
ia += 1
sol = Solution().fairCandySwap
print(sol([1, 1], [2, 2]))
print(sol([1, 2], [2, 3]))
print(sol([2], [1, 3]))
print(sol([1, 2, 5], [2, 4]))
| """
Alice and Bob have candy bars of different sizes: A[i] is the size of the i-th bar of candy that Alice has, and B[j] is the size of the j-th bar of candy that Bob has.
Since they are friends, they would like to exchange one candy bar each so that after the exchange, they both have the same total amount of candy. (The total amount of candy a person has is the sum of the sizes of candy bars they have.)
Return an integer array ans where ans[0] is the size of the candy bar that Alice must exchange, and ans[1] is the size of the candy bar that Bob must exchange.
If there are multiple answers, you may return any one of them. It is guaranteed an answer exists.
Example 1:
Input: A = [1,1], B = [2,2]
Output: [1,2]
Example 2:
Input: A = [1,2], B = [2,3]
Output: [1,2]
Example 3:
Input: A = [2], B = [1,3]
Output: [2,3]
Example 4:
Input: A = [1,2,5], B = [2,4]
Output: [5,4]
Note:
1 <= A.length <= 10000
1 <= B.length <= 10000
1 <= A[i] <= 100000
1 <= B[i] <= 100000
It is guaranteed that Alice and Bob have different total amounts of candy.
It is guaranteed there exists an answer.
"""
class Solution:
def fairCandySwap(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
A.sort()
sa = sum(A)
B.sort()
sb = sum(B)
target = (sa + sb) // 2
diff = sa - target
# Alice give Bob a (x) and Bob give Alice a (x - diff)
ia = ib = 0
while True:
if A[ia] - B[ib] == diff:
return [A[ia], B[ib]]
elif A[ia] - B[ib] > diff:
ib += 1
else:
ia += 1
sol = Solution().fairCandySwap
print(sol([1, 1], [2, 2]))
print(sol([1, 2], [2, 3]))
print(sol([2], [1, 3]))
print(sol([1, 2, 5], [2, 4]))
| en | 0.915876 | Alice and Bob have candy bars of different sizes: A[i] is the size of the i-th bar of candy that Alice has, and B[j] is the size of the j-th bar of candy that Bob has. Since they are friends, they would like to exchange one candy bar each so that after the exchange, they both have the same total amount of candy. (The total amount of candy a person has is the sum of the sizes of candy bars they have.) Return an integer array ans where ans[0] is the size of the candy bar that Alice must exchange, and ans[1] is the size of the candy bar that Bob must exchange. If there are multiple answers, you may return any one of them. It is guaranteed an answer exists. Example 1: Input: A = [1,1], B = [2,2] Output: [1,2] Example 2: Input: A = [1,2], B = [2,3] Output: [1,2] Example 3: Input: A = [2], B = [1,3] Output: [2,3] Example 4: Input: A = [1,2,5], B = [2,4] Output: [5,4] Note: 1 <= A.length <= 10000 1 <= B.length <= 10000 1 <= A[i] <= 100000 1 <= B[i] <= 100000 It is guaranteed that Alice and Bob have different total amounts of candy. It is guaranteed there exists an answer. :type A: List[int] :type B: List[int] :rtype: List[int] # Alice give Bob a (x) and Bob give Alice a (x - diff) | 3.926944 | 4 |
expand_yce.py | Illidanz/GurrenTranslation | 3 | 6619876 | <filename>expand_yce.py
from hacktools import common
animfiles = {
2: "data/extract_NFP/NFP2D.NFP/AV03_01.YCE",
5: "data/extract_NFP/NFP2D.NFP/AV09_01.YCE",
6: "data/extract_NFP/NFP2D.NFP/AV08_01.YCE",
7: "data/extract_NFP/NFP2D.NFP/AV01_04.YCE",
8: "data/extract_NFP/NFP2D.NFP/AV05_02.YCE"
}
def run(file, addframes):
infile = "data/extract_NFP/NFP2D.NFP/" + file
outfile = "data/work_YCE/" + file
common.logMessage("Expanding", infile, "to", outfile, "...")
with common.Stream(outfile, "wb") as f:
with common.Stream(infile, "rb") as fin:
# Copy header
f.write(fin.read(28))
# Image number
num = fin.readUInt()
fin.seek(num * 4, 1)
f.writeUInt(num + addframes)
# Make room for the positions
offsetpos = f.tell()
for i in range(num + addframes):
f.writeUInt(0)
# Copy the existing images
for i in range(num):
newpos = f.tell()
f.seek(offsetpos + i * 4)
f.writeUInt(newpos - 24)
f.seek(newpos)
size = fin.readUInt()
fin.seek(-4, 1)
data = fin.read(size)
f.write(data)
# Add the new frames
for i in range(num, num + addframes):
newpos = f.tell()
f.seek(offsetpos + i * 4)
f.writeUInt(newpos - 24)
f.seek(newpos)
f.write(data)
# Read the animation frames from another file
animoffset = f.tell()
with common.Stream(animfiles[num + addframes], "rb") as fin:
fin.seek(20)
animoffset2 = fin.readUInt()
fin.seek(animoffset2)
animsize = fin.readUInt()
fin.seek(-4, 1)
f.write(fin.read(animsize))
totsize = f.tell()
# Pad with 0s
f.writeZero(16 - (f.tell() % 16))
# Write new sizes and offsets
f.seek(8)
f.writeUInt(totsize)
f.seek(20)
f.writeUInt(animoffset)
f.writeUInt(animoffset - 32)
common.logMessage("Done!")
| <filename>expand_yce.py
from hacktools import common
animfiles = {
2: "data/extract_NFP/NFP2D.NFP/AV03_01.YCE",
5: "data/extract_NFP/NFP2D.NFP/AV09_01.YCE",
6: "data/extract_NFP/NFP2D.NFP/AV08_01.YCE",
7: "data/extract_NFP/NFP2D.NFP/AV01_04.YCE",
8: "data/extract_NFP/NFP2D.NFP/AV05_02.YCE"
}
def run(file, addframes):
infile = "data/extract_NFP/NFP2D.NFP/" + file
outfile = "data/work_YCE/" + file
common.logMessage("Expanding", infile, "to", outfile, "...")
with common.Stream(outfile, "wb") as f:
with common.Stream(infile, "rb") as fin:
# Copy header
f.write(fin.read(28))
# Image number
num = fin.readUInt()
fin.seek(num * 4, 1)
f.writeUInt(num + addframes)
# Make room for the positions
offsetpos = f.tell()
for i in range(num + addframes):
f.writeUInt(0)
# Copy the existing images
for i in range(num):
newpos = f.tell()
f.seek(offsetpos + i * 4)
f.writeUInt(newpos - 24)
f.seek(newpos)
size = fin.readUInt()
fin.seek(-4, 1)
data = fin.read(size)
f.write(data)
# Add the new frames
for i in range(num, num + addframes):
newpos = f.tell()
f.seek(offsetpos + i * 4)
f.writeUInt(newpos - 24)
f.seek(newpos)
f.write(data)
# Read the animation frames from another file
animoffset = f.tell()
with common.Stream(animfiles[num + addframes], "rb") as fin:
fin.seek(20)
animoffset2 = fin.readUInt()
fin.seek(animoffset2)
animsize = fin.readUInt()
fin.seek(-4, 1)
f.write(fin.read(animsize))
totsize = f.tell()
# Pad with 0s
f.writeZero(16 - (f.tell() % 16))
# Write new sizes and offsets
f.seek(8)
f.writeUInt(totsize)
f.seek(20)
f.writeUInt(animoffset)
f.writeUInt(animoffset - 32)
common.logMessage("Done!")
| en | 0.799187 | # Copy header # Image number # Make room for the positions # Copy the existing images # Add the new frames # Read the animation frames from another file # Pad with 0s # Write new sizes and offsets | 2.38578 | 2 |
pyrdf2vec/embedders/word2vec.py | heureka-labs/pyRDF2Vec | 154 | 6619877 | from __future__ import annotations
from typing import List
import attr
from gensim.models.word2vec import Word2Vec as W2V
from pyrdf2vec.embedders import Embedder
from pyrdf2vec.typings import Embeddings, Entities, SWalk
@attr.s(init=False)
class Word2Vec(Embedder):
"""Defines the Word2Vec embedding technique.
SEE: https://radimrehurek.com/gensim/models/word2vec.html
Attributes:
_model: The gensim.models.word2vec model.
Defaults to None.
kwargs: The keyword arguments dictionary.
Defaults to { min_count=0 }.
"""
kwargs = attr.ib(init=False, default=None)
_model = attr.ib(init=False, type=W2V, default=None, repr=False)
def __init__(self, **kwargs):
self.kwargs = {
"min_count": 0,
**kwargs,
}
self._model = W2V(**self.kwargs)
def fit(
self, walks: List[List[SWalk]], is_update: bool = False
) -> Embedder:
"""Fits the Word2Vec model based on provided walks.
Args:
walks: The walks to create the corpus to to fit the model.
is_update: True if the new walks should be added to old model's
walks, False otherwise.
Defaults to False.
Returns:
The fitted Word2Vec model.
"""
corpus = [walk for entity_walks in walks for walk in entity_walks]
self._model.build_vocab(corpus, update=is_update)
self._model.train(
corpus,
total_examples=self._model.corpus_count,
epochs=self._model.epochs,
)
return self
def transform(self, entities: Entities) -> Embeddings:
"""The features vector of the provided entities.
Args:
entities: The entities including test entities to create the
embeddings. Since RDF2Vec is unsupervised, there is no label
leakage.
Returns:
The features vector of the provided entities.
"""
if not all([entity in self._model.wv for entity in entities]):
raise ValueError(
"The entities must have been provided to fit() first "
"before they can be transformed into a numerical vector."
)
return [self._model.wv.get_vector(entity) for entity in entities]
| from __future__ import annotations
from typing import List
import attr
from gensim.models.word2vec import Word2Vec as W2V
from pyrdf2vec.embedders import Embedder
from pyrdf2vec.typings import Embeddings, Entities, SWalk
@attr.s(init=False)
class Word2Vec(Embedder):
"""Defines the Word2Vec embedding technique.
SEE: https://radimrehurek.com/gensim/models/word2vec.html
Attributes:
_model: The gensim.models.word2vec model.
Defaults to None.
kwargs: The keyword arguments dictionary.
Defaults to { min_count=0 }.
"""
kwargs = attr.ib(init=False, default=None)
_model = attr.ib(init=False, type=W2V, default=None, repr=False)
def __init__(self, **kwargs):
self.kwargs = {
"min_count": 0,
**kwargs,
}
self._model = W2V(**self.kwargs)
def fit(
self, walks: List[List[SWalk]], is_update: bool = False
) -> Embedder:
"""Fits the Word2Vec model based on provided walks.
Args:
walks: The walks to create the corpus to to fit the model.
is_update: True if the new walks should be added to old model's
walks, False otherwise.
Defaults to False.
Returns:
The fitted Word2Vec model.
"""
corpus = [walk for entity_walks in walks for walk in entity_walks]
self._model.build_vocab(corpus, update=is_update)
self._model.train(
corpus,
total_examples=self._model.corpus_count,
epochs=self._model.epochs,
)
return self
def transform(self, entities: Entities) -> Embeddings:
"""The features vector of the provided entities.
Args:
entities: The entities including test entities to create the
embeddings. Since RDF2Vec is unsupervised, there is no label
leakage.
Returns:
The features vector of the provided entities.
"""
if not all([entity in self._model.wv for entity in entities]):
raise ValueError(
"The entities must have been provided to fit() first "
"before they can be transformed into a numerical vector."
)
return [self._model.wv.get_vector(entity) for entity in entities]
| en | 0.757613 | Defines the Word2Vec embedding technique. SEE: https://radimrehurek.com/gensim/models/word2vec.html Attributes: _model: The gensim.models.word2vec model. Defaults to None. kwargs: The keyword arguments dictionary. Defaults to { min_count=0 }. Fits the Word2Vec model based on provided walks. Args: walks: The walks to create the corpus to to fit the model. is_update: True if the new walks should be added to old model's walks, False otherwise. Defaults to False. Returns: The fitted Word2Vec model. The features vector of the provided entities. Args: entities: The entities including test entities to create the embeddings. Since RDF2Vec is unsupervised, there is no label leakage. Returns: The features vector of the provided entities. | 2.922216 | 3 |
roscraco/router/tenda/__init__.py | cyroxx/roscraco | 1 | 6619878 | from w268r import Tenda_W268R
| from w268r import Tenda_W268R
| none | 1 | 1.183054 | 1 | |
org/apache/helix/ClusterMessagingService.py | davzhang/helix-python-binding | 3 | 6619879 | # package org.apache.helix
#from org.apache.helix import *
from org.apache.helix.messaging.AsyncCallback import AsyncCallback
from org.apache.helix.messaging.handling.MessageHandlerFactory import MessageHandlerFactory
from org.apache.helix.model.Message import Message
class ClusterMessagingService:
def send(self, recipientCriteria, message):
"""
Returns int
Parameters:
recipientCriteria: Criteriamessage: Message
"""
pass
def send(self, receipientCriteria, message, callbackOnReply, timeOut):
"""
Returns int
Parameters:
receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: int
"""
pass
def send(self, receipientCriteria, message, callbackOnReply, timeOut, retryCount):
"""
Returns int
Parameters:
receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: intretryCount: int
"""
pass
def sendAndWait(self, receipientCriteria, message, callbackOnReply, timeOut):
"""
Returns int
Parameters:
receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: int
"""
pass
def sendAndWait(self, receipientCriteria, message, callbackOnReply, timeOut, retryCount):
"""
Returns int
Parameters:
receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: intretryCount: int
"""
pass
def registerMessageHandlerFactory(self, type, factory):
"""
Returns void
Parameters:
type: Stringfactory: MessageHandlerFactory
"""
pass
| # package org.apache.helix
#from org.apache.helix import *
from org.apache.helix.messaging.AsyncCallback import AsyncCallback
from org.apache.helix.messaging.handling.MessageHandlerFactory import MessageHandlerFactory
from org.apache.helix.model.Message import Message
class ClusterMessagingService:
def send(self, recipientCriteria, message):
"""
Returns int
Parameters:
recipientCriteria: Criteriamessage: Message
"""
pass
def send(self, receipientCriteria, message, callbackOnReply, timeOut):
"""
Returns int
Parameters:
receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: int
"""
pass
def send(self, receipientCriteria, message, callbackOnReply, timeOut, retryCount):
"""
Returns int
Parameters:
receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: intretryCount: int
"""
pass
def sendAndWait(self, receipientCriteria, message, callbackOnReply, timeOut):
"""
Returns int
Parameters:
receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: int
"""
pass
def sendAndWait(self, receipientCriteria, message, callbackOnReply, timeOut, retryCount):
"""
Returns int
Parameters:
receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: intretryCount: int
"""
pass
def registerMessageHandlerFactory(self, type, factory):
"""
Returns void
Parameters:
type: Stringfactory: MessageHandlerFactory
"""
pass
| en | 0.310365 | # package org.apache.helix #from org.apache.helix import * Returns int Parameters: recipientCriteria: Criteriamessage: Message Returns int Parameters: receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: int Returns int Parameters: receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: intretryCount: int Returns int Parameters: receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: int Returns int Parameters: receipientCriteria: Criteriamessage: MessagecallbackOnReply: AsyncCallbacktimeOut: intretryCount: int Returns void Parameters: type: Stringfactory: MessageHandlerFactory | 2.033932 | 2 |
tunas/arch/universal.py | yancz1989/tunas | 0 | 6619880 | # -*- coding: utf-8 -*-
# @Author: yancz1989
# @Date: 2016-05-05 20:59:31
# @Last Modified by: yancz1989
# @Last Modified time: 2016-12-06 16:29:50
# This file implement expression module for tunas, including variable,
# placeholder, function and their base support interfaces.
dim2d = {}
dim3d = {}
kernel2d = {}
kernel3d = {}
dim_orders = ['tf', 'th']
paddings = ['same', 'valid']
dim2d['tf'] = 'NHWC'
dim3d['tf'] = 'NTHWC'
dim2d['th'] = 'NCHW'
dim3d['th'] = 'NCTHW'
kernel2d['tf'] = 'HWCD'
kernel3d['tf'] = 'HWTCD'
kernel2d['th'] = 'DCHW'
kernel3d['th'] = 'DCHWT'
def _string_order(in_, out_):
order = []
for i in range(len(in_)):
order.append(in_.index(out_[i]))
return order
def _expand(dims, order):
if order == 'th':
return (1, 1) + dims
else:
return (1, ) + dims + (1,)
| # -*- coding: utf-8 -*-
# @Author: yancz1989
# @Date: 2016-05-05 20:59:31
# @Last Modified by: yancz1989
# @Last Modified time: 2016-12-06 16:29:50
# This file implement expression module for tunas, including variable,
# placeholder, function and their base support interfaces.
dim2d = {}
dim3d = {}
kernel2d = {}
kernel3d = {}
dim_orders = ['tf', 'th']
paddings = ['same', 'valid']
dim2d['tf'] = 'NHWC'
dim3d['tf'] = 'NTHWC'
dim2d['th'] = 'NCHW'
dim3d['th'] = 'NCTHW'
kernel2d['tf'] = 'HWCD'
kernel3d['tf'] = 'HWTCD'
kernel2d['th'] = 'DCHW'
kernel3d['th'] = 'DCHWT'
def _string_order(in_, out_):
order = []
for i in range(len(in_)):
order.append(in_.index(out_[i]))
return order
def _expand(dims, order):
if order == 'th':
return (1, 1) + dims
else:
return (1, ) + dims + (1,)
| en | 0.781676 | # -*- coding: utf-8 -*- # @Author: yancz1989 # @Date: 2016-05-05 20:59:31 # @Last Modified by: yancz1989 # @Last Modified time: 2016-12-06 16:29:50 # This file implement expression module for tunas, including variable, # placeholder, function and their base support interfaces. | 2.616084 | 3 |
grocerypathsim/path_generator.py | kimholmgren/grocerystore_path_simulation | 0 | 6619881 | <reponame>kimholmgren/grocerystore_path_simulation<filename>grocerypathsim/path_generator.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import cv2
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder
class PathGenerator:
def __init__(self, storelayout, start_coords=[499,0]):
"""
Initialize path generator object
:param storelayout: store layout object
:param start_coords: where customers are generated spatially
"""
self.slayout = storelayout
self.dpt_coord_choices = [self.slayout.product_options[i].shape[1] for
i in
range(len(self.slayout.product_options.keys()))]
self.start_coords = start_coords
def generate_pixel_coordinates(self, shopping_list):
"""
Generate a random set of coordinates to visit based on the
departments in a given shopping list
:param shopping_list: Current shopping list generated from
ShoppingListGen
:return: list of pixel coordinates to visit
"""
visited_dpts = shopping_list['mapped_dpt']
visited_pixel_coords = []
for d in visited_dpts:
curr_pixel_ind_choice = np.random.choice(self.dpt_coord_choices[d])
curr_pixel = self.slayout.product_options[d][:,
curr_pixel_ind_choice]
visited_pixel_coords.append(curr_pixel)
visited_pixel_coords = np.array(visited_pixel_coords)
return visited_pixel_coords
def order_coords(self, pixel_coords):
"""
Generate the order for the path from a list of coordinates to visit
:param pixel_coords: Coordinates to visit
:return: ordered list of coordinates
"""
euclidean_dist = 0
ordered_path = [np.array(self.start_coords)]
curr_loc = self.start_coords
while len(pixel_coords)>0:
# compute euclidean distances from current location
dists = [np.linalg.norm(a - curr_loc) for a in pixel_coords]
for i, d in enumerate(dists):
if d==0:
dists[i]=.5
# compute probabilities
p = np.power(np.reciprocal(dists), 5)
p = p / p.sum()
# choose next point
next_point_index = np.random.choice(list(range(len(p))), p=p)
euclidean_dist += dists[next_point_index]
next_point = pixel_coords[next_point_index]
pixel_coords = np.vstack((pixel_coords[:next_point_index],
pixel_coords[next_point_index+1:]))
# add to ordered list
ordered_path.append(next_point)
curr_loc = next_point
# when no items remain visit the checkout area
checkout_ind = np.random.choice(self.dpt_coord_choices[
self.slayout.checkout_index])
checkout_point = self.slayout.product_options[
self.slayout.checkout_index][:, checkout_ind]
ordered_path.append(checkout_point)
return ordered_path, euclidean_dist
# now we have an ordered list of points to visit, and a store layout
# denoting where we can walk if we choose to compute a path around
# obstacles rather than euclidean distance
def calc_path_astar(self, ordered):
"""
Calculate the walking path using the A* algorithm
:param ordered: ordered set of coordinates.
:return: path, distance
"""
distance = 0
full_path = []
# make sure all destination points are walkable
for o in ordered:
x, y = o
self.slayout.walkable[x, y] = 1
# calculate path
for i in range(len(ordered)-1):
# define the grid and the solver
grid = Grid(matrix=self.slayout.walkable)
finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
start = ordered[i]
start_node = grid.node(start[1], start[0])
end = ordered[i+1]
end_node = grid.node(end[1], end[0])
path, runs = finder.find_path(start_node, end_node, grid)
distance += len(path)
full_path.extend(path)
return full_path, distance
def plot_ordered_coords(self, visited_pixel_coords):
plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
xs, ys = [p[0] for p in visited_pixel_coords], [p[1] for p in visited_pixel_coords]
plt.scatter(ys, xs, color="purple")
for i in range(len(visited_pixel_coords)):
plt.text(visited_pixel_coords[i][1] - 10,
visited_pixel_coords[i][0] + 25, str(i))
plt.show()
def plot_astar_path(self, full_path, ordered):
plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
xs, ys = [p[0] for p in full_path], [p[1] for p in full_path]
plt.scatter(xs, ys, color="gray", s=10)
xs, ys = [p[0] for p in ordered], [p[1] for p in ordered]
plt.scatter(ys, xs, marker='x', color='red')
for i in range(len(ordered)):
plt.text(ordered[i][1] - 10,
ordered[i][0] + 25, str(i), fontsize='large',
fontdict={'weight': 'heavy', 'color': 'black'})
plt.show()
def plot_euclidean_path(self, visited_pixel_coords):
plt.clf()
plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
xs, ys = [p[0] for p in visited_pixel_coords], [p[1] for p in
visited_pixel_coords]
plt.scatter(ys, xs, marker='x', color='red', zorder=2)
for i in range(len(visited_pixel_coords)):
plt.text(visited_pixel_coords[i][1] - 10,
visited_pixel_coords[i][0] + 25, str(i), fontsize='large',
fontdict={'weight': 'heavy', 'color': 'black'})
plt.plot(ys, xs, color="gray", linewidth=4, zorder=1)
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import cv2
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder
class PathGenerator:
def __init__(self, storelayout, start_coords=[499,0]):
"""
Initialize path generator object
:param storelayout: store layout object
:param start_coords: where customers are generated spatially
"""
self.slayout = storelayout
self.dpt_coord_choices = [self.slayout.product_options[i].shape[1] for
i in
range(len(self.slayout.product_options.keys()))]
self.start_coords = start_coords
def generate_pixel_coordinates(self, shopping_list):
"""
Generate a random set of coordinates to visit based on the
departments in a given shopping list
:param shopping_list: Current shopping list generated from
ShoppingListGen
:return: list of pixel coordinates to visit
"""
visited_dpts = shopping_list['mapped_dpt']
visited_pixel_coords = []
for d in visited_dpts:
curr_pixel_ind_choice = np.random.choice(self.dpt_coord_choices[d])
curr_pixel = self.slayout.product_options[d][:,
curr_pixel_ind_choice]
visited_pixel_coords.append(curr_pixel)
visited_pixel_coords = np.array(visited_pixel_coords)
return visited_pixel_coords
def order_coords(self, pixel_coords):
"""
Generate the order for the path from a list of coordinates to visit
:param pixel_coords: Coordinates to visit
:return: ordered list of coordinates
"""
euclidean_dist = 0
ordered_path = [np.array(self.start_coords)]
curr_loc = self.start_coords
while len(pixel_coords)>0:
# compute euclidean distances from current location
dists = [np.linalg.norm(a - curr_loc) for a in pixel_coords]
for i, d in enumerate(dists):
if d==0:
dists[i]=.5
# compute probabilities
p = np.power(np.reciprocal(dists), 5)
p = p / p.sum()
# choose next point
next_point_index = np.random.choice(list(range(len(p))), p=p)
euclidean_dist += dists[next_point_index]
next_point = pixel_coords[next_point_index]
pixel_coords = np.vstack((pixel_coords[:next_point_index],
pixel_coords[next_point_index+1:]))
# add to ordered list
ordered_path.append(next_point)
curr_loc = next_point
# when no items remain visit the checkout area
checkout_ind = np.random.choice(self.dpt_coord_choices[
self.slayout.checkout_index])
checkout_point = self.slayout.product_options[
self.slayout.checkout_index][:, checkout_ind]
ordered_path.append(checkout_point)
return ordered_path, euclidean_dist
# now we have an ordered list of points to visit, and a store layout
# denoting where we can walk if we choose to compute a path around
# obstacles rather than euclidean distance
def calc_path_astar(self, ordered):
"""
Calculate the walking path using the A* algorithm
:param ordered: ordered set of coordinates.
:return: path, distance
"""
distance = 0
full_path = []
# make sure all destination points are walkable
for o in ordered:
x, y = o
self.slayout.walkable[x, y] = 1
# calculate path
for i in range(len(ordered)-1):
# define the grid and the solver
grid = Grid(matrix=self.slayout.walkable)
finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
start = ordered[i]
start_node = grid.node(start[1], start[0])
end = ordered[i+1]
end_node = grid.node(end[1], end[0])
path, runs = finder.find_path(start_node, end_node, grid)
distance += len(path)
full_path.extend(path)
return full_path, distance
def plot_ordered_coords(self, visited_pixel_coords):
plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
xs, ys = [p[0] for p in visited_pixel_coords], [p[1] for p in visited_pixel_coords]
plt.scatter(ys, xs, color="purple")
for i in range(len(visited_pixel_coords)):
plt.text(visited_pixel_coords[i][1] - 10,
visited_pixel_coords[i][0] + 25, str(i))
plt.show()
def plot_astar_path(self, full_path, ordered):
plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
xs, ys = [p[0] for p in full_path], [p[1] for p in full_path]
plt.scatter(xs, ys, color="gray", s=10)
xs, ys = [p[0] for p in ordered], [p[1] for p in ordered]
plt.scatter(ys, xs, marker='x', color='red')
for i in range(len(ordered)):
plt.text(ordered[i][1] - 10,
ordered[i][0] + 25, str(i), fontsize='large',
fontdict={'weight': 'heavy', 'color': 'black'})
plt.show()
def plot_euclidean_path(self, visited_pixel_coords):
plt.clf()
plt.imshow(cv2.cvtColor(self.slayout.layout, cv2.COLOR_BGR2RGB))
xs, ys = [p[0] for p in visited_pixel_coords], [p[1] for p in
visited_pixel_coords]
plt.scatter(ys, xs, marker='x', color='red', zorder=2)
for i in range(len(visited_pixel_coords)):
plt.text(visited_pixel_coords[i][1] - 10,
visited_pixel_coords[i][0] + 25, str(i), fontsize='large',
fontdict={'weight': 'heavy', 'color': 'black'})
plt.plot(ys, xs, color="gray", linewidth=4, zorder=1)
plt.show() | en | 0.800297 | Initialize path generator object :param storelayout: store layout object :param start_coords: where customers are generated spatially Generate a random set of coordinates to visit based on the departments in a given shopping list :param shopping_list: Current shopping list generated from ShoppingListGen :return: list of pixel coordinates to visit Generate the order for the path from a list of coordinates to visit :param pixel_coords: Coordinates to visit :return: ordered list of coordinates # compute euclidean distances from current location # compute probabilities # choose next point # add to ordered list # when no items remain visit the checkout area # now we have an ordered list of points to visit, and a store layout # denoting where we can walk if we choose to compute a path around # obstacles rather than euclidean distance Calculate the walking path using the A* algorithm :param ordered: ordered set of coordinates. :return: path, distance # make sure all destination points are walkable # calculate path # define the grid and the solver | 3.034515 | 3 |
src/config.py | tue-5ARA0-2021-Q3/Fashion-MNIST-part2 | 0 | 6619882 | # ====================================================
# CFG
# ====================================================
class CFG:
"""
Configuration settings for the training model
"""
epochs = 15
optimizer = 'adam'
activation = 'relu'
debug = True
model_name = 'model' + '_e' + str(epochs)
| # ====================================================
# CFG
# ====================================================
class CFG:
"""
Configuration settings for the training model
"""
epochs = 15
optimizer = 'adam'
activation = 'relu'
debug = True
model_name = 'model' + '_e' + str(epochs)
| en | 0.540542 | # ==================================================== # CFG # ==================================================== Configuration settings for the training model | 2.031696 | 2 |
Advance_Python/file_handling/file_open_and_close.py | siddharth-143/Python | 0 | 6619883 | <gh_stars>0
# file handing
# open and close file (basic demo)
f = open('abc.txt','w')
print('File name :',f.name)
print('File mode :',f.mode)
print('Is file readable :',f.readable())
print('Is file writeable :',f.writable())
print('Is file close :',f.close())
f.close()
print('Is file close :',f.close)
| # file handing
# open and close file (basic demo)
f = open('abc.txt','w')
print('File name :',f.name)
print('File mode :',f.mode)
print('Is file readable :',f.readable())
print('Is file writeable :',f.writable())
print('Is file close :',f.close())
f.close()
print('Is file close :',f.close) | en | 0.854369 | # file handing # open and close file (basic demo) | 3.840805 | 4 |
assignment_1/pos_hmm_viterbi.py | sigfredonin/NEU_CS6120 | 0 | 6619884 | <filename>assignment_1/pos_hmm_viterbi.py
"""
NEU CS6120 Assignment 1
Problem 4 POS Tagging - Hidden Markov Model
Viterbi Decoder
Implements the Viterbi decoding algorithm in NLP ed3 Fig. 8.5
to derive the most probable tag sequence for a sentence.
Note: Rather than initialize the decoder by processing the state
transition from <s> to the first word, initialized the decoder
to the <start> state. This simplifies the code in that the first
state update is the same as all the rest.
The state (tag) probabilities are set to:
'$S' : 1.0
rest : 0.0
The back pointers are all set to None.
<NAME>
07 Oct 2018
"""
import numpy as np
import os
import re
from collections import defaultdict
_DEBUG_ = False
_VERBOSE_ = False
TOK_SS = '<s>' # start sentence
TAG_SS = '$S'
TOK_ES = '</s>' # end sentence
TAG_ES = 'S$'
# ------------------------------------------------------------------------
# Helper Class - Viterbi Decoder ---
# ------------------------------------------------------------------------
class POS_HMM_Viterbi:
"""
HMM Viterbi POS Decoder
Initialize with transmission and emission probabilities,
indexed by tag:
pTagTrans: { prev_tag : [ ( tag, probability), ...], ...}
pTagEmiss: { curr_tag : [ ( word, probability), ...], ...}
"""
# ------------------------------------------------------------------------
# Class constructor ---
# ------------------------------------------------------------------------
def _probability_LUT(self, pTagProbs, pProbsUnseen):
"""
Create a transition or emission probability lookup table.
Input:
pTagProbs: { t_i-1 : [ ( t_i, P(t_i-1, t_i), ...], ...}
or: ( t_i : [ ( w_i, P(w_i | t_i), ...], ...}
pProbsUnseen: { t_i-1 : probability_of_unseen, ... }
or: ( t_i : probability_of_unseen, ... )
Output:
dT : { prev_tag : { tag : probability, ...}, ...}
"""
probUnseen = pProbsUnseen[None]
print("_probability_LUT.probUnseen:", probUnseen)
dP = defaultdict(lambda: defaultdict(lambda: probUnseen))
for prior, pList in pTagProbs.items():
probUnseenGivenPrior = pProbsUnseen[prior]
dV = defaultdict(lambda: probUnseenGivenPrior)
for value, probability in pList:
dV[value] = probability
dP[prior] = dV
return dP
def _init(self, pTagTrans, pTransUnseen, pTagEmiss, pEmissUnseen):
# tag set
self.tags = set(sorted(pTagTrans))
if self.tags != set(sorted(pTagEmiss)):
msg = ""
print("ERROR: transmission and emission probabilities",
"are not for the same tag set.")
return
# state transition probabilities P(t_i-1, t_i)
self.dT = self._probability_LUT(pTagTrans, pTransUnseen)
# word emission probabilities P( w_i | t_i )
self.dE = self._probability_LUT(pTagEmiss, pEmissUnseen)
def set_DEBUG(self, DEBUG):
_DEBUG_ = DEBUG
def set_VERBOSE(self, VERBOSE):
_VERBOSE_ = VERBOSE
def __init__(self, pTagTrans, pTransUnseen, pTagEmiss, pEmissUnseen, \
DEBUG=False, VERBOSE=False):
self.set_DEBUG(DEBUG)
self.set_VERBOSE(VERBOSE)
self._init(pTagTrans, pTransUnseen, pTagEmiss, pEmissUnseen)
# ------------------------------------------------------------------------
# Viterbi decoding ---
# ------------------------------------------------------------------------
def _find_max_results(self, pS, bS):
probabilities = [ p for tag, p in pS.items() ]
pMax = max(probabilities)
tagsMax = [ tag for tag, p in pS.items() if p == pMax ]
if len(tagsMax) > 1:
print("Warning: there are %d tags with max p = %f" \
% (len(tagsMax), pMax) )
tMax = tagsMax[0]
return pMax, tMax
def _find_max(self, tag, pE, pS_prev):
pMax = 0.0
tMax = None
for prev_tag in self.tags: # iterate over previous tags
pPrev = pS_prev[prev_tag] # Viterbi[i-1, prev_tag]
pTs = self.dT[prev_tag] # { tag : P(prev_tag, tag)}
pT = pTs[tag] # P(prev_tag, tag)
p = pPrev * pT * pE # ? Viterbi(i, tag)
if p > pMax:
pMax = p
tMax = prev_tag
return pMax, tMax
def _step(self, word, pS):
pS_prev = pS # Viterbi[i-1, tag], previous time step
pS = {} # Viterbi[i, tag], this time step
bS = {} # backpointer[i, tag], this time step
for tag in self.tags: # iterate over all possible POS tags
# probability of this word given this tag
pEs = self.dE[tag] # word probabilities for this tag
pE = pEs[word] # P(word | tag)
# find previous tag that gives highest probability for tag
pMax, tMax = self._find_max(tag, pE, pS_prev)
pS[tag] = pMax
bS[tag] = tMax
print("--- %s ---" % word)
if _VERBOSE_:
print("pS:", pS)
print("bS:", bS)
return pS, bS
def _backtrace(self, observations, viterbi, backpointer):
"""
# find the maximum probability and corresponding tag in each time step
# and follow the back pointers to determine the most probable tags
"""
print("Backtrace".center(47, '-'))
most_probable_tags = []
max_probabilities = []
for iV, pS in reversed(list(enumerate(viterbi))):
bS = backpointer[iV]
word = observations[iV]
if _VERBOSE_:
print("----------")
print("iV:", iV)
print("word:", word)
print("pS:", pS)
print("bS:", bS)
pMax, tMax = self._find_max_results(pS, bS)
if _VERBOSE_:
print("tMax, pMax:", tMax, pMax)
most_probable_tags = [ tMax ] + most_probable_tags
max_probabilities = [ pMax ] + max_probabilities
return observations, most_probable_tags, max_probabilities
def decode(self, observations):
"""
Find the most probable POS tag assignment for
a given list of observed tokens.
Inputs:
observations: [ token, ... ]
"""
# Initialize with state at start of sentence
pS = { t : 1.0 if t == TAG_SS else 0.0 for t in self.tags }
bS = { t : None for t in self.tags }
viterbi = [] # Viterbi[iW, tag], at start
backpointer = [] # backpointer[iW, tag], at start
print("--- %s ---" % TOK_SS)
if _VERBOSE_:
print("pS:", pS)
print("bS:", bS)
# iterate over observations, starting with first word
for word in observations:
pS, bS = self._step(word, pS)
viterbi += [ pS ] # Viterbi[iW, tag], this time step
backpointer += [ bS ] # backpointer[iW, tag], this time step
# termination: transition to end of sentence
observations, most_probable_tags, max_probabilities = \
self._backtrace(observations, viterbi, backpointer)
print(" Tagging ".center(49, '-'))
print("words:", observations)
print(" tags:", most_probable_tags)
print("probs:", max_probabilities)
return observations, most_probable_tags, max_probabilities
| <filename>assignment_1/pos_hmm_viterbi.py
"""
NEU CS6120 Assignment 1
Problem 4 POS Tagging - Hidden Markov Model
Viterbi Decoder
Implements the Viterbi decoding algorithm in NLP ed3 Fig. 8.5
to derive the most probable tag sequence for a sentence.
Note: Rather than initialize the decoder by processing the state
transition from <s> to the first word, initialized the decoder
to the <start> state. This simplifies the code in that the first
state update is the same as all the rest.
The state (tag) probabilities are set to:
'$S' : 1.0
rest : 0.0
The back pointers are all set to None.
<NAME>
07 Oct 2018
"""
import numpy as np
import os
import re
from collections import defaultdict
_DEBUG_ = False
_VERBOSE_ = False
TOK_SS = '<s>' # start sentence
TAG_SS = '$S'
TOK_ES = '</s>' # end sentence
TAG_ES = 'S$'
# ------------------------------------------------------------------------
# Helper Class - Viterbi Decoder ---
# ------------------------------------------------------------------------
class POS_HMM_Viterbi:
"""
HMM Viterbi POS Decoder
Initialize with transmission and emission probabilities,
indexed by tag:
pTagTrans: { prev_tag : [ ( tag, probability), ...], ...}
pTagEmiss: { curr_tag : [ ( word, probability), ...], ...}
"""
# ------------------------------------------------------------------------
# Class constructor ---
# ------------------------------------------------------------------------
def _probability_LUT(self, pTagProbs, pProbsUnseen):
"""
Create a transition or emission probability lookup table.
Input:
pTagProbs: { t_i-1 : [ ( t_i, P(t_i-1, t_i), ...], ...}
or: ( t_i : [ ( w_i, P(w_i | t_i), ...], ...}
pProbsUnseen: { t_i-1 : probability_of_unseen, ... }
or: ( t_i : probability_of_unseen, ... )
Output:
dT : { prev_tag : { tag : probability, ...}, ...}
"""
probUnseen = pProbsUnseen[None]
print("_probability_LUT.probUnseen:", probUnseen)
dP = defaultdict(lambda: defaultdict(lambda: probUnseen))
for prior, pList in pTagProbs.items():
probUnseenGivenPrior = pProbsUnseen[prior]
dV = defaultdict(lambda: probUnseenGivenPrior)
for value, probability in pList:
dV[value] = probability
dP[prior] = dV
return dP
def _init(self, pTagTrans, pTransUnseen, pTagEmiss, pEmissUnseen):
# tag set
self.tags = set(sorted(pTagTrans))
if self.tags != set(sorted(pTagEmiss)):
msg = ""
print("ERROR: transmission and emission probabilities",
"are not for the same tag set.")
return
# state transition probabilities P(t_i-1, t_i)
self.dT = self._probability_LUT(pTagTrans, pTransUnseen)
# word emission probabilities P( w_i | t_i )
self.dE = self._probability_LUT(pTagEmiss, pEmissUnseen)
def set_DEBUG(self, DEBUG):
_DEBUG_ = DEBUG
def set_VERBOSE(self, VERBOSE):
_VERBOSE_ = VERBOSE
def __init__(self, pTagTrans, pTransUnseen, pTagEmiss, pEmissUnseen, \
DEBUG=False, VERBOSE=False):
self.set_DEBUG(DEBUG)
self.set_VERBOSE(VERBOSE)
self._init(pTagTrans, pTransUnseen, pTagEmiss, pEmissUnseen)
# ------------------------------------------------------------------------
# Viterbi decoding ---
# ------------------------------------------------------------------------
def _find_max_results(self, pS, bS):
probabilities = [ p for tag, p in pS.items() ]
pMax = max(probabilities)
tagsMax = [ tag for tag, p in pS.items() if p == pMax ]
if len(tagsMax) > 1:
print("Warning: there are %d tags with max p = %f" \
% (len(tagsMax), pMax) )
tMax = tagsMax[0]
return pMax, tMax
def _find_max(self, tag, pE, pS_prev):
pMax = 0.0
tMax = None
for prev_tag in self.tags: # iterate over previous tags
pPrev = pS_prev[prev_tag] # Viterbi[i-1, prev_tag]
pTs = self.dT[prev_tag] # { tag : P(prev_tag, tag)}
pT = pTs[tag] # P(prev_tag, tag)
p = pPrev * pT * pE # ? Viterbi(i, tag)
if p > pMax:
pMax = p
tMax = prev_tag
return pMax, tMax
def _step(self, word, pS):
pS_prev = pS # Viterbi[i-1, tag], previous time step
pS = {} # Viterbi[i, tag], this time step
bS = {} # backpointer[i, tag], this time step
for tag in self.tags: # iterate over all possible POS tags
# probability of this word given this tag
pEs = self.dE[tag] # word probabilities for this tag
pE = pEs[word] # P(word | tag)
# find previous tag that gives highest probability for tag
pMax, tMax = self._find_max(tag, pE, pS_prev)
pS[tag] = pMax
bS[tag] = tMax
print("--- %s ---" % word)
if _VERBOSE_:
print("pS:", pS)
print("bS:", bS)
return pS, bS
def _backtrace(self, observations, viterbi, backpointer):
"""
# find the maximum probability and corresponding tag in each time step
# and follow the back pointers to determine the most probable tags
"""
print("Backtrace".center(47, '-'))
most_probable_tags = []
max_probabilities = []
for iV, pS in reversed(list(enumerate(viterbi))):
bS = backpointer[iV]
word = observations[iV]
if _VERBOSE_:
print("----------")
print("iV:", iV)
print("word:", word)
print("pS:", pS)
print("bS:", bS)
pMax, tMax = self._find_max_results(pS, bS)
if _VERBOSE_:
print("tMax, pMax:", tMax, pMax)
most_probable_tags = [ tMax ] + most_probable_tags
max_probabilities = [ pMax ] + max_probabilities
return observations, most_probable_tags, max_probabilities
def decode(self, observations):
"""
Find the most probable POS tag assignment for
a given list of observed tokens.
Inputs:
observations: [ token, ... ]
"""
# Initialize with state at start of sentence
pS = { t : 1.0 if t == TAG_SS else 0.0 for t in self.tags }
bS = { t : None for t in self.tags }
viterbi = [] # Viterbi[iW, tag], at start
backpointer = [] # backpointer[iW, tag], at start
print("--- %s ---" % TOK_SS)
if _VERBOSE_:
print("pS:", pS)
print("bS:", bS)
# iterate over observations, starting with first word
for word in observations:
pS, bS = self._step(word, pS)
viterbi += [ pS ] # Viterbi[iW, tag], this time step
backpointer += [ bS ] # backpointer[iW, tag], this time step
# termination: transition to end of sentence
observations, most_probable_tags, max_probabilities = \
self._backtrace(observations, viterbi, backpointer)
print(" Tagging ".center(49, '-'))
print("words:", observations)
print(" tags:", most_probable_tags)
print("probs:", max_probabilities)
return observations, most_probable_tags, max_probabilities
| en | 0.631853 | NEU CS6120 Assignment 1 Problem 4 POS Tagging - Hidden Markov Model Viterbi Decoder Implements the Viterbi decoding algorithm in NLP ed3 Fig. 8.5 to derive the most probable tag sequence for a sentence. Note: Rather than initialize the decoder by processing the state transition from <s> to the first word, initialized the decoder to the <start> state. This simplifies the code in that the first state update is the same as all the rest. The state (tag) probabilities are set to: '$S' : 1.0 rest : 0.0 The back pointers are all set to None. <NAME> 07 Oct 2018 # start sentence # end sentence # ------------------------------------------------------------------------ # Helper Class - Viterbi Decoder --- # ------------------------------------------------------------------------ HMM Viterbi POS Decoder Initialize with transmission and emission probabilities, indexed by tag: pTagTrans: { prev_tag : [ ( tag, probability), ...], ...} pTagEmiss: { curr_tag : [ ( word, probability), ...], ...} # ------------------------------------------------------------------------ # Class constructor --- # ------------------------------------------------------------------------ Create a transition or emission probability lookup table. Input: pTagProbs: { t_i-1 : [ ( t_i, P(t_i-1, t_i), ...], ...} or: ( t_i : [ ( w_i, P(w_i | t_i), ...], ...} pProbsUnseen: { t_i-1 : probability_of_unseen, ... } or: ( t_i : probability_of_unseen, ... ) Output: dT : { prev_tag : { tag : probability, ...}, ...} # tag set # state transition probabilities P(t_i-1, t_i) # word emission probabilities P( w_i | t_i ) # ------------------------------------------------------------------------ # Viterbi decoding --- # ------------------------------------------------------------------------ # iterate over previous tags # Viterbi[i-1, prev_tag] # { tag : P(prev_tag, tag)} # P(prev_tag, tag) # ? 
Viterbi(i, tag) # Viterbi[i-1, tag], previous time step # Viterbi[i, tag], this time step # backpointer[i, tag], this time step # iterate over all possible POS tags # probability of this word given this tag # word probabilities for this tag # P(word | tag) # find previous tag that gives highest probability for tag # find the maximum probability and corresponding tag in each time step # and follow the back pointers to determine the most probable tags Find the most probable POS tag assignment for a given list of observed tokens. Inputs: observations: [ token, ... ] # Initialize with state at start of sentence # Viterbi[iW, tag], at start # backpointer[iW, tag], at start # iterate over observations, starting with first word # Viterbi[iW, tag], this time step # backpointer[iW, tag], this time step # termination: transition to end of sentence | 2.814798 | 3 |
Projetos_Pessoais/BotWhatsapp2.py | Cohuzer/Projetos-Pessoais | 0 | 6619885 | from selenium import webdriver
import time
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
import urllib
import urllib.request
#Definição das funções
def construtorMensagem(tuner, mensagem):
retorno = "Error"
if tuner == "0":
retorno = str(input(mensagem))
return retorno
def lerList(mensagem):
lista = []
c = 0
while (True):
escopo = str(input("\n" + mensagem + ": "))
lista.append(escopo)
c += 1
escopo_boll = str(input("Digite 0 se já inseriu todos os contatos desejados: "))
if escopo_boll == "0":
break
return lista
def lerZeroUm(mensagem):
retorno = str
while (True):
retorno = str(input(mensagem))
if retorno in "01":
break
return retorno
def inicializadorWebdriver():
try:
urllib.request.urlopen('https://web.whatsapp.com/')
except urllib.error.URLError:
print('\033[31mWHATSAPP INDISPONIVEL\033[m')
else:
driver.get('https://web.whatsapp.com/')
time.sleep(10)
def buscarContato(contato):
campo_pesquisa = driver.find_element_by_xpath('//div[contains(@class, "copyable-text selectable-text")]')
#Ambos os Xpath's servem para encontrar as caixas de busca e mensagem do whatsapp
time.sleep(1.5)
campo_pesquisa.click()
campo_pesquisa.send_keys(contato)
campo_pesquisa.send_keys(Keys.ENTER)
def enviarMensagem(mensagem):
campo_mensagem = driver.find_elements_by_xpath('//div[contains(@class, "copyable-text selectable-text")]')
#campo_mensagem[0] == Buscar contatos; campo_mensagem[1] == escrever a mensagem
campo_mensagem[1].click()
time.sleep(0.5)
campo_mensagem[1].send_keys(mensagem)
campo_mensagem[1].send_keys(Keys.ENTER)
def encontraFigurinhas():
campo_emoji = driver.find_element_by_xpath('//div[contains(@class, "_1uqmP _3agz_")]')
campo_emoji.click()
time.sleep(0.2)
campo_sticker = driver.find_element_by_xpath('//button[contains(@class, "_23sAs _3V3JJ _1Eec4 _1owZM")]')
campo_sticker.click()
time.sleep(0.2)
def enviarFigurinha():
campo_stickers = driver.find_elements_by_xpath('//div[contains(@class, "_2elZc")]')
campo_stickers[0].click()
time.sleep(0.2)
#Código Principal
print('='*60)
print(' '*20, 'WHATSAPP BOT 2')
print('='*60)
print('\n ~> By: Mateus "CohuzEr"')
# Entrada
tuner = lerZeroUm("\nDigite para:\n0-Mensagem de Texto\n1-Figurinha\n>> ") # 1-Manda a figurinha em primeiro lugar nas enviadas recentemente
mensagem = construtorMensagem(tuner, "\nInsira a mensagem desejada: ")
contatos = lerList("Insira o nome exato do contato ou grupo desejado")
quantidade_mensagens = int(input("\nInsira quantas mensagens serão enviadas: "))
#Processamento
driver = webdriver.Chrome(ChromeDriverManager().install())
inicializadorWebdriver()
if tuner == "0":
for contato in contatos:
buscarContato(contato)
for i in range(quantidade_mensagens):
enviarMensagem(mensagem)
else:
for contato in contatos:
buscarContato(contato)
encontraFigurinhas()
for i in range(quantidade_mensagens):
enviarFigurinha()
| from selenium import webdriver
import time
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
import urllib
import urllib.request
#Definição das funções
def construtorMensagem(tuner, mensagem):
retorno = "Error"
if tuner == "0":
retorno = str(input(mensagem))
return retorno
def lerList(mensagem):
lista = []
c = 0
while (True):
escopo = str(input("\n" + mensagem + ": "))
lista.append(escopo)
c += 1
escopo_boll = str(input("Digite 0 se já inseriu todos os contatos desejados: "))
if escopo_boll == "0":
break
return lista
def lerZeroUm(mensagem):
retorno = str
while (True):
retorno = str(input(mensagem))
if retorno in "01":
break
return retorno
def inicializadorWebdriver():
try:
urllib.request.urlopen('https://web.whatsapp.com/')
except urllib.error.URLError:
print('\033[31mWHATSAPP INDISPONIVEL\033[m')
else:
driver.get('https://web.whatsapp.com/')
time.sleep(10)
def buscarContato(contato):
campo_pesquisa = driver.find_element_by_xpath('//div[contains(@class, "copyable-text selectable-text")]')
#Ambos os Xpath's servem para encontrar as caixas de busca e mensagem do whatsapp
time.sleep(1.5)
campo_pesquisa.click()
campo_pesquisa.send_keys(contato)
campo_pesquisa.send_keys(Keys.ENTER)
def enviarMensagem(mensagem):
campo_mensagem = driver.find_elements_by_xpath('//div[contains(@class, "copyable-text selectable-text")]')
#campo_mensagem[0] == Buscar contatos; campo_mensagem[1] == escrever a mensagem
campo_mensagem[1].click()
time.sleep(0.5)
campo_mensagem[1].send_keys(mensagem)
campo_mensagem[1].send_keys(Keys.ENTER)
def encontraFigurinhas():
campo_emoji = driver.find_element_by_xpath('//div[contains(@class, "_1uqmP _3agz_")]')
campo_emoji.click()
time.sleep(0.2)
campo_sticker = driver.find_element_by_xpath('//button[contains(@class, "_23sAs _3V3JJ _1Eec4 _1owZM")]')
campo_sticker.click()
time.sleep(0.2)
def enviarFigurinha():
campo_stickers = driver.find_elements_by_xpath('//div[contains(@class, "_2elZc")]')
campo_stickers[0].click()
time.sleep(0.2)
#Código Principal
print('='*60)
print(' '*20, 'WHATSAPP BOT 2')
print('='*60)
print('\n ~> By: Mateus "CohuzEr"')
# Entrada
tuner = lerZeroUm("\nDigite para:\n0-Mensagem de Texto\n1-Figurinha\n>> ") # 1-Manda a figurinha em primeiro lugar nas enviadas recentemente
mensagem = construtorMensagem(tuner, "\nInsira a mensagem desejada: ")
contatos = lerList("Insira o nome exato do contato ou grupo desejado")
quantidade_mensagens = int(input("\nInsira quantas mensagens serão enviadas: "))
#Processamento
driver = webdriver.Chrome(ChromeDriverManager().install())
inicializadorWebdriver()
if tuner == "0":
for contato in contatos:
buscarContato(contato)
for i in range(quantidade_mensagens):
enviarMensagem(mensagem)
else:
for contato in contatos:
buscarContato(contato)
encontraFigurinhas()
for i in range(quantidade_mensagens):
enviarFigurinha()
| pt | 0.974963 | #Definição das funções #Ambos os Xpath's servem para encontrar as caixas de busca e mensagem do whatsapp #campo_mensagem[0] == Buscar contatos; campo_mensagem[1] == escrever a mensagem #Código Principal # Entrada # 1-Manda a figurinha em primeiro lugar nas enviadas recentemente #Processamento | 2.904886 | 3 |
services/backend/thiamsu/migrations/0018_new_word_url_max_length.py | LKKTGB/thiamsu | 10 | 6619886 | <filename>services/backend/thiamsu/migrations/0018_new_word_url_max_length.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-03 12:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("thiamsu", "0017_translation_lang_hanlo")]
operations = [
migrations.AlterField(
model_name="newword",
name="reference_url",
field=models.CharField(
max_length=1000, verbose_name="new_word_reference_url"
),
)
]
| <filename>services/backend/thiamsu/migrations/0018_new_word_url_max_length.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-03 12:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("thiamsu", "0017_translation_lang_hanlo")]
operations = [
migrations.AlterField(
model_name="newword",
name="reference_url",
field=models.CharField(
max_length=1000, verbose_name="new_word_reference_url"
),
)
]
| en | 0.611176 | # -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2018-07-03 12:19 | 1.643338 | 2 |
code/run_gtl.py | neu-spiral/GraphTransferLearning-NEU | 1 | 6619887 | """
Main module to run GTL model. Depending on the input parameters,
it loads certain data, creates and trains a specific GTL model
@author: <NAME>
SPIRAL Group
Electrical & Computer Engineering
Northeastern University
"""
import argparse
import os
import numpy as np
from pathlib import Path
from load_save_data import load_data
from create_gtl_model import getGTLmodel
from train_gtl_model import train
from load_save_data import save_global_results
def str2bool(v):
if v.lower() in ('true', 't', 'yes', 'y', '1'):
return True
elif v.lower() in ('false', 'f', 'no', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Train Graph Transfer Learning model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
# dataset parameters
parser.add_argument('-lp', '--load_path',
type=str, default='../datasets/',
help='Full path to folder with datasets')
parser.add_argument('-d', '--dataset',
type=str, default='sb-4', choices=['bp-2', 'sb-4', 'sb-6', 'zachary', 'disease', 'email'],
help='Dataset to be used')
# general graph parameters
parser.add_argument('--labels',
type=str, default='cluster', choices=['cluster', 'infection'],
help='Node labels of the synthetic data')
# graph embedding parameters
parser.add_argument('--nembedding',
type=int, default=5,
help='Size of the output embedding vector')
parser.add_argument('-sg', '--topology_similarity',
type=str, default='randomwalk', choices=['randomwalk', 'adjacency'],
help='Similarity measure between nodes of the same graph in graph topological space')
parser.add_argument('-et', '--embedding_type',
type=str, default='skipgram', choices=['unified', 'skipgram'],
help='Type of embedding function: skipgram, unified')
parser.add_argument('-se', '--embedding_similarity',
type=str, default='softmax', choices=['softmax', 'innerprod', 'cossim', 'l2'],
help='Similarity measures between nodes of the same graph in embedding space')
parser.add_argument('-sl', '--similarity_loss',
type=str, default='crossentropy', choices=['crossentropy', 'innerprod', 'l2'],
help='Loss function between similarities in topological and embedding '
'spaces for nodes of the same graph')
# prediction branch parameters
parser.add_argument('--depth',
type=int, default=1,
help='Number of hidden layers in Prediction Branch')
parser.add_argument('-af', '--activation_function',
type=str, default='tanh', choices=['tanh', 'sigmoid', 'relu'],
help='Activation function for Prediction Branch neurons')
parser.add_argument('-prl', '--prediction_loss',
type=str, default='mean_squared_error',
choices=['mean_squared_error', 'mean_absolute_percentage_error'],
help='Loss function for Prediction Branch')
# randomwalk parameters
parser.add_argument('--nwalks',
type=int, default=20,
help='Number of node2vec random walks')
parser.add_argument('--walk_length',
type=int, default=10,
help='Length of random walk')
parser.add_argument('--window_size',
type=int, default=4,
help='Width of sliding window in random walks')
parser.add_argument('--p',
type=float, default=0.25,
help='Parameter p for node2vec random walks')
parser.add_argument('--q',
type=float, default=4.0,
help='Parameter q for node2vec random walks')
parser.add_argument('--nnegative',
type=int, default=5,
help='Number of negative samples used in skip-gram')
parser.add_argument('--scale_negative',
type=str2bool, default=False,
help='Specifies whether to scale outputs for negative samples')
# second graph parameters
parser.add_argument('--transfer_mode',
type=str, default='1graph', choices=['1graph', 'noP', 'iterP', 'optP', 'trueP', 'trueP_DS'],
help='Specifies transfer learning mode')
parser.add_argument('--b_from_a',
type=str, default='permute', choices=['permute', 'modify'],
help='Specifies whether to permute or add/remove edges to graph A to generate graph B')
parser.add_argument('-dp', '--discrepancy_percent',
type=float, default=0,
help='Specifies percentage of edges to be removed/added when generating second graph')
parser.add_argument('-gd', '--graph_distance',
type=str, default='l2', choices=['l2', 'innerprod', 'cossim'],
help='Pairwise distance measure between nodes in the embedding space (matrix D)')
# neural net train/test parameters
parser.add_argument('--alpha',
type=float, default=1.0,
help='Weight of graph matching loss')
parser.add_argument('--beta',
type=str2bool, default=False,
help='Specifies whether to scale parts of P-optimization loss')
parser.add_argument('-lr', '--learning_rate',
type=float, default=0.025,
help='Learning rate')
parser.add_argument('--batch_size',
type=int, default=2,
help='Number of instances in each batch')
parser.add_argument('--epochs',
type=int, default=2,
help='Number of epochs')
parser.add_argument('--early_stopping',
type=int, default=0,
help='Number of epochs with no improvement after which training will be stopped. '
'If <=0, no early stopping is used')
parser.add_argument('--iterations',
type=int, default=1,
help='Number of iterations for model to initialize and run. '
'Output results are averaged across iterations')
# CUDA parameters
parser.add_argument('--id_gpu',
default=-1, type=int,
help='Specifies which gpu to use. If <0, model is run on cpu')
# results parameters
parser.add_argument('-sp', '--save_path', type=str, default='',
help='Full path to folder where results are saved')
args = parser.parse_args()
if args.id_gpu >= 0:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# The GPU id to use
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.id_gpu)
# save configuration file
n_iter = max(1, args.iterations)
save_path = args.save_path
print("*************** Configuration ***************")
args_dic = vars(args)
for arg, value in args_dic.items():
line = arg + ' : ' + str(value)
print(line)
print("*********************************************\n")
# load data
dataset_path = str(Path(args.load_path) / args.dataset.lower())
labels = args.labels.lower()
transmode = args.transfer_mode
B_from_A = args.b_from_a
disc_pect = args.discrepancy_percent
n_layers = max(10, args.depth)
n_embedding = args.nembedding
topology_similarity = args.topology_similarity
embedding_type = args.embedding_type
embedding_similarity = args.embedding_similarity
if embedding_type == 'skipgram':
embedding_similarity = 'softmax'
if embedding_similarity == 'softmax':
n_negative = args.nnegative
scale_negative = args.scale_negative
else:
n_negative = 0
scale_negative = False
similarity_loss = args.similarity_loss
prediction_loss = args.prediction_loss
activation_function = args.activation_function
graph_distance = args.graph_distance
n_walks = args.nwalks
walk_length = args.walk_length
window_size = args.window_size
p = args.p
q = args.q
alpha = args.alpha
beta = args.beta
learning_rate = args.learning_rate
batch_size = args.batch_size
early_stopping = args.early_stopping
n_epochs = args.epochs
results = {'epochs': {}, 'train': {}, 'testA': {}}
if labels != 'cluster':
results['rsquaredA'] = {}
if transmode != '1graph':
results['testB'] = {}
if labels != 'cluster':
results['rsquaredB'] = {}
for iter in range(n_iter):
save_path_iter = save_path + str(iter+1) + '/'
if not os.path.exists(save_path_iter):
os.makedirs(save_path_iter)
# load data
n_nodes, n_features, n_labels, A, Afeatures, Alabels, Atrain, Atest, B, Bfeatures, Blabels, Ptrue = \
load_data(dataset_path, labels, transmode, B_from_A, disc_pect)
# create model
models = getGTLmodel(n_nodes, n_features, n_embedding, labels, n_labels, n_layers, n_negative, scale_negative,
embedding_type, embedding_similarity, similarity_loss, prediction_loss,
activation_function, transmode, graph_distance, learning_rate, alpha, save_path)
if iter == 0:
print("\nEmbedding model summary:".upper())
print(models['EmbeddingModel'].summary())
print("\nEmbedding similarity branch summary:".upper())
print(models['EmbeddingModel'].get_layer('Branch_SimilarityA').summary())
print("\nPrediction model summary:".upper())
print(models['PredictionModel'].summary())
print("\nPrediction branch summary:".upper())
print(models['PredictionModel'].get_layer('Branch_Prediction').summary())
# train/test model
print("\n ============================================== ")
print("|*************** ITERATION #{:3d} ***************|".format(iter+1) if n_iter > 1 else
"|************ GRAPH TRANSFER LEARNING *********|")
print(" ============================================== ")
iter_results = train(models, A, Afeatures, Alabels, Atrain, Atest, B, Bfeatures, Blabels, Ptrue, transmode,
topology_similarity, n_walks, walk_length, window_size, p, q, n_negative, learning_rate,
beta, n_epochs, early_stopping, batch_size, save_path_iter)
results['epochs'][iter] = iter_results['epochs']
results['train'][iter] = iter_results['acc_train']
results['testA'][iter] = iter_results['acc_testA']
if labels != 'cluster':
results['rsquaredA'][iter] = iter_results['acc_rsquaredA']
if transmode != '1graph':
results['testB'][iter] = iter_results['acc_testB']
if labels != 'cluster':
results['rsquaredB'][iter] = iter_results['acc_rsquaredB']
# save global results
picklename = "GlobalResults"
save_global_results(args, iter + 1, results, picklename)
epochs_mean = np.mean(list(results['epochs'].values()))
epochs_std = np.std(list(results['epochs'].values()))
train_mean = np.mean(list(results['train'].values()))
train_std = np.std(list(results['train'].values()))
testA_mean = np.mean(list(results['testA'].values()))
testA_std = np.std(list(results['testA'].values()))
res_str = ""
if labels != 'cluster':
rsquaredA_mean = np.mean(list(results['rsquaredA'].values()))
rsquaredA_std = np.std(list(results['rsquaredA'].values()))
res_str += "\tR-squared (graph A) = {0:.4f} (\u00B1{1:.4f})\n".format(rsquaredA_mean, rsquaredA_std)
if transmode != '1graph':
testB_mean = np.mean(list(results['testB'].values()))
testB_std = np.std(list(results['testB'].values()))
res_str += "\tTest accuracy (graph B) = {0:.4f} (\u00B1{1:.4f})\n".format(testB_mean, testB_std)
if labels != 'cluster':
rsquaredB_mean = np.mean(list(results['rsquaredB'].values()))
rsquaredB_std = np.std(list(results['rsquaredB'].values()))
res_str += "\tR-squared (graph B) = {0:.4f} (\u00B1{1:.4f})\n".format(rsquaredB_mean, rsquaredB_std)
print("\n\n ============================================== ")
print("|*************** FINAL RESULTS ***************|")
print(" ============================================== ")
print(f"After {n_iter:2d} iteration(s), the average\n"
f"\tConvergence rate = {epochs_mean:.4f} (\u00B1{epochs_std:.4f})\n"
f"\tTrain accuracy (graph A) = {train_mean:.4f} (\u00B1{train_std:.4f})\n"
f"\tTest accuracy (graph A) = {testA_mean:.4f} (\u00B1{testA_std:.4f})")
print(res_str)
| """
Main module to run GTL model. Depending on the input parameters,
it loads certain data, creates and trains a specific GTL model
@author: <NAME>
SPIRAL Group
Electrical & Computer Engineering
Northeastern University
"""
import argparse
import os
import numpy as np
from pathlib import Path
from load_save_data import load_data
from create_gtl_model import getGTLmodel
from train_gtl_model import train
from load_save_data import save_global_results
def str2bool(v):
if v.lower() in ('true', 't', 'yes', 'y', '1'):
return True
elif v.lower() in ('false', 'f', 'no', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Train Graph Transfer Learning model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
# dataset parameters
parser.add_argument('-lp', '--load_path',
type=str, default='../datasets/',
help='Full path to folder with datasets')
parser.add_argument('-d', '--dataset',
type=str, default='sb-4', choices=['bp-2', 'sb-4', 'sb-6', 'zachary', 'disease', 'email'],
help='Dataset to be used')
# general graph parameters
parser.add_argument('--labels',
type=str, default='cluster', choices=['cluster', 'infection'],
help='Node labels of the synthetic data')
# graph embedding parameters
parser.add_argument('--nembedding',
type=int, default=5,
help='Size of the output embedding vector')
parser.add_argument('-sg', '--topology_similarity',
type=str, default='randomwalk', choices=['randomwalk', 'adjacency'],
help='Similarity measure between nodes of the same graph in graph topological space')
parser.add_argument('-et', '--embedding_type',
type=str, default='skipgram', choices=['unified', 'skipgram'],
help='Type of embedding function: skipgram, unified')
parser.add_argument('-se', '--embedding_similarity',
type=str, default='softmax', choices=['softmax', 'innerprod', 'cossim', 'l2'],
help='Similarity measures between nodes of the same graph in embedding space')
parser.add_argument('-sl', '--similarity_loss',
type=str, default='crossentropy', choices=['crossentropy', 'innerprod', 'l2'],
help='Loss function between similarities in topological and embedding '
'spaces for nodes of the same graph')
# prediction branch parameters
parser.add_argument('--depth',
type=int, default=1,
help='Number of hidden layers in Prediction Branch')
parser.add_argument('-af', '--activation_function',
type=str, default='tanh', choices=['tanh', 'sigmoid', 'relu'],
help='Activation function for Prediction Branch neurons')
parser.add_argument('-prl', '--prediction_loss',
type=str, default='mean_squared_error',
choices=['mean_squared_error', 'mean_absolute_percentage_error'],
help='Loss function for Prediction Branch')
# randomwalk parameters
parser.add_argument('--nwalks',
type=int, default=20,
help='Number of node2vec random walks')
parser.add_argument('--walk_length',
type=int, default=10,
help='Length of random walk')
parser.add_argument('--window_size',
type=int, default=4,
help='Width of sliding window in random walks')
parser.add_argument('--p',
type=float, default=0.25,
help='Parameter p for node2vec random walks')
parser.add_argument('--q',
type=float, default=4.0,
help='Parameter q for node2vec random walks')
parser.add_argument('--nnegative',
type=int, default=5,
help='Number of negative samples used in skip-gram')
parser.add_argument('--scale_negative',
type=str2bool, default=False,
help='Specifies whether to scale outputs for negative samples')
# second graph parameters
parser.add_argument('--transfer_mode',
type=str, default='1graph', choices=['1graph', 'noP', 'iterP', 'optP', 'trueP', 'trueP_DS'],
help='Specifies transfer learning mode')
parser.add_argument('--b_from_a',
type=str, default='permute', choices=['permute', 'modify'],
help='Specifies whether to permute or add/remove edges to graph A to generate graph B')
parser.add_argument('-dp', '--discrepancy_percent',
type=float, default=0,
help='Specifies percentage of edges to be removed/added when generating second graph')
parser.add_argument('-gd', '--graph_distance',
type=str, default='l2', choices=['l2', 'innerprod', 'cossim'],
help='Pairwise distance measure between nodes in the embedding space (matrix D)')
# neural net train/test parameters
parser.add_argument('--alpha',
type=float, default=1.0,
help='Weight of graph matching loss')
parser.add_argument('--beta',
type=str2bool, default=False,
help='Specifies whether to scale parts of P-optimization loss')
parser.add_argument('-lr', '--learning_rate',
type=float, default=0.025,
help='Learning rate')
parser.add_argument('--batch_size',
type=int, default=2,
help='Number of instances in each batch')
parser.add_argument('--epochs',
type=int, default=2,
help='Number of epochs')
parser.add_argument('--early_stopping',
type=int, default=0,
help='Number of epochs with no improvement after which training will be stopped. '
'If <=0, no early stopping is used')
parser.add_argument('--iterations',
type=int, default=1,
help='Number of iterations for model to initialize and run. '
'Output results are averaged across iterations')
# CUDA parameters
parser.add_argument('--id_gpu',
default=-1, type=int,
help='Specifies which gpu to use. If <0, model is run on cpu')
# results parameters
parser.add_argument('-sp', '--save_path', type=str, default='',
help='Full path to folder where results are saved')
args = parser.parse_args()
if args.id_gpu >= 0:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# The GPU id to use
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.id_gpu)
# save configuration file
n_iter = max(1, args.iterations)
save_path = args.save_path
print("*************** Configuration ***************")
args_dic = vars(args)
for arg, value in args_dic.items():
line = arg + ' : ' + str(value)
print(line)
print("*********************************************\n")
# load data
dataset_path = str(Path(args.load_path) / args.dataset.lower())
labels = args.labels.lower()
transmode = args.transfer_mode
B_from_A = args.b_from_a
disc_pect = args.discrepancy_percent
n_layers = max(10, args.depth)
n_embedding = args.nembedding
topology_similarity = args.topology_similarity
embedding_type = args.embedding_type
embedding_similarity = args.embedding_similarity
if embedding_type == 'skipgram':
embedding_similarity = 'softmax'
if embedding_similarity == 'softmax':
n_negative = args.nnegative
scale_negative = args.scale_negative
else:
n_negative = 0
scale_negative = False
similarity_loss = args.similarity_loss
prediction_loss = args.prediction_loss
activation_function = args.activation_function
graph_distance = args.graph_distance
n_walks = args.nwalks
walk_length = args.walk_length
window_size = args.window_size
p = args.p
q = args.q
alpha = args.alpha
beta = args.beta
learning_rate = args.learning_rate
batch_size = args.batch_size
early_stopping = args.early_stopping
n_epochs = args.epochs
results = {'epochs': {}, 'train': {}, 'testA': {}}
if labels != 'cluster':
results['rsquaredA'] = {}
if transmode != '1graph':
results['testB'] = {}
if labels != 'cluster':
results['rsquaredB'] = {}
for iter in range(n_iter):
save_path_iter = save_path + str(iter+1) + '/'
if not os.path.exists(save_path_iter):
os.makedirs(save_path_iter)
# load data
n_nodes, n_features, n_labels, A, Afeatures, Alabels, Atrain, Atest, B, Bfeatures, Blabels, Ptrue = \
load_data(dataset_path, labels, transmode, B_from_A, disc_pect)
# create model
models = getGTLmodel(n_nodes, n_features, n_embedding, labels, n_labels, n_layers, n_negative, scale_negative,
embedding_type, embedding_similarity, similarity_loss, prediction_loss,
activation_function, transmode, graph_distance, learning_rate, alpha, save_path)
if iter == 0:
print("\nEmbedding model summary:".upper())
print(models['EmbeddingModel'].summary())
print("\nEmbedding similarity branch summary:".upper())
print(models['EmbeddingModel'].get_layer('Branch_SimilarityA').summary())
print("\nPrediction model summary:".upper())
print(models['PredictionModel'].summary())
print("\nPrediction branch summary:".upper())
print(models['PredictionModel'].get_layer('Branch_Prediction').summary())
# train/test model
print("\n ============================================== ")
print("|*************** ITERATION #{:3d} ***************|".format(iter+1) if n_iter > 1 else
"|************ GRAPH TRANSFER LEARNING *********|")
print(" ============================================== ")
iter_results = train(models, A, Afeatures, Alabels, Atrain, Atest, B, Bfeatures, Blabels, Ptrue, transmode,
topology_similarity, n_walks, walk_length, window_size, p, q, n_negative, learning_rate,
beta, n_epochs, early_stopping, batch_size, save_path_iter)
results['epochs'][iter] = iter_results['epochs']
results['train'][iter] = iter_results['acc_train']
results['testA'][iter] = iter_results['acc_testA']
if labels != 'cluster':
results['rsquaredA'][iter] = iter_results['acc_rsquaredA']
if transmode != '1graph':
results['testB'][iter] = iter_results['acc_testB']
if labels != 'cluster':
results['rsquaredB'][iter] = iter_results['acc_rsquaredB']
# save global results
picklename = "GlobalResults"
save_global_results(args, iter + 1, results, picklename)
epochs_mean = np.mean(list(results['epochs'].values()))
epochs_std = np.std(list(results['epochs'].values()))
train_mean = np.mean(list(results['train'].values()))
train_std = np.std(list(results['train'].values()))
testA_mean = np.mean(list(results['testA'].values()))
testA_std = np.std(list(results['testA'].values()))
res_str = ""
if labels != 'cluster':
rsquaredA_mean = np.mean(list(results['rsquaredA'].values()))
rsquaredA_std = np.std(list(results['rsquaredA'].values()))
res_str += "\tR-squared (graph A) = {0:.4f} (\u00B1{1:.4f})\n".format(rsquaredA_mean, rsquaredA_std)
if transmode != '1graph':
testB_mean = np.mean(list(results['testB'].values()))
testB_std = np.std(list(results['testB'].values()))
res_str += "\tTest accuracy (graph B) = {0:.4f} (\u00B1{1:.4f})\n".format(testB_mean, testB_std)
if labels != 'cluster':
rsquaredB_mean = np.mean(list(results['rsquaredB'].values()))
rsquaredB_std = np.std(list(results['rsquaredB'].values()))
res_str += "\tR-squared (graph B) = {0:.4f} (\u00B1{1:.4f})\n".format(rsquaredB_mean, rsquaredB_std)
print("\n\n ============================================== ")
print("|*************** FINAL RESULTS ***************|")
print(" ============================================== ")
print(f"After {n_iter:2d} iteration(s), the average\n"
f"\tConvergence rate = {epochs_mean:.4f} (\u00B1{epochs_std:.4f})\n"
f"\tTrain accuracy (graph A) = {train_mean:.4f} (\u00B1{train_std:.4f})\n"
f"\tTest accuracy (graph A) = {testA_mean:.4f} (\u00B1{testA_std:.4f})")
print(res_str)
| en | 0.248254 | Main module to run GTL model. Depending on the input parameters, it loads certain data, creates and trains a specific GTL model @author: <NAME> SPIRAL Group Electrical & Computer Engineering Northeastern University # dataset parameters # general graph parameters # graph embedding parameters # prediction branch parameters # randomwalk parameters # second graph parameters # neural net train/test parameters # CUDA parameters # results parameters # The GPU id to use # save configuration file # load data # load data # create model # train/test model #{:3d} ***************|".format(iter+1) if n_iter > 1 else # save global results | 2.848177 | 3 |
game/game.py | chtunsw/SpaceTravel | 0 | 6619888 | import pygame
from pathlib import Path
from enum import Enum
from random import randrange
GAME_NAME = "Space Travel"
WIN_WIDTH, WIN_HEIGHT = 450, 800
STAR_MIN_RADIUS = 2
STAR_MAX_RADIUS = 5
TRANSPARENT_COLOR = (0, 0, 0, 0)
SPACE_BLUE = (10, 25, 50)
STAR_WHITE = (255, 255, 255)
SCORE_FONT_FAMILY = "comicsans"
SCORE_FONT_SIZE = 24
SCORE_FONT_COLOR = (10, 200, 130)
IMGS_DIR = Path(__file__).parent.absolute() / "imgs"
SPACESHIP_IMG = pygame.image.load((IMGS_DIR / "spaceship.png").as_posix())
class Direction(Enum):
LEFT = "left"
RIGHT = "right"
UP = "up"
DOWN = "down"
class Star(pygame.sprite.Sprite):
def __init__(self, x, y, radius):
super().__init__()
self.radius = radius
self.transparent_color = TRANSPARENT_COLOR
self.color = STAR_WHITE
self.image = pygame.Surface([2 * self.radius, 2 * self.radius])
self.image.fill(self.transparent_color)
pygame.draw.circle(self.image, self.color, (radius, radius), radius)
self.rect = self.image.get_rect(center=(x, y))
self.tick_count = 0
self.velocity = 0.5
self.acceleration = 0.001
def update(self):
center_x, center_y = self.rect.center
self.tick_count += 1
displacement = self.velocity * self.tick_count + 0.5 * self.acceleration * self.tick_count ** 2
new_center_y = int(center_y + displacement)
self.rect.center = (center_x, new_center_y)
class SpaceShip(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = SPACESHIP_IMG
self.rect = self.image.get_rect(center=(x, y))
self.velocity = 15
def move(self, direction, display_surf):
x_max, y_max = display_surf.get_size()
new_center_x, new_center_y = center_x, center_y = self.rect.center
displacement = self.velocity
if direction == Direction.UP:
expected_y = int(center_y - displacement)
new_center_y = expected_y if expected_y > 0 else 0
elif direction == Direction.DOWN:
expected_y = int(center_y + displacement)
new_center_y= expected_y if expected_y < y_max else y_max
elif direction == Direction.LEFT:
expected_x = int(center_x - displacement)
new_center_x = expected_x if expected_x > 0 else 0
elif direction == Direction.RIGHT:
expected_x = int(center_x + displacement)
new_center_x = expected_x if expected_x < x_max else x_max
self.rect.center = (new_center_x, new_center_y)
def render(self, display_surf):
display_surf.blit(self.image, self.rect.topleft)
class App:
def __init__(self):
self.name = GAME_NAME
self.is_running = False
self.score = 0
self.score_font_family = SCORE_FONT_FAMILY
self.score_font_size = SCORE_FONT_SIZE
self.score_font_color = SCORE_FONT_COLOR
self.display_surf = None
self.size = self.width, self.height = WIN_WIDTH, WIN_HEIGHT
self.space_blue = SPACE_BLUE
self.spaceship = None
self.star_min_radius = STAR_MIN_RADIUS
self.star_max_radius = STAR_MAX_RADIUS
self.star_list = pygame.sprite.Group([])
def init_game(self):
pygame.init()
pygame.display.set_caption(self.name)
self.spaceship = SpaceShip(int(self.width / 2), int(self.height / 3 * 2))
self.display_surf = pygame.display.set_mode(self.size)
self.is_running = True
def handle_event(self, event):
if event.type == pygame.QUIT:
self.is_running = False
def udpate_state(self):
self.update_stars()
self.handle_spaceship_move()
self.collision_detect()
self.increase_score_with_clock()
def update_screen(self):
self.display_surf.fill(self.space_blue)
self.star_list.draw(self.display_surf)
self.spaceship.render(self.display_surf)
self.render_score()
pygame.display.update()
def run(self):
self.init_game()
clock = pygame.time.Clock()
while (self.is_running):
for event in pygame.event.get():
self.handle_event(event)
self.udpate_state()
self.update_screen()
clock.tick(30)
self.stop()
def stop(self):
pygame.quit()
def update_stars(self):
new_star = Star(randrange(0, self.width), 0, randrange(self.star_min_radius, self.star_max_radius))
self.star_list.add(new_star)
self.star_list.update()
for star in self.star_list.sprites():
if star.rect.center[1] > self.height:
self.star_list.remove(star)
def handle_spaceship_move(self):
if pygame.key.get_pressed()[pygame.K_UP]:
self.spaceship.move(Direction.UP, self.display_surf)
if pygame.key.get_pressed()[pygame.K_DOWN]:
self.spaceship.move(Direction.DOWN, self.display_surf)
if pygame.key.get_pressed()[pygame.K_LEFT]:
self.spaceship.move(Direction.LEFT, self.display_surf)
if pygame.key.get_pressed()[pygame.K_RIGHT]:
self.spaceship.move(Direction.RIGHT, self.display_surf)
def collision_detect(self):
if pygame.sprite.spritecollide(self.spaceship, self.star_list, False):
self.is_running = False
def increase_score_with_clock(self):
self.score += 1
def render_score(self):
score_string = "Score: {}".format(self.score)
score_font = pygame.font.SysFont(self.score_font_family, self.score_font_size)
current_score = score_font.render(score_string, 1, self.score_font_color)
self.display_surf.blit(current_score, (8, 8))
if __name__ == "__main__":
app = App()
app.run() | import pygame
from pathlib import Path
from enum import Enum
from random import randrange
GAME_NAME = "Space Travel"
WIN_WIDTH, WIN_HEIGHT = 450, 800
STAR_MIN_RADIUS = 2
STAR_MAX_RADIUS = 5
TRANSPARENT_COLOR = (0, 0, 0, 0)
SPACE_BLUE = (10, 25, 50)
STAR_WHITE = (255, 255, 255)
SCORE_FONT_FAMILY = "comicsans"
SCORE_FONT_SIZE = 24
SCORE_FONT_COLOR = (10, 200, 130)
IMGS_DIR = Path(__file__).parent.absolute() / "imgs"
SPACESHIP_IMG = pygame.image.load((IMGS_DIR / "spaceship.png").as_posix())
class Direction(Enum):
LEFT = "left"
RIGHT = "right"
UP = "up"
DOWN = "down"
class Star(pygame.sprite.Sprite):
def __init__(self, x, y, radius):
super().__init__()
self.radius = radius
self.transparent_color = TRANSPARENT_COLOR
self.color = STAR_WHITE
self.image = pygame.Surface([2 * self.radius, 2 * self.radius])
self.image.fill(self.transparent_color)
pygame.draw.circle(self.image, self.color, (radius, radius), radius)
self.rect = self.image.get_rect(center=(x, y))
self.tick_count = 0
self.velocity = 0.5
self.acceleration = 0.001
def update(self):
center_x, center_y = self.rect.center
self.tick_count += 1
displacement = self.velocity * self.tick_count + 0.5 * self.acceleration * self.tick_count ** 2
new_center_y = int(center_y + displacement)
self.rect.center = (center_x, new_center_y)
class SpaceShip(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = SPACESHIP_IMG
self.rect = self.image.get_rect(center=(x, y))
self.velocity = 15
def move(self, direction, display_surf):
x_max, y_max = display_surf.get_size()
new_center_x, new_center_y = center_x, center_y = self.rect.center
displacement = self.velocity
if direction == Direction.UP:
expected_y = int(center_y - displacement)
new_center_y = expected_y if expected_y > 0 else 0
elif direction == Direction.DOWN:
expected_y = int(center_y + displacement)
new_center_y= expected_y if expected_y < y_max else y_max
elif direction == Direction.LEFT:
expected_x = int(center_x - displacement)
new_center_x = expected_x if expected_x > 0 else 0
elif direction == Direction.RIGHT:
expected_x = int(center_x + displacement)
new_center_x = expected_x if expected_x < x_max else x_max
self.rect.center = (new_center_x, new_center_y)
def render(self, display_surf):
display_surf.blit(self.image, self.rect.topleft)
class App:
def __init__(self):
self.name = GAME_NAME
self.is_running = False
self.score = 0
self.score_font_family = SCORE_FONT_FAMILY
self.score_font_size = SCORE_FONT_SIZE
self.score_font_color = SCORE_FONT_COLOR
self.display_surf = None
self.size = self.width, self.height = WIN_WIDTH, WIN_HEIGHT
self.space_blue = SPACE_BLUE
self.spaceship = None
self.star_min_radius = STAR_MIN_RADIUS
self.star_max_radius = STAR_MAX_RADIUS
self.star_list = pygame.sprite.Group([])
def init_game(self):
pygame.init()
pygame.display.set_caption(self.name)
self.spaceship = SpaceShip(int(self.width / 2), int(self.height / 3 * 2))
self.display_surf = pygame.display.set_mode(self.size)
self.is_running = True
def handle_event(self, event):
if event.type == pygame.QUIT:
self.is_running = False
def udpate_state(self):
self.update_stars()
self.handle_spaceship_move()
self.collision_detect()
self.increase_score_with_clock()
def update_screen(self):
self.display_surf.fill(self.space_blue)
self.star_list.draw(self.display_surf)
self.spaceship.render(self.display_surf)
self.render_score()
pygame.display.update()
def run(self):
self.init_game()
clock = pygame.time.Clock()
while (self.is_running):
for event in pygame.event.get():
self.handle_event(event)
self.udpate_state()
self.update_screen()
clock.tick(30)
self.stop()
def stop(self):
pygame.quit()
def update_stars(self):
new_star = Star(randrange(0, self.width), 0, randrange(self.star_min_radius, self.star_max_radius))
self.star_list.add(new_star)
self.star_list.update()
for star in self.star_list.sprites():
if star.rect.center[1] > self.height:
self.star_list.remove(star)
def handle_spaceship_move(self):
if pygame.key.get_pressed()[pygame.K_UP]:
self.spaceship.move(Direction.UP, self.display_surf)
if pygame.key.get_pressed()[pygame.K_DOWN]:
self.spaceship.move(Direction.DOWN, self.display_surf)
if pygame.key.get_pressed()[pygame.K_LEFT]:
self.spaceship.move(Direction.LEFT, self.display_surf)
if pygame.key.get_pressed()[pygame.K_RIGHT]:
self.spaceship.move(Direction.RIGHT, self.display_surf)
def collision_detect(self):
if pygame.sprite.spritecollide(self.spaceship, self.star_list, False):
self.is_running = False
def increase_score_with_clock(self):
self.score += 1
def render_score(self):
score_string = "Score: {}".format(self.score)
score_font = pygame.font.SysFont(self.score_font_family, self.score_font_size)
current_score = score_font.render(score_string, 1, self.score_font_color)
self.display_surf.blit(current_score, (8, 8))
if __name__ == "__main__":
app = App()
app.run() | none | 1 | 3.261707 | 3 | |
training.py | orhanf/selector | 0 | 6619889 | <filename>training.py<gh_stars>0
import numpy
import time
from data_iterator import load_dataset, iterate_minibatches
def train(f_train_l, f_train_r, f_train_b,
f_valid_l, f_valid_r, f_valid_b,
xl, xr, y, lr=1., num_epochs=20,
lbatch_sz=128, rbatch_sz=128, bbatch_sz=128,
op='weighted-sum'
**kwargs):
# load data
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
print("Starting training...")
for epoch in range(num_epochs):
train_err_l = 0
train_err_r = 0
train_err_b = 0
tr_batches = 0
alphas = []
start_time = time.time()
for lbatch, rbatch, bbatch in zip(
iterate_minibatches(X_train, y_train, lbatch_sz, shuffle=True),
iterate_minibatches(X_train, y_train, rbatch_sz, shuffle=True),
iterate_minibatches(X_train, y_train, bbatch_sz, shuffle=True)
):
_train_err_b, alpha = f_train_b(lr, bbatch[0], bbatch[1], bbatch[2])
train_err_b += _train_err_b
alphas.append(alpha)
train_err_r += f_train_r(lr, rbatch[1], rbatch[2])
train_err_l += f_train_l(lr, lbatch[0], lbatch[2])
tr_batches += 1
# And a full pass over the validation data:
lval_err = 0
rval_err = 0
bval_err = 0
lval_acc = 0
rval_acc = 0
bval_acc = 0
val_batches = 0
for batch in iterate_minibatches(
X_val, y_val, 500, shuffle=False):
lbatch, rbatch, targets = batch
lerr, lacc = f_valid_l(lbatch, targets)
rerr, racc = f_valid_r(rbatch, targets)
berr, bacc = f_valid_b(lbatch, rbatch, targets)
lval_err += lerr
rval_err += rerr
bval_err += berr
lval_acc += lacc
rval_acc += racc
bval_acc += bacc
val_batches += 1
# Then we print the results for this epoch:
print(("Epoch {:>4} of {} took {:.3f}s" +
" train_loss - l:[{:.6f}] r:[{:.6f}] b:[{:.6f}] " +
" valid_loss - l:[{:.6f}] r:[{:.6f}] b:[{:.6f}] " +
" valid_acc - l:[{:.2f} %] r:[{:.2f} %] b:[{:.2f} %] alphas:[{}]").format(
epoch + 1, num_epochs, time.time() - start_time,
train_err_l / tr_batches, train_err_r / tr_batches,
train_err_b / tr_batches, lval_err / val_batches,
rval_err / val_batches, bval_err / val_batches,
lval_acc / val_batches * 100,
rval_acc / val_batches * 100,
bval_acc / val_batches * 100,
(numpy.vstack([aa.mean(1) for aa in alphas]).mean(0))))
# After training, we compute and print the test error:
ltest_err = 0
rtest_err = 0
btest_err = 0
ltest_acc = 0
rtest_acc = 0
btest_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
lbatch, rbatch, targets = batch
lerr, lacc = f_valid_l(lbatch, targets)
rerr, racc = f_valid_r(rbatch, targets)
berr, bacc = f_valid_b(lbatch, rbatch, targets)
ltest_err += lerr
rtest_err += rerr
btest_err += berr
ltest_acc += lacc
rtest_acc += racc
btest_acc += bacc
test_batches += 1
print("Final results:")
print(" l test loss:\t\t\t{:.6f}"
.format(ltest_err / test_batches))
print(" r test loss:\t\t\t{:.6f}"
.format(rtest_err / test_batches))
print(" b test loss:\t\t\t{:.6f}"
.format(btest_err / test_batches))
print(" l test accuracy:\t\t{:.2f} %"
.format(ltest_acc / test_batches * 100))
print(" r test accuracy:\t\t{:.2f} %"
.format(rtest_acc / test_batches * 100))
print(" b test accuracy:\t\t{:.2f} %"
.format(btest_acc / test_batches * 100))
| <filename>training.py<gh_stars>0
import numpy
import time
from data_iterator import load_dataset, iterate_minibatches
def train(f_train_l, f_train_r, f_train_b,
f_valid_l, f_valid_r, f_valid_b,
xl, xr, y, lr=1., num_epochs=20,
lbatch_sz=128, rbatch_sz=128, bbatch_sz=128,
op='weighted-sum'
**kwargs):
# load data
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
print("Starting training...")
for epoch in range(num_epochs):
train_err_l = 0
train_err_r = 0
train_err_b = 0
tr_batches = 0
alphas = []
start_time = time.time()
for lbatch, rbatch, bbatch in zip(
iterate_minibatches(X_train, y_train, lbatch_sz, shuffle=True),
iterate_minibatches(X_train, y_train, rbatch_sz, shuffle=True),
iterate_minibatches(X_train, y_train, bbatch_sz, shuffle=True)
):
_train_err_b, alpha = f_train_b(lr, bbatch[0], bbatch[1], bbatch[2])
train_err_b += _train_err_b
alphas.append(alpha)
train_err_r += f_train_r(lr, rbatch[1], rbatch[2])
train_err_l += f_train_l(lr, lbatch[0], lbatch[2])
tr_batches += 1
# And a full pass over the validation data:
lval_err = 0
rval_err = 0
bval_err = 0
lval_acc = 0
rval_acc = 0
bval_acc = 0
val_batches = 0
for batch in iterate_minibatches(
X_val, y_val, 500, shuffle=False):
lbatch, rbatch, targets = batch
lerr, lacc = f_valid_l(lbatch, targets)
rerr, racc = f_valid_r(rbatch, targets)
berr, bacc = f_valid_b(lbatch, rbatch, targets)
lval_err += lerr
rval_err += rerr
bval_err += berr
lval_acc += lacc
rval_acc += racc
bval_acc += bacc
val_batches += 1
# Then we print the results for this epoch:
print(("Epoch {:>4} of {} took {:.3f}s" +
" train_loss - l:[{:.6f}] r:[{:.6f}] b:[{:.6f}] " +
" valid_loss - l:[{:.6f}] r:[{:.6f}] b:[{:.6f}] " +
" valid_acc - l:[{:.2f} %] r:[{:.2f} %] b:[{:.2f} %] alphas:[{}]").format(
epoch + 1, num_epochs, time.time() - start_time,
train_err_l / tr_batches, train_err_r / tr_batches,
train_err_b / tr_batches, lval_err / val_batches,
rval_err / val_batches, bval_err / val_batches,
lval_acc / val_batches * 100,
rval_acc / val_batches * 100,
bval_acc / val_batches * 100,
(numpy.vstack([aa.mean(1) for aa in alphas]).mean(0))))
# After training, we compute and print the test error:
ltest_err = 0
rtest_err = 0
btest_err = 0
ltest_acc = 0
rtest_acc = 0
btest_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
lbatch, rbatch, targets = batch
lerr, lacc = f_valid_l(lbatch, targets)
rerr, racc = f_valid_r(rbatch, targets)
berr, bacc = f_valid_b(lbatch, rbatch, targets)
ltest_err += lerr
rtest_err += rerr
btest_err += berr
ltest_acc += lacc
rtest_acc += racc
btest_acc += bacc
test_batches += 1
print("Final results:")
print(" l test loss:\t\t\t{:.6f}"
.format(ltest_err / test_batches))
print(" r test loss:\t\t\t{:.6f}"
.format(rtest_err / test_batches))
print(" b test loss:\t\t\t{:.6f}"
.format(btest_err / test_batches))
print(" l test accuracy:\t\t{:.2f} %"
.format(ltest_acc / test_batches * 100))
print(" r test accuracy:\t\t{:.2f} %"
.format(rtest_acc / test_batches * 100))
print(" b test accuracy:\t\t{:.2f} %"
.format(btest_acc / test_batches * 100))
| en | 0.718911 | # load data # And a full pass over the validation data: # Then we print the results for this epoch: # After training, we compute and print the test error: | 2.207829 | 2 |
Testing/02_use_3rd_party/pytest_sample/bad_coding_style.py | t2y/python-study | 18 | 6619890 | <filename>Testing/02_use_3rd_party/pytest_sample/bad_coding_style.py
# -*- coding: utf-8 -*-
import sys
def f(x,y):
z=x+y
return z
def g():
d = {'x':1, 'y':2}
return d
| <filename>Testing/02_use_3rd_party/pytest_sample/bad_coding_style.py
# -*- coding: utf-8 -*-
import sys
def f(x,y):
z=x+y
return z
def g():
d = {'x':1, 'y':2}
return d
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.133234 | 2 |
modules/music.py | sevenut/jerbot-neo | 5 | 6619891 | <filename>modules/music.py
import youtube_dl
from discord.ext import commands
from modules.util import config, write_embed
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def play(self, ctx, play):
server = config[str(ctx.guild.id)]
if not server['voice']['enabled']:
return
await ctx.send("ok")
await ctx.author.voice.channel.connect()
await ctx.voice.play()
def setup(bot):
bot.add_cog(Music(bot))
| <filename>modules/music.py
import youtube_dl
from discord.ext import commands
from modules.util import config, write_embed
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def play(self, ctx, play):
server = config[str(ctx.guild.id)]
if not server['voice']['enabled']:
return
await ctx.send("ok")
await ctx.author.voice.channel.connect()
await ctx.voice.play()
def setup(bot):
bot.add_cog(Music(bot))
| none | 1 | 2.506326 | 3 | |
back-end/www/update_file_names.py | yenchiah/deep-smoke-machine | 88 | 6619892 | <reponame>yenchiah/deep-smoke-machine<gh_stars>10-100
# This script replaces the old file names to the new ones (with camera id and view id)
from util import *
def main():
#p = "../data/rgb/"
p = "../data/flow/"
f_list = get_all_file_names_in_folder(p)
for file_name in f_list:
new_file_name = get_new_file_name(file_name)
print("-"*60)
print(file_name)
print(new_file_name)
os.rename(p+file_name, p+new_file_name)
def get_new_file_name(file_name):
camera_id = -1
view_id = -1
if "clairton1" in file_name:
camera_id = 0
if "6304-964-6807-1467" in file_name or "6304-884-6807-1387" in file_name or "6304-944-6807-1447" in file_name or "6304-884-6807-1387" in file_name or "6282-1154-6769-1641" in file_name:
view_id = 0
elif "6007-1008-6509-1510" in file_name or "6007-928-6509-1430" in file_name or "6007-988-6509-1490" in file_name or "6007-928-6509-1430" in file_name or "5989-1127-6538-1675" in file_name:
view_id = 1
elif "5648-1004-6150-1506" in file_name or "5648-924-6150-1426" in file_name or "5648-1004-6150-1506" in file_name or "5648-924-6150-1426" in file_name or "5596-1165-6144-1714" in file_name:
view_id = 2
elif "5329-1033-5831-1535" in file_name or "5329-953-5831-1455" in file_name or "5329-1033-5831-1535" in file_name or "5329-953-5831-1455" in file_name or "5298-1167-5846-1715" in file_name:
view_id = 3
elif "4897-1034-5400-1537" in file_name or "4897-954-5400-1457" in file_name or "4897-1034-5400-1537" in file_name or "4897-954-5400-1457" in file_name or "4869-1126-5417-1674" in file_name:
view_id = 4
elif "4365-1074-4867-1576" in file_name or "4365-994-4867-1496" in file_name or "4365-1074-4867-1576" in file_name or "4365-994-4867-1496" in file_name or "4365-1130-4914-1678" in file_name:
view_id = 5
elif "3981-1084-4484-1587" in file_name or "3981-1004-4484-1507" in file_name or "3981-1084-4484-1587" in file_name or "3981-1004-4484-1507" in file_name or "3978-1166-4495-1683" in file_name:
view_id = 6
elif "3544-1009-4026-1491" in file_name or "3544-899-4026-1381" in file_name or "3544-1109-4026-1591" in file_name or "3544-899-4026-1381" in file_name or "3504-1067-4125-1688" in file_name:
view_id = 7
elif "3012-1145-3515-1648" in file_name or "3012-1045-3515-1548" in file_name or "3012-1145-3515-1648" in file_name or "3012-1045-3515-1548" in file_name or "3022-1135-3539-1652" in file_name:
view_id = 8
elif "3271-1116-3774-1619" in file_name or "3271-1016-3774-1519" in file_name or "3271-1116-3774-1619" in file_name or "3271-1016-3774-1519" in file_name or "3237-1143-3768-1674" in file_name:
view_id = 9
elif "2583-1111-3086-1614" in file_name or "2583-1011-3086-1514" in file_name or "2583-1211-3086-1714" in file_name or "2583-1011-3086-1514" in file_name or "2614-1123-3116-1625" in file_name:
view_id = 10
elif "2053-1123-2556-1626" in file_name or "2053-1023-2556-1526" in file_name or "2053-1173-2556-1676" in file_name or "2053-1023-2556-1526" in file_name or "2102-1101-2605-1603" in file_name:
view_id = 11
elif "1626-1115-2129-1618" in file_name or "1626-1015-2129-1518" in file_name or "1626-1265-2129-1768" in file_name or "1626-1015-2129-1518" in file_name or "1650-1074-2212-1636" in file_name:
view_id = 12
elif "1196-1135-1699-1638" in file_name or "1196-1035-1699-1538" in file_name or "1196-1205-1699-1708" in file_name or "1196-1035-1699-1538" in file_name or "1242-1084-1736-1578" in file_name:
view_id = 13
elif "763-1132-1265-1634" in file_name or "763-1032-1265-1534" in file_name or "763-1252-1265-1754" in file_name or "763-1032-1265-1534" in file_name or "814-1076-1308-1570" in file_name:
view_id = 14
file_name = file_name.replace("clairton1", str(camera_id) + "-" + str(view_id))
elif "braddock1" in file_name:
camera_id = 1
if "3018-478-3536-996" in file_name:
view_id = 0
file_name = file_name.replace("braddock1", str(camera_id) + "-" + str(view_id))
elif "westmifflin1" in file_name:
camera_id = 2
if "2617-1625-3124-2132" in file_name:
view_id = 0
elif "874-1602-1380-2108" in file_name:
view_id = 1
elif "488-1550-994-2056" in file_name:
view_id = 2
file_name = file_name.replace("westmifflin1", str(camera_id) + "-" + str(view_id))
return file_name
main()
| # This script replaces the old file names to the new ones (with camera id and view id)
from util import *
def main():
#p = "../data/rgb/"
p = "../data/flow/"
f_list = get_all_file_names_in_folder(p)
for file_name in f_list:
new_file_name = get_new_file_name(file_name)
print("-"*60)
print(file_name)
print(new_file_name)
os.rename(p+file_name, p+new_file_name)
def get_new_file_name(file_name):
camera_id = -1
view_id = -1
if "clairton1" in file_name:
camera_id = 0
if "6304-964-6807-1467" in file_name or "6304-884-6807-1387" in file_name or "6304-944-6807-1447" in file_name or "6304-884-6807-1387" in file_name or "6282-1154-6769-1641" in file_name:
view_id = 0
elif "6007-1008-6509-1510" in file_name or "6007-928-6509-1430" in file_name or "6007-988-6509-1490" in file_name or "6007-928-6509-1430" in file_name or "5989-1127-6538-1675" in file_name:
view_id = 1
elif "5648-1004-6150-1506" in file_name or "5648-924-6150-1426" in file_name or "5648-1004-6150-1506" in file_name or "5648-924-6150-1426" in file_name or "5596-1165-6144-1714" in file_name:
view_id = 2
elif "5329-1033-5831-1535" in file_name or "5329-953-5831-1455" in file_name or "5329-1033-5831-1535" in file_name or "5329-953-5831-1455" in file_name or "5298-1167-5846-1715" in file_name:
view_id = 3
elif "4897-1034-5400-1537" in file_name or "4897-954-5400-1457" in file_name or "4897-1034-5400-1537" in file_name or "4897-954-5400-1457" in file_name or "4869-1126-5417-1674" in file_name:
view_id = 4
elif "4365-1074-4867-1576" in file_name or "4365-994-4867-1496" in file_name or "4365-1074-4867-1576" in file_name or "4365-994-4867-1496" in file_name or "4365-1130-4914-1678" in file_name:
view_id = 5
elif "3981-1084-4484-1587" in file_name or "3981-1004-4484-1507" in file_name or "3981-1084-4484-1587" in file_name or "3981-1004-4484-1507" in file_name or "3978-1166-4495-1683" in file_name:
view_id = 6
elif "3544-1009-4026-1491" in file_name or "3544-899-4026-1381" in file_name or "3544-1109-4026-1591" in file_name or "3544-899-4026-1381" in file_name or "3504-1067-4125-1688" in file_name:
view_id = 7
elif "3012-1145-3515-1648" in file_name or "3012-1045-3515-1548" in file_name or "3012-1145-3515-1648" in file_name or "3012-1045-3515-1548" in file_name or "3022-1135-3539-1652" in file_name:
view_id = 8
elif "3271-1116-3774-1619" in file_name or "3271-1016-3774-1519" in file_name or "3271-1116-3774-1619" in file_name or "3271-1016-3774-1519" in file_name or "3237-1143-3768-1674" in file_name:
view_id = 9
elif "2583-1111-3086-1614" in file_name or "2583-1011-3086-1514" in file_name or "2583-1211-3086-1714" in file_name or "2583-1011-3086-1514" in file_name or "2614-1123-3116-1625" in file_name:
view_id = 10
elif "2053-1123-2556-1626" in file_name or "2053-1023-2556-1526" in file_name or "2053-1173-2556-1676" in file_name or "2053-1023-2556-1526" in file_name or "2102-1101-2605-1603" in file_name:
view_id = 11
elif "1626-1115-2129-1618" in file_name or "1626-1015-2129-1518" in file_name or "1626-1265-2129-1768" in file_name or "1626-1015-2129-1518" in file_name or "1650-1074-2212-1636" in file_name:
view_id = 12
elif "1196-1135-1699-1638" in file_name or "1196-1035-1699-1538" in file_name or "1196-1205-1699-1708" in file_name or "1196-1035-1699-1538" in file_name or "1242-1084-1736-1578" in file_name:
view_id = 13
elif "763-1132-1265-1634" in file_name or "763-1032-1265-1534" in file_name or "763-1252-1265-1754" in file_name or "763-1032-1265-1534" in file_name or "814-1076-1308-1570" in file_name:
view_id = 14
file_name = file_name.replace("clairton1", str(camera_id) + "-" + str(view_id))
elif "braddock1" in file_name:
camera_id = 1
if "3018-478-3536-996" in file_name:
view_id = 0
file_name = file_name.replace("braddock1", str(camera_id) + "-" + str(view_id))
elif "westmifflin1" in file_name:
camera_id = 2
if "2617-1625-3124-2132" in file_name:
view_id = 0
elif "874-1602-1380-2108" in file_name:
view_id = 1
elif "488-1550-994-2056" in file_name:
view_id = 2
file_name = file_name.replace("westmifflin1", str(camera_id) + "-" + str(view_id))
return file_name
main() | en | 0.885182 | # This script replaces the old file names to the new ones (with camera id and view id) #p = "../data/rgb/" | 3.17484 | 3 |
src/loanAPI/urls.py | DanCh11/bank_loan | 0 | 6619893 | from django.urls import path, include
from rest_framework import routers
from rest_framework import views
from .views import myform, approve_reject, ApprovalsView
router = routers.DefaultRouter()
router.register('loanAPI', ApprovalsView)
urlpatterns = [
path('form/', myform, name='myform'),
path('', include(router.urls)),
path('status/', approve_reject)
] | from django.urls import path, include
from rest_framework import routers
from rest_framework import views
from .views import myform, approve_reject, ApprovalsView
router = routers.DefaultRouter()
router.register('loanAPI', ApprovalsView)
urlpatterns = [
path('form/', myform, name='myform'),
path('', include(router.urls)),
path('status/', approve_reject)
] | none | 1 | 1.808808 | 2 | |
Day09/Day9.py | bugraaldal/Advent-of-Code-2020 | 1 | 6619894 | """ I see that I was being silly with the names. I would say this is a bad practice. """
invld = -1
prev = 25
with open("Day9.txt") as read:
numbers = [int(i) for i in read.readlines()]
for i in range(prev, len(numbers)):
prevs = numbers[i - prev:i]
valid = False
for j in range(prev):
for k in range(prev):
if j != k and prevs[j] + prevs[k] == numbers[i]:
valid = True
break
if valid:
break
if not valid:
invld = numbers[i]
print(invld)
break
anslist = [0]
for n in numbers:
anslist.append(anslist[-1] + n)
for i in range(len(numbers)):
for j in range(i + 2, len(numbers)):
final = anslist[j + 1] - anslist[i]
if final == invld:
idunnohowtonamw = numbers[i:j]
print(min(idunnohowtonamw) + max(idunnohowtonamw))
| """ I see that I was being silly with the names. I would say this is a bad practice. """
invld = -1
prev = 25
with open("Day9.txt") as read:
numbers = [int(i) for i in read.readlines()]
for i in range(prev, len(numbers)):
prevs = numbers[i - prev:i]
valid = False
for j in range(prev):
for k in range(prev):
if j != k and prevs[j] + prevs[k] == numbers[i]:
valid = True
break
if valid:
break
if not valid:
invld = numbers[i]
print(invld)
break
anslist = [0]
for n in numbers:
anslist.append(anslist[-1] + n)
for i in range(len(numbers)):
for j in range(i + 2, len(numbers)):
final = anslist[j + 1] - anslist[i]
if final == invld:
idunnohowtonamw = numbers[i:j]
print(min(idunnohowtonamw) + max(idunnohowtonamw))
| en | 0.998108 | I see that I was being silly with the names. I would say this is a bad practice. | 2.754186 | 3 |
arctia/entity.py | unternehmen/arctia | 1 | 6619895 | <filename>arctia/entity.py
class Entity(object):
"""
An Entity is a immobile item in the game, e.g., a stone.
"""
def __init__(self, kind, location):
assert kind in ('bug', 'rock', 'fish'), \
'unknown entity kind: %s' % (kind,)
self.location = location
self.kind = kind
| <filename>arctia/entity.py
class Entity(object):
"""
An Entity is a immobile item in the game, e.g., a stone.
"""
def __init__(self, kind, location):
assert kind in ('bug', 'rock', 'fish'), \
'unknown entity kind: %s' % (kind,)
self.location = location
self.kind = kind
| en | 0.845248 | An Entity is a immobile item in the game, e.g., a stone. | 3.078947 | 3 |
prototype/fake_data/help_query_secondary_nouns.py | reMarkable/cobalt | 0 | 6619896 | <reponame>reMarkable/cobalt
#!/usr/bin/env python
# Copyright 2016 The Fuchsia Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HELP_QUERY_SECONDARY_NOUNS = [
'flower',
'game',
'hill',
'home',
'juice',
'kite',
'lake',
'meal',
'moon',
'nest',
'pen',
'pencil',
'plant',
'rain',
'river',
'road',
'rock',
'room',
'rose',
'soda'
]
| #!/usr/bin/env python
# Copyright 2016 The Fuchsia Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HELP_QUERY_SECONDARY_NOUNS = [
'flower',
'game',
'hill',
'home',
'juice',
'kite',
'lake',
'meal',
'moon',
'nest',
'pen',
'pencil',
'plant',
'rain',
'river',
'road',
'rock',
'room',
'rose',
'soda'
] | en | 0.833112 | #!/usr/bin/env python # Copyright 2016 The Fuchsia Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.263995 | 1 |
test/timeout_game_manager_test.py | AustinHellerRepo/GameManager | 0 | 6619897 | <gh_stars>0
from __future__ import annotations
import unittest
from typing import List, Tuple, Dict, Callable, Type, Set
import os
import time
from datetime import datetime
import uuid
from src.austin_heller_repo.game_manager import GameManagerClientServerMessage, GameManagerStructureFactory, AuthenticateClientRequestGameManagerClientServerMessage, AuthenticateClientResponseGameManagerClientServerMessage, UrlNavigationNeededResponseGameManagerClientServerMessage, GameManagerClientServerMessageTypeEnum, ClientAlreadyAuthenticatedErrorGameManagerClientServerMessage, AuthenticationTimeoutErrorGameManagerClientServerMessage
from austin_heller_repo.client_authentication_manager import ClientAuthenticationClientServerMessage
from austin_heller_repo.socket_queued_message_framework import ClientMessengerFactory, ServerMessengerFactory, ClientServerMessage
from austin_heller_repo.socket import ClientSocketFactory, ServerSocketFactory
from austin_heller_repo.common import HostPointer
from austin_heller_repo.threading import SingletonMemorySequentialQueueFactory, Semaphore, start_thread
def get_default_host_port() -> int:
return 35125
def get_default_client_authentication_port() -> int:
return 35124 # NOTE this is what the client_authentication_manager_service is listening on
def get_default_client_messenger_factory() -> ClientMessengerFactory:
return ClientMessengerFactory(
client_socket_factory=ClientSocketFactory(
to_server_packet_bytes_length=4096
),
server_host_pointer=HostPointer(
host_address="localhost",
host_port=get_default_host_port()
),
client_server_message_class=GameManagerClientServerMessage,
is_debug=True
)
def get_default_server_messenger_factory() -> ServerMessengerFactory:
return ServerMessengerFactory(
server_socket_factory=ServerSocketFactory(
to_client_packet_bytes_length=4096,
listening_limit_total=10,
accept_timeout_seconds=1.0
),
sequential_queue_factory=SingletonMemorySequentialQueueFactory(),
local_host_pointer=HostPointer(
host_address="localhost",
host_port=get_default_host_port()
),
client_server_message_class=GameManagerClientServerMessage,
structure_factory=GameManagerStructureFactory(
client_authentication_client_messenger_factory=ClientMessengerFactory(
client_socket_factory=ClientSocketFactory(
to_server_packet_bytes_length=4096
),
server_host_pointer=HostPointer(
host_address="localhost",
host_port=get_default_client_authentication_port()
),
client_server_message_class=ClientAuthenticationClientServerMessage,
is_debug=True
),
authentication_timeout_seconds=5,
is_debug=True
),
is_debug=True
)
class TimeoutGameManagerTest(unittest.TestCase):
def test_client_authentication(self):
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
client_messenger = get_default_client_messenger_factory().get_client_messenger()
client_messenger.connect_to_server()
callback_total = 0
authentication_timeout_error_client_server_message = None # type: AuthenticationTimeoutErrorGameManagerClientServerMessage
blocking_semaphore = Semaphore()
blocking_semaphore.acquire()
def callback(client_server_message: GameManagerClientServerMessage):
nonlocal callback_total
nonlocal authentication_timeout_error_client_server_message
callback_total += 1
print(f"{datetime.utcnow()}: test: callback: client_server_message: {client_server_message.__class__.get_client_server_message_type()}")
if callback_total == 1:
self.assertIsInstance(client_server_message, UrlNavigationNeededResponseGameManagerClientServerMessage)
client_server_message.navigate_to_url()
elif callback_total == 2:
self.assertIsInstance(client_server_message, AuthenticationTimeoutErrorGameManagerClientServerMessage)
authentication_timeout_error_client_server_message = client_server_message
blocking_semaphore.release()
else:
raise Exception(f"Unexpected callback total: {callback_total}")
found_exception = None
def on_exception(exception: Exception):
nonlocal found_exception
if found_exception is None:
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"WAIT UNTIL TOLD TO AUTHENTICATE")
client_messenger.send_to_server(
request_client_server_message=AuthenticateClientRequestGameManagerClientServerMessage()
)
print(f"{datetime.utcnow()}: test: waiting for authentication: start")
blocking_semaphore.acquire()
blocking_semaphore.release()
print(f"{datetime.utcnow()}: test: waiting for authentication: end")
print(f"AUTHENTICATE NOW")
time.sleep(5)
client_messenger.dispose()
time.sleep(1)
server_messenger.stop_receiving_from_clients()
server_messenger.dispose()
self.assertIsNotNone(authentication_timeout_error_client_server_message)
self.assertEqual(2, callback_total)
| from __future__ import annotations
import unittest
from typing import List, Tuple, Dict, Callable, Type, Set
import os
import time
from datetime import datetime
import uuid
from src.austin_heller_repo.game_manager import GameManagerClientServerMessage, GameManagerStructureFactory, AuthenticateClientRequestGameManagerClientServerMessage, AuthenticateClientResponseGameManagerClientServerMessage, UrlNavigationNeededResponseGameManagerClientServerMessage, GameManagerClientServerMessageTypeEnum, ClientAlreadyAuthenticatedErrorGameManagerClientServerMessage, AuthenticationTimeoutErrorGameManagerClientServerMessage
from austin_heller_repo.client_authentication_manager import ClientAuthenticationClientServerMessage
from austin_heller_repo.socket_queued_message_framework import ClientMessengerFactory, ServerMessengerFactory, ClientServerMessage
from austin_heller_repo.socket import ClientSocketFactory, ServerSocketFactory
from austin_heller_repo.common import HostPointer
from austin_heller_repo.threading import SingletonMemorySequentialQueueFactory, Semaphore, start_thread
def get_default_host_port() -> int:
return 35125
def get_default_client_authentication_port() -> int:
return 35124 # NOTE this is what the client_authentication_manager_service is listening on
def get_default_client_messenger_factory() -> ClientMessengerFactory:
return ClientMessengerFactory(
client_socket_factory=ClientSocketFactory(
to_server_packet_bytes_length=4096
),
server_host_pointer=HostPointer(
host_address="localhost",
host_port=get_default_host_port()
),
client_server_message_class=GameManagerClientServerMessage,
is_debug=True
)
def get_default_server_messenger_factory() -> ServerMessengerFactory:
return ServerMessengerFactory(
server_socket_factory=ServerSocketFactory(
to_client_packet_bytes_length=4096,
listening_limit_total=10,
accept_timeout_seconds=1.0
),
sequential_queue_factory=SingletonMemorySequentialQueueFactory(),
local_host_pointer=HostPointer(
host_address="localhost",
host_port=get_default_host_port()
),
client_server_message_class=GameManagerClientServerMessage,
structure_factory=GameManagerStructureFactory(
client_authentication_client_messenger_factory=ClientMessengerFactory(
client_socket_factory=ClientSocketFactory(
to_server_packet_bytes_length=4096
),
server_host_pointer=HostPointer(
host_address="localhost",
host_port=get_default_client_authentication_port()
),
client_server_message_class=ClientAuthenticationClientServerMessage,
is_debug=True
),
authentication_timeout_seconds=5,
is_debug=True
),
is_debug=True
)
class TimeoutGameManagerTest(unittest.TestCase):
def test_client_authentication(self):
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
client_messenger = get_default_client_messenger_factory().get_client_messenger()
client_messenger.connect_to_server()
callback_total = 0
authentication_timeout_error_client_server_message = None # type: AuthenticationTimeoutErrorGameManagerClientServerMessage
blocking_semaphore = Semaphore()
blocking_semaphore.acquire()
def callback(client_server_message: GameManagerClientServerMessage):
nonlocal callback_total
nonlocal authentication_timeout_error_client_server_message
callback_total += 1
print(f"{datetime.utcnow()}: test: callback: client_server_message: {client_server_message.__class__.get_client_server_message_type()}")
if callback_total == 1:
self.assertIsInstance(client_server_message, UrlNavigationNeededResponseGameManagerClientServerMessage)
client_server_message.navigate_to_url()
elif callback_total == 2:
self.assertIsInstance(client_server_message, AuthenticationTimeoutErrorGameManagerClientServerMessage)
authentication_timeout_error_client_server_message = client_server_message
blocking_semaphore.release()
else:
raise Exception(f"Unexpected callback total: {callback_total}")
found_exception = None
def on_exception(exception: Exception):
nonlocal found_exception
if found_exception is None:
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"WAIT UNTIL TOLD TO AUTHENTICATE")
client_messenger.send_to_server(
request_client_server_message=AuthenticateClientRequestGameManagerClientServerMessage()
)
print(f"{datetime.utcnow()}: test: waiting for authentication: start")
blocking_semaphore.acquire()
blocking_semaphore.release()
print(f"{datetime.utcnow()}: test: waiting for authentication: end")
print(f"AUTHENTICATE NOW")
time.sleep(5)
client_messenger.dispose()
time.sleep(1)
server_messenger.stop_receiving_from_clients()
server_messenger.dispose()
self.assertIsNotNone(authentication_timeout_error_client_server_message)
self.assertEqual(2, callback_total) | en | 0.565939 | # NOTE this is what the client_authentication_manager_service is listening on # type: AuthenticationTimeoutErrorGameManagerClientServerMessage | 2.198461 | 2 |
waveletPlot.py | li012589/NeuralWavelet | 28 | 6619898 | <gh_stars>10-100
import numpy as np
import argparse, json, math
import os, glob
from PIL import Image
import flow, utils, source
import torch, torchvision
from torch import nn
from encoder import rans, coder
from utils import cdfDiscreteLogitstic, cdfMixDiscreteLogistic
from matplotlib import pyplot as plt
import matplotlib
parser = argparse.ArgumentParser(description="")
parser.add_argument("-folder", default=None, help="Path to load the trained model")
parser.add_argument("-cuda", type=int, default=-1, help="Which device to use with -1 standing for CPU, number bigger than -1 is N.O. of GPU.")
parser.add_argument("-depth", type=int, default=2, help="how many iterations to perform")
parser.add_argument("-best", action='store_false', help="if load the best model")
parser.add_argument("-epoch", type=int, default=-1, help="epoch to load")
parser.add_argument("-img", default=None, help="the img path")
args = parser.parse_args()
if args.img is None:
raise Exception("No image input")
device = torch.device("cpu" if args.cuda < 0 else "cuda:" + str(args.cuda))
if args.folder is None:
raise Exception("No loading")
else:
rootFolder = args.folder
if rootFolder[-1] != '/':
rootFolder += '/'
with open(rootFolder + "parameter.json", 'r') as f:
config = json.load(f)
locals().update(config)
target = config['target']
repeat = config['repeat']
nhidden = config['nhidden']
hchnl = config['hchnl']
nMixing = config['nMixing']
simplePrior = config['simplePrior']
batch = config['batch']
try:
HUE = config['HUE']
except:
HUE = True
IMG = Image.open(args.img)
IMG = torch.from_numpy(np.array(IMG)).permute([2, 0, 1])
IMG = IMG.reshape(1, *IMG.shape).float().to(device)
if not HUE:
IMG = utils.rgb2ycc(IMG, True, True)
# decide which model to load
if args.best:
name = max(glob.iglob(os.path.join(rootFolder, '*.saving')), key=os.path.getctime)
elif args.epoch == -1:
name = max(glob.iglob(os.path.join(rootFolder, 'savings', '*.saving')), key=os.path.getctime)
else:
name = max(glob.iglob(os.path.join(rootFolder, 'savings', 'SimpleMERA_epoch_' + str(args.epoch) + '.saving')), key=os.path.getctime)
# load the model
print("load saving at " + name)
loadedF = torch.load(name, map_location=device)
if 'easyMera' in name:
layerList = loadedF.layerList[:(4 * repeat)]
layerList = [layerList[no] for no in range(4 * repeat)]
elif '1to2Mera' in name:
layerList = loadedF.layerList[:(2 * repeat)]
layerList = [layerList[no] for no in range(2 * repeat)]
else:
raise Exception("model not define")
# Define dimensions
targetSize = IMG.shape[1:]
dimensional = 2
channel = targetSize[0]
blockLength = targetSize[-1]
# Define nomaliziation and decimal
if 'easyMera' in name:
decimal = flow.ScalingNshifting(256, -128)
elif '1to2Mera' in name:
decimal = flow.ScalingNshifting(256, 0)
else:
raise Exception("model not define")
rounding = utils.roundingWidentityGradient
# Building MERA mode
if 'easyMera' in name:
fList = []
for _depth in reversed(range(args.depth)):
f = flow.SimpleMERA(blockLength, layerList, None, None, repeat, _depth + 1, nMixing, decimal=decimal, rounding=utils.roundingWidentityGradient).to(device)
fList.append(f)
elif '1to2Mera' in name:
fList = []
for _depth in reversed(range(args.depth)):
f = flow.OneToTwoMERA(blockLength, layerList, None, None, repeat, _depth + 1, nMixing, decimal=decimal, rounding=utils.roundingWidentityGradient).to(device)
fList.append(f)
else:
raise Exception("model not define")
zList = []
for _f in fList:
z, _ = _f.inverse(IMG)
zList.append(z)
z = torch.cat(zList, 0)
assert args.depth <= int(math.log(blockLength, 2))
def im2grp(t):
return t.reshape(t.shape[0], t.shape[1], t.shape[2] // 2, 2, t.shape[3] // 2, 2).permute([0, 1, 2, 4, 3, 5]).reshape(t.shape[0], t.shape[1], -1, 4)
def reform(tensor):
return tensor.reshape(tensor.shape[0], tensor.shape[1] // 3, 3, tensor.shape[2], tensor.shape[3]).permute([0, 1, 3, 4, 2]).contiguous().reshape(tensor.shape[0], tensor.shape[1] // 3, tensor.shape[2] * tensor.shape[3], 3)
# define renorm fn
def back01(tensor):
ten = tensor.clone().float()
ten = ten.view(ten.shape[0] * ten.shape[1], -1)
ten -= ten.min(1, keepdim=True)[0]
ten /= ten.max(1, keepdim=True)[0]
ten = ten.view(tensor.shape)
return ten
def grayWorld(tensor):
if tensor.dtype is torch.float32:
tensor = torch.round(tensor * 255).float()
meanRGB = tensor.reshape(tensor.shape[0], 3, -1).mean(-1)
gray = meanRGB.sum(-1, keepdim=True) / 3
scaleRGB = gray / meanRGB
scaledTensor = torch.round(tensor.reshape(tensor.shape[0], 3, -1) * scaleRGB.reshape(*scaleRGB.shape, 1)).reshape(tensor.shape)
return torch.clamp(scaledTensor, 0, 255).int()
def backMeanStd(tensor):
mean = IMG.reshape(*IMG.shape[:2], -1).mean(-1).reshape(*IMG.shape[:2], 1, 1)
std = IMG.reshape(*IMG.shape[:2], -1).std(-1).reshape(*IMG.shape[:2], 1, 1)
return tensor * std.repeat([1, 1, tensor.shape[-1], tensor.shape[-1]]) + mean.repeat([1, 1, tensor.shape[-1], tensor.shape[-1]])
# another renorm fn
def clip(tensor, l=0, h=255):
return torch.clamp(tensor, l, h).int()
# yet another renorm fn
def batchNorm(tensor, base=1.0):
m = nn.BatchNorm2d(tensor.shape[1], affine=False)
return m(tensor).float() + base
#renormFn = lambda x: grayWorld(back01(x))
renormFn = lambda x: back01(x)
# collect parts
ul = z
UR = []
DL = []
DR = []
for _depth in reversed(range(args.depth)):
_x = im2grp(ul)
ul = _x[:, :, :, 0].reshape(*_x.shape[:2], int(_x.shape[2] ** 0.5), int(_x.shape[2] ** 0.5)).contiguous()
_ul = ul[_depth].reshape(1, *ul.shape[1:])
if loadedF.meanNNlist is not None:
zeroDetails = torch.round(decimal.forward_(reform(loadedF.meanNNlist[0](decimal.inverse_(_ul))).contiguous()))
else:
zeroDetails = torch.round(decimal.forward_(loadedF.prior.priorList[0].mean.reshape(1, 3, 1, 3).repeat(1, 1, np.prod(ul.shape[-2:]), 1)).contiguous())
_x[:1, :, :, 1:] = _x[:1, :, :, 1:] - zeroDetails
ur = _x[:, :, :, 1].reshape(*_x.shape[:2], int(_x.shape[2] ** 0.5), int(_x.shape[2] ** 0.5)).contiguous()
dl = _x[:, :, :, 2].reshape(*_x.shape[:2], int(_x.shape[2] ** 0.5), int(_x.shape[2] ** 0.5)).contiguous()
dr = _x[:, :, :, 3].reshape(*_x.shape[:2], int(_x.shape[2] ** 0.5), int(_x.shape[2] ** 0.5)).contiguous()
UR.append(renormFn(ur))
DL.append(renormFn(dl))
DR.append(renormFn(dr))
#ul = back01(backMeanStd(batchNorm(ul, 0)))
ul = renormFn(ul)
#ul = back01(clip(backMeanStd(batchNorm(ul))))
for no in reversed(range(args.depth)):
ur = UR[no]
dl = DL[no]
dr = DR[no]
upper = torch.cat([ul, ur], -1)
down = torch.cat([dl, dr], -1)
ul = torch.cat([upper, down], -2)
if not HUE:
ul = torch.round(ul * 255)
ul = utils.ycc2rgb(ul, True, True).int()
# convert zremaoin to numpy array
zremain = ul[0].permute([1, 2, 0]).detach().cpu().numpy()
matplotlib.image.imsave(rootFolder + 'pic/BigWavelet.png', (zremain * 255).astype('uint8'))
'''
waveletPlot = plt.figure(figsize=(8, 8))
waveletAx = waveletPlot.add_subplot(111)
waveletAx.imshow(zremain)
plt.axis('off')
plt.savefig(rootFolder + 'pic/BigWavelet.pdf', bbox_inches="tight", pad_inches=0)
plt.close()
'''
| import numpy as np
import argparse, json, math
import os, glob
from PIL import Image
import flow, utils, source
import torch, torchvision
from torch import nn
from encoder import rans, coder
from utils import cdfDiscreteLogitstic, cdfMixDiscreteLogistic
from matplotlib import pyplot as plt
import matplotlib
parser = argparse.ArgumentParser(description="")
parser.add_argument("-folder", default=None, help="Path to load the trained model")
parser.add_argument("-cuda", type=int, default=-1, help="Which device to use with -1 standing for CPU, number bigger than -1 is N.O. of GPU.")
parser.add_argument("-depth", type=int, default=2, help="how many iterations to perform")
parser.add_argument("-best", action='store_false', help="if load the best model")
parser.add_argument("-epoch", type=int, default=-1, help="epoch to load")
parser.add_argument("-img", default=None, help="the img path")
args = parser.parse_args()
if args.img is None:
raise Exception("No image input")
device = torch.device("cpu" if args.cuda < 0 else "cuda:" + str(args.cuda))
if args.folder is None:
raise Exception("No loading")
else:
rootFolder = args.folder
if rootFolder[-1] != '/':
rootFolder += '/'
with open(rootFolder + "parameter.json", 'r') as f:
config = json.load(f)
locals().update(config)
target = config['target']
repeat = config['repeat']
nhidden = config['nhidden']
hchnl = config['hchnl']
nMixing = config['nMixing']
simplePrior = config['simplePrior']
batch = config['batch']
try:
HUE = config['HUE']
except:
HUE = True
IMG = Image.open(args.img)
IMG = torch.from_numpy(np.array(IMG)).permute([2, 0, 1])
IMG = IMG.reshape(1, *IMG.shape).float().to(device)
if not HUE:
IMG = utils.rgb2ycc(IMG, True, True)
# decide which model to load
if args.best:
name = max(glob.iglob(os.path.join(rootFolder, '*.saving')), key=os.path.getctime)
elif args.epoch == -1:
name = max(glob.iglob(os.path.join(rootFolder, 'savings', '*.saving')), key=os.path.getctime)
else:
name = max(glob.iglob(os.path.join(rootFolder, 'savings', 'SimpleMERA_epoch_' + str(args.epoch) + '.saving')), key=os.path.getctime)
# load the model
print("load saving at " + name)
loadedF = torch.load(name, map_location=device)
if 'easyMera' in name:
layerList = loadedF.layerList[:(4 * repeat)]
layerList = [layerList[no] for no in range(4 * repeat)]
elif '1to2Mera' in name:
layerList = loadedF.layerList[:(2 * repeat)]
layerList = [layerList[no] for no in range(2 * repeat)]
else:
raise Exception("model not define")
# Define dimensions
targetSize = IMG.shape[1:]
dimensional = 2
channel = targetSize[0]
blockLength = targetSize[-1]
# Define nomaliziation and decimal
if 'easyMera' in name:
decimal = flow.ScalingNshifting(256, -128)
elif '1to2Mera' in name:
decimal = flow.ScalingNshifting(256, 0)
else:
raise Exception("model not define")
rounding = utils.roundingWidentityGradient
# Building MERA mode
if 'easyMera' in name:
fList = []
for _depth in reversed(range(args.depth)):
f = flow.SimpleMERA(blockLength, layerList, None, None, repeat, _depth + 1, nMixing, decimal=decimal, rounding=utils.roundingWidentityGradient).to(device)
fList.append(f)
elif '1to2Mera' in name:
fList = []
for _depth in reversed(range(args.depth)):
f = flow.OneToTwoMERA(blockLength, layerList, None, None, repeat, _depth + 1, nMixing, decimal=decimal, rounding=utils.roundingWidentityGradient).to(device)
fList.append(f)
else:
raise Exception("model not define")
zList = []
for _f in fList:
z, _ = _f.inverse(IMG)
zList.append(z)
z = torch.cat(zList, 0)
assert args.depth <= int(math.log(blockLength, 2))
def im2grp(t):
return t.reshape(t.shape[0], t.shape[1], t.shape[2] // 2, 2, t.shape[3] // 2, 2).permute([0, 1, 2, 4, 3, 5]).reshape(t.shape[0], t.shape[1], -1, 4)
def reform(tensor):
return tensor.reshape(tensor.shape[0], tensor.shape[1] // 3, 3, tensor.shape[2], tensor.shape[3]).permute([0, 1, 3, 4, 2]).contiguous().reshape(tensor.shape[0], tensor.shape[1] // 3, tensor.shape[2] * tensor.shape[3], 3)
# define renorm fn
def back01(tensor):
ten = tensor.clone().float()
ten = ten.view(ten.shape[0] * ten.shape[1], -1)
ten -= ten.min(1, keepdim=True)[0]
ten /= ten.max(1, keepdim=True)[0]
ten = ten.view(tensor.shape)
return ten
def grayWorld(tensor):
if tensor.dtype is torch.float32:
tensor = torch.round(tensor * 255).float()
meanRGB = tensor.reshape(tensor.shape[0], 3, -1).mean(-1)
gray = meanRGB.sum(-1, keepdim=True) / 3
scaleRGB = gray / meanRGB
scaledTensor = torch.round(tensor.reshape(tensor.shape[0], 3, -1) * scaleRGB.reshape(*scaleRGB.shape, 1)).reshape(tensor.shape)
return torch.clamp(scaledTensor, 0, 255).int()
def backMeanStd(tensor):
mean = IMG.reshape(*IMG.shape[:2], -1).mean(-1).reshape(*IMG.shape[:2], 1, 1)
std = IMG.reshape(*IMG.shape[:2], -1).std(-1).reshape(*IMG.shape[:2], 1, 1)
return tensor * std.repeat([1, 1, tensor.shape[-1], tensor.shape[-1]]) + mean.repeat([1, 1, tensor.shape[-1], tensor.shape[-1]])
# another renorm fn
def clip(tensor, l=0, h=255):
return torch.clamp(tensor, l, h).int()
# yet another renorm fn
def batchNorm(tensor, base=1.0):
m = nn.BatchNorm2d(tensor.shape[1], affine=False)
return m(tensor).float() + base
#renormFn = lambda x: grayWorld(back01(x))
renormFn = lambda x: back01(x)
# collect parts
ul = z
UR = []
DL = []
DR = []
for _depth in reversed(range(args.depth)):
_x = im2grp(ul)
ul = _x[:, :, :, 0].reshape(*_x.shape[:2], int(_x.shape[2] ** 0.5), int(_x.shape[2] ** 0.5)).contiguous()
_ul = ul[_depth].reshape(1, *ul.shape[1:])
if loadedF.meanNNlist is not None:
zeroDetails = torch.round(decimal.forward_(reform(loadedF.meanNNlist[0](decimal.inverse_(_ul))).contiguous()))
else:
zeroDetails = torch.round(decimal.forward_(loadedF.prior.priorList[0].mean.reshape(1, 3, 1, 3).repeat(1, 1, np.prod(ul.shape[-2:]), 1)).contiguous())
_x[:1, :, :, 1:] = _x[:1, :, :, 1:] - zeroDetails
ur = _x[:, :, :, 1].reshape(*_x.shape[:2], int(_x.shape[2] ** 0.5), int(_x.shape[2] ** 0.5)).contiguous()
dl = _x[:, :, :, 2].reshape(*_x.shape[:2], int(_x.shape[2] ** 0.5), int(_x.shape[2] ** 0.5)).contiguous()
dr = _x[:, :, :, 3].reshape(*_x.shape[:2], int(_x.shape[2] ** 0.5), int(_x.shape[2] ** 0.5)).contiguous()
UR.append(renormFn(ur))
DL.append(renormFn(dl))
DR.append(renormFn(dr))
#ul = back01(backMeanStd(batchNorm(ul, 0)))
ul = renormFn(ul)
#ul = back01(clip(backMeanStd(batchNorm(ul))))
for no in reversed(range(args.depth)):
ur = UR[no]
dl = DL[no]
dr = DR[no]
upper = torch.cat([ul, ur], -1)
down = torch.cat([dl, dr], -1)
ul = torch.cat([upper, down], -2)
if not HUE:
ul = torch.round(ul * 255)
ul = utils.ycc2rgb(ul, True, True).int()
# convert zremaoin to numpy array
zremain = ul[0].permute([1, 2, 0]).detach().cpu().numpy()
matplotlib.image.imsave(rootFolder + 'pic/BigWavelet.png', (zremain * 255).astype('uint8'))
'''
waveletPlot = plt.figure(figsize=(8, 8))
waveletAx = waveletPlot.add_subplot(111)
waveletAx.imshow(zremain)
plt.axis('off')
plt.savefig(rootFolder + 'pic/BigWavelet.pdf', bbox_inches="tight", pad_inches=0)
plt.close()
''' | en | 0.526345 | # decide which model to load # load the model # Define dimensions # Define nomaliziation and decimal # Building MERA mode # define renorm fn # another renorm fn # yet another renorm fn #renormFn = lambda x: grayWorld(back01(x)) # collect parts #ul = back01(backMeanStd(batchNorm(ul, 0))) #ul = back01(clip(backMeanStd(batchNorm(ul)))) # convert zremaoin to numpy array waveletPlot = plt.figure(figsize=(8, 8)) waveletAx = waveletPlot.add_subplot(111) waveletAx.imshow(zremain) plt.axis('off') plt.savefig(rootFolder + 'pic/BigWavelet.pdf', bbox_inches="tight", pad_inches=0) plt.close() | 2.330387 | 2 |
users/views.py | markavale/mavshopv2 | 0 | 6619899 | from rest_framework.response import Response
from rest_framework import status, generics
from rest_framework.views import APIView
from .serializers import UserSerializer, CustomLoginSerializer, UserProfileSerializer
from django.contrib.auth import authenticate
from rest_framework.permissions import AllowAny, IsAuthenticated
# from dj_rest_auth.views import LoginView
from dj_rest_auth.registration.views import VerifyEmailView
from django.contrib.auth import authenticate #login
from rest_framework import viewsets, status
from django.conf import settings
User = settings.AUTH_USER_MODEL
class UserCreate(generics.CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = UserSerializer
class VerifyEmailView(VerifyEmailView):
def save(self, request, *args, **kwargs):
user = super(VerifyEmailView, self).save(request)
user.verified = True
print(user.verified)
return user
class LoginView(APIView):
permission_classes = (AllowAny)
def post(self, request, *args, **kwargs):
username = request.data.get("username")
email = request.data.get("email")
password = request.data.get("password")
user = authenticate(request, username=username, email=email, password=password)
if user:
return Response({"token": user.auth_token.key})
else:
return Response({"error": "Wrong Credentials"}, status=status.HTTP_400_BAD_REQUEST)
# class LoginUserView(LoginView): # For custom login
# serializer_class = CustomLoginSerializer
# permission_classes = [AllowAny]
# def post(self, request, *args, **kwargs):
# serializer = CustomLoginSerializer(data=request.data) # changed to desired serializer
# serializer.is_valid(raise_exception=True)
# user = serializer.validated_data['user']
# authenticate(request, user)
# return super(LoginUserView, self).post(request)
class UserViewSet(viewsets.ModelViewSet):
model = User
serializer_class = UserSerializer
permission_classes = [IsAuthenticated]
def get_object(self):
return self.request.user
def list(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class UserDetailView(APIView):
#authentication_classes = []
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
return Response({"email": request.user.email})
| from rest_framework.response import Response
from rest_framework import status, generics
from rest_framework.views import APIView
from .serializers import UserSerializer, CustomLoginSerializer, UserProfileSerializer
from django.contrib.auth import authenticate
from rest_framework.permissions import AllowAny, IsAuthenticated
# from dj_rest_auth.views import LoginView
from dj_rest_auth.registration.views import VerifyEmailView
from django.contrib.auth import authenticate #login
from rest_framework import viewsets, status
from django.conf import settings
User = settings.AUTH_USER_MODEL
class UserCreate(generics.CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = UserSerializer
class VerifyEmailView(VerifyEmailView):
def save(self, request, *args, **kwargs):
user = super(VerifyEmailView, self).save(request)
user.verified = True
print(user.verified)
return user
class LoginView(APIView):
permission_classes = (AllowAny)
def post(self, request, *args, **kwargs):
username = request.data.get("username")
email = request.data.get("email")
password = request.data.get("password")
user = authenticate(request, username=username, email=email, password=password)
if user:
return Response({"token": user.auth_token.key})
else:
return Response({"error": "Wrong Credentials"}, status=status.HTTP_400_BAD_REQUEST)
# class LoginUserView(LoginView): # For custom login
# serializer_class = CustomLoginSerializer
# permission_classes = [AllowAny]
# def post(self, request, *args, **kwargs):
# serializer = CustomLoginSerializer(data=request.data) # changed to desired serializer
# serializer.is_valid(raise_exception=True)
# user = serializer.validated_data['user']
# authenticate(request, user)
# return super(LoginUserView, self).post(request)
class UserViewSet(viewsets.ModelViewSet):
model = User
serializer_class = UserSerializer
permission_classes = [IsAuthenticated]
def get_object(self):
return self.request.user
def list(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
class UserDetailView(APIView):
#authentication_classes = []
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
return Response({"email": request.user.email})
| en | 0.304909 | # from dj_rest_auth.views import LoginView #login # class LoginUserView(LoginView): # For custom login # serializer_class = CustomLoginSerializer # permission_classes = [AllowAny] # def post(self, request, *args, **kwargs): # serializer = CustomLoginSerializer(data=request.data) # changed to desired serializer # serializer.is_valid(raise_exception=True) # user = serializer.validated_data['user'] # authenticate(request, user) # return super(LoginUserView, self).post(request) #authentication_classes = [] | 2.105836 | 2 |
grecord_service/gr/code/goldenrecord.py | qcri/data_civilizer_system | 6 | 6619900 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_goldenrecord')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_goldenrecord')
_goldenrecord = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_goldenrecord', [dirname(__file__)])
except ImportError:
import _goldenrecord
return _goldenrecord
try:
_mod = imp.load_module('_goldenrecord', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_goldenrecord = swig_import_helper()
del swig_import_helper
else:
import _goldenrecord
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
class Aggregator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Aggregator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Aggregator, name)
__repr__ = _swig_repr
__swig_getmethods__["rules"] = _goldenrecord.Aggregator_rules_get
if _newclass:
rules = _swig_property(_goldenrecord.Aggregator_rules_get)
__swig_setmethods__["ruleDir"] = _goldenrecord.Aggregator_ruleDir_set
__swig_getmethods__["ruleDir"] = _goldenrecord.Aggregator_ruleDir_get
if _newclass:
ruleDir = _swig_property(_goldenrecord.Aggregator_ruleDir_get, _goldenrecord.Aggregator_ruleDir_set)
__swig_setmethods__["ruleGroup"] = _goldenrecord.Aggregator_ruleGroup_set
__swig_getmethods__["ruleGroup"] = _goldenrecord.Aggregator_ruleGroup_get
if _newclass:
ruleGroup = _swig_property(_goldenrecord.Aggregator_ruleGroup_get, _goldenrecord.Aggregator_ruleGroup_set)
__swig_setmethods__["ruleStatus"] = _goldenrecord.Aggregator_ruleStatus_set
__swig_getmethods__["ruleStatus"] = _goldenrecord.Aggregator_ruleStatus_get
if _newclass:
ruleStatus = _swig_property(_goldenrecord.Aggregator_ruleStatus_get, _goldenrecord.Aggregator_ruleStatus_set)
__swig_setmethods__["transformations"] = _goldenrecord.Aggregator_transformations_set
__swig_getmethods__["transformations"] = _goldenrecord.Aggregator_transformations_get
if _newclass:
transformations = _swig_property(_goldenrecord.Aggregator_transformations_get, _goldenrecord.Aggregator_transformations_set)
__swig_setmethods__["structures"] = _goldenrecord.Aggregator_structures_set
__swig_getmethods__["structures"] = _goldenrecord.Aggregator_structures_get
if _newclass:
structures = _swig_property(_goldenrecord.Aggregator_structures_get, _goldenrecord.Aggregator_structures_set)
__swig_setmethods__["groupRules"] = _goldenrecord.Aggregator_groupRules_set
__swig_getmethods__["groupRules"] = _goldenrecord.Aggregator_groupRules_get
if _newclass:
groupRules = _swig_property(_goldenrecord.Aggregator_groupRules_get, _goldenrecord.Aggregator_groupRules_set)
__swig_setmethods__["format_group_cluster_ids"] = _goldenrecord.Aggregator_format_group_cluster_ids_set
__swig_getmethods__["format_group_cluster_ids"] = _goldenrecord.Aggregator_format_group_cluster_ids_get
if _newclass:
format_group_cluster_ids = _swig_property(_goldenrecord.Aggregator_format_group_cluster_ids_get, _goldenrecord.Aggregator_format_group_cluster_ids_set)
__swig_setmethods__["format_group_rule_ids"] = _goldenrecord.Aggregator_format_group_rule_ids_set
__swig_getmethods__["format_group_rule_ids"] = _goldenrecord.Aggregator_format_group_rule_ids_get
if _newclass:
format_group_rule_ids = _swig_property(_goldenrecord.Aggregator_format_group_rule_ids_get, _goldenrecord.Aggregator_format_group_rule_ids_set)
__swig_setmethods__["format_group_rules"] = _goldenrecord.Aggregator_format_group_rules_set
__swig_getmethods__["format_group_rules"] = _goldenrecord.Aggregator_format_group_rules_get
if _newclass:
format_group_rules = _swig_property(_goldenrecord.Aggregator_format_group_rules_get, _goldenrecord.Aggregator_format_group_rules_set)
__swig_setmethods__["global_const_terms"] = _goldenrecord.Aggregator_global_const_terms_set
__swig_getmethods__["global_const_terms"] = _goldenrecord.Aggregator_global_const_terms_get
if _newclass:
global_const_terms = _swig_property(_goldenrecord.Aggregator_global_const_terms_get, _goldenrecord.Aggregator_global_const_terms_set)
__swig_setmethods__["local_const_terms"] = _goldenrecord.Aggregator_local_const_terms_set
__swig_getmethods__["local_const_terms"] = _goldenrecord.Aggregator_local_const_terms_get
if _newclass:
local_const_terms = _swig_property(_goldenrecord.Aggregator_local_const_terms_get, _goldenrecord.Aggregator_local_const_terms_set)
__swig_setmethods__["cluster_sizes"] = _goldenrecord.Aggregator_cluster_sizes_set
__swig_getmethods__["cluster_sizes"] = _goldenrecord.Aggregator_cluster_sizes_get
if _newclass:
cluster_sizes = _swig_property(_goldenrecord.Aggregator_cluster_sizes_get, _goldenrecord.Aggregator_cluster_sizes_set)
__swig_setmethods__["forward_list"] = _goldenrecord.Aggregator_forward_list_set
__swig_getmethods__["forward_list"] = _goldenrecord.Aggregator_forward_list_get
if _newclass:
forward_list = _swig_property(_goldenrecord.Aggregator_forward_list_get, _goldenrecord.Aggregator_forward_list_set)
__swig_setmethods__["ordered"] = _goldenrecord.Aggregator_ordered_set
__swig_getmethods__["ordered"] = _goldenrecord.Aggregator_ordered_get
if _newclass:
ordered = _swig_property(_goldenrecord.Aggregator_ordered_get, _goldenrecord.Aggregator_ordered_set)
def __init__(self, allRules: 'vector< pair< pair< string,string >,int > > const &'):
this = _goldenrecord.new_Aggregator(allRules)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def Aggregate(self) -> "void":
return _goldenrecord.Aggregator_Aggregate(self)
def GroupAggregate(self) -> "void":
return _goldenrecord.Aggregator_GroupAggregate(self)
def AggregateStructure(self) -> "void":
return _goldenrecord.Aggregator_AggregateStructure(self)
def NoAggregatation(self) -> "void":
return _goldenrecord.Aggregator_NoAggregatation(self)
def Group(self) -> "void":
return _goldenrecord.Aggregator_Group(self)
def RandomGroup(self) -> "void":
return _goldenrecord.Aggregator_RandomGroup(self)
def CalConstantTerms(self) -> "void":
return _goldenrecord.Aggregator_CalConstantTerms(self)
__swig_destroy__ = _goldenrecord.delete_Aggregator
__del__ = lambda self: None
Aggregator_swigregister = _goldenrecord.Aggregator_swigregister
Aggregator_swigregister(Aggregator)
MAX_CSV_FILE_SIZE = _goldenrecord.MAX_CSV_FILE_SIZE
class CSVReader(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CSVReader, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CSVReader, name)
__repr__ = _swig_repr
def __init__(self):
this = _goldenrecord.new_CSVReader()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def reading(self, datafilespath: 'string &', normalize: 'bool') -> "bool":
return _goldenrecord.CSVReader_reading(self, datafilespath, normalize)
def get_max_val_len(self) -> "int":
return _goldenrecord.CSVReader_get_max_val_len(self)
__swig_destroy__ = _goldenrecord.delete_CSVReader
__del__ = lambda self: None
CSVReader_swigregister = _goldenrecord.CSVReader_swigregister
CSVReader_swigregister(CSVReader)
class Reader(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Reader, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Reader, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_setmethods__["tables"] = _goldenrecord.Reader_tables_set
__swig_getmethods__["tables"] = _goldenrecord.Reader_tables_get
if _newclass:
tables = _swig_property(_goldenrecord.Reader_tables_get, _goldenrecord.Reader_tables_set)
def strNormalize(self, s: 'std::string &') -> "void":
return _goldenrecord.Reader_strNormalize(self, s)
def reading(self, datafilespath: 'std::string &', normalize: 'bool') -> "bool":
return _goldenrecord.Reader_reading(self, datafilespath, normalize)
def get_max_val_len(self) -> "int":
return _goldenrecord.Reader_get_max_val_len(self)
__swig_destroy__ = _goldenrecord.delete_Reader
__del__ = lambda self: None
Reader_swigregister = _goldenrecord.Reader_swigregister
Reader_swigregister(Reader)
class Locator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Locator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Locator, name)
__repr__ = _swig_repr
__swig_setmethods__["id"] = _goldenrecord.Locator_id_set
__swig_getmethods__["id"] = _goldenrecord.Locator_id_get
if _newclass:
id = _swig_property(_goldenrecord.Locator_id_get, _goldenrecord.Locator_id_set)
__swig_setmethods__["beg"] = _goldenrecord.Locator_beg_set
__swig_getmethods__["beg"] = _goldenrecord.Locator_beg_get
if _newclass:
beg = _swig_property(_goldenrecord.Locator_beg_get, _goldenrecord.Locator_beg_set)
__swig_setmethods__["end"] = _goldenrecord.Locator_end_set
__swig_getmethods__["end"] = _goldenrecord.Locator_end_get
if _newclass:
end = _swig_property(_goldenrecord.Locator_end_get, _goldenrecord.Locator_end_set)
def __init__(self, v: 'int', b: 'int', e: 'int'):
this = _goldenrecord.new_Locator(v, b, e)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def __lt__(self, e: 'Locator') -> "bool":
return _goldenrecord.Locator___lt__(self, e)
def __gt__(self, e: 'Locator') -> "bool":
return _goldenrecord.Locator___gt__(self, e)
def __eq__(self, e: 'Locator') -> "bool":
return _goldenrecord.Locator___eq__(self, e)
__swig_destroy__ = _goldenrecord.delete_Locator
__del__ = lambda self: None
Locator_swigregister = _goldenrecord.Locator_swigregister
Locator_swigregister(Locator)
class LocatorHash(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, LocatorHash, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, LocatorHash, name)
__repr__ = _swig_repr
def __call__(self, o: 'Locator') -> "size_t":
return _goldenrecord.LocatorHash___call__(self, o)
def __init__(self):
this = _goldenrecord.new_LocatorHash()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _goldenrecord.delete_LocatorHash
__del__ = lambda self: None
LocatorHash_swigregister = _goldenrecord.LocatorHash_swigregister
LocatorHash_swigregister(LocatorHash)
class PairLocatorHash(_object):
    """SWIG-generated proxy for the C++ ``PairLocatorHash`` functor (hashes ``pair<Locator, Locator>``)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, PairLocatorHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, PairLocatorHash, name)
    __repr__ = _swig_repr
    def __call__(self, o: 'pair< Locator,Locator > const &') -> "size_t":
        """Delegate hashing of the pair *o* to the wrapped C++ call operator."""
        return _goldenrecord.PairLocatorHash___call__(self, o)
    def __init__(self):
        # Construct the underlying C++ object and stash its pointer (standard SWIG idiom).
        this = _goldenrecord.new_PairLocatorHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_PairLocatorHash
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
PairLocatorHash_swigregister = _goldenrecord.PairLocatorHash_swigregister
PairLocatorHash_swigregister(PairLocatorHash)
class TripleHash(_object):
    """SWIG-generated proxy for the C++ ``TripleHash`` functor (hashes ``triple`` values)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, TripleHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, TripleHash, name)
    __repr__ = _swig_repr
    def __call__(self, k: 'triple const &') -> "size_t":
        """Delegate hashing of *k* to the wrapped C++ call operator."""
        return _goldenrecord.TripleHash___call__(self, k)
    def __init__(self):
        # Construct the underlying C++ object and stash its pointer (standard SWIG idiom).
        this = _goldenrecord.new_TripleHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_TripleHash
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
TripleHash_swigregister = _goldenrecord.TripleHash_swigregister
TripleHash_swigregister(TripleHash)
class Rules(_object):
    """SWIG-generated proxy for the C++ ``Rules`` class.

    Wraps the engine that generates, ranks and applies transformation
    rules over a ``Table`` column. All attribute access below is bridged
    to the corresponding C++ struct members via SWIG getter/setter
    registration; each method call forwards directly to the extension.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Rules, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Rules, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["table"] = _goldenrecord.Rules_table_set
    __swig_getmethods__["table"] = _goldenrecord.Rules_table_get
    if _newclass:
        table = _swig_property(_goldenrecord.Rules_table_get, _goldenrecord.Rules_table_set)
    __swig_setmethods__["col_id"] = _goldenrecord.Rules_col_id_set
    __swig_getmethods__["col_id"] = _goldenrecord.Rules_col_id_get
    if _newclass:
        col_id = _swig_property(_goldenrecord.Rules_col_id_get, _goldenrecord.Rules_col_id_set)
    __swig_setmethods__["values"] = _goldenrecord.Rules_values_set
    __swig_getmethods__["values"] = _goldenrecord.Rules_values_get
    if _newclass:
        values = _swig_property(_goldenrecord.Rules_values_get, _goldenrecord.Rules_values_set)
    __swig_setmethods__["clusters"] = _goldenrecord.Rules_clusters_set
    __swig_getmethods__["clusters"] = _goldenrecord.Rules_clusters_get
    if _newclass:
        clusters = _swig_property(_goldenrecord.Rules_clusters_get, _goldenrecord.Rules_clusters_set)
    __swig_setmethods__["id_mappings"] = _goldenrecord.Rules_id_mappings_set
    __swig_getmethods__["id_mappings"] = _goldenrecord.Rules_id_mappings_get
    if _newclass:
        id_mappings = _swig_property(_goldenrecord.Rules_id_mappings_get, _goldenrecord.Rules_id_mappings_set)
    __swig_setmethods__["counts"] = _goldenrecord.Rules_counts_set
    __swig_getmethods__["counts"] = _goldenrecord.Rules_counts_get
    if _newclass:
        counts = _swig_property(_goldenrecord.Rules_counts_get, _goldenrecord.Rules_counts_set)
    __swig_setmethods__["row_to_cluster"] = _goldenrecord.Rules_row_to_cluster_set
    __swig_getmethods__["row_to_cluster"] = _goldenrecord.Rules_row_to_cluster_get
    if _newclass:
        row_to_cluster = _swig_property(_goldenrecord.Rules_row_to_cluster_get, _goldenrecord.Rules_row_to_cluster_set)
    __swig_setmethods__["rule_types"] = _goldenrecord.Rules_rule_types_set
    __swig_getmethods__["rule_types"] = _goldenrecord.Rules_rule_types_get
    if _newclass:
        rule_types = _swig_property(_goldenrecord.Rules_rule_types_get, _goldenrecord.Rules_rule_types_set)
    # Read-only from Python: only getters are exported for these two.
    __swig_getmethods__["enable_auto_confirm_rules"] = _goldenrecord.Rules_enable_auto_confirm_rules_get
    if _newclass:
        enable_auto_confirm_rules = _swig_property(_goldenrecord.Rules_enable_auto_confirm_rules_get)
    __swig_getmethods__["token_delim"] = _goldenrecord.Rules_token_delim_get
    if _newclass:
        token_delim = _swig_property(_goldenrecord.Rules_token_delim_get)
    __swig_setmethods__["op_log"] = _goldenrecord.Rules_op_log_set
    __swig_getmethods__["op_log"] = _goldenrecord.Rules_op_log_get
    if _newclass:
        op_log = _swig_property(_goldenrecord.Rules_op_log_get, _goldenrecord.Rules_op_log_set)
    __swig_setmethods__["termRules"] = _goldenrecord.Rules_termRules_set
    __swig_getmethods__["termRules"] = _goldenrecord.Rules_termRules_get
    if _newclass:
        termRules = _swig_property(_goldenrecord.Rules_termRules_get, _goldenrecord.Rules_termRules_set)
    __swig_setmethods__["rule_locations"] = _goldenrecord.Rules_rule_locations_set
    __swig_getmethods__["rule_locations"] = _goldenrecord.Rules_rule_locations_get
    if _newclass:
        rule_locations = _swig_property(_goldenrecord.Rules_rule_locations_get, _goldenrecord.Rules_rule_locations_set)
    __swig_setmethods__["termRulesApplicable"] = _goldenrecord.Rules_termRulesApplicable_set
    __swig_getmethods__["termRulesApplicable"] = _goldenrecord.Rules_termRulesApplicable_get
    if _newclass:
        termRulesApplicable = _swig_property(_goldenrecord.Rules_termRulesApplicable_get, _goldenrecord.Rules_termRulesApplicable_set)
    __swig_setmethods__["valTokens"] = _goldenrecord.Rules_valTokens_set
    __swig_getmethods__["valTokens"] = _goldenrecord.Rules_valTokens_get
    if _newclass:
        valTokens = _swig_property(_goldenrecord.Rules_valTokens_get, _goldenrecord.Rules_valTokens_set)
    __swig_setmethods__["invIndex"] = _goldenrecord.Rules_invIndex_set
    __swig_getmethods__["invIndex"] = _goldenrecord.Rules_invIndex_get
    if _newclass:
        invIndex = _swig_property(_goldenrecord.Rules_invIndex_get, _goldenrecord.Rules_invIndex_set)
    def __init__(self, t: 'Table', cid: 'int', v: 'vector< std::string > const &', c: 'vector< vector< int > > const &', m: 'vector< vector< int > > const &', f: 'vector< int > const &', rtype: 'int', confirm: 'bool'):
        """Construct the C++ Rules object over table *t*, column *cid*."""
        this = _goldenrecord.new_Rules(t, cid, v, c, m, f, rtype, confirm)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def GenerateRules(self) -> "void":
        """Forward to the C++ rule-generation routine."""
        return _goldenrecord.Rules_GenerateRules(self)
    def RankRules(self, rules: 'unordered_map< pair< std::string,std::string >,int,pair_hash > &', tops: 'vector< pair< pair< std::string,std::string >,int > > &', limit: 'int'=10) -> "void":
        """Rank candidate rules, filling *tops* with at most *limit* entries."""
        return _goldenrecord.Rules_RankRules(self, rules, tops, limit)
    def ApplyGroupRuleComplex(self, rules: 'vector< pair< pair< std::string,std::string >,int > > const &', ruleDir: 'vector< bool > const &', ruleGroup: 'vector< int > const &', ruleStatus: 'vector< int > &', groupRules: 'vector< vector< int > > const &', groupStatus: 'vector< int > &', transformations: 'vector< Path > const &', modification: 'vector< vector< tuple< int,int,int > > > &', max_group_id: 'int', tmp: 'std::string') -> "bool":
        """Apply a grouped rule set in the C++ engine; returns the engine's bool result."""
        return _goldenrecord.Rules_ApplyGroupRuleComplex(self, rules, ruleDir, ruleGroup, ruleStatus, groupRules, groupStatus, transformations, modification, max_group_id, tmp)
    def ShowNextCluster(self, rules: 'vector< pair< pair< std::string,std::string >,int > > &', ruleDir: 'vector< bool > &', ruleGroup: 'vector< int > &', ruleStatus: 'vector< int > &', groupRules: 'vector< vector< int > > &', groupStatus: 'vector< int > &', transformations: 'vector< Path > &', structures: 'vector< pair< std::string,std::string > > &', modification: 'vector< vector< tuple< int,int,int > > > &') -> "std::string":
        """Advance to the next cluster in the C++ engine; returns its string result."""
        return _goldenrecord.Rules_ShowNextCluster(self, rules, ruleDir, ruleGroup, ruleStatus, groupRules, groupStatus, transformations, structures, modification)
    def ApplyRule(self, rules: 'vector< pair< pair< std::string,std::string >,int > > &', ruleDir: 'vector< bool > &', ruleGroup: 'vector< int > &', ruleStatus: 'vector< int > &', groupRules: 'vector< vector< int > > &', groupStatus: 'vector< int > &', transformations: 'vector< Path > &', structures: 'vector< pair< std::string,std::string > > &', modification: 'vector< vector< tuple< int,int,int > > > &', max_group_id: 'int', tmp: 'std::string') -> "bool":
        """Apply a single rule in the C++ engine; returns the engine's bool result."""
        return _goldenrecord.Rules_ApplyRule(self, rules, ruleDir, ruleGroup, ruleStatus, groupRules, groupStatus, transformations, structures, modification, max_group_id, tmp)
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_Rules
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
Rules_swigregister = _goldenrecord.Rules_swigregister
Rules_swigregister(Rules)
# Module-level constants re-exported from the C++ extension.
# Names suggest scoring weights (…_SCORE, NEG_INF) and token/regex
# category codes (PROPER_CASE … SPECIAL_CASE_FOR_DAG_EDGE); the actual
# values live in _goldenrecord.
SUBSTR_SCORE = _goldenrecord.SUBSTR_SCORE
CPOS_SCORE = _goldenrecord.CPOS_SCORE
CONSTR_SCORE = _goldenrecord.CONSTR_SCORE
CONSTANT_TOKEN_RATIO = _goldenrecord.CONSTANT_TOKEN_RATIO
NEG_INF = _goldenrecord.NEG_INF
REGEX_SIZE = _goldenrecord.REGEX_SIZE
ALL_REGEX_SIZE = _goldenrecord.ALL_REGEX_SIZE
PROPER_CASE = _goldenrecord.PROPER_CASE
CAPS = _goldenrecord.CAPS
LOWER_CASE = _goldenrecord.LOWER_CASE
DIGITS = _goldenrecord.DIGITS
ALPHABETS = _goldenrecord.ALPHABETS
ALPHANUMERIC = _goldenrecord.ALPHANUMERIC
WHITESPACE = _goldenrecord.WHITESPACE
PUNCT = _goldenrecord.PUNCT
CONSTANT = _goldenrecord.CONSTANT
START_T = _goldenrecord.START_T
END_T = _goldenrecord.END_T
SPECIAL_CASE_FOR_DAG_EDGE = _goldenrecord.SPECIAL_CASE_FOR_DAG_EDGE
class VLabel(_object):
    """SWIG-generated proxy for the C++ ``VLabel`` struct (a vertex label: pid + position)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, VLabel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, VLabel, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["pid"] = _goldenrecord.VLabel_pid_set
    __swig_getmethods__["pid"] = _goldenrecord.VLabel_pid_get
    if _newclass:
        pid = _swig_property(_goldenrecord.VLabel_pid_get, _goldenrecord.VLabel_pid_set)
    __swig_setmethods__["pos"] = _goldenrecord.VLabel_pos_set
    __swig_getmethods__["pos"] = _goldenrecord.VLabel_pos_get
    if _newclass:
        pos = _swig_property(_goldenrecord.VLabel_pos_get, _goldenrecord.VLabel_pos_set)
    def __init__(self, id: 'int', p: 'int'):
        # Construct the underlying C++ object and stash its pointer (standard SWIG idiom).
        this = _goldenrecord.new_VLabel(id, p)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_VLabel
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
VLabel_swigregister = _goldenrecord.VLabel_swigregister
VLabel_swigregister(VLabel)
# cvar is SWIG's standard container for the extension's global C++ variables;
# the names below are re-exported at module level for convenience.
cvar = _goldenrecord.cvar
regexes = cvar.regexes
regex_str = cvar.regex_str
start_special_token = cvar.start_special_token
end_special_token = cvar.end_special_token
class Vertex(_object):
    """SWIG-generated proxy for the C++ ``Vertex`` struct (an id plus its labels)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Vertex, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Vertex, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["vertex_id"] = _goldenrecord.Vertex_vertex_id_set
    __swig_getmethods__["vertex_id"] = _goldenrecord.Vertex_vertex_id_get
    if _newclass:
        vertex_id = _swig_property(_goldenrecord.Vertex_vertex_id_get, _goldenrecord.Vertex_vertex_id_set)
    __swig_setmethods__["labels"] = _goldenrecord.Vertex_labels_set
    __swig_getmethods__["labels"] = _goldenrecord.Vertex_labels_get
    if _newclass:
        labels = _swig_property(_goldenrecord.Vertex_labels_get, _goldenrecord.Vertex_labels_set)
    def __init__(self, *args):
        # *args is dispatched by SWIG to one of the overloaded C++ constructors.
        this = _goldenrecord.new_Vertex(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_Vertex
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
Vertex_swigregister = _goldenrecord.Vertex_swigregister
Vertex_swigregister(Vertex)
class ELabel(_object):
    """SWIG-generated proxy for the C++ ``ELabel`` struct (an edge label: regex, index, constr).

    NOTE(review): __eq__ is exported without __hash__, so on Python 3 these
    proxies are unhashable by default — confirm against SWIG's generated
    behavior before relying on hashing ELabel instances.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ELabel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ELabel, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["regex"] = _goldenrecord.ELabel_regex_set
    __swig_getmethods__["regex"] = _goldenrecord.ELabel_regex_get
    if _newclass:
        regex = _swig_property(_goldenrecord.ELabel_regex_get, _goldenrecord.ELabel_regex_set)
    __swig_setmethods__["index"] = _goldenrecord.ELabel_index_set
    __swig_getmethods__["index"] = _goldenrecord.ELabel_index_get
    if _newclass:
        index = _swig_property(_goldenrecord.ELabel_index_get, _goldenrecord.ELabel_index_set)
    __swig_setmethods__["constr"] = _goldenrecord.ELabel_constr_set
    __swig_getmethods__["constr"] = _goldenrecord.ELabel_constr_get
    if _newclass:
        constr = _swig_property(_goldenrecord.ELabel_constr_get, _goldenrecord.ELabel_constr_set)
    def __init__(self, *args):
        # *args is dispatched by SWIG to one of the overloaded C++ constructors.
        this = _goldenrecord.new_ELabel(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Comparison operators forward to the C++ implementations.
    def __eq__(self, o: 'ELabel') -> "bool":
        return _goldenrecord.ELabel___eq__(self, o)
    def __gt__(self, p: 'ELabel') -> "bool":
        return _goldenrecord.ELabel___gt__(self, p)
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_ELabel
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
ELabel_swigregister = _goldenrecord.ELabel_swigregister
ELabel_swigregister(ELabel)
class Edge(_object):
    """SWIG-generated proxy for the C++ ``Edge`` struct (src/end vertex ids plus labels)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Edge, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Edge, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["src_id"] = _goldenrecord.Edge_src_id_set
    __swig_getmethods__["src_id"] = _goldenrecord.Edge_src_id_get
    if _newclass:
        src_id = _swig_property(_goldenrecord.Edge_src_id_get, _goldenrecord.Edge_src_id_set)
    __swig_setmethods__["end_id"] = _goldenrecord.Edge_end_id_set
    __swig_getmethods__["end_id"] = _goldenrecord.Edge_end_id_get
    if _newclass:
        end_id = _swig_property(_goldenrecord.Edge_end_id_get, _goldenrecord.Edge_end_id_set)
    __swig_setmethods__["labels"] = _goldenrecord.Edge_labels_set
    __swig_getmethods__["labels"] = _goldenrecord.Edge_labels_get
    if _newclass:
        labels = _swig_property(_goldenrecord.Edge_labels_get, _goldenrecord.Edge_labels_set)
    def __init__(self, *args):
        # *args is dispatched by SWIG to one of the overloaded C++ constructors.
        this = _goldenrecord.new_Edge(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_Edge
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
Edge_swigregister = _goldenrecord.Edge_swigregister
Edge_swigregister(Edge)
class Pos(_object):
    """SWIG-generated proxy for the C++ ``Pos`` struct (a label with a direction)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Pos, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Pos, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["label"] = _goldenrecord.Pos_label_set
    __swig_getmethods__["label"] = _goldenrecord.Pos_label_get
    if _newclass:
        label = _swig_property(_goldenrecord.Pos_label_get, _goldenrecord.Pos_label_set)
    __swig_setmethods__["direction"] = _goldenrecord.Pos_direction_set
    __swig_getmethods__["direction"] = _goldenrecord.Pos_direction_get
    if _newclass:
        direction = _swig_property(_goldenrecord.Pos_direction_get, _goldenrecord.Pos_direction_set)
    def __init__(self, *args):
        # *args is dispatched by SWIG to one of the overloaded C++ constructors.
        this = _goldenrecord.new_Pos(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Comparison operators forward to the C++ implementations.
    def __eq__(self, o: 'Pos') -> "bool":
        return _goldenrecord.Pos___eq__(self, o)
    def __ne__(self, o: 'Pos') -> "bool":
        return _goldenrecord.Pos___ne__(self, o)
    def __gt__(self, p: 'Pos') -> "bool":
        return _goldenrecord.Pos___gt__(self, p)
    def __lt__(self, p: 'Pos') -> "bool":
        return _goldenrecord.Pos___lt__(self, p)
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_Pos
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
Pos_swigregister = _goldenrecord.Pos_swigregister
Pos_swigregister(Pos)
class ELabelHash(_object):
    """SWIG-generated proxy for the C++ ``ELabelHash`` functor (hashes ``ELabel`` objects)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ELabelHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ELabelHash, name)
    __repr__ = _swig_repr
    def __call__(self, o: 'ELabel') -> "size_t":
        """Delegate hashing of *o* to the wrapped C++ call operator."""
        return _goldenrecord.ELabelHash___call__(self, o)
    def __init__(self):
        # Construct the underlying C++ object and stash its pointer (standard SWIG idiom).
        this = _goldenrecord.new_ELabelHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_ELabelHash
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
ELabelHash_swigregister = _goldenrecord.ELabelHash_swigregister
ELabelHash_swigregister(ELabelHash)
class Graph(_object):
    """SWIG-generated proxy for the C++ ``Graph`` class.

    Holds nodes, edges, per-node adjacency (``node2edge`` /
    ``rev_node2edge``) and label maps; methods forward to the C++
    implementation in ``_goldenrecord``.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Graph, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Graph, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["edges"] = _goldenrecord.Graph_edges_set
    __swig_getmethods__["edges"] = _goldenrecord.Graph_edges_get
    if _newclass:
        edges = _swig_property(_goldenrecord.Graph_edges_get, _goldenrecord.Graph_edges_set)
    __swig_setmethods__["nodes"] = _goldenrecord.Graph_nodes_set
    __swig_getmethods__["nodes"] = _goldenrecord.Graph_nodes_get
    if _newclass:
        nodes = _swig_property(_goldenrecord.Graph_nodes_get, _goldenrecord.Graph_nodes_set)
    __swig_setmethods__["all_pos"] = _goldenrecord.Graph_all_pos_set
    __swig_getmethods__["all_pos"] = _goldenrecord.Graph_all_pos_get
    if _newclass:
        all_pos = _swig_property(_goldenrecord.Graph_all_pos_get, _goldenrecord.Graph_all_pos_set)
    __swig_setmethods__["node2edge"] = _goldenrecord.Graph_node2edge_set
    __swig_getmethods__["node2edge"] = _goldenrecord.Graph_node2edge_get
    if _newclass:
        node2edge = _swig_property(_goldenrecord.Graph_node2edge_get, _goldenrecord.Graph_node2edge_set)
    __swig_setmethods__["rev_node2edge"] = _goldenrecord.Graph_rev_node2edge_set
    __swig_getmethods__["rev_node2edge"] = _goldenrecord.Graph_rev_node2edge_get
    if _newclass:
        rev_node2edge = _swig_property(_goldenrecord.Graph_rev_node2edge_get, _goldenrecord.Graph_rev_node2edge_set)
    __swig_setmethods__["umap_labels"] = _goldenrecord.Graph_umap_labels_set
    __swig_getmethods__["umap_labels"] = _goldenrecord.Graph_umap_labels_get
    if _newclass:
        umap_labels = _swig_property(_goldenrecord.Graph_umap_labels_get, _goldenrecord.Graph_umap_labels_set)
    # Methods forward directly to the C++ implementations.
    def UmapIndex(self) -> "void":
        return _goldenrecord.Graph_UmapIndex(self)
    def GetMatchId(self, str: 'std::string const &', mid: 'int &', r: 'Regex const', exp: 'regex const &', match_map: 'unordered_map< pair< int,int >,vector< ELabel >,pair_hash > &') -> "void":
        return _goldenrecord.Graph_GetMatchId(self, str, mid, r, exp, match_map)
    def GenGraphStr(self, *args) -> "void":
        # Overloaded in C++; SWIG dispatches on *args.
        return _goldenrecord.Graph_GenGraphStr(self, *args)
    def getPosList(self, posList: 'vector< vector< Pos > > &') -> "void":
        return _goldenrecord.Graph_getPosList(self, posList)
    def getAdjacentMatrix(self) -> "void":
        return _goldenrecord.Graph_getAdjacentMatrix(self)
    def TestContainment(self, path: 'vector< pair< Pos,Pos > > const &', target: 'std::string const &', source: 'std::string const &') -> "bool":
        return _goldenrecord.Graph_TestContainment(self, path, target, source)
    def __init__(self):
        # Construct the underlying C++ object and stash its pointer (standard SWIG idiom).
        this = _goldenrecord.new_Graph()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_Graph
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
Graph_swigregister = _goldenrecord.Graph_swigregister
Graph_swigregister(Graph)
class Elem(_object):
    """SWIG-generated proxy for the C++ ``Elem`` struct (pid plus a [beg, end) span)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Elem, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Elem, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["pid"] = _goldenrecord.Elem_pid_set
    __swig_getmethods__["pid"] = _goldenrecord.Elem_pid_get
    if _newclass:
        pid = _swig_property(_goldenrecord.Elem_pid_get, _goldenrecord.Elem_pid_set)
    __swig_setmethods__["beg"] = _goldenrecord.Elem_beg_set
    __swig_getmethods__["beg"] = _goldenrecord.Elem_beg_get
    if _newclass:
        beg = _swig_property(_goldenrecord.Elem_beg_get, _goldenrecord.Elem_beg_set)
    __swig_setmethods__["end"] = _goldenrecord.Elem_end_set
    __swig_getmethods__["end"] = _goldenrecord.Elem_end_get
    if _newclass:
        end = _swig_property(_goldenrecord.Elem_end_get, _goldenrecord.Elem_end_set)
    def __init__(self, r: 'int', b: 'int', e: 'int'):
        # Construct the underlying C++ object and stash its pointer (standard SWIG idiom).
        this = _goldenrecord.new_Elem(r, b, e)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Comparison operators forward to the C++ implementations.
    def __lt__(self, e: 'Elem') -> "bool":
        return _goldenrecord.Elem___lt__(self, e)
    def __gt__(self, e: 'Elem') -> "bool":
        return _goldenrecord.Elem___gt__(self, e)
    def __eq__(self, e: 'Elem') -> "bool":
        return _goldenrecord.Elem___eq__(self, e)
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_Elem
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
Elem_swigregister = _goldenrecord.Elem_swigregister
Elem_swigregister(Elem)
class DagEdge(_object):
    """SWIG-generated proxy for the C++ ``DagEdge`` struct.

    An edge of the transformation DAG: endpoint node ids, a constant
    string, prefix/suffix/infix candidates, position lists and a weight.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DagEdge, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DagEdge, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["src_id"] = _goldenrecord.DagEdge_src_id_set
    __swig_getmethods__["src_id"] = _goldenrecord.DagEdge_src_id_get
    if _newclass:
        src_id = _swig_property(_goldenrecord.DagEdge_src_id_get, _goldenrecord.DagEdge_src_id_set)
    __swig_setmethods__["end_id"] = _goldenrecord.DagEdge_end_id_set
    __swig_getmethods__["end_id"] = _goldenrecord.DagEdge_end_id_get
    if _newclass:
        end_id = _swig_property(_goldenrecord.DagEdge_end_id_get, _goldenrecord.DagEdge_end_id_set)
    __swig_setmethods__["constant"] = _goldenrecord.DagEdge_constant_set
    __swig_getmethods__["constant"] = _goldenrecord.DagEdge_constant_get
    if _newclass:
        constant = _swig_property(_goldenrecord.DagEdge_constant_get, _goldenrecord.DagEdge_constant_set)
    __swig_setmethods__["prefixes"] = _goldenrecord.DagEdge_prefixes_set
    __swig_getmethods__["prefixes"] = _goldenrecord.DagEdge_prefixes_get
    if _newclass:
        prefixes = _swig_property(_goldenrecord.DagEdge_prefixes_get, _goldenrecord.DagEdge_prefixes_set)
    __swig_setmethods__["suffixes"] = _goldenrecord.DagEdge_suffixes_set
    __swig_getmethods__["suffixes"] = _goldenrecord.DagEdge_suffixes_get
    if _newclass:
        suffixes = _swig_property(_goldenrecord.DagEdge_suffixes_get, _goldenrecord.DagEdge_suffixes_set)
    __swig_setmethods__["inffixes"] = _goldenrecord.DagEdge_inffixes_set
    __swig_getmethods__["inffixes"] = _goldenrecord.DagEdge_inffixes_get
    if _newclass:
        inffixes = _swig_property(_goldenrecord.DagEdge_inffixes_get, _goldenrecord.DagEdge_inffixes_set)
    __swig_setmethods__["const_pos"] = _goldenrecord.DagEdge_const_pos_set
    __swig_getmethods__["const_pos"] = _goldenrecord.DagEdge_const_pos_get
    if _newclass:
        const_pos = _swig_property(_goldenrecord.DagEdge_const_pos_get, _goldenrecord.DagEdge_const_pos_set)
    __swig_setmethods__["input_pos"] = _goldenrecord.DagEdge_input_pos_set
    __swig_getmethods__["input_pos"] = _goldenrecord.DagEdge_input_pos_get
    if _newclass:
        input_pos = _swig_property(_goldenrecord.DagEdge_input_pos_get, _goldenrecord.DagEdge_input_pos_set)
    __swig_setmethods__["weight"] = _goldenrecord.DagEdge_weight_set
    __swig_getmethods__["weight"] = _goldenrecord.DagEdge_weight_get
    if _newclass:
        weight = _swig_property(_goldenrecord.DagEdge_weight_get, _goldenrecord.DagEdge_weight_set)
    def __init__(self, *args):
        # *args is dispatched by SWIG to one of the overloaded C++ constructors.
        this = _goldenrecord.new_DagEdge(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_DagEdge
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
DagEdge_swigregister = _goldenrecord.DagEdge_swigregister
DagEdge_swigregister(DagEdge)
class DAGraph(_object):
    """SWIG-generated proxy for the C++ ``DAGraph`` class.

    A directed acyclic graph built from an input/output string pair for
    one rule (``rule_id``), with designated start/end nodes, an edge
    adjacency table and size counters.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DAGraph, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DAGraph, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["nodes"] = _goldenrecord.DAGraph_nodes_set
    __swig_getmethods__["nodes"] = _goldenrecord.DAGraph_nodes_get
    if _newclass:
        nodes = _swig_property(_goldenrecord.DAGraph_nodes_get, _goldenrecord.DAGraph_nodes_set)
    __swig_setmethods__["edges"] = _goldenrecord.DAGraph_edges_set
    __swig_getmethods__["edges"] = _goldenrecord.DAGraph_edges_get
    if _newclass:
        edges = _swig_property(_goldenrecord.DAGraph_edges_get, _goldenrecord.DAGraph_edges_set)
    __swig_setmethods__["rule_id"] = _goldenrecord.DAGraph_rule_id_set
    __swig_getmethods__["rule_id"] = _goldenrecord.DAGraph_rule_id_get
    if _newclass:
        rule_id = _swig_property(_goldenrecord.DAGraph_rule_id_get, _goldenrecord.DAGraph_rule_id_set)
    __swig_setmethods__["input"] = _goldenrecord.DAGraph_input_set
    __swig_getmethods__["input"] = _goldenrecord.DAGraph_input_get
    if _newclass:
        input = _swig_property(_goldenrecord.DAGraph_input_get, _goldenrecord.DAGraph_input_set)
    __swig_setmethods__["output"] = _goldenrecord.DAGraph_output_set
    __swig_getmethods__["output"] = _goldenrecord.DAGraph_output_get
    if _newclass:
        output = _swig_property(_goldenrecord.DAGraph_output_get, _goldenrecord.DAGraph_output_set)
    __swig_setmethods__["input_graph"] = _goldenrecord.DAGraph_input_graph_set
    __swig_getmethods__["input_graph"] = _goldenrecord.DAGraph_input_graph_get
    if _newclass:
        input_graph = _swig_property(_goldenrecord.DAGraph_input_graph_get, _goldenrecord.DAGraph_input_graph_set)
    __swig_setmethods__["starting_node_id"] = _goldenrecord.DAGraph_starting_node_id_set
    __swig_getmethods__["starting_node_id"] = _goldenrecord.DAGraph_starting_node_id_get
    if _newclass:
        starting_node_id = _swig_property(_goldenrecord.DAGraph_starting_node_id_get, _goldenrecord.DAGraph_starting_node_id_set)
    __swig_setmethods__["ending_node_id"] = _goldenrecord.DAGraph_ending_node_id_set
    __swig_getmethods__["ending_node_id"] = _goldenrecord.DAGraph_ending_node_id_get
    if _newclass:
        ending_node_id = _swig_property(_goldenrecord.DAGraph_ending_node_id_get, _goldenrecord.DAGraph_ending_node_id_set)
    __swig_setmethods__["dag_edge_adj"] = _goldenrecord.DAGraph_dag_edge_adj_set
    __swig_getmethods__["dag_edge_adj"] = _goldenrecord.DAGraph_dag_edge_adj_get
    if _newclass:
        dag_edge_adj = _swig_property(_goldenrecord.DAGraph_dag_edge_adj_get, _goldenrecord.DAGraph_dag_edge_adj_set)
    __swig_setmethods__["graphSize"] = _goldenrecord.DAGraph_graphSize_set
    __swig_getmethods__["graphSize"] = _goldenrecord.DAGraph_graphSize_get
    if _newclass:
        graphSize = _swig_property(_goldenrecord.DAGraph_graphSize_get, _goldenrecord.DAGraph_graphSize_set)
    __swig_setmethods__["nodeSize"] = _goldenrecord.DAGraph_nodeSize_set
    __swig_getmethods__["nodeSize"] = _goldenrecord.DAGraph_nodeSize_get
    if _newclass:
        nodeSize = _swig_property(_goldenrecord.DAGraph_nodeSize_get, _goldenrecord.DAGraph_nodeSize_set)
    __swig_setmethods__["edgeSize"] = _goldenrecord.DAGraph_edgeSize_set
    __swig_getmethods__["edgeSize"] = _goldenrecord.DAGraph_edgeSize_get
    if _newclass:
        edgeSize = _swig_property(_goldenrecord.DAGraph_edgeSize_get, _goldenrecord.DAGraph_edgeSize_set)
    def __init__(self, arg2: 'std::string const &', out: 'std::string const &', id: 'int const', valid_terms: 'unordered_map< std::string,pair< double,int > > const &'):
        """Construct the DAG for an input/output string pair and rule *id*."""
        this = _goldenrecord.new_DAGraph(arg2, out, id, valid_terms)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def DynamicRanking(self, path: 'Path', synsizer: 'Synthesizer') -> "void":
        """Forward to the C++ dynamic ranking over *path* using *synsizer*."""
        return _goldenrecord.DAGraph_DynamicRanking(self, path, synsizer)
    def getGraphSize(self) -> "uint64_t":
        """Return the graph's size as reported by the C++ implementation."""
        return _goldenrecord.DAGraph_getGraphSize(self)
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_DAGraph
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
DAGraph_swigregister = _goldenrecord.DAGraph_swigregister
DAGraph_swigregister(DAGraph)
class Synthesizer(_object):
    """SWIG-generated proxy for the C++ ``Synthesizer`` class.

    Builds and queries the indexes (constant/position/substring/prefix/
    suffix) used to synthesize and aggregate transformation programs
    over a set of DAGs.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Synthesizer, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Synthesizer, name)
    __repr__ = _swig_repr
    # Read/write attributes bridged to the C++ members.
    __swig_setmethods__["constr_index"] = _goldenrecord.Synthesizer_constr_index_set
    __swig_getmethods__["constr_index"] = _goldenrecord.Synthesizer_constr_index_get
    if _newclass:
        constr_index = _swig_property(_goldenrecord.Synthesizer_constr_index_get, _goldenrecord.Synthesizer_constr_index_set)
    __swig_setmethods__["cpos_index"] = _goldenrecord.Synthesizer_cpos_index_set
    __swig_getmethods__["cpos_index"] = _goldenrecord.Synthesizer_cpos_index_get
    if _newclass:
        cpos_index = _swig_property(_goldenrecord.Synthesizer_cpos_index_get, _goldenrecord.Synthesizer_cpos_index_set)
    __swig_setmethods__["substr_index"] = _goldenrecord.Synthesizer_substr_index_set
    __swig_getmethods__["substr_index"] = _goldenrecord.Synthesizer_substr_index_get
    if _newclass:
        substr_index = _swig_property(_goldenrecord.Synthesizer_substr_index_get, _goldenrecord.Synthesizer_substr_index_set)
    __swig_setmethods__["prefix_index"] = _goldenrecord.Synthesizer_prefix_index_set
    __swig_getmethods__["prefix_index"] = _goldenrecord.Synthesizer_prefix_index_get
    if _newclass:
        prefix_index = _swig_property(_goldenrecord.Synthesizer_prefix_index_get, _goldenrecord.Synthesizer_prefix_index_set)
    __swig_setmethods__["suffix_index"] = _goldenrecord.Synthesizer_suffix_index_set
    __swig_getmethods__["suffix_index"] = _goldenrecord.Synthesizer_suffix_index_get
    if _newclass:
        suffix_index = _swig_property(_goldenrecord.Synthesizer_suffix_index_get, _goldenrecord.Synthesizer_suffix_index_set)
    __swig_setmethods__["dags"] = _goldenrecord.Synthesizer_dags_set
    __swig_getmethods__["dags"] = _goldenrecord.Synthesizer_dags_get
    if _newclass:
        dags = _swig_property(_goldenrecord.Synthesizer_dags_get, _goldenrecord.Synthesizer_dags_set)
    __swig_setmethods__["thresholds"] = _goldenrecord.Synthesizer_thresholds_set
    __swig_getmethods__["thresholds"] = _goldenrecord.Synthesizer_thresholds_get
    if _newclass:
        thresholds = _swig_property(_goldenrecord.Synthesizer_thresholds_get, _goldenrecord.Synthesizer_thresholds_set)
    # Read-only from Python: only a getter is exported for "rules".
    __swig_getmethods__["rules"] = _goldenrecord.Synthesizer_rules_get
    if _newclass:
        rules = _swig_property(_goldenrecord.Synthesizer_rules_get)
    def __init__(self, r: 'vector< pair< pair< std::string,std::string >,int > > const &', valid_terms: 'unordered_map< std::string,pair< double,int > > const &'):
        """Construct the synthesizer from rule pairs *r* and *valid_terms*."""
        this = _goldenrecord.new_Synthesizer(r, valid_terms)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def Indexing(self, valid_terms: 'unordered_map< std::string,pair< double,int > > const &') -> "void":
        """Build the C++ indexes from *valid_terms*."""
        return _goldenrecord.Synthesizer_Indexing(self, valid_terms)
    def InvIndex(self, dags: 'vector< DAGraph > const &') -> "void":
        """Build the inverted index over *dags* in the C++ engine."""
        return _goldenrecord.Synthesizer_InvIndex(self, dags)
    def SynAggregating(self, groups: 'unordered_map< Path,vector< int >,PathHash > &', rule_id_maps: 'vector< int > const &') -> "void":
        """Aggregate synthesized paths into *groups* in the C++ engine."""
        return _goldenrecord.Synthesizer_SynAggregating(self, groups, rule_id_maps)
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_Synthesizer
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
Synthesizer_swigregister = _goldenrecord.Synthesizer_swigregister
Synthesizer_swigregister(Synthesizer)
class Path(_object):
    """SWIG-generated proxy for the C++ ``Path`` struct (a sequence of DAG positions)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Path, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Path, name)
    __repr__ = _swig_repr
    # Read/write attribute bridged to the C++ member.
    __swig_setmethods__["path"] = _goldenrecord.Path_path_set
    __swig_getmethods__["path"] = _goldenrecord.Path_path_get
    if _newclass:
        path = _swig_property(_goldenrecord.Path_path_get, _goldenrecord.Path_path_set)
    # Comparison operators forward to the C++ implementations.
    def __eq__(self, p2: 'Path') -> "bool":
        return _goldenrecord.Path___eq__(self, p2)
    def __ne__(self, p2: 'Path') -> "bool":
        return _goldenrecord.Path___ne__(self, p2)
    def __lt__(self, p2: 'Path') -> "bool":
        return _goldenrecord.Path___lt__(self, p2)
    def __gt__(self, p2: 'Path') -> "bool":
        return _goldenrecord.Path___gt__(self, p2)
    def __init__(self):
        # Construct the underlying C++ object and stash its pointer (standard SWIG idiom).
        this = _goldenrecord.new_Path()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_Path
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
Path_swigregister = _goldenrecord.Path_swigregister
Path_swigregister(Path)
class PathHash(_object):
    """SWIG-generated proxy for the C++ ``PathHash`` functor (hashes ``Path`` objects)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, PathHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, PathHash, name)
    __repr__ = _swig_repr
    def __call__(self, p: 'Path') -> "size_t":
        """Delegate hashing of *p* to the wrapped C++ call operator."""
        return _goldenrecord.PathHash___call__(self, p)
    def __init__(self):
        # Construct the underlying C++ object and stash its pointer (standard SWIG idiom).
        this = _goldenrecord.new_PathHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destruction is routed through the C++ destructor; __del__ itself is a no-op.
    __swig_destroy__ = _goldenrecord.delete_PathHash
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime type system.
PathHash_swigregister = _goldenrecord.PathHash_swigregister
PathHash_swigregister(PathHash)
class Table(_object):
    """SWIG proxy for the C++ ``Table`` type: an in-memory table with schema, rows, and
    per-column profiling flags (uniqueness, key candidacy, null statistics)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Table, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Table, name)
    __repr__ = _swig_repr
    # The triplets below each wire one C++ data member into the proxy's
    # getter/setter tables and (on new-style classes) expose it as a property.
    __swig_setmethods__["tid"] = _goldenrecord.Table_tid_set
    __swig_getmethods__["tid"] = _goldenrecord.Table_tid_get
    if _newclass:
        tid = _swig_property(_goldenrecord.Table_tid_get, _goldenrecord.Table_tid_set)
    __swig_setmethods__["row_no"] = _goldenrecord.Table_row_no_set
    __swig_getmethods__["row_no"] = _goldenrecord.Table_row_no_get
    if _newclass:
        row_no = _swig_property(_goldenrecord.Table_row_no_get, _goldenrecord.Table_row_no_set)
    __swig_setmethods__["col_no"] = _goldenrecord.Table_col_no_set
    __swig_getmethods__["col_no"] = _goldenrecord.Table_col_no_get
    if _newclass:
        col_no = _swig_property(_goldenrecord.Table_col_no_get, _goldenrecord.Table_col_no_set)
    __swig_setmethods__["table_name"] = _goldenrecord.Table_table_name_set
    __swig_getmethods__["table_name"] = _goldenrecord.Table_table_name_get
    if _newclass:
        table_name = _swig_property(_goldenrecord.Table_table_name_get, _goldenrecord.Table_table_name_set)
    __swig_setmethods__["schema"] = _goldenrecord.Table_schema_set
    __swig_getmethods__["schema"] = _goldenrecord.Table_schema_get
    if _newclass:
        schema = _swig_property(_goldenrecord.Table_schema_get, _goldenrecord.Table_schema_set)
    __swig_setmethods__["rows"] = _goldenrecord.Table_rows_set
    __swig_getmethods__["rows"] = _goldenrecord.Table_rows_get
    if _newclass:
        rows = _swig_property(_goldenrecord.Table_rows_get, _goldenrecord.Table_rows_set)
    __swig_setmethods__["cols"] = _goldenrecord.Table_cols_set
    __swig_getmethods__["cols"] = _goldenrecord.Table_cols_get
    if _newclass:
        cols = _swig_property(_goldenrecord.Table_cols_get, _goldenrecord.Table_cols_set)
    __swig_setmethods__["type"] = _goldenrecord.Table_type_set
    __swig_getmethods__["type"] = _goldenrecord.Table_type_get
    if _newclass:
        type = _swig_property(_goldenrecord.Table_type_get, _goldenrecord.Table_type_set)
    __swig_setmethods__["isUnique"] = _goldenrecord.Table_isUnique_set
    __swig_getmethods__["isUnique"] = _goldenrecord.Table_isUnique_get
    if _newclass:
        isUnique = _swig_property(_goldenrecord.Table_isUnique_get, _goldenrecord.Table_isUnique_set)
    __swig_setmethods__["isKeyCand"] = _goldenrecord.Table_isKeyCand_set
    __swig_getmethods__["isKeyCand"] = _goldenrecord.Table_isKeyCand_get
    if _newclass:
        isKeyCand = _swig_property(_goldenrecord.Table_isKeyCand_get, _goldenrecord.Table_isKeyCand_set)
    __swig_setmethods__["hasNull"] = _goldenrecord.Table_hasNull_set
    __swig_getmethods__["hasNull"] = _goldenrecord.Table_hasNull_get
    if _newclass:
        hasNull = _swig_property(_goldenrecord.Table_hasNull_get, _goldenrecord.Table_hasNull_set)
    __swig_setmethods__["notNullNum"] = _goldenrecord.Table_notNullNum_set
    __swig_getmethods__["notNullNum"] = _goldenrecord.Table_notNullNum_get
    if _newclass:
        notNullNum = _swig_property(_goldenrecord.Table_notNullNum_get, _goldenrecord.Table_notNullNum_set)
    def __init__(self, *args):
        # Overloaded C++ constructor; SWIG dispatches on *args.
        this = _goldenrecord.new_Table(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def Profile(self) -> "void":
        return _goldenrecord.Table_Profile(self)
    def OutputCSV(self, filename: 'std::string const &') -> "void":
        # Write the table contents to a CSV file at `filename`.
        return _goldenrecord.Table_OutputCSV(self, filename)
    __swig_destroy__ = _goldenrecord.delete_Table
    __del__ = lambda self: None
# Register the Table proxy class with the SWIG C++ runtime.
Table_swigregister = _goldenrecord.Table_swigregister
Table_swigregister(Table)
# Module-level constants mirrored from the C++ headers (thresholds and limits
# used by profiling, pruning, and rule aggregation). Values come from the
# compiled extension, so they stay in sync with the C++ side.
INT_TYPE_THRESH = _goldenrecord.INT_TYPE_THRESH
UNIQUE_THRESH = _goldenrecord.UNIQUE_THRESH
EPS = _goldenrecord.EPS
MAX_PATH_LENGTH = _goldenrecord.MAX_PATH_LENGTH
PRUNE_BY_STRUCTURE_THRESHOLD = _goldenrecord.PRUNE_BY_STRUCTURE_THRESHOLD
PRUNE_BY_STRUCTURE_GROUP_NUM = _goldenrecord.PRUNE_BY_STRUCTURE_GROUP_NUM
MAX_CONSTANT_LENGTH = _goldenrecord.MAX_CONSTANT_LENGTH
GLOBAL_FREQUENCY_THRESHOLD = _goldenrecord.GLOBAL_FREQUENCY_THRESHOLD
LOCAL_FREQUENCY_THRESHOLD = _goldenrecord.LOCAL_FREQUENCY_THRESHOLD
MAX_NUMBER_OF_RULES = _goldenrecord.MAX_NUMBER_OF_RULES
AGG_REGEX_NUM = _goldenrecord.AGG_REGEX_NUM
class pair_hash(_object):
    """SWIG proxy for the C++ ``pair_hash`` functor (hash for std::pair keys).
    The callable interface is not exported; the type exists so it can be named
    in wrapped container signatures."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, pair_hash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, pair_hash, name)
    __repr__ = _swig_repr
    def __init__(self):
        this = _goldenrecord.new_pair_hash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _goldenrecord.delete_pair_hash
    __del__ = lambda self: None
# Register the pair_hash proxy class with the SWIG C++ runtime.
pair_hash_swigregister = _goldenrecord.pair_hash_swigregister
pair_hash_swigregister(pair_hash)
class Wrapper(_object):
    """SWIG proxy for the C++ ``Wrapper`` utility class: static string/regex
    helpers (find-replace, tokenize, timing log, colored print) plus shared
    aggregation-regex tables."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Wrapper, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Wrapper, name)
    __repr__ = _swig_repr
    __swig_setmethods__["maxLength"] = _goldenrecord.Wrapper_maxLength_set
    __swig_getmethods__["maxLength"] = _goldenrecord.Wrapper_maxLength_get
    if _newclass:
        maxLength = _swig_property(_goldenrecord.Wrapper_maxLength_get, _goldenrecord.Wrapper_maxLength_set)
    __swig_setmethods__["matrix"] = _goldenrecord.Wrapper_matrix_set
    __swig_getmethods__["matrix"] = _goldenrecord.Wrapper_matrix_get
    if _newclass:
        matrix = _swig_property(_goldenrecord.Wrapper_matrix_get, _goldenrecord.Wrapper_matrix_set)
    # Static C++ methods: on new-style classes wrap as staticmethod, otherwise
    # bind the raw extension function directly.
    if _newclass:
        FindReplace = staticmethod(_goldenrecord.Wrapper_FindReplace)
    else:
        FindReplace = _goldenrecord.Wrapper_FindReplace
    __swig_setmethods__["agg_regex_str"] = _goldenrecord.Wrapper_agg_regex_str_set
    __swig_getmethods__["agg_regex_str"] = _goldenrecord.Wrapper_agg_regex_str_get
    if _newclass:
        agg_regex_str = _swig_property(_goldenrecord.Wrapper_agg_regex_str_get, _goldenrecord.Wrapper_agg_regex_str_set)
    __swig_setmethods__["agg_replace_str"] = _goldenrecord.Wrapper_agg_replace_str_set
    __swig_getmethods__["agg_replace_str"] = _goldenrecord.Wrapper_agg_replace_str_get
    if _newclass:
        agg_replace_str = _swig_property(_goldenrecord.Wrapper_agg_replace_str_get, _goldenrecord.Wrapper_agg_replace_str_set)
    if _newclass:
        logTime = staticmethod(_goldenrecord.Wrapper_logTime)
    else:
        logTime = _goldenrecord.Wrapper_logTime
    __swig_setmethods__["agg_regexes"] = _goldenrecord.Wrapper_agg_regexes_set
    __swig_getmethods__["agg_regexes"] = _goldenrecord.Wrapper_agg_regexes_get
    if _newclass:
        agg_regexes = _swig_property(_goldenrecord.Wrapper_agg_regexes_get, _goldenrecord.Wrapper_agg_regexes_set)
    if _newclass:
        print_green = staticmethod(_goldenrecord.Wrapper_print_green)
    else:
        print_green = _goldenrecord.Wrapper_print_green
    if _newclass:
        strToTokens = staticmethod(_goldenrecord.Wrapper_strToTokens)
    else:
        strToTokens = _goldenrecord.Wrapper_strToTokens
    def __init__(self):
        this = _goldenrecord.new_Wrapper()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _goldenrecord.delete_Wrapper
    __del__ = lambda self: None
# Register the Wrapper proxy class with the SWIG C++ runtime.
Wrapper_swigregister = _goldenrecord.Wrapper_swigregister
Wrapper_swigregister(Wrapper)
# Module-level shim for the static method Wrapper.FindReplace. The def exists
# for docstring/annotation purposes; SWIG immediately rebinds the name to the
# raw extension function (standard generated pattern; `str` shadowing the
# builtin is an artifact of the C++ parameter name).
def Wrapper_FindReplace(str: 'std::string const &', exp: 'regex const &', replace: 'std::string const &') -> "std::string":
    return _goldenrecord.Wrapper_FindReplace(str, exp, replace)
Wrapper_FindReplace = _goldenrecord.Wrapper_FindReplace
# Module-level shim for the static method Wrapper.logTime (rebound to the raw
# extension function below, as with all SWIG static-method shims).
def Wrapper_logTime(begin: 'timeval &', end: 'timeval &', log: 'std::string const &') -> "void":
    return _goldenrecord.Wrapper_logTime(begin, end, log)
Wrapper_logTime = _goldenrecord.Wrapper_logTime
# Module-level shim for the static method Wrapper.print_green.
def Wrapper_print_green(s: 'std::string const &') -> "void":
    return _goldenrecord.Wrapper_print_green(s)
Wrapper_print_green = _goldenrecord.Wrapper_print_green
# Module-level shim for the static method Wrapper.strToTokens (splits `s` into
# `res` using the delimiter set `delims`; output via the C++ reference arg).
def Wrapper_strToTokens(s: 'std::string const &', res: 'vector< std::string > &', delims: 'std::string const &') -> "void":
    return _goldenrecord.Wrapper_strToTokens(s, res, delims)
Wrapper_strToTokens = _goldenrecord.Wrapper_strToTokens
class Consolidation(_object):
    """SWIG proxy for the C++ ``Consolidation`` driver: loads clustered CSV data
    and steps through tables/columns/clusters, applying consolidation rules and
    materializing results."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Consolidation, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Consolidation, name)
    __repr__ = _swig_repr
    # Each triplet below exposes one C++ data member via SWIG getter/setter shims.
    __swig_setmethods__["cluster_id_name"] = _goldenrecord.Consolidation_cluster_id_name_set
    __swig_getmethods__["cluster_id_name"] = _goldenrecord.Consolidation_cluster_id_name_get
    if _newclass:
        cluster_id_name = _swig_property(_goldenrecord.Consolidation_cluster_id_name_get, _goldenrecord.Consolidation_cluster_id_name_set)
    __swig_setmethods__["csvfilepath"] = _goldenrecord.Consolidation_csvfilepath_set
    __swig_getmethods__["csvfilepath"] = _goldenrecord.Consolidation_csvfilepath_get
    if _newclass:
        csvfilepath = _swig_property(_goldenrecord.Consolidation_csvfilepath_get, _goldenrecord.Consolidation_csvfilepath_set)
    __swig_setmethods__["csvreader"] = _goldenrecord.Consolidation_csvreader_set
    __swig_getmethods__["csvreader"] = _goldenrecord.Consolidation_csvreader_get
    if _newclass:
        csvreader = _swig_property(_goldenrecord.Consolidation_csvreader_get, _goldenrecord.Consolidation_csvreader_set)
    __swig_setmethods__["number_of_tables"] = _goldenrecord.Consolidation_number_of_tables_set
    __swig_getmethods__["number_of_tables"] = _goldenrecord.Consolidation_number_of_tables_get
    if _newclass:
        number_of_tables = _swig_property(_goldenrecord.Consolidation_number_of_tables_get, _goldenrecord.Consolidation_number_of_tables_set)
    __swig_setmethods__["cluster_id_col"] = _goldenrecord.Consolidation_cluster_id_col_set
    __swig_getmethods__["cluster_id_col"] = _goldenrecord.Consolidation_cluster_id_col_get
    if _newclass:
        cluster_id_col = _swig_property(_goldenrecord.Consolidation_cluster_id_col_get, _goldenrecord.Consolidation_cluster_id_col_set)
    __swig_setmethods__["clusters"] = _goldenrecord.Consolidation_clusters_set
    __swig_getmethods__["clusters"] = _goldenrecord.Consolidation_clusters_get
    if _newclass:
        clusters = _swig_property(_goldenrecord.Consolidation_clusters_get, _goldenrecord.Consolidation_clusters_set)
    __swig_setmethods__["agg"] = _goldenrecord.Consolidation_agg_set
    __swig_getmethods__["agg"] = _goldenrecord.Consolidation_agg_get
    if _newclass:
        agg = _swig_property(_goldenrecord.Consolidation_agg_get, _goldenrecord.Consolidation_agg_set)
    __swig_setmethods__["rules"] = _goldenrecord.Consolidation_rules_set
    __swig_getmethods__["rules"] = _goldenrecord.Consolidation_rules_get
    if _newclass:
        rules = _swig_property(_goldenrecord.Consolidation_rules_get, _goldenrecord.Consolidation_rules_set)
    __swig_setmethods__["groupStatus"] = _goldenrecord.Consolidation_groupStatus_set
    __swig_getmethods__["groupStatus"] = _goldenrecord.Consolidation_groupStatus_get
    if _newclass:
        groupStatus = _swig_property(_goldenrecord.Consolidation_groupStatus_get, _goldenrecord.Consolidation_groupStatus_set)
    __swig_setmethods__["modification"] = _goldenrecord.Consolidation_modification_set
    __swig_getmethods__["modification"] = _goldenrecord.Consolidation_modification_get
    if _newclass:
        modification = _swig_property(_goldenrecord.Consolidation_modification_get, _goldenrecord.Consolidation_modification_set)
    __swig_setmethods__["termRules"] = _goldenrecord.Consolidation_termRules_set
    __swig_getmethods__["termRules"] = _goldenrecord.Consolidation_termRules_get
    if _newclass:
        termRules = _swig_property(_goldenrecord.Consolidation_termRules_get, _goldenrecord.Consolidation_termRules_set)
    def __init__(self, filepath: 'std::string', cname: 'std::string'):
        # filepath: CSV input path; cname: name of the cluster-id column.
        this = _goldenrecord.new_Consolidation(filepath, cname)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def TryNextTable(self, i: 'int') -> "std::string":
        return _goldenrecord.Consolidation_TryNextTable(self, i)
    def ProfileColumn(self, i: 'int', col_id: 'int') -> "std::string":
        return _goldenrecord.Consolidation_ProfileColumn(self, i, col_id)
    def TryNextColumn(self, i: 'int', col_id: 'int', skip: 'std::string') -> "std::string":
        return _goldenrecord.Consolidation_TryNextColumn(self, i, col_id, skip)
    def ShowNextCluster(self) -> "std::string":
        return _goldenrecord.Consolidation_ShowNextCluster(self)
    def ApplyCluster(self, i: 'int', col_id: 'int', applied_group_num: 'int', max_group_id: 'int', tmp: 'std::string') -> "std::string":
        return _goldenrecord.Consolidation_ApplyCluster(self, i, col_id, applied_group_num, max_group_id, tmp)
    def MaterializeTable(self, i: 'int', outfilepath: 'std::string') -> "void":
        # Write table `i` (with applied consolidations) to `outfilepath`.
        return _goldenrecord.Consolidation_MaterializeTable(self, i, outfilepath)
    __swig_destroy__ = _goldenrecord.delete_Consolidation
    __del__ = lambda self: None
# Register the Consolidation proxy class with the SWIG C++ runtime.
Consolidation_swigregister = _goldenrecord.Consolidation_swigregister
Consolidation_swigregister(Consolidation)
# This file is compatible with both classic and new-style classes.
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# Locate and load the compiled extension module `_goldenrecord`, choosing the
# import strategy by interpreter version (importlib on >= 2.7, imp on 2.6,
# plain import otherwise). Temporary helper names are deleted afterwards.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        import importlib
        # Prefer the package-relative module (pkg._goldenrecord), falling back
        # to a top-level _goldenrecord.
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_goldenrecord')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_goldenrecord')
    _goldenrecord = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            # Search next to this file first; fall back to a normal import.
            fp, pathname, description = imp.find_module('_goldenrecord', [dirname(__file__)])
        except ImportError:
            import _goldenrecord
            return _goldenrecord
        try:
            _mod = imp.load_module('_goldenrecord', fp, pathname, description)
        finally:
            # find_module opened the file; always close it.
            if fp is not None:
                fp.close()
        return _mod
    _goldenrecord = swig_import_helper()
    del swig_import_helper
else:
    import _goldenrecord
del _swig_python_version_info
# Compatibility shims: alias `property` (absent before Python 2.2) and make
# the builtins module available under the Python 2 name `__builtin__`.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG attribute setter: route writes through the class's C-level setter
    table; when `static` is true, reject attributes the C++ type doesn't declare."""
    if (name == "thisown"):
        # Ownership flag lives on the underlying SwigPyObject.
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            # Attach the C++ object pointer directly, bypassing setter lookup.
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        # Dynamic mode: allow arbitrary Python-side attributes.
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown names become ordinary Python attributes.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """SWIG attribute getter: resolve reads through the class's C-level getter table."""
    if (name == "thisown"):
        # Ownership flag lives on the underlying SwigPyObject.
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    """Default repr for SWIG proxies, embedding the underlying C++ pointer's repr when available."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        # No attached C++ object yet (e.g. before __init__ completes).
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Base-class shim: use new-style classes (`object`) where available; on very
# old interpreters fall back to a classic class and record which mode is active
# in `_newclass` (consulted by the generated property definitions).
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
class Aggregator(_object):
    """SWIG proxy for the C++ ``Aggregator``: groups and aggregates candidate
    transformation rules (structural and format-group aggregation, constant-term
    analysis)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Aggregator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Aggregator, name)
    __repr__ = _swig_repr
    # `rules` is read-only: only a getter is generated (no setter entry).
    __swig_getmethods__["rules"] = _goldenrecord.Aggregator_rules_get
    if _newclass:
        rules = _swig_property(_goldenrecord.Aggregator_rules_get)
    # Remaining triplets each expose one read/write C++ data member.
    __swig_setmethods__["ruleDir"] = _goldenrecord.Aggregator_ruleDir_set
    __swig_getmethods__["ruleDir"] = _goldenrecord.Aggregator_ruleDir_get
    if _newclass:
        ruleDir = _swig_property(_goldenrecord.Aggregator_ruleDir_get, _goldenrecord.Aggregator_ruleDir_set)
    __swig_setmethods__["ruleGroup"] = _goldenrecord.Aggregator_ruleGroup_set
    __swig_getmethods__["ruleGroup"] = _goldenrecord.Aggregator_ruleGroup_get
    if _newclass:
        ruleGroup = _swig_property(_goldenrecord.Aggregator_ruleGroup_get, _goldenrecord.Aggregator_ruleGroup_set)
    __swig_setmethods__["ruleStatus"] = _goldenrecord.Aggregator_ruleStatus_set
    __swig_getmethods__["ruleStatus"] = _goldenrecord.Aggregator_ruleStatus_get
    if _newclass:
        ruleStatus = _swig_property(_goldenrecord.Aggregator_ruleStatus_get, _goldenrecord.Aggregator_ruleStatus_set)
    __swig_setmethods__["transformations"] = _goldenrecord.Aggregator_transformations_set
    __swig_getmethods__["transformations"] = _goldenrecord.Aggregator_transformations_get
    if _newclass:
        transformations = _swig_property(_goldenrecord.Aggregator_transformations_get, _goldenrecord.Aggregator_transformations_set)
    __swig_setmethods__["structures"] = _goldenrecord.Aggregator_structures_set
    __swig_getmethods__["structures"] = _goldenrecord.Aggregator_structures_get
    if _newclass:
        structures = _swig_property(_goldenrecord.Aggregator_structures_get, _goldenrecord.Aggregator_structures_set)
    __swig_setmethods__["groupRules"] = _goldenrecord.Aggregator_groupRules_set
    __swig_getmethods__["groupRules"] = _goldenrecord.Aggregator_groupRules_get
    if _newclass:
        groupRules = _swig_property(_goldenrecord.Aggregator_groupRules_get, _goldenrecord.Aggregator_groupRules_set)
    __swig_setmethods__["format_group_cluster_ids"] = _goldenrecord.Aggregator_format_group_cluster_ids_set
    __swig_getmethods__["format_group_cluster_ids"] = _goldenrecord.Aggregator_format_group_cluster_ids_get
    if _newclass:
        format_group_cluster_ids = _swig_property(_goldenrecord.Aggregator_format_group_cluster_ids_get, _goldenrecord.Aggregator_format_group_cluster_ids_set)
    __swig_setmethods__["format_group_rule_ids"] = _goldenrecord.Aggregator_format_group_rule_ids_set
    __swig_getmethods__["format_group_rule_ids"] = _goldenrecord.Aggregator_format_group_rule_ids_get
    if _newclass:
        format_group_rule_ids = _swig_property(_goldenrecord.Aggregator_format_group_rule_ids_get, _goldenrecord.Aggregator_format_group_rule_ids_set)
    __swig_setmethods__["format_group_rules"] = _goldenrecord.Aggregator_format_group_rules_set
    __swig_getmethods__["format_group_rules"] = _goldenrecord.Aggregator_format_group_rules_get
    if _newclass:
        format_group_rules = _swig_property(_goldenrecord.Aggregator_format_group_rules_get, _goldenrecord.Aggregator_format_group_rules_set)
    __swig_setmethods__["global_const_terms"] = _goldenrecord.Aggregator_global_const_terms_set
    __swig_getmethods__["global_const_terms"] = _goldenrecord.Aggregator_global_const_terms_get
    if _newclass:
        global_const_terms = _swig_property(_goldenrecord.Aggregator_global_const_terms_get, _goldenrecord.Aggregator_global_const_terms_set)
    __swig_setmethods__["local_const_terms"] = _goldenrecord.Aggregator_local_const_terms_set
    __swig_getmethods__["local_const_terms"] = _goldenrecord.Aggregator_local_const_terms_get
    if _newclass:
        local_const_terms = _swig_property(_goldenrecord.Aggregator_local_const_terms_get, _goldenrecord.Aggregator_local_const_terms_set)
    __swig_setmethods__["cluster_sizes"] = _goldenrecord.Aggregator_cluster_sizes_set
    __swig_getmethods__["cluster_sizes"] = _goldenrecord.Aggregator_cluster_sizes_get
    if _newclass:
        cluster_sizes = _swig_property(_goldenrecord.Aggregator_cluster_sizes_get, _goldenrecord.Aggregator_cluster_sizes_set)
    __swig_setmethods__["forward_list"] = _goldenrecord.Aggregator_forward_list_set
    __swig_getmethods__["forward_list"] = _goldenrecord.Aggregator_forward_list_get
    if _newclass:
        forward_list = _swig_property(_goldenrecord.Aggregator_forward_list_get, _goldenrecord.Aggregator_forward_list_set)
    __swig_setmethods__["ordered"] = _goldenrecord.Aggregator_ordered_set
    __swig_getmethods__["ordered"] = _goldenrecord.Aggregator_ordered_get
    if _newclass:
        ordered = _swig_property(_goldenrecord.Aggregator_ordered_get, _goldenrecord.Aggregator_ordered_set)
    def __init__(self, allRules: 'vector< pair< pair< string,string >,int > > const &'):
        # allRules: candidate ((lhs, rhs), count) rule tuples to aggregate.
        this = _goldenrecord.new_Aggregator(allRules)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def Aggregate(self) -> "void":
        return _goldenrecord.Aggregator_Aggregate(self)
    def GroupAggregate(self) -> "void":
        return _goldenrecord.Aggregator_GroupAggregate(self)
    def AggregateStructure(self) -> "void":
        return _goldenrecord.Aggregator_AggregateStructure(self)
    def NoAggregatation(self) -> "void":
        # (sic) name comes from the C++ side; kept verbatim for ABI fidelity.
        return _goldenrecord.Aggregator_NoAggregatation(self)
    def Group(self) -> "void":
        return _goldenrecord.Aggregator_Group(self)
    def RandomGroup(self) -> "void":
        return _goldenrecord.Aggregator_RandomGroup(self)
    def CalConstantTerms(self) -> "void":
        return _goldenrecord.Aggregator_CalConstantTerms(self)
    __swig_destroy__ = _goldenrecord.delete_Aggregator
    __del__ = lambda self: None
# Register the Aggregator proxy class with the SWIG C++ runtime.
Aggregator_swigregister = _goldenrecord.Aggregator_swigregister
Aggregator_swigregister(Aggregator)
# Upper bound on accepted CSV input size (mirrored from the C++ header).
MAX_CSV_FILE_SIZE = _goldenrecord.MAX_CSV_FILE_SIZE
class CSVReader(_object):
    """SWIG proxy for the C++ ``CSVReader``: reads CSV input files, optionally
    normalizing values."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, CSVReader, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, CSVReader, name)
    __repr__ = _swig_repr
    def __init__(self):
        this = _goldenrecord.new_CSVReader()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def reading(self, datafilespath: 'string &', normalize: 'bool') -> "bool":
        # Returns True on successful read of the file(s) at `datafilespath`.
        return _goldenrecord.CSVReader_reading(self, datafilespath, normalize)
    def get_max_val_len(self) -> "int":
        return _goldenrecord.CSVReader_get_max_val_len(self)
    __swig_destroy__ = _goldenrecord.delete_CSVReader
    __del__ = lambda self: None
# Register the CSVReader proxy class with the SWIG C++ runtime.
CSVReader_swigregister = _goldenrecord.CSVReader_swigregister
CSVReader_swigregister(CSVReader)
class Reader(_object):
    """SWIG proxy for the abstract C++ ``Reader`` base class. Not directly
    constructible from Python; concrete readers populate `tables`."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Reader, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Reader, name)
    def __init__(self, *args, **kwargs):
        # Abstract on the C++ side: SWIG generates a constructor that always raises.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_setmethods__["tables"] = _goldenrecord.Reader_tables_set
    __swig_getmethods__["tables"] = _goldenrecord.Reader_tables_get
    if _newclass:
        tables = _swig_property(_goldenrecord.Reader_tables_get, _goldenrecord.Reader_tables_set)
    def strNormalize(self, s: 'std::string &') -> "void":
        return _goldenrecord.Reader_strNormalize(self, s)
    def reading(self, datafilespath: 'std::string &', normalize: 'bool') -> "bool":
        return _goldenrecord.Reader_reading(self, datafilespath, normalize)
    def get_max_val_len(self) -> "int":
        return _goldenrecord.Reader_get_max_val_len(self)
    __swig_destroy__ = _goldenrecord.delete_Reader
    __del__ = lambda self: None
# Register the Reader proxy class with the SWIG C++ runtime.
Reader_swigregister = _goldenrecord.Reader_swigregister
Reader_swigregister(Reader)
class Locator(_object):
    """SWIG proxy for the C++ ``Locator`` type: an (id, beg, end) span reference
    with ordering/equality operators."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Locator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Locator, name)
    __repr__ = _swig_repr
    # NOTE: `id` here is the wrapped C++ member name; it shadows the builtin
    # only as a class attribute (generated code, kept verbatim).
    __swig_setmethods__["id"] = _goldenrecord.Locator_id_set
    __swig_getmethods__["id"] = _goldenrecord.Locator_id_get
    if _newclass:
        id = _swig_property(_goldenrecord.Locator_id_get, _goldenrecord.Locator_id_set)
    __swig_setmethods__["beg"] = _goldenrecord.Locator_beg_set
    __swig_getmethods__["beg"] = _goldenrecord.Locator_beg_get
    if _newclass:
        beg = _swig_property(_goldenrecord.Locator_beg_get, _goldenrecord.Locator_beg_set)
    __swig_setmethods__["end"] = _goldenrecord.Locator_end_set
    __swig_getmethods__["end"] = _goldenrecord.Locator_end_get
    if _newclass:
        end = _swig_property(_goldenrecord.Locator_end_get, _goldenrecord.Locator_end_set)
    def __init__(self, v: 'int', b: 'int', e: 'int'):
        this = _goldenrecord.new_Locator(v, b, e)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def __lt__(self, e: 'Locator') -> "bool":
        return _goldenrecord.Locator___lt__(self, e)
    def __gt__(self, e: 'Locator') -> "bool":
        return _goldenrecord.Locator___gt__(self, e)
    def __eq__(self, e: 'Locator') -> "bool":
        return _goldenrecord.Locator___eq__(self, e)
    __swig_destroy__ = _goldenrecord.delete_Locator
    __del__ = lambda self: None
# Register the Locator proxy class with the SWIG C++ runtime.
Locator_swigregister = _goldenrecord.Locator_swigregister
Locator_swigregister(Locator)
class LocatorHash(_object):
    """SWIG proxy for the C++ ``LocatorHash`` functor: callable hash for ``Locator``."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, LocatorHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, LocatorHash, name)
    __repr__ = _swig_repr
    def __call__(self, o: 'Locator') -> "size_t":
        return _goldenrecord.LocatorHash___call__(self, o)
    def __init__(self):
        this = _goldenrecord.new_LocatorHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _goldenrecord.delete_LocatorHash
    __del__ = lambda self: None
# Register the LocatorHash proxy class with the SWIG C++ runtime.
LocatorHash_swigregister = _goldenrecord.LocatorHash_swigregister
LocatorHash_swigregister(LocatorHash)
class PairLocatorHash(_object):
    """SWIG proxy for the C++ ``PairLocatorHash`` functor: callable hash for a pair of ``Locator``s."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, PairLocatorHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, PairLocatorHash, name)
    __repr__ = _swig_repr
    def __call__(self, o: 'pair< Locator,Locator > const &') -> "size_t":
        return _goldenrecord.PairLocatorHash___call__(self, o)
    def __init__(self):
        this = _goldenrecord.new_PairLocatorHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _goldenrecord.delete_PairLocatorHash
    __del__ = lambda self: None
# Register the PairLocatorHash proxy class with the SWIG C++ runtime.
PairLocatorHash_swigregister = _goldenrecord.PairLocatorHash_swigregister
PairLocatorHash_swigregister(PairLocatorHash)
class TripleHash(_object):
    """SWIG proxy for the C++ ``TripleHash`` functor: callable hash for ``triple`` keys."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, TripleHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, TripleHash, name)
    __repr__ = _swig_repr
    def __call__(self, k: 'triple const &') -> "size_t":
        return _goldenrecord.TripleHash___call__(self, k)
    def __init__(self):
        this = _goldenrecord.new_TripleHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _goldenrecord.delete_TripleHash
    __del__ = lambda self: None
# Register the TripleHash proxy class with the SWIG C++ runtime.
TripleHash_swigregister = _goldenrecord.TripleHash_swigregister
TripleHash_swigregister(TripleHash)
class Rules(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Rules, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Rules, name)
__repr__ = _swig_repr
__swig_setmethods__["table"] = _goldenrecord.Rules_table_set
__swig_getmethods__["table"] = _goldenrecord.Rules_table_get
if _newclass:
table = _swig_property(_goldenrecord.Rules_table_get, _goldenrecord.Rules_table_set)
__swig_setmethods__["col_id"] = _goldenrecord.Rules_col_id_set
__swig_getmethods__["col_id"] = _goldenrecord.Rules_col_id_get
if _newclass:
col_id = _swig_property(_goldenrecord.Rules_col_id_get, _goldenrecord.Rules_col_id_set)
__swig_setmethods__["values"] = _goldenrecord.Rules_values_set
__swig_getmethods__["values"] = _goldenrecord.Rules_values_get
if _newclass:
values = _swig_property(_goldenrecord.Rules_values_get, _goldenrecord.Rules_values_set)
__swig_setmethods__["clusters"] = _goldenrecord.Rules_clusters_set
__swig_getmethods__["clusters"] = _goldenrecord.Rules_clusters_get
if _newclass:
clusters = _swig_property(_goldenrecord.Rules_clusters_get, _goldenrecord.Rules_clusters_set)
__swig_setmethods__["id_mappings"] = _goldenrecord.Rules_id_mappings_set
__swig_getmethods__["id_mappings"] = _goldenrecord.Rules_id_mappings_get
if _newclass:
id_mappings = _swig_property(_goldenrecord.Rules_id_mappings_get, _goldenrecord.Rules_id_mappings_set)
__swig_setmethods__["counts"] = _goldenrecord.Rules_counts_set
__swig_getmethods__["counts"] = _goldenrecord.Rules_counts_get
if _newclass:
counts = _swig_property(_goldenrecord.Rules_counts_get, _goldenrecord.Rules_counts_set)
__swig_setmethods__["row_to_cluster"] = _goldenrecord.Rules_row_to_cluster_set
__swig_getmethods__["row_to_cluster"] = _goldenrecord.Rules_row_to_cluster_get
if _newclass:
row_to_cluster = _swig_property(_goldenrecord.Rules_row_to_cluster_get, _goldenrecord.Rules_row_to_cluster_set)
__swig_setmethods__["rule_types"] = _goldenrecord.Rules_rule_types_set
__swig_getmethods__["rule_types"] = _goldenrecord.Rules_rule_types_get
if _newclass:
rule_types = _swig_property(_goldenrecord.Rules_rule_types_get, _goldenrecord.Rules_rule_types_set)
__swig_getmethods__["enable_auto_confirm_rules"] = _goldenrecord.Rules_enable_auto_confirm_rules_get
if _newclass:
enable_auto_confirm_rules = _swig_property(_goldenrecord.Rules_enable_auto_confirm_rules_get)
__swig_getmethods__["token_delim"] = _goldenrecord.Rules_token_delim_get
if _newclass:
token_delim = _swig_property(_goldenrecord.Rules_token_delim_get)
__swig_setmethods__["op_log"] = _goldenrecord.Rules_op_log_set
__swig_getmethods__["op_log"] = _goldenrecord.Rules_op_log_get
if _newclass:
op_log = _swig_property(_goldenrecord.Rules_op_log_get, _goldenrecord.Rules_op_log_set)
__swig_setmethods__["termRules"] = _goldenrecord.Rules_termRules_set
__swig_getmethods__["termRules"] = _goldenrecord.Rules_termRules_get
if _newclass:
termRules = _swig_property(_goldenrecord.Rules_termRules_get, _goldenrecord.Rules_termRules_set)
__swig_setmethods__["rule_locations"] = _goldenrecord.Rules_rule_locations_set
__swig_getmethods__["rule_locations"] = _goldenrecord.Rules_rule_locations_get
if _newclass:
rule_locations = _swig_property(_goldenrecord.Rules_rule_locations_get, _goldenrecord.Rules_rule_locations_set)
__swig_setmethods__["termRulesApplicable"] = _goldenrecord.Rules_termRulesApplicable_set
__swig_getmethods__["termRulesApplicable"] = _goldenrecord.Rules_termRulesApplicable_get
if _newclass:
termRulesApplicable = _swig_property(_goldenrecord.Rules_termRulesApplicable_get, _goldenrecord.Rules_termRulesApplicable_set)
__swig_setmethods__["valTokens"] = _goldenrecord.Rules_valTokens_set
__swig_getmethods__["valTokens"] = _goldenrecord.Rules_valTokens_get
if _newclass:
valTokens = _swig_property(_goldenrecord.Rules_valTokens_get, _goldenrecord.Rules_valTokens_set)
__swig_setmethods__["invIndex"] = _goldenrecord.Rules_invIndex_set
__swig_getmethods__["invIndex"] = _goldenrecord.Rules_invIndex_get
if _newclass:
invIndex = _swig_property(_goldenrecord.Rules_invIndex_get, _goldenrecord.Rules_invIndex_set)
def __init__(self, t: 'Table', cid: 'int', v: 'vector< std::string > const &', c: 'vector< vector< int > > const &', m: 'vector< vector< int > > const &', f: 'vector< int > const &', rtype: 'int', confirm: 'bool'):
this = _goldenrecord.new_Rules(t, cid, v, c, m, f, rtype, confirm)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def GenerateRules(self) -> "void":
return _goldenrecord.Rules_GenerateRules(self)
def RankRules(self, rules: 'unordered_map< pair< std::string,std::string >,int,pair_hash > &', tops: 'vector< pair< pair< std::string,std::string >,int > > &', limit: 'int'=10) -> "void":
return _goldenrecord.Rules_RankRules(self, rules, tops, limit)
def ApplyGroupRuleComplex(self, rules: 'vector< pair< pair< std::string,std::string >,int > > const &', ruleDir: 'vector< bool > const &', ruleGroup: 'vector< int > const &', ruleStatus: 'vector< int > &', groupRules: 'vector< vector< int > > const &', groupStatus: 'vector< int > &', transformations: 'vector< Path > const &', modification: 'vector< vector< tuple< int,int,int > > > &', max_group_id: 'int', tmp: 'std::string') -> "bool":
return _goldenrecord.Rules_ApplyGroupRuleComplex(self, rules, ruleDir, ruleGroup, ruleStatus, groupRules, groupStatus, transformations, modification, max_group_id, tmp)
def ShowNextCluster(self, rules: 'vector< pair< pair< std::string,std::string >,int > > &', ruleDir: 'vector< bool > &', ruleGroup: 'vector< int > &', ruleStatus: 'vector< int > &', groupRules: 'vector< vector< int > > &', groupStatus: 'vector< int > &', transformations: 'vector< Path > &', structures: 'vector< pair< std::string,std::string > > &', modification: 'vector< vector< tuple< int,int,int > > > &') -> "std::string":
return _goldenrecord.Rules_ShowNextCluster(self, rules, ruleDir, ruleGroup, ruleStatus, groupRules, groupStatus, transformations, structures, modification)
def ApplyRule(self, rules: 'vector< pair< pair< std::string,std::string >,int > > &', ruleDir: 'vector< bool > &', ruleGroup: 'vector< int > &', ruleStatus: 'vector< int > &', groupRules: 'vector< vector< int > > &', groupStatus: 'vector< int > &', transformations: 'vector< Path > &', structures: 'vector< pair< std::string,std::string > > &', modification: 'vector< vector< tuple< int,int,int > > > &', max_group_id: 'int', tmp: 'std::string') -> "bool":
return _goldenrecord.Rules_ApplyRule(self, rules, ruleDir, ruleGroup, ruleStatus, groupRules, groupStatus, transformations, structures, modification, max_group_id, tmp)
__swig_destroy__ = _goldenrecord.delete_Rules
__del__ = lambda self: None
# Register the Rules proxy class (defined above) with the SWIG runtime so that
# C++ Rules instances returned from the extension are wrapped as Python Rules.
Rules_swigregister = _goldenrecord.Rules_swigregister
Rules_swigregister(Rules)
# Module-level constants re-exported from the C++ extension.  Their values are
# defined in the C++ headers; the names suggest scoring weights for rule
# synthesis — confirm against the C++ source before relying on semantics.
SUBSTR_SCORE = _goldenrecord.SUBSTR_SCORE
CPOS_SCORE = _goldenrecord.CPOS_SCORE
CONSTR_SCORE = _goldenrecord.CONSTR_SCORE
CONSTANT_TOKEN_RATIO = _goldenrecord.CONSTANT_TOKEN_RATIO
NEG_INF = _goldenrecord.NEG_INF
REGEX_SIZE = _goldenrecord.REGEX_SIZE
ALL_REGEX_SIZE = _goldenrecord.ALL_REGEX_SIZE
# Token/character-class identifiers (presumably regex token codes used by the
# graph/edge labels below — verify in the C++ headers).
PROPER_CASE = _goldenrecord.PROPER_CASE
CAPS = _goldenrecord.CAPS
LOWER_CASE = _goldenrecord.LOWER_CASE
DIGITS = _goldenrecord.DIGITS
ALPHABETS = _goldenrecord.ALPHABETS
ALPHANUMERIC = _goldenrecord.ALPHANUMERIC
WHITESPACE = _goldenrecord.WHITESPACE
PUNCT = _goldenrecord.PUNCT
CONSTANT = _goldenrecord.CONSTANT
START_T = _goldenrecord.START_T
END_T = _goldenrecord.END_T
SPECIAL_CASE_FOR_DAG_EDGE = _goldenrecord.SPECIAL_CASE_FOR_DAG_EDGE
class VLabel(_object):
    """SWIG proxy for the C++ ``VLabel`` type exposed by ``_goldenrecord``.

    Wraps fields ``pid`` and ``pos`` (both ints, per the C++ constructor
    signature below).
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, VLabel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, VLabel, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["pid"] = _goldenrecord.VLabel_pid_set
    __swig_getmethods__["pid"] = _goldenrecord.VLabel_pid_get
    if _newclass:
        pid = _swig_property(_goldenrecord.VLabel_pid_get, _goldenrecord.VLabel_pid_set)
    __swig_setmethods__["pos"] = _goldenrecord.VLabel_pos_set
    __swig_getmethods__["pos"] = _goldenrecord.VLabel_pos_get
    if _newclass:
        pos = _swig_property(_goldenrecord.VLabel_pos_get, _goldenrecord.VLabel_pos_set)
    def __init__(self, id: 'int', p: 'int'):
        # Allocate the underlying C++ object and stash its pointer on ``self.this``.
        this = _goldenrecord.new_VLabel(id, p)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_VLabel
    __del__ = lambda self: None
# Register the proxy class so C++ VLabel instances map onto this Python class.
VLabel_swigregister = _goldenrecord.VLabel_swigregister
VLabel_swigregister(VLabel)
# Global C++ variables exposed through the SWIG ``cvar`` accessor object.
cvar = _goldenrecord.cvar
regexes = cvar.regexes
regex_str = cvar.regex_str
start_special_token = cvar.start_special_token
end_special_token = cvar.end_special_token
class Vertex(_object):
    """SWIG proxy for the C++ ``Vertex`` type exposed by ``_goldenrecord``.

    Exposes the C++ members ``vertex_id`` and ``labels``.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Vertex, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Vertex, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["vertex_id"] = _goldenrecord.Vertex_vertex_id_set
    __swig_getmethods__["vertex_id"] = _goldenrecord.Vertex_vertex_id_get
    if _newclass:
        vertex_id = _swig_property(_goldenrecord.Vertex_vertex_id_get, _goldenrecord.Vertex_vertex_id_set)
    __swig_setmethods__["labels"] = _goldenrecord.Vertex_labels_set
    __swig_getmethods__["labels"] = _goldenrecord.Vertex_labels_get
    if _newclass:
        labels = _swig_property(_goldenrecord.Vertex_labels_get, _goldenrecord.Vertex_labels_set)
    def __init__(self, *args):
        # Allocate the underlying C++ object (overload resolved in C++ from *args).
        this = _goldenrecord.new_Vertex(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Vertex
    __del__ = lambda self: None
# Register the proxy class so C++ Vertex instances map onto this Python class.
Vertex_swigregister = _goldenrecord.Vertex_swigregister
Vertex_swigregister(Vertex)
class ELabel(_object):
    """SWIG proxy for the C++ ``ELabel`` (edge label) type.

    Exposes members ``regex``, ``index`` and ``constr``, plus equality and
    greater-than comparisons delegated to the C++ operators.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ELabel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ELabel, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["regex"] = _goldenrecord.ELabel_regex_set
    __swig_getmethods__["regex"] = _goldenrecord.ELabel_regex_get
    if _newclass:
        regex = _swig_property(_goldenrecord.ELabel_regex_get, _goldenrecord.ELabel_regex_set)
    __swig_setmethods__["index"] = _goldenrecord.ELabel_index_set
    __swig_getmethods__["index"] = _goldenrecord.ELabel_index_get
    if _newclass:
        index = _swig_property(_goldenrecord.ELabel_index_get, _goldenrecord.ELabel_index_set)
    __swig_setmethods__["constr"] = _goldenrecord.ELabel_constr_set
    __swig_getmethods__["constr"] = _goldenrecord.ELabel_constr_get
    if _newclass:
        constr = _swig_property(_goldenrecord.ELabel_constr_get, _goldenrecord.ELabel_constr_set)
    def __init__(self, *args):
        # Allocate the underlying C++ object (overload resolved in C++ from *args).
        this = _goldenrecord.new_ELabel(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Comparison operators delegated to the C++ implementations.
    # NOTE(review): only __eq__/__gt__ are wrapped; __hash__ is inherited, so
    # equal ELabels are not guaranteed equal hashes — confirm before using as keys.
    def __eq__(self, o: 'ELabel') -> "bool":
        return _goldenrecord.ELabel___eq__(self, o)
    def __gt__(self, p: 'ELabel') -> "bool":
        return _goldenrecord.ELabel___gt__(self, p)
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_ELabel
    __del__ = lambda self: None
# Register the proxy class so C++ ELabel instances map onto this Python class.
ELabel_swigregister = _goldenrecord.ELabel_swigregister
ELabel_swigregister(ELabel)
class Edge(_object):
    """SWIG proxy for the C++ ``Edge`` type.

    Exposes source/target vertex ids (``src_id``, ``end_id``) and the edge's
    ``labels`` collection.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Edge, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Edge, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["src_id"] = _goldenrecord.Edge_src_id_set
    __swig_getmethods__["src_id"] = _goldenrecord.Edge_src_id_get
    if _newclass:
        src_id = _swig_property(_goldenrecord.Edge_src_id_get, _goldenrecord.Edge_src_id_set)
    __swig_setmethods__["end_id"] = _goldenrecord.Edge_end_id_set
    __swig_getmethods__["end_id"] = _goldenrecord.Edge_end_id_get
    if _newclass:
        end_id = _swig_property(_goldenrecord.Edge_end_id_get, _goldenrecord.Edge_end_id_set)
    __swig_setmethods__["labels"] = _goldenrecord.Edge_labels_set
    __swig_getmethods__["labels"] = _goldenrecord.Edge_labels_get
    if _newclass:
        labels = _swig_property(_goldenrecord.Edge_labels_get, _goldenrecord.Edge_labels_set)
    def __init__(self, *args):
        # Allocate the underlying C++ object (overload resolved in C++ from *args).
        this = _goldenrecord.new_Edge(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Edge
    __del__ = lambda self: None
# Register the proxy class so C++ Edge instances map onto this Python class.
Edge_swigregister = _goldenrecord.Edge_swigregister
Edge_swigregister(Edge)
class Pos(_object):
    """SWIG proxy for the C++ ``Pos`` type.

    Exposes members ``label`` and ``direction`` and the full set of ordering
    comparisons (==, !=, <, >) delegated to the C++ operators.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Pos, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Pos, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["label"] = _goldenrecord.Pos_label_set
    __swig_getmethods__["label"] = _goldenrecord.Pos_label_get
    if _newclass:
        label = _swig_property(_goldenrecord.Pos_label_get, _goldenrecord.Pos_label_set)
    __swig_setmethods__["direction"] = _goldenrecord.Pos_direction_set
    __swig_getmethods__["direction"] = _goldenrecord.Pos_direction_get
    if _newclass:
        direction = _swig_property(_goldenrecord.Pos_direction_get, _goldenrecord.Pos_direction_set)
    def __init__(self, *args):
        # Allocate the underlying C++ object (overload resolved in C++ from *args).
        this = _goldenrecord.new_Pos(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Comparison operators delegated to the C++ implementations.
    def __eq__(self, o: 'Pos') -> "bool":
        return _goldenrecord.Pos___eq__(self, o)
    def __ne__(self, o: 'Pos') -> "bool":
        return _goldenrecord.Pos___ne__(self, o)
    def __gt__(self, p: 'Pos') -> "bool":
        return _goldenrecord.Pos___gt__(self, p)
    def __lt__(self, p: 'Pos') -> "bool":
        return _goldenrecord.Pos___lt__(self, p)
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Pos
    __del__ = lambda self: None
# Register the proxy class so C++ Pos instances map onto this Python class.
Pos_swigregister = _goldenrecord.Pos_swigregister
Pos_swigregister(Pos)
class ELabelHash(_object):
    """SWIG proxy for the C++ ``ELabelHash`` functor.

    Callable: ``ELabelHash()(elabel)`` returns the C++ ``size_t`` hash of an
    :class:`ELabel` (used as a hash functor for C++ unordered containers).
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, ELabelHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, ELabelHash, name)
    __repr__ = _swig_repr
    def __call__(self, o: 'ELabel') -> "size_t":
        return _goldenrecord.ELabelHash___call__(self, o)
    def __init__(self):
        # Allocate the underlying C++ functor object.
        this = _goldenrecord.new_ELabelHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_ELabelHash
    __del__ = lambda self: None
# Register the proxy class so C++ ELabelHash instances map onto this Python class.
ELabelHash_swigregister = _goldenrecord.ELabelHash_swigregister
ELabelHash_swigregister(ELabelHash)
class Graph(_object):
    """SWIG proxy for the C++ ``Graph`` type.

    Exposes the graph's ``edges``/``nodes`` containers, adjacency indexes
    (``node2edge``, ``rev_node2edge``), position data (``all_pos``) and a label
    index (``umap_labels``), plus thin wrappers around the C++ member functions.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Graph, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Graph, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["edges"] = _goldenrecord.Graph_edges_set
    __swig_getmethods__["edges"] = _goldenrecord.Graph_edges_get
    if _newclass:
        edges = _swig_property(_goldenrecord.Graph_edges_get, _goldenrecord.Graph_edges_set)
    __swig_setmethods__["nodes"] = _goldenrecord.Graph_nodes_set
    __swig_getmethods__["nodes"] = _goldenrecord.Graph_nodes_get
    if _newclass:
        nodes = _swig_property(_goldenrecord.Graph_nodes_get, _goldenrecord.Graph_nodes_set)
    __swig_setmethods__["all_pos"] = _goldenrecord.Graph_all_pos_set
    __swig_getmethods__["all_pos"] = _goldenrecord.Graph_all_pos_get
    if _newclass:
        all_pos = _swig_property(_goldenrecord.Graph_all_pos_get, _goldenrecord.Graph_all_pos_set)
    __swig_setmethods__["node2edge"] = _goldenrecord.Graph_node2edge_set
    __swig_getmethods__["node2edge"] = _goldenrecord.Graph_node2edge_get
    if _newclass:
        node2edge = _swig_property(_goldenrecord.Graph_node2edge_get, _goldenrecord.Graph_node2edge_set)
    __swig_setmethods__["rev_node2edge"] = _goldenrecord.Graph_rev_node2edge_set
    __swig_getmethods__["rev_node2edge"] = _goldenrecord.Graph_rev_node2edge_get
    if _newclass:
        rev_node2edge = _swig_property(_goldenrecord.Graph_rev_node2edge_get, _goldenrecord.Graph_rev_node2edge_set)
    __swig_setmethods__["umap_labels"] = _goldenrecord.Graph_umap_labels_set
    __swig_getmethods__["umap_labels"] = _goldenrecord.Graph_umap_labels_get
    if _newclass:
        umap_labels = _swig_property(_goldenrecord.Graph_umap_labels_get, _goldenrecord.Graph_umap_labels_set)
    # Thin wrappers around the C++ member functions; C++ reference parameters
    # (e.g. 'int &', containers passed by '&') are filled in-place by the callee.
    def UmapIndex(self) -> "void":
        return _goldenrecord.Graph_UmapIndex(self)
    def GetMatchId(self, str: 'std::string const &', mid: 'int &', r: 'Regex const', exp: 'regex const &', match_map: 'unordered_map< pair< int,int >,vector< ELabel >,pair_hash > &') -> "void":
        return _goldenrecord.Graph_GetMatchId(self, str, mid, r, exp, match_map)
    def GenGraphStr(self, *args) -> "void":
        return _goldenrecord.Graph_GenGraphStr(self, *args)
    def getPosList(self, posList: 'vector< vector< Pos > > &') -> "void":
        return _goldenrecord.Graph_getPosList(self, posList)
    def getAdjacentMatrix(self) -> "void":
        return _goldenrecord.Graph_getAdjacentMatrix(self)
    def TestContainment(self, path: 'vector< pair< Pos,Pos > > const &', target: 'std::string const &', source: 'std::string const &') -> "bool":
        return _goldenrecord.Graph_TestContainment(self, path, target, source)
    def __init__(self):
        # Allocate the underlying C++ object and stash its pointer on ``self.this``.
        this = _goldenrecord.new_Graph()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Graph
    __del__ = lambda self: None
# Register the proxy class so C++ Graph instances map onto this Python class.
Graph_swigregister = _goldenrecord.Graph_swigregister
Graph_swigregister(Graph)
class Elem(_object):
    """SWIG proxy for the C++ ``Elem`` type.

    Holds ``pid``, ``beg`` and ``end`` (int fields per the constructor), with
    ordering and equality delegated to the C++ operators.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Elem, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Elem, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["pid"] = _goldenrecord.Elem_pid_set
    __swig_getmethods__["pid"] = _goldenrecord.Elem_pid_get
    if _newclass:
        pid = _swig_property(_goldenrecord.Elem_pid_get, _goldenrecord.Elem_pid_set)
    __swig_setmethods__["beg"] = _goldenrecord.Elem_beg_set
    __swig_getmethods__["beg"] = _goldenrecord.Elem_beg_get
    if _newclass:
        beg = _swig_property(_goldenrecord.Elem_beg_get, _goldenrecord.Elem_beg_set)
    __swig_setmethods__["end"] = _goldenrecord.Elem_end_set
    __swig_getmethods__["end"] = _goldenrecord.Elem_end_get
    if _newclass:
        end = _swig_property(_goldenrecord.Elem_end_get, _goldenrecord.Elem_end_set)
    def __init__(self, r: 'int', b: 'int', e: 'int'):
        # Allocate the underlying C++ object and stash its pointer on ``self.this``.
        this = _goldenrecord.new_Elem(r, b, e)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Comparison operators delegated to the C++ implementations.
    def __lt__(self, e: 'Elem') -> "bool":
        return _goldenrecord.Elem___lt__(self, e)
    def __gt__(self, e: 'Elem') -> "bool":
        return _goldenrecord.Elem___gt__(self, e)
    def __eq__(self, e: 'Elem') -> "bool":
        return _goldenrecord.Elem___eq__(self, e)
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Elem
    __del__ = lambda self: None
# Register the proxy class so C++ Elem instances map onto this Python class.
Elem_swigregister = _goldenrecord.Elem_swigregister
Elem_swigregister(Elem)
class DagEdge(_object):
    """SWIG proxy for the C++ ``DagEdge`` type.

    Exposes endpoint ids (``src_id``, ``end_id``), a ``constant`` field,
    candidate string sets (``prefixes``, ``suffixes``, ``inffixes``), position
    fields (``const_pos``, ``input_pos``) and a ``weight``.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DagEdge, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DagEdge, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["src_id"] = _goldenrecord.DagEdge_src_id_set
    __swig_getmethods__["src_id"] = _goldenrecord.DagEdge_src_id_get
    if _newclass:
        src_id = _swig_property(_goldenrecord.DagEdge_src_id_get, _goldenrecord.DagEdge_src_id_set)
    __swig_setmethods__["end_id"] = _goldenrecord.DagEdge_end_id_set
    __swig_getmethods__["end_id"] = _goldenrecord.DagEdge_end_id_get
    if _newclass:
        end_id = _swig_property(_goldenrecord.DagEdge_end_id_get, _goldenrecord.DagEdge_end_id_set)
    __swig_setmethods__["constant"] = _goldenrecord.DagEdge_constant_set
    __swig_getmethods__["constant"] = _goldenrecord.DagEdge_constant_get
    if _newclass:
        constant = _swig_property(_goldenrecord.DagEdge_constant_get, _goldenrecord.DagEdge_constant_set)
    __swig_setmethods__["prefixes"] = _goldenrecord.DagEdge_prefixes_set
    __swig_getmethods__["prefixes"] = _goldenrecord.DagEdge_prefixes_get
    if _newclass:
        prefixes = _swig_property(_goldenrecord.DagEdge_prefixes_get, _goldenrecord.DagEdge_prefixes_set)
    __swig_setmethods__["suffixes"] = _goldenrecord.DagEdge_suffixes_set
    __swig_getmethods__["suffixes"] = _goldenrecord.DagEdge_suffixes_get
    if _newclass:
        suffixes = _swig_property(_goldenrecord.DagEdge_suffixes_get, _goldenrecord.DagEdge_suffixes_set)
    __swig_setmethods__["inffixes"] = _goldenrecord.DagEdge_inffixes_set
    __swig_getmethods__["inffixes"] = _goldenrecord.DagEdge_inffixes_get
    if _newclass:
        inffixes = _swig_property(_goldenrecord.DagEdge_inffixes_get, _goldenrecord.DagEdge_inffixes_set)
    __swig_setmethods__["const_pos"] = _goldenrecord.DagEdge_const_pos_set
    __swig_getmethods__["const_pos"] = _goldenrecord.DagEdge_const_pos_get
    if _newclass:
        const_pos = _swig_property(_goldenrecord.DagEdge_const_pos_get, _goldenrecord.DagEdge_const_pos_set)
    __swig_setmethods__["input_pos"] = _goldenrecord.DagEdge_input_pos_set
    __swig_getmethods__["input_pos"] = _goldenrecord.DagEdge_input_pos_get
    if _newclass:
        input_pos = _swig_property(_goldenrecord.DagEdge_input_pos_get, _goldenrecord.DagEdge_input_pos_set)
    __swig_setmethods__["weight"] = _goldenrecord.DagEdge_weight_set
    __swig_getmethods__["weight"] = _goldenrecord.DagEdge_weight_get
    if _newclass:
        weight = _swig_property(_goldenrecord.DagEdge_weight_get, _goldenrecord.DagEdge_weight_set)
    def __init__(self, *args):
        # Allocate the underlying C++ object (overload resolved in C++ from *args).
        this = _goldenrecord.new_DagEdge(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_DagEdge
    __del__ = lambda self: None
# Register the proxy class so C++ DagEdge instances map onto this Python class.
DagEdge_swigregister = _goldenrecord.DagEdge_swigregister
DagEdge_swigregister(DagEdge)
class DAGraph(_object):
    """SWIG proxy for the C++ ``DAGraph`` (directed acyclic graph) type.

    Constructed from an input/output string pair plus a rule id and a map of
    valid terms; exposes the graph structure (``nodes``, ``edges``,
    ``dag_edge_adj``), endpoints (``starting_node_id``, ``ending_node_id``)
    and size counters, and wraps ``DynamicRanking``/``getGraphSize``.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DAGraph, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DAGraph, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["nodes"] = _goldenrecord.DAGraph_nodes_set
    __swig_getmethods__["nodes"] = _goldenrecord.DAGraph_nodes_get
    if _newclass:
        nodes = _swig_property(_goldenrecord.DAGraph_nodes_get, _goldenrecord.DAGraph_nodes_set)
    __swig_setmethods__["edges"] = _goldenrecord.DAGraph_edges_set
    __swig_getmethods__["edges"] = _goldenrecord.DAGraph_edges_get
    if _newclass:
        edges = _swig_property(_goldenrecord.DAGraph_edges_get, _goldenrecord.DAGraph_edges_set)
    __swig_setmethods__["rule_id"] = _goldenrecord.DAGraph_rule_id_set
    __swig_getmethods__["rule_id"] = _goldenrecord.DAGraph_rule_id_get
    if _newclass:
        rule_id = _swig_property(_goldenrecord.DAGraph_rule_id_get, _goldenrecord.DAGraph_rule_id_set)
    __swig_setmethods__["input"] = _goldenrecord.DAGraph_input_set
    __swig_getmethods__["input"] = _goldenrecord.DAGraph_input_get
    if _newclass:
        input = _swig_property(_goldenrecord.DAGraph_input_get, _goldenrecord.DAGraph_input_set)
    __swig_setmethods__["output"] = _goldenrecord.DAGraph_output_set
    __swig_getmethods__["output"] = _goldenrecord.DAGraph_output_get
    if _newclass:
        output = _swig_property(_goldenrecord.DAGraph_output_get, _goldenrecord.DAGraph_output_set)
    __swig_setmethods__["input_graph"] = _goldenrecord.DAGraph_input_graph_set
    __swig_getmethods__["input_graph"] = _goldenrecord.DAGraph_input_graph_get
    if _newclass:
        input_graph = _swig_property(_goldenrecord.DAGraph_input_graph_get, _goldenrecord.DAGraph_input_graph_set)
    __swig_setmethods__["starting_node_id"] = _goldenrecord.DAGraph_starting_node_id_set
    __swig_getmethods__["starting_node_id"] = _goldenrecord.DAGraph_starting_node_id_get
    if _newclass:
        starting_node_id = _swig_property(_goldenrecord.DAGraph_starting_node_id_get, _goldenrecord.DAGraph_starting_node_id_set)
    __swig_setmethods__["ending_node_id"] = _goldenrecord.DAGraph_ending_node_id_set
    __swig_getmethods__["ending_node_id"] = _goldenrecord.DAGraph_ending_node_id_get
    if _newclass:
        ending_node_id = _swig_property(_goldenrecord.DAGraph_ending_node_id_get, _goldenrecord.DAGraph_ending_node_id_set)
    __swig_setmethods__["dag_edge_adj"] = _goldenrecord.DAGraph_dag_edge_adj_set
    __swig_getmethods__["dag_edge_adj"] = _goldenrecord.DAGraph_dag_edge_adj_get
    if _newclass:
        dag_edge_adj = _swig_property(_goldenrecord.DAGraph_dag_edge_adj_get, _goldenrecord.DAGraph_dag_edge_adj_set)
    __swig_setmethods__["graphSize"] = _goldenrecord.DAGraph_graphSize_set
    __swig_getmethods__["graphSize"] = _goldenrecord.DAGraph_graphSize_get
    if _newclass:
        graphSize = _swig_property(_goldenrecord.DAGraph_graphSize_get, _goldenrecord.DAGraph_graphSize_set)
    __swig_setmethods__["nodeSize"] = _goldenrecord.DAGraph_nodeSize_set
    __swig_getmethods__["nodeSize"] = _goldenrecord.DAGraph_nodeSize_get
    if _newclass:
        nodeSize = _swig_property(_goldenrecord.DAGraph_nodeSize_get, _goldenrecord.DAGraph_nodeSize_set)
    __swig_setmethods__["edgeSize"] = _goldenrecord.DAGraph_edgeSize_set
    __swig_getmethods__["edgeSize"] = _goldenrecord.DAGraph_edgeSize_get
    if _newclass:
        edgeSize = _swig_property(_goldenrecord.DAGraph_edgeSize_get, _goldenrecord.DAGraph_edgeSize_set)
    def __init__(self, arg2: 'std::string const &', out: 'std::string const &', id: 'int const', valid_terms: 'unordered_map< std::string,pair< double,int > > const &'):
        # Allocate the underlying C++ object and stash its pointer on ``self.this``.
        this = _goldenrecord.new_DAGraph(arg2, out, id, valid_terms)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Thin wrappers around the C++ member functions.
    def DynamicRanking(self, path: 'Path', synsizer: 'Synthesizer') -> "void":
        return _goldenrecord.DAGraph_DynamicRanking(self, path, synsizer)
    def getGraphSize(self) -> "uint64_t":
        return _goldenrecord.DAGraph_getGraphSize(self)
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_DAGraph
    __del__ = lambda self: None
# Register the proxy class so C++ DAGraph instances map onto this Python class.
DAGraph_swigregister = _goldenrecord.DAGraph_swigregister
DAGraph_swigregister(DAGraph)
class Synthesizer(_object):
    """SWIG proxy for the C++ ``Synthesizer`` type.

    Holds several indexes (``constr_index``, ``cpos_index``, ``substr_index``,
    ``prefix_index``, ``suffix_index``), the DAG collection (``dags``) and
    ``thresholds``.  ``rules`` is read-only (getter only — no setter is
    registered, so assignment raises at runtime).
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Synthesizer, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Synthesizer, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["constr_index"] = _goldenrecord.Synthesizer_constr_index_set
    __swig_getmethods__["constr_index"] = _goldenrecord.Synthesizer_constr_index_get
    if _newclass:
        constr_index = _swig_property(_goldenrecord.Synthesizer_constr_index_get, _goldenrecord.Synthesizer_constr_index_set)
    __swig_setmethods__["cpos_index"] = _goldenrecord.Synthesizer_cpos_index_set
    __swig_getmethods__["cpos_index"] = _goldenrecord.Synthesizer_cpos_index_get
    if _newclass:
        cpos_index = _swig_property(_goldenrecord.Synthesizer_cpos_index_get, _goldenrecord.Synthesizer_cpos_index_set)
    __swig_setmethods__["substr_index"] = _goldenrecord.Synthesizer_substr_index_set
    __swig_getmethods__["substr_index"] = _goldenrecord.Synthesizer_substr_index_get
    if _newclass:
        substr_index = _swig_property(_goldenrecord.Synthesizer_substr_index_get, _goldenrecord.Synthesizer_substr_index_set)
    __swig_setmethods__["prefix_index"] = _goldenrecord.Synthesizer_prefix_index_set
    __swig_getmethods__["prefix_index"] = _goldenrecord.Synthesizer_prefix_index_get
    if _newclass:
        prefix_index = _swig_property(_goldenrecord.Synthesizer_prefix_index_get, _goldenrecord.Synthesizer_prefix_index_set)
    __swig_setmethods__["suffix_index"] = _goldenrecord.Synthesizer_suffix_index_set
    __swig_getmethods__["suffix_index"] = _goldenrecord.Synthesizer_suffix_index_get
    if _newclass:
        suffix_index = _swig_property(_goldenrecord.Synthesizer_suffix_index_get, _goldenrecord.Synthesizer_suffix_index_set)
    __swig_setmethods__["dags"] = _goldenrecord.Synthesizer_dags_set
    __swig_getmethods__["dags"] = _goldenrecord.Synthesizer_dags_get
    if _newclass:
        dags = _swig_property(_goldenrecord.Synthesizer_dags_get, _goldenrecord.Synthesizer_dags_set)
    __swig_setmethods__["thresholds"] = _goldenrecord.Synthesizer_thresholds_set
    __swig_getmethods__["thresholds"] = _goldenrecord.Synthesizer_thresholds_get
    if _newclass:
        thresholds = _swig_property(_goldenrecord.Synthesizer_thresholds_get, _goldenrecord.Synthesizer_thresholds_set)
    # "rules" is getter-only: SWIG registered no setter, so it is read-only.
    __swig_getmethods__["rules"] = _goldenrecord.Synthesizer_rules_get
    if _newclass:
        rules = _swig_property(_goldenrecord.Synthesizer_rules_get)
    def __init__(self, r: 'vector< pair< pair< std::string,std::string >,int > > const &', valid_terms: 'unordered_map< std::string,pair< double,int > > const &'):
        # Allocate the underlying C++ object and stash its pointer on ``self.this``.
        this = _goldenrecord.new_Synthesizer(r, valid_terms)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Thin wrappers around the C++ member functions; reference parameters are
    # filled in-place by the callee.
    def Indexing(self, valid_terms: 'unordered_map< std::string,pair< double,int > > const &') -> "void":
        return _goldenrecord.Synthesizer_Indexing(self, valid_terms)
    def InvIndex(self, dags: 'vector< DAGraph > const &') -> "void":
        return _goldenrecord.Synthesizer_InvIndex(self, dags)
    def SynAggregating(self, groups: 'unordered_map< Path,vector< int >,PathHash > &', rule_id_maps: 'vector< int > const &') -> "void":
        return _goldenrecord.Synthesizer_SynAggregating(self, groups, rule_id_maps)
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Synthesizer
    __del__ = lambda self: None
# Register the proxy class so C++ Synthesizer instances map onto this Python class.
Synthesizer_swigregister = _goldenrecord.Synthesizer_swigregister
Synthesizer_swigregister(Synthesizer)
class Path(_object):
    """SWIG proxy for the C++ ``Path`` type.

    Exposes a single ``path`` member and the full comparison set
    (==, !=, <, >) delegated to the C++ operators; hashed via
    :class:`PathHash` in C++ unordered containers.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Path, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Path, name)
    __repr__ = _swig_repr
    # C++ data member exposed as an attribute (real property on new-style classes).
    __swig_setmethods__["path"] = _goldenrecord.Path_path_set
    __swig_getmethods__["path"] = _goldenrecord.Path_path_get
    if _newclass:
        path = _swig_property(_goldenrecord.Path_path_get, _goldenrecord.Path_path_set)
    # Comparison operators delegated to the C++ implementations.
    def __eq__(self, p2: 'Path') -> "bool":
        return _goldenrecord.Path___eq__(self, p2)
    def __ne__(self, p2: 'Path') -> "bool":
        return _goldenrecord.Path___ne__(self, p2)
    def __lt__(self, p2: 'Path') -> "bool":
        return _goldenrecord.Path___lt__(self, p2)
    def __gt__(self, p2: 'Path') -> "bool":
        return _goldenrecord.Path___gt__(self, p2)
    def __init__(self):
        # Allocate the underlying C++ object and stash its pointer on ``self.this``.
        this = _goldenrecord.new_Path()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Path
    __del__ = lambda self: None
# Register the proxy class so C++ Path instances map onto this Python class.
Path_swigregister = _goldenrecord.Path_swigregister
Path_swigregister(Path)
class PathHash(_object):
    """SWIG proxy for the C++ ``PathHash`` functor.

    Callable: ``PathHash()(path)`` returns the C++ ``size_t`` hash of a
    :class:`Path` (used as a hash functor for C++ unordered containers).
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, PathHash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, PathHash, name)
    __repr__ = _swig_repr
    def __call__(self, p: 'Path') -> "size_t":
        return _goldenrecord.PathHash___call__(self, p)
    def __init__(self):
        # Allocate the underlying C++ functor object.
        this = _goldenrecord.new_PathHash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_PathHash
    __del__ = lambda self: None
# Register the proxy class so C++ PathHash instances map onto this Python class.
PathHash_swigregister = _goldenrecord.PathHash_swigregister
PathHash_swigregister(PathHash)
class Table(_object):
    """SWIG proxy for the C++ ``Table`` type.

    Wraps tabular data (``rows``, ``cols``, ``schema``) with identity/shape
    metadata (``tid``, ``table_name``, ``row_no``, ``col_no``) and per-column
    profiling flags (``type``, ``isUnique``, ``isKeyCand``, ``hasNull``,
    ``notNullNum``), plus ``Profile()`` and ``OutputCSV()``.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Table, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Table, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["tid"] = _goldenrecord.Table_tid_set
    __swig_getmethods__["tid"] = _goldenrecord.Table_tid_get
    if _newclass:
        tid = _swig_property(_goldenrecord.Table_tid_get, _goldenrecord.Table_tid_set)
    __swig_setmethods__["row_no"] = _goldenrecord.Table_row_no_set
    __swig_getmethods__["row_no"] = _goldenrecord.Table_row_no_get
    if _newclass:
        row_no = _swig_property(_goldenrecord.Table_row_no_get, _goldenrecord.Table_row_no_set)
    __swig_setmethods__["col_no"] = _goldenrecord.Table_col_no_set
    __swig_getmethods__["col_no"] = _goldenrecord.Table_col_no_get
    if _newclass:
        col_no = _swig_property(_goldenrecord.Table_col_no_get, _goldenrecord.Table_col_no_set)
    __swig_setmethods__["table_name"] = _goldenrecord.Table_table_name_set
    __swig_getmethods__["table_name"] = _goldenrecord.Table_table_name_get
    if _newclass:
        table_name = _swig_property(_goldenrecord.Table_table_name_get, _goldenrecord.Table_table_name_set)
    __swig_setmethods__["schema"] = _goldenrecord.Table_schema_set
    __swig_getmethods__["schema"] = _goldenrecord.Table_schema_get
    if _newclass:
        schema = _swig_property(_goldenrecord.Table_schema_get, _goldenrecord.Table_schema_set)
    __swig_setmethods__["rows"] = _goldenrecord.Table_rows_set
    __swig_getmethods__["rows"] = _goldenrecord.Table_rows_get
    if _newclass:
        rows = _swig_property(_goldenrecord.Table_rows_get, _goldenrecord.Table_rows_set)
    __swig_setmethods__["cols"] = _goldenrecord.Table_cols_set
    __swig_getmethods__["cols"] = _goldenrecord.Table_cols_get
    if _newclass:
        cols = _swig_property(_goldenrecord.Table_cols_get, _goldenrecord.Table_cols_set)
    __swig_setmethods__["type"] = _goldenrecord.Table_type_set
    __swig_getmethods__["type"] = _goldenrecord.Table_type_get
    if _newclass:
        type = _swig_property(_goldenrecord.Table_type_get, _goldenrecord.Table_type_set)
    __swig_setmethods__["isUnique"] = _goldenrecord.Table_isUnique_set
    __swig_getmethods__["isUnique"] = _goldenrecord.Table_isUnique_get
    if _newclass:
        isUnique = _swig_property(_goldenrecord.Table_isUnique_get, _goldenrecord.Table_isUnique_set)
    __swig_setmethods__["isKeyCand"] = _goldenrecord.Table_isKeyCand_set
    __swig_getmethods__["isKeyCand"] = _goldenrecord.Table_isKeyCand_get
    if _newclass:
        isKeyCand = _swig_property(_goldenrecord.Table_isKeyCand_get, _goldenrecord.Table_isKeyCand_set)
    __swig_setmethods__["hasNull"] = _goldenrecord.Table_hasNull_set
    __swig_getmethods__["hasNull"] = _goldenrecord.Table_hasNull_get
    if _newclass:
        hasNull = _swig_property(_goldenrecord.Table_hasNull_get, _goldenrecord.Table_hasNull_set)
    __swig_setmethods__["notNullNum"] = _goldenrecord.Table_notNullNum_set
    __swig_getmethods__["notNullNum"] = _goldenrecord.Table_notNullNum_get
    if _newclass:
        notNullNum = _swig_property(_goldenrecord.Table_notNullNum_get, _goldenrecord.Table_notNullNum_set)
    def __init__(self, *args):
        # Allocate the underlying C++ object (overload resolved in C++ from *args).
        this = _goldenrecord.new_Table(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Thin wrappers around the C++ member functions.
    def Profile(self) -> "void":
        return _goldenrecord.Table_Profile(self)
    def OutputCSV(self, filename: 'std::string const &') -> "void":
        return _goldenrecord.Table_OutputCSV(self, filename)
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Table
    __del__ = lambda self: None
# Register the proxy class so C++ Table instances map onto this Python class.
Table_swigregister = _goldenrecord.Table_swigregister
Table_swigregister(Table)
# Tuning thresholds re-exported from the C++ extension; the values and exact
# semantics are defined in the C++ headers — consult them before tweaking.
INT_TYPE_THRESH = _goldenrecord.INT_TYPE_THRESH
UNIQUE_THRESH = _goldenrecord.UNIQUE_THRESH
EPS = _goldenrecord.EPS
MAX_PATH_LENGTH = _goldenrecord.MAX_PATH_LENGTH
PRUNE_BY_STRUCTURE_THRESHOLD = _goldenrecord.PRUNE_BY_STRUCTURE_THRESHOLD
PRUNE_BY_STRUCTURE_GROUP_NUM = _goldenrecord.PRUNE_BY_STRUCTURE_GROUP_NUM
MAX_CONSTANT_LENGTH = _goldenrecord.MAX_CONSTANT_LENGTH
GLOBAL_FREQUENCY_THRESHOLD = _goldenrecord.GLOBAL_FREQUENCY_THRESHOLD
LOCAL_FREQUENCY_THRESHOLD = _goldenrecord.LOCAL_FREQUENCY_THRESHOLD
MAX_NUMBER_OF_RULES = _goldenrecord.MAX_NUMBER_OF_RULES
AGG_REGEX_NUM = _goldenrecord.AGG_REGEX_NUM
class pair_hash(_object):
    """SWIG proxy for the C++ ``pair_hash`` functor.

    Used on the C++ side as the hash functor for ``unordered_map`` keyed by
    ``std::pair`` (it appears in several wrapped signatures above); the Python
    proxy exposes only construction/destruction.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, pair_hash, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, pair_hash, name)
    __repr__ = _swig_repr
    def __init__(self):
        # Allocate the underlying C++ functor object.
        this = _goldenrecord.new_pair_hash()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_pair_hash
    __del__ = lambda self: None
# Register the proxy class so C++ pair_hash instances map onto this Python class.
pair_hash_swigregister = _goldenrecord.pair_hash_swigregister
pair_hash_swigregister(pair_hash)
class Wrapper(_object):
    """SWIG proxy for the C++ ``Wrapper`` utility class.

    Bundles shared state (``maxLength``, ``matrix``, aggregate regex/replace
    strings) and C++ static helpers (``FindReplace``, ``logTime``,
    ``print_green``, ``strToTokens``).  The statics are also re-exported at
    module level below the class.
    """
    # Per-attribute setter/getter dispatch tables consulted by the SWIG runtime.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Wrapper, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Wrapper, name)
    __repr__ = _swig_repr
    # C++ data members exposed as attributes (real properties on new-style classes).
    __swig_setmethods__["maxLength"] = _goldenrecord.Wrapper_maxLength_set
    __swig_getmethods__["maxLength"] = _goldenrecord.Wrapper_maxLength_get
    if _newclass:
        maxLength = _swig_property(_goldenrecord.Wrapper_maxLength_get, _goldenrecord.Wrapper_maxLength_set)
    __swig_setmethods__["matrix"] = _goldenrecord.Wrapper_matrix_set
    __swig_getmethods__["matrix"] = _goldenrecord.Wrapper_matrix_get
    if _newclass:
        matrix = _swig_property(_goldenrecord.Wrapper_matrix_get, _goldenrecord.Wrapper_matrix_set)
    # C++ static member functions: wrapped as staticmethod on new-style classes,
    # bound directly to the raw extension function otherwise.
    if _newclass:
        FindReplace = staticmethod(_goldenrecord.Wrapper_FindReplace)
    else:
        FindReplace = _goldenrecord.Wrapper_FindReplace
    __swig_setmethods__["agg_regex_str"] = _goldenrecord.Wrapper_agg_regex_str_set
    __swig_getmethods__["agg_regex_str"] = _goldenrecord.Wrapper_agg_regex_str_get
    if _newclass:
        agg_regex_str = _swig_property(_goldenrecord.Wrapper_agg_regex_str_get, _goldenrecord.Wrapper_agg_regex_str_set)
    __swig_setmethods__["agg_replace_str"] = _goldenrecord.Wrapper_agg_replace_str_set
    __swig_getmethods__["agg_replace_str"] = _goldenrecord.Wrapper_agg_replace_str_get
    if _newclass:
        agg_replace_str = _swig_property(_goldenrecord.Wrapper_agg_replace_str_get, _goldenrecord.Wrapper_agg_replace_str_set)
    if _newclass:
        logTime = staticmethod(_goldenrecord.Wrapper_logTime)
    else:
        logTime = _goldenrecord.Wrapper_logTime
    __swig_setmethods__["agg_regexes"] = _goldenrecord.Wrapper_agg_regexes_set
    __swig_getmethods__["agg_regexes"] = _goldenrecord.Wrapper_agg_regexes_get
    if _newclass:
        agg_regexes = _swig_property(_goldenrecord.Wrapper_agg_regexes_get, _goldenrecord.Wrapper_agg_regexes_set)
    if _newclass:
        print_green = staticmethod(_goldenrecord.Wrapper_print_green)
    else:
        print_green = _goldenrecord.Wrapper_print_green
    if _newclass:
        strToTokens = staticmethod(_goldenrecord.Wrapper_strToTokens)
    else:
        strToTokens = _goldenrecord.Wrapper_strToTokens
    def __init__(self):
        # Allocate the underlying C++ object and stash its pointer on ``self.this``.
        this = _goldenrecord.new_Wrapper()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # Destructor hook; the SWIG runtime drives actual deletion of the C++ object.
    __swig_destroy__ = _goldenrecord.delete_Wrapper
    __del__ = lambda self: None
# Register the proxy class so C++ Wrapper instances map onto this Python class.
Wrapper_swigregister = _goldenrecord.Wrapper_swigregister
Wrapper_swigregister(Wrapper)
# Module-level re-exports of Wrapper's C++ static methods.  Note each ``def`` is
# immediately shadowed by rebinding the name to the raw extension function, so
# the Python wrappers (and their annotations) are effectively documentation only.
def Wrapper_FindReplace(str: 'std::string const &', exp: 'regex const &', replace: 'std::string const &') -> "std::string":
    return _goldenrecord.Wrapper_FindReplace(str, exp, replace)
Wrapper_FindReplace = _goldenrecord.Wrapper_FindReplace
def Wrapper_logTime(begin: 'timeval &', end: 'timeval &', log: 'std::string const &') -> "void":
    return _goldenrecord.Wrapper_logTime(begin, end, log)
Wrapper_logTime = _goldenrecord.Wrapper_logTime
def Wrapper_print_green(s: 'std::string const &') -> "void":
    return _goldenrecord.Wrapper_print_green(s)
Wrapper_print_green = _goldenrecord.Wrapper_print_green
def Wrapper_strToTokens(s: 'std::string const &', res: 'vector< std::string > &', delims: 'std::string const &') -> "void":
    return _goldenrecord.Wrapper_strToTokens(s, res, delims)
Wrapper_strToTokens = _goldenrecord.Wrapper_strToTokens
class Consolidation(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Consolidation, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Consolidation, name)
__repr__ = _swig_repr
__swig_setmethods__["cluster_id_name"] = _goldenrecord.Consolidation_cluster_id_name_set
__swig_getmethods__["cluster_id_name"] = _goldenrecord.Consolidation_cluster_id_name_get
if _newclass:
cluster_id_name = _swig_property(_goldenrecord.Consolidation_cluster_id_name_get, _goldenrecord.Consolidation_cluster_id_name_set)
__swig_setmethods__["csvfilepath"] = _goldenrecord.Consolidation_csvfilepath_set
__swig_getmethods__["csvfilepath"] = _goldenrecord.Consolidation_csvfilepath_get
if _newclass:
csvfilepath = _swig_property(_goldenrecord.Consolidation_csvfilepath_get, _goldenrecord.Consolidation_csvfilepath_set)
__swig_setmethods__["csvreader"] = _goldenrecord.Consolidation_csvreader_set
__swig_getmethods__["csvreader"] = _goldenrecord.Consolidation_csvreader_get
if _newclass:
csvreader = _swig_property(_goldenrecord.Consolidation_csvreader_get, _goldenrecord.Consolidation_csvreader_set)
__swig_setmethods__["number_of_tables"] = _goldenrecord.Consolidation_number_of_tables_set
__swig_getmethods__["number_of_tables"] = _goldenrecord.Consolidation_number_of_tables_get
if _newclass:
number_of_tables = _swig_property(_goldenrecord.Consolidation_number_of_tables_get, _goldenrecord.Consolidation_number_of_tables_set)
__swig_setmethods__["cluster_id_col"] = _goldenrecord.Consolidation_cluster_id_col_set
__swig_getmethods__["cluster_id_col"] = _goldenrecord.Consolidation_cluster_id_col_get
if _newclass:
cluster_id_col = _swig_property(_goldenrecord.Consolidation_cluster_id_col_get, _goldenrecord.Consolidation_cluster_id_col_set)
__swig_setmethods__["clusters"] = _goldenrecord.Consolidation_clusters_set
__swig_getmethods__["clusters"] = _goldenrecord.Consolidation_clusters_get
if _newclass:
clusters = _swig_property(_goldenrecord.Consolidation_clusters_get, _goldenrecord.Consolidation_clusters_set)
__swig_setmethods__["agg"] = _goldenrecord.Consolidation_agg_set
__swig_getmethods__["agg"] = _goldenrecord.Consolidation_agg_get
if _newclass:
agg = _swig_property(_goldenrecord.Consolidation_agg_get, _goldenrecord.Consolidation_agg_set)
__swig_setmethods__["rules"] = _goldenrecord.Consolidation_rules_set
__swig_getmethods__["rules"] = _goldenrecord.Consolidation_rules_get
if _newclass:
rules = _swig_property(_goldenrecord.Consolidation_rules_get, _goldenrecord.Consolidation_rules_set)
__swig_setmethods__["groupStatus"] = _goldenrecord.Consolidation_groupStatus_set
__swig_getmethods__["groupStatus"] = _goldenrecord.Consolidation_groupStatus_get
if _newclass:
groupStatus = _swig_property(_goldenrecord.Consolidation_groupStatus_get, _goldenrecord.Consolidation_groupStatus_set)
__swig_setmethods__["modification"] = _goldenrecord.Consolidation_modification_set
__swig_getmethods__["modification"] = _goldenrecord.Consolidation_modification_get
if _newclass:
modification = _swig_property(_goldenrecord.Consolidation_modification_get, _goldenrecord.Consolidation_modification_set)
__swig_setmethods__["termRules"] = _goldenrecord.Consolidation_termRules_set
__swig_getmethods__["termRules"] = _goldenrecord.Consolidation_termRules_get
if _newclass:
termRules = _swig_property(_goldenrecord.Consolidation_termRules_get, _goldenrecord.Consolidation_termRules_set)
def __init__(self, filepath: 'std::string', cname: 'std::string'):
this = _goldenrecord.new_Consolidation(filepath, cname)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def TryNextTable(self, i: 'int') -> "std::string":
return _goldenrecord.Consolidation_TryNextTable(self, i)
def ProfileColumn(self, i: 'int', col_id: 'int') -> "std::string":
return _goldenrecord.Consolidation_ProfileColumn(self, i, col_id)
def TryNextColumn(self, i: 'int', col_id: 'int', skip: 'std::string') -> "std::string":
return _goldenrecord.Consolidation_TryNextColumn(self, i, col_id, skip)
def ShowNextCluster(self) -> "std::string":
return _goldenrecord.Consolidation_ShowNextCluster(self)
def ApplyCluster(self, i: 'int', col_id: 'int', applied_group_num: 'int', max_group_id: 'int', tmp: 'std::string') -> "std::string":
return _goldenrecord.Consolidation_ApplyCluster(self, i, col_id, applied_group_num, max_group_id, tmp)
def MaterializeTable(self, i: 'int', outfilepath: 'std::string') -> "void":
return _goldenrecord.Consolidation_MaterializeTable(self, i, outfilepath)
__swig_destroy__ = _goldenrecord.delete_Consolidation
__del__ = lambda self: None
Consolidation_swigregister = _goldenrecord.Consolidation_swigregister
Consolidation_swigregister(Consolidation)
# This file is compatible with both classic and new-style classes.
| en | 0.888409 | # This file was automatically generated by SWIG (http://www.swig.org). # Version 3.0.12 # # Do not make changes to this file unless you know what you are doing--modify # the SWIG interface file instead. # Python < 2.2 doesn't have 'property'. # This file is compatible with both classic and new-style classes. | 1.820299 | 2 |
NaC/nac.py | thdb-theo/Board-Games | 0 | 6619901 | """A simple noughts and crosses game with two players or against the computer.
Difficultu level is set with cmd argument between 1 and 4,
1 being complety random and 4 unbeatable"""
import sys
import textwrap
import argparse
import itertools
import operator
import numpy as np
from computer_move import next_move
parse = argparse.ArgumentParser('Noughts and Crosses')
parse.add_argument('dif', nargs='?', default='4', help='AI difficulty level. ')
args = parse.parse_args()
args.dif = int(args.dif)
assert 0 < args.dif < 5
class Board:
board = np.array([0] * 9)
player_sign = iter(np.random.permutation([1, -1]))
def __init__(self, name):
self.name = name
self.sign = next(self.player_sign)
def __str__(self):
subs = {0: '-', 1: 'X', -1: 'O'}
as_str = [subs.get(x) for x in self.board]
return textwrap.dedent(
'''
┌─┬─┬─┐\t\t┌─┬─┬─┐
│{0}│{1}│{2}│\t\t│1│2│3│
├─┼─┼─┤\t\t├─┼─┼─┤
│{3}│{4}│{5}│\t\t│4│5│6│
├─┼─┼─┤\t\t├─┼─┼─┤
│{6}│{7}│{8}│\t\t│7│8│9│
└─┴─┴─┘\t\t└─┴─┴─┘
'''.format(*as_str)
)
@staticmethod
def check_game_over(board):
board.resize(3, 3)
return (
all(board.diagonal()) or
all(np.fliplr(board).diagonal()) or
any(board.all(axis=0)) or
any(board.all(axis=1))
)
def available_moves(self):
return [i for i, x in enumerate(self.board) if not x]
def draw(self):
np_board = np.array(self.board)
player = itertools.cycle((self.sign, -self.sign))
opens = self.available_moves()
for future in itertools.permutations(opens, len(opens)):
copy_squares = np_board.copy()
for move in future:
copy_squares[move] = next(player)
for p in [self.sign, -self.sign]:
p_wins = copy_squares.copy()
p_wins[p_wins == p] = 0
if self.check_game_over(p_wins):
return False
elif len(opens) == 1:
return True
else:
return True
def take_turn(self):
move = self.get_move()
self.board[move] = self.sign
personal_board = self.board.copy()
personal_board[personal_board == -self.sign] = 0
if self.check_game_over(personal_board):
self.game_over()
elif self.draw():
self.game_over(draw=True)
def game_over(self, draw=False):
print(self)
if draw:
print('Det ble uavgjort')
else:
print('\n{0} vant!\n'.format(self.name))
sys.exit()
def get_move(self):
raise NotImplementedError('This is supposted to be overwritten,'
' but you ran it always you muppet!')
class Human(Board):
def get_move(self):
print(self)
alts = self.available_moves()
answ = ''
while answ not in alts:
try:
answ = int(input('{}: '.format(self.name))) - 1
except ValueError:
print('Ugyldig svar, prøv igjen')
else:
if answ not in alts:
print('Ugyldig svar, prøv igjen')
return answ
class AI(Board):
def get_move(self):
answ = int(next_move(args.dif, self.board, self.sign, self.sign))
print('{0} spilte {1}!'.format(self.name, answ + 1))
return answ
def game_loop():
p1 = Human('Mike')
p2 = AI('Freddy')
p1, p2 = reversed(sorted([p1, p2], key=operator.attrgetter('sign')))
while True:
p1.take_turn()
p2.take_turn()
if __name__ == '__main__':
print('Velkommen til Bondesjakk!')
game_loop()
| """A simple noughts and crosses game with two players or against the computer.
Difficultu level is set with cmd argument between 1 and 4,
1 being complety random and 4 unbeatable"""
import sys
import textwrap
import argparse
import itertools
import operator
import numpy as np
from computer_move import next_move
parse = argparse.ArgumentParser('Noughts and Crosses')
parse.add_argument('dif', nargs='?', default='4', help='AI difficulty level. ')
args = parse.parse_args()
args.dif = int(args.dif)
assert 0 < args.dif < 5
class Board:
board = np.array([0] * 9)
player_sign = iter(np.random.permutation([1, -1]))
def __init__(self, name):
self.name = name
self.sign = next(self.player_sign)
def __str__(self):
subs = {0: '-', 1: 'X', -1: 'O'}
as_str = [subs.get(x) for x in self.board]
return textwrap.dedent(
'''
┌─┬─┬─┐\t\t┌─┬─┬─┐
│{0}│{1}│{2}│\t\t│1│2│3│
├─┼─┼─┤\t\t├─┼─┼─┤
│{3}│{4}│{5}│\t\t│4│5│6│
├─┼─┼─┤\t\t├─┼─┼─┤
│{6}│{7}│{8}│\t\t│7│8│9│
└─┴─┴─┘\t\t└─┴─┴─┘
'''.format(*as_str)
)
@staticmethod
def check_game_over(board):
board.resize(3, 3)
return (
all(board.diagonal()) or
all(np.fliplr(board).diagonal()) or
any(board.all(axis=0)) or
any(board.all(axis=1))
)
def available_moves(self):
return [i for i, x in enumerate(self.board) if not x]
def draw(self):
np_board = np.array(self.board)
player = itertools.cycle((self.sign, -self.sign))
opens = self.available_moves()
for future in itertools.permutations(opens, len(opens)):
copy_squares = np_board.copy()
for move in future:
copy_squares[move] = next(player)
for p in [self.sign, -self.sign]:
p_wins = copy_squares.copy()
p_wins[p_wins == p] = 0
if self.check_game_over(p_wins):
return False
elif len(opens) == 1:
return True
else:
return True
def take_turn(self):
move = self.get_move()
self.board[move] = self.sign
personal_board = self.board.copy()
personal_board[personal_board == -self.sign] = 0
if self.check_game_over(personal_board):
self.game_over()
elif self.draw():
self.game_over(draw=True)
def game_over(self, draw=False):
print(self)
if draw:
print('Det ble uavgjort')
else:
print('\n{0} vant!\n'.format(self.name))
sys.exit()
def get_move(self):
raise NotImplementedError('This is supposted to be overwritten,'
' but you ran it always you muppet!')
class Human(Board):
def get_move(self):
print(self)
alts = self.available_moves()
answ = ''
while answ not in alts:
try:
answ = int(input('{}: '.format(self.name))) - 1
except ValueError:
print('Ugyldig svar, prøv igjen')
else:
if answ not in alts:
print('Ugyldig svar, prøv igjen')
return answ
class AI(Board):
def get_move(self):
answ = int(next_move(args.dif, self.board, self.sign, self.sign))
print('{0} spilte {1}!'.format(self.name, answ + 1))
return answ
def game_loop():
p1 = Human('Mike')
p2 = AI('Freddy')
p1, p2 = reversed(sorted([p1, p2], key=operator.attrgetter('sign')))
while True:
p1.take_turn()
p2.take_turn()
if __name__ == '__main__':
print('Velkommen til Bondesjakk!')
game_loop()
| en | 0.805165 | A simple noughts and crosses game with two players or against the computer. Difficultu level is set with cmd argument between 1 and 4, 1 being complety random and 4 unbeatable ┌─┬─┬─┐\t\t┌─┬─┬─┐ │{0}│{1}│{2}│\t\t│1│2│3│ ├─┼─┼─┤\t\t├─┼─┼─┤ │{3}│{4}│{5}│\t\t│4│5│6│ ├─┼─┼─┤\t\t├─┼─┼─┤ │{6}│{7}│{8}│\t\t│7│8│9│ └─┴─┴─┘\t\t└─┴─┴─┘ | 3.023642 | 3 |
player.py | iCurlmyster/rumble_bot | 0 | 6619902 | from db import *
from peewee import *
import random as r
import datetime
class Player(BaseModel):
"""
Player defines out the attributes that are needed for a rumble
"""
discord_id = IntegerField(unique=True)
name = CharField()
strength = IntegerField(default=1)
health = IntegerField(default=100)
last_workout = DateTimeField(default=datetime.datetime.now)
def get_player(did, n):
"""
get_player returns the player with the associated discord id from the database
or generates a new player if one does not already exist.
"""
pl = Player.select().where(Player.discord_id == did)
if len(pl) == 0:
pl = [Player.create(discord_id=did,name=n)]
ret_pl = pl[0]
# update name if changed
if ret_pl.name != n:
ret_pl.name = n
ret_pl.save()
return ret_pl
# TODO maybe invite the possibilty to vary the percentages based on how much they workout within a time frame
def increase_health(did, n):
"""
increase_health simulates a workout for the player and may or may not increase
the players strength.
"""
pl = get_player(did, n)
if pl.strength > 70:
return f"You are already at the max strength of {pl.strength}."
success = r.uniform(0,1)
prev_strength = pl.strength
# we use 80.0 to allow a reasonable percentage chance as you get closer to 70
if success <= (pl.strength/80.0):
return f"You didn't beat your PR this time! Strength: {prev_strength} -> {pl.strength}"
wild_card = r.uniform(0,1)
message = ""
if wild_card >= 0.95:
pl.strength = min(70, pl.strength + 2)
message = f"Workout went better than normal! Strength: {prev_strength} -> {pl.strength}"
elif wild_card <= 0.05:
pl.strength = max(1, pl.strength - 1)
message = f"You injured yourself during the workout! Strength: {prev_strength} -> {pl.strength}"
else:
pl.strength = pl.strength + 1
message = f"Workout went well! Strength: {prev_strength} -> {pl.strength}"
pl.last_workout = datetime.datetime.now()
pl.save()
return message
def player_summary(did, n):
"""
player_summary generates a stat summary of the given player.
"""
pl = get_player(did, n)
last_w = pl.last_workout.strftime("%A, %d. %B %Y %I:%M%p")
message = f"Rumble Player: {n}\nStrength: {pl.strength}\nLast Workout: {last_w}"
return message
if __name__ == '__main__':
with db:
db.create_tables([Player])
| from db import *
from peewee import *
import random as r
import datetime
class Player(BaseModel):
"""
Player defines out the attributes that are needed for a rumble
"""
discord_id = IntegerField(unique=True)
name = CharField()
strength = IntegerField(default=1)
health = IntegerField(default=100)
last_workout = DateTimeField(default=datetime.datetime.now)
def get_player(did, n):
"""
get_player returns the player with the associated discord id from the database
or generates a new player if one does not already exist.
"""
pl = Player.select().where(Player.discord_id == did)
if len(pl) == 0:
pl = [Player.create(discord_id=did,name=n)]
ret_pl = pl[0]
# update name if changed
if ret_pl.name != n:
ret_pl.name = n
ret_pl.save()
return ret_pl
# TODO maybe invite the possibilty to vary the percentages based on how much they workout within a time frame
def increase_health(did, n):
"""
increase_health simulates a workout for the player and may or may not increase
the players strength.
"""
pl = get_player(did, n)
if pl.strength > 70:
return f"You are already at the max strength of {pl.strength}."
success = r.uniform(0,1)
prev_strength = pl.strength
# we use 80.0 to allow a reasonable percentage chance as you get closer to 70
if success <= (pl.strength/80.0):
return f"You didn't beat your PR this time! Strength: {prev_strength} -> {pl.strength}"
wild_card = r.uniform(0,1)
message = ""
if wild_card >= 0.95:
pl.strength = min(70, pl.strength + 2)
message = f"Workout went better than normal! Strength: {prev_strength} -> {pl.strength}"
elif wild_card <= 0.05:
pl.strength = max(1, pl.strength - 1)
message = f"You injured yourself during the workout! Strength: {prev_strength} -> {pl.strength}"
else:
pl.strength = pl.strength + 1
message = f"Workout went well! Strength: {prev_strength} -> {pl.strength}"
pl.last_workout = datetime.datetime.now()
pl.save()
return message
def player_summary(did, n):
"""
player_summary generates a stat summary of the given player.
"""
pl = get_player(did, n)
last_w = pl.last_workout.strftime("%A, %d. %B %Y %I:%M%p")
message = f"Rumble Player: {n}\nStrength: {pl.strength}\nLast Workout: {last_w}"
return message
if __name__ == '__main__':
with db:
db.create_tables([Player])
| en | 0.930598 | Player defines out the attributes that are needed for a rumble get_player returns the player with the associated discord id from the database or generates a new player if one does not already exist. # update name if changed # TODO maybe invite the possibilty to vary the percentages based on how much they workout within a time frame increase_health simulates a workout for the player and may or may not increase the players strength. # we use 80.0 to allow a reasonable percentage chance as you get closer to 70 player_summary generates a stat summary of the given player. | 3.079519 | 3 |
CH10/get_format_name.py | yancqS/Python-tourial | 0 | 6619903 | <reponame>yancqS/Python-tourial
def get_format_name(first, last):
full_name = f"{first} {last}"
return full_name.title()
class AnonymousSurvey:
def __init__(self, question):
self.question = question
self.response = []
def show_question(self):
print(self.question)
def store_response(self, new_response):
self.response.append(new_response)
def show_result(self):
print("Survey result:")
for response in self.response:
print(f"- {response}")
| def get_format_name(first, last):
full_name = f"{first} {last}"
return full_name.title()
class AnonymousSurvey:
def __init__(self, question):
self.question = question
self.response = []
def show_question(self):
print(self.question)
def store_response(self, new_response):
self.response.append(new_response)
def show_result(self):
print("Survey result:")
for response in self.response:
print(f"- {response}") | none | 1 | 3.236256 | 3 | |
scripts/train_models.py | hopsparser/npdependency | 0 | 6619904 | <filename>scripts/train_models.py
from ast import literal_eval
import enum
from io import StringIO
import itertools
import json
import logging
import multiprocessing
import os.path
import pathlib
import shutil
import sys
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
import click
import pandas as pd
from loguru import logger
from rich import box
from rich.console import Console
from rich.progress import MofNCompleteColumn, Progress, TimeElapsedColumn, TaskID
from rich.table import Table
import transformers
import yaml
from hopsparser import parser
from hopsparser import utils
from hopsparser import conll2018_eval as evaluator
class Messages(enum.Enum):
CLOSE = enum.auto()
EPOCH_END = enum.auto()
LOG = enum.auto()
RUN_DONE = enum.auto()
RUN_START = enum.auto()
class TrainResults(NamedTuple):
dev_upos: float
dev_las: float
test_upos: float
test_las: float
def train_single_model(
additional_args: Dict[str, str],
config_file: pathlib.Path,
device: str,
dev_file: pathlib.Path,
log_epoch: Callable[[str, Dict[str, str]], Any],
output_dir: pathlib.Path,
test_file: pathlib.Path,
train_file: pathlib.Path,
) -> TrainResults:
output_dir.mkdir(exist_ok=True, parents=True)
log_handler = logger.add(
output_dir / "train.log",
level="DEBUG",
format=(
"[hops]" " {time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} |" " {message}"
),
colorize=False,
)
model_path = output_dir / "model"
shutil.copy(config_file, output_dir / config_file.name)
parser.train(
config_file=config_file,
dev_file=dev_file,
device=device,
train_file=train_file,
model_path=model_path,
**{k: literal_eval(v) for k, v in additional_args.items()},
log_epoch=log_epoch,
)
metrics_table = Table(box=box.HORIZONTALS)
metrics_table.add_column("Split")
metrics = ("UPOS", "UAS", "LAS")
for m in metrics:
metrics_table.add_column(m, justify="center")
if dev_file is not None:
parsed_devset_path = output_dir / f"{dev_file.stem}.parsed.conllu"
parser.parse(model_path, dev_file, parsed_devset_path, device=device)
gold_devset = evaluator.load_conllu_file(dev_file)
syst_devset = evaluator.load_conllu_file(parsed_devset_path)
dev_metrics = evaluator.evaluate(gold_devset, syst_devset)
metrics_table.add_row("Dev", *(f"{100*dev_metrics[m].f1:.2f}" for m in metrics))
if test_file is not None:
parsed_testset_path = output_dir / f"{test_file.stem}.parsed.conllu"
parser.parse(model_path, test_file, parsed_testset_path, device=device)
gold_testset = evaluator.load_conllu_file(test_file)
syst_testset = evaluator.load_conllu_file(parsed_testset_path)
test_metrics = evaluator.evaluate(gold_testset, syst_testset)
metrics_table.add_row(
"Test", *(f"{100*test_metrics[m].f1:.2f}" for m in metrics)
)
if metrics_table.rows:
out = Console(file=StringIO())
out.print(metrics_table)
logger.info(f"\n{out.file.getvalue()}")
logger.remove(log_handler)
return TrainResults(
dev_upos=dev_metrics["UPOS"].f1,
dev_las=dev_metrics["LAS"].f1,
test_upos=test_metrics["UPOS"].f1,
test_las=test_metrics["LAS"].f1,
)
def worker(device_queue, monitor_queue, name, kwargs) -> Tuple[str, TrainResults]:
# We use no more workers than devices so the queue should never be empty when launching the
# worker fun so we want to fail early here if the Queue is empty. It does not feel right but it
# works.
device = device_queue.get(block=False)
log_handle = setup_logging(
lambda m: monitor_queue.put((Messages.LOG, m)), rich_fmt=True
)
kwargs["device"] = device
logger.info(f"Start training {name} on {device}")
with open(kwargs["config_file"]) as in_stream:
n_epochs = yaml.load(in_stream, Loader=yaml.SafeLoader)["epochs"]
monitor_queue.put((Messages.RUN_START, (name, n_epochs)))
def log_epoch(*args, **kwargs):
utils.log_epoch(*args, **kwargs)
monitor_queue.put((Messages.EPOCH_END, name))
res = train_single_model(**kwargs, log_epoch=log_epoch)
device_queue.put(device)
# logger.info(f"Run {name} finished with results {res}")
monitor_queue.put((Messages.RUN_DONE, name))
logger.remove(log_handle)
return (name, res)
def run_multi(
runs: Sequence[Tuple[str, Dict[str, Any]]],
devices: List[str],
) -> List[Tuple[str, TrainResults]]:
with multiprocessing.Manager() as manager:
device_queue = manager.Queue()
for d in devices:
device_queue.put(d)
monitor_queue = manager.Queue()
monitor = multiprocessing.Process(
target=monitor_process,
kwargs={
"num_runs": len(runs),
"queue": monitor_queue,
},
)
monitor.start()
with multiprocessing.Pool(len(devices)) as pool:
res_future = pool.starmap_async(
worker,
((device_queue, monitor_queue, *r) for r in runs),
)
res = res_future.get()
monitor_queue.put((Messages.CLOSE, None))
monitor.join()
monitor.close()
return res
def monitor_process(num_runs: int, queue: multiprocessing.Queue):
with Progress(
*Progress.get_default_columns(),
MofNCompleteColumn(),
TimeElapsedColumn(),
utils.SpeedColumn(),
refresh_per_second=1.0,
speed_estimate_period=1800,
) as progress:
setup_logging(lambda m: progress.console.print(m, end=""), rich_fmt=True)
train_task = progress.add_task("Training", total=num_runs)
ongoing: Dict[str, TaskID] = dict()
while True:
try:
msg_type, msg = queue.get()
except EOFError:
break
if msg_type is Messages.CLOSE:
break
elif msg_type is Messages.EPOCH_END:
progress.advance(ongoing[msg])
elif msg_type is Messages.LOG:
logger.log(msg.record["level"].name, msg.record["message"])
elif msg_type is Messages.RUN_DONE:
progress.advance(train_task)
progress.remove_task(ongoing[msg])
ongoing.pop(msg)
elif msg_type is Messages.RUN_START:
ongoing[msg[0]] = progress.add_task(msg[0], total=msg[1])
else:
raise ValueError("Unknown message")
logger.complete()
def parse_args_callback(
_ctx: click.Context,
_opt: Union[click.Parameter, click.Option],
val: Optional[List[str]],
) -> Optional[List[Tuple[str, List[str]]]]:
if val is None:
return None
res: List[Tuple[str, List[str]]] = []
for v in val:
name, values = v.split("=", maxsplit=1)
res.append((name, values.split(",")))
return res
class InterceptHandler(logging.Handler):
def emit(self, record):
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
# Find caller from where originated the logged message
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level, record.getMessage()
)
def setup_logging(sink=sys.stderr, rich_fmt: bool = False):
appname = "\\[hops_trainer]" if rich_fmt else "[hops_trainer]"
log_level = "INFO"
log_fmt = (
f"{appname}"
" <green>{time:YYYY-MM-DD}T{time:HH:mm:ss}</green> {level}: "
" <level>{message}</level>"
)
# FIXME: I hate this but it's the easiest way
if rich_fmt:
log_fmt = log_fmt.replace("<", "[").replace(">", "]")
# Deal with stdlib.logging
transformers.utils.logging.disable_default_handler()
transformers.utils.logging.disable_progress_bar()
# FIXME: I found no easy public way to avoid reaching to the private interaface here
# Avoid adding the intercepter multiple times
if not any(
isinstance(handler, InterceptHandler)
for handler in transformers.utils.logging._get_library_root_logger().handlers
):
transformers.utils.logging.add_handler(InterceptHandler())
return logger.add(
sink,
colorize=True,
enqueue=True,
format=log_fmt,
level=log_level,
)
@click.command()
@click.argument(
"configs_dir",
type=click.Path(
resolve_path=True, exists=True, file_okay=False, path_type=pathlib.Path
),
)
@click.argument(
"treebanks_dir",
type=click.Path(
resolve_path=True, exists=True, file_okay=False, path_type=pathlib.Path
),
)
@click.option(
"--args",
multiple=True,
callback=parse_args_callback,
help=(
"An additional list of values for an argument, given as `name=value,value2,…`."
" Leave a trailing comma to also run the default value of the argument"
" Can be provided several times for different arguments."
" Path options should have different file names."
),
)
@click.option(
"--devices",
default="cpu",
callback=(lambda _ctx, _opt, val: val.split(",")),
help="A comma-separated list of devices to run on.",
)
@click.option(
"--out-dir",
default=".",
type=click.Path(
resolve_path=True, exists=False, file_okay=False, path_type=pathlib.Path
),
)
@click.option("--prefix", default="", help="A custom prefix to prepend to run names.")
@click.option(
"--rand-seeds",
callback=(
lambda _ctx, _opt, val: None
if val is None
else [int(v) for v in val.split(",") if v]
),
help=(
"A comma-separated list of random seeds to try and run stats on."
" Only the seed with the best result will be kept for every running config."
),
)
def main(
args: Optional[List[Tuple[str, List[str]]]],
configs_dir: pathlib.Path,
devices: List[str],
out_dir: pathlib.Path,
prefix: str,
rand_seeds: Optional[List[int]],
treebanks_dir: pathlib.Path,
):
logger.remove(0)
logging_handler = setup_logging()
out_dir.mkdir(parents=True, exist_ok=True)
treebanks = [train.parent for train in treebanks_dir.glob("**/*train.conllu")]
logger.info(f"Training on {len(treebanks)} treebanks.")
configs = list(configs_dir.glob("*.yaml"))
logger.info(f"Training using {len(configs)} configs.")
if rand_seeds is not None:
args = [
("rand_seed", [str(s) for s in rand_seeds]),
*(args if args is not None else []),
]
logger.info(f"Training with {len(rand_seeds)} rand seeds.")
additional_args_combinations: List[Dict[str, str]]
if args:
args_names, all_args_values = map(list, zip(*args))
additional_args_combinations = [
dict(zip(args_names, args_values))
for args_values in itertools.product(*all_args_values)
]
else:
args_names = []
additional_args_combinations = [{}]
runs: List[Tuple[str, Dict[str, Any]]] = []
runs_dict: Dict[str, Dict] = dict()
skipped_res: List[Tuple[str, TrainResults]] = []
for t in treebanks:
for c in configs:
train_file = next(t.glob("*train.conllu"))
dev_file = next(t.glob("*dev.conllu"))
test_file = next(t.glob("*test.conllu"))
common_params = {
"train_file": train_file,
"dev_file": dev_file,
"test_file": test_file,
"config_file": c,
}
run_base_name = f"{prefix}{t.name}-{c.stem}"
run_out_root_dir = out_dir / run_base_name
for additional_args in additional_args_combinations:
if not additional_args:
run_out_dir = run_out_root_dir
run_name = run_base_name
else:
args_combination_str = "+".join(
f"{n}={os.path.basename(v)}" if v else f"no{n}"
for n, v in additional_args.items()
)
run_out_dir = run_out_root_dir / args_combination_str
run_name = f"{run_base_name}+{args_combination_str}"
run_args = {
**common_params,
"additional_args": additional_args,
"output_dir": run_out_dir,
}
runs_dict[run_name] = run_args
if run_out_dir.exists():
parsed_dev = run_out_dir / f"{dev_file.stem}.parsed.conllu"
parsed_test = run_out_dir / f"{test_file.stem}.parsed.conllu"
if parsed_dev.exists() and parsed_test.exists():
try:
gold_devset = evaluator.load_conllu_file(dev_file)
syst_devset = evaluator.load_conllu_file(parsed_dev)
dev_metrics = evaluator.evaluate(gold_devset, syst_devset)
except evaluator.UDError as e:
raise ValueError(
f"Corrupted parsed dev file for {run_out_dir}"
) from e
try:
gold_testset = evaluator.load_conllu_file(test_file)
syst_testset = evaluator.load_conllu_file(parsed_test)
test_metrics = evaluator.evaluate(
gold_testset, syst_testset
)
except evaluator.UDError as e:
raise ValueError(
f"Corrupted parsed test file for {run_out_dir}"
) from e
skip_res = TrainResults(
dev_upos=dev_metrics["UPOS"].f1,
dev_las=dev_metrics["LAS"].f1,
test_upos=test_metrics["UPOS"].f1,
test_las=test_metrics["LAS"].f1,
)
skipped_res.append((run_name, skip_res))
logger.info(
f"{run_out_dir} already exists, skipping run {run_name}. Results were {skip_res}"
)
continue
else:
logger.warning(
f"Incomplete run in {run_out_dir}, skipping it. You will probably want to delete it and rerun."
)
continue
runs.append((run_name, run_args))
logger.info(f"Starting {len(runs)} runs.")
logger.remove(logging_handler)
res = run_multi(runs, devices)
setup_logging()
logger.info("Done with training")
res.extend(skipped_res)
report_file = out_dir / "full_report.json"
if report_file.exists():
with open(report_file) as in_stream:
report_dict = json.load(in_stream)
else:
report_dict = dict()
for name, scores in res:
run = runs_dict[name]
report_dict[name] = {
"additional_args": run["additional_args"],
"config": str(run["config_file"]),
"output_dir": str(run["output_dir"]),
"results": scores._asdict(),
"treebank": run["train_file"].parent.name,
}
with open(report_file, "w") as out_stream:
json.dump(report_dict, out_stream)
summary_file = out_dir / "summary.tsv"
if rand_seeds is None:
with open(summary_file, "w") as out_stream:
summary_file.write_text("run\tdev UPOS\tdev LAS\ttest UPOS\ttest LAS\n")
for name, report in report_dict.items():
out_stream.write(name)
for s in ("dev_upos", "dev_las", "test_upos", "test_las"):
out_stream.write(f"\t{100*report['results'][s]:.2f}")
out_stream.write("\n")
else:
df_dict = {
run_name: {
**{
k: v
for k, v in run_report.items()
if k not in ("additional_args", "results")
},
**run_report["additional_args"],
**run_report["results"],
}
for run_name, run_report in report_dict.items()
}
df = pd.DataFrame.from_dict(df_dict, orient="index")
df.to_csv(out_dir / "full_report.csv")
grouped = df.groupby(
["config", "treebank", *(a for a in args_names if a != "rand_seed")],
)
grouped[["dev_upos", "dev_las", "test_upos", "test_las"]].describe().to_csv(
summary_file
)
best_dir = out_dir / "best"
best_dir.mkdir(exist_ok=True, parents=True)
with open(best_dir / "models.md", "w") as out_stream:
out_stream.write(
"| Model name | UPOS (dev) | LAS (dev) | UPOS (test) | LAS (test) | Download |\n"
"|:-----------|:----------:|:---------:|:-----------:|:----------:|:--------:|\n"
)
for run_name, report in sorted(
df.loc[grouped["dev_las"].idxmax()].iterrows()
):
shutil.copytree(
report["output_dir"], best_dir / run_name, dirs_exist_ok=True
)
model_name = run_name.split("+", maxsplit=1)[0]
out_stream.write("| ")
out_stream.write(
" | ".join(
[
model_name,
*(
f"{100*report[v]:.2f}"
for v in [
"dev_upos",
"dev_las",
"test_upos",
"test_las",
]
),
]
)
)
out_stream.write(f" | [link][{model_name}] |\n")
if __name__ == "__main__":
main()
| <filename>scripts/train_models.py
from ast import literal_eval
import enum
from io import StringIO
import itertools
import json
import logging
import multiprocessing
import os.path
import pathlib
import shutil
import sys
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
import click
import pandas as pd
from loguru import logger
from rich import box
from rich.console import Console
from rich.progress import MofNCompleteColumn, Progress, TimeElapsedColumn, TaskID
from rich.table import Table
import transformers
import yaml
from hopsparser import parser
from hopsparser import utils
from hopsparser import conll2018_eval as evaluator
class Messages(enum.Enum):
    """Message types exchanged between training workers and the monitor process."""
    CLOSE = enum.auto()      # tell the monitor to shut down
    EPOCH_END = enum.auto()  # one training epoch finished (payload: run name)
    LOG = enum.auto()        # forwarded loguru record (payload: message)
    RUN_DONE = enum.auto()   # a run completed (payload: run name)
    RUN_START = enum.auto()  # a run started (payload: (run name, epoch count))
class TrainResults(NamedTuple):
    """UPOS/LAS F1 scores (fractions in [0, 1]) for one trained model."""
    dev_upos: float
    dev_las: float
    test_upos: float
    test_las: float
def train_single_model(
    additional_args: Dict[str, str],
    config_file: pathlib.Path,
    device: str,
    dev_file: Optional[pathlib.Path],
    log_epoch: Callable[[str, Dict[str, str]], Any],
    output_dir: pathlib.Path,
    test_file: Optional[pathlib.Path],
    train_file: pathlib.Path,
) -> TrainResults:
    """Train one parser and evaluate it on the dev and test splits.

    Returns a TrainResults with UPOS/LAS F1 scores. A split whose file is
    None scores NaN instead of crashing (the body already guarded on None,
    but the return statement did not).
    """
    output_dir.mkdir(exist_ok=True, parents=True)
    # Per-run log file, in addition to whatever sinks the caller installed.
    log_handler = logger.add(
        output_dir / "train.log",
        level="DEBUG",
        format=(
            "[hops]" " {time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} |" " {message}"
        ),
        colorize=False,
    )
    model_path = output_dir / "model"
    shutil.copy(config_file, output_dir / config_file.name)
    parser.train(
        config_file=config_file,
        dev_file=dev_file,
        device=device,
        train_file=train_file,
        model_path=model_path,
        **{k: literal_eval(v) for k, v in additional_args.items()},
        log_epoch=log_epoch,
    )
    metrics_table = Table(box=box.HORIZONTALS)
    metrics_table.add_column("Split")
    metrics = ("UPOS", "UAS", "LAS")
    for m in metrics:
        metrics_table.add_column(m, justify="center")
    # BUG FIX: dev_metrics/test_metrics were referenced unconditionally in
    # the final return, raising NameError when the corresponding file was
    # None. Initialize them and fall back to NaN scores instead.
    dev_metrics = None
    test_metrics = None
    if dev_file is not None:
        parsed_devset_path = output_dir / f"{dev_file.stem}.parsed.conllu"
        parser.parse(model_path, dev_file, parsed_devset_path, device=device)
        gold_devset = evaluator.load_conllu_file(dev_file)
        syst_devset = evaluator.load_conllu_file(parsed_devset_path)
        dev_metrics = evaluator.evaluate(gold_devset, syst_devset)
        metrics_table.add_row("Dev", *(f"{100*dev_metrics[m].f1:.2f}" for m in metrics))
    if test_file is not None:
        parsed_testset_path = output_dir / f"{test_file.stem}.parsed.conllu"
        parser.parse(model_path, test_file, parsed_testset_path, device=device)
        gold_testset = evaluator.load_conllu_file(test_file)
        syst_testset = evaluator.load_conllu_file(parsed_testset_path)
        test_metrics = evaluator.evaluate(gold_testset, syst_testset)
        metrics_table.add_row(
            "Test", *(f"{100*test_metrics[m].f1:.2f}" for m in metrics)
        )
    if metrics_table.rows:
        # Render the table to a string so it goes through the logger.
        out = Console(file=StringIO())
        out.print(metrics_table)
        logger.info(f"\n{out.file.getvalue()}")
    logger.remove(log_handler)
    return TrainResults(
        dev_upos=dev_metrics["UPOS"].f1 if dev_metrics is not None else float("nan"),
        dev_las=dev_metrics["LAS"].f1 if dev_metrics is not None else float("nan"),
        test_upos=test_metrics["UPOS"].f1 if test_metrics is not None else float("nan"),
        test_las=test_metrics["LAS"].f1 if test_metrics is not None else float("nan"),
    )
def worker(device_queue, monitor_queue, name, kwargs) -> Tuple[str, TrainResults]:
    """Pool worker: claim a device, train one run, return (run name, results).

    Progress and log records are forwarded to the monitor process through
    `monitor_queue`; the device is put back on `device_queue` when done.
    """
    # We use no more workers than devices so the queue should never be empty when launching the
    # worker fun so we want to fail early here if the Queue is empty. It does not feel right but it
    # works.
    device = device_queue.get(block=False)
    # Route this worker's loguru records to the monitor's console.
    log_handle = setup_logging(
        lambda m: monitor_queue.put((Messages.LOG, m)), rich_fmt=True
    )
    kwargs["device"] = device
    logger.info(f"Start training {name} on {device}")
    # The epoch count gives the monitor the total for this run's progress bar.
    with open(kwargs["config_file"]) as in_stream:
        n_epochs = yaml.load(in_stream, Loader=yaml.SafeLoader)["epochs"]
    monitor_queue.put((Messages.RUN_START, (name, n_epochs)))
    def log_epoch(*args, **kwargs):
        # Wrap the normal epoch logger so the monitor can advance the bar.
        utils.log_epoch(*args, **kwargs)
        monitor_queue.put((Messages.EPOCH_END, name))
    res = train_single_model(**kwargs, log_epoch=log_epoch)
    device_queue.put(device)
    # logger.info(f"Run {name} finished with results {res}")
    monitor_queue.put((Messages.RUN_DONE, name))
    logger.remove(log_handle)
    return (name, res)
def run_multi(
    runs: Sequence[Tuple[str, Dict[str, Any]]],
    devices: List[str],
) -> List[Tuple[str, TrainResults]]:
    """Train all `runs` in parallel, one worker per device.

    Devices are handed out through a shared queue; a dedicated monitor
    process renders progress bars and relays worker logs.
    """
    with multiprocessing.Manager() as manager:
        device_queue = manager.Queue()
        for d in devices:
            device_queue.put(d)
        monitor_queue = manager.Queue()
        monitor = multiprocessing.Process(
            target=monitor_process,
            kwargs={
                "num_runs": len(runs),
                "queue": monitor_queue,
            },
        )
        monitor.start()
        # One pool slot per device: each worker claims a device on start.
        with multiprocessing.Pool(len(devices)) as pool:
            res_future = pool.starmap_async(
                worker,
                ((device_queue, monitor_queue, *r) for r in runs),
            )
            res = res_future.get()
        # All runs finished: tell the monitor to shut down, then reap it.
        monitor_queue.put((Messages.CLOSE, None))
        monitor.join()
        monitor.close()
    return res
def monitor_process(num_runs: int, queue: multiprocessing.Queue):
    """Render progress bars and relay worker logs until CLOSE is received.

    Runs in its own process; consumes (Messages, payload) tuples put on
    `queue` by the workers.
    """
    with Progress(
        *Progress.get_default_columns(),
        MofNCompleteColumn(),
        TimeElapsedColumn(),
        utils.SpeedColumn(),
        refresh_per_second=1.0,
        speed_estimate_period=1800,
    ) as progress:
        # Worker log records are printed above the live progress bars.
        setup_logging(lambda m: progress.console.print(m, end=""), rich_fmt=True)
        train_task = progress.add_task("Training", total=num_runs)
        # One progress task per in-flight run, keyed by run name.
        ongoing: Dict[str, TaskID] = dict()
        while True:
            try:
                msg_type, msg = queue.get()
            except EOFError:
                # The queue's manager went away: stop monitoring.
                break
            if msg_type is Messages.CLOSE:
                break
            elif msg_type is Messages.EPOCH_END:
                progress.advance(ongoing[msg])
            elif msg_type is Messages.LOG:
                logger.log(msg.record["level"].name, msg.record["message"])
            elif msg_type is Messages.RUN_DONE:
                progress.advance(train_task)
                progress.remove_task(ongoing[msg])
                ongoing.pop(msg)
            elif msg_type is Messages.RUN_START:
                # Payload is (run name, total epoch count).
                ongoing[msg[0]] = progress.add_task(msg[0], total=msg[1])
            else:
                raise ValueError("Unknown message")
    logger.complete()
def parse_args_callback(
    _ctx: click.Context,
    _opt: Union[click.Parameter, click.Option],
    val: Optional[List[str]],
) -> Optional[List[Tuple[str, List[str]]]]:
    """Parse repeated `--args name=v1,v2,…` options into (name, values) pairs."""
    if val is None:
        return None
    pairs: List[Tuple[str, List[str]]] = []
    for item in val:
        # Split off the argument name, then the comma-separated value list.
        arg_name, raw_values = item.split("=", maxsplit=1)
        pairs.append((arg_name, raw_values.split(",")))
    return pairs
class InterceptHandler(logging.Handler):
    """stdlib `logging` handler that forwards every record to loguru.

    This is the standard recipe from the loguru documentation: resolve the
    matching loguru level, then walk up the stack past `logging`'s own
    frames so the original caller is attributed correctly.
    """
    def emit(self, record):
        # Get corresponding Loguru level if it exists
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            # No loguru level with that name: fall back to the numeric level.
            level = record.levelno
        # Find caller from where originated the logged message
        frame, depth = logging.currentframe(), 2
        while frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1
        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )
def setup_logging(sink=sys.stderr, rich_fmt: bool = False):
    """Install a loguru sink and route transformers/stdlib logging into it.

    Returns the loguru handler id so the caller can `logger.remove` it later.
    With `rich_fmt`, markup delimiters are converted for rich consoles.
    """
    appname = "\\[hops_trainer]" if rich_fmt else "[hops_trainer]"
    log_level = "INFO"
    log_fmt = (
        f"{appname}"
        " <green>{time:YYYY-MM-DD}T{time:HH:mm:ss}</green> {level}: "
        " <level>{message}</level>"
    )
    # FIXME: I hate this but it's the easiest way
    if rich_fmt:
        # rich markup uses [..] where loguru's colorizer uses <..>.
        log_fmt = log_fmt.replace("<", "[").replace(">", "]")
    # Deal with stdlib.logging
    transformers.utils.logging.disable_default_handler()
    transformers.utils.logging.disable_progress_bar()
    # FIXME: I found no easy public way to avoid reaching to the private interaface here
    # Avoid adding the intercepter multiple times
    if not any(
        isinstance(handler, InterceptHandler)
        for handler in transformers.utils.logging._get_library_root_logger().handlers
    ):
        transformers.utils.logging.add_handler(InterceptHandler())
    return logger.add(
        sink,
        colorize=True,
        enqueue=True,
        format=log_fmt,
        level=log_level,
    )
@click.command()
@click.argument(
    "configs_dir",
    type=click.Path(
        resolve_path=True, exists=True, file_okay=False, path_type=pathlib.Path
    ),
)
@click.argument(
    "treebanks_dir",
    type=click.Path(
        resolve_path=True, exists=True, file_okay=False, path_type=pathlib.Path
    ),
)
@click.option(
    "--args",
    multiple=True,
    callback=parse_args_callback,
    help=(
        "An additional list of values for an argument, given as `name=value,value2,…`."
        " Leave a trailing comma to also run the default value of the argument"
        " Can be provided several times for different arguments."
        " Path options should have different file names."
    ),
)
@click.option(
    "--devices",
    default="cpu",
    callback=(lambda _ctx, _opt, val: val.split(",")),
    help="A comma-separated list of devices to run on.",
)
@click.option(
    "--out-dir",
    default=".",
    type=click.Path(
        resolve_path=True, exists=False, file_okay=False, path_type=pathlib.Path
    ),
)
@click.option("--prefix", default="", help="A custom prefix to prepend to run names.")
@click.option(
    "--rand-seeds",
    callback=(
        lambda _ctx, _opt, val: None
        if val is None
        else [int(v) for v in val.split(",") if v]
    ),
    help=(
        "A comma-separated list of random seeds to try and run stats on."
        " Only the seed with the best result will be kept for every running config."
    ),
)
def main(
    args: Optional[List[Tuple[str, List[str]]]],
    configs_dir: pathlib.Path,
    devices: List[str],
    out_dir: pathlib.Path,
    prefix: str,
    rand_seeds: Optional[List[int]],
    treebanks_dir: pathlib.Path,
):
    """Train a grid of parsers (config x treebank x extra args) and report.

    Existing complete runs are skipped and their scores reloaded; results
    are merged into JSON/TSV reports and, with --rand-seeds, the best seed
    per configuration is copied into `out_dir/best`.
    """
    logger.remove(0)
    logging_handler = setup_logging()
    out_dir.mkdir(parents=True, exist_ok=True)
    treebanks = [train.parent for train in treebanks_dir.glob("**/*train.conllu")]
    logger.info(f"Training on {len(treebanks)} treebanks.")
    configs = list(configs_dir.glob("*.yaml"))
    logger.info(f"Training using {len(configs)} configs.")
    if rand_seeds is not None:
        # Treat the random seed as one more grid axis.
        args = [
            ("rand_seed", [str(s) for s in rand_seeds]),
            *(args if args is not None else []),
        ]
        logger.info(f"Training with {len(rand_seeds)} rand seeds.")
    additional_args_combinations: List[Dict[str, str]]
    if args:
        args_names, all_args_values = map(list, zip(*args))
        additional_args_combinations = [
            dict(zip(args_names, args_values))
            for args_values in itertools.product(*all_args_values)
        ]
    else:
        args_names = []
        additional_args_combinations = [{}]
    runs: List[Tuple[str, Dict[str, Any]]] = []
    runs_dict: Dict[str, Dict] = dict()
    skipped_res: List[Tuple[str, TrainResults]] = []
    for t in treebanks:
        for c in configs:
            train_file = next(t.glob("*train.conllu"))
            dev_file = next(t.glob("*dev.conllu"))
            test_file = next(t.glob("*test.conllu"))
            common_params = {
                "train_file": train_file,
                "dev_file": dev_file,
                "test_file": test_file,
                "config_file": c,
            }
            run_base_name = f"{prefix}{t.name}-{c.stem}"
            run_out_root_dir = out_dir / run_base_name
            for additional_args in additional_args_combinations:
                if not additional_args:
                    run_out_dir = run_out_root_dir
                    run_name = run_base_name
                else:
                    args_combination_str = "+".join(
                        f"{n}={os.path.basename(v)}" if v else f"no{n}"
                        for n, v in additional_args.items()
                    )
                    run_out_dir = run_out_root_dir / args_combination_str
                    run_name = f"{run_base_name}+{args_combination_str}"
                run_args = {
                    **common_params,
                    "additional_args": additional_args,
                    "output_dir": run_out_dir,
                }
                runs_dict[run_name] = run_args
                if run_out_dir.exists():
                    # A previous (possibly partial) run: reuse its scores if
                    # both parsed splits are present and readable.
                    parsed_dev = run_out_dir / f"{dev_file.stem}.parsed.conllu"
                    parsed_test = run_out_dir / f"{test_file.stem}.parsed.conllu"
                    if parsed_dev.exists() and parsed_test.exists():
                        try:
                            gold_devset = evaluator.load_conllu_file(dev_file)
                            syst_devset = evaluator.load_conllu_file(parsed_dev)
                            dev_metrics = evaluator.evaluate(gold_devset, syst_devset)
                        except evaluator.UDError as e:
                            raise ValueError(
                                f"Corrupted parsed dev file for {run_out_dir}"
                            ) from e
                        try:
                            gold_testset = evaluator.load_conllu_file(test_file)
                            syst_testset = evaluator.load_conllu_file(parsed_test)
                            test_metrics = evaluator.evaluate(
                                gold_testset, syst_testset
                            )
                        except evaluator.UDError as e:
                            raise ValueError(
                                f"Corrupted parsed test file for {run_out_dir}"
                            ) from e
                        skip_res = TrainResults(
                            dev_upos=dev_metrics["UPOS"].f1,
                            dev_las=dev_metrics["LAS"].f1,
                            test_upos=test_metrics["UPOS"].f1,
                            test_las=test_metrics["LAS"].f1,
                        )
                        skipped_res.append((run_name, skip_res))
                        logger.info(
                            f"{run_out_dir} already exists, skipping run {run_name}. Results were {skip_res}"
                        )
                        continue
                    else:
                        logger.warning(
                            f"Incomplete run in {run_out_dir}, skipping it. You will probably want to delete it and rerun."
                        )
                        continue
                runs.append((run_name, run_args))
    logger.info(f"Starting {len(runs)} runs.")
    logger.remove(logging_handler)
    res = run_multi(runs, devices)
    setup_logging()
    logger.info("Done with training")
    res.extend(skipped_res)
    # Merge with any earlier report so repeated invocations accumulate.
    report_file = out_dir / "full_report.json"
    if report_file.exists():
        with open(report_file) as in_stream:
            report_dict = json.load(in_stream)
    else:
        report_dict = dict()
    for name, scores in res:
        run = runs_dict[name]
        report_dict[name] = {
            "additional_args": run["additional_args"],
            "config": str(run["config_file"]),
            "output_dir": str(run["output_dir"]),
            "results": scores._asdict(),
            "treebank": run["train_file"].parent.name,
        }
    with open(report_file, "w") as out_stream:
        json.dump(report_dict, out_stream)
    summary_file = out_dir / "summary.tsv"
    if rand_seeds is None:
        with open(summary_file, "w") as out_stream:
            # BUG FIX: the header used to be written with
            # `summary_file.write_text(...)`, which opens the same path a
            # second time; the still-open `out_stream` then wrote from
            # position 0 and clobbered it. Write the header through
            # `out_stream` like the data rows.
            out_stream.write("run\tdev UPOS\tdev LAS\ttest UPOS\ttest LAS\n")
            for name, report in report_dict.items():
                out_stream.write(name)
                for s in ("dev_upos", "dev_las", "test_upos", "test_las"):
                    out_stream.write(f"\t{100*report['results'][s]:.2f}")
                out_stream.write("\n")
    else:
        # Several seeds per config: aggregate with pandas instead.
        df_dict = {
            run_name: {
                **{
                    k: v
                    for k, v in run_report.items()
                    if k not in ("additional_args", "results")
                },
                **run_report["additional_args"],
                **run_report["results"],
            }
            for run_name, run_report in report_dict.items()
        }
        df = pd.DataFrame.from_dict(df_dict, orient="index")
        df.to_csv(out_dir / "full_report.csv")
        grouped = df.groupby(
            ["config", "treebank", *(a for a in args_names if a != "rand_seed")],
        )
        grouped[["dev_upos", "dev_las", "test_upos", "test_las"]].describe().to_csv(
            summary_file
        )
        # Keep only the best seed (by dev LAS) of every group in `best`.
        best_dir = out_dir / "best"
        best_dir.mkdir(exist_ok=True, parents=True)
        with open(best_dir / "models.md", "w") as out_stream:
            out_stream.write(
                "| Model name | UPOS (dev) | LAS (dev) | UPOS (test) | LAS (test) | Download |\n"
                "|:-----------|:----------:|:---------:|:-----------:|:----------:|:--------:|\n"
            )
            for run_name, report in sorted(
                df.loc[grouped["dev_las"].idxmax()].iterrows()
            ):
                shutil.copytree(
                    report["output_dir"], best_dir / run_name, dirs_exist_ok=True
                )
                model_name = run_name.split("+", maxsplit=1)[0]
                out_stream.write("| ")
                out_stream.write(
                    " | ".join(
                        [
                            model_name,
                            *(
                                f"{100*report[v]:.2f}"
                                for v in [
                                    "dev_upos",
                                    "dev_las",
                                    "test_upos",
                                    "test_las",
                                ]
                            ),
                        ]
                    )
                )
                out_stream.write(f" | [link][{model_name}] |\n")
# Script entry point.
if __name__ == "__main__":
    main()
| en | 0.840602 | # We use no more workers than devices so the queue should never be empty when launching the # worker fun so we want to fail early here if the Queue is empty. It does not feel right but it # works. # logger.info(f"Run {name} finished with results {res}") # Get corresponding Loguru level if it exists # Find caller from where originated the logged message # FIXME: I hate this but it's the easiest way # Deal with stdlib.logging # FIXME: I found no easy public way to avoid reaching to the private interaface here # Avoid adding the intercepter multiple times | 1.983945 | 2 |
book_figures/chapter1/fig_mercator.py | aragilar/astroML | 3 | 6619905 | """
Mercator Projection
-------------------
Figure1.13.
The Mercator projection. Shown are the projections of circles of constant
radius 10 degrees across the sky. Note that the area is not preserved by the
Mercator projection: the projection increases the size of finite regions on
the sphere, with a magnitude which increases at high latitudes.
"""
# Author: <NAME>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.plotting import plot_tissot_ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# generate a latitude/longitude grid
circ_long = np.linspace(-np.pi, np.pi, 13)[1:-1]
circ_lat = np.linspace(-np.pi / 2, np.pi / 2, 7)[1:-1]
radius = 10 * np.pi / 180.
#------------------------------------------------------------
# plot Mercator projection: we need to set this up manually
def mercator_axes():
    """Create matplotlib axes laid out for a Mercator (lon/lat) plot.

    Ticks are placed every 30 degrees in longitude and 15 in latitude and
    labelled in degrees, while the underlying data stays in radians.
    """
    ax = plt.axes(aspect=1.0)
    ax.set_xticks(np.pi / 6 * np.linspace(-5, 5, 11))
    ax.set_yticks(np.pi / 12 * np.linspace(-5, 5, 11))
    for axy in (ax.xaxis, ax.yaxis):
        # Convert radians to whole degrees for the tick labels.
        axy.set_major_formatter(plt.FuncFormatter(lambda s, a: r'$%i^\circ$'
                                % np.round(s * 180 / np.pi)))
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    return ax
plt.figure(figsize=(5, 3.75))
ax = mercator_axes()
ax.grid(True)
# Tissot ellipses: projected circles of constant radius, visualizing the
# Mercator projection's area distortion growing toward the poles.
plot_tissot_ellipse(circ_long[:, None], circ_lat, radius,
                    ax=ax, fc='k', alpha=0.3, lw=0)
ax.set_title('Mercator projection')
plt.show()
| """
Mercator Projection
-------------------
Figure1.13.
The Mercator projection. Shown are the projections of circles of constant
radius 10 degrees across the sky. Note that the area is not preserved by the
Mercator projection: the projection increases the size of finite regions on
the sphere, with a magnitude which increases at high latitudes.
"""
# Author: <NAME>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.plotting import plot_tissot_ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# generate a latitude/longitude grid
circ_long = np.linspace(-np.pi, np.pi, 13)[1:-1]
circ_lat = np.linspace(-np.pi / 2, np.pi / 2, 7)[1:-1]
radius = 10 * np.pi / 180.
#------------------------------------------------------------
# plot Mercator projection: we need to set this up manually
def mercator_axes():
    """Create matplotlib axes laid out for a Mercator (lon/lat) plot.

    Ticks are placed every 30 degrees in longitude and 15 in latitude and
    labelled in degrees, while the underlying data stays in radians.
    """
    ax = plt.axes(aspect=1.0)
    ax.set_xticks(np.pi / 6 * np.linspace(-5, 5, 11))
    ax.set_yticks(np.pi / 12 * np.linspace(-5, 5, 11))
    for axy in (ax.xaxis, ax.yaxis):
        # Convert radians to whole degrees for the tick labels.
        axy.set_major_formatter(plt.FuncFormatter(lambda s, a: r'$%i^\circ$'
                                % np.round(s * 180 / np.pi)))
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    return ax
plt.figure(figsize=(5, 3.75))
ax = mercator_axes()
ax.grid(True)
# Tissot ellipses: projected circles of constant radius, visualizing the
# Mercator projection's area distortion growing toward the poles.
plot_tissot_ellipse(circ_long[:, None], circ_lat, radius,
                    ax=ax, fc='k', alpha=0.3, lw=0)
ax.set_title('Mercator projection')
plt.show()
| en | 0.725389 | Mercator Projection ------------------- Figure1.13. The Mercator projection. Shown are the projections of circles of constant radius 10 degrees across the sky. Note that the area is not preserved by the Mercator projection: the projection increases the size of finite regions on the sphere, with a magnitude which increases at high latitudes. # Author: <NAME> # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com # To report a bug or issue, use the following forum: # https://groups.google.com/forum/#!forum/astroml-general #---------------------------------------------------------------------- # This function adjusts matplotlib settings for a uniform feel in the textbook. # Note that with usetex=True, fonts are rendered with LaTeX. This may # result in an error if LaTeX is not installed on your system. In that case, # you can set usetex to False. #------------------------------------------------------------ # generate a latitude/longitude grid #------------------------------------------------------------ # plot Mercator projection: we need to set this up manually | 3.608394 | 4 |
com/Leetcode/895.MaximumFrequencyStack.py | samkitsheth95/InterviewPrep | 0 | 6619906 | <gh_stars>0
from collections import defaultdict
class FreqStack:
    """Stack that pops the most frequent element, breaking ties toward the
    element pushed most recently among the most frequent (LeetCode 895)."""

    def __init__(self):
        self.freqCount = defaultdict(int)    # value -> current frequency
        self.countstacks = defaultdict(list) # frequency -> stack of values
        self.maxfreq = 0                     # highest frequency present

    def push(self, x: int) -> None:
        """Push x in O(1), recording it on the stack for its new frequency."""
        new_freq = self.freqCount[x] + 1
        self.freqCount[x] = new_freq
        self.countstacks[new_freq].append(x)
        # Frequencies only grow one step at a time, so this tracks the max.
        if new_freq > self.maxfreq:
            self.maxfreq = new_freq

    def pop(self) -> int:
        """Pop and return the most frequent (most recent on ties) value, O(1)."""
        top = self.countstacks[self.maxfreq].pop()
        self.freqCount[top] -= 1
        if not self.countstacks[self.maxfreq]:
            self.maxfreq -= 1
        return top
# Replay the LeetCode example: six pushes followed by four pops,
# printing each popped value (expected: 5, 7, 5, 4).
operations = ["push", "push", "push", "push",
              "push", "push", "pop", "pop", "pop", "pop"]
values = [[5], [7], [5], [7], [4], [5], [], [], [], []]
freqStack = FreqStack()
for op, op_args in zip(operations, values):
    if op == "push":
        freqStack.push(op_args[0])
    else:
        print(freqStack.pop())
| from collections import defaultdict
class FreqStack:
    """Stack that pops the most frequent element, breaking ties toward the
    element pushed most recently among the most frequent (LeetCode 895)."""

    def __init__(self):
        self.freqCount = defaultdict(int)    # value -> current frequency
        self.countstacks = defaultdict(list) # frequency -> stack of values
        self.maxfreq = 0                     # highest frequency present

    def push(self, x: int) -> None:
        """Push x in O(1), recording it on the stack for its new frequency."""
        new_freq = self.freqCount[x] + 1
        self.freqCount[x] = new_freq
        self.countstacks[new_freq].append(x)
        # Frequencies only grow one step at a time, so this tracks the max.
        if new_freq > self.maxfreq:
            self.maxfreq = new_freq

    def pop(self) -> int:
        """Pop and return the most frequent (most recent on ties) value, O(1)."""
        top = self.countstacks[self.maxfreq].pop()
        self.freqCount[top] -= 1
        if not self.countstacks[self.maxfreq]:
            self.maxfreq -= 1
        return top
# Replay the LeetCode example: six pushes followed by four pops,
# printing each popped value (expected: 5, 7, 5, 4).
# FIX: stripped dataset metadata ("| none | 1 | ...") that was fused onto
# the last line and made the file unparsable.
operations = ["push", "push", "push", "push",
              "push", "push", "pop", "pop", "pop", "pop"]
values = [[5], [7], [5], [7], [4], [5], [], [], [], []]
freqStack = FreqStack()
for i in range(len(operations)):
    if operations[i] == "push":
        freqStack.push(values[i][0])
    else:
        print(freqStack.pop())
game.py | RchrdMrtnz/the_hangman | 1 | 6619907 | <gh_stars>1-10
import os
import time
import random
from wordrandom import wordrand
from difficulty import difficulty
from record import leer, record
def game(lifes):
    """Play one hangman round on stdin/stdout; returns the remaining lifes."""
    word = wordrand()
    yourword= " "
    counter = 0
    # ASCII-art gallows, revealed one line per wrong guess.
    the_hang_text = ["\n________", "|/ |", "| |", "| (_)", "| _|/", "| |", "| /|", "| ", "| ", "|____________"]
    while lifes > 0:
        try:
            # [0] keeps only the first character; empty input raises IndexError.
            lower_case = input(
                " \n Enter a letter, if you have the courage: ")[0]
        except:
            # NOTE(review): bare except, and this retry input is not itself
            # guarded - a second empty entry crashes. Confirm intended.
            print(
                "\n Only letters dont be a don't be a cheater, no numbers or empty spaces")
            lower_case = input("Try again \n")
        letter = lower_case.lower()
        yourword += letter
        fails = 0
        # Show the word with unguessed letters masked; count the misses.
        for letters in word:
            if letters in yourword:
                print(letters, end="")
            else:
                print("*", end="")
                fails += 1
        if fails == 0:
            # Every letter revealed: the player wins.
            print("\n Congratulations, you're not a fool ... totally")
            record()
            see_records=input("if you wanna see the famous table press 1")
            if see_records=="1":
                leer()
            break
        if (lifes > 0) and (letter not in word):
            counter+= 1
            # Draw the gallows up to the current number of misses.
            # NOTE(review): i == 8 prints nothing and i >= 9 reprints
            # the_hang_text[8] - looks off by one, confirm.
            for i in range(counter):
                if i < 8:
                    print(the_hang_text[i])
                elif i >= 9:
                    print(the_hang_text[8])
                time.sleep(0.3)
        if letter not in word:
            lifes -= 1
            print("\nHow pathetic, you failed \n")
            print(f"Do you have {lifes} lifes \n")
        if lifes == 0:
            # Out of lifes: reveal the word and draw the full gallows.
            time.sleep(1.2)
            print(f"Seriously? you could not guess \"{word}\" \n")
            time.sleep(1.5)
            print("You lost, from now on your neck won't be the same, idiot \n")
            time.sleep(1.2)
            print("you are dead man")
            time.sleep(1.2)
            for i in range(0,10):
                print(the_hang_text[i])
                time.sleep(0.5)
    else:
        # while/else: runs when the loop exits without break (i.e. on loss).
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # dump - confirm this else belongs to the while, not the last if.
        print("\nThanks for wasting my time \n")
        time.sleep(0.2)
        see_records=input("if you wanna see the famous table press 1 ")
        if see_records=="1":
            try:
                leer()
            except:
                print("This not easy stupid")
    return lifes
# Script entry point: play a single round with one life.
if __name__ == "__main__":
    lifes=1
    game(lifes)
| import os
import time
import random
from wordrandom import wordrand
from difficulty import difficulty
from record import leer, record
def game(lifes):
    """Play one hangman round on stdin/stdout; returns the remaining lifes."""
    word = wordrand()
    yourword= " "
    counter = 0
    # ASCII-art gallows, revealed one line per wrong guess.
    the_hang_text = ["\n________", "|/ |", "| |", "| (_)", "| _|/", "| |", "| /|", "| ", "| ", "|____________"]
    while lifes > 0:
        try:
            # [0] keeps only the first character; empty input raises IndexError.
            lower_case = input(
                " \n Enter a letter, if you have the courage: ")[0]
        except:
            # NOTE(review): bare except, and this retry input is not itself
            # guarded - a second empty entry crashes. Confirm intended.
            print(
                "\n Only letters dont be a don't be a cheater, no numbers or empty spaces")
            lower_case = input("Try again \n")
        letter = lower_case.lower()
        yourword += letter
        fails = 0
        # Show the word with unguessed letters masked; count the misses.
        for letters in word:
            if letters in yourword:
                print(letters, end="")
            else:
                print("*", end="")
                fails += 1
        if fails == 0:
            # Every letter revealed: the player wins.
            print("\n Congratulations, you're not a fool ... totally")
            record()
            see_records=input("if you wanna see the famous table press 1")
            if see_records=="1":
                leer()
            break
        if (lifes > 0) and (letter not in word):
            counter+= 1
            # Draw the gallows up to the current number of misses.
            # NOTE(review): i == 8 prints nothing and i >= 9 reprints
            # the_hang_text[8] - looks off by one, confirm.
            for i in range(counter):
                if i < 8:
                    print(the_hang_text[i])
                elif i >= 9:
                    print(the_hang_text[8])
                time.sleep(0.3)
        if letter not in word:
            lifes -= 1
            print("\nHow pathetic, you failed \n")
            print(f"Do you have {lifes} lifes \n")
        if lifes == 0:
            # Out of lifes: reveal the word and draw the full gallows.
            time.sleep(1.2)
            print(f"Seriously? you could not guess \"{word}\" \n")
            time.sleep(1.5)
            print("You lost, from now on your neck won't be the same, idiot \n")
            time.sleep(1.2)
            print("you are dead man")
            time.sleep(1.2)
            for i in range(0,10):
                print(the_hang_text[i])
                time.sleep(0.5)
    else:
        # while/else: runs when the loop exits without break (i.e. on loss).
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # dump - confirm this else belongs to the while, not the last if.
        print("\nThanks for wasting my time \n")
        time.sleep(0.2)
        see_records=input("if you wanna see the famous table press 1 ")
        if see_records=="1":
            try:
                leer()
            except:
                print("This not easy stupid")
    return lifes
# Script entry point: play a single round with one life.
# FIX: stripped dataset metadata ("| none | 1 | ...") fused onto the last line.
if __name__ == "__main__":
    lifes = 1
    game(lifes)
ftpserver.py | hootanht/ftpupload | 0 | 6619908 |
import os
import socket
import stat
import sys
import threading
import time
from pathlib import Path
from utils import fileProperty
try:
host = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
host = socket.gethostname()
port = 21
working_directory = os.getcwd()
ascii_buffer = 1024
binary_buffer = 4194304
class FTPServerProtocol(threading.Thread):
    def __init__(self, command_socket, address):
        """Per-client handler thread for one FTP control connection."""
        threading.Thread.__init__(self)
        self.authenticated = False        # becomes True after USER + PASS
        self.pasv_mode = False            # passive-mode data transfers
        self.rest = False
        self.allow_delete = False
        self.working_directory = working_directory
        self.base_path = working_directory
        self.command_socket = command_socket  # control-channel socket
        self.address = address                # client (host, port)
        self.type = 'A'                   # transfer type: 'A' ascii / 'I' binary
        self.mode = 'S'                   # transfer mode: 'S' stream
        self.file_structure = 'F'         # file structure: 'F' file
    def run(self):
        # Handles and executes received user commands
        self.connectionSuccess()
        while True:
            try:
                data = self.command_socket.recv(ascii_buffer).rstrip()
                try:
                    client_command = data.decode('utf-8')
                except AttributeError:
                    # Already a str, nothing to decode.
                    client_command = data
                log('Received data', client_command)
                if not client_command:
                    # Empty read: the client closed the connection.
                    break
            except socket.error as error:
                # NOTE(review): on a socket error this only logs and falls
                # through with client_command possibly unset or stale -
                # confirm intended.
                log('Receive', error)
            try:
                # First 4 characters are the verb; the remainder (if any)
                # is its parameter.
                client_command, param = client_command[:4].strip().upper(), client_command[4:].strip() or None
                func = getattr(self, client_command)
                func(param)
            except AttributeError as error:
                # No method matches the verb: report an unrecognized command.
                self.sendResponse('500 Syntax error, command unrecognized. '
                                  'This may include errors such as command line too long.\r\n')
                log('Receive', error)
    def connectionSuccess(self):
        """Greet a newly accepted control connection (RFC 959 reply 220)."""
        self.sendResponse('220 Service ready for new user.\r\n')
    def setupUserFolder(self, username):
        # Separate the base access path from the per-user working directory.
        path = self.working_directory + '/' + username
        try:
            os.mkdir(path)
        except OSError:
            # The folder already exists: fine.
            pass
        # Split so base_path ends just before the user's folder while
        # working_directory keeps the user-relative part (chroot-like jail).
        path = path.split('/')
        user_path_index = path.index(username)
        base_path = path[:user_path_index]
        working_path = path[user_path_index:]
        self.base_path = '/'.join(base_path)
        self.working_directory = '/' + '/'.join(working_path) + '/'
def generatePath(self, base_path='', working_path=''):
print(base_path + working_path)
return base_path + working_path
#=======================================#
## FTP transmission control procedures ##
#=======================================#
def createDataSocket(self):
# Open socket with client for data transmission
log('createDataSocket', 'Opening a data channel')
try:
self.data_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.pasv_mode:
self.data_socket, self.address = self.server_socket.accept()
else:
self.data_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.data_socket.connect((self.data_socket_address, self.data_socket_port))
except socket.error as error:
log('createDataSocket', error)
    def terminateDataSocket(self):
        # Close the data transmission socket with the client.
        log('terminateDataSocket', 'Closing a data channel')
        try:
            self.data_socket.close()
            if self.pasv_mode:
                # Also close the passive-mode listening socket.
                self.server_socket.close()
        except socket.error as error:
            log('terminateDataSocket', error)
    def sendResponse(self, client_command):
        """Send a status/reply line to the client over the control channel."""
        self.command_socket.send(client_command.encode('utf-8'))
def sendData(self, data):
# Transmit file data to client
if self.type == 'I':
self.data_socket.send(data)
else:
self.data_socket.send(data.encode('utf-8'))
#===============================================#
## FTP commands and additional functionalities ##
#===============================================#
def USER(self, username):
# Lets user to set their username
log("USER", username)
if not username:
self.sendResponse('501 Syntax error in parameters or arguments.\r\n')
else:
self.sendResponse('331 User name okay, need password.\r\n')
self.username = username
self.setupUserFolder(username)
def PASS(self, password):
# Lets user to set their password
log("PASS", password)
if not password:
self.sendResponse('501 Syntax error in parameters or arguments.\r\n')
elif not self.username:
self.sendResponse('503 Bad sequence of commands.\r\n')
else:
self.sendResponse('230 User logged in, proceed.\r\n')
self.password = password
self.authenticated = True
self.allow_delete = True
def TYPE(self, type):
# Specify file mode to be handled
log('TYPE', type)
self.type = type
if self.type == 'I':
self.sendResponse('200 Binary file mode.\r\n')
elif self.type == 'A':
self.sendResponse('200 Ascii file mode.\r\n')
else:
self.sendResponse('501 Syntax error in parameters or arguments.\r\n')
    def PASV(self, client_command):
        # Makes server-DTP "listen" on a non-default data port to wait for a connection rather than initiate one upon receipt of a transfer command
        log("PASV", client_command)
        self.pasv_mode = True
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server_socket.bind((host, 0))  # port 0: let the OS pick a free port
        self.server_socket.listen(5)
        address, port = self.server_socket.getsockname()
        # The 227 reply encodes the address octets plus the port split into
        # its high and low bytes, per RFC 959.
        self.sendResponse('227 Entering Passive Mode (%s,%u,%u).\r\n' %
                          (','.join(address.split('.')), port>>8&0xFF, port&0xFF))
def MODE(self, mode):
# Specifies data transfer mode for server
log('MODE', mode)
self.mode = mode
if self.mode == 'S':
self.sendResponse('200 Stream transfer mode.\r\n')
elif self.mode == 'B':
self.sendResponse('502 Command not implemented.\r\n')
elif self.mode == 'C':
self.sendResponse('502 Command not implemented.\r\n')
else:
self.sendResponse('501 Syntax error in parameters or arguments.\r\n')
def STRU(self, file_structure):
# Specifies file structure type for server
log('STRU', file_structure)
self.file_structure = file_structure
if self.file_structure == 'F':
self.sendResponse('200 File Strcture = File.\r\n')
elif self.file_structure == 'R':
self.sendResponse('502 Command not implemented.\r\n')
elif self.file_structure == 'P':
self.sendResponse('502 Command not implemented.\r\n')
    def STAT(self, client_command):
        """STAT is not supported: always answers 502."""
        log('STAT', client_command)
        self.sendResponse('502 Command not implemented.\r\n')
def PORT(self, client_command):
# Specify the port to be used for data transmission
log("PORT: ", client_command)
if self.pasv_mode:
self.server_socket.close()
self.pasv_mode = False
connection_info = client_command[5:].split(',')
self.data_socket_address = '.'.join(connection_info[:4])
self.data_socket_port = (int(connection_info[4])<<8) + int(connection_info[5])
self.sendResponse('200 Get port.\r\n')
def LIST(self, directory_path):
# Sends list of content in specified server path
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
return
if not directory_path:
server_path = os.path.abspath(os.path.join(self.base_path + self.working_directory, '.'))
elif directory_path.startswith(os.path.sep):
server_path = os.path.abspath(directory_path)
else:
server_path = os.path.abspath(os.path.join(self.base_path + self.working_directory, '.'))
log('LIST', server_path)
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
elif not os.path.exists(server_path):
self.sendResponse('550 LIST failed Path name not exists.\r\n')
else:
self.sendResponse('150 Here is listing.\r\n')
self.createDataSocket()
if not os.path.isdir(server_path):
fileMessage = fileProperty(server_path)
self.data_socket.sock(fileMessage+'\r\n')
else:
for file in os.listdir(server_path):
fileMessage = fileProperty(os.path.join(server_path, file))
self.sendData(fileMessage+'\r\n')
self.terminateDataSocket()
self.sendResponse('226 List done.\r\n')
def NLST(self, directory_path):
# Sends a directory listing from server to user site with only names of content
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
return
if not directory_path:
server_path = os.path.abspath(os.path.join(self.base_path + self.working_directory, '.'))
elif directory_path.startswith(os.path.sep):
server_path = os.path.abspath(directory_path)
else:
server_path = os.path.abspath(os.path.join(self.base_path + self.working_directory, '.'))
log('NLST', directory_path)
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
elif not os.path.exists(server_path):
self.sendResponse('550 NLST failed Path name doesnt exist.\r\n')
else:
self.sendResponse('150 Here is listing.\r\n')
self.createDataSocket()
if not os.path.isdir(server_path):
fileMessage = fileProperty(server_path)
self.data_socket.sock(fileMessage+'\r\n')
else:
for file in os.listdir(server_path):
self.sendData(file+'\r\n')
self.terminateDataSocket()
self.sendResponse('226 List done.\r\n')
    def CWD(self, directory_path):
        """Change the session working directory to *directory_path*."""
        # Allows user to change current directory to a new directory on the server
        # NOTE(review): no authentication check here, and '..' segments in
        # directory_path can escape the user root -- confirm intended.
        server_path = self.base_path + directory_path
        log('CWD', server_path)
        if not os.path.exists(server_path) or not os.path.isdir(server_path):
            self.sendResponse('550 CWD failed Directory does not exist.\r\n')
            return
        self.working_directory = directory_path
        self.sendResponse('250 CWD Command successful.\r\n')
    def PWD(self, client_command):
        """Report the current working directory (257 reply)."""
        # Returns the current server directory path
        log('PWD', client_command)
        self.sendResponse('257 "%s".\r\n' % self.working_directory)
    def CDUP(self, client_command):
        """Move to the parent directory, but never above the user's root."""
        # Changes current working directory to parent directory
        if self.working_directory != '/' + self.username:
            self.working_directory = '/' + os.path.abspath(os.path.join(self.base_path + self.working_directory, '..'))
        log('CDUP', self.working_directory)
        self.sendResponse('200 OK.\r\n')
def DELE(self, filename):
# Deletes file specified in the pathname to be deleted at the server site
server_path = self.generatePath(self.base_path, filename)
log('DELE', server_path)
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
elif not os.path.exists(server_path):
self.send('550 DELE failed File %s does not exist\r\n' % server_path)
elif not self.allow_delete:
self.send('450 DELE failed delete not allowed.\r\n')
else:
os.remove(server_path)
self.sendResponse('250 File deleted.\r\n')
    def MKD(self, dirname):
        """Create directory *dirname* under the user's base path (257 reply)."""
        # Creates specified directory at current path directory
        server_path = self.generatePath(self.base_path, dirname)
        log('MKD', server_path)
        if not self.authenticated:
            self.sendResponse('530 User not logged in.\r\n')
        else:
            try:
                os.mkdir(server_path)
                self.sendResponse('257 Directory created.\r\n')
            except OSError:
                # mkdir can also fail for permission/path reasons, but the
                # common cause is a pre-existing directory.
                self.sendResponse('550 MKD failed. Directory "%s" already exists.\r\n' % server_path)
    def RMD(self, dirname):
        """Recursively delete directory *dirname* (requires delete permission)."""
        # Removes specified directory at current path directory
        import shutil
        server_path = self.generatePath(self.base_path, dirname)
        log('RMD', server_path)
        if not self.authenticated:
            self.sendResponse('530 User not logged in.\r\n')
        elif not self.allow_delete:
            self.sendResponse('450 Invalid permissions.\r\n')
        elif not os.path.exists(server_path):
            self.sendResponse('550 RMDIR failed Directory "%s" not exists.\r\n' % server_path)
        else:
            shutil.rmtree(server_path)
            self.sendResponse('250 Directory deleted.\r\n')
def RNFR(self, filename):
# Specifies the old pathname of the file which is to be renamed
server_path = self.generatePath(self.base_path, filename)
log('RNFR', server_path)
if not os.path.exists(server_path):
self.sendResponse('550 RNFR failed. File or Directory %s does not exist.\r\n' % server_path)
else:
self.rnfr = server_path
self.sendResponse('350 RNFR successful - awaiting RNTO')
def RNTO(self, filename):
# Specifies the new pathname of the file specified in the immediately preceding "rename from" command
server_path = self.generatePath(self.base_path, filename)
log('RNTO', server_path)
if not os.path.exists(os.path.sep):
self.sendResponse('550 RNTO failed. File or Directory %s does not exist.\r\n' % server_path)
else:
try:
os.rename(self.rnfr, server_path)
self.sendResponse('250 RNTO successful')
except OSError as error:
log('RNTO', error)
    def REST(self, pos):
        """Record the byte offset at which the next RETR should resume."""
        # Represents the server marker at which file transfer is to be restarted
        self.pos = int(pos)
        log('REST', self.pos)
        self.rest = True
        # NOTE(review): RFC 959 specifies a 350 reply for REST; this sends
        # 250 -- confirm connected clients accept it before changing.
        self.sendResponse('250 File position reset.\r\n')
def RETR(self, filename):
# Causes server-DTP to transfer a copy of the file, specified in the pathname, to the server- or user-DTP at the other end of the data connection
server_path = self.generatePath(self.base_path, filename)
log('RETR', server_path)
if not os.path.exists(server_path):
return
try:
if self.type=='I':
file = open(server_path, 'rb')
else:
file = open(server_path, 'r')
except OSError as error:
log('RETR', error)
self.sendResponse('150 Opening data connection.\r\n')
if self.rest:
file.seek(self.pos)
self.rest = False
self.createDataSocket()
while True:
data = file.read(binary_buffer)
if not data: break
if self.mode == 'S':
self.sendData(data)
file.close()
self.terminateDataSocket()
self.sendResponse('226 Transfer complete.\r\n')
def STOR(self, filename):
# Causes the server-DTP to accept the data transferred via the data connection and to store the data as a file at the server site
if not self.authenticated:
self.sendResponse('530 STOR failed. User is not logged in.\r\n')
return
server_path = self.generatePath(self.base_path, filename)
log('STOR', server_path)
try:
if self.type == 'I':
file = open(server_path, 'wb')
else:
file = open(server_path, 'w')
except OSError as error:
log('STOR', error)
self.sendResponse('150 Opening data connection.\r\n' )
self.createDataSocket()
while True:
if self.type == 'I':
data = self.data_socket.recv(binary_buffer)
else:
data = self.data_socket.recv(binary_buffer).decode('utf-8')
if not data:
break
file.write(data)
file.close()
self.terminateDataSocket()
self.sendResponse('226 Transfer completed.\r\n')
def APPE(self, filename):
# Causes the server-DTP to accept the data transferred via the data connection and to store the data in a file at the server site
# If file specified in pathname exists at server site, the data shall be appended to that file; otherwise the file shall be created at the server site.
if not self.authenticated:
self.sendResponse('530 APPE failed. User is not logged in.\r\n')
return
server_path = self.generatePath(self.base_path, filename)
log('APPE', server_path)
self.sendResponse('150 Opening data connection.\r\n')
self.createDataSocket()
if not os.path.exists(server_path):
if self.type == 'I':
file = open(server_path, 'wb')
else:
file = open(server_path, 'w')
while True:
data = self.data_socket.recv(ascii_buffer)
if not data:
break
file.write(data)
else:
n = 1
while not os.path.exists(server_path):
filename, extname = os.path.splitext(server_path)
server_path = filename + '(%s)' %n + extname
n += 1
if self.type == 'I':
file = open(server_path, 'wb')
else:
file = open(server_path, 'w')
while True:
data = self.data_socket.recv(ascii_buffer)
if not data:
break
file.write(data)
file.close()
self.terminateDataSocket()
self.sendResponse('226 Transfer completed.\r\n')
    def SYST(self, client_command):
        """Report the server operating system (215 reply)."""
        # Used to find out the type of operating system at the server
        log('SYST', client_command)
        self.sendResponse('215 %s type.\r\n' % sys.platform)
    def NOOP(self, client_command):
        """Do nothing; answer 200 OK (keep-alive)."""
        # Specifies no action other than that the server send an OK reply
        log('NOOP', client_command)
        self.sendResponse('200 OK.\r\n')
    def HELP(self, param):
        """Send the static command-reference text to the client (214)."""
        # Provides server command list to client
        log('HELP', param)
        help = """
            214
            USER [name], Its argument is used to specify the user's string. It is used for user authentication.
            PASS [password], Its argument is used to specify the user password string.
            TYPE [type], Its argument is used to specify the file type.
            PASV The directive requires server-DTP in a data port.
            MODE [mode], Its argument is used to specify the data transfer type.
            STRU [structure], Its argument is used to specify the file structure.
            PORT [h1, h2, h3, h4, p1, p2], The command parameter is used for the data connection data port
            LIST [directory_path or filename], This command allows the server to send the list to the passive DTP.
                If the pathname specifies a path or The other set of files, the server sends a list of files in
                the specified directory. Current information if you specify a file path name, the server will
                send the file.
            NLST [directory_path or filename], This command calls LIST with the provided argument.
            CWD [path], Its argument is used to specify a new working directory.
            PWD Get current working directory.
            CDUP Changes the working directory on the remote host to the parent of the current directory.
            DELE [filename], Its argument is used to specify the file to be deleted.
            MKD [directory_name] Its argument is used to create the directory specified in the RemoteDirectory
                parameter on the remote host.
            RNFR [old name], Its argument is used to specify the file to be renamed (RNTO must follow).
            RNTO [new name] Its argument is used to specify the new name of the file to be renamed (from RNFR).
            REST [position] Marks the beginning (REST) The argument on behalf of the server you want to re-start
                the file transfer. This command and Do not send files, but skip the file specified data checkpoint.
            RETR This command allows server-FTP send a copy of a file with the specified path name to the data
                connection on the other end.
            STOR This command allows server-DTP to receive data transmitted via a data connection, and data is
                stored as a file on the server site.
            APPE This command allows server-DTP to receive data transmitted via a data connection, and data is stored
                as A file server site.
            SYST This command is used to find the server's operating system type.
            NOOP This command executes no action other than prompting a 200 OK response from the server.
            HELP Displays help information.
            QUIT This command terminates a user, if not being executed file transfer, the server will shut down
                Control connection\r\n.
            """
        self.sendResponse(help)
    def QUIT(self, param):
        """Acknowledge client logout (221 reply)."""
        # Connected user logs out and disconnects from server if not transfer in progress
        log('QUIT', param)
        self.sendResponse('221 Goodbye.\r\n')
def log(func, client_command=''):
    """Print a timestamped, colorized activity line for *func*."""
    # BUG FIX: *func* was embedded inside the strftime format string, so any
    # '%' in a message could be misinterpreted or raise; format the
    # timestamp alone and append the message afterwards.
    log_message = time.strftime("%Y-%m-%d %H-%M-%S") + " [-] " + func
    print("\033[31m%s\033[0m: \033[32m%s\033[0m" % (log_message, client_command))
def serverListener():
    """Accept loop: spawn an FTPServerProtocol thread per client connection."""
    # listen_socket is global so the __main__ block can close it on EXIT.
    global listen_socket
    listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen_socket.bind((host, port))
    listen_socket.listen(5)
    log('Server started', 'Listen on: %s, %s' % listen_socket.getsockname())
    while True:
        connection, address = listen_socket.accept()
        ftp_connection_instance = FTPServerProtocol(connection, address)
        ftp_connection_instance.start()
        log('Accept', 'Created a new connection %s, %s' % address)
if __name__ == "__main__":
    # Run the accept loop in a background thread so the main thread can
    # block on stdin waiting for the EXIT command.
    log('Start FTP server', 'Enter EXIT to stop FTP server...')
    listener = threading.Thread(target=serverListener)
    listener.start()
    if input().lower() == "exit":
        listen_socket.close()
        log('Server stop', 'Server closed')
        sys.exit()
|
import os
import socket
import stat
import sys
import threading
import time
from pathlib import Path
from utils import fileProperty
try:
    # Prefer the host's primary IP; fall back to the bare hostname when
    # name resolution fails.
    host = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
    host = socket.gethostname()
port = 21                        # standard FTP control port (needs privileges)
working_directory = os.getcwd()  # server root under which user folders live
ascii_buffer = 1024              # recv chunk size for control/ASCII traffic
binary_buffer = 4194304          # 4 MiB chunk size for binary transfers
class FTPServerProtocol(threading.Thread):
    """One control-connection session of the FTP server.

    Each accepted client socket gets its own thread; FTP verbs are
    implemented as same-named methods dispatched dynamically by run().
    """
    def __init__(self, command_socket, address):
        threading.Thread.__init__(self)
        self.authenticated = False   # set True by PASS
        self.pasv_mode = False       # set by PASV, cleared by PORT
        self.rest = False            # set by REST for a resumed RETR
        self.allow_delete = False    # granted after login
        # BUG FIX: start username/password empty so a PASS arriving before
        # any USER answers "503 Bad sequence" instead of raising
        # AttributeError on self.username.
        self.username = ''
        self.password = ''
        self.working_directory = working_directory
        self.base_path = working_directory
        self.command_socket = command_socket
        self.address = address
        self.type = 'A'              # 'A' ascii / 'I' binary transfer type
        self.mode = 'S'              # stream transfer mode
        self.file_structure = 'F'    # file structure
def run(self):
# Handles and executes received user commands
self.connectionSuccess()
while True:
try:
data = self.command_socket.recv(ascii_buffer).rstrip()
try:
client_command = data.decode('utf-8')
except AttributeError:
client_command = data
log('Received data', client_command)
if not client_command:
break
except socket.error as error:
log('Receive', error)
try:
client_command, param = client_command[:4].strip().upper(), client_command[4:].strip() or None
func = getattr(self, client_command)
func(param)
except AttributeError as error:
self.sendResponse('500 Syntax error, command unrecognized. '
'This may include errors such as command line too long.\r\n')
log('Receive', error)
    def connectionSuccess(self):
        """Greet a newly accepted control connection (220 reply)."""
        # Provide greeting for accepted user connection
        self.sendResponse('220 Service ready for new user.\r\n')
    def setupUserFolder(self, username):
        """Create <cwd>/<username> and split it into base + working paths."""
        # Separate base access path from working directory
        path = self.working_directory + '/' + username
        try:
            os.mkdir(path)
        except OSError:
            # Folder already exists (or cannot be created); reuse it as-is.
            pass
        # Split so base_path ends just before the user folder and
        # working_directory becomes '/<username>/' relative to it.
        path = path.split('/')
        user_path_index = path.index(username)
        base_path = path[:user_path_index]
        working_path = path[user_path_index:]
        self.base_path = '/'.join(base_path)
        self.working_directory = '/' + '/'.join(working_path) + '/'
def generatePath(self, base_path='', working_path=''):
print(base_path + working_path)
return base_path + working_path
#=======================================#
## FTP transmission control procedures ##
#=======================================#
def createDataSocket(self):
# Open socket with client for data transmission
log('createDataSocket', 'Opening a data channel')
try:
self.data_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.pasv_mode:
self.data_socket, self.address = self.server_socket.accept()
else:
self.data_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.data_socket.connect((self.data_socket_address, self.data_socket_port))
except socket.error as error:
log('createDataSocket', error)
    def terminateDataSocket(self):
        """Close the data channel (and the passive listener, if any)."""
        # Close data transmission socket with client
        log('terminateDataSocket', 'Closing a data channel')
        try:
            self.data_socket.close()
            if self.pasv_mode:
                self.server_socket.close()
        except socket.error as error:
            log('terminateDataSocket', error)
    def sendResponse(self, client_command):
        """Send a status line back over the control connection (UTF-8)."""
        # Transmit request codes and relevant message to client
        self.command_socket.send(client_command.encode('utf-8'))
def sendData(self, data):
# Transmit file data to client
if self.type == 'I':
self.data_socket.send(data)
else:
self.data_socket.send(data.encode('utf-8'))
#===============================================#
## FTP commands and additional functionalities ##
#===============================================#
    def USER(self, username):
        """Accept the login name and prepare the user's folder (331 reply)."""
        # Lets user to set their username
        log("USER", username)
        if not username:
            self.sendResponse('501 Syntax error in parameters or arguments.\r\n')
        else:
            self.sendResponse('331 User name okay, need password.\r\n')
            self.username = username
            self.setupUserFolder(username)
    def PASS(self, password):
        """Accept the password and mark the session authenticated."""
        # Lets user to set their password
        # NOTE(review): if PASS arrives before any USER, self.username is
        # unset and this raises AttributeError -- confirm/initialize in
        # __init__.
        log("PASS", password)
        if not password:
            self.sendResponse('501 Syntax error in parameters or arguments.\r\n')
        elif not self.username:
            self.sendResponse('503 Bad sequence of commands.\r\n')
        else:
            # NOTE(review): any non-empty password is accepted -- there is no
            # credential check; confirm anonymous-style login is intended.
            self.sendResponse('230 User logged in, proceed.\r\n')
            self.password = password
            self.authenticated = True
            self.allow_delete = True
    def TYPE(self, type):
        """Select the transfer type: 'I' binary or 'A' ASCII."""
        # Specify file mode to be handled
        log('TYPE', type)
        self.type = type
        if self.type == 'I':
            self.sendResponse('200 Binary file mode.\r\n')
        elif self.type == 'A':
            self.sendResponse('200 Ascii file mode.\r\n')
        else:
            self.sendResponse('501 Syntax error in parameters or arguments.\r\n')
    def PASV(self, client_command):
        """Enter passive mode: listen on an ephemeral port and report it."""
        # Makes server-DTP "listen" on a non-default data port to wait for a connection rather than initiate one upon receipt of a transfer command
        log("PASV", client_command)
        self.pasv_mode = True
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server_socket.bind((host, 0))  # port 0 -> OS picks a free port
        self.server_socket.listen(5)
        address, port = self.server_socket.getsockname()
        # 227 reply encodes the endpoint as h1,h2,h3,h4,p1,p2 with port = p1*256 + p2.
        self.sendResponse('227 Entering Passive Mode (%s,%u,%u).\r\n' %
                (','.join(address.split('.')), port>>8&0xFF, port&0xFF))
    def MODE(self, mode):
        """Set the transfer mode; only 'S' (stream) is implemented."""
        # Specifies data transfer mode for server
        log('MODE', mode)
        self.mode = mode
        if self.mode == 'S':
            self.sendResponse('200 Stream transfer mode.\r\n')
        elif self.mode == 'B':
            # Block mode: advertised but not implemented.
            self.sendResponse('502 Command not implemented.\r\n')
        elif self.mode == 'C':
            # Compressed mode: advertised but not implemented.
            self.sendResponse('502 Command not implemented.\r\n')
        else:
            self.sendResponse('501 Syntax error in parameters or arguments.\r\n')
def STRU(self, file_structure):
# Specifies file structure type for server
log('STRU', file_structure)
self.file_structure = file_structure
if self.file_structure == 'F':
self.sendResponse('200 File Strcture = File.\r\n')
elif self.file_structure == 'R':
self.sendResponse('502 Command not implemented.\r\n')
elif self.file_structure == 'P':
self.sendResponse('502 Command not implemented.\r\n')
    def STAT(self, client_command):
        """STAT is advertised but not implemented; always answers 502."""
        # Specifies file structure type for server
        log('STAT', client_command)
        self.sendResponse('502 Command not implemented.\r\n')
def PORT(self, client_command):
# Specify the port to be used for data transmission
log("PORT: ", client_command)
if self.pasv_mode:
self.server_socket.close()
self.pasv_mode = False
connection_info = client_command[5:].split(',')
self.data_socket_address = '.'.join(connection_info[:4])
self.data_socket_port = (int(connection_info[4])<<8) + int(connection_info[5])
self.sendResponse('200 Get port.\r\n')
def LIST(self, directory_path):
# Sends list of content in specified server path
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
return
if not directory_path:
server_path = os.path.abspath(os.path.join(self.base_path + self.working_directory, '.'))
elif directory_path.startswith(os.path.sep):
server_path = os.path.abspath(directory_path)
else:
server_path = os.path.abspath(os.path.join(self.base_path + self.working_directory, '.'))
log('LIST', server_path)
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
elif not os.path.exists(server_path):
self.sendResponse('550 LIST failed Path name not exists.\r\n')
else:
self.sendResponse('150 Here is listing.\r\n')
self.createDataSocket()
if not os.path.isdir(server_path):
fileMessage = fileProperty(server_path)
self.data_socket.sock(fileMessage+'\r\n')
else:
for file in os.listdir(server_path):
fileMessage = fileProperty(os.path.join(server_path, file))
self.sendData(fileMessage+'\r\n')
self.terminateDataSocket()
self.sendResponse('226 List done.\r\n')
def NLST(self, directory_path):
# Sends a directory listing from server to user site with only names of content
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
return
if not directory_path:
server_path = os.path.abspath(os.path.join(self.base_path + self.working_directory, '.'))
elif directory_path.startswith(os.path.sep):
server_path = os.path.abspath(directory_path)
else:
server_path = os.path.abspath(os.path.join(self.base_path + self.working_directory, '.'))
log('NLST', directory_path)
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
elif not os.path.exists(server_path):
self.sendResponse('550 NLST failed Path name doesnt exist.\r\n')
else:
self.sendResponse('150 Here is listing.\r\n')
self.createDataSocket()
if not os.path.isdir(server_path):
fileMessage = fileProperty(server_path)
self.data_socket.sock(fileMessage+'\r\n')
else:
for file in os.listdir(server_path):
self.sendData(file+'\r\n')
self.terminateDataSocket()
self.sendResponse('226 List done.\r\n')
    def CWD(self, directory_path):
        """Change the session working directory to *directory_path*."""
        # Allows user to change current directory to a new directory on the server
        # NOTE(review): no authentication check here, and '..' segments in
        # directory_path can escape the user root -- confirm intended.
        server_path = self.base_path + directory_path
        log('CWD', server_path)
        if not os.path.exists(server_path) or not os.path.isdir(server_path):
            self.sendResponse('550 CWD failed Directory does not exist.\r\n')
            return
        self.working_directory = directory_path
        self.sendResponse('250 CWD Command successful.\r\n')
    def PWD(self, client_command):
        """Report the current working directory (257 reply)."""
        # Returns the current server directory path
        log('PWD', client_command)
        self.sendResponse('257 "%s".\r\n' % self.working_directory)
    def CDUP(self, client_command):
        """Move to the parent directory, but never above the user's root."""
        # Changes current working directory to parent directory
        if self.working_directory != '/' + self.username:
            self.working_directory = '/' + os.path.abspath(os.path.join(self.base_path + self.working_directory, '..'))
        log('CDUP', self.working_directory)
        self.sendResponse('200 OK.\r\n')
def DELE(self, filename):
# Deletes file specified in the pathname to be deleted at the server site
server_path = self.generatePath(self.base_path, filename)
log('DELE', server_path)
if not self.authenticated:
self.sendResponse('530 User not logged in.\r\n')
elif not os.path.exists(server_path):
self.send('550 DELE failed File %s does not exist\r\n' % server_path)
elif not self.allow_delete:
self.send('450 DELE failed delete not allowed.\r\n')
else:
os.remove(server_path)
self.sendResponse('250 File deleted.\r\n')
    def MKD(self, dirname):
        """Create directory *dirname* under the user's base path (257 reply)."""
        # Creates specified directory at current path directory
        server_path = self.generatePath(self.base_path, dirname)
        log('MKD', server_path)
        if not self.authenticated:
            self.sendResponse('530 User not logged in.\r\n')
        else:
            try:
                os.mkdir(server_path)
                self.sendResponse('257 Directory created.\r\n')
            except OSError:
                # mkdir can also fail for permission/path reasons, but the
                # common cause is a pre-existing directory.
                self.sendResponse('550 MKD failed. Directory "%s" already exists.\r\n' % server_path)
    def RMD(self, dirname):
        """Recursively delete directory *dirname* (requires delete permission)."""
        # Removes specified directory at current path directory
        import shutil
        server_path = self.generatePath(self.base_path, dirname)
        log('RMD', server_path)
        if not self.authenticated:
            self.sendResponse('530 User not logged in.\r\n')
        elif not self.allow_delete:
            self.sendResponse('450 Invalid permissions.\r\n')
        elif not os.path.exists(server_path):
            self.sendResponse('550 RMDIR failed Directory "%s" not exists.\r\n' % server_path)
        else:
            shutil.rmtree(server_path)
            self.sendResponse('250 Directory deleted.\r\n')
def RNFR(self, filename):
# Specifies the old pathname of the file which is to be renamed
server_path = self.generatePath(self.base_path, filename)
log('RNFR', server_path)
if not os.path.exists(server_path):
self.sendResponse('550 RNFR failed. File or Directory %s does not exist.\r\n' % server_path)
else:
self.rnfr = server_path
self.sendResponse('350 RNFR successful - awaiting RNTO')
def RNTO(self, filename):
# Specifies the new pathname of the file specified in the immediately preceding "rename from" command
server_path = self.generatePath(self.base_path, filename)
log('RNTO', server_path)
if not os.path.exists(os.path.sep):
self.sendResponse('550 RNTO failed. File or Directory %s does not exist.\r\n' % server_path)
else:
try:
os.rename(self.rnfr, server_path)
self.sendResponse('250 RNTO successful')
except OSError as error:
log('RNTO', error)
    def REST(self, pos):
        """Record the byte offset at which the next RETR should resume."""
        # Represents the server marker at which file transfer is to be restarted
        self.pos = int(pos)
        log('REST', self.pos)
        self.rest = True
        # NOTE(review): RFC 959 specifies a 350 reply for REST; this sends
        # 250 -- confirm connected clients accept it before changing.
        self.sendResponse('250 File position reset.\r\n')
def RETR(self, filename):
# Causes server-DTP to transfer a copy of the file, specified in the pathname, to the server- or user-DTP at the other end of the data connection
server_path = self.generatePath(self.base_path, filename)
log('RETR', server_path)
if not os.path.exists(server_path):
return
try:
if self.type=='I':
file = open(server_path, 'rb')
else:
file = open(server_path, 'r')
except OSError as error:
log('RETR', error)
self.sendResponse('150 Opening data connection.\r\n')
if self.rest:
file.seek(self.pos)
self.rest = False
self.createDataSocket()
while True:
data = file.read(binary_buffer)
if not data: break
if self.mode == 'S':
self.sendData(data)
file.close()
self.terminateDataSocket()
self.sendResponse('226 Transfer complete.\r\n')
def STOR(self, filename):
# Causes the server-DTP to accept the data transferred via the data connection and to store the data as a file at the server site
if not self.authenticated:
self.sendResponse('530 STOR failed. User is not logged in.\r\n')
return
server_path = self.generatePath(self.base_path, filename)
log('STOR', server_path)
try:
if self.type == 'I':
file = open(server_path, 'wb')
else:
file = open(server_path, 'w')
except OSError as error:
log('STOR', error)
self.sendResponse('150 Opening data connection.\r\n' )
self.createDataSocket()
while True:
if self.type == 'I':
data = self.data_socket.recv(binary_buffer)
else:
data = self.data_socket.recv(binary_buffer).decode('utf-8')
if not data:
break
file.write(data)
file.close()
self.terminateDataSocket()
self.sendResponse('226 Transfer completed.\r\n')
def APPE(self, filename):
# Causes the server-DTP to accept the data transferred via the data connection and to store the data in a file at the server site
# If file specified in pathname exists at server site, the data shall be appended to that file; otherwise the file shall be created at the server site.
if not self.authenticated:
self.sendResponse('530 APPE failed. User is not logged in.\r\n')
return
server_path = self.generatePath(self.base_path, filename)
log('APPE', server_path)
self.sendResponse('150 Opening data connection.\r\n')
self.createDataSocket()
if not os.path.exists(server_path):
if self.type == 'I':
file = open(server_path, 'wb')
else:
file = open(server_path, 'w')
while True:
data = self.data_socket.recv(ascii_buffer)
if not data:
break
file.write(data)
else:
n = 1
while not os.path.exists(server_path):
filename, extname = os.path.splitext(server_path)
server_path = filename + '(%s)' %n + extname
n += 1
if self.type == 'I':
file = open(server_path, 'wb')
else:
file = open(server_path, 'w')
while True:
data = self.data_socket.recv(ascii_buffer)
if not data:
break
file.write(data)
file.close()
self.terminateDataSocket()
self.sendResponse('226 Transfer completed.\r\n')
    def SYST(self, client_command):
        """Report the server operating system (215 reply)."""
        # Used to find out the type of operating system at the server
        log('SYST', client_command)
        self.sendResponse('215 %s type.\r\n' % sys.platform)
    def NOOP(self, client_command):
        """Do nothing; answer 200 OK (keep-alive)."""
        # Specifies no action other than that the server send an OK reply
        log('NOOP', client_command)
        self.sendResponse('200 OK.\r\n')
    def HELP(self, param):
        """Send the static command-reference text to the client (214)."""
        # Provides server command list to client
        log('HELP', param)
        help = """
            214
            USER [name], Its argument is used to specify the user's string. It is used for user authentication.
            PASS [password], Its argument is used to specify the user password string.
            TYPE [type], Its argument is used to specify the file type.
            PASV The directive requires server-DTP in a data port.
            MODE [mode], Its argument is used to specify the data transfer type.
            STRU [structure], Its argument is used to specify the file structure.
            PORT [h1, h2, h3, h4, p1, p2], The command parameter is used for the data connection data port
            LIST [directory_path or filename], This command allows the server to send the list to the passive DTP.
                If the pathname specifies a path or The other set of files, the server sends a list of files in
                the specified directory. Current information if you specify a file path name, the server will
                send the file.
            NLST [directory_path or filename], This command calls LIST with the provided argument.
            CWD [path], Its argument is used to specify a new working directory.
            PWD Get current working directory.
            CDUP Changes the working directory on the remote host to the parent of the current directory.
            DELE [filename], Its argument is used to specify the file to be deleted.
            MKD [directory_name] Its argument is used to create the directory specified in the RemoteDirectory
                parameter on the remote host.
            RNFR [old name], Its argument is used to specify the file to be renamed (RNTO must follow).
            RNTO [new name] Its argument is used to specify the new name of the file to be renamed (from RNFR).
            REST [position] Marks the beginning (REST) The argument on behalf of the server you want to re-start
                the file transfer. This command and Do not send files, but skip the file specified data checkpoint.
            RETR This command allows server-FTP send a copy of a file with the specified path name to the data
                connection on the other end.
            STOR This command allows server-DTP to receive data transmitted via a data connection, and data is
                stored as a file on the server site.
            APPE This command allows server-DTP to receive data transmitted via a data connection, and data is stored
                as A file server site.
            SYST This command is used to find the server's operating system type.
            NOOP This command executes no action other than prompting a 200 OK response from the server.
            HELP Displays help information.
            QUIT This command terminates a user, if not being executed file transfer, the server will shut down
                Control connection\r\n.
            """
        self.sendResponse(help)
    def QUIT(self, param):
        """Acknowledge client logout (221 reply)."""
        # Connected user logs out and disconnects from server if not transfer in progress
        log('QUIT', param)
        self.sendResponse('221 Goodbye.\r\n')
def log(func, client_command=''):
    """Print a timestamped, colorized activity line for *func*."""
    # BUG FIX: *func* was embedded inside the strftime format string, so any
    # '%' in a message could be misinterpreted or raise; format the
    # timestamp alone and append the message afterwards.
    log_message = time.strftime("%Y-%m-%d %H-%M-%S") + " [-] " + func
    print("\033[31m%s\033[0m: \033[32m%s\033[0m" % (log_message, client_command))
def serverListener():
    """Accept loop: spawn an FTPServerProtocol thread per client connection."""
    # listen_socket is global so the __main__ block can close it on EXIT.
    global listen_socket
    listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen_socket.bind((host, port))
    listen_socket.listen(5)
    log('Server started', 'Listen on: %s, %s' % listen_socket.getsockname())
    while True:
        connection, address = listen_socket.accept()
        ftp_connection_instance = FTPServerProtocol(connection, address)
        ftp_connection_instance.start()
        log('Accept', 'Created a new connection %s, %s' % address)
if __name__ == "__main__":
    # Run the accept loop in a background thread so the main thread can
    # block on stdin waiting for the EXIT command.
    log('Start FTP server', 'Enter EXIT to stop FTP server...')
    listener = threading.Thread(target=serverListener)
    listener.start()
    if input().lower() == "exit":
        listen_socket.close()
        log('Server stop', 'Server closed')
        sys.exit()
| en | 0.786377 | # Handles and executes received user commands # Provide greeting for accepted user connection # Separare base access path from working directory #=======================================# ## FTP transmission control procedures ## #=======================================# # Open socket with client for data transmission # Close data tranmission socket with client # Transmit request codes and relevant message to client # Transmit file data to client #===============================================# ## FTP commands and additional functionalities ## #===============================================# # Lets user to set their username # Lets user to set their password # Specify file mode to be handled # Makes server-DTP "listen" on a non-default data port to wait for a connection rather than initiate one upon receipt of a transfer command # Specifies data transfer mode for server # Specifies file structure type for server # Specifies file structure type for server # Specify the port to be used for data transmission # Sends list of content in specified server path # Sends a directory listing from server to user site with only names of content # Allows user to change current directory to a new directory on the server # Returns the current server directory path # Changes current working directory to parent directory # Deletes file specified in the pathname to be deleted at the server site # Creates specified directory at current path directory # Removes specified directory at current path directory # Specifies the old pathname of the file which is to be renamed # Specifies the new pathname of the file specified in the immediately preceding "rename from" command # Represents the server marker at which file transfer is to be restarted # Causes server-DTP to transfer a copy of the file, specified in the pathname, to the server- or user-DTP at the other end of the data connection # Causes the server-DTP to accept the data transferred via the data connection and 
to store the data as a file at the server site # Causes the server-DTP to accept the data transferred via the data connection and to store the data in a file at the server site # If file specified in pathname exists at server site, the data shall be appended to that file; otherwise the file shall be created at the server site. # Used to find out the type of operating system at the server # Specifies no action other than that the server send an OK reply # Provides server command list to client 214 USER [name], Its argument is used to specify the user's string. It is used for user authentication. PASS [password], Its argument is used to specify the user password string. TYPE [type], Its argument is used to specify the file type. PASV The directive requires server-DTP in a data port. MODE [mode], Its argument is used to specify the data transfer type. STRU [structure], Its argument is used to specify the file structure. PORT [h1, h2, h3, h4, p1, p2], The command parameter is used for the data connection data port LIST [directory_path or filename], This command allows the server to send the list to the passive DTP. If the pathname specifies a path or The other set of files, the server sends a list of files in the specified directory. Current information if you specify a file path name, the server will send the file. NLST [directory_path or filename], This command calls LIST with the provided argument. CWD [path], Its argument is used to specify a new working directory. PWD Get current working directory. CDUP Changes the working directory on the remote host to the parent of the current directory. DELE [filename], Its argument is used to specify the file to be deleted. MKD [directory_name] Its argument is used to create the directory specified in the RemoteDirectory parameter on the remote host. RNFR [old name], Its argument is used to specify the file to be renamed (RNTO must follow). 
RNTO [new name] Its argument is used to specify the new name of the file to be renamed (from RNFR). REST [position] Marks the beginning (REST) The argument on behalf of the server you want to re-start the file transfer. This command and Do not send files, but skip the file specified data checkpoint. RETR This command allows server-FTP send a copy of a file with the specified path name to the data connection on the other end. STOR This command allows server-DTP to receive data transmitted via a data connection, and data is stored as a file on the server site. APPE This command allows server-DTP to receive data transmitted via a data connection, and data is stored as A file server site. SYST This command is used to find the server's operating system type. NOOP This command executes no action other than prompting a 200 OK response from the server. HELP Displays help information. QUIT This command terminates a user, if not being executed file transfer, the server will shut down Control connection\r\n. # Connected user logs out and disconnects from server if not transfer in progress # Provides logger service for server activity | 2.51982 | 3 |
carling/group.py | mc-digital/carling | 6 | 6619909 | from functools import reduce
import apache_beam as beam
from carling.iter_utils import is_none, is_some, take_as_singleton, take_top, unwrap_or_none
from carling.mapping import IndexBy
def _merge_two(x, y):
xv, xn = x
yv, yn = y
if xn == 0:
return y
if yn == 0:
return x
if xv == yv:
return (xv, xn + yn)
return ([], xn + yn)
class _UniqueKeyCombineFn(beam.CombineFn):
    """CombineFn that keeps a key's value only when every row for that key
    is identical; disagreeing rows collapse to an empty result list."""

    def create_accumulator(self):
        # (value_list, rows_seen); the empty list stands for "no value"
        # so that None remains a legal element value.
        return ([], 0)

    def add_input(self, acc, row):
        return _merge_two(acc, ([row], 1))

    def merge_accumulators(self, accs):
        return reduce(_merge_two, accs)

    def extract_output(self, acc):
        # Drop the count; emit [] (conflict) or [value] (unique).
        return acc[0]
class UniqueKeyCombine(beam.PTransform):
    """Combines per key with _UniqueKeyCombineFn, yielding ``(K, [V])``
    where the list is empty when the rows for ``K`` disagree."""

    def expand(self, pcoll):
        return pcoll | beam.CombinePerKey(_UniqueKeyCombineFn())
class UniqueOnly(beam.PTransform):
    """Produces elements that are the only elements per key after deduplication.

    Given a `PCollection` of `(K, V)`,
    this transform produces the collection of all `V`s that do not share
    the same corresponding `K`s with any other elements after deduplicating
    all equivalent `(K, V)` pairs.

    This transform is equivalent to `SingletonOnly` with `apache_beam.Distinct`.

    `[(1, "A"), (2, "B1"), (2, "B2"), (3, "C"), (3, "C"), (4, "A")]` will be
    transformed into `["A", "C", "A"]`.
    """

    def expand(self, pcoll):
        # As the input collection may include `None`s, this transform uses
        # lists, instead of `None`s, to express the emptyness of values.
        return (
            pcoll
            | "Remove Non-unique Elements" >> UniqueKeyCombine()
            # An empty list marks keys whose rows disagreed (see
            # _UniqueKeyCombineFn); those keys are dropped here.
            | "Remove None" >> beam.Filter(lambda kv: len(kv[1]) > 0)
            | "Unwrap Values" >> beam.Map(lambda kv: kv[1][0])
        )
class SingletonOnly(beam.PTransform):
    """Produces elements that are the only elements per key.

    Given a `PCollection` of `(K, V)`,
    this transform produces the collection of all `V`s that do not share
    the same corresponding `K`s with any other elements.

    `[(1, "A"), (2, "B1"), (2, "B2"), (3, "C"), (3, "C"), (4, "A")]` will be
    transformed into `["A", "A"]`.
    """

    def expand(self, pcoll):
        # As the input collection may include `None`s, this transform uses
        # tuples, instead of `None`s, to express the emptyness of values.
        return (
            pcoll
            | "Group" >> beam.GroupByKey()
            # take_as_singleton yields an empty container unless the
            # group has exactly one element.
            | "Remove Non-singleton Elements" >> beam.Map(lambda kv: take_as_singleton(kv[1]))
            | "Remove None" >> beam.Filter(lambda v: len(v) > 0)
            | "Unwrap Values" >> beam.Map(lambda v: v[0])
        )
class _IntersectionDoFn(beam.DoFn):
    """Emits a co-grouped key once when every grouped iterable for it is
    non-empty, i.e. the key appeared in all input collections."""

    def process(self, row):
        key, iter_list = row
        iter_list = list(iter_list)
        for iterable in iter_list:
            maybe_top = take_top(iterable)
            # One empty side means the key is missing from some input:
            # not part of the intersection.
            if is_none(maybe_top):
                return
        yield key
class Intersection(beam.PTransform):
    """Produces the intersection of given `PCollection`s.

    Given a list of `PCollection`s,
    this transform produces every element that appears in all collections of
    the list.

    Elements are deduplicated before taking the intersection.
    """

    def expand(self, pcoll_list):
        pcoll_list = list(pcoll_list)
        # Key each element by itself so CoGroupByKey aligns equal
        # elements across all input collections.
        keyed_pcolls = (
            pcoll | f"Map[{num}]" >> beam.Map(lambda value: (value, None))
            for num, pcoll in enumerate(pcoll_list)
        )
        return (
            keyed_pcolls
            | "Group" >> beam.CoGroupByKey()
            | "Extract" >> beam.ParDo(_IntersectionDoFn())
        )
class _FilterByKeyDoFn(beam.DoFn):
    """Yields the values of the first co-grouped iterable only when every
    other (key-list) iterable for the key is non-empty."""

    def process(self, row):
        _, value_list = row
        value_list = list(value_list)
        # Index 0 holds the actual values; the remaining iterables come
        # from the key-list collections (see FilterByKey.expand ordering).
        head_iter, tail_iters = value_list[0], value_list[1:]
        for it in tail_iters:
            if is_none(take_top(it)):
                return
        for v in head_iter:
            yield v
class FilterByKey(beam.PTransform):
    """Filters elements by their keys.

    The constructor receives one or more `PCollection`s of `K`s,
    which are regarded as key lists.

    Given a `PCollection` of `(K, V)`,
    this transform discards all elements with `K`s that do not appear
    in the key lists.

    If multiple collections are given to the constructor,
    this transform treats the intersection of them as the key list.
    """

    def __init__(self, *key_pcolls):
        super().__init__()
        self._key_pcolls = key_pcolls

    def expand(self, pcoll):
        # Key lists become (key, None) pairs so they can be co-grouped
        # with the main collection. pcoll must stay first in the tuple:
        # _FilterByKeyDoFn treats index 0 as the value source.
        keys = (
            key_pcoll | f"Map[{num}]" >> beam.Map(lambda key: (key, None))
            for num, key_pcoll in enumerate(self._key_pcolls)
        )
        return (pcoll, *keys) | beam.CoGroupByKey() | beam.ParDo(_FilterByKeyDoFn())
@beam.ptransform_fn
def FilterByKeyUsingSideInput(pcoll, lookup_entries, filter_key):
    """Filters a single collection by a single lookup collection, using a common key.

    Given:

    - a `PCollection` (lookup_entries) of `(V)`, as a lookup collection
    - a `PCollection` (pcoll) of `(V)`, as values to be filtered
    - a common key (filter_key)

    A dictionary called `filter_dict` - is created by mapping the value of `filter_key`
    for each entry in `lookup_entries` to True.

    Then, for each item in pcoll, the value associated with `filter_key` is checked
    against `filter_dict`, and if it is found, the entry passes through. Otherwise,
    the entry is discarded.

    Note: `lookup_entries` will be used as a **side input**, so care
    must be taken regarding the size of the `lookup_entries`
    """
    filter_dict_prepared = beam.pvalue.AsDict(
        lookup_entries | beam.Map(lambda row: (row[filter_key], True))
    )

    def _filter_fn(row, filter_dict):
        # Membership test only; the True values in filter_dict are dummies.
        return row[filter_key] in filter_dict

    return pcoll | beam.Filter(_filter_fn, filter_dict=filter_dict_prepared)
def _compare(left, right, tag, dict_keys=None):
def tyname(obj):
return type(obj).__name__
if type(left) != type(right):
return {
"tag": tag,
"detail": "type mismatch",
"left": tyname(left),
"right": tyname(right),
}
if isinstance(left, list):
if len(left) != len(right):
return {
"tag": tag,
"detail": "different array length",
"left": len(left),
"right": len(right),
}
for index, (lv, rv) in enumerate(zip(left, right)):
diff = _compare(lv, rv, f"{tag}[{index}]")
if diff is not None:
return diff
elif isinstance(left, dict):
keys = set(left.keys()) | set(right.keys())
if dict_keys is not None:
keys = keys & set(dict_keys)
for k in keys:
next_tag = f"{tag}/{k}"
if k not in left:
return {"tag": next_tag, "detail": "does not exist in left"}
if k not in right:
return {"tag": next_tag, "detail": "does not exist in right"}
diff = _compare(left[k], right[k], next_tag)
if diff is not None:
return diff
else:
if left != right:
return {
"tag": tag,
"detail": "values not equal",
"left": left,
"right": right,
}
return None
class _DifferencePerKeyDoFn(beam.DoFn):
    """Compares the co-grouped "left"/"right" rows for a key and emits a
    diagnostic dict when they differ or either side is not a singleton;
    emits nothing for matching singleton pairs."""

    def __init__(self, columns):
        super().__init__()
        # Restricts the top-level dict comparison to these keys.
        self._columns = columns

    def process(self, row):
        key, value_dict = row
        left_iter = iter(value_dict["left"])
        right_iter = iter(value_dict["right"])
        # Pull at most two elements per side: enough to distinguish
        # zero, exactly one, or "more than one" without materializing.
        l1 = take_top(left_iter)
        l2 = take_top(left_iter)
        r1 = take_top(right_iter)
        r2 = take_top(right_iter)
        l_count = [is_some(v) for v in [l1, l2]].count(True)
        r_count = [is_some(v) for v in [r1, r2]].count(True)
        error = None
        if l_count != 1 or r_count != 1:
            error = {
                "reason": "count",
                "left": l_count,
                "right": r_count,
            }
        else:
            lv = unwrap_or_none(l1)
            rv = unwrap_or_none(r1)
            result = _compare(lv, rv, "(root)", dict_keys=self._columns)
            if result is not None:
                error = {"reason": "value", **result}
        if error is not None:
            yield {
                "key": key,
                "error": error,
                "left": unwrap_or_none(l1),
                "right": unwrap_or_none(r1),
            }
class DifferencePerKey(beam.PTransform):
    """Produces the difference per key between two `PCollection`s.

    Given two `PCollection`s of `V`,
    this transform indexes the collections by the specified keys `primary_keys`,
    compares corresponding two `V` lists for every `K`,
    and produces the difference per `K`.

    If there is no difference, this transform produces nothing.

    Two `V` lists are considered to be different if the numbers of elements
    differ or two elements of the lists with a same index differ
    at one of the specified columns `columns`.
    """

    def __init__(self, primary_keys, columns):
        super().__init__()
        self._primary_keys = primary_keys
        self._columns = columns

    def expand(self, pcolls):
        pcolls = list(pcolls)
        # Exactly two inputs: the left and right sides of the diff.
        assert len(pcolls) == 2
        left, right = pcolls[0], pcolls[1]
        return (
            {
                "left": left | "Index[left]" >> IndexBy(*self._primary_keys),
                "right": right | "Index[right]" >> IndexBy(*self._primary_keys),
            }
            | beam.CoGroupByKey()
            | beam.ParDo(_DifferencePerKeyDoFn(self._columns))
        )
@beam.ptransform_fn
def MaxSelectPerKey(pcoll, index_keys, sort_key_fn, reverse=False):
    """
    - Groups items by key
    - Sorts using a key function
    - Emits the "MAX" _value_ for each collection - key is stripped.
    - Can emit "MIN" by passing reverse=True kwarg
    """
    return (
        pcoll
        | f"Index by {index_keys}" >> IndexBy(*index_keys)
        # Top.PerKey(1) yields (key, [single_max_value]).
        | "Top 1 per key" >> beam.combiners.Top.PerKey(1, key=sort_key_fn, reverse=reverse)
        # Strip the key and unwrap the one-element list.
        | "De-Index" >> beam.Map(lambda k_v: k_v[1][0])
    )
@beam.ptransform_fn
def PartitionRowsContainingNone(pcoll):
    """Emits two tagged pcollections:

    - None: Default emitted collection.
        Rows are guaranteed not to have any `None` values
    - contains_none: At least one column in the row had a `None` value
    """

    def _separator(row):
        # Route rows with any None-valued column to the tagged side output.
        if any([value is None for value in row.values()]):
            yield beam.pvalue.TaggedOutput("contains_none", row)
        else:
            yield row

    return pcoll | beam.ParDo(_separator).with_outputs()
| from functools import reduce
import apache_beam as beam
from carling.iter_utils import is_none, is_some, take_as_singleton, take_top, unwrap_or_none
from carling.mapping import IndexBy
def _merge_two(x, y):
xv, xn = x
yv, yn = y
if xn == 0:
return y
if yn == 0:
return x
if xv == yv:
return (xv, xn + yn)
return ([], xn + yn)
class _UniqueKeyCombineFn(beam.CombineFn):
def create_accumulator(self):
return ([], 0)
def add_input(self, acc, row):
return _merge_two(acc, ([row], 1))
def merge_accumulators(self, accs):
return reduce(_merge_two, accs)
def extract_output(self, acc):
return acc[0]
class UniqueKeyCombine(beam.PTransform):
def expand(self, pcoll):
return pcoll | beam.CombinePerKey(_UniqueKeyCombineFn())
class UniqueOnly(beam.PTransform):
"""Produces elements that are the only elements per key after deduplication.
Given a `PCollection` of `(K, V)`,
this transform produces the collection of all `V`s that do not share
the same corresponding `K`s with any other elements after deduplicating
all equivalent `(K, V)` pairs.
This transform is equivalent to `SingletonOnly` with `apache_beam.Distinct`.
`[(1, "A"), (2, "B1"), (2, "B2"), (3, "C"), (3, "C"), (4, "A")]` will be
transformed into `["A", "C", "A"]`.
"""
def expand(self, pcoll):
# As the input collection may include `None`s, this transform uses
# lists, instead of `None`s, to express the emptyness of values.
return (
pcoll
| "Remove Non-unique Elements" >> UniqueKeyCombine()
| "Remove None" >> beam.Filter(lambda kv: len(kv[1]) > 0)
| "Unwrap Values" >> beam.Map(lambda kv: kv[1][0])
)
class SingletonOnly(beam.PTransform):
"""Produces elements that are the only elements per key.
Given a `PCollection` of `(K, V)`,
this transform produces the collection of all `V`s that do not share
the same corresponding `K`s with any other elements.
`[(1, "A"), (2, "B1"), (2, "B2"), (3, "C"), (3, "C"), (4, "A")]` will be
transformed into `["A", "A"]`.
"""
def expand(self, pcoll):
# As the input collection may include `None`s, this transform uses
# tuples, instead of `None`s, to express the emptyness of values.
return (
pcoll
| "Group" >> beam.GroupByKey()
| "Remove Non-singleton Elements" >> beam.Map(lambda kv: take_as_singleton(kv[1]))
| "Remove None" >> beam.Filter(lambda v: len(v) > 0)
| "Unwrap Values" >> beam.Map(lambda v: v[0])
)
class _IntersectionDoFn(beam.DoFn):
def process(self, row):
key, iter_list = row
iter_list = list(iter_list)
for iterable in iter_list:
maybe_top = take_top(iterable)
if is_none(maybe_top):
return
yield key
class Intersection(beam.PTransform):
"""Produces the intersection of given `PCollection`s.
Given a list of `PCollection`s,
this transform produces every element that appears in all collections of
the list.
Elements are deduplicated before taking the intersection.
"""
def expand(self, pcoll_list):
pcoll_list = list(pcoll_list)
keyed_pcolls = (
pcoll | f"Map[{num}]" >> beam.Map(lambda value: (value, None))
for num, pcoll in enumerate(pcoll_list)
)
return (
keyed_pcolls
| "Group" >> beam.CoGroupByKey()
| "Extract" >> beam.ParDo(_IntersectionDoFn())
)
class _FilterByKeyDoFn(beam.DoFn):
def process(self, row):
_, value_list = row
value_list = list(value_list)
head_iter, tail_iters = value_list[0], value_list[1:]
for it in tail_iters:
if is_none(take_top(it)):
return
for v in head_iter:
yield v
class FilterByKey(beam.PTransform):
"""Filters elements by their keys.
The constructor receives one or more `PCollection`s of `K`s,
which are regarded as key lists.
Given a `PCollection` of `(K, V)`,
this transform discards all elements with `K`s that do not appear
in the key lists.
If multiple collections are given to the constructor,
this transform treats the intersection of them as the key list.
"""
def __init__(self, *key_pcolls):
super().__init__()
self._key_pcolls = key_pcolls
def expand(self, pcoll):
keys = (
key_pcoll | f"Map[{num}]" >> beam.Map(lambda key: (key, None))
for num, key_pcoll in enumerate(self._key_pcolls)
)
return (pcoll, *keys) | beam.CoGroupByKey() | beam.ParDo(_FilterByKeyDoFn())
@beam.ptransform_fn
def FilterByKeyUsingSideInput(pcoll, lookup_entries, filter_key):
"""Filters a single collection by a single lookup collection, using a common key.
Given:
- a `PCollection` (lookup_entries) of `(V)`, as a lookup collection
- a `PCollection` (pcoll) of `(V)`, as values to be filtered
- a common key (filter_key)
A dictionary called `filter_dict` - is created by mapping the value of `filter_key`
for each entry in `lookup_entries` to True.
Then, for each item in pcoll, the value associated with `filter_key` checkd against
`filter_dict`, and if it is found, the entry passes through. Otherwise, the entry is
discarded.
Note: `lookup_entries` will be used as a **side input**, so care
must be taken regarding the size of the `lookup_entries`
"""
filter_dict_prepared = beam.pvalue.AsDict(
lookup_entries | beam.Map(lambda row: (row[filter_key], True))
)
def _filter_fn(row, filter_dict):
return row[filter_key] in filter_dict
return pcoll | beam.Filter(_filter_fn, filter_dict=filter_dict_prepared)
def _compare(left, right, tag, dict_keys=None):
def tyname(obj):
return type(obj).__name__
if type(left) != type(right):
return {
"tag": tag,
"detail": "type mismatch",
"left": tyname(left),
"right": tyname(right),
}
if isinstance(left, list):
if len(left) != len(right):
return {
"tag": tag,
"detail": "different array length",
"left": len(left),
"right": len(right),
}
for index, (lv, rv) in enumerate(zip(left, right)):
diff = _compare(lv, rv, f"{tag}[{index}]")
if diff is not None:
return diff
elif isinstance(left, dict):
keys = set(left.keys()) | set(right.keys())
if dict_keys is not None:
keys = keys & set(dict_keys)
for k in keys:
next_tag = f"{tag}/{k}"
if k not in left:
return {"tag": next_tag, "detail": "does not exist in left"}
if k not in right:
return {"tag": next_tag, "detail": "does not exist in right"}
diff = _compare(left[k], right[k], next_tag)
if diff is not None:
return diff
else:
if left != right:
return {
"tag": tag,
"detail": "values not equal",
"left": left,
"right": right,
}
return None
class _DifferencePerKeyDoFn(beam.DoFn):
def __init__(self, columns):
super().__init__()
self._columns = columns
def process(self, row):
key, value_dict = row
left_iter = iter(value_dict["left"])
right_iter = iter(value_dict["right"])
l1 = take_top(left_iter)
l2 = take_top(left_iter)
r1 = take_top(right_iter)
r2 = take_top(right_iter)
l_count = [is_some(v) for v in [l1, l2]].count(True)
r_count = [is_some(v) for v in [r1, r2]].count(True)
error = None
if l_count != 1 or r_count != 1:
error = {
"reason": "count",
"left": l_count,
"right": r_count,
}
else:
lv = unwrap_or_none(l1)
rv = unwrap_or_none(r1)
result = _compare(lv, rv, "(root)", dict_keys=self._columns)
if result is not None:
error = {"reason": "value", **result}
if error is not None:
yield {
"key": key,
"error": error,
"left": unwrap_or_none(l1),
"right": unwrap_or_none(r1),
}
class DifferencePerKey(beam.PTransform):
"""Produces the difference per key between two `PCollection`s.
Given two `PCollection`s of `V`,
this transform indexes the collections by the specified keys `primary_keys`,
compares corresponding two `V` lists for every `K`,
and produces the difference per `K`.
If there is no difference, this transform produces nothing.
Two `V` lists are considered to be different if the numbers of elements
differ or two elements of the lists with a same index differ
at one of the specified columns `columns`.
"""
def __init__(self, primary_keys, columns):
super().__init__()
self._primary_keys = primary_keys
self._columns = columns
def expand(self, pcolls):
pcolls = list(pcolls)
assert len(pcolls) == 2
left, right = pcolls[0], pcolls[1]
return (
{
"left": left | "Index[left]" >> IndexBy(*self._primary_keys),
"right": right | "Index[right]" >> IndexBy(*self._primary_keys),
}
| beam.CoGroupByKey()
| beam.ParDo(_DifferencePerKeyDoFn(self._columns))
)
@beam.ptransform_fn
def MaxSelectPerKey(pcoll, index_keys, sort_key_fn, reverse=False):
"""
- Groups items by key
- Sorts using a key function
- Emits the "MAX" _value_ for each collection - key is stripped.
- Can emit "MIN" by passing reverse=True kwarg
"""
return (
pcoll
| f"Index by {index_keys}" >> IndexBy(*index_keys)
| "Top 1 per key" >> beam.combiners.Top.PerKey(1, key=sort_key_fn, reverse=reverse)
| "De-Index" >> beam.Map(lambda k_v: k_v[1][0])
)
@beam.ptransform_fn
def PartitionRowsContainingNone(pcoll):
"""Emits two tagged pcollections:
- None: Default emitted collection.
Rows are guaranteed not to have any `None` values
- contains_none: At least one column in the row had a `None` value
"""
def _separator(row):
if any([value is None for value in row.values()]):
yield beam.pvalue.TaggedOutput("contains_none", row)
else:
yield row
return pcoll | beam.ParDo(_separator).with_outputs()
| en | 0.835653 | Produces elements that are the only elements per key after deduplication. Given a `PCollection` of `(K, V)`, this transform produces the collection of all `V`s that do not share the same corresponding `K`s with any other elements after deduplicating all equivalent `(K, V)` pairs. This transform is equivalent to `SingletonOnly` with `apache_beam.Distinct`. `[(1, "A"), (2, "B1"), (2, "B2"), (3, "C"), (3, "C"), (4, "A")]` will be transformed into `["A", "C", "A"]`. # As the input collection may include `None`s, this transform uses # lists, instead of `None`s, to express the emptyness of values. Produces elements that are the only elements per key. Given a `PCollection` of `(K, V)`, this transform produces the collection of all `V`s that do not share the same corresponding `K`s with any other elements. `[(1, "A"), (2, "B1"), (2, "B2"), (3, "C"), (3, "C"), (4, "A")]` will be transformed into `["A", "A"]`. # As the input collection may include `None`s, this transform uses # tuples, instead of `None`s, to express the emptyness of values. Produces the intersection of given `PCollection`s. Given a list of `PCollection`s, this transform produces every element that appears in all collections of the list. Elements are deduplicated before taking the intersection. Filters elements by their keys. The constructor receives one or more `PCollection`s of `K`s, which are regarded as key lists. Given a `PCollection` of `(K, V)`, this transform discards all elements with `K`s that do not appear in the key lists. If multiple collections are given to the constructor, this transform treats the intersection of them as the key list. Filters a single collection by a single lookup collection, using a common key. 
Given: - a `PCollection` (lookup_entries) of `(V)`, as a lookup collection - a `PCollection` (pcoll) of `(V)`, as values to be filtered - a common key (filter_key) A dictionary called `filter_dict` - is created by mapping the value of `filter_key` for each entry in `lookup_entries` to True. Then, for each item in pcoll, the value associated with `filter_key` checkd against `filter_dict`, and if it is found, the entry passes through. Otherwise, the entry is discarded. Note: `lookup_entries` will be used as a **side input**, so care must be taken regarding the size of the `lookup_entries` Produces the difference per key between two `PCollection`s. Given two `PCollection`s of `V`, this transform indexes the collections by the specified keys `primary_keys`, compares corresponding two `V` lists for every `K`, and produces the difference per `K`. If there is no difference, this transform produces nothing. Two `V` lists are considered to be different if the numbers of elements differ or two elements of the lists with a same index differ at one of the specified columns `columns`. - Groups items by key - Sorts using a key function - Emits the "MAX" _value_ for each collection - key is stripped. - Can emit "MIN" by passing reverse=True kwarg Emits two tagged pcollections: - None: Default emitted collection. Rows are guaranteed not to have any `None` values - contains_none: At least one column in the row had a `None` value | 2.473761 | 2 |
yolo3/utils.py | hoshinohikari/stereo-target-detection-and-ranging | 0 | 6619910 | """Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import cv2
def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    Reference: https://mathieularose.com/function-composition-in-python/
    """
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')

    def _chain(inner, outer):
        # Feed inner's result into outer; only the first function may
        # take arbitrary positional/keyword arguments.
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))
        return composed

    return reduce(_chain, funcs)
def letterbox_image(image, size):
    '''Resize image with unchanged aspect ratio using padding.

    Args:
        image: image as a numpy array of shape (h, w, 3).
            NOTE(review): a single-channel (h, w) input would fail the
            paste below; callers appear to pass 3-channel frames.
        size: (width, height) of the output canvas.

    Returns:
        A (height, width, 3) uint8 array with the resized image centered
        on a gray (128, 128, 128) canvas.
    '''
    ih = image.shape[0]
    iw = image.shape[1]
    w, h = size
    # Uniform scale so the whole image fits inside the target canvas.
    scale = min(float(w) / iw, float(h) / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)
    image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)
    # Build the gray canvas directly; this replaces the original
    # zeros + cvtColor + three per-channel assignments (same result).
    new_image = np.full((h, w, 3), 128, dtype=np.uint8)
    # Paste the resized image centered on the canvas.
    top = (h - nh) // 2
    left = (w - nw) // 2
    new_image[top:top + nh, left:left + nw] = image
    return new_image
| """Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import cv2
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
isize = image.shape
ih = isize[0]
iw = isize[1]
#iw, ih = image.size
w, h = size
scale = min(float(w)/iw, float(h)/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = cv2.resize(image, (nw, nh), interpolation = cv2.INTER_CUBIC)
#image = image.resize((nw,nh), Image.BICUBIC)
img = np.zeros((h, w), dtype = np.uint8)
new_image = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
new_image[:,:,0] = 128
new_image[:,:,1] = 128
new_image[:,:,2] = 128
#new_image = Image.new('RGB', size, (128,128,128))
new_image[(h-nh)//2:(h-nh)//2+nh, (w-nw)//2:(w-nw)//2+nw] = image
#new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
| en | 0.539681 | Miscellaneous utility functions. Compose arbitrarily many functions, evaluated left to right. Reference: https://mathieularose.com/function-composition-in-python/ # return lambda x: reduce(lambda v, f: f(v), funcs, x) resize image with unchanged aspect ratio using padding #iw, ih = image.size #image = image.resize((nw,nh), Image.BICUBIC) #new_image = Image.new('RGB', size, (128,128,128)) #new_image.paste(image, ((w-nw)//2, (h-nh)//2)) | 3.2845 | 3 |
ui/materials.py | jasperges/blenderseed | 0 | 6619911 |
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
#---------------------------------------------
# node tree selector ui
#---------------------------------------------
def node_tree_selector_draw(layout, mat, output_type):
    """Draw the node-tree selector UI for a material.

    Returns True when the material's assigned node tree contains a node
    of ``output_type``. When it does not and no tree is assigned, also
    draws the "add node tree" operator button and returns False.
    """
    try:
        layout.prop_search(mat.appleseed, "node_tree", bpy.data, "node_groups")
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed. Still best-effort: any draw failure
        # simply aborts the selector.
        return False

    node = find_node(mat, output_type)
    if not node:
        if mat.appleseed.node_tree == '':
            layout.operator('appleseed.add_material_nodetree', text="appleseed Node", icon='NODETREE')
        return False
    return True
def find_node(material, nodetype):
    """Return the first node with bl_idname ``nodetype`` in the material's
    appleseed node tree, or None when no usable tree or node exists."""
    if not (material and material.appleseed and material.appleseed.node_tree):
        return None
    tree_name = material.appleseed.node_tree
    if tree_name == '':
        return None
    # Scan the named node group for the first matching node type.
    for candidate in bpy.data.node_groups[tree_name].nodes:
        if getattr(candidate, "bl_idname", None) == nodetype:
            return candidate
    return None
#---------------------------------------------
# material preview panel
#---------------------------------------------
class AppleseedMaterialPreview(bpy.types.Panel):
    """Material preview panel for the appleseed render engine."""

    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"
    bl_label = "Preview"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}

    @classmethod
    def poll(cls, context):
        # Show only when appleseed is active and an object with an
        # active material is selected.
        return context.scene.render.engine in cls.COMPAT_ENGINES and context.object is not None and context.object.active_material is not None

    def draw(self, context):
        layout = self.layout
        obj = context.object
        material = obj.active_material
        asr_mat = material.appleseed
        layout.template_preview(context.material, show_buttons=False)
        layout.prop(asr_mat, "preview_quality")
#---------------------------------------------
# material bsdf slot
#---------------------------------------------
class MATERIAL_UL_BSDF_slots(bpy.types.UIList):
    """UIList drawing one row per BSDF layer of a material."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        BSDF = item
        bsdf_type = BSDF.bsdf_type
        # E.g. "lambertian_brdf" -> "Lambertian BRDF" (assumes the type
        # string always ends in a 4-letter suffix like "brdf").
        bsdf_type_name = bsdf_type[0].upper() + bsdf_type[1:-5] + " " + bsdf_type[-4:].upper()
        if 'DEFAULT' in self.layout_type:
            layout.label(text=BSDF.name + " | " + bsdf_type_name, translate=False, icon_value=icon)
#---------------------------------------------
# material shading panel
#---------------------------------------------
class AppleseedMaterialShading(bpy.types.Panel):
    """Surface-shader panel: node-tree selection or the BSDF layer stack.

    Fixes applied:
    - Oren-Nayar roughness color-space UI looked up ``orennayar_diffuse_tex``
      instead of ``orennayar_rough_tex`` (copy-paste bug).
    - The specular BTDF transmittance color-space UI was missing the
      ``spec_btdf_use_trans_tex`` guard that every sibling section has.

    NOTE(review): indentation was lost in this copy of the file; the nesting
    below is reconstructed from the repeated per-texture UI pattern — verify
    against upstream blenderseed ui.py.
    """
    bl_label = 'Surface Shader'
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}

    @classmethod
    def poll(cls, context):
        renderer = context.scene.render
        return renderer.engine == 'APPLESEED_RENDER' and context.object is not None and context.object.type == 'MESH' and context.object.active_material is not None

    def draw(self, context):
        layout = self.layout
        object = context.object
        material = object.active_material
        asr_mat = material.appleseed

        node_tree_selector_draw(layout, material, 'AppleseedMaterialNode')
        if asr_mat.node_tree != '':
            # A node tree drives the material: only expose the output node.
            node_tree = bpy.data.node_groups[asr_mat.node_tree]
            layout.prop_search(asr_mat, "node_output", node_tree, "nodes")
        if asr_mat.node_tree == '':
            row = layout.row()
            row.template_list("MATERIAL_UL_BSDF_slots", "appleseed_material_layers", asr_mat,
                              "layers", asr_mat, "layer_index", rows=1, maxrows=16, type="DEFAULT")
            row = layout.row(align=True)
            row.operator("appleseed.add_matlayer", text="Add Layer", icon="ZOOMIN")
            row.operator("appleseed.remove_matlayer", text="Remove", icon="ZOOMOUT")
            if asr_mat.layers:
                current_layer = asr_mat.layers[asr_mat.layer_index]
                layout.prop(current_layer, "name")
                layout.prop(current_layer, "bsdf_type")
                layout.separator()
                # lambertian brdf layout
                if current_layer.bsdf_type == "lambertian_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "lambertian_weight", text="Layer Weight")
                    if current_layer.lambertian_use_tex:
                        layout.prop_search(current_layer, "lambertian_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "lambertian_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.lambertian_mix_tex != '' and current_layer.lambertian_use_tex:
                        mix_tex = bpy.data.textures[current_layer.lambertian_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "lambertian_reflectance", text="")
                    if current_layer.lambertian_use_diff_tex:
                        layout.prop_search(current_layer, "lambertian_diffuse_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "lambertian_use_diff_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.lambertian_diffuse_tex != '' and current_layer.lambertian_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.lambertian_diffuse_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "lambertian_multiplier")
                # oren-nayar brdf layout
                if current_layer.bsdf_type == "orennayar_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "orennayar_weight", text="Layer Weight")
                    if current_layer.orennayar_use_tex:
                        layout.prop_search(current_layer, "orennayar_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "orennayar_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.orennayar_mix_tex != '' and current_layer.orennayar_use_tex:
                        mix_tex = bpy.data.textures[current_layer.orennayar_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "orennayar_reflectance", text="")
                    if current_layer.orennayar_use_diff_tex:
                        layout.prop_search(current_layer, "orennayar_diffuse_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "orennayar_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.orennayar_diffuse_tex != '' and current_layer.orennayar_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.orennayar_diffuse_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "orennayar_multiplier")
                    # roughness
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "orennayar_roughness")
                    if current_layer.orennayar_use_rough_tex:
                        layout.prop_search(current_layer, "orennayar_rough_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "orennayar_use_rough_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.orennayar_rough_tex != '' and current_layer.orennayar_use_rough_tex:
                        # BUGFIX: previously looked up orennayar_diffuse_tex here.
                        rough_tex = bpy.data.textures[current_layer.orennayar_rough_tex]
                        layout.prop(rough_tex.image.colorspace_settings, "name", text="Color Space")
                # ashikhmin-shirley brdf layout
                elif current_layer.bsdf_type == "ashikhmin_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_weight", text="Layer Weight")
                    if current_layer.ashikhmin_use_tex:
                        layout.prop_search(current_layer, "ashikhmin_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.ashikhmin_mix_tex != '' and current_layer.ashikhmin_use_tex:
                        mix_tex = bpy.data.textures[current_layer.ashikhmin_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_reflectance", text="")
                    if current_layer.ashikhmin_use_diff_tex:
                        layout.prop_search(current_layer, "ashikhmin_diffuse_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.ashikhmin_diffuse_tex != '' and current_layer.ashikhmin_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.ashikhmin_diffuse_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    row = layout.row()
                    row.prop(current_layer, "ashikhmin_multiplier")
                    # glossiness
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_glossy", text="")
                    if current_layer.ashikhmin_use_gloss_tex:
                        layout.prop_search(current_layer, "ashikhmin_gloss_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_use_gloss_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.ashikhmin_gloss_tex != '' and current_layer.ashikhmin_use_gloss_tex:
                        gloss_tex = bpy.data.textures[current_layer.ashikhmin_gloss_tex]
                        layout.prop(gloss_tex.image.colorspace_settings, "name", text="Color Space")
                    row = layout.row()
                    row.prop(current_layer, "ashikhmin_glossy_multiplier")
                    # fresnel
                    col = layout.column()
                    col.prop(current_layer, "ashikhmin_fresnel")
                    layout.prop(current_layer, "ashikhmin_shininess_u")
                    layout.prop(current_layer, "ashikhmin_shininess_v")
                # diffuse btdf layout
                elif current_layer.bsdf_type == "diffuse_btdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "transmittance_weight", text="Layer Weight")
                    if current_layer.transmittance_use_tex:
                        layout.prop_search(current_layer, "transmittance_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "transmittance_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.transmittance_mix_tex != '' and current_layer.transmittance_use_tex:
                        mix_tex = bpy.data.textures[current_layer.transmittance_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "transmittance_color", text="")
                    if current_layer.transmittance_use_diff_tex:
                        layout.prop_search(current_layer, "transmittance_diff_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "transmittance_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.transmittance_diff_tex != '' and current_layer.transmittance_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.transmittance_diff_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    # transmittance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "transmittance_multiplier", text="Transmittance")
                    if current_layer.transmittance_use_mult_tex:
                        layout.prop_search(current_layer, "transmittance_mult_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "transmittance_use_mult_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.transmittance_mult_tex != '' and current_layer.transmittance_use_mult_tex:
                        mult_tex = bpy.data.textures[current_layer.transmittance_mult_tex]
                        layout.prop(mult_tex.image.colorspace_settings, "name", text="Color Space")
                # disney brdf layout
                elif current_layer.bsdf_type == "disney_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_weight", text="Layer Weight")
                    if current_layer.disney_use_tex:
                        layout.prop_search(current_layer, "disney_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "disney_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_mix_tex != '' and current_layer.disney_use_tex:
                        mix_tex = bpy.data.textures[current_layer.disney_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # base color
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_base", text="")
                    if current_layer.disney_use_base_tex:
                        layout.prop_search(current_layer, "disney_base_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_base_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_base_tex != '' and current_layer.disney_use_base_tex:
                        base_tex = bpy.data.textures[current_layer.disney_base_tex]
                        layout.prop(base_tex.image.colorspace_settings, "name", text="Color Space")
                    # subsurface
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_subsurface")
                    if current_layer.disney_use_subsurface_tex:
                        layout.prop_search(current_layer, "disney_subsurface_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_subsurface_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_subsurface_tex != '' and current_layer.disney_use_subsurface_tex:
                        subsurface_tex = bpy.data.textures[current_layer.disney_subsurface_tex]
                        layout.prop(subsurface_tex.image.colorspace_settings, "name", text="Color Space")
                    # metallic
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_metallic")
                    if current_layer.disney_use_metallic_tex:
                        layout.prop_search(current_layer, "disney_metallic_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_metallic_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_metallic_tex != '' and current_layer.disney_use_metallic_tex:
                        metal_tex = bpy.data.textures[current_layer.disney_metallic_tex]
                        layout.prop(metal_tex.image.colorspace_settings, "name", text="Color Space")
                    # specular
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_spec")
                    if current_layer.disney_use_spec_tex:
                        layout.prop_search(current_layer, "disney_spec_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_spec_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_spec_tex != '' and current_layer.disney_use_spec_tex:
                        spec_tex = bpy.data.textures[current_layer.disney_spec_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    # specular tint
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_spec_tint")
                    if current_layer.disney_use_spec_tint_tex:
                        layout.prop_search(current_layer, "disney_spec_tint_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_spec_tint_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_spec_tint_tex != '' and current_layer.disney_use_spec_tint_tex:
                        spec_tint_tex = bpy.data.textures[current_layer.disney_spec_tint_tex]
                        layout.prop(spec_tint_tex.image.colorspace_settings, "name", text="Color Space")
                    # anisotropy
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_aniso")
                    if current_layer.disney_use_aniso_tex:
                        layout.prop_search(current_layer, "disney_aniso_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_aniso_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_aniso_tex != '' and current_layer.disney_use_aniso_tex:
                        aniso_tex = bpy.data.textures[current_layer.disney_aniso_tex]
                        layout.prop(aniso_tex.image.colorspace_settings, "name", text="Color Space")
                    # roughness
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_roughness")
                    if current_layer.disney_use_roughness_tex:
                        layout.prop_search(current_layer, "disney_roughness_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_roughness_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_roughness_tex != '' and current_layer.disney_use_roughness_tex:
                        rough_tex = bpy.data.textures[current_layer.disney_roughness_tex]
                        layout.prop(rough_tex.image.colorspace_settings, "name", text="Color Space")
                    # sheen
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_sheen")
                    if current_layer.disney_use_sheen_tex:
                        layout.prop_search(current_layer, "disney_sheen_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_sheen_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_sheen_tex != '' and current_layer.disney_use_sheen_tex:
                        sheen_tex = bpy.data.textures[current_layer.disney_sheen_tex]
                        layout.prop(sheen_tex.image.colorspace_settings, "name", text="Color Space")
                    # sheen tint
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_sheen_tint")
                    if current_layer.disney_use_sheen_tint_tex:
                        layout.prop_search(current_layer, "disney_sheen_tint_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_sheen_tint_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_sheen_tint_tex != '' and current_layer.disney_use_sheen_tint_tex:
                        sheen_tint_tex = bpy.data.textures[current_layer.disney_sheen_tint_tex]
                        layout.prop(sheen_tint_tex.image.colorspace_settings, "name", text="Color Space")
                    # clear coat
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_clearcoat")
                    if current_layer.disney_use_clearcoat_tex:
                        layout.prop_search(current_layer, "disney_clearcoat_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_clearcoat_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_clearcoat_tex != '' and current_layer.disney_use_clearcoat_tex:
                        clearcoat_tex = bpy.data.textures[current_layer.disney_clearcoat_tex]
                        layout.prop(clearcoat_tex.image.colorspace_settings, "name", text="Color Space")
                    # clear coat gloss
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_clearcoat_gloss")
                    if current_layer.disney_use_clearcoat_gloss_tex:
                        layout.prop_search(current_layer, "disney_clearcoat_gloss_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_clearcoat_gloss_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_clearcoat_gloss_tex != '' and current_layer.disney_use_clearcoat_gloss_tex:
                        clearcoat_gloss_tex = bpy.data.textures[current_layer.disney_clearcoat_gloss_tex]
                        layout.prop(clearcoat_gloss_tex.image.colorspace_settings, "name", text="Color Space")
                # kelemen brdf layout
                elif current_layer.bsdf_type == "kelemen_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "kelemen_weight", text="Layer Weight")
                    if current_layer.kelemen_use_tex:
                        layout.prop_search(current_layer, "kelemen_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "kelemen_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.kelemen_mix_tex != '' and current_layer.kelemen_use_tex:
                        mix_tex = bpy.data.textures[current_layer.kelemen_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "kelemen_matte_reflectance", text="")
                    if current_layer.kelemen_use_diff_tex:
                        layout.prop_search(current_layer, "kelemen_diff_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "kelemen_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.kelemen_diff_tex != '' and current_layer.kelemen_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.kelemen_diff_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    row = layout.row()
                    row.prop(current_layer, "kelemen_matte_multiplier")
                    # specular reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "kelemen_specular_reflectance", text="")
                    if current_layer.kelemen_use_spec_tex:
                        layout.prop_search(current_layer, "kelemen_spec_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "kelemen_use_spec_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.kelemen_spec_tex != '' and current_layer.kelemen_use_spec_tex:
                        spec_tex = bpy.data.textures[current_layer.kelemen_spec_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "kelemen_specular_multiplier")
                    layout.prop(current_layer, "kelemen_roughness")
                # microfacet brdf layout
                elif current_layer.bsdf_type == "microfacet_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "microfacet_weight", text="Layer Weight")
                    if current_layer.microfacet_use_tex:
                        layout.prop_search(current_layer, "microfacet_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "microfacet_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.microfacet_mix_tex != '' and current_layer.microfacet_use_tex:
                        mix_tex = bpy.data.textures[current_layer.microfacet_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "microfacet_model", text="Model")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "microfacet_reflectance", text="")
                    if current_layer.microfacet_use_diff_tex:
                        layout.prop_search(current_layer, "microfacet_diff_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "microfacet_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.microfacet_diff_tex != '' and current_layer.microfacet_use_diff_tex:
                        diff_tex = bpy.data.textures[current_layer.microfacet_diff_tex]
                        layout.prop(diff_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "microfacet_multiplier")
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "microfacet_mdf")
                    layout.prop(current_layer, "microfacet_mdf_multiplier")
                    # specular
                    if current_layer.microfacet_use_spec_tex:
                        layout.prop_search(current_layer, "microfacet_spec_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "microfacet_use_spec_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.microfacet_spec_tex != '' and current_layer.microfacet_use_spec_tex:
                        spec_tex = bpy.data.textures[current_layer.microfacet_spec_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    # fresnel
                    layout.prop(current_layer, "microfacet_fresnel")
                # specular brdf layout
                elif current_layer.bsdf_type == "specular_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "specular_weight", text="Layer Weight")
                    if current_layer.specular_use_tex:
                        layout.prop_search(current_layer, "specular_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "specular_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.specular_mix_tex != '' and current_layer.specular_use_tex:
                        mix_tex = bpy.data.textures[current_layer.specular_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # specular reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "specular_reflectance", text="")
                    if current_layer.specular_use_gloss_tex:
                        layout.prop_search(current_layer, "specular_gloss_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "specular_use_gloss_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.specular_gloss_tex != '' and current_layer.specular_use_gloss_tex:
                        spec_tex = bpy.data.textures[current_layer.specular_gloss_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "specular_multiplier")
                # specular btdf layout
                elif current_layer.bsdf_type == "specular_btdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_weight", text="Layer Weight")
                    if current_layer.spec_btdf_use_tex:
                        layout.prop_search(current_layer, "spec_btdf_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.spec_btdf_mix_tex != '' and current_layer.spec_btdf_use_tex:
                        mix_tex = bpy.data.textures[current_layer.spec_btdf_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # specular reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_reflectance", text="")
                    if current_layer.spec_btdf_use_spec_tex:
                        layout.prop_search(current_layer, "spec_btdf_spec_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_use_spec_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.spec_btdf_spec_tex != '' and current_layer.spec_btdf_use_spec_tex:
                        spec_tex = bpy.data.textures[current_layer.spec_btdf_spec_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "spec_btdf_refl_mult")
                    # transmittance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_transmittance", text="")
                    if current_layer.spec_btdf_use_trans_tex:
                        layout.prop_search(current_layer, "spec_btdf_trans_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_use_trans_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    # BUGFIX: add the use_trans_tex guard, consistent with every sibling section.
                    if current_layer.spec_btdf_trans_tex != '' and current_layer.spec_btdf_use_trans_tex:
                        trans_tex = bpy.data.textures[current_layer.spec_btdf_trans_tex]
                        layout.prop(trans_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "spec_btdf_trans_mult")
                    # IOR
                    row = layout.row(align=True)
                    row.prop(current_layer, "spec_btdf_ior")
            # bump/normal mapping
            layout.separator()
            split = layout.split(percentage=0.50)
            col = split.column()
            col.prop(asr_mat, "material_use_bump_tex", text="Bump Map", icon="POTATO", toggle=True)
            col = split.column()
            if asr_mat.material_use_bump_tex:
                col.prop(asr_mat, "material_use_normalmap", text="Normal Map", toggle=True)
                layout.prop_search(asr_mat, "material_bump_tex", material, "texture_slots", text="")
                if asr_mat.material_bump_tex != '':
                    bump_tex = bpy.data.textures[asr_mat.material_bump_tex]
                    layout.prop(bump_tex.image.colorspace_settings, "name", text="Color Space")
                layout.prop(asr_mat, "material_bump_amplitude", text="Bump Amplitude")
            # alpha
            split = layout.split(percentage=0.50)
            col = split.column()
            col.prop(asr_mat, "material_use_alpha", text="Alpha Map", icon="POTATO", toggle=True)
            col = split.column()
            if asr_mat.material_use_alpha:
                col.prop_search(asr_mat, "material_alpha_map", material, "texture_slots", text="")
                if asr_mat.material_alpha_map != '':
                    alpha_tex = bpy.data.textures[asr_mat.material_alpha_map]
                    layout.prop(alpha_tex.image.colorspace_settings, "name", text="Color Space")
            else:
                col.prop(asr_mat, "material_alpha")
            layout.prop(asr_mat, "shade_alpha_cutouts")
#---------------------------------------------
# light material panel
#---------------------------------------------
class AppleseedMatEmissionPanel(bpy.types.Panel):
    """Panel exposing the material's light-emission (area light) settings."""
    bl_label = "Light Material"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # BUGFIX: the original evaluated context.object.type and
        # context.object.active_material unconditionally, raising
        # AttributeError when no object was selected. Short-circuit instead.
        if context.scene.render.engine != 'APPLESEED_RENDER':
            return False
        obj = context.object
        if obj is None or obj.type != 'MESH':
            return False
        mat = obj.active_material
        if mat is None:
            return False
        # Only plain (non node-tree) materials can act as light materials.
        return mat.appleseed.node_tree == ''

    def draw_header(self, context):
        # Checkbox in the panel header toggles emission on/off.
        header = self.layout
        asr_mat = context.object.active_material.appleseed
        header.prop(asr_mat, "use_light_emission")

    def draw(self, context):
        layout = self.layout
        material = context.object.active_material
        asr_mat = material.appleseed
        col = layout.column()
        # Grey out the controls while emission is disabled.
        col.active = asr_mat.use_light_emission
        col.prop(asr_mat, "light_color", text="")
        col.prop(asr_mat, "light_emission", text="Radiance Multiplier")
        layout.active = asr_mat.use_light_emission
        layout.row(align=True)  # spacer row kept from the original layout
        layout.prop(asr_mat, "cast_indirect")
        layout.prop(asr_mat, "importance_multiplier")
        layout.prop(asr_mat, "light_near_start")
def register():
    """Enable appleseed compatibility on stock panels and register our UI classes."""
    for panel in (bpy.types.MATERIAL_PT_context_material,
                  bpy.types.MATERIAL_PT_custom_props):
        panel.COMPAT_ENGINES.add('APPLESEED_RENDER')
    for ui_class in (AppleseedMaterialPreview,
                     MATERIAL_UL_BSDF_slots,
                     AppleseedMaterialShading,
                     AppleseedMatEmissionPanel):
        bpy.utils.register_class(ui_class)
def unregister():
    """Unregister the UI classes added by register()."""
    for ui_class in (AppleseedMaterialPreview,
                     MATERIAL_UL_BSDF_slots,
                     AppleseedMaterialShading,
                     AppleseedMatEmissionPanel):
        bpy.utils.unregister_class(ui_class)
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
#---------------------------------------------
# node tree selector ui
#---------------------------------------------
def node_tree_selector_draw(layout, mat, output_type):
    """Draw the node-tree selector for *mat*.

    If the material has no tree assigned, also draw the operator button that
    creates one. Returns True when the assigned tree already contains a node
    of bl_idname *output_type*, False otherwise (including when the selector
    itself could not be drawn).
    """
    try:
        layout.prop_search(mat.appleseed, "node_tree", bpy.data, "node_groups")
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Still broad — presumably guarding a missing
        # appleseed property group; TODO confirm and narrow further.
        return False
    node = find_node(mat, output_type)
    if not node:
        if mat.appleseed.node_tree == '':
            layout.operator('appleseed.add_material_nodetree', text="appleseed Node", icon='NODETREE')
        return False
    return True
def find_node(material, nodetype):
    """Return the first node with bl_idname *nodetype* in *material*'s
    appleseed node tree, or None when there is no tree or no such node.
    """
    if not (material and material.appleseed and material.appleseed.node_tree):
        return None
    # The empty-string check of the original was redundant: '' is falsy and
    # already rejected above. Use .get() so a dangling tree name (group
    # deleted after assignment) returns None instead of raising KeyError.
    ntree = bpy.data.node_groups.get(material.appleseed.node_tree)
    if ntree is None:
        return None
    for node in ntree.nodes:
        if getattr(node, "bl_idname", None) == nodetype:
            return node
    return None
#---------------------------------------------
# material preview panel
#---------------------------------------------
class AppleseedMaterialPreview(bpy.types.Panel):
    """Material preview panel for the appleseed render engine."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"
    bl_label = "Preview"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}

    @classmethod
    def poll(cls, context):
        engine_ok = context.scene.render.engine in cls.COMPAT_ENGINES
        has_material = context.object is not None and context.object.active_material is not None
        return engine_ok and has_material

    def draw(self, context):
        layout = self.layout
        asr_mat = context.object.active_material.appleseed
        layout.template_preview(context.material, show_buttons=False)
        layout.prop(asr_mat, "preview_quality")
#---------------------------------------------
# material bsdf slot
#---------------------------------------------
class MATERIAL_UL_BSDF_slots(bpy.types.UIList):
    """Draws one entry of the BSDF layer list."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        bsdf = item.bsdf_type
        # "xxx_brdf" -> "Xxx BRDF"
        display_name = bsdf[0].upper() + bsdf[1:-5] + " " + bsdf[-4:].upper()
        if 'DEFAULT' in self.layout_type:
            layout.label(text=item.name + " | " + display_name, translate=False, icon_value=icon)
#---------------------------------------------
# material shading panel
#---------------------------------------------
class AppleseedMaterialShading(bpy.types.Panel):
    """Surface-shader panel for appleseed materials.

    For node-based materials it only exposes the node-tree output selector;
    for layered materials it draws the BSDF layer list plus the per-layer
    parameters, followed by shared bump/normal and alpha settings.
    """
    bl_label = 'Surface Shader'
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}

    @classmethod
    def poll(cls, context):
        """Show only for mesh objects with an active material under appleseed."""
        renderer = context.scene.render
        return renderer.engine == 'APPLESEED_RENDER' and context.object is not None and context.object.type == 'MESH' and context.object.active_material is not None

    def draw(self, context):
        layout = self.layout
        obj = context.object  # renamed from 'object' to avoid shadowing the builtin
        material = obj.active_material
        asr_mat = material.appleseed
        node_tree_selector_draw(layout, material, 'AppleseedMaterialNode')
        if asr_mat.node_tree != '':
            node_tree = bpy.data.node_groups[asr_mat.node_tree]
            layout.prop_search(asr_mat, "node_output", node_tree, "nodes")
        if asr_mat.node_tree == '':
            row = layout.row()
            row.template_list("MATERIAL_UL_BSDF_slots", "appleseed_material_layers", asr_mat,
                              "layers", asr_mat, "layer_index", rows=1, maxrows=16, type="DEFAULT")
            row = layout.row(align=True)
            row.operator("appleseed.add_matlayer", text="Add Layer", icon="ZOOMIN")
            row.operator("appleseed.remove_matlayer", text="Remove", icon="ZOOMOUT")
            if asr_mat.layers:
                current_layer = asr_mat.layers[asr_mat.layer_index]
                layout.prop(current_layer, "name")
                layout.prop(current_layer, "bsdf_type")
                layout.separator()
                # lambertian brdf layout
                if current_layer.bsdf_type == "lambertian_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "lambertian_weight", text="Layer Weight")
                    if current_layer.lambertian_use_tex:
                        layout.prop_search(current_layer, "lambertian_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "lambertian_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.lambertian_mix_tex != '' and current_layer.lambertian_use_tex:
                        mix_tex = bpy.data.textures[current_layer.lambertian_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "lambertian_reflectance", text="")
                    if current_layer.lambertian_use_diff_tex:
                        layout.prop_search(current_layer, "lambertian_diffuse_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "lambertian_use_diff_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.lambertian_diffuse_tex != '' and current_layer.lambertian_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.lambertian_diffuse_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "lambertian_multiplier")
                #-------------------------------------------------
                # oren-nayar brdf layout
                if current_layer.bsdf_type == "orennayar_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "orennayar_weight", text="Layer Weight")
                    if current_layer.orennayar_use_tex:
                        layout.prop_search(current_layer, "orennayar_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "orennayar_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.orennayar_mix_tex != '' and current_layer.orennayar_use_tex:
                        mix_tex = bpy.data.textures[current_layer.orennayar_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "orennayar_reflectance", text="")
                    if current_layer.orennayar_use_diff_tex:
                        layout.prop_search(current_layer, "orennayar_diffuse_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "orennayar_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.orennayar_diffuse_tex != '' and current_layer.orennayar_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.orennayar_diffuse_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "orennayar_multiplier")
                    # roughness
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "orennayar_roughness")
                    if current_layer.orennayar_use_rough_tex:
                        layout.prop_search(current_layer, "orennayar_rough_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "orennayar_use_rough_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.orennayar_rough_tex != '' and current_layer.orennayar_use_rough_tex:
                        # BUGFIX: previously looked up orennayar_diffuse_tex here,
                        # showing the diffuse texture's color space for the roughness slot.
                        rough_tex = bpy.data.textures[current_layer.orennayar_rough_tex]
                        layout.prop(rough_tex.image.colorspace_settings, "name", text="Color Space")
                #-------------------------------------------------
                # ashikhmin-shirley brdf layout
                elif current_layer.bsdf_type == "ashikhmin_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_weight", text="Layer Weight")
                    if current_layer.ashikhmin_use_tex:
                        layout.prop_search(current_layer, "ashikhmin_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.ashikhmin_mix_tex != '' and current_layer.ashikhmin_use_tex:
                        mix_tex = bpy.data.textures[current_layer.ashikhmin_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_reflectance", text="")
                    if current_layer.ashikhmin_use_diff_tex:
                        layout.prop_search(current_layer, "ashikhmin_diffuse_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.ashikhmin_diffuse_tex != '' and current_layer.ashikhmin_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.ashikhmin_diffuse_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    row = layout.row()
                    row.prop(current_layer, "ashikhmin_multiplier")
                    # glossiness
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_glossy", text="")
                    if current_layer.ashikhmin_use_gloss_tex:
                        layout.prop_search(current_layer, "ashikhmin_gloss_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "ashikhmin_use_gloss_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.ashikhmin_gloss_tex != '' and current_layer.ashikhmin_use_gloss_tex:
                        gloss_tex = bpy.data.textures[current_layer.ashikhmin_gloss_tex]
                        layout.prop(gloss_tex.image.colorspace_settings, "name", text="Color Space")
                    row = layout.row()
                    row.prop(current_layer, "ashikhmin_glossy_multiplier")
                    # fresnel
                    col = layout.column()
                    col.prop(current_layer, "ashikhmin_fresnel")
                    layout.prop(current_layer, "ashikhmin_shininess_u")
                    layout.prop(current_layer, "ashikhmin_shininess_v")
                #-------------------------------------------------
                # diffuse btdf layout
                elif current_layer.bsdf_type == "diffuse_btdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "transmittance_weight", text="Layer Weight")
                    if current_layer.transmittance_use_tex:
                        layout.prop_search(current_layer, "transmittance_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "transmittance_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.transmittance_mix_tex != '' and current_layer.transmittance_use_tex:
                        mix_tex = bpy.data.textures[current_layer.transmittance_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "transmittance_color", text="")
                    if current_layer.transmittance_use_diff_tex:
                        layout.prop_search(current_layer, "transmittance_diff_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "transmittance_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.transmittance_diff_tex != '' and current_layer.transmittance_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.transmittance_diff_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    # transmittance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "transmittance_multiplier", text="Transmittance")
                    if current_layer.transmittance_use_mult_tex:
                        layout.prop_search(current_layer, "transmittance_mult_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "transmittance_use_mult_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.transmittance_mult_tex != '' and current_layer.transmittance_use_mult_tex:
                        mult_tex = bpy.data.textures[current_layer.transmittance_mult_tex]
                        layout.prop(mult_tex.image.colorspace_settings, "name", text="Color Space")
                #-------------------------------------------------
                # disney brdf layout
                elif current_layer.bsdf_type == "disney_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_weight", text="Layer Weight")
                    if current_layer.disney_use_tex:
                        layout.prop_search(current_layer, "disney_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "disney_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_mix_tex != '' and current_layer.disney_use_tex:
                        mix_tex = bpy.data.textures[current_layer.disney_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # base color
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_base", text="")
                    if current_layer.disney_use_base_tex:
                        layout.prop_search(current_layer, "disney_base_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_base_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_base_tex != '' and current_layer.disney_use_base_tex:
                        base_tex = bpy.data.textures[current_layer.disney_base_tex]
                        layout.prop(base_tex.image.colorspace_settings, "name", text="Color Space")
                    # subsurface
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_subsurface")
                    if current_layer.disney_use_subsurface_tex:
                        layout.prop_search(current_layer, "disney_subsurface_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_subsurface_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_subsurface_tex != '' and current_layer.disney_use_subsurface_tex:
                        subsurface_tex = bpy.data.textures[current_layer.disney_subsurface_tex]
                        layout.prop(subsurface_tex.image.colorspace_settings, "name", text="Color Space")
                    # metallic
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_metallic")
                    if current_layer.disney_use_metallic_tex:
                        layout.prop_search(current_layer, "disney_metallic_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_metallic_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_metallic_tex != '' and current_layer.disney_use_metallic_tex:
                        metal_tex = bpy.data.textures[current_layer.disney_metallic_tex]
                        layout.prop(metal_tex.image.colorspace_settings, "name", text="Color Space")
                    # specular
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_spec")
                    if current_layer.disney_use_spec_tex:
                        layout.prop_search(current_layer, "disney_spec_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_spec_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_spec_tex != '' and current_layer.disney_use_spec_tex:
                        spec_tex = bpy.data.textures[current_layer.disney_spec_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    # specular tint
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_spec_tint")
                    if current_layer.disney_use_spec_tint_tex:
                        layout.prop_search(current_layer, "disney_spec_tint_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_spec_tint_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_spec_tint_tex != '' and current_layer.disney_use_spec_tint_tex:
                        spec_tint_tex = bpy.data.textures[current_layer.disney_spec_tint_tex]
                        layout.prop(spec_tint_tex.image.colorspace_settings, "name", text="Color Space")
                    # anisotropy
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_aniso")
                    if current_layer.disney_use_aniso_tex:
                        layout.prop_search(current_layer, "disney_aniso_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_aniso_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_aniso_tex != '' and current_layer.disney_use_aniso_tex:
                        aniso_tex = bpy.data.textures[current_layer.disney_aniso_tex]
                        layout.prop(aniso_tex.image.colorspace_settings, "name", text="Color Space")
                    # roughness
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_roughness")
                    if current_layer.disney_use_roughness_tex:
                        layout.prop_search(current_layer, "disney_roughness_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_roughness_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_roughness_tex != '' and current_layer.disney_use_roughness_tex:
                        rough_tex = bpy.data.textures[current_layer.disney_roughness_tex]
                        layout.prop(rough_tex.image.colorspace_settings, "name", text="Color Space")
                    # sheen
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_sheen")
                    if current_layer.disney_use_sheen_tex:
                        layout.prop_search(current_layer, "disney_sheen_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_sheen_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_sheen_tex != '' and current_layer.disney_use_sheen_tex:
                        sheen_tex = bpy.data.textures[current_layer.disney_sheen_tex]
                        layout.prop(sheen_tex.image.colorspace_settings, "name", text="Color Space")
                    # sheen tint
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_sheen_tint")
                    if current_layer.disney_use_sheen_tint_tex:
                        layout.prop_search(current_layer, "disney_sheen_tint_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_sheen_tint_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_sheen_tint_tex != '' and current_layer.disney_use_sheen_tint_tex:
                        sheen_tint_tex = bpy.data.textures[current_layer.disney_sheen_tint_tex]
                        layout.prop(sheen_tint_tex.image.colorspace_settings, "name", text="Color Space")
                    # clear coat
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_clearcoat")
                    if current_layer.disney_use_clearcoat_tex:
                        layout.prop_search(current_layer, "disney_clearcoat_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_clearcoat_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_clearcoat_tex != '' and current_layer.disney_use_clearcoat_tex:
                        clearcoat_tex = bpy.data.textures[current_layer.disney_clearcoat_tex]
                        layout.prop(clearcoat_tex.image.colorspace_settings, "name", text="Color Space")
                    # clear coat gloss
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "disney_clearcoat_gloss")
                    if current_layer.disney_use_clearcoat_gloss_tex:
                        layout.prop_search(current_layer, "disney_clearcoat_gloss_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "disney_use_clearcoat_gloss_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.disney_clearcoat_gloss_tex != '' and current_layer.disney_use_clearcoat_gloss_tex:
                        clearcoat_gloss_tex = bpy.data.textures[current_layer.disney_clearcoat_gloss_tex]
                        layout.prop(clearcoat_gloss_tex.image.colorspace_settings, "name", text="Color Space")
                #-------------------------------------------------
                # kelemen brdf layout
                elif current_layer.bsdf_type == "kelemen_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "kelemen_weight", text="Layer Weight")
                    if current_layer.kelemen_use_tex:
                        layout.prop_search(current_layer, "kelemen_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "kelemen_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.kelemen_mix_tex != '' and current_layer.kelemen_use_tex:
                        mix_tex = bpy.data.textures[current_layer.kelemen_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "kelemen_matte_reflectance", text="")
                    if current_layer.kelemen_use_diff_tex:
                        layout.prop_search(current_layer, "kelemen_diff_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "kelemen_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.kelemen_diff_tex != '' and current_layer.kelemen_use_diff_tex:
                        diffuse_tex = bpy.data.textures[current_layer.kelemen_diff_tex]
                        layout.prop(diffuse_tex.image.colorspace_settings, "name", text="Color Space")
                    row = layout.row()
                    row.prop(current_layer, "kelemen_matte_multiplier")
                    # specular reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "kelemen_specular_reflectance", text="")
                    if current_layer.kelemen_use_spec_tex:
                        layout.prop_search(current_layer, "kelemen_spec_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "kelemen_use_spec_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.kelemen_spec_tex != '' and current_layer.kelemen_use_spec_tex:
                        spec_tex = bpy.data.textures[current_layer.kelemen_spec_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "kelemen_specular_multiplier")
                    layout.prop(current_layer, "kelemen_roughness")
                #-------------------------------------------------
                # microfacet brdf layout
                elif current_layer.bsdf_type == "microfacet_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "microfacet_weight", text="Layer Weight")
                    if current_layer.microfacet_use_tex:
                        layout.prop_search(current_layer, "microfacet_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "microfacet_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.microfacet_mix_tex != '' and current_layer.microfacet_use_tex:
                        mix_tex = bpy.data.textures[current_layer.microfacet_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "microfacet_model", text="Model")
                    # reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "microfacet_reflectance", text="")
                    if current_layer.microfacet_use_diff_tex:
                        layout.prop_search(current_layer, "microfacet_diff_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "microfacet_use_diff_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.microfacet_diff_tex != '' and current_layer.microfacet_use_diff_tex:
                        diff_tex = bpy.data.textures[current_layer.microfacet_diff_tex]
                        layout.prop(diff_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "microfacet_multiplier")
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "microfacet_mdf")
                    layout.prop(current_layer, "microfacet_mdf_multiplier")
                    # specular
                    # NOTE(review): the toggle below reuses the MDF split's second
                    # column rather than a fresh split — confirm this layout is intended.
                    if current_layer.microfacet_use_spec_tex:
                        layout.prop_search(current_layer, "microfacet_spec_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "microfacet_use_spec_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.microfacet_spec_tex != '' and current_layer.microfacet_use_spec_tex:
                        spec_tex = bpy.data.textures[current_layer.microfacet_spec_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    # fresnel
                    layout.prop(current_layer, "microfacet_fresnel")
                #-------------------------------------------------
                # specular brdf layout
                elif current_layer.bsdf_type == "specular_brdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "specular_weight", text="Layer Weight")
                    if current_layer.specular_use_tex:
                        layout.prop_search(current_layer, "specular_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "specular_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.specular_mix_tex != '' and current_layer.specular_use_tex:
                        mix_tex = bpy.data.textures[current_layer.specular_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # specular reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "specular_reflectance", text="")
                    if current_layer.specular_use_gloss_tex:
                        layout.prop_search(current_layer, "specular_gloss_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "specular_use_gloss_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.specular_gloss_tex != '' and current_layer.specular_use_gloss_tex:
                        spec_tex = bpy.data.textures[current_layer.specular_gloss_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "specular_multiplier")
                #----------------------------------------------
                # specular btdf layout
                elif current_layer.bsdf_type == "specular_btdf":
                    # layer weight
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_weight", text="Layer Weight")
                    if current_layer.spec_btdf_use_tex:
                        layout.prop_search(current_layer, "spec_btdf_mix_tex", material, "texture_slots")
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_use_tex", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.spec_btdf_mix_tex != '' and current_layer.spec_btdf_use_tex:
                        mix_tex = bpy.data.textures[current_layer.spec_btdf_mix_tex]
                        layout.prop(mix_tex.image.colorspace_settings, "name", text="Color Space")
                    # specular reflectance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_reflectance", text="")
                    if current_layer.spec_btdf_use_spec_tex:
                        layout.prop_search(current_layer, "spec_btdf_spec_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_use_spec_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    if current_layer.spec_btdf_spec_tex != '' and current_layer.spec_btdf_use_spec_tex:
                        spec_tex = bpy.data.textures[current_layer.spec_btdf_spec_tex]
                        layout.prop(spec_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "spec_btdf_refl_mult")
                    # transmittance
                    split = layout.split(percentage=0.90)
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_transmittance", text="")
                    if current_layer.spec_btdf_use_trans_tex:
                        layout.prop_search(current_layer, "spec_btdf_trans_tex", material, "texture_slots", text="")
                    col = split.column()
                    col.prop(current_layer, "spec_btdf_use_trans_tex", text="", icon="TEXTURE_SHADED", toggle=True)
                    # CONSISTENCY FIX: also require the use-texture toggle, matching
                    # every other color-space row in this panel.
                    if current_layer.spec_btdf_trans_tex != '' and current_layer.spec_btdf_use_trans_tex:
                        trans_tex = bpy.data.textures[current_layer.spec_btdf_trans_tex]
                        layout.prop(trans_tex.image.colorspace_settings, "name", text="Color Space")
                    layout.prop(current_layer, "spec_btdf_trans_mult")
                    # IOR
                    row = layout.row(align=True)
                    row.prop(current_layer, "spec_btdf_ior")
            #----------------------------------------------
            # bump/normal mapping
            layout.separator()
            split = layout.split(percentage=0.50)
            col = split.column()
            col.prop(asr_mat, "material_use_bump_tex", text="Bump Map", icon="POTATO", toggle=True)
            col = split.column()
            if asr_mat.material_use_bump_tex:
                col.prop(asr_mat, "material_use_normalmap", text="Normal Map", toggle=True)
                layout.prop_search(asr_mat, "material_bump_tex", material, "texture_slots", text="")
                if asr_mat.material_bump_tex != '':
                    bump_tex = bpy.data.textures[asr_mat.material_bump_tex]
                    layout.prop(bump_tex.image.colorspace_settings, "name", text="Color Space")
                layout.prop(asr_mat, "material_bump_amplitude", text="Bump Amplitude")
            #----------------------------------------------
            # alpha
            split = layout.split(percentage=0.50)
            col = split.column()
            col.prop(asr_mat, "material_use_alpha", text="Alpha Map", icon="POTATO", toggle=True)
            col = split.column()
            if asr_mat.material_use_alpha:
                col.prop_search(asr_mat, "material_alpha_map", material, "texture_slots", text="")
                if asr_mat.material_alpha_map != '':
                    alpha_tex = bpy.data.textures[asr_mat.material_alpha_map]
                    layout.prop(alpha_tex.image.colorspace_settings, "name", text="Color Space")
            else:
                col.prop(asr_mat, "material_alpha")
            layout.prop(asr_mat, "shade_alpha_cutouts")
#---------------------------------------------
# light material panel
#---------------------------------------------
class AppleseedMatEmissionPanel(bpy.types.Panel):
    """Light-material panel: emission settings for non-node appleseed materials."""
    bl_label = "Light Material"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        """Show for appleseed mesh objects whose active material is not node-based.

        BUGFIX: the original evaluated ``context.object.type`` before checking
        ``context.object is not None``, raising AttributeError during panel
        polling whenever no object was selected. Short-circuit instead.
        """
        if context.scene.render.engine != 'APPLESEED_RENDER':
            return False
        obj = context.object
        if obj is None or obj.type != 'MESH':
            return False
        material = obj.active_material
        return material is not None and material.appleseed.node_tree == ''

    def draw_header(self, context):
        # Checkbox in the panel header toggles emission on/off.
        header = self.layout
        asr_mat = context.object.active_material.appleseed
        header.prop(asr_mat, "use_light_emission")

    def draw(self, context):
        layout = self.layout
        material = context.object.active_material
        asr_mat = material.appleseed
        # Grey out the controls while emission is disabled.
        col = layout.column()
        col.active = asr_mat.use_light_emission
        col.prop(asr_mat, "light_color", text="")
        col.prop(asr_mat, "light_emission", text="Radiance Multiplier")
        layout.active = asr_mat.use_light_emission
        row = layout.row(align=True)  # empty row kept for vertical spacing
        layout.prop(asr_mat, "cast_indirect")
        layout.prop(asr_mat, "importance_multiplier")
        layout.prop(asr_mat, "light_near_start")
def register():
    """Hook the appleseed engine into Blender's builtin material panels and
    register this module's UI classes."""
    for builtin_panel in (bpy.types.MATERIAL_PT_context_material,
                          bpy.types.MATERIAL_PT_custom_props):
        builtin_panel.COMPAT_ENGINES.add('APPLESEED_RENDER')
    for ui_class in (AppleseedMaterialPreview,
                     MATERIAL_UL_BSDF_slots,
                     AppleseedMaterialShading,
                     AppleseedMatEmissionPanel):
        bpy.utils.register_class(ui_class)
def unregister():
    """Unregister this module's UI classes and undo register()'s changes.

    IMPROVEMENT: register() adds 'APPLESEED_RENDER' to two builtin panels'
    COMPAT_ENGINES but the original unregister() never removed it, leaving a
    stale engine entry after the add-on is disabled. discard() is a no-op if
    the entry is already absent, so this is safe either way.
    """
    bpy.types.MATERIAL_PT_context_material.COMPAT_ENGINES.discard('APPLESEED_RENDER')
    bpy.types.MATERIAL_PT_custom_props.COMPAT_ENGINES.discard('APPLESEED_RENDER')
    bpy.utils.unregister_class(AppleseedMaterialPreview)
    bpy.utils.unregister_class(MATERIAL_UL_BSDF_slots)
    bpy.utils.unregister_class(AppleseedMaterialShading)
    bpy.utils.unregister_class(AppleseedMatEmissionPanel)
| en | 0.378492 | # # This source file is part of appleseed. # Visit http://appleseedhq.net/ for additional information and resources. # # This software is released under the MIT license. # # Copyright (c) 2014-2017 The appleseedhq Organization # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# #--------------------------------------------- # node tree selector ui #--------------------------------------------- #--------------------------------------------- # material preview panel #--------------------------------------------- #--------------------------------------------- # material bsdf slot #--------------------------------------------- #--------------------------------------------- # material shading panel #--------------------------------------------- # lambertian brdf layout # layer weight # reflectance #------------------------------------------------- # oren-nayar brdf layout # layer weight # reflectance # roughness #------------------------------------------------- # ashikhmin-shirley brdf layout # layer weight # reflectance # glossiness # fresnel #------------------------------------------------- # diffuse btdf layout # layer weight # reflectance # transmittance #------------------------------------------------- # disney brdf layout # layer weight # base color # subsurface # metallic # specular # specular tint # anisotropy # roughness # sheen # sheen tint # clear coat # clear coat gloss #------------------------------------------------- # kelemen brdf layout # layer weight # reflectance # specular reflectance #------------------------------------------------- # microfacet brdf layout # layer weight # reflectance # specular # fresnel #------------------------------------------------- # specular brdf layout # layer weight # specular reflectance #---------------------------------------------- # specular btdf layout # layer weight # specular reflectance # transmittance # IOR #---------------------------------------------- # bump/normal mapping #---------------------------------------------- # alpha #--------------------------------------------- # light material panel #--------------------------------------------- | 1.436529 | 1 |
processing/Mod. 8/sketch_8_2_l56/sketch_8_2_l56.pyde | nanam0rgana/2019-fall-polytech-cs | 0 | 6619912 | <filename>processing/Mod. 8/sketch_8_2_l56/sketch_8_2_l56.pyde
r = 50
cx = 500/2
cy = 500/2
l = 350
a = PI/4
w = 1
c = 0
class myline():
def render(self, cx,cy,a):
x1 = cx - cos(a)*l/2
y1 = cy + sin(a)*l/2
x2 = cx + cos(a)*l/2
y2 = cy - sin(a)*l/2
stroke(50,100)
strokeWeight(w)
line(x1,y1,x2,y2)
strokeWeight(5)
stroke(50)
line(x2,y2,x2,y2)
line(x1,y1,x1,y1)
mline=myline()
def setup():
size(500,500)
smooth()
background(255)
def draw():
global c,r
c += 0.05
if c>2*PI:
c = 0
r += 50
mline.render(width/2 + sin(c)*r, width/2 + cos(c)*r, c*2)
| <filename>processing/Mod. 8/sketch_8_2_l56/sketch_8_2_l56.pyde
r = 50
cx = 500/2
cy = 500/2
l = 350
a = PI/4
w = 1
c = 0
class myline():
def render(self, cx,cy,a):
x1 = cx - cos(a)*l/2
y1 = cy + sin(a)*l/2
x2 = cx + cos(a)*l/2
y2 = cy - sin(a)*l/2
stroke(50,100)
strokeWeight(w)
line(x1,y1,x2,y2)
strokeWeight(5)
stroke(50)
line(x2,y2,x2,y2)
line(x1,y1,x1,y1)
mline=myline()
def setup():
size(500,500)
smooth()
background(255)
def draw():
global c,r
c += 0.05
if c>2*PI:
c = 0
r += 50
mline.render(width/2 + sin(c)*r, width/2 + cos(c)*r, c*2)
| none | 1 | 2.705449 | 3 | |
daedalus.py | lojikil/daedalus | 2 | 6619913 | from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, g, render_template
app = Flask(__name__)
SQLQ = """select docs.document, tokens.tf_idf
from tokens left outer join docs
on tokens.doc_id = docs.doc_id
where (tokens.tok = ?) order by tokens.tf_idf desc"""
@app.errorhandler(500)
def handle500(e):
print "Error:", e
return "<html><body><p>%s</p></body></html>" % str(e), 500
@app.before_request
def before_req():
g.db = sqlite3.connect('./index_test.db')
@app.after_request
def after_req(response):
g.db.close()
return response
@app.route("/")
def daedalus_index():
return render_template("show_index.html")
@app.route("/search", methods=["GET", "POST"])
def daedalus_search():
entries = []
if request.method == "POST":
if request.form['query']:
qs = request.form['query'].lower().split(' ')
for q in qs:
cur = g.db.execute(SQLQ, [q])
entries.extend([dict(document=row[0], score=row[1])
for row in cur.fetchall()])
return render_template("list.html", entries=entries)
if __name__ == "__main__":
app.run()
| from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, g, render_template
app = Flask(__name__)
SQLQ = """select docs.document, tokens.tf_idf
from tokens left outer join docs
on tokens.doc_id = docs.doc_id
where (tokens.tok = ?) order by tokens.tf_idf desc"""
@app.errorhandler(500)
def handle500(e):
print "Error:", e
return "<html><body><p>%s</p></body></html>" % str(e), 500
@app.before_request
def before_req():
g.db = sqlite3.connect('./index_test.db')
@app.after_request
def after_req(response):
g.db.close()
return response
@app.route("/")
def daedalus_index():
return render_template("show_index.html")
@app.route("/search", methods=["GET", "POST"])
def daedalus_search():
entries = []
if request.method == "POST":
if request.form['query']:
qs = request.form['query'].lower().split(' ')
for q in qs:
cur = g.db.execute(SQLQ, [q])
entries.extend([dict(document=row[0], score=row[1])
for row in cur.fetchall()])
return render_template("list.html", entries=entries)
if __name__ == "__main__":
app.run()
| en | 0.647795 | select docs.document, tokens.tf_idf from tokens left outer join docs on tokens.doc_id = docs.doc_id where (tokens.tok = ?) order by tokens.tf_idf desc | 2.743726 | 3 |
gunicorn_config.py | NewLanded/future_picture | 0 | 6619914 | bind = "0.0.0.0:6677"
workers = 1
worker_class = "uvicorn.workers.UvicornWorker"
timeout = 3600
keepalive = 3600
backlog = 100 # 服务器中在pending状态的最大连接数,即client处于waiting的数目。超过这个数目, client连接会得到一个error
daemon = True
debug = False
proc_name = 'gunicorn_future_picture'
pidfile = './log/gunicorn_future_picture.pid'
errorlog = './log/gunicorn_future_picture_error.log'
accesslog = './log/gunicorn_future_picture_access.log'
| bind = "0.0.0.0:6677"
workers = 1
worker_class = "uvicorn.workers.UvicornWorker"
timeout = 3600
keepalive = 3600
backlog = 100 # 服务器中在pending状态的最大连接数,即client处于waiting的数目。超过这个数目, client连接会得到一个error
daemon = True
debug = False
proc_name = 'gunicorn_future_picture'
pidfile = './log/gunicorn_future_picture.pid'
errorlog = './log/gunicorn_future_picture_error.log'
accesslog = './log/gunicorn_future_picture_access.log'
| zh | 0.9192 | # 服务器中在pending状态的最大连接数,即client处于waiting的数目。超过这个数目, client连接会得到一个error | 1.689121 | 2 |
samplecode/Course3_1_1_submission.py | RickRe/MachineLearning | 0 | 6619915 | # encoding: utf-8
import numpy as np
import model
import utils
import plot_data
def exec_c3_1_a(X_a, X_b, init_w):
"""
plot 3 histogram of data projecting to difference vector w
:param X_a: Gaussian data of class a
:param X_b: Gaussian data of class b
:param init_w: initial w vector to be projected
:return: none
"""
n_histogram = 3
proj_a = np.zeros((X_a.shape[0], n_histogram))
proj_b = np.zeros((X_b.shape[0], n_histogram))
new_w = np.zeros((init_w.shape[0], n_histogram))
for i in range(n_histogram):
new_w[:, i] = (init_w + np.array(np.random.randn(*init_w.shape))).ravel()
proj_a[:, i] = utils.project_X_onto_w(X_a, new_w[:, i]).ravel()
proj_b[:, i] = utils.project_X_onto_w(X_b, new_w[:, i]).ravel()
plot_data.plt_histogram(proj_a, proj_b, new_w)
def exec_c3_1_b(X_a, X_b, init_w):
"""
Turn vector w by 360 degree to find the maximum value of Fisher score,
and the corresponding direction w∗
:param X_a: Gaussian data of class a
:param X_b: Gaussian data of class b
:param init_w: initial w vector to be projected
:return: none
"""
fs_clf = model.FisherScoreClassifier(X_a, X_b, init_w)
optimal_w = fs_clf.classify()
if __name__ == '__main__':
# generate gaussian distribution for class a
n_pts = 100
mean_a = [4, 2]
cov_a = np.array([[1, 0.5], [0.5, 1]]) # diagonal covariance
Gaus_dist_a = model.GausDS(mean_a, cov_a, n_pts)
# generate gaussian distribution for class b
mean_b = [2, 4]
cov_b = np.array([[1, 0.5], [0.5, 1]]) # diagonal covariance
Gaus_dist_b = model.GausDS(mean_b, cov_b, n_pts)
# plot two Gaussian distributions including class a and class b
plot_data.plt_distribution(Gaus_dist_a.data, Gaus_dist_b.data)
# init weight to do projection
init_w = np.array([1, -2]).reshape(-1, 1)
# draw three histograms by projecting to different w
exec_c3_1_a(Gaus_dist_a.data, Gaus_dist_b.data, init_w)
# find optimal angel to separate class a and class b
exec_c3_1_b(Gaus_dist_a.data, Gaus_dist_b.data, init_w)
| # encoding: utf-8
import numpy as np
import model
import utils
import plot_data
def exec_c3_1_a(X_a, X_b, init_w):
"""
plot 3 histogram of data projecting to difference vector w
:param X_a: Gaussian data of class a
:param X_b: Gaussian data of class b
:param init_w: initial w vector to be projected
:return: none
"""
n_histogram = 3
proj_a = np.zeros((X_a.shape[0], n_histogram))
proj_b = np.zeros((X_b.shape[0], n_histogram))
new_w = np.zeros((init_w.shape[0], n_histogram))
for i in range(n_histogram):
new_w[:, i] = (init_w + np.array(np.random.randn(*init_w.shape))).ravel()
proj_a[:, i] = utils.project_X_onto_w(X_a, new_w[:, i]).ravel()
proj_b[:, i] = utils.project_X_onto_w(X_b, new_w[:, i]).ravel()
plot_data.plt_histogram(proj_a, proj_b, new_w)
def exec_c3_1_b(X_a, X_b, init_w):
"""
Turn vector w by 360 degree to find the maximum value of Fisher score,
and the corresponding direction w∗
:param X_a: Gaussian data of class a
:param X_b: Gaussian data of class b
:param init_w: initial w vector to be projected
:return: none
"""
fs_clf = model.FisherScoreClassifier(X_a, X_b, init_w)
optimal_w = fs_clf.classify()
if __name__ == '__main__':
# generate gaussian distribution for class a
n_pts = 100
mean_a = [4, 2]
cov_a = np.array([[1, 0.5], [0.5, 1]]) # diagonal covariance
Gaus_dist_a = model.GausDS(mean_a, cov_a, n_pts)
# generate gaussian distribution for class b
mean_b = [2, 4]
cov_b = np.array([[1, 0.5], [0.5, 1]]) # diagonal covariance
Gaus_dist_b = model.GausDS(mean_b, cov_b, n_pts)
# plot two Gaussian distributions including class a and class b
plot_data.plt_distribution(Gaus_dist_a.data, Gaus_dist_b.data)
# init weight to do projection
init_w = np.array([1, -2]).reshape(-1, 1)
# draw three histograms by projecting to different w
exec_c3_1_a(Gaus_dist_a.data, Gaus_dist_b.data, init_w)
# find optimal angel to separate class a and class b
exec_c3_1_b(Gaus_dist_a.data, Gaus_dist_b.data, init_w)
| en | 0.773475 | # encoding: utf-8 plot 3 histogram of data projecting to difference vector w :param X_a: Gaussian data of class a :param X_b: Gaussian data of class b :param init_w: initial w vector to be projected :return: none Turn vector w by 360 degree to find the maximum value of Fisher score, and the corresponding direction w∗ :param X_a: Gaussian data of class a :param X_b: Gaussian data of class b :param init_w: initial w vector to be projected :return: none # generate gaussian distribution for class a # diagonal covariance # generate gaussian distribution for class b # diagonal covariance # plot two Gaussian distributions including class a and class b # init weight to do projection # draw three histograms by projecting to different w # find optimal angel to separate class a and class b | 2.845387 | 3 |
src/models/master.py | yfoucade/colupi | 1 | 6619916 | from copy import deepcopy
from numba import njit
class Master(object):
"""
Master algorithm managing the collaboration between the local data sites
"""
@njit
def __init__(self, collabs=None, max_iter=0, *args, **kwargs):
"""
Parameters:
collabs: list(Collaborator)
List of instances of type "Collaborator".
max_iter (optional): int
Maximum number of iterations in the collaborative phase
optional:
X (optional): list(pandas.DataFrame)
List of dataframes with same size than collabs.
The view of each collaborators, with acces to possibly different
set of individuals (identified by the index column).
Y (optional): list(pandas.DataFrame)
List of dataframes, one for each collaborator. With the indices and the labels.
notes:
02/02 14:16 - Xs and Ys are not necessary parameters, as we can get them from each collab.
Consider removing them.
"""
self.collabs, self.P = collabs, len(collabs)
for i, collab in enumerate(self.collabs):
collab.set_id(i)
self.max_iter = max_iter
# create a log that will contain information about each step of the process
self.log = [] # validation indices (for each step, a list of dicts)
self.collaboration_history = [] # confidence matrices
@njit
def launch_collab(self):
"""
Proceed to the collaboration.
"""
# each collaborator fits its parameters in the local step
to_log = []
for collab in self.collabs:
collab.local_step()
to_log.append(deepcopy(collab.log_local()))
# log
self.log.append(deepcopy(to_log))
n_iter = 0
while True:
stop, to_log = True, []
# save all collaboration matrices
# each data site is in turn considered as the local data site
for p, collab in enumerate(self.collabs):
# it is provided with a tuple for each remote data site: (Id, partition_matrix)
remote_Ids, remote_partitions = self.get_partitions_except_p(p)
successful_collab = collab.collaborate(remote_Ids, remote_partitions)
to_log.append(deepcopy(collab.log_collab()))
if successful_collab:
stop = False
# log the results
self.log.append(deepcopy(to_log))
# should we stop ?
if stop:
break
# if stop==False, then a collaboration occured, add 1 to counter
n_iter += 1
if n_iter == self.max_iter:
break
@njit
def get_partitions_except_p(self, p):
"""
Get all partition matrices except number p.
This is used to get every remote partition matrix when p is the local data site.
Parameters:
p: int
id of the partition matrix to ignore.
"""
res_Ids, res_partitions = [], []
for i, collab in self.collabs:
if i != p:
res_Ids.append(collab.get_id())
res_partitions.append(deepcopy(collab.get_partition_matrix()))
return res_Ids, res_partitions
| from copy import deepcopy
from numba import njit
class Master(object):
"""
Master algorithm managing the collaboration between the local data sites
"""
@njit
def __init__(self, collabs=None, max_iter=0, *args, **kwargs):
"""
Parameters:
collabs: list(Collaborator)
List of instances of type "Collaborator".
max_iter (optional): int
Maximum number of iterations in the collaborative phase
optional:
X (optional): list(pandas.DataFrame)
List of dataframes with same size than collabs.
The view of each collaborators, with acces to possibly different
set of individuals (identified by the index column).
Y (optional): list(pandas.DataFrame)
List of dataframes, one for each collaborator. With the indices and the labels.
notes:
02/02 14:16 - Xs and Ys are not necessary parameters, as we can get them from each collab.
Consider removing them.
"""
self.collabs, self.P = collabs, len(collabs)
for i, collab in enumerate(self.collabs):
collab.set_id(i)
self.max_iter = max_iter
# create a log that will contain information about each step of the process
self.log = [] # validation indices (for each step, a list of dicts)
self.collaboration_history = [] # confidence matrices
@njit
def launch_collab(self):
"""
Proceed to the collaboration.
"""
# each collaborator fits its parameters in the local step
to_log = []
for collab in self.collabs:
collab.local_step()
to_log.append(deepcopy(collab.log_local()))
# log
self.log.append(deepcopy(to_log))
n_iter = 0
while True:
stop, to_log = True, []
# save all collaboration matrices
# each data site is in turn considered as the local data site
for p, collab in enumerate(self.collabs):
# it is provided with a tuple for each remote data site: (Id, partition_matrix)
remote_Ids, remote_partitions = self.get_partitions_except_p(p)
successful_collab = collab.collaborate(remote_Ids, remote_partitions)
to_log.append(deepcopy(collab.log_collab()))
if successful_collab:
stop = False
# log the results
self.log.append(deepcopy(to_log))
# should we stop ?
if stop:
break
# if stop==False, then a collaboration occured, add 1 to counter
n_iter += 1
if n_iter == self.max_iter:
break
@njit
def get_partitions_except_p(self, p):
"""
Get all partition matrices except number p.
This is used to get every remote partition matrix when p is the local data site.
Parameters:
p: int
id of the partition matrix to ignore.
"""
res_Ids, res_partitions = [], []
for i, collab in self.collabs:
if i != p:
res_Ids.append(collab.get_id())
res_partitions.append(deepcopy(collab.get_partition_matrix()))
return res_Ids, res_partitions
| en | 0.805861 | Master algorithm managing the collaboration between the local data sites Parameters: collabs: list(Collaborator) List of instances of type "Collaborator". max_iter (optional): int Maximum number of iterations in the collaborative phase optional: X (optional): list(pandas.DataFrame) List of dataframes with same size than collabs. The view of each collaborators, with acces to possibly different set of individuals (identified by the index column). Y (optional): list(pandas.DataFrame) List of dataframes, one for each collaborator. With the indices and the labels. notes: 02/02 14:16 - Xs and Ys are not necessary parameters, as we can get them from each collab. Consider removing them. # create a log that will contain information about each step of the process # validation indices (for each step, a list of dicts) # confidence matrices Proceed to the collaboration. # each collaborator fits its parameters in the local step # log # save all collaboration matrices # each data site is in turn considered as the local data site # it is provided with a tuple for each remote data site: (Id, partition_matrix) # log the results # should we stop ? # if stop==False, then a collaboration occured, add 1 to counter Get all partition matrices except number p. This is used to get every remote partition matrix when p is the local data site. Parameters: p: int id of the partition matrix to ignore. | 3.050621 | 3 |
SendLog.py | gzq763199198/xcos_on_cloud | 1 | 6619917 | #!/usr/bin/python3
import gevent
from gevent.lock import RLock
from gevent.monkey import patch_all
from gevent.pywsgi import WSGIServer
patch_all(aggressive=False, subprocess=False)
from datetime import datetime
import fileinput
import flask
from flask import request, Response, session, render_template, jsonify
import flask_session
import glob
import json
import os
from os.path import abspath, basename, dirname, exists, isfile, join, splitext
import re
import requests
import signal
import subprocess
from tempfile import mkdtemp, mkstemp
from threading import Timer
from time import time
import uuid
from werkzeug import secure_filename
from xml.dom import minidom
from db_connection import connection
import config
from config import FLASKSESSIONDIR, SESSIONDIR, XCOSSOURCEDIR, REMOVEFILE
def makedirs(dirname, dirtype):
if not exists(dirname):
print('making', dirtype, 'dir', dirname)
os.makedirs(dirname)
def remove(filename):
if filename is None:
return False
if not REMOVEFILE:
print("not removing", filename)
return True
try:
os.remove(filename)
return True
except BaseException:
print("could not remove", filename)
return False
# change directory before using relative paths
os.chdir(dirname(abspath(__file__)))
makedirs(FLASKSESSIONDIR, 'top flask session')
makedirs(SESSIONDIR, 'top session')
app = flask.Flask(__name__, static_folder='webapp/', template_folder='webapp')
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SESSION_FILE_DIR'] = FLASKSESSIONDIR
# These are the extension that we are accepting to be uploaded
app.config['ALLOWED_EXTENSIONS'] = set(['zcos', 'xcos', 'txt'])
flask_session.Session(app)
# This is the path to the upload directory and values directory
UPLOAD_FOLDER = 'uploads' # to store xcos file
VALUES_FOLDER = 'values' # to store files related to tkscale block
# to store uploaded sci files for sci-func block
SCRIPT_FILES_FOLDER = 'script_files'
# to store uploaded sci files for sci-func block
SCIFUNC_FILES_FOLDER = 'scifunc_files'
# Delay time to look for new line (in s)
LOOK_DELAY = 0.1
# States of the line
# to indicate initialization of block in log file is encountered
INITIALIZATION = 0
# to indicate ending of log file data for that block is encountered
ENDING = 1
# to indicate data is proper and can be read
DATA = 2
# to indicate there is no line in log file further
NOLINE = -1
# to indicate block id is present
BLOCK_IDENTIFICATION = -2
# Scilab dir
SCIDIR = abspath(config.SCILAB_DIR)
SCI = join(SCIDIR, "bin", "scilab-adv-cli")
READCONTENTFILE = abspath("Read_Content.txt")
CONT_FRM_WRITE = abspath("cont_frm_write.sci")
COPIED_EXPRESSION_SCI_FRM_SCILAB = abspath("copied_expression_from_scilab.sci")
EXP_SCI_FUNC_WRITE = abspath("expression-sci-function.sci")
BASEDIR = abspath('webapp')
IMAGEDIR = join(BASEDIR, 'res_imgs')
# display limit for long strings
DISPLAY_LIMIT = 10
# handle scilab startup
SCILAB_START = (
"errcatch(-1,'stop');lines(0,120);clearfun('messagebox');"
"function messagebox(msg,msgboxTitle,msgboxIcon,buttons,isModal),"
"disp(msg),endfunction;")
SCILAB_END = "mode(2);quit();"
SCILAB_VARS = [
"%p_r_p",
"canon",
"close",
"extractDatatip",
"extractLight",
"messagebox",
"syslin",
"tf2ss",
]
USER_DATA = {}
class Diagram:
diagram_id = None
# session dir
sessiondir = None
# store uploaded filename
xcos_file_name = None
# type of uploaded file
workspace_counter = 0
# workspace from script
workspace_filename = None
# tk count
tk_count = 0
scilab_proc = None
# store log name
log_name = None
# is thread running?
tkbool = False
tk_starttime = None
# in memory values
tk_deltatimes = None
tk_values = None
tk_times = None
# List to store figure IDs from log_name
figure_list = None
def __init__(self):
self.figure_list = []
def __str__(self):
return (
"{ 'scilab_pid': %s, "
"'log_name': %s, 'tkbool': %s, 'figure_list': %s }") % (
self.scilab_proc.pid if self.scilab_proc is not None else None,
self.log_name, self.tkbool, self.figure_list)
class Script:
script_id = None
sessiondir = None
filename = None
status = 0
proc = None
workspace_filename = None
def __str__(self):
return (
"{ script_id: %s, filename: %s, status: %d, "
"script_pid: %s, "
"workspace_filename: %s }") % (
self.script_id, self.filename, self.status,
self.proc.pid if self.proc is not None else None,
self.workspace_filename)
class SciFile:
'''Variables used in sci-func block'''
filename = ''
file_image = ''
flag_sci = False
proc = None
class UserData:
sessiondir = None
diagrams = None
scripts = None
scifile = None
diagramlock = None
def __init__(self):
self.sessiondir = mkdtemp(
prefix=datetime.now().strftime('%Y%m%d.'), dir=SESSIONDIR)
self.diagrams = []
self.scripts = []
self.scifile = SciFile()
self.diagramlock = RLock()
class line_and_state:
'''
Class to store the line and its state (Used in reading data from log file)
'''
line = None # initial line to none(Nothing is present)
state = NOLINE # initial state to NOLINE ie
def set(self, line_state):
self.line = line_state[0] # to set line
self.state = line_state[1] # to set state
return False
def get_line(self):
return self.line
def get_state(self):
return self.state
def init_session():
if 'uid' not in session:
session['uid'] = str(uuid.uuid1())
uid = session['uid']
if uid not in USER_DATA:
USER_DATA[uid] = UserData()
ud = USER_DATA[uid]
sessiondir = ud.sessiondir
makedirs(sessiondir, 'session')
makedirs(join(sessiondir, UPLOAD_FOLDER), 'upload')
makedirs(join(sessiondir, VALUES_FOLDER), 'values')
makedirs(join(sessiondir, SCRIPT_FILES_FOLDER), 'script files')
makedirs(join(sessiondir, SCIFUNC_FILES_FOLDER), 'scifunc files')
return (ud.diagrams, ud.scripts, ud.scifile, sessiondir, ud.diagramlock)
def get_diagram(xcos_file_id, remove=False):
if len(xcos_file_id) == 0:
print("no id")
return (None, None)
xcos_file_id = int(xcos_file_id)
(diagrams, __, scifile, __, __) = init_session()
if xcos_file_id < 0 or xcos_file_id >= len(diagrams):
print("id", xcos_file_id, "not in diagrams")
return (None, None)
diagram = diagrams[xcos_file_id]
if remove:
diagrams[xcos_file_id] = Diagram()
return (diagram, scifile)
def add_diagram():
(diagrams, scripts, scifile, sessiondir, diagramlock) = init_session()
diagramlock.acquire()
diagram = Diagram()
diagram.diagram_id = str(len(diagrams))
diagram.sessiondir = sessiondir
diagrams.append(diagram)
diagramlock.release()
return (diagram, scripts, scifile, sessiondir)
def get_script(script_id, scripts=None, remove=False):
if script_id is None:
return None
if len(script_id) == 0:
print("no id")
return None
script_id = int(script_id)
if scripts is None:
(__, scripts, __, __, __) = init_session()
if script_id < 0 or script_id >= len(scripts):
print("id", script_id, "not in scripts")
return None
script = scripts[script_id]
if remove:
scripts[script_id] = None
return script
def add_script():
(__, scripts, __, sessiondir, diagramlock) = init_session()
diagramlock.acquire()
script = Script()
script.script_id = str(len(scripts))
script.sessiondir = sessiondir
scripts.append(script)
diagramlock.release()
return (script, sessiondir)
def parse_line(line, lineno):
'''
Function to parse the line
Returns tuple of figure ID and state
state = INITIALIZATION if new figure is created
ENDING if current fig end
DATA otherwise
'''
line_words = line.split(' ') # Each line is split to read condition
try:
# The below condition determines the block ID
if line_words[2] == "Block":
# to get block id (Which is explicitly added by us while writing
# into log in scilab source code)
block_id = int(line_words[4])
return (block_id, BLOCK_IDENTIFICATION)
if line_words[2] == "Initialization":
# New figure created
# Get fig id
# to extract figure ids (sometime multiple sinks can be used in one
# diagram to differentiate that)
figure_id = int(line_words[-1])
return (figure_id, INITIALIZATION)
elif line_words[2] == "Ending":
# Current figure end
# Get fig id
figure_id = int(line_words[-1])
return (figure_id, ENDING)
else:
# Current figure coordinates
figure_id = int(line_words[3])
return (figure_id, DATA)
except Exception as e:
print(str(e), "while parsing", line, "on line", lineno)
return (None, NOLINE)
def get_line_and_state(file, figure_list, lineno):
'''
Function to get a new line from file
This also parses the line and appends new figures to figure List
'''
line = file.readline() # read line by line from log
if not line: # if line is empty then return noline
return (None, NOLINE)
# every line is passed to function parse_line for getting values
parse_result = parse_line(line, lineno)
figure_id = parse_result[0]
state = parse_result[1]
if state == INITIALIZATION:
# New figure created
# Add figure ID to list
figure_list.append(figure_id) # figure id of block is added to list
return (None, INITIALIZATION)
# Check for block identification
elif state == BLOCK_IDENTIFICATION:
return (line, BLOCK_IDENTIFICATION)
elif state == ENDING:
# End of figure
# Remove figure ID from list
# Once ending of log file/data is encountered for that block, figure id
# will be removed
figure_list.remove(figure_id)
return (None, ENDING)
elif state == NOLINE:
return (None, NOLINE)
return (line, DATA)
logfilefdrlock = RLock()
LOGFILEFD = 123
def run_scilab(command, createlogfile=False):
cmd = SCILAB_START + command + SCILAB_END
print('running command', cmd)
cmdarray = [SCI,
"-nogui",
"-noatomsautoload",
"-nouserstartup",
"-nb",
"-nw",
"-e", cmd]
if not createlogfile:
return subprocess.Popen(
cmdarray,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, start_new_session=True,
universal_newlines=True)
logfilefd, log_name = mkstemp(prefix=datetime.now().strftime(
'scilab-log-%Y%m%d-'), suffix='.txt', dir=SESSIONDIR)
logfilefdrlock.acquire()
if logfilefd != LOGFILEFD:
os.dup2(logfilefd, LOGFILEFD)
os.close(logfilefd)
proc = subprocess.Popen(
cmdarray,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, start_new_session=True,
universal_newlines=True, pass_fds=(LOGFILEFD, ))
os.close(LOGFILEFD)
logfilefdrlock.release()
return (proc, log_name)
SYSTEM_COMMANDS = re.compile(config.SYSTEM_COMMANDS)
def is_unsafe_script(filename):
'''
Read file and check for system commands and return error if file contains
system commands
'''
with open(filename, 'r') as f:
if not re.search(SYSTEM_COMMANDS, f.read()):
return False
# Delete saved file if system commands are encountered in that file
remove(filename)
return True
@app.route('/uploadscript', methods=['POST'])
def uploadscript():
'''
Below route is called for uploading script file.
'''
(script, sessiondir) = add_script()
file = request.files['file']
if not file:
msg = "Upload Error\n"
rv = {'msg': msg}
return Response(json.dumps(rv), mimetype='application/json')
fname = join(sessiondir, SCRIPT_FILES_FOLDER,
script.script_id + '_script.sce')
file.save(fname)
script.filename = fname
if is_unsafe_script(fname):
msg = ("System calls are not allowed in script.\n"
"Please edit the script again.\n")
script.status = -1
rv = {'status': script.status, 'msg': msg}
return Response(json.dumps(rv), mimetype='application/json')
wfname = join(sessiondir, SCRIPT_FILES_FOLDER,
script.script_id + '_script_workspace.dat')
script.workspace_filename = wfname
command = "exec('" + fname + "');save('" + wfname + "');"
try:
script.proc = run_scilab(command)
except FileNotFoundError:
msg = "scilab not found. Follow the installation instructions"
script.status = -2
rv = {'status': script.status, 'msg': msg}
return Response(json.dumps(rv), mimetype='application/json')
msg = ''
script.status = 1
rv = {'script_id': script.script_id, 'status': script.status, 'msg': msg}
return Response(json.dumps(rv), mimetype='application/json')
def clean_output(s):
'''handle whitespace and sequences in output'''
s = re.sub(r'[\a\b\f\r\v]', r'', s)
# https://en.wikipedia.org/wiki/ANSI_escape_code#CSI_sequences
s = re.sub(r'\x1b\[[\x30-\x3f]*[\x20-\x2f]*[\x40-\x7e]', r'', s)
s = re.sub(r'\t', r' ', s)
s = re.sub(r' +(\n|$)', r'\n', s)
s = re.sub(r'\n+', r'\n', s)
s = re.sub(r'^\n', r'', s)
return s
@app.route('/getscriptoutput', methods=['POST'])
def getscriptoutput():
'''
Below route is called for uploading script file.
'''
script = get_script(get_script_id())
if script is None:
# when called with same script_id again or with incorrect script_id
print('no script')
msg = "no script"
rv = {'msg': msg}
return Response(json.dumps(rv), mimetype='application/json')
proc = script.proc
wfname = script.workspace_filename
try:
# output from scilab terminal is saved for checking error msg
output = proc.communicate(timeout=30)[0]
output = clean_output(output)
if proc.returncode < 0:
msg = 'Script stopped'
script.status = -5
rv = {'status': script.status, 'msg': msg, 'output': output}
return Response(json.dumps(rv), mimetype='application/json')
return msg
# if error is encountered while execution of script file, then error
# message is returned to the user
if '!--error' in output:
msg = ("Check result window for details.\n"
"Please edit the script and execute again.\n")
script.status = -3
rv = {'status': script.status, 'msg': msg, 'output': output}
return Response(json.dumps(rv), mimetype='application/json')
print('workspace for', script.script_id, 'saved in', wfname)
msg = ''
script.status = 0
rv = {'script_id': script.script_id, 'status': script.status,
'msg': msg, 'output': output, 'returncode': proc.returncode}
return Response(json.dumps(rv), mimetype='application/json')
except subprocess.TimeoutExpired:
kill_script(script)
msg = 'Timeout'
script.status = -4
rv = {'status': script.status, 'msg': msg}
return Response(json.dumps(rv), mimetype='application/json')
@app.route('/stopscript', methods=['POST'])
def kill_script(script=None):
'''Below route is called for stopping a running script file.'''
if script is None:
script = get_script(get_script_id(), remove=True)
if script is None:
# when called with same script_id again or with incorrect script_id
print('no script')
return "error"
print('kill_script: script=', script.__dict__)
if script.filename is None:
print('empty script')
else:
remove(script.filename)
script.filename = None
if script.proc is None:
print('no scilab proc')
else:
if not kill_scilab_with(script.proc, signal.SIGTERM):
kill_scilab_with(script.proc, signal.SIGKILL)
script.proc = None
if script.workspace_filename is None:
print('empty workspace')
else:
remove(script.workspace_filename)
script.workspace_filename = None
return "ok"
@app.route('/uploadsci', methods=['POST'])
def uploadsci():
'''
Below route is called for uploading sci file which is required in sci-func
block (called in Javscript only_scifunc_code.js)
'''
(__, __, scifile, sessiondir, __) = init_session()
file = request.files['file'] # to get uploaded file
# Check if the file is not null
if not file:
return "error"
ts = datetime.now()
# file name is created with timestamp
fname = join(sessiondir, SCIFUNC_FILES_FOLDER,
str(ts) + secure_filename(file.filename))
file.save(fname) # file is saved in scifunc_files folder
scifile.filename = fname
scifile.flag_sci = True # flag for file saved
if is_unsafe_script(scifile.filename):
msg = ("System calls are not allowed in .sci file!\n"
"Please upload another .sci file!!")
# flag for file saved will be set as False
scifile.flag_sci = False
return msg
# scilab command is created to run that uploaded sci file which will be
# used by sci-func block
command = "exec('" + scifile.filename + "');"
try:
scifile.proc = run_scilab(command)
except FileNotFoundError:
return "scilab not found. Follow the installation instructions"
try:
# output from scilab terminal is saved for checking error msg
out = scifile.proc.communicate(timeout=30)[0]
if scifile.proc.returncode < 0:
remove(scifile.filename)
scifile.flag_sci = False
msg = 'Cancelled'
return msg
# if error is encountered while execution of sci file, then error msg
# is returned to user. in case no error is encountered, file uploaded
# successful msg is sent to user.
if '!--error' in out:
error_index = out.index('!')
msg = out[error_index:-9]
# Delete saved file if error is encountered while executing sci
# function in that file
remove(scifile.filename)
# flag for file saved will be set as False
scifile.flag_sci = False
return msg
msg = "File is uploaded successfully!!"
return msg
except subprocess.TimeoutExpired:
kill_scifile(scifile)
msg = 'Timeout'
return msg
@app.route('/stopscifile')
def kill_scifile(scifile=None):
'''Below route is called for stopping a running sci file.'''
if scifile is None:
(__, __, scifile, __, __) = init_session()
print('kill_scifile: scifile=', scifile.__dict__)
if scifile.filename is None:
print('empty scifile')
else:
remove(scifile.filename)
scifile.filename = None
if scifile.proc is None:
print('no scilab proc')
else:
if not kill_scilab_with(scifile.proc, signal.SIGTERM):
kill_scilab_with(scifile.proc, signal.SIGKILL)
scifile.proc = None
scifile.flag_sci = False
return "ok"
@app.route('/requestfilename', methods=['POST'])
def sendfile():
'''
This route is used in index.html for checking condition
if sci file is uploaded for sci-func block diagram imported directly using
import (will confirm again)
'''
(__, __, scifile, __, __) = init_session()
if scifile.flag_sci:
scifile.file_image = ('img_test%s.jpg' %
splitext(basename(scifile.filename))[0])
else:
scifile.file_image = ''
scifile.flag_sci = False
return scifile.file_image
def kill_scilab_with(proc, sgnl):
'''
function to kill a process group with a signal. wait for maximum 2 seconds
for process to exit. return True on exit, False otherwise
'''
if proc.poll() is not None:
return True
try:
os.killpg(proc.pid, sgnl)
except OSError:
print('could not kill', proc.pid, 'with signal', sgnl)
return False
except TypeError:
print('could not kill invalid process with signal', sgnl)
return True
for i in range(0, 20):
gevent.sleep(LOOK_DELAY)
if proc.poll() is not None:
return True
return False
def get_request_id(key='id'):
args = request.args
if args is None:
print('No args in request')
return ''
if key not in args:
print('No', key, 'in request.args')
return ''
value = args[key]
if re.fullmatch(r'[0-9]+', value):
return value
displayvalue = value if len(
value) <= DISPLAY_LIMIT + 3 else value[:DISPLAY_LIMIT] + '...'
print('Invalid value', displayvalue, 'for', key, 'in request.args')
return ''
def get_script_id(key='script_id', default=''):
form = request.form
if form is None:
print('No form in request')
return default
if key not in form:
print('No', key, 'in request.form')
return default
value = form[key]
if re.fullmatch(r'[0-9]+', value):
return value
displayvalue = value if len(
value) <= DISPLAY_LIMIT + 3 else value[:DISPLAY_LIMIT] + '...'
print('Invalid value', displayvalue, 'for', key, 'in request.form')
return default
def kill_scilab(diagram=None):
    '''Kill scilab (if still running) for *diagram* and remove its files.

    When *diagram* is None it is looked up from the request id. The xcos
    file, the scilab process and the log file are each cleaned up
    independently, then the tk sampling thread is stopped.
    '''
    if diagram is None:
        (diagram, __) = get_diagram(get_request_id(), True)
        if diagram is None:
            print('no diagram')
            return
    print('kill_scilab: diagram=', diagram.__dict__)
    if diagram.xcos_file_name is not None:
        # Remove xcos file
        remove(diagram.xcos_file_name)
        diagram.xcos_file_name = None
    else:
        print('empty diagram')
    if diagram.scilab_proc is not None:
        # escalate: SIGTERM first, SIGKILL if it did not die in time
        if not kill_scilab_with(diagram.scilab_proc, signal.SIGTERM):
            kill_scilab_with(diagram.scilab_proc, signal.SIGKILL)
        diagram.scilab_proc = None
    else:
        print('no scilab proc')
    if diagram.log_name is not None:
        # Remove log file
        remove(diagram.log_name)
        diagram.log_name = None
    else:
        print('empty diagram')
    stopDetailsThread(diagram)
@app.route('/start_scilab')
def start_scilab():
    '''
    Execute the diagram's xcos file using scilab (scilab-adv-cli) and set
    up the log file that scilab writes.

    The scilab command string is assembled according to
    diagram.workspace_counter, the process is launched via run_scilab(),
    and the console output of short simulations is scanned for known
    error messages. Returns "" on success or an error string.
    '''
    (diagram, scifile) = get_diagram(get_request_id())
    if diagram is None:
        print('no diagram')
        return "error"
    # name of primary workspace file
    workspace_filename = diagram.workspace_filename
    # name of workspace file
    workspace = "workspace.dat"
    # a load step is needed when a primary workspace exists, a previous
    # TOWS_c/FROMWSB run left workspace.dat behind, or a sci-func block
    # needs its sci file exec'd
    loadfile = workspace_filename is not None or \
        (diagram.workspace_counter in (2, 3) and exists(workspace)) or \
        diagram.workspace_counter == 5
    command = ""
    if loadfile:
        # ignore import errors
        command += "errcatch(-1,'continue');"
    if workspace_filename is not None:
        # load every variable from the primary workspace file except the
        # reserved names listed in SCILAB_VARS
        command += "[__V1,__V2]=listvarinfile('%s');" % workspace_filename
        command += "__V3=['%s'];" % ("';'".join(SCILAB_VARS))
        command += "__V4=setdiff(__V1,__V3);"
        command += "__V5=''''+strcat(__V4,''',''')+'''';"
        command += "__V6='load(''%s'','+__V5+');';" % workspace_filename
        command += "execstr(__V6);"
        command += "clear __V1 __V2 __V3 __V4 __V5 __V6;"
    if diagram.workspace_counter in (2, 3) and exists(workspace):
        # 3 - for both TOWS_c and FROMWSB and also workspace dat file exist
        # In this case workspace is saved in format of dat file (Scilab way
        # of saying workpsace)
        # For FROMWSB block and also workspace dat file exist
        command += "load('" + workspace + "');"
    if diagram.workspace_counter == 5:
        command += "exec('" + scifile.filename + "');"
    command += "errcatch(-1,'stop');"
    # Scilab Commands for running of scilab based on existence of different
    # blocks in same diagram from workspace_counter's value
    # 1: Indicate TOWS_c exist
    # 2: Indicate FROMWSB exist
    # 3: Both TOWS_c and FROMWSB exist
    # 4: Indicate AFFICH_m exist (We dont want graphic window to open so
    # xs2jpg() command is removed)
    # 5: Indicate Sci-func block as it some time return image as output
    # rather than Sinks's log file.
    # 0/No-condition : For all other blocks
    command += "loadXcosLibs();"
    command += "importXcosDiagram('" + diagram.xcos_file_name + "');"
    command += "xcos_simulate(scs_m,4);"
    if diagram.workspace_counter == 4:
        # For AFFICH-m block
        pass
    elif diagram.workspace_counter == 5:
        # For Sci-Func block (Image are return as output in some cases)
        command += "xs2jpg(gcf(),'%s/%s');" % (IMAGEDIR, scifile.file_image)
    else:
        # For all other block
        command += "xs2jpg(gcf(),'%s/%s');" % (IMAGEDIR, 'img_test.jpg')
    if diagram.workspace_counter in (1, 2, 3) and exists(workspace):
        command += "deletefile('" + workspace + "');"
    if diagram.workspace_counter in (1, 3):
        command += "save('" + workspace + "');"
    try:
        diagram.scilab_proc, diagram.log_name = run_scilab(command, True)
    except FileNotFoundError:
        return "scilab not found. Follow the installation instructions"
    print('log_name=', diagram.log_name)
    # Start sending log to chart function for creating chart
    try:
        # For processes taking less than 10 seconds
        # NOTE(review): the comment above says 10 seconds, but the timeout
        # used below is 4 - confirm which value is intended
        scilab_out = diagram.scilab_proc.communicate(timeout=4)[0]
        # strip scilab's decorative banner lines from the console output
        scilab_out = re.sub(r'^[ !\\-]*\n', r'',
                            scilab_out, flags=re.MULTILINE)
        print("=== Begin output from scilab console ===")
        print(scilab_out, end='')
        print("===== End output from scilab console ===")
        # Check for errors in Scilab
        if "Empty diagram" in scilab_out:
            return "Empty diagram"
        m = re.search(r'Fatal error: exception Failure\("([^"]*)"\)',
                      scilab_out)
        if m:
            msg = 'modelica error: ' + m.group(1)
            return msg
        if ("xcos_simulate: "
                "Error during block parameters update.") in scilab_out:
            return "Error in block parameter. Please check block parameters"
        if "xcosDiagramToScilab:" in scilab_out:
            return "Error in xcos diagram. Please check diagram"
        if "Cannot find scilab-bin" in scilab_out:
            return ("scilab has not been built. "
                    "Follow the installation instructions")
        if os.stat(diagram.log_name).st_size == 0:
            return "log file is empty"
    # For processes taking more than 10 seconds
    except subprocess.TimeoutExpired:
        pass
    if diagram.workspace_counter == 5:
        # sci-func output files are transient: clean them up shortly after
        Timer(15.0, delete_image, [scifile]).start()
        Timer(10.0, delete_scifile, [scifile]).start()
    return ""
@flask.stream_with_context
def event_stream():
    '''
    Generator behind the /SendLog SSE endpoint: read the scilab log file
    and yield server-sent events for the chart in the browser.

    Events emitted: "block" (a block-identification line), "log" (a data
    line), "ERROR" (no diagram / missing or empty log) and a final "DONE"
    after scilab has been killed and cleanup has run.
    '''
    (diagram, __) = get_diagram(get_request_id())
    if diagram is None:
        print('no diagram')
        yield "event: ERROR\ndata: no diagram\n\n"
        return
    # Open the log file
    if not isfile(diagram.log_name):
        print("log file does not exist")
        yield "event: ERROR\ndata: no log file found\n\n"
        return
    # wait cooperatively until scilab has written something or has exited
    while os.stat(diagram.log_name).st_size == 0 and \
            diagram.scilab_proc.poll() is None:
        gevent.sleep(LOOK_DELAY)
    if os.stat(diagram.log_name).st_size == 0 and \
            diagram.scilab_proc.poll() is not None:
        print("log file is empty")
        yield "event: ERROR\ndata: log file is empty\n\n"
        return
    with open(diagram.log_name, "r") as log_file:
        # Start sending log
        lineno = 0
        line = line_and_state()
        while True:
            lineno += 1
            line.set(get_line_and_state(log_file, diagram.figure_list, lineno))
            # figure_list drains as blocks finish (see endBlock route)
            if len(diagram.figure_list) == 0:
                break
            # Get the line and loop until the state is ENDING and figure_list
            # empty. Determine if we get block id and give it to chart.js
            if line.get_state() == BLOCK_IDENTIFICATION:
                yield "event: block\ndata: " + line.get_line() + "\n\n"
            elif line.get_state() != DATA:
                # nothing useful yet: yield control to other greenlets
                gevent.sleep(LOOK_DELAY)
            else:
                yield "event: log\ndata: " + line.get_line() + "\n\n"
    # Finished Sending Log
    kill_scilab(diagram)
    # Notify Client
    yield "event: DONE\ndata: None\n\n"
def delete_image(scifile):
    '''Delete the rendered sci-func image (if any) and reset its name.'''
    image_name = scifile.file_image
    if image_name == '':
        return
    remove(IMAGEDIR + '/' + image_name)
    scifile.file_image = ''
def delete_scifile(scifile):
    '''Delete the uploaded sci file (if any) and reset its name.'''
    name = scifile.filename
    if name == '':
        return
    remove(name)
    scifile.filename = ''
def AppendtoTKfile(diagram):
    '''Append any newly-due (time, value) samples to each tk value file.

    For every tkscale block, one "<time> <value>" line is written per
    delta interval elapsed since the last recorded sample time.
    '''
    starttime = diagram.tk_starttime
    for idx in range(diagram.tk_count):
        fname = join(diagram.sessiondir, VALUES_FOLDER,
                     diagram.diagram_id + "_tk" + str(idx + 1) + ".txt")
        # append data to the tk.txt
        with open(fname, 'a') as w:
            # emit one sample for every delta interval that has elapsed
            while time() > (starttime + diagram.tk_times[idx]
                            + diagram.tk_deltatimes[idx]):
                # update the time
                diagram.tk_times[idx] += diagram.tk_deltatimes[idx]
                w.write('%10.3E %10.3E\n' %
                        (diagram.tk_times[idx], diagram.tk_values[idx]))
def getDetailsThread(diagram):
    '''Sampling loop run in a green thread: append tk samples while the
    diagram's tkbool flag stays set (cleared by stopDetailsThread).'''
    while diagram.tkbool:
        AppendtoTKfile(diagram)
        gevent.sleep(0.1)
def stopDetailsThread(diagram):
    '''Stop the tk sampling loop and delete this diagram's value files.'''
    diagram.tkbool = False  # stops the thread
    gevent.sleep(LOOK_DELAY)
    pattern = join(diagram.sessiondir, VALUES_FOLDER,
                   diagram.diagram_id + "_*")
    # delete every file created under the 'diagram_id' name
    for path in glob.glob(pattern):
        remove(path)
# --- upload helpers --------------------------------------------------------

def _grep_lines(fname, pattern_text):
    '''Return a 1-based line number for every regex match in *fname*.

    One entry is appended per match, so len() of the result is the total
    match count (the original code leaked the file handles it opened for
    these scans; using ``with`` closes them deterministically).
    '''
    pattern = re.compile(pattern_text)
    found = []
    with open(fname) as f:
        for lineno, line in enumerate(f, start=1):
            found.extend(lineno for _ in pattern.finditer(line))
    return found


def _rewrite_ports(fname, block_ids, srch, rplc, ordering=None):
    '''Rewrite port tags in *fname* in place: replace *srch* with *rplc*
    on every line whose port belongs (via its parent attribute) to one of
    *block_ids*, optionally restricted to a given *ordering* attribute.

    Used to add the dataColumns/dataLines attributes that are missing
    from the port tags of INTMUL, MATBKSL, MATDET, MATDIAG, MATDIV and
    CURV_f blocks in the uploaded xcos XML.
    '''
    with open(fname, "r") as f:
        lines = f.readlines()
    out = []
    for word in lines:
        if srch in word:
            for block_id in block_ids:
                marker = 'parent="' + str(block_id) + '"'
                if ordering is not None:
                    marker = 'ordering="' + ordering + '" ' + marker
                if marker in word:
                    word = word.replace(srch, rplc)
        out.append(word)
    with open(fname, "w") as f:
        f.writelines(out)


# geometry child element shared by all generated implicit ports
_IMPLICIT_GEOMETRY = ('\t\t<mxGeometry as="geometry" height="10" '
                      'relative="1" width="10" y="0.5000">\n'
                      '\t\t</mxGeometry>\n')


def _implicit_port(tag, port_id, parent):
    '''Return the XML text for one Implicit{Input,Output}Port element.'''
    return ('\t <' + tag + ' connectable="0" '
            'dataType="UNKNOW_TYPE" '
            'id="' + str(port_id) + '" '
            'ordering="1" parent="' + parent + '" '
            'style="' + tag + '">\n' +
            _IMPLICIT_GEOMETRY +
            '\t </' + tag + '>\n')


def _convert_split_blocks(new_xml, temp_file_xml_name, list1, list2,
                          diagram, sessiondir):
    '''Rewrite SPLIT_f blocks that sit on implicit links.

    Their ControlPort/CommandPort children are detached (id="-1") and
    replaced by implicit input/output ports, and the surrounding
    CommandControlLink elements become ImplicitLink elements. Renames the
    rewritten XML to .xcos in the session upload folder and returns the
    diagram id.
    '''
    # a split block sits on an implicit line when a ControlPort tag is
    # exactly three lines below its SplitBlock tag
    splitline = []
    for split_lineno in list1:
        for control_lineno in list2:
            if control_lineno == split_lineno + 3:
                splitline.append(split_lineno)
    # ids of all SPLIT_f blocks, in document order (parallel to list1)
    block_ids = []
    for block in new_xml.getElementsByTagName("SplitBlock"):
        if block.getAttribute("style") == "SPLIT_f":
            block_ids.append(int(block.getAttribute("id")))
    finalsplit = [block_ids[j]
                  for lineno in splitline
                  for j, split_lineno in enumerate(list1)
                  if lineno == split_lineno]
    wanted_parents = set(str(fs) for fs in finalsplit)
    # detach the control/command ports of the selected split blocks
    for block in new_xml.getElementsByTagName("ControlPort"):
        if block.getAttribute("parent") in wanted_parents:
            block.setAttribute('id', '-1')
    for block in new_xml.getElementsByTagName("CommandPort"):
        if block.getAttribute("parent") in wanted_parents:
            block.setAttribute('id', '-1')
    # ids of the CommandControlLink elements that must become ImplicitLink
    finalchangeid = []
    for fs in finalsplit:
        finalchangeid.append(fs + 4)
        finalchangeid.append(fs + 5)
    # save the DOM changes before the text-level rewrites below
    with open(temp_file_xml_name, 'w') as f:
        f.write(new_xml.toxml())
    # rewrite the matching opening CommandControlLink tags
    newline = []
    with open(temp_file_xml_name, "r") as f:
        for word in f:
            if "<CommandControlLink id=" in word:
                for fcid in (str(c) for c in finalchangeid):
                    srch = '<CommandControlLink id="' + fcid + '"'
                    if srch in word:
                        word = word.replace(
                            srch, '<ImplicitLink id="' + fcid + '"')
            newline.append(word)
    with open(temp_file_xml_name, "w") as f:
        f.writelines(newline)
    # insert implicit input/output ports in front of every detached
    # ControlPort of the selected split blocks
    with open(temp_file_xml_name, "r") as in_file:
        buf = in_file.readlines()
    with open(temp_file_xml_name, "w") as out_file:
        for line in buf:
            for fs_int in finalsplit:
                fs = str(fs_int)
                srch = ('<ControlPort connectable="0" '
                        'dataType="UNKNOW_TYPE" id="-1" ordering="1" '
                        'parent="' + fs + '"')
                if srch in line:
                    line = (_implicit_port('ImplicitInputPort',
                                           fs_int + 1, fs) +
                            _implicit_port('ImplicitOutputPort',
                                           fs_int + 2, fs) +
                            _implicit_port('ImplicitOutputPort',
                                           fs_int + 3, fs) +
                            line)
            out_file.write(line)
    # fix the closing tags: the line directly above each rewritten
    # ImplicitLink opening tag still closes the old element
    implitdetect = []
    for fs in finalsplit:
        implitdetect.append(fs + 5)
        implitdetect.append(fs + 6)
    list3 = []
    for ident in implitdetect:
        for lineno in _grep_lines(temp_file_xml_name,
                                  '<ImplicitLink id="' + str(ident) + '"'):
            list3.append(lineno - 2)  # 0-based index of the line above
    with open(temp_file_xml_name, 'r+') as f:
        data = f.read().splitlines()
        for idx in list3:
            data[idx] = '\t </ImplicitLink>'
        f.seek(0)
        f.write('\n'.join(data))
        f.truncate()
    # move the converted file into the session's upload folder as .xcos
    fname = join(sessiondir, UPLOAD_FOLDER,
                 splitext(temp_file_xml_name)[0] + ".xcos")
    os.rename(temp_file_xml_name, fname)
    diagram.xcos_file_name = fname
    return diagram.diagram_id


@app.route('/upload', methods=['POST'])
def upload():
    '''Route that will process the file upload.

    Saves the posted diagram as <diagram_id>.xml, patches known problem
    spots in the XML (SPLIT_f blocks on implicit links, TKSCALE blocks,
    missing port size attributes of a few matrix blocks), then renames
    the result to .xcos in the session upload folder and returns the
    diagram id ("error" when no file was posted).
    '''
    # Get the file
    file = request.files['file']
    # Check if the file is not null
    if not file:
        return "error"
    (diagram, scripts, scifile, sessiondir) = add_diagram()
    script = get_script(get_script_id(default=None), scripts=scripts)
    if script is not None:
        diagram.workspace_filename = script.workspace_filename
    # Save the file in xml extension and using it for further modification
    # by using xml parser
    temp_file_xml_name = diagram.diagram_id + ".xml"
    file.save(temp_file_xml_name)
    new_xml = minidom.parse(temp_file_xml_name)
    blocks = new_xml.getElementsByTagName("BasicBlock")
    # line numbers of SplitBlock and ControlPort tags: used to find split
    # blocks that sit on implicit (control) links
    list1 = _grep_lines(temp_file_xml_name, r"<SplitBlock")
    list2 = _grep_lines(temp_file_xml_name, r"<ControlPort")
    if len(_grep_lines(temp_file_xml_name, r"<ImplicitInputPort")) >= 1:
        # the diagram uses implicit links: rewrite the split blocks and
        # return immediately (none of the fixes below apply)
        return _convert_split_blocks(new_xml, temp_file_xml_name,
                                     list1, list2, diagram, sessiondir)
    # AFFICH_m needs special handling later (no graphic window wanted)
    for block in new_xml.getElementsByTagName("AfficheBlock"):
        if block.getAttribute("interfaceFunctionName") == "AFFICH_m":
            diagram.workspace_counter = 4
    # flags to check if both TOWS_c and FROMWSB are present
    flag1 = 0
    flag2 = 0
    tk_is_present = False
    # block IDs of tkscale blocks; the generated read blocks reuse them
    block_id = []
    for block in blocks:
        if block.getAttribute("interfaceFunctionName") == "TKSCALE":
            block_id.append(block.getAttribute("id"))
            # id -1 virtually disconnects the tkscale block from the
            # diagram at the backend
            block.setAttribute('id', '-1')
            tk_is_present = True
        # workspace_counter: 1 TOWS_c, 2 FROMWSB, 3 both, 4 AFFICH_m,
        # 5 sci-func (may return an image instead of a sink log file)
        if block.getAttribute(
                "interfaceFunctionName") == "scifunc_block_m":
            diagram.workspace_counter = 5
        if block.getAttribute("interfaceFunctionName") == "TOWS_c":
            diagram.workspace_counter = 1
            flag1 = 1
        if block.getAttribute("interfaceFunctionName") == "FROMWSB":
            diagram.workspace_counter = 2
            flag2 = 1
    if flag1 and flag2:
        # Both TOWS_c and FROMWSB are present
        diagram.workspace_counter = 3
    # Hardcoded the real time scaling to 1.0 (i.e., no scaling of time
    # occurs) only if tkscale is present
    if tk_is_present:
        for dia in new_xml.getElementsByTagName("XcosDiagram"):
            dia.setAttribute('realTimeScaling', '1.0')
    # Save the changes made by parser
    with open(temp_file_xml_name, 'w') as f:
        f.write(new_xml.toxml())
    # In front of every TKSCALE block print the corresponding RFILE_f
    # (read) block; fileinput with inplace=1 redirects print() into the file
    for line in fileinput.input(temp_file_xml_name, inplace=1):
        if 'interfaceFunctionName=\"TKSCALE\"' in line:
            # change the block ID
            i = diagram.tk_count
            print('<BasicBlock blockType="d" id="', block_id[i], '" '
                  'interfaceFunctionName="RFILE_f" parent="1" '
                  'simulationFunctionName="readf" '
                  'simulationFunctionType="DEFAULT" style="RFILE_f">',
                  sep='')
            print('<ScilabString as="exprs" height="5" width="1">')
            # Value equal to 1 implies take readings from first column in
            # the file
            print('<data column="0" line="0" value="1"/>')
            print('<data column="0" line="1" value="2"/>')
            # Path to the file from which read block obtains the values
            fname = join(diagram.sessiondir, VALUES_FOLDER,
                         diagram.diagram_id + "_tk" + str(i + 1) + ".txt")
            print('<data column="0" line="2" value="', fname, '"/>',
                  sep='')
            # (2(e10.3,1x)): two columns with base 10 and 3 digits after
            # the decimal, 1x represents one space between the columns
            print('<data column="0" line="3" value="(2(e10.3,1x))"/>')
            print('<data column="0" line="4" value="2"/>')
            print('</ScilabString>')
            print('<ScilabDouble as="realParameters" '
                  'height="0" width="0"/>')
            print('<ScilabDouble as="integerParameters" '
                  'height="105" width="1">')
            diagram.tk_count += 1
            # The remaining part of the block is read from the
            # Read_Content.txt file and written to the xml file
            with open(READCONTENTFILE, "r") as read_file:
                for line_content in read_file:
                    print(line_content, end='')
        print(line, end='')
    # To resolve port issue coming in xcos file for following blocks:
    # INTMUL, MATBKSL, MATDET, MATDIAG, MATDIV and CURV_f
    # ISSUE is missing dataColumns/dataLines attributes in the port tags
    port_ids = {style: [] for style in ("INTMUL", "MATBKSL", "MATDET",
                                        "MATDIAG", "MATDIV", "CURV_f")}
    for block in blocks:
        style = block.getAttribute("style")
        if style in port_ids:
            port_ids[style].append(int(block.getAttribute("id")))
    in_port = '<ExplicitInputPort dataType="REAL_MATRIX"'
    out_port = '<ExplicitOutputPort dataType="REAL_MATRIX"'
    if port_ids["INTMUL"]:
        _rewrite_ports(temp_file_xml_name, port_ids["INTMUL"], in_port,
                       '<ExplicitInputPort dataColumns="-3" '
                       'dataLines="-2" dataType="REAL_MATRIX"',
                       ordering="2")
        _rewrite_ports(temp_file_xml_name, port_ids["INTMUL"], out_port,
                       '<ExplicitOutputPort dataColumns="-3" '
                       'dataType="REAL_MATRIX"')
    if port_ids["MATBKSL"]:
        _rewrite_ports(temp_file_xml_name, port_ids["MATBKSL"], in_port,
                       '<ExplicitInputPort dataColumns="-3" '
                       'dataType="REAL_MATRIX"',
                       ordering="2")
        _rewrite_ports(temp_file_xml_name, port_ids["MATBKSL"], out_port,
                       '<ExplicitOutputPort dataColumns="-3" '
                       'dataLines="-2" dataType="REAL_MATRIX"')
    if port_ids["MATDET"]:
        _rewrite_ports(temp_file_xml_name, port_ids["MATDET"], in_port,
                       '<ExplicitInputPort dataColumns="-1" '
                       'dataType="REAL_MATRIX"',
                       ordering="2")
        _rewrite_ports(temp_file_xml_name, port_ids["MATDET"], out_port,
                       '<ExplicitOutputPort dataColumns="1" '
                       'dataLines="1" dataType="REAL_MATRIX"')
    if port_ids["CURV_f"]:
        _rewrite_ports(temp_file_xml_name, port_ids["CURV_f"], out_port,
                       '<ExplicitOutputPort dataColumns="1" '
                       'dataLines="1" dataType="REAL_MATRIX"')
    if port_ids["MATDIAG"]:
        _rewrite_ports(temp_file_xml_name, port_ids["MATDIAG"], in_port,
                       '<ExplicitInputPort dataColumns="1" '
                       'dataType="REAL_MATRIX"',
                       ordering="2")
        _rewrite_ports(temp_file_xml_name, port_ids["MATDIAG"], out_port,
                       '<ExplicitOutputPort dataColumns="-1" '
                       'dataType="REAL_MATRIX"')
    if port_ids["MATDIV"]:
        _rewrite_ports(temp_file_xml_name, port_ids["MATDIV"], in_port,
                       '<ExplicitInputPort dataColumns="-3" '
                       'dataType="REAL_MATRIX"',
                       ordering="1")
        _rewrite_ports(temp_file_xml_name, port_ids["MATDIV"], in_port,
                       '<ExplicitInputPort dataColumns="-3" '
                       'dataLines="-2" dataType="REAL_MATRIX"',
                       ordering="2")
    # Changing the file extension from xml to xcos
    fname = join(sessiondir, UPLOAD_FOLDER,
                 splitext(temp_file_xml_name)[0] + ".xcos")
    # Move the xcos file to uploads directory
    os.rename(temp_file_xml_name, fname)
    diagram.xcos_file_name = fname
    return diagram.diagram_id
@app.route('/filenames.php', methods=['POST'])
def filenames():
    '''List the entries below BASEDIR + url as a JSON array of paths.'''
    url = request.form['url']
    # only accept simple absolute folder paths: non-empty, no dots,
    # leading and trailing slash
    if url == '' or '.' in url or url[0] != '/' or url[-1] != '/':
        return "error"
    entries = [url + entry for entry in os.listdir(BASEDIR + url)]
    return Response(json.dumps(entries), mimetype='application/json')
@app.route('/UpdateTKfile', methods=['POST'])
def UpdateTKfile():
    '''Receive TKSCALE slider updates and manage the sampling thread.

    The posted file contains "Start", "Stop", or a comma-separated list
    of "<delta_time> <value>" pairs, one entry per tkscale block.
    Returns "" on success, "error" when the diagram or file is missing.
    '''
    (diagram, __) = get_diagram(get_request_id())
    if diagram is None:
        print('no diagram')
        return "error"
    # function which makes the initialisation and update of the files with
    # the newly obtained value
    # Get the file
    file = request.files['file']
    # Check if the file is not null
    if not file:
        return "error"
    # saves the file in values folder
    line = file.read().decode()
    if line == "Start":
        # at first the val.txt contains "Start" indicating the starting of the
        # process
        diagram.tkbool = True
        diagram.tk_starttime = time()
        # per-tkscale sampling state: interval, latest value, last sample time
        diagram.tk_deltatimes = []
        diagram.tk_values = []
        diagram.tk_times = []
        for i in range(diagram.tk_count):
            diagram.tk_deltatimes.append(0.1)
            diagram.tk_values.append(0)
            diagram.tk_times.append(0)
            fname = join(diagram.sessiondir, VALUES_FOLDER,
                         diagram.diagram_id + "_tk" + str(i + 1) + ".txt")
            open(fname, "w").close()
            # create empty tk text files
        # starts the thread
        Timer(0.1, getDetailsThread, [diagram]).start()
    elif line == "Stop":
        # at last the val.txt contains "Stop" indicating the ending process
        # stops the thread
        stopDetailsThread(diagram)
    else:
        # one "<delta_time> <value>" entry per tkscale block
        tklist = line.split(',')
        for i in range(min(diagram.tk_count, len(tklist))):
            tl = tklist[i].split(' ')
            # skip malformed entries with no value part
            if len(tl) == 1 or tl[1] == '':
                continue
            diagram.tk_deltatimes[i] = float(tl[0])
            diagram.tk_values[i] = float(tl[1])
    return ""
@app.route('/downloadfile', methods=['POST'])
def DownloadFile():
    '''Route for download of a binary or audio file from the session dir.'''
    requested = request.form['path']
    # reject empty names, hidden files and anything with a path separator
    if requested == '' or requested[0] == '.' or '/' in requested:
        print('downloadfile=', requested)
        return "error"
    # pick the mimetype from the file name: audio file or binary file
    mimetype = ('audio/basic' if "audio" in requested
                else 'application/octet-stream')
    return flask.send_from_directory(
        SESSIONDIR, requested, as_attachment=True, mimetype=mimetype)
@app.route('/deletefile', methods=['POST'])
def DeleteFile():
    '''Route for deletion of a binary or audio file from the session dir.'''
    requested = request.form['path']
    # reject empty names, hidden files and anything with a path separator
    if requested == '' or requested[0] == '.' or '/' in requested:
        print('deletefile=', requested)
        return "error"
    remove(requested)  # deleting the file
    return "0"
@app.route('/SendLog')
def sse_request():
    '''Stream the simulation log to the browser as server-sent events.'''
    return Response(event_stream(), mimetype='text/event-stream')
@app.route('/<path:path>')
def static_file(path):
    '''Serve any other URL path straight from the static folder.'''
    return app.send_static_file(path)
@app.route('/stop')
def stop():
    '''Route to kill scilab (if running) when the chart is closed.'''
    kill_scilab()
    return "done"
@app.route('/endBlock/<fig_id>')
def endBlock(fig_id):
    '''Route to end blocks with no Ending parameter.

    Removes *fig_id* from the diagram's figure list so event_stream()
    can terminate once the list drains. Always returns a string: the
    previous bare ``return`` yielded None, which Flask rejects with
    "view function did not return a valid response".
    '''
    (diagram, __) = get_diagram(get_request_id())
    if diagram is None:
        print('no diagram')
        return "error"
    # tolerate duplicate end notifications for the same figure instead of
    # letting list.remove() raise ValueError
    if fig_id in diagram.figure_list:
        diagram.figure_list.remove(fig_id)
    return "done"
@app.route('/')
def page():
    '''Render the landing page with empty example placeholders.'''
    empty_context = {
        'example_content': '',
        'example_filename': '',
        'prerequisite_content': '',
        'prerequisite_filename': '',
    }
    return render_template('index.html', **empty_context)
@app.route('/getOutput', methods=['POST'])
def run_scilab_func_request():
    '''Compute the controllable canonical form of num/den via scilab.

    Has scilab write the result to a per-session file and returns the
    parsed list as JSON, or "Error" when the file was not produced.
    '''
    (__, __, scifile, sessiondir, __) = init_session()
    file_name = join(sessiondir, "cont_frm_value.txt")
    num = request.form['num']
    den = request.form['den']
    '''
    sample input to scilab:
    num: 1+s
    den: s^2-5*s+1
    '''
    # pick the polynomial variable: z for discrete, s for continuous
    if 'z' in num or 'z' in den:
        p = 'z'
    else:
        p = 's'
    # NOTE(review): num/den come straight from the form and are spliced
    # into the scilab command string - potential command injection;
    # confirm this input is trusted.
    command = "%s=poly(0, '%s');" % (p, p)
    command += "exec('%s');" % CONT_FRM_WRITE
    command += "calculate_cont_frm(%s,%s,'%s');" % (num, den, file_name)
    try:
        scifile.proc = run_scilab(command)
    except FileNotFoundError:
        return "scilab not found. Follow the installation instructions"
    # wait for scilab to finish writing the result file
    scifile.proc.communicate()
    list_value = ""
    '''
    sample output from scilab:
    [[0], [1], [0], [0]]
    '''
    if isfile(file_name):
        with open(file_name) as f:
            data = f.read()  # Read the data into a variable
            list_value = data.replace('][', '],[')
        remove(file_name)
    else:
        # scilab did not produce the file (e.g. invalid expression)
        list_value = "Error"
    return jsonify(list_value)
# App route for getting scilab expression output for Expression Block
@app.route('/getExpressionOutput', methods=['POST'])
def run_scilab_func_expr_request():
    '''Evaluate an Expression-block definition with scilab.

    Reads the function head and expression from the POST form, has scilab
    write the computed block parameters to a per-session file, and maps
    the file's lines onto the JSON keys ok/ok1/ipar/rpar/nz.
    '''
    (__, __, scifile, sessiondir, __) = init_session()
    file_name = join(sessiondir, "expr_set_value.txt")
    head = request.form['head']
    exx = request.form['exx']
    '''
    sample input to scilab:
    head: %foo(u1,u2)
    exx: (u1>0)*sin(u2)^2
    '''
    # NOTE(review): head/exx come straight from the form and are spliced
    # into the scilab command string - potential command injection;
    # confirm this input is trusted.
    command = "exec('" + COPIED_EXPRESSION_SCI_FRM_SCILAB + \
        "');exec('" + EXP_SCI_FUNC_WRITE + \
        "');callFunctionAcctoMethod('" + file_name + \
        "','" + head + "','" + exx + "');"
    try:
        scifile.proc = run_scilab(command)
    except FileNotFoundError:
        return "scilab not found. Follow the installation instructions"
    # wait for scilab to finish writing the result file
    scifile.proc.communicate()
    # create a dictionary
    exprs_value = {}
    '''
    Array containing value which will be used as key
    for dictionary 'exprs_value'
    sample output from scilab:
    ok: true or scilab error message
    ok1: true
    ipar: [[2], [1], [6], [1], [5], [18], [2], [2], [5]
    , [101], [6], [2], [5], [15], [5], [3]]
    rpar: [[0], [2]]
    nz: [[1]]
    '''
    var_array = ["ok", "ok1", "ipar", "rpar", "nz"]
    # Guard against scilab failing before the file exists (the unguarded
    # open() used to raise and answer with a 500), and pair keys with
    # lines via zip() so unexpected extra lines cannot raise IndexError.
    if isfile(file_name):
        with open(file_name) as f:
            valuesfromfile = f.read().splitlines()
        for key, value in zip(var_array, valuesfromfile):
            exprs_value[key] = value
        remove(file_name)
    if not exprs_value:
        exprs_value["ok"] = "Enter a valid scilab expression : " + \
            "custom made message"
    return jsonify(exprs_value)
# example page start ###################
@app.route('/example')
def example_page():
    '''Render the example browser populated with all categories.'''
    try:
        cur = connection()
        cur.execute(config.QUERY_CATEGORY)
        return render_template('example.html', data=cur.fetchall())
    except Exception as e:
        return str(e)
@app.route('/get_book', methods=['GET', 'POST'])
def ajax_get_book():
    '''Return the books of the selected category as JSON.'''
    cat_id = request.args.get('catid')
    try:
        cur = connection()
        cur.execute(config.QUERY_BOOK, [cat_id])
        return jsonify(cur.fetchall())
    except Exception as e:
        return str(e)
@app.route('/get_chapter', methods=['GET', 'POST'])
def ajax_get_chapter():
    '''Return the chapters of the selected book as JSON.'''
    book_id = request.args.get('bookid')
    try:
        cur = connection()
        cur.execute(config.QUERY_CHAPTER, [book_id])
        return jsonify(cur.fetchall())
    except Exception as e:
        return str(e)
@app.route('/get_example', methods=['GET', 'POST'])
def ajax_get_example():
    '''Return the examples of the selected chapter as JSON.'''
    chapter_id = request.args.get('chapterid')
    try:
        cur = connection()
        cur.execute(config.QUERY_EXAMPLE, [chapter_id])
        return jsonify(cur.fetchall())
    except Exception as e:
        return str(e)
@app.route('/get_example_file', methods=['GET', 'POST'])
def ajax_get_example_file():
    '''Return the files of the selected example as JSON.'''
    example_id = request.args.get('exampleid')
    try:
        cur = connection()
        cur.execute(config.QUERY_EXAMPLE_FILE, [example_id])
        return jsonify(cur.fetchall())
    except Exception as e:
        return str(e)
def clean_text(s):
    '''Remove every line break from *s*, together with any blanks or tabs
    immediately surrounding it.'''
    line_break = re.compile(r'[ \t]*[\r\n]+[ \t]*')
    return line_break.sub(r'', s)
def get_example_file(example_file_id):
    '''Return (text, filename, example_id) for an example xcos file.

    Looks the file up in the database, prefers reading it from the local
    XCOSSOURCEDIR checkout, and falls back to downloading it from
    scilab.in. The text is passed through clean_text() in both cases.
    '''
    filename = 'example.xcos'
    filepath = ''
    cur = connection()
    cur.execute(config.QUERY_EXAMPLE_FILE_BY_ID, [example_file_id])
    # keep the last row returned (normally there is exactly one)
    for (filename, filepath, example_id) in cur.fetchall():
        pass
    # NOTE(review): if the query returns no rows, example_id stays unbound
    # and the returns below raise NameError - confirm ids are always valid.
    if XCOSSOURCEDIR != '' and filepath != '':
        try:
            print('reading', filename, 'from', filepath)
            with open(join(XCOSSOURCEDIR, filepath), 'r') as f:
                text = clean_text(f.read())
            return (text, filename, example_id)
        except Exception as e:
            # local read failed - fall through to the download path
            print('Exception:', str(e))
    scilab_url = "https://scilab.in/download/file/" + example_file_id
    print('downloading', scilab_url)
    # NOTE(review): no status-code check on the response - a 404 page
    # would be returned as the file text; confirm this is acceptable.
    r = requests.get(scilab_url)
    text = clean_text(r.text)
    return (text, filename, example_id)
def clean_text_2(s):
    '''Normalise whitespace and escape the text for embedding in a
    single-line string: control characters dropped, tabs turned into
    spaces, trailing blanks and final newlines removed, backslashes
    doubled and each newline encoded as the two characters backslash-n.'''
    rules = (
        (r'[\a\b\f\r\v]', r''),
        (r'\t', r' '),
        (r' +(\n|$)', r'\n'),
        (r'\n+$', r''),
        # double each backslash
        (r'\\', r'\\\\'),
        # replace each newline with '\n'
        (r'\n', r'\\n'),
    )
    for pattern, repl in rules:
        s = re.sub(pattern, repl, s)
    return s
def get_prerequisite_file(example_id):
    """Fetch the prerequisite file attached to *example_id*.

    Returns (text, filename); text is '' when the example has no
    prerequisite file.  The file is read from the local XCOSSOURCEDIR
    checkout when available, otherwise downloaded from scilab.in.
    """
    filename = ''
    filepath = ''
    prerequisite_file_id = None
    cur = connection()
    cur.execute(config.QUERY_PREREQUISITE_FILE_BY_EXAMPLE_ID, [example_id])
    # keep the values of the last row returned, if any
    for (filename, filepath, prerequisite_file_id) in cur.fetchall():
        pass
    if prerequisite_file_id is None:
        return ('', filename)
    if XCOSSOURCEDIR != '' and filepath != '':
        try:
            print('reading', filename, 'from', filepath)
            with open(join(XCOSSOURCEDIR, filepath), 'r') as f:
                text = clean_text_2(f.read())
            return (text, filename)
        except Exception as e:
            # fall through to the network download below
            print('Exception:', str(e))
    scilab_url = "https://scilab.in/download/file/" + str(prerequisite_file_id)
    print('downloading', scilab_url)
    r = requests.get(scilab_url)
    text = clean_text_2(r.text)
    return (text, filename)
@app.route('/example_file', methods=['GET', 'POST'])
def download_example_file():
    """Serve the example file named by ?efid= as a download attachment."""
    efid = request.args.get('efid')
    (content, fname, _example_id) = get_example_file(efid)
    headers = {'Content-Disposition': 'attachment; filename="%s"' % fname}
    return Response(content,
                    mimetype='application/octet-stream',
                    headers=headers)
@app.route('/open', methods=['GET', 'POST'])
def open_example_file():
    """Render the editor page pre-loaded with the example file given in
    ?efid= and its prerequisite file (empty when there is none)."""
    efid = request.args.get('efid')
    (content, fname, example_id) = get_example_file(efid)
    (pre_content, pre_fname) = get_prerequisite_file(str(example_id))
    return render_template('index.html',
                           example_content=content,
                           example_filename=fname,
                           prerequisite_content=pre_content,
                           prerequisite_filename=pre_fname)
# example page end #################
if __name__ == '__main__':
    # Entry point: run the app under gevent's WSGI server so streaming
    # responses and background greenlets are served concurrently.
    print('starting')
    os.chdir(SESSIONDIR)
    # Set server address from config
    http_server = WSGIServer(
        (config.HTTP_SERVER_HOST, config.HTTP_SERVER_PORT), app)
    print('listening:', http_server)
    try:
        http_server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: exit cleanly
        print('exiting')
#!/usr/bin/python3
import gevent
from gevent.lock import RLock
from gevent.monkey import patch_all
from gevent.pywsgi import WSGIServer
patch_all(aggressive=False, subprocess=False)
from datetime import datetime
import fileinput
import flask
from flask import request, Response, session, render_template, jsonify
import flask_session
import glob
import json
import os
from os.path import abspath, basename, dirname, exists, isfile, join, splitext
import re
import requests
import signal
import subprocess
from tempfile import mkdtemp, mkstemp
from threading import Timer
from time import time
import uuid
from werkzeug import secure_filename
from xml.dom import minidom
from db_connection import connection
import config
from config import FLASKSESSIONDIR, SESSIONDIR, XCOSSOURCEDIR, REMOVEFILE
def makedirs(dirname, dirtype):
    """Create *dirname* (and parents) unless it already exists;
    *dirtype* only labels the log message."""
    if exists(dirname):
        return
    print('making', dirtype, 'dir', dirname)
    os.makedirs(dirname)
def remove(filename):
    """Delete *filename* if removal is enabled.

    Returns False for a None filename or a failed deletion, True
    otherwise (including when REMOVEFILE disables deletion).
    """
    if filename is None:
        return False
    if not REMOVEFILE:
        print("not removing", filename)
        return True
    try:
        os.remove(filename)
        return True
    # OSError instead of BaseException: the old clause also swallowed
    # KeyboardInterrupt/SystemExit, which must propagate.
    except OSError:
        print("could not remove", filename)
        return False
# change directory before using relative paths
os.chdir(dirname(abspath(__file__)))
makedirs(FLASKSESSIONDIR, 'top flask session')
makedirs(SESSIONDIR, 'top session')
# Flask serves both static assets and templates out of webapp/
app = flask.Flask(__name__, static_folder='webapp/', template_folder='webapp')
# server-side sessions, stored on disk under FLASKSESSIONDIR
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SESSION_FILE_DIR'] = FLASKSESSIONDIR
# These are the extension that we are accepting to be uploaded
app.config['ALLOWED_EXTENSIONS'] = set(['zcos', 'xcos', 'txt'])
flask_session.Session(app)
# This is the path to the upload directory and values directory
UPLOAD_FOLDER = 'uploads'  # to store xcos file
VALUES_FOLDER = 'values'  # to store files related to tkscale block
# to store uploaded script (.sce) files (see uploadscript)
SCRIPT_FILES_FOLDER = 'script_files'
# to store uploaded sci files for sci-func block
SCIFUNC_FILES_FOLDER = 'scifunc_files'
# Delay time to look for new line (in s)
LOOK_DELAY = 0.1
# States of the line
# to indicate initialization of block in log file is encountered
INITIALIZATION = 0
# to indicate ending of log file data for that block is encountered
ENDING = 1
# to indicate data is proper and can be read
DATA = 2
# to indicate there is no line in log file further
NOLINE = -1
# to indicate block id is present
BLOCK_IDENTIFICATION = -2
# Scilab dir
SCIDIR = abspath(config.SCILAB_DIR)
SCI = join(SCIDIR, "bin", "scilab-adv-cli")
READCONTENTFILE = abspath("Read_Content.txt")
CONT_FRM_WRITE = abspath("cont_frm_write.sci")
COPIED_EXPRESSION_SCI_FRM_SCILAB = abspath("copied_expression_from_scilab.sci")
EXP_SCI_FUNC_WRITE = abspath("expression-sci-function.sci")
BASEDIR = abspath('webapp')
IMAGEDIR = join(BASEDIR, 'res_imgs')
# display limit for long strings
DISPLAY_LIMIT = 10
# handle scilab startup: stop on errors, widen console, replace the GUI
# messagebox with a console disp()
SCILAB_START = (
    "errcatch(-1,'stop');lines(0,120);clearfun('messagebox');"
    "function messagebox(msg,msgboxTitle,msgboxIcon,buttons,isModal),"
    "disp(msg),endfunction;")
SCILAB_END = "mode(2);quit();"
# variable names excluded when re-loading a saved workspace
# (subtracted from the variable list in start_scilab)
SCILAB_VARS = [
    "%p_r_p",
    "canon",
    "close",
    "extractDatatip",
    "extractLight",
    "messagebox",
    "syslin",
    "tf2ss",
]
# per-browser-session state, keyed by the session uid (see init_session)
USER_DATA = {}
class Diagram:
    """State of one uploaded xcos diagram / running simulation.

    Class-level values act as per-instance defaults; instances assign
    their own values as the simulation progresses.
    """
    diagram_id = None
    # session dir
    sessiondir = None
    # store uploaded filename
    xcos_file_name = None
    # encodes which workspace-related blocks the diagram uses
    # (see start_scilab: 1=TOWS_c, 2=FROMWSB, 3=both, 4=AFFICH_m, 5=sci-func)
    workspace_counter = 0
    # workspace from script
    workspace_filename = None
    # number of tkscale blocks
    tk_count = 0
    # running scilab-adv-cli subprocess
    scilab_proc = None
    # store log name
    log_name = None
    # is the tkscale-appending greenlet running?
    tkbool = False
    tk_starttime = None
    # in memory values for the tkscale blocks
    tk_deltatimes = None
    tk_values = None
    tk_times = None
    # List to store figure IDs from log_name
    figure_list = None
    def __init__(self):
        # fresh list per instance (the class-level value would be shared)
        self.figure_list = []
    def __str__(self):
        return (
            "{ 'scilab_pid': %s, "
            "'log_name': %s, 'tkbool': %s, 'figure_list': %s }") % (
            self.scilab_proc.pid if self.scilab_proc is not None else None,
            self.log_name, self.tkbool, self.figure_list)
class Script:
    """State of one uploaded Scilab script (.sce) execution."""
    script_id = None
    sessiondir = None
    # path of the saved .sce file
    filename = None
    # 1=running, 0=finished OK; negative values are error codes
    # (-1 unsafe, -2 scilab missing, -3 script error, -4 timeout, -5 stopped)
    status = 0
    # running scilab subprocess
    proc = None
    # .dat file the script's workspace is saved into
    workspace_filename = None
    def __str__(self):
        return (
            "{ script_id: %s, filename: %s, status: %d, "
            "script_pid: %s, "
            "workspace_filename: %s }") % (
            self.script_id, self.filename, self.status,
            self.proc.pid if self.proc is not None else None,
            self.workspace_filename)
class SciFile:
    '''Variables used in sci-func block'''
    # path of the uploaded .sci file
    filename = ''
    # name of the result image rendered for this file (see sendfile)
    file_image = ''
    # True while a validated uploaded .sci file is available
    flag_sci = False
    # scilab subprocess executing/validating the file
    proc = None
class UserData:
    """Per-session container: scratch directory plus diagram/script state."""
    sessiondir = None
    diagrams = None
    scripts = None
    scifile = None
    diagramlock = None
    def __init__(self):
        # private scratch directory for this session's files
        self.sessiondir = mkdtemp(
            prefix=datetime.now().strftime('%Y%m%d.'), dir=SESSIONDIR)
        self.diagrams = []
        self.scripts = []
        self.scifile = SciFile()
        # guards id allocation in add_diagram/add_script
        self.diagramlock = RLock()
class line_and_state:
    '''
    Holds one log line together with its parse state (used while
    streaming data out of the Scilab log file).
    '''
    line = None  # most recently stored line (None when nothing stored)
    state = NOLINE  # parse state of that line; NOLINE means no line yet
    def set(self, line_state):
        # line_state is a (line, state) pair
        (self.line, self.state) = line_state
        return False
    def get_line(self):
        return self.line
    def get_state(self):
        return self.state
def init_session():
    """Ensure the flask session has a uid and its working directories
    exist; return (diagrams, scripts, scifile, sessiondir, lock)."""
    if 'uid' not in session:
        session['uid'] = str(uuid.uuid1())
    uid = session['uid']
    if uid not in USER_DATA:
        USER_DATA[uid] = UserData()
    ud = USER_DATA[uid]
    # create the session dir and its four sub-directories on demand
    for subdir, kind in ((None, 'session'),
                         (UPLOAD_FOLDER, 'upload'),
                         (VALUES_FOLDER, 'values'),
                         (SCRIPT_FILES_FOLDER, 'script files'),
                         (SCIFUNC_FILES_FOLDER, 'scifunc files')):
        path = ud.sessiondir if subdir is None else join(ud.sessiondir, subdir)
        makedirs(path, kind)
    return (ud.diagrams, ud.scripts, ud.scifile, ud.sessiondir, ud.diagramlock)
def get_diagram(xcos_file_id, remove=False):
    """Look up a diagram by its (numeric string) id; when *remove* is
    set, replace its slot with a fresh Diagram.
    Returns (diagram, scifile), or (None, None) on a bad id."""
    if len(xcos_file_id) == 0:
        print("no id")
        return (None, None)
    index = int(xcos_file_id)
    (diagrams, __, scifile, __, __) = init_session()
    if not 0 <= index < len(diagrams):
        print("id", index, "not in diagrams")
        return (None, None)
    found = diagrams[index]
    if remove:
        diagrams[index] = Diagram()
    return (found, scifile)
def add_diagram():
    """Allocate a new Diagram slot for this session.
    Returns (diagram, scripts, scifile, sessiondir)."""
    (diagrams, scripts, scifile, sessiondir, diagramlock) = init_session()
    # lock so concurrent requests cannot hand out the same id
    with diagramlock:
        diagram = Diagram()
        diagram.diagram_id = str(len(diagrams))
        diagram.sessiondir = sessiondir
        diagrams.append(diagram)
    return (diagram, scripts, scifile, sessiondir)
def get_script(script_id, scripts=None, remove=False):
    """Look up a script by its (numeric string) id; when *remove* is
    set, clear its slot.  Returns the Script or None."""
    if script_id is None:
        return None
    if len(script_id) == 0:
        print("no id")
        return None
    index = int(script_id)
    if scripts is None:
        (__, scripts, __, __, __) = init_session()
    if index < 0 or index >= len(scripts):
        print("id", index, "not in scripts")
        return None
    found = scripts[index]
    if remove:
        scripts[index] = None
    return found
def add_script():
    """Allocate a new Script slot for this session.
    Returns (script, sessiondir)."""
    (__, scripts, __, sessiondir, diagramlock) = init_session()
    # lock so concurrent requests cannot hand out the same id
    with diagramlock:
        entry = Script()
        entry.script_id = str(len(scripts))
        entry.sessiondir = sessiondir
        scripts.append(entry)
    return (entry, sessiondir)
def parse_line(line, lineno):
    '''
    Parse one log line.
    Returns an (id, state) tuple where state is
    BLOCK_IDENTIFICATION for a "Block" marker line (id = block id),
    INITIALIZATION when a new figure is created (id = figure id),
    ENDING when the current figure ends,
    DATA for an ordinary coordinate line, and
    (None, NOLINE) when the line cannot be parsed.
    '''
    words = line.split(' ')  # split on spaces to inspect the markers
    try:
        marker = words[2]
        if marker == "Block":
            # block id is written into the log explicitly by our patched
            # scilab source code
            return (int(words[4]), BLOCK_IDENTIFICATION)
        if marker == "Initialization":
            # new figure: the trailing word is its id (several sinks may
            # coexist in one diagram, hence per-figure ids)
            return (int(words[-1]), INITIALIZATION)
        if marker == "Ending":
            # the figure with the trailing id has finished
            return (int(words[-1]), ENDING)
        # otherwise an ordinary data line carrying figure coordinates
        return (int(words[3]), DATA)
    except Exception as e:
        print(str(e), "while parsing", line, "on line", lineno)
        return (None, NOLINE)
def get_line_and_state(file, figure_list, lineno):
    '''
    Read the next line from the log *file* and classify it.

    Side effect: INITIALIZATION lines append their figure id to
    *figure_list* and ENDING lines remove it, so the caller can tell
    when every figure has finished.  Returns (line, state); line is
    None unless state is DATA or BLOCK_IDENTIFICATION.
    '''
    line = file.readline()  # read line by line from log
    if not line:  # if line is empty then return noline
        return (None, NOLINE)
    # every line is passed to function parse_line for getting values
    parse_result = parse_line(line, lineno)
    figure_id = parse_result[0]
    state = parse_result[1]
    if state == INITIALIZATION:
        # New figure created
        # Add figure ID to list
        figure_list.append(figure_id)  # figure id of block is added to list
        return (None, INITIALIZATION)
    # Check for block identification
    elif state == BLOCK_IDENTIFICATION:
        return (line, BLOCK_IDENTIFICATION)
    elif state == ENDING:
        # End of figure
        # Remove figure ID from list
        # Once ending of log file/data is encountered for that block, figure id
        # will be removed
        figure_list.remove(figure_id)
        return (None, ENDING)
    elif state == NOLINE:
        return (None, NOLINE)
    return (line, DATA)
# serialises use of the fixed log-file descriptor below (see run_scilab)
logfilefdrlock = RLock()
# fixed fd number inherited by scilab children, which write their log to it
LOGFILEFD = 123
def run_scilab(command, createlogfile=False):
    """Start scilab-adv-cli executing *command* (wrapped in
    SCILAB_START/SCILAB_END).

    With createlogfile=False, returns the Popen object.  Otherwise a
    log file is created and exposed to the child on the fixed
    descriptor LOGFILEFD, and (proc, log_name) is returned.
    """
    cmd = SCILAB_START + command + SCILAB_END
    print('running command', cmd)
    cmdarray = [SCI,
                "-nogui",
                "-noatomsautoload",
                "-nouserstartup",
                "-nb",
                "-nw",
                "-e", cmd]
    if not createlogfile:
        return subprocess.Popen(
            cmdarray,
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, start_new_session=True,
            universal_newlines=True)
    logfilefd, log_name = mkstemp(prefix=datetime.now().strftime(
        'scilab-log-%Y%m%d-'), suffix='.txt', dir=SESSIONDIR)
    # The child must see the log on a known fd number, so duplicate the
    # fresh descriptor onto LOGFILEFD; the lock serialises use of that
    # fixed number across concurrent requests.
    logfilefdrlock.acquire()
    if logfilefd != LOGFILEFD:
        os.dup2(logfilefd, LOGFILEFD)
        os.close(logfilefd)
    proc = subprocess.Popen(
        cmdarray,
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, start_new_session=True,
        universal_newlines=True, pass_fds=(LOGFILEFD, ))
    # parent's copy is not needed once the child has inherited the fd
    os.close(LOGFILEFD)
    logfilefdrlock.release()
    return (proc, log_name)
# compiled pattern of forbidden system-command names (from config),
# used by is_unsafe_script to reject uploaded scripts
SYSTEM_COMMANDS = re.compile(config.SYSTEM_COMMANDS)
def is_unsafe_script(filename):
    '''
    Return True (and delete the file) when *filename* contains system
    commands; False when it is safe to execute.
    '''
    with open(filename, 'r') as f:
        contents = f.read()
    if re.search(SYSTEM_COMMANDS, contents) is None:
        return False
    # Delete saved file if system commands are encountered in that file
    remove(filename)
    return True
@app.route('/uploadscript', methods=['POST'])
def uploadscript():
    '''
    Accept an uploaded Scilab script, reject it if it calls system
    commands, and start scilab executing it with the resulting
    workspace saved to a .dat file.  Responds with JSON carrying the
    script id and a status code (1 running, -1 unsafe, -2 no scilab).
    '''
    (script, sessiondir) = add_script()
    file = request.files['file']
    if not file:
        msg = "Upload Error\n"
        rv = {'msg': msg}
        return Response(json.dumps(rv), mimetype='application/json')
    fname = join(sessiondir, SCRIPT_FILES_FOLDER,
                 script.script_id + '_script.sce')
    file.save(fname)
    script.filename = fname
    # reject scripts that invoke system commands
    if is_unsafe_script(fname):
        msg = ("System calls are not allowed in script.\n"
               "Please edit the script again.\n")
        script.status = -1
        rv = {'status': script.status, 'msg': msg}
        return Response(json.dumps(rv), mimetype='application/json')
    wfname = join(sessiondir, SCRIPT_FILES_FOLDER,
                  script.script_id + '_script_workspace.dat')
    script.workspace_filename = wfname
    # run the script and save the resulting workspace for later diagrams
    command = "exec('" + fname + "');save('" + wfname + "');"
    try:
        script.proc = run_scilab(command)
    except FileNotFoundError:
        msg = "scilab not found. Follow the installation instructions"
        script.status = -2
        rv = {'status': script.status, 'msg': msg}
        return Response(json.dumps(rv), mimetype='application/json')
    msg = ''
    script.status = 1
    rv = {'script_id': script.script_id, 'status': script.status, 'msg': msg}
    return Response(json.dumps(rv), mimetype='application/json')
def clean_output(s):
    '''Normalise console output: drop stray control characters and ANSI
    CSI escape sequences, turn tabs into spaces, trim trailing blanks,
    squeeze repeated newlines and strip a leading newline.'''
    for pattern, repl in (
            (r'[\a\b\f\r\v]', r''),
            # https://en.wikipedia.org/wiki/ANSI_escape_code#CSI_sequences
            (r'\x1b\[[\x30-\x3f]*[\x20-\x2f]*[\x40-\x7e]', r''),
            (r'\t', r' '),
            (r' +(\n|$)', r'\n'),
            (r'\n+', r'\n'),
            (r'^\n', r''),
    ):
        s = re.sub(pattern, repl, s)
    return s
@app.route('/getscriptoutput', methods=['POST'])
def getscriptoutput():
    '''
    Collect the console output of a previously uploaded script and
    report its status as JSON (0 OK, -3 script error, -4 timeout,
    -5 stopped by a signal).
    '''
    script = get_script(get_script_id())
    if script is None:
        # when called with same script_id again or with incorrect script_id
        print('no script')
        msg = "no script"
        rv = {'msg': msg}
        return Response(json.dumps(rv), mimetype='application/json')
    proc = script.proc
    wfname = script.workspace_filename
    try:
        # output from scilab terminal is saved for checking error msg
        output = proc.communicate(timeout=30)[0]
        output = clean_output(output)
        if proc.returncode < 0:
            # negative return code: the process was killed by a signal
            msg = 'Script stopped'
            script.status = -5
            rv = {'status': script.status, 'msg': msg, 'output': output}
            return Response(json.dumps(rv), mimetype='application/json')
        # (an unreachable `return msg` that followed the return above has
        # been removed)
        # if error is encountered while execution of script file, then error
        # message is returned to the user
        if '!--error' in output:
            msg = ("Check result window for details.\n"
                   "Please edit the script and execute again.\n")
            script.status = -3
            rv = {'status': script.status, 'msg': msg, 'output': output}
            return Response(json.dumps(rv), mimetype='application/json')
        print('workspace for', script.script_id, 'saved in', wfname)
        msg = ''
        script.status = 0
        rv = {'script_id': script.script_id, 'status': script.status,
              'msg': msg, 'output': output, 'returncode': proc.returncode}
        return Response(json.dumps(rv), mimetype='application/json')
    except subprocess.TimeoutExpired:
        kill_script(script)
        msg = 'Timeout'
        script.status = -4
        rv = {'status': script.status, 'msg': msg}
        return Response(json.dumps(rv), mimetype='application/json')
@app.route('/stopscript', methods=['POST'])
def kill_script(script=None):
    '''Below route is called for stopping a running script file.

    Also called internally with an explicit *script* (e.g. on timeout).
    Removes the script file, terminates its scilab process and deletes
    the saved workspace.
    '''
    if script is None:
        script = get_script(get_script_id(), remove=True)
        if script is None:
            # when called with same script_id again or with incorrect script_id
            print('no script')
            return "error"
    print('kill_script: script=', script.__dict__)
    if script.filename is None:
        print('empty script')
    else:
        remove(script.filename)
        script.filename = None
    if script.proc is None:
        print('no scilab proc')
    else:
        # escalate from SIGTERM to SIGKILL if the process will not die
        if not kill_scilab_with(script.proc, signal.SIGTERM):
            kill_scilab_with(script.proc, signal.SIGKILL)
        script.proc = None
    if script.workspace_filename is None:
        print('empty workspace')
    else:
        remove(script.workspace_filename)
        script.workspace_filename = None
    return "ok"
@app.route('/uploadsci', methods=['POST'])
def uploadsci():
    '''
    Below route is called for uploading sci file which is required in sci-func
    block (called in JavaScript only_scifunc_code.js).  The file is
    rejected if it contains system commands or fails to execute.
    '''
    (__, __, scifile, sessiondir, __) = init_session()
    file = request.files['file']  # to get uploaded file
    # Check if the file is not null
    if not file:
        return "error"
    ts = datetime.now()
    # file name is created with timestamp
    fname = join(sessiondir, SCIFUNC_FILES_FOLDER,
                 str(ts) + secure_filename(file.filename))
    file.save(fname)  # file is saved in scifunc_files folder
    scifile.filename = fname
    scifile.flag_sci = True  # flag for file saved
    if is_unsafe_script(scifile.filename):
        msg = ("System calls are not allowed in .sci file!\n"
               "Please upload another .sci file!!")
        # flag for file saved will be set as False
        scifile.flag_sci = False
        return msg
    # scilab command is created to run that uploaded sci file which will be
    # used by sci-func block
    command = "exec('" + scifile.filename + "');"
    try:
        scifile.proc = run_scilab(command)
    except FileNotFoundError:
        return "scilab not found. Follow the installation instructions"
    try:
        # output from scilab terminal is saved for checking error msg
        out = scifile.proc.communicate(timeout=30)[0]
        if scifile.proc.returncode < 0:
            # negative return code: killed by a signal, treat as cancelled
            remove(scifile.filename)
            scifile.flag_sci = False
            msg = 'Cancelled'
            return msg
        # if error is encountered while execution of sci file, then error msg
        # is returned to user. in case no error is encountered, file uploaded
        # successful msg is sent to user.
        if '!--error' in out:
            error_index = out.index('!')
            msg = out[error_index:-9]
            # Delete saved file if error is encountered while executing sci
            # function in that file
            remove(scifile.filename)
            # flag for file saved will be set as False
            scifile.flag_sci = False
            return msg
        msg = "File is uploaded successfully!!"
        return msg
    except subprocess.TimeoutExpired:
        kill_scifile(scifile)
        msg = 'Timeout'
        return msg
@app.route('/stopscifile')
def kill_scifile(scifile=None):
    '''Below route is called for stopping a running sci file.

    Also called internally with an explicit *scifile* (e.g. on
    timeout): deletes the uploaded file and terminates its scilab
    process.
    '''
    if scifile is None:
        (__, __, scifile, __, __) = init_session()
    print('kill_scifile: scifile=', scifile.__dict__)
    if scifile.filename is None:
        print('empty scifile')
    else:
        remove(scifile.filename)
        scifile.filename = None
    if scifile.proc is None:
        print('no scilab proc')
    else:
        # escalate from SIGTERM to SIGKILL if the process will not die
        if not kill_scilab_with(scifile.proc, signal.SIGTERM):
            kill_scilab_with(scifile.proc, signal.SIGKILL)
        scifile.proc = None
    scifile.flag_sci = False
    return "ok"
@app.route('/requestfilename', methods=['POST'])
def sendfile():
    '''
    This route is used in index.html for checking condition
    if sci file is uploaded for sci-func block diagram imported directly using
    import (will confirm again).  Returns the expected result-image
    name, or '' when no sci file is set.
    '''
    (__, __, scifile, __, __) = init_session()
    if not scifile.flag_sci:
        scifile.file_image = ''
        scifile.flag_sci = False
    else:
        stem = splitext(basename(scifile.filename))[0]
        scifile.file_image = ('img_test%s.jpg' % stem)
    return scifile.file_image
def kill_scilab_with(proc, sgnl):
    '''
    function to kill a process group with a signal. wait for maximum 2 seconds
    for process to exit. return True on exit, False otherwise
    '''
    # already exited?
    if proc.poll() is not None:
        return True
    try:
        # children are started with start_new_session=True, so the pid
        # doubles as the process-group id
        os.killpg(proc.pid, sgnl)
    except OSError:
        print('could not kill', proc.pid, 'with signal', sgnl)
        return False
    except TypeError:
        print('could not kill invalid process with signal', sgnl)
        return True
    # poll for up to 20 * LOOK_DELAY (= 2 s) for the process to exit
    for i in range(0, 20):
        gevent.sleep(LOOK_DELAY)
        if proc.poll() is not None:
            return True
    return False
def get_request_id(key='id'):
    """Fetch a purely numeric value for *key* from request.args;
    return '' when it is missing or not all digits."""
    args = request.args
    if args is None:
        print('No args in request')
        return ''
    if key not in args:
        print('No', key, 'in request.args')
        return ''
    value = args[key]
    if re.fullmatch(r'[0-9]+', value):
        return value
    # truncate very long garbage before logging it
    if len(value) <= DISPLAY_LIMIT + 3:
        displayvalue = value
    else:
        displayvalue = value[:DISPLAY_LIMIT] + '...'
    print('Invalid value', displayvalue, 'for', key, 'in request.args')
    return ''
def get_script_id(key='script_id', default=''):
    """Fetch a purely numeric value for *key* from request.form; fall
    back to *default* when it is missing or not all digits."""
    form = request.form
    if form is None:
        print('No form in request')
        return default
    if key not in form:
        print('No', key, 'in request.form')
        return default
    value = form[key]
    if re.fullmatch(r'[0-9]+', value):
        return value
    # truncate very long garbage before logging it
    if len(value) <= DISPLAY_LIMIT + 3:
        displayvalue = value
    else:
        displayvalue = value[:DISPLAY_LIMIT] + '...'
    print('Invalid value', displayvalue, 'for', key, 'in request.form')
    return default
def kill_scilab(diagram=None):
    '''Define function to kill scilab(if still running) and remove files

    With no argument, the diagram is looked up (and detached) from the
    request's id parameter.
    '''
    if diagram is None:
        (diagram, __) = get_diagram(get_request_id(), True)
        if diagram is None:
            print('no diagram')
            return
    print('kill_scilab: diagram=', diagram.__dict__)
    if diagram.xcos_file_name is None:
        print('empty diagram')
    else:
        # Remove xcos file
        remove(diagram.xcos_file_name)
        diagram.xcos_file_name = None
    if diagram.scilab_proc is None:
        print('no scilab proc')
    else:
        # escalate from SIGTERM to SIGKILL if the process will not die
        if not kill_scilab_with(diagram.scilab_proc, signal.SIGTERM):
            kill_scilab_with(diagram.scilab_proc, signal.SIGKILL)
        diagram.scilab_proc = None
    if diagram.log_name is None:
        # NOTE(review): message says 'empty diagram' but this branch is
        # about the log file — looks like a copy-paste; confirm intent.
        print('empty diagram')
    else:
        # Remove log file
        remove(diagram.log_name)
        diagram.log_name = None
    # stop the tkscale greenlet and delete per-diagram value files
    stopDetailsThread(diagram)
@app.route('/start_scilab')
def start_scilab():
    '''
    Execute the uploaded xcos file using scilab (scilab-adv-cli),
    assembling the scilab command according to which workspace-related
    blocks the diagram uses (diagram.workspace_counter) and creating
    the log file that event_stream later reads.
    Returns '' on success or an error message string.
    '''
    (diagram, scifile) = get_diagram(get_request_id())
    if diagram is None:
        print('no diagram')
        return "error"
    # name of primary workspace file
    workspace_filename = diagram.workspace_filename
    # name of workspace file
    workspace = "workspace.dat"
    # do we need to load any saved workspace / sci file before simulating?
    loadfile = workspace_filename is not None or \
        (diagram.workspace_counter in (2, 3) and exists(workspace)) or \
        diagram.workspace_counter == 5
    command = ""
    if loadfile:
        # ignore import errors
        command += "errcatch(-1,'continue');"
    if workspace_filename is not None:
        # load every variable from the script workspace except those in
        # SCILAB_VARS (which would shadow scilab functions)
        command += "[__V1,__V2]=listvarinfile('%s');" % workspace_filename
        command += "__V3=['%s'];" % ("';'".join(SCILAB_VARS))
        command += "__V4=setdiff(__V1,__V3);"
        command += "__V5=''''+strcat(__V4,''',''')+'''';"
        command += "__V6='load(''%s'','+__V5+');';" % workspace_filename
        command += "execstr(__V6);"
        command += "clear __V1 __V2 __V3 __V4 __V5 __V6;"
    if diagram.workspace_counter in (2, 3) and exists(workspace):
        # 3 - for both TOWS_c and FROMWSB and also workspace dat file exist
        # In this case workspace is saved in format of dat file (Scilab way
        # of saying workpsace)
        # For FROMWSB block and also workspace dat file exist
        command += "load('" + workspace + "');"
    if diagram.workspace_counter == 5:
        command += "exec('" + scifile.filename + "');"
    command += "errcatch(-1,'stop');"
    # Scilab Commands for running of scilab based on existence of different
    # blocks in same diagram from workspace_counter's value
    # 1: Indicate TOWS_c exist
    # 2: Indicate FROMWSB exist
    # 3: Both TOWS_c and FROMWSB exist
    # 4: Indicate AFFICH_m exist (We dont want graphic window to open so
    # xs2jpg() command is removed)
    # 5: Indicate Sci-func block as it some time return image as output
    # rather than Sinks's log file.
    # 0/No-condition : For all other blocks
    command += "loadXcosLibs();"
    command += "importXcosDiagram('" + diagram.xcos_file_name + "');"
    command += "xcos_simulate(scs_m,4);"
    if diagram.workspace_counter == 4:
        # For AFFICH-m block
        pass
    elif diagram.workspace_counter == 5:
        # For Sci-Func block (Image are return as output in some cases)
        command += "xs2jpg(gcf(),'%s/%s');" % (IMAGEDIR, scifile.file_image)
    else:
        # For all other block
        command += "xs2jpg(gcf(),'%s/%s');" % (IMAGEDIR, 'img_test.jpg')
    if diagram.workspace_counter in (1, 2, 3) and exists(workspace):
        command += "deletefile('" + workspace + "');"
    if diagram.workspace_counter in (1, 3):
        command += "save('" + workspace + "');"
    try:
        diagram.scilab_proc, diagram.log_name = run_scilab(command, True)
    except FileNotFoundError:
        return "scilab not found. Follow the installation instructions"
    print('log_name=', diagram.log_name)
    # Start sending log to chart function for creating chart
    try:
        # For processes taking less than 10 seconds
        scilab_out = diagram.scilab_proc.communicate(timeout=4)[0]
        scilab_out = re.sub(r'^[ !\\-]*\n', r'',
                            scilab_out, flags=re.MULTILINE)
        print("=== Begin output from scilab console ===")
        print(scilab_out, end='')
        print("===== End output from scilab console ===")
        # Check for errors in Scilab
        if "Empty diagram" in scilab_out:
            return "Empty diagram"
        m = re.search(r'Fatal error: exception Failure\("([^"]*)"\)',
                      scilab_out)
        if m:
            msg = 'modelica error: ' + m.group(1)
            return msg
        if ("xcos_simulate: "
                "Error during block parameters update.") in scilab_out:
            return "Error in block parameter. Please check block parameters"
        if "xcosDiagramToScilab:" in scilab_out:
            return "Error in xcos diagram. Please check diagram"
        if "Cannot find scilab-bin" in scilab_out:
            return ("scilab has not been built. "
                    "Follow the installation instructions")
        if os.stat(diagram.log_name).st_size == 0:
            return "log file is empty"
    # For processes taking more than 10 seconds
    except subprocess.TimeoutExpired:
        pass
    if diagram.workspace_counter == 5:
        # clean up the sci-func artifacts a little later, once served
        Timer(15.0, delete_image, [scifile]).start()
        Timer(10.0, delete_scifile, [scifile]).start()
    return ""
@flask.stream_with_context
def event_stream():
    '''
    Generator streaming the scilab log to the browser's EventSource as
    server-sent events ("block", "log", "ERROR", "DONE") for live chart
    display; wrapped by a streaming route elsewhere in this file.
    '''
    (diagram, __) = get_diagram(get_request_id())
    if diagram is None:
        print('no diagram')
        yield "event: ERROR\ndata: no diagram\n\n"
        return
    # Open the log file
    if not isfile(diagram.log_name):
        print("log file does not exist")
        yield "event: ERROR\ndata: no log file found\n\n"
        return
    # wait until scilab either writes something or exits
    while os.stat(diagram.log_name).st_size == 0 and \
            diagram.scilab_proc.poll() is None:
        gevent.sleep(LOOK_DELAY)
    if os.stat(diagram.log_name).st_size == 0 and \
            diagram.scilab_proc.poll() is not None:
        print("log file is empty")
        yield "event: ERROR\ndata: log file is empty\n\n"
        return
    with open(diagram.log_name, "r") as log_file:
        # Start sending log
        lineno = 0
        line = line_and_state()
        while True:
            lineno += 1
            line.set(get_line_and_state(log_file, diagram.figure_list, lineno))
            if len(diagram.figure_list) == 0:
                break
            # Get the line and loop until the state is ENDING and figure_list
            # empty. Determine if we get block id and give it to chart.js
            if line.get_state() == BLOCK_IDENTIFICATION:
                yield "event: block\ndata: " + line.get_line() + "\n\n"
            elif line.get_state() != DATA:
                # nothing useful yet; wait for scilab to write more
                gevent.sleep(LOOK_DELAY)
            else:
                yield "event: log\ndata: " + line.get_line() + "\n\n"
    # Finished Sending Log
    kill_scilab(diagram)
    # Notify Client
    yield "event: DONE\ndata: None\n\n"
def delete_image(scifile):
    """Remove the rendered result image recorded on *scifile*, if any."""
    image = scifile.file_image
    if image != '':
        remove(IMAGEDIR + '/' + image)
        scifile.file_image = ''
def delete_scifile(scifile):
    """Remove the uploaded sci file recorded on *scifile*, if any."""
    fname = scifile.filename
    if fname != '':
        remove(fname)
        scifile.filename = ''
def AppendtoTKfile(diagram):
    '''Append, for every tkscale block, the samples that have become due
    since the last call to that block's per-diagram value file.'''
    starttime = diagram.tk_starttime
    for i in range(diagram.tk_count):
        fname = join(diagram.sessiondir, VALUES_FOLDER,
                     diagram.diagram_id + "_tk" + str(i + 1) + ".txt")
        # append data to the tk.txt
        with open(fname, 'a') as w:
            # emit one sample per elapsed delta interval until the file
            # has caught up with wall-clock time
            while time() > starttime + \
                    diagram.tk_times[i] + diagram.tk_deltatimes[i]:
                # update the time
                diagram.tk_times[i] += diagram.tk_deltatimes[i]
                w.write('%10.3E %10.3E\n' %
                        (diagram.tk_times[i], diagram.tk_values[i]))
def getDetailsThread(diagram):
    '''Greenlet body: keep appending fresh tkscale samples every 0.1 s
    for as long as the diagram's tkbool flag stays set.'''
    while True:
        if not diagram.tkbool:
            break
        AppendtoTKfile(diagram)
        gevent.sleep(0.1)
def stopDetailsThread(diagram):
    """Stop the tkscale greenlet and delete this diagram's value files."""
    diagram.tkbool = False  # stops the thread
    gevent.sleep(LOOK_DELAY)
    pattern = join(diagram.sessiondir, VALUES_FOLDER,
                   diagram.diagram_id + "_*")
    # deletes all files created under the 'diagram_id' name
    for path in glob.glob(pattern):
        remove(path)
@app.route('/upload', methods=['POST'])
def upload():
    '''Route that will process the file upload.

    Accepts a posted xcos (XML) diagram, rewrites it so it can run on
    the server (disconnects tkscale blocks and substitutes RFILE_f
    readers, converts split blocks to implicit form when implicit ports
    are present, patches missing port attributes for several block
    types, and records which workspace blocks exist), then saves it
    under the session's upload folder and returns the diagram id.
    '''
    # Get the file
    file = request.files['file']
    # Check if the file is not null
    if not file:
        return "error"
    # flags to check if both TOWS_c and FROMWSB are present
    flag1 = 0
    flag2 = 0
    # list1: line numbers of <SplitBlock> tags; list2: line numbers of
    # <ControlPort> tags (both 1-based, scanned textually below)
    list1 = []
    list2 = []
    # Make the filename safe, remove unsupported chars
    (diagram, scripts, scifile, sessiondir) = add_diagram()
    script = get_script(get_script_id(default=None), scripts=scripts)
    if script is not None:
        diagram.workspace_filename = script.workspace_filename
    # Save the file in xml extension and using it for further modification
    # by using xml parser
    temp_file_xml_name = diagram.diagram_id + ".xml"
    file.save(temp_file_xml_name)
    new_xml = minidom.parse(temp_file_xml_name)
    # to identify if we have to load or save to workspace or neither #0 if
    # neither TOWS_c or FROWSB found
    blocks = new_xml.getElementsByTagName("BasicBlock")
    tk_is_present = False
    pattern = re.compile(r"<SplitBlock")
    for i, line in enumerate(open(temp_file_xml_name)):
        for match in re.finditer(pattern, line):
            list1.append(i + 1)
    pattern1 = re.compile(r"<ControlPort")
    for i, line in enumerate(open(temp_file_xml_name)):
        for match in re.finditer(pattern1, line):
            list2.append(i + 1)
    # count implicit input ports; any hit means the diagram uses
    # implicit (electrical-style) links
    pattern2 = re.compile(r"<ImplicitInputPort")
    count1 = 0
    for i, line in enumerate(open(temp_file_xml_name)):
        for match in re.finditer(pattern2, line):
            count1 += 1
    if count1 >= 1:
        # Implicit ports present: convert SPLIT_f blocks and their
        # ports/links to the implicit variants, save, and return early.
        # splitline: line numbers of split blocks whose ControlPort
        # appears 3 lines below the SplitBlock tag
        splitline = []
        count = 0
        for i in range(len(list1)):
            for j in range(len(list2)):
                if list2[j] == list1[i] + 3:
                    count += 1
                    splitline.append(list1[i])
        blocksplit = new_xml.getElementsByTagName("SplitBlock")
        block_ids = []  # this stores the id of split blocks
        for block in blocksplit:
            if block.getAttribute("style") == "SPLIT_f":
                block_ids.append(int(block.getAttribute("id")))
        # compsplit: indexes into list1/block_ids of the matched splits
        compsplit = []
        for i in range(len(splitline)):
            for j in range(len(list1)):
                if splitline[i] == list1[j]:
                    compsplit.append(j)
        # finalsplit: the XML ids of the split blocks to convert
        finalsplit = []
        for i in range(len(compsplit)):
            finalsplit.append(block_ids[compsplit[i]])
        blockcontrol = new_xml.getElementsByTagName("ControlPort")
        for block in blockcontrol:
            for i in range(len(finalsplit)):
                # match the lines with the parent of our spliblocks which
                # we need to change
                if block.getAttribute("parent") == str(finalsplit[i]):
                    block.setAttribute('id', '-1')
        blockcommand = new_xml.getElementsByTagName("CommandPort")
        for block in blockcommand:
            for i in range(len(finalsplit)):
                if block.getAttribute("parent") == str(finalsplit[i]):
                    block.setAttribute('id', '-1')
        # here we take the ids of command controllink which we will search
        # and change
        # NOTE(review): the +4/+5 (and later +1..+6) offsets assume the
        # exporter assigns sequential ids to a split block's ports and
        # links -- confirm against the xcos file format
        finalchangeid = []
        for i in range(len(finalsplit)):
            finalchangeid.append(finalsplit[i] + 4)
            finalchangeid.append(finalsplit[i] + 5)
        # here we save the contents
        with open(temp_file_xml_name, 'w') as f:
            f.write(new_xml.toxml())
        # textual pass: rename the matched CommandControlLink tags to
        # ImplicitLink
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                if "<CommandControlLink id=" in word:
                    temp_word = ""
                    for i in range(len(finalchangeid)):
                        fcid = str(finalchangeid[i])
                        srch = '<CommandControlLink id="' + fcid + '"'
                        if srch in word:
                            rplc = '<ImplicitLink id="' + fcid + '"'
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
        # textual pass: in front of each converted ControlPort insert the
        # implicit input/output ports for that split block
        with open(temp_file_xml_name, "r") as in_file:
            buf = in_file.readlines()
        # length=len(finalsplit)
        # return finalsplit
        with open(temp_file_xml_name, "w") as out_file:
            for line in buf:
                for i in range(len(finalsplit)):
                    fs = str(finalsplit[i])
                    srch = ('<ControlPort connectable="0" '
                            'dataType="UNKNOW_TYPE" id="-1" ordering="1" '
                            'parent="' + fs + '"')
                    if srch in line:
                        line = (
                            '\t <ImplicitInputPort connectable="0" '
                            'dataType="UNKNOW_TYPE" '
                            'id="' + str(finalsplit[i] + 1) + '" '
                            'ordering="1" parent="' + fs + '" '
                            'style="ImplicitInputPort">\n'
                            '\t\t<mxGeometry as="geometry" height="10" '
                            'relative="1" width="10" y="0.5000">\n'
                            '\t\t</mxGeometry>\n'
                            '\t </ImplicitInputPort>\n'
                            '\t <ImplicitOutputPort connectable="0" '
                            'dataType="UNKNOW_TYPE" '
                            'id="' + str(finalsplit[i] + 2) + '" '
                            'ordering="1" parent="' + fs + '" '
                            'style="ImplicitOutputPort">\n'
                            '\t\t<mxGeometry as="geometry" height="10" '
                            'relative="1" width="10" y="0.5000">\n'
                            '\t\t</mxGeometry>\n'
                            '\t </ImplicitOutputPort>\n'
                            '\t <ImplicitOutputPort connectable="0" '
                            'dataType="UNKNOW_TYPE" '
                            'id="' + str(finalsplit[i] + 3) + '" '
                            'ordering="1" parent="' + fs + '" '
                            'style="ImplicitOutputPort">\n'
                            '\t\t<mxGeometry as="geometry" height="10" '
                            'relative="1" width="10" y="0.5000">\n'
                            '\t\t</mxGeometry>\n'
                            '\t </ImplicitOutputPort>\n' + line)
                out_file.write(line)
        # close the renamed ImplicitLink elements: record the line just
        # above each matching tag, then rewrite it as a closing tag
        list3 = []
        implitdetect = []
        # return temp_file_xml_name
        for i in range(len(finalsplit)):
            implitdetect.append(finalsplit[i] + 5)
            implitdetect.append(finalsplit[i] + 6)
        for i in range(len(implitdetect)):
            pattern3 = re.compile(
                "<ImplicitLink id=\"" + str(implitdetect[i]) + "\"")
            # NOTE(review): the loop variable i is reused by this inner
            # enumerate, clobbering the outer index -- confirm intended
            for i, line in enumerate(open(temp_file_xml_name)):
                for match in re.finditer(pattern3, line):
                    list3.append(i - 1)
        with open(temp_file_xml_name, 'r+') as f:
            data = f.read().splitlines()
            replace = list3
            for i in replace:
                data[i] = '\t </ImplicitLink>'
            f.seek(0)
            f.write('\n'.join(data))
            f.truncate()
        # move into the session's upload folder with the xcos extension
        fname = join(sessiondir, UPLOAD_FOLDER,
                     splitext(temp_file_xml_name)[0] + ".xcos")
        os.rename(temp_file_xml_name, fname)
        diagram.xcos_file_name = fname
        return diagram.diagram_id
    # List to contain all affich blocks
    blockaffich = new_xml.getElementsByTagName("AfficheBlock")
    for block in blockaffich:
        if block.getAttribute("interfaceFunctionName") == "AFFICH_m":
            diagram.workspace_counter = 4
    # List to contain all the block IDs of tkscales so that we can create
    # read blocks with these IDs
    block_id = []
    for block in blocks:
        if block.getAttribute("interfaceFunctionName") == "TKSCALE":
            block_id.append(block.getAttribute("id"))
            block.setAttribute('id', '-1')
            tk_is_present = True
            # Changed the ID of tkscales to -1 so that virtually the
            # tkscale blocks get disconnected from diagram at the backend
        # Taking workspace_counter 1 for TOWS_c and 2 for FROMWSB
        if block.getAttribute(
                "interfaceFunctionName") == "scifunc_block_m":
            diagram.workspace_counter = 5
        if block.getAttribute("interfaceFunctionName") == "TOWS_c":
            diagram.workspace_counter = 1
            flag1 = 1
        if block.getAttribute("interfaceFunctionName") == "FROMWSB":
            diagram.workspace_counter = 2
            flag2 = 1
    if flag1 and flag2:
        # Both TOWS_c and FROMWSB are present
        diagram.workspace_counter = 3
    # Hardcoded the real time scaling to 1.0 (i.e., no scaling of time
    # occurs) only if tkscale is present
    if tk_is_present:
        for dia in new_xml.getElementsByTagName("XcosDiagram"):
            dia.setAttribute('realTimeScaling', '1.0')
    # Save the changes made by parser
    with open(temp_file_xml_name, 'w') as f:
        f.write(new_xml.toxml())
    # In front of block tkscale printing the block corresponding to read
    # function and assigning corresponding values
    # (fileinput with inplace=1 redirects print() into the file)
    for line in fileinput.input(temp_file_xml_name, inplace=1):
        if 'interfaceFunctionName=\"TKSCALE\"' in line:
            # change the block ID
            i = diagram.tk_count
            print('<BasicBlock blockType="d" id="', block_id[i], '" '
                  'interfaceFunctionName="RFILE_f" parent="1" '
                  'simulationFunctionName="readf" '
                  'simulationFunctionType="DEFAULT" style="RFILE_f">',
                  sep='')
            print('<ScilabString as="exprs" height="5" width="1">')
            print('<data column="0" line="0" value="1"/>')
            # Value equal to 1 implies take readings from first column in
            # the file
            print('<data column="0" line="1" value="2"/>')
            # Path to the file from which read block obtains the values
            fname = join(diagram.sessiondir, VALUES_FOLDER,
                         diagram.diagram_id + "_tk" + str(i + 1) + ".txt")
            print('<data column="0" line="2" value="', fname, '"/>',
                  sep='')
            print('<data column="0" line="3" value="(2(e10.3,1x))"/>')
            # (2(e10.3,1x)) The format in which numbers are written
            # Two columns with base 10 and 3 digits after decimal and 1x
            # represents 1 unit space between two columns.
            print('<data column="0" line="4" value="2"/>')
            print('</ScilabString>')
            print('<ScilabDouble as="realParameters" '
                  'height="0" width="0"/>')
            print('<ScilabDouble as="integerParameters" '
                  'height="105" width="1">')
            diagram.tk_count += 1
            # The remaining part of the block is read from the
            # Read_Content.txt file and written to the xml file
            with open(READCONTENTFILE, "r") as read_file:
                for line_content in read_file:
                    print(line_content, end='')
        print(line, end='')
    # To resolve port issue coming in xcos file for following blocks :
    # INTMUL,MATBKSL,MATDET,MATDIAG,MATDIV and CURV_F
    # ISSUE is missing of dataColumns and dataLines in port tag
    block_idint = []
    block_idmatblsk = []
    block_det = []
    block_diag = []
    block_div = []
    block_curl = []
    for block in blocks:
        # to find INTMUL in blocks and extract its block id and save in
        # block_idint
        if block.getAttribute("style") == "INTMUL":
            block_idint.append(int(block.getAttribute("id")))
        # to find MATBKSL in blocks and extract its block id and save in
        # block_idmatblsk
        if block.getAttribute("style") == "MATBKSL":
            block_idmatblsk.append(int(block.getAttribute("id")))
        # to find MATDET in blocks and extract its block id and save in
        # block_det
        if block.getAttribute("style") == "MATDET":
            block_det.append(int(block.getAttribute("id")))
        # to find MATDIAG in blocks and extract its block id and save in
        # block_diag
        if block.getAttribute("style") == "MATDIAG":
            block_diag.append(int(block.getAttribute("id")))
        # to find MATDIV in blocks and extract its block id and save in
        # block_div
        if block.getAttribute("style") == "MATDIV":
            block_div.append(int(block.getAttribute("id")))
        # to find CURV_f in blocks and extract its block id and save in
        # block_curl
        if block.getAttribute("style") == "CURV_f":
            block_curl.append(int(block.getAttribute("id")))
    # INTMUL: add dataColumns/dataLines to its input and output ports
    if len(block_idint) >= 1:
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                # check for existance of "ExplicitInputPort" in line
                srch = '<ExplicitInputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitInputPort dataColumns="-3" '
                            'dataLines="-2" dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_idint)):
                        # if ordering= 2 and parent id= INTMUL block id
                        srch2 = (
                            'ordering="2" '
                            'parent="' + str(block_idint[i]) + '"')
                        if srch2 in word:
                            # replace word and add datacolumns and
                            # datalines
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                # check for existance of "ExplicitOutputPort" in line
                srch = '<ExplicitOutputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitOutputPort dataColumns="-3" '
                            'dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_idint)):
                        # if parent id= INTMUL block id
                        srch2 = 'parent="' + str(block_idint[i]) + '"'
                        if srch2 in word:
                            # replace word and add datacolumns and
                            # datalines
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
    # MATBKSL: same treatment with its own dimension attributes
    if len(block_idmatblsk) >= 1:
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitInputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitInputPort dataColumns="-3" '
                            'dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_idmatblsk)):
                        srch2 = (
                            'ordering="2" '
                            'parent="' + str(block_idmatblsk[i]) + '"')
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitOutputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitOutputPort dataColumns="-3" '
                            'dataLines="-2" dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_idmatblsk)):
                        srch2 = 'parent="' + str(block_idmatblsk[i]) + '"'
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
    # MATDET: input gets dataColumns=-1; output is fixed 1x1
    if len(block_det) >= 1:
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitInputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitInputPort dataColumns="-1" '
                            'dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_det)):
                        srch2 = (
                            'ordering="2" '
                            'parent="' + str(block_det[i]) + '"')
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitOutputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitOutputPort dataColumns="1" '
                            'dataLines="1" dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_det)):
                        srch2 = 'parent="' + str(block_det[i]) + '"'
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
    # CURV_f: only the output port needs fixed 1x1 dimensions
    if len(block_curl) >= 1:
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitOutputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitOutputPort dataColumns="1" '
                            'dataLines="1" dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_curl)):
                        srch2 = 'parent="' + str(block_curl[i]) + '"'
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
    # MATDIAG: column-vector input, square-matrix output
    if len(block_diag) >= 1:
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitInputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitInputPort dataColumns="1" '
                            'dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_diag)):
                        srch2 = (
                            'ordering="2" '
                            'parent="' + str(block_diag[i]) + '"')
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitOutputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitOutputPort dataColumns="-1" '
                            'dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_diag)):
                        srch2 = 'parent="' + str(block_diag[i]) + '"'
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
    # MATDIV: both input ports (ordering 1 and 2) need dimensions
    if len(block_div) >= 1:
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitInputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitInputPort dataColumns="-3" '
                            'dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_div)):
                        srch2 = (
                            'ordering="1" '
                            'parent="' + str(block_div[i]) + '"')
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
        with open(temp_file_xml_name, "r") as f:
            newline = []
            i = 0
            for word in f.readlines():
                srch = '<ExplicitInputPort dataType="REAL_MATRIX"'
                if srch in word:
                    rplc = ('<ExplicitInputPort dataColumns="-3" '
                            'dataLines="-2" dataType="REAL_MATRIX"')
                    temp_word = ""
                    for i in range(len(block_div)):
                        srch2 = (
                            'ordering="2" '
                            'parent="' + str(block_div[i]) + '"')
                        if srch2 in word:
                            temp_word = word.replace(srch, rplc)
                        i += 1
                    if temp_word != "":
                        newline.append(temp_word)
                    else:
                        newline.append(word)
                else:
                    newline.append(word)
        with open(temp_file_xml_name, "w") as f:
            for line in newline:
                f.writelines(line)
    # Changing the file extension from xml to xcos
    fname = join(sessiondir, UPLOAD_FOLDER,
                 splitext(temp_file_xml_name)[0] + ".xcos")
    # Move the xcos file to uploads directory
    os.rename(temp_file_xml_name, fname)
    diagram.xcos_file_name = fname
    return diagram.diagram_id
@app.route('/filenames.php', methods=['POST'])
def filenames():
    '''Return, as JSON, the list of files under BASEDIR + url.

    The posted url must be an absolute directory path with a trailing
    slash; the '.' test also blocks any '..' traversal component.
    '''
    url = request.form['url']
    bad = (url == '' or '.' in url or
           not url.startswith('/') or not url.endswith('/'))
    if bad:
        return "error"
    entries = os.listdir(BASEDIR + url)
    listing = [url + name for name in entries]
    return Response(json.dumps(listing), mimetype='application/json')
@app.route('/UpdateTKfile', methods=['POST'])
def UpdateTKfile():
    '''Receive tkscale slider updates from the client.

    The posted file contains "Start" (begin sampling), "Stop" (end
    sampling) or a comma-separated list of "<delta> <value>" pairs,
    one entry per tkscale block.
    '''
    (diagram, __) = get_diagram(get_request_id())
    if diagram is None:
        print('no diagram')
        return "error"
    # the update payload arrives as an uploaded file
    uploaded = request.files['file']
    if not uploaded:
        return "error"
    payload = uploaded.read().decode()
    if payload == "Start":
        # begin a fresh sampling session: reset per-block state,
        # truncate the value files, then launch the sampling thread
        diagram.tkbool = True
        diagram.tk_starttime = time()
        count = diagram.tk_count
        diagram.tk_deltatimes = [0.1] * count
        diagram.tk_values = [0] * count
        diagram.tk_times = [0] * count
        for idx in range(count):
            fname = join(diagram.sessiondir, VALUES_FOLDER,
                         diagram.diagram_id + "_tk" + str(idx + 1) + ".txt")
            # create/truncate an empty value file for this tkscale
            open(fname, "w").close()
        Timer(0.1, getDetailsThread, [diagram]).start()
    elif payload == "Stop":
        # end of the session: stop the sampling thread and clean up
        stopDetailsThread(diagram)
    else:
        # "<delta> <value>,<delta> <value>,..." -- one entry per tkscale
        entries = payload.split(',')
        for idx, entry in enumerate(entries[:diagram.tk_count]):
            parts = entry.split(' ')
            if len(parts) == 1 or parts[1] == '':
                continue  # malformed entry: keep the previous values
            diagram.tk_deltatimes[idx] = float(parts[0])
            diagram.tk_values[idx] = float(parts[1])
    return ""
@app.route('/downloadfile', methods=['POST'])
def DownloadFile():
    '''route for download of binary and audio'''
    fname = request.form['path']
    # refuse empty names, hidden files and anything containing a path
    # separator (no directory traversal)
    if fname == '' or fname[0] == '.' or '/' in fname:
        print('downloadfile=', fname)
        return "error"
    # audio files get the audio mimetype, everything else is binary
    mimetype = 'audio/basic' if "audio" in fname else 'application/octet-stream'
    return flask.send_from_directory(
        SESSIONDIR, fname, as_attachment=True, mimetype=mimetype)
@app.route('/deletefile', methods=['POST'])
def DeleteFile():
    '''route for deletion of binary and audio file'''
    fname = request.form['path']
    # refuse empty names, hidden files and anything containing a path
    # separator (no directory traversal)
    if fname == '' or fname[0] == '.' or '/' in fname:
        print('deletefile=', fname)
        return "error"
    remove(fname)  # delete the file
    return "0"
@app.route('/SendLog')
def sse_request():
    '''Stream the simulation log to the client as server-sent events.'''
    stream = event_stream()
    return Response(stream, mimetype='text/event-stream')
@app.route('/<path:path>')
def static_file(path):
    '''Fallback route: serve any unmatched path as a static file.'''
    response = app.send_static_file(path)
    return response
@app.route('/stop')
def stop():
    '''route to kill scilab on closing of chart'''
    # NOTE(review): called without a diagram argument, unlike the
    # event-stream path which calls kill_scilab(diagram) -- presumably
    # kill_scilab has a suitable default; confirm its signature
    kill_scilab()
    return "done"
@app.route('/endBlock/<fig_id>')
def endBlock(fig_id):
    '''Route to end blocks with no Ending parameter.

    Removes *fig_id* from the diagram's list of live figures so the
    event stream can finish.  Returns "done" on success, "error" when
    no diagram is associated with the request.
    '''
    (diagram, __) = get_diagram(get_request_id())
    if diagram is None:
        print('no diagram')
        # Bug fix: a Flask view must return a response object/string; the
        # previous bare ``return`` (None) raised a TypeError in Flask.
        return "error"
    try:
        diagram.figure_list.remove(fig_id)
    except ValueError:
        # figure already removed (e.g. duplicate end notification)
        pass
    return "done"
@app.route('/')
def page():
    '''Render the main page with no preloaded example.'''
    context = {
        'example_content': '',
        'example_filename': '',
        'prerequisite_content': '',
        'prerequisite_filename': '',
    }
    return render_template('index.html', **context)
@app.route('/getOutput', methods=['POST'])
def run_scilab_func_request():
    '''Evaluate a transfer function (num/den) via Scilab and return the
    controllable-form matrices as a JSON string.

    Sample input: num "1+s", den "s^2-5*s+1".
    Sample output: "[[0], [1], [0], [0]]" (with commas inserted).
    '''
    (__, __, scifile, sessiondir, __) = init_session()
    file_name = join(sessiondir, "cont_frm_value.txt")
    num = request.form['num']
    den = request.form['den']
    # pick the polynomial variable: discrete ('z') when either
    # expression mentions it, continuous ('s') otherwise
    p = 'z' if ('z' in num or 'z' in den) else 's'
    # NOTE(review): num/den are interpolated into the scilab command
    # unescaped -- untrusted input can inject arbitrary scilab code
    command = ("%s=poly(0, '%s');" % (p, p) +
               "exec('%s');" % CONT_FRM_WRITE +
               "calculate_cont_frm(%s,%s,'%s');" % (num, den, file_name))
    try:
        scifile.proc = run_scilab(command)
    except FileNotFoundError:
        return "scilab not found. Follow the installation instructions"
    scifile.proc.communicate()
    # scilab writes e.g. [[0], [1], [0], [0]]; insert commas between the
    # inner lists so the client can parse the string
    if isfile(file_name):
        with open(file_name) as f:
            raw = f.read()
        list_value = raw.replace('][', '],[')
        remove(file_name)
    else:
        list_value = "Error"
    return jsonify(list_value)
# App route for getting scilab expression output for Expression Block
@app.route('/getExpressionOutput', methods=['POST'])
def run_scilab_func_expr_request():
    '''Evaluate a scilab expression for the Expression Block and return
    its parameters as JSON.

    Sample input:  head "%foo(u1,u2)", exx "(u1>0)*sin(u2)^2".
    Sample output lines (one per key of the returned dict):
        ok, ok1, ipar, rpar, nz
    where "ok" is true or a scilab error message.
    '''
    (__, __, scifile, sessiondir, __) = init_session()
    file_name = join(sessiondir, "expr_set_value.txt")
    head = request.form['head']
    exx = request.form['exx']
    command = "exec('" + COPIED_EXPRESSION_SCI_FRM_SCILAB + \
        "');exec('" + EXP_SCI_FUNC_WRITE + \
        "');callFunctionAcctoMethod('" + file_name + \
        "','" + head + "','" + exx + "');"
    try:
        scifile.proc = run_scilab(command)
    except FileNotFoundError:
        return "scilab not found. Follow the installation instructions"
    scifile.proc.communicate()
    # keys for the values scilab writes, one per output line
    var_array = ["ok", "ok1", "ipar", "rpar", "nz"]
    exprs_value = {}
    # Robustness fix: guard with isfile like /getOutput does -- scilab
    # may fail without producing the file, which previously raised
    # FileNotFoundError here.
    if isfile(file_name):
        with open(file_name) as f:
            data = f.read()  # Read the data into a variable
        valuesfromfile = data.splitlines()
        # zip stops at the shorter sequence, so extra output lines no
        # longer raise IndexError on var_array
        for key, value in zip(var_array, valuesfromfile):
            exprs_value[key] = value
        remove(file_name)
    if not exprs_value:
        exprs_value["ok"] = "Enter a valid scilab expression : " + \
            "custom made message"
    return jsonify(exprs_value)
# example page start ###################
@app.route('/example')
def example_page():
    '''Render the example browser, preloaded with all categories.'''
    try:
        cursor = connection()
        cursor.execute(config.QUERY_CATEGORY)
        return render_template('example.html', data=cursor.fetchall())
    except Exception as e:
        return str(e)
@app.route('/get_book', methods=['GET', 'POST'])
def ajax_get_book():
    '''AJAX endpoint: books of the requested category, as JSON.'''
    cat_id = request.args.get('catid')
    try:
        cursor = connection()
        cursor.execute(config.QUERY_BOOK, [cat_id])
        return jsonify(cursor.fetchall())
    except Exception as e:
        return str(e)
@app.route('/get_chapter', methods=['GET', 'POST'])
def ajax_get_chapter():
    '''AJAX endpoint: chapters of the requested book, as JSON.'''
    book_id = request.args.get('bookid')
    try:
        cursor = connection()
        cursor.execute(config.QUERY_CHAPTER, [book_id])
        return jsonify(cursor.fetchall())
    except Exception as e:
        return str(e)
@app.route('/get_example', methods=['GET', 'POST'])
def ajax_get_example():
    '''AJAX endpoint: examples of the requested chapter, as JSON.'''
    chapter_id = request.args.get('chapterid')
    try:
        cursor = connection()
        cursor.execute(config.QUERY_EXAMPLE, [chapter_id])
        return jsonify(cursor.fetchall())
    except Exception as e:
        return str(e)
@app.route('/get_example_file', methods=['GET', 'POST'])
def ajax_get_example_file():
    '''AJAX endpoint: files of the requested example, as JSON.'''
    example_id = request.args.get('exampleid')
    try:
        cursor = connection()
        cursor.execute(config.QUERY_EXAMPLE_FILE, [example_id])
        return jsonify(cursor.fetchall())
    except Exception as e:
        return str(e)
def clean_text(s):
    """Strip line breaks (and the spaces/tabs around them) from *s*,
    joining everything into a single line."""
    line_break = re.compile(r'[ \t]*[\r\n]+[ \t]*')
    return line_break.sub('', s)
def get_example_file(example_file_id):
    '''Return (text, filename, example_id) for an example xcos file.

    Looks the file up in the database, preferring a local copy under
    XCOSSOURCEDIR and falling back to downloading it from scilab.in.
    '''
    filename = 'example.xcos'
    filepath = ''
    # Bug fix: example_id was previously unbound (NameError at the
    # return statements) when the query matched no rows.  Initialize it
    # like get_prerequisite_file does.
    example_id = None
    cur = connection()
    cur.execute(config.QUERY_EXAMPLE_FILE_BY_ID, [example_file_id])
    # keep only the last row (query is expected to yield at most one)
    for (filename, filepath, example_id) in cur.fetchall():
        pass
    if XCOSSOURCEDIR != '' and filepath != '':
        try:
            print('reading', filename, 'from', filepath)
            with open(join(XCOSSOURCEDIR, filepath), 'r') as f:
                text = clean_text(f.read())
            return (text, filename, example_id)
        except Exception as e:
            # fall through to the download path on any local read error
            print('Exception:', str(e))
    scilab_url = "https://scilab.in/download/file/" + example_file_id
    print('downloading', scilab_url)
    r = requests.get(scilab_url)
    text = clean_text(r.text)
    return (text, filename, example_id)
def clean_text_2(s):
    '''Normalize whitespace and escape *s* for embedding on one line:
    control whitespace is dropped, trailing blanks/newlines trimmed,
    backslashes doubled and newlines encoded as literal "\\n".'''
    transforms = [
        (r'[\a\b\f\r\v]', r''),  # drop control whitespace
        (r'\t', r' '),           # tabs -> spaces
        (r' +(\n|$)', r'\n'),    # strip trailing blanks on each line
        (r'\n+$', r''),          # strip trailing newlines
        (r'\\', r'\\\\'),        # double each backslash
        (r'\n', r'\\n'),         # encode newlines as literal \n
    ]
    for pattern, repl in transforms:
        s = re.sub(pattern, repl, s)
    return s
def get_prerequisite_file(example_id):
    '''Return (text, filename) of the prerequisite file for *example_id*.

    Looks the prerequisite up in the database, preferring a local copy
    under XCOSSOURCEDIR and falling back to downloading it from
    scilab.in.  Returns empty text when no prerequisite exists.
    '''
    filename = ''
    filepath = ''
    prerequisite_file_id = None
    cursor = connection()
    cursor.execute(config.QUERY_PREREQUISITE_FILE_BY_EXAMPLE_ID, [example_id])
    # keep only the last row (query is expected to yield at most one)
    for (filename, filepath, prerequisite_file_id) in cursor.fetchall():
        pass
    if prerequisite_file_id is None:
        return ('', filename)
    if XCOSSOURCEDIR != '' and filepath != '':
        try:
            print('reading', filename, 'from', filepath)
            with open(join(XCOSSOURCEDIR, filepath), 'r') as f:
                return (clean_text_2(f.read()), filename)
        except Exception as e:
            # fall through to the download path on any local read error
            print('Exception:', str(e))
    scilab_url = "https://scilab.in/download/file/" + str(prerequisite_file_id)
    print('downloading', scilab_url)
    response = requests.get(scilab_url)
    return (clean_text_2(response.text), filename)
@app.route('/example_file', methods=['GET', 'POST'])
def download_example_file():
    '''Send the selected example file to the browser as an attachment.'''
    example_file_id = request.args.get('efid')
    (content, fname, __) = get_example_file(example_file_id)
    headers = {'Content-Disposition': 'attachment; filename="%s"' % fname}
    return Response(content,
                    mimetype='application/octet-stream',
                    headers=headers)
@app.route('/open', methods=['GET', 'POST'])
def open_example_file():
    '''Render the main page with the chosen example (and its
    prerequisite, if any) preloaded.'''
    example_file_id = request.args.get('efid')
    (ex_content, ex_filename, example_id) = get_example_file(example_file_id)
    (pre_content, pre_filename) = get_prerequisite_file(str(example_id))
    return render_template('index.html',
                           example_content=ex_content,
                           example_filename=ex_filename,
                           prerequisite_content=pre_content,
                           prerequisite_filename=pre_filename)
# example page end #################
if __name__ == '__main__':
    print('starting')
    # all relative file operations happen inside the session directory
    os.chdir(SESSIONDIR)
    # bind the gevent WSGI server to the host/port from config
    server = WSGIServer(
        (config.HTTP_SERVER_HOST, config.HTTP_SERVER_PORT), app)
    print('listening:', server)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('exiting')
| en | 0.880473 | #!/usr/bin/python3 # change directory before using relative paths # These are the extension that we are accepting to be uploaded # This is the path to the upload directory and values directory # to store xcos file # to store files related to tkscale block # to store uploaded sci files for sci-func block # to store uploaded sci files for sci-func block # Delay time to look for new line (in s) # States of the line # to indicate initialization of block in log file is encountered # to indicate ending of log file data for that block is encountered # to indicate data is proper and can be read # to indicate there is no line in log file further # to indicate block id is present # Scilab dir # display limit for long strings # handle scilab startup # session dir # store uploaded filename # type of uploaded file # workspace from script # tk count # store log name # is thread running? # in memory values # List to store figure IDs from log_name Variables used in sci-func block Class to store the line and its state (Used in reading data from log file) # initial line to none(Nothing is present) # initial state to NOLINE ie # to set line # to set state Function to parse the line Returns tuple of figure ID and state state = INITIALIZATION if new figure is created ENDING if current fig end DATA otherwise # Each line is split to read condition # The below condition determines the block ID # to get block id (Which is explicitly added by us while writing # into log in scilab source code) # New figure created # Get fig id # to extract figure ids (sometime multiple sinks can be used in one # diagram to differentiate that) # Current figure end # Get fig id # Current figure coordinates Function to get a new line from file This also parses the line and appends new figures to figure List # read line by line from log # if line is empty then return noline # every line is passed to function parse_line for getting values # New figure created # Add figure ID to list # figure id 
of block is added to list # Check for block identification # End of figure # Remove figure ID from list # Once ending of log file/data is encountered for that block, figure id # will be removed Read file and check for system commands and return error if file contains system commands # Delete saved file if system commands are encountered in that file Below route is called for uploading script file. handle whitespace and sequences in output # https://en.wikipedia.org/wiki/ANSI_escape_code#CSI_sequences Below route is called for uploading script file. # when called with same script_id again or with incorrect script_id # output from scilab terminal is saved for checking error msg # if error is encountered while execution of script file, then error # message is returned to the user Below route is called for stopping a running script file. # when called with same script_id again or with incorrect script_id Below route is called for uploading sci file which is required in sci-func block (called in Javscript only_scifunc_code.js) # to get uploaded file # Check if the file is not null # file name is created with timestamp # file is saved in scifunc_files folder # flag for file saved # flag for file saved will be set as False # scilab command is created to run that uploaded sci file which will be # used by sci-func block # output from scilab terminal is saved for checking error msg # if error is encountered while execution of sci file, then error msg # is returned to user. in case no error is encountered, file uploaded # successful msg is sent to user. # Delete saved file if error is encountered while executing sci # function in that file # flag for file saved will be set as False Below route is called for stopping a running sci file. This route is used in index.html for checking condition if sci file is uploaded for sci-func block diagram imported directly using import (will confirm again) function to kill a process group with a signal. 
wait for maximum 2 seconds for process to exit. return True on exit, False otherwise Define function to kill scilab(if still running) and remove files # Remove xcos file # Remove log file function to execute xcos file using scilab (scilab-adv-cli), access log file written by scilab This function is called in app route 'start_scilab' below # name of primary workspace file # name of workspace file # ignore import errors '+strcat(__V4, )+ # 3 - for both TOWS_c and FROMWSB and also workspace dat file exist # In this case workspace is saved in format of dat file (Scilab way # of saying workpsace) # For FROMWSB block and also workspace dat file exist # Scilab Commands for running of scilab based on existence of different # blocks in same diagram from workspace_counter's value # 1: Indicate TOWS_c exist # 2: Indicate FROMWSB exist # 3: Both TOWS_c and FROMWSB exist # 4: Indicate AFFICH_m exist (We dont want graphic window to open so # xs2jpg() command is removed) # 5: Indicate Sci-func block as it some time return image as output # rather than Sinks's log file. # 0/No-condition : For all other blocks # For AFFICH-m block # For Sci-Func block (Image are return as output in some cases) # For all other block # Start sending log to chart function for creating chart # For processes taking less than 10 seconds # Check for errors in Scilab # For processes taking more than 10 seconds Read log file and return data to eventscource function of javascript for displaying chart. This function is called in app route 'SendLog' below # Open the log file # Start sending log # Get the line and loop until the state is ENDING and figure_list # empty. 
Determine if we get block id and give it to chart.js # Finished Sending Log # Notify Client function which appends the updated (new) value to the file # append data to the tk.txt # update the time function which makes the initialisation of thread # stops the thread # deletes all files created under the 'diagram_id' name Route that will process the file upload # Get the file # Check if the file is not null # flags to check if both TOWS_c and FROMWSB are present # Make the filename safe, remove unsupported chars # Save the file in xml extension and using it for further modification # by using xml parser # to identify if we have to load or save to workspace or neither #0 if # neither TOWS_c or FROWSB found # this stores the id of split blocks # match the lines with the parent of our spliblocks which # we need to change # here we take the ids of command controllink which we will search # and change # here we save the contents # length=len(finalsplit) # return finalsplit # return temp_file_xml_name # List to contain all affich blocks # List to contain all the block IDs of tkscales so that we can create # read blocks with these IDs # Changed the ID of tkscales to -1 so that virtually the # tkscale blocks get disconnected from diagram at the backend # Taking workspace_counter 1 for TOWS_c and 2 for FROMWSB # Both TOWS_c and FROMWSB are present # Hardcoded the real time scaling to 1.0 (i.e., no scaling of time # occurs) only if tkscale is present # Save the changes made by parser # In front of block tkscale printing the block corresponding to read # function and assigning corresponding values # change the block ID # Value equal to 1 implies take readings from first column in # the file # Path to the file from which read block obtains the values # (2(e10.3,1x)) The format in which numbers are written # Two columns with base 10 and 3 digits after decimal and 1x # represents 1 unit space between two columns. 
# The remaining part of the block is read from the # Read_Content.txt file and written to the xml file # To resolve port issue coming in xcos file for following blocks : # INTMUL,MATBKSL,MATDET,MATDIAG,MATDIV and CURV_F # ISSUE is missing of dataColumns and dataLines in port tag # to find INTMUL in blocks and extract its block id and save in # block_idint # to find MATBKSL in blocks and extract its block id and save in # block_idmatblsk # to find MATDET in blocks and extract its block id and save in # block_det # to find MATDIAG in blocks and extract its block id and save in # block_diag # to find MATDIV in blocks and extract its block id and save in # block_div # to find CURV_f in blocks and extract its block id and save in # block_curl # check for existance of "ExplicitInputPort" in line # if ordering= 2 and parent id= INTMUL block id # replace word and add datacolumns and # datalines # check for existance of "ExplicitOutputPort" in line # if parent id= INTMUL block id # replace word and add datacolumns and # datalines # Changing the file extension from xml to xcos # Move the xcos file to uploads directory # function which makes the initialazation and updation of the files with # obtained new value # Get the file # Check if the file is not null # saves the file in values folder # at first the val.txt contains "Start" indicating the starting of the # process # create empty tk text files # starts the thread # at last the val.txt contains "Stop" indicating the ending process # stops the thread route for download of binary and audio # check if audio file or binary file route for deletion of binary and audio file # deleting the file Set response method to event-stream route to kill scilab on closing of chart route to end blocks with no Ending parameter sample input to scilab: num: 1+s den: s^2-5*s+1 sample output from scilab: [[0], [1], [0], [0]] # Read the data into a variable # App route for getting scilab expression output for Expression Block sample input to 
scilab: head: %foo(u1,u2) exx: (u1>0)*sin(u2)^2 # create a dictionary Array containing value which will be used as key for dictionary 'exprs_value' sample output from scilab: ok: true or scilab error message ok1: true ipar: [[2], [1], [6], [1], [5], [18], [2], [2], [5] , [101], [6], [2], [5], [15], [5], [3]] rpar: [[0], [2]] nz: [[1]] # Read the data into a variable # example page start ################### handle whitespace # double each backslash # replace each newline with '\n' # example page end ################# # Set server address from config | 2.093678 | 2 |
bill_law_classification.py | OriHoch/democracy-inst-law-research | 0 | 6619918 | <reponame>OriHoch/democracy-inst-law-research<filename>bill_law_classification.py
from datapackage_pipelines.wrapper import ingest, spew
import logging
def main():
    """Datapackage-pipelines processor: enrich each bill row with its law
    classifications, an estimated page count and classification flags.

    Consumes two resources from the incoming datapackage: the first holds
    bill rows, the second holds Israel-law classification rows.  Only the
    first (bills) resource is kept in the outgoing datapackage.
    """
    # ingest() yields (parameters, datapackage, resources); append a fresh
    # stats dict so the 4-tuple unpacking works.
    parameters, datapackage, resources, stats = ingest() + ({},)
    # Keep only the first (bills) resource in the output descriptor.
    datapackage["resources"] = [datapackage['resources'][0]]
    # Declare the fields this processor adds to every bill row.
    datapackage["resources"][0]['schema']['fields'] += [{'name': 'classifications', 'type': 'array'},
                                                        {'name': 'classification_budget', 'type': 'boolean'},
                                                        {'name': 'classification_hesderim', 'type': 'boolean'},
                                                        {'name': 'num_pages', 'type': 'integer'},
                                                        {'name': 'is_last_page', 'type': 'boolean'}]
    def get_resource():
        bills = {}        # BillID -> bill row (mutated in place below)
        israel_laws = {}  # IsraelLawID -> set of BillIDs referencing that law
        magazines = {}    # MagazineNumber -> PageNumber -> set of BillIDs
        for row in next(resources):
            bills[row['BillID']] = row
            if row['IsraelLawID']:
                for israel_law_id in row['IsraelLawID']:
                    israel_laws.setdefault(israel_law_id, set()).add(row['BillID'])
            if row['MagazineNumber'] and row['PageNumber']:
                magazines.setdefault(row['MagazineNumber'], {}).setdefault(row['PageNumber'], set()).add(row['BillID'])
        # Estimate a bill's page count as the gap to the next bill's start
        # page within the same magazine; the bill(s) on the last occupied
        # page have no successor, so num_pages stays unknown (None).
        for magazine_number in sorted(magazines):
            magazine = magazines[magazine_number]
            prev_page_number = None
            prev_page_bill_ids = None
            for page_number in sorted(magazine):
                if prev_page_bill_ids:
                    for bill_id in prev_page_bill_ids:
                        bills[bill_id].update(num_pages=page_number - prev_page_number, is_last_page=False)
                prev_page_number = page_number
                prev_page_bill_ids = magazine[page_number]
            for bill_id in prev_page_bill_ids:
                bills[bill_id].update(num_pages=None, is_last_page=True)
        # Second resource: classification rows; attach each classification
        # description to every bill tied to that law.
        for row in next(resources):
            if row['IsraelLawID'] in israel_laws:
                for bill_id in israel_laws[row['IsraelLawID']]:
                    bills[bill_id].setdefault('classifications', set()).add(row['ClassificiationDesc'])
        for bill in bills.values():
            classifications = list(bill.get('classifications', []))
            # Hebrew labels: 'תקציב' = budget, 'חוקי הסדרים' = arrangements laws.
            bill.update(classifications=classifications,
                        classification_budget='תקציב' in classifications,
                        classification_hesderim='חוקי הסדרים' in classifications)
            yield bill
    spew(datapackage, [get_resource()], stats)
# Allow running this processor directly as a pipeline step.
if __name__ == '__main__':
    main()
| from datapackage_pipelines.wrapper import ingest, spew
import logging
def main():
    """Enrich bill rows with law classifications, page counts and flags."""
    parameters, datapackage, resources, stats = ingest() + ({},)
    # Only the bills resource survives in the output descriptor.
    descriptor = datapackage['resources'][0]
    datapackage["resources"] = [descriptor]
    extra_fields = [
        {'name': 'classifications', 'type': 'array'},
        {'name': 'classification_budget', 'type': 'boolean'},
        {'name': 'classification_hesderim', 'type': 'boolean'},
        {'name': 'num_pages', 'type': 'integer'},
        {'name': 'is_last_page', 'type': 'boolean'},
    ]
    descriptor['schema']['fields'] += extra_fields
    def get_resource():
        bills_by_id = {}        # BillID -> bill row
        law_to_bills = {}       # IsraelLawID -> set of BillIDs
        pages_by_magazine = {}  # MagazineNumber -> PageNumber -> set of BillIDs
        for bill in next(resources):
            bill_id = bill['BillID']
            bills_by_id[bill_id] = bill
            if bill['IsraelLawID']:
                for law_id in bill['IsraelLawID']:
                    law_to_bills.setdefault(law_id, set()).add(bill_id)
            if bill['MagazineNumber'] and bill['PageNumber']:
                pages = pages_by_magazine.setdefault(bill['MagazineNumber'], {})
                pages.setdefault(bill['PageNumber'], set()).add(bill_id)
        # Page count = distance to the next occupied page in the magazine;
        # bills on the last occupied page get num_pages=None.
        for magazine_number in sorted(pages_by_magazine):
            pages = pages_by_magazine[magazine_number]
            previous_page = None
            previous_bills = None
            for page_number in sorted(pages):
                if previous_bills:
                    for bill_id in previous_bills:
                        bills_by_id[bill_id].update(num_pages=page_number - previous_page,
                                                    is_last_page=False)
                previous_page = page_number
                previous_bills = pages[page_number]
            for bill_id in previous_bills:
                bills_by_id[bill_id].update(num_pages=None, is_last_page=True)
        # Attach classification descriptions via the law -> bills index.
        for law in next(resources):
            for bill_id in law_to_bills.get(law['IsraelLawID'], ()):
                bills_by_id[bill_id].setdefault('classifications', set()).add(law['ClassificiationDesc'])
        for bill in bills_by_id.values():
            classifications = list(bill.get('classifications', []))
            bill.update(classifications=classifications,
                        classification_budget='תקציב' in classifications,
                        classification_hesderim='חוקי הסדרים' in classifications)
            yield bill
    spew(datapackage, [get_resource()], stats)
if __name__ == '__main__':
main() | none | 1 | 2.35684 | 2 | |
subSamplingL1Svm.py | narendrameena/featuerSelectionAssignment | 0 | 6619919 | import numpy as np
from sklearn.svm import LinearSVC
from sklearn.datasets import load_svmlight_file
import random
#data
data = load_svmlight_file("leu")
# subSampling
l =len(data[1])
start = int(round(l*0.70,0))
#N = random.sample(range(start,l), 1)
N = int(round(l*0.80,0))
print("Number of sub sample %d" %N)
i = np.random.choice(np.arange(data[0].shape[0]), N, replace=False)
sub_data = data[0][i.tolist()]
sub_sample = data[1][i.tolist()] # check for this step
X_1 = sub_data.todense().tolist()
y_1 = map(int,sub_sample)
#L1 SVM
l1svc = LinearSVC(penalty='l1', dual=False).fit(X_1, y_1)
#print(len(l1svc.coef_[0]))
coef = l1svc.coef_.tolist()[0]
#print(coef[0])
#print(l1svc.coef_.tolist()[0])
#print[i for i, j in enumerate(coef) if j 0]
#print(len(l1svc.coef_.tolist()[0]))
print("Number of features have non-zero weight vector coefficients %d " %sum(1 for i in coef if i != 0))
#For each feature compute a score that is the number of sub-samples for which that feature yielded a non-zero weight vector coefficient
'''
sampleListCoef = []
print(len(l1svc.coef_[0].tolist()))
for k in range(0,len(l1svc.coef_[0].tolist())):
for j in range(start,l):
i = np.random.choice(np.arange(data[0].shape[0]), j, replace=False)
sub_data = data[0][i.tolist()]
sub_sample = data[1][i.tolist()] # check for this step
X_1 = sub_data.todense().tolist() # samples 72 features above 7129
y_1 = map(int,sub_sample) # classes 2
#L1 SVM
l1svc = LinearSVC(penalty='l1', dual=False).fit(X_1, y_1)
coef = map(int,np.asarray(l1svc.coef_[0]))
if(coef[k] > 0):
sampleListCoef.append[j]
else:
sampleListCoef + [0]
print("Number of sub-samples for which that feature yielded a non-zero weight vector coefficient :")
print(sampleListCoef)
''' | import numpy as np
from sklearn.svm import LinearSVC
from sklearn.datasets import load_svmlight_file
import random
#data: load the svmlight-format leukemia dataset -> (feature matrix, labels)
data = load_svmlight_file("leu")
# subSampling: N = 80% of the samples, drawn without replacement below
l =len(data[1])
start = int(round(l*0.70,0))
#N = random.sample(range(start,l), 1)
N = int(round(l*0.80,0))
print("Number of sub sample %d" %N)
i = np.random.choice(np.arange(data[0].shape[0]), N, replace=False)
sub_data = data[0][i.tolist()]
sub_sample = data[1][i.tolist()] # check for this step
X_1 = sub_data.todense().tolist()
# NOTE(review): map() is lazy on Python 3, so LinearSVC.fit below would not
# receive a list -- presumably this script targeted Python 2. TODO confirm.
y_1 = map(int,sub_sample)
#L1 SVM (dual=False is required by sklearn for the l1 penalty)
l1svc = LinearSVC(penalty='l1', dual=False).fit(X_1, y_1)
#print(len(l1svc.coef_[0]))
coef = l1svc.coef_.tolist()[0]
#print(coef[0])
#print(l1svc.coef_.tolist()[0])
#print[i for i, j in enumerate(coef) if j 0]
#print(len(l1svc.coef_.tolist()[0]))
# L1 regularisation zeroes most coefficients; count the non-zero survivors.
print("Number of features have non-zero weight vector coefficients %d " %sum(1 for i in coef if i != 0))
#For each feature compute a score that is the number of sub-samples for which that feature yielded a non-zero weight vector coefficient
'''
sampleListCoef = []
print(len(l1svc.coef_[0].tolist()))
for k in range(0,len(l1svc.coef_[0].tolist())):
for j in range(start,l):
i = np.random.choice(np.arange(data[0].shape[0]), j, replace=False)
sub_data = data[0][i.tolist()]
sub_sample = data[1][i.tolist()] # check for this step
X_1 = sub_data.todense().tolist() # samples 72 features above 7129
y_1 = map(int,sub_sample) # classes 2
#L1 SVM
l1svc = LinearSVC(penalty='l1', dual=False).fit(X_1, y_1)
coef = map(int,np.asarray(l1svc.coef_[0]))
if(coef[k] > 0):
sampleListCoef.append[j]
else:
sampleListCoef + [0]
print("Number of sub-samples for which that feature yielded a non-zero weight vector coefficient :")
print(sampleListCoef)
''' | en | 0.448663 | #data # subSampling #N = random.sample(range(start,l), 1) # check for this step #L1 SVM #print(len(l1svc.coef_[0])) #print(coef[0]) #print(l1svc.coef_.tolist()[0]) #print[i for i, j in enumerate(coef) if j 0] #print(len(l1svc.coef_.tolist()[0])) #For each feature compute a score that is the number of sub-samples for which that feature yielded a non-zero weight vector coefficient sampleListCoef = [] print(len(l1svc.coef_[0].tolist())) for k in range(0,len(l1svc.coef_[0].tolist())): for j in range(start,l): i = np.random.choice(np.arange(data[0].shape[0]), j, replace=False) sub_data = data[0][i.tolist()] sub_sample = data[1][i.tolist()] # check for this step X_1 = sub_data.todense().tolist() # samples 72 features above 7129 y_1 = map(int,sub_sample) # classes 2 #L1 SVM l1svc = LinearSVC(penalty='l1', dual=False).fit(X_1, y_1) coef = map(int,np.asarray(l1svc.coef_[0])) if(coef[k] > 0): sampleListCoef.append[j] else: sampleListCoef + [0] print("Number of sub-samples for which that feature yielded a non-zero weight vector coefficient :") print(sampleListCoef) | 2.90325 | 3 |
changes/jobs/sync_repo.py | vault-the/changes | 443 | 6619920 | from __future__ import absolute_import, print_function
import logging
from datetime import datetime
from changes.config import db
from changes.jobs.signals import fire_signal
from changes.models.repository import Repository, RepositoryBackend, RepositoryStatus
from changes.models.revision import Revision
from changes.queue.task import tracked_task
from changes.vcs.base import ConcurrentUpdateError
logger = logging.getLogger('repo.sync')
NUM_RECENT_COMMITS = 30
@tracked_task(max_retries=None)
def sync_repo(repo_id, continuous=True):
    """Sync the repository identified by ``repo_id``.

    When ``continuous`` is true and the sync pass completed, raise
    ``NotFinished`` so the task framework re-queues another pass shortly.
    """
    target = Repository.query.get(repo_id)
    if not target:
        logger.error('Repository %s not found', repo_id)
        return False
    completed = sync(target)
    if completed and continuous:
        raise sync_repo.NotFinished(retry_after=20)
def sync(repo):
    """
    Checks the repository for new commits, and fires revision.created signals.

    Returns False when the repository has no VCS backend configured or is
    not active; returns True after a completed sync pass.
    """
    vcs = repo.get_vcs()
    if vcs is None:
        logger.warning('Repository %s has no VCS backend set', repo.id)
        return False
    if repo.status != RepositoryStatus.active:
        logger.info('Repository %s is not active', repo.id)
        return False
    # Stamp the attempt time up front so failed/stalled syncs remain visible.
    Repository.query.filter(
        Repository.id == repo.id,
    ).update({
        'last_update_attempt': datetime.utcnow(),
    }, synchronize_session=False)
    db.session.commit()
    # Fetch new commits, or clone on the first ever sync of this repo.
    if vcs.exists():
        try:
            vcs.update()
        except ConcurrentUpdateError:
            # Updating already so no need to update.
            pass
    else:
        vcs.clone()
    # The loop below do two things:
    # 1) adds new revisions to the database
    # 2) fire off revision created signals for recent revisions
    #
    # TODO(dcramer): this doesnt scrape everything, and really we wouldn't
    # want to do this all in a single job so we should split this into a
    # backfill task
    if repo.backend == RepositoryBackend.git:
        revisions = vcs.log(parent=None, limit=NUM_RECENT_COMMITS, first_parent=False)
    else:
        revisions = vcs.log(parent=None, limit=NUM_RECENT_COMMITS)
    for commit in revisions:
        # Row-lock the revision (if it exists) so concurrent syncs do not
        # fire duplicate signals for the same commit.
        known_revision = Revision.query.filter(
            Revision.repository_id == repo.id,
            Revision.sha == commit.id
        ).with_for_update().scalar()
        if known_revision and known_revision.date_created_signal:
            # Signal already fired for this commit; commit releases the lock.
            db.session.commit()
            continue
        revision, created, _ = commit.save(repo)
        db.session.commit()
        # Lock the revision.
        revision = Revision.query.filter(
            Revision.repository_id == repo.id,
            Revision.sha == commit.id
        ).with_for_update().scalar()
        # Fire the signal if the revision was created or its branches were discovered.
        #
        # The `revision.branches` check is a hack right now to prevent builds from
        # triggering on branchless commits.
        if revision.branches and not revision.date_created_signal:
            revision.date_created_signal = datetime.utcnow()
            fire_signal.delay(
                signal='revision.created',
                kwargs={'repository_id': repo.id.hex,
                        'revision_sha': revision.sha},
            )
            db.session.commit()
        db.session.commit()
    # Record the successful completion time.
    Repository.query.filter(
        Repository.id == repo.id,
    ).update({
        'last_update': datetime.utcnow(),
    }, synchronize_session=False)
    db.session.commit()
    return True
| from __future__ import absolute_import, print_function
import logging
from datetime import datetime
from changes.config import db
from changes.jobs.signals import fire_signal
from changes.models.repository import Repository, RepositoryBackend, RepositoryStatus
from changes.models.revision import Revision
from changes.queue.task import tracked_task
from changes.vcs.base import ConcurrentUpdateError
logger = logging.getLogger('repo.sync')
NUM_RECENT_COMMITS = 30
@tracked_task(max_retries=None)
def sync_repo(repo_id, continuous=True):
    """Sync a repository by id; reschedule while ``continuous`` is true."""
    repo = Repository.query.get(repo_id)
    if not repo:
        logger.error('Repository %s not found', repo_id)
        return False
    # Raising NotFinished re-queues this tracked task to run again in ~20s,
    # giving continuous polling without a long-lived worker.
    if sync(repo) and continuous:
        raise sync_repo.NotFinished(retry_after=20)
def sync(repo):
    """
    Checks the repository for new commits, and fires revision.created signals.

    Returns False (no VCS backend / repo inactive) or True on completion.
    """
    vcs = repo.get_vcs()
    if vcs is None:
        logger.warning('Repository %s has no VCS backend set', repo.id)
        return False
    if repo.status != RepositoryStatus.active:
        logger.info('Repository %s is not active', repo.id)
        return False
    # Mark the attempt before doing any work (visible even if sync fails).
    Repository.query.filter(
        Repository.id == repo.id,
    ).update({
        'last_update_attempt': datetime.utcnow(),
    }, synchronize_session=False)
    db.session.commit()
    # Update the working copy, cloning it on the first sync.
    if vcs.exists():
        try:
            vcs.update()
        except ConcurrentUpdateError:
            # Updating already so no need to update.
            pass
    else:
        vcs.clone()
    # The loop below do two things:
    # 1) adds new revisions to the database
    # 2) fire off revision created signals for recent revisions
    #
    # TODO(dcramer): this doesnt scrape everything, and really we wouldn't
    # want to do this all in a single job so we should split this into a
    # backfill task
    if repo.backend == RepositoryBackend.git:
        revisions = vcs.log(parent=None, limit=NUM_RECENT_COMMITS, first_parent=False)
    else:
        revisions = vcs.log(parent=None, limit=NUM_RECENT_COMMITS)
    for commit in revisions:
        # SELECT ... FOR UPDATE guards against concurrent syncs racing on
        # the same commit.
        known_revision = Revision.query.filter(
            Revision.repository_id == repo.id,
            Revision.sha == commit.id
        ).with_for_update().scalar()
        if known_revision and known_revision.date_created_signal:
            # Nothing to do; the commit releases the row lock.
            db.session.commit()
            continue
        revision, created, _ = commit.save(repo)
        db.session.commit()
        # Lock the revision.
        revision = Revision.query.filter(
            Revision.repository_id == repo.id,
            Revision.sha == commit.id
        ).with_for_update().scalar()
        # Fire the signal if the revision was created or its branches were discovered.
        #
        # The `revision.branches` check is a hack right now to prevent builds from
        # triggering on branchless commits.
        if revision.branches and not revision.date_created_signal:
            revision.date_created_signal = datetime.utcnow()
            fire_signal.delay(
                signal='revision.created',
                kwargs={'repository_id': repo.id.hex,
                        'revision_sha': revision.sha},
            )
            db.session.commit()
        db.session.commit()
    # Only reached on success: record the completed sync time.
    Repository.query.filter(
        Repository.id == repo.id,
    ).update({
        'last_update': datetime.utcnow(),
    }, synchronize_session=False)
    db.session.commit()
    return True
| en | 0.931523 | Checks the repository for new commits, and fires revision.created signals. # Updating already so no need to update. # The loop below do two things: # 1) adds new revisions to the database # 2) fire off revision created signals for recent revisions # # TODO(dcramer): this doesnt scrape everything, and really we wouldn't # want to do this all in a single job so we should split this into a # backfill task # Lock the revision. # Fire the signal if the revision was created or its branches were discovered. # # The `revision.branches` check is a hack right now to prevent builds from # triggering on branchless commits. | 2.158376 | 2 |
_sadm/web/view/syslog.py | jrmsdev/pysadm | 1 | 6619921 | <gh_stars>1-10
# Copyright (c) <NAME> <<EMAIL>>
# See LICENSE file.
from bottle import request
from _sadm import log
from _sadm.web import tpl, syslog
from _sadm.web.app import wapp, view
@wapp.route('/syslog')
@wapp.route('/syslog/last')
@wapp.route('/syslog/last/<limit:int>')
@view('syslog.html')
@tpl.data('syslog')
def index(limit = '100'):
    """Render the most recent ``limit`` syslog messages (default 100)."""
    requested = request.query.get('limit', limit)
    # Non-numeric or negative limits collapse to '0'.
    try:
        valid = int(requested) >= 0
    except ValueError:
        valid = False
    limit = requested if valid else '0'
    log.debug("last %s messages" % limit)
    # Invert the name->id map so the template can show level names by id.
    lvlmap = {lvl_id: lvl_name for lvl_name, lvl_id in syslog._LVLMAP.items()}
    return {
        'limit': limit,
        'msgs': syslog.last(int(limit)),
        'lvlmap': lvlmap,
    }
| # Copyright (c) <NAME> <<EMAIL>>
# See LICENSE file.
from bottle import request
from _sadm import log
from _sadm.web import tpl, syslog
from _sadm.web.app import wapp, view
@wapp.route('/syslog')
@wapp.route('/syslog/last')
@wapp.route('/syslog/last/<limit:int>')
@view('syslog.html')
@tpl.data('syslog')
def index(limit = '100'):
    """Render the most recent ``limit`` syslog messages (default 100)."""
    # Query-string ?limit= overrides the URL segment / default.
    limit = request.query.get('limit', limit)
    # Normalise: non-numeric or negative values collapse to '0'.
    try:
        if int(limit) < 0:
            limit = '0'
    except ValueError:
        limit = '0'
    log.debug("last %s messages" % limit)
    # Invert the name->id map so the template can render level names by id.
    lvlmap = dict()
    for lname, lid in syslog._LVLMAP.items():
        lvlmap[lid] = lname
    return {
        'limit': limit,
        'msgs': syslog.last(int(limit)),
        'lvlmap': lvlmap,
} | en | 0.45205 | # Copyright (c) <NAME> <<EMAIL>> # See LICENSE file. | 1.832728 | 2 |
mobycity/carpooling/admin.py | LucienD/Mobct | 0 | 6619922 | from django.contrib import admin
from django.forms.widgets import TextInput
from carpooling.models import Carpool, Subscription
class CarpoolAdmin(admin.ModelAdmin):
    # Columns shown in the carpool changelist view.
    list_display = ('id', 'organizer', 'frequency', 'cancelled')

admin.site.register(Carpool, CarpoolAdmin)

class SubscriptionAdmin(admin.ModelAdmin):
    # Columns shown in the subscription changelist view.
    list_display = ('id', 'subscriber', 'carpool', 'cancelled', 'accepted')

admin.site.register(Subscription, SubscriptionAdmin)
from django.forms.widgets import TextInput
from carpooling.models import Carpool, Subscription
class CarpoolAdmin(admin.ModelAdmin):
    # Admin changelist columns for Carpool.
    list_display = ('id', 'organizer', 'frequency', 'cancelled')

admin.site.register(Carpool, CarpoolAdmin)

class SubscriptionAdmin(admin.ModelAdmin):
    # Admin changelist columns for Subscription.
    list_display = ('id', 'subscriber', 'carpool', 'cancelled', 'accepted')
admin.site.register(Subscription, SubscriptionAdmin) | none | 1 | 1.850052 | 2 | |
tf_gnns/datastructures.py | mylonasc/tf-gnns | 10 | 6619923 | <filename>tf_gnns/datastructures.py
""" Classes for basic manipulation of GraphNet """
import numpy as np
import tensorflow as tf
def _copy_any_ds(val):
    """
    Return a copy of ``val`` appropriate for its container type.

    numpy arrays and lists are shallow-copied; TensorFlow variables and
    tensors go through ``tf.identity`` so gradients can still be traced.
    Any other value is returned unchanged.  This defines a consistent
    copy interface for nodes, edges and graphs regardless of the
    underlying data structure.
    """
    if isinstance(val, (np.ndarray, list)):
        return val.copy()
    if isinstance(val, (tf.Variable, tf.Tensor)):
        # TODO: maybe have a flag to override this? Adding more ops does not always make sense.
        return tf.identity(val)
    return val
class Node:
    """A graph node holding an attribute tensor with at least 2 dimensions."""

    def __init__(self, node_attr_tensor):
        """
        Parameters
        ----------
        node_attr_tensor:
            Node attributes; must have at least 2 dimensions.

        Raises
        ------
        ValueError: if the tensor has fewer than 2 dimensions.
        """
        if len(node_attr_tensor.shape) < 2:
            raise ValueError("The shape of the input for nodes and edges should have at least 2 dimensions!")
        self.node_attr_tensor = node_attr_tensor
        self.incoming_edges = []  # Edge objects terminating at this node.
        self.shape = self.node_attr_tensor.shape

    def get_state(self):
        """Return the node attribute tensor."""
        return self.node_attr_tensor

    def set_tensor(self, tensor):
        """Replace the node attributes and refresh the cached shape."""
        self.node_attr_tensor = tensor
        # Fixed: was a redundant chained assignment (self.shape = self.shape = ...).
        self.shape = tensor.shape

    def copy(self):
        """Return a new Node with a copied attribute tensor (edges are not copied)."""
        return Node(_copy_any_ds(self.node_attr_tensor))

    def __add__(self, n):
        return Node(self.node_attr_tensor + n.node_attr_tensor)

    def __sub__(self, n):
        return Node(self.node_attr_tensor - n.node_attr_tensor)
class Edge:
    """A directed edge carrying an attribute tensor between two nodes."""

    def __init__(self, edge_attr_tensor, node_from, node_to):
        """
        Parameters
        ----------
        edge_attr_tensor: edge attributes (anything exposing ``.shape``).
        node_from: source node.
        node_to: destination node; this edge registers itself in its
            ``incoming_edges`` list (needed for aggregation later).
        """
        self.edge_tensor = edge_attr_tensor
        self.node_from = node_from
        self.node_to = node_to
        self.shape = self.edge_tensor.shape
        # Keep a reference to this edge since it is needed for aggregation afterwards.
        node_to.incoming_edges.append(self)

    def set_tensor(self, edge_tensor):
        """Replace the edge attributes and refresh the cached shape."""
        self.edge_tensor = edge_tensor
        self.shape = edge_tensor.shape

    def copy(self, nodes_correspondence):
        """Return a copied edge re-wired onto the corresponding copied nodes.

        ``nodes_correspondence`` maps original nodes to their copies.
        """
        edge_tensor = _copy_any_ds(self.edge_tensor)
        node_from = nodes_correspondence[self.node_from]
        node_to = nodes_correspondence[self.node_to]
        return Edge(edge_tensor, node_from, node_to)

    def __add__(self, edge):
        # Fixed: the original built this exception object but never raised
        # it, so `a + b` silently returned None.
        raise NotImplementedError("Edge addition is not implemented! This is due to potentially unclear semantics. Perform this manually.")
class Graph:
    """A graph: lists of nodes and edges plus an optional global attribute."""

    def __init__(self, nodes, edges, global_attr=None, NO_VALIDATION=True):
        """
        Creates a graph from a set of edges and nodes.

        Parameters
        ----------
        nodes : list of nodes.
        edges : list of edges whose endpoints should be members of ``nodes``.
        global_attr : optional graph-level attribute.
        NO_VALIDATION : when False, check that every edge endpoint is in
            ``nodes`` (raises AssertionError otherwise).
        """
        self.nodes = nodes
        self.edges = edges
        self.global_attr = global_attr
        self.has_global = self.global_attr is not None
        if not NO_VALIDATION:
            self.validate_graph()

    def is_equal_by_value(self, g2):
        """
        Checks if the graphs have the same values for node and edge attributes
        (and, when a global attribute is set, that it matches too).
        """
        is_equal = True
        for n1, n2 in zip(self.nodes, g2.nodes):
            is_equal = is_equal and tf.reduce_all(n1.node_attr_tensor == n2.node_attr_tensor)
        for e1, e2 in zip(self.edges, g2.edges):
            is_equal = is_equal and tf.reduce_all(e1.edge_tensor == e2.edge_tensor)
        if self.has_global:
            is_equal = is_equal and (g2.global_attr == self.global_attr)
        return bool(is_equal)

    def compare_connectivity(self, g2):
        """
        Checks if the connectivity of two graphs is the same by comparing the
        node indices of each edge's endpoints, edge by edge.
        """
        g1 = self
        nodes_from_match = [(g1.nodes.index(e1.node_from) == g2.nodes.index(e2.node_from)) for e1, e2 in zip(g1.edges, g2.edges)]
        nodes_to_match = [(g1.nodes.index(e1.node_to) == g2.nodes.index(e2.node_to)) for e1, e2 in zip(g1.edges, g2.edges)]
        all_matching = True
        for matches in [*nodes_from_match, *nodes_to_match]:
            all_matching = all_matching and matches
        return all_matching

    def validate_graph(self):
        """Raise AssertionError if any edge endpoint is not a member of ``self.nodes``.

        Fixed: this was decorated ``@staticmethod`` while taking ``self`` (so
        the ``self.validate_graph()`` call in ``__init__`` raised TypeError),
        and both membership checks were inverted — they raised exactly when
        the endpoint WAS in the graph, contradicting their own messages.
        """
        for e in self.edges:
            if e.node_from not in self.nodes:
                raise AssertionError("The source node {nn} for edge {ee} is not in the graph!".format(nn=e.node_from, ee=e))
            if e.node_to not in self.nodes:
                raise AssertionError("The destination node {nn} for edge {ee} is not in the graph!".format(nn=e.node_to, ee=e))

    def copy(self):
        """Copy node and edge attributes and re-create the connectivity."""
        nodes_coppied = [n.copy() for n in self.nodes]
        nodes_correspondence = {s: c for s, c in zip(self.nodes, nodes_coppied)}
        # Instantiate the new edges re-wired onto the copied nodes:
        coppied_edge_instances = []
        for e in self.edges:
            enew = e.copy(nodes_correspondence)
            coppied_edge_instances.append(enew)
        return Graph(nodes_coppied, coppied_edge_instances)

    def get_subgraph_from_nodes(self, nodes, edge_trimming_mode="+from+to"):
        """
        Nodes should belong to this graph.  Creates a new graph with copied
        edge and node properties, defined from a sub-graph of the original.

        parameters:
          nodes: the nodes of the graph we want the subgraph of.
          edge_trimming_mode:
            "+from+to" - keep an edge only if both its "from" and "to" nodes
                         are in ``nodes``
            "-from+to" - keep an edge only if neither its "from" nor its "to"
                         node is in ``nodes``
            (other modes from the original sketch were never implemented;
            unknown modes keep no edges)
        """
        def check_edge_trimming_condition(e_):
            # Fixed: use the parameter instead of silently capturing the
            # enclosing loop variable ``e`` through the closure.
            if edge_trimming_mode == "+from+to":
                return (e_.node_from in nodes) and (e_.node_to in nodes)
            if edge_trimming_mode == "-from+to":
                return (e_.node_from not in nodes) and (e_.node_to not in nodes)

        sg_nodes_copy = [n.copy() for n in nodes]
        original_copy_nodes_correspondence = {n: nc for n, nc in zip(nodes, sg_nodes_copy)}
        sg_edges_copy = []
        if len(self.edges) > 0:
            for e in self.edges:
                if check_edge_trimming_condition(e):
                    sg_edges_copy.append(e.copy(original_copy_nodes_correspondence))
        g = Graph(sg_nodes_copy, sg_edges_copy)
        return g

    def __add__(self, graph):
        """
        Element-wise addition of node and edge attributes.

        This should only work with graphs that have compatible node and edge
        features.  It is assumed that the two graphs have the same
        connectivity (this is not checked and mismatches will fail ugly).
        """
        nodes = [nself + n for nself, n in zip(self.nodes, graph.nodes)]
        correspondence = {s: t for s, t in zip(self.nodes, nodes)}
        added_edges = []
        for eself, e in zip(self.edges, graph.edges):
            enew = Edge(eself.edge_tensor + e.edge_tensor,
                        correspondence[eself.node_from],
                        correspondence[eself.node_to])
            added_edges.append(enew)
        return Graph(nodes, added_edges)
def make_graph_tuple_from_graph_list(list_of_graphs):
    """
    Takes in a list of graphs (with consistent attribute sizes - not checked)
    and creates a GraphTuple (stacked input tensors + some book keeping).

    Every graph's node and edge attribute tensors must have a leading
    dimension of exactly 1 (that dimension was the batch dimension in an
    earlier implementation and is squeezed out below); split batched graphs
    into single graphs before calling this.

    Raises
    ------
    ValueError: if any graph violates the leading-dimension-of-1 requirement.
    """
    # Fixed: the original built these problem strings but never acted on
    # them, and the edge-side message called Edge.get_state(), which does
    # not exist; problems are now reported via ValueError.
    problematic_graphs = []
    for g_index, g in enumerate(list_of_graphs):
        problem = ''
        if g.nodes and g.nodes[0].get_state().shape[0] != 1:
            problem += 'First size of node attributes should be 1 - found %i ' % g.nodes[0].get_state().shape[0]
        if g.edges and g.edges[0].edge_tensor.shape[0] != 1:
            problem += 'First size of edge attributes should be 1 - found %i ' % g.edges[0].edge_tensor.shape[0]
        if problem:
            problematic_graphs.append('graph %i: %s' % (g_index, problem))
    if problematic_graphs:
        raise ValueError('\n'.join(problematic_graphs))

    # Flatten all graphs into shared node/edge lists plus per-graph counts.
    all_edges, all_nodes, n_nodes, n_edges = [], [], [], []
    for g in list_of_graphs:
        all_edges.extend(g.edges)
        all_nodes.extend(g.nodes)
        n_nodes.append(len(g.nodes))
        n_edges.append(len(g.edges))

    # Sender/receiver indices are positions in the flattened node list, so
    # they are unique across graphs.
    edge_attr_tensor, nodes_attr_tensor, senders, receivers = [], [], [], []
    for e in all_edges:
        edge_attr_tensor.append(e.edge_tensor)
        senders.append(all_nodes.index(e.node_from))
        receivers.append(all_nodes.index(e.node_to))
    for n in all_nodes:
        nodes_attr_tensor.append(n.node_attr_tensor)

    # Stack and drop the leading singleton dimension (validated above),
    # yielding [total_edges, ...] and [total_nodes, ...] tensors.
    edges_attr_stacked = tf.squeeze(tf.stack(edge_attr_tensor, 0), 1)
    nodes_attr_stacked = tf.squeeze(tf.stack(nodes_attr_tensor, 0), 1)
    return GraphTuple(nodes_attr_stacked, edges_attr_stacked, senders, receivers, n_nodes, n_edges)
class GraphTuple:
    """
    Packs multiple graphs into flat stacked tensors for fast batched computation.
    Node/edge attributes of all graphs are concatenated along the first axis and
    connectivity is kept as flat sender/receiver index lists that are unique
    across graphs.
    """
    def __init__(self, nodes, edges,senders,receivers, n_nodes, n_edges, global_attr = None,sort_receivers_to_edges = False , _global_reps_for_nodes = None, _global_reps_for_edges = None, n_graphs = None):
        """
        A graph tuple contains multiple graphs for faster batched computation.
        parameters:
          nodes     : a `tf.Tensor` containing all the node attributes
          edges     : a `tf.Tensor` containing all the edge attributes
          senders   : a list of sender node indices defining the graph connectivity. The indices are unique accross graphs
          receivers : a list of receiver node indices defining the graph connectivity. The indices are unique accross graphs
          n_nodes   : a list, a numpy array or a tf.Tensor containing how many nodes are in each graph represented by the nodes and edges in the object
          n_edges   : a list, a numpy array or a tf.Tensor containing how many edges are in each graph represented by the nodes and edges in the object
          global_attr: (optional) a `tf.Tensor` or a `np.ndarray` containing global attributes (first size - self.n_graphs)
          sort_receivers_to_edges : (optional) whether to sort the edges on construction; currently unused (kept for interface compatibility).
          _global_reps_for_edges : (optional) used for the aggregation of the global var.
          _global_reps_for_nodes : (optional) used for the aggregation of the global var.
          n_graphs  : (optional) explicit number of graphs; derived from len(n_nodes) when omitted.
        """
        assert(len(n_nodes) == len(n_edges))
        self.nodes = nodes          # stacked node attributes (floats)
        self.edges = edges          # stacked edge attributes (floats)
        self.senders = senders      # sender node indices (integers, unique across graphs)
        self.receivers = receivers  # receiver node indices (integers, unique across graphs)
        self.n_nodes = n_nodes      # per-graph node counts
        self.n_edges = n_edges      # per-graph edge counts
        # Fixed: an explicitly provided n_graphs used to be silently overwritten
        # by len(n_nodes) at the end of the constructor.
        self.n_graphs = len(self.n_nodes) if n_graphs is None else n_graphs
        self.global_attr = global_attr
        self.has_global = self.global_attr is not None
        # Flat per-node / per-edge graph-membership indices, aligned with
        # the stacked nodes/edges tensors (used by segment operations).
        graph_indices_nodes = []
        for graph_idx, count in enumerate(self.n_nodes):
            graph_indices_nodes.extend(np.ones(count).astype("int") * graph_idx)
        graph_indices_edges = []
        for graph_idx, count in enumerate(self.n_edges):
            graph_indices_edges.extend(np.ones(count).astype("int") * graph_idx)
        if self.has_global:  # default global is None; when provided, register it.
            self.assign_global(global_attr)
        self.graph_indices_nodes , self.graph_indices_edges = graph_indices_nodes, graph_indices_edges
        if (_global_reps_for_edges is None) and (_global_reps_for_nodes is None):
            self.update_reps_for_globals()
        else:
            # Fixed: caller-provided aggregation indices were previously discarded,
            # leaving the attributes undefined.
            self._global_reps_for_edges = _global_reps_for_edges
            self._global_reps_for_nodes = _global_reps_for_nodes
    def update_reps_for_globals(self):
        """
        Build flat index vectors for segment sums when dealing with global variables.
        This is created even when there are no globals (one just needs the node
        and edge counts for each graph.)
        """
        global_reps_for_edges = []  # <- used to cast the global tensor to a compatible size for the edges.
        for graph_idx, count in enumerate(self.n_edges):
            global_reps_for_edges.extend([graph_idx] * int(count))
        self._global_reps_for_edges = global_reps_for_edges
        global_reps_for_nodes = []  # <- similarly for nodes:
        for graph_idx, count in enumerate(self.n_nodes):
            global_reps_for_nodes.extend([graph_idx] * int(count))
        self._global_reps_for_nodes = global_reps_for_nodes
    def assign_global(self, global_attr, check_shape = False):
        """Attach a global attribute tensor (first size should equal self.n_graphs)."""
        self.has_global = True
        if check_shape:
            assert(tf.shape(global_attr)[0] == self.n_graphs)
        self.global_attr = global_attr
    def is_equal_by_value(self, other_graph_tuple):
        """Return True when all fields (and the global, when present) compare equal by value."""
        v1 = self.edges, self.nodes, self.receivers, self.senders, self.n_nodes, self.n_edges, self.n_graphs
        v2 = other_graph_tuple.edges, other_graph_tuple.nodes, other_graph_tuple.receivers, other_graph_tuple.senders, other_graph_tuple.n_nodes, other_graph_tuple.n_edges, other_graph_tuple.n_graphs
        def _equals_or_all_equals(v1_, v2_):
            if isinstance(v1_, list) and isinstance(v2_, list):
                return v1_ == v2_
            if isinstance(v1_, tf.Variable) and isinstance(v2_, tf.Variable):
                return all(v1_ == v2_)
            # Fixed: np.array is a function, not a type (isinstance raised TypeError),
            # and a '.' typo made the second check a one-argument isinstance call.
            if isinstance(v1_, np.ndarray) and isinstance(v2_, np.ndarray):
                return bool(np.all(v1_ == v2_))
            # Fixed: previously fell through returning None (falsy), so comparing the
            # integer n_graphs fields made this method always return False.
            return bool(np.all(v1_ == v2_))
        if self.has_global:
            assert(other_graph_tuple.has_global)
            global_same = _equals_or_all_equals(other_graph_tuple.global_attr, self.global_attr)
        else:
            global_same = True
        return all(_equals_or_all_equals(v1__, v2__) for v1__, v2__ in zip(v1, v2)) and global_same
    def copy(self):
        """Return a GraphTuple with copied fields (global_attr is shared, as before)."""
        return GraphTuple(_copy_any_ds(self.nodes), _copy_any_ds(self.edges),
                          _copy_any_ds(self.senders), _copy_any_ds(self.receivers),
                          _copy_any_ds(self.n_nodes), _copy_any_ds(self.n_edges),
                          global_attr = self.global_attr)
    def __add__(self, g2):
        """
        Element-wise addition of node/edge (and global, when both have one) attributes.
        Assumes both tuples describe the same topology; connectivity is taken from self.
        """
        nodes = self.nodes + g2.nodes
        edges = self.edges + g2.edges
        if self.has_global and g2.has_global:
            new_global = self.global_attr + g2.global_attr
            # n_edges now consistently taken from self (was taken from g2).
            gt = GraphTuple(nodes, edges, self.senders, self.receivers, self.n_nodes, self.n_edges, global_attr = new_global)
            gt._global_reps_for_edges = self._global_reps_for_edges
            gt._global_reps_for_nodes = self._global_reps_for_nodes
        else:
            gt = GraphTuple(nodes, edges, self.senders, self.receivers, self.n_nodes, self.n_edges)
        return gt
    def get_graph(self, graph_index):
        """
        Returns a new Graph (fresh Node/Edge objects) for the graph at `graph_index`.
        Gradients are not traced through this operation.
        """
        if graph_index < 0:
            raise ValueError("graph_index should be non-negative.")
        # Fixed off-by-one: index == n_graphs is already out of range.
        if graph_index >= self.n_graphs:
            raise ValueError("The provided index is larger than the available graphs in this GraphTuple object.")
        get_start_stop_index = lambda sizes_list, index : np.cumsum([0, *sizes_list[0:index + 1]])[-2:]
        start_idx_nodes , end_idx_nodes = get_start_stop_index(self.n_nodes, graph_index)
        start_idx_edges , end_idx_edges = get_start_stop_index(self.n_edges, graph_index)
        nodes_attrs = self.nodes[start_idx_nodes:end_idx_nodes]
        senders, receivers, edge_attr = [v[start_idx_edges:end_idx_edges] for v in [self.senders, self.receivers, self.edges]]
        # Re-base indices so they are local to the extracted graph.
        senders = senders - start_idx_nodes
        receivers = receivers - start_idx_nodes
        nodes = [Node(node_attr[tf.newaxis]) for node_attr in nodes_attrs]
        edges = [Edge(edge_attr_tensor[tf.newaxis], nodes[node_from_idx], nodes[node_to_idx])
                 for edge_attr_tensor, node_from_idx, node_to_idx in zip(edge_attr, senders, receivers)]
        global_attr = self.global_attr[graph_index] if self.has_global else None
        return Graph(nodes, edges, global_attr = global_attr)
    def to_tensor_dict(self):
        """Return this GraphTuple as a dict of tf tensors (see _graphtuple_to_tensor_dict)."""
        return _graphtuple_to_tensor_dict(self)
def _graphtuple_to_tensor_dict(gt_):
"""
Transform a GT to a dictionary.
Used for employing the traceable graph_dict evaluation function.
"""
def _tf_constant_or_none(v):
if v is None:
return None
else:
return tf.constant(v)
return {'edges' : _tf_constant_or_none(gt_.edges),
'nodes' : _tf_constant_or_none(gt_.nodes),
'senders' : _tf_constant_or_none(gt_.senders),
'receivers' :_tf_constant_or_none(gt_.receivers),
'n_edges' : _tf_constant_or_none(gt_.n_edges),
'n_nodes' : _tf_constant_or_none(gt_.n_nodes),
'n_graphs' : _tf_constant_or_none(gt_.n_graphs),
'global_attr' : _tf_constant_or_none(gt_.global_attr),
'_global_reps_for_edges' : _tf_constant_or_none(gt_._global_reps_for_edges),
'_global_reps_for_nodes' : _tf_constant_or_none(gt_._global_reps_for_nodes)}
| <filename>tf_gnns/datastructures.py
""" Classes for basic manipulation of GraphNet """
import numpy as np
import tensorflow as tf
def _copy_any_ds(val):
    """
    Copy semantics for the different datatypes accepted.
    This affects what happens when copying nodes, edges and graphs, in order to
    trace gradients, and defines a consistent interface regardless of the input
    data-structure.  Unknown types are returned as-is (no copy).
    """
    copied = val
    if isinstance(val, (np.ndarray, list)):
        copied = val.copy()
    if isinstance(val, (tf.Variable, tf.Tensor)):
        # TODO: maybe have a flag to override this? Adding more ops does not always make sense.
        copied = tf.identity(val)
    return copied
class Node:
    """A graph node holding an attribute tensor and references to its incoming edges."""
    def __init__(self, node_attr_tensor):
        """
        parameters:
          node_attr_tensor : attribute tensor; must have at least 2 dimensions
                             (the first dimension is the batch dimension).
        """
        if len(node_attr_tensor.shape) < 2:
            raise ValueError("The shape of the input for nodes and edges should have at least 2 dimensions!")
        self.node_attr_tensor = node_attr_tensor
        # Edges pointing to this node register themselves here (see Edge.__init__);
        # used later when aggregating incoming messages.
        self.incoming_edges = []
        self.shape = self.node_attr_tensor.shape
    def get_state(self):
        """Return the current node attribute tensor."""
        return self.node_attr_tensor
    def set_tensor(self, tensor):
        """Replace the node attribute tensor and refresh the cached shape."""
        self.node_attr_tensor = tensor
        # Fixed: was the redundant double assignment `self.shape = self.shape = ...`.
        self.shape = tensor.shape
    def copy(self):
        """Return a new Node with a copied attribute tensor (incoming edges are not copied)."""
        return Node(_copy_any_ds(self.node_attr_tensor))
    def __add__(self, n):
        return Node(self.node_attr_tensor + n.node_attr_tensor)
    def __sub__(self, n):
        return Node(self.node_attr_tensor - n.node_attr_tensor)
class Edge:
    """A directed edge between two Node objects, carrying an attribute tensor."""
    def __init__(self, edge_attr_tensor, node_from, node_to):
        """
        parameters:
          edge_attr_tensor : edge attribute tensor (first dimension is the batch dimension).
          node_from        : source Node.
          node_to          : destination Node; this edge registers itself in
                             node_to.incoming_edges since it is needed for aggregation.
        """
        self.edge_tensor = edge_attr_tensor
        self.node_from = node_from
        self.node_to = node_to
        self.shape = self.edge_tensor.shape
        # Keep a reference to this edge on the destination node (used for aggregation).
        node_to.incoming_edges.append(self)
    def set_tensor(self, edge_tensor):
        """Replace the edge attribute tensor and refresh the cached shape."""
        self.edge_tensor = edge_tensor
        self.shape = edge_tensor.shape
    def copy(self, nodes_correspondence):
        """
        Copy this edge, re-wiring its endpoints through the
        {original_node: copied_node} mapping `nodes_correspondence`.
        """
        edge_tensor = _copy_any_ds(self.edge_tensor)
        node_from = nodes_correspondence[self.node_from]
        node_to = nodes_correspondence[self.node_to]
        return Edge(edge_tensor, node_from, node_to)
    def __add__(self, edge):
        # Fixed: the exception was previously constructed but never raised,
        # so `e1 + e2` silently returned None.
        raise NotImplementedError("Edge addition is not implemented! This is due to potentially unclear semantics. Perform this manually.")
class Graph:
    """A graph: lists of Node and Edge objects plus an optional global attribute."""
    def __init__(self, nodes, edges, global_attr = None, NO_VALIDATION=True):
        """
        Creates a graph from a set of edges and nodes.
        parameters:
          nodes         : list of Node objects.
          edges         : list of Edge objects whose endpoints belong to `nodes`.
          global_attr   : optional graph-level attribute.
          NO_VALIDATION : when False, verify that every edge endpoint is in `nodes`.
        """
        self.nodes = nodes
        self.edges = edges
        self.global_attr = global_attr
        self.has_global = self.global_attr is not None
        if not NO_VALIDATION:
            self.validate_graph()
    def is_equal_by_value(self, g2):
        """
        Checks if the graphs have the same values for node and edge attributes
        (and an equal global attribute when one is present).
        """
        is_equal = True
        for n1, n2 in zip(self.nodes, g2.nodes):
            is_equal = is_equal and tf.reduce_all(n1.node_attr_tensor == n2.node_attr_tensor)
        for e1, e2 in zip(self.edges, g2.edges):
            is_equal = is_equal and tf.reduce_all(e1.edge_tensor == e2.edge_tensor)
        if self.has_global:
            is_equal = is_equal and (g2.global_attr == self.global_attr)
        return bool(is_equal)
    def compare_connectivity(self, g2):
        """
        Checks if the connectivity of two graphs is the same
        (edge endpoints occupy the same node positions in both graphs).
        """
        g1 = self
        nodes_from_match = [(g1.nodes.index(e1.node_from) == g2.nodes.index(e2.node_from)) for e1, e2 in zip(g1.edges, g2.edges)]
        nodes_to_match = [(g1.nodes.index(e1.node_to) == g2.nodes.index(e2.node_to)) for e1, e2 in zip(g1.edges, g2.edges)]
        all_matching = True
        for matches in [*nodes_from_match, *nodes_to_match]:
            all_matching = all_matching and matches
        return all_matching
    def validate_graph(self):
        """Raise AssertionError when an edge endpoint is not a node of this graph."""
        # Fixed: this was decorated @staticmethod while taking (and being invoked
        # through) `self`, which made self.validate_graph() a TypeError; the
        # membership tests were also inverted (raised exactly when the node WAS
        # present in the graph).
        for e in self.edges:
            if e.node_from not in self.nodes:
                raise AssertionError("The source node {nn} for edge {ee} is not in the graph!".format(nn = e.node_from, ee = e))
            if e.node_to not in self.nodes:
                raise AssertionError("The destination node {nn} for edge {ee} is not in the graph!".format(nn = e.node_to, ee = e))
    def copy(self):
        """Copy node and edge attributes and re-create the graph connectivity."""
        nodes_coppied = [n.copy() for n in self.nodes]
        nodes_correspondence = {s: c for s, c in zip(self.nodes, nodes_coppied)}
        # Instantiate the new edges, re-wired to the copied nodes:
        coppied_edge_instances = []
        for e in self.edges:
            enew = e.copy(nodes_correspondence)
            coppied_edge_instances.append(enew)
        return Graph(nodes_coppied, coppied_edge_instances)
    def get_subgraph_from_nodes(self, nodes, edge_trimming_mode = "+from+to"):
        """
        Nodes should belong to this graph. Creates a new graph with copied edge and
        node properties, defined from a sub-graph of the original graph.
        parameters:
          self (type = Graph): the graph we want a sub-graph from
          nodes: the nodes of the graph we want the subgraph of.
          edge_trimming_mode:
            "+from+to" - keep an edge only if both its "from" and "to" nodes are in `nodes`
            "-from+to" - keep an edge only if neither its "from" nor its "to" node is in `nodes`
          NOTE(review): any other mode silently drops all edges (the condition
          function returns None); only the two modes above are implemented.
        """
        def check_edge_trimming_condition(e_):
            # Fixed: previously closed over the loop variable `e` instead of
            # using its own parameter (same behavior, but accidental).
            if edge_trimming_mode == "+from+to":
                return (e_.node_from in nodes) and (e_.node_to in nodes)
            if edge_trimming_mode == "-from+to":
                return (e_.node_from not in nodes) and (e_.node_to not in nodes)
        sg_nodes_copy = [n.copy() for n in nodes]
        original_copy_nodes_correspondence = {n: nc for n, nc in zip(nodes, sg_nodes_copy)}
        sg_edges_copy = []
        if len(self.edges) > 0:
            for e in self.edges:
                if check_edge_trimming_condition(e):
                    sg_edges_copy.append(e.copy(original_copy_nodes_correspondence))
        g = Graph(sg_nodes_copy, sg_edges_copy)
        return g
    def __add__(self, graph):
        """
        Element-wise addition of node and edge attributes.
        This should only work with graphs that have compatible node and edge features.
        Assumed also that the two graphs have the same connectivity (otherwise this will fail ugly).
        """
        nodes = [nself + n for nself, n in zip(self.nodes, graph.nodes)]
        correspondence = {s: t for s, t in zip(self.nodes, nodes)}
        added_edges = []
        for eself, e in zip(self.edges, graph.edges):
            enew = Edge(eself.edge_tensor + e.edge_tensor,
                        correspondence[eself.node_from],
                        correspondence[eself.node_to])
            added_edges.append(enew)
        return Graph(nodes, added_edges)
def make_graph_tuple_from_graph_list(list_of_graphs):
    """
    Takes in a list of graphs (with consistent attribute sizes - not checked)
    and creates a GraphTuple (stacked input tensors + some book-keeping).

    Because of legacy support for batching graphs of identical topology, the
    first (batch) dimension of every node and edge attribute tensor must be 1;
    it is squeezed out before stacking.  A ValueError is raised otherwise.
    """
    # Check the first dimension is 1 - instruct the caller to split graphs if not.
    # TODO: Support splitting a list of same graphs with the first dimension of node and edge
    #       features different than one and constructing a GraphTuple.
    problems = []
    for g_index, g in enumerate(list_of_graphs):
        problem = ''
        if g.nodes[0].get_state().shape[0] != 1:
            # Fixed: the message previously read the size from g.edges[0].get_state(),
            # which crashed (Edge has no get_state) while formatting the diagnostic.
            problem += 'First size of node attributes should be 1 - found %i ' % g.nodes[0].get_state().shape[0]
        if g.edges[0].edge_tensor.shape[0] != 1:
            problem += 'First size of edge attributes should be 1 - found %i ' % g.edges[0].edge_tensor.shape[0]
        if problem:
            problems.append('graph %i: %s' % (g_index, problem))
    if problems:
        # Fixed: these diagnostics were previously computed but never reported,
        # letting the tf.squeeze below fail with a confusing error instead.
        raise ValueError('Cannot build GraphTuple: ' + '; '.join(problems))
    all_edges, all_nodes, n_nodes, n_edges = [], [], [], []
    for g in list_of_graphs:
        all_edges.extend(g.edges)
        all_nodes.extend(g.nodes)
        n_nodes.append(len(g.nodes))
        n_edges.append(len(g.edges))
    # O(1) node -> global index lookup (list.index made this loop quadratic).
    # Nodes compare by identity, so the dict lookup matches list.index semantics.
    node_index = {node: idx for idx, node in enumerate(all_nodes)}
    edge_attr_tensor, senders, receivers = [], [], []
    for e in all_edges:
        edge_attr_tensor.append(e.edge_tensor)
        senders.append(node_index[e.node_from])
        receivers.append(node_index[e.node_to])
    nodes_attr_tensor = [n.node_attr_tensor for n in all_nodes]
    # Squeeze out the singleton batch dimension (dimension index 1 after stacking)
    # that was validated above.
    edges_attr_stacked = tf.squeeze(tf.stack(edge_attr_tensor, 0), 1)
    nodes_attr_stacked = tf.squeeze(tf.stack(nodes_attr_tensor, 0), 1)
    return GraphTuple(nodes_attr_stacked, edges_attr_stacked, senders, receivers, n_nodes, n_edges)
class GraphTuple:
    # Packs multiple graphs into flat stacked tensors for fast batched computation.
    def __init__(self, nodes, edges,senders,receivers, n_nodes, n_edges, global_attr = None,sort_receivers_to_edges = False , _global_reps_for_nodes = None, _global_reps_for_edges = None, n_graphs = None):
        """
        A graph tuple contains multiple graphs for faster batched computation.
        parameters:
          nodes     : a `tf.Tensor` containing all the node attributes
          edges     : a `tf.Tensor` containing all the edge attributes
          senders   : a list of sender node indices defining the graph connectivity. The indices are unique accross graphs
          receivers : a list of receiver node indices defining the graph connectivity. The indices are unique accross graphs
          n_nodes   : a list, a numpy array or a tf.Tensor containing how many nodes are in each graph represented by the nodes and edges in the object
          n_edges   : a list,a numpy array or a tf.Tensor containing how many edges are in each graph represented by the nodes and edges in the object
          global_attr: (optional) a `tf.Tensor` or a `np.array` containing global attributes (first size - self.n_graphs)
          sort_receivers : (optional) whether to sort the edges on construction, allowing for not needing to sort the output of the node receiver aggregators.
          _global_reps_for_edges : (optional) used for the aggregation of the global var.
          _global_reps_for_nodes : (optional) used for the aggregation of the global var.
          n_graphs : (optional)
        """
        # Sort edges according to receivers and sort receivers:
        assert(len(n_nodes) == len(n_edges))
        self.nodes = nodes # floats tensor
        self.edges = edges # floats tensor
        self.senders = senders # integers
        self.receivers = receivers # integers
        self.n_nodes = n_nodes # integers
        self.n_edges = n_edges # integers
        if n_graphs is None:
            self.n_graphs = len(self.n_nodes) # assuming the n_nodes is a list containing the number of nodes for each graph.
        self.global_attr = global_attr
        self.has_global = self.global_attr is not None
        # Flat per-node / per-edge graph-membership indices, aligned with nodes/edges.
        graph_indices_nodes = []
        for k_,k in enumerate(self.n_nodes):
            graph_indices_nodes.extend(np.ones(k).astype("int")*k_)
        graph_indices_edges = []
        for k_,k in enumerate(self.n_edges):
            graph_indices_edges.extend(np.ones(k).astype("int")*k_)
        if self.has_global: # <- default global is "None". If it was provided, set the global variable (together with some aggregator indices for convenience and performance).
            self.assign_global(global_attr)
        self.graph_indices_nodes , self.graph_indices_edges = graph_indices_nodes, graph_indices_edges
        # NOTE(review): when _global_reps_for_* ARE provided, they are never stored
        # on self, leaving those attributes undefined.
        if (_global_reps_for_edges is None ) and (_global_reps_for_nodes is None):
            self.update_reps_for_globals()
        # NOTE(review): this unconditionally overwrites an explicitly provided n_graphs.
        self.n_graphs = len(self.n_nodes)
    def update_reps_for_globals(self):
        """
        Some flat vectors for segment sums when dealing with global variables.
        This is created even when there are no globals (one just needs the node
        and edge counts for each graph.)
        """
        global_reps_for_edges = [] # <- used to cast the global tensor to a compatible size for the edges.
        for k, e in enumerate(self.n_edges):
            global_reps_for_edges.extend([k]*int(e))
        self._global_reps_for_edges = global_reps_for_edges
        global_reps_for_nodes = [] # <- similarly for nodes:
        for k, e in enumerate(self.n_nodes):
            global_reps_for_nodes.extend([k]*int(e))
        self._global_reps_for_nodes = global_reps_for_nodes
    def assign_global(self, global_attr, check_shape = False):
        # Attach a global attribute tensor (first size should equal self.n_graphs).
        self.has_global = True
        if check_shape:
            assert(tf.shape(global_attr)[0] == self.n_graphs)
        self.global_attr = global_attr
    def is_equal_by_value(self, other_graph_tuple):
        # Compare all fields (and the global, when present) by value.
        v1 = self.edges,self.nodes, self.receivers,self.senders, self.n_nodes, self.n_edges, self.n_graphs
        v2 = other_graph_tuple.edges,other_graph_tuple.nodes, other_graph_tuple.receivers,other_graph_tuple.senders, other_graph_tuple.n_nodes, other_graph_tuple.n_edges, other_graph_tuple.n_graphs
        def _equals_or_all_equals(v1_,v2_):
            if isinstance(v1_, list) and isinstance(v2_, list):
                return v1_ == v2_
            if isinstance(v1_, tf.Variable) and isinstance(v2_, tf.Variable):
                return all(v1_ == v2_)
            # NOTE(review): np.array is a function, not a type (isinstance raises
            # TypeError here), and `v2_. np.array` has a '.' where a ',' belongs;
            # unmatched types also fall through returning None (falsy).
            if isinstance(v1_, np.array) and isinstance(v2_. np.array):
                return all(v1_ == v2_)
        if self.has_global:
            global_same = _equals_or_all_equals(other_graph_tuple.global_attr,self.global_attr)
            assert(other_graph_tuple.has_global)
        else:
            global_same = True
        return all([_equals_or_all_equals(v1__,v2__) for v1__, v2__ in zip(v1,v2)]) and global_same
    def copy(self):
        # Copy all fields (global_attr is shared, not copied); ngraphs is unused.
        n = _copy_any_ds(self.nodes)
        e = _copy_any_ds(self.edges)
        s = _copy_any_ds(self.senders)
        r = _copy_any_ds(self.receivers)
        nnodes = _copy_any_ds(self.n_nodes)
        nedges = _copy_any_ds(self.n_edges)
        ngraphs = _copy_any_ds(self.n_graphs)
        return GraphTuple(n,e,s,r,nnodes,nedges, global_attr = self.global_attr)
    def __add__(self, g2):
        # Element-wise addition; assumes identical topology.
        nodes = self.nodes + g2.nodes
        edges = self.edges + g2.edges
        s = self.senders
        r = self.receivers
        n_nodes = self.n_nodes
        # NOTE(review): n_edges is taken from g2 while n_nodes comes from self.
        n_edges = g2.n_edges
        if self.has_global and g2.has_global:
            new_global = self.global_attr + g2.global_attr
            gt = GraphTuple(nodes,edges,s,r,n_nodes, n_edges, global_attr = new_global)
            gt._global_reps_for_edges = self._global_reps_for_edges
            gt._global_reps_for_nodes = self._global_reps_for_nodes
        else:
            gt = GraphTuple(nodes, edges, s,r,n_nodes, n_edges)
        return gt
    def get_graph(self, graph_index):
        """
        Returns a new graph with the same properties as the original graph.
        gradients are not traced through this operation.
        """
        assert(graph_index >=0 )
        # NOTE(review): boundary check should be >= (index == n_graphs is out of range).
        if graph_index > self.n_graphs:
            raise ValueError("The provided index is larger than the available graphs in this GraphTuple object.")
        get_start_stop_index = lambda sizes_list, index : np.cumsum([0,*sizes_list[0:index+1]])[-2:]
        start_idx_nodes , end_idx_nodes = get_start_stop_index(self.n_nodes, graph_index)
        start_idx_edges , end_idx_edges = get_start_stop_index(self.n_edges, graph_index)
        nodes_attrs = self.nodes[start_idx_nodes:end_idx_nodes]
        senders, receivers, edge_attr = [v[start_idx_edges:end_idx_edges] for v in [self.senders, self.receivers,self.edges]]
        # Re-base indices so they are local to the extracted graph.
        senders = senders-start_idx_nodes
        receivers = receivers - start_idx_nodes
        nodes = [Node(node_attr[tf.newaxis]) for node_attr in nodes_attrs]
        edges = [Edge(edge_attr_tensor[tf.newaxis], nodes[node_from_idx], nodes[node_to_idx]) for edge_attr_tensor, node_from_idx, node_to_idx in zip(edge_attr, senders,receivers)]
        if self.has_global:
            global_attr = self.global_attr[graph_index]
        else:
            global_attr = None
        return Graph(nodes, edges, global_attr = global_attr)
    def to_tensor_dict(self):
        # See _graphtuple_to_tensor_dict below.
        return _graphtuple_to_tensor_dict(self)
def _graphtuple_to_tensor_dict(gt_):
    """
    Transform a GT to a dictionary.
    Used for employing the traceable graph_dict evaluation function.
    """
    def _tf_constant_or_none(v):
        # Preserve None fields (e.g. an absent global_attr) instead of failing in tf.constant.
        if v is None:
            return None
        else:
            return tf.constant(v)
    return {'edges' : _tf_constant_or_none(gt_.edges),
    'nodes' : _tf_constant_or_none(gt_.nodes),
    'senders' : _tf_constant_or_none(gt_.senders),
    'receivers' :_tf_constant_or_none(gt_.receivers),
    'n_edges' : _tf_constant_or_none(gt_.n_edges),
    'n_nodes' : _tf_constant_or_none(gt_.n_nodes),
    'n_graphs' : _tf_constant_or_none(gt_.n_graphs),
    'global_attr' : _tf_constant_or_none(gt_.global_attr),
    '_global_reps_for_edges' : _tf_constant_or_none(gt_._global_reps_for_edges),
    '_global_reps_for_nodes' : _tf_constant_or_none(gt_._global_reps_for_nodes)}
| en | 0.888632 | Classes for basic manipulation of GraphNet Copy semantics for different datatypes accepted. This affects what happens when copying nodes, edges and graphs. In order to trace gradients, and defines a consistent interface regardless of the input data-structure. # TODO: maybe have a flag to override this? Adding more ops does not always make sense. # Keep a reference to this edge since it is needed for aggregation afterwards. Creates a graph from a set of edges and nodes Checks if the graphs have the same values for node and edge attributes Checks if the connectivity of two graphs is the same. # validate that the edges are all # copy attributes of nodes and edges and re-create graph connectivity: # Instantiate the new edges: Node should belong to graph. Creates a new graph with coppied edge and node properties, defined from a sub-graph of the original graph. parameters: self (type = Graph): the graph we want a sub-graph from nodes: the nodes of the graph we want the subgraph of. 
mode: "+from+to" - keep an edge if there is a "from" node or a "to" node at that edge (and the corresponding node) "-from-to" - keep an edge if there is NOT a "from" node and NOT a "to" node at that edge (and the corresponding node) "+from" - keep an edge only if it has a "from" node that coincides with any of the nodes in the list (not implemented) "+to" - keep an edge only if it has a "to" node that coincides with any of the nodes in the list (not implemented) "-from" - keep an edge only if it DOESN't have a "from" node that concides with any of the nodes in the list (not implemented) This should only work with graphs that have compatible node and edge features Assumed also that the two graphs have the same connectivity (otherwise this will fail ugly) Takes in a list of graphs (with consistent sizes - not checked) and creates a graph tuple (input tensors + some book keeping) Because there is some initial functionality I don't want to throw away currently, that implements special treatment for nodes and edges coming from graphs with the same topology, it is currently required that the first dimension of nodes and edges for the list of graphs that are entered in this function is always 1 (this dimension is the batch dimension in the previous implementation.) # check the first dimension is 1 - instruct to split graphs if not. # TODO: Support splitting a list of same graphs with the first dimension of node and edge # features different than one and constructing a GraphTuple. Currently the first # dimension is required to be "1" (but squeezed later on!) # graph_id = [id_ for id_, dummy in enumerate(list_of_graphs)] # The 2nd dimension (dimension index 1) should be of size 1 (there is a test in the start of the constructor). # The same framework supports efficient computation on graphs of the same topology batched together where the first dimension # is the batched size. 
It is required that such graphs were provided for the construction (or at least the first dimension is "1"). # , graph_id) A graph tuple contains multiple graphs for faster batched computation. parameters: nodes : a `tf.Tensor` containing all the node attributes edges : a `tf.Tensor` containing all the edge attributes senders : a list of sender node indices defining the graph connectivity. The indices are unique accross graphs receivers : a list of receiver node indices defining the graph connectivity. The indices are unique accross graphs n_nodes : a list, a numpy array or a tf.Tensor containing how many nodes are in each graph represented by the nodes and edges in the object n_edges : a list,a numpy array or a tf.Tensor containing how many edges are in each graph represented by the nodes and edges in the object global_attr: (optional) a `tf.Tensor` or a `np.array` containing global attributes (first size - self.n_graphs) sort_receivers : (optional) whether to sort the edges on construction, allowing for not needing to sort the output of the node receiver aggregators. _global_reps_for_edges : (optional) used for the aggregation of the global var. _global_reps_for_nodes : (optional) used for the aggregation of the global var. n_graphs : (optional) # Sort edges according to receivers and sort receivers: # floats tensor # floats tensor # integers # integers # integers # integers # assuming the n_nodes is a list containing the number of nodes for each graph. # <- default global is "None". If it was provided, set the global variable (together with some aggregator indices for convenience and performance). Some flat vectors for segment sums when dealing with global variables. This is created even when there are no globals (one just needs the node and edge counts for each graph.) # <- used to cast the global tensor to a compatible size for the edges. # <- similarly for nodes: Returns a new graph with the same properties as the original graph. 
gradients are not traced through this operation. Transform a GT to a dictionary. Used for employing the traceable graph_dict evaluation function. | 2.880059 | 3 |
Spark/random_forests/src/unit.py | leonardoads/RecSys-cariris | 0 | 6619924 | <gh_stars>0
from pyspark.mllib.tree import RandomForest
from pyspark import SparkContext, SparkConf
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
import logging
import time, os, sys
import re
def union(line):
    """
    Map a zipped (prediction, click_record) pair to a (str, str) tuple built
    from the first two fields of the click record.

    NOTE(review): the prediction element (line[0]) is currently unused; a
    filter on prediction == '1.0' existed but was commented out.
    """
    values = line[1]
    return (str(values[0]), str(values[1]))
def saveCSV(line):
    """Join the first two fields of `line` into one comma-separated string."""
    return ",".join((line[0], line[1]))
# Local Spark context for joining model predictions back onto the test clicks.
conf = (SparkConf()
         .setMaster("local")
         .setAppName("My app"))
sc = SparkContext(conf = conf)
# Predictions produced by the random-forest model (one label per line).
predicitions = sc.textFile("/local/data/recsys/predicitions_real.dat/",10)
data2 = sc.textFile( '/local/data/recsysTales/all_samples/all_clicks_test_i.dat',10)
#data2 = sc.textFile( '/local/data/recsys/rel_test.dat/',9)
data2 = data2.map(lambda line:line.split(','))
#data2.saveAsTextFile('/local/data/recsys/rel_test.dat/',)
#lines = predicitions.count()
# NOTE(review): RDD.zip requires both RDDs to have identical partitioning and
# per-partition element counts; the partition counts are printed to eyeball that.
print data2.getNumPartitions()
print predicitions.getNumPartitions()
a = predicitions.zip(data2)
print a.take(3)
# NOTE(review): union() never returns None in the current code, so this filter is a no-op.
paraSalvar = a.map(union).filter(lambda line: line!=None)
paraSalvar = paraSalvar.map(saveCSV)
print paraSalvar.take(3)
paraSalvar.saveAsTextFile('/local/data/recsysTales/all_samples/output_real')
| from pyspark.mllib.tree import RandomForest
from pyspark import SparkContext, SparkConf
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
import logging
import time, os, sys
import re
def union(line):
    # Map a zipped (prediction, click_record) pair to a (str, str) tuple of
    # the first two click-record fields; line[0] (the prediction) is unused —
    # a filter on prediction == '1.0' was commented out.
    predic = line[0]
    values = line[1]
    #if(predic == u'1.0'):
    return (str(values[0]),str(values[1]))
def saveCSV(line):
    # Join the first two fields of `line` into one comma-separated string.
    newValue = line[0]+","+line[1]
    return newValue
    # f.write(line[0]+","+line[1])
    # f.close()
# Local Spark context for joining model predictions back onto the test clicks.
conf = (SparkConf()
         .setMaster("local")
         .setAppName("My app"))
sc = SparkContext(conf = conf)
# Predictions produced by the random-forest model (one label per line).
predicitions = sc.textFile("/local/data/recsys/predicitions_real.dat/",10)
data2 = sc.textFile( '/local/data/recsysTales/all_samples/all_clicks_test_i.dat',10)
#data2 = sc.textFile( '/local/data/recsys/rel_test.dat/',9)
data2 = data2.map(lambda line:line.split(','))
#data2.saveAsTextFile('/local/data/recsys/rel_test.dat/',)
#lines = predicitions.count()
# NOTE(review): RDD.zip requires identical partitioning and per-partition counts.
print data2.getNumPartitions()
print predicitions.getNumPartitions()
a = predicitions.zip(data2)
print a.take(3)
# NOTE(review): union() never returns None, so this filter is a no-op.
paraSalvar = a.map(union).filter(lambda line: line!=None)
paraSalvar = paraSalvar.map(saveCSV)
print paraSalvar.take(3)
paraSalvar.saveAsTextFile('/local/data/recsysTales/all_samples/output_real') | en | 0.208547 | #if(predic == u'1.0'): # f.write(line[0]+","+line[1]) # f.close() #data2 = sc.textFile( '/local/data/recsys/rel_test.dat/',9) #data2.saveAsTextFile('/local/data/recsys/rel_test.dat/',) #lines = predicitions.count() | 2.325909 | 2 |
tests/test_ipgroups.py | stryng/python-cloudservers | 1 | 6619925 | <filename>tests/test_ipgroups.py
from cloudservers import IPGroup
from fakeserver import FakeServer
from utils import assert_isinstance
from nose.tools import assert_equal
cs = FakeServer()
def test_list_ipgroups():
    # Listing must hit the detail endpoint and yield IPGroup instances.
    ipl = cs.ipgroups.list()
    cs.assert_called('GET', '/shared_ip_groups/detail')
    [assert_isinstance(ipg, IPGroup) for ipg in ipl]
def test_get_ipgroup():
    # Fetching by id must GET the group's resource URL.
    ipg = cs.ipgroups.get(1)
    cs.assert_called('GET', '/shared_ip_groups/1')
    assert_isinstance(ipg, IPGroup)
def test_create_ipgroup():
    # Creation posts to the collection URL and returns the new group.
    ipg = cs.ipgroups.create("My group", 1234)
    cs.assert_called('POST', '/shared_ip_groups')
    assert_isinstance(ipg, IPGroup)
def test_delete_ipgroup():
    # Deletion works via the instance, via the manager with an object,
    # and via the manager with a bare id — all hit the same DELETE URL.
    ipg = cs.ipgroups.get(1)
    ipg.delete()
    cs.assert_called('DELETE', '/shared_ip_groups/1')
    cs.ipgroups.delete(ipg)
    cs.assert_called('DELETE', '/shared_ip_groups/1')
    cs.ipgroups.delete(1)
    cs.assert_called('DELETE', '/shared_ip_groups/1')
def test_find():
ipg = cs.ipgroups.find(name='group1')
cs.assert_called('GET', '/shared_ip_groups/detail')
assert_equal(ipg.name, 'group1')
ipgl = cs.ipgroups.findall(id=1)
assert_equal(ipgl, [IPGroup(None, {'id': 1})]) | <filename>tests/test_ipgroups.py
from cloudservers import IPGroup
from fakeserver import FakeServer
from utils import assert_isinstance
from nose.tools import assert_equal
cs = FakeServer()
def test_list_ipgroups():
    """Listing IP groups hits the detail endpoint and yields IPGroup objects."""
    groups = cs.ipgroups.list()
    cs.assert_called('GET', '/shared_ip_groups/detail')
    for group in groups:
        assert_isinstance(group, IPGroup)
def test_get_ipgroup():
    """Fetching a single IP group by id issues a GET for that resource."""
    group = cs.ipgroups.get(1)
    cs.assert_called('GET', '/shared_ip_groups/1')
    assert_isinstance(group, IPGroup)
def test_create_ipgroup():
    """Creating an IP group POSTs to the collection endpoint."""
    created = cs.ipgroups.create("My group", 1234)
    cs.assert_called('POST', '/shared_ip_groups')
    assert_isinstance(created, IPGroup)
def test_delete_ipgroup():
    """Deletion works on the resource object, on the manager with an object,
    and on the manager with a bare id."""
    group = cs.ipgroups.get(1)
    # Delete via the resource object itself.
    group.delete()
    cs.assert_called('DELETE', '/shared_ip_groups/1')
    # Delete via the manager, passing the resource object ...
    cs.ipgroups.delete(group)
    cs.assert_called('DELETE', '/shared_ip_groups/1')
    # ... and passing just the numeric id.
    cs.ipgroups.delete(1)
    cs.assert_called('DELETE', '/shared_ip_groups/1')
def test_find():
    # find()/findall() filter client-side, so the request issued is still
    # the full detail listing.
    ipg = cs.ipgroups.find(name='group1')
    cs.assert_called('GET', '/shared_ip_groups/detail')
    assert_equal(ipg.name, 'group1')
    # findall() returns every matching group as a list.
    ipgl = cs.ipgroups.findall(id=1)
assert_equal(ipgl, [IPGroup(None, {'id': 1})]) | none | 1 | 2.30459 | 2 | |
func_tests/pages/submissionlogpage/submission_log_locator.py | ICT4H/dcs-web | 1 | 6619926 | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from framework.utils.common_utils import *
# By default every locator should be CSS
# Abbr:
# TB - Text Box
# CB - Check Box
# RB - Radio Button
# BTN - Button
# DD - Drop Down
# LINK - Links
# LABEL - Label
# TA - Text Area
# TR - Table Row
# variable to access locators
# Keys used when a locator is unpacked into a (by, locator) pair.
LOCATOR = "locator"
BY = "by"
# Submission-log table rows/cells. "%s" placeholders are filled in with
# string formatting by the page objects before the locator is used.
SUBMISSION_LOG_TR = by_xpath("//div[@id='submission_logs']//../table/tbody/tr[2]")
SUBMISSION_LOG_TR_XPATH = "//div[@id='submission_logs']//../table/tbody/tr/td[contains(text(),\"%s\")]/../td"
SUBMISSION_LOG_FAILURE_MSG_XPATH = "/td[5]/span"
ACTIVE_TAB_LOCATOR = by_css("ul.secondary_tab li.active")
ACTION_SELECT_CSS_LOCATOR = by_css(".dataTables_wrapper .action")
DELETE_BUTTON = by_css(".delete")
EDIT_BUTTON = by_css(".edit")
CHECKALL_CB_CSS_LOCATOR = by_css(".checkall-checkbox")
SHOWN_RECORDS_COUNT_CSS_LOCATOR = by_css(".dataTables_info span:first-child")
TOTAL_RECORDS_COUNT = by_id("total_count")
# Raw selector templates (not wrapped in by_*): callers format these first.
XPATH_TO_CELL = "//div[@id='submission_logs']//../table/tbody/tr[%s]/td[%s]"
HEADER_CELL_CSS_LOCATOR = "#submission_logs table>thead>tr>th:nth-child(%s)"
HEADER_CELLS = "#submission_logs table>thead>tr>th"
SUCCESS_TAB_CSS_LOCATOR = by_css("#tabs ul li:nth-child(2) a:first-child")
# Bulk-action controls above the table.
ACTION_DROP_DOWN = by_css("button.action")
NONE_SELECTED_LOCATOR = by_id("none-selected")
ACTION_MENU = by_id("action_menu")
SUBMISSION_CB_LOCATOR = "table.submission_table tbody tr:nth-child(%s) td:first-child input"
# Filters: submission date plus data-sender / subject lookups by name or id.
SUBMISSION_DATE_FILTER = by_id("submissionDatePicker")
DS_AND_SUBJECT_FILTER_LOCATOR_BY_NAME = "//a[contains(text(),'%s')]"
DS_AND_SUBJECT_FILTER_LOCATOR_BY_ID = "//a/span[@class='small_grey'][contains(text(),'%s')]"
DATASENDER_FILTER_LOCATOR_BY_NAME = "//a[contains(text(),'%s')]"
DATASENDER_FILTER_LOCATOR_BY_ID = "//a/span[@class='small_grey'][contains(text(),'%s')]"
# Date-range picker shortcut labels; only matched while the picker is visible
# (style contains 'block').
ALL_PERIODS_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='All Dates']")
CURRENT_MONTH_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Current month']")
LAST_MONTH_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Last Month']")
YEAR_TO_DATE_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Year to date']")
DAILY_DATE_RANGE_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Choose Date(s)']")
MONTHLY_DATE_RANGE_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Choose Month(s)']")
#BTN_DONE_ = by_xpath('//div[contains(@class, "ui-daterangepickercontain")]//button[contains(@class, "btnDone")]')
BTN_DONE_ = by_css('button.btnDone')
EMPTY_TABLE_MSG_ROW = by_css('td.dataTables_empty') | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from framework.utils.common_utils import *
# By default every locator should be CSS
# Abbr:
# TB - Text Box
# CB - Check Box
# RB - Radio Button
# BTN - Button
# DD - Drop Down
# LINK - Links
# LABEL - Label
# TA - Text Area
# TR - Table Row
# variable to access locators
# Keys used when a locator is unpacked into a (by, locator) pair.
LOCATOR = "locator"
BY = "by"
# Submission-log table rows/cells. "%s" placeholders are filled in with
# string formatting by the page objects before the locator is used.
SUBMISSION_LOG_TR = by_xpath("//div[@id='submission_logs']//../table/tbody/tr[2]")
SUBMISSION_LOG_TR_XPATH = "//div[@id='submission_logs']//../table/tbody/tr/td[contains(text(),\"%s\")]/../td"
SUBMISSION_LOG_FAILURE_MSG_XPATH = "/td[5]/span"
ACTIVE_TAB_LOCATOR = by_css("ul.secondary_tab li.active")
ACTION_SELECT_CSS_LOCATOR = by_css(".dataTables_wrapper .action")
DELETE_BUTTON = by_css(".delete")
EDIT_BUTTON = by_css(".edit")
CHECKALL_CB_CSS_LOCATOR = by_css(".checkall-checkbox")
SHOWN_RECORDS_COUNT_CSS_LOCATOR = by_css(".dataTables_info span:first-child")
TOTAL_RECORDS_COUNT = by_id("total_count")
# Raw selector templates (not wrapped in by_*): callers format these first.
XPATH_TO_CELL = "//div[@id='submission_logs']//../table/tbody/tr[%s]/td[%s]"
HEADER_CELL_CSS_LOCATOR = "#submission_logs table>thead>tr>th:nth-child(%s)"
HEADER_CELLS = "#submission_logs table>thead>tr>th"
SUCCESS_TAB_CSS_LOCATOR = by_css("#tabs ul li:nth-child(2) a:first-child")
# Bulk-action controls above the table.
ACTION_DROP_DOWN = by_css("button.action")
NONE_SELECTED_LOCATOR = by_id("none-selected")
ACTION_MENU = by_id("action_menu")
SUBMISSION_CB_LOCATOR = "table.submission_table tbody tr:nth-child(%s) td:first-child input"
# Filters: submission date plus data-sender / subject lookups by name or id.
SUBMISSION_DATE_FILTER = by_id("submissionDatePicker")
DS_AND_SUBJECT_FILTER_LOCATOR_BY_NAME = "//a[contains(text(),'%s')]"
DS_AND_SUBJECT_FILTER_LOCATOR_BY_ID = "//a/span[@class='small_grey'][contains(text(),'%s')]"
DATASENDER_FILTER_LOCATOR_BY_NAME = "//a[contains(text(),'%s')]"
DATASENDER_FILTER_LOCATOR_BY_ID = "//a/span[@class='small_grey'][contains(text(),'%s')]"
# Date-range picker shortcut labels; only matched while the picker is visible
# (style contains 'block').
ALL_PERIODS_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='All Dates']")
CURRENT_MONTH_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Current month']")
LAST_MONTH_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Last Month']")
YEAR_TO_DATE_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Year to date']")
DAILY_DATE_RANGE_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Choose Date(s)']")
MONTHLY_DATE_RANGE_LABEL = by_xpath("//div[contains(@class,'ui-daterangepicker') and contains(@style,'block')]/ul/li/a[text()='Choose Month(s)']")
#BTN_DONE_ = by_xpath('//div[contains(@class, "ui-daterangepickercontain")]//button[contains(@class, "btnDone")]')
BTN_DONE_ = by_css('button.btnDone')
EMPTY_TABLE_MSG_ROW = by_css('td.dataTables_empty') | en | 0.429044 | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 # By default every locator should be CSS # Abbr: # TB - Text Box # CB - Check Box # RB - Radio Button # BTN - Button # DD - Drop Down # LINK - Links # LABEL - Label # TA - Text Area # TR - Table Row # variable to access locators #BTN_DONE_ = by_xpath('//div[contains(@class, "ui-daterangepickercontain")]//button[contains(@class, "btnDone")]') | 1.940099 | 2 |
CODataViewer/main.py | adamdadd/DatSci | 0 | 6619927 | #! python
import kivy | #! python
import kivy | none | 1 | 1.025212 | 1 | |
tests/core/diag/test_config_diff.py | rhamlin/sperf | 12 | 6619928 | <filename>tests/core/diag/test_config_diff.py<gh_stars>10-100
# Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""validate the config_diff module"""
import unittest
from collections import OrderedDict
from pysper.core.diag import config_diff
class TestConfigDiff(unittest.TestCase):
    """Unit tests for pysper.core.diag.config_diff."""

    def test_group_configurations(self):
        """Nodes with identical configurations collapse into a single group."""
        node_config = OrderedDict(
            [
                ("node1", OrderedDict([("abc", "xyz")])),
                ("node2", OrderedDict([("abc", "xyz")])),
            ]
        )
        groups = config_diff.group_configurations(node_config)
        self.assertEqual(len(groups), 1)
        first_group = groups[0]
        self.assertEqual(first_group["nodes_list"], ["node1", "node2"])
        self.assertEqual(first_group["nodes"], 2)
        self.assertEqual(first_group["abc"], "xyz")

    def test_group_configurations_with_diffs(self):
        """Nodes whose configurations differ each get their own group."""
        node_config = OrderedDict(
            [
                ("node1", OrderedDict([("abc", "zzz")])),
                ("node2", OrderedDict([("abc", "xyz")])),
            ]
        )
        groups = config_diff.group_configurations(node_config)
        self.assertEqual(len(groups), 2)
        first_group = groups[0]
        self.assertEqual(first_group["nodes_list"], ["node1"])
        self.assertEqual(first_group["nodes"], 1)
        self.assertEqual(first_group["abc"], "zzz")
        second_group = groups[1]
        self.assertEqual(second_group["nodes_list"], ["node2"])
        self.assertEqual(second_group["nodes"], 1)
        self.assertEqual(second_group["abc"], "xyz")

    def test_ignore_known_problem_configuration_fields(self):
        """garballed output sper60. this was a known issue but is too annoying to ignore.
        to handle this we're going to skip any of the parameters that provide instances
        """
        conf_string = (
            "[aggregated_request_timeout_in_ms=120000; "
            + "audit_logging_options=com.datastax.bdp.db.audit.AuditLoggingOptions@31j0fa0; "
            + "cross_node_timeout=false; data_file_directories=[Ljava.lang.String;@31801f; "
            + "system_info_encryption=org.apache.cassandra.config.SystemTableEncryptionOptions@590c73d3; "
            + "transparent_data_encryption_options="
            + "org.apache.cassandra.config.TransparentDataEncryptionOptions@311d3a138a; "
            + "user_defined_function_fail_timeout=1500; user_defined_function_warn_timeout=500; "
            + "user_function_timeout_policy=die; windows_timer_interval=1; write_request_timeout_in_ms=2000]"
        )
        node_config_params = [x.split("=") for x in conf_string.split("; ")]
        node_config = {x[0]: x[1] for x in node_config_params}
        conf = config_diff.filter_unique_config_flags(node_config)
        # BUG FIX: the original passed a default of "1500" to .get(), so
        # assertIsNotNone could never fail; drop the default so this actually
        # verifies that the scalar setting survives filtering.
        self.assertIsNotNone(conf.get("user_defined_function_fail_timeout"))
        # Object-instance parameters (values containing @-addresses) must be
        # dropped by the filter.
        self.assertNotIn("system_info_encryption", conf)
        self.assertNotIn("data_file_directories", conf)
        self.assertNotIn("audit_logging_options", conf)
        self.assertNotIn("transparent_data_encryption_options", conf)
| <filename>tests/core/diag/test_config_diff.py<gh_stars>10-100
# Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""validate the config_diff module"""
import unittest
from collections import OrderedDict
from pysper.core.diag import config_diff
class TestConfigDiff(unittest.TestCase):
    """Unit tests for pysper.core.diag.config_diff."""

    def test_group_configurations(self):
        """Nodes with identical configurations collapse into a single group."""
        node_config = OrderedDict(
            [
                ("node1", OrderedDict([("abc", "xyz")])),
                ("node2", OrderedDict([("abc", "xyz")])),
            ]
        )
        groups = config_diff.group_configurations(node_config)
        self.assertEqual(len(groups), 1)
        first_group = groups[0]
        self.assertEqual(first_group["nodes_list"], ["node1", "node2"])
        self.assertEqual(first_group["nodes"], 2)
        self.assertEqual(first_group["abc"], "xyz")

    def test_group_configurations_with_diffs(self):
        """Nodes whose configurations differ each get their own group."""
        node_config = OrderedDict(
            [
                ("node1", OrderedDict([("abc", "zzz")])),
                ("node2", OrderedDict([("abc", "xyz")])),
            ]
        )
        groups = config_diff.group_configurations(node_config)
        self.assertEqual(len(groups), 2)
        first_group = groups[0]
        self.assertEqual(first_group["nodes_list"], ["node1"])
        self.assertEqual(first_group["nodes"], 1)
        self.assertEqual(first_group["abc"], "zzz")
        second_group = groups[1]
        self.assertEqual(second_group["nodes_list"], ["node2"])
        self.assertEqual(second_group["nodes"], 1)
        self.assertEqual(second_group["abc"], "xyz")

    def test_ignore_known_problem_configuration_fields(self):
        """garballed output sper60. this was a known issue but is too annoying to ignore.
        to handle this we're going to skip any of the parameters that provide instances
        """
        conf_string = (
            "[aggregated_request_timeout_in_ms=120000; "
            + "audit_logging_options=com.datastax.bdp.db.audit.AuditLoggingOptions@31j0fa0; "
            + "cross_node_timeout=false; data_file_directories=[Ljava.lang.String;@31801f; "
            + "system_info_encryption=org.apache.cassandra.config.SystemTableEncryptionOptions@590c73d3; "
            + "transparent_data_encryption_options="
            + "org.apache.cassandra.config.TransparentDataEncryptionOptions@311d3a138a; "
            + "user_defined_function_fail_timeout=1500; user_defined_function_warn_timeout=500; "
            + "user_function_timeout_policy=die; windows_timer_interval=1; write_request_timeout_in_ms=2000]"
        )
        node_config_params = [x.split("=") for x in conf_string.split("; ")]
        node_config = {x[0]: x[1] for x in node_config_params}
        conf = config_diff.filter_unique_config_flags(node_config)
        # BUG FIX: the original passed a default of "1500" to .get(), so
        # assertIsNotNone could never fail; drop the default so this actually
        # verifies that the scalar setting survives filtering.
        self.assertIsNotNone(conf.get("user_defined_function_fail_timeout"))
        # Object-instance parameters (values containing @-addresses) must be
        # dropped by the filter.
        self.assertNotIn("system_info_encryption", conf)
        self.assertNotIn("data_file_directories", conf)
        self.assertNotIn("audit_logging_options", conf)
        self.assertNotIn("transparent_data_encryption_options", conf)
| en | 0.888606 | # Copyright 2020 DataStax, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. validate the config_diff module test config diff all grouped one of each garballed output sper60. this was a known issue but is too annoying to ignore. to handle this we're going to skip any of the parameters that provide instances | 2.405906 | 2 |
gemini/order_book.py | GeekDougle/gemini-python | 77 | 6619929 | <reponame>GeekDougle/gemini-python
# order_book.py
# <NAME>
#
# A python wrapper for Gemini which keeps an updated order book
import time
from .basewebsocket import BaseWebSocket
from .debugly import typeassert
class GeminiOrderBook(BaseWebSocket):
    """
    Market data is a public API that streams all the market data on a
    given symbol.

    Maintains a local book as two dicts (``asks``/``bids``) mapping
    price -> remaining size, updated from ``change`` events.
    """

    @typeassert(product_id=str, sandbox=bool)
    def __init__(self, product_id, sandbox=False):
        # Sandbox and production differ only in the websocket host.
        if sandbox:
            url = 'wss://api.sandbox.gemini.com/v1/marketdata/{}'
        else:
            url = 'wss://api.gemini.com/v1/marketdata/{}'
        super().__init__(base_url=url.format(product_id))
        self.product_id = product_id
        self.asks = {}
        self.bids = {}

    def on_message(self, msg):
        """Apply one market-data message to the local book."""
        # Only messages with socket_sequence >= 1 are applied, mirroring the
        # upstream stream contract.
        if msg['socket_sequence'] < 1:
            return
        for event in msg['events']:
            if event['type'] != 'change':
                continue
            price = float(event['price'])
            remaining = float(event['remaining'])
            if event['side'] == 'ask':
                book = self.asks
            elif event['side'] == 'bid':
                book = self.bids
            else:
                continue
            if remaining == 0.0:
                # Level fully consumed or cancelled: drop it if present.
                book.pop(price, None)
            else:
                book[price] = remaining

    def get_ask(self):
        """Lowest ask price currently in the book (ValueError if empty)."""
        return min(self.asks)

    def get_bid(self):
        """Highest bid price currently in the book (ValueError if empty)."""
        return max(self.bids)

    def get_market_book(self):
        """Both sides of the book (references to the live dicts, not copies)."""
        return {'asks': self.asks, 'bids': self.bids}

    def reset_market_book(self):
        """Empty both sides of the book."""
        self.asks = {}
        self.bids = {}
        print('Market book reset to empty')
| # order_book.py
# <NAME>
#
# A python wrapper for Gemini which keeps an updated order book
import time
from .basewebsocket import BaseWebSocket
from .debugly import typeassert
class GeminiOrderBook(BaseWebSocket):
    """
    Market data is a public API that streams all the market data on a
    given symbol.
    """
    @typeassert(product_id=str, sandbox=bool)
    def __init__(self, product_id, sandbox=False):
        # The sandbox flag only switches the websocket host.
        if sandbox:
            super().__init__(base_url='wss://api.sandbox.gemini.com/v1/marketdata/{}'
                             .format(product_id))
        else:
            super().__init__(base_url='wss://api.gemini.com/v1/marketdata/{}'
                             .format(product_id))
        self.product_id = product_id
        # price -> remaining size for each side of the book
        self.asks = {}
        self.bids = {}
    def on_message(self, msg):
        # Only messages with socket_sequence >= 1 are applied; 'change'
        # events update the book, all other event types are ignored.
        if msg['socket_sequence'] >= 1:
            for event in msg['events']:
                if event['type'] == 'change':
                    price = float(event['price'])
                    remaining = float(event['remaining'])
                    if(event['side'] == 'ask'):
                        # remaining == 0 removes the price level; otherwise
                        # the level is (re)set to the new remaining size.
                        if(remaining == 0.0 and price in self.asks):
                            self.asks.pop(price)
                        elif(remaining != 0.0):
                            self.asks[price] = remaining
                    elif(event['side'] == 'bid'):
                        if(remaining == 0.0 and price in self.bids):
                            self.bids.pop(price)
                        elif(remaining != 0.0):
                            self.bids[price] = remaining
    def get_ask(self):
        # Best (lowest) ask; min() raises ValueError on an empty book.
        return min(self.asks.keys())
    def get_bid(self):
        # Best (highest) bid; max() raises ValueError on an empty book.
        return max(self.bids.keys())
    def get_market_book(self):
        # Returns references to the live dicts, not copies.
        result = {
            'asks': self.asks,
            'bids': self.bids
        }
        return result
    def reset_market_book(self):
        self.asks, self.bids = {}, {}
print('Market book reset to empty') | en | 0.789619 | # order_book.py # <NAME> # # A python wrapper for Gemini which keeps an updated order book Market data is a public API that streams all the market data on a given symbol. | 2.36085 | 2 |
src/datamgr/metadata/metadata/service/access/token.py | Chromico/bk-base | 84 | 6619930 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import base64
from Crypto import Random
from Crypto.Cipher import AES
from metadata.runtime import rt_g
class TokenManager(object):
    """Issue and verify AES-CBC based access tokens.

    The private key is normalised to a 16-character AES key, checked
    against the registered token dict from configuration, and combined
    with the configured public key (used as the CBC IV).
    """

    public_key = ''

    def __init__(self, private_key=''):
        normal_conf = rt_g.config_collection.normal_config
        self.token_dict = normal_conf.TOKEN_DICT
        self.filling_chr = normal_conf.TOKEN_FILLING_CHR
        self.public_key = normal_conf.TOKEN_PUBLIC_KEY
        self.private_key = self.private_key_format(private_key)
        self.is_legal = self.check_key_illegal()

    def private_key_format(self, pk):
        """Format ``pk`` into a standard 16-char AES key.

        One filler char is prefixed and the result is right-padded with
        filler chars up to 16 characters (``#pk###...``).

        :param pk: raw private key
        :return: the formatted 16-character AES key
        """
        pk = ("{:" + self.filling_chr + "<16}").format(self.filling_chr + pk)
        return pk[:16]

    def check_key_illegal(self):
        """Check whether the private key is legally registered.

        :return: bool - True when the stripped key is in the token dict
        """
        if not self.private_key or self.private_key.strip(self.filling_chr) not in self.token_dict:
            return False
        return True

    def generate_token(self):
        """Encrypt the private key and return it as url-safe base64 without padding."""
        if not self.is_legal:
            # BUG FIX: a bare ``raise`` outside an ``except`` block raises
            # RuntimeError("No active exception to re-raise"); raise an
            # explicit RuntimeError (same type) with a useful message.
            raise RuntimeError('private key is not a registered token key')
        secret_bin = AES.new(self.private_key, AES.MODE_CBC, self.public_key).encrypt(self.private_key)
        return base64.urlsafe_b64encode(secret_bin).decode('utf-8').strip('=')

    def check_shunt(self, custom_token):
        """Decide from the token whether the request is diverted to the standby cluster.

        :param custom_token: token string uploaded by the caller
        :return: True - divert to standby cluster / False - no diversion
        """
        if not self.is_legal:
            return False
        # Restore the base64 padding that generate_token() stripped.
        fill_cnt = 4 - (len(custom_token) % 4)
        custom_token = "{}{}".format(custom_token, '=' * fill_cnt)
        secret_bin = base64.urlsafe_b64decode(str(custom_token))
        msg = AES.new(self.private_key, AES.MODE_CBC, self.public_key).decrypt(secret_bin)
        # Token check passes -> False, i.e. keep serving from the main cluster.
        # NOTE(review): under Python 3 ``decrypt`` returns bytes while
        # self.private_key is str, so this comparison can never match --
        # confirm the intended encode/decode handling.
        return msg != self.private_key
class BaseCrypt(object):
    """AES-CFB helper: an instance key, itself encrypted under a root key,
    is used to encrypt/decrypt payloads; all ciphertext is base64-wrapped."""

    _bk_crypt = False

    def __init__(self):
        db_conf = rt_g.config_collection.db_config
        self.INSTANCE_KEY = db_conf.CRYPT_INSTANCE_KEY
        self.ROOT_KEY = db_conf.CRYPT_ROOT_KEY
        self.ROOT_IV = db_conf.CRYPT_ROOT_IV

    def generate_instance_key(self):
        """Generate a secret key; pass it in when encrypting/decrypting."""
        raw = Random.new().read(AES.block_size)
        return base64.b64encode(self.__encrypt(raw))

    def set_instance_key(self, instance_key):
        self.INSTANCE_KEY = instance_key

    def encrypt(self, plaintext):
        """Encrypt *plaintext* with the instance key.

        :param plaintext: content to encrypt
        :return: base64-encoded ciphertext
        """
        key = self.__parse_key()
        cipher_bytes = AES.new(key, AES.MODE_CFB, self.ROOT_IV).encrypt(plaintext)
        return base64.b64encode(cipher_bytes)

    def decrypt(self, ciphertext):
        """Decrypt *ciphertext* previously produced by :meth:`encrypt`.

        :param ciphertext: base64-encoded content to decrypt
        :return: the decrypted plaintext
        """
        key = self.__parse_key()
        # First undo the base64 wrapping, then the symmetric encryption.
        cipher_bytes = base64.decodebytes(ciphertext)
        return AES.new(key, AES.MODE_CFB, self.ROOT_IV).decrypt(cipher_bytes)

    def __encrypt(self, plaintext):
        """Encrypt with the root key. Internal -- do not call directly."""
        return AES.new(self.ROOT_KEY, AES.MODE_CFB, self.ROOT_IV).encrypt(plaintext)

    def __decrypt(self, ciphertext):
        """Decrypt with the root key. Internal -- do not call directly."""
        return AES.new(self.ROOT_KEY, AES.MODE_CFB, self.ROOT_IV).decrypt(ciphertext)

    def __parse_key(self):
        # The instance key is stored base64-encoded and root-encrypted.
        return self.__decrypt(base64.decodebytes(self.INSTANCE_KEY))
| # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import base64
from Crypto import Random
from Crypto.Cipher import AES
from metadata.runtime import rt_g
class TokenManager(object):
    """Issue and verify AES-CBC based access tokens.

    The private key is normalised to a 16-character AES key, checked
    against the registered token dict from configuration, and combined
    with the configured public key (used as the CBC IV).
    """

    public_key = ''

    def __init__(self, private_key=''):
        normal_conf = rt_g.config_collection.normal_config
        self.token_dict = normal_conf.TOKEN_DICT
        self.filling_chr = normal_conf.TOKEN_FILLING_CHR
        self.public_key = normal_conf.TOKEN_PUBLIC_KEY
        self.private_key = self.private_key_format(private_key)
        self.is_legal = self.check_key_illegal()

    def private_key_format(self, pk):
        """Format ``pk`` into a standard 16-char AES key.

        One filler char is prefixed and the result is right-padded with
        filler chars up to 16 characters (``#pk###...``).

        :param pk: raw private key
        :return: the formatted 16-character AES key
        """
        pk = ("{:" + self.filling_chr + "<16}").format(self.filling_chr + pk)
        return pk[:16]

    def check_key_illegal(self):
        """Check whether the private key is legally registered.

        :return: bool - True when the stripped key is in the token dict
        """
        if not self.private_key or self.private_key.strip(self.filling_chr) not in self.token_dict:
            return False
        return True

    def generate_token(self):
        """Encrypt the private key and return it as url-safe base64 without padding."""
        if not self.is_legal:
            # BUG FIX: a bare ``raise`` outside an ``except`` block raises
            # RuntimeError("No active exception to re-raise"); raise an
            # explicit RuntimeError (same type) with a useful message.
            raise RuntimeError('private key is not a registered token key')
        secret_bin = AES.new(self.private_key, AES.MODE_CBC, self.public_key).encrypt(self.private_key)
        return base64.urlsafe_b64encode(secret_bin).decode('utf-8').strip('=')

    def check_shunt(self, custom_token):
        """Decide from the token whether the request is diverted to the standby cluster.

        :param custom_token: token string uploaded by the caller
        :return: True - divert to standby cluster / False - no diversion
        """
        if not self.is_legal:
            return False
        # Restore the base64 padding that generate_token() stripped.
        fill_cnt = 4 - (len(custom_token) % 4)
        custom_token = "{}{}".format(custom_token, '=' * fill_cnt)
        secret_bin = base64.urlsafe_b64decode(str(custom_token))
        msg = AES.new(self.private_key, AES.MODE_CBC, self.public_key).decrypt(secret_bin)
        # Token check passes -> False, i.e. keep serving from the main cluster.
        # NOTE(review): under Python 3 ``decrypt`` returns bytes while
        # self.private_key is str, so this comparison can never match --
        # confirm the intended encode/decode handling.
        return msg != self.private_key
class BaseCrypt(object):
    """AES-CFB helper: an instance key, itself encrypted under a root key,
    is used to encrypt/decrypt payloads; all ciphertext is base64-wrapped."""

    _bk_crypt = False

    def __init__(self):
        db_conf = rt_g.config_collection.db_config
        self.INSTANCE_KEY = db_conf.CRYPT_INSTANCE_KEY
        self.ROOT_KEY = db_conf.CRYPT_ROOT_KEY
        self.ROOT_IV = db_conf.CRYPT_ROOT_IV

    def generate_instance_key(self):
        """Generate a secret key; pass it in when encrypting/decrypting."""
        raw = Random.new().read(AES.block_size)
        return base64.b64encode(self.__encrypt(raw))

    def set_instance_key(self, instance_key):
        self.INSTANCE_KEY = instance_key

    def encrypt(self, plaintext):
        """Encrypt *plaintext* with the instance key.

        :param plaintext: content to encrypt
        :return: base64-encoded ciphertext
        """
        key = self.__parse_key()
        cipher_bytes = AES.new(key, AES.MODE_CFB, self.ROOT_IV).encrypt(plaintext)
        return base64.b64encode(cipher_bytes)

    def decrypt(self, ciphertext):
        """Decrypt *ciphertext* previously produced by :meth:`encrypt`.

        :param ciphertext: base64-encoded content to decrypt
        :return: the decrypted plaintext
        """
        key = self.__parse_key()
        # First undo the base64 wrapping, then the symmetric encryption.
        cipher_bytes = base64.decodebytes(ciphertext)
        return AES.new(key, AES.MODE_CFB, self.ROOT_IV).decrypt(cipher_bytes)

    def __encrypt(self, plaintext):
        """Encrypt with the root key. Internal -- do not call directly."""
        return AES.new(self.ROOT_KEY, AES.MODE_CFB, self.ROOT_IV).encrypt(plaintext)

    def __decrypt(self, ciphertext):
        """Decrypt with the root key. Internal -- do not call directly."""
        return AES.new(self.ROOT_KEY, AES.MODE_CFB, self.ROOT_IV).decrypt(ciphertext)

    def __parse_key(self):
        # The instance key is stored base64-encoded and root-encrypted.
        return self.__decrypt(base64.decodebytes(self.INSTANCE_KEY))
| en | 0.48101 | # -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 格式化private_key为标准AES格式<#pk###...>右侧#补满16位 :param pk: 私钥 :return: 格式化之后的标准16位AES私钥 检查private_key是否合法注册 :return: boolean 合法与否 根据token判断用户请求是否分配到主集群 :param custom_token: 用户上传的token信息 :return: True - 分流到备集群 / False - 不分流 # token检查通过则返回False,表示不分流,继续访问主集群 生成秘钥,加密解密时需要传入 :return: 加密 :param plaintext: 需要加密的内容 :return: 解密 :param ciphertext: 需要解密的内容 :return: # 先解base64 # 再解对称加密 根据私钥加密,内部方法,请勿调用 :param plaintext: 需要加密的内容 :return: 根据私钥解密,内部方法,请勿调用 :param ciphertext: 需要加密的内容 :return: | 1.538787 | 2 |
examples/compare_linear.py | ycsun2017/simple_transfer | 0 | 6619931 | #!/usr/bin/env python
# Python imports.
from pickle import TRUE
import sys
import os
from numpy.lib.npyio import load
from torch import rand
# Other imports.
import srl_example_setup
from simple_rl.agents import QLearningAgent, RandomAgent, LinearQLearningAgent, DeepQLearningAgent
from simple_rl.tasks import EncodeGridWorldMDP, ContGridWorldMDP
from simple_rl.run_experiments import run_agents_on_mdp
from simple_rl.abstraction import AbstractionWrapper
import random
import torch
def source(n=4):
    """Run the source-task experiment on an n x n encoded grid world."""
    mdp = EncodeGridWorldMDP(width=n, height=n, init_loc=(1, 1),
                             goal_locs=[(n, n)], step_cost=0.1)
    learner = LinearQLearningAgent(actions=mdp.get_actions(), state_dim=n**2,
                                   random=False, load=False, save=False,
                                   learn=True, learn_dynamics=False,
                                   load_dynamics=False, name="source")
    baseline = RandomAgent(actions=mdp.get_actions())
    # Compare the linear Q-learner against a random baseline.
    run_agents_on_mdp([learner, baseline], mdp, instances=5,
                      cumulative_plot=False, episodes=200, steps=40,
                      reset_at_terminal=True, verbose=False, open_plot=False)
def target(n=4):
    """Run the target-task experiment on an n x n continuous grid world."""
    mdp = ContGridWorldMDP(width=n, height=n, init_loc=(1, 1),
                           goal_locs=[(n, n)], step_cost=0.1)
    # NOTE(review): ql_agent is constructed but never passed to
    # run_agents_on_mdp below -- confirm whether it was meant to be
    # part of the comparison.
    ql_agent = LinearQLearningAgent(actions=mdp.get_actions(), state_dim=2,
                                    random=False, load=False, save=False,
                                    learn=True, learn_dynamics=False,
                                    load_dynamics=False)
    deep_agent = DeepQLearningAgent(actions=mdp.get_actions(), state_dim=2,
                                    encode_size=n**2, save=False,
                                    learn_type="single", name="deep", lr=1e-4)
    run_agents_on_mdp([deep_agent], mdp, instances=5, cumulative_plot=False,
                      episodes=200, steps=40, reset_at_terminal=True,
                      verbose=False, open_plot=False)
def compare(n=4, open_plot=True):
    """Sweep encoder checkpoints and compare the transfer agents.

    Args:
        n: grid side length; the MDP is n x n with the goal at (n, n).
        open_plot: whether to open the resulting plot window.
    """
    # Setup MDP, Agents.
    mdp = ContGridWorldMDP(width=n, height=n, init_loc=(1, 1), goal_locs=[(n, n)],
                           step_cost=0.1)
    # Checkpoint steps: dense early (every 100 up to 2k), sparse afterwards.
    xs = list(range(50, 2000, 100)) + list(range(2000, 10000, 1000))
    agents = []
    for ck in xs:
        agent = LinearQLearningAgent(actions=mdp.get_actions(), state_dim=2, encode_size=n**2,
                                     save_dir="../models/linear_4-4/", name="Transfer-Q_ck{}".format(ck),
                                     learn=True, load_dynamics=True, load_encoder=True,
                                     checkpoint="checkpoints/encoder_{}.pkl".format(ck))
        agents.append(agent)
    # BUG FIX: the original hard-coded open_plot=False, silently ignoring the
    # open_plot parameter; honour the parameter here.
    run_agents_on_mdp(
        agents,
        mdp, instances=5, cumulative_plot=False, open_plot=open_plot,
        episodes=200, steps=40, reset_at_terminal=True, verbose=False)
if __name__ == "__main__":
    # Alternative entry points (uncomment to run instead of target()):
    # compare()
    # source()
target() | #!/usr/bin/env python
# Python imports.
from pickle import TRUE
import sys
import os
from numpy.lib.npyio import load
from torch import rand
# Other imports.
import srl_example_setup
from simple_rl.agents import QLearningAgent, RandomAgent, LinearQLearningAgent, DeepQLearningAgent
from simple_rl.tasks import EncodeGridWorldMDP, ContGridWorldMDP
from simple_rl.run_experiments import run_agents_on_mdp
from simple_rl.abstraction import AbstractionWrapper
import random
import torch
def source(n=4):
    """Run the source-task experiment on an n x n encoded grid world."""
    mdp = EncodeGridWorldMDP(width=n, height=n, init_loc=(1, 1),
                             goal_locs=[(n, n)], step_cost=0.1)
    learner = LinearQLearningAgent(actions=mdp.get_actions(), state_dim=n**2,
                                   random=False, load=False, save=False,
                                   learn=True, learn_dynamics=False,
                                   load_dynamics=False, name="source")
    baseline = RandomAgent(actions=mdp.get_actions())
    # Compare the linear Q-learner against a random baseline.
    run_agents_on_mdp([learner, baseline], mdp, instances=5,
                      cumulative_plot=False, episodes=200, steps=40,
                      reset_at_terminal=True, verbose=False, open_plot=False)
def target(n=4):
    """Train agents on the continuous n-by-n grid world (target task).

    Builds both a linear Q-learner and a deep Q-learner, but only the deep
    agent is actually run, matching the original experiment setup.
    """
    grid = ContGridWorldMDP(width=n, height=n, init_loc=(1, 1),
                            goal_locs=[(n, n)], step_cost=0.1)
    # NOTE: constructed but not run, exactly as in the original experiment.
    linear_agent = LinearQLearningAgent(
        actions=grid.get_actions(), state_dim=2, random=False,
        load=False, save=False, learn=True, learn_dynamics=False,
        load_dynamics=False)
    dqn_agent = DeepQLearningAgent(
        actions=grid.get_actions(), state_dim=2, encode_size=n**2,
        save=False, learn_type="single", name="deep", lr=1e-4)
    # Run the experiment and record results (no plot window).
    run_agents_on_mdp(
        [dqn_agent], grid, instances=5, cumulative_plot=False,
        episodes=200, steps=40, reset_at_terminal=True, verbose=False,
        open_plot=False)
def compare(n=4, open_plot=False):
    """Compare transfer Q-learners loaded from successive encoder checkpoints.

    One ``LinearQLearningAgent`` is created per checkpoint index and all of
    them are evaluated on the same continuous grid-world MDP.

    Args:
        n: Grid width/height; also fixes the encoder size (n**2).
        open_plot: Whether run_agents_on_mdp should open the result plot.
            Defaults to False, which matches the previous behaviour where
            this argument was accepted but silently ignored.
    """
    # Setup MDP.
    mdp = ContGridWorldMDP(width=n, height=n, init_loc=(1, 1), goal_locs=[(n, n)],
                           step_cost=0.1)
    # Checkpoint indices: dense sampling early in training, sparser later.
    checkpoints = list(range(50, 2000, 100)) + list(range(2000, 10000, 1000))
    agents = []
    for ck in checkpoints:
        agent = LinearQLearningAgent(
            actions=mdp.get_actions(), state_dim=2, encode_size=n**2,
            save_dir="../models/linear_4-4/", name="Transfer-Q_ck{}".format(ck),
            learn=True, load_dynamics=True, load_encoder=True,
            checkpoint="checkpoints/encoder_{}.pkl".format(ck))
        agents.append(agent)
    # BUG FIX: open_plot was previously hard-coded to False; honour the argument.
    run_agents_on_mdp(
        agents, mdp, instances=5, cumulative_plot=False, open_plot=open_plot,
        episodes=200, steps=40, reset_at_terminal=True, verbose=False)
if __name__ == "__main__":
# compare()
# source()
target() | en | 0.626523 | #!/usr/bin/env python # Python imports. # Other imports. # Run experiment and make plot. # Run experiment and make plot. # Setup MDP, Agents. # compare() # source() | 2.180717 | 2 |
tool/get_ferq.py | KEVINYZY/python-tutorial | 0 | 6619932 | # -*- coding: utf-8 -*-
# Author: XuMing <<EMAIL>>
# Data: 17/9/13
# Brief:
import sys  # NOTE(review): unused, kept because this may be a partial view

# Collect unique, stripped lines from the raw count file.
content = set()
with open("./ad_count.txt", encoding="utf-8") as f:
    for line in f:
        content.add(line.strip())

# Characters stripped from a name before it is inspected.
stop_words = ['.', '-', ' ', '%']
MIN_COUNT = 34548  # frequency threshold for keeping an entry

with open("./ad__count2.txt", mode="w", encoding="utf-8") as f:
    for line in content:
        parts = line.strip().split()
        # FIX: narrowed from a bare `except Exception: pass`; only the two
        # expected failure modes are silenced (line too short -> IndexError,
        # non-integer count -> ValueError).
        try:
            name = parts[0]
            count = parts[1]
            for ch in stop_words:
                name = name.replace(ch, "")
            # Skip purely numeric names; keep frequent, multi-character names.
            if not name.isnumeric():
                if int(count) > MIN_COUNT and len(name) > 1:
                    f.write("\t".join([parts[0], count]))
                    f.write("\n")
        except (IndexError, ValueError):
            pass
| # -*- coding: utf-8 -*-
# Author: XuMing <<EMAIL>>
# Data: 17/9/13
# Brief:
import sys
content = set()
with open("./ad_count.txt", encoding="utf-8") as f:
for line in f:
content.add(line.strip())
stop_words = ['.', '-', ' ', '%']
with open("./ad__count2.txt", mode="w", encoding="utf-8") as f:
for line in content:
parts = line.strip().split()
try:
name = parts[0]
count = parts[1]
for i in stop_words:
name = name.replace(i, "")
if not name.isnumeric():
if int(count) > 34548 and len(name) > 1:
f.write("\t".join([parts[0], count]))
f.write("\n")
except Exception:
pass
| en | 0.580388 | # -*- coding: utf-8 -*- # Author: XuMing <<EMAIL>> # Data: 17/9/13 # Brief: | 2.898242 | 3 |
plot_signal_aug.py | matteoscrugli/ecg-classification-quantized-cnn | 0 | 6619933 | import wfdb
import os
import argparse
import shutil
import math
import random
import matplotlib.pyplot as plt
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f','--files', dest='files', required=True, nargs='*', help="ecg recordings file name")
parser.add_argument('-p','--position', dest='position', nargs='*', type=int, help="position (multimple input works only for one registration at a time)")
parser.add_argument('-s','--size', dest='size', default=1000, type=int, help="frame half size")
parser.add_argument('-a','--augmentation', dest='augmentation', nargs=2, type=int, default=[0,1], help='augmentation, number of lateral shifts and pitch (two arguments)')
parser.add_argument('-c','--colprop', dest='colprop', default=1, help="columns proportion")
parser.add_argument('-r','--rowprop', dest='rowprop', default=4, help="row proportion")
args = parser.parse_args()
files = args.files
labels = ['N', 'L', 'R', 'e', 'j', 'A', 'a', 'J', 'S', 'V', 'E', 'F', '/', 'f', 'Q']
sig = []
lab = []
pos = []
lab_filtered = []
pos_filtered = []
sig_filtered = []
th = []
for i, file in enumerate(files):
f_th = open('./output/threshold/default/'+file+'_th.txt', 'r')
th.append(int(f_th.read()))
lab_filtered.append([])
pos_filtered.append([])
r = wfdb.rdrecord('./dataset/raw/'+file)
ann = wfdb.rdann('./dataset/raw/'+file, 'atr', return_label_elements=['label_store', 'symbol'])
sig.append(np.array(r.p_signal[:,0]))
# intsig = np.array(r.p_signal[:,0])
# sig_len = len(sig)
lab.append(ann.symbol)
pos.append(ann.sample)
for l, p in zip(lab[i], pos[i]):
if l in labels:
lab_filtered[i].append(l)
pos_filtered[i].append(p)
sig_filtered.append([])
f_sig_filtered = open('./output/dataset/raw_text/'+file+'_filtered.txt', 'r')
Lines = f_sig_filtered.readlines()
for line in Lines:
sig_filtered[i].append(int(line.strip()))
f_sig_filtered.close()
# os.remove('./output/dataset/raw_text/'+d+'_filtered.txt')
f_filter_delay = open('./output/dataset/raw_text/filter_delay.txt', 'r')
filter_delay = int(f_filter_delay.read())
subplot_col_prop = float(args.colprop)
subplot_row_prop = float(args.rowprop)
position = []
if args.position != None:
position.extend(args.position)
if args.size != None:
size = args.size
num_files = len(files)
num_plot = len(position) if len(files) == 1 else len(files)
num_plot = num_plot if num_plot else 1
subplot_col = round(math.sqrt(num_plot/(subplot_col_prop*subplot_row_prop)) * subplot_col_prop)
if subplot_col < 1:
subplot_col = 1
subplot_row = math.ceil(num_plot/subplot_col)
if subplot_col:
if subplot_col > num_plot:
subplot_col = num_plot
else:
subplot_col = 1
while (subplot_col*subplot_row - subplot_row) >= num_plot:
subplot_col -= 1
augmentation = args.augmentation
fig = plt.figure()
# fig.suptitle(f"ECG raw & filtered signal")
for i, P in enumerate(position):
ax1 = fig.add_subplot(subplot_row,subplot_col,i+1)
color = 'black'
ax1.plot(range(P-size,P+size),np.array(sig[0][P-size:P+size]), color=color)
# ax1.set(title=f'File: {file}')
# ax1.set(title=f'Augmentation')
ax1.set_ylabel("Raw signal") #, color=color
ax1.set_xlabel("Sample") #, color=color
ax1.grid()
for j in range(-augmentation[0]*augmentation[1],augmentation[0]*augmentation[1]+1,augmentation[1]):
color = (random.random(), random.random(), random.random())
linewidth = 2.5
# alpha = 1
alpha = 1-((abs(j)/(augmentation[0]*augmentation[1]))*0.9)
# alpha = 1-((j-(-augmentation[0]*augmentation[1]))/(2*augmentation[0]*augmentation[1])*0.9)
ax1.plot(range(P-99+j,P+99+j), [min(sig[0][P-size:P+size]) - (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1]*(99*2), linewidth=linewidth, color=color, alpha=alpha)
ax1.plot(range(P-99+j,P+99+j), [max(sig[0][P-size:P+size]) + (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1]*(99*2), linewidth=linewidth, color=color, alpha=alpha)
ax1.plot([P-0.0000001-99+j,P+0.0000001-99+j], [min(sig[0][P-size:P+size]) - (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1 , max(sig[0][P-size:P+size]) + (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1], linewidth=linewidth, color=color, alpha=alpha)
ax1.plot([P-0.0000001+99+j,P+0.0000001+99+j], [min(sig[0][P-size:P+size]) - (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1 , max(sig[0][P-size:P+size]) + (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1], linewidth=linewidth, color=color, alpha=alpha)
fig.tight_layout()
plt.show()
| import wfdb
import os
import argparse
import shutil
import math
import random
import matplotlib.pyplot as plt
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f','--files', dest='files', required=True, nargs='*', help="ecg recordings file name")
parser.add_argument('-p','--position', dest='position', nargs='*', type=int, help="position (multimple input works only for one registration at a time)")
parser.add_argument('-s','--size', dest='size', default=1000, type=int, help="frame half size")
parser.add_argument('-a','--augmentation', dest='augmentation', nargs=2, type=int, default=[0,1], help='augmentation, number of lateral shifts and pitch (two arguments)')
parser.add_argument('-c','--colprop', dest='colprop', default=1, help="columns proportion")
parser.add_argument('-r','--rowprop', dest='rowprop', default=4, help="row proportion")
args = parser.parse_args()
files = args.files
labels = ['N', 'L', 'R', 'e', 'j', 'A', 'a', 'J', 'S', 'V', 'E', 'F', '/', 'f', 'Q']
sig = []
lab = []
pos = []
lab_filtered = []
pos_filtered = []
sig_filtered = []
th = []
for i, file in enumerate(files):
f_th = open('./output/threshold/default/'+file+'_th.txt', 'r')
th.append(int(f_th.read()))
lab_filtered.append([])
pos_filtered.append([])
r = wfdb.rdrecord('./dataset/raw/'+file)
ann = wfdb.rdann('./dataset/raw/'+file, 'atr', return_label_elements=['label_store', 'symbol'])
sig.append(np.array(r.p_signal[:,0]))
# intsig = np.array(r.p_signal[:,0])
# sig_len = len(sig)
lab.append(ann.symbol)
pos.append(ann.sample)
for l, p in zip(lab[i], pos[i]):
if l in labels:
lab_filtered[i].append(l)
pos_filtered[i].append(p)
sig_filtered.append([])
f_sig_filtered = open('./output/dataset/raw_text/'+file+'_filtered.txt', 'r')
Lines = f_sig_filtered.readlines()
for line in Lines:
sig_filtered[i].append(int(line.strip()))
f_sig_filtered.close()
# os.remove('./output/dataset/raw_text/'+d+'_filtered.txt')
f_filter_delay = open('./output/dataset/raw_text/filter_delay.txt', 'r')
filter_delay = int(f_filter_delay.read())
subplot_col_prop = float(args.colprop)
subplot_row_prop = float(args.rowprop)
position = []
if args.position != None:
position.extend(args.position)
if args.size != None:
size = args.size
num_files = len(files)
num_plot = len(position) if len(files) == 1 else len(files)
num_plot = num_plot if num_plot else 1
subplot_col = round(math.sqrt(num_plot/(subplot_col_prop*subplot_row_prop)) * subplot_col_prop)
if subplot_col < 1:
subplot_col = 1
subplot_row = math.ceil(num_plot/subplot_col)
if subplot_col:
if subplot_col > num_plot:
subplot_col = num_plot
else:
subplot_col = 1
while (subplot_col*subplot_row - subplot_row) >= num_plot:
subplot_col -= 1
augmentation = args.augmentation
fig = plt.figure()
# fig.suptitle(f"ECG raw & filtered signal")
for i, P in enumerate(position):
ax1 = fig.add_subplot(subplot_row,subplot_col,i+1)
color = 'black'
ax1.plot(range(P-size,P+size),np.array(sig[0][P-size:P+size]), color=color)
# ax1.set(title=f'File: {file}')
# ax1.set(title=f'Augmentation')
ax1.set_ylabel("Raw signal") #, color=color
ax1.set_xlabel("Sample") #, color=color
ax1.grid()
for j in range(-augmentation[0]*augmentation[1],augmentation[0]*augmentation[1]+1,augmentation[1]):
color = (random.random(), random.random(), random.random())
linewidth = 2.5
# alpha = 1
alpha = 1-((abs(j)/(augmentation[0]*augmentation[1]))*0.9)
# alpha = 1-((j-(-augmentation[0]*augmentation[1]))/(2*augmentation[0]*augmentation[1])*0.9)
ax1.plot(range(P-99+j,P+99+j), [min(sig[0][P-size:P+size]) - (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1]*(99*2), linewidth=linewidth, color=color, alpha=alpha)
ax1.plot(range(P-99+j,P+99+j), [max(sig[0][P-size:P+size]) + (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1]*(99*2), linewidth=linewidth, color=color, alpha=alpha)
ax1.plot([P-0.0000001-99+j,P+0.0000001-99+j], [min(sig[0][P-size:P+size]) - (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1 , max(sig[0][P-size:P+size]) + (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1], linewidth=linewidth, color=color, alpha=alpha)
ax1.plot([P-0.0000001+99+j,P+0.0000001+99+j], [min(sig[0][P-size:P+size]) - (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1 , max(sig[0][P-size:P+size]) + (max(sig[0][P-size:P+size]) - min(sig[0][P-size:P+size]))*0.1], linewidth=linewidth, color=color, alpha=alpha)
fig.tight_layout()
plt.show()
| en | 0.261623 | # intsig = np.array(r.p_signal[:,0]) # sig_len = len(sig) # os.remove('./output/dataset/raw_text/'+d+'_filtered.txt') # fig.suptitle(f"ECG raw & filtered signal") # ax1.set(title=f'File: {file}') # ax1.set(title=f'Augmentation') #, color=color #, color=color # alpha = 1 # alpha = 1-((j-(-augmentation[0]*augmentation[1]))/(2*augmentation[0]*augmentation[1])*0.9) | 2.454337 | 2 |
test/doraamon.py | strakam/PyEasyGraphics | 5 | 6619934 | from easygraphics import *
DELAY_TIME = 200 # delay in drawing
init_graph(800, 600)
set_render_mode(RenderMode.RENDER_MANUAL)
set_origin(400, 300)
# fill the background
set_background_color(Color.WHITE)
clear_device()
# head 头
set_fill_color(color_rgb(7, 190, 234))
set_color(Color.BLACK)
draw_rounded_rect(-135, -206, 135, 54, 248, 248)
delay(DELAY_TIME)
## face 脸
set_fill_color(Color.WHITE)
draw_ellipse(0, -46, 115, 95)
delay(DELAY_TIME)
# right eye 右眼
draw_rounded_rect(-63, -169, 0, -95, 56, 56)
delay(DELAY_TIME)
# left eye 左眼
draw_rounded_rect(0, -169, 63, -95, 56, 56)
delay(DELAY_TIME)
# right eyeball 右眼球
set_fill_color(Color.BLACK)
draw_circle(-16, -116, 6)
delay(DELAY_TIME)
# left eyeball 左眼球
draw_circle(16, -116, 6)
delay(DELAY_TIME)
# nose 鼻子
set_fill_color(color_rgb(201, 62, 0))
draw_circle(0, -92, 15)
delay(DELAY_TIME)
# philtrum 人中
line(0, -77, 0, -4)
delay(DELAY_TIME)
# mouse 嘴
arc(0, -112, 180 * 5 / 4, 180 * 7 / 4, 108, 108)
delay(DELAY_TIME)
# whistkers 胡须
line(-42, -73, -90, -91)
line(42, -73, 90, -91)
line(-41, -65, -92, -65)
line(41, -65, 92, -65)
line(-42, -57, -90, -39)
line(42, -57, 90, -39)
delay(DELAY_TIME)
# body 身体
# arms 手臂
line(-76, 32, -138, 72)
line(76, 32, 138, 72)
line(-96, 96, -116, 110)
line(96, 96, 116, 110)
delay(DELAY_TIME)
# legs 腿
line(-96, 85, -96, 178) # 腿外侧
line(96, 85, 96, 178)
arc(0, 179, 0, 180, 15, 11) # 腿内侧
delay(DELAY_TIME)
# hands 手
set_fill_color(Color.WHITE)
draw_circle(-140, 99, 27)
draw_circle(140, 99, 27)
delay(DELAY_TIME)
# foots 脚
draw_rounded_rect(-112, 178, -2, 205, 24, 24)
draw_rounded_rect(2, 178, 112, 205, 24, 24)
delay(DELAY_TIME)
# fill body with blue 身体填充蓝色
set_fill_color(color_rgb(7, 190, 234))
flood_fill(0, 100, Color.BLACK)
delay(DELAY_TIME)
# tummy 肚皮
set_fill_color(Color.WHITE)
draw_circle(0, 81, 75)
fill_rect(-60, 4, 60, 24) # 用白色矩形擦掉多余的肚皮
delay(DELAY_TIME)
# pocket 口袋
draw_pie(0, 81, 180, 360, 58, 58)
delay(DELAY_TIME)
# bell 铃铛
# rope 绳子
set_fill_color(color_rgb(169, 38, 0))
draw_rounded_rect(-100, 23, 100, 42, 12, 12)
delay(DELAY_TIME)
# outline of the bell 铃铛外形
set_fill_color(color_rgb(245, 237, 38))
draw_circle(0, 49, 19)
delay(DELAY_TIME)
# hole in the bell 铃铛上的洞
set_fill_color(Color.BLACK)
draw_ellipse(0, 53, 4, 4)
delay(DELAY_TIME)
# texture on the bell 铃铛上的纹路
set_line_width(3)
line(0, 57, 0, 68)
set_line_width(1)
line(-16, 40, 16, 40)
line(-18, 44, 18, 44)
set_background_color(Color.LIGHT_RED)
pause()
close_graph()
| from easygraphics import *
DELAY_TIME = 200 # delay in drawing
init_graph(800, 600)
set_render_mode(RenderMode.RENDER_MANUAL)
set_origin(400, 300)
# fill the background
set_background_color(Color.WHITE)
clear_device()
# head 头
set_fill_color(color_rgb(7, 190, 234))
set_color(Color.BLACK)
draw_rounded_rect(-135, -206, 135, 54, 248, 248)
delay(DELAY_TIME)
## face 脸
set_fill_color(Color.WHITE)
draw_ellipse(0, -46, 115, 95)
delay(DELAY_TIME)
# right eye 右眼
draw_rounded_rect(-63, -169, 0, -95, 56, 56)
delay(DELAY_TIME)
# left eye 左眼
draw_rounded_rect(0, -169, 63, -95, 56, 56)
delay(DELAY_TIME)
# right eyeball 右眼球
set_fill_color(Color.BLACK)
draw_circle(-16, -116, 6)
delay(DELAY_TIME)
# left eyeball 左眼球
draw_circle(16, -116, 6)
delay(DELAY_TIME)
# nose 鼻子
set_fill_color(color_rgb(201, 62, 0))
draw_circle(0, -92, 15)
delay(DELAY_TIME)
# philtrum 人中
line(0, -77, 0, -4)
delay(DELAY_TIME)
# mouse 嘴
arc(0, -112, 180 * 5 / 4, 180 * 7 / 4, 108, 108)
delay(DELAY_TIME)
# whistkers 胡须
line(-42, -73, -90, -91)
line(42, -73, 90, -91)
line(-41, -65, -92, -65)
line(41, -65, 92, -65)
line(-42, -57, -90, -39)
line(42, -57, 90, -39)
delay(DELAY_TIME)
# body 身体
# arms 手臂
line(-76, 32, -138, 72)
line(76, 32, 138, 72)
line(-96, 96, -116, 110)
line(96, 96, 116, 110)
delay(DELAY_TIME)
# legs 腿
line(-96, 85, -96, 178) # 腿外侧
line(96, 85, 96, 178)
arc(0, 179, 0, 180, 15, 11) # 腿内侧
delay(DELAY_TIME)
# hands 手
set_fill_color(Color.WHITE)
draw_circle(-140, 99, 27)
draw_circle(140, 99, 27)
delay(DELAY_TIME)
# foots 脚
draw_rounded_rect(-112, 178, -2, 205, 24, 24)
draw_rounded_rect(2, 178, 112, 205, 24, 24)
delay(DELAY_TIME)
# fill body with blue 身体填充蓝色
set_fill_color(color_rgb(7, 190, 234))
flood_fill(0, 100, Color.BLACK)
delay(DELAY_TIME)
# tummy 肚皮
set_fill_color(Color.WHITE)
draw_circle(0, 81, 75)
fill_rect(-60, 4, 60, 24) # 用白色矩形擦掉多余的肚皮
delay(DELAY_TIME)
# pocket 口袋
draw_pie(0, 81, 180, 360, 58, 58)
delay(DELAY_TIME)
# bell 铃铛
# rope 绳子
set_fill_color(color_rgb(169, 38, 0))
draw_rounded_rect(-100, 23, 100, 42, 12, 12)
delay(DELAY_TIME)
# outline of the bell 铃铛外形
set_fill_color(color_rgb(245, 237, 38))
draw_circle(0, 49, 19)
delay(DELAY_TIME)
# hole in the bell 铃铛上的洞
set_fill_color(Color.BLACK)
draw_ellipse(0, 53, 4, 4)
delay(DELAY_TIME)
# texture on the bell 铃铛上的纹路
set_line_width(3)
line(0, 57, 0, 68)
set_line_width(1)
line(-16, 40, 16, 40)
line(-18, 44, 18, 44)
set_background_color(Color.LIGHT_RED)
pause()
close_graph()
| zh | 0.300138 | # delay in drawing # fill the background # head 头 ## face 脸 # right eye 右眼 # left eye 左眼 # right eyeball 右眼球 # left eyeball 左眼球 # nose 鼻子 # philtrum 人中 # mouse 嘴 # whistkers 胡须 # body 身体 # arms 手臂 # legs 腿 # 腿外侧 # 腿内侧 # hands 手 # foots 脚 # fill body with blue 身体填充蓝色 # tummy 肚皮 # 用白色矩形擦掉多余的肚皮 # pocket 口袋 # bell 铃铛 # rope 绳子 # outline of the bell 铃铛外形 # hole in the bell 铃铛上的洞 # texture on the bell 铃铛上的纹路 | 2.392334 | 2 |
tests/test_matrix.py | robtucker/pyspark-tooling | 0 | 6619935 | import pytest
import numpy as np
from pyspark.sql import SQLContext
from pyspark.mllib.linalg import Vectors as MllibVectors
from pyspark.ml.linalg import Vectors
from pyspark.mllib.linalg.distributed import IndexedRowMatrix, BlockMatrix
from pyspark_tooling import matrix
from tests import base
# @pytest.mark.focus
@pytest.mark.nlp
class TestMatrix(base.BaseTest):
    """Test common matrix methods in spark"""

    @pytest.mark.usefixtures("spark")
    def test_df_to_indexed_row_matrix(self, spark: SQLContext):
        """df_to_indexed_row_matrix should preserve row order and values."""
        data = [
            (0, Vectors.dense(1.0, 2.0, 3.0)),
            (1, Vectors.dense(4.0, 5.0, 6.0)),
            (2, Vectors.dense(7.0, 8.0, 9.0)),
        ]
        cols = ["row_number", "vector"]
        df = spark.createDataFrame(data, cols)
        res = matrix.df_to_indexed_row_matrix("row_number", "vector", df)
        assert isinstance(res, IndexedRowMatrix)
        # Round-trip through a local matrix so raw values can be compared.
        actual = [i.tolist() for i in res.toBlockMatrix().toLocalMatrix().toArray()]
        expected = [b.toArray().tolist() for a, b in data]
        assert actual == expected

    @pytest.mark.usefixtures("spark")
    def test_df_to_dense_matrix(self, spark: SQLContext):
        """df_to_dense_matrix should preserve row order and values."""
        data = [
            (0, Vectors.dense(1.0, 2.0, 3.0)),
            (1, Vectors.dense(4.0, 5.0, 6.0)),
            (2, Vectors.dense(7.0, 8.0, 9.0)),
        ]
        cols = ["row_number", "vector"]
        df = spark.createDataFrame(data, cols)
        res = matrix.df_to_dense_matrix("row_number", "vector", df)
        actual = [i.tolist() for i in res.toArray()]
        expected = [b.toArray().tolist() for a, b in data]
        assert actual == expected

    @pytest.mark.usefixtures("spark")
    def test_df_to_block_matrix(self, spark: SQLContext):
        """df_to_block_matrix should produce a BlockMatrix with the same values."""
        data = [
            (0, Vectors.dense(1.0, 2.0, 3.0)),
            (1, Vectors.dense(4.0, 5.0, 6.0)),
            (2, Vectors.dense(7.0, 8.0, 9.0)),
        ]
        cols = ["row_number", "vector"]
        df = spark.createDataFrame(data, cols)
        res = matrix.df_to_block_matrix("row_number", "vector", df)
        assert isinstance(res, BlockMatrix)
        actual = [i.tolist() for i in res.toLocalMatrix().toArray()]
        expected = [b.toArray().tolist() for a, b in data]
        assert actual == expected

    @pytest.mark.usefixtures("spark")
    def test_multiply_coordinate_matrices(self, spark: SQLContext):
        """Multiplying a 2x3 by a 3x2 coordinate matrix should give the 2x2 product."""
        a_data = [(0, MllibVectors.dense(0, 3, 4)), (1, MllibVectors.dense(1, 2, 3))]
        b_data = [
            (0, MllibVectors.dense(1, 0)),
            (1, MllibVectors.dense(4, 2)),
            (2, MllibVectors.dense(1, 3)),
        ]
        matrix_a = IndexedRowMatrix(spark._sc.parallelize(a_data)).toCoordinateMatrix()
        matrix_b = IndexedRowMatrix(spark._sc.parallelize(b_data)).toCoordinateMatrix()
        product = matrix.multiply_coordinate_matrices(matrix_a, matrix_b)
        actual = product.toBlockMatrix().toLocalMatrix().toArray()
        # Hand-computed product of the two matrices above.
        expected = [[16.0, 18.0], [12.0, 13.0]]
        assert actual.tolist() == expected

    @pytest.mark.usefixtures("spark")
    def test_sparse_dot_product_cross_join(self, spark: SQLContext):
        """Cross join of sparse vectors should yield the pairwise dot products."""
        # based on the following imaginary primary tokens
        # [a, a, b]
        # [b, c, c, c, d]
        primary_data = [
            (4, Vectors.sparse(4, [0, 1], [2.0, 1.0])),
            (5, Vectors.sparse(4, [1, 2, 3], [1.0, 3.0, 1.0])),
        ]
        # and the following imaginary secondary tokens
        # [a, c]
        # [a, b, c, d]
        secondary_data = [
            (7, Vectors.sparse(4, [0, 2], [1.0, 1.0])),
            (9, Vectors.sparse(4, [0, 1, 2, 3], [1.0, 1.0, 1.0, 1.0])),
        ]
        # if we were to write these as dense vectors:
        row_4 = np.array([2, 1, 0, 0])
        row_5 = np.array([0, 1, 3, 1])
        row_7 = np.array([1, 0, 1, 0])
        row_9 = np.array([1, 1, 1, 1])
        # calculate the expected dot product for each pair
        expected_values = [
            (4, 7, np.dot(row_4, row_7)),
            (4, 9, np.dot(row_4, row_9)),
            (5, 7, np.dot(row_5, row_7)),
            (5, 9, np.dot(row_5, row_9)),
        ]
        primary_cols = ["p_id", "p_vectors"]
        secondary_cols = ["s_id", "s_vectors"]
        primary_df = spark.createDataFrame(primary_data, primary_cols)
        secondary_df = spark.createDataFrame(secondary_data, secondary_cols)
        df = matrix.sparse_dot_product_cross_join(
            spark,
            "output",
            "p_id",
            "p_vectors",
            primary_df,
            "s_id",
            "s_vectors",
            secondary_df,
        )
        # Order deterministically before comparing against expected_values.
        res = df.orderBy("p_id", "s_id")
        expected_cols = ["p_id", "s_id", "output"]
        self.validate_values(res, expected_cols, expected_values)

    @pytest.mark.usefixtures("spark")
    def test_dense_dot_product_cross_join(self, spark: SQLContext):
        """Cross join of dense vectors should yield the pairwise dot products."""
        # based on the following imaginary primary tokens
        # [a, a, b]
        # [b, c, c, c, d]
        primary_data = [
            (4, Vectors.dense(2.0, 1.0, 0.0, 0.0)),
            (5, Vectors.dense(0.0, 1.0, 3.0, 1.0)),
        ]
        # and the following imaginary secondary tokens
        # [a, c]
        # [a, b, c, d]
        secondary_data = [
            (7, Vectors.dense(1.0, 0.0, 1.0, 0.0)),
            (9, Vectors.dense(1.0, 1.0, 1.0, 1.0)),
        ]
        # if we were to write these as dense vectors:
        row_4 = np.array([2, 1, 0, 0])
        row_5 = np.array([0, 1, 3, 1])
        row_7 = np.array([1, 0, 1, 0])
        row_9 = np.array([1, 1, 1, 1])
        # calculate the expected dot product for each pair
        expected_values = [
            (4, 7, np.dot(row_4, row_7)),
            (4, 9, np.dot(row_4, row_9)),
            (5, 7, np.dot(row_5, row_7)),
            (5, 9, np.dot(row_5, row_9)),
        ]
        primary_cols = ["p_id", "p_vectors"]
        secondary_cols = ["s_id", "s_vectors"]
        primary_df = spark.createDataFrame(primary_data, primary_cols)
        secondary_df = spark.createDataFrame(secondary_data, secondary_cols)
        df = matrix.dense_dot_product_cross_join(
            spark,
            "output",
            "p_id",
            "p_vectors",
            primary_df,
            "s_id",
            "s_vectors",
            secondary_df,
        )
        # Order deterministically before comparing against expected_values.
        res = df.orderBy("p_id", "s_id")
        expected_cols = ["p_id", "s_id", "output"]
        self.validate_values(res, expected_cols, expected_values)

    @pytest.mark.usefixtures("spark")
    def test_dense_matrix_cross_join(self, spark: SQLContext):
        """Block-matrix cross join should match the pairwise dot products."""
        # based on the following imaginary primary tokens
        # [a, a, b]
        # [b, c, c, c, d]
        primary_data = [
            (4, Vectors.dense(2.0, 1.0, 0.0, 0.0)),
            (5, Vectors.dense(0.0, 1.0, 3.0, 1.0)),
        ]
        # and the following imaginary secondary tokens
        # [a, c]
        # [a, b, c, d]
        secondary_data = [
            (7, Vectors.dense(1.0, 0.0, 1.0, 0.0)),
            (9, Vectors.dense(1.0, 1.0, 1.0, 1.0)),
        ]
        # if we were to write these as dense vectors:
        row_4 = np.array([2, 1, 0, 0])
        row_5 = np.array([0, 1, 3, 1])
        row_7 = np.array([1, 0, 1, 0])
        row_9 = np.array([1, 1, 1, 1])
        # calculate the expected dot product for each pair
        expected_values = [
            (4, 7, np.dot(row_4, row_7)),
            (4, 9, np.dot(row_4, row_9)),
            (5, 7, np.dot(row_5, row_7)),
            (5, 9, np.dot(row_5, row_9)),
        ]
        primary_cols = ["p_id", "p_vectors"]
        secondary_cols = ["s_id", "s_vectors"]
        primary_df = spark.createDataFrame(primary_data, primary_cols)
        secondary_df = spark.createDataFrame(secondary_data, secondary_cols)
        # The secondary matrix is transposed so the block product is (primary x secondary^T).
        df = matrix.dense_matrix_cross_join(
            spark,
            "output",
            "p_id",
            matrix.df_to_block_matrix("p_id", "p_vectors", primary_df),
            "s_id",
            matrix.df_to_block_matrix("s_id", "s_vectors", secondary_df).transpose(),
        )
        res = df.orderBy("p_id", "s_id")
        expected_cols = ["p_id", "s_id", "output"]
        self.validate_values(res, expected_cols, expected_values)

    @pytest.mark.usefixtures("spark")
    def test_jaccard_cross_join(self, spark: SQLContext):
        """Smoke test: jaccard_cross_join runs; result assertions are still TODO."""
        # based on the following imaginary primary tokens
        # [a, a, b]
        # [b, c, c]
        primary_data = [
            (0, Vectors.sparse(4, [0, 1], [2.0, 1.0])),
            (1, Vectors.sparse(4, [1, 2], [1.0, 2.0])),
        ]
        # and the following imaginary secondary tokens
        # [a, c]
        # [a, b, c, d]
        secondary_data = [
            (2, Vectors.sparse(4, [0, 2], [1.0, 1.0])),
            (3, Vectors.sparse(4, [0, 1, 2, 3], [1.0, 1.0, 1.0, 1.0])),
        ]
        cols = ["id", "vectors"]
        all_df = spark.createDataFrame(primary_data + secondary_data, cols)
        primary_df = spark.createDataFrame(primary_data, cols)
        secondary_df = spark.createDataFrame(secondary_data, cols)
        res = matrix.jaccard_cross_join(
            "vectors", "distances", all_df, primary_df, secondary_df
        )
        # NOTE(review): only exercises the call path; no value assertions yet.
        res.show()
        # TODO - get this min hash version of jaccard working!!
        # calculate the jaccard index as the
        # intersection divided by the union
        # expected_data = [
        #     # [a, a, b] vs [a, c]
        #     (0, 2, 1 / 3),
        #     # [a, a, b] vs [a, b, c, d]
        #     (0, 3, 2 / 4),
        #     # [b, c, c] vs [a, c]
        #     (1, 2, 1 / 3),
        #     # [b, c, c] vs [a, b, c, d]
        #     (1, 3, 2 / 4),
        # ]
| import pytest
import numpy as np
from pyspark.sql import SQLContext
from pyspark.mllib.linalg import Vectors as MllibVectors
from pyspark.ml.linalg import Vectors
from pyspark.mllib.linalg.distributed import IndexedRowMatrix, BlockMatrix
from pyspark_tooling import matrix
from tests import base
# @pytest.mark.focus
@pytest.mark.nlp
class TestMatrix(base.BaseTest):
"""Test common matrix methods in spark"""
@pytest.mark.usefixtures("spark")
def test_df_to_indexed_row_matrix(self, spark: SQLContext):
data = [
(0, Vectors.dense(1.0, 2.0, 3.0)),
(1, Vectors.dense(4.0, 5.0, 6.0)),
(2, Vectors.dense(7.0, 8.0, 9.0)),
]
cols = ["row_number", "vector"]
df = spark.createDataFrame(data, cols)
res = matrix.df_to_indexed_row_matrix("row_number", "vector", df)
assert isinstance(res, IndexedRowMatrix)
actual = [i.tolist() for i in res.toBlockMatrix().toLocalMatrix().toArray()]
expected = [b.toArray().tolist() for a, b in data]
assert actual == expected
@pytest.mark.usefixtures("spark")
def test_df_to_dense_matrix(self, spark: SQLContext):
data = [
(0, Vectors.dense(1.0, 2.0, 3.0)),
(1, Vectors.dense(4.0, 5.0, 6.0)),
(2, Vectors.dense(7.0, 8.0, 9.0)),
]
cols = ["row_number", "vector"]
df = spark.createDataFrame(data, cols)
res = matrix.df_to_dense_matrix("row_number", "vector", df)
actual = [i.tolist() for i in res.toArray()]
expected = [b.toArray().tolist() for a, b in data]
assert actual == expected
@pytest.mark.usefixtures("spark")
def test_df_to_block_matrix(self, spark: SQLContext):
data = [
(0, Vectors.dense(1.0, 2.0, 3.0)),
(1, Vectors.dense(4.0, 5.0, 6.0)),
(2, Vectors.dense(7.0, 8.0, 9.0)),
]
cols = ["row_number", "vector"]
df = spark.createDataFrame(data, cols)
res = matrix.df_to_block_matrix("row_number", "vector", df)
assert isinstance(res, BlockMatrix)
actual = [i.tolist() for i in res.toLocalMatrix().toArray()]
expected = [b.toArray().tolist() for a, b in data]
assert actual == expected
@pytest.mark.usefixtures("spark")
def test_multiply_coordinate_matrices(self, spark: SQLContext):
a_data = [(0, MllibVectors.dense(0, 3, 4)), (1, MllibVectors.dense(1, 2, 3))]
b_data = [
(0, MllibVectors.dense(1, 0)),
(1, MllibVectors.dense(4, 2)),
(2, MllibVectors.dense(1, 3)),
]
matrix_a = IndexedRowMatrix(spark._sc.parallelize(a_data)).toCoordinateMatrix()
matrix_b = IndexedRowMatrix(spark._sc.parallelize(b_data)).toCoordinateMatrix()
product = matrix.multiply_coordinate_matrices(matrix_a, matrix_b)
actual = product.toBlockMatrix().toLocalMatrix().toArray()
expected = [[16.0, 18.0], [12.0, 13.0]]
assert actual.tolist() == expected
@pytest.mark.usefixtures("spark")
def test_sparse_dot_product_cross_join(self, spark: SQLContext):
# based on the following imaginary primary tokens
# [a, a, b]
# [b, c, c, c, d]
primary_data = [
(4, Vectors.sparse(4, [0, 1], [2.0, 1.0])),
(5, Vectors.sparse(4, [1, 2, 3], [1.0, 3.0, 1.0])),
]
# and the following imaginary secondary tokens
# [a, c]
# [a, b, c, d]
secondary_data = [
(7, Vectors.sparse(4, [0, 2], [1.0, 1.0])),
(9, Vectors.sparse(4, [0, 1, 2, 3], [1.0, 1.0, 1.0, 1.0])),
]
# if we were to write these as dense vectors:
row_4 = np.array([2, 1, 0, 0])
row_5 = np.array([0, 1, 3, 1])
row_7 = np.array([1, 0, 1, 0])
row_9 = np.array([1, 1, 1, 1])
# calculate the expected dot product for each pair
expected_values = [
(4, 7, np.dot(row_4, row_7)),
(4, 9, np.dot(row_4, row_9)),
(5, 7, np.dot(row_5, row_7)),
(5, 9, np.dot(row_5, row_9)),
]
primary_cols = ["p_id", "p_vectors"]
secondary_cols = ["s_id", "s_vectors"]
primary_df = spark.createDataFrame(primary_data, primary_cols)
secondary_df = spark.createDataFrame(secondary_data, secondary_cols)
df = matrix.sparse_dot_product_cross_join(
spark,
"output",
"p_id",
"p_vectors",
primary_df,
"s_id",
"s_vectors",
secondary_df,
)
res = df.orderBy("p_id", "s_id")
expected_cols = ["p_id", "s_id", "output"]
self.validate_values(res, expected_cols, expected_values)
@pytest.mark.usefixtures("spark")
def test_dense_dot_product_cross_join(self, spark: SQLContext):
# based on the following imaginary primary tokens
# [a, a, b]
# [b, c, c, c, d]
primary_data = [
(4, Vectors.dense(2.0, 1.0, 0.0, 0.0)),
(5, Vectors.dense(0.0, 1.0, 3.0, 1.0)),
]
# and the following imaginary secondary tokens
# [a, c]
# [a, b, c, d]
secondary_data = [
(7, Vectors.dense(1.0, 0.0, 1.0, 0.0)),
(9, Vectors.dense(1.0, 1.0, 1.0, 1.0)),
]
# if we were to write these as dense vectors:
row_4 = np.array([2, 1, 0, 0])
row_5 = np.array([0, 1, 3, 1])
row_7 = np.array([1, 0, 1, 0])
row_9 = np.array([1, 1, 1, 1])
# calculate the expected dot product for each pair
expected_values = [
(4, 7, np.dot(row_4, row_7)),
(4, 9, np.dot(row_4, row_9)),
(5, 7, np.dot(row_5, row_7)),
(5, 9, np.dot(row_5, row_9)),
]
primary_cols = ["p_id", "p_vectors"]
secondary_cols = ["s_id", "s_vectors"]
primary_df = spark.createDataFrame(primary_data, primary_cols)
secondary_df = spark.createDataFrame(secondary_data, secondary_cols)
df = matrix.dense_dot_product_cross_join(
spark,
"output",
"p_id",
"p_vectors",
primary_df,
"s_id",
"s_vectors",
secondary_df,
)
res = df.orderBy("p_id", "s_id")
expected_cols = ["p_id", "s_id", "output"]
self.validate_values(res, expected_cols, expected_values)
@pytest.mark.usefixtures("spark")
def test_dense_matrix_cross_join(self, spark: SQLContext):
# based on the following imaginary primary tokens
# [a, a, b]
# [b, c, c, c, d]
primary_data = [
(4, Vectors.dense(2.0, 1.0, 0.0, 0.0)),
(5, Vectors.dense(0.0, 1.0, 3.0, 1.0)),
]
# and the following imaginary secondary tokens
# [a, c]
# [a, b, c, d]
secondary_data = [
(7, Vectors.dense(1.0, 0.0, 1.0, 0.0)),
(9, Vectors.dense(1.0, 1.0, 1.0, 1.0)),
]
# if we were to write these as dense vectors:
row_4 = np.array([2, 1, 0, 0])
row_5 = np.array([0, 1, 3, 1])
row_7 = np.array([1, 0, 1, 0])
row_9 = np.array([1, 1, 1, 1])
# calculate the expected dot product for each pair
expected_values = [
(4, 7, np.dot(row_4, row_7)),
(4, 9, np.dot(row_4, row_9)),
(5, 7, np.dot(row_5, row_7)),
(5, 9, np.dot(row_5, row_9)),
]
primary_cols = ["p_id", "p_vectors"]
secondary_cols = ["s_id", "s_vectors"]
primary_df = spark.createDataFrame(primary_data, primary_cols)
secondary_df = spark.createDataFrame(secondary_data, secondary_cols)
df = matrix.dense_matrix_cross_join(
spark,
"output",
"p_id",
matrix.df_to_block_matrix("p_id", "p_vectors", primary_df),
"s_id",
matrix.df_to_block_matrix("s_id", "s_vectors", secondary_df).transpose(),
)
res = df.orderBy("p_id", "s_id")
expected_cols = ["p_id", "s_id", "output"]
self.validate_values(res, expected_cols, expected_values)
    @pytest.mark.usefixtures("spark")
    def test_jaccard_cross_join(self, spark: SQLContext):
        """Smoke-test the min-hash Jaccard cross join over primary x secondary rows.

        NOTE(review): this currently only checks that the join executes
        (`res.show()`); the numeric expectations below are still commented out
        pending the min-hash fix flagged in the TODO.
        """
        # based on the following imaginary primary tokens
        # [a, a, b]
        # [b, c, c]
        primary_data = [
            (0, Vectors.sparse(4, [0, 1], [2.0, 1.0])),
            (1, Vectors.sparse(4, [1, 2], [1.0, 2.0])),
        ]
        # and the following imaginary secondary tokens
        # [a, c]
        # [a, b, c, d]
        secondary_data = [
            (2, Vectors.sparse(4, [0, 2], [1.0, 1.0])),
            (3, Vectors.sparse(4, [0, 1, 2, 3], [1.0, 1.0, 1.0, 1.0])),
        ]
        cols = ["id", "vectors"]
        # The full corpus is needed to fit the min-hash model; the two halves
        # are the join operands.
        all_df = spark.createDataFrame(primary_data + secondary_data, cols)
        primary_df = spark.createDataFrame(primary_data, cols)
        secondary_df = spark.createDataFrame(secondary_data, cols)
        res = matrix.jaccard_cross_join(
            "vectors", "distances", all_df, primary_df, secondary_df
        )
        res.show()
        # TODO - get this min hash version of jaccard working!!
        # calculate the jaccard index as the
        # intersection divided by the union
        # expected_data = [
        #     # [a, a, b] vs [a, c]
        #     (0, 2, 1 / 3),
        #     # [a, a, b] vs [a, b, c, d]
        #     (0, 3, 2 / 4),
        #     # [b, c, c] vs [a, c]
        #     (1, 2, 1 / 3),
        #     # [b, c, c] vs [a, b, c, d]
        #     (1, 3, 2 / 4),
        # ]
| en | 0.919495 | # @pytest.mark.focus Test common matrix methods in spark # based on the following imaginary primary tokens # [a, a, b] # [b, c, c, c, d] # and the following imaginary secondary tokens # [a, c] # [a, b, c, d] # if we were to write these as dense vectors: # calculate the expected dot product for each pair # based on the following imaginary primary tokens # [a, a, b] # [b, c, c, c, d] # and the following imaginary secondary tokens # [a, c] # [a, b, c, d] # if we were to write these as dense vectors: # calculate the expected dot product for each pair # based on the following imaginary primary tokens # [a, a, b] # [b, c, c, c, d] # and the following imaginary secondary tokens # [a, c] # [a, b, c, d] # if we were to write these as dense vectors: # calculate the expected dot product for each pair # based on the following imaginary primary tokens # [a, a, b] # [b, c, c] # and the following imaginary secondary tokens # [a, c] # [a, b, c, d] # TODO - get this min hash version of jaccard working!! # calculate the jaccard index as the # intersection divided by the union # expected_data = [ # # [a, a, b] vs [a, c] # (0, 2, 1 / 3), # # [a, a, b] vs [a, b, c, d] # (0, 3, 2 / 4), # # [b, c, c] vs [a, c] # (1, 2, 1 / 3), # # [b, c, c] vs [a, b, c, d] # (1, 3, 2 / 4), # ] | 2.764331 | 3 |
code python/Optimiation_biais.py | antoine-rocher/Master-2-internship | 2 | 6619936 | import numpy as np
import subprocess
# Bias optimisation by brute force.
def compute_model(f1, f2):
    """Run the GSRSD Gaussian-stream binary with biases (f1, f2) and parse its output.

    Returns (xi0, xi2, r): monopole, quadrupole and separation arrays,
    restricted to entries 4..19 of the r % 2.5 == 0 grid.
    """
    run_gs = subprocess.Popen(
        "/home/arocher/Stage/gaussian_stream_model/code_GSRSD/GSRSD.exe %f %f" % (f1, f2),
        shell=True, stdout=subprocess.PIPE).stdout
    raw = np.array(run_gs.read().decode().split(" "))
    raw = np.delete(raw, -1)  # drop the empty token after the trailing space
    # `np.float` was removed in NumPy 1.24 -- the builtin float is equivalent here.
    data = raw.astype(float)
    r = data[data % 2.5 == 0]
    xi0 = np.empty(len(r))  # was a bare `empty` -> NameError at runtime
    xi2 = np.empty(len(r))
    g = 1
    d = 2
    # `np.range` does not exist (AttributeError); the builtin range is intended.
    for i in range(len(r)):
        xi0[i] = data[g]
        xi2[i] = data[d]
        g += 3
        d += 3
    # `np.range(4, 20)` also does not exist -- plain slices select the same entries.
    return xi0[4:20], xi2[4:20], r[4:20]
# Chi-squared, where `x` is the simulation monopole and `bias` the (f1, f2) pair.
def Xi_2(bias, x):
    """Return the sum of squared residuals between the model and data monopoles."""
    f1, f2 = bias
    y_model = compute_model(f1, f2)[0]
    print(f1, f2)
    # Bug fix: the original returned sum(y_model - x) ** 2 -- the square of the
    # *summed* residual -- which can be zero even for a badly fitting model.
    return sum((y_model - x) ** 2)
| import numpy as np
import subprocess
#Optimation du biais par brute force
def compute_model(f1,f2):
run_gs = subprocess.Popen("/home/arocher/Stage/gaussian_stream_model/code_GSRSD/GSRSD.exe %f %f"%(f1,f2), shell=True, stdout=subprocess.PIPE).stdout
x = np.array(run_gs.read().decode().split(" "))
x = np.delete(x,-1) #enleve l'espace a la fin
data = x.astype(np.float) #lis et convertit les outputs du GS
r = data[data%2.5==0]
xi0 = empty(len(r))
xi2 = empty(len(r))
g=1
d=2
for i in np.range(len(r)):
xi0[i] = data[g]
xi2[i] = data[d]
g += 3
d += 3
return xi0[np.range(4,20)],xi2[np.range(4,20)],r[np.range(4,20)]
#Calcul du xi^2 ou x est le monopole de la simu
def Xi_2 (bias,x):
f1,f2 = bias
y_model = compute_model(f1,f2)[0]
print(f1,f2)
return sum(y_model-x)**2
| fr | 0.93738 | #Optimation du biais par brute force #enleve l'espace a la fin #lis et convertit les outputs du GS #Calcul du xi^2 ou x est le monopole de la simu | 2.413843 | 2 |
http_client_demo.py | sawyermade/detectron_ros | 0 | 6619937 | <reponame>sawyermade/detectron_ros
import os, sys, requests
import numpy as np, cv2, jsonpickle
# Uploads a frame to the Detectron HTTP endpoint.
def upload(url, frame):
    """POST a BGR frame to the Detectron server and decode its response.

    Returns [vis_image, bbList, labelList, scoreList, maskList] on success,
    or None when the request fails or the server sends an empty body.
    """
    # Prep headers for the http request.
    headers = {'content_type': 'application/json'}
    # PNG-encode the numpy frame and jsonpickle it for transport.
    _, frame_png = cv2.imencode('.png', frame)
    frame_json = jsonpickle.encode(frame_png)
    try:
        response = requests.post(url, data=frame_json, headers=headers)
        if not response.text:
            return None
        # Decode the response: element 0 is the visualisation image,
        # element -1 the list of grayscale masks.
        retList = jsonpickle.decode(response.text)
        retList[0] = cv2.imdecode(retList[0], cv2.IMREAD_COLOR)
        retList[-1] = [cv2.imdecode(m, cv2.IMREAD_GRAYSCALE) for m in retList[-1]]
        # returns [vis.png, bbList, labelList, scoreList, maskList]
        return retList
    except Exception:
        # Best-effort: network/decode failures yield None, but unlike the
        # previous bare `except:` this no longer swallows KeyboardInterrupt
        # or SystemExit.
        return None
def main():
    """Stream RealSense color frames to the Detectron server and display results."""
    # Optional CLI: `prog [domain port]`; defaults to localhost:665.
    # The original tested `len(sys.argv) < 2`, which raised an IndexError on
    # `sys.argv[2]` when exactly one argument was supplied.
    if len(sys.argv) >= 3:
        domain = sys.argv[1]
        port = sys.argv[2]
    else:
        domain = '127.0.0.1'
        port = '665'
    url = f'http://{domain}:{port}'

    # Start the RealSense capture pipeline.
    width, height = 640, 480
    try:
        import pyrealsense2 as rs
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, width, height, rs.format.bgr8, 30)
        profile = pipeline.start(config)
        while True:
            # Grab the next color frame.
            frames = pipeline.wait_for_frames()
            frame = np.asanyarray(frames.get_color_frame().get_data())
            # Send to Detectron; returns [vis.png, bbList, labelList, scoreList, maskList].
            retList = upload(url, frame)
            if not retList:
                continue
            # Show the visualisation; ESC (27) quits.
            visImg = cv2.resize(retList[0], (1200, 900))
            cv2.imshow('Inference', visImg)
            k = cv2.waitKey(1)
            if k == 27:
                cv2.destroyAllWindows()
                break
    except Exception as e:
        print(e)

if __name__ == '__main__':
    main()
| import os, sys, requests
import numpy as np, cv2, jsonpickle
# Uploads to Detectron
def upload(url, frame):
# Prep headers for http req
content_type = 'application/json'
headers = {'content_type': content_type}
# jsonpickle the numpy frame
_, frame_png = cv2.imencode('.png', frame)
frame_json = jsonpickle.encode(frame_png)
# Post and get response
try:
response = requests.post(url, data=frame_json, headers=headers)
if response.text:
# Decode response and return it
retList = jsonpickle.decode(response.text)
retList[0] = cv2.imdecode(retList[0], cv2.IMREAD_COLOR)
retList[-1] = [cv2.imdecode(m, cv2.IMREAD_GRAYSCALE) for m in retList[-1]]
# returns [vis.png, bbList, labelList, scoreList, maskList]
return retList
else:
return None
except:
return None
def main():
# Arguments
if len(sys.argv) < 2:
domain = '127.0.0.1'
port = '665'
else:
domain = sys.argv[1]
port = sys.argv[2]
url = f'http://{domain}:{port}'
# Starts captures
width, height = 640, 480
try:
import pyrealsense2 as rs
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
config.enable_stream(rs.stream.color, width, height, rs.format.bgr8, 30)
profile = pipeline.start(config)
while True:
# Get frames
frames = pipeline.wait_for_frames()
frame = np.asanyarray(frames.get_color_frame().get_data())
# Sends to detectron
# returns [vis.png, bbList, labelList, scoreList, maskList]
retList = upload(url, frame)
if not retList:
continue
# Shows img
visImg = retList[0]
visImg = cv2.resize(visImg, (1200, 900))
cv2.imshow('Inference', visImg)
k = cv2.waitKey(1)
if k == 27:
cv2.destroyAllWindows()
break
except Exception as e:
print(e)
if __name__ == '__main__':
main() | en | 0.500373 | # Uploads to Detectron # Prep headers for http req # jsonpickle the numpy frame # Post and get response # Decode response and return it # returns [vis.png, bbList, labelList, scoreList, maskList] # Arguments # Starts captures # Get frames # Sends to detectron # returns [vis.png, bbList, labelList, scoreList, maskList] # Shows img | 2.520806 | 3 |
biobb_vs/__init__.py | bioexcel/biobb_vs | 1 | 6619938 | name = "biobb_vs"
__all__ = ["fpocket", "vina", "utils"] | name = "biobb_vs"
__all__ = ["fpocket", "vina", "utils"] | none | 1 | 0.982617 | 1 | |
src/lib/make/plot/contour/common.py | r-snijders/bridge-sim | 0 | 6619939 | import itertools
from typing import Callable, List, Optional, Tuple
import numpy as np
from classify.data.responses import responses_to_traffic_array
from config import Config
from fem.params import SimParams
from fem.responses import load_fem_responses, Responses
from fem.run.opensees import OSRunner
from make.plot.distribution import load_normal_traffic_array
from model.bridge import Point
from model.response import ResponseType
from model.scenario import DamageScenario
from plot import plt
from plot.geometry import top_view_bridge
from plot.responses import plot_contour_deck
def damage_scenario_traffic_plot(
    c: Config,
    response_types: List[ResponseType],
    damage_scenario: DamageScenario,
    titles: List[str],
    saves: List[str],
    times: int = 3,
):
    """Save a contour plot of a damage scenario under normal traffic.

    One plot is produced per response type (capped at `times`), saved under the
    corresponding entry of `saves` with the corresponding title from `titles`.
    """
    # Grid of points where to record fem responses (4 x 4 over the deck).
    grid_points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 4),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 4),
        )
    ]
    c, sim_params = damage_scenario.use(
        c=c, sim_params=SimParams(response_types=response_types)
    )
    # Generate a plot for each response type.
    for t, response_type, title, save in zip(
        range(times), response_types, titles, saves
    ):
        # NOTE(review): -1 + abs(t) selects the last time sample for t == 0 and
        # then samples from the start -- confirm this indexing is intentional.
        time_index = -1 + abs(t)
        response_array = responses_to_traffic_array(
            c=c,
            traffic_array=load_normal_traffic_array(c, mins=1)[0],
            response_type=response_type,
            bridge_scenario=damage_scenario,
            points=grid_points,
            sim_runner=OSRunner,
        )
        # Bug fix: the original printed `np.array(grid).shape`, but `grid` is an
        # undefined name (NameError); `grid_points` is what was meant.
        print(f"grid_points.shape = {np.array(grid_points).shape}")
        mean_response_array = np.mean(response_array, axis=0).T
        print(f"response_array.shape = {np.array(response_array).shape}")
        print(f"mean_response_array.shape = {np.array(mean_response_array).shape}")
        top_view_bridge(c.bridge, abutments=True, piers=True)
        responses = Responses.from_responses(
            response_type=response_type,
            responses=[
                (response_array[time_index][p], point)
                for p, point in enumerate(grid_points)
            ],
        )
        plot_contour_deck(c=c, responses=responses, center_norm=True, levels=100)
        plt.title(title)
        # `plt` here is the project plot wrapper, which provides save().
        plt.save(save)
        plt.close()
| import itertools
from typing import Callable, List, Optional, Tuple
import numpy as np
from classify.data.responses import responses_to_traffic_array
from config import Config
from fem.params import SimParams
from fem.responses import load_fem_responses, Responses
from fem.run.opensees import OSRunner
from make.plot.distribution import load_normal_traffic_array
from model.bridge import Point
from model.response import ResponseType
from model.scenario import DamageScenario
from plot import plt
from plot.geometry import top_view_bridge
from plot.responses import plot_contour_deck
def damage_scenario_traffic_plot(
c: Config,
response_types: List[ResponseType],
damage_scenario: DamageScenario,
titles: List[str],
saves: List[str],
times: int = 3,
):
"""Save a contour plot of a scenarios scenario under normal traffic."""
# Grid of points where to record fem.
grid_points = [
Point(x=x, y=0, z=z)
for x, z in itertools.product(
np.linspace(c.bridge.x_min, c.bridge.x_max, 4),
np.linspace(c.bridge.z_min, c.bridge.z_max, 4),
)
]
c, sim_params = damage_scenario.use(
c=c, sim_params=SimParams(response_types=response_types)
)
# Generate a plot for each response type.
for t, response_type, title, save in zip(
range(times), response_types, titles, saves
):
time_index = -1 + abs(t)
response_array = responses_to_traffic_array(
c=c,
traffic_array=load_normal_traffic_array(c, mins=1)[0],
response_type=response_type,
bridge_scenario=damage_scenario,
points=grid_points,
sim_runner=OSRunner,
)
print(f"grid.shape = {np.array(grid).shape}")
mean_response_array = np.mean(response_array, axis=0).T
print(f"response_array.shape = {np.array(response_array).shape}")
print(f"mean_response_array.shape = {np.array(mean_response_array).shape}")
top_view_bridge(c.bridge, abutments=True, piers=True)
responses = Responses.from_responses(
response_type=response_type,
responses=[
(response_array[time_index][p], point)
for p, point in enumerate(grid_points)
],
)
plot_contour_deck(c=c, responses=responses, center_norm=True, levels=100)
plt.title(title)
plt.save(save)
plt.close()
| en | 0.749246 | Save a contour plot of a scenarios scenario under normal traffic. # Grid of points where to record fem. # Generate a plot for each response type. | 2.549906 | 3 |
dic_cont_caractere.py | rafaelblira/python-progressivo | 0 | 6619940 | <gh_stars>0
import pprint

# Count how many times each character occurs in the sample text.
texto = 'Curso Python Progressivo'
contador = {}
for char in texto:
    # dict.get with a default replaces the setdefault-then-increment idiom.
    contador[char] = contador.get(char, 0) + 1
pprint.pprint(contador)
pprint.pprint(contador) | import pprint
texto = 'Curso Python Progressivo'
contador = {}
for char in texto:
contador.setdefault(char, 0)
contador[char] += 1
pprint.pprint(contador) | none | 1 | 3.047545 | 3 | |
Scripts/python/scripts mundo 1/scripts sem cor/Desafio017.py | BrenoNAlmeida/Scripts-Escola | 0 | 6619941 | <filename>Scripts/python/scripts mundo 1/scripts sem cor/Desafio017.py
# Desafio 017: hypotenuse of a right triangle from its two legs (Pythagoras).
from math import sqrt
# Read both legs (catetos) from the user.
c1=float(input('cateto oposto = '))
c2=float(input('cateto adjacente ='))
# hypotenuse = sqrt(opposite^2 + adjacent^2)
H= sqrt (c1**2 + c2**2)
# {:.3} formats the result to three significant digits.
print ('o valor da hipotenusa é {:.3}'.format(H))
from math import sqrt
c1=float(input('cateto oposto = '))
c2=float(input('cateto adjacente ='))
H= sqrt (c1**2 + c2**2)
print ('o valor da hipotenusa é {:.3}'.format(H)) | none | 1 | 3.875208 | 4 | |
cimpyorm/Model/Elements/__init__.py | bertramr/cimpyorm | 4 | 6619942 | <filename>cimpyorm/Model/Elements/__init__.py
# Copyright (c) 2018 - 2020 Institute for High Voltage Technology and Institute for High Voltage Equipment and Grids, Digitalization and Power Economics
# RWTH Aachen University
# Contact: <NAME> (<EMAIL>)
# #
# This module is part of CIMPyORM.
# #
# CIMPyORM is licensed under the BSD-3-Clause license.
# For further information see LICENSE in the project's root directory.
#
from .Base import CIMNamespace, CIMPackage, CIMProfile
from .Enum import *
from .Class import *
from .Datatype import *
from .Property import *
| <filename>cimpyorm/Model/Elements/__init__.py
# Copyright (c) 2018 - 2020 Institute for High Voltage Technology and Institute for High Voltage Equipment and Grids, Digitalization and Power Economics
# RWTH Aachen University
# Contact: <NAME> (<EMAIL>)
# #
# This module is part of CIMPyORM.
# #
# CIMPyORM is licensed under the BSD-3-Clause license.
# For further information see LICENSE in the project's root directory.
#
from .Base import CIMNamespace, CIMPackage, CIMProfile
from .Enum import *
from .Class import *
from .Datatype import *
from .Property import *
| en | 0.835069 | # Copyright (c) 2018 - 2020 Institute for High Voltage Technology and Institute for High Voltage Equipment and Grids, Digitalization and Power Economics # RWTH Aachen University # Contact: <NAME> (<EMAIL>) # # # This module is part of CIMPyORM. # # # CIMPyORM is licensed under the BSD-3-Clause license. # For further information see LICENSE in the project's root directory. # | 1.536626 | 2 |
Aula5/Q3.py | Anderson842/LabADAGrupoC | 0 | 6619943 | <gh_stars>0
#Q3: What is the time complexity of
nums = [4, 78, 9, 84, 18]
for num in range(0,len(nums), 2):
print(nums[num])
#Es de orden lineal O(n) | #Q3: What is the time complexity of
nums = [4, 78, 9, 84, 18]
for num in range(0,len(nums), 2):
print(nums[num])
#Es de orden lineal O(n) | en | 0.675332 | #Q3: What is the time complexity of #Es de orden lineal O(n) | 3.389627 | 3 |
docs/tests/W0711.py | mrfyda/codacy-pylint-python3 | 17 | 6619944 | <gh_stars>10-100
##Patterns: W0711
# Fixture for pylint rule W0711 ("exception to catch is the result of a binary
# operation") -- the `or` in the except clause below is intentionally wrong and
# must stay as-is; the ##Warn marker tags the line expected to be flagged.
try:
    1 / 0
    ##Warn: W0711
except ValueError or TypeError:
    pass
| ##Patterns: W0711
try:
1 / 0
##Warn: W0711
except ValueError or TypeError:
pass | ja | 0.413861 | ##Patterns: W0711 ##Warn: W0711 | 1.212493 | 1 |
fundamentals/14-advance-python-modules/4-collections-module-namedtuple.py | davidokun/Python | 0 | 6619945 | <reponame>davidokun/Python
"""
namedtuple
Create a new object type creating fields for each element
"""
from collections import namedtuple
# Normal tuple
t = (1, 2, 3)
print(t[0])
Dog = namedtuple('Dog', 'age breed name')
sam = Dog(age=2, breed='Lab', name='Sammy')
print(sam.name)
print(sam[2])
print(sam.breed)
print(sam[1])
| """
namedtuple
Create a new object type creating fields for each element
"""
from collections import namedtuple
# Normal tuple
t = (1, 2, 3)
print(t[0])
Dog = namedtuple('Dog', 'age breed name')
sam = Dog(age=2, breed='Lab', name='Sammy')
print(sam.name)
print(sam[2])
print(sam.breed)
print(sam[1]) | en | 0.588099 | namedtuple Create a new object type creating fields for each element # Normal tuple | 4.384576 | 4 |
main.py | EduFreit4s/end-to-end-encryption | 1 | 6619946 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Python e PyQt5.
Acessando/interagindo com um arquivo ``*.ui`` (XML).
"""
from PyQt5.QtWidgets import QApplication
from PyQt5.uic import loadUi
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtCore import QUrl
from libraries import cripto
class MeuAplicativo:
    """Controller bound to the Qt window loaded from ``mainwindow.ui``.

    Keeps one RSA-style key pair per chat participant (A and B) and relays
    encrypted messages between their two chat panes.
    """
    # Key material for both sides: public exponent, private exponent and
    # semiprime modulus. Zero means "not generated yet".
    key_a_public, key_a_private, key_a_sprime, key_b_public, key_b_private, key_b_sprime = 0, 0, 0, 0, 0, 0
    # Last ciphertext sent in each direction, held as a decimal string.
    a_2_b_txt, b_2_a_txt = "", ""
    # Display-mode toggle: True -> decimal, False -> hexadecimal.
    state = False
    def __init__(self, window):
        """Bind the widgets of the loaded UI and connect the button handlers."""
        # Chat A widgets
        self.push_button_a = window.btn_a
        self.a_up = window.a_up
        self.a_down = window.a_down
        self.chat_a = window.a_chat
        self.txt_a_public = window.a_e
        self.txt_a_private = window.a_d
        self.txt_a_sprime = window.a_n
        # Chat B widgets
        self.push_button_b = window.btn_b
        self.b_up = window.b_up
        self.b_down = window.b_down
        self.chat_b = window.b_chat
        self.txt_b_public = window.b_e
        self.txt_b_private = window.b_d
        self.txt_b_sprime = window.b_n
        # Auxiliary buttons
        self.push_button_dec = window.btn_dec
        self.push_button_key = window.btn_key
        self.push_button_email = window.btn_email
        self.push_button_git = window.btn_git
        # Connect each handler method to its button's click event.
        self.push_button_dec.clicked.connect(self._on_button_clicked_dec)
        self.push_button_key.clicked.connect(self._on_button_clicked_key)
        self.push_button_email.clicked.connect(lambda: QDesktopServices.openUrl(QUrl("mailto:<EMAIL>")))
        self.push_button_git.clicked.connect(lambda: QDesktopServices.openUrl(QUrl("https://github.com/EduFreit4s/end-to-end-encryption")))
        self.push_button_a.clicked.connect(self._on_button_clicked_a)
        self.push_button_b.clicked.connect(self._on_button_clicked_b)
    # Section: key generation ("Gerador de chaves").
    """Gerador de chaves"""
    def keys(self):
        """Generate fresh (public, semiprime, private) key triples for A and B."""
        self.key_a_public, self.key_a_sprime, self.key_a_private = cripto.generator()
        self.key_b_public, self.key_b_sprime, self.key_b_private = cripto.generator()
        self.output()
    def empty_key(self):
        """Generate keys on first use if none exist yet."""
        # The `or` chain evaluates to 0 only when every key is still 0.
        if (self.key_a_public or self.key_a_sprime or self.key_b_public or self.key_b_sprime) == 0:
            self.keys()
    # Section: hexadecimal/decimal display ("Exibição hexa/decimal").
    """Exibição hexa/decimal"""
    def output(self):
        """Refresh ciphertext panes and key fields in the current display mode."""
        if self.state:
            # Decimal mode: show the raw decimal strings.
            self.push_button_dec.setText("Decimal")
            if self.a_2_b_txt != "":
                self.a_up.setPlainText(self.a_2_b_txt)
            if self.b_2_a_txt != "":
                self.b_up.setPlainText(self.b_2_a_txt)
        else:
            # Hexadecimal mode: hex() output with the "0x" prefix stripped.
            self.push_button_dec.setText("Hexadecimal")
            if self.a_2_b_txt != "":
                self.a_up.setPlainText(hex(int(self.a_2_b_txt))[2:])
            if self.b_2_a_txt != "":
                self.b_up.setPlainText(hex(int(self.b_2_a_txt))[2:])
        if self.state:
            self.txt_a_public.setText(str(self.key_a_public))
            self.txt_a_private.setText(str(self.key_a_private))
            self.txt_a_sprime.setText(str(self.key_a_sprime))
            self.txt_b_public.setText(str(self.key_b_public))
            self.txt_b_private.setText(str(self.key_b_private))
            self.txt_b_sprime.setText(str(self.key_b_sprime))
        else:
            self.txt_a_public.setText(hex(self.key_a_public))
            self.txt_a_private.setText(hex(self.key_a_private))
            self.txt_a_sprime.setText(hex(self.key_a_sprime))
            self.txt_b_public.setText(hex(self.key_b_public))
            self.txt_b_private.setText(hex(self.key_b_private))
            self.txt_b_sprime.setText(hex(self.key_b_sprime))
    # Section: encryption/decryption ("Tradutor").
    """Tradutor"""
    def code(self, txt, user):
        """Encrypt `txt` from `user` ("A" or "B") with the recipient's public key."""
        if user == "A":
            self.a_2_b_txt = cripto.lock(txt, self.key_b_public, self.key_b_sprime)
        elif user == "B":
            self.b_2_a_txt = cripto.lock(txt, self.key_a_public, self.key_a_sprime)
    def decode(self, user):
        """Decrypt the pending message for `user` with their private key and show it."""
        if user == "A":
            self.a_down.setPlainText(cripto.unlock(self.b_2_a_txt, self.key_a_private, self.key_a_sprime))
        elif user == "B":
            self.b_down.setPlainText(cripto.unlock(self.a_2_b_txt, self.key_b_private, self.key_b_sprime))
    # Section: button-click handlers.
    """Método é executado quando o botão é pressionado."""
    def _on_button_clicked_dec(self): # Toggle data display between hexadecimal and decimal
        self.state = not self.state
        self.output()
    def _on_button_clicked_key(self):
        """Regenerate both key pairs."""
        self.keys()
    def _on_button_clicked_a(self):
        """Send the text typed in chat A to participant B."""
        if self.chat_a.text():
            self.empty_key()
            self.output()
            self.code(self.chat_a.text(), "A") # Encrypt the typed text and send it to B
            self.output() # Show the encrypted message that was sent
            self.chat_a.clear() # Clear the input field
            self.decode("B") # Ask B to decrypt the message
    def _on_button_clicked_b(self):
        """Send the text typed in chat B to participant A."""
        if self.chat_b.text():
            self.empty_key()
            self.output()
            self.code(self.chat_b.text(), "B") # Encrypt the typed text and send it to A
            self.output() # Show the encrypted message that was sent
            self.chat_b.clear() # Clear the input field
            self.decode("A") # Ask A to decrypt the message
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
# Lendo o arquivo de interface.
window = loadUi('forms/mainwindow.ui')
ui = MeuAplicativo(window=window)
window.show()
sys.exit(app.exec_())
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Python e PyQt5.
Acessando/interagindo com um arquivo ``*.ui`` (XML).
"""
from PyQt5.QtWidgets import QApplication
from PyQt5.uic import loadUi
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtCore import QUrl
from libraries import cripto
class MeuAplicativo:
"""Classe."""
key_a_public, key_a_private, key_a_sprime, key_b_public, key_b_private, key_b_sprime = 0, 0, 0, 0, 0, 0
a_2_b_txt, b_2_a_txt = "", ""
state = False
def __init__(self, window):
"""Construtor"""
# Chat A
self.push_button_a = window.btn_a
self.a_up = window.a_up
self.a_down = window.a_down
self.chat_a = window.a_chat
self.txt_a_public = window.a_e
self.txt_a_private = window.a_d
self.txt_a_sprime = window.a_n
# Chat B
self.push_button_b = window.btn_b
self.b_up = window.b_up
self.b_down = window.b_down
self.chat_b = window.b_chat
self.txt_b_public = window.b_e
self.txt_b_private = window.b_d
self.txt_b_sprime = window.b_n
#btn
self.push_button_dec = window.btn_dec
self.push_button_key = window.btn_key
self.push_button_email = window.btn_email
self.push_button_git = window.btn_git
# Conectando um método ao evento de clique do botão.
self.push_button_dec.clicked.connect(self._on_button_clicked_dec)
self.push_button_key.clicked.connect(self._on_button_clicked_key)
self.push_button_email.clicked.connect(lambda: QDesktopServices.openUrl(QUrl("mailto:<EMAIL>")))
self.push_button_git.clicked.connect(lambda: QDesktopServices.openUrl(QUrl("https://github.com/EduFreit4s/end-to-end-encryption")))
self.push_button_a.clicked.connect(self._on_button_clicked_a)
self.push_button_b.clicked.connect(self._on_button_clicked_b)
"""Gerador de chaves"""
def keys(self):
self.key_a_public, self.key_a_sprime, self.key_a_private = cripto.generator()
self.key_b_public, self.key_b_sprime, self.key_b_private = cripto.generator()
self.output()
def empty_key(self):
if (self.key_a_public or self.key_a_sprime or self.key_b_public or self.key_b_sprime) == 0:
self.keys()
"""Exibição hexa/decimal"""
def output(self):
if self.state:
self.push_button_dec.setText("Decimal")
if self.a_2_b_txt != "":
self.a_up.setPlainText(self.a_2_b_txt)
if self.b_2_a_txt != "":
self.b_up.setPlainText(self.b_2_a_txt)
else:
self.push_button_dec.setText("Hexadecimal")
if self.a_2_b_txt != "":
self.a_up.setPlainText(hex(int(self.a_2_b_txt))[2:])
if self.b_2_a_txt != "":
self.b_up.setPlainText(hex(int(self.b_2_a_txt))[2:])
if self.state:
self.txt_a_public.setText(str(self.key_a_public))
self.txt_a_private.setText(str(self.key_a_private))
self.txt_a_sprime.setText(str(self.key_a_sprime))
self.txt_b_public.setText(str(self.key_b_public))
self.txt_b_private.setText(str(self.key_b_private))
self.txt_b_sprime.setText(str(self.key_b_sprime))
else:
self.txt_a_public.setText(hex(self.key_a_public))
self.txt_a_private.setText(hex(self.key_a_private))
self.txt_a_sprime.setText(hex(self.key_a_sprime))
self.txt_b_public.setText(hex(self.key_b_public))
self.txt_b_private.setText(hex(self.key_b_private))
self.txt_b_sprime.setText(hex(self.key_b_sprime))
"""Tradutor"""
def code(self, txt, user):
if user == "A":
self.a_2_b_txt = cripto.lock(txt, self.key_b_public, self.key_b_sprime)
elif user == "B":
self.b_2_a_txt = cripto.lock(txt, self.key_a_public, self.key_a_sprime)
def decode(self, user):
if user == "A":
self.a_down.setPlainText(cripto.unlock(self.b_2_a_txt, self.key_a_private, self.key_a_sprime))
elif user == "B":
self.b_down.setPlainText(cripto.unlock(self.a_2_b_txt, self.key_b_private, self.key_b_sprime))
"""Método é executado quando o botão é pressionado."""
def _on_button_clicked_dec(self): #Alterna exibição de dados para hexadecimal ou inteiro
self.state = not self.state
self.output()
def _on_button_clicked_key(self):
self.keys()
def _on_button_clicked_a(self):
if self.chat_a.text():
self.empty_key()
self.output()
self.code(self.chat_a.text(), "A") # Codifica texto digitado e envia para B
self.output() # Mostra texto codificado enviado
self.chat_a.clear() # Limpa o campo de digitação
self.decode("B") # Pede para B decodificar a mensagem
def _on_button_clicked_b(self):
if self.chat_b.text():
self.empty_key()
self.output()
self.code(self.chat_b.text(), "B") # Codifica texto digitado e envia para A
self.output() # Mostra texto codificado enviado
self.chat_b.clear() # Limpa o campo de digitação
self.decode("A") # Pede para A decodificar a mensagem
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
# Lendo o arquivo de interface.
window = loadUi('forms/mainwindow.ui')
ui = MeuAplicativo(window=window)
window.show()
sys.exit(app.exec_()) | pt | 0.928435 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Python e PyQt5.
Acessando/interagindo com um arquivo ``*.ui`` (XML). Classe. Construtor # Chat A # Chat B #btn # Conectando um método ao evento de clique do botão. Gerador de chaves Exibição hexa/decimal Tradutor Método é executado quando o botão é pressionado. #Alterna exibição de dados para hexadecimal ou inteiro # Codifica texto digitado e envia para B # Mostra texto codificado enviado # Limpa o campo de digitação # Pede para B decodificar a mensagem # Codifica texto digitado e envia para A # Mostra texto codificado enviado # Limpa o campo de digitação # Pede para A decodificar a mensagem # Lendo o arquivo de interface. | 2.368691 | 2 |
lldl/model/residual.py | svenschultze/Lidar-Localization-DL | 2 | 6619947 | <reponame>svenschultze/Lidar-Localization-DL
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from lldl.model.utils import wrap_padding
def resnet_block_1d(x, filters, kernel_size):
    """Residual 1-D block: two wrap-padded Conv/BN stages plus a skip connection."""
    pad = wrap_padding(kernel_size // 2)
    out = Lambda(pad)(x)
    out = Conv1D(filters, kernel_size, use_bias=False)(out)
    out = BatchNormalization()(out)
    out = ReLU()(out)
    out = Lambda(pad)(out)
    out = Conv1D(filters, kernel_size, use_bias=False)(out)
    out = BatchNormalization()(out)
    # Skip connection: add the block input before the final activation.
    out = Add()([x, out])
    return ReLU()(out)
def conv_block_1d(x, filters, kernel_size):
    """Two wrap-padded Conv1D/BatchNorm/ReLU stages without a skip connection.

    Used in build_model wherever the filter count changes between stages, so
    no residual add is possible.
    """
    pad = wrap_padding(kernel_size // 2)
    out = Lambda(pad)(x)
    out = Conv1D(filters, kernel_size, use_bias=False)(out)
    out = BatchNormalization()(out)
    out = ReLU()(out)
    out = Lambda(pad)(out)
    out = Conv1D(filters, kernel_size, use_bias=False)(out)
    out = BatchNormalization()(out)
    return ReLU()(out)
def build_model():
    """Build the residual 1-D CNN mapping a 360-beam lidar scan to a 2-vector."""
    inputs = Input((360, 1))
    # Stem: widen to 16 channels with a large receptive field, then downsample.
    net = conv_block_1d(inputs, 16, 21)
    net = MaxPool1D(2)(net)
    for _ in range(3):
        net = resnet_block_1d(net, 16, 9)
    net = MaxPool1D(2)(net)
    # Widen to 32 channels (conv block, no skip) and refine with residuals.
    net = conv_block_1d(net, 32, 9)
    for _ in range(2):
        net = resnet_block_1d(net, 32, 9)
    net = MaxPool1D(2)(net)
    # Widen to 64 channels and refine again.
    net = conv_block_1d(net, 64, 9)
    for _ in range(2):
        net = resnet_block_1d(net, 64, 9)
    # Dense head producing the 2-dimensional output.
    net = Flatten()(net)
    net = Dense(128, activation='relu')(net)
    net = Dense(64, activation='relu')(net)
    outputs = Dense(2)(net)
    return Model(inputs=inputs, outputs=outputs, name="residual_model")
return Model(inputs=input, outputs=output, name="residual_model") | import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from lldl.model.utils import wrap_padding
def resnet_block_1d(x, filters, kernel_size):
c1 = Lambda(wrap_padding(int(kernel_size / 2)))(x)
c1 = Conv1D(filters, kernel_size, use_bias=False)(c1)
c1 = BatchNormalization()(c1)
c1 = ReLU()(c1)
c2 = Lambda(wrap_padding(int(kernel_size / 2)))(c1)
c2 = Conv1D(filters, kernel_size, use_bias=False)(c2)
c2 = BatchNormalization()(c2)
c2 = Add()([x, c2])
return ReLU()(c2)
def conv_block_1d(x, filters, kernel_size):
c1 = Lambda(wrap_padding(int(kernel_size / 2)))(x)
c1 = Conv1D(filters, kernel_size, use_bias=False)(c1)
c1 = BatchNormalization()(c1)
c1 = ReLU()(c1)
c2 = Lambda(wrap_padding(int(kernel_size / 2)))(c1)
c2 = Conv1D(filters, kernel_size, use_bias=False)(c2)
c2 = BatchNormalization()(c2)
return ReLU()(c2)
def build_model():
x = input = Input((360,1))
x = conv_block_1d(x, 16, 21)
x = MaxPool1D(2)(x)
x = resnet_block_1d(x, 16, 9)
x = resnet_block_1d(x, 16, 9)
x = resnet_block_1d(x, 16, 9)
x = MaxPool1D(2)(x)
x = conv_block_1d(x, 32, 9)
x = resnet_block_1d(x, 32, 9)
x = resnet_block_1d(x, 32, 9)
x = MaxPool1D(2)(x)
x = conv_block_1d(x, 64, 9)
x = resnet_block_1d(x, 64, 9)
x = resnet_block_1d(x, 64, 9)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
x = Dense(64, activation='relu')(x)
output = Dense(2)(x)
return Model(inputs=input, outputs=output, name="residual_model") | none | 1 | 2.989159 | 3 | |
RPG/bot_classes/locations/spaceship/captain_bridge.py | JuanShotLC/Negative_Space_Bot | 0 | 6619948 | from RPG.consts.game_states import CAPTAIN_BRIDGE
from RPG.bot_classes.locations.base_location import BaseLocation
class CaptainBridge(BaseLocation):
    """Captain's bridge location of the player's spaceship.

    Offers navigation to the on-board computer, the personal cabin, the cargo
    hold, leaving the ship (only while landed), and the main menu.
    """
    def __init__(self, game, spaceship):
        """Register the location text (Spanish, user-facing) and build its keyboard."""
        super().__init__(game, CAPTAIN_BRIDGE, 'Puente de mando', 'Vas al puente del capitán., '
                                                                  'en todas partes se ven varios elementos '
                                                                  'control de la nave. En gran panorámica '
                                                                  'el ojo de buey abre una vista de la galaxia. A '
                                                                  'el panel de control principal que ves '
                                                                  'interfaz de control del ordenador de a bordo')
        self.spaceship = spaceship
        self.reply_keyboard.row('📟Ordenador de a bordo', '🛏Cabina personal')
        self.reply_keyboard.row('📦Bodega', '👣Salir de la nave')
        self.reply_keyboard.row('📟Menú principal')
    def handle(self, message):
        """Dispatch the user's keyboard choice to the matching sub-location."""
        if message.text == '📟Ordenador de a bordo':
            self.spaceship.computer.start(message)
        elif message.text == '🛏Cabina personal':
            self.spaceship.cabin.start(message)
        elif message.text == '📦Bodega':
            self.spaceship.cargo_hold.start(message)
        elif message.text == '👣Salir de la nave':
            # Leaving the ship is only possible when landed on a planet.
            if not self.game.current_planet:
                self.game.bot.send_message(message.chat.id, '¿Un paseo espacial?0_o No es la mejor idea.',
                                           reply_markup=self.reply_keyboard)
            else:
                self.game.current_planet.start(message)
        elif message.text == '📟Menú principal':
            self.game.main_menu.start(message)
        else:
            self.show_input_error(message)
| from RPG.consts.game_states import CAPTAIN_BRIDGE
from RPG.bot_classes.locations.base_location import BaseLocation
class CaptainBridge(BaseLocation):
def __init__(self, game, spaceship):
super().__init__(game, CAPTAIN_BRIDGE, 'Puente de mando', 'Vas al puente del capitán., '
'en todas partes se ven varios elementos '
'control de la nave. En gran panorámica '
'el ojo de buey abre una vista de la galaxia. A '
'el panel de control principal que ves '
'interfaz de control del ordenador de a bordo')
self.spaceship = spaceship
self.reply_keyboard.row('📟Ordenador de a bordo', '🛏Cabina personal')
self.reply_keyboard.row('📦Bodega', '👣Salir de la nave')
self.reply_keyboard.row('📟Menú principal')
def handle(self, message):
if message.text == '📟Ordenador de a bordo':
self.spaceship.computer.start(message)
elif message.text == '🛏Cabina personal':
self.spaceship.cabin.start(message)
elif message.text == '📦Bodega':
self.spaceship.cargo_hold.start(message)
elif message.text == '👣Salir de la nave':
if not self.game.current_planet:
self.game.bot.send_message(message.chat.id, '¿Un paseo espacial?0_o No es la mejor idea.',
reply_markup=self.reply_keyboard)
else:
self.game.current_planet.start(message)
elif message.text == '📟Menú principal':
self.game.main_menu.start(message)
else:
self.show_input_error(message)
| none | 1 | 2.931104 | 3 | |
tests/test_direction.py | Strubbl/map-machine | 62 | 6619949 | """
Test direction processing.
"""
import numpy as np
from map_machine.feature.direction import DirectionSet, parse_vector, Sector
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def test_compass_points_1() -> None:
"""Test north direction."""
assert np.allclose(parse_vector("N"), np.array([0, -1]))
def test_compass_points_2() -> None:
"""Test north-west direction."""
root: np.float64 = -np.sqrt(2) / 2
assert np.allclose(parse_vector("NW"), np.array([root, root]))
def test_compass_points_3() -> None:
"""Test south-south-west direction."""
assert np.allclose(parse_vector("SSW"), np.array([-0.38268343, 0.92387953]))
def test_invalid() -> None:
"""Test invalid direction representation string."""
assert not parse_vector("O")
def test_degree() -> None:
"""Test east direction."""
assert np.allclose(parse_vector("90"), np.array([1, 0]))
def test_main_direction() -> None:
"""Test main direction computing."""
assert DirectionSet("0").is_right() is None
assert DirectionSet("70").is_right() is True
assert DirectionSet("270").is_right() is False
assert DirectionSet("180").is_right() is None
def test_sector_parsing() -> None:
"""Test constructing sector from the string representation."""
Sector("0", angle=0)
Sector("90", angle=0)
Sector("-90", angle=0)
sector: Sector = Sector("0-180")
assert np.allclose(sector.start, [0, -1])
assert np.allclose(sector.end, [0, 1])
| """
Test direction processing.
"""
import numpy as np
from map_machine.feature.direction import DirectionSet, parse_vector, Sector
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def test_compass_points_1() -> None:
"""Test north direction."""
assert np.allclose(parse_vector("N"), np.array([0, -1]))
def test_compass_points_2() -> None:
"""Test north-west direction."""
root: np.float64 = -np.sqrt(2) / 2
assert np.allclose(parse_vector("NW"), np.array([root, root]))
def test_compass_points_3() -> None:
"""Test south-south-west direction."""
assert np.allclose(parse_vector("SSW"), np.array([-0.38268343, 0.92387953]))
def test_invalid() -> None:
"""Test invalid direction representation string."""
assert not parse_vector("O")
def test_degree() -> None:
"""Test east direction."""
assert np.allclose(parse_vector("90"), np.array([1, 0]))
def test_main_direction() -> None:
"""Test main direction computing."""
assert DirectionSet("0").is_right() is None
assert DirectionSet("70").is_right() is True
assert DirectionSet("270").is_right() is False
assert DirectionSet("180").is_right() is None
def test_sector_parsing() -> None:
"""Test constructing sector from the string representation."""
Sector("0", angle=0)
Sector("90", angle=0)
Sector("-90", angle=0)
sector: Sector = Sector("0-180")
assert np.allclose(sector.start, [0, -1])
assert np.allclose(sector.end, [0, 1])
| en | 0.906985 | Test direction processing. Test north direction. Test north-west direction. Test south-south-west direction. Test invalid direction representation string. Test east direction. Test main direction computing. Test constructing sector from the string representation. | 3.065594 | 3 |
test/perf.py | doublereedkurt/uhttp | 0 | 6619950 | '''
Module for comparing the performance of various WSGI containers.
'''
import thread
import urllib2
import time
IP = '127.1.2.3'
def make_client_requests(n=100, thread=thread):
times = []
lock = thread.allocate_lock()
lock.acquire()
def run():
for i in range(n):
start = time.time()
urllib2.urlopen('http://' + IP + ':8080').read()
times.append(time.time() - start)
thread.start_new_thread(run, ())
lock.acquire()
return times
def hello_wsgi(environ, start_response):
start_response(200)
return ["hello world!"]
def test_gevent():
from gevent import pywsgi
import gevent.threading
import gevent
server = pywsgi.WSGIServer((IP, 8080), hello_wsgi)
server.start()
t = make_client_requests(thread=gevent.thread)
server.stop()
print "avg latency {0.03f}ms".format(1000 * sum(t) / len(t))
def main():
test_gevent()
if __name__ == "__main__":
main()
| '''
Module for comparing the performance of various WSGI containers.
'''
import thread
import urllib2
import time
IP = '127.1.2.3'
def make_client_requests(n=100, thread=thread):
times = []
lock = thread.allocate_lock()
lock.acquire()
def run():
for i in range(n):
start = time.time()
urllib2.urlopen('http://' + IP + ':8080').read()
times.append(time.time() - start)
thread.start_new_thread(run, ())
lock.acquire()
return times
def hello_wsgi(environ, start_response):
start_response(200)
return ["hello world!"]
def test_gevent():
from gevent import pywsgi
import gevent.threading
import gevent
server = pywsgi.WSGIServer((IP, 8080), hello_wsgi)
server.start()
t = make_client_requests(thread=gevent.thread)
server.stop()
print "avg latency {0.03f}ms".format(1000 * sum(t) / len(t))
def main():
test_gevent()
if __name__ == "__main__":
main()
| en | 0.864874 | Module for comparing the performance of various WSGI containers. | 2.898162 | 3 |