max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
sockpuppet/utils.py | JMS-Software-Development/django-sockpuppet | 371 | 6614851 | <gh_stars>100-1000
try:
from lxml import etree
from io import StringIO
from lxml.cssselect import CSSSelector
HAS_LXML = True
except ImportError:
HAS_LXML = False
from bs4 import BeautifulSoup
def pascalcase(value: str) -> str:
    """Capitalize the first letter of each ``_``-separated component.

    Already-pascalized single-word strings are preserved: a value without
    underscores only has its first character upper-cased.

    Args:
        value: snake_case (or already PascalCase) identifier.

    Returns:
        The PascalCase form of ``value`` ("" for an empty input).
    """
    if not value:
        # guard: value[0] below would raise IndexError on ""
        return value
    components = value.split("_")
    if len(components) == 1:
        return value[0].upper() + value[1:]
    # str.title() already upper-cases the first letter of each component
    # (the original's extra first-char mutation was dead code).
    return "".join(x.title() for x in components)
def camelcase(value: str) -> str:
    """Capitalize the first letter of each ``_``-separated component except the first.

    Already-camelcased single-word strings are preserved: a value without
    underscores only has its first character lower-cased.

    Args:
        value: snake_case (or already camelCase) identifier.

    Returns:
        The camelCase form of ``value`` ("" for an empty input).
    """
    if not value:
        # guard: value[0] below would raise IndexError on ""
        return value
    components = value.split("_")
    if len(components) == 1:
        return value[0].lower() + value[1:]
    # The whole first component is lower-cased (as in the original, where
    # ``.lower()`` made the preceding first-char mutation dead code).
    return components[0].lower() + "".join(x.title() for x in components[1:])
def camelize_value(value):
    """Recursively camelCase every dict key inside ``value``.

    Dicts have their keys run through :func:`camelcase` and their values
    camelized recursively; lists are camelized element by element; any
    other value is returned unchanged.
    """
    if isinstance(value, dict):
        return {camelcase(key): camelize_value(item) for key, item in value.items()}
    if isinstance(value, list):
        return [camelize_value(item) for item in value]
    return value
def _lxml_selectors(html, selectors):
    """Parse ``html`` with lxml and keep only selectors that match something.

    Args:
        html: HTML document as a string.
        selectors: iterable of CSS selector strings.

    Returns:
        Tuple of (parsed lxml document, list of compiled ``CSSSelector``
        objects that matched at least one element).
    """
    parser = etree.HTMLParser()
    document = etree.parse(StringIO(html), parser)
    # compile each CSS selector once, then drop those with no matches
    selectors = [CSSSelector(selector) for selector in selectors]
    selectors = [selector for selector in selectors if selector(document)]
    return document, selectors
def _bs_selectors(html, selectors):
    """BeautifulSoup fallback for :func:`_lxml_selectors`.

    Returns the parsed document and the subset of selector strings that
    match at least one element. Unlike the lxml path, the selectors stay
    plain strings here.
    """
    # NOTE(review): BeautifulSoup is called without an explicit parser, so
    # the choice depends on what bs4 finds installed -- confirm intended.
    document = BeautifulSoup(html)
    selectors = [selector for selector in selectors if document.select(selector)]
    return document, selectors
def get_document_and_selectors(html, selectors):
    """Parse ``html`` and filter ``selectors``, preferring lxml when installed."""
    parse = _lxml_selectors if HAS_LXML else _bs_selectors
    return parse(html, selectors)
def parse_out_html(document, selector):
    """Return the parts of ``document`` matched by ``selector`` as one HTML string.

    ``selector`` is a compiled ``CSSSelector`` on the lxml path and a plain
    CSS selector string on the BeautifulSoup path, mirroring
    ``get_document_and_selectors``.

    NOTE(review): the two paths are not byte-equivalent -- lxml's
    ``tostring`` serializes each matched element itself, while bs4's
    ``decode_contents`` emits only the element's children; confirm this
    asymmetry is intended.
    """
    if HAS_LXML:
        return "".join(
            [
                etree.tostring(e, method="html").decode("utf-8")
                for e in selector(document)
            ]
        )
    return "".join([e.decode_contents() for e in document.select(selector)])
| try:
from lxml import etree
from io import StringIO
from lxml.cssselect import CSSSelector
HAS_LXML = True
except ImportError:
HAS_LXML = False
from bs4 import BeautifulSoup
def pascalcase(value: str) -> str:
"""capitalizes the first letter of each _-separated component.
This method preserves already pascalized strings."""
components = value.split("_")
if len(components) == 1:
return value[0].upper() + value[1:]
else:
components[0] = components[0][0].upper() + components[0][1:]
return "".join(x.title() for x in components)
def camelcase(value: str) -> str:
"""capitalizes the first letter of each _-separated component except the first one.
This method preserves already camelcased strings."""
components = value.split("_")
if len(components) == 1:
return value[0].lower() + value[1:]
else:
components[0] = components[0][0].lower() + components[0][1:]
return components[0].lower() + "".join(x.title() for x in components[1:])
def camelize_value(value):
"""camelizes all keys/values in a given dict or list"""
if isinstance(value, list):
value = [camelize_value(val) for val in value]
elif isinstance(value, dict):
value = {camelcase(key): camelize_value(val) for key, val in value.items()}
return value
def _lxml_selectors(html, selectors):
parser = etree.HTMLParser()
document = etree.parse(StringIO(html), parser)
selectors = [CSSSelector(selector) for selector in selectors]
selectors = [selector for selector in selectors if selector(document)]
return document, selectors
def _bs_selectors(html, selectors):
document = BeautifulSoup(html)
selectors = [selector for selector in selectors if document.select(selector)]
return document, selectors
def get_document_and_selectors(html, selectors):
if HAS_LXML:
return _lxml_selectors(html, selectors)
return _bs_selectors(html, selectors)
def parse_out_html(document, selector):
if HAS_LXML:
return "".join(
[
etree.tostring(e, method="html").decode("utf-8")
for e in selector(document)
]
)
return "".join([e.decode_contents() for e in document.select(selector)]) | en | 0.846302 | capitalizes the first letter of each _-separated component. This method preserves already pascalized strings. capitalizes the first letter of each _-separated component except the first one. This method preserves already camelcased strings. camelizes all keys/values in a given dict or list | 3.057309 | 3 |
setup.py | vpaliy/snakecoin | 0 | 6614852 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
import sys
import re
import io
from shutil import rmtree
from setuptools import setup, find_packages
# Directory containing this setup.py; all paths below resolve against it.
here = os.path.abspath(os.path.dirname(__file__))
# Package metadata.
NAME = 'snakecoin'
DESCRIPTION = 'A blockchain for a simple cryptocurrency.'
URL = 'https://github.com/vpaliy/snakecoin'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.0'
VERSION = None
# Single-source the version from snakecoin/__init__.py.
# NOTE(review): ``.match()`` returns None if __version__ is absent, which
# would raise AttributeError on ``.group(1)`` -- confirm __init__.py always
# defines __version__.
with io.open(os.path.join(here, 'snakecoin', '__init__.py'), encoding='utf-8') as fp:
    VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(fp.read()).group(1)
# Use README.md as the long description when present.
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = str()
# Install requirements come from requirements.txt, with a hard-coded fallback.
try:
    with io.open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as fp:
        requires = [r.strip() for r in fp.readlines()]
except FileNotFoundError:
    requires = [
        'requests',
        'six',
        'future-strings'
    ]
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    license='MIT',
    python_requires=REQUIRES_PYTHON,
    packages=find_packages(exclude=('tests',)),
    install_requires=requires,
    classifiers=[
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ]
)
| # -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
import sys
import re
import io
from shutil import rmtree
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
NAME = 'snakecoin'
DESCRIPTION = 'A blockchain for a simple cryptocurrency.'
URL = 'https://github.com/vpaliy/snakecoin'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.0'
VERSION = None
with io.open(os.path.join(here, 'snakecoin', '__init__.py'), encoding='utf-8') as fp:
VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(fp.read()).group(1)
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = str()
try:
with io.open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as fp:
requires = [r.strip() for r in fp.readlines()]
except FileNotFoundError:
requires = [
'requests',
'six',
'future-strings'
]
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
url=URL,
license='MIT',
python_requires=REQUIRES_PYTHON,
packages=find_packages(exclude=('tests',)),
install_requires=requires,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
]
)
| en | 0.352855 | # -*- coding: utf-8 -*- #!/usr/bin/env python | 1.784579 | 2 |
docs/user_guide/operation/scripts/examples/argus/extraction/jan_co2-NP10H.py | ASUPychron/pychron | 31 | 6614853 |
def main():
    """Run the standard Jan CO2 laser analysis sequence.

    NOTE(review): this is a pychron extraction script -- ``info``,
    ``gosub`` and ``close`` are not defined in this file and are
    presumably injected into the script namespace by the pychron runtime;
    confirm against the pyscript docs.
    """
    info('Jan CO2 laser analysis')
    gosub('jan:WaitForCO2Access')
    gosub('jan:PrepareForCO2Analysis')
    # close valve "K" (description mirrors the hardware map entry)
    close(name="K", description="Microbone to Getter NP-10C")
    gosub('jan:CO2Analysis')
|
def main():
info('Jan CO2 laser analysis')
gosub('jan:WaitForCO2Access')
gosub('jan:PrepareForCO2Analysis')
close(name="K", description="Microbone to Getter NP-10C")
gosub('jan:CO2Analysis')
| none | 1 | 1.402602 | 1 | |
cnddh/export_to_excel/forms.py | dedeco/cnddh-denuncias | 2 | 6614854 | # -*- coding: utf-8 -*-
from wtforms import Form
from wtforms import validators
from wtforms import SelectMultipleField, SelectField
from wtforms.fields.html5 import DateField
from wtforms.fields import IntegerField, BooleanField
from cnddh.database import db
from cnddh.models import Cidade, Status, TipoLocal, TipoViolacao
from cnddh.models import TipoVitima, TipoSuspeito, TipoFonte, TipoMeioUtilizado
from cnddh.mapeamentos import (estados_choices, sexo_choices, cor_choices,
periodo_choices)
class ExportToExcelFiltroForm(Form):
    """Filter form used to build the Excel export of denúncias.

    Static fields are declared at class level; the dynamic ``choices``
    (cities, statuses, violation categories, ...) are loaded from the
    database per instance in ``__init__``.
    """

    class Meta:
        """Set the form locale (Portuguese error messages)."""
        locales = ['pt_BR', 'pt']

    def __init__(self, *args, **kwargs):
        """Populate the dynamic select choices from the database.

        BUG FIX: the original assigned ``map(...)`` objects to ``choices``.
        On Python 3 a map is a one-shot iterator, so the choices would be
        exhausted after the first iteration (e.g. empty on re-render after
        validation). List comprehensions are materialized and safe on both
        Python 2 and 3.
        """
        super(ExportToExcelFiltroForm, self).__init__(*args, **kwargs)

        status = db.session.query(Status).order_by(Status.status).distinct()
        self.status_denuncia.choices = [
            (str(item.id), item.status) for item in status]

        cidades = db.session.query(Cidade.cidade).order_by(
            Cidade.cidade).distinct()
        self.cidades.choices = [
            (item.cidade, item.cidade) for item in cidades]

        tipo_de_locais = db.session.query(TipoLocal).order_by(
            TipoLocal.local).distinct()
        self.tipo_de_locais.choices = [
            (str(item.id), item.local) for item in tipo_de_locais]

        violacoes_macrocategoria = db.session.query(
            TipoViolacao.macrocategoria).order_by(
                TipoViolacao.macrocategoria).distinct(
                    TipoViolacao.macrocategoria)
        self.violacoes_macrocategoria.choices = [
            (item.macrocategoria, item.macrocategoria)
            for item in violacoes_macrocategoria]

        violacoes_microcategoria = db.session.query(
            TipoViolacao.microcategoria).order_by(
                TipoViolacao.microcategoria).distinct(
                    TipoViolacao.microcategoria)
        self.violacoes_microcategoria.choices = [
            (item.microcategoria, item.microcategoria)
            for item in violacoes_microcategoria]

        tipo_de_vitimas = db.session.query(TipoVitima).order_by(
            TipoVitima.tipo).distinct()
        # (the original also rebound the local ``tipo_de_vitimas`` in a
        # chained assignment; only the field's choices matter)
        self.tipo_de_vitimas.choices = [
            (str(item.id), item.tipo) for item in tipo_de_vitimas]

        # TODO: one query for both suspect columns
        tipo_suspeito_tipo = db.session.query(TipoSuspeito.tipo).order_by(
            TipoSuspeito.tipo).distinct()
        tipo_suspeito_instituicao = db.session.query(
            TipoSuspeito.instituicao).order_by(
                TipoSuspeito.instituicao).distinct()
        self.tipo_de_suspeitos_tipo.choices = [
            (item.tipo, item.tipo) for item in tipo_suspeito_tipo]
        self.tipo_de_suspeitos_instituicao.choices = [
            (item.instituicao, item.instituicao)
            for item in tipo_suspeito_instituicao]

        tipo_de_fontes = db.session.query(TipoFonte).order_by(
            TipoFonte.tipofonte)
        self.tipo_de_fontes.choices = [
            (str(item.id), item.tipofonte) for item in tipo_de_fontes]

        tipo_de_meio = db.session.query(TipoMeioUtilizado).order_by(
            TipoMeioUtilizado.meio)
        self.meio_utilizado.choices = [
            (str(item.id), item.meio) for item in tipo_de_meio]

    cidades = SelectMultipleField(u"Cidades", [], choices=[])
    estados = SelectMultipleField(u"Estados", [], choices=estados_choices)
    status_denuncia = SelectMultipleField(u"Status Denúncia", [], choices=[])
    tipo_de_locais = SelectMultipleField(u"Tipo de locais", [], choices=[])
    tipo_de_fontes = SelectMultipleField(u"Tipo de fontes", [], choices=[])
    violacoes_macrocategoria = SelectMultipleField(
        u"Violações Macro Categoria", [], choices=[])
    # BUG FIX: this field filters MICROcategorias but was labelled
    # "Violações Macro Categoria" (copy-paste of the field above).
    violacoes_microcategoria = SelectMultipleField(
        u"Violações Micro Categoria", [], choices=[])
    tipo_de_vitimas = SelectMultipleField(u"Tipo de Vítimas", [], choices=[])
    quantidade_de_vitimas_inicio = IntegerField(u"Quantidade de vítimas", [
        validators.optional(),
        validators.NumberRange(0, 50)
    ])
    quantidade_de_vitimas_fim = IntegerField(u"Quantidade de vítimas", [
        validators.optional(),
        validators.NumberRange(0, 50)
    ])
    data_criacao_inicio = DateField(u'Data criação inicio',
                                    [validators.optional()])
    data_criacao_fim = DateField(u'Data criação fim', [validators.optional()])
    data_denuncia_inicio = DateField(u'Data denúncia', [validators.optional()])
    data_denuncia_fim = DateField(u'Data denúncia', [validators.optional()])
    sexo_vitima = SelectMultipleField(u"Sexo", [], choices=sexo_choices)
    cor_vitima = SelectMultipleField(u"Cor", [], choices=cor_choices)
    vitima_idade_inicio = IntegerField(u"Idade", [validators.optional()])
    vitima_idade_fim = IntegerField(u"Idade", [validators.optional()])
    # NOTE(review): both suspect fields share the label u"Tipo de Suspeitos";
    # possibly intentional (rendered side by side) -- confirm with the UI.
    tipo_de_suspeitos_tipo = SelectMultipleField(
        u"Tipo de Suspeitos", [], choices=[])
    tipo_de_suspeitos_instituicao = SelectMultipleField(
        u"Tipo de Suspeitos", [], choices=[])
    quantidade_de_suspeitos_inicio = IntegerField(u"Quantidade de suspeitos", [
        validators.optional(),
        validators.NumberRange(0, 50)
    ])
    quantidade_de_suspeitos_fim = IntegerField(u"Quantidade de suspeitos", [
        validators.optional(),
        validators.NumberRange(0, 50)
    ])
    sexo_suspeito = SelectMultipleField(u"Sexo", [], choices=sexo_choices)
    cor_suspeito = SelectMultipleField(u"Cor", [], choices=cor_choices)
    suspeito_idade_inicio = IntegerField(u"Idade", [validators.optional()])
    suspeito_idade_fim = IntegerField(u"Idade", [validators.optional()])
    homicidio_periodo = SelectMultipleField(
        u"Homicídio período", [], choices=periodo_choices)
    meio_utilizado = SelectMultipleField(u"Meio utilizado", [], choices=[])
    recuperar_encaminhamentos = BooleanField(u"Recuperar Encaminhamentos", [])
    data_formato = SelectField(
        u'Data Formato', choices=[('dd/mm/yyyy', 'Normal'), ('yyyy', 'Ano')])
    # TODO: filtro encaminhamento e retorno?
| # -*- coding: utf-8 -*-
from wtforms import Form
from wtforms import validators
from wtforms import SelectMultipleField, SelectField
from wtforms.fields.html5 import DateField
from wtforms.fields import IntegerField, BooleanField
from cnddh.database import db
from cnddh.models import Cidade, Status, TipoLocal, TipoViolacao
from cnddh.models import TipoVitima, TipoSuspeito, TipoFonte, TipoMeioUtilizado
from cnddh.mapeamentos import (estados_choices, sexo_choices, cor_choices,
periodo_choices)
class ExportToExcelFiltroForm(Form):
"""Form to specific filter to create sheet."""
class Meta:
"""Just set language to form. For mesage erros."""
locales = ['pt_BR', 'pt']
def __init__(self, *args, **kwargs):
"""O formulário precisa de opções para o select."""
"""Essas opções são iniciadas nesse init."""
super(ExportToExcelFiltroForm, self).__init__(*args, **kwargs)
status = db.session.query(Status).order_by(Status.status).distinct()
cidades = db.session.query(Cidade.cidade).order_by(
Cidade.cidade).distinct()
self.cidades.choices = map(lambda item: (item.cidade, item.cidade),
cidades)
self.status_denuncia.choices = map(
lambda item: (str(item.id), item.status), status)
tipo_de_locais = db.session.query(TipoLocal).order_by(
TipoLocal.local).distinct()
self.tipo_de_locais.choices = map(
lambda item: (str(item.id), item.local), tipo_de_locais)
violacoes_macrocategoria = db.session.query(
TipoViolacao.macrocategoria).order_by(
TipoViolacao.macrocategoria).distinct(
TipoViolacao.macrocategoria)
violacoes_microcategoria = db.session.query(
TipoViolacao.microcategoria).order_by(
TipoViolacao.microcategoria).distinct(
TipoViolacao.microcategoria)
self.violacoes_macrocategoria.choices = map(
lambda item: (item.macrocategoria, item.macrocategoria),
violacoes_macrocategoria)
self.violacoes_microcategoria.choices = map(
lambda item: (item.microcategoria, item.microcategoria),
violacoes_microcategoria)
tipo_de_vitimas = db.session.query(TipoVitima).order_by(
TipoVitima.tipo).distinct()
self.tipo_de_vitimas.choices = tipo_de_vitimas = map(
lambda item: (str(item.id), item.tipo), tipo_de_vitimas)
tipo_suspeito_tipo = db.session.query(TipoSuspeito.tipo).order_by(
TipoSuspeito.tipo).distinct()
tipo_suspeito_instituicao = db.session.query(
TipoSuspeito.instituicao).order_by(
TipoSuspeito.instituicao).distinct()
# TOdo One Query for both
self.tipo_de_suspeitos_tipo.choices = map(
lambda item: (item.tipo, item.tipo), tipo_suspeito_tipo)
self.tipo_de_suspeitos_instituicao.choices = map(
lambda item: (item.instituicao, item.instituicao),
tipo_suspeito_instituicao)
tipo_de_fontes = db.session.query(TipoFonte).order_by(
TipoFonte.tipofonte)
self.tipo_de_fontes.choices = map(
lambda item: (str(item.id), item.tipofonte), tipo_de_fontes)
tipo_de_meio = db.session.query(TipoMeioUtilizado).order_by(
TipoMeioUtilizado.meio)
self.meio_utilizado.choices = map(
lambda item: (str(item.id), item.meio), tipo_de_meio)
cidades = SelectMultipleField(u"Cidades", [], choices=[])
estados = SelectMultipleField(u"Estados", [], choices=estados_choices)
status_denuncia = SelectMultipleField(u"Status Denúncia", [], choices=[])
tipo_de_locais = SelectMultipleField(u"Tipo de locais", [], choices=[])
tipo_de_fontes = SelectMultipleField(u"Tipo de fontes", [], choices=[])
violacoes_macrocategoria = SelectMultipleField(
u"Violações Macro Categoria", [], choices=[])
violacoes_microcategoria = SelectMultipleField(
u"Violações Macro Categoria", [], choices=[])
tipo_de_vitimas = SelectMultipleField(u"Tipo de Vítimas", [], choices=[])
quantidade_de_vitimas_inicio = IntegerField(u"Quantidade de vítimas", [
validators.optional(),
validators.NumberRange(0, 50)
])
quantidade_de_vitimas_fim = IntegerField(u"Quantidade de vítimas", [
validators.optional(),
validators.NumberRange(0, 50)
])
data_criacao_inicio = DateField(u'Data criação inicio',
[validators.optional()])
data_criacao_fim = DateField(u'Data criação fim', [validators.optional()])
data_denuncia_inicio = DateField(u'Data denúncia', [validators.optional()])
data_denuncia_fim = DateField(u'Data denúncia', [validators.optional()])
sexo_vitima = SelectMultipleField(u"Sexo", [], choices=sexo_choices)
cor_vitima = SelectMultipleField(u"Cor", [], choices=cor_choices)
vitima_idade_inicio = IntegerField(u"Idade", [validators.optional()])
vitima_idade_fim = IntegerField(u"Idade", [validators.optional()])
tipo_de_suspeitos_tipo = SelectMultipleField(
u"Tipo de Suspeitos", [], choices=[])
tipo_de_suspeitos_instituicao = SelectMultipleField(
u"Tipo de Suspeitos", [], choices=[])
quantidade_de_suspeitos_inicio = IntegerField(u"Quantidade de suspeitos", [
validators.optional(),
validators.NumberRange(0, 50)
])
quantidade_de_suspeitos_fim = IntegerField(u"Quantidade de suspeitos", [
validators.optional(),
validators.NumberRange(0, 50)
])
sexo_suspeito = SelectMultipleField(u"Sexo", [], choices=sexo_choices)
cor_suspeito = SelectMultipleField(u"Cor", [], choices=cor_choices)
suspeito_idade_inicio = IntegerField(u"Idade", [validators.optional()])
suspeito_idade_fim = IntegerField(u"Idade", [validators.optional()])
homicidio_periodo = SelectMultipleField(
u"Homicídio período", [], choices=periodo_choices)
meio_utilizado = SelectMultipleField(u"Meio utilizado", [], choices=[])
recuperar_encaminhamentos = BooleanField(u"Recuperar Encaminhamentos", [])
data_formato = SelectField(
u'Data Formato', choices=[('dd/mm/yyyy', 'Normal'), ('yyyy', 'Ano')])
# TODO FIltro encaminhamento e retorno?
| pt | 0.742743 | # -*- coding: utf-8 -*- Form to specific filter to create sheet. Just set language to form. For mesage erros. O formulário precisa de opções para o select. Essas opções são iniciadas nesse init. # TOdo One Query for both # TODO FIltro encaminhamento e retorno? | 2.218486 | 2 |
squareroot.py | Aine-Kearns/pands-problems | 0 | 6614855 | # Homework week 6
# Find the square root of 14.5
# the programme will return a square root of the number
# I originally used the math function and then following feedback from Andrew I discovered I needed to use Newton's method or my own method
# So this prompted me check online and to make changes while holding on to the original version that I used so that I could check I was right in my approach
# In my first attempts I decided I need to import the math library in order to use sqrt()
import math
y = float(14.5)  # the number whose square root the demo computes (float() on a float literal is redundant)
# this was the original attempt to define the function sqrt()
def sqrt(y):
    """Return math.sqrt(y) rounded to one decimal place (library baseline)."""
    return round(math.sqrt(y), 1)
# updated now to define the function myNewtonSqrt which will use a different method
def myNewtonSqrt(y, x=None):
    """Approximate the square root of ``y`` with Newton's method.

    Newton iteration for f(x) = x**2 - y:
        x_{n+1} = x_n - (x_n**2 - y) / (2 * x_n)
    repeated until successive estimates differ by less than 1e-7.

    References: https://en.wikibooks.org/wiki/Think_Python/Iteration

    Args:
        y: non-negative number whose square root is wanted.
        x: optional initial guess. When omitted, the user is prompted
           (preserving the original interactive behaviour).

    Returns:
        A float approximation of sqrt(y).

    Raises:
        ValueError: if ``y`` is negative (the real iteration cannot converge).
        ZeroDivisionError: if an estimate hits exactly 0.
    """
    if y < 0:
        raise ValueError("cannot take the real square root of a negative number")
    if x is None:
        # BUG FIX: the old prompt hard-coded "14.5" regardless of y.
        x = float(input(
            "Let's find the square root of {}. Please enter a guess first: ".format(y)))
    while True:
        # one Newton step: subtract f(x)/f'(x) from the current estimate
        better = x - (x * x - y) / (2 * x)
        if abs(x - better) < 0.0000001:
            return better
        x = better
# Demo driver: compute the Newton estimate interactively, then compare it
# with the library answer from sqrt().
estimate = myNewtonSqrt(y)
print("Using your guess in the Newton Method the square root of", y, "is approx.", estimate)
print("This can be rounded to", round(estimate,1))
print("And actually using the imported square root function, the square root of", y, "is approx.", sqrt(y))
| # Homework week 6
# Find the square root of 14.5
# the programme will return a square root of the number
# I originally used the math function and then following feedback from Andrew I discovered I needed to use Newton's method or my own method
# So this prompted me check online and to make changes while holding on to the original version that I used so that I could check I was right in my approach
# In my first attempts I decided I need to import the math library in order to use sqrt()
import math
y = float(14.5)
# this was the original attempt to define the function sqrt()
def sqrt(y):
ans = math.sqrt(y)
# round the answer to 1 decimal place
ans = round(ans,1)
return ans
# updated now to define the function myNewtonSqrt which will use a different method
def myNewtonSqrt(y):
# Following feedback from Andrew I've tried using Newton's method with reference to the following three links
# https://www.youtube.com/watch?v=2158QbsunA8
# https://aaronschlegel.me/newtons-method-equation-roots.html
# https://en.wikibooks.org/wiki/Think_Python/Iteration
x = float(input("Let's find the square root of 14.5. Please enter a guess first: "))
# so y=14.5 and x=a guess at the squareroot of y, therefore taking that guess I must
# first square x and subtract y
# second multiply x by 2
# then divide the first calculation by the second and this result is subtracted from the original best guess
while True:
a = x**2 - y
z = 2 * x
ans2 = x - a/z
if abs(x - ans2) < 0.0000001:
break
x = ans2
# this answer is looped back into the first calculation to refine down the answer until the difference is less that 0.0000001
return x
estimate = myNewtonSqrt(y)
print("Using your guess in the Newton Method the square root of", y, "is approx.", estimate)
print("This can be rounded to", round(estimate,1))
print("And actually using the imported square root function, the square root of", y, "is approx.", sqrt(y))
| en | 0.90725 | # Homework week 6 # Find the square root of 14.5 # the programme will return a square root of the number # I originally used the math function and then following feedback from Andrew I discovered I needed to use Newton's method or my own method # So this prompted me check online and to make changes while holding on to the original version that I used so that I could check I was right in my approach # In my first attempts I decided I need to import the math library in order to use sqrt() # this was the original attempt to define the function sqrt() # round the answer to 1 decimal place # updated now to define the function myNewtonSqrt which will use a different method # Following feedback from Andrew I've tried using Newton's method with reference to the following three links # https://www.youtube.com/watch?v=2158QbsunA8 # https://aaronschlegel.me/newtons-method-equation-roots.html # https://en.wikibooks.org/wiki/Think_Python/Iteration # so y=14.5 and x=a guess at the squareroot of y, therefore taking that guess I must # first square x and subtract y # second multiply x by 2 # then divide the first calculation by the second and this result is subtracted from the original best guess # this answer is looped back into the first calculation to refine down the answer until the difference is less that 0.0000001 | 4.259212 | 4 |
src/dispatch/alembic/versions/fb5639709294_.py | BuildJet/dispatch | 1 | 6614856 | <filename>src/dispatch/alembic/versions/fb5639709294_.py<gh_stars>1-10
"""Migrates a plugin's configuration to a
per project instance.
Revision ID: fb5639709294
Revises: <PASSWORD>
Create Date: 2021-04-16 11:35:15.473228
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
from sqlalchemy.orm import Session
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
# revision identifiers, used by Alembic.
revision = "fb5639709294"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
class Project(Base):
    """Minimal snapshot mapping of the ``project`` table; only the PK is needed here."""
    __tablename__ = "project"
    id = sa.Column(sa.Integer, primary_key=True)
class Plugin(Base):
    """Snapshot of ``plugin`` as it exists BEFORE this migration.

    It still carries the per-project columns (enabled/configuration/
    project_id) that ``upgrade`` moves onto ``plugin_instance``.
    """
    __tablename__ = "plugin"
    id = sa.Column(sa.Integer, primary_key=True)
    enabled = sa.Column(sa.Boolean)
    configuration = sa.Column(sqlalchemy_utils.types.json.JSONType())
    project_id = sa.Column(sa.Integer, sa.ForeignKey("project.id"))
class PluginInstance(Base):
    """Mapping of the new ``plugin_instance`` table created by this migration."""
    __tablename__ = "plugin_instance"
    id = sa.Column(sa.Integer, primary_key=True)
    enabled = sa.Column(sa.Boolean)
    configuration = sa.Column(sqlalchemy_utils.types.json.JSONType())
    plugin_id = sa.Column(sa.Integer, sa.ForeignKey("plugin.id"))
    project_id = sa.Column(sa.Integer, sa.ForeignKey("project.id"))
def upgrade():
    """Create ``plugin_instance`` and copy per-project plugin state into it,
    then drop the now-redundant columns from ``plugin``."""
    # ### commands auto generated by Alembic - please adjust! ###
    bind = op.get_bind()
    session = Session(bind=bind)
    op.create_table(
        "plugin_instance",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("enabled", sa.Boolean(), nullable=True),
        sa.Column("configuration", sqlalchemy_utils.types.json.JSONType(), nullable=True),
        sa.Column("plugin_id", sa.Integer(), nullable=True),
        sa.Column("project_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ["plugin_id"],
            ["plugin.id"],
        ),
        sa.ForeignKeyConstraint(["project_id"], ["project.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    # migrate current plugin configuration to its own instance
    # (one instance row per existing plugin row; NULL config becomes {})
    for p in session.query(Plugin).all():
        configuration = p.configuration if p.configuration else {}
        session.add(
            PluginInstance(
                project_id=p.project_id,
                plugin_id=p.id,
                enabled=p.enabled,
                configuration=configuration,
            )
        )
    session.flush()
    # data has been copied; drop the per-project columns from plugin
    op.drop_constraint("plugin_project_id_fkey", "plugin", type_="foreignkey")
    op.drop_column("plugin", "enabled")
    op.drop_column("plugin", "project_id")
    op.drop_column("plugin", "configuration")
    # ### end Alembic commands ###
def downgrade():
    """Re-add the per-project columns to ``plugin``, copy state back from
    ``plugin_instance``, and drop the instance table.

    NOTE(review): if several plugin_instance rows reference the same
    plugin, the last row iterated silently wins; ``.one()`` also assumes
    every instance's plugin_id resolves to exactly one plugin row.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    bind = op.get_bind()
    session = Session(bind=bind)
    op.add_column(
        "plugin",
        sa.Column(
            "configuration",
            postgresql.JSON(astext_type=sa.Text()),
            autoincrement=False,
            nullable=True,
        ),
    )
    op.add_column(
        "plugin", sa.Column("project_id", sa.INTEGER(), autoincrement=False, nullable=True)
    )
    op.add_column("plugin", sa.Column("enabled", sa.BOOLEAN(), autoincrement=False, nullable=True))
    op.create_foreign_key(
        "plugin_project_id_fkey", "plugin", "project", ["project_id"], ["id"], ondelete="CASCADE"
    )
    for i in session.query(PluginInstance).all():
        plugin = session.query(Plugin).filter(Plugin.id == i.plugin_id).one()
        plugin.enabled = i.enabled
        plugin.configuration = i.configuration
        plugin.project_id = i.project_id
        session.add(plugin)
    session.flush()
    op.drop_table("plugin_instance")
    # ### end Alembic commands ###
| <filename>src/dispatch/alembic/versions/fb5639709294_.py<gh_stars>1-10
"""Migrates a plugin's configuration to a
per project instance.
Revision ID: fb5639709294
Revises: <PASSWORD>
Create Date: 2021-04-16 11:35:15.473228
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
from sqlalchemy.orm import Session
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
# revision identifiers, used by Alembic.
revision = "fb5639709294"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
class Project(Base):
__tablename__ = "project"
id = sa.Column(sa.Integer, primary_key=True)
class Plugin(Base):
__tablename__ = "plugin"
id = sa.Column(sa.Integer, primary_key=True)
enabled = sa.Column(sa.Boolean)
configuration = sa.Column(sqlalchemy_utils.types.json.JSONType())
project_id = sa.Column(sa.Integer, sa.ForeignKey("project.id"))
class PluginInstance(Base):
__tablename__ = "plugin_instance"
id = sa.Column(sa.Integer, primary_key=True)
enabled = sa.Column(sa.Boolean)
configuration = sa.Column(sqlalchemy_utils.types.json.JSONType())
plugin_id = sa.Column(sa.Integer, sa.ForeignKey("plugin.id"))
project_id = sa.Column(sa.Integer, sa.ForeignKey("project.id"))
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
bind = op.get_bind()
session = Session(bind=bind)
op.create_table(
"plugin_instance",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("enabled", sa.Boolean(), nullable=True),
sa.Column("configuration", sqlalchemy_utils.types.json.JSONType(), nullable=True),
sa.Column("plugin_id", sa.Integer(), nullable=True),
sa.Column("project_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["plugin_id"],
["plugin.id"],
),
sa.ForeignKeyConstraint(["project_id"], ["project.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
# migrate current plugin configuration to it's own instance
for p in session.query(Plugin).all():
configuration = p.configuration if p.configuration else {}
session.add(
PluginInstance(
project_id=p.project_id,
plugin_id=p.id,
enabled=p.enabled,
configuration=configuration,
)
)
session.flush()
op.drop_constraint("plugin_project_id_fkey", "plugin", type_="foreignkey")
op.drop_column("plugin", "enabled")
op.drop_column("plugin", "project_id")
op.drop_column("plugin", "configuration")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
bind = op.get_bind()
session = Session(bind=bind)
op.add_column(
"plugin",
sa.Column(
"configuration",
postgresql.JSON(astext_type=sa.Text()),
autoincrement=False,
nullable=True,
),
)
op.add_column(
"plugin", sa.Column("project_id", sa.INTEGER(), autoincrement=False, nullable=True)
)
op.add_column("plugin", sa.Column("enabled", sa.BOOLEAN(), autoincrement=False, nullable=True))
op.create_foreign_key(
"plugin_project_id_fkey", "plugin", "project", ["project_id"], ["id"], ondelete="CASCADE"
)
for i in session.query(PluginInstance).all():
plugin = session.query(Plugin).filter(Plugin.id == i.plugin_id).one()
plugin.enabled = i.enabled
plugin.configuration = i.configuration
plugin.project_id = i.project_id
session.add(plugin)
session.flush()
op.drop_table("plugin_instance")
# ### end Alembic commands ###
| en | 0.631934 | Migrates a plugin's configuration to a per project instance. Revision ID: fb5639709294 Revises: <PASSWORD> Create Date: 2021-04-16 11:35:15.473228 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # migrate current plugin configuration to it's own instance # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.601927 | 2 |
function.py | dgg32/gcp-delivery-lite | 2 | 6614857 | <gh_stars>1-10
import os
import itertools
import json
import requests
import numpy as np
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
KEY = os.environ.get('KEY')
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
SENDER = os.environ.get('SENDER')
def driving_time_and_distance(ori, dest):
    """Return driving distance/duration between two places via the
    Google Distance Matrix API.

    Args:
        ori (str): origin address
        dest (str): destination address

    Returns:
        dict: distance_value (metres), distance_text,
            duration_text, duration_value (seconds)
    """
    url = "https://maps.googleapis.com/maps/api/distancematrix/json"
    # Let requests URL-encode the addresses (spaces, accents, ...) and set a
    # hard timeout so a stalled API call cannot hang the function forever.
    response = requests.get(
        url,
        params={
            "key": KEY,
            "origins": ori,
            "destinations": dest,
            "mode": "driving",
            "language": "en-EN",
            "sensor": "false",
        },
        timeout=30,
    )
    element = response.json()["rows"][0]["elements"][0]
    return {
        "distance_value": element["distance"]["value"],
        "distance_text": element["distance"]["text"],
        "duration_text": element["duration"]["text"],
        "duration_value": element["duration"]["value"],
    }
def distance_matrix_gcp(destinations):
    """Build the pairwise driving-distance matrix for a list of destinations.

    Args:
        destinations (list): dicts, each carrying an "address" key

    Returns:
        dict: {"distance_matrix": nested list of metre distances}
    """
    count = len(destinations)
    matrix = np.zeros(shape=(count, count))
    for a, b in itertools.combinations(range(count), 2):
        metres = driving_time_and_distance(
            destinations[a]["address"], destinations[b]["address"]
        )["distance_value"]
        # Driving distance is treated as symmetric, so one API call
        # fills both triangles of the matrix.
        matrix[a][b] = metres
        matrix[b][a] = metres
    return {"distance_matrix": matrix.tolist()}
def send_email(dest_mail, subject, text):
    """Send an email to carriers through SendGrid.

    Args:
        dest_mail (str): recipient email address
        subject (str): email subject
        text (str): HTML email body
    """
    message = Mail(
        from_email=SENDER,
        to_emails=dest_mail,
        subject=subject,
        html_content=text)
    try:
        sg = SendGridAPIClient(f"{SENDGRID_API_KEY}")
        response = sg.send(message)
        print(response.status_code)
        print(response.body)
        print(response.headers)
    except Exception as e:
        # Python 3 exceptions have no .message attribute; printing the
        # exception itself avoids an AttributeError inside the handler.
        print(e)
import itertools
import json
import requests
import numpy as np
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
KEY = os.environ.get('KEY')
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
SENDER = os.environ.get('SENDER')
def driving_time_and_distance(ori, dest):
"""get the dict of distance between two places
Args:
ori (str): Place A
dest (str): Place B
Returns:
dict: return a dict of distance description
"""
url = f"https://maps.googleapis.com/maps/api/distancematrix/json?key={KEY}&origins={ori}&destinations={dest}&mode=driving&language=en-EN&sensor=false"
result= json.loads(requests.get(url).text)
return {"distance_value": result["rows"][0]["elements"][0]["distance"]["value"], "distance_text": result["rows"][0]["elements"][0]["distance"]["text"], "duration_text": result["rows"][0]["elements"][0]["duration"]["text"], "duration_value": result["rows"][0]["elements"][0]["duration"]["value"]}
def distance_matrix_gcp(destinations):
"""get the pairwise distance matrix with gcp
Args:
df (pd.Dataframe): a dataframe with a column "address"
Returns:
dict: return a dict of distance description
"""
indice = range(len(destinations))
dis_max = np.zeros(shape=(len(destinations),len(destinations)))
for pair in itertools.combinations(indice, 2):
dis = driving_time_and_distance(destinations[pair[0]]["address"], destinations[pair[1]]["address"])['distance_value']
dis_max[pair[0]][pair[1]] = dis
dis_max[pair[1]][pair[0]] = dis
return {"distance_matrix": dis_max.tolist()}
def send_email(dest_mail, subject, text):
    """Send an email to carriers through SendGrid.

    Args:
        dest_mail (str): recipient email address
        subject (str): email subject
        text (str): HTML email body
    """
    message = Mail(
        from_email=SENDER,
        to_emails=dest_mail,
        subject=subject,
        html_content=text)
    try:
        sg = SendGridAPIClient(f"{SENDGRID_API_KEY}")
        response = sg.send(message)
        print(response.status_code)
        print(response.body)
        print(response.headers)
    except Exception as e:
        # Python 3 exceptions have no .message attribute; printing the
        # exception itself avoids an AttributeError inside the handler.
        print(e)
birthday_probabilities/birthday_probabilities.py | jShiohaha/math-simulations | 1 | 6614858 | <reponame>jShiohaha/math-simulations<filename>birthday_probabilities/birthday_probabilities.py
import sys
import math
import decimal
'''
Problem Description: Among n people, let A be the event that at least two share a birthday.
Then, the complementary event, A_not, is the probability that no two
share a birthday.
The main logic that does the computation here is found in main() and the auxiliary helper
functions are found directly below this comment.
'''
def stirlings_factorial(n):
    """Approximate n! with Stirling's formula sqrt(2*pi) * n^(n+1/2) * e^(-n)."""
    two_pi_root = math.sqrt(2 * math.pi)
    power_term = n ** (n + 0.5)
    decay_term = math.e ** (-1 * n)
    return two_pi_root * power_term * decay_term
def modified_factorial(n, k):
    """Compute the falling factorial n! / (n-k)! = n * (n-1) * ... * (n-k+1).

    Avoids materialising two full factorials just to divide them.

    Args:
        n (int): pool size
        k (int): number of top factors taken

    Returns:
        int: product of the k largest factors of n!
    """
    result = 1
    # Multiply exactly the k descending terms n, n-1, ..., n-k+1 promised by
    # the docstring; the old loop ran n-k times and actually computed n!/k!.
    for i in range(k):
        result *= (n - i)
    return result
def exp_by_squaring(x, n):
    """Compute x**n by recursive squaring (O(log n) multiplications).

    Used instead of math.pow(), which could not handle the size of the
    exponentiations done in this problem.

    Args:
        x: base (kept as int for exact arbitrary-precision results)
        n (int): non-negative exponent

    Returns:
        x raised to the n-th power
    """
    if n == 0:
        return 1
    if n == 1:
        return x
    if n % 2 == 0:
        # Floor division keeps the exponent an int; the original's true
        # division (n / 2) passed floats down the recursion.
        return exp_by_squaring(x * x, n // 2)
    return x * exp_by_squaring(x * x, (n - 1) // 2)
def samp_no_replacement(n, set_size, set_factorial):
    """Probability that n samples drawn from set_size values are all distinct."""
    # Ordered ways to pick n distinct values, over all n-tuples set_size**n.
    distinct_orderings = decimal.Decimal(set_factorial) / decimal.Decimal(math.factorial(set_size - n))
    return distinct_orderings / decimal.Decimal(exp_by_squaring(set_size, n))
def main():
    """Write the birthday probabilities for every group size 2..365.

    Produces birthdays.txt with one line per group size:
    "n,P(all distinct),P(at least one shared)".
    """
    # Precision used by the Decimal probability computations below.
    decimal.getcontext().prec = 10
    days_in_year = 365
    days_factorial = math.factorial(days_in_year)
    # 'with' guarantees the file is closed even if a computation fails.
    with open("birthdays.txt", "w") as file:
        for group_size in range(2, days_in_year + 1):
            result = samp_no_replacement(group_size, days_in_year, days_factorial)
            inverse_result = 1 - result
            file.write(str(group_size) + "," + str(result) + "," + str(inverse_result) + "\n")
main() | import sys
import math
import decimal
'''
Problem Description: Among n people, let A be the event that at least two share a birthday.
Then, the complementary event, A_not, is the probability that no two
share a birthday.
The main logic that does the computation here is found in main() and the auxiliary helper
functions are found directly below this comment.
'''
def stirlings_factorial(n):
# math.sqrt(2 * math.pi) * (n ** (n + 0.5)) * (math.e ** (-1 * n))
return math.sqrt(2 * math.pi) * (n ** (n + 0.5)) * (math.e ** (-1 * n))
def modified_factorial(n, k):
'''
instead of computing n! / (n-k)! everytime, we can just compute
the value of the wholistic value as n * (n-1) * ... * (n-k+1)
'''
result, i = 1, 0
inverse_k = n-k
while i < inverse_k:
result *= (n-i)
i += 1
return result
def exp_by_squaring(x, n):
'''
The math.pow() function could not handle the size of exponentiation that
was being done in this problem, so here is a recursive solution to implement
exponentiation by squaring.
'''
if n == 0:
return 1
elif n == 1:
return x
elif n % 2 == 0:
return exp_by_squaring(x * x, n / 2)
else:
return x * exp_by_squaring(x * x, (n - 1) / 2)
def samp_no_replacement(n, set_size, set_factorial):
temp = decimal.Decimal(set_factorial) / decimal.Decimal(math.factorial(set_size-n))
return temp / decimal.Decimal(exp_by_squaring(set_size, n))
def main():
# set precision of decimals of probabilities to be calculated
decimal.getcontext().prec = 10
i = 2
days_in_year = 365
days_factorial = math.factorial(days_in_year)
file = open("birthdays.txt", "w")
while i <= days_in_year:
result = samp_no_replacement(i, days_in_year, days_factorial)
inverse_result = 1-result
file.write(str(i) + "," + str(result) + "," + str(inverse_result) + "\n")
i += 1
file.close()
if __name__ == "__main__":
main() | en | 0.909247 | Problem Description: Among n people, let A be the event that at least two share a birthday. Then, the complementary event, A_not, is the probability that no two share a birthday. The main logic that does the computation here is found in main() and the auxiliary helper functions are found directly below this comment. # math.sqrt(2 * math.pi) * (n ** (n + 0.5)) * (math.e ** (-1 * n)) instead of computing n! / (n-k)! everytime, we can just compute the value of the wholistic value as n * (n-1) * ... * (n-k+1) The math.pow() function could not handle the size of exponentiation that was being done in this problem, so here is a recursive solution to implement exponentiation by squaring. # set precision of decimals of probabilities to be calculated | 3.783463 | 4 |
pose/data/mpii/loadheadsize.py | princeton-vl/uniloss | 8 | 6614859 | <filename>pose/data/mpii/loadheadsize.py
import numpy as np
import json
import scipy.io as sio
import torch
# Per-person annotation index file (pairs each person with its entry in the
# raw MPII release structure).
with open('mpii_annotations.json') as f:
    data = json.load(f)
# Raw MPII release .mat; the nested [0][0][0] indexing unwraps the MATLAB
# struct array down to the per-image annotation list.
anno = sio.loadmat('mpii_human_pose_v1_u12_2/mpii_human_pose_v1_u12_1.mat')
annolist = anno['RELEASE'][0][0][0]
# One (x1, y1, x2, y2) head rectangle per annotated person.
# NOTE(review): 25204 is presumably the person count in the JSON — confirm
# it matches len(data), otherwise trailing rows stay uninitialised.
headsize = np.ndarray(shape=(25204,4))
for i in range(len(data)):
    # annolist_index / people_index are 1-based in the MATLAB data.
    i1 = int(data[i]['annolist_index']-1)
    i2 = int(data[i]['people_index']-1)
    print (i,i1,i2)
    # Head-rectangle corners for person i2 in image i1.
    # NOTE(review): field [1] is assumed to be the head rect array — verify
    # against the MPII release layout.
    x1 = annolist[0,i1][1][0][i2][0]
    y1 = annolist[0,i1][1][0][i2][1]
    x2 = annolist[0,i1][1][0][i2][2]
    y2 = annolist[0,i1][1][0][i2][3]
    headsize[i] = (x1,y1,x2,y2)
torch.save(headsize, 'headsize.bin')
| <filename>pose/data/mpii/loadheadsize.py
import numpy as np
import json
import scipy.io as sio
import torch
with open('mpii_annotations.json') as f:
data = json.load(f)
anno = sio.loadmat('mpii_human_pose_v1_u12_2/mpii_human_pose_v1_u12_1.mat')
annolist = anno['RELEASE'][0][0][0]
headsize = np.ndarray(shape=(25204,4))
for i in range(len(data)):
i1 = int(data[i]['annolist_index']-1)
i2 = int(data[i]['people_index']-1)
print (i,i1,i2)
x1 = annolist[0,i1][1][0][i2][0]
y1 = annolist[0,i1][1][0][i2][1]
x2 = annolist[0,i1][1][0][i2][2]
y2 = annolist[0,i1][1][0][i2][3]
headsize[i] = (x1,y1,x2,y2)
torch.save(headsize, 'headsize.bin')
| none | 1 | 2.069159 | 2 | |
Vigenere/vigenere.py | PeterHall16/Cryptography | 0 | 6614860 | <reponame>PeterHall16/Cryptography
# Define variables and certain arrays
alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
counter = 0
numericKey = []
extendedKey = []
numericPhrase = []
def crypt():
    """Interactively encrypt or decrypt the global phrase with the Vigenere key.

    Reads the user's choice from stdin, prints the result and exits via
    quit(). Relies on the module-level phrase / numericPhrase / extendedKey
    globals being populated before the call.
    """
    # Define local variables
    counter = 0
    # Element-wise sum of phrase and repeated-key letter indices (cipher values).
    final = [int(numericPhrase[i]) + int(extendedKey[i]) for i in range(len(numericPhrase))]
    # Select to encrypt or decrypt
    toggle = str(input("Encrypt(1) or decrypt(2) the phrase? "))
    if (toggle == '1'):
        # Add lists
        print(final)
        # Convert back to letters; alphabet holds A-Z twice (52 entries), so
        # sums up to 50 index straight into it without an explicit mod 26.
        finalLetters = []
        while (counter < len(phrase)):
            finalLetters.append(alphabet[final[counter]])
            counter = counter + 1
        print("Encrypted: " + ''.join(finalLetters))
        quit()
    if (toggle == '2'):
        decryptFinal = [int(numericPhrase[i]) - int(extendedKey[i]) for i in range(len(numericPhrase))]
        print(decryptFinal)
        finalDecryptLetters = []
        # Negative differences index from the end of the doubled alphabet,
        # which still yields the right letter via Python negative indexing.
        while (counter < len(phrase)):
            finalDecryptLetters.append(alphabet[decryptFinal[counter]])
            counter = counter + 1
        print("Decrypted: " + ''.join(finalDecryptLetters))
        quit()
    else:
        # Invalid choice: ask again recursively.
        print("Please enter a valid response.")
        crypt()
# Get key and convert each letter to its alphabet index.
key = str(input("enter key (letters only, no spaces): ")).upper()
key = list(key)
while (counter < 26):
    numericKey.append(alphabet.index(key[counter]))
    counter = counter + 1
    if (len(numericKey) == len(key)):
        counter = 0
        break
# str + list raises TypeError; pass the list as a separate print argument.
print("Numeric Key:", numericKey)
# Get phrase to encrypt and convert it to alphabet indices.
phrase = list(str(input("enter phrase to encrypt/decrypt: ")).upper())
while (counter < 26):
    numericPhrase.append(alphabet.index(phrase[counter]))
    print(numericPhrase)
    counter = counter + 1
    if (len(numericPhrase) == len(phrase)):
        counter = 0
        break
# Repeat the key until it covers the whole phrase.
while (len(extendedKey) < len(phrase)):
    extendedKey.append(str(numericKey[counter]))
    print(extendedKey)
    print(counter)
    counter = counter + 1
    if (counter >= len(key)):
        counter = 0
crypt()
| # Define variables and certain arrays
alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
counter = 0
numericKey = []
extendedKey = []
numericPhrase = []
def crypt():
# Define local variables
counter = 0
final = [int(numericPhrase[i]) + int(extendedKey[i]) for i in range(len(numericPhrase))]
# Select to encrypt or decrypt
toggle = str(input("Encrypt(1) or decrypt(2) the phrase? "))
if (toggle == '1'):
# Add lists
print(final)
# Convert back to letters
finalLetters = []
while (counter < len(phrase)):
finalLetters.append(alphabet[final[counter]])
counter = counter + 1
print("Encrypted: " + ''.join(finalLetters))
quit()
if (toggle == '2'):
decryptFinal = [int(numericPhrase[i]) - int(extendedKey[i]) for i in range(len(numericPhrase))]
print(decryptFinal)
finalDecryptLetters = []
while (counter < len(phrase)):
finalDecryptLetters.append(alphabet[decryptFinal[counter]])
counter = counter + 1
print("Decrypted: " + ''.join(finalDecryptLetters))
quit()
else:
print("Please enter a valid response.")
crypt()
# Get key and convert each letter to its alphabet index.
key = str(input("enter key (letters only, no spaces): ")).upper()
key = list(key)
while (counter < 26):
    numericKey.append(alphabet.index(key[counter]))
    counter = counter + 1
    if (len(numericKey) == len(key)):
        counter = 0
        break
# str + list raises TypeError; pass the list as a separate print argument.
print("Numeric Key:", numericKey)
# Get phrase to encrypt and convert it to alphabet indices.
phrase = list(str(input("enter phrase to encrypt/decrypt: ")).upper())
while (counter < 26):
    numericPhrase.append(alphabet.index(phrase[counter]))
    print(numericPhrase)
    counter = counter + 1
    if (len(numericPhrase) == len(phrase)):
        counter = 0
        break
# Repeat the key until it covers the whole phrase.
while (len(extendedKey) < len(phrase)):
    extendedKey.append(str(numericKey[counter]))
    print(extendedKey)
    print(counter)
    counter = counter + 1
    if (counter >= len(key)):
        counter = 0
crypt()
tests/unit/flow/test_flow_visualization.py | lsgrep/jina | 1 | 6614861 | from pathlib import Path
from jina.flow import Flow
cur_dir = Path(__file__).parent
def test_visualization_with_yml_file_img(tmpdir):
    """Plotting a YAML-loaded Flow writes the requested SVG file."""
    output_file = Path(tmpdir) / 'flow.svg'
    Flow.load_config(str(cur_dir.parent / 'yaml' / 'test_flow_visualization.yml')).plot(
        output=str(output_file))
    assert output_file.exists()
def test_visualization_with_yml_file_jpg(tmpdir):
    """Plotting a YAML-loaded Flow writes the requested JPG file."""
    output_file = Path(tmpdir) / 'flow.jpg'
    Flow.load_config(str(cur_dir.parent / 'yaml' / 'test_flow_visualization.yml')).plot(
        output=str(output_file))
    assert output_file.exists()
def test_visualization_with_yml_file_jpg_lr(tmpdir):
    """Horizontal (left-to-right) layout plotting also writes the file."""
    output_file = Path(tmpdir) / 'flow-hor.jpg'
    Flow.load_config(str(cur_dir.parent / 'yaml' / 'test_flow_visualization.yml')).plot(
        output=str(output_file),
        vertical_layout=False)
    assert output_file.exists()
def test_visualization_plot_twice(tmpdir):
    """plot() can be called twice on the same Flow chain; both files appear."""
    output_file1 = Path(tmpdir) / 'flow1.svg'
    output_file2 = Path(tmpdir) / 'flow2.svg'
    (Flow().add(name='pod_a')
     .plot(output=str(output_file1))
     .add(name='pod_b', needs='gateway')
     .join(needs=['pod_a', 'pod_b']).plot(output=str(output_file2)))
    assert output_file1.exists()
    assert output_file2.exists()
def test_visualization_plot_in_middle(tmpdir):
    """plot() mid-chain writes its file and does not break further chaining."""
    output_file = Path(tmpdir) / 'flow3.svg'
    (Flow().add(name='pod_a')
     .plot(output=str(output_file))
     .add(name='pod_b', needs='gateway')
     .join(needs=['pod_a', 'pod_b']))
    assert output_file.exists()
def test_flow_before_after_plot(tmpdir):
    """A pod with both uses_before and uses_after can still be plotted."""
    output_file = Path(tmpdir) / 'flow.svg'
    Flow().add(uses_before='_pass', uses_after='_pass', name='p1').plot(str(output_file))
    assert output_file.exists()
def test_flow_before_plot(tmpdir):
    """A pod with only uses_before can still be plotted."""
    output_file = Path(tmpdir) / 'flow.svg'
    Flow().add(uses_before='_pass', name='p1').plot(str(output_file))
    assert output_file.exists()
def test_flow_after_plot(tmpdir):
    """A pod with only uses_after can still be plotted."""
    output_file = Path(tmpdir) / 'flow.svg'
    Flow().add(uses_after='_pass', name='p1').plot(str(output_file))
    assert output_file.exists()
| from pathlib import Path
from jina.flow import Flow
cur_dir = Path(__file__).parent
def test_visualization_with_yml_file_img(tmpdir):
output_file = Path(tmpdir) / 'flow.svg'
Flow.load_config(str(cur_dir.parent / 'yaml' / 'test_flow_visualization.yml')).plot(
output=str(output_file))
assert output_file.exists()
def test_visualization_with_yml_file_jpg(tmpdir):
output_file = Path(tmpdir) / 'flow.jpg'
Flow.load_config(str(cur_dir.parent / 'yaml' / 'test_flow_visualization.yml')).plot(
output=str(output_file))
assert output_file.exists()
def test_visualization_with_yml_file_jpg_lr(tmpdir):
output_file = Path(tmpdir) / 'flow-hor.jpg'
Flow.load_config(str(cur_dir.parent / 'yaml' / 'test_flow_visualization.yml')).plot(
output=str(output_file),
vertical_layout=False)
assert output_file.exists()
def test_visualization_plot_twice(tmpdir):
output_file1 = Path(tmpdir) / 'flow1.svg'
output_file2 = Path(tmpdir) / 'flow2.svg'
(Flow().add(name='pod_a')
.plot(output=str(output_file1))
.add(name='pod_b', needs='gateway')
.join(needs=['pod_a', 'pod_b']).plot(output=str(output_file2)))
assert output_file1.exists()
assert output_file2.exists()
def test_visualization_plot_in_middle(tmpdir):
output_file = Path(tmpdir) / 'flow3.svg'
(Flow().add(name='pod_a')
.plot(output=str(output_file))
.add(name='pod_b', needs='gateway')
.join(needs=['pod_a', 'pod_b']))
assert output_file.exists()
def test_flow_before_after_plot(tmpdir):
output_file = Path(tmpdir) / 'flow.svg'
Flow().add(uses_before='_pass', uses_after='_pass', name='p1').plot(str(output_file))
assert output_file.exists()
def test_flow_before_plot(tmpdir):
output_file = Path(tmpdir) / 'flow.svg'
Flow().add(uses_before='_pass', name='p1').plot(str(output_file))
assert output_file.exists()
def test_flow_after_plot(tmpdir):
output_file = Path(tmpdir) / 'flow.svg'
Flow().add(uses_after='_pass', name='p1').plot(str(output_file))
assert output_file.exists()
| none | 1 | 2.40793 | 2 | |
uri/beginner/1037.py | alicefrancener/problem-solving | 0 | 6614862 | <reponame>alicefrancener/problem-solving
n = float(input())
# Valid range is [0, 100]; anything outside is reported as out of interval.
if (n < 0 or n > 100):
    print('Fora de intervalo')
else:
    # Walk the interval upper bounds 25, 50, 75, 100 and report the first
    # one that contains n.
    for i in range(25,101,25):
        if n <= i:
            if (i == 25):
                # First interval is closed on both ends: [0, 25].
                print('Intervalo [{},{}]'.format(i-25, i))
            else:
                # Later intervals are half-open on the left: (i-25, i].
                print('Intervalo ({},{}]'.format(i-25, i))
            break
| n = float(input())
if (n < 0 or n > 100):
print('Fora de intervalo')
else:
for i in range(25,101,25):
if n <= i:
if (i == 25):
print('Intervalo [{},{}]'.format(i-25, i))
else:
print('Intervalo ({},{}]'.format(i-25, i))
break | none | 1 | 3.709409 | 4 | |
Desafio52.py | sergioboff/Desafios-Curso-em-Video | 0 | 6614863 | <filename>Desafio52.py
# Read an integer and check whether it is prime by counting its divisors.
n = int(input('Digite um valor inteiro: '))
x = 0  # number of divisors found so far
for i in range(1, n+1):
    if n % i == 0:
        # Divisor: switch the terminal colour to blue and count it.
        print ('\033[34m', end='')
        x += 1
    else:
        # Non-divisor: switch the terminal colour to red.
        print('\033[31m', end='')
    print ('{} '.format(i), end='')
# Reset the terminal colour.
print('\n\033[m')
# A prime has exactly two divisors: 1 and itself.
if x == 2:
    print(' O número {} foi divisivel {}x, portanto ele é Primo'.format(n, x))
else:
    print('O número {} foi divisivel {}x, portanto ele não é Primo'.format(n, x))
| <filename>Desafio52.py
#leitura de um inteiro verificação se primo
n = int(input('Digite um valor inteiro: '))
x = 0
for i in range(1, n+1):
if n % i == 0:
print ('\033[34m', end='')
x += 1
else:
print('\033[31m', end='')
print ('{} '.format(i), end='')
print('\n\033[m')
if x == 2:
print(' O número {} foi divisivel {}x, portanto ele é Primo'.format(n, x))
else:
print('O número {} foi divisivel {}x, portanto ele não é Primo'.format(n, x))
| pt | 0.999103 | #leitura de um inteiro verificação se primo | 3.996178 | 4 |
opencv/pysource/09.hsv_color.py | hainguyenvan/images-processing | 0 | 6614864 | <gh_stars>0
import cv2
import numpy as np
def notthing(x):
    """No-op trackbar callback; cv2.createTrackbar requires one even if unused."""
    pass
cap = cv2.VideoCapture(0)  # default webcam
cv2.namedWindow("trackbars")
# Lower (L-*) and upper (U-*) HSV bounds; OpenCV's hue range is 0-179.
cv2.createTrackbar("L-H", "trackbars", 0, 179, notthing)
cv2.createTrackbar("L-S", "trackbars", 0, 255, notthing)
cv2.createTrackbar("L-V", "trackbars", 0, 255, notthing)
cv2.createTrackbar("U-H", "trackbars", 179, 179, notthing)
cv2.createTrackbar("U-S", "trackbars", 255, 255, notthing)
cv2.createTrackbar("U-V", "trackbars", 255, 255, notthing)
while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Read the current trackbar positions on every frame.
    l_h = cv2.getTrackbarPos("L-H", "trackbars")
    l_s = cv2.getTrackbarPos("L-S", "trackbars")
    l_v = cv2.getTrackbarPos("L-V", "trackbars")
    u_h = cv2.getTrackbarPos("U-H", "trackbars")
    u_s = cv2.getTrackbarPos("U-S", "trackbars")
    u_v = cv2.getTrackbarPos("U-V", "trackbars")
    # NOTE(review): names say "blue" but the bounds are whatever the
    # trackbars select; they start as the full HSV range.
    lower_blue = np.array([l_h, l_s, l_v])
    upper_blue = np.array([u_h, u_s, u_v])
    # Keep only pixels whose HSV value falls inside the selected range.
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow("frame", frame)
    cv2.imshow("mask", mask)
    cv2.imshow("result", result)
    key = cv2.waitKey(1)
    if key == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()
| import cv2
import numpy as np
def notthing(x):
pass
cap = cv2.VideoCapture(0)
cv2.namedWindow("trackbars")
cv2.createTrackbar("L-H", "trackbars", 0, 179, notthing)
cv2.createTrackbar("L-S", "trackbars", 0, 255, notthing)
cv2.createTrackbar("L-V", "trackbars", 0, 255, notthing)
cv2.createTrackbar("U-H", "trackbars", 179, 179, notthing)
cv2.createTrackbar("U-S", "trackbars", 255, 255, notthing)
cv2.createTrackbar("U-V", "trackbars", 255, 255, notthing)
while True:
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
l_h = cv2.getTrackbarPos("L-H", "trackbars")
l_s = cv2.getTrackbarPos("L-S", "trackbars")
l_v = cv2.getTrackbarPos("L-V", "trackbars")
u_h = cv2.getTrackbarPos("U-H", "trackbars")
u_s = cv2.getTrackbarPos("U-S", "trackbars")
u_v = cv2.getTrackbarPos("U-V", "trackbars")
lower_blue = np.array([l_h, l_s, l_v])
upper_blue = np.array([u_h, u_s, u_v])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
result = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow("frame", frame)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows() | none | 1 | 2.827879 | 3 | |
src/lib/p4/github.py | greenpeace/planet4-circleci | 1 | 6614865 | import json
import os
import re
import requests
from p4.apis import api_query
GITHUB_API = 'https://api.github.com'
def _get_headers():
    """Build the auth/accept headers for GitHub v3 API calls.

    Reads the token from the GITHUB_OAUTH_TOKEN environment variable.
    """
    oauth_key = os.getenv('GITHUB_OAUTH_TOKEN')
    return {
        'Authorization': 'token {0}'.format(oauth_key),
        'Accept': 'application/vnd.github.v3+json'
    }
def get_repo_endpoints(pr_url):
    """Derive the GitHub API issue and comment endpoints for a PR URL.

    Args:
        pr_url (str): e.g. https://github.com/org/repo/pull/123

    Returns:
        tuple: (issue endpoint for the PR, comments endpoint for the repo)

    Raises:
        Exception: if pr_url is not a recognisable pull-request URL.
    """
    regex = re.compile('https://github.com/(.*)/pull/([0-9]{1,6})')
    matches = regex.match(pr_url)
    # A non-matching URL used to crash with AttributeError on .group();
    # raise the intended, descriptive exception instead.
    if matches is None:
        raise Exception('PR id could not be parsed.')
    repository = matches.group(1)
    pr_number = matches.group(2)
    pr_endpoint = '{0}/repos/{1}/issues/{2}'.format(
        GITHUB_API,
        repository,
        pr_number
    )
    comment_endpoint = '{0}/repos/{1}/issues/comments/'.format(
        GITHUB_API,
        repository
    )
    return pr_endpoint, comment_endpoint
def check_for_comment(pr_endpoint, title):
    """Return the id of the issue comment whose first line equals *title*, else False."""
    comments_endpoint = '{0}/comments'.format(pr_endpoint)
    response = requests.get(comments_endpoint, headers=_get_headers())
    for comment in response.json():
        # Comments are identified by their first body line acting as a title.
        if comment['body'].splitlines()[0] == title:
            return comment['id']
    return False
def get_last_commit_date(repo):
    """
    Return last commit date for a repo.
    """
    # Queries the tip of the 'main' branch; repos whose default branch has
    # a different name would fail here.
    commit = api_query(
        GITHUB_API + '/repos/' + repo + '/commits/main',
        {'Accept': 'application/vnd.github.v3+json'}
    )
    return commit['commit']['committer']['date']
def post_issue_comment(pr_endpoint, comment_endpoint, comment_id, body):
    """Create a PR comment, or update the existing one when comment_id is truthy.

    Returns the GitHub API response decoded as a dict.
    """
    data = {
        'body': body
    }
    comments_endpoint = '{0}/comments'.format(pr_endpoint)
    if comment_id:
        # Editing an existing comment uses PATCH on the comment endpoint.
        endpoint = '{0}{1}'.format(comment_endpoint, comment_id)
        response = requests.patch(endpoint, headers=_get_headers(), data=json.dumps(data))
        return response.json()
    response = requests.post(comments_endpoint, headers=_get_headers(), data=json.dumps(data))
    return response.json()
def add_issue_label(pr_endpoint, label_name):
    """Attach *label_name* to the issue/PR and return the API response dict."""
    data = {
        'labels': [label_name]
    }
    labels_endpoint = '{0}/labels'.format(pr_endpoint)
    response = requests.post(labels_endpoint, headers=_get_headers(), data=json.dumps(data))
    return response.json()
def get_pr_test_instance(pr_endpoint, prefix='[Test Env] '):
    """Return the instance name from a '[Test Env] <name>' label, else False."""
    response = requests.get(pr_endpoint, headers=_get_headers())
    labels = response.json()['labels']
    for label in labels:
        if label['name'].startswith(prefix):
            # Strip the prefix to get the bare instance name.
            return label['name'][len(prefix):]
    return False
def has_open_pr_labeled_with_instance(name):
    """Return True if an open blocks or theme PR carries the '[Test Env] <name>' label.

    Args:
        name (str): test-instance name appended to the label prefix.

    Returns:
        bool
    """
    BLOCKS_ENDPOINT = ('{0}/repos/greenpeace/planet4-plugin-gutenberg-blocks/'
                       'issues?state=open&labels=[Test Env] ').format(GITHUB_API)
    THEME_ENDPOINT = ('{0}/repos/greenpeace/planet4-master-theme/'
                      'issues?state=open&labels=[Test Env] ').format(GITHUB_API)
    blocks_prs = api_query('{0}{1}'.format(BLOCKS_ENDPOINT, name), _get_headers())
    if len(blocks_prs) > 0:
        return True
    # THEME_ENDPOINT has no placeholders left after .format(GITHUB_API), so
    # THEME_ENDPOINT.format(name) silently dropped the instance name and
    # matched every '[Test Env] ' label; append the name instead.
    theme_prs = api_query('{0}{1}'.format(THEME_ENDPOINT, name), _get_headers())
    return len(theme_prs) > 0
| import json
import os
import re
import requests
from p4.apis import api_query
GITHUB_API = 'https://api.github.com'
def _get_headers():
oauth_key = os.getenv('GITHUB_OAUTH_TOKEN')
return {
'Authorization': 'token {0}'.format(oauth_key),
'Accept': 'application/vnd.github.v3+json'
}
def get_repo_endpoints(pr_url):
    """Derive the GitHub API issue and comment endpoints for a PR URL.

    Args:
        pr_url (str): e.g. https://github.com/org/repo/pull/123

    Returns:
        tuple: (issue endpoint for the PR, comments endpoint for the repo)

    Raises:
        Exception: if pr_url is not a recognisable pull-request URL.
    """
    regex = re.compile('https://github.com/(.*)/pull/([0-9]{1,6})')
    matches = regex.match(pr_url)
    # A non-matching URL used to crash with AttributeError on .group();
    # raise the intended, descriptive exception instead.
    if matches is None:
        raise Exception('PR id could not be parsed.')
    repository = matches.group(1)
    pr_number = matches.group(2)
    pr_endpoint = '{0}/repos/{1}/issues/{2}'.format(
        GITHUB_API,
        repository,
        pr_number
    )
    comment_endpoint = '{0}/repos/{1}/issues/comments/'.format(
        GITHUB_API,
        repository
    )
    return pr_endpoint, comment_endpoint
def check_for_comment(pr_endpoint, title):
comments_endpoint = '{0}/comments'.format(pr_endpoint)
response = requests.get(comments_endpoint, headers=_get_headers())
for comment in response.json():
if comment['body'].splitlines()[0] == title:
return comment['id']
return False
def get_last_commit_date(repo):
"""
Return last commit date for a repo.
"""
commit = api_query(
GITHUB_API + '/repos/' + repo + '/commits/main',
{'Accept': 'application/vnd.github.v3+json'}
)
return commit['commit']['committer']['date']
def post_issue_comment(pr_endpoint, comment_endpoint, comment_id, body):
data = {
'body': body
}
comments_endpoint = '{0}/comments'.format(pr_endpoint)
if comment_id:
endpoint = '{0}{1}'.format(comment_endpoint, comment_id)
response = requests.patch(endpoint, headers=_get_headers(), data=json.dumps(data))
return response.json()
response = requests.post(comments_endpoint, headers=_get_headers(), data=json.dumps(data))
return response.json()
def add_issue_label(pr_endpoint, label_name):
data = {
'labels': [label_name]
}
labels_endpoint = '{0}/labels'.format(pr_endpoint)
response = requests.post(labels_endpoint, headers=_get_headers(), data=json.dumps(data))
return response.json()
def get_pr_test_instance(pr_endpoint, prefix='[Test Env] '):
response = requests.get(pr_endpoint, headers=_get_headers())
labels = response.json()['labels']
for label in labels:
if label['name'].startswith(prefix):
return label['name'][len(prefix):]
return False
def has_open_pr_labeled_with_instance(name):
    """Return True if an open blocks or theme PR carries the '[Test Env] <name>' label.

    Args:
        name (str): test-instance name appended to the label prefix.

    Returns:
        bool
    """
    BLOCKS_ENDPOINT = ('{0}/repos/greenpeace/planet4-plugin-gutenberg-blocks/'
                       'issues?state=open&labels=[Test Env] ').format(GITHUB_API)
    THEME_ENDPOINT = ('{0}/repos/greenpeace/planet4-master-theme/'
                      'issues?state=open&labels=[Test Env] ').format(GITHUB_API)
    blocks_prs = api_query('{0}{1}'.format(BLOCKS_ENDPOINT, name), _get_headers())
    if len(blocks_prs) > 0:
        return True
    # THEME_ENDPOINT has no placeholders left after .format(GITHUB_API), so
    # THEME_ENDPOINT.format(name) silently dropped the instance name and
    # matched every '[Test Env] ' label; append the name instead.
    theme_prs = api_query('{0}{1}'.format(THEME_ENDPOINT, name), _get_headers())
    return len(theme_prs) > 0
| en | 0.589478 | Creates API endpoint for a give PR url Return last commit date for a repo. | 2.793473 | 3 |
model/text_expert.py | WILeroy/MMSimilarity | 0 | 6614866 | import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModel
class TextExpert(nn.Module):
    """Wraps a HuggingFace text encoder and exposes pooled + per-token features."""

    def __init__(self, conf, reducer=None):
        # conf: dict with 'model_id' (HuggingFace model name) and
        # 'max_length' (token count the tokenizer pads/truncates to).
        # reducer: optional module applied to the token features before
        # pooling (e.g. a projection to a smaller dimension).
        super().__init__()
        self.model_id = conf['model_id']
        self.num_tokens = conf['max_length']
        self.model = AutoModel.from_pretrained(self.model_id)
        self.reducer = reducer

    def forward(self, data, mask):
        """
        args:
            data: [b, max_length], tokens created by tokenizer
            mask: [b, max_length]
        returns:
            OrderedDict with:
                pooled_feature: L2-normalised masked mean over tokens
                token_features: L2-normalised per-token embeddings
                attention_mask: the input mask, passed through
        """
        model_output = self.model(input_ids=data, attention_mask=mask)
        token_features = model_output[0] #First element of model_output contains all token embeddings
        if self.reducer is not None:
            token_features = self.reducer(token_features)
        outputs = collections.OrderedDict()
        # Normalise so downstream dot-product scores behave as cosine similarities.
        outputs['pooled_feature'] = F.normalize(self.mean_pooling(token_features, mask), p=2, dim=1)
        outputs['token_features'] = F.normalize(token_features, p=2, dim=1)
        outputs['attention_mask'] = mask
        return outputs

    def mean_pooling(self, model_output, attention_mask):
        # Mean of token embeddings over valid (mask == 1) positions only;
        # the clamp avoids division by zero for all-padding rows.
        token_embeddings = model_output
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
        sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
        return sum_embeddings / sum_mask

    def logging(self, logger):
        # Record the model configuration for experiment logs.
        logger.info('TextExpert model_id: {}'.format(self.model_id))
        logger.info('TextExpert num_tokens: {}'.format(self.num_tokens))
| import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModel
class TextExpert(nn.Module):
def __init__(self, conf, reducer=None):
super().__init__()
self.model_id = conf['model_id']
self.num_tokens = conf['max_length']
self.model = AutoModel.from_pretrained(self.model_id)
self.reducer = reducer
def forward(self, data, mask):
"""
args:
data: [b, max_length], tokens created by tokenizer
mask: [b, max_length]
"""
model_output = self.model(input_ids=data, attention_mask=mask)
token_features = model_output[0] #First element of model_output contains all token embeddings
if self.reducer is not None:
token_features = self.reducer(token_features)
outputs = collections.OrderedDict()
outputs['pooled_feature'] = F.normalize(self.mean_pooling(token_features, mask), p=2, dim=1)
outputs['token_features'] = F.normalize(token_features, p=2, dim=1)
outputs['attention_mask'] = mask
return outputs
def mean_pooling(self, model_output, attention_mask):
token_embeddings = model_output
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
return sum_embeddings / sum_mask
def logging(self, logger):
logger.info('TextExpert model_id: {}'.format(self.model_id))
logger.info('TextExpert num_tokens: {}'.format(self.num_tokens))
| en | 0.732255 | args: data: [b, max_length], tokens created by tokenizer mask: [b, max_length] #First element of model_output contains all token embeddings | 2.633506 | 3 |
2020/solutions/day_09.py | Noettore/AdventOfCode | 0 | 6614867 | """AOC 2020 Day 9"""
import pathlib
import time
import itertools
TEST_INPUT = """35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576"""
def read_input(input_path: str) -> str:
    """Read the puzzle input file and return its whitespace-stripped contents."""
    with open(input_path, 'r') as handle:
        return handle.read().strip()
def extract(input_data: str) -> list:
    """Parse the newline-separated input into a list of ints."""
    # One integer per line; a comprehension replaces the manual append loop.
    return [int(entry) for entry in input_data.split('\n')]
def part1(entries: list, preamble_length: int) -> int:
    """part1 solver: return the first entry that is not the sum of two of the
    preceding `preamble_length` numbers, or None if every entry is valid."""
    for index, entry in enumerate(entries[preamble_length:]):
        preamble = entries[index:index + preamble_length]
        # any() short-circuits on the first matching pair instead of building
        # the full list of pairwise sums before a membership test
        if not any(i + j == entry for i, j in itertools.combinations(preamble, 2)):
            return entry
    return None
def part2(entries: list, invalid_number: int) -> int:
    """part2 solver: slide a contiguous window until it sums to invalid_number,
    then return the window's smallest plus largest element."""
    lo, hi = 0, 1
    window_sum = entries[lo] + entries[hi]
    while window_sum != invalid_number:
        if window_sum < invalid_number:
            # window too small: grow it to the right
            hi += 1
            window_sum += entries[hi]
        else:
            # window too big: shrink it from the left
            window_sum -= entries[lo]
            lo += 1
    window = entries[lo:hi + 1]
    return min(window) + max(window)
def test_input_day_9():
    """pytest testing function"""
    entries = extract(TEST_INPUT)
    # expected answers from the AoC 2020 day 9 worked example (preamble length 5)
    assert part1(entries, 5) == 127
    assert part2(entries, 127) == 62
def test_bench_day_9(benchmark):
    """pytest-benchmark function"""
    # `benchmark` is the pytest-benchmark fixture: it times repeated runs of main()
    benchmark(main)
def main():
    """main function: read the puzzle input, solve both parts and print timings"""
    # puzzle input lives at ../inputs/<this-script's-stem> relative to this file
    input_path = str(pathlib.Path(__file__).resolve().parent.parent) + "/inputs/" + str(pathlib.Path(__file__).stem)
    start_time = time.time()
    input_data = read_input(input_path)
    entries = extract(input_data)
    # part 1: first number that is not a sum of two of the previous 25 entries
    invalid_number = part1(entries, 25)
    print("Part 1: %d" % invalid_number)
    # part 2 reuses part 1's answer as the target window sum
    print("Part 2: %d" % part2(entries, invalid_number))
    end_time = time.time()
    print("Execution time: %f" % (end_time-start_time))
# script entry point
if __name__ == "__main__":
    main()
| """AOC 2020 Day 9"""
import pathlib
import time
import itertools
TEST_INPUT = """35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576"""
def read_input(input_path: str) -> str:
"""take input file path and return a str with the file's content"""
with open(input_path, 'r') as input_file:
input_data = input_file.read().strip()
return input_data
def extract(input_data: str) -> list:
"""take input data and return the appropriate data structure"""
entries = list()
for entry in input_data.split('\n'):
entries.append(int(entry))
return entries
def part1(entries: list, preamble_length: int) -> int:
"""part1 solver"""
for index, entry in enumerate(entries[preamble_length:]):
preamble = entries[index:index+preamble_length]
if entry not in [i+j for i, j in itertools.combinations(preamble, 2)]:
return entry
return None
def part2(entries: list, invalid_number: int) -> int:
"""part2 solver"""
left, right = 0, 1
interval_sum = entries[left] + entries[right]
while True:
if interval_sum < invalid_number:
right += 1
interval_sum += entries[right]
elif interval_sum > invalid_number:
interval_sum -= entries[left]
left += 1
else:
numbers = sorted(entries[left:right+1])
return numbers[0] + numbers[-1]
def test_input_day_9():
"""pytest testing function"""
entries = extract(TEST_INPUT)
assert part1(entries, 5) == 127
assert part2(entries, 127) == 62
def test_bench_day_9(benchmark):
"""pytest-benchmark function"""
benchmark(main)
def main():
"""main function"""
input_path = str(pathlib.Path(__file__).resolve().parent.parent) + "/inputs/" + str(pathlib.Path(__file__).stem)
start_time = time.time()
input_data = read_input(input_path)
entries = extract(input_data)
invalid_number = part1(entries, 25)
print("Part 1: %d" % invalid_number)
print("Part 2: %d" % part2(entries, invalid_number))
end_time = time.time()
print("Execution time: %f" % (end_time-start_time))
if __name__ == "__main__":
main()
| en | 0.387081 | AOC 2020 Day 9 35 20 15 25 47 40 62 55 65 95 102 117 150 182 127 219 299 277 309 576 take input file path and return a str with the file's content take input data and return the appropriate data structure part1 solver part2 solver pytest testing function pytest-benchmark function main function | 3.151208 | 3 |
ubivar/test/resources/test_filter_rules_base.py | oriskami/oriskami-python | 4 | 6614868 | <gh_stars>1-10
import os
import ubivar
import warnings
from ubivar.test.helper import (UbivarTestCase)
DUMMY_RULES_BASE = {
"description": "a_rule_description",
"feature": "a_rule_feature",
"value": "1 Days",
"is_active": "false"
}
class UbivarAPIResourcesTests(UbivarTestCase):
    """Integration tests for the FilterRulesBase resource.

    NOTE(review): these hit the live ubivar API in a fixed call order; the
    second test mutates server-side state (rule "0") and restores a valid
    value at the end.
    """
    def test_filter_rules_base_list(self):
        # listing returns an iterable payload tagged with the resource name
        response = ubivar.FilterRulesBase.list()
        self.assertTrue(hasattr(response.data, "__iter__"))
        self.assertEqual(response.object, "filter_rules_base")
    def test_filter_rules_base_update(self):
        response = ubivar.FilterRulesBase.list()
        originalRulesBase = response.data[0]
        # update rule "0" with the dummy payload
        response = ubivar.FilterRulesBase.update("0", **DUMMY_RULES_BASE)
        filterRulesBase = response.data[0]
        # `feature` is expected to be immutable; `value`/`is_active` take the new values
        self.assertEqual(filterRulesBase["feature"] , originalRulesBase["feature"])
        self.assertEqual(filterRulesBase["value"] , DUMMY_RULES_BASE["value"])
        self.assertEqual(filterRulesBase["is_active"] , DUMMY_RULES_BASE["is_active"])
        # "new value" is not an accepted duration format, so the API must reject it
        self.assertRaises(ubivar.error.APIError, ubivar.FilterRulesBase.update, "0", value="new value")
        response = ubivar.FilterRulesBase.update("0", value="1 Months")
        self.assertEqual(response.object, "filter_rules_base")
| import os
import ubivar
import warnings
from ubivar.test.helper import (UbivarTestCase)
DUMMY_RULES_BASE = {
"description": "a_rule_description",
"feature": "a_rule_feature",
"value": "1 Days",
"is_active": "false"
}
class UbivarAPIResourcesTests(UbivarTestCase):
def test_filter_rules_base_list(self):
response = ubivar.FilterRulesBase.list()
self.assertTrue(hasattr(response.data, "__iter__"))
self.assertEqual(response.object, "filter_rules_base")
def test_filter_rules_base_update(self):
response = ubivar.FilterRulesBase.list()
originalRulesBase = response.data[0]
response = ubivar.FilterRulesBase.update("0", **DUMMY_RULES_BASE)
filterRulesBase = response.data[0]
self.assertEqual(filterRulesBase["feature"] , originalRulesBase["feature"])
self.assertEqual(filterRulesBase["value"] , DUMMY_RULES_BASE["value"])
self.assertEqual(filterRulesBase["is_active"] , DUMMY_RULES_BASE["is_active"])
self.assertRaises(ubivar.error.APIError, ubivar.FilterRulesBase.update, "0", value="new value")
response = ubivar.FilterRulesBase.update("0", value="1 Months")
self.assertEqual(response.object, "filter_rules_base") | none | 1 | 2.588512 | 3 | |
CRM_project/accounts/migrations/0002_car.py | rzhvn1/Military_CRM | 0 | 6614869 | # Generated by Django 3.2 on 2021-04-28 13:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Car model to the accounts app."""
    # must run after the app's initial migration
    dependencies = [
        ('accounts', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mark', models.CharField(max_length=100)),
                ('car_model', models.CharField(max_length=100)),
                # NOTE(review): auto_now_add stores the record-creation date, not a vehicle year — confirm intended
                ('year', models.DateField(auto_now_add=True)),
                ('number', models.CharField(max_length=100)),
                ('color', models.CharField(max_length=100)),
                ('type', models.CharField(choices=[('State', 'State'), ('Private', 'Private')], max_length=10)),
                # each car belongs to one dossier and is deleted with it
                ('dossier', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.dossier')),
            ],
        ),
    ]
| # Generated by Django 3.2 on 2021-04-28 13:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mark', models.CharField(max_length=100)),
('car_model', models.CharField(max_length=100)),
('year', models.DateField(auto_now_add=True)),
('number', models.CharField(max_length=100)),
('color', models.CharField(max_length=100)),
('type', models.CharField(choices=[('State', 'State'), ('Private', 'Private')], max_length=10)),
('dossier', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.dossier')),
],
),
]
| en | 0.837294 | # Generated by Django 3.2 on 2021-04-28 13:55 | 2.006752 | 2 |
Tkinterspain/ventana9.py | SweydAbdul/estudos-python | 0 | 6614870 | # Agenda II
from tkinter import *
from tkinter import messagebox
lista = []
def guardar():
    """Save the current form fields as one '$'-separated contact, persist the
    list, notify the user, clear the form and refresh the listing."""
    n = nome.get()
    ap = app.get()
    am = apm.get()
    c = coreio.get()
    t = telefone.get()
    # storage format: nome$apelido_paterno$apelido_materno$telefone$correio
    lista.append(f'{n}${ap}${am}${t}${c}')
    escrever()
    messagebox.showinfo('Guardado', 'O contacto foi guardado na agenda.')
    nome.set('')
    app.set('')
    apm.set('')
    # NOTE(review): apm is cleared twice — the second call looks like a leftover
    apm.set('')
    coreio.set('')
    telefone.set('')
    consultar()
def eliminar():
    """Remove every contact whose telephone number matches the spinbox
    selection, then persist, refresh the listing and notify the user."""
    eliminado = conteliminar.get()
    # Build the list of survivors instead of calling lista.remove() while
    # iterating over lista: mutating a list during iteration skips the element
    # that follows each removal, so matching contacts could be left behind.
    restantes = [elemento for elemento in lista if elemento.split('$')[3] != eliminado]
    removido = len(restantes) != len(lista)
    if removido:
        lista[:] = restantes
        # persist and redraw once, instead of once per removed element
        escrever()
        consultar()
        messagebox.showinfo('Eliminar', f'Elemento eliminado {eliminado}')
def consultar():
    """Rebuild the read-only contact listing and refresh the telephone spinbox."""
    r = Text(ventana, width=80, height=15)
    lista.sort()
    valores = []
    r.insert(INSERT, 'Nome\tApelido P\tApelido M\t\tTelefone\t\tCorreio\n')
    for elemento in lista:
        # contacts are stored as nome$ap_paterno$ap_materno$telefone$correio
        arreglo = elemento.split('$')
        valores.append(arreglo[3])
        r.insert(INSERT, f'{arreglo[0]}\t{arreglo[1]}\t'
                 f' {arreglo[2]}\t\t{arreglo[3]}\t\t'
                 f'{arreglo[4]}\t\n')
    r.place(x=20, y=230)
    # NOTE(review): this call uses `value=` while the duplicate below uses
    # `values=` — confirm which keyword is intended for the phone list
    spinTelefone = Spinbox(ventana, value=(valores),
                           textvariable=conteliminar).place(x=450, y=50)
    if lista == []:
        spinTelefone = Spinbox(ventana, values=(valores),
                               textvariable=conteliminar).place(x=450, y=50)
    r.config(state=DISABLED)
def iniciarArquivo():
    """Ensure agenda.txt exists: opening in append mode creates it if missing."""
    with open('agenda.txt', 'a'):
        pass
def carregar():
    """Load previously saved contacts from agenda.txt into the in-memory list."""
    with open('agenda.txt', 'r') as arquivo:
        for linha in arquivo:
            # drop a single trailing newline, as the original readline loop did
            if linha.endswith('\n'):
                linha = linha[:-1]
            lista.append(linha)
def escrever():
    """Persist the sorted contact list to agenda.txt, one contact per line."""
    lista.sort()
    with open('agenda.txt', 'w') as arquivo:
        arquivo.writelines(elemento + '\n' for elemento in lista)
# --- shared GUI state (read by the handlers above) and widget layout ---
ventana = Tk()
nome = StringVar()
app = StringVar()
apm = StringVar()
coreio = StringVar()
telefone = StringVar()
conteliminar = StringVar()
colorFundo = '#006'
colorLetra = "#fff"
ventana.title('Agenda com arquivos')
ventana.geometry('700x500')
ventana.configure(background=colorFundo)
# NOTE(review): every etiqueta*/caixa* variable holds None, because .place()
# returns None — the widget handles are discarded; confirm they are unneeded.
etiquetaTitulo = Label(ventana, text='Agenda com arquivos',
                       bg=colorFundo, fg=colorLetra).place(x=270, y=10)
etiquetaN = Label(ventana, text='Nome', bg=colorFundo, fg=colorLetra
                  ).place(x=50, y=50)
caixaN = Entry(ventana, textvariable=nome).place(x=150, y=50)
etiquetaApp = Label(ventana, text='Apelido Paterno',
                    bg=colorFundo, fg=colorLetra).place(x=50, y=80)
caixaApp = Entry(ventana, textvariable=app).place(x=150, y=80)
etiquetaApm = Label(ventana, text='Apelido Materno',
                    bg=colorFundo, fg=colorLetra).place(x=50, y=110)
caixaApm = Entry(ventana, textvariable=apm).place(x=150, y=110)
etiquetaT = Label(ventana, text='Telefone',
                  bg=colorFundo, fg=colorLetra).place(x=50, y=140)
caixaT = Entry(ventana, textvariable=telefone).place(x=150, y=140)
etiquetaC = Label(ventana, text='Correo', bg=colorFundo,
                  fg=colorLetra).place(x=50, y=170)
caixaC = Entry(ventana, textvariable=coreio).place(x=150, y=170)
# NOTE(review): label texts 'Telfone' and 'Correo' look like typos — confirm wording.
etiquetaEliminar = Label(ventana, text='Telfone',
                         bg=colorFundo, fg=colorLetra).place(x=370, y=50)
spinTelefone = Spinbox(ventana, textvariable=conteliminar).place(x=450, y=50)
botaoGuardar = Button(ventana, text='Guardar', command=guardar,
                      bg='#009', fg='white').place(x=180, y=200)
botaoEliminar = Button(ventana, text='Eliminar', command=eliminar,
                       bg='#009', fg='white').place(x=490, y=80)
# NOTE(review): iniciarArquivo()/carregar() are never called, so previously
# saved contacts are not loaded on startup — confirm whether this is intended.
mainloop()
| # Agenda II
from tkinter import *
from tkinter import messagebox
lista = []
def guardar():
n = nome.get()
ap = app.get()
am = apm.get()
c = coreio.get()
t = telefone.get()
lista.append(f'{n}${ap}${am}${t}${c}')
escrever()
messagebox.showinfo('Guardado', 'O contacto foi guardado na agenda.')
nome.set('')
app.set('')
apm.set('')
apm.set('')
coreio.set('')
telefone.set('')
consultar()
def eliminar():
eliminado = conteliminar.get()
removido = False
for elemento in lista:
arreglo = elemento.split('$')
if conteliminar.get() == arreglo[3]:
lista.remove(elemento)
removido = True
escrever()
consultar()
if removido:
messagebox.showinfo('Eliminar', f'Elemento eliminado {eliminado}')
def consultar():
r = Text(ventana, width=80, height=15)
lista.sort()
valores = []
r.insert(INSERT, 'Nome\tApelido P\tApelido M\t\tTelefone\t\tCorreio\n')
for elemento in lista:
arreglo = elemento.split('$')
valores.append(arreglo[3])
r.insert(INSERT, f'{arreglo[0]}\t{arreglo[1]}\t'
f' {arreglo[2]}\t\t{arreglo[3]}\t\t'
f'{arreglo[4]}\t\n')
r.place(x=20, y=230)
spinTelefone = Spinbox(ventana, value=(valores),
textvariable=conteliminar).place(x=450, y=50)
if lista == []:
spinTelefone = Spinbox(ventana, values=(valores),
textvariable=conteliminar).place(x=450, y=50)
r.config(state=DISABLED)
def iniciarArquivo():
arquivo = open('agenda.txt', 'a')
arquivo.close()
def carregar():
arquivo = open('agenda.txt', 'r')
linha = arquivo.readline()
if linha:
while linha:
if linha[-1] == '\n':
linha = linha[:-1]
lista.append(linha)
linha = arquivo.readline()
arquivo.close()
def escrever():
arquivo = open('agenda.txt', 'w')
lista.sort()
for elemento in lista:
arquivo.write(elemento + '\n')
arquivo.close()
ventana = Tk()
nome = StringVar()
app = StringVar()
apm = StringVar()
coreio = StringVar()
telefone = StringVar()
conteliminar = StringVar()
colorFundo = '#006'
colorLetra = "#fff"
ventana.title('Agenda com arquivos')
ventana.geometry('700x500')
ventana.configure(background=colorFundo)
etiquetaTitulo = Label(ventana, text='Agenda com arquivos',
bg=colorFundo, fg=colorLetra).place(x=270, y=10)
etiquetaN = Label(ventana, text='Nome', bg=colorFundo, fg=colorLetra
).place(x=50, y=50)
caixaN = Entry(ventana, textvariable=nome).place(x=150, y=50)
etiquetaApp = Label(ventana, text='Apelido Paterno',
bg=colorFundo, fg=colorLetra).place(x=50, y=80)
caixaApp = Entry(ventana, textvariable=app).place(x=150, y=80)
etiquetaApm = Label(ventana, text='Apelido Materno',
bg=colorFundo, fg=colorLetra).place(x=50, y=110)
caixaApm = Entry(ventana, textvariable=apm).place(x=150, y=110)
etiquetaT = Label(ventana, text='Telefone',
bg=colorFundo, fg=colorLetra).place(x=50, y=140)
caixaT = Entry(ventana, textvariable=telefone).place(x=150, y=140)
etiquetaC = Label(ventana, text='Correo', bg=colorFundo,
fg=colorLetra).place(x=50, y=170)
caixaC = Entry(ventana, textvariable=coreio).place(x=150, y=170)
etiquetaEliminar = Label(ventana, text='Telfone',
bg=colorFundo, fg=colorLetra).place(x=370, y=50)
spinTelefone = Spinbox(ventana, textvariable=conteliminar).place(x=450, y=50)
botaoGuardar = Button(ventana, text='Guardar', command=guardar,
bg='#009', fg='white').place(x=180, y=200)
botaoEliminar = Button(ventana, text='Eliminar', command=eliminar,
bg='#009', fg='white').place(x=490, y=80)
mainloop()
| it | 0.319141 | # Agenda II | 3.021889 | 3 |
benchmarking/create_error_benchmarks.py | MartLugt/wastewater_analysis | 0 | 6614871 | <filename>benchmarking/create_error_benchmarks.py
#!/usr/bin/env python3
import sys
import os
import argparse
import subprocess
import pandas as pd
from math import floor, log10
from select_samples import filter_fasta, read_metadata
def _voc_name(filepath):
    """Return the VOC name for a fasta path: basename without its '.fasta' suffix.

    The original code used `filepath.rstrip('.fasta')`, which strips any run of
    the characters {'.', 'f', 'a', 's', 't'} from the end of the string and so
    mangles names such as 'delta.fasta' -> 'del'.
    """
    name = os.path.basename(filepath)
    if name.endswith('.fasta'):
        name = name[:-len('.fasta')]
    return name
def main():
    """Build simulated wastewater benchmark datasets.

    Pipeline: select background genomes from the metadata/fasta database,
    optionally trim everything to the spike region, then simulate background
    and VOC reads with art_illumina for every requested VOC/error-frequency
    combination, and merge them into shuffled paired-end fastq files.
    """
    parser = argparse.ArgumentParser(description="Create wastewater benchmarks.")
    parser.add_argument('-m, --metadata', dest='metadata', type=str, required=True, help="metadata tsv file for full sequence database")
    parser.add_argument('-s, --state', dest='state', type=str, default="Connecticut", help="sample location")
    parser.add_argument('-d, --date', dest='date', type=str, default="2021-02-11", help="sample date")
    parser.add_argument('-fr, --fasta_ref', dest='fasta_ref', required=True, type=str, help="fasta file representing full sequence database")
    parser.add_argument('-fv, --fasta_voc', dest='fasta_VOC', required=True, type=str, help="comma-separated list of fasta files for Variants Of Concern (VOC)")
    parser.add_argument('-o, --outdir', dest='outdir', required=True, type=str, help="output directory")
    parser.add_argument('--voc_perc', dest='voc_perc', required=True, type=str, help="comma-separated list of VOC frequencies (%) to be simulated")
    parser.add_argument('--err_perc', dest='err_perc', required=True, type=str, help="comma-separated list of error frequencies (%) to be simulated")
    parser.add_argument('--total_cov', dest='total_cov', default=10000, type=int, help="total sequencing depth to be simulated")
    parser.add_argument('--data_exploration_only', action='store_true', help="exit after sequence selection")
    parser.add_argument('--spike_only', action='store_true', help="simulate reads for spike region only")
    parser.add_argument('--sub_error', action='store_true', help="simulate substitution error. (default: keep error rate at 0)")
    parser.add_argument('--ins_error', action='store_true', help="simulate insertion error. (default: keep error rate at 0)")
    parser.add_argument('--del_error', action='store_true', help="simulate deletion error. (default: keep error rate at 0)")
    args = parser.parse_args()
    # create output directory
    try:
        os.makedirs(args.outdir)
    except FileExistsError:
        pass
    VOC_frequencies = args.voc_perc.split(',')
    err_frequencies = args.err_perc.split(',')
    total_cov = args.total_cov
    VOC_files = args.fasta_VOC.split(',')
    VOC_names = [os.path.basename(filepath) for filepath in VOC_files]
    # lineage names to exclude from the background, e.g. 'B.1.1.7_xyz.fasta' -> 'B.1.1.7'
    exclude_list = [name.split('_')[0] for name in VOC_names]
    full_df = read_metadata(args.metadata)
    selection_df = select_benchmark_genomes(full_df, args.state, args.date,
                                            exclude_list)
    # filter fasta according to selection and write new fasta
    fasta_selection = args.outdir + "/sequences.fasta"
    filter_fasta(args.fasta_ref, fasta_selection, selection_df)
    print("Selected sequences written to {}".format(fasta_selection))
    # write corresponding metadata to tsv
    metadata_out = args.outdir + "/metadata.tsv"
    selection_df.to_csv(metadata_out, sep='\t', index=False)
    print("Metadata for selected sequences is in {}".format(metadata_out))
    if args.data_exploration_only:
        sys.exit()
    if args.spike_only:
        # trim sequences to select spike region
        print("\nTrimming genomes around spike region (21063--25884)")
        trimmed_selection = args.outdir + "/sequences.trimmed.fasta"
        subprocess.check_call("reformat.sh in={} out={} fastawrap=0 overwrite=t forcetrimleft=21063 forcetrimright=25884".format(fasta_selection, trimmed_selection), shell=True)
        # also trim VOC sequences
        for filename in VOC_files:
            VOC_name = _voc_name(filename)
            trimmed_file = args.outdir + "/{}.trimmed.fasta".format(VOC_name)
            subprocess.check_call("reformat.sh in={} out={} fastawrap=0 overwrite=t forcetrimleft=21063 forcetrimright=25884".format(filename, trimmed_file), shell=True)
        fasta_selection = trimmed_selection
        print("\nSpike sequences ready\n")
    # simulate reads for every VOC frequency x error frequency combination
    for voc_freq in VOC_frequencies:
        VOC_cov = round(total_cov * float(voc_freq)/100, 2)
        # the remaining coverage is split evenly over the background genomes
        background_cov = round((total_cov - VOC_cov) / len(selection_df.index), 2)
        voc_freq = str(round(float(voc_freq), 2))
        for err_freq in err_frequencies:
            sub_err = float(err_freq) if args.sub_error else 0
            ins_err = float(err_freq) if args.ins_error else 0
            del_err = float(err_freq) if args.del_error else 0
            # art_illumina's baseline substitution rate is 0.112 %; a quality
            # shift of qs scales it by 1/(10^(qs/10))
            if sub_err == 0:
                quality_shift = 93 # Max positive quality shift.
            else:
                quality_shift = 10 * log10((0.112 / sub_err))
            insRate1 = insRate2 = round_sig(ins_err / 100, 3)
            delRate1 = delRate2 = round_sig(del_err / 100, 3)
            # simulate background sequence read
            print("Simulating background reads from {} at {}x coverage ".format(fasta_selection, background_cov))
            print("Error rate: {} sub, {} ins, {} del ".format(sub_err, ins_err, del_err))
            subprocess.check_call("art_illumina -ss HS25 -rs 0 -i {0} -l 150 -f {1} -p -o {2}/background_ab{1}_er{8}_ -m 250 -s 10 -qs {3} -qs2 {3} -ir {4} -ir2 {5} -dr {6} -dr2 {7}"
                    .format(fasta_selection, background_cov, args.outdir, quality_shift, insRate1, insRate2, delRate1, delRate2, err_freq), shell=True)
            # simulate reads for VOC, merge and shuffle
            for filename in VOC_files:
                VOC_name = _voc_name(filename)
                if args.spike_only:
                    voc_fasta = args.outdir + "/{}.trimmed.fasta".format(VOC_name)
                else:
                    voc_fasta = filename
                print("Simulating reads from {} at {}x coverage".format(VOC_name, VOC_cov))
                print("Error rate: {} sub, {} ins, {} del ".format(sub_err, ins_err, del_err))
                subprocess.check_call("art_illumina -ss HS25 -rs 0 -i {0} -l 150 -f {1} -p -o {2}/{3}_ab{1}_er{9}_ -m 250 -s 10 -qs {4} -qs2 {4} -ir {5} -ir2 {6} -dr {7} -dr2 {8}"
                        .format(voc_fasta, VOC_cov, args.outdir, VOC_name, quality_shift, insRate1, insRate2, delRate1, delRate2, err_freq), shell=True)
                print("\nMerging fastqs...")
                subprocess.check_call("cat {0}/background_ab{3}_er{2}_1.fq {0}/{1}_ab{4}_er{2}_1.fq > {0}/tmp1.fq"
                        .format(args.outdir, VOC_name, err_freq, background_cov, VOC_cov), shell=True)
                subprocess.check_call("cat {0}/background_ab{3}_er{2}_2.fq {0}/{1}_ab{4}_er{2}_2.fq > {0}/tmp2.fq"
                        .format(args.outdir, VOC_name, err_freq, background_cov, VOC_cov), shell=True)
                print("Shuffling reads...")
                subprocess.check_call("shuffle.sh in={0}/tmp1.fq in2={0}/tmp2.fq out={0}/wwsim_{1}_ab{3}_er{2}_1.fastq out2={0}/wwsim_{1}_ab{3}_er{2}_2.fastq overwrite=t fastawrap=0 ignorebadquality"
                        .format(args.outdir, VOC_name, err_freq, voc_freq), shell=True)
            print("\nBenchmarks with a err frequency of {}% are ready!\n\n".format(err_freq))
            # clean up temporary files
            os.remove("{}/tmp1.fq".format(args.outdir))
            os.remove("{}/tmp2.fq".format(args.outdir))
    return
def select_benchmark_genomes(df, state, date, exclude_list, variant_exclude_list=None):
    """Select genomes by location and date"""
    in_state = df.loc[df["Location"].str.contains(state)]
    if date == "-":
        # no date filter; default to excluding the four named VOCs by variant
        selection_df = in_state
        if not variant_exclude_list:
            variant_exclude_list = ["VOC Alpha", "VOC Beta", "VOC Gamma", "VOC Delta"]
    else:
        selection_df = in_state.loc[in_state["date"] == date]
    print("\nLineage counts for {}:".format(state))
    print(selection_df["Pango lineage"].value_counts())
    print("\nExcluding VOC lineages {} from selection\n".format(exclude_list))
    keep = ~selection_df["Pango lineage"].isin(exclude_list)
    selection_df = selection_df.loc[keep]
    if variant_exclude_list:
        print("Excluding variants {} from selection \n".format(variant_exclude_list))
        pattern = "|".join(variant_exclude_list)
        selection_df = selection_df.loc[~selection_df["Variant"].str.contains(pattern, na=False)]
    print("\nLineage counts for {}:".format(state))
    print(selection_df["Pango lineage"].value_counts())
    return selection_df
def round_sig(x, sig=2):
    """Round x to `sig` significant figures; zero is returned unchanged."""
    if x == 0:
        return x
    magnitude = floor(log10(abs(x)))
    return round(x, sig - int(magnitude) - 1)
# command-line entry point; propagate main()'s return value as the exit status
if __name__ == "__main__":
    sys.exit(main())
| <filename>benchmarking/create_error_benchmarks.py
#!/usr/bin/env python3
import sys
import os
import argparse
import subprocess
import pandas as pd
from math import floor, log10
from select_samples import filter_fasta, read_metadata
def main():
parser = argparse.ArgumentParser(description="Create wastewater benchmarks.")
parser.add_argument('-m, --metadata', dest='metadata', type=str, required=True, help="metadata tsv file for full sequence database")
parser.add_argument('-s, --state', dest='state', type=str, default="Connecticut", help="sample location")
parser.add_argument('-d, --date', dest='date', type=str, default="2021-02-11", help="sample date")
parser.add_argument('-fr, --fasta_ref', dest='fasta_ref', required=True, type=str, help="fasta file representing full sequence database")
parser.add_argument('-fv, --fasta_voc', dest='fasta_VOC', required=True, type=str, help="comma-separated list of fasta files for Variants Of Concern (VOC)")
parser.add_argument('-o, --outdir', dest='outdir', required=True, type=str, help="output directory")
parser.add_argument('--voc_perc', dest='voc_perc', required=True, type=str, help="comma-separated list of VOC frequencies (%) to be simulated")
parser.add_argument('--err_perc', dest='err_perc', required=True, type=str, help="comma-separated list of error frequencies (%) to be simulated")
parser.add_argument('--total_cov', dest='total_cov', default=10000, type=int, help="total sequencing depth to be simulated")
parser.add_argument('--data_exploration_only', action='store_true', help="exit after sequence selection")
parser.add_argument('--spike_only', action='store_true', help="simulate reads for spike region only")
# parser.add_argument('--sub_error_rate', dest='sub_error_rate', default=1.0, type=float, help="substitution error rate for art_illumina")
# parser.add_argument('--ins_error_rate', dest='ins_error_rate', default=1.0, type=float, help="insertion error rate for art_illumina")
# parser.add_argument('--del_error_rate', dest='del_error_rate', default=1.0, type=float, help="deletion error rate for art_illumina")
parser.add_argument('--sub_error', action='store_true', help="simulate substitution error. (default: keep error rate at 0)")
parser.add_argument('--ins_error', action='store_true', help="simulate insertion error. (default: keep error rate at 0)")
parser.add_argument('--del_error', action='store_true', help="simulate deletion error. (default: keep error rate at 0)")
args = parser.parse_args()
# create output directory
try:
os.makedirs(args.outdir)
except FileExistsError:
pass
VOC_frequencies = args.voc_perc.split(',')
err_frequencies = args.err_perc.split(',')
total_cov = args.total_cov
VOC_files = args.fasta_VOC.split(',')
VOC_names = [filepath.split('/')[-1] for filepath in VOC_files]
exclude_list = [name.split('_')[0] for name in VOC_names]
full_df = read_metadata(args.metadata)
selection_df = select_benchmark_genomes(full_df, args.state, args.date,
exclude_list)
# filter fasta according to selection and write new fasta
fasta_selection = args.outdir + "/sequences.fasta"
filter_fasta(args.fasta_ref, fasta_selection, selection_df)
print("Selected sequences written to {}".format(fasta_selection))
# write corresponding metadata to tsv
metadata_out = args.outdir + "/metadata.tsv"
selection_df.to_csv(metadata_out, sep='\t', index=False)
print("Metadata for selected sequences is in {}".format(metadata_out))
if args.data_exploration_only:
sys.exit()
if args.spike_only:
# trim sequences to select spike region
print("\nTrimming genomes around spike region (21063--25884)")
trimmed_selection = args.outdir + "/sequences.trimmed.fasta"
subprocess.check_call("reformat.sh in={} out={} fastawrap=0 overwrite=t forcetrimleft=21063 forcetrimright=25884".format(fasta_selection, trimmed_selection), shell=True)
# also trim VOC sequences
for filename in VOC_files:
VOC_name = filename.rstrip('.fasta').split('/')[-1]
trimmed_file = args.outdir + "/{}.trimmed.fasta".format(VOC_name)
subprocess.check_call("reformat.sh in={} out={} fastawrap=0 overwrite=t forcetrimleft=21063 forcetrimright=25884".format(filename, trimmed_file), shell=True)
fasta_selection = trimmed_selection
print("\nSpike sequences ready\n")
# simulate reads
for voc_freq in VOC_frequencies:
VOC_cov = round(total_cov * float(voc_freq)/100, 2)
background_cov = round((total_cov - VOC_cov) / len(selection_df.index), 2)
voc_freq = str(round(float(voc_freq), 2))
for err_freq in err_frequencies:
sub_err = float(err_freq) if args.sub_error else 0
ins_err = float(err_freq) if args.ins_error else 0
del_err = float(err_freq) if args.del_error else 0
# quality shift 0 = 0.112 %
# % = default * 1/(10^(qs/10))
if sub_err == 0:
quality_shift = 93 # Max positive quality shift.
else:
quality_shift = 10 * log10((0.112 / sub_err))
insRate1 = insRate2 = round_sig(ins_err / 100, 3)
delRate1 = delRate2 = round_sig(del_err / 100, 3)
# simulate background sequence read
print("Simulating background reads from {} at {}x coverage ".format(fasta_selection, background_cov))
print("Error rate: {} sub, {} ins, {} del ".format(sub_err, ins_err, del_err))
subprocess.check_call("art_illumina -ss HS25 -rs 0 -i {0} -l 150 -f {1} -p -o {2}/background_ab{1}_er{8}_ -m 250 -s 10 -qs {3} -qs2 {3} -ir {4} -ir2 {5} -dr {6} -dr2 {7}"
.format(fasta_selection, background_cov, args.outdir, quality_shift, insRate1, insRate2, delRate1, delRate2, err_freq), shell=True)
# simulate reads for VOC, merge and shuffle
for filename in VOC_files:
VOC_name = filename.rstrip('.fasta').split('/')[-1]
if args.spike_only:
voc_fasta = args.outdir + "/{}.trimmed.fasta".format(VOC_name)
else:
voc_fasta = filename
print("Simulating reads from {} at {}x coverage".format(VOC_name, VOC_cov))
print("Error rate: {} sub, {} ins, {} del ".format(sub_err, ins_err, del_err))
subprocess.check_call("art_illumina -ss HS25 -rs 0 -i {0} -l 150 -f {1} -p -o {2}/{3}_ab{1}_er{9}_ -m 250 -s 10 -qs {4} -qs2 {4} -ir {5} -ir2 {6} -dr {7} -dr2 {8}"
.format(voc_fasta, VOC_cov, args.outdir, VOC_name, quality_shift, insRate1, insRate2, delRate1, delRate2, err_freq), shell=True)
print("\nMerging fastqs...")
subprocess.check_call("cat {0}/background_ab{3}_er{2}_1.fq {0}/{1}_ab{4}_er{2}_1.fq > {0}/tmp1.fq"
.format(args.outdir, VOC_name, err_freq, background_cov, VOC_cov), shell=True)
subprocess.check_call("cat {0}/background_ab{3}_er{2}_2.fq {0}/{1}_ab{4}_er{2}_2.fq > {0}/tmp2.fq"
.format(args.outdir, VOC_name, err_freq, background_cov, VOC_cov), shell=True)
print("Shuffling reads...")
subprocess.check_call("shuffle.sh in={0}/tmp1.fq in2={0}/tmp2.fq out={0}/wwsim_{1}_ab{3}_er{2}_1.fastq out2={0}/wwsim_{1}_ab{3}_er{2}_2.fastq overwrite=t fastawrap=0 ignorebadquality"
.format(args.outdir, VOC_name, err_freq, voc_freq), shell=True)
print("\nBenchmarks with a err frequency of {}% are ready!\n\n".format(err_freq))
# clean up temporary files
os.remove("{}/tmp1.fq".format(args.outdir))
os.remove("{}/tmp2.fq".format(args.outdir))
return
def select_benchmark_genomes(df, state, date, exclude_list, variant_exclude_list=None):
"""Select genomes by location and date"""
state_df = df.loc[df["Location"].str.contains(state)]
if date == "-":
selection_df = state_df
if not variant_exclude_list:
variant_exclude_list = ["VOC Alpha", "VOC Beta", "VOC Gamma", "VOC Delta"]
else:
selection_df = state_df.loc[state_df["date"] == date]
print("\nLineage counts for {}:".format(state))
print(selection_df["Pango lineage"].value_counts())
print("\nExcluding VOC lineages {} from selection\n".format(exclude_list))
selection_df = selection_df.loc[~selection_df["Pango lineage"].isin(exclude_list)]
if variant_exclude_list:
print("Excluding variants {} from selection \n".format(variant_exclude_list))
# selection_df = selection_df.loc[~selection_df["Variant"].isin(variant_exclude_list)]
pattern = "|".join(variant_exclude_list)
selection_df = selection_df.loc[~selection_df["Variant"].str.contains(pattern, na=False)]
print("\nLineage counts for {}:".format(state))
print(selection_df["Pango lineage"].value_counts())
return selection_df
def round_sig(x, sig=2):
if x == 0: return x
return round(x, sig-int(floor(log10(abs(x))))-1)
if __name__ == "__main__":
sys.exit(main())
| en | 0.348597 | #!/usr/bin/env python3 # parser.add_argument('--sub_error_rate', dest='sub_error_rate', default=1.0, type=float, help="substitution error rate for art_illumina") # parser.add_argument('--ins_error_rate', dest='ins_error_rate', default=1.0, type=float, help="insertion error rate for art_illumina") # parser.add_argument('--del_error_rate', dest='del_error_rate', default=1.0, type=float, help="deletion error rate for art_illumina") # create output directory # filter fasta according to selection and write new fasta # write corresponding metadata to tsv # trim sequences to select spike region # also trim VOC sequences # simulate reads # quality shift 0 = 0.112 % # % = default * 1/(10^(qs/10)) # Max positive quality shift. # simulate background sequence read # simulate reads for VOC, merge and shuffle # clean up temporary files Select genomes by location and date # selection_df = selection_df.loc[~selection_df["Variant"].isin(variant_exclude_list)] | 2.632503 | 3 |
ryu/openexchange/network/__init__.py | xiaobinglu/Ryu-Qos | 3 | 6614872 | '''
This module is about resources of domain network.
Author:www.muzixing.com
Date Work
2015/7/30 new this file
'''
| '''
This module is about resources of domain network.
Author:www.muzixing.com
Date Work
2015/7/30 new this file
'''
| en | 0.78847 | This module is about resources of domain network. Author:www.muzixing.com Date Work 2015/7/30 new this file | 1.229325 | 1 |
faster_rcnn/lib/roi.py | DevBruce/torch-implementation | 0 | 6614873 | <reponame>DevBruce/torch-implementation<filename>faster_rcnn/lib/roi.py
import numpy as np
from lib.iou import get_iou
__all__ = ['get_rois', 'rm_cross_boundary_rois', 'get_roi_ious', 'get_roi_labels']
def get_rois(input_img_shape, fmap_shape, anchor_ratios, anchor_scales):
"""
Get All Region of Interest (RoI == Region Proposals) with Anchors
"""
_, img_height, img_width = input_img_shape
_, fmap_height, fmap_width = fmap_shape
fmap_downsample_ratio = img_height // fmap_height
# Generate Anchors
y_center_arr = np.arange(
fmap_downsample_ratio,
fmap_downsample_ratio * (fmap_height + 1),
fmap_downsample_ratio,
)
y_center_arr -= fmap_downsample_ratio // 2
x_center_arr = np.arange(
fmap_downsample_ratio,
fmap_downsample_ratio * (fmap_width + 1),
fmap_downsample_ratio,
)
x_center_arr -= fmap_downsample_ratio // 2
## Generate array of anchor coordinates (y_center, x_center)
anchor_yx_arr = np.zeros([fmap_height, fmap_width], dtype=object)
for i in range(len(y_center_arr)):
for j in range(len(x_center_arr)):
anchor_yx_arr[i, j] = y_center_arr[i], x_center_arr[j]
anchor_yx_arr = anchor_yx_arr.flatten()
# Generate all region proposals with anchors
rois = np.zeros([fmap_height * fmap_width * len(anchor_ratios) * len(anchor_scales), 4])
idx = 0
for y_center, x_center in anchor_yx_arr:
# Region proposals with ratios and scales per anchor coordinates
for ratio in anchor_ratios:
for scale in anchor_scales:
h = fmap_downsample_ratio * scale * np.sqrt(ratio)
w = fmap_downsample_ratio * scale * np.sqrt(1. / ratio)
rois[idx, 0] = y_center - (h / 2.)
rois[idx, 1] = x_center - (w / 2.)
rois[idx, 2] = y_center + (h / 2.)
rois[idx, 3] = x_center + (w / 2.)
idx += 1
return rois
def rm_cross_boundary_rois(rois, img_height, img_width):
"""
Remove Cross Boundary Region Proposals
"""
cross_boundary_roi_indices = np.where(
(rois[:, 0] < 0) | # y1
(rois[:, 1] < 0) | # x1
(rois[:, 2] > img_height) | # y2
(rois[:, 3] > img_width) # x2
)[0]
return np.delete(rois, cross_boundary_roi_indices, axis=0)
def get_roi_ious(rois, gt_boxes):
"""
Get IoU per Reion Proposals (Column 1 == GT Box1, Column 2 == GT Box2 ...)
"""
roi_ious = np.zeros([len(rois), len(gt_boxes)])
for i in range(len(rois)):
for j in range(len(gt_boxes)):
roi_ious[i, j] = get_iou(boxA_pts=rois[i], boxB_pts=gt_boxes[j])
return roi_ious
def get_roi_labels(roi_ious, num_gt_boxes, config):
"""
Get RoI Labels (Positivie or Negative) for Training RPN
"""
def _get_max_iou_per_gt_idx(roi_ious, num_gt_boxes, highest_iou_per_gt):
indices = list()
for i in range(num_gt_boxes):
indices.extend(list(np.where(roi_ious[:, i] == highest_iou_per_gt[i])[0]))
return list(set(indices))
highest_iou_per_anchor = np.max(roi_ious, axis=1)
highest_iou_per_gt = np.max(roi_ious, axis=0)
highest_iou_per_gt_idx = _get_max_iou_per_gt_idx(roi_ious=roi_ious, num_gt_boxes=num_gt_boxes, highest_iou_per_gt=highest_iou_per_gt)
# Positive: 1
# Negative: 0
# None: -1
roi_labels = np.full(roi_ious.shape[0], -1)
roi_labels[highest_iou_per_anchor < config.roi_neg_iou_thr] = 0
roi_labels[highest_iou_per_anchor >= config.roi_pos_iou_thr] = 1
roi_labels[highest_iou_per_gt_idx] = 1
return roi_labels
| import numpy as np
from lib.iou import get_iou
__all__ = ['get_rois', 'rm_cross_boundary_rois', 'get_roi_ious', 'get_roi_labels']
def get_rois(input_img_shape, fmap_shape, anchor_ratios, anchor_scales):
"""
Get All Region of Interest (RoI == Region Proposals) with Anchors
"""
_, img_height, img_width = input_img_shape
_, fmap_height, fmap_width = fmap_shape
fmap_downsample_ratio = img_height // fmap_height
# Generate Anchors
y_center_arr = np.arange(
fmap_downsample_ratio,
fmap_downsample_ratio * (fmap_height + 1),
fmap_downsample_ratio,
)
y_center_arr -= fmap_downsample_ratio // 2
x_center_arr = np.arange(
fmap_downsample_ratio,
fmap_downsample_ratio * (fmap_width + 1),
fmap_downsample_ratio,
)
x_center_arr -= fmap_downsample_ratio // 2
## Generate array of anchor coordinates (y_center, x_center)
anchor_yx_arr = np.zeros([fmap_height, fmap_width], dtype=object)
for i in range(len(y_center_arr)):
for j in range(len(x_center_arr)):
anchor_yx_arr[i, j] = y_center_arr[i], x_center_arr[j]
anchor_yx_arr = anchor_yx_arr.flatten()
# Generate all region proposals with anchors
rois = np.zeros([fmap_height * fmap_width * len(anchor_ratios) * len(anchor_scales), 4])
idx = 0
for y_center, x_center in anchor_yx_arr:
# Region proposals with ratios and scales per anchor coordinates
for ratio in anchor_ratios:
for scale in anchor_scales:
h = fmap_downsample_ratio * scale * np.sqrt(ratio)
w = fmap_downsample_ratio * scale * np.sqrt(1. / ratio)
rois[idx, 0] = y_center - (h / 2.)
rois[idx, 1] = x_center - (w / 2.)
rois[idx, 2] = y_center + (h / 2.)
rois[idx, 3] = x_center + (w / 2.)
idx += 1
return rois
def rm_cross_boundary_rois(rois, img_height, img_width):
"""
Remove Cross Boundary Region Proposals
"""
cross_boundary_roi_indices = np.where(
(rois[:, 0] < 0) | # y1
(rois[:, 1] < 0) | # x1
(rois[:, 2] > img_height) | # y2
(rois[:, 3] > img_width) # x2
)[0]
return np.delete(rois, cross_boundary_roi_indices, axis=0)
def get_roi_ious(rois, gt_boxes):
"""
Get IoU per Reion Proposals (Column 1 == GT Box1, Column 2 == GT Box2 ...)
"""
roi_ious = np.zeros([len(rois), len(gt_boxes)])
for i in range(len(rois)):
for j in range(len(gt_boxes)):
roi_ious[i, j] = get_iou(boxA_pts=rois[i], boxB_pts=gt_boxes[j])
return roi_ious
def get_roi_labels(roi_ious, num_gt_boxes, config):
"""
Get RoI Labels (Positivie or Negative) for Training RPN
"""
def _get_max_iou_per_gt_idx(roi_ious, num_gt_boxes, highest_iou_per_gt):
indices = list()
for i in range(num_gt_boxes):
indices.extend(list(np.where(roi_ious[:, i] == highest_iou_per_gt[i])[0]))
return list(set(indices))
highest_iou_per_anchor = np.max(roi_ious, axis=1)
highest_iou_per_gt = np.max(roi_ious, axis=0)
highest_iou_per_gt_idx = _get_max_iou_per_gt_idx(roi_ious=roi_ious, num_gt_boxes=num_gt_boxes, highest_iou_per_gt=highest_iou_per_gt)
# Positive: 1
# Negative: 0
# None: -1
roi_labels = np.full(roi_ious.shape[0], -1)
roi_labels[highest_iou_per_anchor < config.roi_neg_iou_thr] = 0
roi_labels[highest_iou_per_anchor >= config.roi_pos_iou_thr] = 1
roi_labels[highest_iou_per_gt_idx] = 1
return roi_labels | en | 0.670486 | Get All Region of Interest (RoI == Region Proposals) with Anchors # Generate Anchors ## Generate array of anchor coordinates (y_center, x_center) # Generate all region proposals with anchors # Region proposals with ratios and scales per anchor coordinates Remove Cross Boundary Region Proposals # y1 # x1 # y2 # x2 Get IoU per Reion Proposals (Column 1 == GT Box1, Column 2 == GT Box2 ...) Get RoI Labels (Positivie or Negative) for Training RPN # Positive: 1 # Negative: 0 # None: -1 | 2.471325 | 2 |
examples/TestesAntigos/teste_if/teste2.py | PqES/ArchPython | 6 | 6614874 | def teste1():
a = int(input())
if a % 2 == 0:
return True
return "Impar"
def teste2(variavel):
b = int(input())
if b % 2 == 0:
return teste3(variavel)
return dict()
def teste3(p1):
return 1.1
def main():
resultado = teste1()
saida = teste2(2)
main()
| def teste1():
a = int(input())
if a % 2 == 0:
return True
return "Impar"
def teste2(variavel):
b = int(input())
if b % 2 == 0:
return teste3(variavel)
return dict()
def teste3(p1):
return 1.1
def main():
resultado = teste1()
saida = teste2(2)
main()
| none | 1 | 3.519664 | 4 | |
analytics/datamanager/datamanager.py | sadikovi/Pulsar | 0 | 6614875 | #!/usr/bin/env python
'''
Copyright 2015 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# import libs
import os
import json
from types import DictType, StringType
# import classes
import analytics.utils.misc as misc
import analytics.loading.jsonloader as jsl
# global parameters, like manifest name and default directory
_MANIFEST_JSON = "manifest.json"
_DIRECTORY = os.path.join(os.path.dirname(__file__), "datasets")
# Dataset parameters that are used for an parsing object
ID = "id"
NAME = "name"
DESC = "desc"
DISCOVER = "discover"
DATA = "data"
DATA_CLU = "clusters"
DATA_ELE = "elements"
DATA_PUL = "pulses"
FILE = "file"
PATH = "path"
TYPE = "type"
class Dataset(object):
"""
Simple dataset class to hold all the parameters. Converts filenames
into paths for download by specifying "dr" attribute.
Attributes:
_id (str): dataset id
_name (str): dataset name
_desc (str): dataset desc
_discover (bool): dataset isDiscover property
_clusters (dict<str, str>): clusters information (path and type)
_elements (dict<str, str>): elements information (path and type)
_pulses (dict<str, str>): pulses information (path and type)
"""
def __init__(self, obj, dr):
misc.checkTypeAgainst(type(obj), DictType, __file__)
misc.checkTypeAgainst(type(dr), StringType, __file__)
self._id = obj[ID]
self._name = obj[NAME]
self._desc = obj[DESC]
self._discover = bool(obj[DISCOVER])
# files data
_data = obj[DATA]
# path and type constants
# clusters file name and type
_clusters_filename = _data[DATA_CLU][FILE]
_clusters_filetype = _data[DATA_CLU][TYPE]
self._clusters = {
PATH: self._filepath(dr, _clusters_filename, _clusters_filetype),
TYPE: _clusters_filetype
}
# elements file name and type
_elements_filename = _data[DATA_ELE][FILE]
_elements_filetype = _data[DATA_ELE][TYPE]
self._elements = {
PATH: self._filepath(dr, _elements_filename, _elements_filetype),
TYPE: _elements_filetype
}
self._pulses = None
# if discover is False, specify pulses
if not self._discover:
_pulses_filename = _data[DATA_PUL][FILE]
_pulses_filetype = _data[DATA_PUL][TYPE]
self._pulses = {
PATH: self._filepath(dr, _pulses_filename, _pulses_filetype),
TYPE: _pulses_filetype
}
# [Public]
def getJSON(self):
"""
Returns json object.
Returns:
dict<str, str>: json representation of Dataset instance
"""
return {
"id": self._id,
"name": self._name,
"desc": self._desc
}
# [Private]
def _filepath(self, directory, filename, filetype):
"""
Returns full file path for specified directory, file name and file
type.
Args:
directory (str): directory
filename (str): file name
filetype (str): file type
Returns:
str: full file path
"""
# replace any "/" on "_"
filename = filename.replace("/", "_")
return os.path.join(directory, "%s.%s"%(filename, filetype))
class DataManager(object):
"""
DataManager class helps with datasets maintainance. It searches
specified directory and collect manifests, then parses them into
Dataset objects.
Attributes:
_manifests (dir<str, str>): map of dirs and manifest file paths
_datasets (dir<str, Dataset>): map of datasets
_directory (str): search directory
"""
def __init__(self):
# declare attributes
self._manifests = {}; self._datasets = {}; self._directory = ""
self.resetToDefault()
# [Private]
def _findManifests(self, directory):
"""
Scans directory and collects manifests paths into _manifests
attribute.
Args:
directory (str): directory to search in
"""
misc.checkTypeAgainst(type(directory), StringType, __file__)
for root, dirs, files in os.walk(directory):
for file in files:
if file == _MANIFEST_JSON:
self._manifests[root] = os.path.join(root, file)
# [Private]
def _parseManifest(self, path):
"""
Parses manifest that has path specified. Returns True if dataset
was parsed successfully and added to _datasets dictionary.
Otherwise, aborts the parsing and returns False.
Args:
path (str): path to the manifest file
Returns:
bool: status of parsing operation
"""
# load from path, if it fails, skip it
dataset = None
try:
obj = None
loader = jsl.JsonLoader(path)
obj = loader.processData()
# create dataset
dataset = Dataset(obj, os.path.dirname(path))
except:
dataset = None
if dataset is None:
return False
else:
self._datasets[dataset._id] = dataset
return True
# [Public]
def loadDatasets(self, searchpath=None):
"""
Loads datasets from _directory path.
Args:
searchpath (str): search path for datasets
"""
# assign search path or use it to save previous path
searchpath = searchpath or self._directory
# clean previous datasets and manifests info
self.resetToDefault()
# assign back search path
self.setSearchPath(searchpath)
# look for manifests and parse them
self._findManifests(self._directory)
for manifestpath in self._manifests.values():
self._parseManifest(manifestpath)
# [Public]
def getDatasets(self):
"""
Returns list of Dataset object collected.
Returns:
list<Dataset>: list of Dataset instances
"""
return self._datasets.values()
# [Public]
def getDataset(self, id):
"""
Returns dataset by id specified. If there is no such id, then
returns None.
Args:
id (str): dataset id
Returns:
Dataset: dataset object with id specified
"""
return self._datasets[id] if id in self._datasets else None
# [Public]
def setSearchPath(self, path):
"""
Sets searching path.
Args:
path (str): new searching directory
"""
self._directory = path
# [Public]
def resetToDefault(self):
"""
Resets instance to default parameters.
"""
self._directory = _DIRECTORY
self._manifests = {}
self._datasets = {}
# [Public]
def util_testDatasets(self, searchpath=None):
"""
Collects and tests datasets that are in the folder. Keeps results
in list for a particular manifest found.
Args:
searchpath (str): search path
Returns:
dict<str, obj>: result of the testing
"""
searchpath = searchpath or self._directory
self.resetToDefault()
# assign back search path
self.setSearchPath(searchpath)
# look for manifests and parse them
self._findManifests(self._directory)
# statistics for manifests
manifest_stats = {}
for manifestpath in self._manifests.values():
manifest_stats[manifestpath] = {}
_exists = os.path.isfile(manifestpath)
manifest_stats[manifestpath]["manifest"] = _exists
if _exists:
try:
self._parseManifest(manifestpath)
manifest_stats[manifestpath]["dataset"] = True
except:
manifest_stats[manifestpath]["dataset"] = False
# statistics for datasets
ds_stats = {}
for ds in self._datasets.values():
ds_stats[ds._id] = {}
ds_stats[ds._id][DATA_CLU] = os.path.isfile(ds._clusters[PATH])
ds_stats[ds._id][DATA_ELE] = os.path.isfile(ds._elements[PATH])
if ds._discover:
continue
ds_stats[ds._id][DATA_PUL] = os.path.isfile(ds._pulses[PATH])
# stats finished, report statistics
return {"manifests": manifest_stats, "datasets": ds_stats}
# [Public]
def util_checkDatasetsResult(self, obj):
"""
Checks overall result of the object received from
"util_testDatasets" and returns bool value that indicates whether
test is passed or not.
Args:
obj (dict<str, obj>): result of the testing
Returns:
bool: flag to show whether test is passed or failed
"""
manfs = obj["manifests"]
ds = obj["datasets"]
flag = True
# test types
flag = flag and type(manfs) is DictType and type(ds) is DictType
# test lengths of keys
flag = flag and len(manfs.keys()) == len(ds.keys())
# check result of common assertions
if not flag:
return flag
# continue testing of each manifest and dataset
# check manifests
mn = [m for manf in manfs.values() for m in manf.values() if not m]
# check datasets
dt = [p for dsf in ds.values() for p in dsf.values() if not p]
return flag and len(mn)==0 and len(dt)==0
| #!/usr/bin/env python
'''
Copyright 2015 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# import libs
import os
import json
from types import DictType, StringType
# import classes
import analytics.utils.misc as misc
import analytics.loading.jsonloader as jsl
# global parameters, like manifest name and default directory
_MANIFEST_JSON = "manifest.json"
_DIRECTORY = os.path.join(os.path.dirname(__file__), "datasets")
# Dataset parameters that are used for an parsing object
ID = "id"
NAME = "name"
DESC = "desc"
DISCOVER = "discover"
DATA = "data"
DATA_CLU = "clusters"
DATA_ELE = "elements"
DATA_PUL = "pulses"
FILE = "file"
PATH = "path"
TYPE = "type"
class Dataset(object):
"""
Simple dataset class to hold all the parameters. Converts filenames
into paths for download by specifying "dr" attribute.
Attributes:
_id (str): dataset id
_name (str): dataset name
_desc (str): dataset desc
_discover (bool): dataset isDiscover property
_clusters (dict<str, str>): clusters information (path and type)
_elements (dict<str, str>): elements information (path and type)
_pulses (dict<str, str>): pulses information (path and type)
"""
def __init__(self, obj, dr):
misc.checkTypeAgainst(type(obj), DictType, __file__)
misc.checkTypeAgainst(type(dr), StringType, __file__)
self._id = obj[ID]
self._name = obj[NAME]
self._desc = obj[DESC]
self._discover = bool(obj[DISCOVER])
# files data
_data = obj[DATA]
# path and type constants
# clusters file name and type
_clusters_filename = _data[DATA_CLU][FILE]
_clusters_filetype = _data[DATA_CLU][TYPE]
self._clusters = {
PATH: self._filepath(dr, _clusters_filename, _clusters_filetype),
TYPE: _clusters_filetype
}
# elements file name and type
_elements_filename = _data[DATA_ELE][FILE]
_elements_filetype = _data[DATA_ELE][TYPE]
self._elements = {
PATH: self._filepath(dr, _elements_filename, _elements_filetype),
TYPE: _elements_filetype
}
self._pulses = None
# if discover is False, specify pulses
if not self._discover:
_pulses_filename = _data[DATA_PUL][FILE]
_pulses_filetype = _data[DATA_PUL][TYPE]
self._pulses = {
PATH: self._filepath(dr, _pulses_filename, _pulses_filetype),
TYPE: _pulses_filetype
}
# [Public]
def getJSON(self):
"""
Returns json object.
Returns:
dict<str, str>: json representation of Dataset instance
"""
return {
"id": self._id,
"name": self._name,
"desc": self._desc
}
# [Private]
def _filepath(self, directory, filename, filetype):
"""
Returns full file path for specified directory, file name and file
type.
Args:
directory (str): directory
filename (str): file name
filetype (str): file type
Returns:
str: full file path
"""
# replace any "/" on "_"
filename = filename.replace("/", "_")
return os.path.join(directory, "%s.%s"%(filename, filetype))
class DataManager(object):
"""
DataManager class helps with datasets maintainance. It searches
specified directory and collect manifests, then parses them into
Dataset objects.
Attributes:
_manifests (dir<str, str>): map of dirs and manifest file paths
_datasets (dir<str, Dataset>): map of datasets
_directory (str): search directory
"""
def __init__(self):
# declare attributes
self._manifests = {}; self._datasets = {}; self._directory = ""
self.resetToDefault()
# [Private]
def _findManifests(self, directory):
"""
Scans directory and collects manifests paths into _manifests
attribute.
Args:
directory (str): directory to search in
"""
misc.checkTypeAgainst(type(directory), StringType, __file__)
for root, dirs, files in os.walk(directory):
for file in files:
if file == _MANIFEST_JSON:
self._manifests[root] = os.path.join(root, file)
# [Private]
def _parseManifest(self, path):
"""
Parses manifest that has path specified. Returns True if dataset
was parsed successfully and added to _datasets dictionary.
Otherwise, aborts the parsing and returns False.
Args:
path (str): path to the manifest file
Returns:
bool: status of parsing operation
"""
# load from path, if it fails, skip it
dataset = None
try:
obj = None
loader = jsl.JsonLoader(path)
obj = loader.processData()
# create dataset
dataset = Dataset(obj, os.path.dirname(path))
except:
dataset = None
if dataset is None:
return False
else:
self._datasets[dataset._id] = dataset
return True
# [Public]
def loadDatasets(self, searchpath=None):
"""
Loads datasets from _directory path.
Args:
searchpath (str): search path for datasets
"""
# assign search path or use it to save previous path
searchpath = searchpath or self._directory
# clean previous datasets and manifests info
self.resetToDefault()
# assign back search path
self.setSearchPath(searchpath)
# look for manifests and parse them
self._findManifests(self._directory)
for manifestpath in self._manifests.values():
self._parseManifest(manifestpath)
# [Public]
def getDatasets(self):
"""
Returns list of Dataset object collected.
Returns:
list<Dataset>: list of Dataset instances
"""
return self._datasets.values()
# [Public]
def getDataset(self, id):
"""
Returns dataset by id specified. If there is no such id, then
returns None.
Args:
id (str): dataset id
Returns:
Dataset: dataset object with id specified
"""
return self._datasets[id] if id in self._datasets else None
# [Public]
def setSearchPath(self, path):
"""
Sets searching path.
Args:
path (str): new searching directory
"""
self._directory = path
# [Public]
def resetToDefault(self):
"""
Resets instance to default parameters.
"""
self._directory = _DIRECTORY
self._manifests = {}
self._datasets = {}
# [Public]
def util_testDatasets(self, searchpath=None):
"""
Collects and tests datasets that are in the folder. Keeps results
in list for a particular manifest found.
Args:
searchpath (str): search path
Returns:
dict<str, obj>: result of the testing
"""
searchpath = searchpath or self._directory
self.resetToDefault()
# assign back search path
self.setSearchPath(searchpath)
# look for manifests and parse them
self._findManifests(self._directory)
# statistics for manifests
manifest_stats = {}
for manifestpath in self._manifests.values():
manifest_stats[manifestpath] = {}
_exists = os.path.isfile(manifestpath)
manifest_stats[manifestpath]["manifest"] = _exists
if _exists:
try:
self._parseManifest(manifestpath)
manifest_stats[manifestpath]["dataset"] = True
except:
manifest_stats[manifestpath]["dataset"] = False
# statistics for datasets
ds_stats = {}
for ds in self._datasets.values():
ds_stats[ds._id] = {}
ds_stats[ds._id][DATA_CLU] = os.path.isfile(ds._clusters[PATH])
ds_stats[ds._id][DATA_ELE] = os.path.isfile(ds._elements[PATH])
if ds._discover:
continue
ds_stats[ds._id][DATA_PUL] = os.path.isfile(ds._pulses[PATH])
# stats finished, report statistics
return {"manifests": manifest_stats, "datasets": ds_stats}
# [Public]
def util_checkDatasetsResult(self, obj):
"""
Checks overall result of the object received from
"util_testDatasets" and returns bool value that indicates whether
test is passed or not.
Args:
obj (dict<str, obj>): result of the testing
Returns:
bool: flag to show whether test is passed or failed
"""
manfs = obj["manifests"]
ds = obj["datasets"]
flag = True
# test types
flag = flag and type(manfs) is DictType and type(ds) is DictType
# test lengths of keys
flag = flag and len(manfs.keys()) == len(ds.keys())
# check result of common assertions
if not flag:
return flag
# continue testing of each manifest and dataset
# check manifests
mn = [m for manf in manfs.values() for m in manf.values() if not m]
# check datasets
dt = [p for dsf in ds.values() for p in dsf.values() if not p]
return flag and len(mn)==0 and len(dt)==0
| en | 0.725788 | #!/usr/bin/env python Copyright 2015 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # import libs # import classes # global parameters, like manifest name and default directory # Dataset parameters that are used for an parsing object Simple dataset class to hold all the parameters. Converts filenames into paths for download by specifying "dr" attribute. Attributes: _id (str): dataset id _name (str): dataset name _desc (str): dataset desc _discover (bool): dataset isDiscover property _clusters (dict<str, str>): clusters information (path and type) _elements (dict<str, str>): elements information (path and type) _pulses (dict<str, str>): pulses information (path and type) # files data # path and type constants # clusters file name and type # elements file name and type # if discover is False, specify pulses # [Public] Returns json object. Returns: dict<str, str>: json representation of Dataset instance # [Private] Returns full file path for specified directory, file name and file type. Args: directory (str): directory filename (str): file name filetype (str): file type Returns: str: full file path # replace any "/" on "_" DataManager class helps with datasets maintainance. It searches specified directory and collect manifests, then parses them into Dataset objects. 
Attributes: _manifests (dir<str, str>): map of dirs and manifest file paths _datasets (dir<str, Dataset>): map of datasets _directory (str): search directory # declare attributes # [Private] Scans directory and collects manifests paths into _manifests attribute. Args: directory (str): directory to search in # [Private] Parses manifest that has path specified. Returns True if dataset was parsed successfully and added to _datasets dictionary. Otherwise, aborts the parsing and returns False. Args: path (str): path to the manifest file Returns: bool: status of parsing operation # load from path, if it fails, skip it # create dataset # [Public] Loads datasets from _directory path. Args: searchpath (str): search path for datasets # assign search path or use it to save previous path # clean previous datasets and manifests info # assign back search path # look for manifests and parse them # [Public] Returns list of Dataset object collected. Returns: list<Dataset>: list of Dataset instances # [Public] Returns dataset by id specified. If there is no such id, then returns None. Args: id (str): dataset id Returns: Dataset: dataset object with id specified # [Public] Sets searching path. Args: path (str): new searching directory # [Public] Resets instance to default parameters. # [Public] Collects and tests datasets that are in the folder. Keeps results in list for a particular manifest found. Args: searchpath (str): search path Returns: dict<str, obj>: result of the testing # assign back search path # look for manifests and parse them # statistics for manifests # statistics for datasets # stats finished, report statistics # [Public] Checks overall result of the object received from "util_testDatasets" and returns bool value that indicates whether test is passed or not. 
Args: obj (dict<str, obj>): result of the testing Returns: bool: flag to show whether test is passed or failed # test types # test lengths of keys # check result of common assertions # continue testing of each manifest and dataset # check manifests # check datasets | 2.183151 | 2 |
django-example/app/views.py | jordaneremieff/serverless-mangum-examples | 19 | 6614876 | <filename>django-example/app/views.py
from pprint import pformat
from django.views.generic import TemplateView
class HelloWorldView(TemplateView):
template_name = "helloworld.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["scope"] = pformat(self.request.scope)
return context
| <filename>django-example/app/views.py
from pprint import pformat
from django.views.generic import TemplateView
class HelloWorldView(TemplateView):
template_name = "helloworld.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["scope"] = pformat(self.request.scope)
return context
| none | 1 | 2.375455 | 2 | |
datastage_functions_unit_test.py | stedoherty1/DataStage-Tools | 0 | 6614877 | #!/usr/bin/env python3
import unittest
import datastage_functions
class TestGetDSAdminNames(unittest.TestCase):
def test_correct_version_xml(self):
"""
Environment variables: unset
Args : default names
"""
version_xml='/iis/01/InformationServer/Version.xml'
admin_name='dsadm'
self.assertEqual(datastage_functions.GetDSAdminName(version_xml), admin_name)
class TestGetProjectPath(unittest.TestCase):
def test_existing_project(self):
version_xml='/iis/01/InformationServer/Version.xml'
dshome='/iis/test/InformationServer/Server/DSEngine'
dsadm_user='dsadm'
project='dstage1'
expected_path='/iis/01/InformationServer/Server/Projects/dstage1'
self.assertEqual(datastage_functions.GetProjectPath(project_name='dstage1',dsadm_user='dsadm', dshome='/iis/01/InformationServer/Server/DSEngine'), expected_path)
class TestGetListOfComponentsRecentlyModified(unittest.TestCase):
def test_list_all_components(self):
from datetime import datetime
modified_since = datetime.strptime("2021-04-02 01:02:03",'%Y-%m-%d %H:%M:%S')
expected_result = datastage_functions.GetListOfComponentsRecentlyModified(modified_since=modified_since)
self.assertEqual(datastage_functions.GetListOfComponentsRecentlyModified(modified_since=modified_since),expected_result)
if __name__ == '__main__':
# begin the unittest.main()
import sys
scriptname=sys.argv[0]
sys.argv=['']
sys.argv[0]=scriptname
unittest.main() | #!/usr/bin/env python3
import unittest
import datastage_functions
class TestGetDSAdminNames(unittest.TestCase):
def test_correct_version_xml(self):
"""
Environment variables: unset
Args : default names
"""
version_xml='/iis/01/InformationServer/Version.xml'
admin_name='dsadm'
self.assertEqual(datastage_functions.GetDSAdminName(version_xml), admin_name)
class TestGetProjectPath(unittest.TestCase):
def test_existing_project(self):
version_xml='/iis/01/InformationServer/Version.xml'
dshome='/iis/test/InformationServer/Server/DSEngine'
dsadm_user='dsadm'
project='dstage1'
expected_path='/iis/01/InformationServer/Server/Projects/dstage1'
self.assertEqual(datastage_functions.GetProjectPath(project_name='dstage1',dsadm_user='dsadm', dshome='/iis/01/InformationServer/Server/DSEngine'), expected_path)
class TestGetListOfComponentsRecentlyModified(unittest.TestCase):
def test_list_all_components(self):
from datetime import datetime
modified_since = datetime.strptime("2021-04-02 01:02:03",'%Y-%m-%d %H:%M:%S')
expected_result = datastage_functions.GetListOfComponentsRecentlyModified(modified_since=modified_since)
self.assertEqual(datastage_functions.GetListOfComponentsRecentlyModified(modified_since=modified_since),expected_result)
if __name__ == '__main__':
# begin the unittest.main()
import sys
scriptname=sys.argv[0]
sys.argv=['']
sys.argv[0]=scriptname
unittest.main() | en | 0.206981 | #!/usr/bin/env python3 Environment variables: unset Args : default names # begin the unittest.main() | 2.416094 | 2 |
crawler/weibo/weibo/utils.py | markhuyong/galaxy | 0 | 6614878 | # -*- coding: utf-8 -*-
import random
import sys
from scrapy.http.headers import Headers
from crawler.misc import mobile_agents
reload(sys)
sys.setdefaultencoding('utf8')
class BaseHelper(object):
WEIBO_LIST_PAGE_URL_PREFIX = "https://weibo.cn/u/{uid}?page={page}"
WEIBO_SEARCH_URL = "https://weibo.cn/search/user/?keyword={nick_name}"
M_WEIBO_HOME_URL = "https://m.weibo.cn/u/{uid}"
M_WEIBO_STATUS_URL = "https://m.weibo.cn/container/getIndex?type=uid&value={uid}&containerid={cid}&page={page}"
M_WEIBO_USER_URL = "https://m.weibo.cn/n/{nick_name}"
M_WEIBO_USER_INFO_URL = "https://m.weibo.cn/container/getIndex?type=uid&value={uid}"
M_WEIBO_LONG_TEXT = "https://m.weibo.cn/statuses/extend?id={text_id}"
M_WEIBO_SINGLE_STATUS = "https://m.weibo.cn/{container_id}/{mid}"
M_WEIBO_STATUS_MID = "https://m.weibo.cn/status/{mid}"
@classmethod
def get_headers(cls):
return Headers({
# 'User-Agent': self._get_user_agent(),
# 'Content-Type': 'application/json',
# "Connection": "keep-alive",
'Accept': 'application/json; charset=utf-8',
'Host': 'm.weibo.cn',
})
@classmethod
def get_login_headers(cls):
return {
# 'User-Agent': cls._get_user_agent(),
# 'User-Agent': "Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Pragma": "no-cache",
"Origin": "https://passport.weibo.cn",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.8,zh;q=0.6",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"Referer": "https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F",
"Connection": "keep-alive",
}
@classmethod
def get_status_headers(cls, uid):
return Headers({
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh;q=0.6',
'Accept': 'application/json, text/plain, */*',
'Host': 'm.weibo.cn',
'Referer': cls.get_m_weibo_home_url(uid),
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive',
})
@classmethod
def get_single_status_headers(cls, uid):
"""
head for single status of weibo
:param uid:
:return:
"""
return Headers({
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh;q=0.6',
'Host': 'm.weibo.cn',
'Referer': cls.get_m_weibo_home_url(uid),
})
@staticmethod
def random_user_agent():
return str(random.choice(mobile_agents.AGENTS))
@classmethod
def get_common_page_url(cls, nick_name):
# encode = nick_name.encode('utf-8') if not isinstance(nick_name, unicode) else nick_name
# valid_utf8 = True
# try:
# nick_name.decode('utf-8')
# except UnicodeDecodeError:
# valid_utf8 = False
# encode = nick_name if valid_utf8 else nick_name.encode('utf-8')
return cls.WEIBO_SEARCH_URL.format(nick_name=nick_name)
@classmethod
def get_weibo_status_url(cls, uid, page=1):
return cls.WEIBO_LIST_PAGE_URL_PREFIX.format(uid=uid, page=page)
@classmethod
def get_m_weibo_status_url(cls, uid, cid, page=1):
return cls.M_WEIBO_STATUS_URL.format(uid=uid, cid=cid, page=page)
@classmethod
def get_m_weibo_home_url(cls, uid):
return cls.M_WEIBO_HOME_URL.format(uid=uid)
@classmethod
def get_m_weibo_user_url(cls, nick_name):
return cls.M_WEIBO_USER_URL.format(nick_name=nick_name)
@classmethod
def get_m_weibo_user_info_url(cls, uid):
return cls.M_WEIBO_USER_INFO_URL.format(uid=uid)
@classmethod
def get_m_weibo_long_text(cls, text_id):
return cls.M_WEIBO_LONG_TEXT.format(text_id=text_id)
@classmethod
def get_m_weibo_single_status(cls, container_id, mid):
return cls.M_WEIBO_SINGLE_STATUS.format(container_id=container_id, mid=mid)
@classmethod
def get_m_weibo_status_mid(cls, mid):
return cls.M_WEIBO_STATUS_MID.format(mid=mid)
@staticmethod
def get_cookie_key_prefix(spider):
sep = "_"
assert spider.name.index(sep) > 0
return "{}:Cookies".format(spider.name.split(sep)[0])
| # -*- coding: utf-8 -*-
import random
import sys
from scrapy.http.headers import Headers
from crawler.misc import mobile_agents
reload(sys)
sys.setdefaultencoding('utf8')
class BaseHelper(object):
WEIBO_LIST_PAGE_URL_PREFIX = "https://weibo.cn/u/{uid}?page={page}"
WEIBO_SEARCH_URL = "https://weibo.cn/search/user/?keyword={nick_name}"
M_WEIBO_HOME_URL = "https://m.weibo.cn/u/{uid}"
M_WEIBO_STATUS_URL = "https://m.weibo.cn/container/getIndex?type=uid&value={uid}&containerid={cid}&page={page}"
M_WEIBO_USER_URL = "https://m.weibo.cn/n/{nick_name}"
M_WEIBO_USER_INFO_URL = "https://m.weibo.cn/container/getIndex?type=uid&value={uid}"
M_WEIBO_LONG_TEXT = "https://m.weibo.cn/statuses/extend?id={text_id}"
M_WEIBO_SINGLE_STATUS = "https://m.weibo.cn/{container_id}/{mid}"
M_WEIBO_STATUS_MID = "https://m.weibo.cn/status/{mid}"
@classmethod
def get_headers(cls):
return Headers({
# 'User-Agent': self._get_user_agent(),
# 'Content-Type': 'application/json',
# "Connection": "keep-alive",
'Accept': 'application/json; charset=utf-8',
'Host': 'm.weibo.cn',
})
@classmethod
def get_login_headers(cls):
return {
# 'User-Agent': cls._get_user_agent(),
# 'User-Agent': "Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Pragma": "no-cache",
"Origin": "https://passport.weibo.cn",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.8,zh;q=0.6",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"Referer": "https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F",
"Connection": "keep-alive",
}
@classmethod
def get_status_headers(cls, uid):
return Headers({
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh;q=0.6',
'Accept': 'application/json, text/plain, */*',
'Host': 'm.weibo.cn',
'Referer': cls.get_m_weibo_home_url(uid),
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive',
})
@classmethod
def get_single_status_headers(cls, uid):
"""
head for single status of weibo
:param uid:
:return:
"""
return Headers({
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh;q=0.6',
'Host': 'm.weibo.cn',
'Referer': cls.get_m_weibo_home_url(uid),
})
@staticmethod
def random_user_agent():
return str(random.choice(mobile_agents.AGENTS))
@classmethod
def get_common_page_url(cls, nick_name):
# encode = nick_name.encode('utf-8') if not isinstance(nick_name, unicode) else nick_name
# valid_utf8 = True
# try:
# nick_name.decode('utf-8')
# except UnicodeDecodeError:
# valid_utf8 = False
# encode = nick_name if valid_utf8 else nick_name.encode('utf-8')
return cls.WEIBO_SEARCH_URL.format(nick_name=nick_name)
@classmethod
def get_weibo_status_url(cls, uid, page=1):
return cls.WEIBO_LIST_PAGE_URL_PREFIX.format(uid=uid, page=page)
@classmethod
def get_m_weibo_status_url(cls, uid, cid, page=1):
return cls.M_WEIBO_STATUS_URL.format(uid=uid, cid=cid, page=page)
@classmethod
def get_m_weibo_home_url(cls, uid):
return cls.M_WEIBO_HOME_URL.format(uid=uid)
@classmethod
def get_m_weibo_user_url(cls, nick_name):
return cls.M_WEIBO_USER_URL.format(nick_name=nick_name)
@classmethod
def get_m_weibo_user_info_url(cls, uid):
return cls.M_WEIBO_USER_INFO_URL.format(uid=uid)
@classmethod
def get_m_weibo_long_text(cls, text_id):
return cls.M_WEIBO_LONG_TEXT.format(text_id=text_id)
@classmethod
def get_m_weibo_single_status(cls, container_id, mid):
return cls.M_WEIBO_SINGLE_STATUS.format(container_id=container_id, mid=mid)
@classmethod
def get_m_weibo_status_mid(cls, mid):
return cls.M_WEIBO_STATUS_MID.format(mid=mid)
@staticmethod
def get_cookie_key_prefix(spider):
sep = "_"
assert spider.name.index(sep) > 0
return "{}:Cookies".format(spider.name.split(sep)[0])
| en | 0.401841 | # -*- coding: utf-8 -*- # 'User-Agent': self._get_user_agent(), # 'Content-Type': 'application/json', # "Connection": "keep-alive", # 'User-Agent': cls._get_user_agent(), # 'User-Agent': "Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1", head for single status of weibo :param uid: :return: # encode = nick_name.encode('utf-8') if not isinstance(nick_name, unicode) else nick_name # valid_utf8 = True # try: # nick_name.decode('utf-8') # except UnicodeDecodeError: # valid_utf8 = False # encode = nick_name if valid_utf8 else nick_name.encode('utf-8') | 2.246939 | 2 |
PythonCrashCourse/Classes/classes.py | fajardodiaz/python_devops | 0 | 6614879 | <gh_stars>0
class Dog:
"""A simple class to model dogs"""
def __init__(self,name,age):
"""Initialize the name and age attributes"""
self.name = name
self.age = age
def sit(self):
return f"{self.name} is now sitting."
def roll_over(self):
return f"{self.name} rolled over!"
#A new instance
my_dog = Dog("Bobby",11)
#Get attributes
print(f"My dog's name is {my_dog.name}.")
print(f"My dog's age is {my_dog.age}.")
#Call the methods
print(my_dog.sit())
print(my_dog.roll_over())
print("\n\nAnother Dog:\n\n")
#Another instance
another_dog = Dog("Holly",2)
#Attributes
print(f"My dog's name is {another_dog.name}.")
print(f"My dog's age is {another_dog.age}.")
#Methods
print(another_dog.sit())
print(another_dog.roll_over()) | class Dog:
"""A simple class to model dogs"""
def __init__(self,name,age):
"""Initialize the name and age attributes"""
self.name = name
self.age = age
def sit(self):
return f"{self.name} is now sitting."
def roll_over(self):
return f"{self.name} rolled over!"
#A new instance
my_dog = Dog("Bobby",11)
#Get attributes
print(f"My dog's name is {my_dog.name}.")
print(f"My dog's age is {my_dog.age}.")
#Call the methods
print(my_dog.sit())
print(my_dog.roll_over())
print("\n\nAnother Dog:\n\n")
#Another instance
another_dog = Dog("Holly",2)
#Attributes
print(f"My dog's name is {another_dog.name}.")
print(f"My dog's age is {another_dog.age}.")
#Methods
print(another_dog.sit())
print(another_dog.roll_over()) | en | 0.670279 | A simple class to model dogs Initialize the name and age attributes #A new instance #Get attributes #Call the methods #Another instance #Attributes #Methods | 4.331899 | 4 |
Spectrum_realm/Parser.py | thesombady/Spectrum-realm | 0 | 6614880 | import os
import numpy as np
def Parser(Path):
"""Parser function provides the parsered data provided from a .xyd file. This method requires that the data
is strictly ordered"""
try:
with open(Path, 'r') as file:
Data = file.readlines()
Xlist = []
Ylist = []
for i in range(len(Data)):
Values = Data[i].split(' ')
val2 = Values[-1].replace("\n", '')#Remove a newline and replace by nothing
Xlist.append(float(Values[0]))
Ylist.append(float(val2))
return np.array(Xlist), np.array(Ylist)
except:
raise Exception("[Parser]: Cant find the input")
| import os
import numpy as np
def Parser(Path):
"""Parser function provides the parsered data provided from a .xyd file. This method requires that the data
is strictly ordered"""
try:
with open(Path, 'r') as file:
Data = file.readlines()
Xlist = []
Ylist = []
for i in range(len(Data)):
Values = Data[i].split(' ')
val2 = Values[-1].replace("\n", '')#Remove a newline and replace by nothing
Xlist.append(float(Values[0]))
Ylist.append(float(val2))
return np.array(Xlist), np.array(Ylist)
except:
raise Exception("[Parser]: Cant find the input")
| en | 0.77894 | Parser function provides the parsered data provided from a .xyd file. This method requires that the data is strictly ordered #Remove a newline and replace by nothing | 3.288876 | 3 |
benchmarking/awrams/benchmarking/config.py | Zac-HD/awra_cms | 2 | 6614881 | <reponame>Zac-HD/awra_cms<gh_stars>1-10
import os
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
BENCHMARK_SITES = os.path.join(dir_path,'data','SiteLocationsWithUniqueID.csv')
SOILMOISTURE_OBS_PATH = os.path.join(dir_path,'data')
FIG_SIZE = (14,6)
MONTHLY_REJECTION_THRESHOLD=15
ANNUAL_REJECTION_THRESHOLD=6
SM_MODEL_VARNAMES = ['s0_avg', 'ss_avg', 'sd_avg']
SM_MODEL_LAYERS = {'s0_avg': 100., 'ss_avg': 900., 'sd_avg': 5000.}
SM_OBSERVED_LAYERS = ('profile','top','shallow','middle','deep')
| import os
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
BENCHMARK_SITES = os.path.join(dir_path,'data','SiteLocationsWithUniqueID.csv')
SOILMOISTURE_OBS_PATH = os.path.join(dir_path,'data')
FIG_SIZE = (14,6)
MONTHLY_REJECTION_THRESHOLD=15
ANNUAL_REJECTION_THRESHOLD=6
SM_MODEL_VARNAMES = ['s0_avg', 'ss_avg', 'sd_avg']
SM_MODEL_LAYERS = {'s0_avg': 100., 'ss_avg': 900., 'sd_avg': 5000.}
SM_OBSERVED_LAYERS = ('profile','top','shallow','middle','deep') | none | 1 | 1.694988 | 2 | |
py_cui/widgets/__init__.py | zexee/py_cui | 0 | 6614882 | from .button import *
from .checkbox_menu import *
from .label import *
from .scroll_menu import *
from .scroll_text_block import *
from .slider import *
from .text_box import *
| from .button import *
from .checkbox_menu import *
from .label import *
from .scroll_menu import *
from .scroll_text_block import *
from .slider import *
from .text_box import *
| none | 1 | 1.141852 | 1 | |
meridian/acupoints/qixue42.py | sinotradition/meridian | 5 | 6614883 | <reponame>sinotradition/meridian
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'qìxué'
CN=u'气穴'
NAME=u'qixue42'
CHANNEL='kidney'
CHANNEL_FULLNAME='KidneyChannelofFoot-Shaoyin'
SEQ='KI13'
if __name__ == '__main__':
pass
| #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'qìxué'
CN=u'气穴'
NAME=u'qixue42'
CHANNEL='kidney'
CHANNEL_FULLNAME='KidneyChannelofFoot-Shaoyin'
SEQ='KI13'
if __name__ == '__main__':
pass | en | 0.406311 | #!/usr/bin/python #coding=utf-8 @author: sheng @license: | 1.160505 | 1 |
evaluation/f-measure/fmeasure.py | zqyjixiang/machinelearning | 2 | 6614884 | <reponame>zqyjixiang/machinelearning
#!/usr/bin/env python
# Author: AppleFairy
# Date: 8/11/2016
#
#
# *************************************** #
from __future__ import division
import pandas as pd
import math
class FMeasure:
'''
p : precision
r : recall
f : f measure
tp: true positive
fp: false positive
fn: false negetive
tn: true negetive
p = tp / (tp + fp)
r = tp / (tp + fn)
f = (e^2 + 1) * (p * r) / (e^2 * p + r)
'''
def __init__(self, golden_vs_predict_result):
self.__classIdList = set(golden_vs_predict_result.loc[:, "ClassId"].tolist())
self.__clusterIdList = set(golden_vs_predict_result.loc[:, "ClusterId"].tolist())
self.__gvpr = golden_vs_predict_result
def __calculate(self, classId, clusterId, eps):
truePositive = len(self.__gvpr.loc[(self.__gvpr.ClassId == classId) & (self.__gvpr.ClusterId == clusterId)])
precision = truePositive/len(self.__gvpr[self.__gvpr.ClusterId == clusterId])
recall = truePositive/len(self.__gvpr[self.__gvpr.ClassId == classId])
if 0 == precision and 0 == recall:
return 0
else:
return (math.pow(eps,2) +1)*(precision*recall)/(math.pow(eps,2)*precision+recall)
def __mapping(self, classId, eps):
maxPrecision = 0
mappedClusterId = -1
for clusterId in self.__clusterIdList:
if(clusterId != -1):
f = self.__calculate(classId, clusterId, eps)
if f > maxPrecision:
maxPrecision = f
mappedClusterId = clusterId
print(" class <==> cluster : %r <==> %r, precision=%f" % (classId, mappedClusterId, maxPrecision))
return maxPrecision
def get_fmeasure(self, eps=1):
result = 0
for classId in self.__classIdList:
f = self.__mapping(classId, eps)
result = result + f * len(self.__gvpr[self.__gvpr.ClassId == classId])
return result/len(self.__gvpr)
if __name__ == "__main__":
result = FMeasure([1,1,0,0,1,2],[0,0,1,1,0,2])
print(result.get_fmeasure()) | #!/usr/bin/env python
# Author: AppleFairy
# Date: 8/11/2016
#
#
# *************************************** #
from __future__ import division
import pandas as pd
import math
class FMeasure:
'''
p : precision
r : recall
f : f measure
tp: true positive
fp: false positive
fn: false negetive
tn: true negetive
p = tp / (tp + fp)
r = tp / (tp + fn)
f = (e^2 + 1) * (p * r) / (e^2 * p + r)
'''
def __init__(self, golden_vs_predict_result):
self.__classIdList = set(golden_vs_predict_result.loc[:, "ClassId"].tolist())
self.__clusterIdList = set(golden_vs_predict_result.loc[:, "ClusterId"].tolist())
self.__gvpr = golden_vs_predict_result
def __calculate(self, classId, clusterId, eps):
truePositive = len(self.__gvpr.loc[(self.__gvpr.ClassId == classId) & (self.__gvpr.ClusterId == clusterId)])
precision = truePositive/len(self.__gvpr[self.__gvpr.ClusterId == clusterId])
recall = truePositive/len(self.__gvpr[self.__gvpr.ClassId == classId])
if 0 == precision and 0 == recall:
return 0
else:
return (math.pow(eps,2) +1)*(precision*recall)/(math.pow(eps,2)*precision+recall)
def __mapping(self, classId, eps):
maxPrecision = 0
mappedClusterId = -1
for clusterId in self.__clusterIdList:
if(clusterId != -1):
f = self.__calculate(classId, clusterId, eps)
if f > maxPrecision:
maxPrecision = f
mappedClusterId = clusterId
print(" class <==> cluster : %r <==> %r, precision=%f" % (classId, mappedClusterId, maxPrecision))
return maxPrecision
def get_fmeasure(self, eps=1):
result = 0
for classId in self.__classIdList:
f = self.__mapping(classId, eps)
result = result + f * len(self.__gvpr[self.__gvpr.ClassId == classId])
return result/len(self.__gvpr)
if __name__ == "__main__":
result = FMeasure([1,1,0,0,1,2],[0,0,1,1,0,2])
print(result.get_fmeasure()) | en | 0.320903 | #!/usr/bin/env python # Author: AppleFairy # Date: 8/11/2016 # # # *************************************** # p : precision r : recall f : f measure tp: true positive fp: false positive fn: false negetive tn: true negetive p = tp / (tp + fp) r = tp / (tp + fn) f = (e^2 + 1) * (p * r) / (e^2 * p + r) | 2.416176 | 2 |
packageopt/models/database.py | nspostnov/for-article-optimal-position-liquidation | 0 | 6614885 | __all__ = ['DataBase']
class DataBase:
def __init__(self, dbname,
user='postnov', password='password', host='localhost', port=5432):
self._dbname = dbname
self._user = user
self._password = password
self._host = host
self._port = port
def get_dbname(self):
return self._dbname
def set_dbname(self, name):
self._dbname = name
def get_user(self):
return self._user
def set_user(self, user):
self._user = user
def get_password(self):
return self._password
def set_password(self, password):
self._password = password
def get_host(self):
return self._host
def set_host(self, host):
self._host = host
def get_port(self):
return self._port
def set_port(self, port):
self._port = port
| __all__ = ['DataBase']
class DataBase:
def __init__(self, dbname,
user='postnov', password='password', host='localhost', port=5432):
self._dbname = dbname
self._user = user
self._password = password
self._host = host
self._port = port
def get_dbname(self):
return self._dbname
def set_dbname(self, name):
self._dbname = name
def get_user(self):
return self._user
def set_user(self, user):
self._user = user
def get_password(self):
return self._password
def set_password(self, password):
self._password = password
def get_host(self):
return self._host
def set_host(self, host):
self._host = host
def get_port(self):
return self._port
def set_port(self, port):
self._port = port
| none | 1 | 2.953585 | 3 | |
cgs/member_add_leave.py | NikStor03/schoolboy | 1 | 6614886 | import discord
import SchoolBoy
import config
from discord.ext import commands
class member_add_leave(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_join(self, member):
if member.bot:
return
else:
channel = self.bot.get_channel(785129670559531079) # получаем объект канала
role = discord.utils.get(member.guild.roles, id=783669858838773781)
await member.add_roles(role)
await channel.send(f":partying_face: **Користувач приєднався до нас {member.mention}, так давайте привітаємо його!!!**")
def setup(bot):
bot.add_cog(member_add_leave(bot)) | import discord
import SchoolBoy
import config
from discord.ext import commands
class member_add_leave(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_join(self, member):
if member.bot:
return
else:
channel = self.bot.get_channel(785129670559531079) # получаем объект канала
role = discord.utils.get(member.guild.roles, id=783669858838773781)
await member.add_roles(role)
await channel.send(f":partying_face: **Користувач приєднався до нас {member.mention}, так давайте привітаємо його!!!**")
def setup(bot):
bot.add_cog(member_add_leave(bot)) | ru | 0.994677 | # получаем объект канала | 2.46719 | 2 |
server/tracker/apps.py | LaurentColoma/TicketManager-server | 2 | 6614887 | <filename>server/tracker/apps.py<gh_stars>1-10
"""
Application configuration file.
see https://docs.djangoproject.com/fr/1.9/ref/applications/.
"""
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from importlib import import_module
class TrackerConfig(AppConfig):
"""
Application configuration class.
TODO
"""
name = "tracker"
verbose_name = _("Tracker")
def ready(self):
import_module("{}.signals".format(self.name))
| <filename>server/tracker/apps.py<gh_stars>1-10
"""
Application configuration file.
see https://docs.djangoproject.com/fr/1.9/ref/applications/.
"""
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from importlib import import_module
class TrackerConfig(AppConfig):
"""
Application configuration class.
TODO
"""
name = "tracker"
verbose_name = _("Tracker")
def ready(self):
import_module("{}.signals".format(self.name))
| en | 0.461292 | Application configuration file. see https://docs.djangoproject.com/fr/1.9/ref/applications/. Application configuration class. TODO | 1.437135 | 1 |
planetoids/__init__.py | paulds8/planetoids | 1 | 6614888 | from .planetoids import *
| from .planetoids import *
| none | 1 | 1.16285 | 1 | |
hardware_analysis.py | 260601068j/X-Planef | 8 | 6614889 | <gh_stars>1-10
"""Accesses the Google Analytics API to spit out a CSV of aircraft usage"""
from __future__ import division, print_function
import argparse
import collections
import logging
from ga_library import *
from utils import *
from collections import defaultdict, OrderedDict
SHOW_ABSOLUTE_NUMBERS = False
_out = ''
def _log(s, end='\n'):
global _out
_out += s + end
file_name_suffix = ''
def main():
argparser = argparse.ArgumentParser(description='Dumps hardware stats from X-Plane Desktop; you probably want to pipe the output to a CSV file')
argparser.add_argument('--version', type=int, default=11, help='The major version of X-Plane you want data on (10 or 11)')
args = argparser.parse_args()
write_hardware_analysis_files(Version.v11 if args.version == 11 else Version.v10, UserGroup.PaidOnly)
def write_hardware_analysis_files(version: Union[int, Version], user_group: UserGroup, csv_path=None):
"""
:type csv_path: Union[str,None]
"""
global file_name_suffix
file_name_suffix = "_%s_%s_%s" % (version, user_group.name, today_file_suffix())
qm = SimpleQueryMgr(GaService.desktop(), version, Metric.Users, user_group)
perform_cpu_analysis(qm.query(CustomDimension.Cpu))
perform_flight_controls_analysis(qm.query(CustomDimension.FlightControls))
stats = HardwareStats(GaService.desktop(), version, Metric.Users, user_group)
grapher = HardwareGrapher(stats)
perform_ram_analysis(stats)
perform_gpu_analysis(stats)
perform_os_analysis(stats, grapher)
perform_vr_analysis(stats, grapher)
if not csv_path:
csv_path = "hardware_analysis%s.csv" % file_name_suffix
with open(csv_path, 'w') as out_file:
out_file.write(_out)
out_file.write('\n')
class HardwareStats:
def __init__(self, service: GaService, version: Union[int, Version]=Version.v11, user_group: UserGroup=UserGroup.PaidOnly):
self.qm = SimpleQueryMgr(service, version, Metric.Users, user_group)
def operating_systems(self) -> Dict[str, int]:
platform_count = defaultdict(int)
for row in self.qm.query(CustomDimension.Os):
val = str_to_int(row[1])
os_name = classify_platform(row[0])
platform_count[os_name] += val
return counts_to_percents(platform_count)
def operating_system_versions(self) -> Dict[str, Dict[str, int]]:
version_count = defaultdict(lambda: defaultdict(int))
for row in self.qm.query(CustomDimension.Os):
val = str_to_int(row[1])
os_name = classify_platform(row[0])
version = get_os_version(row[0])
if version:
version_count[os_name][version] += val
return version_count
def ram_amounts(self) -> Dict[str, int]:
users_with_at_least_this_much_ram = collections.defaultdict(int)
total_users = 0
for row in self.qm.query(CustomDimension.Ram):
val = str_to_int(row[1])
total_users += val
ram_class = int(row[0])
if ram_class >= 2:
users_with_at_least_this_much_ram["2GB"] += val
if ram_class >= 4:
users_with_at_least_this_much_ram["4GB"] += val
if ram_class >= 8:
users_with_at_least_this_much_ram["8GB"] += val
if ram_class >= 16:
users_with_at_least_this_much_ram["16GB"] += val
if ram_class >= 32:
users_with_at_least_this_much_ram["32GB"] += val
return counts_to_percents(users_with_at_least_this_much_ram, total_users)
def gpu_manufacturers(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_gpu_manufacturer(row[0])] += str_to_int(row[1])
out = counts_to_percents(out)
with suppress(KeyError):
if out['Unknown'] < 0.3:
del out['Unknown']
return out
def gpu_generation(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_gpu_generation(row[0])] += str_to_int(row[1])
return counts_to_percents(out)
def gpu_platform(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_mobile_versus_desktop(row[0])] += str_to_int(row[1])
return counts_to_percents(out)
def vr_headsets(self):
known_headsets = {
'rift': 'Oculus Rift',
'oculus': 'Oculus Rift',
'pimax 5k': 'Pimax 5K',
'psvr': 'PSVR Headset',
'windows': 'Windows Mixed Reality',
'lighthouse': 'OpenVR (like HTC Vive)',
'vive': 'OpenVR (like HTC Vive)',
'aapvr': 'Phone',
'vridge': 'Phone',
'ivry': 'Phone',
'phonevr': 'Phone',
}
headset_count = collections.defaultdict(int)
for row in self.qm.query(CustomDimension.VrHeadset):
label = row[0]
for search_term, deduped_name in known_headsets.items():
if search_term in label.lower():
label = deduped_name
break
else:
logging.debug('unknown headset: ' + label)
headset_count[label] += str_to_int(row[1])
return counts_to_percents(headset_count, smush_into_other_below_percent=1)
def vr_usage(self):
vr_start_date = Version.v1120r4.value.start_date
total_users = sum(str_to_int(row[1]) for row in self.qm.query(CustomDimension.Ram, override_start_date=vr_start_date))
vr_users = sum(str_to_int(row[1]) for row in self.qm.query(CustomDimension.VrHeadset, override_start_date=vr_start_date))
vr_pct = round((vr_users / total_users) * 100, 2)
return {
'Have Used VR': vr_pct,
'2-D Monitor Only': 100 - vr_pct
}
@property
def total_users(self):
ram_data = self.qm.query(CustomDimension.Ram)
return sum(str_to_int(row[1]) for row in ram_data)
class HardwareGrapher:
def __init__(self, stats: HardwareStats):
self.stats = stats
def operating_systems(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.operating_systems())
def ram_amounts(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.ram_amounts(), 'Users with at Least <em>x</em> GB RAM', make_x_label=lambda l: str(l) + '+')
def gpu_mobile_vs_desktop(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.gpu_platform())
def gpu_manufacturers(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.gpu_manufacturers(), 'GPU Manufacturers')
def vr_headsets(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.vr_headsets(), 'VR Headsets', already_sorted=True, y_label='% VR Users')
def vr_usage(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.vr_usage(), top_pad_px=40)
def perform_os_analysis(stats: HardwareStats, grapher: HardwareGrapher):
# Overall platform breakdown
platform_count = stats.operating_systems()
_log("PLATFORM BREAKDOWN")
dump_generic_count_dict(platform_count, "Operating System", "Machines")
plotly.offline.plot(grapher.operating_systems(), image='png', image_filename='os_breakdown' + file_name_suffix, image_width=1024, output_type='file')
version_count = stats.operating_system_versions()
_log("OS VERSIONS")
dump_generic_count_dict(version_count["Windows"], "OS Version", "Windows Machines")
dump_generic_count_dict(version_count["Mac"], "OS Version", "Macs")
dump_generic_count_dict(version_count["Linux"], "OS Version", "Linux Machines")
def clean_up_string_formatting(string):
return str(string).strip()
def perform_cpu_analysis(results_rows):
def get_cpu_core_count(cpu_line):
stats = cpu_line.split(" - ")
for stat in stats:
if stat.startswith("Cores:"):
label_and_cores = stat.split(" ")
return int(label_and_cores[1])
return 0
cpu_cores = collections.defaultdict(int)
for row in results_rows:
val = str_to_int(row[1])
core_count = get_cpu_core_count(row[0])
cpu_cores[core_count] += val
_log("NUMBER OF CPU CORES")
dump_generic_count_dict(cpu_cores, "CPU Cores", "Machines")
def perform_vr_analysis(stats: HardwareStats, grapher: HardwareGrapher):
_log("VR USAGE")
dump_generic_count_dict(stats.vr_usage(), "VR Status", "Users")
_log("VR HEADSETS")
dump_generic_count_dict(stats.vr_headsets(), "Headset Type", "Users")
plotly.offline.plot(grapher.vr_usage(), image='png', image_filename='vr_usage' + file_name_suffix, image_width=1024, output_type='file')
plotly.offline.plot(grapher.vr_headsets(), image='png', image_filename='vr_headsets' + file_name_suffix, image_width=1024, output_type='file')
def get_gpu_manufacturer(gpu_string):
if lower_contains(gpu_string, ('firepro', 'firegl', 'radeon', 'amd ')) or gpu_string.startswith(('67EF', '67DF', 'ASUS EAH', 'ASUS R')):
return "AMD/ATI"
elif lower_contains(gpu_string, ('Quadro', 'GeForce', 'TITAN')) or gpu_string.startswith(('NVS ', 'NV1')):
return "Nvidia"
elif "Intel" in gpu_string:
return "Intel"
return "Unknown"
def get_gpu_generation(gpu_string):
gpu = gpu_string.lower()
if "quadro" in gpu:
return "Nvidia Quadro (All Generations)"
elif "firepro" in gpu or "firegl" in gpu:
return "AMD FirePro (All Generations)"
if "radeon" in gpu or "asus" in gpu:
for gen in [2, 3, 4, 5, 6, 7, 8, 9]:
gen = str(gen)
if "R" + gen + " M" in gpu_string:
return "Radeon R" + gen + "M"
elif "R" + gen + " " in gpu_string:
return "Radeon R" + gen
elif re.search(gen + "\d\d\dM", gpu_string) or ("Mobility" in gpu_string and re.search(gen + "\d\d\d", gpu_string)):
return "Radeon " + gen + "xxxM"
elif re.search(gen + "\d\d\d", gpu_string):
return "Radeon " + gen + "xxxM"
else:
return "Radeon (Other)"
elif "titan x" in gpu:
return "GeForce 9xx"
elif "titan" in gpu:
return "GeForce 7xx"
elif "geforce" in gpu:
for gen in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
gen = str(gen)
base_radeon_re = "GeForce (G|GT|GTX|GTS)?\s*"
if re.search(base_radeon_re + gen + "\d\d\s*(Ti)?(\s|/)", gpu_string):
return "GeForce " + gen + "xx"
elif re.search(base_radeon_re + gen + "\d\dM", gpu_string):
return "GeForce " + gen + "xxM"
elif re.search(base_radeon_re + gen + "\d\d\d\s*(Ti)?(\s|/)", gpu_string):
return "GeForce " + gen + "xxx"
elif re.search(base_radeon_re + gen + "\d\d\dM", gpu_string):
return "GeForce " + gen + "xxxM"
else:
return "GeForce (Other)"
elif "intel" in gpu:
if any(ident in gpu for ident in ["gma", "gm45", "g41", "g45", "q45", "eaglelake", "4 series"]):
return "Intel Integrated (GMA or earlier)"
elif "hd" in gpu or "iris" in gpu:
if any(ident in gpu for ident in ["2000", "3000"]):
return "Intel Integrated (6th Generation; HD 2000/3000)"
elif any(ident in gpu for ident in ["4000", "4200", "4400", "4600", "4700", "5000", "5100", "5200"]):
return "Intel Integrated (7th Generation; HD 2500/4x00/5x00)"
elif any(ident in gpu_string for ident in ["5300", "5500", "5600", "5700", "6000", "6100", "6200", "6300"]):
return "Intel Integrated (8th Generation; HD 5x00/6x00)"
elif any(ident in gpu_string for ident in ["500", "505", "510", "515", "520", "530", "540", "550", "580"]):
return "Intel Integrated (9th Generation; HD 5xx)"
else:
return "Intel Integrated (5th Generation; HD)"
elif "sandybridge" in gpu:
return "Intel Integrated (6th Generation; HD 2000/3000)"
elif "haswell" in gpu or "ivybridge" in gpu or "bay trail" in gpu:
return "Intel Integrated (7th Generation; HD 2500/4x00/5x00)"
elif "broadwell" in gpu:
return "Intel Integrated (8th Generation; HD 5x00/6x00)"
elif "skylake" in gpu:
return "Intel Integrated (9th Generation; HD 5xx)"
elif "ironlake" in gpu:
return "Intel Integrated (5th Generation; HD)"
else:
return gpu_string
return "Other"
def get_mobile_versus_desktop(gpu_string):
gen = get_gpu_generation(gpu_string)
if gen.startswith("Intel"):
return "Intel"
elif gen.endswith("M"):
return "Mobile"
else:
return "Desktop"
def perform_gpu_analysis(stats: HardwareStats):
gpu_manufacturer = stats.gpu_manufacturers()
_log("GPU PLATFORM")
dump_generic_count_dict(stats.gpu_platform(), "GPU Platform", "Machines")
_log("GPU MANUFACTURER")
dump_generic_count_dict(gpu_manufacturer, "GPU Manufacturer", "Machines")
_log("GPU GENERATION")
dump_generic_count_dict(stats.gpu_generation(), "GPU Generation", "Machines")
with suppress(KeyError):
del gpu_manufacturer['Unknown']
make_bar_chart(gpu_manufacturer, 'gpu_manufacturer' + file_name_suffix, 'Manufacturer', needs_conversion_to_percents=False, height_scaling_factor=0.7)
def perform_ram_analysis(stats: HardwareStats):
users_with_at_least_this_much_ram = stats.ram_amounts()
_log("USERS WITH AT LEAST THIS MUCH RAM")
for ram_amount, value in users_with_at_least_this_much_ram.items():
_log(','.join([str(ram_amount), str(value)]))
_log("\n" * 3)
make_bar_chart(users_with_at_least_this_much_ram, 'ram_amounts' + file_name_suffix, 'RAM Amount', make_x_label=lambda l: str(l) + '+', height_scaling_factor=0.7)
def perform_flight_controls_analysis(results_rows):
known_yokes = [
"Saitek Pro Flight Yoke",
"Saitek X52",
"CH FLIGHT SIM YOKE",
"CH ECLIPSE YOKE",
"Pro Flight Cessna Yoke",
"PFC Cirrus Yoke",
"CH 3-Axis 10-Button POV USB Yoke",
]
known_sticks = [
"Logitech 3D Pro",
"T.Flight Hotas",
"T.Flight Stick X",
"Logitech Attack 3",
"Mad Catz F.L.Y.5 Stick",
"SideWinder Precision 2",
"T.16000M",
"SideWinder Force Feedback 2",
"Saitek Pro Flight X-55 Rhino Stick",
"Cyborg",
"Saitek Cyborg USB Stick",
"AV8R",
"Logitech Freedom 2.4",
"SideWinder Joystick",
"Mad Catz V.1 Stick",
"SideWinder Precision Pro",
"SideWinder 3D Pro",
"Logitech Force 3D Pro",
"WingMan Force 3D",
"Joystick - HOTAS Warthog",
"WingMan Extreme Digital 3D",
"WingMan Extreme 3D",
"Top Gun Afterburner",
"CH FLIGHTSTICK PRO",
"CH FIGHTERSTICK",
"CH COMBATSTICK",
"Saitek ST290",
"Saitek ST90",
"Top Gun Fox 2",
"Aviator for Playstation 3",
"Dark Tornado Joystick",
"Saitek X45",
"Saitek X36",
"USB Joystick",
"Pro Flight X65",
"G940",
"HOTAS Cougar Joystick",
"MetalStrik 3D",
"WingMan Attack 2"
]
known_controllers = [
"XBOX",
"Playstation(R)3 Controller",
"WingMan Cordless Gamepad",
"WingMan RumblePad",
"Logitech Dual Action",
"RumblePad 2",
"ASUS Gamepad",
"USB WirelessGamepad",
"Betop Controller",
"Logitech(R) Precision(TM) Gamepad",
"Wireless Gamepad F710"
]
known_rc_controllers = [
"InterLink Elite",
"RealFlight Interface"
]
def canonicalize_stick_or_yoke_name(flight_control_row):
flight_control_row = clean_up_string_formatting(flight_control_row)
if "Mouse" in flight_control_row:
return "Mouse"
elif "VID:1133PID:49685" in flight_control_row:
return "Logitech Extreme 3D"
elif "WingMan Ext Digital 3D" in flight_control_row:
return "WingMan Extreme Digital 3D"
elif "VID:1699PID:1890" in flight_control_row:
return "Saitek X52"
elif "Wireless 360 Controller" in flight_control_row:
return "XBOX"
elif "VID:121PID:6" in flight_control_row:
return "Generic USB Joystick"
elif "VID:1678PID:49402" in flight_control_row:
return "CH Products (Unknown)"
for control in known_yokes + known_sticks + known_controllers:
if control.lower() in flight_control_row.lower():
return control
if "," in flight_control_row:
return flight_control_row.replace(",", ";")
return flight_control_row
def classify_stick_or_yoke(flight_control_row):
flight_control_row = canonicalize_stick_or_yoke_name(flight_control_row)
if flight_control_row == "Mouse":
return "Mouse"
elif flight_control_row in known_yokes:
return "Yoke"
elif flight_control_row in known_sticks:
return "Joystick"
elif flight_control_row in known_controllers:
return "Gamepad"
elif flight_control_row in known_rc_controllers:
return "RC Controller"
elif "yoke" in flight_control_row.lower():
return "Yoke"
elif "stick" in flight_control_row.lower():
return "Joystick"
elif "pad" in flight_control_row.lower():
return "Gamepad"
else:
return "Unknown"
flight_controls = collections.defaultdict(int)
flight_control_type = collections.defaultdict(int)
has_rudder_pedals = collections.defaultdict(int)
for row in results_rows:
val = str_to_int(row[1])
flight_controls[canonicalize_stick_or_yoke_name(row[0])] += val
flight_control_type[classify_stick_or_yoke(row[0])] += val
row = clean_up_string_formatting(row[0])
if "rudder" in row.lower() or "pedals" in row.lower():
has_rudder_pedals[True] += val
else:
has_rudder_pedals[False] += val
nuke_these_keys = []
for controls, count in flight_controls.items():
if count < 5:
nuke_these_keys.append(controls)
for key in nuke_these_keys:
flight_controls["Other"] += flight_controls[key]
del flight_controls[key]
_log("PRIMARY FLIGHT CONTROLS TYPE")
dump_generic_count_dict(flight_control_type, "Flight Controls Type", "Users")
_log("PRIMARY FLIGHT CONTROLS MODEL (for non-mouse users)")
del flight_controls["Mouse"]
dump_generic_count_dict(flight_controls, "Flight Controls Model", "Users")
_log("USERS FLYING WITH PEDALS")
dump_generic_count_dict(has_rudder_pedals, "Has Pedals?", "Users")
def dump_generic_count_dict(dictionary, label, metric_category):
if SHOW_ABSOLUTE_NUMBERS:
_log(label + ",Num " + metric_category + ",% of All " + metric_category)
else:
_log(label + ",% of All " + metric_category)
total = total_entries_in_dict(dictionary)
sorted_dict = sorted(dictionary.items(), key=operator.itemgetter(1), reverse=True)
for i, label_and_count in enumerate(sorted_dict):
if SHOW_ABSOLUTE_NUMBERS:
_log(','.join([str(label_and_count[0]), str(label_and_count[1]), str((label_and_count[1] / total) * 100) + "%"]))
else:
# Coerce to ASCII
label = clean_up_string_formatting(label_and_count[0])
percent_str = clean_up_string_formatting(str((label_and_count[1] / total) * 100) + u"%")
_log(label, end="")
_log(",", end="")
_log(percent_str)
_log("\n" * 3)
def lower_contains(s: str, check: Iterable[str]) -> bool:
return any(sub.lower() in s.lower() for sub in check)
if __name__ == '__main__':
main()
| """Accesses the Google Analytics API to spit out a CSV of aircraft usage"""
from __future__ import division, print_function
import argparse
import collections
import logging
from ga_library import *
from utils import *
from collections import defaultdict, OrderedDict
SHOW_ABSOLUTE_NUMBERS = False
_out = ''
def _log(s, end='\n'):
global _out
_out += s + end
file_name_suffix = ''
def main():
argparser = argparse.ArgumentParser(description='Dumps hardware stats from X-Plane Desktop; you probably want to pipe the output to a CSV file')
argparser.add_argument('--version', type=int, default=11, help='The major version of X-Plane you want data on (10 or 11)')
args = argparser.parse_args()
write_hardware_analysis_files(Version.v11 if args.version == 11 else Version.v10, UserGroup.PaidOnly)
def write_hardware_analysis_files(version: Union[int, Version], user_group: UserGroup, csv_path=None):
"""
:type csv_path: Union[str,None]
"""
global file_name_suffix
file_name_suffix = "_%s_%s_%s" % (version, user_group.name, today_file_suffix())
qm = SimpleQueryMgr(GaService.desktop(), version, Metric.Users, user_group)
perform_cpu_analysis(qm.query(CustomDimension.Cpu))
perform_flight_controls_analysis(qm.query(CustomDimension.FlightControls))
stats = HardwareStats(GaService.desktop(), version, Metric.Users, user_group)
grapher = HardwareGrapher(stats)
perform_ram_analysis(stats)
perform_gpu_analysis(stats)
perform_os_analysis(stats, grapher)
perform_vr_analysis(stats, grapher)
if not csv_path:
csv_path = "hardware_analysis%s.csv" % file_name_suffix
with open(csv_path, 'w') as out_file:
out_file.write(_out)
out_file.write('\n')
class HardwareStats:
def __init__(self, service: GaService, version: Union[int, Version]=Version.v11, user_group: UserGroup=UserGroup.PaidOnly):
self.qm = SimpleQueryMgr(service, version, Metric.Users, user_group)
def operating_systems(self) -> Dict[str, int]:
platform_count = defaultdict(int)
for row in self.qm.query(CustomDimension.Os):
val = str_to_int(row[1])
os_name = classify_platform(row[0])
platform_count[os_name] += val
return counts_to_percents(platform_count)
def operating_system_versions(self) -> Dict[str, Dict[str, int]]:
version_count = defaultdict(lambda: defaultdict(int))
for row in self.qm.query(CustomDimension.Os):
val = str_to_int(row[1])
os_name = classify_platform(row[0])
version = get_os_version(row[0])
if version:
version_count[os_name][version] += val
return version_count
def ram_amounts(self) -> Dict[str, int]:
users_with_at_least_this_much_ram = collections.defaultdict(int)
total_users = 0
for row in self.qm.query(CustomDimension.Ram):
val = str_to_int(row[1])
total_users += val
ram_class = int(row[0])
if ram_class >= 2:
users_with_at_least_this_much_ram["2GB"] += val
if ram_class >= 4:
users_with_at_least_this_much_ram["4GB"] += val
if ram_class >= 8:
users_with_at_least_this_much_ram["8GB"] += val
if ram_class >= 16:
users_with_at_least_this_much_ram["16GB"] += val
if ram_class >= 32:
users_with_at_least_this_much_ram["32GB"] += val
return counts_to_percents(users_with_at_least_this_much_ram, total_users)
def gpu_manufacturers(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_gpu_manufacturer(row[0])] += str_to_int(row[1])
out = counts_to_percents(out)
with suppress(KeyError):
if out['Unknown'] < 0.3:
del out['Unknown']
return out
def gpu_generation(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_gpu_generation(row[0])] += str_to_int(row[1])
return counts_to_percents(out)
def gpu_platform(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_mobile_versus_desktop(row[0])] += str_to_int(row[1])
return counts_to_percents(out)
def vr_headsets(self):
known_headsets = {
'rift': 'Oculus Rift',
'oculus': 'Oculus Rift',
'pimax 5k': 'Pimax 5K',
'psvr': 'PSVR Headset',
'windows': 'Windows Mixed Reality',
'lighthouse': 'OpenVR (like HTC Vive)',
'vive': 'OpenVR (like HTC Vive)',
'aapvr': 'Phone',
'vridge': 'Phone',
'ivry': 'Phone',
'phonevr': 'Phone',
}
headset_count = collections.defaultdict(int)
for row in self.qm.query(CustomDimension.VrHeadset):
label = row[0]
for search_term, deduped_name in known_headsets.items():
if search_term in label.lower():
label = deduped_name
break
else:
logging.debug('unknown headset: ' + label)
headset_count[label] += str_to_int(row[1])
return counts_to_percents(headset_count, smush_into_other_below_percent=1)
def vr_usage(self):
vr_start_date = Version.v1120r4.value.start_date
total_users = sum(str_to_int(row[1]) for row in self.qm.query(CustomDimension.Ram, override_start_date=vr_start_date))
vr_users = sum(str_to_int(row[1]) for row in self.qm.query(CustomDimension.VrHeadset, override_start_date=vr_start_date))
vr_pct = round((vr_users / total_users) * 100, 2)
return {
'Have Used VR': vr_pct,
'2-D Monitor Only': 100 - vr_pct
}
@property
def total_users(self):
ram_data = self.qm.query(CustomDimension.Ram)
return sum(str_to_int(row[1]) for row in ram_data)
class HardwareGrapher:
def __init__(self, stats: HardwareStats):
self.stats = stats
def operating_systems(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.operating_systems())
def ram_amounts(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.ram_amounts(), 'Users with at Least <em>x</em> GB RAM', make_x_label=lambda l: str(l) + '+')
def gpu_mobile_vs_desktop(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.gpu_platform())
def gpu_manufacturers(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.gpu_manufacturers(), 'GPU Manufacturers')
def vr_headsets(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.vr_headsets(), 'VR Headsets', already_sorted=True, y_label='% VR Users')
def vr_usage(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.vr_usage(), top_pad_px=40)
def perform_os_analysis(stats: HardwareStats, grapher: HardwareGrapher):
# Overall platform breakdown
platform_count = stats.operating_systems()
_log("PLATFORM BREAKDOWN")
dump_generic_count_dict(platform_count, "Operating System", "Machines")
plotly.offline.plot(grapher.operating_systems(), image='png', image_filename='os_breakdown' + file_name_suffix, image_width=1024, output_type='file')
version_count = stats.operating_system_versions()
_log("OS VERSIONS")
dump_generic_count_dict(version_count["Windows"], "OS Version", "Windows Machines")
dump_generic_count_dict(version_count["Mac"], "OS Version", "Macs")
dump_generic_count_dict(version_count["Linux"], "OS Version", "Linux Machines")
def clean_up_string_formatting(string):
return str(string).strip()
def perform_cpu_analysis(results_rows):
def get_cpu_core_count(cpu_line):
stats = cpu_line.split(" - ")
for stat in stats:
if stat.startswith("Cores:"):
label_and_cores = stat.split(" ")
return int(label_and_cores[1])
return 0
cpu_cores = collections.defaultdict(int)
for row in results_rows:
val = str_to_int(row[1])
core_count = get_cpu_core_count(row[0])
cpu_cores[core_count] += val
_log("NUMBER OF CPU CORES")
dump_generic_count_dict(cpu_cores, "CPU Cores", "Machines")
def perform_vr_analysis(stats: HardwareStats, grapher: HardwareGrapher):
_log("VR USAGE")
dump_generic_count_dict(stats.vr_usage(), "VR Status", "Users")
_log("VR HEADSETS")
dump_generic_count_dict(stats.vr_headsets(), "Headset Type", "Users")
plotly.offline.plot(grapher.vr_usage(), image='png', image_filename='vr_usage' + file_name_suffix, image_width=1024, output_type='file')
plotly.offline.plot(grapher.vr_headsets(), image='png', image_filename='vr_headsets' + file_name_suffix, image_width=1024, output_type='file')
def get_gpu_manufacturer(gpu_string):
if lower_contains(gpu_string, ('firepro', 'firegl', 'radeon', 'amd ')) or gpu_string.startswith(('67EF', '67DF', 'ASUS EAH', 'ASUS R')):
return "AMD/ATI"
elif lower_contains(gpu_string, ('Quadro', 'GeForce', 'TITAN')) or gpu_string.startswith(('NVS ', 'NV1')):
return "Nvidia"
elif "Intel" in gpu_string:
return "Intel"
return "Unknown"
def get_gpu_generation(gpu_string):
gpu = gpu_string.lower()
if "quadro" in gpu:
return "Nvidia Quadro (All Generations)"
elif "firepro" in gpu or "firegl" in gpu:
return "AMD FirePro (All Generations)"
if "radeon" in gpu or "asus" in gpu:
for gen in [2, 3, 4, 5, 6, 7, 8, 9]:
gen = str(gen)
if "R" + gen + " M" in gpu_string:
return "Radeon R" + gen + "M"
elif "R" + gen + " " in gpu_string:
return "Radeon R" + gen
elif re.search(gen + "\d\d\dM", gpu_string) or ("Mobility" in gpu_string and re.search(gen + "\d\d\d", gpu_string)):
return "Radeon " + gen + "xxxM"
elif re.search(gen + "\d\d\d", gpu_string):
return "Radeon " + gen + "xxxM"
else:
return "Radeon (Other)"
elif "titan x" in gpu:
return "GeForce 9xx"
elif "titan" in gpu:
return "GeForce 7xx"
elif "geforce" in gpu:
for gen in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
gen = str(gen)
base_radeon_re = "GeForce (G|GT|GTX|GTS)?\s*"
if re.search(base_radeon_re + gen + "\d\d\s*(Ti)?(\s|/)", gpu_string):
return "GeForce " + gen + "xx"
elif re.search(base_radeon_re + gen + "\d\dM", gpu_string):
return "GeForce " + gen + "xxM"
elif re.search(base_radeon_re + gen + "\d\d\d\s*(Ti)?(\s|/)", gpu_string):
return "GeForce " + gen + "xxx"
elif re.search(base_radeon_re + gen + "\d\d\dM", gpu_string):
return "GeForce " + gen + "xxxM"
else:
return "GeForce (Other)"
elif "intel" in gpu:
if any(ident in gpu for ident in ["gma", "gm45", "g41", "g45", "q45", "eaglelake", "4 series"]):
return "Intel Integrated (GMA or earlier)"
elif "hd" in gpu or "iris" in gpu:
if any(ident in gpu for ident in ["2000", "3000"]):
return "Intel Integrated (6th Generation; HD 2000/3000)"
elif any(ident in gpu for ident in ["4000", "4200", "4400", "4600", "4700", "5000", "5100", "5200"]):
return "Intel Integrated (7th Generation; HD 2500/4x00/5x00)"
elif any(ident in gpu_string for ident in ["5300", "5500", "5600", "5700", "6000", "6100", "6200", "6300"]):
return "Intel Integrated (8th Generation; HD 5x00/6x00)"
elif any(ident in gpu_string for ident in ["500", "505", "510", "515", "520", "530", "540", "550", "580"]):
return "Intel Integrated (9th Generation; HD 5xx)"
else:
return "Intel Integrated (5th Generation; HD)"
elif "sandybridge" in gpu:
return "Intel Integrated (6th Generation; HD 2000/3000)"
elif "haswell" in gpu or "ivybridge" in gpu or "bay trail" in gpu:
return "Intel Integrated (7th Generation; HD 2500/4x00/5x00)"
elif "broadwell" in gpu:
return "Intel Integrated (8th Generation; HD 5x00/6x00)"
elif "skylake" in gpu:
return "Intel Integrated (9th Generation; HD 5xx)"
elif "ironlake" in gpu:
return "Intel Integrated (5th Generation; HD)"
else:
return gpu_string
return "Other"
def get_mobile_versus_desktop(gpu_string):
gen = get_gpu_generation(gpu_string)
if gen.startswith("Intel"):
return "Intel"
elif gen.endswith("M"):
return "Mobile"
else:
return "Desktop"
def perform_gpu_analysis(stats: HardwareStats):
gpu_manufacturer = stats.gpu_manufacturers()
_log("GPU PLATFORM")
dump_generic_count_dict(stats.gpu_platform(), "GPU Platform", "Machines")
_log("GPU MANUFACTURER")
dump_generic_count_dict(gpu_manufacturer, "GPU Manufacturer", "Machines")
_log("GPU GENERATION")
dump_generic_count_dict(stats.gpu_generation(), "GPU Generation", "Machines")
with suppress(KeyError):
del gpu_manufacturer['Unknown']
make_bar_chart(gpu_manufacturer, 'gpu_manufacturer' + file_name_suffix, 'Manufacturer', needs_conversion_to_percents=False, height_scaling_factor=0.7)
def perform_ram_analysis(stats: HardwareStats):
users_with_at_least_this_much_ram = stats.ram_amounts()
_log("USERS WITH AT LEAST THIS MUCH RAM")
for ram_amount, value in users_with_at_least_this_much_ram.items():
_log(','.join([str(ram_amount), str(value)]))
_log("\n" * 3)
make_bar_chart(users_with_at_least_this_much_ram, 'ram_amounts' + file_name_suffix, 'RAM Amount', make_x_label=lambda l: str(l) + '+', height_scaling_factor=0.7)
def perform_flight_controls_analysis(results_rows):
known_yokes = [
"Saitek Pro Flight Yoke",
"Saitek X52",
"CH FLIGHT SIM YOKE",
"CH ECLIPSE YOKE",
"Pro Flight Cessna Yoke",
"PFC Cirrus Yoke",
"CH 3-Axis 10-Button POV USB Yoke",
]
known_sticks = [
"Logitech 3D Pro",
"T.Flight Hotas",
"T.Flight Stick X",
"Logitech Attack 3",
"Mad Catz F.L.Y.5 Stick",
"SideWinder Precision 2",
"T.16000M",
"SideWinder Force Feedback 2",
"Saitek Pro Flight X-55 Rhino Stick",
"Cyborg",
"Saitek Cyborg USB Stick",
"AV8R",
"Logitech Freedom 2.4",
"SideWinder Joystick",
"Mad Catz V.1 Stick",
"SideWinder Precision Pro",
"SideWinder 3D Pro",
"Logitech Force 3D Pro",
"WingMan Force 3D",
"Joystick - HOTAS Warthog",
"WingMan Extreme Digital 3D",
"WingMan Extreme 3D",
"Top Gun Afterburner",
"CH FLIGHTSTICK PRO",
"CH FIGHTERSTICK",
"CH COMBATSTICK",
"Saitek ST290",
"Saitek ST90",
"Top Gun Fox 2",
"Aviator for Playstation 3",
"Dark Tornado Joystick",
"Saitek X45",
"Saitek X36",
"USB Joystick",
"Pro Flight X65",
"G940",
"HOTAS Cougar Joystick",
"MetalStrik 3D",
"WingMan Attack 2"
]
known_controllers = [
"XBOX",
"Playstation(R)3 Controller",
"WingMan Cordless Gamepad",
"WingMan RumblePad",
"Logitech Dual Action",
"RumblePad 2",
"ASUS Gamepad",
"USB WirelessGamepad",
"Betop Controller",
"Logitech(R) Precision(TM) Gamepad",
"Wireless Gamepad F710"
]
known_rc_controllers = [
"InterLink Elite",
"RealFlight Interface"
]
def canonicalize_stick_or_yoke_name(flight_control_row):
flight_control_row = clean_up_string_formatting(flight_control_row)
if "Mouse" in flight_control_row:
return "Mouse"
elif "VID:1133PID:49685" in flight_control_row:
return "Logitech Extreme 3D"
elif "WingMan Ext Digital 3D" in flight_control_row:
return "WingMan Extreme Digital 3D"
elif "VID:1699PID:1890" in flight_control_row:
return "Saitek X52"
elif "Wireless 360 Controller" in flight_control_row:
return "XBOX"
elif "VID:121PID:6" in flight_control_row:
return "Generic USB Joystick"
elif "VID:1678PID:49402" in flight_control_row:
return "CH Products (Unknown)"
for control in known_yokes + known_sticks + known_controllers:
if control.lower() in flight_control_row.lower():
return control
if "," in flight_control_row:
return flight_control_row.replace(",", ";")
return flight_control_row
def classify_stick_or_yoke(flight_control_row):
flight_control_row = canonicalize_stick_or_yoke_name(flight_control_row)
if flight_control_row == "Mouse":
return "Mouse"
elif flight_control_row in known_yokes:
return "Yoke"
elif flight_control_row in known_sticks:
return "Joystick"
elif flight_control_row in known_controllers:
return "Gamepad"
elif flight_control_row in known_rc_controllers:
return "RC Controller"
elif "yoke" in flight_control_row.lower():
return "Yoke"
elif "stick" in flight_control_row.lower():
return "Joystick"
elif "pad" in flight_control_row.lower():
return "Gamepad"
else:
return "Unknown"
flight_controls = collections.defaultdict(int)
flight_control_type = collections.defaultdict(int)
has_rudder_pedals = collections.defaultdict(int)
for row in results_rows:
val = str_to_int(row[1])
flight_controls[canonicalize_stick_or_yoke_name(row[0])] += val
flight_control_type[classify_stick_or_yoke(row[0])] += val
row = clean_up_string_formatting(row[0])
if "rudder" in row.lower() or "pedals" in row.lower():
has_rudder_pedals[True] += val
else:
has_rudder_pedals[False] += val
nuke_these_keys = []
for controls, count in flight_controls.items():
if count < 5:
nuke_these_keys.append(controls)
for key in nuke_these_keys:
flight_controls["Other"] += flight_controls[key]
del flight_controls[key]
_log("PRIMARY FLIGHT CONTROLS TYPE")
dump_generic_count_dict(flight_control_type, "Flight Controls Type", "Users")
_log("PRIMARY FLIGHT CONTROLS MODEL (for non-mouse users)")
del flight_controls["Mouse"]
dump_generic_count_dict(flight_controls, "Flight Controls Model", "Users")
_log("USERS FLYING WITH PEDALS")
dump_generic_count_dict(has_rudder_pedals, "Has Pedals?", "Users")
def dump_generic_count_dict(dictionary, label, metric_category):
if SHOW_ABSOLUTE_NUMBERS:
_log(label + ",Num " + metric_category + ",% of All " + metric_category)
else:
_log(label + ",% of All " + metric_category)
total = total_entries_in_dict(dictionary)
sorted_dict = sorted(dictionary.items(), key=operator.itemgetter(1), reverse=True)
for i, label_and_count in enumerate(sorted_dict):
if SHOW_ABSOLUTE_NUMBERS:
_log(','.join([str(label_and_count[0]), str(label_and_count[1]), str((label_and_count[1] / total) * 100) + "%"]))
else:
# Coerce to ASCII
label = clean_up_string_formatting(label_and_count[0])
percent_str = clean_up_string_formatting(str((label_and_count[1] / total) * 100) + u"%")
_log(label, end="")
_log(",", end="")
_log(percent_str)
_log("\n" * 3)
def lower_contains(s: str, check: Iterable[str]) -> bool:
return any(sub.lower() in s.lower() for sub in check)
if __name__ == '__main__':
main() | en | 0.784428 | Accesses the Google Analytics API to spit out a CSV of aircraft usage :type csv_path: Union[str,None] # Overall platform breakdown # Coerce to ASCII | 2.954684 | 3 |
engine/src/wolf.plugins/Wolf.Plugins.Maya/WolfCmds.py | SiminBadri/Wolf.Engine | 1 | 6614890 | <reponame>SiminBadri/Wolf.Engine
from PySide import QtCore
from PySide import QtGui
from shiboken import wrapInstance
import maya.cmds as cmds
import maya.OpenMayaUI as omui
def maya_main_window():
main_window_ptr = omui.MQtUtil.mainWindow()
return wrapInstance(long(main_window_ptr), QtGui.QWidget)
class WShelf(QtGui.QDialog):
def __init__(self, parent=maya_main_window()):
super(WShelf, self).__init__(parent)
self.setWindowTitle("WolfEngine")
self.setWindowFlags(QtCore.Qt.Tool)
# Delete UI on close to avoid winEvent error
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.create_layout()
self.create_connections()
def create_layout(self):
#Load Scene button
self.loadSceneBtn = QtGui.QPushButton("Load Scene")
main_layout = QtGui.QVBoxLayout()
main_layout.setContentsMargins(2, 2, 2, 2)
main_layout.setSpacing(2)
main_layout.addWidget(self.loadSceneBtn)
main_layout.addStretch()
self.setLayout(main_layout)
def create_connections(self):
self.loadSceneBtn.clicked.connect(WShelf.WLoadScene)
@classmethod
def WLoadScene(cls):
#cmds.polyCube()
cmds.WLoadScene()
if __name__ == "__main__":
# Development workaround for winEvent error when running
# the script multiple times
try:
ui.close()
except:
pass
ui = WShelf()
ui.show()
| from PySide import QtCore
from PySide import QtGui
from shiboken import wrapInstance
import maya.cmds as cmds
import maya.OpenMayaUI as omui
def maya_main_window():
main_window_ptr = omui.MQtUtil.mainWindow()
return wrapInstance(long(main_window_ptr), QtGui.QWidget)
class WShelf(QtGui.QDialog):
def __init__(self, parent=maya_main_window()):
super(WShelf, self).__init__(parent)
self.setWindowTitle("WolfEngine")
self.setWindowFlags(QtCore.Qt.Tool)
# Delete UI on close to avoid winEvent error
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.create_layout()
self.create_connections()
def create_layout(self):
#Load Scene button
self.loadSceneBtn = QtGui.QPushButton("Load Scene")
main_layout = QtGui.QVBoxLayout()
main_layout.setContentsMargins(2, 2, 2, 2)
main_layout.setSpacing(2)
main_layout.addWidget(self.loadSceneBtn)
main_layout.addStretch()
self.setLayout(main_layout)
def create_connections(self):
self.loadSceneBtn.clicked.connect(WShelf.WLoadScene)
@classmethod
def WLoadScene(cls):
#cmds.polyCube()
cmds.WLoadScene()
if __name__ == "__main__":
# Development workaround for winEvent error when running
# the script multiple times
try:
ui.close()
except:
pass
ui = WShelf()
ui.show() | en | 0.676372 | # Delete UI on close to avoid winEvent error #Load Scene button #cmds.polyCube() # Development workaround for winEvent error when running # the script multiple times | 2.174626 | 2 |
survol/sources_types/mysql/instance/__init__.py | AugustinMascarelli/survol | 0 | 6614891 | """
MySql instance
"""
# This does not import mysql packages, so this will always work.
def Graphic_colorbg():
return "#66CC00"
import lib_common
def EntityOntology():
return ( ["Instance",], )
def MakeUri(instanceName):
return lib_common.gUriGen.UriMakeFromDict("mysql/instance", { "Instance": instanceName } )
#def EntityName(entity_ids_arr):
# return entity_ids_arr[1]+ "@" + entity_ids_arr[0]
def AddInfo(grph,node,entity_ids_arr):
instanceMySql = entity_ids_arr[0]
instanceHost = instanceMySql.split(":")[0]
nodeHost = lib_common.gUriGen.HostnameUri( instanceHost )
grph.add((node,lib_common.MakeProp("Instance"),nodeHost))
| """
MySql instance
"""
# This does not import mysql packages, so this will always work.
def Graphic_colorbg():
return "#66CC00"
import lib_common
def EntityOntology():
return ( ["Instance",], )
def MakeUri(instanceName):
return lib_common.gUriGen.UriMakeFromDict("mysql/instance", { "Instance": instanceName } )
#def EntityName(entity_ids_arr):
# return entity_ids_arr[1]+ "@" + entity_ids_arr[0]
def AddInfo(grph,node,entity_ids_arr):
instanceMySql = entity_ids_arr[0]
instanceHost = instanceMySql.split(":")[0]
nodeHost = lib_common.gUriGen.HostnameUri( instanceHost )
grph.add((node,lib_common.MakeProp("Instance"),nodeHost))
| en | 0.634467 | MySql instance # This does not import mysql packages, so this will always work. #def EntityName(entity_ids_arr): # return entity_ids_arr[1]+ "@" + entity_ids_arr[0] | 2.397352 | 2 |
truthdb/exec/tex.py | sjyk/truth-db | 0 | 6614892 | # -*- coding: utf-8 -*-
"""This file is part of TruthDB which is released under MIT License and
is copyrighted by the University of Chicago Database Group.
This file describes the language primitives for tuple expressions.
"""
"""Basic filter conditions
"""
def exists(x):
return lambda t: x in t
def existsAttribute(x):
def _inner(tup):
for t in tup:
try:
if t[0] == x:
return True
except:
pass
return False
return _inner
def existsValue(x):
def _inner(tup):
for t in tup:
try:
if t[1] == x:
return True
except:
pass
return False
return _inner
"""Join conditions (assumes cart is materialized)
"""
def match(a1, a2):
"""Exact equality on two attribute conditions
"""
return jaccard(a1,a2, 1.0)
def jaccard(a1, a2, thresh):
"""Match on jaccard similarity
"""
def _inner(tup):
left = set([a[1] for a in tup if a1(a)])
right = set([a[1] for a in tup if a2(a)])
jac = len(left.intersection(right))/len(left.union(right))
return (jac >= thresh)
return _inner
def atl_one(a1, a2):
"""If at least one hit
"""
def _inner(tup):
left = set([a[1] for a in tup if a1(a)])
right = set([a[1] for a in tup if a2(a)])
return (len(left.intersection(right)) > 0)
return _inner | # -*- coding: utf-8 -*-
"""This file is part of TruthDB which is released under MIT License and
is copyrighted by the University of Chicago Database Group.
This file describes the language primitives for tuple expressions.
"""
"""Basic filter conditions
"""
def exists(x):
return lambda t: x in t
def existsAttribute(x):
def _inner(tup):
for t in tup:
try:
if t[0] == x:
return True
except:
pass
return False
return _inner
def existsValue(x):
def _inner(tup):
for t in tup:
try:
if t[1] == x:
return True
except:
pass
return False
return _inner
"""Join conditions (assumes cart is materialized)
"""
def match(a1, a2):
"""Exact equality on two attribute conditions
"""
return jaccard(a1,a2, 1.0)
def jaccard(a1, a2, thresh):
"""Match on jaccard similarity
"""
def _inner(tup):
left = set([a[1] for a in tup if a1(a)])
right = set([a[1] for a in tup if a2(a)])
jac = len(left.intersection(right))/len(left.union(right))
return (jac >= thresh)
return _inner
def atl_one(a1, a2):
"""If at least one hit
"""
def _inner(tup):
left = set([a[1] for a in tup if a1(a)])
right = set([a[1] for a in tup if a2(a)])
return (len(left.intersection(right)) > 0)
return _inner | en | 0.901256 | # -*- coding: utf-8 -*- This file is part of TruthDB which is released under MIT License and is copyrighted by the University of Chicago Database Group. This file describes the language primitives for tuple expressions. Basic filter conditions Join conditions (assumes cart is materialized) Exact equality on two attribute conditions Match on jaccard similarity If at least one hit | 3.127786 | 3 |
HiggsAnalysis/Skimming/python/higgsToWW2Leptons_Sequences_cff.py | SWuchterl/cmssw | 6 | 6614893 | import FWCore.ParameterSet.Config as cms
from HiggsAnalysis.Skimming.higgsToWW2Leptons_HLTPaths_cfi import *
from HiggsAnalysis.Skimming.higgsToWW2Leptons_Filter_cfi import *
higgsToWWTrigReport = cms.EDAnalyzer("HLTrigReport",
HLTriggerResults = cms.InputTag("TriggerResults")
)
higgsToWW2LeptonsSequence = cms.Sequence(higgsToWWTrigReport+higgsToWW2LeptonsHLTFilter+higgsToWW2LeptonsFilter)
| import FWCore.ParameterSet.Config as cms
from HiggsAnalysis.Skimming.higgsToWW2Leptons_HLTPaths_cfi import *
from HiggsAnalysis.Skimming.higgsToWW2Leptons_Filter_cfi import *
higgsToWWTrigReport = cms.EDAnalyzer("HLTrigReport",
HLTriggerResults = cms.InputTag("TriggerResults")
)
higgsToWW2LeptonsSequence = cms.Sequence(higgsToWWTrigReport+higgsToWW2LeptonsHLTFilter+higgsToWW2LeptonsFilter)
| none | 1 | 1.194334 | 1 | |
PyQuM/ver(1.1)/pyqum/correction.py | takehuge/PYQUM | 0 | 6614894 | # Loading Basics
from colorama import init, Back, Fore
init(autoreset=True) #to convert termcolor to wins color
from os.path import basename as bs
myname = bs(__file__).split('.')[0] # This py-script's name
from importlib import import_module as im
from flask import Flask, request, render_template, Response, redirect, Blueprint, jsonify, session, send_from_directory, abort, g
from pyqum.instrument.logger import address, get_status, set_status, status_code, output_code
# Error handling
from contextlib import suppress
# Scientific
from scipy import constants as cnst
from si_prefix import si_format, si_parse
from numpy import cos, sin, pi, polyfit, poly1d, array, roots, isreal, sqrt, mean
# Load instruments
from pyqum.instrument.modular import AWG, VSA # open native Agilent M933x -> Initiate VSA -> Initiate AWG (Success!!!)
from pyqum.instrument.benchtop import PSGA, RSA5, MXA
from pyqum.instrument.toolbox import match, waveform, pauselog, squarewave
from pyqum.instrument.analyzer import IQAParray
encryp = 'ghhgjadz'
bp = Blueprint(myname, __name__, url_prefix='/corr')
# Main
@bp.route('/')
def show():
with suppress(KeyError):
print(Fore.LIGHTBLUE_EX + "USER " + Fore.YELLOW + "%s "%session['user_name'] + Fore.LIGHTBLUE_EX + "has just logged in as Guest #%s!"%session['user_id'])
# Security implementation:
if not g.user['instrument']:
abort(404)
return render_template("blog/machn/machine.html")
return("<h3>WHO ARE YOU?</h3><h3>Please F**k*ng Login!</h3><h3>Courtesy from <a href='http://qum.phys.sinica.edu.tw:5300/auth/login'>HoDoR</a></h3>")
# ALL
@bp.route('/all', methods=['POST', 'GET'])
def all():
# Test Bed # All Task # Great Work
current_usr = session['user_name']
return render_template("blog/machn/all.html", current_usr=current_usr)
@bp.route('/all/status', methods=['GET'])
def allstatus():
return jsonify()
# AWG
@bp.route('/awg', methods=['GET'])
def awg():
return render_template("blog/machn/awg.html")
@bp.route('/awg/log', methods=['GET'])
def awglog():
log = get_status('AWG')
return jsonify(log=log)
@bp.route('/awg/reset', methods=['GET'])
def awgreset():
global awgsess
awgsess = AWG.InitWithOptions()
AWG.Abort_Gen(awgsess)
status = AWG.model(awgsess) # model
print('Model: %s (%s)' % (status[1], status_code(status[0])))
return jsonify(message=awgsess)
print(Back.BLUE + Fore.CYAN + myname + ".bp registered!") # leave 2 lines blank before this
| # Loading Basics
from colorama import init, Back, Fore
init(autoreset=True) #to convert termcolor to wins color
from os.path import basename as bs
myname = bs(__file__).split('.')[0] # This py-script's name
from importlib import import_module as im
from flask import Flask, request, render_template, Response, redirect, Blueprint, jsonify, session, send_from_directory, abort, g
from pyqum.instrument.logger import address, get_status, set_status, status_code, output_code
# Error handling
from contextlib import suppress
# Scientific
from scipy import constants as cnst
from si_prefix import si_format, si_parse
from numpy import cos, sin, pi, polyfit, poly1d, array, roots, isreal, sqrt, mean
# Load instruments
from pyqum.instrument.modular import AWG, VSA # open native Agilent M933x -> Initiate VSA -> Initiate AWG (Success!!!)
from pyqum.instrument.benchtop import PSGA, RSA5, MXA
from pyqum.instrument.toolbox import match, waveform, pauselog, squarewave
from pyqum.instrument.analyzer import IQAParray
encryp = 'ghhgjadz'
bp = Blueprint(myname, __name__, url_prefix='/corr')
# Main
@bp.route('/')
def show():
with suppress(KeyError):
print(Fore.LIGHTBLUE_EX + "USER " + Fore.YELLOW + "%s "%session['user_name'] + Fore.LIGHTBLUE_EX + "has just logged in as Guest #%s!"%session['user_id'])
# Security implementation:
if not g.user['instrument']:
abort(404)
return render_template("blog/machn/machine.html")
return("<h3>WHO ARE YOU?</h3><h3>Please F**k*ng Login!</h3><h3>Courtesy from <a href='http://qum.phys.sinica.edu.tw:5300/auth/login'>HoDoR</a></h3>")
# ALL
@bp.route('/all', methods=['POST', 'GET'])
def all():
# Test Bed # All Task # Great Work
current_usr = session['user_name']
return render_template("blog/machn/all.html", current_usr=current_usr)
@bp.route('/all/status', methods=['GET'])
def allstatus():
return jsonify()
# AWG
@bp.route('/awg', methods=['GET'])
def awg():
return render_template("blog/machn/awg.html")
@bp.route('/awg/log', methods=['GET'])
def awglog():
log = get_status('AWG')
return jsonify(log=log)
@bp.route('/awg/reset', methods=['GET'])
def awgreset():
global awgsess
awgsess = AWG.InitWithOptions()
AWG.Abort_Gen(awgsess)
status = AWG.model(awgsess) # model
print('Model: %s (%s)' % (status[1], status_code(status[0])))
return jsonify(message=awgsess)
print(Back.BLUE + Fore.CYAN + myname + ".bp registered!") # leave 2 lines blank before this
| en | 0.537717 | # Loading Basics #to convert termcolor to wins color # This py-script's name # Error handling # Scientific # Load instruments # open native Agilent M933x -> Initiate VSA -> Initiate AWG (Success!!!) # Main #%s!"%session['user_id']) # Security implementation: # ALL # Test Bed # All Task # Great Work # AWG # model # leave 2 lines blank before this | 1.972361 | 2 |
prj/setup_simple.py | asvetlov/optimization-kiev-2017 | 0 | 6614895 | <reponame>asvetlov/optimization-kiev-2017
from Cython.Build import cythonize
from setuptools import setup
setup(name='prj',
version='0.0.1',
packages=['prj'],
ext_modules=cythonize('prj/_mod.pyx'))
| from Cython.Build import cythonize
from setuptools import setup
setup(name='prj',
version='0.0.1',
packages=['prj'],
ext_modules=cythonize('prj/_mod.pyx')) | none | 1 | 1.287718 | 1 | |
test/invest_core_test.py | phargogh/invest-natcap.invest-3 | 0 | 6614896 | import os, sys
import unittest
import random
import logging
import math
from osgeo import ogr, gdal, osr
from osgeo.gdalconst import *
import numpy as np
from nose.plugins.skip import SkipTest
from invest_natcap.dbfpy import dbf
import invest_cython_core
from invest_natcap.invest_core import invest_core
import invest_test_core
logger = logging.getLogger('invest_core_test')
logging.basicConfig(format='%(asctime)s %(name)-15s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
class TestInvestCore(unittest.TestCase):
def testflowDirectionD8(self):
"""Regression test for flow direction on a DEM"""
dem = gdal.Open('./invest-data/test/data/sediment_test_data/dem')
flow = invest_cython_core.newRasterFromBase(dem,
'./invest-data/test/data/test_out/flow.tif', 'GTiff', 0, gdal.GDT_Float32)
invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
regressionFlow = \
gdal.Open('./invest-data/test/data/sediment_test_data/flowregression.tif')
invest_test_core.assertTwoDatasetsEqual(self, flow, regressionFlow)
def testflowDirectionD8Simple(self):
"""Regression test for flow direction on a DEM with an example
constructed by hand"""
driver = gdal.GetDriverByName("MEM")
#Create a 3x3 dem raster
dem = driver.Create('', 3, 3, 1, gdal.GDT_Float32)
dem.GetRasterBand(1).SetNoDataValue(-1.0)
dem.GetRasterBand(1).WriteArray(np.array([[902, 909, 918], [895, 904, 916], [893, 904, 918]]).transpose())
flow = invest_cython_core.newRasterFromBase(dem,
'', 'MEM', 0, gdal.GDT_Byte)
invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
flowMatrix = flow.ReadAsArray(0, 0, 3, 3)
self.assertEqual(128, flowMatrix[1][1],
'Incorrect flow, should be 128 != %s' % flowMatrix[1][1])
dem.GetRasterBand(1).WriteArray(np.array([[190, 185, 181], [189, 185, 182], [189, 185, 182]]).transpose())
flow = invest_cython_core.newRasterFromBase(dem,
'', 'MEM', 0, gdal.GDT_Byte)
flowDir = invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
flowMatrix = flowDir.ReadAsArray(0, 0, 3, 3)
self.assertEqual(8, flowMatrix[1][1],
'Incorrect flow, should be 8 != %s' % flowMatrix[1][1])
dem.GetRasterBand(1).WriteArray(np.array([[343, 332, 343],
[340, 341, 343],
[345, 338, 343]]).transpose())
flow = invest_cython_core.newRasterFromBase(dem,
'', 'MEM', 0, gdal.GDT_Byte)
flowDir = invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
flowMatrix = flowDir.ReadAsArray(0, 0, 3, 3)
self.assertEqual(16, flowMatrix[1][1],
'Incorrect flow, should be 16 != %s' % flowMatrix[1][1])
dem.GetRasterBand(1).WriteArray(np.array([[194, 191, 191],
[191, 188, 188],
[191, 189, 189]]).transpose())
flow = invest_cython_core.newRasterFromBase(dem,
'', 'MEM', 0, gdal.GDT_Byte)
flowDir = invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
flowMatrix = flowDir.ReadAsArray(0, 0, 3, 3)
self.assertEqual(4, flowMatrix[1][1],
'Incorrect flow, should be 4 != %s' % flowMatrix[1][1])
def testslopeCalculation(self):
"""Regression test for slope calculation"""
dem = gdal.Open('./invest-data/test/data/sediment_test_data/dem')
slope_output = invest_cython_core.newRasterFromBase(dem,
'./invest-data/test/data/test_out/slope.tif', 'GTiff', -1, gdal.GDT_Float32)
invest_cython_core.calculate_slope(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], slope_output)
regressionSlope = \
gdal.Open('./invest-data/test/data/sediment_test_data/slopeRegression.tif')
invest_test_core.assertTwoDatasetsEqual(self, slope_output, regressionSlope)
def testvectorizeRasters(self):
r1 = gdal.Open('./invest-data/test/data/base_data/terrestrial/lulc_samp_cur')
r2 = gdal.Open('./invest-data/test/data/base_data/Freshwater/precip')
def op(a, b):
return np.sqrt(a ** 2 + b ** 2)
invest_core.vectorizeRasters([r1, r2], op,
rasterName='./invest-data/test/data/test_out/rasterizeRasters.tiff', datatype=gdal.GDT_Float32)
def testinterpolateMatrix(self):
"""Test the matrix interpolation function"""
def assertEqualInterpPoints(x, y, newx, newy, z):
for xVal in x:
for yVal in y:
i = x.tolist().index(xVal)
j = y.tolist().index(yVal)
ii = newx.tolist().index(xVal)
jj = newy.tolist().index(yVal)
self.assertAlmostEquals(z[j][i], interpz[jj][ii], 5,
"z[%s][%s], interpz[%s][%s], %s != %s" %
(i, j, ii, jj, z[j][i], interpz[jj][ii]))
#Create a non-trivial somewhat random matrix
x = np.array([-4.2, 3, 6, 10, 11])
y = np.array([-9, 3, 6, 10])
z = np.array([[0., 8., 11., 12.5, 0.0],
[0., 1., 1., 0., 0.],
[-7.2, 3., 1.2, 0., 0.],
[0., 4.9, 2.5, 0, 0.]])
#print z.shape
#print 'x', x
#print 'y', y
#print 'z', z
newx = np.array([-8.2, -4.2, 0, 2.5, 3, 5, 6, 7.5, 10, 11, 15.2, 100.0])
newy = np.array([-9, 0, 2.5, 3, 5, 6, 7.5, 10, 22.2, 100.0])
#print 'newx', newx
#print 'newy', newy
logging.debug('calling interpolate matrix')
interpz = invest_cython_core.interpolateMatrix(x, y, z, newx, newy)
#print 'interpz:', interpz
logging.debug('testing the result of interpolate matrix')
assertEqualInterpPoints(x, y, newx, newy, z)
def testRasterDiff(self):
driver = gdal.GetDriverByName("MEM")
xDim, yDim = 417, 219
testA, testB = -248.23, 1829.2
#Create a blank xDim x yDim raster
datasetA = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetA.GetRasterBand(1).SetNoDataValue(-11.0)
datasetA.GetRasterBand(1).Fill(testA)
datasetB = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetB.GetRasterBand(1).SetNoDataValue(-11.0)
datasetB.GetRasterBand(1).Fill(testB)
datasetC = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetC.GetRasterBand(1).SetNoDataValue(-11.0)
datasetC.GetRasterBand(1).Fill(testA - testB)
datasetOut = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetOut.GetRasterBand(1).SetNoDataValue(-11.0)
invest_core.rasterDiff(datasetA.GetRasterBand(1),
datasetB.GetRasterBand(1),
datasetOut.GetRasterBand(1))
invest_test_core.assertTwoDatasetsEqual(self, datasetOut, datasetC)
def testRasterAdd(self):
driver = gdal.GetDriverByName("MEM")
xDim, yDim = 417, 219
testA, testB = -248.23, 1829.2
#Create a blank xDim x yDim raster
datasetA = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetA.GetRasterBand(1).SetNoDataValue(-11.0)
datasetA.GetRasterBand(1).Fill(testA)
datasetB = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetB.GetRasterBand(1).SetNoDataValue(-11.0)
datasetB.GetRasterBand(1).Fill(testB)
datasetC = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetC.GetRasterBand(1).SetNoDataValue(-11.0)
datasetC.GetRasterBand(1).Fill(testA + testB)
datasetOut = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetOut.GetRasterBand(1).SetNoDataValue(-11.0)
invest_core.rasterAdd(datasetA.GetRasterBand(1),
datasetB.GetRasterBand(1),
datasetOut.GetRasterBand(1))
invest_test_core.assertTwoDatasetsEqual(self, datasetOut, datasetC)
def test_carbon_pixel_area(self):
"""Verify the correct output of carbon.pixelArea()"""
dataset = gdal.Open('./invest-data/test/data/carbon_regression_data/sequest_regression.tif',
gdal.GA_ReadOnly)
area = invest_cython_core.pixelArea(dataset)
#assert the output of pixelArea against the known value
#(it's 30x30 meters) so 0.09 Ha
self.assertEqual(0.09, area)
def test_wave_energy_pixel_size_in_meters(self):
"""Verify the correct output of wave_energy.pixel_size_in_meters()"""
#This file is known/tested to have the right conversion
correct_pixel_path = './invest-data/test/data/wave_energy_regression_data/npv_usd_regression.tif'
dataset_correct_pixel = gdal.Open(correct_pixel_path, gdal.GA_ReadOnly)
dataset = gdal.Open('./invest-data/test/data/wave_energy_data/samp_input/global_dem',
gdal.GA_ReadOnly)
#We need to get a point from the shapefile in the same vacinity as
#the projection
shape = ogr.Open('./invest-data/test/data/wave_energy_regression_data/WEM_InputOutput_Pts_bio_regression.shp')
layer = shape.GetLayer(0)
feat = layer.GetNextFeature()
geom = feat.GetGeometryRef()
lat = geom.GetX()
longitude = geom.GetY()
#Create Coordinate Transformation from lat/long to meters
srs_prj = osr.SpatialReference()
srs_prj.SetWellKnownGeogCS("WGS84")
source_sr = srs_prj
trg_prj = osr.SpatialReference()
trg_prj.ImportFromWkt(dataset_correct_pixel.GetProjectionRef())
target_sr = trg_prj
coord_trans = osr.CoordinateTransformation(source_sr, target_sr)
#Convert the shapefiles geometry point to lat/long
coord_trans_opposite = osr.CoordinateTransformation(target_sr, source_sr)
point_decimal_degree = coord_trans_opposite.TransformPoint(lat, longitude)
#Get the size of the pixels in meters
pixel_size_tuple = invest_cython_core.pixel_size_in_meters(dataset,
coord_trans,
point_decimal_degree)
geo_tran = dataset_correct_pixel.GetGeoTransform()
logger.debug('correct pixel sizes : %s : %s', geo_tran[1], geo_tran[5])
logger.debug('returned pixel sizes %s : %s', pixel_size_tuple[0], pixel_size_tuple[1])
#assert that the x and y pixels are the same size.
#take absolute value because we are not concerned with direction
#but size.
self.assertEqual(pixel_size_tuple[0], abs(geo_tran[1]))
self.assertEqual(pixel_size_tuple[1], abs(geo_tran[5]))
def test_createRasterFromVectorExtents(self):
fsencoding = sys.getfilesystemencoding()
shp = ogr.Open('./invest-data/test/data/sediment_test_data/subwatersheds.shp'.\
encode(fsencoding))
raster = invest_cython_core.createRasterFromVectorExtents(30, 30,
gdal.GDT_Float32, -5.0, './invest-data/test/data/test_out/subwatershed.tif', shp)
| import os, sys
import unittest
import random
import logging
import math
from osgeo import ogr, gdal, osr
from osgeo.gdalconst import *
import numpy as np
from nose.plugins.skip import SkipTest
from invest_natcap.dbfpy import dbf
import invest_cython_core
from invest_natcap.invest_core import invest_core
import invest_test_core
logger = logging.getLogger('invest_core_test')
logging.basicConfig(format='%(asctime)s %(name)-15s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
class TestInvestCore(unittest.TestCase):
def testflowDirectionD8(self):
"""Regression test for flow direction on a DEM"""
dem = gdal.Open('./invest-data/test/data/sediment_test_data/dem')
flow = invest_cython_core.newRasterFromBase(dem,
'./invest-data/test/data/test_out/flow.tif', 'GTiff', 0, gdal.GDT_Float32)
invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
regressionFlow = \
gdal.Open('./invest-data/test/data/sediment_test_data/flowregression.tif')
invest_test_core.assertTwoDatasetsEqual(self, flow, regressionFlow)
def testflowDirectionD8Simple(self):
"""Regression test for flow direction on a DEM with an example
constructed by hand"""
driver = gdal.GetDriverByName("MEM")
#Create a 3x3 dem raster
dem = driver.Create('', 3, 3, 1, gdal.GDT_Float32)
dem.GetRasterBand(1).SetNoDataValue(-1.0)
dem.GetRasterBand(1).WriteArray(np.array([[902, 909, 918], [895, 904, 916], [893, 904, 918]]).transpose())
flow = invest_cython_core.newRasterFromBase(dem,
'', 'MEM', 0, gdal.GDT_Byte)
invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
flowMatrix = flow.ReadAsArray(0, 0, 3, 3)
self.assertEqual(128, flowMatrix[1][1],
'Incorrect flow, should be 128 != %s' % flowMatrix[1][1])
dem.GetRasterBand(1).WriteArray(np.array([[190, 185, 181], [189, 185, 182], [189, 185, 182]]).transpose())
flow = invest_cython_core.newRasterFromBase(dem,
'', 'MEM', 0, gdal.GDT_Byte)
flowDir = invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
flowMatrix = flowDir.ReadAsArray(0, 0, 3, 3)
self.assertEqual(8, flowMatrix[1][1],
'Incorrect flow, should be 8 != %s' % flowMatrix[1][1])
dem.GetRasterBand(1).WriteArray(np.array([[343, 332, 343],
[340, 341, 343],
[345, 338, 343]]).transpose())
flow = invest_cython_core.newRasterFromBase(dem,
'', 'MEM', 0, gdal.GDT_Byte)
flowDir = invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
flowMatrix = flowDir.ReadAsArray(0, 0, 3, 3)
self.assertEqual(16, flowMatrix[1][1],
'Incorrect flow, should be 16 != %s' % flowMatrix[1][1])
dem.GetRasterBand(1).WriteArray(np.array([[194, 191, 191],
[191, 188, 188],
[191, 189, 189]]).transpose())
flow = invest_cython_core.newRasterFromBase(dem,
'', 'MEM', 0, gdal.GDT_Byte)
flowDir = invest_cython_core.flowDirectionD8(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], flow)
flowMatrix = flowDir.ReadAsArray(0, 0, 3, 3)
self.assertEqual(4, flowMatrix[1][1],
'Incorrect flow, should be 4 != %s' % flowMatrix[1][1])
def testslopeCalculation(self):
"""Regression test for slope calculation"""
dem = gdal.Open('./invest-data/test/data/sediment_test_data/dem')
slope_output = invest_cython_core.newRasterFromBase(dem,
'./invest-data/test/data/test_out/slope.tif', 'GTiff', -1, gdal.GDT_Float32)
invest_cython_core.calculate_slope(dem,
[0, 0, dem.RasterXSize, dem.RasterYSize], slope_output)
regressionSlope = \
gdal.Open('./invest-data/test/data/sediment_test_data/slopeRegression.tif')
invest_test_core.assertTwoDatasetsEqual(self, slope_output, regressionSlope)
def testvectorizeRasters(self):
r1 = gdal.Open('./invest-data/test/data/base_data/terrestrial/lulc_samp_cur')
r2 = gdal.Open('./invest-data/test/data/base_data/Freshwater/precip')
def op(a, b):
return np.sqrt(a ** 2 + b ** 2)
invest_core.vectorizeRasters([r1, r2], op,
rasterName='./invest-data/test/data/test_out/rasterizeRasters.tiff', datatype=gdal.GDT_Float32)
def testinterpolateMatrix(self):
"""Test the matrix interpolation function"""
def assertEqualInterpPoints(x, y, newx, newy, z):
for xVal in x:
for yVal in y:
i = x.tolist().index(xVal)
j = y.tolist().index(yVal)
ii = newx.tolist().index(xVal)
jj = newy.tolist().index(yVal)
self.assertAlmostEquals(z[j][i], interpz[jj][ii], 5,
"z[%s][%s], interpz[%s][%s], %s != %s" %
(i, j, ii, jj, z[j][i], interpz[jj][ii]))
#Create a non-trivial somewhat random matrix
x = np.array([-4.2, 3, 6, 10, 11])
y = np.array([-9, 3, 6, 10])
z = np.array([[0., 8., 11., 12.5, 0.0],
[0., 1., 1., 0., 0.],
[-7.2, 3., 1.2, 0., 0.],
[0., 4.9, 2.5, 0, 0.]])
#print z.shape
#print 'x', x
#print 'y', y
#print 'z', z
newx = np.array([-8.2, -4.2, 0, 2.5, 3, 5, 6, 7.5, 10, 11, 15.2, 100.0])
newy = np.array([-9, 0, 2.5, 3, 5, 6, 7.5, 10, 22.2, 100.0])
#print 'newx', newx
#print 'newy', newy
logging.debug('calling interpolate matrix')
interpz = invest_cython_core.interpolateMatrix(x, y, z, newx, newy)
#print 'interpz:', interpz
logging.debug('testing the result of interpolate matrix')
assertEqualInterpPoints(x, y, newx, newy, z)
def testRasterDiff(self):
driver = gdal.GetDriverByName("MEM")
xDim, yDim = 417, 219
testA, testB = -248.23, 1829.2
#Create a blank xDim x yDim raster
datasetA = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetA.GetRasterBand(1).SetNoDataValue(-11.0)
datasetA.GetRasterBand(1).Fill(testA)
datasetB = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetB.GetRasterBand(1).SetNoDataValue(-11.0)
datasetB.GetRasterBand(1).Fill(testB)
datasetC = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetC.GetRasterBand(1).SetNoDataValue(-11.0)
datasetC.GetRasterBand(1).Fill(testA - testB)
datasetOut = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetOut.GetRasterBand(1).SetNoDataValue(-11.0)
invest_core.rasterDiff(datasetA.GetRasterBand(1),
datasetB.GetRasterBand(1),
datasetOut.GetRasterBand(1))
invest_test_core.assertTwoDatasetsEqual(self, datasetOut, datasetC)
def testRasterAdd(self):
driver = gdal.GetDriverByName("MEM")
xDim, yDim = 417, 219
testA, testB = -248.23, 1829.2
#Create a blank xDim x yDim raster
datasetA = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetA.GetRasterBand(1).SetNoDataValue(-11.0)
datasetA.GetRasterBand(1).Fill(testA)
datasetB = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetB.GetRasterBand(1).SetNoDataValue(-11.0)
datasetB.GetRasterBand(1).Fill(testB)
datasetC = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetC.GetRasterBand(1).SetNoDataValue(-11.0)
datasetC.GetRasterBand(1).Fill(testA + testB)
datasetOut = driver.Create('', xDim, yDim, 1, gdal.GDT_Float64)
datasetOut.GetRasterBand(1).SetNoDataValue(-11.0)
invest_core.rasterAdd(datasetA.GetRasterBand(1),
datasetB.GetRasterBand(1),
datasetOut.GetRasterBand(1))
invest_test_core.assertTwoDatasetsEqual(self, datasetOut, datasetC)
def test_carbon_pixel_area(self):
"""Verify the correct output of carbon.pixelArea()"""
dataset = gdal.Open('./invest-data/test/data/carbon_regression_data/sequest_regression.tif',
gdal.GA_ReadOnly)
area = invest_cython_core.pixelArea(dataset)
#assert the output of pixelArea against the known value
#(it's 30x30 meters) so 0.09 Ha
self.assertEqual(0.09, area)
def test_wave_energy_pixel_size_in_meters(self):
"""Verify the correct output of wave_energy.pixel_size_in_meters()"""
#This file is known/tested to have the right conversion
correct_pixel_path = './invest-data/test/data/wave_energy_regression_data/npv_usd_regression.tif'
dataset_correct_pixel = gdal.Open(correct_pixel_path, gdal.GA_ReadOnly)
dataset = gdal.Open('./invest-data/test/data/wave_energy_data/samp_input/global_dem',
gdal.GA_ReadOnly)
#We need to get a point from the shapefile in the same vacinity as
#the projection
shape = ogr.Open('./invest-data/test/data/wave_energy_regression_data/WEM_InputOutput_Pts_bio_regression.shp')
layer = shape.GetLayer(0)
feat = layer.GetNextFeature()
geom = feat.GetGeometryRef()
lat = geom.GetX()
longitude = geom.GetY()
#Create Coordinate Transformation from lat/long to meters
srs_prj = osr.SpatialReference()
srs_prj.SetWellKnownGeogCS("WGS84")
source_sr = srs_prj
trg_prj = osr.SpatialReference()
trg_prj.ImportFromWkt(dataset_correct_pixel.GetProjectionRef())
target_sr = trg_prj
coord_trans = osr.CoordinateTransformation(source_sr, target_sr)
#Convert the shapefiles geometry point to lat/long
coord_trans_opposite = osr.CoordinateTransformation(target_sr, source_sr)
point_decimal_degree = coord_trans_opposite.TransformPoint(lat, longitude)
#Get the size of the pixels in meters
pixel_size_tuple = invest_cython_core.pixel_size_in_meters(dataset,
coord_trans,
point_decimal_degree)
geo_tran = dataset_correct_pixel.GetGeoTransform()
logger.debug('correct pixel sizes : %s : %s', geo_tran[1], geo_tran[5])
logger.debug('returned pixel sizes %s : %s', pixel_size_tuple[0], pixel_size_tuple[1])
#assert that the x and y pixels are the same size.
#take absolute value because we are not concerned with direction
#but size.
self.assertEqual(pixel_size_tuple[0], abs(geo_tran[1]))
self.assertEqual(pixel_size_tuple[1], abs(geo_tran[5]))
def test_createRasterFromVectorExtents(self):
fsencoding = sys.getfilesystemencoding()
shp = ogr.Open('./invest-data/test/data/sediment_test_data/subwatersheds.shp'.\
encode(fsencoding))
raster = invest_cython_core.createRasterFromVectorExtents(30, 30,
gdal.GDT_Float32, -5.0, './invest-data/test/data/test_out/subwatershed.tif', shp)
| en | 0.677753 | Regression test for flow direction on a DEM Regression test for flow direction on a DEM with an example constructed by hand #Create a 3x3 dem raster Regression test for slope calculation Test the matrix interpolation function #Create a non-trivial somewhat random matrix #print z.shape #print 'x', x #print 'y', y #print 'z', z #print 'newx', newx #print 'newy', newy #print 'interpz:', interpz #Create a blank xDim x yDim raster #Create a blank xDim x yDim raster Verify the correct output of carbon.pixelArea() #assert the output of pixelArea against the known value #(it's 30x30 meters) so 0.09 Ha Verify the correct output of wave_energy.pixel_size_in_meters() #This file is known/tested to have the right conversion #We need to get a point from the shapefile in the same vacinity as #the projection #Create Coordinate Transformation from lat/long to meters #Convert the shapefiles geometry point to lat/long #Get the size of the pixels in meters #assert that the x and y pixels are the same size. #take absolute value because we are not concerned with direction #but size. | 2.188445 | 2 |
Caltor/main.py | trigger16f/Caltor | 2 | 6614897 | <filename>Caltor/main.py
from PySide2 import QtWidgets
import Caltor
import sys
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = Caltor.Caltor()
window.show()
sys.exit(app.exec_())
| <filename>Caltor/main.py
from PySide2 import QtWidgets
import Caltor
import sys
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = Caltor.Caltor()
window.show()
sys.exit(app.exec_())
| none | 1 | 2.15067 | 2 | |
huaweicloud-sdk-sdrs/huaweicloudsdksdrs/v1/model/replication_record_metadata.py | huaweicloud/huaweicloud-sdk-python-v3 | 64 | 6614898 | <filename>huaweicloud-sdk-sdrs/huaweicloudsdksdrs/v1/model/replication_record_metadata.py
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ReplicationRecordMetadata:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'multiattach': 'bool',
'bootable': 'bool',
'volume_size': 'int',
'volume_type': 'str'
}
attribute_map = {
'multiattach': 'multiattach',
'bootable': 'bootable',
'volume_size': 'volume_size',
'volume_type': 'volume_type'
}
def __init__(self, multiattach=None, bootable=None, volume_size=None, volume_type=None):
"""ReplicationRecordMetadata - a model defined in huaweicloud sdk"""
self._multiattach = None
self._bootable = None
self._volume_size = None
self._volume_type = None
self.discriminator = None
self.multiattach = multiattach
self.bootable = bootable
self.volume_size = volume_size
self.volume_type = volume_type
@property
def multiattach(self):
"""Gets the multiattach of this ReplicationRecordMetadata.
复制对中的云硬盘是否为共享云硬盘。
:return: The multiattach of this ReplicationRecordMetadata.
:rtype: bool
"""
return self._multiattach
@multiattach.setter
def multiattach(self, multiattach):
"""Sets the multiattach of this ReplicationRecordMetadata.
复制对中的云硬盘是否为共享云硬盘。
:param multiattach: The multiattach of this ReplicationRecordMetadata.
:type: bool
"""
self._multiattach = multiattach
@property
def bootable(self):
"""Gets the bootable of this ReplicationRecordMetadata.
复制对中的云硬盘是否为系统盘。
:return: The bootable of this ReplicationRecordMetadata.
:rtype: bool
"""
return self._bootable
@bootable.setter
def bootable(self, bootable):
"""Sets the bootable of this ReplicationRecordMetadata.
复制对中的云硬盘是否为系统盘。
:param bootable: The bootable of this ReplicationRecordMetadata.
:type: bool
"""
self._bootable = bootable
@property
def volume_size(self):
"""Gets the volume_size of this ReplicationRecordMetadata.
复制对中的云硬盘容量。单位:GB
:return: The volume_size of this ReplicationRecordMetadata.
:rtype: int
"""
return self._volume_size
@volume_size.setter
def volume_size(self, volume_size):
"""Sets the volume_size of this ReplicationRecordMetadata.
复制对中的云硬盘容量。单位:GB
:param volume_size: The volume_size of this ReplicationRecordMetadata.
:type: int
"""
self._volume_size = volume_size
@property
def volume_type(self):
"""Gets the volume_type of this ReplicationRecordMetadata.
复制对中的云硬盘类型。SATA:普通IO磁盘类型。SAS:高IO磁盘类型。SSD:超高IO磁盘类型。co-p1:高IO(性能优化I型)uh-l1:超高IO(时延优化)其中co-p1和uh-l1两种云硬盘只能使用在HANA云服务器、HL1型云服务器、HL2型云服务器上。
:return: The volume_type of this ReplicationRecordMetadata.
:rtype: str
"""
return self._volume_type
@volume_type.setter
def volume_type(self, volume_type):
"""Sets the volume_type of this ReplicationRecordMetadata.
复制对中的云硬盘类型。SATA:普通IO磁盘类型。SAS:高IO磁盘类型。SSD:超高IO磁盘类型。co-p1:高IO(性能优化I型)uh-l1:超高IO(时延优化)其中co-p1和uh-l1两种云硬盘只能使用在HANA云服务器、HL1型云服务器、HL2型云服务器上。
:param volume_type: The volume_type of this ReplicationRecordMetadata.
:type: str
"""
self._volume_type = volume_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicationRecordMetadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| <filename>huaweicloud-sdk-sdrs/huaweicloudsdksdrs/v1/model/replication_record_metadata.py
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ReplicationRecordMetadata:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'multiattach': 'bool',
'bootable': 'bool',
'volume_size': 'int',
'volume_type': 'str'
}
attribute_map = {
'multiattach': 'multiattach',
'bootable': 'bootable',
'volume_size': 'volume_size',
'volume_type': 'volume_type'
}
def __init__(self, multiattach=None, bootable=None, volume_size=None, volume_type=None):
"""ReplicationRecordMetadata - a model defined in huaweicloud sdk"""
self._multiattach = None
self._bootable = None
self._volume_size = None
self._volume_type = None
self.discriminator = None
self.multiattach = multiattach
self.bootable = bootable
self.volume_size = volume_size
self.volume_type = volume_type
@property
def multiattach(self):
"""Gets the multiattach of this ReplicationRecordMetadata.
复制对中的云硬盘是否为共享云硬盘。
:return: The multiattach of this ReplicationRecordMetadata.
:rtype: bool
"""
return self._multiattach
@multiattach.setter
def multiattach(self, multiattach):
"""Sets the multiattach of this ReplicationRecordMetadata.
复制对中的云硬盘是否为共享云硬盘。
:param multiattach: The multiattach of this ReplicationRecordMetadata.
:type: bool
"""
self._multiattach = multiattach
@property
def bootable(self):
"""Gets the bootable of this ReplicationRecordMetadata.
复制对中的云硬盘是否为系统盘。
:return: The bootable of this ReplicationRecordMetadata.
:rtype: bool
"""
return self._bootable
@bootable.setter
def bootable(self, bootable):
"""Sets the bootable of this ReplicationRecordMetadata.
复制对中的云硬盘是否为系统盘。
:param bootable: The bootable of this ReplicationRecordMetadata.
:type: bool
"""
self._bootable = bootable
@property
def volume_size(self):
"""Gets the volume_size of this ReplicationRecordMetadata.
复制对中的云硬盘容量。单位:GB
:return: The volume_size of this ReplicationRecordMetadata.
:rtype: int
"""
return self._volume_size
@volume_size.setter
def volume_size(self, volume_size):
"""Sets the volume_size of this ReplicationRecordMetadata.
复制对中的云硬盘容量。单位:GB
:param volume_size: The volume_size of this ReplicationRecordMetadata.
:type: int
"""
self._volume_size = volume_size
@property
def volume_type(self):
"""Gets the volume_type of this ReplicationRecordMetadata.
复制对中的云硬盘类型。SATA:普通IO磁盘类型。SAS:高IO磁盘类型。SSD:超高IO磁盘类型。co-p1:高IO(性能优化I型)uh-l1:超高IO(时延优化)其中co-p1和uh-l1两种云硬盘只能使用在HANA云服务器、HL1型云服务器、HL2型云服务器上。
:return: The volume_type of this ReplicationRecordMetadata.
:rtype: str
"""
return self._volume_type
@volume_type.setter
def volume_type(self, volume_type):
"""Sets the volume_type of this ReplicationRecordMetadata.
复制对中的云硬盘类型。SATA:普通IO磁盘类型。SAS:高IO磁盘类型。SSD:超高IO磁盘类型。co-p1:高IO(性能优化I型)uh-l1:超高IO(时延优化)其中co-p1和uh-l1两种云硬盘只能使用在HANA云服务器、HL1型云服务器、HL2型云服务器上。
:param volume_type: The volume_type of this ReplicationRecordMetadata.
:type: str
"""
self._volume_type = volume_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicationRecordMetadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| en | 0.322362 | # coding: utf-8 Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. ReplicationRecordMetadata - a model defined in huaweicloud sdk Gets the multiattach of this ReplicationRecordMetadata. 复制对中的云硬盘是否为共享云硬盘。 :return: The multiattach of this ReplicationRecordMetadata. :rtype: bool Sets the multiattach of this ReplicationRecordMetadata. 复制对中的云硬盘是否为共享云硬盘。 :param multiattach: The multiattach of this ReplicationRecordMetadata. :type: bool Gets the bootable of this ReplicationRecordMetadata. 复制对中的云硬盘是否为系统盘。 :return: The bootable of this ReplicationRecordMetadata. :rtype: bool Sets the bootable of this ReplicationRecordMetadata. 复制对中的云硬盘是否为系统盘。 :param bootable: The bootable of this ReplicationRecordMetadata. :type: bool Gets the volume_size of this ReplicationRecordMetadata. 复制对中的云硬盘容量。单位:GB :return: The volume_size of this ReplicationRecordMetadata. :rtype: int Sets the volume_size of this ReplicationRecordMetadata. 复制对中的云硬盘容量。单位:GB :param volume_size: The volume_size of this ReplicationRecordMetadata. :type: int Gets the volume_type of this ReplicationRecordMetadata. 复制对中的云硬盘类型。SATA:普通IO磁盘类型。SAS:高IO磁盘类型。SSD:超高IO磁盘类型。co-p1:高IO(性能优化I型)uh-l1:超高IO(时延优化)其中co-p1和uh-l1两种云硬盘只能使用在HANA云服务器、HL1型云服务器、HL2型云服务器上。 :return: The volume_type of this ReplicationRecordMetadata. :rtype: str Sets the volume_type of this ReplicationRecordMetadata. 复制对中的云硬盘类型。SATA:普通IO磁盘类型。SAS:高IO磁盘类型。SSD:超高IO磁盘类型。co-p1:高IO(性能优化I型)uh-l1:超高IO(时延优化)其中co-p1和uh-l1两种云硬盘只能使用在HANA云服务器、HL1型云服务器、HL2型云服务器上。 :param volume_type: The volume_type of this ReplicationRecordMetadata. :type: str Returns the model properties as a dict Returns the string representation of the model For `print` Returns true if both objects are equal Returns true if both objects are not equal | 2.390373 | 2 |
eval.py | dgudenius/football_win_predictions | 4 | 6614899 | from util import *
from Pythag_Win import *
# Read historical games from CSV
games = Util.read_games("data/nfl_games.csv")
# Forecast every game
Pythag_Win.pythag_win(games)
# Evaluate our forecasts against Elo
Util.evaluate_forecasts(games)
| from util import *
from Pythag_Win import *
# Read historical games from CSV
games = Util.read_games("data/nfl_games.csv")
# Forecast every game
Pythag_Win.pythag_win(games)
# Evaluate our forecasts against Elo
Util.evaluate_forecasts(games)
| en | 0.934354 | # Read historical games from CSV # Forecast every game # Evaluate our forecasts against Elo | 2.029965 | 2 |
03_boucles/01_somme_impairs.py | elemprog-python/elemprog-exercices-corriges | 0 | 6614900 | <gh_stars>0
# Exercice 3.1 : Somme des impairs
## Question 1
def somme_impairs_inf(n : int) -> int:
"""Précondition: n >= 0
Renvoie la somme de tous les entiers naturels impairs
inférieurs ou égaux à n.
"""
# somme calculée
s : int = 0
# impair courant (1 est le premier impair)
i : int = 1
while i <= n:
s = s + i
i = i + 2
return s
# Jeu de test
assert somme_impairs_inf(0) == 0
assert somme_impairs_inf(1) == 1
assert somme_impairs_inf(2) == 1
assert somme_impairs_inf(3) == 4
assert somme_impairs_inf(4) == 4
assert somme_impairs_inf(5) == 9
assert somme_impairs_inf(8) == 16
## Question 2
def somme_premiers_impairs(n : int) -> int:
"""Précondition : n > 0
Renvoie la somme des n premiers entiers impairs.
"""
# somme calculée
s : int = 0
# compteur
i : int = 1
# impair courant (1 est le premier impair)
imp : int = 1
while i <= n:
s = s + imp
imp = imp + 2
i = i + 1
return s
# Jeu de tests
assert somme_premiers_impairs(1) == 1 ** 2
assert somme_premiers_impairs(2) == 2 ** 2
assert somme_premiers_impairs(3) == 3 ** 2
assert somme_premiers_impairs(4) == 4 ** 2
assert somme_premiers_impairs(5) == 5 ** 2
assert somme_premiers_impairs(8) == 8 ** 2
assert somme_premiers_impairs(9) == 9 ** 2
| # Exercice 3.1 : Somme des impairs
## Question 1
def somme_impairs_inf(n : int) -> int:
"""Précondition: n >= 0
Renvoie la somme de tous les entiers naturels impairs
inférieurs ou égaux à n.
"""
# somme calculée
s : int = 0
# impair courant (1 est le premier impair)
i : int = 1
while i <= n:
s = s + i
i = i + 2
return s
# Jeu de test
assert somme_impairs_inf(0) == 0
assert somme_impairs_inf(1) == 1
assert somme_impairs_inf(2) == 1
assert somme_impairs_inf(3) == 4
assert somme_impairs_inf(4) == 4
assert somme_impairs_inf(5) == 9
assert somme_impairs_inf(8) == 16
## Question 2
def somme_premiers_impairs(n : int) -> int:
"""Précondition : n > 0
Renvoie la somme des n premiers entiers impairs.
"""
# somme calculée
s : int = 0
# compteur
i : int = 1
# impair courant (1 est le premier impair)
imp : int = 1
while i <= n:
s = s + imp
imp = imp + 2
i = i + 1
return s
# Jeu de tests
assert somme_premiers_impairs(1) == 1 ** 2
assert somme_premiers_impairs(2) == 2 ** 2
assert somme_premiers_impairs(3) == 3 ** 2
assert somme_premiers_impairs(4) == 4 ** 2
assert somme_premiers_impairs(5) == 5 ** 2
assert somme_premiers_impairs(8) == 8 ** 2
assert somme_premiers_impairs(9) == 9 ** 2 | fr | 0.890177 | # Exercice 3.1 : Somme des impairs ## Question 1 Précondition: n >= 0 Renvoie la somme de tous les entiers naturels impairs inférieurs ou égaux à n. # somme calculée # impair courant (1 est le premier impair) # Jeu de test ## Question 2 Précondition : n > 0 Renvoie la somme des n premiers entiers impairs. # somme calculée # compteur # impair courant (1 est le premier impair) # Jeu de tests | 3.427039 | 3 |
RPi/GPIO.py | sanielfishawy/rpi_gpio_stub | 0 | 6614901 | <gh_stars>0
#pylint: disable=invalid-name, unused-argument, redefined-builtin
BOARD = "Board"
OUT = "Out"
IN = "In"
LOW = 0
HIGH = 1
def setmode(a):
pass
def setup(a, b, initial=False):
pass
def output(a, b):
pass
def input(a):
return True
def cleanup():
pass
def setwarnings(flag):
pass
| #pylint: disable=invalid-name, unused-argument, redefined-builtin
BOARD = "Board"
OUT = "Out"
IN = "In"
LOW = 0
HIGH = 1
def setmode(a):
pass
def setup(a, b, initial=False):
pass
def output(a, b):
pass
def input(a):
return True
def cleanup():
pass
def setwarnings(flag):
pass | en | 0.30737 | #pylint: disable=invalid-name, unused-argument, redefined-builtin | 2.506716 | 3 |
tests/conftest.py | BenSDuggan/DAPT | 1 | 6614902 | <gh_stars>1-10
"""
Settings for pytest
"""
import pytest
from tests import preflight
def pytest_addoption(parser):
parser.addoption(
"--test_creds", action="store_true", default=False,
help="Run the tests that require credentials but not logging in"
)
parser.addoption(
"--test_login", action="store_true", default=False,
help="Run the tests that require users to login"
)
parser.addoption(
"--all", action="store_true", default=False, help="Run all of the tests"
)
def pytest_configure(config):
config.addinivalue_line("markers", "test_creds: mark test as requiring credentials")
config.addinivalue_line("markers", "test_login: mark test as requireing login")
def pytest_collection_modifyitems(config, items):
if config.getoption("--all"):
# Want to run all tests, so let them
return
skip_creds = pytest.mark.skip(reason="need --test_creds option to run")
skip_login = pytest.mark.skip(reason="need --test_login option to run")
for item in items:
if "test_creds" in item.keywords and not config.getoption("--test_creds"):
item.add_marker(skip_creds)
if "test_login" in item.keywords and not config.getoption("--test_login"):
item.add_marker(skip_login)
| """
Settings for pytest
"""
import pytest
from tests import preflight
def pytest_addoption(parser):
parser.addoption(
"--test_creds", action="store_true", default=False,
help="Run the tests that require credentials but not logging in"
)
parser.addoption(
"--test_login", action="store_true", default=False,
help="Run the tests that require users to login"
)
parser.addoption(
"--all", action="store_true", default=False, help="Run all of the tests"
)
def pytest_configure(config):
config.addinivalue_line("markers", "test_creds: mark test as requiring credentials")
config.addinivalue_line("markers", "test_login: mark test as requireing login")
def pytest_collection_modifyitems(config, items):
if config.getoption("--all"):
# Want to run all tests, so let them
return
skip_creds = pytest.mark.skip(reason="need --test_creds option to run")
skip_login = pytest.mark.skip(reason="need --test_login option to run")
for item in items:
if "test_creds" in item.keywords and not config.getoption("--test_creds"):
item.add_marker(skip_creds)
if "test_login" in item.keywords and not config.getoption("--test_login"):
item.add_marker(skip_login) | en | 0.871808 | Settings for pytest # Want to run all tests, so let them | 2.537465 | 3 |
loader.py | stanmain/IION-Data-analysis | 0 | 6614903 | <reponame>stanmain/IION-Data-analysis<filename>loader.py
# Copyright © 2018 <NAME>. All rights reserved.
#!/usr/bin/env python3
import sys
import psycopg2
import base64
import struct
import datetime
import json
def connection():
'''Создать соединение с базой данных ИИОН. Возвращает connection и cursor.'''
with open('.connection.json', 'rt') as file:
param = json.loads(file.read())
conn = psycopg2.connect(**param)
cur = conn.cursor()
return conn, cur
def get_struct(conn, cur):
'''Получить структуру БД.'''
try:
cur.execute("""SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='public'""")
for table in cur.fetchall():
print('{}:'.format(table[2]))
cur.execute("""SELECT * FROM {table} LIMIT 0""".format(table=table[2]))
for column in cur.description:
print('|{:<15}:{}'.format(column[0], column[1]))
print('\n')
except psycopg2.Error as e:
print(str(e))
def get_sfile(conn, cur, date=None):
'''Получить S-файлы из БД.'''
try:
table = 's_new_file'
if date:
condition = " WHERE DATE(date)='{}'".format(date)
else:
condition = ''
cur.execute("""SELECT date, session, acf1_arr FROM {table}{condition} ORDER BY date""".format(table=table, condition=condition))
downloaded = cur.fetchall()
for i, row in enumerate(downloaded):
print(i, row[0])
buf = base64.b64decode(row[2])
path = 'data/{}'.format(row[0].strftime('%d%m%y.%H%M'))
with open(path, 'wb') as file:
file.write(buf)
# print(len(buf))
# break
except psycopg2.Error as e:
print(str(e))
def get_dates(conn, cur):
'''Получить даты из БД.'''
try:
table = 's_new_file'
cur.execute("""SELECT DISTINCT EXTRACT(year from date) as date FROM {table} ORDER BY date""".format(table=table)) #WHERE DATE(date)>'2017-12-01'
downloaded = cur.fetchall()
for i, row in enumerate(downloaded):
print('{}:\t{}'.format(i, row[0]))
except psycopg2.Error as e:
print(str(e))
| # Copyright © 2018 <NAME>. All rights reserved.
#!/usr/bin/env python3
import sys
import psycopg2
import base64
import struct
import datetime
import json
def connection():
'''Создать соединение с базой данных ИИОН. Возвращает connection и cursor.'''
with open('.connection.json', 'rt') as file:
param = json.loads(file.read())
conn = psycopg2.connect(**param)
cur = conn.cursor()
return conn, cur
def get_struct(conn, cur):
'''Получить структуру БД.'''
try:
cur.execute("""SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='public'""")
for table in cur.fetchall():
print('{}:'.format(table[2]))
cur.execute("""SELECT * FROM {table} LIMIT 0""".format(table=table[2]))
for column in cur.description:
print('|{:<15}:{}'.format(column[0], column[1]))
print('\n')
except psycopg2.Error as e:
print(str(e))
def get_sfile(conn, cur, date=None):
'''Получить S-файлы из БД.'''
try:
table = 's_new_file'
if date:
condition = " WHERE DATE(date)='{}'".format(date)
else:
condition = ''
cur.execute("""SELECT date, session, acf1_arr FROM {table}{condition} ORDER BY date""".format(table=table, condition=condition))
downloaded = cur.fetchall()
for i, row in enumerate(downloaded):
print(i, row[0])
buf = base64.b64decode(row[2])
path = 'data/{}'.format(row[0].strftime('%d%m%y.%H%M'))
with open(path, 'wb') as file:
file.write(buf)
# print(len(buf))
# break
except psycopg2.Error as e:
print(str(e))
def get_dates(conn, cur):
'''Получить даты из БД.'''
try:
table = 's_new_file'
cur.execute("""SELECT DISTINCT EXTRACT(year from date) as date FROM {table} ORDER BY date""".format(table=table)) #WHERE DATE(date)>'2017-12-01'
downloaded = cur.fetchall()
for i, row in enumerate(downloaded):
print('{}:\t{}'.format(i, row[0]))
except psycopg2.Error as e:
print(str(e)) | ru | 0.430928 | # Copyright © 2018 <NAME>. All rights reserved. #!/usr/bin/env python3 Создать соединение с базой данных ИИОН. Возвращает connection и cursor. Получить структуру БД. SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='public' SELECT * FROM {table} LIMIT 0 Получить S-файлы из БД. SELECT date, session, acf1_arr FROM {table}{condition} ORDER BY date # print(len(buf)) # break Получить даты из БД. SELECT DISTINCT EXTRACT(year from date) as date FROM {table} ORDER BY date #WHERE DATE(date)>'2017-12-01' | 2.812268 | 3 |
problem0026/test_26.py | SimonZoo/LeetCode | 0 | 6614904 | <filename>problem0026/test_26.py
import unittest
from .remove_duplicates_from_sorted_array import Solution
class TestRemoveDuplicates(unittest.TestCase):
def test_1(self):
s = Solution()
input_list = [1,1,2]
output = 2
self.assertEqual(s.removeDuplicates(input_list), output)
def test_2(self):
s = Solution()
input_list = [0,0,1,1,1,2,2,3,3,4]
output = 5
self.assertEqual(s.removeDuplicates(input_list), output) | <filename>problem0026/test_26.py
import unittest
from .remove_duplicates_from_sorted_array import Solution
class TestRemoveDuplicates(unittest.TestCase):
def test_1(self):
s = Solution()
input_list = [1,1,2]
output = 2
self.assertEqual(s.removeDuplicates(input_list), output)
def test_2(self):
s = Solution()
input_list = [0,0,1,1,1,2,2,3,3,4]
output = 5
self.assertEqual(s.removeDuplicates(input_list), output) | none | 1 | 3.402575 | 3 | |
setup.py | levijoseph/Levi | 0 | 6614905 | #!/usr/bin/env python3
from setuptools import setup
from os.path import exists
from shutil import copyfile
if not exists('pokeminer/config.py'):
copyfile('config.example.py', 'pokeminer/config.py')
setup(
name="pokeminer",
version="0.8a0",
packages=('pokeminer',),
include_package_data=True,
zip_safe=False,
scripts=('scan.py', 'web.py', 'gyms.py', 'solve_captchas.py')
)
| #!/usr/bin/env python3
from setuptools import setup
from os.path import exists
from shutil import copyfile
if not exists('pokeminer/config.py'):
copyfile('config.example.py', 'pokeminer/config.py')
setup(
name="pokeminer",
version="0.8a0",
packages=('pokeminer',),
include_package_data=True,
zip_safe=False,
scripts=('scan.py', 'web.py', 'gyms.py', 'solve_captchas.py')
)
| fr | 0.221828 | #!/usr/bin/env python3 | 1.680811 | 2 |
robinhoodbot/stockrunner.py | nikunjshanti/RobinhoodBot | 0 | 6614906 | import robin_stocks as r
import logging
import yfinance as yf
logging.basicConfig(filename="stockrrunner.log", level=logging.DEBUG)
def get_portfolio_symbols():
"""
Returns: the symbol for each stock in your portfolio as a list of strings
"""
symbols = []
holdings_data = r.get_open_stock_positions()
for item in holdings_data:
if not item:
continue
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
#Log in to Robinhood
#Put your username and password in a config.py file in the same directory (see sample file)
daytradesymbols = ['TECL']
#login = r.login(config.rh_username,config.rh_password)
#portfolio_symbols = get_portfolio_symbols()
#print("Current Portfolio: " + str(portfolio_symbols) + "\n")
# get stock info
#print(msft.info)
# get historical market data
from slack_webhook import Slack
slack = Slack(url='https://<KEY>')
def main():
daytradesymbols = ['AAPL']
#hist = msft.history(period="1d", interval="1m", prepost=True)
for stock in daytradesymbols:
hist = yf.download(stock, period="1d", interval="1m", prepost=False)
highprice = hist["High"]
lowprice = hist["Low"]
lastprice = hist.iloc[[-1]]
currentprice = lastprice.iloc[0]['Close']
highpricemax = highprice.max()
lowpricemin = lowprice.min()
slackmsg = ""
tradingrange = highprice.max() - lowprice.min()
msg = (f"Trading Range: {str(tradingrange)} ---- Current Price: {str(currentprice)}")
slackmsg = slackmsg + msg + "\n"
#slack.post(text=msg)
logging.info(msg)
msg = (f"High Price: {str(highpricemax)} ---- Low Price: {str(lowpricemin)} -----")
slackmsg = slackmsg + msg + "\n"
#slack.post(text=msg)
logging.info(msg)
differencefromhigh = (highprice.max() - currentprice) / currentprice
differencefromlow = (lowprice.min() - currentprice) / currentprice
msg = (f"Difference form high {str(differencefromhigh)} ---- Difference from Low {str(differencefromlow)}")
slackmsg = slackmsg + msg + "\n"
#slack.post(text=msg)
logging.info(msg)
#slack.post(text=slackmsg)
differntfromlow = currentprice - lowprice.min()
msg = f"Diff from Low {differntfromlow}"
slackmsg = slackmsg + msg + "\n"
logging.info(msg)
if differencefromhigh > 0.010:
if currentprice > lowprice:
logging.info("buy")
#and
'''
attachments=[{
"fallback": "Plan a vacation",
"author_name": "Owner: rdesoto",
"title": "Plan a vacation",
"text": "I've been working too hard, it's time for a break.",
"actions": [
{
"name": "action",
"type": "button",
"text": "Complete this task",
"style": "",
"value": "complete"
},
{
"name": "tags_list",
"type": "select",
"text": "Add a tag...",
"data_source": "static",
"options": [
{
"text": "Launch Blocking",
"value": "launch-blocking"
},
{
"text": "Enhancement",
"value": "enhancement"
},
{
"text": "Bug",
"value": "bug"
}
]
}
]
}])
'''
#buyprice = 1972
#buypricepotential = (lowprice.min() - buyprice) / buyprice
#print(buypricepotential)
if __name__ == '__main__':
main() | import robin_stocks as r
import logging
import yfinance as yf
logging.basicConfig(filename="stockrrunner.log", level=logging.DEBUG)
def get_portfolio_symbols():
"""
Returns: the symbol for each stock in your portfolio as a list of strings
"""
symbols = []
holdings_data = r.get_open_stock_positions()
for item in holdings_data:
if not item:
continue
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
#Log in to Robinhood
#Put your username and password in a config.py file in the same directory (see sample file)
daytradesymbols = ['TECL']
#login = r.login(config.rh_username,config.rh_password)
#portfolio_symbols = get_portfolio_symbols()
#print("Current Portfolio: " + str(portfolio_symbols) + "\n")
# get stock info
#print(msft.info)
# get historical market data
from slack_webhook import Slack
slack = Slack(url='https://<KEY>')
def main():
daytradesymbols = ['AAPL']
#hist = msft.history(period="1d", interval="1m", prepost=True)
for stock in daytradesymbols:
hist = yf.download(stock, period="1d", interval="1m", prepost=False)
highprice = hist["High"]
lowprice = hist["Low"]
lastprice = hist.iloc[[-1]]
currentprice = lastprice.iloc[0]['Close']
highpricemax = highprice.max()
lowpricemin = lowprice.min()
slackmsg = ""
tradingrange = highprice.max() - lowprice.min()
msg = (f"Trading Range: {str(tradingrange)} ---- Current Price: {str(currentprice)}")
slackmsg = slackmsg + msg + "\n"
#slack.post(text=msg)
logging.info(msg)
msg = (f"High Price: {str(highpricemax)} ---- Low Price: {str(lowpricemin)} -----")
slackmsg = slackmsg + msg + "\n"
#slack.post(text=msg)
logging.info(msg)
differencefromhigh = (highprice.max() - currentprice) / currentprice
differencefromlow = (lowprice.min() - currentprice) / currentprice
msg = (f"Difference form high {str(differencefromhigh)} ---- Difference from Low {str(differencefromlow)}")
slackmsg = slackmsg + msg + "\n"
#slack.post(text=msg)
logging.info(msg)
#slack.post(text=slackmsg)
differntfromlow = currentprice - lowprice.min()
msg = f"Diff from Low {differntfromlow}"
slackmsg = slackmsg + msg + "\n"
logging.info(msg)
if differencefromhigh > 0.010:
if currentprice > lowprice:
logging.info("buy")
#and
'''
attachments=[{
"fallback": "Plan a vacation",
"author_name": "Owner: rdesoto",
"title": "Plan a vacation",
"text": "I've been working too hard, it's time for a break.",
"actions": [
{
"name": "action",
"type": "button",
"text": "Complete this task",
"style": "",
"value": "complete"
},
{
"name": "tags_list",
"type": "select",
"text": "Add a tag...",
"data_source": "static",
"options": [
{
"text": "Launch Blocking",
"value": "launch-blocking"
},
{
"text": "Enhancement",
"value": "enhancement"
},
{
"text": "Bug",
"value": "bug"
}
]
}
]
}])
'''
#buyprice = 1972
#buypricepotential = (lowprice.min() - buyprice) / buyprice
#print(buypricepotential)
if __name__ == '__main__':
main() | en | 0.414529 | Returns: the symbol for each stock in your portfolio as a list of strings #Log in to Robinhood #Put your username and password in a config.py file in the same directory (see sample file) #login = r.login(config.rh_username,config.rh_password) #portfolio_symbols = get_portfolio_symbols() #print("Current Portfolio: " + str(portfolio_symbols) + "\n") # get stock info #print(msft.info) # get historical market data #hist = msft.history(period="1d", interval="1m", prepost=True) #slack.post(text=msg) #slack.post(text=msg) #slack.post(text=msg) #slack.post(text=slackmsg) #and attachments=[{ "fallback": "Plan a vacation", "author_name": "Owner: rdesoto", "title": "Plan a vacation", "text": "I've been working too hard, it's time for a break.", "actions": [ { "name": "action", "type": "button", "text": "Complete this task", "style": "", "value": "complete" }, { "name": "tags_list", "type": "select", "text": "Add a tag...", "data_source": "static", "options": [ { "text": "Launch Blocking", "value": "launch-blocking" }, { "text": "Enhancement", "value": "enhancement" }, { "text": "Bug", "value": "bug" } ] } ] }]) #buyprice = 1972 #buypricepotential = (lowprice.min() - buyprice) / buyprice #print(buypricepotential) | 2.957006 | 3 |
dsg/RNN_MTM_classifier.py | AI-Companion/ds-gear | 0 | 6614907 | import os
import string
import pickle
import time
from typing import List
import numpy as np
from dsg.base import BasePreprocessor, BaseRNN
from dsg.layers import Glove6BEmbedding, FastTextEmbedding
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import pandas as pd
from sklearn.metrics import classification_report
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, Input, load_model
from keras.layers import LSTM, Dense, TimeDistributed, Embedding, Bidirectional, add
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
# NOTE: these corpus downloads run as a side effect at import time
# ('punkt' is required by word_tokenize; 'stopwords' appears only in
# commented-out filtering in the visible code -- verify before removing)
nltk.download('punkt')
nltk.download('stopwords')
class RNNMTMPreprocessor(BasePreprocessor):
    """
    Data preparation pipeline for the many-to-many RNN classifier.

    Handles cleaning of tokenised sentences, fitting of the word tokenizer
    and of the label->index mapping, padding of sequences, and
    (de)serialisation of the fitted preprocessing state.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def init_from_config(self, max_sequence_length: int, vocab_size: int, validation_split: float):
        """
        Initializes a fresh (unfitted) preprocessor from configuration values.
        Args:
            max_sequence_length: length to which every token sequence is padded/truncated
            vocab_size: maximum number of words kept by the Keras tokenizer
            validation_split: fraction of the data reserved for validation
        Return:
            None
        """
        self.max_sequence_length = max_sequence_length
        self.vocab_size = vocab_size
        self.validation_split = validation_split
        self.tokenizer_obj = None
        self.labels_to_idx = None

    def init_from_file(self, preprocessor_file: str):
        """
        Loads a previously saved preprocessor state.
        Args:
            preprocessor_file: path to the pickle file written by save()
        Return:
            None (state is restored on the instance)
        """
        with open(preprocessor_file, 'rb') as f:
            # load order must mirror the dump order in save()
            self.tokenizer_obj = pickle.load(f)
            self.labels_to_idx = pickle.load(f)
            self.max_sequence_length = pickle.load(f)
            self.validation_split = pickle.load(f)
            self.vocab_size = pickle.load(f)

    def clean(self, X: List):
        """
        Cleans tokenised sentences: lower-cases every token, strips
        punctuation characters and keeps purely alphabetic tokens only.
        Args:
            X: iterable of token lists (one list of words per sentence)
        Returns:
            list of cleaned token lists
        """
        print("===========> data cleaning")
        # the translation table is loop-invariant -> build it once
        table = str.maketrans('', '', string.punctuation)
        review_lines = list()
        for line in X:
            tokens = [w.lower() for w in line]
            stripped = [w.translate(table) for w in tokens]
            # drop tokens that are not purely alphabetic (numbers, leftovers)
            words = [word for word in stripped if word.isalpha()]
            review_lines.append(words)
        print("----> data cleaning finish")
        return review_lines

    def fit(self, X: List, y: List):
        """
        Fits the word tokenizer on X and builds the label->index mapping
        from y. Index 0 is reserved for the "pad" entry in both mappings.
        Args:
            X: list of cleaned token lists (predictors)
            y: list of label lists, aligned with X
        Returns:
            None (tokenizer_obj and labels_to_idx are stored on the instance)
        """
        print("===========> data tokenization")
        # features tokenization
        self.tokenizer_obj = Tokenizer(num_words=self.vocab_size)
        self.tokenizer_obj.fit_on_texts(X)
        self.tokenizer_obj.word_index["pad"] = 0
        flat_list = [item for sublist in y for item in sublist]
        # sort the labels so the label -> index mapping is deterministic
        # across runs (plain set iteration order depends on hash seeding)
        unique_labels = sorted(set(flat_list))
        self.labels_to_idx = {t: i + 1 for i, t in enumerate(unique_labels)}
        self.labels_to_idx["pad"] = 0
        print("----> data fitting finish")
        print("found %i unique tokens" % len(self.tokenizer_obj.word_index))

    def save(self, file_name_prefix, save_folder):
        """
        Stores the fitted preprocessor state as a pickle file under save_folder.
        Args:
            file_name_prefix: file name prefix, e.g. 'named_entity_recognition_%Y%m%d_%H%M%S'
            save_folder: folder under which to save the file
        Return:
            None
        """
        file_url = os.path.join(save_folder, file_name_prefix + "_preprocessor.pkl")
        with open(file_url, 'wb') as handle:
            # dump order must mirror the load order in init_from_file()
            pickle.dump(self.tokenizer_obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
            pickle.dump(self.labels_to_idx, handle, protocol=pickle.HIGHEST_PROTOCOL)
            pickle.dump(self.max_sequence_length, handle, protocol=pickle.HIGHEST_PROTOCOL)
            pickle.dump(self.validation_split, handle, protocol=pickle.HIGHEST_PROTOCOL)
            pickle.dump(self.vocab_size, handle, protocol=pickle.HIGHEST_PROTOCOL)
        print("----> preprocessor object saved to %s" % file_url)

    def preprocess(self, X, y=None):
        """
        Converts raw text (or token lists) into padded index sequences ready
        for the network; optionally encodes and pads the labels as well.
        Args:
            X: iterable of sentences; each entry is either a token list or a
               raw string (strings are tokenised with word_tokenize)
            y: optional list of label lists, aligned with X
        Return:
            tuple (padded feature matrix, token lists, per-sentence token
            counts, padded encoded labels or None when y is None)
        """
        lines = list()
        n_tokens_list = list()
        for line in X:
            if not isinstance(line, list):
                line = word_tokenize(line)
            lines.append(line)
            n_tokens_list.append(len(line))
        sequences = self.tokenizer_obj.texts_to_sequences(lines)
        # pad/truncate at the end of each sentence with the reserved "pad" index
        review_pad = pad_sequences(sequences, maxlen=self.max_sequence_length, padding="post",
                                   value=self.tokenizer_obj.word_index["pad"])
        print("features tensor shape ", review_pad.shape)
        tokenized_labels = None
        if y is not None:
            tokenized_labels = [[self.labels_to_idx[word] for word in sublist] for sublist in y]
            tokenized_labels = pad_sequences(tokenized_labels, maxlen=self.max_sequence_length, padding="post",
                                             value=self.labels_to_idx["pad"])
            print("labels tensor shape ", tokenized_labels.shape)
        return review_pad, lines, n_tokens_list, tokenized_labels
class RNNMTM(BaseRNN):
"""
RNN Many to Many classifier, this architecture serves applications such as Named Entity Recognition, machine translation ...
"""
    def __init__(self, **kwargs):
        """Creates the classifier and delegates configuration to BaseRNN."""
        # NOTE(review): NER-specific attributes are declared before
        # super().__init__ in case the parent constructor dispatches to
        # init_from_config / init_from_files, which assign them -- confirm in BaseRNN
        self.n_labels = None
        self.labels_to_idx = None
        super().__init__(**kwargs)
    def init_from_files(self, h5_file:str, class_file:str):
        """
        Initialize the class from a previously saved model.
        Args:
            h5_file: path to the saved keras model (.h5), containing the
                custom CRF layer, loss and metric
            class_file: path to the pickled class attributes
        Return:
            None
        """
        # the CRF layer and its loss/metric are custom objects and must be
        # supplied explicitly for keras to deserialize the model
        self.model = load_model(h5_file, custom_objects={'CRF': CRF,
                                                         'crf_loss': crf_loss,
                                                         'crf_viterbi_accuracy': crf_viterbi_accuracy})
        with open(class_file, 'rb') as f:
            # load order must mirror the order used by the saving routine
            self.use_pretrained_embedding = pickle.load(f)
            self.vocab_size = pickle.load(f)
            self.embedding_dimension = pickle.load(f)
            self.embeddings_path = pickle.load(f)
            self.max_length = pickle.load(f)
            self.word_index = pickle.load(f)
    def init_from_config(self, pre_trained_embedding: bool, vocab_size:int, embedding_dimension:int, embedding_algorithm: str,
                         save_folder: str, n_iter: int, embeddings_path: str, max_sequence_length: int,
                         data_preprocessor: RNNMTMPreprocessor):
        """
        Initializes the class for the first time from explicit configuration
        values and an already-fitted data preprocessor, then builds the model.
        Args:
            pre_trained_embedding: whether to use a pre-trained embedding matrix
            vocab_size: maximum vocabulary size
            embedding_dimension: dimensionality of the word embedding vectors
            embedding_algorithm: name of the embedding algorithm to use
            save_folder: folder under which model artifacts are saved
            n_iter: number of training epochs
            embeddings_path: path to the pre-trained embedding file
            max_sequence_length: padded length of every input sequence
            data_preprocessor: fitted preprocessing tool for the training data
        Return:
            None
        """
        self.use_pretrained_embedding = pre_trained_embedding
        self.vocab_size = vocab_size
        self.embedding_dimension = embedding_dimension
        self.embeddings_name = embedding_algorithm
        self.embeddings_path = embeddings_path
        self.max_length = max_sequence_length
        self.n_iter = n_iter
        self.save_folder = save_folder
        # number of target classes, including the reserved "pad" label
        self.n_labels = len(data_preprocessor.labels_to_idx)
        self.word_index = data_preprocessor.tokenizer_obj.word_index
        self.embedding_layer = self.build_embedding()
        self.model = self.build_model()
def build_model(self):
"""
Builds an RNN model according to fixed architecture
Return:
None
"""
print("===========> build model")
# Run the function
input_layer = Input(shape=(self.max_length,), name='input')
x = self.embedding_layer(input_layer)
# # archi 1: f1-macro 0.3-fasttext 0.3-no embedding
# x = Dropout(0.1)(x)
# x = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(x)
# x = TimeDistributed(Dense(self.n_labels, activation='softmax'))(x)
# model = Model(inputs=input_layer, outputs=x)
# model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['acc'])
# # archi 2: f1-macro 0.35-fasttext
# x = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x)
# x_rnn = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x)
# x = add([x, x_rnn]) # residual connection to the first biLSTM
# x = TimeDistributed(Dense(self.n_labels, activation='softmax'))(x)
# model = Model(inputs=input_layer, outputs=x)
# model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['acc'])
# # archi 3: crf layer
x = Bidirectional(LSTM(units=50, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x)
x_rnn = Bidirectional(LSTM(units=50, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x)
x = add([x, x_rnn]) # residual connection to the first biLSTM
x = TimeDistributed(Dense(50, activation='relu'))(x)
crf = CRF(self.n_labels, sparse_target=True)
x = crf(x)
model = Model(inputs=input_layer, outputs=x)
model.compile(loss=crf.loss_function, optimizer='adam', metrics=[crf.accuracy])
print(model.summary())
return model
def fit(self, X_train, y_train, X_test=None, y_test=None, labels_to_idx=None):
"""
Fits the model object to the data
Args:
X_train: numpy array containing encoded training features
y_train: numpy array containing training targets
X_test: numpy array containing encoded test features
y_test: numpy array containing test targets
labels_to_idx: a dictionary containing the conversion from each class label to its id
Return:
list of values related to each datasets and loss function
"""
wts = 10 * np.ones((y_train.shape[0], y_train.shape[1]))
classes = np.argmax(y_train, axis=1)
wts[classes == labels_to_idx["pad"]] = 1
if (X_test is not None) and (y_test is not None):
# history = self.model.fit(x=X_train, y=y_train, epochs=self.n_iter, batch_size=64,
# sample_weight=wts, validation_split=0.1, verbose=2)
y_train = np.expand_dims(y_train,2)
history = self.model.fit(X_train, y_train, batch_size=32, epochs=self.n_iter,
verbose=2, validation_split=0.1)
y_hat = self.predict(X_test, labels_to_idx)
y = [self.convert_idx_to_labels(sublist, labels_to_idx) for sublist in y_test]
y_flat = [val for sublist in y for val in sublist]
y_hat_flat = [val for sublist in y_hat for val in sublist]
report = classification_report(y_flat, y_hat_flat, output_dict=True)
df = pd.DataFrame(report).transpose().round(2)
print(df)
else:
report = None
history = self.model.fit(x=X_train, y=y_train, epochs=self.n_iter, batch_size=64, verbose=2)
return history, report
def convert_idx_to_labels(self, classes_vector, labels_to_idx):
"""
Utility function to convert encoded target idx to original labels
Args:
classes_vector: target vector containing indexed classes
labels_to_idx: a dictionary containing the conversion from each class label to its id
Return:
numpy array containing the corresponding labels
"""
idx_to_labels = {v: k for k, v in labels_to_idx.items()}
return [idx_to_labels[cl] for cl in classes_vector]
def predict(self, encoded_text_list, labels_to_idx, n_tokens_list=None):
"""
Inference method
Args:
encoded_text_list: a list of texts to be evaluated. the input is assumed to have been preprocessed
n_tokens_list: number of tokens in each input string before padding
labels_to_idx: a dictionary containing the conversion from each class label to its id
Return:
numpy array containing the class for token character in the sentence
"""
probs = self.model.predict(encoded_text_list)
labels_list = []
for i in range(len(probs)):
if n_tokens_list is not None:
real_probs = probs[i][:n_tokens_list[i]]
else:
real_probs = probs[i]
classes = np.argmax(real_probs, axis=1)
labels_list.append(self.convert_idx_to_labels(classes, labels_to_idx))
return labels_list
def predict_proba(self, encoded_text_list):
"""
Inference method
Args:
encoded_text_list: a list of texts to be evaluated. the input is assumed to have been
preprocessed
Return:
numpy array containing the probabilities of a positive review for each list entry
"""
probs = self.model.predict(encoded_text_list)
return [p[0] for p in probs]
def save(self, file_name_prefix, save_folder):
"""
Saves the trained model into a h5 file
Args:
file_name_prefix: a file name prefix having the following format 'named_entity_recognition_%Y%m%d_%H%M%S'
save_folder: folder under which to save the files
Return:
None
"""
file_url_keras_model = os.path.join(save_folder, file_name_prefix + "_rnn_model.h5")
self.model.save(file_url_keras_model)
file_url_class = os.path.join(save_folder, file_name_prefix + "_rnn_class.pkl")
with open(file_url_class, 'wb') as handle:
pickle.dump(self.use_pretrained_embedding, handle)
pickle.dump(self.vocab_size, handle)
pickle.dump(self.embedding_dimension, handle)
pickle.dump(self.embeddings_path, handle)
pickle.dump(self.max_length, handle)
pickle.dump(self.word_index, handle)
print("----> model saved to %s" % file_url_keras_model)
print("----> class saved to %s" % file_url_class)
| import os
import string
import pickle
import time
from typing import List
import numpy as np
from dsg.base import BasePreprocessor, BaseRNN
from dsg.layers import Glove6BEmbedding, FastTextEmbedding
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import pandas as pd
from sklearn.metrics import classification_report
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, Input, load_model
from keras.layers import LSTM, Dense, TimeDistributed, Embedding, Bidirectional, add
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
nltk.download('punkt')
nltk.download('stopwords')
class RNNMTMPreprocessor(BasePreprocessor):
"""
Utility class performing several data preprocessing steps
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def init_from_config(self, max_sequence_length: int, vocab_size: int, validation_split: float):
self.max_sequence_length = max_sequence_length
self.vocab_size = vocab_size
self.validation_split = validation_split
self.tokenizer_obj = None
self.labels_to_idx = None
def init_from_file(self, preprocessor_file: str):
"""
Loads preprocessing tools for the model
Args:
preprocessor_file: url to saved preprocessing file
Return:
preprocessed object
"""
with open(preprocessor_file, 'rb') as f:
self.tokenizer_obj = pickle.load(f)
self.labels_to_idx = pickle.load(f)
self.max_sequence_length = pickle.load(f)
self.validation_split = pickle.load(f)
self.vocab_size = pickle.load(f)
def clean(self, X: List):
"""
Performs data cleaning operations such as removing html breaks, lower case,
remove stopwords ...
Args:
X: input reviews to be cleaned
Returns:
None
"""
print("===========> data cleaning")
review_lines = list()
for line in X:
tokens = [w.lower() for w in line]
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]
# stop_words = set(stopwords.words('english'))
# words = [word for word in words if word not in stop_words]
review_lines.append(words)
print("----> data cleaning finish")
return review_lines
def fit(self, X: List, y: List):
"""
Performs data tokenization into a format that is digestible by the model
Args:
X: list of predictors already cleaned
Returns:
tokenizer object and tokenized input features
"""
print("===========> data tokenization")
# features tokenization
self.tokenizer_obj = Tokenizer(num_words=self.vocab_size)
self.tokenizer_obj.fit_on_texts(X)
self.tokenizer_obj.word_index["pad"] = 0
flat_list = [item for sublist in y for item in sublist]
unique_labels = list(set(flat_list))
self.labels_to_idx = {t: i + 1 for i, t in enumerate(unique_labels)}
self.labels_to_idx["pad"] = 0
print("----> data fitting finish")
print("found %i unique tokens" % len(self.tokenizer_obj.word_index))
def save(self, file_name_prefix, save_folder):
"""
Stores the data preprocessor under 'models folder'
Args:
file_name_prefix: a file name prefix having the following format 'named_entity_recognition_%Y%m%d_%H%M%S'
save_folder: folder under which to save the files
Return:
None
"""
file_url = os.path.join(save_folder, file_name_prefix + "_preprocessor.pkl")
with open(file_url, 'wb') as handle:
pickle.dump(self.tokenizer_obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.labels_to_idx, handle, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.max_sequence_length, handle, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.validation_split, handle, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.vocab_size, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("----> proprocessor object saved to %s" % file_url)
def preprocess(self, X, y=None):
"""
Performs data preprocessing before inference
Args:
X: Features
y: targets
Return:
preprocessed data
"""
lines = list()
n_tokens_list = list()
for line in X:
if not isinstance(line, list):
line = word_tokenize(line)
lines.append(line)
n_tokens_list.append(len(line))
sequences = self.tokenizer_obj.texts_to_sequences(lines)
review_pad = pad_sequences(sequences, maxlen=self.max_sequence_length, padding="post",
value=self.tokenizer_obj.word_index["pad"])
print("features tensor shape ", review_pad.shape)
tokenized_labels = None
if y is not None:
tokenized_labels = [[self.labels_to_idx[word] for word in sublist] for sublist in y]
tokenized_labels = pad_sequences(tokenized_labels, maxlen=self.max_sequence_length, padding="post",
value=self.labels_to_idx["pad"])
print("labels tensor shape ", tokenized_labels.shape)
return review_pad, lines, n_tokens_list, tokenized_labels
class RNNMTM(BaseRNN):
"""
RNN Many to Many classifier, this architecture serves applications such as Named Entity Recognition, machine translation ...
"""
def __init__(self, **kwargs):
self.n_labels = None
self.labels_to_idx = None
super().__init__(**kwargs)
def init_from_files(self, h5_file:str, class_file:str):
"""
Initialize the class from a previously saved model
Args:
h5_file: url to a saved class
class_file: url to a saved class file
Return:
None
"""
self.model = load_model(h5_file, custom_objects={'CRF': CRF,
'crf_loss': crf_loss,
'crf_viterbi_accuracy': crf_viterbi_accuracy})
with open(class_file, 'rb') as f:
self.use_pretrained_embedding = pickle.load(f)
self.vocab_size = pickle.load(f)
self.embedding_dimension = pickle.load(f)
self.embeddings_path = pickle.load(f)
self.max_length = pickle.load(f)
self.word_index = pickle.load(f)
def init_from_config(self, pre_trained_embedding: bool, vocab_size:int, embedding_dimension:int, embedding_algorithm: str,
save_folder: str, n_iter: int, embeddings_path: str, max_sequence_length: int,
data_preprocessor: RNNMTMPreprocessor):
"""
Initializes the class for the first time from a given configuration file and data processor
Args:
config: .json configuration reader
data_preprocessor: preprocessing tool for the training data
Return:
None
"""
self.use_pretrained_embedding = pre_trained_embedding
self.vocab_size = vocab_size
self.embedding_dimension = embedding_dimension
self.embeddings_name = embedding_algorithm
self.embeddings_path = embeddings_path
self.max_length = max_sequence_length
self.n_iter = n_iter
self.save_folder = save_folder
self.n_labels = len(data_preprocessor.labels_to_idx)
self.word_index = data_preprocessor.tokenizer_obj.word_index
self.embedding_layer = self.build_embedding()
self.model = self.build_model()
def build_model(self):
"""
Builds an RNN model according to fixed architecture
Return:
None
"""
print("===========> build model")
# Run the function
input_layer = Input(shape=(self.max_length,), name='input')
x = self.embedding_layer(input_layer)
# # archi 1: f1-macro 0.3-fasttext 0.3-no embedding
# x = Dropout(0.1)(x)
# x = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(x)
# x = TimeDistributed(Dense(self.n_labels, activation='softmax'))(x)
# model = Model(inputs=input_layer, outputs=x)
# model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['acc'])
# # archi 2: f1-macro 0.35-fasttext
# x = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x)
# x_rnn = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x)
# x = add([x, x_rnn]) # residual connection to the first biLSTM
# x = TimeDistributed(Dense(self.n_labels, activation='softmax'))(x)
# model = Model(inputs=input_layer, outputs=x)
# model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['acc'])
# # archi 3: crf layer
x = Bidirectional(LSTM(units=50, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x)
x_rnn = Bidirectional(LSTM(units=50, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x)
x = add([x, x_rnn]) # residual connection to the first biLSTM
x = TimeDistributed(Dense(50, activation='relu'))(x)
crf = CRF(self.n_labels, sparse_target=True)
x = crf(x)
model = Model(inputs=input_layer, outputs=x)
model.compile(loss=crf.loss_function, optimizer='adam', metrics=[crf.accuracy])
print(model.summary())
return model
def fit(self, X_train, y_train, X_test=None, y_test=None, labels_to_idx=None):
"""
Fits the model object to the data
Args:
X_train: numpy array containing encoded training features
y_train: numpy array containing training targets
X_test: numpy array containing encoded test features
y_test: numpy array containing test targets
labels_to_idx: a dictionary containing the conversion from each class label to its id
Return:
list of values related to each datasets and loss function
"""
wts = 10 * np.ones((y_train.shape[0], y_train.shape[1]))
classes = np.argmax(y_train, axis=1)
wts[classes == labels_to_idx["pad"]] = 1
if (X_test is not None) and (y_test is not None):
# history = self.model.fit(x=X_train, y=y_train, epochs=self.n_iter, batch_size=64,
# sample_weight=wts, validation_split=0.1, verbose=2)
y_train = np.expand_dims(y_train,2)
history = self.model.fit(X_train, y_train, batch_size=32, epochs=self.n_iter,
verbose=2, validation_split=0.1)
y_hat = self.predict(X_test, labels_to_idx)
y = [self.convert_idx_to_labels(sublist, labels_to_idx) for sublist in y_test]
y_flat = [val for sublist in y for val in sublist]
y_hat_flat = [val for sublist in y_hat for val in sublist]
report = classification_report(y_flat, y_hat_flat, output_dict=True)
df = pd.DataFrame(report).transpose().round(2)
print(df)
else:
report = None
history = self.model.fit(x=X_train, y=y_train, epochs=self.n_iter, batch_size=64, verbose=2)
return history, report
def convert_idx_to_labels(self, classes_vector, labels_to_idx):
"""
Utility function to convert encoded target idx to original labels
Args:
classes_vector: target vector containing indexed classes
labels_to_idx: a dictionary containing the conversion from each class label to its id
Return:
numpy array containing the corresponding labels
"""
idx_to_labels = {v: k for k, v in labels_to_idx.items()}
return [idx_to_labels[cl] for cl in classes_vector]
def predict(self, encoded_text_list, labels_to_idx, n_tokens_list=None):
"""
Inference method
Args:
encoded_text_list: a list of texts to be evaluated. the input is assumed to have been preprocessed
n_tokens_list: number of tokens in each input string before padding
labels_to_idx: a dictionary containing the conversion from each class label to its id
Return:
numpy array containing the class for token character in the sentence
"""
probs = self.model.predict(encoded_text_list)
labels_list = []
for i in range(len(probs)):
if n_tokens_list is not None:
real_probs = probs[i][:n_tokens_list[i]]
else:
real_probs = probs[i]
classes = np.argmax(real_probs, axis=1)
labels_list.append(self.convert_idx_to_labels(classes, labels_to_idx))
return labels_list
def predict_proba(self, encoded_text_list):
"""
Inference method
Args:
encoded_text_list: a list of texts to be evaluated. the input is assumed to have been
preprocessed
Return:
numpy array containing the probabilities of a positive review for each list entry
"""
probs = self.model.predict(encoded_text_list)
return [p[0] for p in probs]
def save(self, file_name_prefix, save_folder):
"""
Saves the trained model into a h5 file
Args:
file_name_prefix: a file name prefix having the following format 'named_entity_recognition_%Y%m%d_%H%M%S'
save_folder: folder under which to save the files
Return:
None
"""
file_url_keras_model = os.path.join(save_folder, file_name_prefix + "_rnn_model.h5")
self.model.save(file_url_keras_model)
file_url_class = os.path.join(save_folder, file_name_prefix + "_rnn_class.pkl")
with open(file_url_class, 'wb') as handle:
pickle.dump(self.use_pretrained_embedding, handle)
pickle.dump(self.vocab_size, handle)
pickle.dump(self.embedding_dimension, handle)
pickle.dump(self.embeddings_path, handle)
pickle.dump(self.max_length, handle)
pickle.dump(self.word_index, handle)
print("----> model saved to %s" % file_url_keras_model)
print("----> class saved to %s" % file_url_class)
| en | 0.717887 | Utility class performing several data preprocessing steps Loads preprocessing tools for the model Args: preprocessor_file: url to saved preprocessing file Return: preprocessed object Performs data cleaning operations such as removing html breaks, lower case, remove stopwords ... Args: X: input reviews to be cleaned Returns: None # stop_words = set(stopwords.words('english')) # words = [word for word in words if word not in stop_words] Performs data tokenization into a format that is digestible by the model Args: X: list of predictors already cleaned Returns: tokenizer object and tokenized input features # features tokenization Stores the data preprocessor under 'models folder' Args: file_name_prefix: a file name prefix having the following format 'named_entity_recognition_%Y%m%d_%H%M%S' save_folder: folder under which to save the files Return: None Performs data preprocessing before inference Args: X: Features y: targets Return: preprocessed data RNN Many to Many classifier, this architecture serves applications such as Named Entity Recognition, machine translation ... 
Initialize the class from a previously saved model Args: h5_file: url to a saved class class_file: url to a saved class file Return: None Initializes the class for the first time from a given configuration file and data processor Args: config: .json configuration reader data_preprocessor: preprocessing tool for the training data Return: None Builds an RNN model according to fixed architecture Return: None # Run the function # # archi 1: f1-macro 0.3-fasttext 0.3-no embedding # x = Dropout(0.1)(x) # x = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(x) # x = TimeDistributed(Dense(self.n_labels, activation='softmax'))(x) # model = Model(inputs=input_layer, outputs=x) # model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['acc']) # # archi 2: f1-macro 0.35-fasttext # x = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x) # x_rnn = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(x) # x = add([x, x_rnn]) # residual connection to the first biLSTM # x = TimeDistributed(Dense(self.n_labels, activation='softmax'))(x) # model = Model(inputs=input_layer, outputs=x) # model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['acc']) # # archi 3: crf layer # residual connection to the first biLSTM Fits the model object to the data Args: X_train: numpy array containing encoded training features y_train: numpy array containing training targets X_test: numpy array containing encoded test features y_test: numpy array containing test targets labels_to_idx: a dictionary containing the conversion from each class label to its id Return: list of values related to each datasets and loss function # history = self.model.fit(x=X_train, y=y_train, epochs=self.n_iter, batch_size=64, # sample_weight=wts, validation_split=0.1, verbose=2) Utility function to convert encoded target idx to original labels Args: classes_vector: target vector 
containing indexed classes labels_to_idx: a dictionary containing the conversion from each class label to its id Return: numpy array containing the corresponding labels Inference method Args: encoded_text_list: a list of texts to be evaluated. the input is assumed to have been preprocessed n_tokens_list: number of tokens in each input string before padding labels_to_idx: a dictionary containing the conversion from each class label to its id Return: numpy array containing the class for token character in the sentence Inference method Args: encoded_text_list: a list of texts to be evaluated. the input is assumed to have been preprocessed Return: numpy array containing the probabilities of a positive review for each list entry Saves the trained model into a h5 file Args: file_name_prefix: a file name prefix having the following format 'named_entity_recognition_%Y%m%d_%H%M%S' save_folder: folder under which to save the files Return: None | 2.275701 | 2 |
plugins/FuzzyMiner.py | thieminho/plugintest | 0 | 6614908 | import copy
import os
import sys
from datetime import datetime
from json import loads, dumps
from Levenshtein import distance
import numpy as np
import xmltodict
from PyQt5 import QtCore
from PyQt5.QtWidgets import QVBoxLayout, QLabel, QDialog, QScrollArea, QGridLayout, QCheckBox, \
QSlider, QPushButton, QHBoxLayout, QButtonGroup, QRadioButton
class Plugin:
    """'Fuzzy Miner' plugin.

    Shows a Qt dialog to collect fuzzy-mining parameters, persists them to
    ``Parameters/param_file_fm.txt`` and, on :meth:`execute`, runs the fuzzy
    miner over the selected log file and applies the configuration.
    """

    # Metric names in the fixed order shared by the dialog widgets, the
    # parameter file and the default configuration; the second field is the
    # metric type expected by ``MetricConfig``.
    _METRIC_SPECS = [
        ('proximity_correlation_binary', 'binary'),
        ('endpoint_correlation_binary', 'binary'),
        ('originator_correlation_binary', 'binary'),
        ('datatype_correlation_binary', 'binary'),
        ('datavalue_correlation_binary', 'binary'),
        ('routing_significance_unary', 'unary'),
        ('distance_significance_binary', 'binary'),
        ('frequency_significance_unary', 'unary'),
        ('frequency_significance_binary', 'binary'),
    ]

    def __init__(self):
        print('Plugin init ("Fuzzy Miner")')
        # The host framework inspects these attributes.
        self.hasParameters = True
        self.myDialog = self.CustomDialog()

    class CustomDialog(QDialog):
        """Scrollable parameter dialog with one widget group per setting."""

        def __init__(self, *args, **kwargs):
            super(Plugin.CustomDialog, self).__init__(*args, **kwargs)
            self.resize(400, 500)
            self.layout = QVBoxLayout()
            self.setWindowTitle("Parameters")

            # Ok / Cancel buttons.
            self.acc_button = QPushButton('Ok')
            self.acc_button.clicked.connect(self.close_window)
            self.cancel_button = QPushButton('Cancel')
            self.cancel_button.clicked.connect(self.close_cancel)
            self.buttonBox = QHBoxLayout()
            self.buttonBox.addWidget(self.acc_button)
            self.buttonBox.addWidget(self.cancel_button)

            # Scrollable area hosting all parameter groups.
            self.scrollArea = QScrollArea(self)
            self.scrollArea.setWidgetResizable(True)
            self.scrollAreaWidgetContents = QDialog()
            self.vlayout = QVBoxLayout(self.scrollAreaWidgetContents)
            self.vlayout.setSpacing(0)
            self.vlayout.setContentsMargins(0, 0, 0, 0)
            self.scrollArea.setWidget(self.scrollAreaWidgetContents)
            self.layout.addWidget(self.scrollArea)
            self.layout.addLayout(self.buttonBox)
            self.setLayout(self.layout)

            # One slider group per metric, in the canonical order shared with
            # the parameter-file reader (Plugin._METRIC_SPECS).
            self.metrics = [Plugin.CustomDialog.MetricGrid(name)
                            for name, _ in Plugin._METRIC_SPECS]
            for metric in self.metrics:
                self.vlayout.addWidget(metric)

            # Edge filter group.
            self.edge_filter = self.EdgeFilter()
            self.vlayout.addWidget(self.edge_filter)

            # Attenuation group.
            self.attenuation_filter = self.AttenuationGrid()
            self.vlayout.addWidget(self.attenuation_filter)

            # Node filter (significance cutoff) group.
            hbox = QHBoxLayout()
            self.node_sig_cutoff_slider = QSlider()
            self.node_sig_cutoff_slider.setRange(0, 100)
            self.acc_sig_cutoff_val = QLabel('0')
            self.node_sig_cutoff_slider.valueChanged.connect(
                lambda val: self.acc_sig_cutoff_val.setText(str(val / 100)))
            # BUG FIX: the slider and its value label were created but never
            # added to the layout, leaving the node filter invisible.
            hbox.addWidget(self.node_sig_cutoff_slider)
            hbox.addWidget(self.acc_sig_cutoff_val)
            self.vlayout.addLayout(hbox)

            # Concurrency filter group.
            self.concurency_filter = self.ConcurencyFilter()
            self.vlayout.addWidget(self.concurency_filter)

        def close_window(self):
            """Serialize the dialog state to the parameter file and close.

            File layout, one line each:
              1. node significance cutoff (float string),
              2. edge filter: "<fuzzy?> <cutoff> <utility>",
              3. concurrency filter: "<active?> <preserve> <ratio>",
              4. attenuation: "<nth-root?> <echelons> <max distance>",
              5. all metrics as ';'-terminated "<weight> <inverted?> <active?>"
                 triples, in Plugin._METRIC_SPECS order.
            """
            print(f'Save Configuration to File and close dialog')
            os.makedirs(os.path.dirname('Parameters/param_file_fm.txt'), exist_ok=True)
            with open('Parameters/param_file_fm.txt', 'w') as file:
                # Node filter: significance cutoff in [0, 1].
                file.write(self.acc_sig_cutoff_val.text() + '\n')
                # Edge filter.
                fuzzy_or_best = self.edge_filter.fuzzy_button.isChecked()
                file.write(f'{str(int(fuzzy_or_best))} {str(self.edge_filter.cut_off_acc_val.text())} '
                           f'{str(self.edge_filter.utility_acc_val.text())}\n')
                # Concurrency filter (slider values are rescaled to [0, 1]).
                is_concurency = self.concurency_filter.fiter_concurecy.isChecked()
                file.write(f'{str(int(is_concurency))} {str(self.concurency_filter.preserve_slider.value()/100)} '
                           f'{str(self.concurency_filter.ratio_slider.value()/100)}\n')
                # Attenuation.
                is_nthroot = self.attenuation_filter.nth_root.isChecked()
                file.write(f'{str(int(is_nthroot))} {self.attenuation_filter.nth_label.text()} '
                           f'{self.attenuation_filter.max_ev_dis_acc_val.text()}\n')
                # Metrics, each entry terminated by ';'.
                for metric in self.metrics:
                    file.write(
                        f'{metric.acc_val.text()} {str(int(metric.inverted_button.isChecked()))} '
                        f'{str(int(metric.active_button.isChecked()))}' + ';')
            self.close()

        def close_cancel(self):
            """Write a 'default' marker so the plugin keeps its default config."""
            os.makedirs(os.path.dirname('Parameters/param_file_fm.txt'), exist_ok=True)
            with open('Parameters/param_file_fm.txt', 'w') as file:
                file.write('default')
            self.close()

        class ConcurencyFilter(QDialog):
            """Widget group: enable flag plus 'preserve' and 'ratio' sliders.

            Note: the class and attribute names keep their historical
            misspellings because sibling code references them.
            """

            def __init__(self):
                super().__init__()
                vbox = QVBoxLayout()
                vbox.addWidget(QLabel('Concurency Filter'))
                self.fiter_concurecy = QCheckBox('Filter concurency')
                vbox.addWidget(self.fiter_concurecy)
                # 'Preserve' slider, displayed as value / 100.
                preserve_box = QHBoxLayout()
                acc_p_val = QLabel('0')
                self.preserve_slider = QSlider(QtCore.Qt.Horizontal)
                self.preserve_slider.setRange(0, 100)
                self.preserve_slider.valueChanged.connect(lambda val: acc_p_val.setText(str(val / 100)))
                preserve_box.addWidget(self.preserve_slider)
                preserve_box.addWidget(acc_p_val)
                vbox.addLayout(preserve_box)
                # 'Ratio' slider, displayed as value / 100.
                ratio_box = QHBoxLayout()
                acc_r_val = QLabel('0')
                self.ratio_slider = QSlider(QtCore.Qt.Horizontal)
                self.ratio_slider.setRange(0, 100)
                self.ratio_slider.valueChanged.connect(lambda val: acc_r_val.setText(str(val / 100)))
                ratio_box.addWidget(self.ratio_slider)
                ratio_box.addWidget(acc_r_val)
                vbox.addLayout(ratio_box)
                self.setLayout(vbox)

        class EdgeFilter(QDialog):
            """Widget group: best/fuzzy edge choice plus cutoff and utility sliders."""

            def __init__(self):
                super().__init__()
                vbox = QVBoxLayout()
                vbox.addWidget(QLabel('Edge Filter'))
                grid = QGridLayout()
                vbox.addLayout(grid)
                # Cutoff slider, displayed as value / 100.
                self.cut_off_label = QLabel('CutOff')
                self.cut_off_slider = QSlider(QtCore.Qt.Horizontal)
                self.cut_off_slider.setRange(0, 100)
                self.cut_off_acc_val = QLabel('0')
                self.cut_off_slider.valueChanged.connect(lambda val: self.cut_off_acc_val.setText(str(val / 100)))
                # Utility slider, displayed as value / 100.
                self.utility_label = QLabel('Utility')
                self.utility_slider = QSlider(QtCore.Qt.Horizontal)
                self.utility_slider.setRange(0, 100)
                self.utility_acc_val = QLabel('0')
                self.utility_slider.valueChanged.connect(lambda val: self.utility_acc_val.setText(str(val / 100)))
                # Mutually exclusive edge-transform choice.
                self.buttons = QButtonGroup()
                self.best_button = QRadioButton('Best edges')
                self.fuzzy_button = QRadioButton('Fuzzy edges')
                self.buttons.addButton(self.best_button)
                self.buttons.addButton(self.fuzzy_button)
                grid.addWidget(self.best_button, 0, 0)
                grid.addWidget(self.fuzzy_button, 0, 1)
                grid.addWidget(self.cut_off_label, 1, 0)
                grid.addWidget(self.cut_off_acc_val, 1, 1)
                grid.addWidget(self.cut_off_slider, 2, 0, 1, 2)
                grid.addWidget(self.utility_label, 3, 0)
                grid.addWidget(self.utility_acc_val, 3, 1)
                grid.addWidget(self.utility_slider, 4, 0, 1, 2)
                self.setLayout(vbox)

        class MetricGrid(QDialog):
            """Widget group for one metric: active/inverted flags plus weight slider."""

            def __init__(self, name):
                super().__init__()
                grid = QGridLayout()
                self.label = QLabel(name)
                grid.addWidget(self.label, 0, 0)
                self.active_button = QCheckBox('Active?')
                self.active_button.setChecked(True)
                grid.addWidget(self.active_button, 0, 1)
                self.inverted_button = QCheckBox('Inverted?')
                self.inverted_button.setChecked(False)
                grid.addWidget(self.inverted_button, 0, 2)
                # Weight slider, displayed as value / 100.
                self.acc_val = QLabel('0')
                self.slider = QSlider(QtCore.Qt.Horizontal)
                self.slider.setRange(0, 100)
                self.slider.valueChanged.connect(lambda val: self.acc_val.setText(str(val / 100)))
                grid.addWidget(self.slider, 1, 0, 1, 2)
                grid.addWidget(self.acc_val, 1, 2)
                self.setLayout(grid)

        class AttenuationGrid(QDialog):
            """Widget group: maximal event distance plus attenuation-kind choice."""

            def __init__(self):
                super().__init__()
                grid = QGridLayout()
                # Maximal event distance, displayed as the raw integer value.
                self.label = QLabel('Maximal event distance')
                self.max_ev_dis = QSlider(QtCore.Qt.Horizontal)
                self.max_ev_dis.setRange(0, 100)
                self.max_ev_dis_acc_val = QLabel('0')
                self.max_ev_dis.valueChanged.connect(lambda val: self.max_ev_dis_acc_val.setText(str(val)))
                self.second_label = QLabel('Select attenuation to use:')
                # Mutually exclusive attenuation choice.
                self.buttonbox = QButtonGroup()
                self.nth_root = QRadioButton('Nth root with radical')
                self.linear_attenuation = QRadioButton('Linear attenuation')
                # Radical slider, displayed as value / 10.
                self.nth_root_slider = QSlider(QtCore.Qt.Horizontal)
                self.nth_root_slider.setRange(0, 100)
                self.nth_label = QLabel('0')
                self.nth_root_slider.valueChanged.connect(lambda val: self.nth_label.setText(str(val / 10)))
                grid.addWidget(QLabel('Attenuation'), 0, 0)
                grid.addWidget(self.label, 1, 0)
                grid.addWidget(self.max_ev_dis_acc_val, 1, 1)
                grid.addWidget(self.max_ev_dis, 2, 1, 1, 2)
                grid.addWidget(self.second_label, 3, 0)
                grid.addWidget(self.nth_root, 4, 1)
                grid.addWidget(self.linear_attenuation, 4, 0)
                grid.addWidget(self.nth_root_slider, 5, 0, 1, 2)
                self.setLayout(grid)

    def fill_my_parameters(self):
        """Load saved dialog parameters (if any) into ``self.config``.

        Parses the file written by :meth:`CustomDialog.close_window`; on any
        read or parse problem (missing file, corrupted content, or the
        'default' marker written by Cancel) the default configuration built
        in :meth:`execute` is kept. The parameter file is removed afterwards
        in either case.
        """
        try:
            with open('Parameters/param_file_fm.txt', 'r') as file:
                params = file.read().split('\n')
            # BUG FIX: the cutoff is written as a float string such as
            # "0.45", so it must be parsed with float(), not int().
            node_filter = NodeFilter(cut_off=float(params[0]))
            edge_params = params[1].split(' ')
            # NOTE(review): the file stores "<fuzzy?> <cutoff> <utility>";
            # cutoff is mapped to sc_ratio and utility to preserve — confirm
            # against EdgeFilter's actual signature.
            edge_filter = EdgeFilter(edge_transform=int(edge_params[0]),
                                     sc_ratio=float(edge_params[1]),
                                     preserve=float(edge_params[2]))
            conc_params = params[2].split(' ')
            # BUG FIX: bool('0') is True because the string is non-empty;
            # the '0'/'1' flags must be compared explicitly.
            concurrency_filter = ConcurrencyFilter(filter_concurrency=conc_params[0] == '1',
                                                   preserve=float(conc_params[1]),
                                                   offset=float(conc_params[2]))
            attenuation_params = params[3].split(' ')
            if attenuation_params[0] == '0':
                attenuation_filter = LinearAttenuation(buffer_size=int(attenuation_params[2]),
                                                       num_of_echelons=float(attenuation_params[1]))
            else:
                attenuation_filter = NRootAttenuation(buffer_size=int(attenuation_params[2]),
                                                      num_of_echelons=float(attenuation_params[1]))
            # The trailing ';' produces an empty last element; drop it.
            metrics = params[4].split(';')[:-1]
            # BUG FIX: each entry is a "<weight> <inverted> <active>" string;
            # it must be split into fields (indexing it picked single
            # characters), and each metric keeps its own name and type
            # instead of all being 'proximity_correlation_binary'/'binary'.
            metrics_list = []
            for (name, metric_type), raw in zip(self._METRIC_SPECS, metrics):
                weight, inverted, active = raw.split(' ')
                metrics_list.append(MetricConfig(name,
                                                 metric_type=metric_type,
                                                 weight=float(weight),
                                                 invert=inverted == '1',
                                                 include=active == '1'))
            self.config = Configuration(FilterConfig(node_filter=node_filter,
                                                     edge_filter=edge_filter,
                                                     concurrency_filter=concurrency_filter),
                                        metric_configs=metrics_list,
                                        attenuation=attenuation_filter,
                                        maximal_distance=int(attenuation_params[2]))
        except IOError:
            print('File don\'t exists')
        except (ValueError, IndexError):
            # Covers the 'default' marker written by Cancel as well as a
            # corrupted file: keep the default configuration from execute().
            print('Could not parse saved parameters, keeping defaults')
        if os.path.isfile('Parameters/param_file_fm.txt'):
            try:
                os.remove('Parameters/param_file_fm.txt')
            except OSError as e:
                print(f'Failed with: {e.strerror}')
        else:
            print('Did not find path')

    def execute(self, *args, **kwargs):
        """Run the fuzzy miner on the log file given as ``args[0]``.

        Builds a default configuration, overrides it with any parameters the
        dialog saved, applies the configuration and returns
        ("success", <result path>).
        """
        self.fullpath = args[0]
        with open(self.fullpath, 'r') as log_file:
            log = log_file.read()
        fm = FuzzyMiner()
        # NOTE: FuzzyMiner deliberately exposes a triple-underscore
        # initializer that must be called explicitly.
        fm.___init___(log, self.fullpath)
        # Default configuration, used when no parameter file was saved.
        self.config = Configuration(
            FilterConfig(node_filter=NodeFilter(),
                         edge_filter=EdgeFilter(),
                         concurrency_filter=ConcurrencyFilter()),
            [MetricConfig(name=name, metric_type=metric_type)
             for name, metric_type in self._METRIC_SPECS],
            NRootAttenuation(buffer_size=5, num_of_echelons=2.7),
            maximal_distance=5)
        self.fill_my_parameters()
        fm.apply_config(self.config)
        return "success", fm.full_path
class FuzzyMiner:
def ___init___(self, log, path):
self.name = path[-1]
self.path = path
self.log = self.parse_log(log)
self.nodes = None
self.clusters = None
self.edges = None
self.node_indices = None
self.num_of_nodes = None
self.fm_message = ''
self.extract_node_info()
self.metric_settings = None
self.unary_node_frequency_values = None
self.unary_node_frequency_normalized_values = None
self.binary_edge_frequency_values = None
self.binary_edge_frequency_normalized_values = None
self.binary_corr_divisors = None
self.unary_simple_aggregate_normalized_values = None
self.binary_simple_aggregate_normalized_values = None
self.binary_multi_aggregate_normalized_values = None
self.binary_corr_proximity_values = None
self.binary_corr_proximity_normalized_values = None
self.binary_corr_endpoint_values = None
self.binary_corr_endpoint_normalized_values = None
self.binary_corr_originator_values = None
self.binary_corr_originator_normalized_values = None
self.binary_corr_datatype_values = None
self.binary_corr_datatype_normalized_values = None
self.binary_corr_datavalue_values = None
self.binary_corr_datavalue_normalized_values = None
self.unary_derivative_routing_values = None
self.unary_derivative_routing_normalized_values = None
self.binary_derivative_distance_values = None
self.binary_derivative_distance_normalized_values = None
self.unary_weighted_values = None
self.binary_sig_weighted_values = None
self.binary_corr_weighted_values = None
# Clustering
self.node_cluster_mapping = list()
self.cluster_dict = dict()
self.fm_edges_dict = dict()
self.fm_clusters = list()
self.fm_edges = list()
self.fm_nodes = list()
def init_lists(self):
s = self.num_of_nodes
self.unary_node_frequency_values = [0 for _ in range(s)]
self.unary_node_frequency_normalized_values = [0.0 for _ in range(s)]
self.binary_edge_frequency_values = [[0 for _ in range(s)] for _ in range(s)]
self.binary_edge_frequency_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_divisors = [[0.0 for _ in range(s)] for _ in range(s)]
self.unary_simple_aggregate_normalized_values = [0.0 for _ in range(s)]
self.binary_simple_aggregate_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_multi_aggregate_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["proximity_correlation_binary"][0]:
self.binary_corr_proximity_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_proximity_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["endpoint_correlation_binary"][0]:
self.binary_corr_endpoint_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_endpoint_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["originator_correlation_binary"][0]:
self.binary_corr_originator_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_originator_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["datatype_correlation_binary"][0]:
self.binary_corr_datatype_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_datatype_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["datavalue_correlation_binary"][0]:
self.binary_corr_datavalue_values = [[0 for _ in range(s)] for _ in range(s)]
self.binary_corr_datavalue_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["routing_significance_unary"][0]:
self.unary_derivative_routing_values = [0 for _ in range(s)]
self.unary_derivative_routing_normalized_values = [0 for _ in range(s)]
if self.metric_settings["distance_significance_binary"][0]:
self.binary_derivative_distance_values = [([0 for _ in range(s)]) for _ in range(s)]
self.binary_derivative_distance_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.unary_weighted_values = [0 for _ in range(s)]
self.binary_sig_weighted_values = [[0 for _ in range(s)] for _ in range(s)]
self.binary_corr_weighted_values = [[0 for _ in range(s)] for _ in range(s)]
def apply_config(self, config):
    """Run the full mining pipeline under *config* and return the filter result.

    Stores the metric settings as ``{name: (include, invert, weight)}``,
    computes raw, normalized, aggregate, derivative and weighted metrics,
    then applies the filter chain.
    """
    self.config = config
    self.metric_settings = {
        conf.name: (conf.include, conf.invert, conf.weight)
        for conf in self.config.metric_configs
    }
    self.init_lists()
    self.extract_primary_metrics()
    self.normalize_primary_metrics()
    self.extract_aggregates()
    self.extract_derivative_metrics()
    self.normalize_derivative_metrics()
    self.extract_weighted_metrics()
    return self.apply_filters()
def apply_filters(self):
    """Entry point of the filter chain: concurrency -> edge -> node."""
    concurrency = self.config.filter_config.concurrency_filter
    return self.apply_concurrency_filter(concurrency)
def apply_concurrency_filter(self, concurrency_filter):
    """Install and apply *concurrency_filter*, then hand off to the edge filter stage."""
    filters = self.config.filter_config
    filters.concurrency_filter = concurrency_filter
    self.apply_concurrency_filter_helper(concurrency_filter)
    return self.apply_edge_filter(filters.edge_filter)
def apply_concurrency_filter_helper(self, concurrency_filter):
self.config.filter_config.concurrency_filter = concurrency_filter
self.concurrency_filter_resultant_binary_values = copy.deepcopy(self.binary_sig_weighted_values)
self.concurrency_filter_resultant_binary_corr_values = copy.deepcopy(
self.binary_corr_weighted_values)
if self.config.filter_config.concurrency_filter.filter_concurrency:
sz = self.num_of_nodes
for i in range(0, sz):
for j in range(0, i):
self.process_relation_pair(i, j)
def apply_edge_filter(self, edge_filter):
    """Install and apply *edge_filter*, then hand off to the node filter stage."""
    filters = self.config.filter_config
    filters.edge_filter = edge_filter
    self.apply_edge_filter_helper(edge_filter)
    return self.apply_node_filter(filters.node_filter)
def apply_edge_filter_helper(self, edge_filter):
self.config.filter_config.edge_filter = edge_filter
self.edge_filter_resultant_binary_values = copy.deepcopy(self.concurrency_filter_resultant_binary_values)
self.edge_filter_resultant_binary_corr_values = copy.deepcopy(
self.concurrency_filter_resultant_binary_corr_values)
sz = self.num_of_nodes
self.preserve_mask = [[False for x in range(sz)] for y in range(sz)]
if self.config.filter_config.edge_filter.edge_transform == 1:
if self.config.filter_config.edge_filter.preserve == 0.0:
self.config.filter_config.edge_filter.preserve = 0.001
for i in range(0, sz):
self.process_node_edges_fuzzy_filter(i)
else:
for i in range(0, sz):
self.process_node_edges_best_filter(i)
for i in range(0, sz):
for j in range(0, sz):
if not self.preserve_mask[i][j]:
self.edge_filter_resultant_binary_values[i][j] = 0.0
self.edge_filter_resultant_binary_corr_values[i][j] = 0.0
def apply_node_filter(self, node_filter):
    """Install and apply *node_filter*, write out the graph, and return the result message."""
    self.config.filter_config.node_filter = node_filter
    self.apply_node_filter_helper(node_filter)
    self.finalize_graph_data()
    # Visualization hook intentionally disabled; all graph data is ready here.
    return self.fm_message
def apply_node_filter_helper(self, node_filter):
    """Snapshot the weighted/edge-filtered matrices for the node stage, then clusterize."""
    self.config.filter_config.node_filter = node_filter
    snapshots = (
        ("node_filter_resultant_unary_values", self.unary_weighted_values),
        ("node_filter_resultant_binary_values", self.edge_filter_resultant_binary_values),
        ("node_filter_resultant_binary_corr_values", self.edge_filter_resultant_binary_corr_values),
    )
    for attr, source in snapshots:
        setattr(self, attr, copy.deepcopy(source))
    self.clusterize()
def finalize_graph_data(self):
    """Write the mined graph (nodes, clusters, edges) to a timestamped CSV.

    The file lands in ``Results/Fuzzy_Miner/<timestamp>/<logname>.csv``
    next to this module; the absolute path is recorded in
    ``self.full_path`` for the caller.

    Fixed: removed the no-op self-assignments (``self.fm_nodes =
    self.fm_nodes`` etc.) and the leftover debug ``print`` block that
    dumped every node/edge/matrix to stdout.
    NOTE(review): ``self.path.split('/')`` treats ``path`` as a string,
    while ``__init__`` indexes it like a sequence — confirm the expected
    type of ``path`` with callers.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    dir_path = dir_path.replace("\\", "/") + '/Results/Fuzzy_Miner/' + datetime.now().strftime(
        "%d_%m_%Y_%H_%M_%S") + "/"
    self.filename = self.path.split('/')[-1]
    self.full_path = os.path.join(dir_path, f"{self.filename}.csv")
    os.makedirs(os.path.dirname(self.full_path), exist_ok=True)
    with open(self.full_path, 'w') as result:
        result.write('type,id,significance,from,to\n')
        for node in self.fm_nodes:
            result.write(f'n,{node.label.split("@")[0]},{node.significance:.2f}\n')
        for cluster in self.fm_clusters:
            result.write(f'c,{cluster.label.split("@")[0]}_{cluster.index},{cluster.significance:.2f}\n')
        for i, edge in enumerate(self.fm_edges):
            # Resolve each endpoint among both primitive nodes and clusters.
            s = next(x for x in self.fm_nodes + self.fm_clusters if x.index == edge.source)
            t = next(x for x in self.fm_nodes + self.fm_clusters if x.index == edge.target)
            s_label = s.label.split("@")[0]
            t_label = t.label.split('@')[0]
            # Cluster labels are disambiguated with their index.
            if s.node_type == 'cluster':
                s_label += f'_{s.index}'
            if t.node_type == 'cluster':
                t_label += f'_{t.index}'
            result.write(f'e,e{i},{edge.significance:.2f},{s_label},'
                         f'{t_label}\n')
def process_node_edges_fuzzy_filter(self, idx):
    """Edge filter, "fuzzy" variant, for the single node *idx*.

    Scores every incoming and outgoing edge of *idx* as a blend of
    significance and correlation (weighted by ``sc_ratio``), then marks in
    ``self.preserve_mask`` all edges whose score reaches the cutoff
    interpolated between the observed min and max by ``preserve``.
    """
    sz = self.num_of_nodes
    # NOTE(review): sys.float_info.min is the smallest *positive* float,
    # not -inf; if no edge has positive significance these seeds survive
    # and the limits below become degenerate. Harmless for the
    # non-negative scores used here, but worth confirming.
    min_in_val = sys.float_info.max
    max_in_val = sys.float_info.min
    min_out_val = sys.float_info.max
    max_out_val = sys.float_info.min
    in_values = [0.0 for _ in range(self.num_of_nodes)]
    out_values = [0.0 for _ in range(self.num_of_nodes)]
    ignore_self_loops = self.config.filter_config.edge_filter.ignore_self_loops
    sc_ratio = self.config.filter_config.edge_filter.sc_ratio
    for i in range(0, sz):
        if ignore_self_loops and i == idx:
            continue
        # Incoming edge i -> idx: blend significance with correlation.
        significance = self.concurrency_filter_resultant_binary_values[i][idx]
        if significance > 0.0:
            correlation = self.concurrency_filter_resultant_binary_corr_values[i][idx]
            in_values[i] = significance * sc_ratio + correlation * (1.0 - sc_ratio)
            if in_values[i] > max_in_val:
                max_in_val = in_values[i]
            if in_values[i] < min_in_val:
                min_in_val = in_values[i]
        else:
            in_values[i] = 0.0
        # Outgoing edge idx -> i: same blending.
        significance = self.concurrency_filter_resultant_binary_values[idx][i]
        if significance > 0.0:
            correlation = self.concurrency_filter_resultant_binary_corr_values[idx][i]
            out_values[i] = significance * sc_ratio + correlation * (1.0 - sc_ratio)
            if out_values[i] > max_out_val:
                max_out_val = out_values[i]
            if out_values[i] < min_out_val:
                min_out_val = out_values[i]
        else:
            out_values[i] = 0.0
    if self.config.filter_config.edge_filter.interpret_abs:
        # Absolute interpretation: share one min/max across both directions.
        max_in_val = max(max_in_val, max_out_val)
        max_out_val = max_in_val
        min_in_val = min(min_in_val, min_out_val)
        min_out_val = min_in_val
    # preserve == 1.0 keeps everything (limit == min); smaller values keep
    # only the strongest edges.
    in_limit = max_in_val - (max_in_val - min_in_val) * self.config.filter_config.edge_filter.preserve
    out_limit = max_out_val - (max_out_val - min_out_val) * self.config.filter_config.edge_filter.preserve
    for i in range(0, sz):
        if ignore_self_loops and i == idx:
            continue
        if in_values[i] >= in_limit:
            self.preserve_mask[i][idx] = True
        if out_values[i] >= out_limit:
            self.preserve_mask[idx][i] = True
def process_node_edges_best_filter(self, idx):
best_pre = -1
best_succ = -1
best_pre_sig = 0.0
best_succ_sig = 0.0
sz = self.num_of_nodes
for i in range(0, sz):
if i == idx and self.config.filter_config.edge_filter.ignore_self_loops:
continue
pre_sig = self.concurrency_filter_resultant_binary_values[i][idx]
if pre_sig > best_pre_sig:
best_pre_sig = pre_sig
best_pre = i
succ_sig = self.concurrency_filter_resultant_binary_values[idx][i]
if succ_sig > best_succ_sig:
best_succ_sig = succ_sig
best_succ = i
if best_pre >= 0:
self.preserve_mask[best_pre][idx] = True
if best_succ >= 0:
self.preserve_mask[idx][best_succ] = True
def process_relation_pair(self, x, y):
sig_fwd = self.binary_sig_weighted_values[x][y]
sig_bwd = self.binary_sig_weighted_values[y][x]
if sig_fwd > 0.0 and sig_bwd > 0.0:
rel_imp_AB = self.get_relative_imp(x, y)
rel_imp_BA = self.get_relative_imp(y, x)
if rel_imp_AB > self.config.filter_config.concurrency_filter.preserve and rel_imp_BA > self.config.filter_config.concurrency_filter.preserve:
return
else:
ratio = min(rel_imp_AB, rel_imp_BA) / max(rel_imp_AB, rel_imp_BA)
if ratio < self.config.filter_config.concurrency_filter.offset:
if rel_imp_AB > rel_imp_BA:
self.concurrency_filter_resultant_binary_values[y][x] = 0.0
self.concurrency_filter_resultant_binary_corr_values[y][x] = 0.0
else:
self.concurrency_filter_resultant_binary_values[x][y] = 0.0
self.concurrency_filter_resultant_binary_corr_values[x][y] = 0.0
else:
self.concurrency_filter_resultant_binary_values[x][y] = 0.0
self.concurrency_filter_resultant_binary_corr_values[x][y] = 0.0
self.concurrency_filter_resultant_binary_values[y][x] = 0.0
self.concurrency_filter_resultant_binary_corr_values[y][x] = 0.0
def get_relative_imp(self, x, y):
sig_ref = self.binary_sig_weighted_values[x][y]
sig_source_out = 0.0
sig_target_in = 0.0
sz = self.num_of_nodes
for i in range(0, sz):
if i != x:
sig_source_out += self.binary_sig_weighted_values[x][i]
if i != y:
sig_target_in += self.binary_sig_weighted_values[i][y]
return (sig_ref / sig_source_out) + (sig_ref / sig_target_in)
def parse_log(self, log):
    """Parse an XES XML log (string) into a list of traces of event dicts.

    Each trace becomes a list of events; each event a flat
    ``{attribute_key: attribute_value}`` dict built from the typed XES
    child elements.  All values stay as the strings extracted from the
    XML attributes.
    """
    # XES typed-attribute element names. NOTE(review): the 0 / 0.0
    # entries look like they were meant to be 'int' / 'float' — as
    # written, a plain int can never match the string keys produced by
    # xmltodict, so integer/float attributes are silently dropped.
    data_types = ['string', 0, 'date', 0.0, 'boolean', 'id']
    log = xmltodict.parse(log)
    # Round-trip through JSON to turn OrderedDicts into plain dicts/lists.
    log = loads(dumps(log))
    traces = []
    for trace in log['log']['trace']:
        attributes = list(trace.keys())
        # Trace-level attributes (collected here but not used further).
        attributes_dictionary = {}
        for data_type in data_types:
            if data_type in attributes:
                if type(trace[data_type]) == list:
                    for dictionary in trace[data_type]:
                        attributes_dictionary[dictionary['@key']] = dictionary['@value']
                else:
                    attributes_dictionary[trace[data_type]['@key']] = trace[data_type]['@value']
        trace_events = []
        # A single-event trace parses to a dict; normalize to a list.
        if type(trace['event']) == dict:
            trace['event'] = [trace['event']]
        for event in trace['event']:
            event_attributes = list(event.keys())
            event_dict = {}
            for data_type in data_types:
                if data_type in event_attributes:
                    if type(event[data_type]) == list:
                        for dictionary in event[data_type]:
                            event_dict[dictionary['@key']] = dictionary['@value']
                    else:
                        event_dict[event[data_type]['@key']] = event[data_type]['@value']
            # No-op self-assignment; its only effect is raising KeyError
            # when an event lacks 'concept:name'.
            event_dict['concept:name'] = event_dict['concept:name']
            trace_events.append(event_dict)
        traces.append(trace_events)
    return traces
def extract_node_info(self):
idx = 0
self.node_indices = dict()
for trace in self.log:
for event in trace:
name = event['concept:name'] + "@" + event['lifecycle:transition']
if name not in self.node_indices.keys():
self.node_indices[name] = idx
idx += 1
self.num_of_nodes = idx
self.nodes = list(self.node_indices.keys())
def extract_aggregates(self):
if self.metric_settings["distance_significance_binary"][0]:
self.cal_unary_simple_aggregate()
self.cal_binary_simple_aggregate()
if self.metric_settings["routing_significance_unary"][0]:
if self.metric_settings["distance_significance_binary"][0]:
self.cal_binary_multi_aggregate()
else:
self.cal_binary_simple_aggregate()
self.cal_binary_multi_aggregate()
def extract_derivative_metrics(self):
if self.metric_settings["routing_significance_unary"][0]:
self.cal_unary_derivative()
if self.metric_settings["distance_significance_binary"][0]:
self.cal_binary_derivative()
def extract_primary_metrics(self):
max_look_back = self.config.maximal_distance
# print(self.binary_edge_frequency_values)
# self.binary_edge_frequency_values = [[0.0 for _ in range(self.num_of_nodes)] for _ in range(self.num_of_nodes)]
for trace in self.log:
# print(len(trace))
look_back = list()
look_back_indices = list()
for event in trace:
# print(len(event))
follower_event = event
follower_index = self.node_indices[
follower_event['concept:name'] + "@" + follower_event['lifecycle:transition']]
look_back.insert(0, follower_event)
look_back_indices.insert(0, follower_index)
if len(look_back) > (max_look_back + 1):
look_back.pop(max_look_back + 1)
look_back_indices.pop(max_look_back + 1)
self.unary_node_frequency_values[follower_index] += 1
for k in range(1, len(look_back)):
# print(len(look_back), look_back, end=' ')
# print()
# (len(look_back))
ref_event = look_back[k]
ref_index = look_back_indices[k]
att_factor = self.config.attenuation.get_attenuation_factor(k)
# print(self.binary_edge_frequency_values)
print(self.binary_edge_frequency_values[ref_index][follower_index], ref_index, follower_index,
att_factor)
self.binary_edge_frequency_values[ref_index][follower_index] += att_factor
# print(self.binary_edge_frequency_values[ref_index][follower_index], att_factor, end=' ')
if self.metric_settings["proximity_correlation_binary"][0]:
self.binary_corr_proximity_values[ref_index][follower_index] += self.cal_proximity_correlation(
ref_event, follower_event) * att_factor
if self.metric_settings["endpoint_correlation_binary"][0]:
self.binary_corr_endpoint_values[ref_index][follower_index] += self.cal_endpoint_correlation(
ref_event, follower_event) * att_factor
if self.metric_settings["originator_correlation_binary"][0]:
self.binary_corr_originator_values[ref_index][
follower_index] += self.cal_originator_correlation(
ref_event, follower_event) * att_factor
if self.metric_settings["datatype_correlation_binary"][0]:
self.binary_corr_datatype_values[ref_index][follower_index] += self.cal_datatype_correlation(
ref_event, follower_event) * att_factor
if self.metric_settings["datavalue_correlation_binary"][0]:
self.binary_corr_datavalue_values[ref_index][follower_index] += self.cal_datavalue_correlation(
ref_event,
follower_event) * att_factor
self.binary_corr_divisors[ref_index][follower_index] += att_factor
# print(self.binary_edge_frequency_values)
def extract_weighted_metrics(self):
self.cal_weighted_unary_values()
self.cal_weighted_binary_values()
self.cal_weighted_binary_corr_values()
def cal_weighted_unary_values(self):
inc1 = self.metric_settings["frequency_significance_unary"][0]
inc2 = self.metric_settings["routing_significance_unary"][0]
w1 = self.metric_settings["frequency_significance_unary"][2]
w2 = self.metric_settings["routing_significance_unary"][2]
sz = self.num_of_nodes
valid_matrices = list()
if inc1 and (w1 > 0.0) and self.is_valid_matrix1D(self.unary_node_frequency_normalized_values):
valid_matrices.append(self.unary_node_frequency_normalized_values)
if inc2 and (w2 > 0.0) and self.is_valid_matrix1D(self.unary_derivative_routing_normalized_values):
valid_matrices.append(self.unary_derivative_routing_normalized_values)
for valid_matrix in valid_matrices:
for i in range(sz):
self.unary_weighted_values[i] += valid_matrix[i]
self.unary_weighted_values = self.normalize_matrix1D(self.unary_weighted_values)
def cal_weighted_binary_values(self):
inc1 = self.metric_settings["frequency_significance_binary"][0]
inc2 = self.metric_settings["distance_significance_binary"][0]
w1 = self.metric_settings["frequency_significance_binary"][2]
w2 = self.metric_settings["distance_significance_binary"][2]
sz = self.num_of_nodes
valid_matrices = list()
if inc1 and (w1 > 0.0) and self.is_valid_matrix2D(self.binary_edge_frequency_normalized_values):
valid_matrices.append(self.binary_edge_frequency_normalized_values)
if inc2 and (w2 > 0.0) and self.is_valid_matrix2D(self.binary_derivative_distance_normalized_values):
valid_matrices.append(self.binary_derivative_distance_normalized_values)
for valid_matrix in valid_matrices:
for i in range(0, sz):
for j in range(0, sz):
self.binary_sig_weighted_values[i][j] += valid_matrix[i][j]
self.binary_sig_weighted_values = self.normalize_matrix2D(self.binary_sig_weighted_values)
def cal_weighted_binary_corr_values(self):
inc1 = self.metric_settings["proximity_correlation_binary"][0]
inc2 = self.metric_settings["originator_correlation_binary"][0]
inc3 = self.metric_settings["endpoint_correlation_binary"][0]
inc4 = self.metric_settings["datatype_correlation_binary"][0]
inc5 = self.metric_settings["datavalue_correlation_binary"][0]
w1 = self.metric_settings["proximity_correlation_binary"][2]
w2 = self.metric_settings["originator_correlation_binary"][2]
w3 = self.metric_settings["endpoint_correlation_binary"][2]
w4 = self.metric_settings["datatype_correlation_binary"][2]
w5 = self.metric_settings["datavalue_correlation_binary"][2]
valid_matrices = list()
if inc1 and (w1 > 0.0) and self.is_valid_matrix2D(self.binary_corr_proximity_normalized_values):
valid_matrices.append(self.binary_corr_proximity_normalized_values)
if inc2 and (w2 > 0.0) and self.is_valid_matrix2D(self.binary_corr_endpoint_normalized_values):
valid_matrices.append(self.binary_corr_endpoint_normalized_values)
if inc3 and (w3 > 0.0) and self.is_valid_matrix2D(self.binary_corr_originator_normalized_values):
valid_matrices.append(self.binary_corr_originator_normalized_values)
if inc4 and (w4 > 0.0) and self.is_valid_matrix2D(self.binary_corr_datatype_normalized_values):
valid_matrices.append(self.binary_corr_datatype_normalized_values)
if inc5 and (w5 > 0.0) and self.is_valid_matrix2D(self.binary_corr_datavalue_normalized_values):
valid_matrices.append(self.binary_corr_datavalue_normalized_values)
sz = self.num_of_nodes
for valid_matrix in valid_matrices:
for i in range(0, sz):
for j in range(0, sz):
self.binary_corr_weighted_values[i][j] += valid_matrix[i][j]
self.binary_corr_weighted_values = self.normalize_matrix2D(self.binary_corr_weighted_values)
def normalize_primary_metrics(self):
self.unary_node_frequency_normalized_values = self.weight_normalize1D(self.unary_node_frequency_values,
self.metric_settings[
"frequency_significance_unary"][1],
self.metric_settings[
"frequency_significance_unary"][2])
self.binary_edge_frequency_normalized_values = self.weight_normalize2D(self.binary_edge_frequency_values,
self.metric_settings[
"frequency_significance_binary"][1],
self.metric_settings[
"frequency_significance_binary"][2])
inc1 = self.metric_settings["proximity_correlation_binary"][0]
inc2 = self.metric_settings["originator_correlation_binary"][0]
inc3 = self.metric_settings["endpoint_correlation_binary"][0]
inc4 = self.metric_settings["datatype_correlation_binary"][0]
inc5 = self.metric_settings["datavalue_correlation_binary"][0]
inv1 = self.metric_settings["proximity_correlation_binary"][1]
inv2 = self.metric_settings["originator_correlation_binary"][1]
inv3 = self.metric_settings["endpoint_correlation_binary"][1]
inv4 = self.metric_settings["datatype_correlation_binary"][1]
inv5 = self.metric_settings["datavalue_correlation_binary"][1]
w1 = self.metric_settings["proximity_correlation_binary"][2]
w2 = self.metric_settings["originator_correlation_binary"][2]
w3 = self.metric_settings["endpoint_correlation_binary"][2]
w4 = self.metric_settings["datatype_correlation_binary"][2]
w5 = self.metric_settings["datavalue_correlation_binary"][2]
if inc1:
self.binary_corr_proximity_normalized_values = self.special_weight_normalize2D(
self.binary_corr_proximity_values, self.binary_corr_divisors, inv1, w1)
if inc2:
self.binary_corr_endpoint_normalized_values = self.special_weight_normalize2D(
self.binary_corr_endpoint_values, self.binary_corr_divisors, inv2, w2)
if inc3:
self.binary_corr_originator_normalized_values = self.special_weight_normalize2D(
self.binary_corr_originator_values, self.binary_corr_divisors, inv3, w3)
if inc4:
self.binary_corr_datatype_normalized_values = self.special_weight_normalize2D(
self.binary_corr_datatype_values, self.binary_corr_divisors, inv4, w4)
if inc5:
self.binary_corr_datavalue_normalized_values = self.special_weight_normalize2D(
self.binary_corr_datavalue_values, self.binary_corr_divisors, inv5, w5)
def normalize_derivative_metrics(self):
if self.metric_settings["routing_significance_unary"][0]:
self.unary_derivative_routing_normalized_values = self.weight_normalize1D(
self.unary_derivative_routing_values,
self.metric_settings["routing_significance_unary"][1],
self.metric_settings["routing_significance_unary"][2])
if self.metric_settings["distance_significance_binary"][0]:
self.binary_derivative_distance_normalized_values = self.weight_normalize2D(
self.binary_derivative_distance_values,
self.metric_settings["distance_significance_binary"][1],
self.metric_settings["distance_significance_binary"][2])
def normalize_matrix1D(self, lst):
max_val = max(lst)
if max_val == 0:
return lst
else:
norm_list = list()
for val in lst:
norm_list.append(val / max_val)
return norm_list
def normalize_matrix2D(self, lst):
sz = len(lst[0])
max_val = max(map(max, lst))
if max_val == 0:
return lst
else:
norm_list = list()
for i in range(0, sz):
temp_list = list()
for j in range(0, sz):
temp_list.append(lst[i][j] / max_val)
norm_list.append(temp_list)
return norm_list
def cal_unary_derivative(self):
sz = self.num_of_nodes
for i in range(0, sz):
in_value = 0.0
out_value = 0.0
quotient = 0.0
for x in range(0, sz):
if x == i:
continue
in_value += self.binary_simple_aggregate_normalized_values[x][i] * \
self.binary_multi_aggregate_normalized_values[x][i]
out_value += self.binary_simple_aggregate_normalized_values[i][x] * \
self.binary_multi_aggregate_normalized_values[i][x]
if in_value == 0.0 and out_value == 0.0:
quotient = 0.0
else:
quotient = abs((in_value - out_value) / (in_value + out_value))
self.unary_derivative_routing_values[i] = quotient
def cal_binary_derivative(self):
sz = self.num_of_nodes
for i in range(0, sz):
sig_source = self.unary_simple_aggregate_normalized_values[i]
for j in range(0, sz):
sig_target = self.unary_simple_aggregate_normalized_values[j]
if sig_source + sig_target == 0:
continue
sig_link = self.binary_simple_aggregate_normalized_values[i][j]
self.binary_derivative_distance_values[i][j] = 1.0 - (
(sig_source - sig_link) + (sig_target - sig_link)) / (sig_source + sig_target)
def cal_binary_multi_aggregate(self):
inc1 = self.metric_settings["proximity_correlation_binary"][0]
inc2 = self.metric_settings["endpoint_correlation_binary"][0]
inc3 = self.metric_settings["originator_correlation_binary"][0]
inc4 = self.metric_settings["datatype_correlation_binary"][0]
inc5 = self.metric_settings["datavalue_correlation_binary"][0]
valid_metrics = list()
if inc1 and self.is_valid_matrix2D(self.binary_corr_proximity_normalized_values):
valid_metrics.append(self.binary_corr_proximity_normalized_values)
if inc2 and self.is_valid_matrix2D(self.binary_corr_endpoint_normalized_values):
valid_metrics.append(self.binary_corr_endpoint_normalized_values)
if inc3 and self.is_valid_matrix2D(self.binary_corr_originator_normalized_values):
valid_metrics.append(self.binary_corr_originator_normalized_values)
if inc4 and self.is_valid_matrix2D(self.binary_corr_datatype_normalized_values):
valid_metrics.append(self.binary_corr_datatype_normalized_values)
if inc5 and self.is_valid_matrix2D(self.binary_corr_datavalue_normalized_values):
valid_metrics.append(self.binary_corr_datavalue_normalized_values)
temp_max = 0
if len(valid_metrics) > 0:
sz = self.num_of_nodes
for i in range(0, sz):
for j in range(0, sz):
aggregated = 0.0
for k in range(0, len(valid_metrics)):
aggregated += valid_metrics[k][i][j]
self.binary_multi_aggregate_normalized_values[i][j] = aggregated
if aggregated > temp_max:
temp_max = aggregated
if temp_max > 0:
for i in range(0, sz):
for j in range(0, sz):
self.binary_multi_aggregate_normalized_values[i][j] *= (1 / temp_max)
return
def compensate_frequency(self, values, divisors):
size = len(values[0])
comp_list = list()
for i in range(size):
temp_list = list()
for j in range(size):
if divisors[i][j] > 0.0:
temp_list.append(values[i][j] / divisors[i][j])
else:
temp_list.append(values[i][j])
comp_list.append(temp_list)
return comp_list
def weight_normalize1D(self, lst, invert, normalize_max):
size = len(lst)
if normalize_max == 0:
return [0.0 for i in range(size)]
else:
max_val = max(lst)
if max_val > 0.0:
norm_list = list()
for i in range(size):
val = (lst[i] * normalize_max) / max_val
if invert:
val = normalize_max - val
norm_list.append(val)
return norm_list
else:
if invert:
for i in range(size):
lst[i] = normalize_max - lst[i]
return lst
def weight_normalize2D(self, lst, invert, normalize_max):
size = len(lst[0])
if normalize_max == 0:
return [[0.0 for i in range(size)] for j in range(size)]
else:
max_val = max(map(max, lst))
if max_val > 0.0:
norm_list = list()
for i in range(size):
temp_list = list()
for j in range(size):
val = (lst[i][j] * normalize_max) / max_val
if invert:
val = normalize_max - val
temp_list.append(val)
norm_list.append(temp_list)
return norm_list
else:
if invert:
for i in range(size):
for j in range(size):
lst[i][j] = normalize_max - lst[i][j]
return lst
def is_valid_matrix2D(self, lst):
size = len(lst[0])
for i in range(0, size):
for j in range(0, size):
if lst[i][j] > 0.0:
return True
return False
def is_valid_matrix1D(self, lst):
for i in range(0, len(lst)):
if lst[i] > 0.0:
return True
return False
def special_weight_normalize2D(self, values, divisors, invert, normalize_max):
size = len(values[0])
if normalize_max == 0:
norm_list = [[0.0 for i in range(size)] for j in range(size)]
return norm_list
else:
comp_list = self.compensate_frequency(values, divisors)
max_value = max(map(max, comp_list))
if max_value > 0.0:
norm_list = list()
for i in range(size):
temp_list = list()
for j in range(size):
val = (comp_list[i][j] * normalize_max) / max_value
if invert:
val = normalize_max - val
temp_list.append(val)
norm_list.append(temp_list)
return norm_list
else:
if invert:
for i in range(size):
for j in range(size):
comp_list[i][j] = normalize_max - comp_list[i][j]
return comp_list
def cal_proximity_correlation(self, evt1, evt2):
if 'time:timestamp' not in evt1 or 'time:timestamp' not in evt2:
return 0.0
time1 = evt1['time:timestamp']
time2 = evt2['time:timestamp']
if time1 is not None and time2 is not None:
time1 = time1.timestamp() * 1000
time2 = time2.timestamp() * 1000
if time1 != time2:
return 1.0 / (time2 - time1)
else:
return 1.0
else:
return 0.0
def cal_endpoint_correlation(self, evt1, evt2):
    """Name similarity of two events: normalized Levenshtein distance of
    their ``concept:name`` values (1.0 means identical / both empty)."""
    name1 = str(evt1.get('concept:name', "<no name>"))
    name2 = str(evt2.get('concept:name', "<no name>"))
    edit_distance = distance(name1, name2)
    longest = max(len(name1), len(name2))
    if longest == 0:
        return 1.0
    return (longest - edit_distance) / longest
def cal_originator_correlation(self, evt1, evt2):
    """Resource (originator) similarity of two events: normalized
    Levenshtein distance of their ``org:resource`` values.

    Fixed: the length computation now stringifies the resources first,
    matching the ``distance(str(...), str(...))`` call and the sibling
    ``cal_endpoint_correlation``; previously a non-string resource made
    ``len()`` disagree with the distance, or raise ``TypeError``.
    """
    first = str(evt1['org:resource'] if 'org:resource' in evt1 else "<no resource>")
    second = str(evt2['org:resource'] if 'org:resource' in evt2 else "<no resource>")
    dist = distance(first, second)
    big_str_len = max(len(first), len(second))
    if big_str_len == 0:
        return 1.0
    return (big_str_len - dist) / big_str_len
def cal_datatype_correlation(self, evt1, evt2):
ref_data_keys = list()
fol_data_keys = list()
for key in evt1:
if not self.is_standard_key(key):
ref_data_keys.append(key)
for key in evt2:
if not self.is_standard_key(key):
fol_data_keys.append(key)
if (len(ref_data_keys) == 0) or (len(fol_data_keys) == 0):
return 0
overlap = 0
for key in ref_data_keys:
if key in fol_data_keys:
overlap += 1
return overlap / len(ref_data_keys)
def cal_datavalue_correlation(self, evt1, evt2):
ref_data_keys = list()
fol_data_keys = list()
for key in evt1:
if not self.is_standard_key(key):
ref_data_keys.append(key)
for key in evt2:
if not self.is_standard_key(key):
fol_data_keys.append(key)
if (len(ref_data_keys) == 0) or (len(fol_data_keys) == 0):
return 0
key_overlap = 0
val_overlap = 0
for key in ref_data_keys:
if key in fol_data_keys:
key_overlap += 1
dist = self.distance(str(evt1[key]), str(evt2[key]))
big_str_len = max(len(str(evt1[key])), len(str(evt2[key])))
if big_str_len == 0:
val_overlap += 1.0
else:
val_overlap += (big_str_len - dist) / big_str_len
if key_overlap == 0:
return 0.0
else:
return val_overlap / key_overlap
def cal_unary_simple_aggregate(self):
if self.is_valid_matrix1D(self.unary_node_frequency_normalized_values):
temp_max = 0
sz = len(self.unary_node_frequency_normalized_values)
for i in range(sz):
self.unary_simple_aggregate_normalized_values[i] = self.unary_node_frequency_normalized_values[i]
if self.unary_node_frequency_normalized_values[i] > temp_max:
temp_max = self.unary_node_frequency_normalized_values[i]
if temp_max > 0:
for i in range(sz):
self.unary_simple_aggregate_normalized_values[i] *= (1 / temp_max)
return
def cal_binary_simple_aggregate(self):
if self.is_valid_matrix2D(self.binary_edge_frequency_normalized_values):
temp_max = 0
sz = self.num_of_nodes
for i in range(0, sz):
for j in range(0, sz):
self.binary_simple_aggregate_normalized_values[i][j] = \
self.binary_edge_frequency_normalized_values[i][j]
if self.binary_edge_frequency_normalized_values[i][j] > temp_max:
temp_max = self.binary_edge_frequency_normalized_values[i][j]
if temp_max > 0:
for i in range(0, sz):
for j in range(0, sz):
self.binary_simple_aggregate_normalized_values[i][j] *= (1 / temp_max)
return
def is_standard_key(self, key):
if key.find("concept") != -1 or key.find("lifecycle") != -1 or key.find("org") != -1 or key.find(
"time") != -1 or key.find("semantic") != -1:
return True
else:
return False
    def clusterize(self):
        """Group insignificant nodes into clusters and build the final graph.

        Pipeline: (1) reset all cluster state; (2) mark nodes below the node
        filter cut-off as victims; (3) attach every victim to its most
        correlated neighbor — either an existing cluster or a new one (cluster
        indices start at num_of_nodes + 1, so mapping values >= num_of_nodes
        denote clusters); (4) merge clusters into their preferred targets;
        (5) drop clusters with no predecessors and successors; (6) dissolve
        singleton clusters via check_for_direct_connection; (7) set each
        cluster's significance to the mean of its members; (8) emit surviving
        primitive nodes; (9) build edges, remapping endpoints through the
        cluster mapping and keeping the highest significance per pair.
        """
        # (1) reset state
        self.node_cluster_mapping = [i for i in range(0, self.num_of_nodes)]
        self.cluster_dict.clear()
        self.fm_edges_dict.clear()
        self.fm_clusters.clear()
        self.fm_nodes.clear()
        self.fm_edges.clear()
        # (2) victims: nodes whose significance falls below the cut-off
        victims = list()
        for i in range(0, self.num_of_nodes):
            if self.node_filter_resultant_unary_values[i] < self.config.filter_config.node_filter.cut_off:
                victims.append(i)
        # (3) attach each victim to its most correlated neighbor;
        # processed victims are marked with -1
        cluster_idx = self.num_of_nodes + 1
        for i in range(0, len(victims)):
            if victims[i] == -1:
                continue
            neighbor = self.most_correlated_neighbor(victims[i])
            if neighbor >= self.num_of_nodes:
                # neighbor is already a cluster: join it
                self.cluster_dict[neighbor].add_node(victims[i])
                self.node_cluster_mapping[victims[i]] = neighbor
                victims[i] = -1
            else:
                # open a new cluster for this victim (and the neighbor,
                # if the neighbor is itself a pending victim)
                cluster = Cluster(cluster_idx)
                self.cluster_dict[cluster_idx] = cluster
                cluster.add_node(victims[i])
                self.node_cluster_mapping[victims[i]] = cluster_idx
                victims[i] = -1
                if neighbor in victims:
                    cluster.add_node(neighbor)
                    self.node_cluster_mapping[neighbor] = cluster_idx
                    victims[victims.index(neighbor)] = -1
                cluster_idx += 1
                self.fm_clusters.append(cluster)
        # (4) merge clusters into their preferred neighbors
        cluster_size = len(self.fm_clusters)
        idx = 0
        while idx < cluster_size:
            target = self.get_preferred_merge_target(self.fm_clusters[idx].index)
            if target is not None:
                self.merge_with(target, self.fm_clusters[idx].index)
                self.cluster_dict.pop(self.fm_clusters[idx].index)
                self.fm_clusters.remove(self.fm_clusters[idx])
                cluster_size -= 1
            else:
                idx += 1
        # (5) remove fully isolated clusters and unmap their members
        cluster_size = len(self.fm_clusters)
        idx = 0
        while idx < cluster_size:
            cluster = self.fm_clusters[idx]
            pre_set = self.get_predecessors_of_cluster(cluster.index)
            succ_set = self.get_successors_of_cluster(cluster.index)
            if len(pre_set) == 0 and len(succ_set) == 0:
                for prim_index in cluster.get_primitives():
                    self.node_cluster_mapping[prim_index] = -1
                self.cluster_dict.pop(cluster.index)
                self.fm_clusters.remove(cluster)
                cluster_size -= 1
            else:
                idx += 1
        # (6) dissolve singleton clusters into direct connections
        cls_sz = len(self.fm_clusters)
        idx = 0
        while idx < cls_sz:
            cluster = self.fm_clusters[idx]
            if len(cluster.get_primitives()) == 1:
                self.check_for_direct_connection(cluster)
                self.cluster_dict.pop(cluster.index)
                self.fm_clusters.remove(cluster)
                cls_sz -= 1
            else:
                idx += 1
        # (7) cluster significance = mean significance of its members
        # (NOTE: the comprehension variable shadows the loop index 'idx'
        # above, which is no longer used at this point)
        for cluster in self.fm_clusters:
            primitive_indices = cluster.get_primitives()
            primitive_significances = [self.node_filter_resultant_unary_values[idx] for idx in
                                       primitive_indices]
            cluster.significance = sum(primitive_significances) / len(primitive_significances)
        # (8) surviving primitive nodes (mapped to themselves, not a cluster)
        for i in range(0, self.num_of_nodes):
            if self.node_cluster_mapping[i] != -1 and self.node_cluster_mapping[i] < self.num_of_nodes:
                self.fm_nodes.append(Node(i, self.nodes[i],
                                          self.node_filter_resultant_unary_values[i]))
        # (9) build edges: self-loops stay on surviving primitives; other
        # edges are remapped through the cluster mapping, keeping the
        # highest significance seen per (source, target) pair
        for i in range(0, self.num_of_nodes):
            if self.node_cluster_mapping[i] != -1:
                for j in range(0, self.num_of_nodes):
                    significance = self.node_filter_resultant_binary_values[i][j]
                    correlation = self.node_filter_resultant_binary_corr_values[i][j]
                    if significance > 0.0:
                        if i == j:
                            mapped_idx = self.node_cluster_mapping[i]
                            if mapped_idx != -1:
                                if mapped_idx < self.num_of_nodes:
                                    if (i, j) in self.fm_edges_dict.keys():
                                        if self.fm_edges_dict[(i, j)].significance < significance:
                                            self.fm_edges_dict[(i, j)].significance = significance
                                            self.fm_edges_dict[(i, j)].correlation = correlation
                                    else:
                                        self.fm_edges_dict[(i, j)] = Edge(i, j, significance, correlation)
                        else:
                            mapped_i = self.node_cluster_mapping[i]
                            mapped_j = self.node_cluster_mapping[j]
                            if mapped_i == -1 or mapped_j == -1:
                                continue
                            else:
                                if mapped_i == mapped_j:
                                    # intra-cluster edge: skip
                                    continue
                                else:
                                    if (mapped_i, mapped_j) in self.fm_edges_dict.keys():
                                        if self.fm_edges_dict[(mapped_i, mapped_j)].significance < significance:
                                            self.fm_edges_dict[(mapped_i, mapped_j)].significance = significance
                                            self.fm_edges_dict[(mapped_i, mapped_j)].correlation = correlation
                                    else:
                                        self.fm_edges_dict[(mapped_i, mapped_j)] = Edge(mapped_i, mapped_j,
                                                                                        significance, correlation)
        # (10) flatten the edge dict into the output list
        for key, value in self.fm_edges_dict.items():
            self.fm_edges.append(value)
def merge_with(self, winner_index, loser_index):
loser_primitive_indices = self.cluster_dict[loser_index].get_primitives()
for prim_idx in loser_primitive_indices:
self.cluster_dict[winner_index].add_node(prim_idx)
self.node_cluster_mapping[prim_idx] = winner_index
    def get_preferred_merge_target(self, index):
        """Pick the neighboring cluster that cluster *index* should merge into.

        Scans predecessor and successor clusters, keeping the candidate with
        the highest aggregate correlation in each direction.  A predecessor
        that is NOT itself a cluster disqualifies all predecessor candidates;
        a successor that is not a cluster short-circuits and returns the best
        predecessor found so far (or None).  Returns None when no suitable
        target exists.
        """
        pre_target = None
        succ_target = None
        max_pre_corr = 0.0
        max_succ_corr = 0.0
        predecessors = self.get_predecessors_of_cluster(index)
        for predecessor in predecessors:
            if predecessor in self.cluster_dict.keys():
                corr = self.get_aggregate_correlation(index, predecessor)
                if corr > max_pre_corr:
                    max_pre_corr = corr
                    pre_target = predecessor
            else:
                # a plain-node predecessor disqualifies merging upstream
                pre_target = None
                max_pre_corr = 0.0
                break
        successors = self.get_successors_of_cluster(index)
        for successor in successors:
            if successor in self.cluster_dict.keys():
                corr = self.get_aggregate_correlation(index, successor)
                if corr > max_succ_corr:
                    max_succ_corr = corr
                    succ_target = successor
            else:
                # a plain-node successor: fall back to the predecessor result
                if pre_target != None:
                    return pre_target
                else:
                    return None
        # prefer the direction with the stronger aggregate correlation
        if max_pre_corr > max_succ_corr:
            return pre_target
        else:
            return succ_target
def get_successors_of_cluster(self, index):
cluster = self.cluster_dict[index]
successors = set()
for prim_idx in cluster.get_primitives():
successors = successors.union(self.get_successors_of_node(prim_idx))
successors -= set(cluster.get_primitives())
successors.discard(index)
return successors
def get_successors_of_node(self, index):
successors = set()
for i in range(0, self.num_of_nodes):
if i == index:
continue
elif self.node_filter_resultant_binary_values[index][i] > 0.0:
if self.node_cluster_mapping[i] != -1:
successors.add(self.node_cluster_mapping[i])
return successors
def get_aggregate_correlation(self, cluster1_idx, cluster2_idx):
cluster1_primitive_indices = self.cluster_dict[cluster1_idx].get_primitives()
cluster2_primitive_indices = self.cluster_dict[cluster2_idx].get_primitives()
aggregate_corr = 0.0
for prim1_idx in cluster1_primitive_indices:
for prim2_idx in cluster2_primitive_indices:
aggregate_corr += self.edge_filter_resultant_binary_corr_values[prim1_idx][
prim2_idx]
aggregate_corr += self.edge_filter_resultant_binary_corr_values[prim2_idx][
prim1_idx]
return aggregate_corr
def get_predecessors_of_cluster(self, index):
cluster = self.cluster_dict[index]
predecessors = set()
for prim_idx in cluster.get_primitives():
predecessors = predecessors.union(self.get_predecessors_of_node(prim_idx))
predecessors -= set(cluster.get_primitives())
predecessors.discard(index)
return predecessors
def get_predecessors_of_node(self, index):
predecessors = set()
for i in range(0, self.num_of_nodes):
if i == index:
continue
elif self.node_filter_resultant_binary_values[i][index] > 0.0:
if self.node_cluster_mapping[i] != -1:
predecessors.add(self.node_cluster_mapping[i])
return predecessors
def most_correlated_neighbor(self, idx):
max_corr = 0.0
winner_idx = 0
for i in range(0, self.num_of_nodes):
if i == idx:
continue
curr_corr = self.concurrency_filter_resultant_binary_corr_values[idx][i]
if curr_corr > max_corr:
winner_idx = self.node_cluster_mapping[i]
max_corr = curr_corr
curr_corr = self.concurrency_filter_resultant_binary_corr_values[i][idx]
if curr_corr > max_corr:
winner_idx = self.node_cluster_mapping[i]
max_corr = curr_corr
return winner_idx
    def check_for_direct_connection(self, cluster):
        """Dissolve a singleton cluster by bridging around its only node.

        For every (predecessor, successor) pair of plain nodes that has no
        direct edge yet, synthesize one whose significance/correlation is the
        average of the two hops through the node, and zero the hop edges.
        Finally unmap the node (mapping entry set to -1).
        """
        node_index = cluster.get_primitives()[0]
        own_idx = node_index
        pre_set = self.get_predecessors_of_node(own_idx)
        succ_set = self.get_successors_of_node(own_idx)
        for pre_idx in pre_set:
            # bridge only between plain nodes, never through clusters
            if pre_idx in self.cluster_dict.keys():
                continue
            for succ_idx in succ_set:
                if succ_idx in self.cluster_dict.keys():
                    continue
                if self.edge_filter_resultant_binary_values[pre_idx][succ_idx] == 0.0:
                    from_sig = self.edge_filter_resultant_binary_values[pre_idx][own_idx]
                    to_sig = self.edge_filter_resultant_binary_values[own_idx][succ_idx]
                    from_corr = self.edge_filter_resultant_binary_corr_values[pre_idx][own_idx]
                    to_corr = self.edge_filter_resultant_binary_corr_values[own_idx][succ_idx]
                    # the synthesized direct edge averages the two hops
                    self.node_filter_resultant_binary_values[pre_idx][succ_idx] = \
                        (from_sig + to_sig) / 2.0
                    self.node_filter_resultant_binary_corr_values[pre_idx][succ_idx] = \
                        (from_corr + to_corr) / 2.0
                    self.node_filter_resultant_binary_values[pre_idx][own_idx] = 0.0
                    self.node_filter_resultant_binary_values[own_idx][succ_idx] = 0.0
                    self.node_filter_resultant_binary_corr_values[pre_idx][own_idx] = 0.0
                    self.node_filter_resultant_binary_corr_values[own_idx][succ_idx] = 0.0
        # the dissolved singleton's node no longer belongs to any cluster
        self.node_cluster_mapping[own_idx] = -1
class Node:
    """A primitive activity node of the fuzzy-miner graph.

    Carries the graph index, a display label, a significance value and a
    node_type tag ("primitive" by default; subclasses override it).
    """

    def __init__(self, index, label, significance, node_type="primitive"):
        self.index = index
        self.label = label
        self.significance = significance
        self.node_type = node_type

    def __str__(self):
        return (f"{self.label} index: {self.index} "
                f"significance: {self.significance} and type: {self.node_type}")
class Edge:
    """A directed edge between two graph elements, carrying the filtered
    significance and correlation values."""

    def __init__(self, source_index, target_index, significance, correlation):
        self.source = source_index
        self.target = target_index
        self.significance = significance
        self.correlation = correlation

    def __str__(self):
        return (f"source: {self.source} target: {self.target} "
                f"significance: {self.significance} correlation: {self.correlation}")
class Cluster(Node):
    """A node that aggregates several primitive node indices.

    significance starts at 1.0 and is later overwritten with the mean
    significance of the member primitives (see clusterize).
    """

    def __init__(self, index):
        super().__init__(index, "Cluster", 1.0, "cluster")
        self.primitives = list()

    def add_node(self, node_index):
        """Register a primitive node index as a member of this cluster."""
        self.primitives.append(node_index)

    def get_primitives(self):
        """Return the (mutable) list of member node indices."""
        return self.primitives

    def __str__(self):
        return (f"{self.label} index: {self.index} mean significance: "
                f"{self.significance} has primitives: {self.get_primitives()}")
class Filter:
    """Base class for the fuzzy-miner filter settings; carries a display name."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return f"Filter name: {self.name}"
class NodeFilter(Filter):
    """Node filter settings: nodes whose significance falls below *cut_off*
    are clustered away instead of kept as primitives."""

    def __init__(self, cut_off=0.0):
        super().__init__("node_filter")
        self.cut_off = cut_off

    def __str__(self):
        return f"{super().__str__()} Cut Off: {self.cut_off}"
class EdgeFilter(Filter):
    """Edge filter settings.

    edge_transform == 1 selects the fuzzy-edges mode (sc_ratio balances
    significance against correlation, preserve sets the kept fraction);
    any other value selects the best-edges mode.
    """

    def __init__(self, edge_transform=1, sc_ratio=0.75, preserve=0.2,
                 interpret_abs=False, ignore_self_loops=True):
        super().__init__("edge_filter")
        self.edge_transform = edge_transform
        self.sc_ratio = sc_ratio
        self.preserve = preserve
        self.interpret_abs = interpret_abs
        self.ignore_self_loops = ignore_self_loops

    def __str__(self):
        base = super().__str__()
        if self.edge_transform == 1:
            return (base + " Edge Transform: " + str(self.edge_transform)
                    + " sc_ratio: " + str(self.sc_ratio)
                    + " Preserve: " + str(self.preserve)
                    + " Ignore Self Loops: " + str(self.ignore_self_loops)
                    + " Interpret Absolute: " + str(self.interpret_abs))
        # NOTE: no space after the base string in this branch (kept as-is)
        return (base + "Edge Transform: " + str(self.edge_transform)
                + " Ignore Self Loops: " + str(self.ignore_self_loops))
class ConcurrencyFilter(Filter):
    """Concurrency filter settings (preserve / offset thresholds); the filter
    can be switched off entirely via *filter_concurrency*."""

    def __init__(self, filter_concurrency=True, preserve=0.6, offset=0.7):
        super().__init__("concurrency_filter")
        self.filter_concurrency = filter_concurrency
        self.preserve = preserve
        self.offset = offset

    def __str__(self):
        base = super().__str__()
        if not self.filter_concurrency:
            # NOTE: no space before "Filter" (kept byte-identical)
            return base + "Filter is Disabled"
        return base + " Preserve: " + str(self.preserve) + " Offset: " + str(self.offset)
from abc import ABC, abstractmethod
class Configuration:
    """Bundle of every setting the miner needs: the filter configuration, the
    per-metric configs, the attenuation strategy and the maximal event
    distance considered for derivative metrics."""

    def __init__(self, filter_config, metric_configs, attenuation, maximal_distance):
        self.filter_config = filter_config
        self.metric_configs = metric_configs
        self.attenuation = attenuation
        self.maximal_distance = maximal_distance

    def __str__(self):
        metric_info = "".join(str(metric) for metric in self.metric_configs)
        return (str(self.filter_config) + "\n" + metric_info
                + " Attenuation: " + str(self.attenuation)
                + " Maximum Distance: " + str(self.maximal_distance))
class FilterConfig:
    """Container for the node, edge and concurrency filter settings."""

    def __init__(self, node_filter, edge_filter, concurrency_filter):
        self.node_filter = node_filter
        self.edge_filter = edge_filter
        self.concurrency_filter = concurrency_filter

    def __str__(self):
        parts = (self.node_filter, self.edge_filter, self.concurrency_filter)
        return "\n".join(str(part) for part in parts)
class MetricConfig:
    """Configuration of a single significance/correlation metric: whether it
    is included, whether its values are inverted, and its weight."""

    def __init__(self, name, metric_type, include=True, invert=False, weight=1.0):
        self.name = name
        self.metric_type = metric_type
        self.include = include
        self.invert = invert
        self.weight = weight

    def __str__(self):
        return ("Metric Name: " + self.name
                + " Metric Type: " + self.metric_type
                + " Included: " + str(self.include)
                + " Inverted: " + str(self.invert)
                + " Weight: " + str(self.weight))
class Attenuation(ABC):
    """Strategy for discounting a metric value by the event distance.

    Factors for distances below *buf_size* are computed once (lazily) and
    cached in *attenuation_factors*; larger distances are computed on demand
    via the subclass hook create_attenuation_factor.
    """

    def __init__(self, buf_size=5, echelons=2.7, attenuation_factors=None):
        self.buf_size = buf_size
        self.echelons = echelons
        self.attenuation_factors = attenuation_factors

    def attenuate(self, value, distance):
        """Return *value* scaled by the attenuation factor for *distance*."""
        return value * self.get_attenuation_factor(distance)

    def get_attenuation_factor(self, distance):
        """Return the factor for *distance*, using the cache when possible."""
        if distance < self.buf_size:
            if self.attenuation_factors is None:
                self.generate_buffer()
            return self.attenuation_factors[distance]
        else:
            return self.create_attenuation_factor(distance)

    def generate_buffer(self):
        """Precompute factors for distances 0 .. buf_size - 1."""
        self.attenuation_factors = []
        for i in range(self.buf_size):
            self.attenuation_factors.append(self.create_attenuation_factor(i))

    @abstractmethod
    def create_attenuation_factor(self, distance):
        """Compute the raw attenuation factor for *distance*."""
        pass

    @abstractmethod
    def get_name(self):
        """Human-readable name of the attenuation strategy."""
        pass

    def __str__(self):
        # BUG FIX: the original concatenated a str with the factors *list*,
        # raising TypeError whenever __str__ was called; convert explicitly.
        return "Buffer Size: " + str(self.buf_size) + " Attenuation Factor: " + str(self.attenuation_factors)
class LinearAttenuation(Attenuation):
    """Attenuation that decreases linearly over *num_of_echelons* steps."""

    def __init__(self, buffer_size, num_of_echelons):
        super().__init__(buffer_size, num_of_echelons)

    def create_attenuation_factor(self, distance):
        """Factor 1.0 at distance 1, then a linear drop-off per echelon."""
        if distance == 1:
            return 1.0
        return float(self.echelons - distance + 1) / float(self.echelons)

    def get_name(self):
        return "Linear Attenuation"

    def __str__(self):
        # NOTE: intentionally replaces (not extends) the base representation
        return " Echelons Value: " + str(self.echelons)
class NRootAttenuation(Attenuation):
    """Attenuation that decays geometrically: 1 / echelons**(distance - 1)."""

    def __init__(self, buffer_size, num_of_echelons):
        super().__init__(buffer_size, num_of_echelons)

    def create_attenuation_factor(self, distance):
        """Factor 1.0 at distance 1, then 1 / echelons**(distance - 1)."""
        return 1.0 if distance == 1 else 1.0 / pow(self.echelons, distance - 1)

    def get_name(self):
        # NOTE(review): "Quadratic root" for echelons == 4 is kept exactly as
        # in the original, although a 4th root is conventionally "quartic".
        named = {2: "Square root", 3: "Cubic root", 4: "Quadratic root"}
        return named.get(self.echelons, str(self.echelons) + "th root")

    def __str__(self):
        # NOTE: intentionally replaces (not extends) the base representation
        return " Echelons Value: " + str(self.echelons)
import copy
import os
import sys
from datetime import datetime
from json import loads, dumps
from Levenshtein import distance
import numpy as np
import xmltodict
from PyQt5 import QtCore
from PyQt5.QtWidgets import QVBoxLayout, QLabel, QDialog, QScrollArea, QGridLayout, QCheckBox, \
QSlider, QPushButton, QHBoxLayout, QButtonGroup, QRadioButton
class Plugin:
    """Fuzzy Miner plugin: collects mining parameters through a PyQt5 dialog,
    persists them to 'Parameters/param_file_fm.txt' and runs the miner."""

    def __init__(self):
        print('Plugin init ("Fuzzy Miner")')
        self.hasParameters = True
        self.myDialog = self.CustomDialog()

    class CustomDialog(QDialog):
        """Scrollable parameter dialog: one MetricGrid per metric plus the
        edge-filter, attenuation, node-filter and concurrency sections."""

        def __init__(self, *args, **kwargs):
            super(Plugin.CustomDialog, self).__init__(*args, **kwargs)
            self.resize(400, 500)
            self.layout = QVBoxLayout()
            self.setWindowTitle("Parameters")
            self.acc_button = QPushButton('Ok')
            self.acc_button.clicked.connect(self.close_window)
            self.cancel_button = QPushButton('Cancel')
            self.cancel_button.clicked.connect(self.close_cancel)
            self.buttonBox = QHBoxLayout()
            self.buttonBox.addWidget(self.acc_button)
            self.buttonBox.addWidget(self.cancel_button)
            self.scrollArea = QScrollArea(self)
            self.scrollArea.setWidgetResizable(True)
            self.scrollAreaWidgetContents = QDialog()
            self.vlayout = QVBoxLayout(self.scrollAreaWidgetContents)
            self.vlayout.setSpacing(0)
            self.vlayout.setContentsMargins(0, 0, 0, 0)
            self.scrollArea.setWidget(self.scrollAreaWidgetContents)
            self.layout.addWidget(self.scrollArea)
            self.layout.addLayout(self.buttonBox)
            self.setLayout(self.layout)
            # order matters: fill_my_parameters pairs metric names with the
            # saved entries by this exact order
            self.metrics = [Plugin.CustomDialog.MetricGrid('proximity_correlation_binary'),
                            Plugin.CustomDialog.MetricGrid('endpoint_correlation_binary'),
                            Plugin.CustomDialog.MetricGrid('originator_correlation_binary'),
                            Plugin.CustomDialog.MetricGrid('datatype_correlation_binary'),
                            Plugin.CustomDialog.MetricGrid('datavalue_correlation_binary'),
                            Plugin.CustomDialog.MetricGrid('routing_significance_unary'),
                            Plugin.CustomDialog.MetricGrid('distance_significance_binary'),
                            Plugin.CustomDialog.MetricGrid('frequency_significance_unary'),
                            Plugin.CustomDialog.MetricGrid('frequency_significance_binary')]
            # add metric widgets (plain loop instead of a side-effect
            # comprehension)
            for metric in self.metrics:
                self.vlayout.addWidget(metric)
            # add edge filter widget
            self.edge_filter = self.EdgeFilter()
            self.vlayout.addWidget(self.edge_filter)
            # add attenuation widget
            self.attenuation_filter = self.AttenuationGrid()
            self.vlayout.addWidget(self.attenuation_filter)
            # add Node filter widget
            hbox = QHBoxLayout()
            self.node_sig_cutoff_slider = QSlider()
            self.node_sig_cutoff_slider.setRange(0, 100)
            self.acc_sig_cutoff_val = QLabel('0')
            self.node_sig_cutoff_slider.valueChanged.connect(
                lambda val: self.acc_sig_cutoff_val.setText(str(val / 100)))
            # BUG FIX: the cutoff slider and its value label were created but
            # never added to the layout, leaving the node filter unreachable.
            hbox.addWidget(self.node_sig_cutoff_slider)
            hbox.addWidget(self.acc_sig_cutoff_val)
            self.vlayout.addLayout(hbox)
            # add concurency filter
            self.concurency_filter = self.ConcurencyFilter()
            self.vlayout.addWidget(self.concurency_filter)

        def close_window(self):
            """Serialize the current dialog state to the parameter file and
            close the dialog.  File layout (one line each): node cutoff;
            edge filter; concurrency filter; attenuation; ';'-separated
            metric triples '<weight> <inverted> <active>'."""
            print('Save Configuration to File and close dialog')
            os.makedirs(os.path.dirname('Parameters/param_file_fm.txt'), exist_ok=True)
            with open('Parameters/param_file_fm.txt', 'w') as file:
                # add node filter
                file.write(self.acc_sig_cutoff_val.text() + '\n')
                # add edge filter
                fuzzy_or_best = self.edge_filter.fuzzy_button.isChecked()
                file.write(f'{str(int(fuzzy_or_best))} {str(self.edge_filter.cut_off_acc_val.text())} '
                           f'{str(self.edge_filter.utility_acc_val.text())}\n')
                # add concurency filter
                is_concurency = self.concurency_filter.fiter_concurecy.isChecked()
                file.write(f'{str(int(is_concurency))} {str(self.concurency_filter.preserve_slider.value()/100)} '
                           f'{str(self.concurency_filter.ratio_slider.value()/100)}\n')
                # add attenuation
                is_nthroot = self.attenuation_filter.nth_root.isChecked()
                file.write(f'{str(int(is_nthroot))} {self.attenuation_filter.nth_label.text()} '
                           f'{self.attenuation_filter.max_ev_dis_acc_val.text()}\n')
                # add metrics to file
                for metric in self.metrics:
                    file.write(
                        f'{metric.acc_val.text()} {str(int(metric.inverted_button.isChecked()))} {str(int(metric.active_button.isChecked()))}' + ';')
            self.close()

        def close_cancel(self):
            """Write the sentinel 'default' so fill_my_parameters keeps the
            built-in default configuration, then close the dialog."""
            os.makedirs(os.path.dirname('Parameters/param_file_fm.txt'), exist_ok=True)
            with open('Parameters/param_file_fm.txt', 'w') as file:
                file.write('default')
            self.close()

        class ConcurencyFilter(QDialog):
            """Widget for the concurrency filter: enable checkbox plus
            preserve and ratio sliders (0..100, shown as value / 100)."""

            def __init__(self):
                super().__init__()
                vbox = QVBoxLayout()
                vbox.addWidget(QLabel('Concurency Filter'))
                self.fiter_concurecy = QCheckBox('Filter concurency')
                vbox.addWidget(self.fiter_concurecy)
                preserve_box = QHBoxLayout()
                acc_p_val = QLabel('0')
                self.preserve_slider = QSlider(QtCore.Qt.Horizontal)
                self.preserve_slider.setRange(0, 100)
                self.preserve_slider.valueChanged.connect(lambda val: acc_p_val.setText(str(val / 100)))
                preserve_box.addWidget(self.preserve_slider)
                preserve_box.addWidget(acc_p_val)
                vbox.addLayout(preserve_box)
                ratio_box = QHBoxLayout()
                acc_r_val = QLabel('0')
                self.ratio_slider = QSlider(QtCore.Qt.Horizontal)
                self.ratio_slider.setRange(0, 100)
                self.ratio_slider.valueChanged.connect(lambda val: acc_r_val.setText(str(val / 100)))
                ratio_box.addWidget(self.ratio_slider)
                ratio_box.addWidget(acc_r_val)
                vbox.addLayout(ratio_box)
                self.setLayout(vbox)

        class EdgeFilter(QDialog):
            """Widget for the edge filter: best/fuzzy radio buttons plus
            cutoff and utility sliders (0..100, shown as value / 100)."""

            def __init__(self):
                super().__init__()
                vbox = QVBoxLayout()
                vbox.addWidget(QLabel('Edge Filter'))
                grid = QGridLayout()
                vbox.addLayout(grid)
                self.cut_off_label = QLabel('CutOff')
                self.cut_off_slider = QSlider(QtCore.Qt.Horizontal)
                self.cut_off_slider.setRange(0, 100)
                self.cut_off_acc_val = QLabel('0')
                self.cut_off_slider.valueChanged.connect(lambda val: self.cut_off_acc_val.setText(str(val / 100)))
                self.utility_label = QLabel('Utility')
                self.utility_slider = QSlider(QtCore.Qt.Horizontal)
                self.utility_slider.setRange(0, 100)
                self.utility_acc_val = QLabel('0')
                self.utility_slider.valueChanged.connect(lambda val: self.utility_acc_val.setText(str(val / 100)))
                self.buttons = QButtonGroup()
                self.best_button = QRadioButton('Best edges')
                self.fuzzy_button = QRadioButton('Fuzzy edges')
                self.buttons.addButton(self.best_button)
                self.buttons.addButton(self.fuzzy_button)
                grid.addWidget(self.best_button, 0, 0)
                grid.addWidget(self.fuzzy_button, 0, 1)
                grid.addWidget(self.cut_off_label, 1, 0)
                grid.addWidget(self.cut_off_acc_val, 1, 1)
                grid.addWidget(self.cut_off_slider, 2, 0, 1, 2)
                grid.addWidget(self.utility_label, 3, 0)
                grid.addWidget(self.utility_acc_val, 3, 1)
                grid.addWidget(self.utility_slider, 4, 0, 1, 2)
                self.setLayout(vbox)

        class MetricGrid(QDialog):
            """Widget for a single metric: active/inverted checkboxes plus a
            weight slider (0..100, shown as value / 100)."""

            def __init__(self, name):
                super().__init__()
                grid = QGridLayout()
                self.label = QLabel(name)
                grid.addWidget(self.label, 0, 0)
                self.active_button = QCheckBox('Active?')
                self.active_button.setChecked(True)
                grid.addWidget(self.active_button, 0, 1)
                self.inverted_button = QCheckBox('Inverted?')
                self.inverted_button.setChecked(False)
                grid.addWidget(self.inverted_button, 0, 2)
                self.acc_val = QLabel('0')
                self.slider = QSlider(QtCore.Qt.Horizontal)
                self.slider.setRange(0, 100)
                self.slider.valueChanged.connect(lambda val: self.acc_val.setText(str(val / 100)))
                grid.addWidget(self.slider, 1, 0, 1, 2)
                grid.addWidget(self.acc_val, 1, 2)
                self.setLayout(grid)

        class AttenuationGrid(QDialog):
            """Widget for the attenuation settings: maximal event distance
            slider, linear/nth-root radio buttons and the nth-root echelons
            slider (0..100, shown as value / 10)."""

            def __init__(self):
                super().__init__()
                grid = QGridLayout()
                self.label = QLabel('Maximal event distance')
                self.max_ev_dis = QSlider(QtCore.Qt.Horizontal)
                self.max_ev_dis.setRange(0, 100)
                self.max_ev_dis_acc_val = QLabel('0')
                self.max_ev_dis.valueChanged.connect(lambda val: self.max_ev_dis_acc_val.setText(str(val)))
                self.second_label = QLabel('Select attenuation to use:')
                self.buttonbox = QButtonGroup()
                self.nth_root = QRadioButton('Nth root with radical')
                self.linear_attenuation = QRadioButton('Linear attenuation')
                self.nth_root_slider = QSlider(QtCore.Qt.Horizontal)
                self.nth_root_slider.setRange(0, 100)
                self.nth_label = QLabel('0')
                self.nth_root_slider.valueChanged.connect(lambda val: self.nth_label.setText(str(val / 10)))
                grid.addWidget(QLabel('Attenuation'), 0, 0)
                grid.addWidget(self.label, 1, 0)
                grid.addWidget(self.max_ev_dis_acc_val, 1, 1)
                grid.addWidget(self.max_ev_dis, 2, 1, 1, 2)
                grid.addWidget(self.second_label, 3, 0)
                grid.addWidget(self.nth_root, 4, 1)
                grid.addWidget(self.linear_attenuation, 4, 0)
                grid.addWidget(self.nth_root_slider, 5, 0, 1, 2)
                self.setLayout(grid)

    def fill_my_parameters(self):
        """Load the dialog-written parameter file (if any) into self.config.

        File layout (written by CustomDialog.close_window):
          line 0: node cutoff (float string)
          line 1: '<fuzzy?> <cutoff> <utility>'           (edge filter)
          line 2: '<filter?> <preserve> <ratio>'          (concurrency filter)
          line 3: '<nthroot?> <echelons> <max distance>'  (attenuation)
          line 4: ';'-separated '<weight> <inverted> <active>' metric triples
        A cancelled dialog writes the single token 'default', which leaves the
        default configuration untouched.  The file is removed afterwards.
        """
        try:
            with open('Parameters/param_file_fm.txt', 'r') as file:
                params = file.read().split('\n')
            # BUG FIX: the cutoff is saved as str(value / 100), i.e. a float
            # string like '0.35', so it must be parsed with float(), not int().
            node_filter = NodeFilter(cut_off=float(params[0]))
            edge_params = params[1].split(' ')
            edge_filter = EdgeFilter(edge_transform=int(edge_params[0]),
                                     sc_ratio=float(edge_params[1]),
                                     preserve=float(edge_params[2]))
            conc_params = params[2].split(' ')
            # BUG FIX: bool('0') is True for any non-empty string; compare
            # against the saved '1'/'0' flag instead.
            concurrency_filter = ConcurrencyFilter(filter_concurrency=conc_params[0] == '1',
                                                   preserve=float(conc_params[1]),
                                                   offset=float(conc_params[2]))
            attenuation_params = params[3].split(' ')
            if attenuation_params[0] == '0':
                attenuation_filter = LinearAttenuation(buffer_size=int(attenuation_params[2]),
                                                       num_of_echelons=float(attenuation_params[1]))
            else:
                attenuation_filter = NRootAttenuation(buffer_size=int(attenuation_params[2]),
                                                      num_of_echelons=float(attenuation_params[1]))
            metrics = params[4].split(';')[:-1]
            # BUG FIX: each saved entry is '<weight> <inverted> <active>'; the
            # original indexed single *characters* of the string and gave every
            # metric the name 'proximity_correlation_binary'.  Split the fields
            # and pair them with the metric names in dialog order.
            metric_names = ['proximity_correlation_binary',
                            'endpoint_correlation_binary',
                            'originator_correlation_binary',
                            'datatype_correlation_binary',
                            'datavalue_correlation_binary',
                            'routing_significance_unary',
                            'distance_significance_binary',
                            'frequency_significance_unary',
                            'frequency_significance_binary']
            metrics_list = []
            for name, entry in zip(metric_names, metrics):
                weight, inverted, active = entry.split(' ')
                metrics_list.append(MetricConfig(name,
                                                 metric_type='unary' if name.endswith('unary') else 'binary',
                                                 weight=float(weight),
                                                 invert=inverted == '1',
                                                 include=active == '1'))
            self.config = Configuration(FilterConfig(node_filter=node_filter,
                                                     edge_filter=edge_filter,
                                                     concurrency_filter=concurrency_filter),
                                        metric_configs=metrics_list,
                                        attenuation=attenuation_filter,
                                        maximal_distance=int(attenuation_params[2]))
        # BUG FIX: a cancelled dialog writes 'default', which previously raised
        # an uncaught ValueError; treat any malformed file like a missing one.
        except (IOError, ValueError, IndexError):
            print('File don\'t exists')
        # always remove the parameter file so stale settings are not reused
        if os.path.isfile('Parameters/param_file_fm.txt'):
            try:
                os.remove('Parameters/param_file_fm.txt')
            except OSError as e:
                print(f'Failed with: {e.strerror}')
        else:
            print('Did not find path')

    def execute(self, *args, **kwargs):
        """Run the fuzzy miner on the log file given as args[0].

        Returns ("success", <path of the CSV written by the miner>).
        """
        self.fullpath = args[0]
        with open(self.fullpath, 'r') as log_file:
            log = log_file.read()
        fm = FuzzyMiner()
        # NOTE: FuzzyMiner deliberately uses a custom ___init___ (triple
        # underscores) that must be invoked explicitly.
        fm.___init___(log, self.fullpath)
        # default configuration; overwritten by fill_my_parameters when the
        # dialog produced a parameter file
        self.config = Configuration(FilterConfig(node_filter=NodeFilter(),
                                                 edge_filter=EdgeFilter(),
                                                 concurrency_filter=ConcurrencyFilter()),
                                    [MetricConfig(name='proximity_correlation_binary',
                                                  metric_type='binary'),
                                     MetricConfig(name='endpoint_correlation_binary',
                                                  metric_type='binary'),
                                     MetricConfig(name='originator_correlation_binary',
                                                  metric_type='binary'),
                                     MetricConfig(name='datatype_correlation_binary',
                                                  metric_type='binary'),
                                     MetricConfig(name='datavalue_correlation_binary',
                                                  metric_type='binary'),
                                     MetricConfig(name='routing_significance_unary',
                                                  metric_type='unary'),
                                     MetricConfig(name='distance_significance_binary',
                                                  metric_type='binary'),
                                     MetricConfig(name='frequency_significance_unary',
                                                  metric_type='unary'),
                                     MetricConfig(name='frequency_significance_binary',
                                                  metric_type='binary'),
                                     ],
                                    NRootAttenuation(buffer_size=5, num_of_echelons=2.7),
                                    maximal_distance=5)
        self.fill_my_parameters()
        fm.apply_config(self.config)
        return "success", fm.full_path
class FuzzyMiner:
    def ___init___(self, log, path):
        """Parse the log text, extract node info and reset all metric buffers.

        NOTE: intentionally named with triple underscores and called
        explicitly by Plugin.execute after FuzzyMiner() is constructed.
        """
        # NOTE(review): path[-1] is the last *character* of the path string,
        # not a file name — confirm this is intended.
        self.name = path[-1]
        self.path = path
        self.log = self.parse_log(log)
        self.nodes = None
        self.clusters = None
        self.edges = None
        self.node_indices = None
        self.num_of_nodes = None
        self.fm_message = ''
        self.extract_node_info()
        # metric name -> (include, invert, weight); filled by apply_config
        self.metric_settings = None
        # primary metrics (raw and normalized); allocated by init_lists
        self.unary_node_frequency_values = None
        self.unary_node_frequency_normalized_values = None
        self.binary_edge_frequency_values = None
        self.binary_edge_frequency_normalized_values = None
        self.binary_corr_divisors = None
        # aggregate metrics
        self.unary_simple_aggregate_normalized_values = None
        self.binary_simple_aggregate_normalized_values = None
        self.binary_multi_aggregate_normalized_values = None
        # correlation metrics (one pair of raw/normalized per metric)
        self.binary_corr_proximity_values = None
        self.binary_corr_proximity_normalized_values = None
        self.binary_corr_endpoint_values = None
        self.binary_corr_endpoint_normalized_values = None
        self.binary_corr_originator_values = None
        self.binary_corr_originator_normalized_values = None
        self.binary_corr_datatype_values = None
        self.binary_corr_datatype_normalized_values = None
        self.binary_corr_datavalue_values = None
        self.binary_corr_datavalue_normalized_values = None
        # derivative metrics
        self.unary_derivative_routing_values = None
        self.unary_derivative_routing_normalized_values = None
        self.binary_derivative_distance_values = None
        self.binary_derivative_distance_normalized_values = None
        # weighted combinations of the above
        self.unary_weighted_values = None
        self.binary_sig_weighted_values = None
        self.binary_corr_weighted_values = None
        # Clustering
        self.node_cluster_mapping = list()
        self.cluster_dict = dict()
        self.fm_edges_dict = dict()
        self.fm_clusters = list()
        self.fm_edges = list()
        self.fm_nodes = list()
def init_lists(self):
s = self.num_of_nodes
self.unary_node_frequency_values = [0 for _ in range(s)]
self.unary_node_frequency_normalized_values = [0.0 for _ in range(s)]
self.binary_edge_frequency_values = [[0 for _ in range(s)] for _ in range(s)]
self.binary_edge_frequency_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_divisors = [[0.0 for _ in range(s)] for _ in range(s)]
self.unary_simple_aggregate_normalized_values = [0.0 for _ in range(s)]
self.binary_simple_aggregate_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_multi_aggregate_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["proximity_correlation_binary"][0]:
self.binary_corr_proximity_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_proximity_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["endpoint_correlation_binary"][0]:
self.binary_corr_endpoint_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_endpoint_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["originator_correlation_binary"][0]:
self.binary_corr_originator_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_originator_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["datatype_correlation_binary"][0]:
self.binary_corr_datatype_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.binary_corr_datatype_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["datavalue_correlation_binary"][0]:
self.binary_corr_datavalue_values = [[0 for _ in range(s)] for _ in range(s)]
self.binary_corr_datavalue_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
if self.metric_settings["routing_significance_unary"][0]:
self.unary_derivative_routing_values = [0 for _ in range(s)]
self.unary_derivative_routing_normalized_values = [0 for _ in range(s)]
if self.metric_settings["distance_significance_binary"][0]:
self.binary_derivative_distance_values = [([0 for _ in range(s)]) for _ in range(s)]
self.binary_derivative_distance_normalized_values = [[0.0 for _ in range(s)] for _ in range(s)]
self.unary_weighted_values = [0 for _ in range(s)]
self.binary_sig_weighted_values = [[0 for _ in range(s)] for _ in range(s)]
self.binary_corr_weighted_values = [[0 for _ in range(s)] for _ in range(s)]
    def apply_config(self, config):
        """Apply *config*: cache the per-metric settings, allocate the metric
        containers, compute all metrics and run the filter pipeline.

        Returns the fm_message produced at the end of the filter chain.
        """
        self.config = config
        metric_configs = self.config.metric_configs
        # metric name -> (include, invert, weight)
        self.metric_settings = dict()
        for conf in metric_configs:
            self.metric_settings[conf.name] = (conf.include, conf.invert, conf.weight)
        self.init_lists()
        self.extract_primary_metrics()
        self.normalize_primary_metrics()
        self.extract_aggregates()
        self.extract_derivative_metrics()
        self.normalize_derivative_metrics()
        self.extract_weighted_metrics()
        return self.apply_filters()
    def apply_filters(self):
        """Entry point of the filter chain: concurrency -> edge -> node."""
        return self.apply_concurrency_filter(self.config.filter_config.concurrency_filter)
    def apply_concurrency_filter(self, concurrency_filter):
        """Run the concurrency filter, then continue with the edge filter."""
        self.config.filter_config.concurrency_filter = concurrency_filter
        self.apply_concurrency_filter_helper(concurrency_filter)
        return self.apply_edge_filter(self.config.filter_config.edge_filter)
def apply_concurrency_filter_helper(self, concurrency_filter):
self.config.filter_config.concurrency_filter = concurrency_filter
self.concurrency_filter_resultant_binary_values = copy.deepcopy(self.binary_sig_weighted_values)
self.concurrency_filter_resultant_binary_corr_values = copy.deepcopy(
self.binary_corr_weighted_values)
if self.config.filter_config.concurrency_filter.filter_concurrency:
sz = self.num_of_nodes
for i in range(0, sz):
for j in range(0, i):
self.process_relation_pair(i, j)
    def apply_edge_filter(self, edge_filter):
        """Run the edge filter, then continue with the node filter."""
        self.config.filter_config.edge_filter = edge_filter
        self.apply_edge_filter_helper(edge_filter)
        return self.apply_node_filter(self.config.filter_config.node_filter)
    def apply_edge_filter_helper(self, edge_filter):
        """Apply either the fuzzy-edge filter (edge_transform == 1) or the
        best-edge filter per node, then zero every edge that was not marked
        in preserve_mask."""
        self.config.filter_config.edge_filter = edge_filter
        self.edge_filter_resultant_binary_values = copy.deepcopy(self.concurrency_filter_resultant_binary_values)
        self.edge_filter_resultant_binary_corr_values = copy.deepcopy(
            self.concurrency_filter_resultant_binary_corr_values)
        sz = self.num_of_nodes
        # preserve_mask[i][j] marks edges that survive the filter
        self.preserve_mask = [[False for x in range(sz)] for y in range(sz)]
        if self.config.filter_config.edge_filter.edge_transform == 1:
            # a preserve of exactly 0.0 would keep nothing; use a tiny epsilon
            if self.config.filter_config.edge_filter.preserve == 0.0:
                self.config.filter_config.edge_filter.preserve = 0.001
            for i in range(0, sz):
                self.process_node_edges_fuzzy_filter(i)
        else:
            for i in range(0, sz):
                self.process_node_edges_best_filter(i)
        # drop everything that was not preserved
        for i in range(0, sz):
            for j in range(0, sz):
                if not self.preserve_mask[i][j]:
                    self.edge_filter_resultant_binary_values[i][j] = 0.0
                    self.edge_filter_resultant_binary_corr_values[i][j] = 0.0
    def apply_node_filter(self, node_filter):
        """Final pipeline stage: run the node filter (which clusters weak
        nodes) and write out the resulting graph; returns self.fm_message."""
        self.config.filter_config.node_filter = node_filter
        self.apply_node_filter_helper(node_filter)
        self.finalize_graph_data()
        # Vizualization here since we have all graph data
        # graph_path = self.visualize(self.fm_nodes, self.fm_edges, self.fm_clusters)
        # self.fm_message.graph_path = graph_path
        return self.fm_message
def apply_node_filter_helper(self, node_filter):
self.config.filter_config.node_filter = node_filter
self.node_filter_resultant_unary_values = copy.deepcopy(self.unary_weighted_values)
self.node_filter_resultant_binary_values = copy.deepcopy(self.edge_filter_resultant_binary_values)
self.node_filter_resultant_binary_corr_values = copy.deepcopy(self.edge_filter_resultant_binary_corr_values)
self.clusterize()
def finalize_graph_data(self):
    """Write the final nodes, clusters and edges to a timestamped CSV file.

    The file lives under ``Results/Fuzzy_Miner/<timestamp>/`` next to this
    module and is named after the last path component of ``self.path``.
    Sets ``self.filename`` and ``self.full_path`` as side effects.

    Fix: removed the leftover debug ``print`` statements and the no-op
    self-assignments (``self.fm_nodes = self.fm_nodes`` etc.).
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    dir_path = dir_path.replace("\\", "/") + '/Results/Fuzzy_Miner/' + datetime.now().strftime(
        "%d_%m_%Y_%H_%M_%S") + "/"
    self.filename = self.path.split('/')[-1]
    self.full_path = os.path.join(dir_path, f"{self.filename}.csv")
    os.makedirs(os.path.dirname(self.full_path), exist_ok=True)
    with open(self.full_path, 'w') as result:
        result.write('type,id,significance,from,to\n')
        # Primitive nodes: label up to the first '@' (drops lifecycle part).
        for node in self.fm_nodes:
            result.write(f'n,{node.label.split("@")[0]},{node.significance:.2f}\n')
        # Clusters carry their index so labels stay unique.
        for cluster in self.fm_clusters:
            result.write(f'c,{cluster.label.split("@")[0]}_{cluster.index},{cluster.significance:.2f}\n')
        for i, edge in enumerate(self.fm_edges):
            # Resolve both endpoints among primitive nodes and clusters.
            s = next(x for x in self.fm_nodes + self.fm_clusters if x.index == edge.source)
            t = next(x for x in self.fm_nodes + self.fm_clusters if x.index == edge.target)
            s_label = s.label.split("@")[0]
            t_label = t.label.split('@')[0]
            if s.node_type == 'cluster':
                s_label += f'_{s.index}'
            if t.node_type == 'cluster':
                t_label += f'_{t.index}'
            result.write(f'e,e{i},{edge.significance:.2f},{s_label},'
                         f'{t_label}\n')
def process_node_edges_fuzzy_filter(self, idx):
    """Mark, in ``preserve_mask``, the incoming/outgoing edges of node *idx*
    whose utility (sc_ratio-weighted mix of significance and correlation)
    lies within the top ``preserve`` fraction of the local value range."""
    sz = self.num_of_nodes
    # NOTE(review): sys.float_info.min is the smallest POSITIVE double, not
    # -inf; this works here because utilities are non-negative — confirm.
    min_in_val = sys.float_info.max
    max_in_val = sys.float_info.min
    min_out_val = sys.float_info.max
    max_out_val = sys.float_info.min
    in_values = [0.0 for _ in range(self.num_of_nodes)]
    out_values = [0.0 for _ in range(self.num_of_nodes)]
    ignore_self_loops = self.config.filter_config.edge_filter.ignore_self_loops
    sc_ratio = self.config.filter_config.edge_filter.sc_ratio
    for i in range(0, sz):
        if ignore_self_loops and i == idx:
            continue
        # Incoming edge i -> idx: blend significance and correlation.
        significance = self.concurrency_filter_resultant_binary_values[i][idx]
        if significance > 0.0:
            correlation = self.concurrency_filter_resultant_binary_corr_values[i][idx]
            in_values[i] = significance * sc_ratio + correlation * (1.0 - sc_ratio)
            if in_values[i] > max_in_val:
                max_in_val = in_values[i]
            if in_values[i] < min_in_val:
                min_in_val = in_values[i]
        else:
            in_values[i] = 0.0
        # Outgoing edge idx -> i: same blend in the other direction.
        significance = self.concurrency_filter_resultant_binary_values[idx][i]
        if significance > 0.0:
            correlation = self.concurrency_filter_resultant_binary_corr_values[idx][i]
            out_values[i] = significance * sc_ratio + correlation * (1.0 - sc_ratio)
            if out_values[i] > max_out_val:
                max_out_val = out_values[i]
            if out_values[i] < min_out_val:
                min_out_val = out_values[i]
        else:
            out_values[i] = 0.0
    # "Absolute" interpretation: use one shared range for both directions.
    if self.config.filter_config.edge_filter.interpret_abs:
        max_in_val = max(max_in_val, max_out_val)
        max_out_val = max_in_val
        min_in_val = min(min_in_val, min_out_val)
        min_out_val = min_in_val
    # preserve == 1.0 keeps everything (limit == min); 0 keeps only the max.
    in_limit = max_in_val - (max_in_val - min_in_val) * self.config.filter_config.edge_filter.preserve
    out_limit = max_out_val - (max_out_val - min_out_val) * self.config.filter_config.edge_filter.preserve
    for i in range(0, sz):
        if ignore_self_loops and i == idx:
            continue
        if in_values[i] >= in_limit:
            self.preserve_mask[i][idx] = True
        if out_values[i] >= out_limit:
            self.preserve_mask[idx][i] = True
def process_node_edges_best_filter(self, idx):
    """Preserve only the single most significant incoming and outgoing edge
    of node *idx* (the "best edge" filter)."""
    total = self.num_of_nodes
    skip_self = self.config.filter_config.edge_filter.ignore_self_loops
    sig = self.concurrency_filter_resultant_binary_values
    top_pre, top_pre_sig = -1, 0.0
    top_succ, top_succ_sig = -1, 0.0
    for other in range(total):
        if skip_self and other == idx:
            continue
        if sig[other][idx] > top_pre_sig:
            top_pre_sig, top_pre = sig[other][idx], other
        if sig[idx][other] > top_succ_sig:
            top_succ_sig, top_succ = sig[idx][other], other
    # -1 means no edge with positive significance was found.
    if top_pre >= 0:
        self.preserve_mask[top_pre][idx] = True
    if top_succ >= 0:
        self.preserve_mask[idx][top_succ] = True
def process_relation_pair(self, x, y):
    """Resolve a bidirectional (concurrent) relation between nodes x and y.

    If both directions are significant: keep both when both relative
    importances exceed ``preserve``; otherwise drop the weaker direction
    when their ratio is below ``offset``, or drop both directions when
    they are too balanced to order.
    """
    sig_fwd = self.binary_sig_weighted_values[x][y]
    sig_bwd = self.binary_sig_weighted_values[y][x]
    if sig_fwd > 0.0 and sig_bwd > 0.0:
        rel_imp_AB = self.get_relative_imp(x, y)
        rel_imp_BA = self.get_relative_imp(y, x)
        if rel_imp_AB > self.config.filter_config.concurrency_filter.preserve and rel_imp_BA > self.config.filter_config.concurrency_filter.preserve:
            # Both directions are important enough: genuine two-way relation.
            return
        else:
            # ratio in (0, 1]: how balanced the two directions are.
            ratio = min(rel_imp_AB, rel_imp_BA) / max(rel_imp_AB, rel_imp_BA)
            if ratio < self.config.filter_config.concurrency_filter.offset:
                # Clearly one-sided: erase only the weaker direction.
                if rel_imp_AB > rel_imp_BA:
                    self.concurrency_filter_resultant_binary_values[y][x] = 0.0
                    self.concurrency_filter_resultant_binary_corr_values[y][x] = 0.0
                else:
                    self.concurrency_filter_resultant_binary_values[x][y] = 0.0
                    self.concurrency_filter_resultant_binary_corr_values[x][y] = 0.0
            else:
                # Balanced but weak: treat as concurrency and erase both.
                self.concurrency_filter_resultant_binary_values[x][y] = 0.0
                self.concurrency_filter_resultant_binary_corr_values[x][y] = 0.0
                self.concurrency_filter_resultant_binary_values[y][x] = 0.0
                self.concurrency_filter_resultant_binary_corr_values[y][x] = 0.0
def get_relative_imp(self, x, y):
    """Relative importance of edge x→y: its significance as a fraction of
    x's total outgoing plus y's total incoming significance."""
    weights = self.binary_sig_weighted_values
    n = self.num_of_nodes
    edge_sig = weights[x][y]
    out_total = sum(weights[x][i] for i in range(n) if i != x)
    in_total = sum(weights[i][y] for i in range(n) if i != y)
    return (edge_sig / out_total) + (edge_sig / in_total)
def parse_log(self, log):
    """Parse an XES event log (XML string) into a list of traces.

    Each trace is returned as a list of event dicts mapping attribute keys
    (e.g. ``concept:name``) to their string values.

    Fixes: removed the no-op ``event_dict['concept:name'] = ...`` line
    (which raised KeyError for events without a name), replaced
    ``type(x) == list`` with ``isinstance``, and factored the duplicated
    attribute-collection loop into a local helper.
    """
    data_types = ['string', 0, 'date', 0.0, 'boolean', 'id']

    def collect_attributes(element):
        # XES groups attributes by data type; each group is either one
        # {'@key': ..., '@value': ...} dict or a list of them.
        attributes = {}
        for data_type in data_types:
            if data_type in element:
                entries = element[data_type]
                if isinstance(entries, list):
                    for entry in entries:
                        attributes[entry['@key']] = entry['@value']
                else:
                    attributes[entries['@key']] = entries['@value']
        return attributes

    log = xmltodict.parse(log)
    log = loads(dumps(log))
    traces = []
    for trace in log['log']['trace']:
        # A single-event trace parses as a dict rather than a list.
        if isinstance(trace['event'], dict):
            trace['event'] = [trace['event']]
        trace_events = [collect_attributes(event) for event in trace['event']]
        traces.append(trace_events)
    return traces
def extract_node_info(self):
    """Assign a dense index to every distinct "name@transition" event class
    in the log; populates node_indices, num_of_nodes and nodes."""
    self.node_indices = {}
    for trace in self.log:
        for event in trace:
            label = event['concept:name'] + "@" + event['lifecycle:transition']
            if label not in self.node_indices:
                # Indices are assigned in first-seen order.
                self.node_indices[label] = len(self.node_indices)
    self.num_of_nodes = len(self.node_indices)
    self.nodes = list(self.node_indices)
def extract_aggregates(self):
    """Compute the aggregate matrices required by the derivative metrics."""
    distance_enabled = self.metric_settings["distance_significance_binary"][0]
    routing_enabled = self.metric_settings["routing_significance_unary"][0]
    if distance_enabled:
        self.cal_unary_simple_aggregate()
        self.cal_binary_simple_aggregate()
    if routing_enabled:
        if not distance_enabled:
            # The binary simple aggregate was not built above; build it now.
            self.cal_binary_simple_aggregate()
        self.cal_binary_multi_aggregate()
def extract_derivative_metrics(self):
    """Compute routing (unary) and distance (binary) derivatives for the
    metrics that are enabled in metric_settings."""
    for key, compute in (("routing_significance_unary", self.cal_unary_derivative),
                         ("distance_significance_binary", self.cal_binary_derivative)):
        if self.metric_settings[key][0]:
            compute()
def extract_primary_metrics(self):
    """Scan the log once and accumulate the primary metrics.

    For every event, increments its node frequency and — for each of up to
    ``config.maximal_distance`` preceding events in the same trace — adds an
    attenuated contribution to the edge-frequency matrix, to every enabled
    binary correlation matrix, and to the shared correlation divisors.

    Fix: removed the unconditional debug ``print`` inside the inner loop
    (it ran once per event pair) and the commented-out debug lines.
    """
    max_look_back = self.config.maximal_distance
    for trace in self.log:
        look_back = list()
        look_back_indices = list()
        for event in trace:
            follower_event = event
            follower_index = self.node_indices[
                follower_event['concept:name'] + "@" + follower_event['lifecycle:transition']]
            look_back.insert(0, follower_event)
            look_back_indices.insert(0, follower_index)
            # Keep only the current event plus max_look_back predecessors.
            if len(look_back) > (max_look_back + 1):
                look_back.pop(max_look_back + 1)
                look_back_indices.pop(max_look_back + 1)
            self.unary_node_frequency_values[follower_index] += 1
            for k in range(1, len(look_back)):
                ref_event = look_back[k]
                ref_index = look_back_indices[k]
                # Contributions decay with the distance k between events.
                att_factor = self.config.attenuation.get_attenuation_factor(k)
                self.binary_edge_frequency_values[ref_index][follower_index] += att_factor
                if self.metric_settings["proximity_correlation_binary"][0]:
                    self.binary_corr_proximity_values[ref_index][follower_index] += self.cal_proximity_correlation(
                        ref_event, follower_event) * att_factor
                if self.metric_settings["endpoint_correlation_binary"][0]:
                    self.binary_corr_endpoint_values[ref_index][follower_index] += self.cal_endpoint_correlation(
                        ref_event, follower_event) * att_factor
                if self.metric_settings["originator_correlation_binary"][0]:
                    self.binary_corr_originator_values[ref_index][follower_index] += self.cal_originator_correlation(
                        ref_event, follower_event) * att_factor
                if self.metric_settings["datatype_correlation_binary"][0]:
                    self.binary_corr_datatype_values[ref_index][follower_index] += self.cal_datatype_correlation(
                        ref_event, follower_event) * att_factor
                if self.metric_settings["datavalue_correlation_binary"][0]:
                    self.binary_corr_datavalue_values[ref_index][follower_index] += self.cal_datavalue_correlation(
                        ref_event, follower_event) * att_factor
                self.binary_corr_divisors[ref_index][follower_index] += att_factor
def extract_weighted_metrics(self):
    """Combine the normalized metrics into the weighted unary/binary views."""
    for compute in (self.cal_weighted_unary_values,
                    self.cal_weighted_binary_values,
                    self.cal_weighted_binary_corr_values):
        compute()
def cal_weighted_unary_values(self):
    """Sum the enabled, non-empty normalized unary metrics into
    unary_weighted_values and renormalize the result to [0, 1]."""
    candidates = (
        ("frequency_significance_unary", self.unary_node_frequency_normalized_values),
        ("routing_significance_unary", self.unary_derivative_routing_normalized_values),
    )
    for name, metric in candidates:
        setting = self.metric_settings[name]
        # A metric contributes only if enabled, positively weighted and
        # not all-zero.
        if setting[0] and setting[2] > 0.0 and self.is_valid_matrix1D(metric):
            for i in range(self.num_of_nodes):
                self.unary_weighted_values[i] += metric[i]
    self.unary_weighted_values = self.normalize_matrix1D(self.unary_weighted_values)
def cal_weighted_binary_values(self):
    """Sum the enabled, non-empty normalized binary significance metrics and
    renormalize into binary_sig_weighted_values."""
    candidates = (
        ("frequency_significance_binary", self.binary_edge_frequency_normalized_values),
        ("distance_significance_binary", self.binary_derivative_distance_normalized_values),
    )
    n = self.num_of_nodes
    for name, metric in candidates:
        setting = self.metric_settings[name]
        # Enabled, positively weighted and not all-zero.
        if setting[0] and setting[2] > 0.0 and self.is_valid_matrix2D(metric):
            for i in range(n):
                for j in range(n):
                    self.binary_sig_weighted_values[i][j] += metric[i][j]
    self.binary_sig_weighted_values = self.normalize_matrix2D(self.binary_sig_weighted_values)
def cal_weighted_binary_corr_values(self):
    """Sum the enabled, non-empty normalized binary correlation metrics and
    renormalize into binary_corr_weighted_values.

    Fix: the endpoint and originator matrices were previously gated by each
    other's enable-flag and weight (inc2/w2 = originator settings applied to
    the endpoint matrix, and vice versa); every matrix is now paired with
    its own metric settings, matching cal_binary_multi_aggregate.
    """
    candidates = (
        ("proximity_correlation_binary", self.binary_corr_proximity_normalized_values),
        ("endpoint_correlation_binary", self.binary_corr_endpoint_normalized_values),
        ("originator_correlation_binary", self.binary_corr_originator_normalized_values),
        ("datatype_correlation_binary", self.binary_corr_datatype_normalized_values),
        ("datavalue_correlation_binary", self.binary_corr_datavalue_normalized_values),
    )
    n = self.num_of_nodes
    for name, metric in candidates:
        setting = self.metric_settings[name]
        # Enabled, positively weighted and not all-zero.
        if setting[0] and setting[2] > 0.0 and self.is_valid_matrix2D(metric):
            for i in range(n):
                for j in range(n):
                    self.binary_corr_weighted_values[i][j] += metric[i][j]
    self.binary_corr_weighted_values = self.normalize_matrix2D(self.binary_corr_weighted_values)
def normalize_primary_metrics(self):
    """Normalize the raw primary metrics into their *_normalized_values
    counterparts, applying each metric's configured invert flag and weight.

    Fix: the endpoint and originator correlation matrices were previously
    normalized with each other's settings (inc2/inv2/w2 = originator
    settings applied to endpoint values, and vice versa); each matrix now
    uses its own metric settings.
    """
    self.unary_node_frequency_normalized_values = self.weight_normalize1D(
        self.unary_node_frequency_values,
        self.metric_settings["frequency_significance_unary"][1],
        self.metric_settings["frequency_significance_unary"][2])
    self.binary_edge_frequency_normalized_values = self.weight_normalize2D(
        self.binary_edge_frequency_values,
        self.metric_settings["frequency_significance_binary"][1],
        self.metric_settings["frequency_significance_binary"][2])
    correlation_metrics = (
        ("proximity_correlation_binary", self.binary_corr_proximity_values,
         "binary_corr_proximity_normalized_values"),
        ("endpoint_correlation_binary", self.binary_corr_endpoint_values,
         "binary_corr_endpoint_normalized_values"),
        ("originator_correlation_binary", self.binary_corr_originator_values,
         "binary_corr_originator_normalized_values"),
        ("datatype_correlation_binary", self.binary_corr_datatype_values,
         "binary_corr_datatype_normalized_values"),
        ("datavalue_correlation_binary", self.binary_corr_datavalue_values,
         "binary_corr_datavalue_normalized_values"),
    )
    for name, values, target in correlation_metrics:
        enabled, invert, weight = self.metric_settings[name][0], self.metric_settings[name][1], self.metric_settings[name][2]
        if enabled:
            # Correlation metrics are frequency-compensated before scaling.
            setattr(self, target, self.special_weight_normalize2D(
                values, self.binary_corr_divisors, invert, weight))
def normalize_derivative_metrics(self):
    """Normalize the routing/distance derivative metrics when enabled."""
    routing = self.metric_settings["routing_significance_unary"]
    if routing[0]:
        self.unary_derivative_routing_normalized_values = self.weight_normalize1D(
            self.unary_derivative_routing_values, routing[1], routing[2])
    distance_sig = self.metric_settings["distance_significance_binary"]
    if distance_sig[0]:
        self.binary_derivative_distance_normalized_values = self.weight_normalize2D(
            self.binary_derivative_distance_values, distance_sig[1], distance_sig[2])
def normalize_matrix1D(self, lst):
    """Scale a 1-D list so its maximum becomes 1.0; a zero-max list is
    returned unchanged (same object)."""
    peak = max(lst)
    if peak == 0:
        return lst
    return [value / peak for value in lst]
def normalize_matrix2D(self, lst):
    """Scale a square 2-D list so its global maximum becomes 1.0; a
    zero-max matrix is returned unchanged (same object)."""
    peak = max(map(max, lst))
    if peak == 0:
        return lst
    size = len(lst[0])
    return [[lst[i][j] / peak for j in range(size)] for i in range(size)]
def cal_unary_derivative(self):
    """Routing significance per node: |in - out| / (in + out) over the
    products of the simple and multi aggregate matrices (0 when both
    totals are zero)."""
    n = self.num_of_nodes
    simple = self.binary_simple_aggregate_normalized_values
    multi = self.binary_multi_aggregate_normalized_values
    for node in range(n):
        inbound = sum(simple[other][node] * multi[other][node]
                      for other in range(n) if other != node)
        outbound = sum(simple[node][other] * multi[node][other]
                       for other in range(n) if other != node)
        if inbound == 0.0 and outbound == 0.0:
            self.unary_derivative_routing_values[node] = 0.0
        else:
            self.unary_derivative_routing_values[node] = abs(
                (inbound - outbound) / (inbound + outbound))
def cal_binary_derivative(self):
    """Distance significance per edge (i, j): 1 minus the normalized
    shortfall of the link relative to its endpoints' significances.
    Pairs whose endpoint significances sum to zero are left untouched."""
    n = self.num_of_nodes
    unary = self.unary_simple_aggregate_normalized_values
    links = self.binary_simple_aggregate_normalized_values
    for i in range(n):
        source_sig = unary[i]
        for j in range(n):
            target_sig = unary[j]
            denominator = source_sig + target_sig
            if denominator == 0:
                continue
            link = links[i][j]
            self.binary_derivative_distance_values[i][j] = 1.0 - (
                (source_sig - link) + (target_sig - link)) / denominator
def cal_binary_multi_aggregate(self):
    """Sum every enabled, non-empty normalized correlation matrix cell-wise
    into binary_multi_aggregate_normalized_values and rescale the result
    so the maximum cell is 1.0. No-op when no metric qualifies."""
    inc1 = self.metric_settings["proximity_correlation_binary"][0]
    inc2 = self.metric_settings["endpoint_correlation_binary"][0]
    inc3 = self.metric_settings["originator_correlation_binary"][0]
    inc4 = self.metric_settings["datatype_correlation_binary"][0]
    inc5 = self.metric_settings["datavalue_correlation_binary"][0]
    valid_metrics = list()
    # A metric participates only when enabled AND not all-zero.
    if inc1 and self.is_valid_matrix2D(self.binary_corr_proximity_normalized_values):
        valid_metrics.append(self.binary_corr_proximity_normalized_values)
    if inc2 and self.is_valid_matrix2D(self.binary_corr_endpoint_normalized_values):
        valid_metrics.append(self.binary_corr_endpoint_normalized_values)
    if inc3 and self.is_valid_matrix2D(self.binary_corr_originator_normalized_values):
        valid_metrics.append(self.binary_corr_originator_normalized_values)
    if inc4 and self.is_valid_matrix2D(self.binary_corr_datatype_normalized_values):
        valid_metrics.append(self.binary_corr_datatype_normalized_values)
    if inc5 and self.is_valid_matrix2D(self.binary_corr_datavalue_normalized_values):
        valid_metrics.append(self.binary_corr_datavalue_normalized_values)
    temp_max = 0
    if len(valid_metrics) > 0:
        sz = self.num_of_nodes
        for i in range(0, sz):
            for j in range(0, sz):
                aggregated = 0.0
                for k in range(0, len(valid_metrics)):
                    aggregated += valid_metrics[k][i][j]
                self.binary_multi_aggregate_normalized_values[i][j] = aggregated
                if aggregated > temp_max:
                    temp_max = aggregated
        # temp_max > 0 implies the branch above ran, so sz is defined here.
        if temp_max > 0:
            for i in range(0, sz):
                for j in range(0, sz):
                    self.binary_multi_aggregate_normalized_values[i][j] *= (1 / temp_max)
    return
def compensate_frequency(self, values, divisors):
    """Divide each cell by its accumulated attenuation divisor so heavily
    sampled edges don't dominate; cells with a zero divisor pass through."""
    size = len(values[0])
    return [[values[i][j] / divisors[i][j] if divisors[i][j] > 0.0 else values[i][j]
             for j in range(size)]
            for i in range(size)]
def weight_normalize1D(self, lst, invert, normalize_max):
    """Scale *lst* so its maximum equals *normalize_max*; with *invert*
    each value becomes (normalize_max - scaled value).

    A zero *normalize_max* yields an all-zero list. When the input maximum
    is not positive, the list is returned unscaled — mutated in place if
    *invert* is set (mirrors the 2-D variant's behavior).
    """
    size = len(lst)
    if normalize_max == 0:
        return [0.0] * size
    peak = max(lst)
    if peak > 0.0:
        result = []
        for value in lst:
            scaled = (value * normalize_max) / peak
            result.append(normalize_max - scaled if invert else scaled)
        return result
    if invert:
        for i in range(size):
            lst[i] = normalize_max - lst[i]
    return lst
def weight_normalize2D(self, lst, invert, normalize_max):
    """2-D version of weight_normalize1D: scale a square matrix so its
    global maximum equals *normalize_max*, optionally inverted.

    A zero *normalize_max* yields an all-zero matrix; a non-positive input
    maximum returns the matrix unscaled (mutated in place when inverting).
    """
    size = len(lst[0])
    if normalize_max == 0:
        return [[0.0] * size for _ in range(size)]
    peak = max(map(max, lst))
    if peak > 0.0:
        normalized = []
        for row in range(size):
            new_row = []
            for col in range(size):
                scaled = (lst[row][col] * normalize_max) / peak
                new_row.append(normalize_max - scaled if invert else scaled)
            normalized.append(new_row)
        return normalized
    if invert:
        for row in range(size):
            for col in range(size):
                lst[row][col] = normalize_max - lst[row][col]
    return lst
def is_valid_matrix2D(self, lst):
    """True if any cell of the (square) matrix is strictly positive."""
    size = len(lst[0])
    return any(lst[i][j] > 0.0 for i in range(size) for j in range(size))
def is_valid_matrix1D(self, lst):
    """True if any entry of the list is strictly positive."""
    return any(value > 0.0 for value in lst)
def special_weight_normalize2D(self, values, divisors, invert, normalize_max):
    """Frequency-compensate *values* by *divisors*, then scale like
    weight_normalize2D so the global maximum equals *normalize_max*."""
    size = len(values[0])
    if normalize_max == 0:
        return [[0.0] * size for _ in range(size)]
    compensated = self.compensate_frequency(values, divisors)
    peak = max(map(max, compensated))
    if peak > 0.0:
        normalized = []
        for row in range(size):
            new_row = []
            for col in range(size):
                scaled = (compensated[row][col] * normalize_max) / peak
                new_row.append(normalize_max - scaled if invert else scaled)
            normalized.append(new_row)
        return normalized
    if invert:
        for row in range(size):
            for col in range(size):
                compensated[row][col] = normalize_max - compensated[row][col]
    return compensated
def cal_proximity_correlation(self, evt1, evt2):
    """Temporal proximity of two events: 1.0 for identical timestamps,
    1/(delta in milliseconds) otherwise, 0.0 when either timestamp is
    missing or None."""
    if 'time:timestamp' not in evt1 or 'time:timestamp' not in evt2:
        return 0.0
    first = evt1['time:timestamp']
    second = evt2['time:timestamp']
    if first is None or second is None:
        return 0.0
    millis1 = first.timestamp() * 1000
    millis2 = second.timestamp() * 1000
    if millis1 == millis2:
        return 1.0
    # NOTE(review): a negative delta yields a negative value — presumably
    # evt1 always precedes evt2 in the look-back buffer; confirm.
    return 1.0 / (millis2 - millis1)
def cal_endpoint_correlation(self, evt1, evt2):
    """Similarity of the two events' names: (L - edit_distance) / L where
    L is the longer name's length; 1.0 when both names are empty."""
    first = str(evt1['concept:name'] if 'concept:name' in evt1 else "<no name>")
    second = str(evt2['concept:name'] if 'concept:name' in evt2 else "<no name>")
    edit_dist = distance(first, second)
    longest = max(len(first), len(second))
    if longest == 0:
        return 1.0
    return (longest - edit_dist) / longest
def cal_originator_correlation(self, evt1, evt2):
    """Similarity of the two events' originators ('org:resource'):
    (L - edit_distance) / L over the longer resource string; 1.0 when both
    are empty.

    Fix: the resources are coerced with str() before len() — the edit
    distance already used str(), so a non-string resource value (e.g. an
    int) previously raised TypeError. Now consistent with
    cal_endpoint_correlation.
    """
    first = str(evt1['org:resource'] if 'org:resource' in evt1 else "<no resource>")
    second = str(evt2['org:resource'] if 'org:resource' in evt2 else "<no resource>")
    edit_dist = distance(first, second)
    longest = max(len(first), len(second))
    if longest == 0:
        return 1.0
    return (longest - edit_dist) / longest
def cal_datatype_correlation(self, evt1, evt2):
    """Fraction of evt1's non-standard attribute keys that also appear in
    evt2; 0 when either event has no non-standard keys."""
    ref_keys = [key for key in evt1 if not self.is_standard_key(key)]
    fol_keys = [key for key in evt2 if not self.is_standard_key(key)]
    if not ref_keys or not fol_keys:
        return 0
    shared = sum(1 for key in ref_keys if key in fol_keys)
    return shared / len(ref_keys)
def cal_datavalue_correlation(self, evt1, evt2):
    """Average string similarity of the values stored under the
    non-standard keys shared by both events; 0 when either event has no
    non-standard keys or no keys overlap."""
    ref_data_keys = list()
    fol_data_keys = list()
    for key in evt1:
        if not self.is_standard_key(key):
            ref_data_keys.append(key)
    for key in evt2:
        if not self.is_standard_key(key):
            fol_data_keys.append(key)
    if (len(ref_data_keys) == 0) or (len(fol_data_keys) == 0):
        return 0
    key_overlap = 0
    val_overlap = 0
    for key in ref_data_keys:
        if key in fol_data_keys:
            key_overlap += 1
            # NOTE(review): siblings (cal_endpoint_correlation etc.) call
            # the module-level `distance`; this uses `self.distance` —
            # confirm the class defines such a method, otherwise this
            # raises AttributeError when shared keys exist.
            dist = self.distance(str(evt1[key]), str(evt2[key]))
            big_str_len = max(len(str(evt1[key])), len(str(evt2[key])))
            if big_str_len == 0:
                # Both values empty: count as a perfect match.
                val_overlap += 1.0
            else:
                val_overlap += (big_str_len - dist) / big_str_len
    if key_overlap == 0:
        return 0.0
    else:
        return val_overlap / key_overlap
def cal_unary_simple_aggregate(self):
    """Copy the normalized node frequencies into the unary simple aggregate
    and rescale so the maximum entry becomes 1.0 (no-op when the input is
    all-zero)."""
    source = self.unary_node_frequency_normalized_values
    if not self.is_valid_matrix1D(source):
        return
    size = len(source)
    peak = 0
    for i in range(size):
        self.unary_simple_aggregate_normalized_values[i] = source[i]
        peak = max(peak, source[i])
    if peak > 0:
        for i in range(size):
            self.unary_simple_aggregate_normalized_values[i] *= (1 / peak)
def cal_binary_simple_aggregate(self):
    """Copy the normalized edge frequencies into the binary simple aggregate
    and rescale so the maximum cell becomes 1.0 (no-op when the input is
    all-zero)."""
    source = self.binary_edge_frequency_normalized_values
    if not self.is_valid_matrix2D(source):
        return
    n = self.num_of_nodes
    peak = 0
    for i in range(n):
        for j in range(n):
            self.binary_simple_aggregate_normalized_values[i][j] = source[i][j]
            peak = max(peak, source[i][j])
    if peak > 0:
        for i in range(n):
            for j in range(n):
                self.binary_simple_aggregate_normalized_values[i][j] *= (1 / peak)
def is_standard_key(self, key):
    """True if *key* contains any reserved XES namespace fragment
    (substring match, e.g. 'downtime' matches 'time')."""
    return any(tag in key for tag in ("concept", "lifecycle", "org", "time", "semantic"))
def clusterize(self):
    """Rebuild the fuzzy model's nodes, clusters and edges.

    Pipeline: (1) nodes below the node-filter cut-off ("victims") are
    absorbed into clusters around their most correlated neighbor;
    (2) clusters are merged into their preferred neighbor cluster;
    (3) isolated clusters are removed; (4) singleton clusters are
    dissolved, possibly reconnecting their neighbors directly;
    (5) cluster significances, fm_nodes and fm_edges are produced.
    """
    # Identity mapping: node i initially maps to itself (-1 later = removed;
    # values > num_of_nodes refer to cluster ids).
    self.node_cluster_mapping = [i for i in range(0, self.num_of_nodes)]
    self.cluster_dict.clear()
    self.fm_edges_dict.clear()
    self.fm_clusters.clear()
    self.fm_nodes.clear()
    self.fm_edges.clear()
    # Phase 1: collect victims (nodes too weak to stand alone).
    victims = list()
    for i in range(0, self.num_of_nodes):
        if self.node_filter_resultant_unary_values[i] < self.config.filter_config.node_filter.cut_off:
            victims.append(i)
    # Cluster ids live strictly above the node index space.
    cluster_idx = self.num_of_nodes + 1
    for i in range(0, len(victims)):
        if victims[i] == -1:
            # Already consumed by an earlier cluster.
            continue
        neighbor = self.most_correlated_neighbor(victims[i])
        if neighbor >= self.num_of_nodes:
            # Neighbor is itself (mapped to) a cluster: join it.
            self.cluster_dict[neighbor].add_node(victims[i])
            self.node_cluster_mapping[victims[i]] = neighbor
            victims[i] = -1
        else:
            # Open a fresh cluster seeded with this victim.
            cluster = Cluster(cluster_idx)
            self.cluster_dict[cluster_idx] = cluster
            cluster.add_node(victims[i])
            self.node_cluster_mapping[victims[i]] = cluster_idx
            victims[i] = -1
            # If the neighbor is also a victim, pull it in right away.
            if neighbor in victims:
                cluster.add_node(neighbor)
                self.node_cluster_mapping[neighbor] = cluster_idx
                victims[victims.index(neighbor)] = -1
            cluster_idx += 1
            self.fm_clusters.append(cluster)
    # Phase 2: merge each cluster into its preferred neighbor cluster,
    # when one exists; the merged (loser) cluster disappears.
    cluster_size = len(self.fm_clusters)
    idx = 0
    while idx < cluster_size:
        target = self.get_preferred_merge_target(self.fm_clusters[idx].index)
        if target is not None:
            self.merge_with(target, self.fm_clusters[idx].index)
            self.cluster_dict.pop(self.fm_clusters[idx].index)
            self.fm_clusters.remove(self.fm_clusters[idx])
            cluster_size -= 1
        else:
            idx += 1
    # Phase 3: drop clusters with no predecessors and no successors;
    # their primitives are removed from the model (mapping -1).
    cluster_size = len(self.fm_clusters)
    idx = 0
    while idx < cluster_size:
        cluster = self.fm_clusters[idx]
        pre_set = self.get_predecessors_of_cluster(cluster.index)
        succ_set = self.get_successors_of_cluster(cluster.index)
        if len(pre_set) == 0 and len(succ_set) == 0:
            for prim_index in cluster.get_primitives():
                self.node_cluster_mapping[prim_index] = -1
            self.cluster_dict.pop(cluster.index)
            self.fm_clusters.remove(cluster)
            cluster_size -= 1
        else:
            idx += 1
    # Phase 4: dissolve singleton clusters, bridging their neighbors
    # directly where possible.
    cls_sz = len(self.fm_clusters)
    idx = 0
    while idx < cls_sz:
        cluster = self.fm_clusters[idx]
        if len(cluster.get_primitives()) == 1:
            self.check_for_direct_connection(cluster)
            self.cluster_dict.pop(cluster.index)
            self.fm_clusters.remove(cluster)
            cls_sz -= 1
        else:
            idx += 1
    # Phase 5a: cluster significance = mean of its primitives' significances.
    for cluster in self.fm_clusters:
        primitive_indices = cluster.get_primitives()
        primitive_significances = [self.node_filter_resultant_unary_values[idx] for idx in
                                   primitive_indices]
        cluster.significance = sum(primitive_significances) / len(primitive_significances)
    # Phase 5b: surviving self-mapped nodes become primitive graph nodes.
    for i in range(0, self.num_of_nodes):
        if self.node_cluster_mapping[i] != -1 and self.node_cluster_mapping[i] < self.num_of_nodes:
            self.fm_nodes.append(Node(i, self.nodes[i],
                                      self.node_filter_resultant_unary_values[i]))
    # Phase 5c: project node-level edges onto the node/cluster id space,
    # keeping the strongest significance per (source, target) pair.
    for i in range(0, self.num_of_nodes):
        if self.node_cluster_mapping[i] != -1:
            for j in range(0, self.num_of_nodes):
                significance = self.node_filter_resultant_binary_values[i][j]
                correlation = self.node_filter_resultant_binary_corr_values[i][j]
                if significance > 0.0:
                    if i == j:
                        # Self-loop: kept only for primitive (non-clustered) nodes.
                        mapped_idx = self.node_cluster_mapping[i]
                        if mapped_idx != -1:
                            if mapped_idx < self.num_of_nodes:
                                if (i, j) in self.fm_edges_dict.keys():
                                    if self.fm_edges_dict[(i, j)].significance < significance:
                                        self.fm_edges_dict[(i, j)].significance = significance
                                        self.fm_edges_dict[(i, j)].correlation = correlation
                                else:
                                    self.fm_edges_dict[(i, j)] = Edge(i, j, significance, correlation)
                    else:
                        mapped_i = self.node_cluster_mapping[i]
                        mapped_j = self.node_cluster_mapping[j]
                        if mapped_i == -1 or mapped_j == -1:
                            continue
                        else:
                            # Edges inside one cluster vanish.
                            if mapped_i == mapped_j:
                                continue
                            else:
                                if (mapped_i, mapped_j) in self.fm_edges_dict.keys():
                                    if self.fm_edges_dict[(mapped_i, mapped_j)].significance < significance:
                                        self.fm_edges_dict[(mapped_i, mapped_j)].significance = significance
                                        self.fm_edges_dict[(mapped_i, mapped_j)].correlation = correlation
                                else:
                                    self.fm_edges_dict[(mapped_i, mapped_j)] = Edge(mapped_i, mapped_j,
                                                                                    significance, correlation)
    for key, value in self.fm_edges_dict.items():
        self.fm_edges.append(value)
def merge_with(self, winner_index, loser_index):
    """Move every primitive node of the loser cluster into the winner and
    update the node→cluster mapping accordingly."""
    winner = self.cluster_dict[winner_index]
    for primitive in self.cluster_dict[loser_index].get_primitives():
        winner.add_node(primitive)
        self.node_cluster_mapping[primitive] = winner_index
def get_preferred_merge_target(self, index):
    """Pick the neighboring CLUSTER this cluster should merge into.

    Considers the most correlated predecessor and successor clusters; a
    non-cluster (primitive) neighbor on either side disqualifies that whole
    side — and for successors, aborts with the predecessor result.
    Returns a cluster index or None.
    """
    pre_target = None
    succ_target = None
    max_pre_corr = 0.0
    max_succ_corr = 0.0
    predecessors = self.get_predecessors_of_cluster(index)
    for predecessor in predecessors:
        if predecessor in self.cluster_dict.keys():
            corr = self.get_aggregate_correlation(index, predecessor)
            if corr > max_pre_corr:
                max_pre_corr = corr
                pre_target = predecessor
        else:
            # A primitive predecessor: discard any predecessor candidate
            # and stop scanning this side.
            pre_target = None
            max_pre_corr = 0.0
            break
    successors = self.get_successors_of_cluster(index)
    for successor in successors:
        if successor in self.cluster_dict.keys():
            corr = self.get_aggregate_correlation(index, successor)
            if corr > max_succ_corr:
                max_succ_corr = corr
                succ_target = successor
        else:
            # A primitive successor: fall back to the predecessor side.
            if pre_target != None:
                return pre_target
            else:
                return None
    # Both sides clean: prefer the more correlated candidate (ties go to
    # the successor side, including the no-candidate case).
    if max_pre_corr > max_succ_corr:
        return pre_target
    else:
        return succ_target
def get_successors_of_cluster(self, index):
    """Mapped ids reachable from any primitive of cluster *index*, minus
    the cluster's own primitives and the cluster itself."""
    cluster = self.cluster_dict[index]
    successors = set()
    for primitive in cluster.get_primitives():
        successors |= self.get_successors_of_node(primitive)
    successors -= set(cluster.get_primitives())
    successors.discard(index)
    return successors
def get_successors_of_node(self, index):
    """Mapped cluster/node ids of every node with a positive filtered edge
    from *index* (self excluded, removed nodes skipped)."""
    successors = set()
    row = self.node_filter_resultant_binary_values[index]
    for other in range(self.num_of_nodes):
        if other == index:
            continue
        if row[other] > 0.0 and self.node_cluster_mapping[other] != -1:
            successors.add(self.node_cluster_mapping[other])
    return successors
def get_aggregate_correlation(self, cluster1_idx, cluster2_idx):
    """Sum of edge-filtered correlations across every primitive pair of the
    two clusters, counted in both directions."""
    corr = self.edge_filter_resultant_binary_corr_values
    total = 0.0
    for a in self.cluster_dict[cluster1_idx].get_primitives():
        for b in self.cluster_dict[cluster2_idx].get_primitives():
            total += corr[a][b]
            total += corr[b][a]
    return total
def get_predecessors_of_cluster(self, index):
    """Mapped ids that reach any primitive of cluster *index*, minus the
    cluster's own primitives and the cluster itself."""
    cluster = self.cluster_dict[index]
    predecessors = set()
    for primitive in cluster.get_primitives():
        predecessors |= self.get_predecessors_of_node(primitive)
    predecessors -= set(cluster.get_primitives())
    predecessors.discard(index)
    return predecessors
def get_predecessors_of_node(self, index):
    """Mapped cluster/node ids of every node with a positive filtered edge
    into *index* (self excluded, removed nodes skipped)."""
    predecessors = set()
    values = self.node_filter_resultant_binary_values
    for other in range(self.num_of_nodes):
        if other == index:
            continue
        if values[other][index] > 0.0 and self.node_cluster_mapping[other] != -1:
            predecessors.add(self.node_cluster_mapping[other])
    return predecessors
def most_correlated_neighbor(self, idx):
max_corr = 0.0
winner_idx = 0
for i in range(0, self.num_of_nodes):
if i == idx:
continue
curr_corr = self.concurrency_filter_resultant_binary_corr_values[idx][i]
if curr_corr > max_corr:
winner_idx = self.node_cluster_mapping[i]
max_corr = curr_corr
curr_corr = self.concurrency_filter_resultant_binary_corr_values[i][idx]
if curr_corr > max_corr:
winner_idx = self.node_cluster_mapping[i]
max_corr = curr_corr
return winner_idx
    def check_for_direct_connection(self, cluster):
        """For a singleton cluster, try to splice its lone node out of the
        graph: for each (predecessor, successor) pair with no direct edge,
        create a bridging entry averaging the via-node significance and
        correlation, zero the two via-node entries, and un-map the node.

        NOTE(review): pre_set/succ_set come from get_predecessors_of_node /
        get_successors_of_node, which return *cluster* indices
        (node_cluster_mapping values), yet pre_idx/succ_idx are then used to
        index the edge/node matrices as if they were node indices — confirm
        the two index spaces coincide here.
        """
        # the cluster is assumed to hold exactly one primitive node
        node_index = cluster.get_primitives()[0]
        own_idx = node_index
        pre_set = self.get_predecessors_of_node(own_idx)
        succ_set = self.get_successors_of_node(own_idx)
        for pre_idx in pre_set:
            # skip predecessors that are themselves clusters
            if pre_idx in self.cluster_dict.keys():
                continue
            for succ_idx in succ_set:
                # skip successors that are themselves clusters
                if succ_idx in self.cluster_dict.keys():
                    continue
                # only bridge when no direct pre -> succ edge exists yet
                if self.edge_filter_resultant_binary_values[pre_idx][succ_idx] == 0.0:
                    from_sig = self.edge_filter_resultant_binary_values[pre_idx][own_idx]
                    to_sig = self.edge_filter_resultant_binary_values[own_idx][succ_idx]
                    from_corr = self.edge_filter_resultant_binary_corr_values[pre_idx][own_idx]
                    to_corr = self.edge_filter_resultant_binary_corr_values[own_idx][succ_idx]
                    # new bridging edge: mean of the two hops it replaces
                    self.node_filter_resultant_binary_values[pre_idx][succ_idx] = \
                        (from_sig + to_sig) / 2.0
                    self.node_filter_resultant_binary_corr_values[pre_idx][succ_idx] = \
                        (from_corr + to_corr) / 2.0
                    # remove both hops through the spliced-out node
                    self.node_filter_resultant_binary_values[pre_idx][own_idx] = 0.0
                    self.node_filter_resultant_binary_values[own_idx][succ_idx] = 0.0
                    self.node_filter_resultant_binary_corr_values[pre_idx][own_idx] = 0.0
                    self.node_filter_resultant_binary_corr_values[own_idx][succ_idx] = 0.0
                    # mark the node as no longer belonging to any cluster
                    self.node_cluster_mapping[own_idx] = -1
class Node:
    """A primitive node of the mined graph."""
    def __init__(self, index, label, significance, node_type="primitive"):
        self.index = index                # position in the node matrices
        self.label = label                # human-readable name
        self.significance = significance  # node significance score
        self.node_type = node_type        # "primitive" or "cluster"
    def __str__(self):
        return (f"{self.label} index: {self.index} "
                f"significance: {self.significance} and type: {self.node_type}")
class Edge:
    """A directed edge with significance and correlation scores."""
    def __init__(self, source_index, target_index, significance, correlation):
        self.source = source_index
        self.target = target_index
        self.significance = significance
        self.correlation = correlation
    def __str__(self):
        return (f"source: {self.source} target: {self.target} "
                f"significance: {self.significance} correlation: {self.correlation}")
class Cluster(Node):
    """A composite node aggregating the indices of its primitive members."""
    def __init__(self, index):
        # clusters carry a fixed label/significance and type "cluster"
        super().__init__(index, "Cluster", 1.0, "cluster")
        self.primitives = []
    def add_node(self, node_index):
        """Register a primitive node index as a member of this cluster."""
        self.primitives.append(node_index)
    def get_primitives(self):
        """Return the list of member node indices."""
        return self.primitives
    def __str__(self):
        return (f"{self.label} index: {self.index} mean significance: "
                f"{self.significance} has primitives: {self.get_primitives()}")
class Filter:
    """Base class for all graph-simplification filters."""
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return f"Filter name: {self.name}"
class NodeFilter(Filter):
    """Drops nodes whose significance falls below `cut_off`."""
    def __init__(self, cut_off=0.0):
        super().__init__("node_filter")
        self.cut_off = cut_off
    def __str__(self):
        return f"{super().__str__()} Cut Off: {self.cut_off}"
class EdgeFilter(Filter):
    """Settings for edge filtering (utility/significance trade-off)."""
    def __init__(self, edge_transform=1, sc_ratio=0.75, preserve=0.2,
                 interpret_abs=False, ignore_self_loops=True):
        super().__init__("edge_filter")
        self.edge_transform = edge_transform      # 1 = fuzzy edge transform on
        self.sc_ratio = sc_ratio                  # significance/correlation ratio
        self.preserve = preserve                  # fraction of edges to keep
        self.interpret_abs = interpret_abs        # use absolute utility values
        self.ignore_self_loops = ignore_self_loops
    def __str__(self):
        # BUG FIX: the non-transform branch was missing the space before
        # "Edge Transform", gluing it to the filter name.
        base = super().__str__() + " Edge Transform: " + str(self.edge_transform)
        if self.edge_transform == 1:
            return (base + " sc_ratio: " + str(self.sc_ratio)
                    + " Preserve: " + str(self.preserve)
                    + " Ignore Self Loops: " + str(self.ignore_self_loops)
                    + " Interpret Absolute: " + str(self.interpret_abs))
        return base + " Ignore Self Loops: " + str(self.ignore_self_loops)
class ConcurrencyFilter(Filter):
    """Settings for resolving concurrent (mutually correlated) edges."""
    def __init__(self, filter_concurrency=True, preserve=0.6, offset=0.7):
        super().__init__("concurrency_filter")
        self.filter_concurrency = filter_concurrency  # enable/disable the filter
        self.preserve = preserve                      # relative-significance threshold
        self.offset = offset                          # balance offset
    def __str__(self):
        if self.filter_concurrency:
            return (super().__str__() + " Preserve: " + str(self.preserve)
                    + " Offset: " + str(self.offset))
        # BUG FIX: a space was missing before "Filter is Disabled", gluing it
        # to the filter name.
        return super().__str__() + " Filter is Disabled"
from abc import ABC, abstractmethod
class Configuration:
    """Bundle of all miner settings: filters, metrics, attenuation, distance."""
    def __init__(self, filter_config, metric_configs, attenuation, maximal_distance):
        self.filter_config = filter_config        # FilterConfig instance
        self.metric_configs = metric_configs      # iterable of MetricConfig
        self.attenuation = attenuation            # Attenuation strategy
        self.maximal_distance = maximal_distance  # max look-back distance
    def __str__(self):
        metric_info = "".join(str(metric) for metric in self.metric_configs)
        return (f"{self.filter_config}\n{metric_info} Attenuation: "
                f"{self.attenuation} Maximum Distance: {self.maximal_distance}")
class FilterConfig:
    """Groups the three filters applied to the mined graph."""
    def __init__(self, node_filter, edge_filter, concurrency_filter):
        self.node_filter = node_filter
        self.edge_filter = edge_filter
        self.concurrency_filter = concurrency_filter
    def __str__(self):
        parts = (self.node_filter, self.edge_filter, self.concurrency_filter)
        return "\n".join(str(part) for part in parts)
class MetricConfig:
    """Settings for one significance/correlation metric."""
    def __init__(self, name, metric_type, include=True, invert=False, weight=1.0):
        self.name = name
        self.metric_type = metric_type
        self.include = include   # whether the metric participates at all
        self.invert = invert     # invert the metric's value
        self.weight = weight     # relative weight in the aggregate
    def __str__(self):
        return (f"Metric Name: {self.name} Metric Type: {self.metric_type}"
                f" Included: {self.include} Inverted: {self.invert}"
                f" Weight: {self.weight}")
class Attenuation(ABC):
    """Distance-based damping of metric contributions.

    Factors for distances < buf_size are computed lazily once and cached in
    `attenuation_factors`; larger distances are computed on demand.
    """
    def __init__(self, buf_size=5, echelons=2.7, attenuation_factors=None):
        self.buf_size = buf_size                      # cached-factor count
        self.echelons = echelons                      # decay parameter
        self.attenuation_factors = attenuation_factors  # lazy cache (list)
    def attenuate(self, value, distance):
        """Scale `value` by the attenuation factor for `distance`."""
        return value * self.get_attenuation_factor(distance)
    def get_attenuation_factor(self, distance):
        """Return the damping factor for `distance`, using the cache when possible."""
        if distance < self.buf_size:
            if self.attenuation_factors is None:
                self.generate_buffer()
            return self.attenuation_factors[distance]
        else:
            return self.create_attenuation_factor(distance)
    def generate_buffer(self):
        """Precompute factors for distances 0 .. buf_size-1."""
        self.attenuation_factors = [
            self.create_attenuation_factor(i) for i in range(self.buf_size)
        ]
    @abstractmethod
    def create_attenuation_factor(self, distance):
        pass
    @abstractmethod
    def get_name(self):
        pass
    def __str__(self):
        # BUG FIX: attenuation_factors is a list (or None); concatenating it
        # directly onto a str raised TypeError.
        return ("Buffer Size: " + str(self.buf_size)
                + " Attenuation Factor: " + str(self.attenuation_factors))
class LinearAttenuation(Attenuation):
    """Attenuation that decays linearly with distance."""
    def __init__(self, buffer_size, num_of_echelons):
        super().__init__(buffer_size, num_of_echelons)
    def create_attenuation_factor(self, distance):
        # distance 1 (direct neighbour) is never attenuated
        if distance == 1:
            return 1.0
        return float(self.echelons - distance + 1) / float(self.echelons)
    def get_name(self):
        return "Linear Attenuation"
    def __str__(self):
        return f" Echelons Value: {self.echelons}"
class NRootAttenuation(Attenuation):
    """Attenuation decaying as the N-th root: factor = echelons^-(distance-1)."""
    def __init__(self, buffer_size, num_of_echelons):
        super().__init__(buffer_size, num_of_echelons)
    def create_attenuation_factor(self, distance):
        # distance 1 (direct neighbour) is never attenuated
        if distance == 1:
            return 1.0
        else:
            return 1.0 / pow(self.echelons, distance - 1)
    def get_name(self):
        if self.echelons == 2:
            return "Square root"
        elif self.echelons == 3:
            return "Cubic root"
        elif self.echelons == 4:
            # BUG FIX: the 4th root is "quartic"; "Quadratic" means degree 2,
            # which is already the echelons == 2 case.
            return "Quartic root"
        else:
            return str(self.echelons) + "th root"
    def __str__(self):
        return " Echelons Value: " + str(self.echelons)
| en | 0.482865 | # add metric widgets # add edge filter widget # add attenuation widget # add Node filter widget # add concurency filter # add node filter # add edge filter # add concurency filter # add attenuation # add metrics to file #def __init__(self, name, metric_type, include=True, invert=False, weight=1.0): # Clustering # Vizualization here since we have all graph data # graph_path = self.visualize(self.fm_nodes, self.fm_edges, self.fm_clusters) # self.fm_message.graph_path = graph_path # Debug block starts # print() # print() # print() # Debug block ends # print(self.binary_edge_frequency_values) # self.binary_edge_frequency_values = [[0.0 for _ in range(self.num_of_nodes)] for _ in range(self.num_of_nodes)] # print(len(trace)) # print(len(event)) # print(len(look_back), look_back, end=' ') # print() # (len(look_back)) # print(self.binary_edge_frequency_values) # print(self.binary_edge_frequency_values[ref_index][follower_index], att_factor, end=' ') # print(self.binary_edge_frequency_values) | 2.092304 | 2 |
nbx/nbmanager/tests/common.py | dalejung/nbx | 2 | 6614909 | <reponame>dalejung/nbx
import github
import pandas as pd
from mock import Mock
from nbx.nbmanager.tagged_gist.notebook_gisthub import (
NotebookGist,
NotebookGistHub
)
from nbx.nbmanager.tagged_gist.gisthub import TaggedGist
hub = None
login = None
# BUG FIX: the committed value here was an unparseable redaction placeholder
# ("<PASSWORD>") that made the module a SyntaxError; default to None — the
# try-block below overwrites it when real credentials are available.
password = None
try:
    import GithubCredentials
    login = GithubCredentials.login
    password = GithubCredentials.password
    # authenticated client; stays None when credentials are missing
    hub = github.Github(login, password, user_agent="nbx")
except Exception:
    pass
def require_github(func):
    """Decorator guard: keep `func` only when a GitHub hub is available,
    otherwise replace it with a no-op (effectively skipping the test)."""
    if hub is not None:
        return func
    return lambda s: None
class TestGistHub(object):
    """In-memory stand-in for the real GistHub backend used by the tests."""
    def refresh_gist(self, gist):
        # Return the wrapped raw gist instead of re-fetching it from GitHub.
        return gist.gist
    def update_gist(self, gist):
        # Persisting changes is a no-op for the fake hub.
        pass
def makeFakeGist():
    """Build a Mock mimicking a github Gist: three files plus four daily
    history revisions, with 'a.ipynb' disappearing from revision 2 on."""
    filenames = ['a.ipynb', 'b.ipynb', 'test.txt']
    gist = Mock()
    gist.description = "Test Gist #notebook #pandas #woo"
    gist.id = 123
    # fake files
    def _file_obj(fn):
        fo = Mock()
        fo.filename = fn
        fo.content = fn + " content"
        return fo
    gist.files = {fn: _file_obj(fn) for fn in filenames}
    # fake history: one revision per day starting 2000-01-01
    revisions = []
    stamps = pd.date_range("2000", freq="D", periods=4).to_pydatetime()
    for version, stamp in enumerate(stamps):
        state = Mock()
        state.version = version
        state.committed_at = stamp
        files = {
            fn: {
                'content': "{fn}_{i}_revision_content".format(fn=fn, i=version),
                'filename': fn,
            }
            for fn in filenames
        }
        # after 2, don't include 'a.ipynb'
        if version >= 2:
            del files['a.ipynb']
        state.raw_data = {'files': files}
        revisions.append(state)
    gist.history = revisions
    return gist
def make_notebookgist():
    """Wire a fake gist through TaggedGist/NotebookGistHub into a NotebookGist."""
    fake = makeFakeGist()
    tagged = TaggedGist.from_gist(fake)
    # the gisthub backend is faked too
    hub_stub = NotebookGistHub(TestGistHub())
    return NotebookGist(tagged, hub_stub)
| import github
import pandas as pd
from mock import Mock
from nbx.nbmanager.tagged_gist.notebook_gisthub import (
NotebookGist,
NotebookGistHub
)
from nbx.nbmanager.tagged_gist.gisthub import TaggedGist
hub = None
login = None
password = <PASSWORD>
try:
import GithubCredentials
login = GithubCredentials.login
password = GithubCredentials.password
hub = github.Github(login, password, user_agent="nbx")
except Exception:
pass
def require_github(func):
if hub is None:
return lambda s: None
return func
class TestGistHub(object):
def refresh_gist(self, gist):
return gist.gist
def update_gist(self, gist):
pass
def makeFakeGist():
gist = Mock()
gist.description = "Test Gist #notebook #pandas #woo"
gist.id = 123
# fake files
filenames = ['a.ipynb', 'b.ipynb', 'test.txt']
files = {}
for fn in filenames:
fo = Mock()
fo.filename = fn
fo.content = fn+" content"
files[fn] = fo
gist.files = files
# fake history
history = []
dates = pd.date_range("2000", freq="D", periods=4).to_pydatetime()
for i, date in enumerate(dates):
state = Mock()
state.version = i
state.committed_at = date
raw_data = {}
files = {}
for fn in filenames:
fo = {
'content': "{fn}_{i}_revision_content".format(fn=fn, i=i),
'filename': fn,
}
files[fn] = fo
# after 2, don't include 'a.ipynb'
if i >= 2:
del files['a.ipynb']
raw_data['files'] = files
state.raw_data = raw_data
history.append(state)
gist.history = history
return gist
def make_notebookgist():
gist = makeFakeGist()
tg = TaggedGist.from_gist(gist)
# fake gisthub
gisthub = NotebookGistHub(TestGistHub())
nb = NotebookGist(tg, gisthub)
return nb | en | 0.738525 | #notebook #pandas #woo" # fake files # fake history # after 2, don't include 'a.ipynb' # fake gisthub | 2.27195 | 2 |
notebooks/abstract Neural Models/STDP.py | atenagm1375/awesome-computational-neuro-science | 8 | 6614910 | #!/usr/bin/env python
# coding: utf-8
# ## Packages
# In[3]:
import matplotlib.style as mplstyle
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import random
import math
from collections import namedtuple
get_ipython().run_line_magic('matplotlib', 'inline')
# BUG FIX: a stray bare `μ` expression sat here (notebook residue) and raised
# NameError at import time; removed.
# In[4]:
""" ⚡⚡⚡⚡⚡⚡⚡ """
# Remove this if you want to test neural activities. this is for reproducibility purpose
np.random.seed(19680801)
# ### Optimizations for Matplotlib
# In[5]:
mpl.rcParams['path.simplify'] = True
mpl.rcParams['path.simplify_threshold'] = 1.0
mpl.rcParams['agg.path.chunksize'] = 10000
mplstyle.use('fast')
# ### Leaky Integrate & Fire Model
# In[183]:
class LIF:
    """Leaky integrate-and-fire neuron with delayed synaptic transmission.

    Presynaptic input is queued in `self.input` keyed by future time stamps
    and consumed by `integrate`; unconsumed input is spread forward by
    `reset`.
    """
    # BUG FIX: the original slots listed 'dt__τ' (and 'input' twice) while
    # __init__ assigns self.dt and self.τ, which raises AttributeError under
    # __slots__.  Slots now match the attributes that are actually set.
    __slots__ = ['dt', 'τ', 'θ', 'R', 'u_rest', 'u',
                 'input', 'spike_trace', 'isi', 'posts']
    def __init__(self, config: namedtuple):
        self.θ = config.threshold            # firing threshold (mV)
        self.R = config.resistor             # membrane resistance
        self.u_rest = config.uRest           # resting potential (mV)
        self.dt = config.dt                  # integration step
        self.τ = config.tau                  # membrane time constant
        self.isi = int(config.isInhibitory)  # 1 → inhibitory, 0 → excitatory
        self.u = self.u_rest
        self.input = {}                      # time -> queued presynaptic input
        self.spike_trace = []                # recorded spike times
        self.posts = []                      # outgoing synapses (neuron, w)
    def integrate(self, I, t):
        """
        integrate(currentValue, currentTime)
        return one if neuron spikes in currentTime otherwise it will return zero
        """
        # compute potential (Euler step of the LIF membrane equation)
        self.u += (self.u_rest - self.u + self.R * I) * (self.dt / self.τ)
        # Add presynaptic input queued for this time step
        self.u += self.input.get(t, 0) * self.R * (self.dt / self.τ)
        # compute spikes
        if self.u >= self.θ:
            self.u = self.u_rest
            self.spike_trace.append(t)
            # update entry input for post synaptic neurons
            for synapse in self.posts:
                fs_time = t + (1//self.dt*self.dt) + \
                    self.dt  # future spikes time (transmission delay)
                # BUG FIX: the original read `if fs_time no in synpse...`,
                # a SyntaxError plus a typo'd variable name.
                if fs_time not in synapse.neuron.input:
                    synapse.neuron.input[fs_time] = 0
                # inhibitory neurons (isi == 1) contribute negative input
                synapse.neuron.input[fs_time] += pow(-1, self.isi) * synapse.w
        # if neuron has just spiked in time t return 1 otherwise 0
        return int(t in self.spike_trace[-1:])
    def reset(self, t, alpha):
        """Spread any unconsumed input at time `t` onto later queued slots,
        scaled by `alpha` (trace decay)."""
        # nothing queued for this time step
        if self.input.get(t, 0) == 0:
            return
        value = self.input.pop(t)
        # search for next 6 feasable seconds delayed
        for sec in range(1, 6):
            fs_time = t + (sec//self.dt*self.dt)
            if fs_time in self.input:
                self.input[fs_time] += value * alpha
# In[ ]:
# ### Inverstigation
# In[20]:
# LIFConfig=namedtuple('LIFConfig', 'tau resistor threshold uRest dt isInhibitory')
# lif_config = LIFConfig(5, 10, -65, -70, 0.5, False)
# currents = np.linspace(0, 2, 200)
# currents = np.array([0.6 for i in range(200)])
# dts = np.linspace(0,10, 200)
# neuron = LIF(lif_config)
# In[ ]:
# ## STDP unsupervised learning
#
#
# ```python
# src = LIF_Neuron
# dest = LIF_Neuron
#
# current_src <- noisy current for src for duration
# current_dest <- noisy current for dest for duration
#
# #connect src to dest with stdp config
#
# for neuron in [src, dest]:
# # compute neuron potential
# # apply pre synaptic for neuron
# # compute spikes for neuron
# # reset inputs for neuron
#
# if src.spiked and dest.spiked :
# update.stdp_connection
# store.append([deltaT, deltaW]
# ```
#
# In[21]:
dt = 1
duration = 20
delta_w_delta_t = []
s = []
d = []
# NOTE(review): notebook cell — it calls get_current(), which is defined in a
# *later* cell; as a flat script this raises NameError at this point.
for i in np.arange(-1 * duration, duration, 0.5):
    # NOTE(review): get_current returns a zip iterator, so sd[0]/sd[1] raise
    # TypeError ('zip' object is not subscriptable) — confirm intended usage.
    sd = get_current(duration, dt, i)
    s.append(sd[0])
    d.append(sd[1])
# In[27]:
# flatten the collected source/destination currents and plot them
s = np.array(s).flatten()
plt.plot(np.linspace(0, 10, 1600), s)
d = np.array(d).flatten()
plt.plot(np.linspace(0, 10, 1600), d)
# ### first Phase
# In[191]:
# params
MAX_I = 5  # step-current amplitude used by get_current()
LIFConfig = namedtuple(
    'LIFConfig', 'tau resistor threshold uRest dt isInhibitory')
lif_config = LIFConfig(5, 10, -65, -70, 0.5, False)  # shared neuron settings
SynapseConfig = namedtuple('SynapseConfig', 'neuron w')  # (post neuron, weight)
dt = 1  # simulation step for the current sequences
duration = 20  # length of each trial
delta_gap = 0.5  # spacing of the tested pre/post spike offsets
INIT_WEIGHT = 1  # initial synaptic weight for each trial
ΔwΔt = []  # collected (Δw, Δt) pairs for the STDP curve
# In[175]:
def get_current(duration, dt, delta, amplitude=None):
    """Build step currents for a source/destination neuron pair.

    The earlier neuron receives `amplitude` for the whole trial; the other
    one switches on only after |delta| time units (delta >= 0 means the
    source leads).

    :param duration: trial length
    :param dt: time step
    :param delta: onset offset of the lagging neuron (sign picks which lags)
    :param amplitude: current amplitude; defaults to the module-level MAX_I
    :return: iterator of (I_src, I_dest, t) triples
    """
    amplitude = MAX_I if amplitude is None else amplitude
    times = np.arange(0, duration, dt)
    if delta >= 0:
        src = np.array([amplitude] * times.shape[0])
        dest = np.zeros(times.shape)
        dest[times >= delta] = amplitude
        # BUG FIX: this branch dropped `times`, but the caller unpacks
        # (Is, Id, t); yield the full triple in both branches.
        return zip(src, dest, times)
    dest = np.array([amplitude] * times.shape[0])
    src = np.zeros(times.shape)
    src[times >= -1*delta] = amplitude
    return zip(src, dest, times)
# In[193]:
# Sweep the pre/post spike offset and record the STDP weight change at the
# first pair of spikes of each trial.
for delta in np.arange(-1*duration, duration, delta_gap):
    currents = get_current(duration, dt, delta)
    src = LIF(lif_config)
    dest = LIF(lif_config)
    # set connection (BUG FIX: the LIF attribute is `posts`, not `post`)
    src.posts.append(SynapseConfig(dest, INIT_WEIGHT))
    for (Is, Id, t) in currents:
        src.integrate(Is, t)
        src.reset(t, alpha=0.5)
        # BUG FIX: dest was fed the source current `Is` instead of `Id`
        dest.integrate(Id, t)
        dest.reset(t, alpha=0.5)
        if (len(src.spike_trace) and len(dest.spike_trace)):
            for i, synapse in enumerate(src.posts):
                w = synapse.w
                Δw, Δt = stdp(w, src.spike_trace[-1], dest.spike_trace[-1])
                # BUG FIX: namedtuples are immutable — _replace returns a new
                # tuple, which the original discarded; store it back.
                src.posts[i] = synapse._replace(w=w + Δw)
                # BUG FIX: `.aapend` typo
                ΔwΔt.append([Δw, Δt])
            break
# BUG FIX: results were collected in ΔwΔt but an empty delta_w_delta_t was
# plotted; convert the collected pairs instead.
delta_w_delta_t = np.array(ΔwΔt)
plt.plot(delta_w_delta_t[:, 0], delta_w_delta_t[:, 1], 'o')
plt.show()
# $\Delta_t \geq 0 \implies t_{pre} < t_{post}$
# In[197]:
def stdp(w, t_pre, t_post, tau_=5., tau__=5., A__=lambda x: 10, A_=lambda x: -10):
    """Pair-based STDP weight update.

    BUG FIX: the original signature declared `w` twice and was missing the
    comma after `tau_=5.` — both SyntaxErrors.

    :param w: current synaptic weight (passed to the amplitude functions)
    :param t_pre: presynaptic spike time
    :param t_post: postsynaptic spike time
    :param tau_: depression time constant (τ-)
    :param tau__: potentiation time constant (τ+)
    :param A__: potentiation amplitude A+(w)
    :param A_: depression amplitude A-(w)
    :return: (Δw, Δt) with Δt = t_post - t_pre
    """
    Δt = t_post - t_pre
    if (Δt >= 0):
        # pre fired before (or with) post → potentiation (A plus)
        Δw = A__(w) * np.exp(-abs(Δt) / tau__)
    else:
        # post fired before pre → depression (A minus)
        Δw = A_(w) * np.exp(-abs(Δt) / tau_)
    return Δw, Δt
# SynapseConfig = namedtuple('SynapseConfig', 'neuron w')
# syn = SynapseConfig(50, 1)
# print(syn)
# syn = syn._replace(w=2)
# print(syn)
# In[ ]:
# ## Report
# The temporal distance of the spikes in the pair is of the order of a
# few to tens of milliseconds, whereas the temporal distance
# between the pairs is of the order of hundreds of milliseconds to
# seconds.
# Experiments show that increasing the repetition frequency leads
# to an increase in potentiation.
# The pair-based STDP cannot give a full account of experimental
# results (repetition frequency or triplet and quadruplet
# experiments).
# According to the pair-based STDP, if the repetition frequency r is
# increased, the depression effect becomes greater, which is not
# experimentally true!
# Instead, symmetric triplets of the form pre-post-pre and
# post-pre-post are used.
#
# The LTP is proportional to the value of xj evaluated at tif and the slow trace yi;2:
# ∆w+
# ij (tif ) = A+(wij)xj(tif )yi;2(tif−);
# where tf−
# i indicates the value of yi;2 before it is incremented due to the postsynaptic spike at tif .
# Similar argument works for LTD
#
# STDP learning rule
# The change in weight of a synapse depends on the temporal
# difference j∆tj = jtpost − tprej:
# ∆w+ = A+(w) · exp(−j∆tj=t+) at tpost for tpre < tpost;
# ∆w
# − = A−(w) · exp(−j∆tj=t−) at tpre for tpre < tpost:
# Happens immediately after each
# spike (at times tpre and tpost).
# This rule is fully specified by
# defining:
# (i) the weight-dependence of the
# amplitude parameter A±(w).
# (ii) which spike pairs are taken into
# consideration (all pairs or
# nearest one).
# Two intracellular electrodes were used (one for stimulation, one
# for measurement of the neuronal response).
# Excellent spatial and temporal resolution.
# Synaptic weight change (∆wji) turns out to be a function of tjf −tif .
# The direction of the change depends on the relative timing of preand postsynaptic spikes. 496 Synaptic
# Two intracellular electrodes were used (one for stimulation, one
# for measurement of the neuronal response).
# Excellent spatial and temporal resolution.
# Synaptic weight change (∆wji) turns out to be a function of tjf −tif .
# The direction of the change depends on the relative timing of preand postsynaptic spikes. 496 Synaptic
# ### Neuron Populations
#
#
# $\Delta t$ is a small time interval
#
# ##### Population Activity $A(t)$
# $n_{act}(t, t + \Delta t)$ return number of spikes for population
#
# $A(t) = \lim\limits_{\Delta t \to 0} \frac{n_{act}(t, t + \Delta t)}{\Delta t * N}$ in discrit mathematic we approximate to $\frac{\sum\limits_{j=1}^N \sum\limits_{f} \delta (t - t^f)}{N}$
#
# In[202]:
class Population:
    """A mixed population of inhibitory and excitatory neurons.

    `conf` fields (see CONFIGS['population']): size, splitSize (excitatory
    fraction), neuron (model class), isiConfig/iseConfig (per-type neuron
    configs), traceAlpha.
    """
    def __init__(self, conf: namedtuple):
        self.size = conf.size
        # inhibitory neurons: the (1 - splitSize) fraction of the population
        isi_size = int((1 - conf.splitSize) * self.size)
        self.neurons = [conf.neuron(conf.isiConfig) for _ in range(isi_size)]
        # excitatory neurons fill the remainder
        self.neurons.extend(
            [conf.neuron(conf.iseConfig) for _ in range(self.size - isi_size)]
        )
        self.α = conf.traceAlpha  # decay factor used by reset_inputs
        self.activities = []      # (t, fraction of neurons spiking) history
    def activate(self, I, t):
        """Drive every neuron with current I at time t and record A(t)."""
        # count of spikes for all neurons with a generator
        n_act = sum((neuron.integrate(I, t) for neuron in self.neurons))
        self.activities.append((t, n_act / self.size))
    def reset_inputs(self):
        """Decay each neuron's input trace.

        NOTE(review): the LIF model in this file stores `input` as a dict
        (time -> value); `min(0, (1 - α) * dict)` would raise TypeError —
        confirm which neuron model this population is meant to host.
        """
        for neuron in self.neurons:
            neuron.input = min(0, (1 - self.α) * neuron.input)
    def get_detailed_spikes(self):
        """Return an array of [neuron_index, spike_time] rows for raster plots."""
        return np.array([
            [i, tf] for (i, neuron) in enumerate(self.neurons) for tf in neuron.spike_trace
        ])
# #### Configuration (namedtuple)
#
# lif:
# + tau : $\tau$
# + resistor: $R$
# + threshold: $\theta$
# + uRest: $u_{rest}$
# + dt: small interval of time to make dynamic equeation discrete
# + isInhibitory: falg showing a neuron is inhibitory or not
#
# synapse:
# + neuron: reference to post synaptic neuron (it can be from other populations)
# + w: connection weight
#
# population:
# + size: size of population
# + splitSize: $pop = [(1-splitSize) * size] * neurons^{inhibitory} \cup [splitSize * size] * neurons^{excitatory}$
# + neuron: neuron model [LIF, ELIF, AELIF, ...]
# + isiConfig: configuration for inhibitory neurons
# + iseConfig: configuration for excitatory neurons
# + traceAlpha: $\alpha$ used in inputs reseting phase
# In[206]:
# Named-tuple "schemas" for every configurable object in the simulation.
CONFIGS = {
    # neurons
    "lif": namedtuple('LIFConfig', 'tau resistor threshold uRest dt isInhibitory'),
    # connections: a synapse is (post neuron, weight); a connection rule has
    # a weight distribution N(mu, sigma) and a coupling probability
    "synapse": namedtuple('SynapseConfig', 'neuron w'),
    "connection": namedtuple('ConnectionTypeConfig', 'mu sigma coupling_probability'),
    # population
    "population": namedtuple('PopulationParams', 'size splitSize neuron isiConfig iseConfig traceAlpha'),
}
# In[207]:
Synapse = CONFIGS['synapse']
def full_connectivity(pre_neurons, post_neurons, config):
    """All-to-all wiring: every pre neuron gets one synapse onto every post
    neuron, with weights drawn from N(config.mu, config.sigma)."""
    mu, sigma = config.mu, config.sigma
    draw = np.random.normal
    for source in pre_neurons:
        source.posts.extend(
            Synapse(target, draw(mu, sigma)) for target in post_neurons
        )
def fixed_coupling_probability_connectivity(pre_neurons, post_neurons, config):
    """Connect a random `coupling_probability` fraction of pre neurons to a
    random fraction of post neurons, weights drawn from N(mu, sigma)."""
    C_pre_size = int(config.coupling_probability * len(pre_neurons))
    C_post_size = int(config.coupling_probability * len(post_neurons))
    μ, σ = config.mu, config.sigma
    normal = np.random.normal
    for pre in np.random.choice(pre_neurons, C_pre_size, replace=False):
        pre.posts.extend([
            Synapse(post, normal(μ, σ)) for post in
            # BUG FIX: targets were sampled from pre_neurons; C_post_size is
            # derived from post_neurons and the synapse must end on a post.
            np.random.choice(post_neurons, C_post_size, replace=False)
        ])
def fixed_number_of_presynaptics_parents(pre_neurons, post_neurons, config):
    """Give every post neuron exactly C randomly chosen presynaptic parents,
    where C = coupling_probability * len(pre_neurons)."""
    fan_in = int(config.coupling_probability * len(pre_neurons))
    mu, sigma = config.mu, config.sigma
    draw = np.random.normal
    for target in post_neurons:
        for source in np.random.choice(pre_neurons, fan_in, replace=False):
            source.posts.append(Synapse(target, draw(mu, sigma)))
# Dispatch table: strategy name -> wiring function.
connectivities = {
    'full': full_connectivity,
    'fixedCP': fixed_coupling_probability_connectivity,
    'fixedNPP': fixed_number_of_presynaptics_parents,
}
# In[ ]:
### utilities + Plotters
def noise(μ=0, σ=0.5):
    """Draw one sample from the normal distribution N(μ, σ)."""
    return np.random.normal(loc=μ, scale=σ)
def choice(*choices):
    """Pick one of the given arguments uniformly at random."""
    return np.random.choice(a=choices)
class CurrentManager:
    """Context manager pairing an injected-current sequence with its time axis.

    `currents` may be a ready-made sequence or a factory called as
    currents(duration, dt).  Entering yields (current, time) pairs; exiting
    frees both buffers.
    """
    def __init__(self, duration, dt, currents):
        self.time_intervals = np.arange(0, duration, dt)
        if callable(currents):
            self.currents = currents(duration, dt)
        else:
            self.currents = currents
    def __enter__(self):
        return zip(self.currents, self.time_intervals)
    def __exit__(self, exc_type, exc, tb):
        # release the (possibly large) buffers as soon as the block ends
        del self.currents
        del self.time_intervals
# In[208]:
def raster_plot(x, y, c):
    """Scatter spike times (x) against neuron indices (y), colored per point.

    :param x: spike times
    :param y: neuron indices
    :param c: per-point colors distinguishing neuron types
    """
    plt.scatter(x, y, s=1, c=c)
    # empty proxy scatters only feed the legend
    # BUG FIX: legend label typo 'excititory' -> 'excitatory'
    for (area, color, label) in [(20, 'y', 'inhibitory'), (80, 'm', 'excitatory')]:
        plt.scatter([], [], c=color, alpha=0.8, s=area, label=label)
    plt.legend(scatterpoints=1, frameon=False, labelspacing=1)
    plt.title("raster plot")
    plt.ylabel("Neurons")
    plt.xlabel("times")
    plt.show()
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# ## Packages
# In[3]:
import matplotlib.style as mplstyle
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import random
import math
from collections import namedtuple
get_ipython().run_line_magic('matplotlib', 'inline')
μ
# In[4]:
""" ⚡⚡⚡⚡⚡⚡⚡ """
# Remove this if you want to test neural activities. this is for reporducibility purpose
np.random.seed(19680801)
# ### Optimizations for Matplotlib
# In[5]:
mpl.rcParams['path.simplify'] = True
mpl.rcParams['path.simplify_threshold'] = 1.0
mpl.rcParams['agg.path.chunksize'] = 10000
mplstyle.use('fast')
# ### Leaky Integrate & Fire Model
# In[183]:
class LIF:
__slots__ = ['dt__τ', 'θ', 'R', 'u_rest', 'u',
'input', 'spike_trace', 'isi', 'posts', 'input']
def __init__(self, config: namedtuple):
self.θ = config.threshold
self.R = config.resistor
self.u_rest = config.uRest
self.dt = config.dt
self.τ = config.tau
self.isi = int(config.isInhibitory)
self.u = self.u_rest
self.input = {}
self.spike_trace = []
self.posts = []
def integrate(self, I, t):
"""
integrate(currentValue, currentTime)
return one if neuron spikes in currentTime otherwise it will return zero
"""
# compute potential
self.u += (self.u_rest - self.u + self.R * I) * (self.dt / self.τ)
# Add presynaptic input
self.u += self.input.get(t, 0) * self.R * (self.dt / self.τ)
# compute spikes
if self.u >= self.θ:
self.u = self.u_rest
self.spike_trace.append(t)
# update entry input for post synaptic neurons
for synapse in self.posts:
fs_time = t + (1//self.dt*self.dt) + \
self.dt # future spikes time
if fs_time no in synpse.neuron.input:
synapse.neuron.input[fs_time] = 0
synapse.neuron.input[fs_time] += pow(-1, self.isi) * synapse.w
# if neuron has just spiked in time t return 1 otherwise 0
return int(t in self.spike_trace[-1:])
def reset(self, t, alpha):
# reset inputs
if self.input.get(t, 0) == 0:
return
value = self.input.pop(t)
# search for next 6 feasable seconds delayed
for sec in range(1, 6):
fs_time = t + (sec//self.dt*self.dt)
if fs_time in self.input:
self.input[fs_time] += value * alpha
# In[ ]:
# ### Inverstigation
# In[20]:
# LIFConfig=namedtuple('LIFConfig', 'tau resistor threshold uRest dt isInhibitory')
# lif_config = LIFConfig(5, 10, -65, -70, 0.5, False)
# currents = np.linspace(0, 2, 200)
# currents = np.array([0.6 for i in range(200)])
# dts = np.linspace(0,10, 200)
# neuron = LIF(lif_config)
# In[ ]:
# ## STDP unsupervised learning
#
#
# ```python
# src = LIF_Neuron
# dest = LIF_Neuron
#
# current_src <- noisy current for src for duration
# current_dest <- noisy current for dest for duration
#
# #connect src to dest with stdp config
#
# for neuron in [src, dest]:
# # compute neuron potential
# # apply pre synaptic for neuron
# # compute spikes for neuron
# # reset inputs for neuron
#
# if src.spiked and dest.spiked :
# update.stdp_connection
# store.append([deltaT, deltaW]
# ```
#
# In[21]:
dt = 1
duration = 20
delta_w_delta_t = []
s = []
d = []
for i in np.arange(-1 * duration, duration, 0.5):
sd = get_current(duration, dt, i)
s.append(sd[0])
d.append(sd[1])
# In[27]:
s = np.array(s).flatten()
plt.plot(np.linspace(0, 10, 1600), s)
d = np.array(d).flatten()
plt.plot(np.linspace(0, 10, 1600), d)
# ### first Phase
# In[191]:
# params
MAX_I = 5
LIFConfig = namedtuple(
'LIFConfig', 'tau resistor threshold uRest dt isInhibitory')
lif_config = LIFConfig(5, 10, -65, -70, 0.5, False)
SynapseConfig = namedtuple('SynapseConfig', 'neuron w')
dt = 1
duration = 20
delta_gap = 0.5
INIT_WEIGHT = 1
ΔwΔt = []
# In[175]:
def get_current(duration, dt, delta):
src = []
dest = []
times = np.arange(0, duration, dt)
if delta >= 0:
src = np.array([MAX_I] * times.shape[0])
dest = np.zeros(times.shape)
dest[times >= delta] = MAX_I
return zip(src, dest)
dest = np.array([MAX_I] * times.shape[0])
src = np.zeros(times.shape)
src[times >= -1*delta] = MAX_I
return zip(src, dest, times)
# In[193]:
for delta in np.arange(-1*duration, duration, delta_gap):
currents = get_current(duration, dt, delta)
src = LIF(lif_config)
dest = LIF(lif_config)
# set connection
src.post.append(SynapseConfig(dest, INIT_WEIGHT))
for (Is, Id, t) in currents:
src.integrate(Is, t)
src.reset(t, alpha=0.5)
dest.integrate(Is, t)
dest.reset(t, alpha=0.5)
if (len(src.spike_trace) and len(dest.spike_trace)):
for synapse in pre.post:
w = synapse.w
Δw, Δt = stdp(w, src.spike_trace[-1], dest.spike_trace[-1])
synapse._replace(w=w+Δw)
ΔwΔt.aapend([Δw, Δt])
break
delta_w_delta_t = np.array(delta_w_delta_t)
plt.plot(delta_w_delta_t[:, 0], delta_w_delta_t[:, 1], 'o')
plt.show()
# $\Delta_t \geq 0 \implies t_{pre} < t_{post}$
# In[197]:
def stdp(w, t_pre, t_post, w, tau_=5. tau__=5, A__=lambda x: 10, A_=lambda x: -10):
Δt = t_post - t_pre
if (Δt >= 0):
# A Plus or minus minus :)
Δw = A__(w) * np.exp(-abs(Δt) / tau__)
else:
# A minus
Δw = A_(w) * np.exp(-abs(Δt) / tau_)
return Δw, Δt
# SynapseConfig = namedtuple('SynapseConfig', 'neuron w')
# syn = SynapseConfig(50, 1)
# print(syn)
# syn = syn._replace(w=2)
# print(syn)
# In[ ]:
# ## Report
# The temporal distance of the spikes in the pair is of the order of a
# few to tens of milliseconds, whereas the temporal distance
# between the pairs is of the order of hundreds of milliseconds to
# seconds.
# Experiments show that increasing the repetition frequency leads
# to an increase in potentiation.
# The pair-based STDP cannot give a full account of experimental
# results (repetition frequency or triplet and quadruplet
# experiments).
# According to the pair-based STDP, if the repetition frequency r is
# increased, the depression effect becomes greater, which is not
# experimentally true!
# Instead, symmetric triplets of the form pre-post-pre and
# post-pre-post are used.
#
# The LTP is proportional to the value of xj evaluated at tif and the slow trace yi;2:
# ∆w+
# ij (tif ) = A+(wij)xj(tif )yi;2(tif−);
# where tf−
# i indicates the value of yi;2 before it is incremented due to the postsynaptic spike at tif .
# Similar argument works for LTD
#
# STDP learning rule
# The change in weight of a synapse depends on the temporal
# difference j∆tj = jtpost − tprej:
# ∆w+ = A+(w) · exp(−j∆tj=t+) at tpost for tpre < tpost;
# ∆w
# − = A−(w) · exp(−j∆tj=t−) at tpre for tpre < tpost:
# Happens immediately after each
# spike (at times tpre and tpost).
# This rule is fully specified by
# defining:
# (i) the weight-dependence of the
# amplitude parameter A±(w).
# (ii) which spike pairs are taken into
# consideration (all pairs or
# nearest one).
# Two intracellular electrodes were used (one for stimulation, one
# for measurement of the neuronal response).
# Excellent spatial and temporal resolution.
# Synaptic weight change (∆wji) turns out to be a function of tjf −tif .
# The direction of the change depends on the relative timing of preand postsynaptic spikes. 496 Synaptic
# Two intracellular electrodes were used (one for stimulation, one
# for measurement of the neuronal response).
# Excellent spatial and temporal resolution.
# Synaptic weight change (∆wji) turns out to be a function of tjf −tif .
# The direction of the change depends on the relative timing of preand postsynaptic spikes. 496 Synaptic
# ### Neuron Populations
#
#
# $\Delta t$ is a small time interval
#
# ##### Population Activity $A(t)$
# $n_{act}(t, t + \Delta t)$ return number of spikes for population
#
# $A(t) = \lim\limits_{\Delta t \to 0} \frac{n_{act}(t, t + \Delta t)}{\Delta t * N}$ in discrit mathematic we approximate to $\frac{\sum\limits_{j=1}^N \sum\limits_{f} \delta (t - t^f)}{N}$
#
# In[202]:
class Population:
    """A fixed-size population of spiking neurons, split into an inhibitory
    part and an excitatory part according to ``conf.splitSize``.

    The population records its fractional activity A(t) on every
    :meth:`activate` call.
    """

    def __init__(self, conf: namedtuple):
        self.size = conf.size
        # First the inhibitory fraction, then the excitatory remainder.
        n_inhibitory = int((1 - conf.splitSize) * self.size)
        inhibitory = [conf.neuron(conf.isiConfig) for _ in range(n_inhibitory)]
        excitatory = [conf.neuron(conf.iseConfig) for _ in range(self.size - n_inhibitory)]
        self.neurons = inhibitory + excitatory
        self.α = conf.traceAlpha
        self.activities = []

    def activate(self, I, t):
        """Drive every neuron with current ``I`` at time ``t`` and append
        (t, fraction of neurons that spiked) to ``self.activities``."""
        spike_count = sum(neuron.integrate(I, t) for neuron in self.neurons)
        self.activities.append((t, spike_count / self.size))

    def reset_inputs(self):
        """Decay each neuron's accumulated synaptic input by (1 - α),
        clamping the result to be non-positive.

        NOTE(review): ``min(0, ...)`` zeroes any positive (excitatory)
        input while only decaying negative input — confirm this asymmetry
        is intended rather than ``max(0, ...)``.
        """
        decay = 1 - self.α
        for neuron in self.neurons:
            neuron.input = min(0, decay * neuron.input)

    def get_detailed_spikes(self):
        """Return an array of [neuron_index, spike_time] rows covering all
        spikes recorded in each neuron's ``spike_trace``."""
        pairs = []
        for i, neuron in enumerate(self.neurons):
            pairs.extend([i, tf] for tf in neuron.spike_trace)
        return np.array(pairs)
# #### Configuration (namedtuple)
#
# lif:
# + tau : $\tau$
# + resistor: $R$
# + threshold: $\theta$
# + uRest: $u_{rest}$
# + dt: small interval of time used to make the dynamic equation discrete
# + isInhibitory: flag showing whether a neuron is inhibitory or not
#
# synapse:
# + neuron: reference to post synaptic neuron (it can be from other populations)
# + w: connection weight
#
# population:
# + size: size of population
# + splitSize: $pop = [(1-splitSize) * size] * neurons^{inhibitory} \cup [splitSize * size] * neurons^{excitatory}$
# + neuron: neuron model [LIF, ELIF, AELIF, ...]
# + isiConfig: configuration for inhibitory neurons
# + iseConfig: configuration for excitatory neurons
# + traceAlpha: $\alpha$ used in inputs reseting phase
# In[206]:
# Named-tuple "schemas" for every configurable object in the simulation.
CONFIGS = {
    # neurons
    "lif": namedtuple('LIFConfig', 'tau resistor threshold uRest dt isInhibitory'),
    # connections
    "synapse": namedtuple('SynapseConfig', 'neuron w'),
    "connection": namedtuple('ConnectionTypeConfig', 'mu sigma coupling_probability'),
    # population
    "population": namedtuple('PopulationParams', 'size splitSize neuron isiConfig iseConfig traceAlpha'),
}
# In[207]:
Synapse = CONFIGS['synapse']


def full_connectivity(pre_neurons, post_neurons, config):
    """Connect every presynaptic neuron to every postsynaptic neuron.

    Each synapse weight is drawn i.i.d. from N(config.mu, config.sigma).
    """
    μ, σ = config.mu, config.sigma
    normal = np.random.normal
    for pre in pre_neurons:
        for post in post_neurons:
            pre.posts.append(Synapse(post, normal(μ, σ)))


def fixed_coupling_probability_connectivity(pre_neurons, post_neurons, config):
    """Connect a random subset of pre neurons to a random subset of post neurons.

    ``config.coupling_probability`` sets the fraction sampled (without
    replacement) on each side; weights are drawn from N(mu, sigma).
    """
    C_pre_size = int(config.coupling_probability * len(pre_neurons))
    C_post_size = int(config.coupling_probability * len(post_neurons))
    μ, σ = config.mu, config.sigma
    normal = np.random.normal
    for pre in np.random.choice(pre_neurons, C_pre_size, replace=False):
        pre.posts.extend([
            Synapse(post, normal(μ, σ)) for post in
            # BUG FIX: targets must be sampled from post_neurons.  The
            # original sampled pre_neurons, so connections never reached the
            # postsynaptic population and C_post_size (derived from
            # post_neurons) could exceed the candidate pool and crash.
            np.random.choice(post_neurons, C_post_size, replace=False)
        ])


def fixed_number_of_presynaptics_parents(pre_neurons, post_neurons, config):
    """Give each postsynaptic neuron exactly C random presynaptic parents,
    where C = coupling_probability * len(pre_neurons)."""
    C = int(config.coupling_probability * len(pre_neurons))
    μ, σ = config.mu, config.sigma
    normal = np.random.normal
    for post in post_neurons:
        for pre in np.random.choice(pre_neurons, C, replace=False):
            pre.posts.append(Synapse(post, normal(μ, σ)))


# Dispatch table from connectivity-scheme name to builder function.
connectivities = {
    'full': full_connectivity,
    'fixedCP': fixed_coupling_probability_connectivity,
    'fixedNPP': fixed_number_of_presynaptics_parents,
}
# In[ ]:
### Utilities + Plotters
def noise(μ=0, σ=0.5):
    """Draw a single Gaussian noise sample with mean μ and std σ."""
    return np.random.normal(loc=μ, scale=σ)
def choice(*choices):
    """Return one of the supplied options, selected uniformly at random."""
    options = list(choices)
    return np.random.choice(options)
class CurrentManager:
    """Context manager pairing an input-current series with its time grid.

    ``currents`` may be a ready-made sequence or a callable taking
    (duration, dt) that produces one.
    """

    def __init__(self, duration, dt, currents):
        self.time_intervals = np.arange(0, duration, dt)
        if callable(currents):
            self.currents = currents(duration, dt)
        else:
            self.currents = currents

    def __enter__(self):
        # Iterate (current, time) pairs inside a `with` block.
        return zip(self.currents, self.time_intervals)

    def __exit__(self, type, value, traceback):
        # Drop references so large current arrays can be reclaimed.
        del self.currents
        del self.time_intervals
# In[208]:
def raster_plot(x, y, c):
    """Scatter spike times (x) against neuron indices (y), colored by c,
    with a two-entry legend distinguishing neuron types."""
    plt.scatter(x, y, s=1, c=c)
    # Empty scatter calls exist only to register one legend marker per type.
    legend_entries = [(20, 'y', 'inhibitory'), (80, 'm', 'excititory')]
    for area, color, label in legend_entries:
        plt.scatter([], [], c=color, alpha=0.8, s=area, label=label)
    plt.legend(scatterpoints=1, frameon=False, labelspacing=1)
    plt.title("raster plot")
    plt.ylabel("Neurons")
    plt.xlabel("times")
    plt.show()
# In[ ]:
| en | 0.702881 | #!/usr/bin/env python # coding: utf-8 # ## Packages # In[3]: # In[4]: ⚡⚡⚡⚡⚡⚡⚡ # Remove this if you want to test neural activities. this is for reporducibility purpose # ### Optimizations for Matplotlib # In[5]: # ### Leaky Integrate & Fire Model # In[183]: integrate(currentValue, currentTime) return one if neuron spikes in currentTime otherwise it will return zero # compute potential # Add presynaptic input # compute spikes # update entry input for post synaptic neurons # future spikes time # if neuron has just spiked in time t return 1 otherwise 0 # reset inputs # search for next 6 feasable seconds delayed # In[ ]: # ### Inverstigation # In[20]: # LIFConfig=namedtuple('LIFConfig', 'tau resistor threshold uRest dt isInhibitory') # lif_config = LIFConfig(5, 10, -65, -70, 0.5, False) # currents = np.linspace(0, 2, 200) # currents = np.array([0.6 for i in range(200)]) # dts = np.linspace(0,10, 200) # neuron = LIF(lif_config) # In[ ]: # ## STDP unsupervised learning # # # ```python # src = LIF_Neuron # dest = LIF_Neuron # # current_src <- noisy current for src for duration # current_dest <- noisy current for dest for duration # # #connect src to dest with stdp config # # for neuron in [src, dest]: # # compute neuron potential # # apply pre synaptic for neuron # # compute spikes for neuron # # reset inputs for neuron # # if src.spiked and dest.spiked : # update.stdp_connection # store.append([deltaT, deltaW] # ``` # # In[21]: # In[27]: # ### first Phase # In[191]: # params # In[175]: # In[193]: # set connection # $\Delta_t \geq 0 \implies t_{pre} < t_{post}$ # In[197]: # A Plus or minus minus :) # A minus # SynapseConfig = namedtuple('SynapseConfig', 'neuron w') # syn = SynapseConfig(50, 1) # print(syn) # syn = syn._replace(w=2) # print(syn) # In[ ]: # ## Report # The temporal distance of the spikes in the pair is of the order of a # few to tens of milliseconds, whereas the temporal distance # between the pairs is of the order of hundreds of 
milliseconds to # seconds. # Experiments show that increasing the repetition frequency leads # to an increase in potentiation. # The pair-based STDP cannot give a full account of experimental # results (repetition frequency or triplet and quadruplet # experiments). # According to the pair-based STDP, if the repetition frequency r is # increased, the depression effect becomes greater, which is not # experimentally true! # Instead, symmetric triplets of the form pre-post-pre and # post-pre-post are used. # # The LTP is proportional to the value of xj evaluated at tif and the slow trace yi;2: # ∆w+ # ij (tif ) = A+(wij)xj(tif )yi;2(tif−); # where tf− # i indicates the value of yi;2 before it is incremented due to the postsynaptic spike at tif . # Similar argument works for LTD # # STDP learning rule # The change in weight of a synapse depends on the temporal # difference j∆tj = jtpost − tprej: # ∆w+ = A+(w) · exp(−j∆tj=t+) at tpost for tpre < tpost; # ∆w # − = A−(w) · exp(−j∆tj=t−) at tpre for tpre < tpost: # Happens immediately after each # spike (at times tpre and tpost). # This rule is fully specified by # defining: # (i) the weight-dependence of the # amplitude parameter A±(w). # (ii) which spike pairs are taken into # consideration (all pairs or # nearest one). # Two intracellular electrodes were used (one for stimulation, one # for measurement of the neuronal response). # Excellent spatial and temporal resolution. # Synaptic weight change (∆wji) turns out to be a function of tjf −tif . # The direction of the change depends on the relative timing of preand postsynaptic spikes. 496 Synaptic # Two intracellular electrodes were used (one for stimulation, one # for measurement of the neuronal response). # Excellent spatial and temporal resolution. # Synaptic weight change (∆wji) turns out to be a function of tjf −tif . # The direction of the change depends on the relative timing of preand postsynaptic spikes. 
496 Synaptic # ### Neuron Populations # # # $\Delta t$ is a small time interval # # ##### Population Activity $A(t)$ # $n_{act}(t, t + \Delta t)$ return number of spikes for population # # $A(t) = \lim\limits_{\Delta t \to 0} \frac{n_{act}(t, t + \Delta t)}{\Delta t * N}$ in discrit mathematic we approximate to $\frac{\sum\limits_{j=1}^N \sum\limits_{f} \delta (t - t^f)}{N}$ # # In[202]: # inhibitory neurons # exicitory neurons # count of spikes for all neurons with a generator # #### Configuration (namedtuple) # # lif: # + tau : $\tau$ # + resistor: $R$ # + threshold: $\theta$ # + uRest: $u_{rest}$ # + dt: small interval of time to make dynamic equeation discrete # + isInhibitory: falg showing a neuron is inhibitory or not # # synapse: # + neuron: reference to post synaptic neuron (it can be from other populations) # + w: connection weight # # population: # + size: size of population # + splitSize: $pop = [(1-splitSize) * size] * neurons^{inhibitory} \cup [splitSize * size] * neurons^{excitatory}$ # + neuron: neuron model [LIF, ELIF, AELIF, ...] # + isiConfig: configuration for inhibitory neurons # + iseConfig: configuration for excitatory neurons # + traceAlpha: $\alpha$ used in inputs reseting phase # In[206]: # neurons # connections # population # In[207]: # In[ ]: ### utitlites + Plotters # In[208]: # In[ ]: | 2.235243 | 2 |
continuous_threading/__init__.py | justengel/continuous_threading | 7 | 6614911 | from .__meta__ import version as __version__
# Public names re-exported by `from continuous_threading import *`.
__all__ = [
    # Base threading library
    'deque', 'BaseThread', 'Event', 'Timer', 'Lock', 'RLock', 'Condition', 'Semaphore', 'BoundedSemaphore',
    'active_count', 'current_thread', 'enumerate', 'setprofile', 'settrace', 'main_thread', 'Barrier',
    'BrokenBarrierError',
    # Timer utils
    'start_timer', 'stop_timer',
    # Threading utils
    'make_thread_safe',
    # continuous_threading
    'is_py27', 'Queue', 'Empty',
    'Thread', 'ContinuousThread', 'PausableThread', 'OperationThread', 'PeriodicThread',
    # Fix threading._shutdown
    'threading_shutdown', 'get_shutdown_timeout', 'set_shutdown_timeout',
    'get_allow_shutdown', 'set_allow_shutdown', 'shutdown',
    'using_custom_shutdown', 'set_shutdown', 'reset_shutdown',
    # Multiprocessing (these may resolve to None if the backend fails to load)
    'ProcessError', 'MpEvent', 'MpQueue', 'MpJoinableQueue', 'MpSimpleQueue',
    'is_parent_process_alive', 'mark_task_done',
    'Process', 'ContinuousProcess', 'PausableProcess', 'PeriodicProcess', 'OperationProcess',
    'BaseCommand', 'ObjectCommand', 'ProcessCommand', 'ExecCommand', 'CommandProcess'
]
from collections import deque
# ===== Required threading classes =====
from threading import Thread as BaseThread, Event, Timer
# threading imports that are not required. Just a shortcut
# Optional threading names: if the host's threading module is broken, every
# missing name is replaced by a placeholder that raises on instantiation.
try:
    from threading import Lock, RLock, Condition, Semaphore, BoundedSemaphore, \
        active_count, current_thread, enumerate, setprofile, settrace
except ImportError as err:
    # Your threading library has problems, but I don't care
    # Placeholder class: any attempt to use one of the missing names raises
    # EnvironmentError carrying the original ImportError text.
    # NOTE(review): this class does not inherit from Exception (the Python
    # 2.7 fallback below does) — confirm the inconsistency is intentional.
    class ThreadingLibraryError:
        error = err
        def __new__(cls, *args, **kwargs):
            raise EnvironmentError('Cannot import from threading library! ' + str(cls.error))
    # NOTE: `enumerate` here deliberately shadows the builtin with
    # threading.enumerate, matching the name exported in __all__.
    Lock = ThreadingLibraryError
    RLock = ThreadingLibraryError
    Condition = ThreadingLibraryError
    Semaphore = ThreadingLibraryError
    BoundedSemaphore = ThreadingLibraryError
    active_count = ThreadingLibraryError
    current_thread = ThreadingLibraryError
    enumerate = ThreadingLibraryError
    setprofile = ThreadingLibraryError
    settrace = ThreadingLibraryError
# Names added after Python 2.7: stub them out with a raising placeholder
# when unavailable.
try:
    from threading import main_thread, Barrier, BrokenBarrierError
except ImportError as err:
    # Running Python 2.7?
    # BUG FIX: the handler must bind its own `err`.  The `err` captured by
    # the previous try/except is unbound once that handler exits (PEP 3110),
    # so the original bare `except ImportError:` raised NameError on the
    # `error = err` line below.
    class ThreadingLibraryError(Exception):
        error = err
        def __new__(cls, *args, **kwargs):
            raise EnvironmentError('Cannot import from threading library! ' + str(cls.error))
    main_thread = ThreadingLibraryError
    Barrier = ThreadingLibraryError
    BrokenBarrierError = ThreadingLibraryError
# ===== Timer utils =====
from .timer_utils import start_timer, stop_timer
# ===== Continuous Threading Objects =====
from .threading_utils import make_thread_safe
from .safe_threading import is_py27, Queue, Empty, \
Thread, ContinuousThread, PausableThread, OperationThread, PeriodicThread
# ===== Fix threading._shutdown =====
from .fix_threading_shutdown import \
shutdown, get_shutdown_timeout, set_shutdown_timeout, get_allow_shutdown, set_allow_shutdown, \
threading_shutdown, custom_shutdown, using_custom_shutdown, set_shutdown, reset_shutdown
# ===== Multiprocessing Objects =====
# Multiprocessing support is optional: if the safe_multiprocessing backend
# fails to load for any reason, every exported name degrades to None so
# `from continuous_threading import *` still succeeds.
try:
    from .safe_multiprocessing import ProcessError, MpEvent, MpQueue, MpJoinableQueue, MpSimpleQueue, \
        is_parent_process_alive, mark_task_done, \
        Process, ContinuousProcess, PausableProcess, PeriodicProcess, OperationProcess, \
        BaseCommand, ObjectCommand, ProcessCommand, ExecCommand, CommandProcess
except (ImportError, Exception):
    # NOTE(review): `Exception` already covers ImportError, so the tuple is
    # redundant — the broad catch looks deliberate (any backend failure is
    # swallowed); confirm before narrowing it.
    ProcessError = None
    MpEvent = None
    MpQueue = None
    MpJoinableQueue = None
    MpSimpleQueue = None
    is_parent_process_alive = None
    mark_task_done = None
    Process = None
    ContinuousProcess = None
    PausableProcess = None
    PeriodicProcess = None
    OperationProcess = None
    BaseCommand = ObjectCommand = ProcessCommand = ExecCommand = None
    CommandProcess = None
| from .__meta__ import version as __version__
__all__ = [
# Base threading library
'deque', 'BaseThread', 'Event', 'Timer', 'Lock', 'RLock', 'Condition', 'Semaphore', 'BoundedSemaphore',
'active_count', 'current_thread', 'enumerate', 'setprofile', 'settrace', 'main_thread', 'Barrier',
'BrokenBarrierError',
# Timer utils
'start_timer', 'stop_timer',
# Threading utils
'make_thread_safe',
# continuous_threading
'is_py27', 'Queue', 'Empty',
'Thread', 'ContinuousThread', 'PausableThread', 'OperationThread', 'PeriodicThread',
# Fix threading._shutdown
'threading_shutdown', 'get_shutdown_timeout', 'set_shutdown_timeout',
'get_allow_shutdown', 'set_allow_shutdown', 'shutdown',
'using_custom_shutdown', 'set_shutdown', 'reset_shutdown',
# Multiprocessing
'ProcessError', 'MpEvent', 'MpQueue', 'MpJoinableQueue', 'MpSimpleQueue',
'is_parent_process_alive', 'mark_task_done',
'Process', 'ContinuousProcess', 'PausableProcess', 'PeriodicProcess', 'OperationProcess',
'BaseCommand', 'ObjectCommand', 'ProcessCommand', 'ExecCommand', 'CommandProcess'
]
from collections import deque
# ===== Required threading classes =====
from threading import Thread as BaseThread, Event, Timer
# threading imports that are not required. Just a shortcut
try:
from threading import Lock, RLock, Condition, Semaphore, BoundedSemaphore, \
active_count, current_thread, enumerate, setprofile, settrace
except ImportError as err:
# Your threading library has problems, but I don't care
class ThreadingLibraryError:
error = err
def __new__(cls, *args, **kwargs):
raise EnvironmentError('Cannot import from threading library! ' + str(cls.error))
Lock = ThreadingLibraryError
RLock = ThreadingLibraryError
Condition = ThreadingLibraryError
Semaphore = ThreadingLibraryError
BoundedSemaphore = ThreadingLibraryError
active_count = ThreadingLibraryError
current_thread = ThreadingLibraryError
enumerate = ThreadingLibraryError
setprofile = ThreadingLibraryError
settrace = ThreadingLibraryError
try:
from threading import main_thread, Barrier, BrokenBarrierError
except ImportError:
# Running Python 2.7?
class ThreadingLibraryError(Exception):
error = err
def __new__(cls, *args, **kwargs):
raise EnvironmentError('Cannot import from threading library! ' + str(cls.error))
main_thread = ThreadingLibraryError
Barrier = ThreadingLibraryError
BrokenBarrierError = ThreadingLibraryError
# ===== Timer utils =====
from .timer_utils import start_timer, stop_timer
# ===== Continuous Threading Objects =====
from .threading_utils import make_thread_safe
from .safe_threading import is_py27, Queue, Empty, \
Thread, ContinuousThread, PausableThread, OperationThread, PeriodicThread
# ===== Fix threading._shutdown =====
from .fix_threading_shutdown import \
shutdown, get_shutdown_timeout, set_shutdown_timeout, get_allow_shutdown, set_allow_shutdown, \
threading_shutdown, custom_shutdown, using_custom_shutdown, set_shutdown, reset_shutdown
# ===== Multiprocessing Objects =====
try:
from .safe_multiprocessing import ProcessError, MpEvent, MpQueue, MpJoinableQueue, MpSimpleQueue, \
is_parent_process_alive, mark_task_done, \
Process, ContinuousProcess, PausableProcess, PeriodicProcess, OperationProcess, \
BaseCommand, ObjectCommand, ProcessCommand, ExecCommand, CommandProcess
except (ImportError, Exception):
ProcessError = None
MpEvent = None
MpQueue = None
MpJoinableQueue = None
MpSimpleQueue = None
is_parent_process_alive = None
mark_task_done = None
Process = None
ContinuousProcess = None
PausableProcess = None
PeriodicProcess = None
OperationProcess = None
BaseCommand = ObjectCommand = ProcessCommand = ExecCommand = None
CommandProcess = None
| en | 0.778282 | # Base threading library # Timer utils # Threading utils # continuous_threading # Fix threading._shutdown # Multiprocessing # ===== Required threading classes ===== # threading imports that are not required. Just a shortcut # Your threading library has problems, but I don't care # Running Python 2.7? # ===== Timer utils ===== # ===== Continuous Threading Objects ===== # ===== Fix threading._shutdown ===== # ===== Multiprocessing Objects ===== | 1.720687 | 2 |
2020/aoc_2020_02/aoc_2020_02.py | ericcolton/AdventOfCode | 0 | 6614912 | #!/usr/bin/env python3
"""
Advent of Code 2020 Day 2: Password Philosophy
https://adventofcode.com/2020/day/2
Solution by <NAME>
"""
import re
from collections import namedtuple
Entry = namedtuple('Entry', ['min', 'max', 'policy_char', 'text'])


def does_password_follow_policy_part_1(entry: Entry):
    """Old policy: the policy character must occur between min and max
    times (inclusive) in the password text."""
    occurrences = entry.text.count(entry.policy_char)
    return entry.min <= occurrences <= entry.max


def does_password_follow_policy_part_2(entry: Entry):
    """New policy: exactly one of the 1-based positions min/max must hold
    the policy character; out-of-range positions fail the policy."""
    positions = (entry.min, entry.max)
    if any(pos > len(entry.text) for pos in positions):
        return False
    matches = [entry.text[pos - 1] == entry.policy_char for pos in positions]
    # Exclusive-or: exactly one position may match.
    return matches[0] != matches[1]
def count_passwords_following_policy(data, func):
    """Count the entries in `data` for which `func` returns a truthy value."""
    return sum(1 for entry in data if func(entry))
def parse_input_file(file):
    """Parse `min-max char: password` lines into Entry records.

    Lines that do not match the expected pattern are silently skipped.
    """
    pattern = re.compile(r'(\d+)\-(\d+)\s*(\w)\s*\:\s*(\w+)')
    entries = []
    for line in file:
        match = pattern.search(line.rstrip())
        if match is None:
            continue
        lo, hi, policy_char, text = match.groups()
        entries.append(Entry(int(lo), int(hi), policy_char, text))
    return entries
if __name__ == '__main__':
    # Derive the input path from this script's name: <script>_input.txt.
    # BUG FIX: str.rstrip('.py') strips a *character set* ('.', 'p', 'y')
    # and would mangle script names ending in those letters; slicing off
    # the exact '.py' suffix is correct for any name.
    input_filename = __file__[:-len('.py')] + '_input.txt'
    with open(input_filename, 'r') as file:
        data = parse_input_file(file)
    part_1_matching_count = count_passwords_following_policy(data, does_password_follow_policy_part_1)
    print("Solution to Part 1 is {}".format(part_1_matching_count))
    part_2_matching_count = count_passwords_following_policy(data, does_password_follow_policy_part_2)
    print("Solution to Part 2 is {}".format(part_2_matching_count))
"""
Advent of Code 2020 Day 2: Password Philosophy
https://adventofcode.com/2020/day/2
Solution by <NAME>
"""
import re
from collections import namedtuple
Entry = namedtuple('Entry', ['min', 'max', 'policy_char', 'text'])
def does_password_follow_policy_part_1(entry: Entry):
count = 0
for c in entry.text:
if c == entry.policy_char:
count += 1
if count > entry.max:
return False
return count >= entry.min
def does_password_follow_policy_part_2(entry: Entry):
if entry.min > len(entry.text) or entry.max > len(entry.text):
return False
min_char_match = entry.text[entry.min - 1] == entry.policy_char
max_char_match = entry.text[entry.max - 1] == entry.policy_char
return min_char_match ^ max_char_match
def count_passwords_following_policy(data, func):
return len(list(filter(func, data)))
def parse_input_file(file):
data = []
expr = r'(\d+)\-(\d+)\s*(\w)\s*\:\s*(\w+)'
for line in file:
match = re.search(expr, line.rstrip())
if match:
data.append(Entry(int(match.group(1)), int(match.group(2)), match.group(3), match.group(4)))
return data
if __name__ == '__main__':
input_filename = __file__.rstrip('.py') + '_input.txt'
with open(input_filename, 'r') as file:
data = parse_input_file(file)
part_1_matching_count = count_passwords_following_policy(data, does_password_follow_policy_part_1)
print("Solution to Part 1 is {}".format(part_1_matching_count))
part_2_matching_count = count_passwords_following_policy(data, does_password_follow_policy_part_2)
print("Solution to Part 2 is {}".format(part_2_matching_count)) | en | 0.64699 | #!/usr/bin/env python3 Advent of Code 2020 Day 2: Password Philosophy https://adventofcode.com/2020/day/2 Solution by <NAME> | 3.874121 | 4 |
src/DomainMapper/DomainMapper.py | FriedLabJHU/DomainMapper | 1 | 6614913 | import re
import sys
import argparse
from math import log
from datetime import datetime
from Bio.SearchIO import parse
from DomainMapper import LatestDomains, IODomains
# returns the combination of two E-Values (treated as psuedo P-Values) with Tippett's method
def log_mean_E_val(eval_A, eval_B):
    """Combine two E-values via their logarithmic mean.

    Returns (A - B) / (ln A - ln B), the logarithmic mean of the two
    values (treated as pseudo p-values per Tippett's method), falling back
    to min(A, B) when the mean is undefined — e.g. a zero E-value, or
    values so close that the log difference underflows to zero.
    """
    if eval_A == eval_B:
        return eval_A
    try:
        # BUG FIX: the original computed this expression twice, assigning
        # the first result to an unused local `val`.
        return (eval_A - eval_B) / (log(eval_A) - log(eval_B))
    except (ValueError, ZeroDivisionError):
        # log(0) raises ValueError; nearly-equal inputs can zero the
        # denominator.  (Narrowed from a bare `except:`.)
        return min(eval_A, eval_B)
# returns the length of the set A∩B, can also return an array that includes all intersecting residue indices
def RangeIntersection(range_A, range_B, array = False):
    """Return the size of the intersection of two residue ranges.

    With ``array=True`` also return the set of intersecting residue indices
    as ``(length, indices)``.
    """
    shared = set(range_A) & set(range_B)
    if array:
        return len(shared), shared
    return len(shared)
# returns the query/map range of an hsp for only ranges with an aligned HMM -- this will create non-contiguous domains if the HMM matches seperate parts on a query range
def hsp_range_finder(hsp, map_range_idx, hmm_range_idx):
    """Return the query residue indices covered by an HSP, excluding large
    internal alignment gaps.

    If the query span exceeds the HMM span by more than the global
    ``args.intra_gap`` tolerance, stretches of >= intra_gap '.' characters
    in the HMM alignment row are treated as gaps and their corresponding
    query positions are removed, yielding a (possibly non-contiguous)
    residue list.  Uses the module-global ``args``; ``hsp`` is a Biopython
    HSP whose ``.aln`` holds [query, hmm] aligned sequences.
    """
    query_start, query_end = map_range_idx
    hmm_start, hmm_end = hmm_range_idx
    alignment = hsp.aln
    # Row 0 is the query sequence, row 1 the HMM consensus.
    query_aln = alignment[0]
    hmm_aln = alignment[1]
    gap_ranges = []
    # Only hunt for gaps when the query span is long enough to hide one.
    if (query_end - query_start) - (hmm_end - hmm_start) > args.intra_gap:
        alignment_gap = re.finditer('\.{'+str(args.intra_gap)+',}', str(hmm_aln.seq))
        # Finding the range of the gap in the hmm alignment
        # Match the hmm gap range start/end index to the query sequence index
        if alignment_gap:
            for gap in alignment_gap:
                gap_start_idx = gap.start()
                gap_end_idx = gap.end()
                # Shift alignment-column indices into query coordinates by
                # subtracting the '-' gap characters seen in the query row.
                gap_start_query_idx = gap_start_idx + query_start - len(re.findall('-',str(query_aln.seq[:gap_start_idx])))
                # NOTE(review): the end index also counts query '-' chars only
                # up to gap_start_idx (not gap_end_idx) — confirm intended.
                gap_end_query_idx = gap_end_idx + query_start - len(re.findall('-',str(query_aln.seq[:gap_start_idx])))
                gap_ranges += list(range(gap_start_query_idx, gap_end_query_idx))
    # Remove the gap from the range, so that it doesn't hog up those resi's (there may be another domain inside)
    # If no gaps are found the whole will be returned
    domain_map_range = [x for x in range(query_start, query_end) if x not in gap_ranges]
    return domain_map_range
# mark_for_deletion and overlap_logic_array are a global variables
def eliminate_overlapping_domains(idx, eval, overlap_map, domain_map):
    """Recursively flag overlapping domains for deletion, keeping only the
    lowest-E-value domain within each overlap cluster.

    Mutates the module-global ``mark_for_deletion`` list (1 = delete) and
    reads the module-global ``overlap_logic_array`` adjacency matrix;
    ``overlap_map`` is that matrix's row for ``idx``.  ``eval`` shadows the
    builtin here and holds the reference domain's E-value.
    """
    if mark_for_deletion[idx]:
        return
    # save all idx and eval for domains with which the reference domain overlaps with
    tmp_idx = [idx]
    tmp_eval = [eval]
    for i,overlap in enumerate(overlap_map):
        if overlap and not mark_for_deletion[i]: # skips over i=idx since that always maps to 0
            F_grp_ol, eval_ol = domain_map[i][0:2]
            tmp_idx.append(i)
            tmp_eval.append(eval_ol)
    # check if the reference domain is the best fit for it's sequence coverage, if not check if any of the overlaping domains are recursively
    min_eval_idx = tmp_idx[tmp_eval.index(min(tmp_eval))]
    if min_eval_idx == idx:
        # The reference wins: delete every live overlapper.
        for j in tmp_idx:
            if j != idx:
                mark_for_deletion[j] = 1
        return
    else:
        # A better-scoring overlapper exists; re-evaluate from its row.
        eliminate_overlapping_domains(min_eval_idx, min(tmp_eval), overlap_logic_array[min_eval_idx],domain_map)
descriptionText = """"""
# Command-line interface: input/output paths, ECOD domain source, and the
# numeric tolerances used throughout the mapping pass.
argparser = argparse.ArgumentParser(description=descriptionText)
argparser.add_argument('-f', type=str, default='NULL', help='Input path to file from \'hmmscan\'')
argparser.add_argument('-o', type=str, default='NULL', help='Output path for mapped domains')
argparser.add_argument('--ecod_domains', default='NULL', type=str, help='Path to ECOD \'Latest Domains\' text file (default = file is automatically downloaded [165 MB Free Space Required (deleted after parsing)] [2 MB File Saved])')
argparser.add_argument('--intra_gap', '--intra_domain_gap_tolerance', type=int, default=35, help='Optional gap size between HMM sequence and query sequence for non-contiguous alignment within a domain (default = 35)')
argparser.add_argument('--inter_gap', '--inter_domain_gap_tolerance', type=int, default=20, help='Optional gap size between two domains sequences for non-contiguous merging (default = 20)')
argparser.add_argument('--overlap', '--domain_overlap_tolerance', type=int, default=15, help='Optional overlap between HMM domain sequence and fasta aligment in consecutive or split domains (default = 15)')
argparser.add_argument('--eval_cutoff', type=float, default=1e-5, help='Optional upper bound tolerance of the E-value (default = 1e-5)')
argparser.add_argument('--update', help='Update ECOD \'Latest Domains\'', default=False, action="store_true")
args = argparser.parse_args()
# BUG FIX: sys.argv always contains at least the script name, so the
# original `len(sys.argv) < 1` check could never fire; `< 2` actually
# detects an invocation with no arguments, as the message says.
if len(sys.argv) < 2:
    IODomains.ErrorMsg("No Arguments Passed. View help page with \'DomainMapper.py -h\'.")
if args.f == 'NULL':
    IODomains.ErrorMsg("No Input hmmscan file provided. View help page with \'DomainMapper.py -h\'")
if args.o == 'NULL':
    IODomains.ErrorMsg("No Output path provided. View help page with \'DomainMapper.py -h\'")
# Load the ECOD F-group dictionary, downloading/updating it if requested.
if args.ecod_domains == 'NULL':
    if args.update:
        LatestDomains.update()
    ecod_domain_dict = LatestDomains.load()
else:
    ecod_domain_dict = LatestDomains.load(args.ecod_domains)
if args.intra_gap < 0:
    IODomains.ErrorMsg("Non-positive option detected for gap size tolerance. Please ensure all numerical arguments are positive numbers. View help page with \'DomainMapper.py -h\'")
if args.overlap < 0:
    IODomains.ErrorMsg("Non-positive option detected for overlap tolerance. Please ensure all numerical arguments are positive numbers. View help page with \'DomainMapper.py -h\'")
if args.eval_cutoff < 0:
    IODomains.ErrorMsg("Non-positive option detected for E-value cuttoff. Please ensure all numerical arguments are positive numbers. View help page with \'DomainMapper.py -h\'")
# Non-contiguous domain counter
NC_domain_cnt = 0
# Circular Permutant domain counter
CP_domain_cnt = 0
# Intervening domain counter
IS_domain_cnt = 0
# Total domain counter
Tot_domain_cnt = 0
# Final formatted output
output_lines = list()
# The hmmscan file is parsed twice: this first pass only counts proteins so
# the progress bar has a total (the parser yields a one-shot generator).
# Very inefficient step but worth it
hmmscan = parse(args.f,'hmmer3-text')
num_proteins = sum(1 for _ in hmmscan)
IODomains.printProgressBar(0, num_proteins, prefix = 'Mapping:', suffix = 'Complete', length = 50)
hmmscan = parse(args.f,'hmmer3-text')
for p_idx, protein in enumerate(hmmscan):
accession = protein.id
potential_domain_mappings = list()
for hit in protein.hits:
# Single sequence alignment hit
if len(hit.hsps) == 1:
map_range = hit.hsps[0].query_range
hmm_range = hit.hsps[0].hit_range
evalue = hit.hsps[0].evalue_cond
F_group = hit.hsps[0].hit_id
query_property = list()
# Keep if the E-value is lower than the cutoff
if evalue < args.eval_cutoff:
map_range = hsp_range_finder(hit.hsps[0], map_range, hmm_range)
potential_domain_mappings.append([F_group, evalue, map_range, hmm_range, query_property])
# Multiple sequence alignment hits
# There is potential for these to be non-contiguous, circular permutants, or insertional domains
if len(hit.hsps) > 1:
potential_noncontig_domains = list()
overlapping_potential_noncontig_domains = list()
for hsp in hit.hsps:
map_range = hsp.query_range
hmm_range = hsp.hit_range
evalue = hsp.evalue_cond
F_group = hsp.hit_id
query_property = list()
# Keep if the E-value is lower than the cutoff
if evalue < args.eval_cutoff:
map_range = hsp_range_finder(hit.hsps[0], map_range, hmm_range)
potential_noncontig_domains.append([F_group, evalue, map_range, hmm_range, query_property])
# Check if any potential non-contig. domains must be combined
# By referencing domain_A from [:-1] (all but the last) and domain_B from [a+1:] (from index one more than "A" to the end)
# We are guaranteed to only check unique pairs of domans against each other
# In a non-contiguous domain, what happens is that the various AA-ranges correspond to distinct portions in the query sequence and in the HMM model
if len(potential_noncontig_domains) > 1:
for a,domain_A in enumerate(potential_noncontig_domains[:-1]):
for b,domain_B in enumerate(potential_noncontig_domains[a+1:]):
F_grp_A, eval_A, map_rng_A, hmm_rng_A = domain_A[0:4]
F_grp_B, eval_B, map_rng_B, hmm_rng_B = domain_B[0:4]
# If the ranges of the hmm are indexed first for the downsteam query sequence and then the upstream query sequence, consider it a circular permutant
if RangeIntersection(map_rng_A,map_rng_B) <= args.overlap and RangeIntersection(list(range(hmm_rng_A[0],hmm_rng_A[1])),list(range(hmm_rng_B[0],hmm_rng_B[1]))) <= args.overlap and ((map_rng_A[0] < map_rng_B[0] and hmm_rng_A[0] > hmm_rng_B[0]) or (map_rng_A[0] > map_rng_B[0] and hmm_rng_A[0] < hmm_rng_B[0])) and F_grp_A != 'null' and F_grp_B != 'null' and F_grp_A == F_grp_B:
# Mark as Circ. Permut.
potential_noncontig_domains[b+a+1][4].append('CP')
# If there is a small space between the circ. permut. domains close it off and consider it a single contiguous domain
# otherwise, if the space is larger than the inter domain gap tolerance it can also be marked as non-contig. later on in this program
if map_rng_A[-1] < map_rng_B[0] and (map_rng_B[0] - map_rng_A[-1]) < args.inter_gap:
for x in range(map_rng_A[-1], map_rng_B[0]):
if x not in map_rng_A:
map_rng_A.append(x)
potential_noncontig_domains[a] = ['null',1,[],[],[]]
potential_noncontig_domains[b+a+1][1] = log_mean_E_val(eval_A, eval_B)
potential_noncontig_domains[b+a+1][2] = map_rng_A + map_rng_B
potential_noncontig_domains[b+a+1][3] = hmm_rng_A + hmm_rng_B
if map_rng_A[-1] > map_rng_B[0] and F_grp_A != 'null' and F_grp_B != 'null' and F_grp_A == F_grp_B:
map_rng_A = list(range(map_rng_A[0],map_rng_B[-1]))
potential_noncontig_domains[a] = ['null',1,[],[],[]]
potential_noncontig_domains[b+a+1][1] = log_mean_E_val(eval_A, eval_B)
potential_noncontig_domains[b+a+1][2] = map_rng_A
potential_noncontig_domains[b+a+1][3] = hmm_rng_A + hmm_rng_B
if RangeIntersection(map_rng_A,map_rng_B) <= args.overlap and RangeIntersection(list(range(hmm_rng_A[0],hmm_rng_A[1])),list(range(hmm_rng_B[0],hmm_rng_B[1]))) <= args.overlap and F_grp_A != 'null' and F_grp_B != 'null' and F_grp_A == F_grp_B:
# If two domain mappings are considerably close, consider them one domain
# otherwise, it is a non-contig. domain which will be marked as such later in this program
if map_rng_A[-1] < map_rng_B[0] and (map_rng_B[0] - map_rng_A[-1]) < args.inter_gap:
for x in range(map_rng_A[-1], map_rng_B[0]):
if x not in map_rng_A:
map_rng_A.append(x)
potential_noncontig_domains[a] = ['null',1,[],[],[]]
potential_noncontig_domains[b+a+1][1] = log_mean_E_val(eval_A, eval_B)
potential_noncontig_domains[b+a+1][2] = map_rng_A + map_rng_B
potential_noncontig_domains[b+a+1][3] = hmm_rng_A + hmm_rng_B
elif map_rng_A[-1] < map_rng_B[0] and (map_rng_B[0] - map_rng_A[-1]) >= args.inter_gap:
# prevent double saving
if domain_A not in overlapping_potential_noncontig_domains:
potential_noncontig_domains[a] = ['null',1,[],[],[]]
overlapping_potential_noncontig_domains.append(domain_A)
elif domain_A in overlapping_potential_noncontig_domains:
potential_noncontig_domains[a] = ['null',1,[],[],[]] # if B was added but more domains follow, deletes latest domain in list to overlap pot_nc_domains
if domain_B not in overlapping_potential_noncontig_domains:
overlapping_potential_noncontig_domains.append(domain_B)
elif b+a+1 == len(potential_noncontig_domains):
potential_noncontig_domains[b+a+1] = ['null',1,[],[],[]] # deletes last possible overlapping nc_domain, able to be readded later
# preparing to remove overlapping domains that are non-contiguous
if len(overlapping_potential_noncontig_domains) > 1:
overlap_logic_array = [[0 for i in range(len(overlapping_potential_noncontig_domains))] for j in range(len(overlapping_potential_noncontig_domains))]
for a,domain_A in enumerate(overlapping_potential_noncontig_domains[:-1]):
for b,domain_B in enumerate(overlapping_potential_noncontig_domains[a+1:]):
F_grp_A, eval_A, map_rng_A, hmm_rng_A = domain_A[0:4]
F_grp_B, eval_B, map_rng_B, hmm_rng_B = domain_B[0:4]
if RangeIntersection(map_rng_A,map_rng_B) > args.overlap:
overlap_logic_array[a][b+a+1] = 1
overlap_logic_array[b+a+1][a] = 1
if len(map_rng_A) < args.intra_gap:
if float(RangeIntersection(map_rng_A,map_rng_B))/float(len(map_rng_A)) > 0.7:
overlap_logic_array[a][b+a+1] = 1
overlap_logic_array[b+a+1][a] = 1
# recursively remove high E-Value overlapping domains
mark_for_deletion = [0 for i in range(len(overlapping_potential_noncontig_domains))]
for a,dom_overlap_map in enumerate(overlap_logic_array):
F_grp_A, eval_A = overlapping_potential_noncontig_domains[a][0:2]
eliminate_overlapping_domains(a,eval_A,dom_overlap_map,overlapping_potential_noncontig_domains)
nonoverlapping_potential_noncontig_domains = []
for a,delete in enumerate(mark_for_deletion):
if not delete:
nonoverlapping_potential_noncontig_domains.append(overlapping_potential_noncontig_domains[a])
if len(nonoverlapping_potential_noncontig_domains) > 1:
for a,domain_A in enumerate(nonoverlapping_potential_noncontig_domains[:-1]):
for b,domain_B in enumerate(nonoverlapping_potential_noncontig_domains[a+1:]):
F_grp_A, eval_A, map_rng_A, hmm_rng_A, query_prop_A = domain_A
F_grp_B, eval_B, map_rng_B, hmm_rng_B, query_prop_B = domain_B
nc_F_grp = F_grp_A
nc_eval = log_mean_E_val(eval_A, eval_B)
nc_map_rng = map_rng_A + map_rng_B
nc_hmm_rng = hmm_rng_A + hmm_rng_B
nc_query_prop = query_prop_A + query_prop_B
potential_noncontig_domains = [[nc_F_grp, nc_eval, nc_map_rng, nc_hmm_rng, nc_query_prop]]
else:
potential_noncontig_domains = nonoverlapping_potential_noncontig_domains
# Remove any domains with low E-values
for pot_nc_dom in potential_noncontig_domains:
F_group, eval = pot_nc_dom[0:2]
if F_group != 'null' and eval < args.eval_cutoff:
potential_domain_mappings.append(pot_nc_dom)
# Mark all overlapping domains
mapped_domains = []
overlap_logic_array = [[0 for i in range(len(potential_domain_mappings))] for j in range(len(potential_domain_mappings))]
for a,domain_A in enumerate(potential_domain_mappings[:-1]):
for b,domain_B in enumerate(potential_domain_mappings[a+1:]):
F_grp_A, eval_A, map_rng_A, hmm_rng_A = domain_A[0:4]
F_grp_B, eval_B, map_rng_B, hmm_rng_B = domain_B[0:4]
# If there's more than 15 (default) residues of overlap, there might be a conflict
if RangeIntersection(map_rng_A,map_rng_B) > args.overlap:
# Here we check for situtations were a domain might overlap by <=15 residues on each side of other domains, allowing for "feathering" of domains.
# This could be bugged for any domains under twice the overlap length
if RangeIntersection(map_rng_A,map_rng_B) <= 2*args.overlap and len(map_rng_A) >= 2*args.overlap:
mid_rng_idx_B = len(map_rng_B)//2
# More than 15 residue overlaps on either side of a domain and it wil be marked overlapping
if RangeIntersection(map_rng_A,map_rng_B[:mid_rng_idx_B]) >= args.overlap or RangeIntersection(map_rng_A,map_rng_B[mid_rng_idx_B:]) >= args.overlap:
overlap_logic_array[a][b+a+1] = 1
overlap_logic_array[b+a+1][a] = 1
# More than twice the overlap tolerance and it will be marked overlapping
else:
overlap_logic_array[a][b+a+1] = 1
overlap_logic_array[b+a+1][a] = 1
# Smaller domains (< gap size) must be treated differently since their overlap could be 100% but smaller than our tolerances
if len(map_rng_A) < args.intra_gap:
if float(RangeIntersection(map_rng_A,map_rng_B))/float(len(map_rng_A)) > 0.7:
overlap_logic_array[a][b+a+1] = 1
overlap_logic_array[b+a+1][a] = 1
# Recursively check all overlap maps and mark those with poor overlap and high E-vlaues for deletion
mark_for_deletion = [0 for i in range(len(potential_domain_mappings))]
for a,dom_overlap_map in enumerate(overlap_logic_array):
F_grp_A, eval_A = potential_domain_mappings[a][0:2]
eliminate_overlapping_domains(a,eval_A,dom_overlap_map,potential_domain_mappings)
# Deleting
for a,delete in enumerate(mark_for_deletion):
if delete:
potential_domain_mappings[a] = ['null',1,[],[],[]]
# Final domains
for pot_dom_map in potential_domain_mappings:
F_group, eval = pot_dom_map[0:2]
if F_group != 'null' and eval < args.eval_cutoff:
# Count the total number of NC and CP domains after filtering
try:
dom_prop = pot_dom_map[4]
if 'CP' in dom_prop:
CP_domain_cnt += 1
except IndexError:
pass
mapped_domains.append(pot_dom_map)
# Label the intervening domains (domains that lie within non-contiguous domains)
if len(mapped_domains) > 1: # only proteins with multiple domains can contain insertional domains
for a,domain_A in enumerate(mapped_domains):
for b,domain_B in enumerate(mapped_domains):
F_grp_A, eval_A, map_rng_A, hmm_rng_A = domain_A[0:4]
F_grp_B, eval_B, map_rng_B, hmm_rng_B = domain_B[0:4]
# Check if domain B resides within domain A
# First, create the set of all 'missing residues' in A
alignment_gap_A = [x for x in range(map_rng_A[0],map_rng_A[-1]) if x not in map_rng_A]
num_res_B_in_aln_gap_A = RangeIntersection(alignment_gap_A,map_rng_B)
# Then, if all of B's residues (with 15 (default) allowed as exception) lie within a gap of A mark as IS of that domain
if num_res_B_in_aln_gap_A > len(map_rng_B) - args.overlap and len(map_rng_B) - args.overlap > 0:
if "IS" not in mapped_domains[b][4]: # only count insertional domains once even if they are found to be insertional to multiple host domains
mapped_domains[b][4].append('IS')
IS_domain_cnt += 1
#Now just output this to a file
domain_info = dict()
for a,domain in enumerate(mapped_domains):
#reformat the residue range into a nice tidy little string
F_group, eval, map_range, hmm_range, domain_properties = domain[:]
map_range = sorted(map_range)
start = map_range[0]
residue_range_as_string = str(start+1) #the +1 is an artifact caused by BioPython re-indexing 1 to 0.
for i in range(1,len(map_range)):
if map_range[i] == map_range[i-1] + 1:
pass #they're consecutive
else:
residue_range_as_string += ('-{},{}').format(str(map_range[i-1]+1), str(map_range[i]+1)) #Again, +1's is because of BioPython indexing
# need to catch these all before
if "NC" not in mapped_domains[a][4]:
mapped_domains[a][4].append("NC")
NC_domain_cnt += 1
residue_range_as_string += ('-{}').format(str(map_range[-1]+1))
#try to find the domain in the domain dict else output the F group from the hmmscan
if F_group in ecod_domain_dict.keys():
f_id = ecod_domain_dict[F_group][0]
arch = ecod_domain_dict[F_group][1]
x_group = ecod_domain_dict[F_group][2]
t_group = ecod_domain_dict[F_group][3]
domain_info[a] = [accession, '{:.1e}'.format(eval), residue_range_as_string, ' '.join(str(s) for s in domain_properties), arch, x_group, t_group, F_group, f_id]
else:
domain_info[a] = [accession, '{:.1e}'.format(eval), residue_range_as_string, ' '.join(str(s) for s in domain_properties), "N/A", "N/A", "N/A", F_group, "N/A"]
Tot_domain_cnt += 1 # yes I know taking the len of mapped_domains is fast but this shows future readers of this code where we are taking final count
# print domains out in order of the first index that appears for a given annotation
domain_info = dict(sorted(domain_info.items(), key=lambda item: int(item[1][2].split("-")[0])))
for k in domain_info.keys():
accession, eval, res_rng, dom_prop, arch, x_grp, t_grp, f_grp, f_id = domain_info[k]
output_lines.append(('{}\t'*9).format(accession, eval, res_rng, dom_prop, arch, x_grp, t_grp, f_grp, f_id)+'\n')
IODomains.printProgressBar(p_idx + 1, num_proteins, prefix = 'Mapping:', suffix = 'Complete', length = 50)
with open(args.o, 'w') as mapped_domains_file:
mapped_domains_file.write(
IODomains.FileHeader(datetime.now(), args.f, args.o, args.intra_gap, args.inter_gap, args.overlap, args.eval_cutoff, num_proteins, Tot_domain_cnt, NC_domain_cnt, CP_domain_cnt, IS_domain_cnt))
for line in output_lines:
mapped_domains_file.write(line)
| import re
import sys
import argparse
from math import log
from datetime import datetime
from Bio.SearchIO import parse
from DomainMapper import LatestDomains, IODomains
# Combine two E-values (treated as pseudo P-values) into one representative value.
def log_mean_E_val(eval_A, eval_B):
    """Return the logarithmic mean of two E-values.

    The logarithmic mean (a - b) / (ln(a) - ln(b)) always lies between the
    two inputs.  Equal inputs are returned unchanged, and when the mean is
    undefined (a non-positive input makes log() raise, or float rounding
    makes the two logs compare equal) the smaller E-value is returned as a
    conservative fallback.
    """
    if eval_A == eval_B:
        return eval_A
    try:
        # Fixed: dropped the unused intermediate `val` and narrowed the bare
        # `except:` (which also swallowed KeyboardInterrupt/SystemExit) to the
        # two errors this expression can actually raise.
        return (eval_A - eval_B)/(log(eval_A) - log(eval_B))
    except (ValueError, ZeroDivisionError):
        return min(eval_A, eval_B)
# Size of the overlap A∩B between two residue ranges; optionally also the overlap itself.
def RangeIntersection(range_A, range_B, array = False):
    """Return the number of elements shared by range_A and range_B.

    When ``array`` is truthy, return ``(count, shared)`` instead, where
    ``shared`` is the set of common residue indices.
    """
    shared = set(range_A) & set(range_B)
    if array:
        return len(shared), shared
    return len(shared)
# returns the query/map range of an hsp for only ranges with an aligned HMM -- this will create non-contiguous domains if the HMM matches seperate parts on a query range
def hsp_range_finder(hsp, map_range_idx, hmm_range_idx):
    """Return the list of query residue indices of `hsp` actually covered by the HMM.

    map_range_idx / hmm_range_idx are (start, end) index pairs on the query and
    on the HMM model respectively.  Long runs of '.' in the HMM alignment string
    (query insertions relative to the model, at least `args.intra_gap` columns)
    are excised from the returned range, which is what later lets one hit be
    treated as a non-contiguous domain.  Reads the module-level `args`.
    """
    query_start, query_end = map_range_idx
    hmm_start, hmm_end = hmm_range_idx
    alignment = hsp.aln
    query_aln = alignment[0]  # aligned query sequence (may contain '-' gap characters)
    hmm_aln = alignment[1]    # aligned HMM consensus ('.' marks columns absent from the model)
    gap_ranges = []
    # Only scan for model gaps when the query span exceeds the HMM span by more than the tolerance.
    if (query_end - query_start) - (hmm_end - hmm_start) > args.intra_gap:
        alignment_gap = re.finditer('\.{'+str(args.intra_gap)+',}', str(hmm_aln.seq))
        # Finding the range of the gap in the hmm alignment
        # Match the hmm gap range start/end index to the query sequence index
        # (alignment-column index -> query coordinate: add query_start, subtract query '-' gaps seen so far)
        if alignment_gap:
            for gap in alignment_gap:
                gap_start_idx = gap.start()
                gap_end_idx = gap.end()
                gap_start_query_idx = gap_start_idx + query_start - len(re.findall('-',str(query_aln.seq[:gap_start_idx])))
                # NOTE(review): the end index also counts query gaps only up to gap_start_idx;
                # if the query could contain '-' inside the HMM-gap region this end coordinate
                # would be shifted — confirm that cannot happen for hmmer3-text alignments.
                gap_end_query_idx = gap_end_idx + query_start - len(re.findall('-',str(query_aln.seq[:gap_start_idx])))
                gap_ranges += list(range(gap_start_query_idx, gap_end_query_idx))
    # Remove the gap from the range, so that it doesn't hog up those resi's (there may be another domain inside)
    # If no gaps are found the whole will be returned
    domain_map_range = [x for x in range(query_start, query_end) if x not in gap_ranges]
    return domain_map_range
# mark_for_deletion and overlap_logic_array are a global variables
def eliminate_overlapping_domains(idx, eval, overlap_map, domain_map):
    """Recursively resolve a cluster of mutually overlapping domains.

    Starting from domain `idx` (with conditional E-value `eval`), collect every
    not-yet-deleted domain it overlaps according to `overlap_map` (one row of
    the global `overlap_logic_array`).  If `idx` has the best (lowest) E-value
    of the cluster, flag every other member in the global `mark_for_deletion`
    list; otherwise recurse into the cluster's best-scoring member and let it
    decide.  `domain_map` is the list of [F_group, eval, ...] records the
    indices refer to.  Note: the parameter `eval` shadows the builtin of the
    same name (pre-existing style in this file).
    """
    if mark_for_deletion[idx]:
        return
    # save all idx and eval for domains with which the reference domain overlaps with
    tmp_idx = [idx]
    tmp_eval = [eval]
    for i,overlap in enumerate(overlap_map):
        if overlap and not mark_for_deletion[i]: # skips over i=idx since that always maps to 0
            F_grp_ol, eval_ol = domain_map[i][0:2]
            tmp_idx.append(i)
            tmp_eval.append(eval_ol)
    # check if the reference domain is the best fit for it's sequence coverage, if not check if any of the overlaping domains are recursively
    min_eval_idx = tmp_idx[tmp_eval.index(min(tmp_eval))]
    if min_eval_idx == idx:
        for j in tmp_idx:
            if j != idx:
                mark_for_deletion[j] = 1
        return
    else:
        eliminate_overlapping_domains(min_eval_idx, min(tmp_eval), overlap_logic_array[min_eval_idx],domain_map)
# Command-line interface: input/output paths, ECOD domain data source, and the
# numeric tolerances that drive the domain-mapping heuristics below.
descriptionText = """"""
argparser = argparse.ArgumentParser(description=descriptionText)
argparser.add_argument('-f', type=str, default='NULL', help='Input path to file from \'hmmscan\'')
argparser.add_argument('-o', type=str, default='NULL', help='Output path for mapped domains')
argparser.add_argument('--ecod_domains', default='NULL', type=str, help='Path to ECOD \'Latest Domains\' text file (default = file is automatically downloaded [165 MB Free Space Required (deleted after parsing)] [2 MB File Saved])')
argparser.add_argument('--intra_gap', '--intra_domain_gap_tolerance', type=int, default=35, help='Optional gap size between HMM sequence and query sequence for non-contiguous alignment within a domain (default = 35)')
argparser.add_argument('--inter_gap', '--inter_domain_gap_tolerance', type=int, default=20, help='Optional gap size between two domains sequences for non-contiguous merging (default = 20)')
argparser.add_argument('--overlap', '--domain_overlap_tolerance', type=int, default=15, help='Optional overlap between HMM domain sequence and fasta aligment in consecutive or split domains (default = 15)')
argparser.add_argument('--eval_cutoff', type=float, default=1e-5, help='Optional upper bound tolerance of the E-value (default = 1e-5)')
argparser.add_argument('--update', help='Update ECOD \'Latest Domains\'', default=False, action="store_true")
args = argparser.parse_args()
# Sanity-check the command-line arguments before doing any work.
# BUG FIX: sys.argv always contains at least the script name, so the original
# `len(sys.argv) < 1` test could never fire; `== 1` correctly detects "no arguments".
if len(sys.argv) == 1:
    IODomains.ErrorMsg("No Arguments Passed. View help page with \'DomainMapper.py -h\'.")
if args.f == 'NULL':
    IODomains.ErrorMsg("No Input hmmscan file provided. View help page with \'DomainMapper.py -h\'")
if args.o == 'NULL':
    IODomains.ErrorMsg("No Output path provided. View help page with \'DomainMapper.py -h\'")
# Load the ECOD 'Latest Domains' dictionary, downloading/updating it when no local path is given.
if args.ecod_domains == 'NULL':
    if args.update:
        LatestDomains.update()
    ecod_domain_dict = LatestDomains.load()
else:
    ecod_domain_dict = LatestDomains.load(args.ecod_domains)
# Every numeric tolerance must be non-negative.
if args.intra_gap < 0:
    IODomains.ErrorMsg("Non-positive option detected for gap size tolerance. Please ensure all numerical arguments are positive numbers. View help page with \'DomainMapper.py -h\'")
# CONSISTENCY FIX: --inter_gap was the only numeric option left unvalidated.
if args.inter_gap < 0:
    IODomains.ErrorMsg("Non-positive option detected for gap size tolerance. Please ensure all numerical arguments are positive numbers. View help page with \'DomainMapper.py -h\'")
if args.overlap < 0:
    IODomains.ErrorMsg("Non-positive option detected for overlap tolerance. Please ensure all numerical arguments are positive numbers. View help page with \'DomainMapper.py -h\'")
if args.eval_cutoff < 0:
    IODomains.ErrorMsg("Non-positive option detected for E-value cuttoff. Please ensure all numerical arguments are positive numbers. View help page with \'DomainMapper.py -h\'")
# Non-contiguous domain counter
NC_domain_cnt = 0
# Circular Permutant domain counter
CP_domain_cnt = 0
# Intervening domain counter
IS_domain_cnt = 0
# Total domain counter
Tot_domain_cnt = 0
# Final formatted output
output_lines = list()
# Very inefficient step but worth it
# First pass just counts proteins so the progress bar has a total; the SearchIO
# parser is a one-shot generator, so it is re-opened for the real mapping pass.
hmmscan = parse(args.f,'hmmer3-text')
num_proteins = sum(1 for _ in hmmscan)
IODomains.printProgressBar(0, num_proteins, prefix = 'Mapping:', suffix = 'Complete', length = 50)
hmmscan = parse(args.f,'hmmer3-text')
# Main pass: map every protein in the hmmscan output to its set of ECOD domains.
for p_idx, protein in enumerate(hmmscan):
    accession = protein.id
    # Candidate records: [F_group, evalue, map_range, hmm_range, query_property].
    potential_domain_mappings = list()
    for hit in protein.hits:
        # Single sequence alignment hit
        if len(hit.hsps) == 1:
            map_range = hit.hsps[0].query_range
            hmm_range = hit.hsps[0].hit_range
            evalue = hit.hsps[0].evalue_cond
            F_group = hit.hsps[0].hit_id
            query_property = list()
            # Keep if the E-value is lower than the cutoff
            if evalue < args.eval_cutoff:
                map_range = hsp_range_finder(hit.hsps[0], map_range, hmm_range)
                potential_domain_mappings.append([F_group, evalue, map_range, hmm_range, query_property])
        # Multiple sequence alignment hits
        # There is potential for these to be non-contiguous, circular permutants, or insertional domains
        if len(hit.hsps) > 1:
            potential_noncontig_domains = list()
            overlapping_potential_noncontig_domains = list()
            for hsp in hit.hsps:
                map_range = hsp.query_range
                hmm_range = hsp.hit_range
                evalue = hsp.evalue_cond
                F_group = hsp.hit_id
                query_property = list()
                # Keep if the E-value is lower than the cutoff
                if evalue < args.eval_cutoff:
                    # BUG FIX: examine the CURRENT hsp's alignment. The original passed
                    # hit.hsps[0] here, pairing every fragment's query/hmm ranges with
                    # the first fragment's alignment strings.
                    map_range = hsp_range_finder(hsp, map_range, hmm_range)
                    potential_noncontig_domains.append([F_group, evalue, map_range, hmm_range, query_property])
            # Check if any potential non-contig. domains must be combined
            # By referencing domain_A from [:-1] (all but the last) and domain_B from [a+1:] (from index one more than "A" to the end)
            # We are guaranteed to only check unique pairs of domans against each other
            # In a non-contiguous domain, what happens is that the various AA-ranges correspond to distinct portions in the query sequence and in the HMM model
            # NOTE: b enumerates the slice starting at a+1, so b+a+1 is domain_B's index in the full list.
            # Merged-away entries are blanked with the ['null',1,[],[],[]] sentinel instead of removed, keeping indices stable.
            if len(potential_noncontig_domains) > 1:
                for a,domain_A in enumerate(potential_noncontig_domains[:-1]):
                    for b,domain_B in enumerate(potential_noncontig_domains[a+1:]):
                        F_grp_A, eval_A, map_rng_A, hmm_rng_A = domain_A[0:4]
                        F_grp_B, eval_B, map_rng_B, hmm_rng_B = domain_B[0:4]
                        # If the ranges of the hmm are indexed first for the downsteam query sequence and then the upstream query sequence, consider it a circular permutant
                        if RangeIntersection(map_rng_A,map_rng_B) <= args.overlap and RangeIntersection(list(range(hmm_rng_A[0],hmm_rng_A[1])),list(range(hmm_rng_B[0],hmm_rng_B[1]))) <= args.overlap and ((map_rng_A[0] < map_rng_B[0] and hmm_rng_A[0] > hmm_rng_B[0]) or (map_rng_A[0] > map_rng_B[0] and hmm_rng_A[0] < hmm_rng_B[0])) and F_grp_A != 'null' and F_grp_B != 'null' and F_grp_A == F_grp_B:
                            # Mark as Circ. Permut.
                            potential_noncontig_domains[b+a+1][4].append('CP')
                            # If there is a small space between the circ. permut. domains close it off and consider it a single contiguous domain
                            # otherwise, if the space is larger than the inter domain gap tolerance it can also be marked as non-contig. later on in this program
                            if map_rng_A[-1] < map_rng_B[0] and (map_rng_B[0] - map_rng_A[-1]) < args.inter_gap:
                                # Fill the short gap between the two fragments.
                                for x in range(map_rng_A[-1], map_rng_B[0]):
                                    if x not in map_rng_A:
                                        map_rng_A.append(x)
                                # Merge A into slot b+a+1 and blank out slot a.
                                potential_noncontig_domains[a] = ['null',1,[],[],[]]
                                potential_noncontig_domains[b+a+1][1] = log_mean_E_val(eval_A, eval_B)
                                potential_noncontig_domains[b+a+1][2] = map_rng_A + map_rng_B
                                potential_noncontig_domains[b+a+1][3] = hmm_rng_A + hmm_rng_B
                        # Same F-group fragments that overlap on the query: collapse A's span up to B's end and merge.
                        if map_rng_A[-1] > map_rng_B[0] and F_grp_A != 'null' and F_grp_B != 'null' and F_grp_A == F_grp_B:
                            map_rng_A = list(range(map_rng_A[0],map_rng_B[-1]))
                            potential_noncontig_domains[a] = ['null',1,[],[],[]]
                            potential_noncontig_domains[b+a+1][1] = log_mean_E_val(eval_A, eval_B)
                            potential_noncontig_domains[b+a+1][2] = map_rng_A
                            potential_noncontig_domains[b+a+1][3] = hmm_rng_A + hmm_rng_B
                        # Same F-group, disjoint on both query and HMM: either merge (small gap) or queue as non-contiguous.
                        if RangeIntersection(map_rng_A,map_rng_B) <= args.overlap and RangeIntersection(list(range(hmm_rng_A[0],hmm_rng_A[1])),list(range(hmm_rng_B[0],hmm_rng_B[1]))) <= args.overlap and F_grp_A != 'null' and F_grp_B != 'null' and F_grp_A == F_grp_B:
                            # If two domain mappings are considerably close, consider them one domain
                            # otherwise, it is a non-contig. domain which will be marked as such later in this program
                            if map_rng_A[-1] < map_rng_B[0] and (map_rng_B[0] - map_rng_A[-1]) < args.inter_gap:
                                for x in range(map_rng_A[-1], map_rng_B[0]):
                                    if x not in map_rng_A:
                                        map_rng_A.append(x)
                                potential_noncontig_domains[a] = ['null',1,[],[],[]]
                                potential_noncontig_domains[b+a+1][1] = log_mean_E_val(eval_A, eval_B)
                                potential_noncontig_domains[b+a+1][2] = map_rng_A + map_rng_B
                                potential_noncontig_domains[b+a+1][3] = hmm_rng_A + hmm_rng_B
                            elif map_rng_A[-1] < map_rng_B[0] and (map_rng_B[0] - map_rng_A[-1]) >= args.inter_gap:
                                # prevent double saving
                                if domain_A not in overlapping_potential_noncontig_domains:
                                    potential_noncontig_domains[a] = ['null',1,[],[],[]]
                                    overlapping_potential_noncontig_domains.append(domain_A)
                                elif domain_A in overlapping_potential_noncontig_domains:
                                    potential_noncontig_domains[a] = ['null',1,[],[],[]] # if B was added but more domains follow, deletes latest domain in list to overlap pot_nc_domains
                                if domain_B not in overlapping_potential_noncontig_domains:
                                    overlapping_potential_noncontig_domains.append(domain_B)
                                elif b+a+1 == len(potential_noncontig_domains):
                                    potential_noncontig_domains[b+a+1] = ['null',1,[],[],[]] # deletes last possible overlapping nc_domain, able to be readded later
            # preparing to remove overlapping domains that are non-contiguous
            if len(overlapping_potential_noncontig_domains) > 1:
                # Symmetric pairwise overlap matrix over the queued fragments.
                overlap_logic_array = [[0 for i in range(len(overlapping_potential_noncontig_domains))] for j in range(len(overlapping_potential_noncontig_domains))]
                for a,domain_A in enumerate(overlapping_potential_noncontig_domains[:-1]):
                    for b,domain_B in enumerate(overlapping_potential_noncontig_domains[a+1:]):
                        F_grp_A, eval_A, map_rng_A, hmm_rng_A = domain_A[0:4]
                        F_grp_B, eval_B, map_rng_B, hmm_rng_B = domain_B[0:4]
                        if RangeIntersection(map_rng_A,map_rng_B) > args.overlap:
                            overlap_logic_array[a][b+a+1] = 1
                            overlap_logic_array[b+a+1][a] = 1
                        # Small fragments: flag when more than 70% of the fragment is covered.
                        if len(map_rng_A) < args.intra_gap:
                            if float(RangeIntersection(map_rng_A,map_rng_B))/float(len(map_rng_A)) > 0.7:
                                overlap_logic_array[a][b+a+1] = 1
                                overlap_logic_array[b+a+1][a] = 1
                # recursively remove high E-Value overlapping domains
                mark_for_deletion = [0 for i in range(len(overlapping_potential_noncontig_domains))]
                for a,dom_overlap_map in enumerate(overlap_logic_array):
                    F_grp_A, eval_A = overlapping_potential_noncontig_domains[a][0:2]
                    eliminate_overlapping_domains(a,eval_A,dom_overlap_map,overlapping_potential_noncontig_domains)
                nonoverlapping_potential_noncontig_domains = []
                for a,delete in enumerate(mark_for_deletion):
                    if not delete:
                        nonoverlapping_potential_noncontig_domains.append(overlapping_potential_noncontig_domains[a])
                # Stitch the surviving fragments into a single non-contiguous record.
                # NOTE(review): potential_noncontig_domains is reassigned on every pair, so
                # with more than two survivors only the LAST pair's merge is kept — confirm
                # this is the intended behavior.
                if len(nonoverlapping_potential_noncontig_domains) > 1:
                    for a,domain_A in enumerate(nonoverlapping_potential_noncontig_domains[:-1]):
                        for b,domain_B in enumerate(nonoverlapping_potential_noncontig_domains[a+1:]):
                            F_grp_A, eval_A, map_rng_A, hmm_rng_A, query_prop_A = domain_A
                            F_grp_B, eval_B, map_rng_B, hmm_rng_B, query_prop_B = domain_B
                            nc_F_grp = F_grp_A
                            nc_eval = log_mean_E_val(eval_A, eval_B)
                            nc_map_rng = map_rng_A + map_rng_B
                            nc_hmm_rng = hmm_rng_A + hmm_rng_B
                            nc_query_prop = query_prop_A + query_prop_B
                            potential_noncontig_domains = [[nc_F_grp, nc_eval, nc_map_rng, nc_hmm_rng, nc_query_prop]]
                else:
                    potential_noncontig_domains = nonoverlapping_potential_noncontig_domains
            # Keep only real (non-'null') candidates whose E-value beats the cutoff.
            for pot_nc_dom in potential_noncontig_domains:
                F_group, eval = pot_nc_dom[0:2]
                if F_group != 'null' and eval < args.eval_cutoff:
                    potential_domain_mappings.append(pot_nc_dom)
    # Mark all overlapping domains
    mapped_domains = []
    overlap_logic_array = [[0 for i in range(len(potential_domain_mappings))] for j in range(len(potential_domain_mappings))]
    for a,domain_A in enumerate(potential_domain_mappings[:-1]):
        for b,domain_B in enumerate(potential_domain_mappings[a+1:]):
            F_grp_A, eval_A, map_rng_A, hmm_rng_A = domain_A[0:4]
            F_grp_B, eval_B, map_rng_B, hmm_rng_B = domain_B[0:4]
            # If there's more than 15 (default) residues of overlap, there might be a conflict
            if RangeIntersection(map_rng_A,map_rng_B) > args.overlap:
                # Here we check for situtations were a domain might overlap by <=15 residues on each side of other domains, allowing for "feathering" of domains.
                # This could be bugged for any domains under twice the overlap length
                if RangeIntersection(map_rng_A,map_rng_B) <= 2*args.overlap and len(map_rng_A) >= 2*args.overlap:
                    mid_rng_idx_B = len(map_rng_B)//2
                    # More than 15 residue overlaps on either side of a domain and it wil be marked overlapping
                    # (B is split at its midpoint; either half overlapping A by >= tolerance counts)
                    if RangeIntersection(map_rng_A,map_rng_B[:mid_rng_idx_B]) >= args.overlap or RangeIntersection(map_rng_A,map_rng_B[mid_rng_idx_B:]) >= args.overlap:
                        overlap_logic_array[a][b+a+1] = 1
                        overlap_logic_array[b+a+1][a] = 1
                # More than twice the overlap tolerance and it will be marked overlapping
                else:
                    overlap_logic_array[a][b+a+1] = 1
                    overlap_logic_array[b+a+1][a] = 1
            # Smaller domains (< gap size) must be treated differently since their overlap could be 100% but smaller than our tolerances
            if len(map_rng_A) < args.intra_gap:
                if float(RangeIntersection(map_rng_A,map_rng_B))/float(len(map_rng_A)) > 0.7:
                    overlap_logic_array[a][b+a+1] = 1
                    overlap_logic_array[b+a+1][a] = 1
    # Recursively check all overlap maps and mark those with poor overlap and high E-values for deletion
    mark_for_deletion = [0 for i in range(len(potential_domain_mappings))]
    for a,dom_overlap_map in enumerate(overlap_logic_array):
        F_grp_A, eval_A = potential_domain_mappings[a][0:2]
        eliminate_overlapping_domains(a,eval_A,dom_overlap_map,potential_domain_mappings)
    # Deleting: losers are blanked with the 'null' sentinel rather than removed, so indices stay stable.
    for a,delete in enumerate(mark_for_deletion):
        if delete:
            potential_domain_mappings[a] = ['null',1,[],[],[]]
    # Final domains
    for pot_dom_map in potential_domain_mappings:
        F_group, eval = pot_dom_map[0:2]
        if F_group != 'null' and eval < args.eval_cutoff:
            # Count the total number of NC and CP domains after filtering
            try:
                dom_prop = pot_dom_map[4]
                if 'CP' in dom_prop:
                    CP_domain_cnt += 1
            except IndexError:
                pass
            mapped_domains.append(pot_dom_map)
    # Label the intervening domains (domains that lie within non-contiguous domains)
    if len(mapped_domains) > 1: # only proteins with multiple domains can contain insertional domains
        for a,domain_A in enumerate(mapped_domains):
            for b,domain_B in enumerate(mapped_domains):
                F_grp_A, eval_A, map_rng_A, hmm_rng_A = domain_A[0:4]
                F_grp_B, eval_B, map_rng_B, hmm_rng_B = domain_B[0:4]
                # Check if domain B resides within domain A
                # First, create the set of all 'missing residues' in A
                alignment_gap_A = [x for x in range(map_rng_A[0],map_rng_A[-1]) if x not in map_rng_A]
                num_res_B_in_aln_gap_A = RangeIntersection(alignment_gap_A,map_rng_B)
                # Then, if all of B's residues (with 15 (default) allowed as exception) lie within a gap of A mark as IS of that domain
                if num_res_B_in_aln_gap_A > len(map_rng_B) - args.overlap and len(map_rng_B) - args.overlap > 0:
                    if "IS" not in mapped_domains[b][4]: # only count insertional domains once even if they are found to be insertional to multiple host domains
                        mapped_domains[b][4].append('IS')
                        IS_domain_cnt += 1
    # Now just output this to a file
    domain_info = dict()
    for a,domain in enumerate(mapped_domains):
        # reformat the residue range into a nice tidy little string, e.g. "5-80,120-200"
        F_group, eval, map_range, hmm_range, domain_properties = domain[:]
        map_range = sorted(map_range)
        start = map_range[0]
        residue_range_as_string = str(start+1) #the +1 is an artifact caused by BioPython re-indexing 1 to 0.
        for i in range(1,len(map_range)):
            if map_range[i] == map_range[i-1] + 1:
                pass #they're consecutive
            else:
                # A break in the sorted residue list: close the previous segment and open a new one.
                residue_range_as_string += ('-{},{}').format(str(map_range[i-1]+1), str(map_range[i]+1)) #Again, +1's is because of BioPython indexing
                # need to catch these all before
                # Any discontinuity in the mapped range makes this a non-contiguous domain.
                if "NC" not in mapped_domains[a][4]:
                    mapped_domains[a][4].append("NC")
                    NC_domain_cnt += 1
        residue_range_as_string += ('-{}').format(str(map_range[-1]+1))
        #try to find the domain in the domain dict else output the F group from the hmmscan
        if F_group in ecod_domain_dict.keys():
            f_id = ecod_domain_dict[F_group][0]
            arch = ecod_domain_dict[F_group][1]
            x_group = ecod_domain_dict[F_group][2]
            t_group = ecod_domain_dict[F_group][3]
            domain_info[a] = [accession, '{:.1e}'.format(eval), residue_range_as_string, ' '.join(str(s) for s in domain_properties), arch, x_group, t_group, F_group, f_id]
        else:
            domain_info[a] = [accession, '{:.1e}'.format(eval), residue_range_as_string, ' '.join(str(s) for s in domain_properties), "N/A", "N/A", "N/A", F_group, "N/A"]
        Tot_domain_cnt += 1 # yes I know taking the len of mapped_domains is fast but this shows future readers of this code where we are taking final count
    # print domains out in order of the first index that appears for a given annotation
    # (sort key: the leading residue number of each domain's range string)
    domain_info = dict(sorted(domain_info.items(), key=lambda item: int(item[1][2].split("-")[0])))
    for k in domain_info.keys():
        accession, eval, res_rng, dom_prop, arch, x_grp, t_grp, f_grp, f_id = domain_info[k]
        output_lines.append(('{}\t'*9).format(accession, eval, res_rng, dom_prop, arch, x_grp, t_grp, f_grp, f_id)+'\n')
    IODomains.printProgressBar(p_idx + 1, num_proteins, prefix = 'Mapping:', suffix = 'Complete', length = 50)
# Write the results: a header summarizing run parameters and domain counts,
# followed by one tab-separated line per mapped domain.
with open(args.o, 'w') as mapped_domains_file:
    mapped_domains_file.write(
        IODomains.FileHeader(datetime.now(), args.f, args.o, args.intra_gap, args.inter_gap, args.overlap, args.eval_cutoff, num_proteins, Tot_domain_cnt, NC_domain_cnt, CP_domain_cnt, IS_domain_cnt))
    for line in output_lines:
        mapped_domains_file.write(line)
| en | 0.922526 | # returns the combination of two E-Values (treated as psuedo P-Values) with Tippett's method # returns the length of the set A∩B, can also return an array that includes all intersecting residue indices # returns the query/map range of an hsp for only ranges with an aligned HMM -- this will create non-contiguous domains if the HMM matches seperate parts on a query range # Finding the range of the gap in the hmm alignment # Match the hmm gap range start/end index to the query sequence index # Remove the gap from the range, so that it doesn't hog up those resi's (there may be another domain inside) # If no gaps are found the whole will be returned # mark_for_deletion and overlap_logic_array are a global variables # save all idx and eval for domains with which the reference domain overlaps with # skips over i=idx since that always maps to 0 # check if the reference domain is the best fit for it's sequence coverage, if not check if any of the overlaping domains are recursively # Non-contiguous domain counter # Circular Permutant domain counter # Interveneing domain counter # Total domain counter # Final formatted output # Very inefficient step but worth it # Single sequence alignment hit # Keep if the E-value is lower than the cutoff # Multiple sequence alignment hits # There is potential for these to be non-contiguous, circular permutants, or insertional domains # Keep if the E-value is lower than the cutoff # Check if any potential non-contig. 
domains must be combined # By referencing domain_A from [:-1] (all but the last) and domain_B from [a+1:] (from index one more than "A" to the end) # We are guaranteed to only check unique pairs of domans against each other # In a non-contiguous domain, what happens is that the various AA-ranges correspond to distinct portions in the query sequence and in the HMM model # If the ranges of the hmm are indexed first for the downsteam query sequence and then the upstream query sequence, consider it a circular permutant # Mark as Circ. Permut. # If there is a small space between the circ. permut. domains close it off and consider it a single contiguous domain # otherwise, if the space is larger than the inter domain gap tolerance it can also be marked as non-contig. later on in this program # If two domain mappings are considerably close, consider them one domain # otherwise, it is a non-contig. domain which will be marked as such later in this program # prevent double saving # if B was added but more domains follow, deletes latest domain in list to overlap pot_nc_domains # deletes last possible overlapping nc_domain, able to be readded later # preparing to remove overlapping domains that are non-contiguous # recursively remove high E-Value overlapping domains # Remove any domains with low E-values # Mark all overlapping domains # If there's more than 15 (default) residues of overlap, there might be a conflict # Here we check for situtations were a domain might overlap by <=15 residues on each side of other domains, allowing for "feathering" of domains. 
# This could be bugged for any domains under twice the overlap length # More than 15 residue overlaps on either side of a domain and it wil be marked overlapping # More than twice the overlap tolerance and it will be marked overlapping # Smaller domains (< gap size) must be treated differently since their overlap could be 100% but smaller than our tolerances # Recursively check all overlap maps and mark those with poor overlap and high E-vlaues for deletion # Deleting # Final domains # Count the total number of NC and CP domains after filtering # Label the intervening domains (domains that lie within non-contiguous domains) # only proteins with multiple domains can contain insertional domains # Check if domain B resides within domain A # First, create the set of all 'missing residues' in A # Then, if all of B's residues (with 15 (default) allowed as exception) lie within a gap of A mark as IS of that domain # only count insertional domains once even if they are found to be insertional to multiple host domains #Now just output this to a file #reformat the residue range into a nice tidy little string #the +1 is an artifact caused by BioPython re-indexing 1 to 0. #they're consecutive #Again, +1's is because of BioPython indexing # need to catch these all before #try to find the domain in the domain dict else output the F group from the hmmscan # yes I know taking the len of mapped_domains is fast but this shows future readers of this code where we are taking final count # print domains out in order of the first index that appears for a given annotation | 2.345737 | 2 |
course2/tree_height.py | ropable/algorithmic_toolbox | 1 | 6614914 | # python3
import sys
from collections import deque
class Node(object):
    """A generic tree node with a name, an optional parent, and child nodes."""

    def __init__(self, name, parent=None, children=None):
        """Create a node.

        Children passed in are appended as-is; their ``parent`` attribute is
        NOT updated here (callers are expected to set it themselves).
        """
        self.name = name
        self.parent = parent
        self.children = []
        if children:
            for child in children:
                self.add_child(child)

    def __str__(self):
        return str(self.name)

    def __repr__(self):
        return 'Node {} (parent: {})'.format(self.name, self.parent)

    def add_child(self, node):
        """Append ``node`` to this node's children."""
        self.children.append(node)

    def is_leaf(self):
        """Return True when this node has no children."""
        # Idiomatic truthiness test replaces the verbose if/return True/False chain.
        return not self.children

    def is_root(self):
        """Return True when this node has no parent."""
        return not self.parent

    def path_to_root(self):
        """Return the list of nodes from this node up to (and including) the root."""
        # Simplified walk: climb parents until a root node has been appended,
        # replacing the original boolean-flag loop.
        path = [self]
        node = self
        while not node.is_root():
            node = node.parent
            path.append(node)
        return path
def allocate_nodes(parents):
    """Construct a tree from the input list of parent nodes.

    ``parents[i]`` is the index of node i's parent, or a negative value
    (conventionally -1) when node i is the root.

    Returns a ``(tree, root_node)`` pair where ``tree`` maps each index to
    its ``Node`` and ``root_node`` is the root node (``None`` if the input
    marks no root).
    """
    # range(len(...)) instead of enumerate: the parent values are not needed
    # while creating the nodes (the old comprehension bound one to an unused
    # variable).
    tree = {i: Node(i) for i in range(len(parents))}
    root_node = None
    for i, parent in enumerate(parents):
        if parent >= 0:
            # Link child -> parent and parent -> child.
            tree[i].parent = tree[parent]
            tree[parent].add_child(tree[i])
        else:
            root_node = tree[i]
    return tree, root_node
def calculate_tree_height(tree):
    """Iterate through a tree and calculate the height.

    TOO SLOW - this solution is O(n^2) complexity: every leaf re-walks its
    whole path back to the root. Kept for reference.
    """
    depths = (len(node.path_to_root()) for node in tree.values() if node.is_leaf())
    return max(depths, default=0)
def compute_height(tree, root):
    """Breadth-first traversal from the root; the number of levels visited
    equals the tree height. This solution should be O(n) complexity.
    """
    height = 0
    frontier = deque([root])  # current level of the traversal
    while frontier:
        height += 1
        # Replace the current level with the next one, node by node.
        for _ in range(len(frontier)):
            node = frontier.popleft()
            frontier.extend(tree[node.name].children)
    return height
if __name__ == '__main__':
    # Input format: first line is the node count (read only to consume the
    # line; the value itself is unused), second line is the parent index of
    # each node, with a negative value marking the root.
    n = int(sys.stdin.readline())
    parents = list(map(int, sys.stdin.readline().split()))
    tree, root_node = allocate_nodes(parents)
    print(compute_height(tree, root_node))
| # python3
import sys
from collections import deque
class Node(object):
"""A generic node class with some helper methods.
"""
def __init__(self, name, parent=None, children=None):
self.name = name
self.parent = parent
self.children = []
if children:
for child in children:
self.add_child(child)
def __str__(self):
return str(self.name)
def __repr__(self):
return 'Node {} (parent: {})'.format(self.name, self.parent)
def add_child(self, node):
self.children.append(node)
def is_leaf(self):
if self.children:
return False
return True
def is_root(self):
if self.parent:
return False
return True
def path_to_root(self):
"""Follow parent nodes back to the root node.
"""
path = [self]
if self.is_root():
return path
root = False
current = self.parent
while not root:
path.append(current)
if current.is_root():
root = True
else:
current = current.parent
return path
def allocate_nodes(parents):
"""Construct a tree from the input list of parent nodes.
"""
tree = {i: Node(i) for i, j in enumerate(parents)}
root_node = None
for i, parent in enumerate(parents):
if parent >= 0:
# Set the parent for that node.
tree[i].parent = tree[parent]
# Add the current node as a child of the parent.
tree[parent].add_child(tree[i])
else:
root_node = tree[i]
return tree, root_node
def calculate_tree_height(tree):
"""Iterate through a tree and calculate the height.
TOO SLOW - this solution is O(n^2) complexity.
"""
max_height = 0
for i in tree.values():
if i.is_leaf():
path = i.path_to_root()
if len(path) > max_height:
max_height = len(path)
return max_height
def compute_height(tree, root):
"""Undertake a breadth-first search through the tree, starting at the root.
This solution should be O(n) complexity.
"""
queue = deque([root]) # Begin the queue with the root node.
height = 0
while True:
if len(queue) == 0: # Nothing is left in the queue; return the current height.
return height
queue_n = len(queue)
height += 1 # Increment the height.
# For the CURRENT length of the queue, pop nodes from the front and append
# children on those nodes to the end.
# After queue_n times, our queue will now consist of all nodes of ``height``.
# Then we repeat the process.
while queue_n > 0:
node = queue.popleft()
for v in tree[node.name].children:
queue.append(v)
queue_n -= 1
if __name__ == '__main__':
n = int(sys.stdin.readline())
parents = list(map(int, sys.stdin.readline().split()))
tree, root_node = allocate_nodes(parents)
print(compute_height(tree, root_node))
| en | 0.873636 | # python3 A generic node class with some helper methods. Follow parent nodes back to the root node. Construct a tree from the input list of parent nodes. # Set the parent for that node. # Add the current node as a child of the parent. Iterate through a tree and calculate the height. TOO SLOW - this solution is O(n^2) complexity. Undertake a breadth-first search through the tree, starting at the root. This solution should be O(n) complexity. # Begin the queue with the root node. # Nothing is left in the queue; return the current height. # Increment the height. # For the CURRENT length of the queue, pop nodes from the front and append # children on those nodes to the end. # After queue_n times, our queue will now consist of all nodes of ``height``. # Then we repeat the process. | 3.999513 | 4 |
csrv/model/cards/runner/card01028.py | mrroach/CentralServer | 0 | 6614915 | <filename>csrv/model/cards/runner/card01028.py
from csrv.model import actions
from csrv.model import events
from csrv.model.cards import card_info
from csrv.model.cards import program
from csrv.model import timing_phases
class Card01028RunAction(actions.MakeARunAction):
    """Run action granted by Card01028: run on archives, and on success the
    run is treated as a run on HQ (Card01028.on_approach_server_4_4 swaps
    the target server to ``corp.hq`` at approach time)."""

    def __init__(self, game, player, card=None):
        actions.MakeARunAction.__init__(
            self, game, player, card=card, server=game.corp.archives)

    def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
        # Arm the server-swap listener on the card before the run starts.
        self.card.setup_card01028()
        actions.MakeARunAction.resolve(
            self, response, ignore_clicks=ignore_clicks,
            ignore_all_costs=ignore_all_costs)

    @property
    def description(self):
        # Fixed: the listener redirects the run to HQ (game.corp.hq), not
        # R&D as the old text claimed.
        return ('[click]: Make a run on archives, if successful, treat as '
                'a successful run on HQ')
class Card01028(program.Program):
    """Program that offers a special run on archives which, if it reaches
    the approach step, is redirected to HQ."""

    NAME = u'Card01028'
    SET = card_info.CORE
    NUMBER = 28
    SIDE = card_info.RUNNER
    FACTION = card_info.CRIMINAL
    INFLUENCE = 3
    UNIQUE = False
    COST = 4
    MEMORY = 2
    IMAGE_SRC = '01028.png'

    WHEN_INSTALLED_PROVIDES_CHOICES_FOR = {
        timing_phases.RunnerTurnActions: 'card01028_actions',
    }

    def build_actions(self):
        program.Program.build_actions(self)

    def card01028_actions(self):
        """Offer the archives-run action during the runner's action phase."""
        return [Card01028RunAction(self.game, self.player, card=self)]

    def setup_card01028(self):
        """Subscribe to the 4.4 approach-server step of the current run."""
        self.game.register_listener(events.ApproachServer_4_4, self)

    def on_approach_server_4_4(self, event, sender):
        # The old switcheroo
        # TODO(mrroach): Make this into a conditional static trigger
        if self.game.run.server != self.game.corp.archives:
            return
        self.game.run.server = self.game.corp.hq
        self.game.deregister_listener(events.ApproachServer_4_4, self)
| <filename>csrv/model/cards/runner/card01028.py
from csrv.model import actions
from csrv.model import events
from csrv.model.cards import card_info
from csrv.model.cards import program
from csrv.model import timing_phases
class Card01028RunAction(actions.MakeARunAction):
def __init__(self, game, player, card=None):
actions.MakeARunAction.__init__(
self, game, player, card=card, server=game.corp.archives)
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
self.card.setup_card01028()
actions.MakeARunAction.resolve(
self, response, ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
@property
def description(self):
return ('[click]: Make a run on archives, if successful, treat as '
'a successful run on R&D')
class Card01028(program.Program):
NAME = u'Card01028'
SET = card_info.CORE
NUMBER = 28
SIDE = card_info.RUNNER
FACTION = card_info.CRIMINAL
INFLUENCE = 3
UNIQUE = False
COST = 4
MEMORY = 2
IMAGE_SRC = '01028.png'
WHEN_INSTALLED_PROVIDES_CHOICES_FOR = {
timing_phases.RunnerTurnActions: 'card01028_actions',
}
def build_actions(self):
program.Program.build_actions(self)
def card01028_actions(self):
return [Card01028RunAction(self.game, self.player, card=self)]
def setup_card01028(self):
self.game.register_listener(events.ApproachServer_4_4, self)
def on_approach_server_4_4(self, event, sender):
# The old switcheroo
# TODO(mrroach): Make this into a conditional static trigger
if self.game.run.server == self.game.corp.archives:
self.game.run.server = self.game.corp.hq
self.game.deregister_listener(events.ApproachServer_4_4, self)
| en | 0.699409 | # The old switcheroo # TODO(mrroach): Make this into a conditional static trigger | 2.425369 | 2 |
Algorithms/StringAlgorithms/knuth_morris_pratt_algorithm.py | hemraj4545/Data-Structures-and-Algorithms-in-Python | 3 | 6614916 | def prefixes(string):
n = len(string)
table = [0] * n
i, j = 0, 1
while j < n:
if string[i] == string[j]:
i += 1
table[j] = i
j += 1
else:
if i > 0:
i = table[i-1]
else:
j += 1
return table
def kmp(text, pattern):
    """Return True if ``pattern`` occurs as a substring of ``text``.

    Knuth-Morris-Pratt matching, O(len(text) + len(pattern)).

    Fix: an empty pattern now matches trivially; the previous version
    raised IndexError (it indexed ``pattern[0]`` unconditionally).
    """
    n, m = len(text), len(pattern)
    if m == 0:
        return True

    def _failure(p):
        # KMP failure table: t[q] is the length of the longest proper
        # prefix of p[:q+1] that is also a suffix of it.
        t = [0] * len(p)
        k = 0
        for q in range(1, len(p)):
            while k and p[q] != p[k]:
                k = t[k - 1]
            if p[q] == p[k]:
                k += 1
            t[q] = k
        return t

    table = _failure(pattern)
    i = j = 0
    while i < n:
        if text[i] == pattern[j]:
            i += 1
            j += 1
            if j == m:  # whole pattern matched
                return True
        elif j > 0:
            # Mismatch after a partial match: shift using the failure table.
            j = table[j - 1]
        else:
            i += 1
    return False
print(kmp("iojsaditpdadityp", "aditya"))
print(kmp("aiopsaipankopanwarasde", "panwar"))
| def prefixes(string):
n = len(string)
table = [0] * n
i, j = 0, 1
while j < n:
if string[i] == string[j]:
i += 1
table[j] = i
j += 1
else:
if i > 0:
i = table[i-1]
else:
j += 1
return table
def kmp(text, pattern):
n, m = len(text), len(pattern)
table = prefixes(pattern)
i, j = 0, 0
while i < n:
if text[i] == pattern[j]:
i += 1
j += 1
else:
if j > 0:
j = table[j-1]
else:
i += 1
if j == m:
return True
return False
print(kmp("iojsaditpdadityp", "aditya"))
print(kmp("aiopsaipankopanwarasde", "panwar"))
| none | 1 | 3.448805 | 3 | |
python/multprocess_test.py | runningforlife/CodingExamples | 0 | 6614917 | <filename>python/multprocess_test.py
#!/usr/bin/python
"""
mutiple process example
"""
import random
from multiprocessing import Process, Queue
import os
def put_num(que):
    """Draw a random integer in [1, 100], push it onto *que*, and log the
    worker's pid."""
    value = random.randint(1, 100)
    que.put(value)
    print(f'put num {value} on pid {os.getpid()}')
def main():
    """Spawn four workers that each put a random number on a shared queue,
    wait for them to finish, then print the collected results."""
    queue = Queue()
    childs = []
    for _ in range(4):
        p = Process(target=put_num, args=(queue,))
        childs.append(p)
        p.start()
    for p in childs:
        # Process.join() takes a timeout in *seconds*. The original passed
        # 10 * 1000 (~2.8 hours), apparently assuming milliseconds.
        p.join(timeout=10)
    # One result per child worker.
    results = [queue.get() for _ in childs]
    print(results)
if __name__ == '__main__':
    # Run the demo only when executed as a script (not on import).
    main()
| <filename>python/multprocess_test.py
#!/usr/bin/python
"""
mutiple process example
"""
import random
from multiprocessing import Process, Queue
import os
def put_num(que):
num = random.randint(1, 100)
que.put(num)
print(f'put num {num} on pid {os.getpid()}')
def main():
queue = Queue()
childs = []
for i in range(4):
p = Process(target=put_num, args=(queue,))
childs.append(p)
p.start()
for p in childs:
p.join(10 * 1000)
results = [queue.get() for p in childs]
print(results)
if __name__ == '__main__':
main()
| en | 0.347264 | #!/usr/bin/python mutiple process example | 2.992002 | 3 |
src/list-frames-roles.py | Indicator/amr-eager | 54 | 6614918 | #!/usr/bin/env python2.7
#coding=utf-8
'''
@author: <NAME> (<EMAIL>)
@since: 2015-05-06
'''
from __future__ import print_function
import sys, re, fileinput, codecs
from collections import Counter, defaultdict
from amr import AMR, AMRSyntaxError, AMRError, Concept, AMRConstant
# frame concept -> Counter of role labels observed on that frame.
c = defaultdict(Counter)
for ln in fileinput.input():
    try:
        a = AMR(ln)
        # Count each role whose head concept is a frame.
        for h,r,d in a.role_triples(normalize_inverses=True, normalize_mod=False):
            if a._v2c[h].is_frame():
                c[str(a._v2c[h])][r] += 1
    except AMRSyntaxError as ex:
        # Malformed AMR line: report to stderr and keep going.
        print(ex, file=sys.stderr)
    except AMRError as ex:
        print(ex, file=sys.stderr)
# Output: one tab-separated line per frame, then "role count" pairs.
for f,roles in sorted(c.items()):
    print(f,'\t'.join(' '.join([r,str(n)]) for r,n in sorted(roles.items())), sep='\t')
| #!/usr/bin/env python2.7
#coding=utf-8
'''
@author: <NAME> (<EMAIL>)
@since: 2015-05-06
'''
from __future__ import print_function
import sys, re, fileinput, codecs
from collections import Counter, defaultdict
from amr import AMR, AMRSyntaxError, AMRError, Concept, AMRConstant
c = defaultdict(Counter)
for ln in fileinput.input():
try:
a = AMR(ln)
for h,r,d in a.role_triples(normalize_inverses=True, normalize_mod=False):
if a._v2c[h].is_frame():
c[str(a._v2c[h])][r] += 1
except AMRSyntaxError as ex:
print(ex, file=sys.stderr)
except AMRError as ex:
print(ex, file=sys.stderr)
for f,roles in sorted(c.items()):
print(f,'\t'.join(' '.join([r,str(n)]) for r,n in sorted(roles.items())), sep='\t')
| en | 0.355781 | #!/usr/bin/env python2.7 #coding=utf-8 @author: <NAME> (<EMAIL>) @since: 2015-05-06 | 2.194361 | 2 |
py/make_exam1.py | davidwhogg/EinsteinsUniverse | 0 | 6614919 | <reponame>davidwhogg/EinsteinsUniverse<gh_stars>0
import numpy as np
# Fixed seed so every regeneration produces the same per-student orderings.
np.random.seed(17)
nproblem = 8 # magic
nstudent = 150 # magic
# LaTeX body of each problem; shuffled per student below.
problems = [
    r"""\begin{problem} (From Problem Set 1)
What is the relationship between the energy $E$ and wavelength
$\lambda$ of a photon? Give a formula that involves energy $E$,
Planck's Constant $h$, the speed of light $c$, and wavelength
$\lambda$ (or whatever you need; but if you need anything else,
be very clear with me what it is).
\end{problem}""",
    r"""\begin{problem} (From Problem Set 1)
How many cubic millimeters are there in a liter?
\end{problem}
""",
    r"""\begin{problem} (From Problem Set 1)
What are the SI units for pressure, in base units?
That is, what
combination of kg (kilograms), m (meters), and s (seconds)
has units of pressure?
\end{problem}
""",
    r"""\begin{problem} (From the reading)
Classical mechanics, or Newtonian mechanics, is only valid in certain
circumstances. When do the laws of classical mechanics, like $\vec{F} =
m\,\vec{a}$ for example, become wrong or break down? There are many answers
to this problem; I will take anything correct.
\end{problem}
""",
    r"""\begin{problem} (From the reading)
What musical instrument did Einstein most enjoy playing?
\end{problem}
""",
    r"""\begin{problem} (From the Math Review Lab)
What is this number? Give your answer in scientific notation.
$$
\frac{(6\times10^{-34})\times(3\times10^8)}{9\times10^{-7}}
$$
You don't need a calculator to solve this problem.
\end{problem}
""",
    r"""\begin{problem} (From Lecture)
Which of the following physical quantities are vectors?
\\
\textsl{(a)}~energy,
\textsl{(b)}~mass,
\textsl{(c)}~force,
\textsl{(d)}~momentum,
\textsl{(e)}~acceleration.
\end{problem}
""",
    r"""\begin{problem} (From Lecture)
Complete this statement of Newton's third law. Make it as specific as
you can.\\[2ex]
\textbf{For every force on object A from object B, there will be...}
\end{problem}
"""]
assert len(problems) == nproblem
# LaTeX preamble; \include{eu} pulls in shared macros (presumably where
# \examheader is defined -- confirm against eu.tex).
print(r"""
\documentclass[12pt, letterpaper]{article}
\include{eu}
\pagestyle{empty}
\begin{document}
""")
for student in range(nstudent):
    print(r"""
\examheader{Term Exam 1}
""")
    # Random problem order for this student's copy of the exam.
    pindx = np.argsort(np.random.uniform(size=nproblem))
    for problem, indx in enumerate(pindx):
        print(problems[indx])
        print(r"""
\vfill ~
""")
        # Force a page break after the 4th printed problem.
        if problem == 3:
            print(r"""
\clearpage
""")
    # Each student's exam starts on a fresh odd page.
    print(r"""
\cleardoublepage
""")
print(r"""
\end{document}
""")
| import numpy as np
np.random.seed(17)
nproblem = 8 # magic
nstudent = 150 # magic
problems = [
r"""\begin{problem} (From Problem Set 1)
What is the relationship between the energy $E$ and wavelength
$\lambda$ of a photon? Give a formula that involves energy $E$,
Planck's Constant $h$, the speed of light $c$, and wavelength
$\lambda$ (or whatever you need; but if you need anything else,
be very clear with me what it is).
\end{problem}""",
r"""\begin{problem} (From Problem Set 1)
How many cubic millimeters are there in a liter?
\end{problem}
""",
r"""\begin{problem} (From Problem Set 1)
What are the SI units for pressure, in base units?
That is, what
combination of kg (kilograms), m (meters), and s (seconds)
has units of pressure?
\end{problem}
""",
r"""\begin{problem} (From the reading)
Classical mechanics, or Newtonian mechanics, is only valid in certain
circumstances. When do the laws of classical mechanics, like $\vec{F} =
m\,\vec{a}$ for example, become wrong or break down? There are many answers
to this problem; I will take anything correct.
\end{problem}
""",
r"""\begin{problem} (From the reading)
What musical instrument did Einstein most enjoy playing?
\end{problem}
""",
r"""\begin{problem} (From the Math Review Lab)
What is this number? Give your answer in scientific notation.
$$
\frac{(6\times10^{-34})\times(3\times10^8)}{9\times10^{-7}}
$$
You don't need a calculator to solve this problem.
\end{problem}
""",
r"""\begin{problem} (From Lecture)
Which of the following physical quantities are vectors?
\\
\textsl{(a)}~energy,
\textsl{(b)}~mass,
\textsl{(c)}~force,
\textsl{(d)}~momentum,
\textsl{(e)}~acceleration.
\end{problem}
""",
r"""\begin{problem} (From Lecture)
Complete this statement of Newton's third law. Make it as specific as
you can.\\[2ex]
\textbf{For every force on object A from object B, there will be...}
\end{problem}
"""]
assert len(problems) == nproblem
print(r"""
\documentclass[12pt, letterpaper]{article}
\include{eu}
\pagestyle{empty}
\begin{document}
""")
for student in range(nstudent):
print(r"""
\examheader{Term Exam 1}
""")
pindx = np.argsort(np.random.uniform(size=nproblem))
for problem, indx in enumerate(pindx):
print(problems[indx])
print(r"""
\vfill ~
""")
if problem == 3:
print(r"""
\clearpage
""")
print(r"""
\cleardoublepage
""")
print(r"""
\end{document}
""") | en | 0.803894 | # magic # magic \begin{problem} (From Problem Set 1) What is the relationship between the energy $E$ and wavelength $\lambda$ of a photon? Give a formula that involves energy $E$, Planck's Constant $h$, the speed of light $c$, and wavelength $\lambda$ (or whatever you need; but if you need anything else, be very clear with me what it is). \end{problem} \begin{problem} (From Problem Set 1) How many cubic millimeters are there in a liter? \end{problem} \begin{problem} (From Problem Set 1) What are the SI units for pressure, in base units? That is, what combination of kg (kilograms), m (meters), and s (seconds) has units of pressure? \end{problem} \begin{problem} (From the reading) Classical mechanics, or Newtonian mechanics, is only valid in certain circumstances. When do the laws of classical mechanics, like $\vec{F} = m\,\vec{a}$ for example, become wrong or break down? There are many answers to this problem; I will take anything correct. \end{problem} \begin{problem} (From the reading) What musical instrument did Einstein most enjoy playing? \end{problem} \begin{problem} (From the Math Review Lab) What is this number? Give your answer in scientific notation. $$ \frac{(6\times10^{-34})\times(3\times10^8)}{9\times10^{-7}} $$ You don't need a calculator to solve this problem. \end{problem} \begin{problem} (From Lecture) Which of the following physical quantities are vectors? \\ \textsl{(a)}~energy, \textsl{(b)}~mass, \textsl{(c)}~force, \textsl{(d)}~momentum, \textsl{(e)}~acceleration. \end{problem} \begin{problem} (From Lecture) Complete this statement of Newton's third law. Make it as specific as you can.\\[2ex] \textbf{For every force on object A from object B, there will be...} \end{problem} \documentclass[12pt, letterpaper]{article} \include{eu} \pagestyle{empty} \begin{document} \examheader{Term Exam 1} \vfill ~ \clearpage \cleardoublepage \end{document} | 3.424978 | 3 |
train.py | DavidZhang73/ENGN8501GroupProject | 0 | 6614920 | <reponame>DavidZhang73/ENGN8501GroupProject
"""
Train
Adapted from https://github.com/PeterL1n/BackgroundMattingV2 by <NAME>
"""
import argparse
import os
import kornia
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.nn import functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from tqdm import tqdm
from data_path import DATA_PATH
from dataset.augmentation import TrainFrameSpeedSampler, ValidFrameSampler
from dataset.video_matte import VideoMatte240KDataset, VideoMatteTrainAugmentation, VideoMatteValidAugmentation
from model import MattingBase
from model.utils import load_matched_state_dict
# --------------- Arguments ---------------
parser = argparse.ArgumentParser()
# Dataset selection.
parser.add_argument('--dataset-name', type=str, default='videomatte8k', choices=DATA_PATH.keys())
# Model architecture and weight initialization / resumption.
parser.add_argument('--model-backbone', type=str, default='resnet50', choices=['resnet50'])
parser.add_argument('--model-name', type=str, default='convLSTM')
parser.add_argument('--model-pretrain-initialization', type=str, default=None)
parser.add_argument('--model-last-checkpoint', type=str, default=r'<path to last checkpoint>')
# Batching: each sample is a sequence of seq-length frames.
parser.add_argument('--batch-size', type=int, default=4)
parser.add_argument('--seq-length', type=int, default=8)
parser.add_argument('--num-workers', type=int, default=0)
# Training schedule.
parser.add_argument('--epoch-start', type=int, default=0)
parser.add_argument('--epoch-end', type=int, default=10)
# Logging / validation / checkpoint intervals (in steps).
parser.add_argument('--log-train-loss-interval', type=int, default=1)
parser.add_argument('--log-train-images-interval', type=int, default=20)
parser.add_argument('--log-valid-interval', type=int, default=1000)
parser.add_argument('--checkpoint-interval', type=int, default=1000)
args = parser.parse_args()
# --------------- Loading ---------------
def train():
    """Full training loop for the base matting model.

    Builds train/valid dataloaders, constructs the model and optimizer,
    then runs mixed-precision training with TensorBoard logging, periodic
    validation and checkpointing.
    """
    # Training DataLoader
    dataset_train = VideoMatte240KDataset(
        video_matte_path=DATA_PATH[args.dataset_name]['train'],
        background_image_path=DATA_PATH['backgrounds']['train'],
        seq_length=args.seq_length,
        seq_sampler=TrainFrameSpeedSampler(10),
        transform=VideoMatteTrainAugmentation((224, 224))
    )
    dataloader_train = DataLoader(
        dataset_train,
        shuffle=True,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True
    )
    # Validation DataLoader
    dataset_valid = VideoMatte240KDataset(
        video_matte_path=DATA_PATH[args.dataset_name]['valid'],
        background_image_path=DATA_PATH['backgrounds']['valid'],
        seq_length=args.seq_length,
        seq_sampler=ValidFrameSampler(),
        transform=VideoMatteValidAugmentation((224, 224))
    )
    dataloader_valid = DataLoader(
        dataset_valid,
        pin_memory=True,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        drop_last=True
    )
    # Model: resume from a checkpoint if given, otherwise optionally start
    # from pretrained DeepLabV3 weights.
    model = MattingBase(args.model_backbone).cuda()
    if args.model_last_checkpoint is not None:
        load_matched_state_dict(model, torch.load(args.model_last_checkpoint))
    elif args.model_pretrain_initialization is not None:
        model.load_pretrained_deeplabv3_state_dict(torch.load(args.model_pretrain_initialization)['model_state'])
    # Lower LR for the pretrained backbone, higher for the new heads.
    optimizer = AdamW([
        {'params': model.backbone.parameters(), 'lr': 1e-4},
        {'params': model.aspp.parameters(), 'lr': 5e-4},
        {'params': model.decoder.parameters(), 'lr': 5e-4}
    ])
    scaler = GradScaler()  # AMP gradient scaler
    # Logging and checkpoints
    if not os.path.exists(f'checkpoint/{args.model_name}'):
        os.makedirs(f'checkpoint/{args.model_name}')
    writer = SummaryWriter(f'log/{args.model_name}')
    # Run loop
    for epoch in range(args.epoch_start, args.epoch_end):
        bar = tqdm(dataloader_train, desc='[Train]')
        for i, (fgr, pha, bgr) in enumerate(bar):
            # Global step counter across epochs (1-based).
            step = epoch * len(dataloader_train) + i + 1
            true_fgr = fgr.cuda(non_blocking=True)
            true_bgr = bgr.cuda(non_blocking=True)
            true_pha = pha.cuda(non_blocking=True)
            true_src = true_bgr.clone()
            # Composite foreground onto source
            true_src = true_fgr * true_pha + true_src * (1 - true_pha)
            with autocast():
                pred_pha, pred_fgr, pred_err = model(true_src)[:3]
                # torch.flatten(x, 0, 1) collapses the first two dims
                # (batch and sequence) so the loss sees a flat batch.
                loss = compute_loss(
                    torch.flatten(pred_pha, 0, 1),
                    torch.flatten(pred_fgr, 0, 1),
                    torch.flatten(pred_err, 0, 1),
                    torch.flatten(true_pha, 0, 1),
                    torch.flatten(true_fgr, 0, 1)
                )
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            bar.set_description(f'[Train] Epoch: {epoch}, Step: {step}, Loss: {loss:.4f}')
            if step == 1 or step % args.log_train_loss_interval == 0:
                writer.add_scalar('loss', loss, step)
            if step == 1 or step % args.log_train_images_interval == 0:
                # One grid per tensor; nrow=seq_length puts one sequence
                # per grid row.
                writer.add_image(
                    'train_pred_pha',
                    make_grid(torch.flatten(pred_pha, 0, 1), nrow=args.seq_length),
                    step
                )
                writer.add_image(
                    'train_pred_fgr',
                    make_grid(torch.flatten(pred_fgr, 0, 1), nrow=args.seq_length),
                    step
                )
                writer.add_image(
                    'train_pred_err',
                    make_grid(torch.flatten(pred_err, 0, 1), nrow=args.seq_length),
                    step
                )
                writer.add_image(
                    'train_pred_com',
                    make_grid(torch.flatten(pred_fgr * pred_pha, 0, 1), nrow=args.seq_length),
                    step
                )
                writer.add_image(
                    'train_true_src',
                    make_grid(torch.flatten(true_src, 0, 1), nrow=args.seq_length),
                    step
                )
                writer.add_image(
                    'train_true_pha',
                    make_grid(torch.flatten(true_pha, 0, 1), nrow=args.seq_length),
                    step
                )
            # Free GPU tensors before validation / the next iteration.
            del true_pha, true_fgr, true_bgr
            del pred_pha, pred_fgr, pred_err
            if step % args.log_valid_interval == 0:
                valid(model, dataloader_valid, writer, step)
            if step % args.checkpoint_interval == 0:
                torch.save(model.state_dict(), f'checkpoint/{args.model_name}/epoch-{epoch}-iter-{step}.pth')
        # Always checkpoint at the end of each epoch.
        torch.save(model.state_dict(), f'checkpoint/{args.model_name}/epoch-{epoch}.pth')
# --------------- Utils ---------------
def compute_loss(pred_pha, pred_fgr, pred_err, true_pha, true_fgr):
    """Matting loss: alpha L1 + alpha-gradient (Sobel) L1 + foreground L1
    (masked to where the true alpha is non-zero) + error-map MSE.

    ``true_err`` is the detached |pred_pha - true_pha|, i.e. the target the
    error-prediction branch should learn (detach stops gradients flowing
    into the alpha branch through this term).
    """
    true_err = torch.abs(pred_pha.detach() - true_pha)
    # Supervise the foreground only where it is actually visible.
    true_msk = true_pha != 0
    return F.l1_loss(pred_pha, true_pha) + \
           F.l1_loss(kornia.sobel(pred_pha), kornia.sobel(true_pha)) + \
           F.l1_loss(pred_fgr * true_msk, true_fgr * true_msk) + \
           F.mse_loss(pred_err, true_err)
def valid(model, dataloader, writer, step):
    """Evaluate *model* on *dataloader* and log the mean loss to TensorBoard.

    Switches the model to eval mode for the pass and restores train mode
    before returning.
    """
    model.eval()
    loss_total = 0
    loss_count = 0
    with torch.no_grad():
        bar = tqdm(dataloader, desc='[Valid]')
        for true_fgr, true_pha, true_bgr in bar:
            batch_size = true_pha.size(0)
            true_pha = true_pha.cuda(non_blocking=True)
            true_fgr = true_fgr.cuda(non_blocking=True)
            true_bgr = true_bgr.cuda(non_blocking=True)
            # Composite the ground-truth foreground over the background.
            true_src = true_pha * true_fgr + (1 - true_pha) * true_bgr
            pred_pha, pred_fgr, pred_err = model(true_src)[:3]
            # Flatten batch and sequence dims, same as in train().
            loss = compute_loss(
                torch.flatten(pred_pha, 0, 1),
                torch.flatten(pred_fgr, 0, 1),
                torch.flatten(pred_err, 0, 1),
                torch.flatten(true_pha, 0, 1),
                torch.flatten(true_fgr, 0, 1)
            )
            bar.set_description(f'[Valid] Loss: {loss:.4f}')
            # Weight by batch size so the final mean is per-sample.
            loss_total += loss.cpu().item() * batch_size
            loss_count += batch_size
    writer.add_scalar('valid_loss', loss_total / loss_count, step)
    model.train()
# --------------- Start ---------------
if __name__ == '__main__':
    # Entry point: kick off training when run as a script.
    train()
| """
Train
Adapted from https://github.com/PeterL1n/BackgroundMattingV2 by <NAME>
"""
import argparse
import os
import kornia
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.nn import functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from tqdm import tqdm
from data_path import DATA_PATH
from dataset.augmentation import TrainFrameSpeedSampler, ValidFrameSampler
from dataset.video_matte import VideoMatte240KDataset, VideoMatteTrainAugmentation, VideoMatteValidAugmentation
from model import MattingBase
from model.utils import load_matched_state_dict
# --------------- Arguments ---------------
parser = argparse.ArgumentParser()
parser.add_argument('--dataset-name', type=str, default='videomatte8k', choices=DATA_PATH.keys())
parser.add_argument('--model-backbone', type=str, default='resnet50', choices=['resnet50'])
parser.add_argument('--model-name', type=str, default='convLSTM')
parser.add_argument('--model-pretrain-initialization', type=str, default=None)
parser.add_argument('--model-last-checkpoint', type=str, default=r'<path to last checkpoint>')
parser.add_argument('--batch-size', type=int, default=4)
parser.add_argument('--seq-length', type=int, default=8)
parser.add_argument('--num-workers', type=int, default=0)
parser.add_argument('--epoch-start', type=int, default=0)
parser.add_argument('--epoch-end', type=int, default=10)
parser.add_argument('--log-train-loss-interval', type=int, default=1)
parser.add_argument('--log-train-images-interval', type=int, default=20)
parser.add_argument('--log-valid-interval', type=int, default=1000)
parser.add_argument('--checkpoint-interval', type=int, default=1000)
args = parser.parse_args()
# --------------- Loading ---------------
def train():
# Training DataLoader
dataset_train = VideoMatte240KDataset(
video_matte_path=DATA_PATH[args.dataset_name]['train'],
background_image_path=DATA_PATH['backgrounds']['train'],
seq_length=args.seq_length,
seq_sampler=TrainFrameSpeedSampler(10),
transform=VideoMatteTrainAugmentation((224, 224))
)
dataloader_train = DataLoader(
dataset_train,
shuffle=True,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=True,
drop_last=True
)
# Validation DataLoader
dataset_valid = VideoMatte240KDataset(
video_matte_path=DATA_PATH[args.dataset_name]['valid'],
background_image_path=DATA_PATH['backgrounds']['valid'],
seq_length=args.seq_length,
seq_sampler=ValidFrameSampler(),
transform=VideoMatteValidAugmentation((224, 224))
)
dataloader_valid = DataLoader(
dataset_valid,
pin_memory=True,
batch_size=args.batch_size,
num_workers=args.num_workers,
drop_last=True
)
# Model
model = MattingBase(args.model_backbone).cuda()
if args.model_last_checkpoint is not None:
load_matched_state_dict(model, torch.load(args.model_last_checkpoint))
elif args.model_pretrain_initialization is not None:
model.load_pretrained_deeplabv3_state_dict(torch.load(args.model_pretrain_initialization)['model_state'])
optimizer = AdamW([
{'params': model.backbone.parameters(), 'lr': 1e-4},
{'params': model.aspp.parameters(), 'lr': 5e-4},
{'params': model.decoder.parameters(), 'lr': 5e-4}
])
scaler = GradScaler()
# Logging and checkpoints
if not os.path.exists(f'checkpoint/{args.model_name}'):
os.makedirs(f'checkpoint/{args.model_name}')
writer = SummaryWriter(f'log/{args.model_name}')
# Run loop
for epoch in range(args.epoch_start, args.epoch_end):
bar = tqdm(dataloader_train, desc='[Train]')
for i, (fgr, pha, bgr) in enumerate(bar):
step = epoch * len(dataloader_train) + i + 1
true_fgr = fgr.cuda(non_blocking=True)
true_bgr = bgr.cuda(non_blocking=True)
true_pha = pha.cuda(non_blocking=True)
true_src = true_bgr.clone()
# Composite foreground onto source
true_src = true_fgr * true_pha + true_src * (1 - true_pha)
with autocast():
pred_pha, pred_fgr, pred_err = model(true_src)[:3]
loss = compute_loss(
torch.flatten(pred_pha, 0, 1),
torch.flatten(pred_fgr, 0, 1),
torch.flatten(pred_err, 0, 1),
torch.flatten(true_pha, 0, 1),
torch.flatten(true_fgr, 0, 1)
)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
bar.set_description(f'[Train] Epoch: {epoch}, Step: {step}, Loss: {loss:.4f}')
if step == 1 or step % args.log_train_loss_interval == 0:
writer.add_scalar('loss', loss, step)
if step == 1 or step % args.log_train_images_interval == 0:
writer.add_image(
'train_pred_pha',
make_grid(torch.flatten(pred_pha, 0, 1), nrow=args.seq_length),
step
)
writer.add_image(
'train_pred_fgr',
make_grid(torch.flatten(pred_fgr, 0, 1), nrow=args.seq_length),
step
)
writer.add_image(
'train_pred_err',
make_grid(torch.flatten(pred_err, 0, 1), nrow=args.seq_length),
step
)
writer.add_image(
'train_pred_com',
make_grid(torch.flatten(pred_fgr * pred_pha, 0, 1), nrow=args.seq_length),
step
)
writer.add_image(
'train_true_src',
make_grid(torch.flatten(true_src, 0, 1), nrow=args.seq_length),
step
)
writer.add_image(
'train_true_pha',
make_grid(torch.flatten(true_pha, 0, 1), nrow=args.seq_length),
step
)
del true_pha, true_fgr, true_bgr
del pred_pha, pred_fgr, pred_err
if step % args.log_valid_interval == 0:
valid(model, dataloader_valid, writer, step)
if step % args.checkpoint_interval == 0:
torch.save(model.state_dict(), f'checkpoint/{args.model_name}/epoch-{epoch}-iter-{step}.pth')
torch.save(model.state_dict(), f'checkpoint/{args.model_name}/epoch-{epoch}.pth')
# --------------- Utils ---------------
def compute_loss(pred_pha, pred_fgr, pred_err, true_pha, true_fgr):
true_err = torch.abs(pred_pha.detach() - true_pha)
true_msk = true_pha != 0
return F.l1_loss(pred_pha, true_pha) + \
F.l1_loss(kornia.sobel(pred_pha), kornia.sobel(true_pha)) + \
F.l1_loss(pred_fgr * true_msk, true_fgr * true_msk) + \
F.mse_loss(pred_err, true_err)
def valid(model, dataloader, writer, step):
    """Run one pass over the validation set and log the mean loss.

    Puts the model in eval mode for the pass and restores train mode
    afterwards; gradients are disabled throughout.
    """
    model.eval()
    loss_total = 0
    loss_count = 0
    with torch.no_grad():
        bar = tqdm(dataloader, desc='[Valid]')
        for true_fgr, true_pha, true_bgr in bar:
            batch_size = true_pha.size(0)
            true_pha = true_pha.cuda(non_blocking=True)
            true_fgr = true_fgr.cuda(non_blocking=True)
            true_bgr = true_bgr.cuda(non_blocking=True)
            # Composite ground-truth foreground over background.
            true_src = true_pha * true_fgr + (1 - true_pha) * true_bgr
            pred_pha, pred_fgr, pred_err = model(true_src)[:3]
            # Merge batch and sequence dims before computing the loss.
            loss = compute_loss(
                torch.flatten(pred_pha, 0, 1),
                torch.flatten(pred_fgr, 0, 1),
                torch.flatten(pred_err, 0, 1),
                torch.flatten(true_pha, 0, 1),
                torch.flatten(true_fgr, 0, 1)
            )
            bar.set_description(f'[Valid] Loss: {loss:.4f}')
            # Weight by batch size so the final average is per-sample.
            loss_total += loss.cpu().item() * batch_size
            loss_count += batch_size
    writer.add_scalar('valid_loss', loss_total / loss_count, step)
    model.train()
# --------------- Start ---------------
if __name__ == '__main__':
train() | en | 0.482009 | Train Adapted from https://github.com/PeterL1n/BackgroundMattingV2 by <NAME> # --------------- Arguments --------------- # --------------- Loading --------------- # Training DataLoader # Validation DataLoader # Model # Logging and checkpoints # Run loop # Composite foreground onto source # --------------- Utils --------------- # --------------- Start --------------- | 1.974246 | 2 |
addons/onedrivebusiness/utils.py | tsukaeru/RDM-osf.io | 0 | 6614921 | <gh_stars>0
import logging
from django.core.cache import cache
from framework.exceptions import HTTPError
from osf.models import RdmAddonOption
from osf.models.region_external_account import RegionExternalAccount
from addons.osfstorage.models import Region
from addons.onedrivebusiness import SHORT_NAME
from addons.onedrivebusiness.client import UserListClient
from addons.onedrivebusiness import settings
logger = logging.getLogger(__name__)
def parse_root_folder_id(root_folder_id):
    """Split a stored root-folder id into a ``(prefix, folder_id)`` pair.

    Ids may be stored as ``"<prefix>\\t<folder_id>"``; a plain id with no
    tab yields ``(None, root_folder_id)``.
    """
    head, sep, tail = root_folder_id.partition('\t')
    if not sep:
        # No tab separator: the whole string is the folder id.
        return None, root_folder_id
    return head, tail
def get_region_external_account(node):
    """Resolve the OneDrive-for-Business external account for *node*.

    Walks: node creator -> first affiliated institution -> that
    institution's allowed RdmAddonOption for this provider -> the storage
    Region whose ``_id`` equals the institution ``_id`` -> its
    RegionExternalAccount.  Returns ``None`` if any link is missing.
    """
    user = node.creator
    if user is None:
        return None
    institution = user.affiliated_institutions.first()
    if institution is None:
        return None
    addon_option = RdmAddonOption.objects.filter(
        provider=SHORT_NAME,
        institution_id=institution.id,
        is_allowed=True
    ).first()
    if addon_option is None:
        return None
    try:
        region = Region.objects.get(_id=institution._id)
        # NOTE(review): RegionExternalAccount.DoesNotExist is not caught
        # here — confirm an account always exists when the Region does.
        return RegionExternalAccount.objects.get(region=region)
    except Region.DoesNotExist:
        return None
def get_column_id(sheet, text):
    """Return ``(column_letter, row)`` of the first cell whose stripped
    string value equals *text*, scanning rows top to bottom.

    Raises:
        KeyError: if no cell matches.
    """
    for row in sheet.iter_rows():
        for cell in list(row):
            if str(cell.value).strip() == text:
                return (cell.column_letter, cell.row)
    raise KeyError('Column "{}" is not found in userlist'.format(text))
def get_sheet_values(sheet, column_ids):
    """Collect the data rows under the given header columns.

    Args:
        sheet: openpyxl-style worksheet.
        column_ids: list of ``(column_letter, header_row)`` pairs, as
            returned by ``get_column_id``.

    Returns:
        A list of value-lists, one per row, in ``column_ids`` order.
        Data starts below the deepest header row; a row is dropped when
        any wanted cell is empty (None).
    """
    # Data begins on the row after the lowest header cell.
    start_row = max([row for _, row in column_ids]) + 1
    values = []
    for row in sheet.iter_rows(min_row=start_row):
        v = []
        logger.debug('Row: {}'.format(row))
        for col, _ in column_ids:
            target = None
            for cell in list(row):
                # Cells whose text starts with '#' act as comments and are
                # ignored, which leaves target None and drops the row below.
                if str(cell.value).startswith('#'):
                    continue
                if cell.column_letter == col:
                    target = cell.value
            v.append(target)
        # Skip incomplete (or commented-out) rows entirely.
        if any([e is None for e in v]):
            continue
        values.append(v)
    return values
def get_user_item(region_client, folder_id, values):
    """Resolve one userlist row into an ``(eppn, user_info)`` pair.

    Checks the cache first; on a miss, queries the client and caches the
    result.  On an HTTPError the pair is still returned, with ``id`` None.
    """
    eppn, msaccount = values
    user_info = cache.get('{}:{}'.format(folder_id, msaccount))
    if user_info is not None:
        return (eppn, user_info)
    try:
        user = region_client.get_user(msaccount)
        logger.debug('User: {}'.format(user))
        user_info = {'userPrincipalName': msaccount, 'id': user['id'], 'mail': user['mail']}
        cache.set('{}:{}'.format(folder_id, msaccount), user_info, settings.TEAM_MEMBER_USER_CACHE_TIMEOUT)
        return (eppn, user_info)
    except HTTPError:
        # Best effort: keep the account name but mark the id unknown.
        # Failures are not cached, so the lookup is retried next time.
        logger.warning('Cannot get user details for {}'.format(msaccount))
        return (eppn, {'userPrincipalName': msaccount, 'id': None})
def get_user_map(region_client, folder_id, filename=None, sheet_name=None):
    """Build (or fetch from cache) the ePPN -> Microsoft-user mapping
    stored in the team's userlist workbook.

    The workbook must contain 'ePPN' and 'MicrosoftAccount' header cells;
    rows below them are resolved via ``get_user_item``.  The result is
    cached under *folder_id* for TEAM_MEMBER_LIST_CACHE_TIMEOUT.
    """
    user_map = cache.get(folder_id)
    if user_map is not None:
        return user_map
    client = UserListClient(region_client, folder_id,
                            filename=filename, sheet_name=sheet_name)
    sheet = client.get_workbook_sheet()
    column_texts = ['ePPN', 'MicrosoftAccount']
    column_ids = [get_column_id(sheet, text) for text in column_texts]
    logger.debug('column_ids: {}'.format(column_ids))
    user_map = dict([get_user_item(region_client, folder_id, v)
                     for v in get_sheet_values(sheet, column_ids)])
    cache.set(folder_id, user_map, settings.TEAM_MEMBER_LIST_CACHE_TIMEOUT)
    return user_map
| import logging
from django.core.cache import cache
from framework.exceptions import HTTPError
from osf.models import RdmAddonOption
from osf.models.region_external_account import RegionExternalAccount
from addons.osfstorage.models import Region
from addons.onedrivebusiness import SHORT_NAME
from addons.onedrivebusiness.client import UserListClient
from addons.onedrivebusiness import settings
logger = logging.getLogger(__name__)
def parse_root_folder_id(root_folder_id):
    """Split a stored root-folder id into a ``(prefix, folder_id)`` pair.

    Ids may be stored as ``"<prefix>\\t<folder_id>"``; a plain id with no
    tab yields ``(None, root_folder_id)``.
    """
    head, sep, tail = root_folder_id.partition('\t')
    if not sep:
        # No tab separator: the whole string is the folder id.
        return None, root_folder_id
    return head, tail
def get_region_external_account(node):
    """Resolve the OneDrive-for-Business external account for *node*.

    Walks: node creator -> first affiliated institution -> that
    institution's allowed RdmAddonOption for this provider -> the storage
    Region whose ``_id`` equals the institution ``_id`` -> its
    RegionExternalAccount.  Returns ``None`` if any link is missing.
    """
    user = node.creator
    if user is None:
        return None
    institution = user.affiliated_institutions.first()
    if institution is None:
        return None
    addon_option = RdmAddonOption.objects.filter(
        provider=SHORT_NAME,
        institution_id=institution.id,
        is_allowed=True
    ).first()
    if addon_option is None:
        return None
    try:
        region = Region.objects.get(_id=institution._id)
        # NOTE(review): RegionExternalAccount.DoesNotExist is not caught
        # here — confirm an account always exists when the Region does.
        return RegionExternalAccount.objects.get(region=region)
    except Region.DoesNotExist:
        return None
def get_column_id(sheet, text):
    """Return ``(column_letter, row)`` of the first cell whose stripped
    string value equals *text*, scanning rows top to bottom.

    Raises:
        KeyError: if no cell matches.
    """
    for row in sheet.iter_rows():
        for cell in list(row):
            if str(cell.value).strip() == text:
                return (cell.column_letter, cell.row)
    raise KeyError('Column "{}" is not found in userlist'.format(text))
def get_sheet_values(sheet, column_ids):
    """Collect the data rows under the given header columns.

    Returns a list of value-lists, one per row, in ``column_ids`` order;
    rows start below the deepest header row, and a row is dropped when any
    wanted cell is empty (None).
    """
    # Data begins on the row after the lowest header cell.
    start_row = max([row for _, row in column_ids]) + 1
    values = []
    for row in sheet.iter_rows(min_row=start_row):
        v = []
        logger.debug('Row: {}'.format(row))
        for col, _ in column_ids:
            target = None
            for cell in list(row):
                # '#'-prefixed cell values act as comments and are ignored.
                if str(cell.value).startswith('#'):
                    continue
                if cell.column_letter == col:
                    target = cell.value
            v.append(target)
        # Skip incomplete (or commented-out) rows entirely.
        if any([e is None for e in v]):
            continue
        values.append(v)
    return values
def get_user_item(region_client, folder_id, values):
    """Resolve one userlist row into an ``(eppn, user_info)`` pair.

    Checks the cache first; on a miss, queries the client and caches the
    result.  On an HTTPError the pair is still returned, with ``id`` None.
    """
    eppn, msaccount = values
    user_info = cache.get('{}:{}'.format(folder_id, msaccount))
    if user_info is not None:
        return (eppn, user_info)
    try:
        user = region_client.get_user(msaccount)
        logger.debug('User: {}'.format(user))
        user_info = {'userPrincipalName': msaccount, 'id': user['id'], 'mail': user['mail']}
        cache.set('{}:{}'.format(folder_id, msaccount), user_info, settings.TEAM_MEMBER_USER_CACHE_TIMEOUT)
        return (eppn, user_info)
    except HTTPError:
        # Best effort: keep the account name but mark the id unknown.
        logger.warning('Cannot get user details for {}'.format(msaccount))
        return (eppn, {'userPrincipalName': msaccount, 'id': None})
def get_user_map(region_client, folder_id, filename=None, sheet_name=None):
user_map = cache.get(folder_id)
if user_map is not None:
return user_map
client = UserListClient(region_client, folder_id,
filename=filename, sheet_name=sheet_name)
sheet = client.get_workbook_sheet()
column_texts = ['ePPN', 'MicrosoftAccount']
column_ids = [get_column_id(sheet, text) for text in column_texts]
logger.debug('column_ids: {}'.format(column_ids))
user_map = dict([get_user_item(region_client, folder_id, v)
for v in get_sheet_values(sheet, column_ids)])
cache.set(folder_id, user_map, settings.TEAM_MEMBER_LIST_CACHE_TIMEOUT)
return user_map | none | 1 | 1.912445 | 2 | |
56798152-extract-ROI-with-mask/extract_ROI_with_mask.py | nathancy/stackoverflow | 3 | 6614922 | <gh_stars>1-10
import cv2
import numpy as np

# Load in image and create copy
image = cv2.imread('1.png')
original = image.copy()

# Gaussian blur and extract blue channel
blur = cv2.GaussianBlur(image, (3,3), 0)
blue = blur[:,:,0]

# Threshold image and erode to isolate gate contour
thresh = cv2.threshold(blue,135, 255, cv2.THRESH_BINARY_INV)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
erode = cv2.erode(thresh, kernel, iterations=4)

# Create a mask and find contours
mask = np.zeros(original.shape, dtype=np.uint8)
cnts = cv2.findContours(erode, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# findContours returns 2 values on OpenCV 2/4 and 3 on OpenCV 3;
# this picks out the contour list either way.
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

# Filter for gate contour using area and draw onto mask
for c in cnts:
    area = cv2.contourArea(c)
    if area > 6000:
        cv2.drawContours(mask, [c], -1, (255,255,255), 2)

# Dilate to restore contour and mask it with original image
dilate = cv2.dilate(mask, kernel, iterations=7)
result = cv2.bitwise_and(original, dilate)

cv2.imshow('thresh', thresh)
cv2.imshow('erode', erode)
cv2.imshow('mask', mask)
cv2.imshow('dilate', dilate)
cv2.imshow('result', result)
cv2.waitKey()
| import cv2
# NOTE(review): the matching `import cv2` of this copy sits on the
# preceding (mangled) line of the dump.
import numpy as np

# Load in image and create copy
image = cv2.imread('1.png')
original = image.copy()

# Gaussian blur and extract blue channel
blur = cv2.GaussianBlur(image, (3,3), 0)
blue = blur[:,:,0]

# Threshold image and erode to isolate gate contour
thresh = cv2.threshold(blue,135, 255, cv2.THRESH_BINARY_INV)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
erode = cv2.erode(thresh, kernel, iterations=4)

# Create a mask and find contours
mask = np.zeros(original.shape, dtype=np.uint8)
cnts = cv2.findContours(erode, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# findContours returns 2 values on OpenCV 2/4 and 3 on OpenCV 3.
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

# Filter for gate contour using area and draw onto mask
for c in cnts:
    area = cv2.contourArea(c)
    if area > 6000:
        cv2.drawContours(mask, [c], -1, (255,255,255), 2)

# Dilate to restore contour and mask it with original image
dilate = cv2.dilate(mask, kernel, iterations=7)
result = cv2.bitwise_and(original, dilate)

cv2.imshow('thresh', thresh)
cv2.imshow('erode', erode)
cv2.imshow('mask', mask)
cv2.imshow('dilate', dilate)
cv2.imshow('result', result)
cv2.waitKey()
cv2.waitKey() | en | 0.819325 | # Load in image and create copy # Gaussian blur and extract blue channel # Threshold image and erode to isolate gate contour # Create a mask and find contours # Filter for gate contour using area and draw onto mask # Dilate to restore contour and mask it with original image | 3.127661 | 3 |
iterparse/__init__.py | invenia/iterparse | 0 | 6614923 | <filename>iterparse/__init__.py
from .parser import iterparse

# Public API of the package.
__all__ = ['__version__', 'iterparse']
__version__ = '0.0.1'  # Needs to be just a string for setup.py
| <filename>iterparse/__init__.py
from .parser import iterparse

# Public API of the package.
__all__ = ['__version__', 'iterparse']
__version__ = '0.0.1'  # Needs to be just a string for setup.py
| en | 0.755512 | # Needs to be just a string for setup.py | 1.444874 | 1 |
hackerrank/algorithms/Strings/make_it_anagram.py | codervikash/online-courses | 0 | 6614924 | # Alice recently started learning about cryptography and found that anagrams are very useful. Two strings are anagrams of each other if they have same character set and same length. For example strings "bacdc" and "dcbac" are anagrams, while strings "bacdc" and "dcbad" are not.
#
# Alice decides on an encryption scheme involving 2 large strings where encryption is dependent on the minimum number of character deletions required to make the two strings anagrams. She need your help in finding out this number.
#
# Given two strings (they can be of same or different length) help her in finding out the minimum number of character deletions required to make two strings anagrams. Any characters can be deleted from any of the strings.
#
# Input Format
# Two lines each containing a string.
#
# Constraints
# 1 <= Length of A,B <= 10000
# A and B will only consist of lowercase latin letters.
#
# Output Format
# A single integer which is the number of character deletions.
# NOTE(review): raw_input is Python 2; under Python 3 this would be input().
x = raw_input()
y = raw_input()

# Per-letter frequency tables for the 26 lowercase latin letters.
s1 = [0] * 26
s2 = [0] * 26
count = 0
for i in x:
    s1[ord(i) - 97] += 1
for i in y:
    s2[ord(i) - 97] += 1
# Each surplus occurrence on either side must be deleted.
for i in range(26):
    count += abs(s1[i] - s2[i])
print (count)
| # Alice recently started learning about cryptography and found that anagrams are very useful. Two strings are anagrams of each other if they have same character set and same length. For example strings "bacdc" and "dcbac" are anagrams, while strings "bacdc" and "dcbad" are not.
#
# Alice decides on an encryption scheme involving 2 large strings where encryption is dependent on the minimum number of character deletions required to make the two strings anagrams. She need your help in finding out this number.
#
# Given two strings (they can be of same or different length) help her in finding out the minimum number of character deletions required to make two strings anagrams. Any characters can be deleted from any of the strings.
#
# Input Format
# Two lines each containing a string.
#
# Constraints
# 1 <= Length of A,B <= 10000
# A and B will only consist of lowercase latin letters.
#
# Output Format
# A single integer which is the number of character deletions.
# NOTE(review): raw_input is Python 2; under Python 3 this would be input().
x = raw_input()
y = raw_input()

# Per-letter frequency tables for the 26 lowercase latin letters.
s1 = [0] * 26
s2 = [0] * 26
count = 0
for i in x:
    s1[ord(i) - 97] += 1
for i in y:
    s2[ord(i) - 97] += 1
# Each surplus occurrence on either side must be deleted.
for i in range(26):
    count += abs(s1[i] - s2[i])
print (count)
| en | 0.937291 | # Alice recently started learning about cryptography and found that anagrams are very useful. Two strings are anagrams of each other if they have same character set and same length. For example strings "bacdc" and "dcbac" are anagrams, while strings "bacdc" and "dcbad" are not. # # Alice decides on an encryption scheme involving 2 large strings where encryption is dependent on the minimum number of character deletions required to make the two strings anagrams. She need your help in finding out this number. # # Given two strings (they can be of same or different length) help her in finding out the minimum number of character deletions required to make two strings anagrams. Any characters can be deleted from any of the strings. # # Input Format # Two lines each containing a string. # # Constraints # 1 <= Length of A,B <= 10000 # A and B will only consist of lowercase latin letter. # # Output Format # A single integer which is the number of character deletions. | 3.909197 | 4 |
programme/views/admin_schedule_view.py | darkismus/kompassi | 13 | 6614925 | import logging
from django.contrib import messages
from django.views.decorators.http import require_http_methods, require_POST
from django.shortcuts import redirect, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from core.utils import initialize_form
from ..helpers import programme_admin_required
from ..forms import (
AddRoomForm,
DeleteViewForm,
MoveViewForm,
MoveViewRoomForm,
RemoveViewRoomForm,
ViewForm,
)
from ..models import View, ViewRoom
# Maps POST 'action' values on the schedule page to their form classes.
schedule_actions = {
    'add-view': ViewForm,
    'move-view': MoveViewForm,
    'remove-room': RemoveViewRoomForm,
    'move-room': MoveViewRoomForm,
}
@programme_admin_required
@require_http_methods(['GET', 'HEAD', 'POST'])
def admin_schedule_view(request, vars, event):
    """Render the schedule editor; on POST, apply one schedule action.

    POST 'action' selects a form class from ``schedule_actions``; a valid
    form is saved and the user is redirected back here (post/redirect/get).
    An invalid form or unknown action falls through to re-rendering the
    schedule with an error message.
    """
    # Local import — presumably to avoid a circular import with the
    # public views; confirm before moving to module level.
    from .public_views import actual_schedule_view
    if request.method == 'POST':
        action = request.POST.get('action')
        if action in schedule_actions:
            FormClass = schedule_actions[action]
            form = initialize_form(FormClass, request, event=event)
            if form.is_valid():
                form.save()
                messages.success(request, _('The schedule change was successful.'))
                return redirect('programme:admin_schedule_view', event_slug=event.slug)
            else:
                messages.error(request, _('Please check the form.'))
        else:
            messages.error(request, _('Unknown action'))
    vars.update(
        add_view_form=ViewForm(event=event),
    )
    return actual_schedule_view(
        request,
        event,
        internal_programmes=True,
        template='programme_admin_schedule_view.pug',
        vars=vars,
        show_programme_actions=True,
    )
# Maps POST 'action' values on a single view to their form classes.
view_actions = {
    'add-room': AddRoomForm,
    'update-view': ViewForm,
    'delete-view': DeleteViewForm,
}
@programme_admin_required
@require_POST
def admin_schedule_update_view_view(request, vars, event, view_id):
    """Apply one POSTed action (add room / update / delete) to a View.

    Always redirects back to the schedule admin page; the outcome is
    reported through the Django messages framework.
    """
    view = get_object_or_404(View, id=int(view_id), event=event)
    action = request.POST.get('action')
    if action in view_actions:
        FormClass = view_actions[action]
        form = initialize_form(FormClass, request, instance=view)
        if form.is_valid():
            form.save()
            messages.success(request, _('The schedule change was successful.'))
            return redirect('programme:admin_schedule_view', event_slug=event.slug)
        else:
            messages.error(request, _('Please check the form.'))
    else:
        messages.error(request, _('Unknown action'))
    return redirect('programme:admin_schedule_view', event_slug=event.slug)
| import logging
from django.contrib import messages
from django.views.decorators.http import require_http_methods, require_POST
from django.shortcuts import redirect, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from core.utils import initialize_form
from ..helpers import programme_admin_required
from ..forms import (
AddRoomForm,
DeleteViewForm,
MoveViewForm,
MoveViewRoomForm,
RemoveViewRoomForm,
ViewForm,
)
from ..models import View, ViewRoom
# Maps POST 'action' values on the schedule page to their form classes.
schedule_actions = {
    'add-view': ViewForm,
    'move-view': MoveViewForm,
    'remove-room': RemoveViewRoomForm,
    'move-room': MoveViewRoomForm,
}
@programme_admin_required
@require_http_methods(['GET', 'HEAD', 'POST'])
def admin_schedule_view(request, vars, event):
    """Render the schedule editor; on POST, apply one schedule action.

    POST 'action' selects a form class from ``schedule_actions``; a valid
    form is saved and the user is redirected back here (post/redirect/get).
    An invalid form or unknown action falls through to re-rendering the
    schedule with an error message.
    """
    # Local import — presumably to avoid a circular import with the
    # public views; confirm before moving to module level.
    from .public_views import actual_schedule_view
    if request.method == 'POST':
        action = request.POST.get('action')
        if action in schedule_actions:
            FormClass = schedule_actions[action]
            form = initialize_form(FormClass, request, event=event)
            if form.is_valid():
                form.save()
                messages.success(request, _('The schedule change was successful.'))
                return redirect('programme:admin_schedule_view', event_slug=event.slug)
            else:
                messages.error(request, _('Please check the form.'))
        else:
            messages.error(request, _('Unknown action'))
    vars.update(
        add_view_form=ViewForm(event=event),
    )
    return actual_schedule_view(
        request,
        event,
        internal_programmes=True,
        template='programme_admin_schedule_view.pug',
        vars=vars,
        show_programme_actions=True,
    )
# Maps POST 'action' values on a single view to their form classes.
view_actions = {
    'add-room': AddRoomForm,
    'update-view': ViewForm,
    'delete-view': DeleteViewForm,
}
@programme_admin_required
@require_POST
def admin_schedule_update_view_view(request, vars, event, view_id):
    """Apply one POSTed action (add room / update / delete) to a View.

    Always redirects back to the schedule admin page; the outcome is
    reported through the Django messages framework.
    """
    view = get_object_or_404(View, id=int(view_id), event=event)
    action = request.POST.get('action')
    if action in view_actions:
        FormClass = view_actions[action]
        form = initialize_form(FormClass, request, instance=view)
        if form.is_valid():
            form.save()
            messages.success(request, _('The schedule change was successful.'))
            return redirect('programme:admin_schedule_view', event_slug=event.slug)
        else:
            messages.error(request, _('Please check the form.'))
    else:
        messages.error(request, _('Unknown action'))
    return redirect('programme:admin_schedule_view', event_slug=event.slug)
| none | 1 | 1.93805 | 2 | |
relman/source.py | delapsley/relman | 0 | 6614926 | # Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data sources"""
import abc
import csv
from jira import client
class Source:
    """Abstract base for issue sources (Python 2 iterator protocol)."""
    __metaclass__ = abc.ABCMeta  # Py2 ABC declaration; py3 would subclass abc.ABC

    def __iter__(self):
        return self

    @abc.abstractmethod
    def next(self):
        # Py2 iterator method; py3 would be __next__.
        pass
class JIRASource(Source):
    """Issue source backed by a JIRA JQL search."""

    def __init__(self, server, user, password, jql, max_results=100):
        # Runs the search eagerly; the result list is what we iterate.
        jira = client.JIRA(server=server, basic_auth=(user, password))
        self._issues = jira.search_issues(jql, maxResults=max_results)

    def __iter__(self):
        return self._issues.__iter__()

    def next(self):
        return self._issues.next()
class CSVSource(Source):
    """Issue source backed by a CSV file with a header row."""

    def __init__(self, input_file):
        # Each issue becomes a dict keyed by the header-row column names.
        self._issues = []
        # NOTE(review): 'rb' + csv.reader is Python 2 usage; Python 3
        # requires text mode (newline='') here — confirm target runtime.
        with open(input_file, 'rb') as csvfile:
            test_reader = csv.reader(csvfile)
            header = None
            for row in test_reader:
                # First row is the header.
                header = [x for x in row]
                break
            for row in test_reader:
                self._issues.append(dict(zip(header, row)))

    def __iter__(self):
        return self._issues.__iter__()

    def next(self):
        return self._issues.next()
def run(parsed_args):
    """Report review status for JIRA issues matching a query.

    Prints one CSV line per issue: key, number of verification steps,
    number of verification results, and whether a review link was found
    in the comments.  Does nothing when dry_run is set.

    parsed_args: parsed command-line arguments (server, user, password,
        query, max_results, dry_run).
    """
    # NOTE: asserts are stripped under ``python -O``; these are sanity
    # checks rather than real argument validation.
    assert parsed_args.server
    assert parsed_args.user
    assert parsed_args.password
    assert parsed_args.query
    assert parsed_args.max_results
    if not parsed_args.dry_run:
        jira = client.JIRA(server=parsed_args.server,
                           basic_auth=(parsed_args.user, parsed_args.password))
        issues = jira.search_issues(parsed_args.query,
                                    maxResults=parsed_args.max_results)
        print('key,len(verification_steps),len(verification_results),review')
        for i in issues:
            # Custom fields may be absent (None); treat as empty lists.
            verification_results = i.fields.customfield_14210 or []
            verification_steps = i.fields.customfield_13913 or []
            review = False
            try:
                comments = jira.comments(i)
            except Exception:
                # Narrowed from a bare ``except:`` (which also swallowed
                # KeyboardInterrupt/SystemExit); keep the best-effort
                # behaviour of "no comments" on failure.
                comments = []
            for c in comments:
                if 'review.metacloud.in' in c.body:
                    review = True
            print('%s,%s,%s,%s' % (i.key,
                                   len(verification_steps),
                                   len(verification_results),
                                   review))
| # Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data sources"""
import abc
import csv
from jira import client
class Source:
    """Abstract base for issue sources (Python 2 iterator protocol)."""
    __metaclass__ = abc.ABCMeta  # Py2 ABC declaration; py3 would subclass abc.ABC

    def __iter__(self):
        return self

    @abc.abstractmethod
    def next(self):
        # Py2 iterator method; py3 would be __next__.
        pass
class JIRASource(Source):
    """Issue source backed by a JIRA JQL search."""

    def __init__(self, server, user, password, jql, max_results=100):
        # Runs the search eagerly; the result list is what we iterate.
        jira = client.JIRA(server=server, basic_auth=(user, password))
        self._issues = jira.search_issues(jql, maxResults=max_results)

    def __iter__(self):
        return self._issues.__iter__()

    def next(self):
        return self._issues.next()
class CSVSource(Source):
    """Issue source backed by a CSV file with a header row."""

    def __init__(self, input_file):
        # Each issue becomes a dict keyed by the header-row column names.
        self._issues = []
        # NOTE(review): 'rb' + csv.reader is Python 2 usage; Python 3
        # requires text mode (newline='') here — confirm target runtime.
        with open(input_file, 'rb') as csvfile:
            test_reader = csv.reader(csvfile)
            header = None
            for row in test_reader:
                # First row is the header.
                header = [x for x in row]
                break
            for row in test_reader:
                self._issues.append(dict(zip(header, row)))

    def __iter__(self):
        return self._issues.__iter__()

    def next(self):
        return self._issues.next()
def run(parsed_args):
    """Report review status for JIRA issues matching a query.

    Prints one CSV line per issue: key, number of verification steps,
    number of verification results, and whether a review link was found
    in the comments.  Does nothing when dry_run is set.

    parsed_args: parsed command-line arguments (server, user, password,
        query, max_results, dry_run).
    """
    # NOTE: asserts are stripped under ``python -O``; these are sanity
    # checks rather than real argument validation.
    assert parsed_args.server
    assert parsed_args.user
    assert parsed_args.password
    assert parsed_args.query
    assert parsed_args.max_results
    if not parsed_args.dry_run:
        jira = client.JIRA(server=parsed_args.server,
                           basic_auth=(parsed_args.user, parsed_args.password))
        issues = jira.search_issues(parsed_args.query,
                                    maxResults=parsed_args.max_results)
        print('key,len(verification_steps),len(verification_results),review')
        for i in issues:
            # Custom fields may be absent (None); treat as empty lists.
            verification_results = i.fields.customfield_14210 or []
            verification_steps = i.fields.customfield_13913 or []
            review = False
            try:
                comments = jira.comments(i)
            except Exception:
                # Narrowed from a bare ``except:`` (which also swallowed
                # KeyboardInterrupt/SystemExit); keep the best-effort
                # behaviour of "no comments" on failure.
                comments = []
            for c in comments:
                if 'review.metacloud.in' in c.body:
                    review = True
            print('%s,%s,%s,%s' % (i.key,
                                   len(verification_steps),
                                   len(verification_results),
                                   review))
| en | 0.830414 | # Copyright 2016 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Data sources Execute release task creation parsed_args: command line arguemnts | 2.738113 | 3 |
hypha/apply/categories/categories_seed.py | maxpearl/hypha | 20 | 6614927 | # The global categories are fairly static so using a static seed.
CATEGORIES = [
{
"category": "Addressed problems",
"name": "Restrictive Internet filtering by technical methods (IP blocking, DNS filtering, TCP RST, DPI, etc.)",
"tid": "318"
},
{
"category": "Addressed problems",
"name": "Blocking, filtering, or modification of political, social, and/or religious content (including apps)",
"tid": "319"
},
{
"category": "Addressed problems",
"name": "Technical attacks against government critics, journalists, and/or human rights organizations (Cyberattacks)",
"tid": "351"
},
{
"category": "Addressed problems",
"name": "Localized or nationwide communications shut down or throttling (Blackouts)",
"tid": "320"
},
{
"category": "Addressed problems",
"name": "Physical intimidation, arrest, violence (including device seizure or destruction), and death for political or social reasons",
"tid": "354"
},
{
"category": "Addressed problems",
"name": "Pro-government manipulation of online discussions (propaganda, imitation content, and/or sockpuppets)",
"tid": "353"
},
{
"category": "Addressed problems",
"name": "Repressive surveillance or monitoring of communication",
"tid": "321"
},
{
"category": "Addressed problems",
"name": "Policies, laws, or directives that increase surveillance, censorship, and punishment",
"tid": "352"
},
{
"category": "Addressed problems",
"name": "Government practices that hold intermediaries (social networks or ISPs) liable for user content",
"tid": "355"
},
{
"category": "Addressed problems",
"name": "Prohibitive cost to access the Internet",
"tid": "357"
},
{
"category": "Addressed problems",
"name": "Other",
"tid": "327"
},
{
"category": "Focus",
"name": "Access to the Internet",
"tid": "2"
},
{
"category": "Focus",
"name": "Awareness of privacy and security threats",
"tid": "363"
},
{
"category": "Focus",
"name": "Privacy enhancement",
"tid": "3"
},
{
"category": "Focus",
"name": "Security from danger or threat online",
"tid": "1"
},
{
"category": "Objective(s)",
"name": "Deploying technology",
"tid": "362"
},
{
"category": "Objective(s)",
"name": "Software or hardware development",
"tid": "360"
},
{
"category": "Objective(s)",
"name": "Testing",
"tid": "333"
},
{
"category": "Objective(s)",
"name": "Research",
"tid": "361"
},
{
"category": "Objective(s)",
"name": "Technology development",
"tid": "330"
},
{
"category": "Objective(s)",
"name": "Training",
"tid": "331"
},
{
"category": "Beneficiaries",
"name": "General public",
"tid": "335"
},
{
"category": "Beneficiaries",
"name": "Women",
"tid": "342"
},
{
"category": "Beneficiaries",
"name": "Youth",
"tid": "343"
},
{
"category": "Beneficiaries",
"name": "Sexual minorities",
"tid": "345"
},
{
"category": "Beneficiaries",
"name": "Ethnic minorities",
"tid": "344"
},
{
"category": "Beneficiaries",
"name": "Activists",
"tid": "336"
},
{
"category": "Beneficiaries",
"name": "Journalists",
"tid": "337"
},
{
"category": "Beneficiaries",
"name": "Advocacy groups/NGOs",
"tid": "338"
},
{
"category": "Beneficiaries",
"name": "Academia",
"tid": "339"
},
{
"category": "Beneficiaries",
"name": "Technologists",
"tid": "340"
},
{
"category": "Beneficiaries",
"name": "Entrepreneurs",
"tid": "359"
},
{
"category": "Beneficiaries",
"name": "Government",
"tid": "341"
},
{
"category": "Beneficiaries",
"name": "Other",
"tid": "350"
},
{
"category": "Region(s)",
"name": "Global",
"tid": "271"
},
{
"category": "Region(s)",
"name": "North Africa and Middle east",
"tid": "358"
},
{
"category": "Region(s)",
"name": "East Africa",
"tid": "369"
},
{
"category": "Region(s)",
"name": "West Africa",
"tid": "370"
},
{
"category": "Region(s)",
"name": "South Africa",
"tid": "272"
},
{
"category": "Region(s)",
"name": "North Asia and Russia",
"tid": "371"
},
{
"category": "Region(s)",
"name": "Central Asia",
"tid": "274"
},
{
"category": "Region(s)",
"name": "East Asia",
"tid": "372"
},
{
"category": "Region(s)",
"name": "South Asia",
"tid": "373"
},
{
"category": "Region(s)",
"name": "South-East Asia",
"tid": "365"
},
{
"category": "Region(s)",
"name": "Eastern Europe",
"tid": "291"
},
{
"category": "Region(s)",
"name": "Central America",
"tid": "273"
},
{
"category": "Region(s)",
"name": "Caribbean",
"tid": "374"
},
{
"category": "Region(s)",
"name": "Andean",
"tid": "375"
},
{
"category": "Region(s)",
"name": "Southern cone",
"tid": "376"
},
{
"category": "Project status",
"name": "Just an Idea (Pre-alpha)",
"tid": "329"
},
{
"category": "Project status",
"name": "It Exists! (Alpha/Beta)",
"tid": "328"
},
{
"category": "Project status",
"name": "It's basically done. (Release)",
"tid": "366"
},
{
"category": "Project status",
"name": "People Use It. (Production)",
"tid": "367"
},
{
"tid": "6",
"name": "Afghanistan",
"category": "Countries"
},
{
"tid": "7",
"name": "Aland Islands",
"category": "Countries"
},
{
"tid": "8",
"name": "Albania",
"category": "Countries"
},
{
"tid": "9",
"name": "Algeria",
"category": "Countries"
},
{
"tid": "10",
"name": "American Samoa",
"category": "Countries"
},
{
"tid": "11",
"name": "Andorra",
"category": "Countries"
},
{
"tid": "12",
"name": "Angola",
"category": "Countries"
},
{
"tid": "13",
"name": "Anguilla",
"category": "Countries"
},
{
"tid": "14",
"name": "Antarctica",
"category": "Countries"
},
{
"tid": "15",
"name": "Antigua and Barbuda",
"category": "Countries"
},
{
"tid": "16",
"name": "Argentina",
"category": "Countries"
},
{
"tid": "17",
"name": "Armenia",
"category": "Countries"
},
{
"tid": "18",
"name": "Aruba",
"category": "Countries"
},
{
"tid": "19",
"name": "Australia",
"category": "Countries"
},
{
"tid": "20",
"name": "Austria",
"category": "Countries"
},
{
"tid": "21",
"name": "Azerbaijan",
"category": "Countries"
},
{
"tid": "22",
"name": "Bahamas",
"category": "Countries"
},
{
"tid": "23",
"name": "Bahrain",
"category": "Countries"
},
{
"tid": "24",
"name": "Bangladesh",
"category": "Countries"
},
{
"tid": "25",
"name": "Barbados",
"category": "Countries"
},
{
"tid": "26",
"name": "Belarus",
"category": "Countries"
},
{
"tid": "27",
"name": "Belgium",
"category": "Countries"
},
{
"tid": "28",
"name": "Belize",
"category": "Countries"
},
{
"tid": "29",
"name": "Benin",
"category": "Countries"
},
{
"tid": "30",
"name": "Bermuda",
"category": "Countries"
},
{
"tid": "31",
"name": "Bhutan",
"category": "Countries"
},
{
"tid": "32",
"name": "Bolivia",
"category": "Countries"
},
{
"tid": "33",
"name": "Bosnia and Herzegovina",
"category": "Countries"
},
{
"tid": "34",
"name": "Botswana",
"category": "Countries"
},
{
"tid": "35",
"name": "Bouvet Island",
"category": "Countries"
},
{
"tid": "36",
"name": "Brazil",
"category": "Countries"
},
{
"tid": "37",
"name": "British Indian Ocean Territory",
"category": "Countries"
},
{
"tid": "38",
"name": "British Virgin Islands",
"category": "Countries"
},
{
"tid": "39",
"name": "Brunei",
"category": "Countries"
},
{
"tid": "40",
"name": "Bulgaria",
"category": "Countries"
},
{
"tid": "41",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "42",
"name": "Burundi",
"category": "Countries"
},
{
"tid": "43",
"name": "Cambodia",
"category": "Countries"
},
{
"tid": "44",
"name": "Cameroon",
"category": "Countries"
},
{
"tid": "45",
"name": "Canada",
"category": "Countries"
},
{
"tid": "46",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "47",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "48",
"name": "Central African Republic",
"category": "Countries"
},
{
"tid": "49",
"name": "Chad",
"category": "Countries"
},
{
"tid": "50",
"name": "Chile",
"category": "Countries"
},
{
"tid": "51",
"name": "China",
"category": "Countries"
},
{
"tid": "52",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "53",
"name": "Cocos (Keel<NAME>",
"category": "Countries"
},
{
"tid": "54",
"name": "Colombia",
"category": "Countries"
},
{
"tid": "55",
"name": "Comoros",
"category": "Countries"
},
{
"tid": "56",
"name": "Congo (Brazzaville)",
"category": "Countries"
},
{
"tid": "57",
"name": "Congo (Kinshasa)",
"category": "Countries"
},
{
"tid": "58",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "59",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "60",
"name": "Croatia",
"category": "Countries"
},
{
"tid": "61",
"name": "Cuba",
"category": "Countries"
},
{
"tid": "62",
"name": "Curaçao",
"category": "Countries"
},
{
"tid": "63",
"name": "Cyprus",
"category": "Countries"
},
{
"tid": "64",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "65",
"name": "Denmark",
"category": "Countries"
},
{
"tid": "66",
"name": "Djibouti",
"category": "Countries"
},
{
"tid": "67",
"name": "Dominica",
"category": "Countries"
},
{
"tid": "68",
"name": "Dominican Republic",
"category": "Countries"
},
{
"tid": "69",
"name": "Ecuador",
"category": "Countries"
},
{
"tid": "70",
"name": "Egypt",
"category": "Countries"
},
{
"tid": "71",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "72",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "73",
"name": "Eritrea",
"category": "Countries"
},
{
"tid": "74",
"name": "Estonia",
"category": "Countries"
},
{
"tid": "75",
"name": "Ethiopia",
"category": "Countries"
},
{
"tid": "76",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "77",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "78",
"name": "Fiji",
"category": "Countries"
},
{
"tid": "79",
"name": "Finland",
"category": "Countries"
},
{
"tid": "80",
"name": "France",
"category": "Countries"
},
{
"tid": "81",
"name": "<NAME>ana",
"category": "Countries"
},
{
"tid": "82",
"name": "French Polynesia",
"category": "Countries"
},
{
"tid": "83",
"name": "French Southern Territories",
"category": "Countries"
},
{
"tid": "84",
"name": "Gabon",
"category": "Countries"
},
{
"tid": "85",
"name": "Gambia",
"category": "Countries"
},
{
"tid": "86",
"name": "Georgia",
"category": "Countries"
},
{
"tid": "87",
"name": "Germany",
"category": "Countries"
},
{
"tid": "88",
"name": "Ghana",
"category": "Countries"
},
{
"tid": "89",
"name": "Gibraltar",
"category": "Countries"
},
{
"tid": "90",
"name": "Greece",
"category": "Countries"
},
{
"tid": "91",
"name": "Greenland",
"category": "Countries"
},
{
"tid": "92",
"name": "Grenada",
"category": "Countries"
},
{
"tid": "93",
"name": "Guadeloupe",
"category": "Countries"
},
{
"tid": "94",
"name": "Guam",
"category": "Countries"
},
{
"tid": "95",
"name": "Guatemala",
"category": "Countries"
},
{
"tid": "96",
"name": "Guernsey",
"category": "Countries"
},
{
"tid": "97",
"name": "Guinea",
"category": "Countries"
},
{
"tid": "98",
"name": "Guinea-Bissau",
"category": "Countries"
},
{
"tid": "99",
"name": "Guyana",
"category": "Countries"
},
{
"tid": "100",
"name": "Haiti",
"category": "Countries"
},
{
"tid": "101",
"name": "<NAME> <NAME>",
"category": "Countries"
},
{
"tid": "102",
"name": "Honduras",
"category": "Countries"
},
{
"tid": "103",
"name": "Hungary",
"category": "Countries"
},
{
"tid": "104",
"name": "Iceland",
"category": "Countries"
},
{
"tid": "105",
"name": "India",
"category": "Countries"
},
{
"tid": "106",
"name": "Indonesia",
"category": "Countries"
},
{
"tid": "107",
"name": "Iran",
"category": "Countries"
},
{
"tid": "108",
"name": "Iraq",
"category": "Countries"
},
{
"tid": "109",
"name": "Ireland",
"category": "Countries"
},
{
"tid": "110",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "111",
"name": "Israel",
"category": "Countries"
},
{
"tid": "112",
"name": "Italy",
"category": "Countries"
},
{
"tid": "113",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "114",
"name": "Jamaica",
"category": "Countries"
},
{
"tid": "115",
"name": "Japan",
"category": "Countries"
},
{
"tid": "116",
"name": "Jersey",
"category": "Countries"
},
{
"tid": "117",
"name": "Jordan",
"category": "Countries"
},
{
"tid": "118",
"name": "Kazakhstan",
"category": "Countries"
},
{
"tid": "119",
"name": "Kenya",
"category": "Countries"
},
{
"tid": "120",
"name": "Kiribati",
"category": "Countries"
},
{
"tid": "488",
"name": "Kosovo",
"category": "Countries"
},
{
"tid": "121",
"name": "Kuwait",
"category": "Countries"
},
{
"tid": "122",
"name": "Kyrgyzstan",
"category": "Countries"
},
{
"tid": "123",
"name": "Laos",
"category": "Countries"
},
{
"tid": "124",
"name": "Latvia",
"category": "Countries"
},
{
"tid": "125",
"name": "Lebanon",
"category": "Countries"
},
{
"tid": "126",
"name": "Lesotho",
"category": "Countries"
},
{
"tid": "127",
"name": "Liberia",
"category": "Countries"
},
{
"tid": "128",
"name": "Libya",
"category": "Countries"
},
{
"tid": "129",
"name": "Liechtenstein",
"category": "Countries"
},
{
"tid": "130",
"name": "Lithuania",
"category": "Countries"
},
{
"tid": "131",
"name": "Luxembourg",
"category": "Countries"
},
{
"tid": "132",
"name": "Macedonia",
"category": "Countries"
},
{
"tid": "133",
"name": "Madagascar",
"category": "Countries"
},
{
"tid": "134",
"name": "Malawi",
"category": "Countries"
},
{
"tid": "135",
"name": "Malaysia",
"category": "Countries"
},
{
"tid": "136",
"name": "Maldives",
"category": "Countries"
},
{
"tid": "137",
"name": "Mali",
"category": "Countries"
},
{
"tid": "138",
"name": "Malta",
"category": "Countries"
},
{
"tid": "139",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "140",
"name": "Martinique",
"category": "Countries"
},
{
"tid": "141",
"name": "Mauritania",
"category": "Countries"
},
{
"tid": "142",
"name": "Mauritius",
"category": "Countries"
},
{
"tid": "143",
"name": "Mayotte",
"category": "Countries"
},
{
"tid": "144",
"name": "Mexico",
"category": "Countries"
},
{
"tid": "145",
"name": "Micronesia",
"category": "Countries"
},
{
"tid": "146",
"name": "Moldova",
"category": "Countries"
},
{
"tid": "147",
"name": "Monaco",
"category": "Countries"
},
{
"tid": "148",
"name": "Mongolia",
"category": "Countries"
},
{
"tid": "149",
"name": "Montenegro",
"category": "Countries"
},
{
"tid": "150",
"name": "Montserrat",
"category": "Countries"
},
{
"tid": "151",
"name": "Morocco",
"category": "Countries"
},
{
"tid": "152",
"name": "Mozambique",
"category": "Countries"
},
{
"tid": "153",
"name": "Myanmar",
"category": "Countries"
},
{
"tid": "154",
"name": "Namibia",
"category": "Countries"
},
{
"tid": "155",
"name": "Nauru",
"category": "Countries"
},
{
"tid": "156",
"name": "Nepal",
"category": "Countries"
},
{
"tid": "157",
"name": "Netherlands",
"category": "Countries"
},
{
"tid": "158",
"name": "Netherlands Antilles",
"category": "Countries"
},
{
"tid": "159",
"name": "New Caledonia",
"category": "Countries"
},
{
"tid": "160",
"name": "New Zealand",
"category": "Countries"
},
{
"tid": "161",
"name": "Nicaragua",
"category": "Countries"
},
{
"tid": "162",
"name": "Niger",
"category": "Countries"
},
{
"tid": "163",
"name": "Nigeria",
"category": "Countries"
},
{
"tid": "164",
"name": "Niue",
"category": "Countries"
},
{
"tid": "165",
"name": "Norfolk Island",
"category": "Countries"
},
{
"tid": "166",
"name": "North Korea",
"category": "Countries"
},
{
"tid": "167",
"name": "Northern Mariana Islands",
"category": "Countries"
},
{
"tid": "168",
"name": "Norway",
"category": "Countries"
},
{
"tid": "169",
"name": "Oman",
"category": "Countries"
},
{
"tid": "170",
"name": "Pakistan",
"category": "Countries"
},
{
"tid": "171",
"name": "Palau",
"category": "Countries"
},
{
"tid": "172",
"name": "Palestinian Territory",
"category": "Countries"
},
{
"tid": "173",
"name": "Panama",
"category": "Countries"
},
{
"tid": "174",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "175",
"name": "Paraguay",
"category": "Countries"
},
{
"tid": "176",
"name": "Peru",
"category": "Countries"
},
{
"tid": "177",
"name": "Philippines",
"category": "Countries"
},
{
"tid": "178",
"name": "Pitcairn",
"category": "Countries"
},
{
"tid": "179",
"name": "Poland",
"category": "Countries"
},
{
"tid": "180",
"name": "Portugal",
"category": "Countries"
},
{
"tid": "181",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "182",
"name": "Qatar",
"category": "Countries"
},
{
"tid": "183",
"name": "Reunion",
"category": "Countries"
},
{
"tid": "184",
"name": "Romania",
"category": "Countries"
},
{
"tid": "185",
"name": "Russia",
"category": "Countries"
},
{
"tid": "186",
"name": "Rwanda",
"category": "Countries"
},
{
"tid": "187",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "188",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "189",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "190",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "191",
"name": "<NAME> (French part)",
"category": "Countries"
},
{
"tid": "192",
"name": "<NAME> Miquelon",
"category": "Countries"
},
{
"tid": "193",
"name": "<NAME> and <NAME>",
"category": "Countries"
},
{
"tid": "194",
"name": "Samoa",
"category": "Countries"
},
{
"tid": "195",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "196",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "197",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "198",
"name": "Senegal",
"category": "Countries"
},
{
"tid": "199",
"name": "Serbia",
"category": "Countries"
},
{
"tid": "200",
"name": "Seychelles",
"category": "Countries"
},
{
"tid": "201",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "202",
"name": "Singapore",
"category": "Countries"
},
{
"tid": "203",
"name": "Slovakia",
"category": "Countries"
},
{
"tid": "204",
"name": "Slovenia",
"category": "Countries"
},
{
"tid": "205",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "206",
"name": "Somalia",
"category": "Countries"
},
{
"tid": "207",
"name": "South Africa",
"category": "Countries"
},
{
"tid": "208",
"name": "South Georgia and the South Sandwich Islands",
"category": "Countries"
},
{
"tid": "209",
"name": "South Korea",
"category": "Countries"
},
{
"tid": "476",
"name": "South Sudan",
"category": "Countries"
},
{
"tid": "210",
"name": "Spain",
"category": "Countries"
},
{
"tid": "211",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "212",
"name": "Sudan",
"category": "Countries"
},
{
"tid": "213",
"name": "Suriname",
"category": "Countries"
},
{
"tid": "214",
"name": "<NAME> <NAME>",
"category": "Countries"
},
{
"tid": "215",
"name": "Swaziland",
"category": "Countries"
},
{
"tid": "216",
"name": "Sweden",
"category": "Countries"
},
{
"tid": "217",
"name": "Switzerland",
"category": "Countries"
},
{
"tid": "218",
"name": "Syria",
"category": "Countries"
},
{
"tid": "219",
"name": "Taiwan",
"category": "Countries"
},
{
"tid": "220",
"name": "Tajikistan",
"category": "Countries"
},
{
"tid": "221",
"name": "Tanzania",
"category": "Countries"
},
{
"tid": "222",
"name": "Thailand",
"category": "Countries"
},
{
"tid": "223",
"name": "Timor-Leste",
"category": "Countries"
},
{
"tid": "224",
"name": "Togo",
"category": "Countries"
},
{
"tid": "225",
"name": "Tokelau",
"category": "Countries"
},
{
"tid": "226",
"name": "Tonga",
"category": "Countries"
},
{
"tid": "227",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "228",
"name": "Tunisia",
"category": "Countries"
},
{
"tid": "229",
"name": "Turkey",
"category": "Countries"
},
{
"tid": "230",
"name": "Turkmenistan",
"category": "Countries"
},
{
"tid": "231",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "232",
"name": "Tuvalu",
"category": "Countries"
},
{
"tid": "233",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "234",
"name": "Uganda",
"category": "Countries"
},
{
"tid": "235",
"name": "Ukraine",
"category": "Countries"
},
{
"tid": "236",
"name": "United Arab Emirates",
"category": "Countries"
},
{
"tid": "237",
"name": "United Kingdom",
"category": "Countries"
},
{
"tid": "238",
"name": "United States",
"category": "Countries"
},
{
"tid": "239",
"name": "United States Minor Outlying Islands",
"category": "Countries"
},
{
"tid": "240",
"name": "Uruguay",
"category": "Countries"
},
{
"tid": "241",
"name": "Uzbekistan",
"category": "Countries"
},
{
"tid": "242",
"name": "Vanuatu",
"category": "Countries"
},
{
"tid": "243",
"name": "Vatican",
"category": "Countries"
},
{
"tid": "244",
"name": "Venezuela",
"category": "Countries"
},
{
"tid": "245",
"name": "Vietnam",
"category": "Countries"
},
{
"tid": "246",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "247",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "248",
"name": "Yemen",
"category": "Countries"
},
{
"tid": "249",
"name": "Zambia",
"category": "Countries"
},
{
"tid": "250",
"name": "Zimbabwe",
"category": "Countries"
},
{
"tid": "302",
"category": "Technology attributes",
"name": "Anonymity"
},
{
"tid": "303",
"category": "Technology attributes",
"name": "Application deployment"
},
{
"tid": "298",
"category": "Technology attributes",
"name": "Browser extension"
},
{
"tid": "299",
"category": "Technology attributes",
"name": "Browser plugin"
},
{
"tid": "308",
"category": "Technology attributes",
"name": "Cryptography"
},
{
"tid": "310",
"category": "Technology attributes",
"name": "Dependency integration"
},
{
"tid": "312",
"category": "Technology attributes",
"name": "Desktop App"
},
{
"tid": "311",
"category": "Technology attributes",
"name": "Desktop client"
},
{
"tid": "316",
"category": "Technology attributes",
"name": "Hardware/Embedded device(s)"
},
{
"tid": "347",
"category": "Technology attributes",
"name": "Infrastructure as a service (IaaS)"
},
{
"tid": "306",
"category": "Technology attributes",
"name": "Mobile application (clientside)"
},
{
"tid": "314",
"category": "Technology attributes",
"name": "Networking"
},
{
"tid": "356",
"category": "Technology attributes",
"name": "Not applicable"
},
{
"tid": "317",
"category": "Technology attributes",
"name": "Other"
},
{
"tid": "346",
"category": "Technology attributes",
"name": "Platform as a service (PaaS)"
},
{
"tid": "349",
"category": "Technology attributes",
"name": "Reverse Engineering"
},
{
"tid": "313",
"category": "Technology attributes",
"name": "Sensitive data"
},
{
"tid": "305",
"category": "Technology attributes",
"name": "Server daemon"
},
{
"tid": "315",
"category": "Technology attributes",
"name": "Software as a Service (SaaS)"
},
{
"tid": "300",
"category": "Technology attributes",
"name": "Unmanaged language"
},
{
"tid": "301",
"category": "Technology attributes",
"name": "User interface/experience"
},
{
"tid": "307",
"category": "Technology attributes",
"name": "Web API/Mobile application (serverside)"
},
{
"tid": "304",
"category": "Technology attributes",
"name": "Web application"
},
{
"tid": "309",
"category": "Technology attributes",
"name": "Wireless Communication"
}
]
| # The global categories are fairly static so using a static seed.
CATEGORIES = [
{
"category": "Addressed problems",
"name": "Restrictive Internet filtering by technical methods (IP blocking, DNS filtering, TCP RST, DPI, etc.)",
"tid": "318"
},
{
"category": "Addressed problems",
"name": "Blocking, filtering, or modification of political, social, and/or religious content (including apps)",
"tid": "319"
},
{
"category": "Addressed problems",
"name": "Technical attacks against government critics, journalists, and/or human rights organizations (Cyberattacks)",
"tid": "351"
},
{
"category": "Addressed problems",
"name": "Localized or nationwide communications shut down or throttling (Blackouts)",
"tid": "320"
},
{
"category": "Addressed problems",
"name": "Physical intimidation, arrest, violence (including device seizure or destruction), and death for political or social reasons",
"tid": "354"
},
{
"category": "Addressed problems",
"name": "Pro-government manipulation of online discussions (propaganda, imitation content, and/or sockpuppets)",
"tid": "353"
},
{
"category": "Addressed problems",
"name": "Repressive surveillance or monitoring of communication",
"tid": "321"
},
{
"category": "Addressed problems",
"name": "Policies, laws, or directives that increase surveillance, censorship, and punishment",
"tid": "352"
},
{
"category": "Addressed problems",
"name": "Government practices that hold intermediaries (social networks or ISPs) liable for user content",
"tid": "355"
},
{
"category": "Addressed problems",
"name": "Prohibitive cost to access the Internet",
"tid": "357"
},
{
"category": "Addressed problems",
"name": "Other",
"tid": "327"
},
{
"category": "Focus",
"name": "Access to the Internet",
"tid": "2"
},
{
"category": "Focus",
"name": "Awareness of privacy and security threats",
"tid": "363"
},
{
"category": "Focus",
"name": "Privacy enhancement",
"tid": "3"
},
{
"category": "Focus",
"name": "Security from danger or threat online",
"tid": "1"
},
{
"category": "Objective(s)",
"name": "Deploying technology",
"tid": "362"
},
{
"category": "Objective(s)",
"name": "Software or hardware development",
"tid": "360"
},
{
"category": "Objective(s)",
"name": "Testing",
"tid": "333"
},
{
"category": "Objective(s)",
"name": "Research",
"tid": "361"
},
{
"category": "Objective(s)",
"name": "Technology development",
"tid": "330"
},
{
"category": "Objective(s)",
"name": "Training",
"tid": "331"
},
{
"category": "Beneficiaries",
"name": "General public",
"tid": "335"
},
{
"category": "Beneficiaries",
"name": "Women",
"tid": "342"
},
{
"category": "Beneficiaries",
"name": "Youth",
"tid": "343"
},
{
"category": "Beneficiaries",
"name": "Sexual minorities",
"tid": "345"
},
{
"category": "Beneficiaries",
"name": "Ethnic minorities",
"tid": "344"
},
{
"category": "Beneficiaries",
"name": "Activists",
"tid": "336"
},
{
"category": "Beneficiaries",
"name": "Journalists",
"tid": "337"
},
{
"category": "Beneficiaries",
"name": "Advocacy groups/NGOs",
"tid": "338"
},
{
"category": "Beneficiaries",
"name": "Academia",
"tid": "339"
},
{
"category": "Beneficiaries",
"name": "Technologists",
"tid": "340"
},
{
"category": "Beneficiaries",
"name": "Entrepreneurs",
"tid": "359"
},
{
"category": "Beneficiaries",
"name": "Government",
"tid": "341"
},
{
"category": "Beneficiaries",
"name": "Other",
"tid": "350"
},
{
"category": "Region(s)",
"name": "Global",
"tid": "271"
},
{
"category": "Region(s)",
"name": "North Africa and Middle east",
"tid": "358"
},
{
"category": "Region(s)",
"name": "East Africa",
"tid": "369"
},
{
"category": "Region(s)",
"name": "West Africa",
"tid": "370"
},
{
"category": "Region(s)",
"name": "South Africa",
"tid": "272"
},
{
"category": "Region(s)",
"name": "North Asia and Russia",
"tid": "371"
},
{
"category": "Region(s)",
"name": "Central Asia",
"tid": "274"
},
{
"category": "Region(s)",
"name": "East Asia",
"tid": "372"
},
{
"category": "Region(s)",
"name": "South Asia",
"tid": "373"
},
{
"category": "Region(s)",
"name": "South-East Asia",
"tid": "365"
},
{
"category": "Region(s)",
"name": "Eastern Europe",
"tid": "291"
},
{
"category": "Region(s)",
"name": "Central America",
"tid": "273"
},
{
"category": "Region(s)",
"name": "Caribbean",
"tid": "374"
},
{
"category": "Region(s)",
"name": "Andean",
"tid": "375"
},
{
"category": "Region(s)",
"name": "Southern cone",
"tid": "376"
},
{
"category": "Project status",
"name": "Just an Idea (Pre-alpha)",
"tid": "329"
},
{
"category": "Project status",
"name": "It Exists! (Alpha/Beta)",
"tid": "328"
},
{
"category": "Project status",
"name": "It's basically done. (Release)",
"tid": "366"
},
{
"category": "Project status",
"name": "People Use It. (Production)",
"tid": "367"
},
{
"tid": "6",
"name": "Afghanistan",
"category": "Countries"
},
{
"tid": "7",
"name": "Aland Islands",
"category": "Countries"
},
{
"tid": "8",
"name": "Albania",
"category": "Countries"
},
{
"tid": "9",
"name": "Algeria",
"category": "Countries"
},
{
"tid": "10",
"name": "American Samoa",
"category": "Countries"
},
{
"tid": "11",
"name": "Andorra",
"category": "Countries"
},
{
"tid": "12",
"name": "Angola",
"category": "Countries"
},
{
"tid": "13",
"name": "Anguilla",
"category": "Countries"
},
{
"tid": "14",
"name": "Antarctica",
"category": "Countries"
},
{
"tid": "15",
"name": "Antigua and Barbuda",
"category": "Countries"
},
{
"tid": "16",
"name": "Argentina",
"category": "Countries"
},
{
"tid": "17",
"name": "Armenia",
"category": "Countries"
},
{
"tid": "18",
"name": "Aruba",
"category": "Countries"
},
{
"tid": "19",
"name": "Australia",
"category": "Countries"
},
{
"tid": "20",
"name": "Austria",
"category": "Countries"
},
{
"tid": "21",
"name": "Azerbaijan",
"category": "Countries"
},
{
"tid": "22",
"name": "Bahamas",
"category": "Countries"
},
{
"tid": "23",
"name": "Bahrain",
"category": "Countries"
},
{
"tid": "24",
"name": "Bangladesh",
"category": "Countries"
},
{
"tid": "25",
"name": "Barbados",
"category": "Countries"
},
{
"tid": "26",
"name": "Belarus",
"category": "Countries"
},
{
"tid": "27",
"name": "Belgium",
"category": "Countries"
},
{
"tid": "28",
"name": "Belize",
"category": "Countries"
},
{
"tid": "29",
"name": "Benin",
"category": "Countries"
},
{
"tid": "30",
"name": "Bermuda",
"category": "Countries"
},
{
"tid": "31",
"name": "Bhutan",
"category": "Countries"
},
{
"tid": "32",
"name": "Bolivia",
"category": "Countries"
},
{
"tid": "33",
"name": "Bosnia and Herzegovina",
"category": "Countries"
},
{
"tid": "34",
"name": "Botswana",
"category": "Countries"
},
{
"tid": "35",
"name": "Bouvet Island",
"category": "Countries"
},
{
"tid": "36",
"name": "Brazil",
"category": "Countries"
},
{
"tid": "37",
"name": "British Indian Ocean Territory",
"category": "Countries"
},
{
"tid": "38",
"name": "British Virgin Islands",
"category": "Countries"
},
{
"tid": "39",
"name": "Brunei",
"category": "Countries"
},
{
"tid": "40",
"name": "Bulgaria",
"category": "Countries"
},
{
"tid": "41",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "42",
"name": "Burundi",
"category": "Countries"
},
{
"tid": "43",
"name": "Cambodia",
"category": "Countries"
},
{
"tid": "44",
"name": "Cameroon",
"category": "Countries"
},
{
"tid": "45",
"name": "Canada",
"category": "Countries"
},
{
"tid": "46",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "47",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "48",
"name": "Central African Republic",
"category": "Countries"
},
{
"tid": "49",
"name": "Chad",
"category": "Countries"
},
{
"tid": "50",
"name": "Chile",
"category": "Countries"
},
{
"tid": "51",
"name": "China",
"category": "Countries"
},
{
"tid": "52",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "53",
"name": "Cocos (Keel<NAME>",
"category": "Countries"
},
{
"tid": "54",
"name": "Colombia",
"category": "Countries"
},
{
"tid": "55",
"name": "Comoros",
"category": "Countries"
},
{
"tid": "56",
"name": "Congo (Brazzaville)",
"category": "Countries"
},
{
"tid": "57",
"name": "Congo (Kinshasa)",
"category": "Countries"
},
{
"tid": "58",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "59",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "60",
"name": "Croatia",
"category": "Countries"
},
{
"tid": "61",
"name": "Cuba",
"category": "Countries"
},
{
"tid": "62",
"name": "Curaçao",
"category": "Countries"
},
{
"tid": "63",
"name": "Cyprus",
"category": "Countries"
},
{
"tid": "64",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "65",
"name": "Denmark",
"category": "Countries"
},
{
"tid": "66",
"name": "Djibouti",
"category": "Countries"
},
{
"tid": "67",
"name": "Dominica",
"category": "Countries"
},
{
"tid": "68",
"name": "Dominican Republic",
"category": "Countries"
},
{
"tid": "69",
"name": "Ecuador",
"category": "Countries"
},
{
"tid": "70",
"name": "Egypt",
"category": "Countries"
},
{
"tid": "71",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "72",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "73",
"name": "Eritrea",
"category": "Countries"
},
{
"tid": "74",
"name": "Estonia",
"category": "Countries"
},
{
"tid": "75",
"name": "Ethiopia",
"category": "Countries"
},
{
"tid": "76",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "77",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "78",
"name": "Fiji",
"category": "Countries"
},
{
"tid": "79",
"name": "Finland",
"category": "Countries"
},
{
"tid": "80",
"name": "France",
"category": "Countries"
},
{
"tid": "81",
"name": "<NAME>ana",
"category": "Countries"
},
{
"tid": "82",
"name": "French Polynesia",
"category": "Countries"
},
{
"tid": "83",
"name": "French Southern Territories",
"category": "Countries"
},
{
"tid": "84",
"name": "Gabon",
"category": "Countries"
},
{
"tid": "85",
"name": "Gambia",
"category": "Countries"
},
{
"tid": "86",
"name": "Georgia",
"category": "Countries"
},
{
"tid": "87",
"name": "Germany",
"category": "Countries"
},
{
"tid": "88",
"name": "Ghana",
"category": "Countries"
},
{
"tid": "89",
"name": "Gibraltar",
"category": "Countries"
},
{
"tid": "90",
"name": "Greece",
"category": "Countries"
},
{
"tid": "91",
"name": "Greenland",
"category": "Countries"
},
{
"tid": "92",
"name": "Grenada",
"category": "Countries"
},
{
"tid": "93",
"name": "Guadeloupe",
"category": "Countries"
},
{
"tid": "94",
"name": "Guam",
"category": "Countries"
},
{
"tid": "95",
"name": "Guatemala",
"category": "Countries"
},
{
"tid": "96",
"name": "Guernsey",
"category": "Countries"
},
{
"tid": "97",
"name": "Guinea",
"category": "Countries"
},
{
"tid": "98",
"name": "Guinea-Bissau",
"category": "Countries"
},
{
"tid": "99",
"name": "Guyana",
"category": "Countries"
},
{
"tid": "100",
"name": "Haiti",
"category": "Countries"
},
{
"tid": "101",
"name": "<NAME> <NAME>",
"category": "Countries"
},
{
"tid": "102",
"name": "Honduras",
"category": "Countries"
},
{
"tid": "103",
"name": "Hungary",
"category": "Countries"
},
{
"tid": "104",
"name": "Iceland",
"category": "Countries"
},
{
"tid": "105",
"name": "India",
"category": "Countries"
},
{
"tid": "106",
"name": "Indonesia",
"category": "Countries"
},
{
"tid": "107",
"name": "Iran",
"category": "Countries"
},
{
"tid": "108",
"name": "Iraq",
"category": "Countries"
},
{
"tid": "109",
"name": "Ireland",
"category": "Countries"
},
{
"tid": "110",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "111",
"name": "Israel",
"category": "Countries"
},
{
"tid": "112",
"name": "Italy",
"category": "Countries"
},
{
"tid": "113",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "114",
"name": "Jamaica",
"category": "Countries"
},
{
"tid": "115",
"name": "Japan",
"category": "Countries"
},
{
"tid": "116",
"name": "Jersey",
"category": "Countries"
},
{
"tid": "117",
"name": "Jordan",
"category": "Countries"
},
{
"tid": "118",
"name": "Kazakhstan",
"category": "Countries"
},
{
"tid": "119",
"name": "Kenya",
"category": "Countries"
},
{
"tid": "120",
"name": "Kiribati",
"category": "Countries"
},
{
"tid": "488",
"name": "Kosovo",
"category": "Countries"
},
{
"tid": "121",
"name": "Kuwait",
"category": "Countries"
},
{
"tid": "122",
"name": "Kyrgyzstan",
"category": "Countries"
},
{
"tid": "123",
"name": "Laos",
"category": "Countries"
},
{
"tid": "124",
"name": "Latvia",
"category": "Countries"
},
{
"tid": "125",
"name": "Lebanon",
"category": "Countries"
},
{
"tid": "126",
"name": "Lesotho",
"category": "Countries"
},
{
"tid": "127",
"name": "Liberia",
"category": "Countries"
},
{
"tid": "128",
"name": "Libya",
"category": "Countries"
},
{
"tid": "129",
"name": "Liechtenstein",
"category": "Countries"
},
{
"tid": "130",
"name": "Lithuania",
"category": "Countries"
},
{
"tid": "131",
"name": "Luxembourg",
"category": "Countries"
},
{
"tid": "132",
"name": "Macedonia",
"category": "Countries"
},
{
"tid": "133",
"name": "Madagascar",
"category": "Countries"
},
{
"tid": "134",
"name": "Malawi",
"category": "Countries"
},
{
"tid": "135",
"name": "Malaysia",
"category": "Countries"
},
{
"tid": "136",
"name": "Maldives",
"category": "Countries"
},
{
"tid": "137",
"name": "Mali",
"category": "Countries"
},
{
"tid": "138",
"name": "Malta",
"category": "Countries"
},
{
"tid": "139",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "140",
"name": "Martinique",
"category": "Countries"
},
{
"tid": "141",
"name": "Mauritania",
"category": "Countries"
},
{
"tid": "142",
"name": "Mauritius",
"category": "Countries"
},
{
"tid": "143",
"name": "Mayotte",
"category": "Countries"
},
{
"tid": "144",
"name": "Mexico",
"category": "Countries"
},
{
"tid": "145",
"name": "Micronesia",
"category": "Countries"
},
{
"tid": "146",
"name": "Moldova",
"category": "Countries"
},
{
"tid": "147",
"name": "Monaco",
"category": "Countries"
},
{
"tid": "148",
"name": "Mongolia",
"category": "Countries"
},
{
"tid": "149",
"name": "Montenegro",
"category": "Countries"
},
{
"tid": "150",
"name": "Montserrat",
"category": "Countries"
},
{
"tid": "151",
"name": "Morocco",
"category": "Countries"
},
{
"tid": "152",
"name": "Mozambique",
"category": "Countries"
},
{
"tid": "153",
"name": "Myanmar",
"category": "Countries"
},
{
"tid": "154",
"name": "Namibia",
"category": "Countries"
},
{
"tid": "155",
"name": "Nauru",
"category": "Countries"
},
{
"tid": "156",
"name": "Nepal",
"category": "Countries"
},
{
"tid": "157",
"name": "Netherlands",
"category": "Countries"
},
{
"tid": "158",
"name": "Netherlands Antilles",
"category": "Countries"
},
{
"tid": "159",
"name": "New Caledonia",
"category": "Countries"
},
{
"tid": "160",
"name": "New Zealand",
"category": "Countries"
},
{
"tid": "161",
"name": "Nicaragua",
"category": "Countries"
},
{
"tid": "162",
"name": "Niger",
"category": "Countries"
},
{
"tid": "163",
"name": "Nigeria",
"category": "Countries"
},
{
"tid": "164",
"name": "Niue",
"category": "Countries"
},
{
"tid": "165",
"name": "Norfolk Island",
"category": "Countries"
},
{
"tid": "166",
"name": "North Korea",
"category": "Countries"
},
{
"tid": "167",
"name": "Northern Mariana Islands",
"category": "Countries"
},
{
"tid": "168",
"name": "Norway",
"category": "Countries"
},
{
"tid": "169",
"name": "Oman",
"category": "Countries"
},
{
"tid": "170",
"name": "Pakistan",
"category": "Countries"
},
{
"tid": "171",
"name": "Palau",
"category": "Countries"
},
{
"tid": "172",
"name": "Palestinian Territory",
"category": "Countries"
},
{
"tid": "173",
"name": "Panama",
"category": "Countries"
},
{
"tid": "174",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "175",
"name": "Paraguay",
"category": "Countries"
},
{
"tid": "176",
"name": "Peru",
"category": "Countries"
},
{
"tid": "177",
"name": "Philippines",
"category": "Countries"
},
{
"tid": "178",
"name": "Pitcairn",
"category": "Countries"
},
{
"tid": "179",
"name": "Poland",
"category": "Countries"
},
{
"tid": "180",
"name": "Portugal",
"category": "Countries"
},
{
"tid": "181",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "182",
"name": "Qatar",
"category": "Countries"
},
{
"tid": "183",
"name": "Reunion",
"category": "Countries"
},
{
"tid": "184",
"name": "Romania",
"category": "Countries"
},
{
"tid": "185",
"name": "Russia",
"category": "Countries"
},
{
"tid": "186",
"name": "Rwanda",
"category": "Countries"
},
{
"tid": "187",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "188",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "189",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "190",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "191",
"name": "<NAME> (French part)",
"category": "Countries"
},
{
"tid": "192",
"name": "<NAME> Miquelon",
"category": "Countries"
},
{
"tid": "193",
"name": "<NAME> and <NAME>",
"category": "Countries"
},
{
"tid": "194",
"name": "Samoa",
"category": "Countries"
},
{
"tid": "195",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "196",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "197",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "198",
"name": "Senegal",
"category": "Countries"
},
{
"tid": "199",
"name": "Serbia",
"category": "Countries"
},
{
"tid": "200",
"name": "Seychelles",
"category": "Countries"
},
{
"tid": "201",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "202",
"name": "Singapore",
"category": "Countries"
},
{
"tid": "203",
"name": "Slovakia",
"category": "Countries"
},
{
"tid": "204",
"name": "Slovenia",
"category": "Countries"
},
{
"tid": "205",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "206",
"name": "Somalia",
"category": "Countries"
},
{
"tid": "207",
"name": "South Africa",
"category": "Countries"
},
{
"tid": "208",
"name": "South Georgia and the South Sandwich Islands",
"category": "Countries"
},
{
"tid": "209",
"name": "South Korea",
"category": "Countries"
},
{
"tid": "476",
"name": "South Sudan",
"category": "Countries"
},
{
"tid": "210",
"name": "Spain",
"category": "Countries"
},
{
"tid": "211",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "212",
"name": "Sudan",
"category": "Countries"
},
{
"tid": "213",
"name": "Suriname",
"category": "Countries"
},
{
"tid": "214",
"name": "<NAME> <NAME>",
"category": "Countries"
},
{
"tid": "215",
"name": "Swaziland",
"category": "Countries"
},
{
"tid": "216",
"name": "Sweden",
"category": "Countries"
},
{
"tid": "217",
"name": "Switzerland",
"category": "Countries"
},
{
"tid": "218",
"name": "Syria",
"category": "Countries"
},
{
"tid": "219",
"name": "Taiwan",
"category": "Countries"
},
{
"tid": "220",
"name": "Tajikistan",
"category": "Countries"
},
{
"tid": "221",
"name": "Tanzania",
"category": "Countries"
},
{
"tid": "222",
"name": "Thailand",
"category": "Countries"
},
{
"tid": "223",
"name": "Timor-Leste",
"category": "Countries"
},
{
"tid": "224",
"name": "Togo",
"category": "Countries"
},
{
"tid": "225",
"name": "Tokelau",
"category": "Countries"
},
{
"tid": "226",
"name": "Tonga",
"category": "Countries"
},
{
"tid": "227",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "228",
"name": "Tunisia",
"category": "Countries"
},
{
"tid": "229",
"name": "Turkey",
"category": "Countries"
},
{
"tid": "230",
"name": "Turkmenistan",
"category": "Countries"
},
{
"tid": "231",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "232",
"name": "Tuvalu",
"category": "Countries"
},
{
"tid": "233",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "234",
"name": "Uganda",
"category": "Countries"
},
{
"tid": "235",
"name": "Ukraine",
"category": "Countries"
},
{
"tid": "236",
"name": "United Arab Emirates",
"category": "Countries"
},
{
"tid": "237",
"name": "United Kingdom",
"category": "Countries"
},
{
"tid": "238",
"name": "United States",
"category": "Countries"
},
{
"tid": "239",
"name": "United States Minor Outlying Islands",
"category": "Countries"
},
{
"tid": "240",
"name": "Uruguay",
"category": "Countries"
},
{
"tid": "241",
"name": "Uzbekistan",
"category": "Countries"
},
{
"tid": "242",
"name": "Vanuatu",
"category": "Countries"
},
{
"tid": "243",
"name": "Vatican",
"category": "Countries"
},
{
"tid": "244",
"name": "Venezuela",
"category": "Countries"
},
{
"tid": "245",
"name": "Vietnam",
"category": "Countries"
},
{
"tid": "246",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "247",
"name": "<NAME>",
"category": "Countries"
},
{
"tid": "248",
"name": "Yemen",
"category": "Countries"
},
{
"tid": "249",
"name": "Zambia",
"category": "Countries"
},
{
"tid": "250",
"name": "Zimbabwe",
"category": "Countries"
},
{
"tid": "302",
"category": "Technology attributes",
"name": "Anonymity"
},
{
"tid": "303",
"category": "Technology attributes",
"name": "Application deployment"
},
{
"tid": "298",
"category": "Technology attributes",
"name": "Browser extension"
},
{
"tid": "299",
"category": "Technology attributes",
"name": "Browser plugin"
},
{
"tid": "308",
"category": "Technology attributes",
"name": "Cryptography"
},
{
"tid": "310",
"category": "Technology attributes",
"name": "Dependency integration"
},
{
"tid": "312",
"category": "Technology attributes",
"name": "Desktop App"
},
{
"tid": "311",
"category": "Technology attributes",
"name": "Desktop client"
},
{
"tid": "316",
"category": "Technology attributes",
"name": "Hardware/Embedded device(s)"
},
{
"tid": "347",
"category": "Technology attributes",
"name": "Infrastructure as a service (IaaS)"
},
{
"tid": "306",
"category": "Technology attributes",
"name": "Mobile application (clientside)"
},
{
"tid": "314",
"category": "Technology attributes",
"name": "Networking"
},
{
"tid": "356",
"category": "Technology attributes",
"name": "Not applicable"
},
{
"tid": "317",
"category": "Technology attributes",
"name": "Other"
},
{
"tid": "346",
"category": "Technology attributes",
"name": "Platform as a service (PaaS)"
},
{
"tid": "349",
"category": "Technology attributes",
"name": "Reverse Engineering"
},
{
"tid": "313",
"category": "Technology attributes",
"name": "Sensitive data"
},
{
"tid": "305",
"category": "Technology attributes",
"name": "Server daemon"
},
{
"tid": "315",
"category": "Technology attributes",
"name": "Software as a Service (SaaS)"
},
{
"tid": "300",
"category": "Technology attributes",
"name": "Unmanaged language"
},
{
"tid": "301",
"category": "Technology attributes",
"name": "User interface/experience"
},
{
"tid": "307",
"category": "Technology attributes",
"name": "Web API/Mobile application (serverside)"
},
{
"tid": "304",
"category": "Technology attributes",
"name": "Web application"
},
{
"tid": "309",
"category": "Technology attributes",
"name": "Wireless Communication"
}
]
| en | 0.762745 | # The global categories are fairly static so using a static seed. | 1.402287 | 1 |
src/models/Data.py | sag111/Author-Profiling | 0 | 6614928 | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow_datasets as tfds
import tensorflow as tf
import os
import csv
import json
import time
import unicodedata
from selectolax.parser import HTMLParser
import numpy as np
import pandas as pd
def GetPreprocessFunctions(tokenizer, maxLen, limitStrategy="Filter"):
    """Build (encode, filter_max_length) callables for a tf.data pipeline.

    tokenizer: one of three duck-typed tokenizer flavours (see the branches
        in `encode` below).
    maxLen: maximum sequence length in tokens.
    limitStrategy: "Cut" truncates over-long pairs inside `encode`; any other
        value leaves them intact so `filter_max_length` can drop them instead.
    """
    def cutToMaxLen(doc, summary, maxLen=maxLen):
        # Truncate both token sequences to at most maxLen entries.
        docSize = len(doc)
        summarySize = len(summary)
        return doc[:min(docSize, maxLen)], summary[:min(summarySize, maxLen)]

    def encode(doc, summary):
        # `doc`/`summary` are eager string tensors (`.numpy()` below), i.e.
        # this is meant to be wrapped with tf.py_function in the pipeline.
        if "BERT_tokenizer" in tokenizer.__dict__:
            # BERT-style wrapper object: its encode() handles special tokens.
            doc = tokenizer.encode(doc.numpy())
            summary = tokenizer.encode(summary.numpy())
        elif type(tokenizer).__name__=="SentencePieceProcessor":
            # SentencePiece: BOS/EOS ids are added explicitly.
            doc = [tokenizer.bos_id()] + tokenizer.EncodeAsIds(doc.numpy()) + [tokenizer.eos_id()]
            summary = [tokenizer.bos_id()] + tokenizer.EncodeAsIds(summary.numpy()) + [tokenizer.eos_id()]
        else:
            # Subword-encoder style: the two ids just past the vocabulary are
            # reserved as start/end markers.
            startIdx, endIdx = tokenizer.vocab_size, tokenizer.vocab_size+1
            doc = [startIdx] + tokenizer.encode(
                doc.numpy()) + [endIdx]
            summary = [startIdx] + tokenizer.encode(
                summary.numpy()) + [endIdx]
        if limitStrategy == "Cut":
            doc, summary = cutToMaxLen(doc, summary)
        return doc, summary

    def filter_max_length(x, y, max_length=maxLen):
        # True only when both sides fit within max_length.
        return tf.logical_and(tf.size(x) <= max_length,
                              tf.size(y) <= max_length)

    return encode, filter_max_length
def get_angles(pos, i, d_model):
    """Angle rates for the Transformer sinusoidal positional encoding.

    Computes pos / 10000^(2*floor(i/2) / d_model); `pos` and `i` broadcast
    against each other, so column-vector/row-vector inputs yield a matrix.
    """
    exponent = (2 * (i // 2)) / np.float32(d_model)
    inverse_freq = 1 / np.power(10000, exponent)
    return pos * inverse_freq
def positional_encoding(position, d_model):
    """Sinusoidal positional encodings, shape (1, position, d_model), float32."""
    angles = get_angles(np.arange(position)[:, np.newaxis],
                        np.arange(d_model)[np.newaxis, :],
                        d_model)
    # Even feature indices (2i) get sin, odd ones (2i+1) get cos.
    angles[:, 0::2] = np.sin(angles[:, 0::2])
    angles[:, 1::2] = np.cos(angles[:, 1::2])
    # Leading batch axis of size 1 so it broadcasts over a batch of inputs.
    return tf.cast(angles[np.newaxis, ...], dtype=tf.float32)
def create_padding_mask(seq):
    """Return 1.0 where `seq` holds padding id 0, shaped for attention logits.

    Output shape is (batch_size, 1, 1, seq_len); the two singleton axes let
    the mask broadcast over heads and query positions.
    """
    is_pad = tf.cast(tf.math.equal(seq, 0), tf.float32)
    return is_pad[:, tf.newaxis, tf.newaxis, :]
def create_look_ahead_mask(size):
    """Strictly-upper-triangular (size, size) mask: 1 marks a future position."""
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return 1 - lower_triangle
def create_masks(inp, tar):
    """Build the three attention masks used by a Transformer training step.

    Returns (enc_padding_mask, combined_mask, dec_padding_mask):
    - enc_padding_mask: padding over `inp`, for encoder self-attention;
    - combined_mask: padding over `tar` merged with the look-ahead mask, for
      the decoder's first (self-attention) block;
    - dec_padding_mask: padding over `inp`, for the decoder's second block,
      which attends to the encoder outputs.
    """
    enc_padding_mask = create_padding_mask(inp)
    dec_padding_mask = create_padding_mask(inp)
    # The decoder may look at neither padding nor future target tokens.
    look_ahead = create_look_ahead_mask(tf.shape(tar)[1])
    tar_padding = create_padding_mask(tar)
    combined_mask = tf.maximum(tar_padding, look_ahead)
    return enc_padding_mask, combined_mask, dec_padding_mask
def loadDataset(dataConfig, loadTrain=True):
    """Load tf.data datasets for the corpus named in `dataConfig`.

    dataConfig: dict with a "corpus" key ("gigaword", "lenta", "ria" or
        "gooppe_ria") plus the corpus-specific path entries.
    loadTrain: when False, the (large) training split is skipped and only
        (dev, test) is returned; otherwise (train, dev, test).

    Raises:
        ValueError: if "corpus" is missing or names an unknown corpus.
        (The original raised "Dataset not specified" even for a *wrong*
        corpus name; the message now reports the offending value.)
    """
    corpus = dataConfig.get("corpus")
    if corpus == "gigaword":
        if loadTrain:
            train_examples = loadGigaword(dataConfig["train_x"], dataConfig["train_y"])
        dev_examples = loadGigaword(dataConfig["dev_x"], dataConfig["dev_y"])
        test_examples = loadGigaword(dataConfig["test_x"], dataConfig["test_y"])
    elif corpus == "lenta":
        if loadTrain:
            train_examples = loadLenta(dataConfig["train"])
        dev_examples = loadLenta(dataConfig["dev"])
        test_examples = loadLenta(dataConfig["test"])
    elif corpus == "ria":
        if loadTrain:
            train_examples = loadRia(dataConfig["train"], dataConfig)
        dev_examples = loadRia(dataConfig["dev"], dataConfig)
        test_examples = loadRia(dataConfig["test"], dataConfig)
    elif corpus == "gooppe_ria":
        if loadTrain:
            train_examples = loadGooppeRia(dataConfig["train"], dataConfig)
        dev_examples = loadGooppeRia(dataConfig["dev"], dataConfig)
        test_examples = loadGooppeRia(dataConfig["test"], dataConfig)
    else:
        raise ValueError(
            "Unknown corpus %r; expected one of "
            "'gigaword', 'lenta', 'ria', 'gooppe_ria'" % (corpus,))
    if loadTrain:
        return train_examples, dev_examples, test_examples
    return dev_examples, test_examples
def clear_text(text: str, rm_strong=True) -> str:
    """Normalise unicode (NFKD), flatten newlines, and strip HTML markup.

    When `rm_strong` is True, every <strong> element is removed entirely
    (tag and contents); the plain text of what remains is returned,
    stripped of surrounding whitespace.
    """
    flattened = unicodedata.normalize("NFKD", text).replace("\n", " ")
    tree = HTMLParser(flattened)
    if rm_strong:
        # decompose() drops the node together with everything inside it.
        for strong_node in tree.css("strong"):
            strong_node.decompose()
    return tree.text().strip()
def loadRia(path, dataConfig):
    """Read a JSON-lines RIA dump into a tf.data dataset of (text, title)."""
    with open(path, "r") as f:
        records = [json.loads(line) for line in f.readlines()]

    def gen():
        for record in records:
            if dataConfig.get("clear", False):
                # Optional HTML/unicode clean-up of both fields.
                yield (clear_text(record["text"]), clear_text(record["title"]))
            else:
                yield (record["text"], record["title"])

    return tf.data.Dataset.from_generator(
        gen, output_shapes=((), ()), output_types=(tf.string, tf.string))
def loadGooppeRia(path, dataConfig):
    """Read a tab-separated (text, title) file into a tf.data string dataset."""
    def gen():
        with open(path) as tsv_file:
            for row in csv.reader(tsv_file, delimiter="\t"):
                if dataConfig.get("clear", False):
                    # Optional HTML/unicode clean-up of both columns.
                    yield (clear_text(row[0]), clear_text(row[1]))
                else:
                    yield (row[0], row[1])

    return tf.data.Dataset.from_generator(
        gen, output_shapes=((), ()), output_types=(tf.string, tf.string))
def loadLenta(path):
    """Read the Lenta CSV (columns "text", "title") into a string dataset."""
    frame = pd.read_csv(path)

    def gen():
        for _, record in frame.iterrows():
            yield (record["text"], record["title"])

    return tf.data.Dataset.from_generator(
        gen, output_shapes=((), ()), output_types=(tf.string, tf.string))
def loadGigaword(path_src, path_tgt):
    """Pair up parallel Gigaword source/target files into a (doc, summary)
    tf.data dataset of strings; lines are matched by position."""
    with open(path_src, "r") as f:
        docs = f.readlines()
    with open(path_tgt, "r") as f:
        summs = f.readlines()
    docs = [s.strip() for s in docs]
    summs = [s.strip() for s in summs]
    def gen():
        for doc, summ in zip(docs, summs):
            yield (doc, summ)
    examples = tf.data.Dataset.from_generator(gen, output_shapes=((), ()), output_types=(tf.string, tf.string))
    # NOTE(review): the "| from __future__ ..." tail below is a dataset-dump
    # row separator fused onto this line; it is not part of the function.
    return examples | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow_datasets as tfds
import tensorflow as tf
import os
import csv
import json
import time
import unicodedata
from selectolax.parser import HTMLParser
import numpy as np
import pandas as pd
def GetPreprocessFunctions(tokenizer, maxLen, limitStrategy="Filter"):
    """Return (encode, filter_max_length) callables for a tf.data pipeline."""
    def cutToMaxLen(doc, summary, maxLen=maxLen):
        # Truncate both token sequences to at most maxLen entries.
        docSize = len(doc)
        summarySize = len(summary)
        return doc[:min(docSize, maxLen)], summary[:min(summarySize, maxLen)]
    def encode(doc, summary):
        # Three duck-typed tokenizer flavours; inputs are eager string tensors.
        if "BERT_tokenizer" in tokenizer.__dict__:
            doc = tokenizer.encode(doc.numpy())
            summary = tokenizer.encode(summary.numpy())
        elif type(tokenizer).__name__=="SentencePieceProcessor":
            # SentencePiece: BOS/EOS ids added explicitly.
            doc = [tokenizer.bos_id()] + tokenizer.EncodeAsIds(doc.numpy()) + [tokenizer.eos_id()]
            summary = [tokenizer.bos_id()] + tokenizer.EncodeAsIds(summary.numpy()) + [tokenizer.eos_id()]
        else:
            # Subword encoder: ids vocab_size / vocab_size+1 mark start/end.
            startIdx, endIdx = tokenizer.vocab_size, tokenizer.vocab_size+1
            doc = [startIdx] + tokenizer.encode(
                doc.numpy()) + [endIdx]
            summary = [startIdx] + tokenizer.encode(
                summary.numpy()) + [endIdx]
        if limitStrategy == "Cut":
            doc, summary = cutToMaxLen(doc, summary)
        return doc, summary
    def filter_max_length(x, y, max_length=maxLen):
        # True only when both sides fit within max_length.
        return tf.logical_and(tf.size(x) <= max_length,
                              tf.size(y) <= max_length)
    return encode, filter_max_length
def get_angles(pos, i, d_model):
    """Angle rates pos / 10000^(2*floor(i/2)/d_model) for positional encoding."""
    angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
    return pos * angle_rates
def positional_encoding(position, d_model):
    """Sinusoidal positional encoding, shape (1, position, d_model), float32."""
    angle_rads = get_angles(np.arange(position)[:, np.newaxis],
                            np.arange(d_model)[np.newaxis, :],
                            d_model)
    # apply sin to even indices in the array; 2i
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    # apply cos to odd indices in the array; 2i+1
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    pos_encoding = angle_rads[np.newaxis, ...]
    return tf.cast(pos_encoding, dtype=tf.float32)
def create_padding_mask(seq):
    """Return 1.0 where `seq` holds padding id 0, shaped for attention logits."""
    seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
    # add extra dimensions to add the padding
    # to the attention logits.
    return seq[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)
def create_look_ahead_mask(size):
    """Strictly-upper-triangular mask: 1 marks a future position."""
    mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return mask  # (seq_len, seq_len)
def create_masks(inp, tar):
    """Build (enc_padding_mask, combined_mask, dec_padding_mask) for a step."""
    # Encoder padding mask
    enc_padding_mask = create_padding_mask(inp)
    # Used in the 2nd attention block in the decoder.
    # This padding mask is used to mask the encoder outputs.
    dec_padding_mask = create_padding_mask(inp)
    # Used in the 1st attention block in the decoder.
    # It is used to pad and mask future tokens in the input received by
    # the decoder.
    look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
    dec_target_padding_mask = create_padding_mask(tar)
    combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
    return enc_padding_mask, combined_mask, dec_padding_mask
def loadDataset(dataConfig, loadTrain=True):
    """Load the configured corpus; returns (train, dev, test) or (dev, test).

    dataConfig["corpus"] selects the loader; the remaining keys are the
    corpus-specific paths.  Raises ValueError for an unrecognised corpus.
    """
    if dataConfig["corpus"] == "gigaword":
        if loadTrain:
            train_examples = loadGigaword(dataConfig["train_x"], dataConfig["train_y"])
        dev_examples = loadGigaword(dataConfig["dev_x"], dataConfig["dev_y"])
        test_examples = loadGigaword(dataConfig["test_x"], dataConfig["test_y"])
    elif dataConfig["corpus"] == "lenta":
        if loadTrain:
            train_examples = loadLenta(dataConfig["train"])
        dev_examples = loadLenta(dataConfig["dev"])
        test_examples = loadLenta(dataConfig["test"])
    elif dataConfig["corpus"] == "ria":
        if loadTrain:
            train_examples = loadRia(dataConfig["train"], dataConfig)
        dev_examples = loadRia(dataConfig["dev"], dataConfig)
        test_examples = loadRia(dataConfig["test"], dataConfig)
    elif dataConfig["corpus"] == "gooppe_ria":
        if loadTrain:
            train_examples = loadGooppeRia(dataConfig["train"], dataConfig)
        dev_examples = loadGooppeRia(dataConfig["dev"], dataConfig)
        test_examples = loadGooppeRia(dataConfig["test"], dataConfig)
    else:
        raise ValueError("Dataset not specified")
    if loadTrain:
        return train_examples, dev_examples, test_examples
    else:
        return dev_examples, test_examples
def clear_text(text: str, rm_strong=True) -> str:
    """NFKD-normalise, flatten newlines and strip HTML; optionally drop <strong>."""
    selector = "strong"
    text = unicodedata.normalize("NFKD", text)
    text = text.replace("\n", " ")
    tree = HTMLParser(text)
    if rm_strong:
        # decompose() removes the element together with its contents.
        for node in tree.css(selector):
            node.decompose()
    return tree.text().strip()
def loadRia(path, dataConfig):
    """JSON-lines RIA dump -> tf.data dataset of (text, title) strings."""
    with open(path, "r") as f:
        lines = f.readlines()
    ria_json = [json.loads(x) for x in lines]
    def gen():
        for doc in ria_json:
            if dataConfig.get("clear", False):
                # Optional HTML/unicode clean-up of both fields.
                yield (clear_text(doc["text"]), clear_text(doc["title"]))
            else:
                yield (doc["text"], doc["title"])
    examples = tf.data.Dataset.from_generator(gen, output_shapes=((), ()), output_types=(tf.string, tf.string))
    return examples
def loadGooppeRia(path, dataConfig):
    """Tab-separated (text, title) file -> tf.data dataset of strings."""
    def gen():
        with open(path) as file:
            reader = csv.reader(file, delimiter="\t")
            for line in reader:
                if dataConfig.get("clear", False):
                    # Optional HTML/unicode clean-up of both columns.
                    yield (clear_text(line[0]), clear_text(line[1]))
                else:
                    yield (line[0], line[1])
    examples = tf.data.Dataset.from_generator(gen, output_shapes=((), ()), output_types=(tf.string, tf.string))
    return examples
def loadLenta(path):
    """Lenta CSV (columns "text", "title") -> tf.data dataset of strings."""
    lentaDF = pd.read_csv(path)
    def gen():
        for ir, row in lentaDF.iterrows():
            yield (row["text"], row["title"])
    examples = tf.data.Dataset.from_generator(gen, output_shapes=((), ()), output_types=(tf.string, tf.string))
    return examples
def loadGigaword(path_src, path_tgt):
    """Parallel Gigaword src/tgt files -> tf.data dataset of (doc, summary)."""
    with open(path_src, "r") as f:
        docs = f.readlines()
    with open(path_tgt, "r") as f:
        summs = f.readlines()
    docs = [s.strip() for s in docs]
    summs = [s.strip() for s in summs]
    def gen():
        for doc, summ in zip(docs, summs):
            yield (doc, summ)
    examples = tf.data.Dataset.from_generator(gen, output_shapes=((), ()), output_types=(tf.string, tf.string))
    # NOTE(review): the trailing "| en | ..." metadata below is a dataset-dump
    # row separator fused onto this line; it is not part of the function.
    return examples | en | 0.743333 | # apply sin to even indices in the array; 2i # apply cos to odd indices in the array; 2i+1 # add extra dimensions to add the padding # to the attention logits. # (batch_size, 1, 1, seq_len) # (seq_len, seq_len) # Encoder padding mask # Used in the 2nd attention block in the decoder. # This padding mask is used to mask the encoder outputs. # Used in the 1st attention block in the decoder. # It is used to pad and mask future tokens in the input received by # the decoder. | 2.311926 | 2
baggie/baggie/util.py | Box-Robotics/ros2-bagutils | 10 | 6614929 | # Copyright 2020 Box Robotics, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
from datetime import datetime as DT
from rclpy.time import Time
def stamp(dt=None):
    """Convert a timestamp into the integer nanosecond form rosbag2 expects.

    Parameters:
        dt (datetime.datetime or rclpy.time.Time):
            The timestamp to convert.  When omitted, the current system
            time is used.

    Returns:
        Nanoseconds past the epoch (of a particular clock; usually the
        system clock) as an int.
    """
    if dt is None:
        dt = DT.now()
    if isinstance(dt, DT):
        return int(dt.timestamp() * 1e9)
    if isinstance(dt, Time):
        return dt.nanoseconds
    if isinstance(dt, int):
        # Tolerated but deliberately undocumented: assumed to already be
        # nanoseconds past the epoch.
        return dt
    raise TypeError(
        "stamp: 'dt' must be an instance of " +
        "'datetime.datetime' or 'rclpy.time.Time', " +
        " not '%s'" % type(dt))
def msg2typestr(msg):
    """Encode the type of `msg` as a rosbag2-style type string.

    The message's module path (e.g. "std_msgs.msg._string") supplies the
    leading components; its final component is replaced by the class name
    and the result is joined with "/" instead of ".".
    """
    parts = msg.__module__.split(".")
    parts[-1] = msg.__class__.__name__
    return "/".join(parts)
def typestr2msgtype(type_str):
    """Resolve a rosbag2 type string to the concrete Python message type.

    "pkg/msg/Name" is split into the module path "pkg.msg" and the class
    name "Name"; the module is imported, an instance is constructed, and
    the instance's type is returned.  Importing raises if the module's
    loader is not on sys.path.
    """
    *module_parts, type_name = type_str.split("/")
    module = importlib.import_module(".".join(module_parts))
    return type(getattr(module, type_name)())
| # Copyright 2020 Box Robotics, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
from datetime import datetime as DT
from rclpy.time import Time
def stamp(dt=None):
    """
    Generates a timestamp compatible for writing to rosbag2

    Parameters:
        dt (datetime.datetime or rclpy.time.Time):
            A `datetime` or `Time` object representing the timestamp. If this
            parameter is omitted, the current system time is used.

    Returns:
        An int encoding of the timestamp as nanoseconds past the epoch (of a
        particular clock; usually the system clock)
    """
    if dt is None:
        dt = DT.now()
    if isinstance(dt, DT):
        return int(dt.timestamp() * 1e9)
    elif isinstance(dt, Time):
        return dt.nanoseconds
    elif isinstance(dt, int):
        # An int is passed through unchanged (assumed to be nanoseconds).
        return dt
    else:
        # NOTE: let's not encourage passing in an `int` (see docs above -- we
        # don't document it -- and the error message below -- also,
        # undocumented).
        raise(TypeError(
            "stamp: 'dt' must be an instance of " +
            "'datetime.datetime' or 'rclpy.time.Time', " +
            " not '%s'" % type(dt)))
def msg2typestr(msg):
    """
    Introspects the message type from the passed in `msg` and encodes it as a
    string in the format required by rosbag2.

    Parameters
    ----------
    msg : Message
        A ROS 2 message (deserialized) whose type we need to introspect

    Returns
    -------
    A string encoding of the message type suitable for serialization to a ROS 2
    bag.
    """
    # Replace the module path's last component with the class name and join
    # with "/" instead of ".".
    mod_components = msg.__module__.split(".")
    mod_components[-1] = msg.__class__.__name__
    return "/".join(mod_components)
def typestr2msgtype(type_str):
    """
    Given a ROS 2 bag message type encoding string, this function will return a
    Type instance of the actual Python message type that can then be used for
    creating instances of the particular message class. If the loader for the
    class type is not on your `sys.path` an exception will be thrown.

    Parameters
    ----------
    type_str : str
        A string encoding of a message type compatible with rosbag2

    Returns
    -------
    The `Type` of the Python message.
    """
    module_components = type_str.split("/")
    type_name = module_components[-1]
    module_str = ".".join(module_components[0:-1])
    module = importlib.import_module(module_str)
    # Instantiate once and return the instance's concrete type.
    return type(getattr(module, type_name)())
| en | 0.68351 | # Copyright 2020 Box Robotics, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Generates a timestamp compatiable for writing to rosbag2 Parameters: dt (datetime.datetime or rclpy.time.Time): A `datetime` or `Time` object representing the timestamp. If this parameter is omitted, the current system time is used. Returns: An int encoding of the timestamp as nanoseconds past the epoch (of a particular clock; usually the system clock) # NOTE: let's not encourage passing in an `int` (see docs above -- we # don't document it -- and the error message below -- also, # undocumented). Introspects the message type from the passed in `msg` and encodes it as a string in the format required by rosbag2. Parameters ---------- msg : Message A ROS 2 message (deserialized) whose type we need to introspect Returns ------- A string encoding of the message type suitable for serialization to a ROS 2 bag. Given a ROS 2 bag message type encoding string, this function will return a Type instance of the actual Python message type that can then be used for creating instances of the particular message class. If the loader for the class type is not on your `sys.path` an exception will be thrown. Parameters ---------- type_str : str A string encoding of a message type compatible with rosbag2 Returns ------- The `Type` of the Python message. | 2.785284 | 3 |
bin/results/results2011.py | adjspecies/furrypoll-munger | 1 | 6614930 | import csv
import MySQLdb
import sys
from meta import FIELDNAMES
from MySQLResults import Results
class Results2011(Results):
    """Extractor for the 2011 survey results.

    `questionIndex` maps each canonical output field name to that field's
    question/column index in the 2011 survey data (consumed by the
    MySQLResults.Results base class).
    """

    # FIX: the original dict literal listed 'animal_snowleopard' twice --
    # once as 315 (consistent with the surrounding 314/316 sequence) and
    # again as None at the very end, which silently overwrote the real
    # index.  The stray trailing duplicate has been removed so the key
    # resolves to 315.
    questionIndex = {
        # Demographics
        'month': 3, 'year': 4, 'biosex': 5, 'gender': 6, 'orientation': 7,
        'country': 8, 'state': 9,
        'race_white': 11, 'race_black': 12, 'race_hispanic': 13,
        'race_asian': 14, 'race_native': 15,
        'religion': 18, 'politics_social': 20, 'politics_economic': 22,
        'occupation': 24, 'education': 26, 'relationship': 28,
        'partner_is_furry': 30,
        # Furry identity and social context
        'howfurry': 31, 'years_known_fandom': 32, 'years_as_furry': 33,
        'furries_known': 34, 'furries_known_in_person': 35,
        'whoknows_nobody': 36, 'whoknows_family': 37, 'whoknows_SO': 38,
        'whoknows_furryfriends': 39, 'whoknows_bestfriends': 40,
        'whoknows_closerfriends': 41, 'whoknows_friends': 42,
        'whoknows_coworkers': 43, 'whoknows_commonknowledge': 44,
        'nonfurry_response': 45, 'nonfurry_response_personal': 46,
        'nonfurry_accuracy': 47, 'rp_as_different_gender': 48,
        'seximportance': 49, 'seximportance_personal': 50,
        'seximportance_others': 51, 'seximportance_public': 52,
        # Activity frequency
        'howoften_chat_online': 53, 'howoften_roleplay': 54,
        'howoften_attend_conventions': 55, 'howoften_meet_up': 56,
        'howoften_visit_furry_websites': 57,
        'howoften_participate_in_furry_online_communities': 58,
        'howoften_write': 59, 'howoften_draw': 60,
        'howoften_play_nonfurry_online_games': 61,
        'howoften_play_nonfurry_rpgs': 62,
        'howoften_attend_nonfurry_conventions': 63,
        'howoften_participate_in_nonfurry_online_communities': 64,
        # Self-descriptions
        'is_artist': 65, 'is_writer': 66, 'is_musician': 67,
        'is_congoer': 68, 'is_fursuiter': 69,
        'is_active_online_communities': 70, 'is_fan_rpgs': 71,
        'is_fan_scifi': 72, 'is_fan_anime': 73, 'is_plushophile': 74,
        'is_zoophile': 75, 'is_polyglot': 76,
        'is_animal_rights_advocate': 77, 'is_vegetarian': 78,
        'is_politically_active': 79, 'is_otherkin': 80,
        # Opinions and importance ratings
        'opinion_artwork': 81, 'opinion_writing': 82,
        'opinion_conventions': 83, 'opinion_fursuiting': 84,
        'opinion_plushophilia': 85, 'opinion_zoophilia': 86,
        'opinion_online_communities': 87,
        'importance_artwork': 88, 'importance_writing': 89,
        'importance_online_communities': 90, 'importance_muds': 91,
        'importance_conventions': 92, 'importance_fursuiting': 93,
        # Websites
        'website_artspots': 281, 'website_deviantart': 282,
        'website_e621': 283, 'website_flayrah': 284,
        'website_furaffinity': 285, 'website_furcadia': 286,
        'website_furnation': 287, 'website_furocity': 288,
        'website_furry4life': 289, 'website_furryteens': 290,
        'website_furspace': 291, 'website_furtopia': 292,
        'website_inkbunny': 293, 'website_pounced': 294,
        'website_sofurry': 295, 'website_vcl': 296, 'website_wikifur': 297,
        'how_much_human': 362,
        # Fursona species
        'animal_wolf': 299, 'animal_redfox': 300, 'animal_greyfox': 301,
        'animal_arcticfox': 302, 'animal_kitsune': 303,
        'animal_otherfox': 304, 'animal_coyote': 305, 'animal_jackal': 306,
        'animal_germanshepherd': 307, 'animal_husky': 308,
        'animal_collie': 309, 'animal_otherdog': 310,
        'animal_othercanine': 311,
        'animal_tiger': 312, 'animal_lion': 313, 'animal_leopard': 314,
        'animal_snowleopard': 315, 'animal_panther': 316,
        'animal_cheetah': 317, 'animal_cougar': 318,
        'animal_domesticcat': 319, 'animal_otherfeline': 320,
        'animal_dragon': 321, 'animal_lizard': 322, 'animal_dinosaur': 323,
        'animal_otherreptile': 324,
        'animal_raccoon': 325, 'animal_skunk': 326, 'animal_badger': 327,
        'animal_riverotter': 328, 'animal_seaotter': 329,
        'animal_weasel': 330, 'animal_othermustelid': 331,
        'animal_redpanda': 332, 'animal_othermusteloid': 333,
        'animal_horse': 334, 'animal_deer': 335,
        'animal_otherungulate': 336,
        'animal_brownbear': 337, 'animal_grizzlybear': 338,
        'animal_pandabear': 339, 'animal_polarbear': 340,
        'animal_otherbear': 341,
        'animal_mouse': 342, 'animal_rat': 343, 'animal_squirrel': 344,
        'animal_other': 345,
        'animal_raven': 346, 'animal_raptor': 347, 'animal_otherbird': 348,
        'animal_rabbit': 349, 'animal_kangaroo': 350, 'animal_koala': 351,
        'animal_othermarsupial': 352,
        'animal_lemur': 353, 'animal_monkey': 354,
        'animal_otherprimate': 355,
        'animal_hyaena': 356, 'animal_bat': 357, 'animal_griffin': 358,
    }
def buildResults():
    """Run the 2011 extractor and return its result rows."""
    return Results2011(year=2011).getResults()
if __name__ == '__main__':
    # Usage: results2011.py OUTFILE — writes one CSV row per survey response
    # (no header row, so yearly files can be concatenated).
    target_path = sys.argv[1]
    with open(target_path, 'w') as csvfile:
        csv_writer = csv.DictWriter(csvfile, fieldnames=FIELDNAMES)
        for result_row in buildResults():
            csv_writer.writerow(result_row)
| import csv
import MySQLdb
import sys
from meta import FIELDNAMES
from MySQLResults import Results
class Results2011(Results):
    """Extractor for the 2011 survey results; `questionIndex` maps each
    canonical output field name to its question/column index."""
    questionIndex = {
        # Demographics
        'month': 3, 'year': 4, 'biosex': 5, 'gender': 6, 'orientation': 7,
        'country': 8, 'state': 9,
        'race_white': 11, 'race_black': 12, 'race_hispanic': 13,
        'race_asian': 14, 'race_native': 15,
        'religion': 18, 'politics_social': 20, 'politics_economic': 22,
        'occupation': 24, 'education': 26, 'relationship': 28,
        'partner_is_furry': 30,
        # Furry identity and social context
        'howfurry': 31, 'years_known_fandom': 32, 'years_as_furry': 33,
        'furries_known': 34, 'furries_known_in_person': 35,
        'whoknows_nobody': 36, 'whoknows_family': 37, 'whoknows_SO': 38,
        'whoknows_furryfriends': 39, 'whoknows_bestfriends': 40,
        'whoknows_closerfriends': 41, 'whoknows_friends': 42,
        'whoknows_coworkers': 43, 'whoknows_commonknowledge': 44,
        'nonfurry_response': 45, 'nonfurry_response_personal': 46,
        'nonfurry_accuracy': 47, 'rp_as_different_gender': 48,
        'seximportance': 49, 'seximportance_personal': 50,
        'seximportance_others': 51, 'seximportance_public': 52,
        # Activity frequency
        'howoften_chat_online': 53, 'howoften_roleplay': 54,
        'howoften_attend_conventions': 55, 'howoften_meet_up': 56,
        'howoften_visit_furry_websites': 57,
        'howoften_participate_in_furry_online_communities': 58,
        'howoften_write': 59, 'howoften_draw': 60,
        'howoften_play_nonfurry_online_games': 61,
        'howoften_play_nonfurry_rpgs': 62,
        'howoften_attend_nonfurry_conventions': 63,
        'howoften_participate_in_nonfurry_online_communities': 64,
        # Self-descriptions
        'is_artist': 65, 'is_writer': 66, 'is_musician': 67,
        'is_congoer': 68, 'is_fursuiter': 69,
        'is_active_online_communities': 70, 'is_fan_rpgs': 71,
        'is_fan_scifi': 72, 'is_fan_anime': 73, 'is_plushophile': 74,
        'is_zoophile': 75, 'is_polyglot': 76,
        'is_animal_rights_advocate': 77, 'is_vegetarian': 78,
        'is_politically_active': 79, 'is_otherkin': 80,
        # Opinions and importance ratings
        'opinion_artwork': 81, 'opinion_writing': 82,
        'opinion_conventions': 83, 'opinion_fursuiting': 84,
        'opinion_plushophilia': 85, 'opinion_zoophilia': 86,
        'opinion_online_communities': 87,
        'importance_artwork': 88, 'importance_writing': 89,
        'importance_online_communities': 90, 'importance_muds': 91,
        'importance_conventions': 92, 'importance_fursuiting': 93,
        # Websites
        'website_artspots': 281, 'website_deviantart': 282,
        'website_e621': 283, 'website_flayrah': 284,
        'website_furaffinity': 285, 'website_furcadia': 286,
        'website_furnation': 287, 'website_furocity': 288,
        'website_furry4life': 289, 'website_furryteens': 290,
        'website_furspace': 291, 'website_furtopia': 292,
        'website_inkbunny': 293, 'website_pounced': 294,
        'website_sofurry': 295, 'website_vcl': 296, 'website_wikifur': 297,
        'how_much_human': 362,
        # Fursona species
        'animal_wolf': 299, 'animal_redfox': 300, 'animal_greyfox': 301,
        'animal_arcticfox': 302, 'animal_kitsune': 303,
        'animal_otherfox': 304, 'animal_coyote': 305, 'animal_jackal': 306,
        'animal_germanshepherd': 307, 'animal_husky': 308,
        'animal_collie': 309, 'animal_otherdog': 310,
        'animal_othercanine': 311,
        'animal_tiger': 312, 'animal_lion': 313, 'animal_leopard': 314,
        'animal_snowleopard': 315, 'animal_panther': 316,
        'animal_cheetah': 317, 'animal_cougar': 318,
        'animal_domesticcat': 319, 'animal_otherfeline': 320,
        'animal_dragon': 321, 'animal_lizard': 322, 'animal_dinosaur': 323,
        'animal_otherreptile': 324,
        'animal_raccoon': 325, 'animal_skunk': 326, 'animal_badger': 327,
        'animal_riverotter': 328, 'animal_seaotter': 329,
        'animal_weasel': 330, 'animal_othermustelid': 331,
        'animal_redpanda': 332, 'animal_othermusteloid': 333,
        'animal_horse': 334, 'animal_deer': 335,
        'animal_otherungulate': 336,
        'animal_brownbear': 337, 'animal_grizzlybear': 338,
        'animal_pandabear': 339, 'animal_polarbear': 340,
        'animal_otherbear': 341,
        'animal_mouse': 342, 'animal_rat': 343, 'animal_squirrel': 344,
        'animal_other': 345,
        'animal_raven': 346, 'animal_raptor': 347, 'animal_otherbird': 348,
        'animal_rabbit': 349, 'animal_kangaroo': 350, 'animal_koala': 351,
        'animal_othermarsupial': 352,
        'animal_lemur': 353, 'animal_monkey': 354,
        'animal_otherprimate': 355,
        'animal_hyaena': 356, 'animal_bat': 357, 'animal_griffin': 358,
        # NOTE(review): duplicate key -- this trailing None silently
        # overrides the 315 entry above.  Looks like a leftover; confirm
        # which value is intended.
        'animal_snowleopard': None,
    }
def buildResults():
    """Build and return the processed 2011 survey result rows."""
    return Results2011(year=2011).getResults()
# Script entry point: dump every survey result row to the CSV file named
# by the first command-line argument.
if __name__ == '__main__':
    outfile = sys.argv[1]
    with open(outfile, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=FIELDNAMES)
        # NOTE(review): no writer.writeheader() call -- presumably the
        # consumer expects a headerless CSV; confirm before changing.
        for row in buildResults():
            writer.writerow(row)
| none | 1 | 2.3844 | 2 | |
orbeon_xml_api/tests/controls/test_file_attachment.py | sferket/orbeon-xml-api | 0 | 6614931 | from . import CommonTestCase
class FileAttachmentTestCase(CommonTestCase):
    """Placeholder tests for the builder's 'file-attachment' control."""

    # The leading underscore keeps this method out of unittest discovery
    # (only methods named test_* are collected), so the commented-out
    # assertions below are effectively disabled.
    def _test_file_attachment(self):
        pass
        # file_attachment = self.builder.controls['file-attachment']
        # self.assertEqual(file_attachment.resource_element1.label, 'File Attachment')
        # self.assertEqual(file_attachment.resource_element.hint, None)
| from . import CommonTestCase
class FileAttachmentTestCase(CommonTestCase):
def _test_file_attachment(self):
pass
# file_attachment = self.builder.controls['file-attachment']
# self.assertEqual(file_attachment.resource_element1.label, 'File Attachment')
# self.assertEqual(file_attachment.resource_element.hint, None)
| en | 0.448289 | # file_attachment = self.builder.controls['file-attachment'] # self.assertEqual(file_attachment.resource_element1.label, 'File Attachment') # self.assertEqual(file_attachment.resource_element.hint, None) | 2.478248 | 2 |
grafcli/exceptions.py | daviddelannoy/grafcli | 160 | 6614932 | <filename>grafcli/exceptions.py<gh_stars>100-1000
from climb.exceptions import CLIException
class HostConfigError(CLIException):
    """Raised for problems with a host's configuration."""
    pass
class MissingHostName(CLIException):
    """Raised when a host name is missing."""
    pass
class MissingTemplateCategory(CLIException):
    """Raised when a template category is missing."""
    pass
class InvalidPath(CLIException):
    """Raised for an invalid path."""
    pass
class InvalidDocument(CLIException):
    """Raised for an invalid document."""
    pass
class DocumentNotFound(CLIException):
    """Raised when a document cannot be found."""
    pass
class CommandCancelled(CLIException):
    """Raised when a command is cancelled."""
    pass
| <filename>grafcli/exceptions.py<gh_stars>100-1000
from climb.exceptions import CLIException
class HostConfigError(CLIException):
pass
class MissingHostName(CLIException):
pass
class MissingTemplateCategory(CLIException):
pass
class InvalidPath(CLIException):
pass
class InvalidDocument(CLIException):
pass
class DocumentNotFound(CLIException):
pass
class CommandCancelled(CLIException):
pass
| none | 1 | 1.950228 | 2 | |
keygen.py | NIT-dgp/SecureSnaps | 15 | 6614933 | import hashlib
import getpass
def max_val(ht, wdth):
    """Return the larger of the two dimensions."""
    return ht if ht >= wdth else wdth
def yield_chunks(block, iterate_size):
for i in range(0, len(block), iterate_size):
yield block[i: i + iterate_size]
def get_string_hash(psswd):
    """Return the hex-encoded SHA-256 digest of the given string."""
    return hashlib.sha256(psswd.encode('utf-8')).hexdigest()
def generate_tuples(H, W, pwd):
    """Derive four key tuples from *pwd* for an image of size H x W.

    The password's SHA-256 hex digest (64 chars) is split into 16
    four-character chunks; each chunk is parsed as hex and reduced
    modulo max(H, W) so every value is below the larger image
    dimension.  The 16 values are returned as four groups of four.
    """
    modulus = max_val(H, W)
    digest = get_string_hash(pwd)
    values = [int(chunk, 16) % modulus for chunk in yield_chunks(digest, 4)]
    return (values[0:4], values[4:8], values[8:12], values[12:])
| import hashlib
import getpass
def max_val(ht, wdth):
return max(ht, wdth)
def yield_chunks(block, iterate_size):
for i in range(0, len(block), iterate_size):
yield block[i: i + iterate_size]
def get_string_hash(psswd):
# Encode the string into a byte array
psswd_encoded = psswd.encode('utf-8')
# Generate hash value
hash_psswd = hashlib.sha256(psswd_encoded)
hashvalue = hash_psswd.hexdigest()
return hashvalue
def generate_tuples(H, W, pwd):
height = H
width = W
password_hashed = get_string_hash(pwd)
hash_lst = list(yield_chunks(password_hashed, 4))
# print (hash_lst)
mod = max_val(height, width)
finval = []
# Since we want our values to be less than the height or width of the image
finval.append([(int(i, 16)) % mod for i in hash_lst])
finval = finval[0]
keytupl1 = finval[0:4]
keytupl2 = finval[4:8]
keytupl3 = finval[8:12]
keytupl4 = finval[12:]
return (keytupl1, keytupl2, keytupl3, keytupl4)
| en | 0.640005 | # Encode the string into a byte array # Generate hash value # print (hash_lst) # Since we want our values to be less than the height or width of the image | 3.044346 | 3 |
src/dagny/renderers.py | jamescallmebrent/dagny | 1 | 6614934 | # -*- coding: utf-8 -*-
from dagny.action import Action
from dagny.utils import camel_to_underscore, resource_name
@Action.RENDERER.html
def render_html(action, resource):
    """
    Render a generic HTML response for *action* on *resource*.

    The template name is built as
    ``<template_path_prefix><resource_label>/<action_name>.html`` where
    the resource label is the CamelCase resource name converted to
    lowercase_underscore form (``camel_to_underscore(resource_name(...))``)
    and ``template_path_prefix`` is an optional attribute on the
    resource (defaults to the empty string).

    The template is rendered with ``render_to_response()`` using a
    ``RequestContext`` built from ``resource.request``; the resource
    itself is exposed to the template as ``self`` so attributes assigned
    during the action are available.
    """
    from django.shortcuts import render_to_response
    from django.template import RequestContext

    label = camel_to_underscore(resource_name(resource))
    prefix = getattr(resource, 'template_path_prefix', "")
    template_name = "%s%s/%s.html" % (prefix, label, action.name)

    context = {
        'self': resource
    }
    return render_to_response(template_name, context,
                              context_instance=RequestContext(resource.request))
| # -*- coding: utf-8 -*-
from dagny.action import Action
from dagny.utils import camel_to_underscore, resource_name
@Action.RENDERER.html
def render_html(action, resource):
"""
Render an appropriate HTML response for an action.
This is a generic renderer backend which produces HTML responses. It uses
the name of the resource and current action to generate a template name,
then renders the template with a `RequestContext`.
To retrieve the template name, the resource name is first turned from
CamelCase to lowercase_underscore_separated; if the class name ends in
`Resource`, this is first removed from the end. For example:
User => user
UserResource => user
NameXYZ => name_xyz
XYZName => xyz_name
You can optionally define a template path prefix on your `Resource` like so:
class User(Resource):
template_path_prefix = 'auth/'
# ...
The template name is assembled from the template path prefix, the
re-formatted resource name, and the current action name. So, for a `User`
resource, with `template_path_prefix = 'auth/'`, and an action of `show`,
the template name would be:
auth/user/show.html
Finally, this is rendered using `render_to_response()`. The resource is
passed into the context as `self`, so that attribute assignments from the
action will be available in the template. This also uses `RequestContext`,
so configured context processors will also be available.
"""
from django.shortcuts import render_to_response
from django.template import RequestContext
resource_label = camel_to_underscore(resource_name(resource))
template_path_prefix = getattr(resource, 'template_path_prefix', "")
template_name = "%s%s/%s.html" % (template_path_prefix, resource_label, action.name)
return render_to_response(template_name, {
'self': resource
}, context_instance=RequestContext(resource.request))
| en | 0.732995 | # -*- coding: utf-8 -*- Render an appropriate HTML response for an action. This is a generic renderer backend which produces HTML responses. It uses the name of the resource and current action to generate a template name, then renders the template with a `RequestContext`. To retrieve the template name, the resource name is first turned from CamelCase to lowercase_underscore_separated; if the class name ends in `Resource`, this is first removed from the end. For example: User => user UserResource => user NameXYZ => name_xyz XYZName => xyz_name You can optionally define a template path prefix on your `Resource` like so: class User(Resource): template_path_prefix = 'auth/' # ... The template name is assembled from the template path prefix, the re-formatted resource name, and the current action name. So, for a `User` resource, with `template_path_prefix = 'auth/'`, and an action of `show`, the template name would be: auth/user/show.html Finally, this is rendered using `render_to_response()`. The resource is passed into the context as `self`, so that attribute assignments from the action will be available in the template. This also uses `RequestContext`, so configured context processors will also be available. | 2.458446 | 2 |
Sentimental-readability/readability.py | Descalzo404/CS50-Problem-Sets | 0 | 6614935 | from cs50 import get_string
def main():
text = get_string("Text: ")
if (Coleman_Liau_index(text) >= 16):
print("Grade 16+")
elif (Coleman_Liau_index(text) < 1):
print("Before Grade 1")
else:
print("Grade", Coleman_Liau_index(text))
def Coleman_Liau_index(string):
    """Compute the rounded Coleman-Liau readability index of *string*.

    Letters are alphabetic characters; a new word starts at the first
    letter after a space or sentence terminator ('.', '?', '!'); every
    terminator counts as one sentence.  The index is
    ``0.0588 * L - 0.296 * S - 15.8`` where L and S are letters and
    sentences per 100 words.
    """
    terminators = ('.', '?', '!')
    letter_count = 0
    word_count = 0
    sentence_count = 0
    in_word = False
    for ch in string:
        if ch.isalpha():
            letter_count += 1
            if not in_word:
                # first letter after a separator starts a new word
                word_count += 1
                in_word = True
        elif ch in terminators:
            sentence_count += 1
            in_word = False
        elif ch == " ":
            in_word = False
        # any other character (digits, apostrophes, ...) is ignored and
        # does not break the current word
    L = (100 * letter_count) / (word_count)
    S = (100 * sentence_count) / (word_count)
    return round(0.0588 * L - 0.296 * S - 15.8)
main()
| from cs50 import get_string
def main():
text = get_string("Text: ")
if (Coleman_Liau_index(text) >= 16):
print("Grade 16+")
elif (Coleman_Liau_index(text) < 1):
print("Before Grade 1")
else:
print("Grade", Coleman_Liau_index(text))
def Coleman_Liau_index(string):
stop = ('.', '?', '!')
words = 0
letters = 0
sentences = 0
checkword = 0
for c in string:
if c.isalpha() == True and checkword == 1:
checkword = 1
letters += 1
elif c.isalpha() == True and checkword == 0:
letters += 1
checkword = 1
words += 1
elif c in stop:
sentences += 1
checkword = 0
elif c == " ":
checkword = 0
letters = (100 * letters) / (words)
sentences = (100 * sentences) / (words)
index = 0.0588 * letters - 0.296 * sentences - 15.8
return round(index)
main()
| none | 1 | 3.60931 | 4 | |
src/mesonic/events.py | dreinsch/mesonic | 0 | 6614936 | <filename>src/mesonic/events.py
from enum import Enum, unique
from typing import TYPE_CHECKING, Callable, Dict
from attr import define, field, validators
if TYPE_CHECKING:
from mesonic.record import Record
from mesonic.synth import Synth
@define(kw_only=True)
class Event:
    """Event base class."""

    track: int = 0
    """The Timeline track of the Event."""
    info: Dict = field(factory=dict, validator=validators.instance_of(dict))
    """Additional information about this Event."""

    def reverse(self) -> "Event":
        """Reverse the Event.

        This is useful when executing the Events backwards or reversing/undoing them.

        Returns
        -------
        Event
            reversed Event
        """
        # The base Event has no direction-dependent state; subclasses
        # override this to produce a genuinely reversed event.
        return self
@unique
class SynthEventType(Enum):
    """SynthEventTypes describes what different SynthEvents are possible."""

    # Paired types use opposite-signed values so reverse() is a simple
    # negation; SET (0) is its own inverse.
    START = 1
    STOP = -1
    PAUSE = 2
    RESUME = -2
    SET = 0

    def reverse(self) -> "SynthEventType":
        """Reverse the SynthEventType

        START <-> STOP
        PAUSE <-> RESUME
        SET <-> SET

        Returns
        -------
        SynthEventType
            the reversed SynthEventType
        """
        return SynthEventType(-self.value)
@define(kw_only=True)
class SynthEvent(Event):
    """Events created by a Synth instance."""

    synth: "Synth" = field(repr=False)
    """The respective Synth instance."""
    etype: SynthEventType = field(validator=validators.in_(SynthEventType))
    """The type of SynthEvent."""
    data: Dict = field(factory=dict)
    """Information about the SynthEvent."""

    def __attrs_post_init__(self):
        # will be executed by attrs after init
        # and is used for validating the whole instance
        # https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization
        # SET events must carry the parameter name plus both values so
        # they can be reversed later.
        if self.etype is SynthEventType.SET:
            for must_have in ["name", "old_value", "new_value"]:
                if must_have not in self.data:
                    raise ValueError(f"data is missing key '{must_have}'")

    def reverse(self) -> "SynthEvent":
        """Reverse the SynthEvent by adjusting the etype and data.

        Returns
        -------
        SynthEvent
            the reversed SynthEvent
        """
        data = self.data.copy()
        if self.etype is SynthEventType.SET:
            # swap old/new so replaying the event undoes the change
            data.update(
                new_value=self.data["old_value"],
                old_value=self.data["new_value"],
            )
        # NOTE(review): track and info are not forwarded to the reversed
        # event and fall back to their defaults -- confirm this is intended.
        return SynthEvent(
            synth=self.synth,
            etype=self.etype.reverse(),
            data=data,
        )
@unique
class RecordEventType(Enum):
    """RecordEventType describes what different RecordEvents are possible."""

    # Paired types use opposite-signed values so reverse() is a simple
    # negation.
    START = 1
    STOP = -1
    PAUSE = 2
    RESUME = -2

    def reverse(self) -> "RecordEventType":
        """Reverse the RecordEventType

        START <-> STOP
        PAUSE <-> RESUME

        Returns
        -------
        RecordEventType
            the reversed RecordEventType
        """
        return RecordEventType(-self.value)
@define(kw_only=True)
class RecordEvent(Event):
    """Event created by a Record instance."""

    record: "Record"
    """The respective Record instance."""
    etype: RecordEventType
    """The type of RecordEvent."""
    # NOTE(review): reverse() is inherited from Event and returns self
    # without flipping etype (unlike SynthEvent) -- confirm intended.
@define(kw_only=True)
class GenericEvent(Event):
    """A Generic Event that has two callables."""

    action: Callable
    """The action to be executed when not reversed"""
    reverse_action: Callable
    """The action to be executed when reversed"""
    # NOTE(review): reverse() is inherited from Event and returns self
    # unchanged; presumably the executor chooses action vs reverse_action
    # based on playback direction -- confirm.
| <filename>src/mesonic/events.py
from enum import Enum, unique
from typing import TYPE_CHECKING, Callable, Dict
from attr import define, field, validators
if TYPE_CHECKING:
from mesonic.record import Record
from mesonic.synth import Synth
@define(kw_only=True)
class Event:
"""Event base class."""
track: int = 0
"""The Timeline track of the Event."""
info: Dict = field(factory=dict, validator=validators.instance_of(dict))
"""Additional information about this Event."""
def reverse(self) -> "Event":
"""Reverse the Event.
This is useful when executing the Events backwards or reversing/undoing them.
Returns
-------
Event
reversed Event
"""
return self
@unique
class SynthEventType(Enum):
"""SynthEventTypes describes what different SynthEvents are possible."""
START = 1
STOP = -1
PAUSE = 2
RESUME = -2
SET = 0
def reverse(self) -> "SynthEventType":
"""Reverse the SynthEventType
START <-> STOP
PAUSE <-> RESUME
SET <-> SET
Returns
-------
SynthEventType
the reversed SynthEventType
"""
return SynthEventType(-self.value)
@define(kw_only=True)
class SynthEvent(Event):
"""Events created by a Synth instance."""
synth: "Synth" = field(repr=False)
"""The respective Synth instance."""
etype: SynthEventType = field(validator=validators.in_(SynthEventType))
"""The type of SynthEvent."""
data: Dict = field(factory=dict)
"""Information about the SynthEvent."""
def __attrs_post_init__(self):
# will be executed by attrs after init
# and is used for validating the whole instance
# https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization
if self.etype is SynthEventType.SET:
for must_have in ["name", "old_value", "new_value"]:
if must_have not in self.data:
raise ValueError(f"data is missing key '{must_have}'")
def reverse(self) -> "SynthEvent":
"""Reverse the SynthEvent by adjusting the etype and data.
Returns
-------
SynthEvent
the reversed SynthEvent
"""
data = self.data.copy()
if self.etype is SynthEventType.SET:
data.update(
new_value=self.data["old_value"],
old_value=self.data["new_value"],
)
return SynthEvent(
synth=self.synth,
etype=self.etype.reverse(),
data=data,
)
@unique
class RecordEventType(Enum):
"""SynthEventTypes describes what different SynthEvents are possible."""
START = 1
STOP = -1
PAUSE = 2
RESUME = -2
def reverse(self) -> "RecordEventType":
"""Reverse the RecordEventType
START <-> STOP
PAUSE <-> RESUME
Returns
-------
RecordEventType
the reversed RecordEventType
"""
return RecordEventType(-self.value)
@define(kw_only=True)
class RecordEvent(Event):
"""Event created by a Record instance."""
record: "Record"
"""The respective Record instance."""
etype: RecordEventType
"""The type of RecordEvent."""
@define(kw_only=True)
class GenericEvent(Event):
"""A Generic Event that has two callables."""
action: Callable
"""The action to be executed when not reversed"""
reverse_action: Callable
"""The action to be executed when reversed"""
| en | 0.810119 | Event base class. The Timeline track of the Event. Additional information about this Event. Reverse the Event. This is useful when executing the Events backwards or reversing/undoing them. Returns ------- Event reversed Event SynthEventTypes describes what different SynthEvents are possible. Reverse the SynthEventType START <-> STOP PAUSE <-> RESUME SET <-> SET Returns ------- SynthEventType the reversed SynthEventType Events created by a Synth instance. The respective Synth instance. The type of SynthEvent. Information about the SynthEvent. # will be executed by attrs after init # and is used for validating the whole instance # https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization Reverse the SynthEvent by adjusting the etype and data. Returns ------- SynthEvent the reversed SynthEvent SynthEventTypes describes what different SynthEvents are possible. Reverse the RecordEventType START <-> STOP PAUSE <-> RESUME Returns ------- RecordEventType the reversed RecordEventType Event created by a Record instance. The respective Record instance. The type of RecordEvent. A Generic Event that has two callables. The action to be executed when not reversed The action to be executed when reversed | 2.863183 | 3 |
cli/asset_commands.py | blockchain-Bobby/Iroha-CLI | 2 | 6614937 | import click
from .iroha_helpers import *
#ASSET COMMANDS#
def new_asset():
    """Interactively collect name, domain and precision, then create the asset."""
    asset = click.prompt("New Asset Name")
    domain = click.prompt("Domain")
    precision = click.prompt("Precision",type=int)
    create_new_asset(asset,domain,precision)
def new_asset_transfer(account_id):
    """Interactively build and submit an asset transfer.

    *account_id* is only used as the default answer for the
    source-account prompt.
    """
    src_account_id = click.prompt("Source Account",default=account_id)
    recipient = click.prompt("Recipient")
    asset_id = click.prompt("AssetID : asset#domain")
    qty = click.prompt("Total Amount to Send")
    description = click.prompt("Enter Transaction Details")
    transfer_asset(src_account_id,recipient,asset_id,description,qty)
def increase_asset_qty():
    """Prompt for an asset id and an amount, then add to its supply."""
    asset_id = click.prompt("AssetID : asset#domain")
    qty = click.prompt("Qty To Add")
    add_asset_qty(asset_id,qty)
def decrease_asset_qty():
    """Prompt for an asset id and an amount, then subtract from its supply."""
    asset_id = click.prompt("AssetID : asset#domain")
    qty = click.prompt("Qty To Subtract")
    subtract_asset_qty(asset_id,qty)
#ASSET QUERIES
def view_account_asset_balance(account_id):
    """Show the asset balances of an account (prompt defaults to *account_id*)."""
    account_id = click.prompt("Account To Use : Username@domain",default=account_id)
    get_account_assets(account_id)
def grant_asset_read_permission(account_id):
    """Grant another account permission to read the chosen account's data."""
    account_id = click.prompt("Account To Use : Username@domain",default=account_id)
    contact = click.prompt("Username@domain Your Write Acc Granting Permission")
    grant_account_read_permission(creator_account=account_id,contact=contact)
def query_asset_tx_history(account_id):
    """Print recent transactions for the chosen account (default limit 50)."""
    account_id = click.prompt("Account To Use : Username@domain",default=account_id)
    total = click.prompt("Total Txs to return",default=50)
    get_acc_tx_history(creator_account=account_id,total=total)
#def query_domain_assets():
# click.echo("Checking For Pending Transactions That Require Signatures")
# get_domain_assets() | import click
from .iroha_helpers import *
#ASSET COMMANDS#
def new_asset():
asset = click.prompt("New Asset Name")
domain = click.prompt("Domain")
precision = click.prompt("Precision",type=int)
create_new_asset(asset,domain,precision)
def new_asset_transfer(account_id):
src_account_id = click.prompt("Source Account",default=account_id)
recipient = click.prompt("Recipient")
asset_id = click.prompt("AssetID : asset#domain")
qty = click.prompt("Total Amount to Send")
description = click.prompt("Enter Transaction Details")
transfer_asset(src_account_id,recipient,asset_id,description,qty)
def increase_asset_qty():
asset_id = click.prompt("AssetID : asset#domain")
qty = click.prompt("Qty To Add")
add_asset_qty(asset_id,qty)
def decrease_asset_qty():
asset_id = click.prompt("AssetID : asset#domain")
qty = click.prompt("Qty To Subtract")
subtract_asset_qty(asset_id,qty)
#ASSET QUERIES
def view_account_asset_balance(account_id):
account_id = click.prompt("Account To Use : Username@domain",default=account_id)
get_account_assets(account_id)
def grant_asset_read_permission(account_id):
account_id = click.prompt("Account To Use : Username@domain",default=account_id)
contact = click.prompt("Username@domain Your Write Acc Granting Permission")
grant_account_read_permission(creator_account=account_id,contact=contact)
def query_asset_tx_history(account_id):
account_id = click.prompt("Account To Use : Username@domain",default=account_id)
total = click.prompt("Total Txs to return",default=50)
get_acc_tx_history(creator_account=account_id,total=total)
#def query_domain_assets():
# click.echo("Checking For Pending Transactions That Require Signatures")
# get_domain_assets() | en | 0.152886 | #ASSET COMMANDS# #domain") #domain") #domain") #ASSET QUERIES #def query_domain_assets(): # click.echo("Checking For Pending Transactions That Require Signatures") # get_domain_assets() | 2.515549 | 3 |
hwt/serializer/simModel/serializer.py | mgielda/hwt | 0 | 6614938 | from copy import copy
from jinja2.environment import Environment
from jinja2.loaders import PackageLoader
from hwt.hdl.architecture import Architecture
from hwt.hdl.assignment import Assignment
from hwt.hdl.constants import SENSITIVITY
from hwt.hdl.ifContainter import IfContainer
from hwt.hdl.operator import Operator
from hwt.hdl.operatorDefs import AllOps, sensitivityByOp
from hwt.hdl.process import HWProcess
from hwt.hdl.switchContainer import SwitchContainer
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.enum import HEnum
from hwt.hdl.types.enumVal import HEnumVal
from hwt.hdl.types.typeCast import toHVal
from hwt.pyUtils.arrayQuery import arr_any
from hwt.serializer.exceptions import SerializerException
from hwt.serializer.generic.constCache import ConstCache
from hwt.serializer.generic.context import SerializerCtx
from hwt.serializer.generic.indent import getIndent
from hwt.serializer.generic.nameScope import LangueKeyword
from hwt.serializer.generic.serializer import GenericSerializer
from hwt.serializer.simModel.keywords import SIMMODEL_KEYWORDS
from hwt.serializer.simModel.ops import SimModelSerializer_ops
from hwt.serializer.simModel.types import SimModelSerializer_types
from hwt.serializer.simModel.value import SimModelSerializer_value
from hwt.serializer.utils import maxStmId
from hwt.synthesizer.param import evalParam
# Jinja2 templates used to render the generated simulator model:
# the model class wrapper, one process function, and an if statement.
env = Environment(loader=PackageLoader('hwt', 'serializer/simModel/templates'))
unitTmpl = env.get_template('modelCls.py.template')
processTmpl = env.get_template('process.py.template')
ifTmpl = env.get_template("if.py.template")
class SimModelSerializer(SimModelSerializer_value, SimModelSerializer_ops,
                         SimModelSerializer_types, GenericSerializer):
    """
    Serializer which converts Unit instances to simulator code
    """
    # identifiers that must not be used as names in the generated code
    _keywords_dict = {kw: LangueKeyword() for kw in SIMMODEL_KEYWORDS}
    fileExtension = '.py'

    @classmethod
    def serializationDecision(cls, obj, serializedClasses,
                              serializedConfiguredUnits):
        """Decide whether *obj* should be serialized; always True here."""
        # we need all instances for simulation
        return True

    @classmethod
    def stmAsHdl(cls, obj, ctx: SerializerCtx):
        """Serialize a statement by dispatching on its class name.

        Raises NotImplementedError when no serializer method named after
        the statement's class exists on this serializer.
        """
        try:
            serFn = getattr(cls, obj.__class__.__name__)
        except AttributeError:
            raise NotImplementedError("Not implemented for %s" % (repr(obj)))
        return serFn(obj, ctx)

    @classmethod
    def Architecture(cls, arch: Architecture, ctx: SerializerCtx):
        """Render a whole Architecture as a simulator model class.

        Registers names in the scope, serializes extra (enum) type
        definitions, signals, collected constants and processes, then
        fills the unit template.
        """
        cls.Entity_prepare(arch.entity, ctx, serialize=False)
        variables = []
        procs = []
        extraTypes = set()
        extraTypes_serialized = []
        # sort so the generated declarations have a stable order
        arch.variables.sort(key=lambda x: (x.name, x._instId))
        arch.processes.sort(key=lambda x: (x.name, maxStmId(x)))
        arch.componentInstances.sort(key=lambda x: x._name)

        ports = list(
            map(lambda p: (p.name, cls.HdlType(p._dtype, ctx)),
                arch.entity.ports))

        for v in arch.variables:
            t = v._dtype
            # if type requires extra definition
            if isinstance(t, HEnum) and t not in extraTypes:
                extraTypes.add(v._dtype)
                extraTypes_serialized.append(
                    cls.HdlType(t, ctx, declaration=True))
            v.name = ctx.scope.checkedName(v.name, v)
            variables.append(v)

        # child context collects constants discovered while serializing
        # the processes below
        childCtx = copy(ctx)
        childCtx.constCache = ConstCache(ctx.scope.checkedName)

        def serializeVar(v):
            # -> (name, serialized type, serialized default value)
            dv = evalParam(v.defVal)
            if isinstance(dv, HEnumVal):
                dv = "%s.%s" % (dv._dtype.name, dv.val)
            else:
                dv = cls.Value(dv, ctx)
            return v.name, cls.HdlType(v._dtype, childCtx), dv

        for p in arch.processes:
            procs.append(cls.HWProcess(p, childCtx))

        # render constants which were discovered during serialization
        constants = []
        for c in sorted(childCtx.constCache._cache.items(), key=lambda x: x[1],
                        reverse=True):
            constants.append((c[1], cls.Value(c[0], ctx)))

        return unitTmpl.render(
            name=arch.getEntityName(),
            constants=constants,
            ports=ports,
            signals=list(map(serializeVar, variables)),
            extraTypes=extraTypes_serialized,
            processes=procs,
            processObjects=arch.processes,
            processesNames=map(lambda p: p.name, arch.processes),
            componentInstances=arch.componentInstances,
            isOp=lambda x: isinstance(x, Operator),
            sensitivityByOp=sensitivityByOp,
            serialize_io=cls.sensitivityListItem,
        )

    @classmethod
    def Assignment(cls, a: Assignment, ctx: SerializerCtx):
        """Serialize an Assignment as an ``io.<dst> = (...)`` tuple string.

        The tuple encodes the new value, optional destination indexes and
        the event-dependency flag.  Raises SerializerException for an
        assignment between incompatible types.
        """
        dst = a.dst
        indentStr = getIndent(ctx.indent)
        ev = a._is_completly_event_dependent
        srcStr = "%s" % cls.Value(a.src, ctx)

        if a.indexes is not None:
            # indexed assignment: include the serialized index tuple
            return "%sio.%s = (%s, (%s,), %s)" % (
                indentStr, dst.name, srcStr,
                ", ".join(map(lambda x: cls.asHdl(x, ctx),
                              a.indexes)),
                ev)
        else:
            if not (dst._dtype == a.src._dtype):
                srcT = a.src._dtype
                dstT = dst._dtype
                # a 1-bit vector <-> 1-bit scalar mismatch can be bridged
                if (isinstance(srcT, Bits) and
                        isinstance(dstT, Bits) and
                        srcT.bit_length() == dstT.bit_length() == 1):
                    if srcT.forceVector != dstT.forceVector:
                        _0 = cls.Value(toHVal(0), ctx)
                        if srcT.forceVector:
                            # take bit 0 out of the 1-bit vector source
                            return "%sio.%s = ((%s)._getitem__val(%s), %s)"\
                                % (indentStr, dst.name, srcStr, _0, ev)
                        else:
                            # assign the scalar into bit 0 of the vector
                            return "%sio.%s = (%s, (%s,), %s)" % (
                                indentStr, dst.name, srcStr, _0, ev)
                raise SerializerException(
                    ("%s <= %s is not valid assignment\n"
                     " because types are different (%r; %r) ") %
                    (cls.asHdl(dst, ctx), srcStr,
                     dst._dtype, a.src._dtype))
            else:
                return "%sio.%s = (%s, %s)" % (
                    indentStr, dst.name, srcStr, ev)

    @classmethod
    def comment(cls, comentStr: str):
        """Render *comentStr* as a Python comment, prefixing every line."""
        return "#" + comentStr.replace("\n", "\n#")

    @classmethod
    def IfContainer(cls, ifc: IfContainer, ctx: SerializerCtx):
        """Serialize an if statement.

        elif branches are first rewritten into nested if/else containers,
        then the plain if/else form is rendered with the if template.
        """
        cond = cls.condAsHdl(ifc.cond, ctx)
        ifTrue = ifc.ifTrue

        if ifc.elIfs:
            # replace elifs with nested if statements
            ifFalse = []
            topIf = IfContainer(ifc.cond, ifc.ifTrue, ifFalse)
            topIf._inputs = ifc._inputs
            topIf._outputs = ifc._outputs
            topIf._sensitivity = ifc._sensitivity

            for c, stms in ifc.elIfs:
                _ifFalse = []

                lastIf = IfContainer(c, stms, _ifFalse)
                lastIf._inputs = ifc._inputs
                lastIf._outputs = ifc._outputs
                lastIf._sensitivity = ifc._sensitivity

                ifFalse.append(lastIf)
                ifFalse = _ifFalse

            if ifc.ifFalse is None:
                lastIf.ifFalse = []
            else:
                lastIf.ifFalse = ifc.ifFalse

            # recurse on the rewritten elif-free statement
            return cls.IfContainer(topIf, ctx)
        else:
            ifFalse = ifc.ifFalse
            if ifFalse is None:
                ifFalse = []

            childCtx = ctx.withIndent()
            # assign an invalid (None-derived) value to every driven
            # output before the branches run
            outputInvalidateStms = []
            for o in ifc._outputs:
                # [TODO] look up indexes
                indexes = None
                oa = Assignment(o._dtype.fromPy(None), o, indexes,
                                virtualOnly=True, parentStm=ifc,
                                is_completly_event_dependent=ifc._is_completly_event_dependent)
                outputInvalidateStms.append(cls.stmAsHdl(oa, childCtx))

            return ifTmpl.render(
                indent=getIndent(ctx.indent),
                indentNum=ctx.indent,
                cond=cond,
                outputInvalidateStms=outputInvalidateStms,
                ifTrue=tuple(map(
                    lambda obj: cls.stmAsHdl(obj, childCtx),
                    ifTrue)),
                ifFalse=tuple(map(
                    lambda obj: cls.stmAsHdl(obj, childCtx),
                    ifFalse)))

    @classmethod
    def SwitchContainer(cls, sw: SwitchContainer,
                        ctx: SerializerCtx):
        """Serialize a switch by rewriting it as an equivalent if/elif chain."""
        switchOn = sw.switchOn

        def mkCond(c):
            # condition for one case label: switchOn == c
            return switchOn._eq(c)
        elIfs = []

        for key, statements in sw.cases[1:]:
            elIfs.append((mkCond(key), statements))
        ifFalse = sw.default

        topCond = mkCond(sw.cases[0][0])
        topIf = IfContainer(topCond,
                            ifTrue=sw.cases[0][1],
                            ifFalse=ifFalse,
                            elIfs=elIfs)
        topIf._sensitivity = sw._sensitivity
        topIf._inputs = sw._inputs
        topIf._outputs = sw._outputs
        return cls.IfContainer(topIf, ctx)

    @classmethod
    def sensitivityListItem(cls, item):
        """Serialize one sensitivity-list entry.

        Plain signals render as their name; rising/falling edge operators
        render as a ``(SENSITIVITY, signal_name)`` pair.  Raises TypeError
        for operators that are not edge sensitivities.
        """
        if isinstance(item, Operator):
            op = item.operator
            if op == AllOps.RISING_EDGE:
                sens = SENSITIVITY.RISING
            elif op == AllOps.FALLING_EDGE:
                sens = SENSITIVITY.FALLING
            else:
                raise TypeError("This is not an event sensitivity", op)
            return "(%s, %s)" % (str(sens), item.operands[0].name)
        else:
            return item.name

    @classmethod
    def HWProcess(cls, proc: HWProcess, ctx: SerializerCtx):
        """Serialize a process: register its name, then render its
        sensitivity list and body with the process template."""
        body = proc.statements
        assert body
        proc.name = ctx.scope.checkedName(proc.name, proc)

        sensitivityList = sorted(
            map(cls.sensitivityListItem, proc.sensitivityList))

        childCtx = ctx.withIndent(2)
        _body = "\n".join([
            cls.stmAsHdl(stm, childCtx)
            for stm in body])

        return processTmpl.render(
            hasConditions=arr_any(
                body, lambda stm: not isinstance(stm, Assignment)),
            name=proc.name,
            sensitivityList=sensitivityList,
            stmLines=[_body]
        )
| from copy import copy
from jinja2.environment import Environment
from jinja2.loaders import PackageLoader
from hwt.hdl.architecture import Architecture
from hwt.hdl.assignment import Assignment
from hwt.hdl.constants import SENSITIVITY
from hwt.hdl.ifContainter import IfContainer
from hwt.hdl.operator import Operator
from hwt.hdl.operatorDefs import AllOps, sensitivityByOp
from hwt.hdl.process import HWProcess
from hwt.hdl.switchContainer import SwitchContainer
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.enum import HEnum
from hwt.hdl.types.enumVal import HEnumVal
from hwt.hdl.types.typeCast import toHVal
from hwt.pyUtils.arrayQuery import arr_any
from hwt.serializer.exceptions import SerializerException
from hwt.serializer.generic.constCache import ConstCache
from hwt.serializer.generic.context import SerializerCtx
from hwt.serializer.generic.indent import getIndent
from hwt.serializer.generic.nameScope import LangueKeyword
from hwt.serializer.generic.serializer import GenericSerializer
from hwt.serializer.simModel.keywords import SIMMODEL_KEYWORDS
from hwt.serializer.simModel.ops import SimModelSerializer_ops
from hwt.serializer.simModel.types import SimModelSerializer_types
from hwt.serializer.simModel.value import SimModelSerializer_value
from hwt.serializer.utils import maxStmId
from hwt.synthesizer.param import evalParam
env = Environment(loader=PackageLoader('hwt', 'serializer/simModel/templates'))
unitTmpl = env.get_template('modelCls.py.template')
processTmpl = env.get_template('process.py.template')
ifTmpl = env.get_template("if.py.template")
class SimModelSerializer(SimModelSerializer_value, SimModelSerializer_ops,
SimModelSerializer_types, GenericSerializer):
"""
Serializer which converts Unit instances to simulator code
"""
_keywords_dict = {kw: LangueKeyword() for kw in SIMMODEL_KEYWORDS}
fileExtension = '.py'
@classmethod
def serializationDecision(cls, obj, serializedClasses,
serializedConfiguredUnits):
# we need all instances for simulation
return True
@classmethod
def stmAsHdl(cls, obj, ctx: SerializerCtx):
try:
serFn = getattr(cls, obj.__class__.__name__)
except AttributeError:
raise NotImplementedError("Not implemented for %s" % (repr(obj)))
return serFn(obj, ctx)
@classmethod
def Architecture(cls, arch: Architecture, ctx: SerializerCtx):
cls.Entity_prepare(arch.entity, ctx, serialize=False)
variables = []
procs = []
extraTypes = set()
extraTypes_serialized = []
arch.variables.sort(key=lambda x: (x.name, x._instId))
arch.processes.sort(key=lambda x: (x.name, maxStmId(x)))
arch.componentInstances.sort(key=lambda x: x._name)
ports = list(
map(lambda p: (p.name, cls.HdlType(p._dtype, ctx)),
arch.entity.ports))
for v in arch.variables:
t = v._dtype
# if type requires extra definition
if isinstance(t, HEnum) and t not in extraTypes:
extraTypes.add(v._dtype)
extraTypes_serialized.append(
cls.HdlType(t, ctx, declaration=True))
v.name = ctx.scope.checkedName(v.name, v)
variables.append(v)
childCtx = copy(ctx)
childCtx.constCache = ConstCache(ctx.scope.checkedName)
def serializeVar(v):
dv = evalParam(v.defVal)
if isinstance(dv, HEnumVal):
dv = "%s.%s" % (dv._dtype.name, dv.val)
else:
dv = cls.Value(dv, ctx)
return v.name, cls.HdlType(v._dtype, childCtx), dv
for p in arch.processes:
procs.append(cls.HWProcess(p, childCtx))
constants = []
for c in sorted(childCtx.constCache._cache.items(), key=lambda x: x[1],
reverse=True):
constants.append((c[1], cls.Value(c[0], ctx)))
return unitTmpl.render(
name=arch.getEntityName(),
constants=constants,
ports=ports,
signals=list(map(serializeVar, variables)),
extraTypes=extraTypes_serialized,
processes=procs,
processObjects=arch.processes,
processesNames=map(lambda p: p.name, arch.processes),
componentInstances=arch.componentInstances,
isOp=lambda x: isinstance(x, Operator),
sensitivityByOp=sensitivityByOp,
serialize_io=cls.sensitivityListItem,
)
@classmethod
def Assignment(cls, a: Assignment, ctx: SerializerCtx):
dst = a.dst
indentStr = getIndent(ctx.indent)
ev = a._is_completly_event_dependent
srcStr = "%s" % cls.Value(a.src, ctx)
if a.indexes is not None:
return "%sio.%s = (%s, (%s,), %s)" % (
indentStr, dst.name, srcStr,
", ".join(map(lambda x: cls.asHdl(x, ctx),
a.indexes)),
ev)
else:
if not (dst._dtype == a.src._dtype):
srcT = a.src._dtype
dstT = dst._dtype
if (isinstance(srcT, Bits) and
isinstance(dstT, Bits) and
srcT.bit_length() == dstT.bit_length() == 1):
if srcT.forceVector != dstT.forceVector:
_0 = cls.Value(toHVal(0), ctx)
if srcT.forceVector:
return "%sio.%s = ((%s)._getitem__val(%s), %s)"\
% (indentStr, dst.name, srcStr, _0, ev)
else:
return "%sio.%s = (%s, (%s,), %s)" % (
indentStr, dst.name, srcStr, _0, ev)
raise SerializerException(
("%s <= %s is not valid assignment\n"
" because types are different (%r; %r) ") %
(cls.asHdl(dst, ctx), srcStr,
dst._dtype, a.src._dtype))
else:
return "%sio.%s = (%s, %s)" % (
indentStr, dst.name, srcStr, ev)
@classmethod
def comment(cls, comentStr: str):
return "#" + comentStr.replace("\n", "\n#")
@classmethod
def IfContainer(cls, ifc: IfContainer, ctx: SerializerCtx):
cond = cls.condAsHdl(ifc.cond, ctx)
ifTrue = ifc.ifTrue
if ifc.elIfs:
# replace elifs with nested if statements
ifFalse = []
topIf = IfContainer(ifc.cond, ifc.ifTrue, ifFalse)
topIf._inputs = ifc._inputs
topIf._outputs = ifc._outputs
topIf._sensitivity = ifc._sensitivity
for c, stms in ifc.elIfs:
_ifFalse = []
lastIf = IfContainer(c, stms, _ifFalse)
lastIf._inputs = ifc._inputs
lastIf._outputs = ifc._outputs
lastIf._sensitivity = ifc._sensitivity
ifFalse.append(lastIf)
ifFalse = _ifFalse
if ifc.ifFalse is None:
lastIf.ifFalse = []
else:
lastIf.ifFalse = ifc.ifFalse
return cls.IfContainer(topIf, ctx)
else:
ifFalse = ifc.ifFalse
if ifFalse is None:
ifFalse = []
childCtx = ctx.withIndent()
outputInvalidateStms = []
for o in ifc._outputs:
# [TODO] look up indexes
indexes = None
oa = Assignment(o._dtype.fromPy(None), o, indexes,
virtualOnly=True, parentStm=ifc,
is_completly_event_dependent=ifc._is_completly_event_dependent)
outputInvalidateStms.append(cls.stmAsHdl(oa, childCtx))
return ifTmpl.render(
indent=getIndent(ctx.indent),
indentNum=ctx.indent,
cond=cond,
outputInvalidateStms=outputInvalidateStms,
ifTrue=tuple(map(
lambda obj: cls.stmAsHdl(obj, childCtx),
ifTrue)),
ifFalse=tuple(map(
lambda obj: cls.stmAsHdl(obj, childCtx),
ifFalse)))
@classmethod
def SwitchContainer(cls, sw: SwitchContainer,
ctx: SerializerCtx):
switchOn = sw.switchOn
def mkCond(c):
return switchOn._eq(c)
elIfs = []
for key, statements in sw.cases[1:]:
elIfs.append((mkCond(key), statements))
ifFalse = sw.default
topCond = mkCond(sw.cases[0][0])
topIf = IfContainer(topCond,
ifTrue=sw.cases[0][1],
ifFalse=ifFalse,
elIfs=elIfs)
topIf._sensitivity = sw._sensitivity
topIf._inputs = sw._inputs
topIf._outputs = sw._outputs
return cls.IfContainer(topIf, ctx)
@classmethod
def sensitivityListItem(cls, item):
if isinstance(item, Operator):
op = item.operator
if op == AllOps.RISING_EDGE:
sens = SENSITIVITY.RISING
elif op == AllOps.FALLING_EDGE:
sens = SENSITIVITY.FALLING
else:
raise TypeError("This is not an event sensitivity", op)
return "(%s, %s)" % (str(sens), item.operands[0].name)
else:
return item.name
@classmethod
def HWProcess(cls, proc: HWProcess, ctx: SerializerCtx):
body = proc.statements
assert body
proc.name = ctx.scope.checkedName(proc.name, proc)
sensitivityList = sorted(
map(cls.sensitivityListItem, proc.sensitivityList))
childCtx = ctx.withIndent(2)
_body = "\n".join([
cls.stmAsHdl(stm, childCtx)
for stm in body])
return processTmpl.render(
hasConditions=arr_any(
body, lambda stm: not isinstance(stm, Assignment)),
name=proc.name,
sensitivityList=sensitivityList,
stmLines=[_body]
)
| en | 0.685302 | Serializer which converts Unit instances to simulator code # we need all instances for simulation # if type requires extra definition #") # replace elifs with nested if statements # [TODO] look up indexes | 1.731542 | 2 |
vast_pipeline/tests/test_regression/test_epoch.py | askap-vast/vast-pipeline | 3 | 6614939 | <gh_stars>1-10
import os
import pandas as pd
import unittest
import glob
import shutil
from vast_pipeline.tests.test_regression import property_check, gen_config
from vast_pipeline.tests.test_regression.make_testdir import make_testdir
from django.conf import settings as s
from django.test import TestCase, override_settings
from django.core.management import call_command
TEST_ROOT = os.path.join(s.BASE_DIR, 'vast_pipeline', 'tests')
no_data = not glob.glob(os.path.join(TEST_ROOT, 'regression-data','EPOCH*'))
@unittest.skipIf(
no_data,
'The regression test data is missing, skipping regression tests'
)
@override_settings(
PIPELINE_WORKING_DIR=os.path.join(TEST_ROOT, 'pipeline-runs'),
)
class BasicEpochTest(TestCase):
'''
Test pipeline under epoch based basic association method returns expected
results.
'''
@classmethod
def setUpTestData(self):
'''
Set up directory to test data and run the pipeline.
'''
base_path = 'epoch-basic'
self.base_run = os.path.join(
s.PIPELINE_WORKING_DIR, base_path
)
# setup test directory
make_testdir(self.base_run)
gen_config.gen_config(
base_path,
s.PIPELINE_WORKING_DIR,
['01', '03x', '02', '05x', '06x']
)
call_command('runpipeline', self.base_run)
# read output
self.sources = pd.read_parquet(
os.path.join(self.base_run, 'sources.parquet')
)
self.relations = pd.read_parquet(
os.path.join(
self.base_run, 'relations.parquet'
)
)
# remove test directory
shutil.rmtree(self.base_run)
def test_num_sources(self):
'''
See documentation for test_num_sources in property_check.
'''
property_check.test_num_sources(self, self.sources, 616)
def test_most_relations(self):
'''
See documentation for test_most_relations in property_check.
'''
# this is the expected highest relation sources
expected = pd.DataFrame(
[[21.033441, -73.151101, 1],
[21.035019, -73.151512, 1],
[23.061180, -73.651803, 1],
[23.063015, -73.650433, 1],
[23.425469, -73.296979, 1],
[23.429945, -73.297484, 1],
[322.249559, -4.402759, 1],
[322.249615, -4.402745, 1],
[322.752246, -3.982728, 1],
[322.752994, -3.982975, 1],
[322.822412, -5.092524, 1],
[322.825119, -5.090515, 1],
[322.875352, -4.231587, 1],
[322.875452, -4.231785, 1],
[322.927896, -5.030347, 1],
[322.930617, -5.031158, 1]],
columns = ['wavg_ra', 'wavg_dec', 'relations']
)
property_check.test_most_relations(
self.relations, self.sources, 16, expected
)
def test_known_source(self):
'''
See documentation for test_known_source in property check.
'''
property_check.test_known_source(self, self.sources, 12.369)
@unittest.skipIf(
no_data,
'The regression test data is missing, skipping regression tests'
)
@override_settings(
PIPELINE_WORKING_DIR=os.path.join(TEST_ROOT, 'pipeline-runs'),
)
class AdvancedEpochTest(TestCase):
'''
Test pipeline under epoch based advanced association method returns
expected results.
'''
@classmethod
def setUpTestData(self):
'''
Set up directory to test data and run the pipeline.
'''
base_path = 'epoch-advanced'
self.base_run = os.path.join(
s.PIPELINE_WORKING_DIR, base_path
)
# setup test directory
make_testdir(self.base_run)
gen_config.gen_config(
base_path,
s.PIPELINE_WORKING_DIR,
['01', '03x', '02', '05x', '06x']
)
call_command('runpipeline', self.base_run)
# read output
self.sources = pd.read_parquet(
os.path.join(self.base_run, 'sources.parquet')
)
self.relations = pd.read_parquet(
os.path.join(
self.base_run, 'relations.parquet'
)
)
# remove test directory
shutil.rmtree(self.base_run)
def test_num_sources(self):
'''
See documentation for test_num_sources in property_check.
'''
property_check.test_num_sources(self, self.sources, 624)
def test_most_relations(self):
'''
See documentation for test_most_relations in property_check.
'''
# this is the expected highest relation sources
expected = pd.DataFrame(
[
[321.899517, -04.201971, 3],
[020.649051, -73.638252, 2]
],
columns=["wavg_ra", "wavg_dec", "relations"],
)
property_check.test_most_relations(self.relations, self.sources, 2, expected)
def test_known_source(self):
'''
See documentation for test_known_source in property_check.
'''
property_check.test_known_source(self, self.sources, 12.369)
@unittest.skipIf(
no_data,
'The regression test data is missing, skipping regression tests'
)
@override_settings(
PIPELINE_WORKING_DIR=os.path.join(TEST_ROOT, 'pipeline-runs'),
)
class DeruiterEpochTest(TestCase):
'''
Test pipeline under epoch based deruiter association method returns
expected results.
'''
@classmethod
def setUpTestData(self):
'''
Set up directory to test data and run the pipeline.
'''
base_path = 'epoch-deruiter'
self.base_run = os.path.join(
s.PIPELINE_WORKING_DIR, base_path
)
# setup test directory
make_testdir(self.base_run)
gen_config.gen_config(
base_path,
s.PIPELINE_WORKING_DIR,
['01', '03x', '02', '05x', '06x']
)
call_command('runpipeline', self.base_run)
# read output
self.sources = pd.read_parquet(
os.path.join(self.base_run, 'sources.parquet')
)
self.relations = pd.read_parquet(
os.path.join(
self.base_run, 'relations.parquet'
)
)
# remove test directory
shutil.rmtree(self.base_run)
def test_num_sources(self):
'''
See documentation for test_num_sources in property_check.
'''
property_check.test_num_sources(self, self.sources, 616)
def test_most_relations(self):
'''
See documentation for test_most_relations in property_check.
'''
# this is the expected highest relation sources
expected = pd.DataFrame(
[[322.752467, -3.982379, 4],
[322.752646, -3.982859, 4],
[322.752791, -3.982937, 4],
[322.752859, -3.983386, 4],
[322.753513, -3.985183, 4]],
columns=['wavg_ra', 'wavg_dec', 'relations']
)
property_check.test_most_relations(
self.relations, self.sources, 5, expected)
def test_known_source(self):
'''
See documentation for test_known_source in property_check.
'''
property_check.test_known_source(self, self.sources, 12.369)
| import os
import pandas as pd
import unittest
import glob
import shutil
from vast_pipeline.tests.test_regression import property_check, gen_config
from vast_pipeline.tests.test_regression.make_testdir import make_testdir
from django.conf import settings as s
from django.test import TestCase, override_settings
from django.core.management import call_command
TEST_ROOT = os.path.join(s.BASE_DIR, 'vast_pipeline', 'tests')
no_data = not glob.glob(os.path.join(TEST_ROOT, 'regression-data','EPOCH*'))
@unittest.skipIf(
no_data,
'The regression test data is missing, skipping regression tests'
)
@override_settings(
PIPELINE_WORKING_DIR=os.path.join(TEST_ROOT, 'pipeline-runs'),
)
class BasicEpochTest(TestCase):
'''
Test pipeline under epoch based basic association method returns expected
results.
'''
@classmethod
def setUpTestData(self):
'''
Set up directory to test data and run the pipeline.
'''
base_path = 'epoch-basic'
self.base_run = os.path.join(
s.PIPELINE_WORKING_DIR, base_path
)
# setup test directory
make_testdir(self.base_run)
gen_config.gen_config(
base_path,
s.PIPELINE_WORKING_DIR,
['01', '03x', '02', '05x', '06x']
)
call_command('runpipeline', self.base_run)
# read output
self.sources = pd.read_parquet(
os.path.join(self.base_run, 'sources.parquet')
)
self.relations = pd.read_parquet(
os.path.join(
self.base_run, 'relations.parquet'
)
)
# remove test directory
shutil.rmtree(self.base_run)
def test_num_sources(self):
'''
See documentation for test_num_sources in property_check.
'''
property_check.test_num_sources(self, self.sources, 616)
def test_most_relations(self):
'''
See documentation for test_most_relations in property_check.
'''
# this is the expected highest relation sources
expected = pd.DataFrame(
[[21.033441, -73.151101, 1],
[21.035019, -73.151512, 1],
[23.061180, -73.651803, 1],
[23.063015, -73.650433, 1],
[23.425469, -73.296979, 1],
[23.429945, -73.297484, 1],
[322.249559, -4.402759, 1],
[322.249615, -4.402745, 1],
[322.752246, -3.982728, 1],
[322.752994, -3.982975, 1],
[322.822412, -5.092524, 1],
[322.825119, -5.090515, 1],
[322.875352, -4.231587, 1],
[322.875452, -4.231785, 1],
[322.927896, -5.030347, 1],
[322.930617, -5.031158, 1]],
columns = ['wavg_ra', 'wavg_dec', 'relations']
)
property_check.test_most_relations(
self.relations, self.sources, 16, expected
)
def test_known_source(self):
'''
See documentation for test_known_source in property check.
'''
property_check.test_known_source(self, self.sources, 12.369)
@unittest.skipIf(
no_data,
'The regression test data is missing, skipping regression tests'
)
@override_settings(
PIPELINE_WORKING_DIR=os.path.join(TEST_ROOT, 'pipeline-runs'),
)
class AdvancedEpochTest(TestCase):
'''
Test pipeline under epoch based advanced association method returns
expected results.
'''
@classmethod
def setUpTestData(self):
'''
Set up directory to test data and run the pipeline.
'''
base_path = 'epoch-advanced'
self.base_run = os.path.join(
s.PIPELINE_WORKING_DIR, base_path
)
# setup test directory
make_testdir(self.base_run)
gen_config.gen_config(
base_path,
s.PIPELINE_WORKING_DIR,
['01', '03x', '02', '05x', '06x']
)
call_command('runpipeline', self.base_run)
# read output
self.sources = pd.read_parquet(
os.path.join(self.base_run, 'sources.parquet')
)
self.relations = pd.read_parquet(
os.path.join(
self.base_run, 'relations.parquet'
)
)
# remove test directory
shutil.rmtree(self.base_run)
def test_num_sources(self):
'''
See documentation for test_num_sources in property_check.
'''
property_check.test_num_sources(self, self.sources, 624)
def test_most_relations(self):
'''
See documentation for test_most_relations in property_check.
'''
# this is the expected highest relation sources
expected = pd.DataFrame(
[
[321.899517, -04.201971, 3],
[020.649051, -73.638252, 2]
],
columns=["wavg_ra", "wavg_dec", "relations"],
)
property_check.test_most_relations(self.relations, self.sources, 2, expected)
def test_known_source(self):
'''
See documentation for test_known_source in property_check.
'''
property_check.test_known_source(self, self.sources, 12.369)
@unittest.skipIf(
no_data,
'The regression test data is missing, skipping regression tests'
)
@override_settings(
PIPELINE_WORKING_DIR=os.path.join(TEST_ROOT, 'pipeline-runs'),
)
class DeruiterEpochTest(TestCase):
'''
Test pipeline under epoch based deruiter association method returns
expected results.
'''
@classmethod
def setUpTestData(self):
'''
Set up directory to test data and run the pipeline.
'''
base_path = 'epoch-deruiter'
self.base_run = os.path.join(
s.PIPELINE_WORKING_DIR, base_path
)
# setup test directory
make_testdir(self.base_run)
gen_config.gen_config(
base_path,
s.PIPELINE_WORKING_DIR,
['01', '03x', '02', '05x', '06x']
)
call_command('runpipeline', self.base_run)
# read output
self.sources = pd.read_parquet(
os.path.join(self.base_run, 'sources.parquet')
)
self.relations = pd.read_parquet(
os.path.join(
self.base_run, 'relations.parquet'
)
)
# remove test directory
shutil.rmtree(self.base_run)
def test_num_sources(self):
'''
See documentation for test_num_sources in property_check.
'''
property_check.test_num_sources(self, self.sources, 616)
def test_most_relations(self):
'''
See documentation for test_most_relations in property_check.
'''
# this is the expected highest relation sources
expected = pd.DataFrame(
[[322.752467, -3.982379, 4],
[322.752646, -3.982859, 4],
[322.752791, -3.982937, 4],
[322.752859, -3.983386, 4],
[322.753513, -3.985183, 4]],
columns=['wavg_ra', 'wavg_dec', 'relations']
)
property_check.test_most_relations(
self.relations, self.sources, 5, expected)
def test_known_source(self):
'''
See documentation for test_known_source in property_check.
'''
property_check.test_known_source(self, self.sources, 12.369) | en | 0.722894 | Test pipeline under epoch based basic association method returns expected results. Set up directory to test data and run the pipeline. # setup test directory # read output # remove test directory See documentation for test_num_sources in property_check. See documentation for test_most_relations in property_check. # this is the expected highest relation sources See documentation for test_known_source in property check. Test pipeline under epoch based advanced association method returns expected results. Set up directory to test data and run the pipeline. # setup test directory # read output # remove test directory See documentation for test_num_sources in property_check. See documentation for test_most_relations in property_check. # this is the expected highest relation sources See documentation for test_known_source in property_check. Test pipeline under epoch based deruiter association method returns expected results. Set up directory to test data and run the pipeline. # setup test directory # read output # remove test directory See documentation for test_num_sources in property_check. See documentation for test_most_relations in property_check. # this is the expected highest relation sources See documentation for test_known_source in property_check. | 1.967972 | 2 |
nlpcda/tools/Homophone.py | blmoistawinde/nlpcda | 959 | 6614940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from nlpcda.tools.Basetool import Basetool
from nlpcda.config import homophone_path
class Homophone(Basetool):
'''
同音-意字,用于大致不改变原文下,【字级别的】,增强数据
'''
def __init__(self, base_file=homophone_path, create_num=5, change_rate=0.05, seed=1):
super(Homophone, self).__init__(base_file, create_num, change_rate, seed)
def load_paser_base_file(self):
combine_dict = {}
for line in open(self.base_file, "r", encoding='utf-8'):
seperate_word = line.strip().split("\t")
num = len(seperate_word)
for i in range(1, num):
combine_dict[seperate_word[i]] = seperate_word[1:]
print('load :%s done' % (self.base_file))
return combine_dict
def replace(self, replace_str:str):
replace_str = replace_str.replace('\n', '').strip()
words = list(replace_str)
sentences = [replace_str]
t = 0
while len(sentences) < self.create_num:
t += 1
a_sentence = ''
for word in words:
if word in self.base_file_mapobj and self.random.random() < self.change_rate:
wi = self.random.randint(0, len(self.base_file_mapobj[word]) - 1)
place = self.base_file_mapobj[word][wi]
else:
place = word
a_sentence += place
if a_sentence not in sentences:
sentences.append(a_sentence)
if t > self.create_num * self.loop_t / self.change_rate:
break
return sentences
def test(test_str, create_num=10, change_rate=0.3):
hoe = Homophone(create_num=create_num, change_rate=change_rate)
try:
return hoe.replace(test_str)
except:
print('error in Homophone.replace')
return [test_str]
if __name__ == '__main__':
ts = '''这是一场疫情防控的人民战争、总体战、阻击战,习近平总书记亲自指挥、亲自部署。运筹帷幄 指挥若定始终把人民群众生命安全和身体健康放在第一位'''
rs = test(ts)
for s in rs:
print(s)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from nlpcda.tools.Basetool import Basetool
from nlpcda.config import homophone_path
class Homophone(Basetool):
'''
同音-意字,用于大致不改变原文下,【字级别的】,增强数据
'''
def __init__(self, base_file=homophone_path, create_num=5, change_rate=0.05, seed=1):
super(Homophone, self).__init__(base_file, create_num, change_rate, seed)
def load_paser_base_file(self):
combine_dict = {}
for line in open(self.base_file, "r", encoding='utf-8'):
seperate_word = line.strip().split("\t")
num = len(seperate_word)
for i in range(1, num):
combine_dict[seperate_word[i]] = seperate_word[1:]
print('load :%s done' % (self.base_file))
return combine_dict
def replace(self, replace_str:str):
replace_str = replace_str.replace('\n', '').strip()
words = list(replace_str)
sentences = [replace_str]
t = 0
while len(sentences) < self.create_num:
t += 1
a_sentence = ''
for word in words:
if word in self.base_file_mapobj and self.random.random() < self.change_rate:
wi = self.random.randint(0, len(self.base_file_mapobj[word]) - 1)
place = self.base_file_mapobj[word][wi]
else:
place = word
a_sentence += place
if a_sentence not in sentences:
sentences.append(a_sentence)
if t > self.create_num * self.loop_t / self.change_rate:
break
return sentences
def test(test_str, create_num=10, change_rate=0.3):
hoe = Homophone(create_num=create_num, change_rate=change_rate)
try:
return hoe.replace(test_str)
except:
print('error in Homophone.replace')
return [test_str]
if __name__ == '__main__':
ts = '''这是一场疫情防控的人民战争、总体战、阻击战,习近平总书记亲自指挥、亲自部署。运筹帷幄 指挥若定始终把人民群众生命安全和身体健康放在第一位'''
rs = test(ts)
for s in rs:
print(s)
| zh | 0.951627 | #!/usr/bin/python # -*- coding: utf-8 -*- 同音-意字,用于大致不改变原文下,【字级别的】,增强数据 这是一场疫情防控的人民战争、总体战、阻击战,习近平总书记亲自指挥、亲自部署。运筹帷幄 指挥若定始终把人民群众生命安全和身体健康放在第一位 | 3.060032 | 3 |
gym_brt/envs/reinforcementlearning_extensions/wrapper.py | Data-Science-in-Mechanical-Engineering/vision-based-furuta-pendulum | 0 | 6614941 | <reponame>Data-Science-in-Mechanical-Engineering/vision-based-furuta-pendulum
"""
Wrapper for OpenAI Gym Reinforcement Learning environments.
Designed to work well with the Qube-Servo 2 classes of this repository.
Each wrapper can be used in the following way:
```python
with QubeSwingupEnv() as env:
env = WrapperClass(env)
    ... (Normal steps like you would do without the wrapper, or additional wrapping)
```
@Author: <NAME>
"""
import typing as tp
import cv2
import numpy as np
from gym import Env, Wrapper, ObservationWrapper, spaces
from gym_brt.control import calibrate
from gym_brt.data.config.configuration import FREQUENCY
from gym_brt.envs.reinforcementlearning_extensions.rl_reward_functions import exp_swing_up_reward
try:
from gym_brt.quanser import QubeHardware
except ImportError:
print("Warning: Can not import QubeHardware in wrapper.py")
Array = tp.Union[tp.List, np.ndarray]
class ImageObservationWrapper(ObservationWrapper):
    """Wrapper that replaces the numeric state observation with a rendered image.

    Uses ``env.render('rgb_array')`` as the observation rather than the
    observation the wrapped environment provides.
    """

    def __init__(self, env: Env, out_shape: tp.Tuple = None) -> None:
        """
        Args:
            env: Gym environment to wrap around. Must be a simulation.
            out_shape: Output shape ``(width, height)`` of the image observation.
                If None the rendered image will not be resized.
        """
        super(ImageObservationWrapper, self).__init__(env)
        self.out_shape = out_shape
        # Render once to determine the observation dtype (and, when
        # out_shape is None, the native render shape).  The original code
        # subscripted ``out_shape`` unconditionally and crashed for None.
        dummy_obs = self._render()
        obs_shape = out_shape if out_shape is not None else dummy_obs.shape
        self.observation_space = spaces.Box(low=0, high=255, shape=obs_shape, dtype=dummy_obs.dtype)

    def _render(self) -> np.ndarray:
        """Render the current frame, resized to ``out_shape`` if one was given."""
        if self.out_shape is None:
            return self.env.render("rgb_array")
        return self.env.render("rgb_array", width=self.out_shape[0], height=self.out_shape[1])

    def observation(self, observation: np.ndarray) -> np.ndarray:
        """Ignore the numeric observation and return the rendered frame.

        The red and blue channels are swapped via OpenCV; presumably the
        consumer expects BGR-ordered frames -- TODO confirm against callers.
        """
        img = self._render()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
def convert_single_state(state: Array) -> np.ndarray:
    """Expand a state ``(theta, alpha, theta_dot, alpha_dot)`` into its
    trigonometric form ``(cos(theta), sin(theta), cos(alpha), sin(alpha),
    theta_dot, alpha_dot)``.
    """
    theta, alpha, theta_dot, alpha_dot = state
    trig_part = [np.cos(theta), np.sin(theta), np.cos(alpha), np.sin(alpha)]
    return np.array(trig_part + [theta_dot, alpha_dot], dtype=np.float64)
def convert_states_array(states: Array) -> np.ndarray:
    """Vectorized counterpart of :func:`convert_single_state`.

    Maps each row ``(theta, alpha, theta_dot, alpha_dot, ...)`` to
    ``(cos(theta), sin(theta), cos(alpha), sin(alpha), theta_dot,
    alpha_dot, ...)``; any trailing columns are passed through unchanged.
    """
    columns = (
        np.cos(states[:, 0:1]),
        np.sin(states[:, 0:1]),
        np.cos(states[:, 1:2]),
        np.sin(states[:, 1:2]),
        states[:, 2:3],
        states[:, 3:4],
        states[:, 4:],
    )
    return np.concatenate(columns, axis=1)
class TrigonometricObservationWrapper(ObservationWrapper):
    """Wrapper that maps ``(theta, alpha, theta_dot, alpha_dot)`` observations
    to the trigonometric form ``(cos(theta), sin(theta), cos(alpha),
    sin(alpha), theta_dot, alpha_dot)``.
    """

    def __init__(self, env):
        super(TrigonometricObservationWrapper, self).__init__(env)
        # cos/sin components are bounded by 1; the two velocities are unbounded.
        obs_max = np.asarray([1, 1, 1, 1, np.inf, np.inf], dtype=np.float64)
        self.observation_space = spaces.Box(-obs_max, obs_max, dtype=np.float64)

    def observation(self, observation: Array) -> np.ndarray:
        """Convert a 4-dim state observation to its 6-dim trigonometric form.

        Raises:
            ValueError: If the observation does not have exactly 4 entries.
        """
        # Raise instead of assert: asserts are stripped when Python runs
        # with optimizations enabled (``python -O``).
        if len(observation) != 4:
            raise ValueError(
                "Expected an observation of the form "
                "(theta, alpha, theta_dot, alpha_dot), got length %d"
                % len(observation)
            )
        return convert_single_state(observation)
class ExponentialRewardWrapper(Wrapper):
    """Wrapper that replaces the environment's reward with an exponential
    swing-up reward.

    The reward is computed from the plain state ``(theta, alpha, theta_dot,
    alpha_dot)``.  If the trigonometric state representation (cosine & sine of
    theta and alpha) is desired, that conversion must happen *after* this
    wrapper, since the reward function expects the plain 4-dim state.
    """

    def __init__(self, env: Env):
        super().__init__(env)
        # Raise instead of assert: asserts are stripped under ``python -O``.
        if env.frequency is None:
            raise ValueError("The wrapped environment must define a sample frequency.")
        # Time step passed to the reward function.
        self._dt = 1.0 / env.frequency

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

    def step(self, action: float):
        # The wrapped environment's own reward is discarded on purpose.
        observation, _, done, info = self.env.step(action)
        if len(observation) != 4:
            raise ValueError(
                "Expected an observation of shape [theta, alpha, theta_dot, alpha_dot]."
            )
        reward = exp_swing_up_reward(observation, action, self._dt)
        return observation, reward, done, info
class CalibrationWrapper(Wrapper):
    """Wrapper to calibrate the rotary arm of the Qube to a specific angle.

    The Qube gets calibrated to the desired theta value with a PID controller. For this the left and the right joint
    limits are determined to calculate the correct value of the desired theta. Those limits which are used to calibrate
    the Qube to the desired theta value are initialised at the first calibration step. We can force reinitialisation of
    these limits via the argument `limit_reset_threshold`.

    This wrapper only works for the hardware version of the Qube. It does not affect simulation instances.
    """

    def __init__(self, env: Env, desired_theta: float = 0.0, frequency: int = None, u_max: float = 1.0,
                 noise: bool = False, unit: str = 'deg', limit_reset_threshold: int = None):
        """Creates a wrapper for calibration.

        Args:
            env: OpenAI gym environment of the `real` Qube.
            desired_theta: Value of theta after calibration.
            frequency: Sample frequency; if not specified the project default is used.
            u_max: Maximum applied voltage during calibration.
            noise: Add Gaussian noise to the desired theta for randomized starts.
            unit: Unit of the angle; either `deg` or `rad`.
            limit_reset_threshold: Force reinitialization of the joint limits
                after this many resets (None = never reinitialize).
        """
        super(CalibrationWrapper, self).__init__(env)
        # Fall back to the project-wide default sample frequency.
        self.frequency = FREQUENCY if frequency is None else frequency
        self.u_max = u_max
        self.desired_theta = desired_theta
        self.noise = noise
        # Joint limits are lazily initialized by the first `calibrate` call.
        self.limits = None
        # Counts resets since the limits were last (re-)initialized.
        self.counter = 0
        self.qube = self.unwrapped.qube
        assert isinstance(self.qube, QubeHardware), "Only the hardware version of the Qube can be calibrated."
        self.limit_reset_threshold = np.inf if limit_reset_threshold is None else limit_reset_threshold
        # Noise std-dev is 1/8 of a half-turn, expressed in the chosen unit.
        if unit == 'deg':
            self.noise_scale = 180. / 8
        elif unit == 'rad':
            self.noise_scale = np.pi / 8
        else:
            # NOTE(review): an unknown unit silently disables the start-angle
            # noise (scale 0) instead of raising -- confirm this is intended.
            self.noise_scale = 0.

    def reset(self, **kwargs):
        """Calibrate the arm to ``desired_theta`` (plus optional noise), then reset the env."""
        # First reset the env to be sure the environment it is ready for calibration
        self.env.reset(**kwargs)
        # Inject a little bit of noise if desired
        theta = self.desired_theta + np.random.normal(scale=self.noise_scale) if self.noise else self.desired_theta
        # NOTE(review): debug print left in -- consider using logging instead.
        print(f"Setting to {theta}")
        # Calibrate
        self.limits = calibrate(self.qube, theta, self.frequency, self.u_max, limits=self.limits)
        self.counter += 1
        # Check if we have to reset the limits for calibration
        if self.counter >= self.limit_reset_threshold:
            self.limits = None
            self.counter = 0
        # Second reset to get the state and initialize correctly
        return self.env.reset(**kwargs)
| """
Wrapper for OpenAI Gym Reinforcement Learning environments.
Designed to work well with the Qube-Servo 2 classes of this repository.
Each wrapper can be used in the following way:
```python
with QubeSwingupEnv() as env:
env = WrapperClass(env)
    ... (Normal steps like you would do without the wrapper, or additional wrapping)
```
@Author: <NAME>
"""
import typing as tp
import cv2
import numpy as np
from gym import Env, Wrapper, ObservationWrapper, spaces
from gym_brt.control import calibrate
from gym_brt.data.config.configuration import FREQUENCY
from gym_brt.envs.reinforcementlearning_extensions.rl_reward_functions import exp_swing_up_reward
try:
from gym_brt.quanser import QubeHardware
except ImportError:
print("Warning: Can not import QubeHardware in wrapper.py")
Array = tp.Union[tp.List, np.ndarray]
class ImageObservationWrapper(ObservationWrapper):
    """Wrapper that replaces the numeric state observation with a rendered image.

    Uses ``env.render('rgb_array')`` as the observation rather than the
    observation the wrapped environment provides.
    """

    def __init__(self, env: Env, out_shape: tp.Tuple = None) -> None:
        """
        Args:
            env: Gym environment to wrap around. Must be a simulation.
            out_shape: Output shape ``(width, height)`` of the image observation.
                If None the rendered image will not be resized.
        """
        super(ImageObservationWrapper, self).__init__(env)
        self.out_shape = out_shape
        # Render once to determine the observation dtype (and, when
        # out_shape is None, the native render shape).  The original code
        # subscripted ``out_shape`` unconditionally and crashed for None.
        dummy_obs = self._render()
        obs_shape = out_shape if out_shape is not None else dummy_obs.shape
        self.observation_space = spaces.Box(low=0, high=255, shape=obs_shape, dtype=dummy_obs.dtype)

    def _render(self) -> np.ndarray:
        """Render the current frame, resized to ``out_shape`` if one was given."""
        if self.out_shape is None:
            return self.env.render("rgb_array")
        return self.env.render("rgb_array", width=self.out_shape[0], height=self.out_shape[1])

    def observation(self, observation: np.ndarray) -> np.ndarray:
        """Ignore the numeric observation and return the rendered frame.

        The red and blue channels are swapped via OpenCV; presumably the
        consumer expects BGR-ordered frames -- TODO confirm against callers.
        """
        img = self._render()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
def convert_single_state(state: Array) -> np.ndarray:
    """Expand a state ``(theta, alpha, theta_dot, alpha_dot)`` into its
    trigonometric form ``(cos(theta), sin(theta), cos(alpha), sin(alpha),
    theta_dot, alpha_dot)``.
    """
    theta, alpha, theta_dot, alpha_dot = state
    trig_part = [np.cos(theta), np.sin(theta), np.cos(alpha), np.sin(alpha)]
    return np.array(trig_part + [theta_dot, alpha_dot], dtype=np.float64)
def convert_states_array(states: Array) -> np.ndarray:
    """Vectorized counterpart of :func:`convert_single_state`.

    Maps each row ``(theta, alpha, theta_dot, alpha_dot, ...)`` to
    ``(cos(theta), sin(theta), cos(alpha), sin(alpha), theta_dot,
    alpha_dot, ...)``; any trailing columns are passed through unchanged.
    """
    columns = (
        np.cos(states[:, 0:1]),
        np.sin(states[:, 0:1]),
        np.cos(states[:, 1:2]),
        np.sin(states[:, 1:2]),
        states[:, 2:3],
        states[:, 3:4],
        states[:, 4:],
    )
    return np.concatenate(columns, axis=1)
class TrigonometricObservationWrapper(ObservationWrapper):
    """Wrapper that maps ``(theta, alpha, theta_dot, alpha_dot)`` observations
    to the trigonometric form ``(cos(theta), sin(theta), cos(alpha),
    sin(alpha), theta_dot, alpha_dot)``.
    """

    def __init__(self, env):
        super(TrigonometricObservationWrapper, self).__init__(env)
        # cos/sin components are bounded by 1; the two velocities are unbounded.
        obs_max = np.asarray([1, 1, 1, 1, np.inf, np.inf], dtype=np.float64)
        self.observation_space = spaces.Box(-obs_max, obs_max, dtype=np.float64)

    def observation(self, observation: Array) -> np.ndarray:
        """Convert a 4-dim state observation to its 6-dim trigonometric form.

        Raises:
            ValueError: If the observation does not have exactly 4 entries.
        """
        # Raise instead of assert: asserts are stripped when Python runs
        # with optimizations enabled (``python -O``).
        if len(observation) != 4:
            raise ValueError(
                "Expected an observation of the form "
                "(theta, alpha, theta_dot, alpha_dot), got length %d"
                % len(observation)
            )
        return convert_single_state(observation)
class ExponentialRewardWrapper(Wrapper):
    r"""Replace the wrapped environment's reward with an exponential
    swing-up reward.

    The reward computation expects raw angle states
    ``(theta, alpha, theta_dot, alpha_dot)``.  When the trigonometric
    observation form is in use, convert back first (or apply this wrapper
    before the trigonometric one).
    """

    def __init__(self, env: Env):
        super().__init__(env)
        assert env.frequency is not None
        # Time step between control actions, derived from the sample rate.
        self._dt = 1.0 / env.frequency

    def reset(self, **kwargs):
        # Plain pass-through; only the reward in step() is modified.
        return self.env.reset(**kwargs)

    def step(self, action: float):
        obs, _, done, info = self.env.step(action)
        assert len(obs) == 4, "Assumes an observation which is in shape [theta, alpha, theta_dot, alpha_dot]."
        return obs, exp_swing_up_reward(obs, action, self._dt), done, info
class CalibrationWrapper(Wrapper):
"""Wrapper to calibrate the rotary arm of the Qube to a specific angle.
The Qube gets calibrated to the desired theta value with a PID controller. For this the left and the right joint
limits are determined to calculate the correct value of the desired theta. Those limits which are used to calibrate
the Qube to the desired theta value are initialised at the first calibration step. We can force reinitialisation of
these limits via the argument `limit_reset_threshold`.
This wrapper only works for the hardware version of the Qube. It does not effect the simulation instances.
"""
def __init__(self, env: Env, desired_theta: float = 0.0, frequency: int = None, u_max: float = 1.0,
             noise: bool = False, unit='deg', limit_reset_threshold=None):
    """Create a calibration wrapper around a hardware Qube environment.

    Args:
        env: OpenAI gym environment of the `real` Qube.
        desired_theta: Value of theta after calibration.
        frequency: Sample frequency; if not specified the module-level
            FREQUENCY default is used.
        u_max: Maximum applied voltage during calibration.
        noise: Add Gaussian noise to the desired theta for random starts.
        unit: Unit of the angle; either 'deg' or 'rad'.
        limit_reset_threshold: Force reinitialisation of the joint limits
            after this many resets (None keeps them indefinitely).

    Raises:
        ValueError: If ``unit`` is neither ``'deg'`` nor ``'rad'``.
    """
    super(CalibrationWrapper, self).__init__(env)
    self.frequency = FREQUENCY if frequency is None else frequency
    self.u_max = u_max
    self.desired_theta = desired_theta
    self.noise = noise
    self.limits = None   # joint limits, lazily determined on the first reset
    self.counter = 0     # number of resets since the limits were (re)set
    self.qube = self.unwrapped.qube
    assert isinstance(self.qube, QubeHardware), "Only the hardware version of the Qube can be calibrated."
    self.limit_reset_threshold = np.inf if limit_reset_threshold is None else limit_reset_threshold
    # Noise scale is an eighth of a half-turn in the chosen unit.
    if unit == 'deg':
        self.noise_scale = 180. / 8
    elif unit == 'rad':
        self.noise_scale = np.pi / 8
    else:
        # The original silently set the scale to 0 here, which masks typos
        # such as unit='degrees' by quietly disabling noise; fail loudly.
        raise ValueError("unit must be either 'deg' or 'rad', got %r" % (unit,))
def reset(self, **kwargs):
    """Calibrate the rotary arm to the desired theta, then reset the env.

    Performs a double reset: one before calibration so the hardware is in a
    defined state, and one afterwards to obtain the initial observation.
    """
    # First reset the env to be sure the environment it is ready for calibration
    self.env.reset(**kwargs)
    # Inject a little bit of noise if desired
    # (the conditional expression wraps the whole sum, so noise-free resets
    # use desired_theta unchanged)
    theta = self.desired_theta + np.random.normal(scale=self.noise_scale) if self.noise else self.desired_theta
    print(f"Setting to {theta}")
    # Calibrate; the PID-based routine reuses previously determined joint
    # limits when available (self.limits is None on the first call).
    self.limits = calibrate(self.qube, theta, self.frequency, self.u_max, limits=self.limits)
    self.counter += 1
    # Check if we have to reset the limits for calibration
    if self.counter >= self.limit_reset_threshold:
        self.limits = None
        self.counter = 0
    # Second reset to get the state and initialize correctly
return self.env.reset(**kwargs) | en | 0.786745 | Wrapper for OpenAI Gym Reinforcement Learning environments. Designed to work well with the Qube-Servo 2 classes of this repository. Each wrapper can be used in the following way: ```python with QubeSwingupEnv() as env: env = WrapperClass(env) ... (Normal steps like you would do without the wrapper or also additonal wrapping) ``` @Author: <NAME> Wrapper to get an image from the environment and not a state. Use env.render('rgb_array') as observation rather than the observation the environment provides. Args: env: Gym environment to wrap around. Must be a simulation. out_shape: Output shape of the image observation. If None the rendered image will not be resized. # Update observation space #if self.out_shape is not None: # img = cv2.resize(img, (self.out_shape[0], self.out_shape[1]), interpolation=cv2.INTER_AREA) Convert a single state in form of :math:`(\mathtt{theta}, \mathtt{\alpha}, \mathtt{theta_dot}, \mathtt{alpha_dot})` to :math:`(cos(\mathtt{theta}), sin(\mathtt{theta}), cos(\mathtt{\alpha}), sin(\mathtt{\alpha}), \mathtt{theta_dot}, \mathtt{alpha_dot})` Converts an array of states in form :math:`(\mathtt{theta}, \mathtt{\alpha}, \mathtt{theta_dot}, \mathtt{alpha_dot})` to its extend form :math:`(cos(\mathtt{theta}), sin(\mathtt{theta}), cos(\mathtt{\alpha}), sin(\mathtt{\alpha}), \mathtt{theta_dot}, \mathtt{alpha_dot})` With an observation in form :math:`(\mathtt{\theta}, \mathtt{\alpha}, \mathtt{\dot{\theta}}, \mathtt{\dot{\alpha}})`, this wrapper transforms every of such observation to :math:`(cos(\mathtt{\theta}), sin(\mathtt{\theta}), cos(\mathtt{alpha}), sin(\mathtt{alpha}), \mathtt{\dot{theta}}, \mathtt{\dot{\alpha}})` Wrapper for an exponential reward. 
If the trigonometric version of the state (== cosine & sine of theta and alpha) is used, they must be converted to shape :math:`(\mathtt{\theta}, \mathtt{\alpha}, \mathtt{\dot{theta}}, \mathtt{\dot{\alpha})` or the conversion to the trigonometric shape must be done after the reward calcultion. Wrapper to calibrate the rotary arm of the Qube to a specific angle. The Qube gets calibrated to the desired theta value with a PID controller. For this the left and the right joint limits are determined to calculate the correct value of the desired theta. Those limits which are used to calibrate the Qube to the desired theta value are initialised at the first calibration step. We can force reinitialisation of these limits via the argument `limit_reset_threshold`. This wrapper only works for the hardware version of the Qube. It does not effect the simulation instances. Creates an wrapper for calibration. Args: env: OpenAI gym environment of the `real` Qube. desired_theta: Value of :math:`\mathtt{\theta}` after calibration frequency: Sample frequency; ff not specified it is derived from the given environment u_max: Maximum applied voltage during calibration noise: Additional noise added to the desired theta to incorporate random starts unit: Unit of the the angle; either `deg` or `rad` limit_reset_threshold: Force to reinitialize the limits after the specified timestep threshold # First reset the env to be sure the environment it is ready for calibration # Inject a little bit of noise if desired # Calibrate # Check if we have to reset the limits for calibration # Second reset to get the state and initialize correctly | 2.830754 | 3 |
totient/totient.py | ryanmcdermott/algorithms | 23 | 6614942 | <filename>totient/totient.py
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm (iterative)."""
    while a:
        a, b = b % a, a
    return b
def totient(n):
    """Euler's totient: count of integers in [1, n] that are coprime to n.

    Uses the prime-factorisation product formula
    ``phi(n) = n * prod(1 - 1/p)`` over the distinct primes ``p`` dividing
    ``n``.  This runs in O(sqrt(n)) instead of the original O(n log n)
    gcd scan (and no longer depends on the sibling ``gcd``).  Expects n >= 1.
    """
    result = n
    remainder = n
    p = 2
    while p * p <= remainder:
        if remainder % p == 0:
            while remainder % p == 0:
                remainder //= p
            result -= result // p
        p += 1
    if remainder > 1:
        # Leftover factor is a prime greater than sqrt(n).
        result -= result // remainder
    return result
| <filename>totient/totient.py
def gcd(a, b):
if a == 0:
return b
return gcd(b % a, a)
def totient(n):
res = 1
for i in range(2, n):
if gcd(i, n) == 1:
res += 1
return res
| none | 1 | 3.681812 | 4 | |
utils.py | AsaChiri/DDDynamicRecorder | 1 | 6614943 | <filename>utils.py
import asyncio
import os
import traceback
from os import path
import requests
from retrying import retry
# Switch the Chromium download host to the (non-https) Taobao mirror; must be
# set before pyppeteer is imported below.
os.environ['PYPPETEER_DOWNLOAD_HOST'] = 'http://npm.taobao.org/mirrors'
from pyppeteer import launch  # Do not remove: dynamic.py imports `launch` from this module.
from pyppeteer.chromium_downloader import check_chromium, download_chromium
# Download Chromium on first run if it is not present yet.
if not check_chromium():
    download_chromium()
class BiliAPI():
    """Thin wrapper around a handful of public Bilibili REST endpoints."""

    def __init__(self) -> None:
        # Browser-like headers; some endpoints reject requests without them.
        self.default_headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/79.0.3945.130 Safari/537.36",
            "Referer": "https://www.bilibili.com/"
        }

    @retry
    def get(self, url, headers=None, cookies=None):
        """HTTP GET with default headers and utf-8 decoding (retried on failure)."""
        request_headers = headers or self.default_headers
        with requests.Session() as sess:
            response = sess.get(url, headers=request_headers, cookies=cookies)
            response.encoding = 'utf-8'
            return response

    def get_json(self, url, **kw):
        """GET *url* and decode the response body as JSON."""
        return self.get(url, **kw).json()

    def get_info(self, uid):
        """Return the profile information of user *uid*."""
        return self.get_json(f'https://api.bilibili.com/x/space/acc/info?mid={uid}')['data']

    def get_dynamic(self, uid):
        """Return the dynamic (feed) history of user *uid*.

        need_top: {1: include pinned posts, 0: exclude pinned posts}
        """
        url = f'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/space_history?host_uid={uid}&offset_dynamic_id=0&need_top=0'
        return self.get_json(url)['data']

    def get_live_info(self, uid):
        """Return the live-room information of user *uid*."""
        return self.get_json(f'https://api.live.bilibili.com/room/v1/Room/getRoomInfoOld?mid={uid}')['data']
| <filename>utils.py
import asyncio
import os
import traceback
from os import path
import requests
from retrying import retry
# 更换 Chromium 下载地址为非 https 淘宝源
os.environ['PYPPETEER_DOWNLOAD_HOST'] = 'http://npm.taobao.org/mirrors'
from pyppeteer import launch # 不能删,隔壁 dynamic.py 还要调用的
from pyppeteer.chromium_downloader import check_chromium, download_chromium
# 检查 Chromium 是否下载
if not check_chromium():
download_chromium()
class BiliAPI():
def __init__(self) -> None:
self.default_headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/79.0.3945.130 Safari/537.36",
"Referer": "https://www.bilibili.com/"
}
@retry
def get(self, url, headers=None, cookies=None):
if not headers:
headers = self.default_headers
with requests.Session() as sess:
r = sess.get(url, headers=headers, cookies=cookies)
r.encoding = 'utf-8'
return r
def get_json(self, url, **kw):
return (self.get(url, **kw)).json()
def get_info(self, uid):
url = f'https://api.bilibili.com/x/space/acc/info?mid={uid}'
return (self.get_json(url))['data']
def get_dynamic(self, uid):
# need_top: {1: 带置顶, 0: 不带置顶}
url = f'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/space_history?host_uid={uid}&offset_dynamic_id=0&need_top=0'
return (self.get_json(url))['data']
def get_live_info(self, uid):
url = f'https://api.live.bilibili.com/room/v1/Room/getRoomInfoOld?mid={uid}'
return (self.get_json(url))['data']
| zh | 0.840256 | # 更换 Chromium 下载地址为非 https 淘宝源 # 不能删,隔壁 dynamic.py 还要调用的 # 检查 Chromium 是否下载 # need_top: {1: 带置顶, 0: 不带置顶} | 2.28958 | 2 |
notebooks/classical_linear_regression.py | jingmouren/QuantResearch | 623 | 6614944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Demonstration: ordinary least squares fitted two ways -- via the normal
# equation and via scikit-learn -- then compared against the known true line.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, date
sample_size = 500
sigma_e = 3.0  # true value of parameter error sigma
random_num_generator = np.random.RandomState(0)
x = 10.0 * random_num_generator.rand(sample_size)
e = random_num_generator.normal(0, sigma_e, sample_size)
y = 1.0 + 2.0 * x + e  # a = 1.0; b = 2.0; y = a + b*x
plt.scatter(x, y, color='blue')
# normal equation to estimate the model parameters: beta = (X'X)^-1 X'y
# NOTE(review): np.linalg.solve / lstsq is numerically preferable to the
# explicit inverse for larger problems.
X = np.vstack((np.ones(sample_size), x)).T
params_closed_form = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
print('pamameters: %.7f, %.7f' %(params_closed_form[0], params_closed_form[1]))
from sklearn.linear_model import LinearRegression
# The next two lines does the regression
lm_model = LinearRegression(copy_X=True, fit_intercept=True, normalize=False)
lm_model.fit(x.reshape(-1,1), y)  # fit() expects 2D array
print('pamameters: %.7f, %.7f' %(lm_model.intercept_, lm_model.coef_))
# present the graph
xfit = np.linspace(0, 10, sample_size)
yfit = lm_model.predict(xfit.reshape(-1,1))
ytrue = 2.0 * xfit + 1.0  # we know the true value of slope and intercept
plt.scatter(x, y, color='blue')
plt.plot(xfit, yfit, color='red', label='fitted line', linewidth=3)
plt.plot(xfit, ytrue, color='green', label='true line', linewidth=3)
plt.legend()
# R-Square
r_square = lm_model.score(x.reshape(-1,1), y)
print('R-Square %.7f' %(r_square))
from scipy.stats.stats import pearsonr
# The square root of R-Square is correlation coefficient
print('Its square root is Pearson correlation coefficient: %.7f == %.7f' %(np.sqrt(r_square), pearsonr(x, y)[0])) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, date
sample_size = 500
sigma_e = 3.0 # true value of parameter error sigma
random_num_generator = np.random.RandomState(0)
x = 10.0 * random_num_generator.rand(sample_size)
e = random_num_generator.normal(0, sigma_e, sample_size)
y = 1.0 + 2.0 * x + e # a = 1.0; b = 2.0; y = a + b*x
plt.scatter(x, y, color='blue')
# normal equation to estimate the model parameters
X = np.vstack((np.ones(sample_size), x)).T
params_closed_form = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
print('pamameters: %.7f, %.7f' %(params_closed_form[0], params_closed_form[1]))
from sklearn.linear_model import LinearRegression
# The next two lines does the regression
lm_model = LinearRegression(copy_X=True, fit_intercept=True, normalize=False)
lm_model.fit(x.reshape(-1,1), y) # fit() expects 2D array
print('pamameters: %.7f, %.7f' %(lm_model.intercept_, lm_model.coef_))
# present the graph
xfit = np.linspace(0, 10, sample_size)
yfit = lm_model.predict(xfit.reshape(-1,1))
ytrue = 2.0 * xfit + 1.0 # we know the true value of slope and intercept
plt.scatter(x, y, color='blue')
plt.plot(xfit, yfit, color='red', label='fitted line', linewidth=3)
plt.plot(xfit, ytrue, color='green', label='true line', linewidth=3)
plt.legend()
# R-Square
r_square = lm_model.score(x.reshape(-1,1), y)
print('R-Square %.7f' %(r_square))
from scipy.stats.stats import pearsonr
# The square root of R-Square is correlation coefficient
print('Its square root is Pearson correlation coefficient: %.7f == %.7f' %(np.sqrt(r_square), pearsonr(x, y)[0])) | en | 0.655557 | #!/usr/bin/env python # -*- coding: utf-8 -*- # true value of parameter error sigma # a = 1.0; b = 2.0; y = a + b*x # normal equation to estimate the model parameters # The next two lines does the regression # fit() expects 2D array # present the graph # we know the true value of slope and intercept # R-Square # The square root of R-Square is correlation coefficient | 3.439852 | 3 |
examples/density_compensation.py | CEA-COSMIC/pysap-etomo | 0 | 6614945 | <filename>examples/density_compensation.py
"""
Reconstruction using gpuNUFFT with density compensation to accelerate
convergence
Credit: <NAME>
"""
import pysap
from pysap.data import get_sample_data
from modopt.math.metrics import ssim
from modopt.opt.linear import Identity
from modopt.opt.proximity import SparseThreshold
import numpy as np
import matplotlib.pyplot as plt
from etomo.operators.utils import generate_locations_etomo_2D
from etomo.operators.fourier.utils import estimate_density_compensation
from etomo.operators import gpuNUFFT, WaveletPywt, HOTV
from etomo.reconstructors.forwardtomo import TomoReconstructor
# %%
# Loading input data
image = get_sample_data('2d-mri')
img_size = image.shape[0]
# %%
# Create fourier operator and simulate data
theta = np.arange(0., 180., 3.)
kspace_loc = generate_locations_etomo_2D(img_size, theta)
density_comp = estimate_density_compensation(kspace_loc, image.shape)
fourier_op = gpuNUFFT(samples=kspace_loc, shape=image.shape,
density_comp=density_comp)
data = fourier_op.op(image.data)
# %%
# Create operators
TV = HOTV(img_shape=image.shape, order=1)
wavelet = WaveletPywt(wavelet_name='sym8', nb_scale=3)
linear_op = TV
regularizer_op = SparseThreshold(linear=Identity(), weights=5e-9)
# %%
# Run reconstructions
reconstructor = TomoReconstructor(
data_op=fourier_op,
linear_op=linear_op,
regularizer_op=regularizer_op,
gradient_formulation='analysis',
verbose=True
)
x_final, cost, *_ = reconstructor.reconstruct(
data=data,
optimization_alg='condatvu',
num_iterations=300,
cost_op_kwargs={'cost_interval': 5}
)
# %%
# Results
fig, ax = plt.subplots(2, 2, figsize=(14, 14))
image_rec = pysap.Image(data=np.abs(x_final))
ax[0][1].imshow(image_rec, cmap='gray')
ax[0][1].set_title('Reconstructed image')
ax[0][0].imshow(image, cmap='gray')
ax[0][0].set_title('Original image')
ax[1][0].plot(cost)
ax[1][0].set_yscale('log')
ax[1][0].set_title('Evolution of cost function')
ax[1][1].set_visible(False)
plt.show()
recon_ssim = ssim(image_rec, image)
print(f'The Reconstruction SSIM is: {recon_ssim: 2f}')
| <filename>examples/density_compensation.py
"""
Reconstruction using gpuNUFFT with density compensation to accelerate
convergence
Credit: <NAME>
"""
import pysap
from pysap.data import get_sample_data
from modopt.math.metrics import ssim
from modopt.opt.linear import Identity
from modopt.opt.proximity import SparseThreshold
import numpy as np
import matplotlib.pyplot as plt
from etomo.operators.utils import generate_locations_etomo_2D
from etomo.operators.fourier.utils import estimate_density_compensation
from etomo.operators import gpuNUFFT, WaveletPywt, HOTV
from etomo.reconstructors.forwardtomo import TomoReconstructor
# %%
# Loading input data
image = get_sample_data('2d-mri')
img_size = image.shape[0]
# %%
# Create fourier operator and simulate data
theta = np.arange(0., 180., 3.)
kspace_loc = generate_locations_etomo_2D(img_size, theta)
density_comp = estimate_density_compensation(kspace_loc, image.shape)
fourier_op = gpuNUFFT(samples=kspace_loc, shape=image.shape,
density_comp=density_comp)
data = fourier_op.op(image.data)
# %%
# Create operators
TV = HOTV(img_shape=image.shape, order=1)
wavelet = WaveletPywt(wavelet_name='sym8', nb_scale=3)
linear_op = TV
regularizer_op = SparseThreshold(linear=Identity(), weights=5e-9)
# %%
# Run reconstructions
reconstructor = TomoReconstructor(
data_op=fourier_op,
linear_op=linear_op,
regularizer_op=regularizer_op,
gradient_formulation='analysis',
verbose=True
)
x_final, cost, *_ = reconstructor.reconstruct(
data=data,
optimization_alg='condatvu',
num_iterations=300,
cost_op_kwargs={'cost_interval': 5}
)
# %%
# Results
fig, ax = plt.subplots(2, 2, figsize=(14, 14))
image_rec = pysap.Image(data=np.abs(x_final))
ax[0][1].imshow(image_rec, cmap='gray')
ax[0][1].set_title('Reconstructed image')
ax[0][0].imshow(image, cmap='gray')
ax[0][0].set_title('Original image')
ax[1][0].plot(cost)
ax[1][0].set_yscale('log')
ax[1][0].set_title('Evolution of cost function')
ax[1][1].set_visible(False)
plt.show()
recon_ssim = ssim(image_rec, image)
print(f'The Reconstruction SSIM is: {recon_ssim: 2f}')
| en | 0.621232 | Reconstruction using gpuNUFFT with density compensation to accelerate convergence Credit: <NAME> # %% # Loading input data # %% # Create fourier operator and simulate data # %% # Create operators # %% # Run reconstructions # %% # Results | 2.652381 | 3 |
examples/strategies/double_ma.py | touqi/ctpbee | 1 | 6614946 | <filename>examples/strategies/double_ma.py<gh_stars>1-10
"""
双均线策略实现
"""
from ctpbee import CtpbeeApi
from ctpbee.constant import BarData
from ctpbee.indicator import ArrayManager
class DoubleMa(CtpbeeApi):
    """Double moving-average (MACD-based) demo strategy."""

    def __init__(self, name: str, code=None):
        # Initialise strategy parameters. `code` is accepted for interface
        # compatibility but currently unused.
        # NOTE(review): super().init_app(name) mirrors the original; confirm
        # whether CtpbeeApi expects __init__(name) here instead.
        super().init_app(name)
        self.indicator = ArrayManager()
        self.fast_windows = 5   # fast MA window
        self.slow_window = 10   # slow MA window

    def on_bar(self, bar: BarData) -> None:
        """Feed each finished bar into the indicator and compute MACD."""
        self.indicator.update_bar(bar)
        # Wait until the ArrayManager buffer is filled. The original returned
        # when `inited` was True, i.e. it skipped the MACD computation on
        # every bar once enough data was available -- the guard was inverted.
        if not self.indicator.inited:
            return
        ma, sig, his = self.indicator.macd(fast_period=self.fast_windows, slow_period=self.slow_window, signal_period=1)

    def on_realtime(self):
        pass
if __name__ == '__main__':
pass | <filename>examples/strategies/double_ma.py<gh_stars>1-10
"""
双均线策略实现
"""
from ctpbee import CtpbeeApi
from ctpbee.constant import BarData
from ctpbee.indicator import ArrayManager
class DoubleMa(CtpbeeApi):
def __init__(self, name: str, code=None):
# 初始化策略参数
super().init_app(name)
self.indicator = ArrayManager()
self.fast_windows = 5
self.slow_window = 10
def on_bar(self, bar: BarData) -> None:
self.indicator.update_bar(bar)
if self.indicator.inited:
return
ma, sig, his = self.indicator.macd(fast_period=self.fast_windows, slow_period=self.slow_window, signal_period=1)
def on_realtime(self):
pass
if __name__ == '__main__':
pass | zh | 0.89592 | 双均线策略实现 # 初始化策略参数 | 2.44171 | 2 |
player.py | Ivche1337/Dodgerino-Game | 1 | 6614947 | <filename>player.py
import glob
import pygame
import time
class Player():
    """The player sprite: position, animation frames, sounds and score."""

    def __init__(self, screen_width, screen_height):
        # Player Position (roughly centered horizontally, near the bottom)
        self.x = (screen_width / 2.30)
        self.y = (screen_height / 1.2)
        # Player Animation
        self.ani = glob.glob('images/player_animation/frame*')
        self.ani.sort()
        self.ani_frame = 0
        self.ani_len = len(self.ani)
        self.update_frame = 0
        # Cache of loaded frame surfaces: the original reloaded frames from
        # disk on every draw()/update() call, which is needless I/O per frame.
        self._frame_cache = {}
        self.img = self._frame_image(0)
        # Player sounds
        self.player_score_sound = pygame.mixer.Sound("sound/player_score.wav")
        self.player_crash_sound = pygame.mixer.Sound("sound/player_crash.wav")
        # Player attributes
        self.score = 0
        self.name = "Unknown"

    def _frame_image(self, index):
        """Load (at most once) and return the surface for frame *index*."""
        if index not in self._frame_cache:
            self._frame_cache[index] = pygame.image.load(self.ani[index])
        return self._frame_cache[index]

    def update(self):
        """Update the sprite of the player; the frame advances every 10th call."""
        if self.update_frame % 10 == 0:
            if self.ani_frame >= self.ani_len - 1:
                self.ani_frame = 0  # wrap around to the first frame
            else:
                self.ani_frame += 1
            self.img = self._frame_image(self.ani_frame)
            self.deltaTime = 0
        self.update_frame += 1

    def draw(self, screen):
        """Draw the player to the screen (advancing the animation first)."""
        self.update()
        screen.blit(self.img, (self.x, self.y))

    def crash(self):
        """Hook invoked on collision; sound playback currently disabled."""
        pass
        # pygame.mixer.Sound.play(self.player_crash_sound)

    def add_score_point(self):
        """Gives the player a score point."""
        self.score += 1
        # pygame.mixer.Sound.play(self.player_score_sound)

    # ACCESSORS!!
    # Accessors for x position
    def getX(self):
        """Returns the x position of the player."""
        return self.x

    def setX(self, x):
        """Sets the x position of the player."""
        self.x = x

    # Accessors for y position
    def getY(self):
        """Returns the y position of the player."""
        return self.y

    def setY(self, y):
        """Sets the y position of the player."""
        self.y = y

    # Accessors for the score
    def get_score(self):
        return self.score

    def set_score(self, score):
        self.score = score

    # Accessors for the name
    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name
| <filename>player.py
import glob
import pygame
import time
class Player():
def __init__(self, screen_width, screen_height):
# Player Position
self.x = (screen_width / 2.30)
self.y = (screen_height / 1.2)
# Player Animation
self.ani = glob.glob('images/player_animation/frame*')
self.ani.sort()
self.ani_frame = 0
self.ani_len = len(self.ani)
self.update_frame = 0
# Player sounds
self.player_score_sound = pygame.mixer.Sound("sound/player_score.wav")
self.player_crash_sound = pygame.mixer.Sound("sound/player_crash.wav")
# Player attributes
self.score = 0
self.name = "Unknown"
def update(self):
"Update the sprite of the player."
if self.update_frame % 10 == 0:
if self.ani_frame >= self.ani_len - 1:
self.ani_frame = 0
else:
self.ani_frame += 1
self.img = pygame.image.load(self.ani[self.ani_frame])
self.deltaTime = 0
self.update_frame += 1
def draw(self, screen):
"Draw the player to the screen."
self.img = pygame.image.load(self.ani[self.ani_frame])
self.update()
screen.blit(self.img, (self.x, self.y))
def crash(self):
pass
#pygame.mixer.Sound.play(self.player_crash_sound)
def add_score_point(self):
"Gives the player a score point"
self.score += 1
#pygame.mixer.Sound.play(self.player_score_sound)
# ACCESSORS!!
# Accessors for x position
def getX(self):
"Returns the x position of the player."
return self.x
def setX(self, x):
"Sets the x position of the player."
self.x = x
# Accessors for y position
def getY(self):
"Returns the y position of the player."
return self.y
def setY(self, y):
"Sets the y position of the player."
self.y = y
# Accessors for the score
def get_score(self):
return self.score
def set_score(self, score):
self.score = score
# Accessors for the name
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
| en | 0.644676 | # Player Position # Player Animation # Player sounds # Player attributes #pygame.mixer.Sound.play(self.player_crash_sound) #pygame.mixer.Sound.play(self.player_score_sound) # ACCESSORS!! # Accessors for x position # Accessors for y position # Accessors for the score # Accessors for the name | 2.990785 | 3 |
salt/modules/minion.py | preoctopus/salt | 1 | 6614948 | <filename>salt/modules/minion.py
# -*- coding: utf-8 -*-
'''
Module to provide information about minions
'''
from __future__ import absolute_import
# Import Python libs
import os
# Import Salt libs
import salt.utils
import salt.key
# Don't shadow built-ins.
__func_alias__ = {
'list_': 'list'
}
def list_():
    '''
    Return a list of accepted, denied, unaccepted and rejected keys.
    This is the same output as `salt-key -L`

    CLI Example:

    .. code-block:: bash

        salt 'master' minion.list
    '''
    # The master's key directories live next to the minion's pki_dir.
    pki_dir = __salt__['config.get']('pki_dir', '').replace('minion', 'master')
    transport = __salt__['config.get']('transport', '')
    # ZeroMQ/TCP keys have four states (including 'denied'); Raet keys
    # only have three, hence the two directory helpers.
    if transport in ('zeromq', 'tcp'):
        key_dirs = _check_minions_directories(pki_dir)
    else:
        key_dirs = _check_minions_directories_raetkey(pki_dir)
    ret = {}
    for key_dir in key_dirs:
        kind = os.path.basename(key_dir)
        ret[kind] = []
        try:
            entries = salt.utils.isorted(os.listdir(key_dir))
        except (OSError, IOError):
            # This kind of key dir has not been created yet; leave it empty.
            continue
        for entry in entries:
            if entry.startswith('.'):
                continue
            if os.path.isfile(os.path.join(key_dir, entry)):
                ret[kind].append(entry)
    return ret
def _check_minions_directories(pki_dir):
    '''
    Return the minion keys directory paths.
    This function is a copy of salt.key.Key._check_minions_directories.
    '''
    kinds = (salt.key.Key.ACC, salt.key.Key.PEND,
             salt.key.Key.REJ, salt.key.Key.DEN)
    return tuple(os.path.join(pki_dir, kind) for kind in kinds)
def _check_minions_directories_raetkey(pki_dir):
    '''
    Return the minion keys directory paths.
    This function is a copy of salt.key.RaetKey._check_minions_directories.
    '''
    kinds = (salt.key.RaetKey.ACC, salt.key.RaetKey.PEND, salt.key.RaetKey.REJ)
    return tuple(os.path.join(pki_dir, kind) for kind in kinds)
def kill():
    '''
    Kill the salt minion.
    If you have a monitor that restarts ``salt-minion`` when it dies then this is
    a great way to restart after a minion upgrade.
    CLI example::

        >$ salt minion[12] minion.kill
        minion1:
            ----------
            killed:
                7874
        minion2:
            ----------
            killed:
                29071

    The result of the salt command shows the process ID of the minions that were
    successfully killed - in this case they were ``7874`` and ``29071``.
    '''
    pid = __grains__.get('pid')
    if pid:
        if 'ps.kill_pid' in __salt__:
            __salt__['ps.kill_pid'](pid)
        else:
            # NOTE(review): nesting reconstructed from a flattened dump --
            # without the `ps` module we cannot kill, so report
            # ``killed: None``; confirm against the original indentation.
            pid = None
    return {'killed': pid}
| <filename>salt/modules/minion.py
# -*- coding: utf-8 -*-
'''
Module to provide information about minions
'''
from __future__ import absolute_import
# Import Python libs
import os
# Import Salt libs
import salt.utils
import salt.key
# Don't shadow built-ins.
__func_alias__ = {
'list_': 'list'
}
def list_():
'''
Return a list of accepted, denied, unaccepted and rejected keys.
This is the same output as `salt-key -L`
CLI Example:
.. code-block:: bash
salt 'master' minion.list
'''
pki_dir = __salt__['config.get']('pki_dir', '')
transport = __salt__['config.get']('transport', '')
# We have to replace the minion/master directories
pki_dir = pki_dir.replace('minion', 'master')
# The source code below is (nearly) a copy of salt.key.Key.list_keys
# We have to differentiate between RaetKey._check_minions_directories
# and Zeromq-Keys. Raet-Keys only have three states while ZeroMQ-keys
# have an additional 'denied' state.
if transport in ('zeromq', 'tcp'):
key_dirs = _check_minions_directories(pki_dir)
else:
key_dirs = _check_minions_directories_raetkey(pki_dir)
ret = {}
for dir_ in key_dirs:
ret[os.path.basename(dir_)] = []
try:
for fn_ in salt.utils.isorted(os.listdir(dir_)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(dir_, fn_)):
ret[os.path.basename(dir_)].append(fn_)
except (OSError, IOError):
# key dir kind is not created yet, just skip
continue
return ret
def _check_minions_directories(pki_dir):
'''
Return the minion keys directory paths.
This function is a copy of salt.key.Key._check_minions_directories.
'''
minions_accepted = os.path.join(pki_dir, salt.key.Key.ACC)
minions_pre = os.path.join(pki_dir, salt.key.Key.PEND)
minions_rejected = os.path.join(pki_dir, salt.key.Key.REJ)
minions_denied = os.path.join(pki_dir, salt.key.Key.DEN)
return minions_accepted, minions_pre, minions_rejected, minions_denied
def _check_minions_directories_raetkey(pki_dir):
'''
Return the minion keys directory paths.
This function is a copy of salt.key.RaetKey._check_minions_directories.
'''
accepted = os.path.join(pki_dir, salt.key.RaetKey.ACC)
pre = os.path.join(pki_dir, salt.key.RaetKey.PEND)
rejected = os.path.join(pki_dir, salt.key.RaetKey.REJ)
return accepted, pre, rejected
def kill():
'''
Kill the salt minion.
If you have a monitor that restarts ``salt-minion`` when it dies then this is
a great way to restart after a minion upgrade.
CLI example::
>$ salt minion[12] minion.kill
minion1:
----------
killed:
7874
minion2:
----------
killed:
29071
The result of the salt command shows the process ID of the minions that were
successfully killed - in this case they were ``7874`` and ``29071``.
'''
pid = __grains__.get('pid')
if pid:
if 'ps.kill_pid' in __salt__:
__salt__['ps.kill_pid'](pid)
else:
pid = None
return {'killed': pid}
| en | 0.837564 | # -*- coding: utf-8 -*- Module to provide information about minions # Import Python libs # Import Salt libs # Don't shadow built-ins. Return a list of accepted, denied, unaccepted and rejected keys. This is the same output as `salt-key -L` CLI Example: .. code-block:: bash salt 'master' minion.list # We have to replace the minion/master directories # The source code below is (nearly) a copy of salt.key.Key.list_keys # We have to differentiate between RaetKey._check_minions_directories # and Zeromq-Keys. Raet-Keys only have three states while ZeroMQ-keys # have an additional 'denied' state. # key dir kind is not created yet, just skip Return the minion keys directory paths. This function is a copy of salt.key.Key._check_minions_directories. Return the minion keys directory paths. This function is a copy of salt.key.RaetKey._check_minions_directories. Kill the salt minion. If you have a monitor that restarts ``salt-minion`` when it dies then this is a great way to restart after a minion upgrade. CLI example:: >$ salt minion[12] minion.kill minion1: ---------- killed: 7874 minion2: ---------- killed: 29071 The result of the salt command shows the process ID of the minions that were successfully killed - in this case they were ``7874`` and ``29071``. | 2.34193 | 2 |
server/core/commands.py | saianupkumarp/daware | 1 | 6614949 | <gh_stars>1-10
from flask_script import Manager
from gevent import spawn
import settings
def command_manager(app):
    """Build a Flask-Script ``Manager`` for *app* exposing a ``start`` command.

    ``with_default_commands=None`` suppresses the built-in commands so only
    the ones registered below are available.
    """
    manager = Manager(app, with_default_commands=None)

    @manager.command
    def start():
        # Run the Flask app; swallow Ctrl-C so the CLI exits cleanly.
        try:
            app.run(host=settings.SERVER_HOST, port=settings.SERVER_PORT, threaded=settings.THREADED)
        except KeyboardInterrupt:
            pass
return manager | from flask_script import Manager
from gevent import spawn
import settings
def command_manager(app):
manager = Manager(app, with_default_commands=None)
@manager.command
def start():
try:
app.run(host=settings.SERVER_HOST, port=settings.SERVER_PORT, threaded=settings.THREADED)
except KeyboardInterrupt:
pass
return manager | none | 1 | 2.096429 | 2 | |
test_hermes/test_client.py | transifex/hermes | 2 | 6614950 | <reponame>transifex/hermes
from __future__ import absolute_import
from Queue import Empty
from random import randint
from time import sleep
import os
from unittest import TestCase, skipUnless
from signal import SIGINT, SIGCHLD
from select import error as select_error
from os import getpid
from mock import MagicMock, patch, PropertyMock
from psycopg2 import OperationalError
from hermes.client import Client
from hermes.components import Component
from hermes.connectors import PostgresConnector
from hermes.exceptions import InvalidConfigurationException
from hermes.strategies import TERMINATE
# Fixtures shared by the test cases below: the directory the client watches,
# the failover marker filenames it reacts to, and the DSN of the test database.
_WATCH_PATH = '/tmp/hermes_test'
_FAILOVER_FILES = ('recovery.conf', 'recovery.done')
_POSTGRES_DSN = {
    'database': 'test_hermes'
}
class RunningClientTestCase(TestCase):
    """Integration-style tests driving a real Client watching a tmp directory.

    Each test touches failover marker files under ``_WATCH_PATH`` and checks
    that the directory observer reacts (or deliberately does not react).
    ``PostgresConnector.is_server_master`` is monkey-patched per test to
    control which role the client believes the server has.
    """

    def setUp(self):
        # Create the watched folder before constructing the client.
        if not os.path.exists(_WATCH_PATH):
            os.makedirs(_WATCH_PATH)
        self.client = Client(_POSTGRES_DSN, _WATCH_PATH, _FAILOVER_FILES)
        self.client.log = MagicMock()

    def tearDown(self):
        if self.client.is_alive():
            self.client.terminate()
        # Remove the watched folder.
        # BUG FIX: the guard was inverted ('if not os.path.exists'), so the
        # directory created in setUp was never cleaned up between tests.  Also
        # remove any marker files the tests created, since os.removedirs only
        # deletes empty directories.
        if os.path.exists(_WATCH_PATH):
            for name in os.listdir(_WATCH_PATH):
                os.remove(os.path.join(_WATCH_PATH, name))
            os.removedirs(_WATCH_PATH)

    @skipUnless(os.environ.get('ALL_TESTS', False),
                "Unittests only")
    def test_client_directory_watcher_when_server_master(self):
        """Creating a failover file while master triggers component startup."""
        # We have to monkey patch the 'is_server_master' function to ensure
        # we can control the test path.
        old_func = PostgresConnector.is_server_master
        PostgresConnector.is_server_master = MagicMock(return_value=True)
        self.client._start_components = MagicMock(return_value=None)
        # Start the client and allow to settle.
        self.client._start_observer()
        sleep(3)
        # Create a failover marker file and detect whether the client noticed.
        file_path = '{}/{}'.format(_WATCH_PATH, _FAILOVER_FILES[0])
        with open(file_path, 'a'):
            os.utime(file_path, None)
        # Give the filesystem event time to emit.
        sleep(3)
        self.assertTrue(self.client._start_components.called)
        PostgresConnector.is_server_master = old_func

    @skipUnless(os.environ.get('ALL_TESTS', False),
                "Unittests only")
    def test_client_directory_watcher_when_server_slave(self):
        """A failover file while the server is a slave must not start the client."""
        # We have to monkey patch the 'is_server_master' function to ensure
        # we can control the test path.
        old_func = PostgresConnector.is_server_master
        PostgresConnector.is_server_master = MagicMock(return_value=False)
        # Start the observer and allow to settle.
        self.client.directory_observer.start()
        sleep(3)
        # Create a failover marker file and detect whether the client reacted.
        file_path = '{}/{}'.format(_WATCH_PATH, _FAILOVER_FILES[0])
        with open(file_path, 'a'):
            os.utime(file_path, None)
        # Give the filesystem event time to emit.
        sleep(3)
        self.assertFalse(self.client.is_alive())
        PostgresConnector.is_server_master = old_func

    def test_client_directory_watcher_when_file_incorrect(self):
        """Files other than the failover markers are ignored entirely."""
        # We have to monkey patch the 'is_server_master' function to ensure
        # we can control the test path.
        old_func = PostgresConnector.is_server_master
        PostgresConnector.is_server_master = MagicMock(return_value=True)
        # Start the observer and allow to settle.
        self.client.directory_observer.start()
        sleep(3)
        # Create a file that is NOT one of the failover markers.
        file_path = '{}/{}'.format(_WATCH_PATH, 'random_file.rand')
        with open(file_path, 'a'):
            os.utime(file_path, None)
        # Give the filesystem event time to emit.
        sleep(3)
        self.assertFalse(PostgresConnector.is_server_master.called)
        PostgresConnector.is_server_master = old_func
class ClientComponentTestCase(TestCase):
    """add_listener/add_processor accept Components and reject anything else."""

    @staticmethod
    def _component():
        # A throw-away Component wired up entirely with mocks.
        return Component(MagicMock(), MagicMock(), MagicMock())

    def test_add_listener_throws_on_non_component(self):
        instance = Client(MagicMock(), MagicMock())
        with self.assertRaises(InvalidConfigurationException):
            instance.add_listener(3)

    def test_add_processor_throws_on_non_component(self):
        instance = Client(MagicMock(), MagicMock())
        with self.assertRaises(InvalidConfigurationException):
            instance.add_processor(3)

    def test_add_listener_accepts_component(self):
        instance = Client(MagicMock())
        instance.add_listener(self._component())
        self.assertIsInstance(instance._listener, Component)

    def test_add_processor_accepts_component(self):
        instance = Client(MagicMock(), MagicMock())
        instance.add_processor(self._component())
        self.assertIsInstance(instance._processor, Component)
class ValidateComponentsTestCase(TestCase):
    """_validate_components must reject misconfigured listener/processor pairs."""

    def test_throws_on_non_listener(self):
        # NOTE(review): the test name says "non_listener" but it corrupts
        # _processor - possibly swapped with the test below; confirm against
        # hermes.client._validate_components.
        client = Client(MagicMock())
        client._processor = 3
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)

    def test_throws_on_non_processor(self):
        client = Client(MagicMock())
        client._listener = 3
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)

    def test_throws_on_different_queue(self):
        # The listener and processor must share one error queue: give each
        # component a distinct queue object and expect validation to fail.
        client = Client(MagicMock())
        client._listener = MagicMock()
        client._processor = MagicMock()
        client._listener.error_queue = MagicMock(
            return_value=True
        )
        # BUG FIX: the second assignment previously re-bound
        # client._listener.error_queue instead of the processor's queue, so
        # the "different queues" scenario was never actually exercised.
        client._processor.error_queue = MagicMock(
            return_value=False
        )
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)
class WatchdogObserverTestCase(TestCase):
    """Unit tests for Client._start_observer/_stop_observer.

    The watchdog directory observer is replaced with a MagicMock so only
    the start/stop decision logic is exercised.
    """

    def setUp(self):
        self.client = Client(MagicMock())
        self.client.directory_observer = MagicMock()

    def test_start_schedules_obeserver_if_watch_path(self):
        """A truthy watch path schedules and starts the observer."""
        self.client._watch_path = randint(50, 1000)
        self.client._start_observer()
        self.client.directory_observer.schedule.assert_called_once_with(
            self.client, self.client._watch_path, recursive=False
        )
        self.client.directory_observer.start.assert_called_once_with()

    def test_start_not_schedule_observer_if_none_watch_path(self):
        """With no watch path configured the observer is left untouched."""
        self.client._watch_path = None
        self.client._start_observer()
        self.assertEqual(self.client.directory_observer.schedule.call_count, 0)
        self.assertEqual(self.client.directory_observer.start.call_count, 0)

    def test_stop_stops_observer_if_watch_path_and_observer(self):
        """A live observer with a watch path gets stopped."""
        self.client.directory_observer.is_alive.return_value = True
        self.client._watch_path = True
        self.client._stop_observer()
        self.client.directory_observer.stop.assert_called_once_with()

    def test_stop_does_not_stop_observer_on_none(self):
        """No watch path means there is nothing to stop."""
        self.client._watch_path = None
        self.client._stop_observer()
        self.assertEqual(self.client.directory_observer.stop.call_count, 0)

    def test_stop_does_not_stop_on_dead(self):
        """An observer that is not alive is not stopped again."""
        self.client._watch_path = True
        self.client.directory_observer.is_alive.return_value = False
        self.client._stop_observer()
        self.assertEqual(self.client.directory_observer.stop.call_count, 0)
class ClientStartupTestCase(TestCase):
    """Tests for Client.start and Client._start_components."""

    def test_startup_functions_are_called(self):
        """start() validates components, installs signals, forks the process."""
        with patch('multiprocessing.Process.start') as mock_process_start:
            with patch('hermes.client.signal') as mock_signal:
                client = Client(MagicMock())
                client._validate_components = MagicMock()
                client.start()
                # Two handlers are installed (presumably SIGINT and SIGCHLD,
                # matching this module's imports - confirm in hermes.client).
                self.assertEqual(mock_signal.call_count, 2)
                client._validate_components.assert_called_once_with()
                mock_process_start.assert_called_once_with()

    def test_initial_start_components(self):
        """Components that are not alive get started."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = False
        client._listener = MagicMock()
        client._listener.is_alive.return_value = False
        client._start_components()
        client._listener.start.assert_called_once_with()
        client._processor.start.assert_called_once_with()

    def test_start_components_when_components_running(self):
        """Already-running components must not be started a second time."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = True
        client._listener = MagicMock()
        client._listener.is_alive.return_value = True
        client._start_components()
        self.assertEqual(client._listener.start.call_count, 0)
        self.assertEqual(client._processor.start.call_count, 0)

    def test_join_is_called_on_restart(self):
        """With restart=True, dead components with an ident are joined first."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = False
        client._processor.ident.return_value = True
        client._listener = MagicMock()
        client._listener.is_alive.return_value = False
        client._listener.ident.return_value = True
        client._start_components(restart=True)
        client._listener.join.assert_called_once_with()
        client._processor.join.assert_called_once_with()
class ClientShutdownTestCase(TestCase):
    """Tests for Client shutdown paths: _shutdown, _stop_components and the
    SIGTERM/SIGCHLD handlers."""

    def test_shutdown(self):
        """_shutdown stops components and the observer and clears the run flag."""
        client = Client(MagicMock())
        client.log = MagicMock()
        client._stop_components = MagicMock()
        client._stop_observer = MagicMock()
        client._should_run = True
        client._shutdown()
        client._stop_components.assert_called_once_with()
        client._stop_observer.assert_called_once_with()
        self.assertFalse(client._should_run)

    def test_stop_terminates(self):
        """Live components are terminated and then joined."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._listener = MagicMock()
        client._processor.ident.return_value = True
        client._listener.ident.return_value = True
        client._processor.is_alive.return_value = True
        client._listener.is_alive.return_value = True
        client._stop_components()
        client._processor.terminate.assert_called_once_with()
        client._listener.terminate.assert_called_once_with()
        client._listener.join.assert_called_once_with()
        client._processor.join.assert_called_once_with()

    def test_handle_terminate_when_same_process(self):
        """The handler shuts down directly when it fires in the client process."""
        with patch('hermes.client.Client.ident',
                   new_callable=PropertyMock) as mock_ident:
            client = Client(MagicMock())
            client._shutdown = MagicMock()
            mock_ident.return_value = getpid()
            client._handle_terminate(None, None)
            client._shutdown.assert_called_once_with()

    def test_handle_terminate_when_different_process(self):
        """From another process the handler only signals via the exit queue."""
        with patch('hermes.client.Client.ident',
                   new_callable=PropertyMock) as mock_ident:
            client = Client(MagicMock())
            client._exit_queue = MagicMock()
            client._shutdown = MagicMock()
            current_pid = getpid()
            # Pretend the client lives in a different process.
            mock_ident.return_value = current_pid + 1
            client._handle_terminate(None, None)
            client._exit_queue.put_nowait.assert_called_once_with(True)

    def test_handle_sigchld_when_should_not_run(self):
        """SIGCHLD is a no-op once the client has been told to stop."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._should_run = False
        client._handle_sigchld(None, None)
        self.assertEqual(
            client._processor.error_queue.get_nowait.call_count, 0
        )

    def test_handle_sigchld_when_expected_error_and_terminate(self):
        """An expected TERMINATE error re-runs the role-based procedure."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client.execute_role_based_procedure = MagicMock()
        client._processor.error_queue.get_nowait.return_value = (
            True, TERMINATE
        )
        client._should_run = True
        client._exception_raised = False
        client._handle_sigchld(SIGCHLD, None)
        client._processor.error_queue.get_nowait.assert_called_once_with()
        self.assertTrue(client._exception_raised)
        client.execute_role_based_procedure.assert_called_once_with()

    def test_handle_sigchld_when_not_expected(self):
        """An unexpected TERMINATE error shuts the client down."""
        client = Client(MagicMock())
        client.log = MagicMock()
        client._processor = MagicMock()
        client._shutdown = MagicMock()
        client._processor.error_queue.get_nowait.return_value = (
            False, TERMINATE
        )
        client._should_run = True
        client._exception_raised = False
        client._handle_sigchld(SIGCHLD, None)
        client._processor.error_queue.get_nowait.assert_called_once_with()
        self.assertTrue(client._exception_raised)
        client._shutdown.assert_called_once_with()

    def test_handle_sigchld_when_queue_is_empty(self):
        """No queued error means a child was interrupted: restart components."""
        client = Client(MagicMock())
        client._start_components = MagicMock()
        client._processor = MagicMock()
        client._processor.error_queue.get_nowait.side_effect = Empty
        client._should_run = True
        client._exception_raised = False
        client._handle_sigchld(SIGCHLD, None)
        client._processor.error_queue.get_nowait.assert_called_once_with()
        self.assertFalse(client._exception_raised)
        self.assertTrue(client._child_interrupted)
        client._start_components.assert_called_once_with(restart=True)
class ClientRunProcedureTestCase(TestCase):
    """Tests for the Client.run main loop (signals, exit queue, interrupts)."""

    def test_initial_run_funcs(self):
        """run() installs the SIGCHLD handler and executes the role procedure."""
        with patch('hermes.log.get_logger'):
            with patch('hermes.client.signal') as mock_signal:
                with patch('select.select') as mock_select:
                    # Break out of the main loop on the first select() call.
                    mock_select.side_effect = Exception
                    client = Client(MagicMock())
                    client._start_observer = MagicMock()
                    client.execute_role_based_procedure = MagicMock()
                    self.assertRaises(Exception, client.run)
                    mock_signal.assert_called_once_with(
                        SIGCHLD, client._handle_sigchld
                    )
                    client.execute_role_based_procedure.assert_called_once_with()

    def test_role_based_procedures_are_called_outside_of_main_loop(self):
        """The role procedure runs before the loop and must not touch
        _exception_raised."""
        with patch('hermes.log.get_logger'):
            with patch('hermes.client.signal'):
                client = Client(MagicMock())
                random_raised = randint(1, 10000)
                client._exception_raised = random_raised
                client._start_observer = MagicMock()
                client.execute_role_based_procedure = MagicMock(
                    side_effect=Exception
                )
                self.assertRaises(Exception, client.run)
                client.execute_role_based_procedure.assert_called_once_with()
                # Raised value should be the same as that which we set
                self.assertEqual(client._exception_raised, random_raised)

    def test_client_calls_terminate_on_exit_queue(self):
        """A value on the exit queue makes run() terminate the client."""
        with patch('hermes.log.get_logger'):
            with patch('hermes.client.signal'):
                client = Client(MagicMock())
                client.execute_role_based_procedure = MagicMock()
                client._start_observer = MagicMock()
                client.terminate = MagicMock(side_effect=Exception)
                # Queue starts empty; put a sentinel so run() picks it up.
                self.assertRaises(Empty, client._exit_queue.get_nowait)
                client._exit_queue.put(True)
                self.assertRaises(Exception, client.run)
                client.terminate.assert_called_once_with()

    def test_client_sets_run_flag_on_interrupt(self):
        """An interrupted select() clears the _should_run flag."""
        with patch('hermes.log.get_logger'):
            with patch('select.select', side_effect=select_error):
                client = Client(MagicMock())
                client.execute_role_based_procedure = MagicMock()
                client.run()
                self.assertFalse(client._should_run)
class RoleBasedProceduresTestCase(TestCase):
    """Tests for Client.execute_role_based_procedure (master/slave/down)."""

    def test_when_server_is_master(self):
        """A master server (re)starts the components."""
        client = Client(MagicMock())
        client.log = MagicMock()
        client._start_components = MagicMock()
        client.master_pg_conn = MagicMock()
        client.master_pg_conn.is_server_master.return_value = True
        client.execute_role_based_procedure()
        client._start_components.assert_called_once_with(restart=True)

    def test_when_server_is_slave(self):
        """A slave server stops the components."""
        client = Client(MagicMock())
        client.log = MagicMock()
        client._stop_components = MagicMock()
        client.master_pg_conn = MagicMock()
        client.master_pg_conn.is_server_master.return_value = False
        client.execute_role_based_procedure()
        client._stop_components.assert_called_once_with()

    def test_when_server_is_down_and_no_backoff(self):
        """An unreachable server stops components and sleeps 1s before retry."""
        with patch('hermes.client.sleep') as mock_sleep:
            # Raise out of the retry loop so the test terminates.
            mock_sleep.side_effect = Exception('Break out of loop')
            client = Client(MagicMock())
            client.log = MagicMock()
            client._stop_components = MagicMock()
            client.master_pg_conn = MagicMock()
            client.master_pg_conn.is_server_master.side_effect = OperationalError
            self.assertRaises(Exception, client.execute_role_based_procedure)
            client._stop_components.assert_called_once_with()
            mock_sleep.assert_called_once_with(1)
| from __future__ import absolute_import
from Queue import Empty
from random import randint
from time import sleep
import os
from unittest import TestCase, skipUnless
from signal import SIGINT, SIGCHLD
from select import error as select_error
from os import getpid
from mock import MagicMock, patch, PropertyMock
from psycopg2 import OperationalError
from hermes.client import Client
from hermes.components import Component
from hermes.connectors import PostgresConnector
from hermes.exceptions import InvalidConfigurationException
from hermes.strategies import TERMINATE
# Fixtures shared by the test cases below: the directory the client watches,
# the failover marker filenames it reacts to, and the DSN of the test database.
_WATCH_PATH = '/tmp/hermes_test'
_FAILOVER_FILES = ('recovery.conf', 'recovery.done')
_POSTGRES_DSN = {
    'database': 'test_hermes'
}
class RunningClientTestCase(TestCase):
    """Integration-style tests driving a real Client watching a tmp directory.

    Each test touches failover marker files under ``_WATCH_PATH`` and checks
    that the directory observer reacts (or deliberately does not react).
    ``PostgresConnector.is_server_master`` is monkey-patched per test to
    control which role the client believes the server has.
    """

    def setUp(self):
        # Create the watched folder before constructing the client.
        if not os.path.exists(_WATCH_PATH):
            os.makedirs(_WATCH_PATH)
        self.client = Client(_POSTGRES_DSN, _WATCH_PATH, _FAILOVER_FILES)
        self.client.log = MagicMock()

    def tearDown(self):
        if self.client.is_alive():
            self.client.terminate()
        # Remove the watched folder.
        # BUG FIX: the guard was inverted ('if not os.path.exists'), so the
        # directory created in setUp was never cleaned up between tests.  Also
        # remove any marker files the tests created, since os.removedirs only
        # deletes empty directories.
        if os.path.exists(_WATCH_PATH):
            for name in os.listdir(_WATCH_PATH):
                os.remove(os.path.join(_WATCH_PATH, name))
            os.removedirs(_WATCH_PATH)

    @skipUnless(os.environ.get('ALL_TESTS', False),
                "Unittests only")
    def test_client_directory_watcher_when_server_master(self):
        """Creating a failover file while master triggers component startup."""
        # We have to monkey patch the 'is_server_master' function to ensure
        # we can control the test path.
        old_func = PostgresConnector.is_server_master
        PostgresConnector.is_server_master = MagicMock(return_value=True)
        self.client._start_components = MagicMock(return_value=None)
        # Start the client and allow to settle.
        self.client._start_observer()
        sleep(3)
        # Create a failover marker file and detect whether the client noticed.
        file_path = '{}/{}'.format(_WATCH_PATH, _FAILOVER_FILES[0])
        with open(file_path, 'a'):
            os.utime(file_path, None)
        # Give the filesystem event time to emit.
        sleep(3)
        self.assertTrue(self.client._start_components.called)
        PostgresConnector.is_server_master = old_func

    @skipUnless(os.environ.get('ALL_TESTS', False),
                "Unittests only")
    def test_client_directory_watcher_when_server_slave(self):
        """A failover file while the server is a slave must not start the client."""
        # We have to monkey patch the 'is_server_master' function to ensure
        # we can control the test path.
        old_func = PostgresConnector.is_server_master
        PostgresConnector.is_server_master = MagicMock(return_value=False)
        # Start the observer and allow to settle.
        self.client.directory_observer.start()
        sleep(3)
        # Create a failover marker file and detect whether the client reacted.
        file_path = '{}/{}'.format(_WATCH_PATH, _FAILOVER_FILES[0])
        with open(file_path, 'a'):
            os.utime(file_path, None)
        # Give the filesystem event time to emit.
        sleep(3)
        self.assertFalse(self.client.is_alive())
        PostgresConnector.is_server_master = old_func

    def test_client_directory_watcher_when_file_incorrect(self):
        """Files other than the failover markers are ignored entirely."""
        # We have to monkey patch the 'is_server_master' function to ensure
        # we can control the test path.
        old_func = PostgresConnector.is_server_master
        PostgresConnector.is_server_master = MagicMock(return_value=True)
        # Start the observer and allow to settle.
        self.client.directory_observer.start()
        sleep(3)
        # Create a file that is NOT one of the failover markers.
        file_path = '{}/{}'.format(_WATCH_PATH, 'random_file.rand')
        with open(file_path, 'a'):
            os.utime(file_path, None)
        # Give the filesystem event time to emit.
        sleep(3)
        self.assertFalse(PostgresConnector.is_server_master.called)
        PostgresConnector.is_server_master = old_func
class ClientComponentTestCase(TestCase):
    """add_listener/add_processor accept Components and reject anything else."""

    def test_add_listener_throws_on_non_component(self):
        client = Client(MagicMock(), MagicMock())
        self.assertRaises(InvalidConfigurationException,
                          client.add_listener,
                          3)

    def test_add_processor_throws_on_non_component(self):
        client = Client(MagicMock(), MagicMock())
        self.assertRaises(InvalidConfigurationException,
                          client.add_processor,
                          3)

    def test_add_listener_accepts_component(self):
        client = Client(MagicMock())
        client.add_listener(Component(MagicMock(),
                                      MagicMock(),
                                      MagicMock()))
        self.assertIsInstance(client._listener, Component)

    def test_add_processor_accepts_component(self):
        client = Client(MagicMock(), MagicMock())
        client.add_processor(Component(MagicMock(),
                                       MagicMock(),
                                       MagicMock()))
        self.assertIsInstance(client._processor, Component)
class ValidateComponentsTestCase(TestCase):
    """_validate_components must reject misconfigured listener/processor pairs."""

    def test_throws_on_non_listener(self):
        # NOTE(review): the test name says "non_listener" but it corrupts
        # _processor - possibly swapped with the test below; confirm against
        # hermes.client._validate_components.
        client = Client(MagicMock())
        client._processor = 3
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)

    def test_throws_on_non_processor(self):
        client = Client(MagicMock())
        client._listener = 3
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)

    def test_throws_on_different_queue(self):
        # The listener and processor must share one error queue: give each
        # component a distinct queue object and expect validation to fail.
        client = Client(MagicMock())
        client._listener = MagicMock()
        client._processor = MagicMock()
        client._listener.error_queue = MagicMock(
            return_value=True
        )
        # BUG FIX: the second assignment previously re-bound
        # client._listener.error_queue instead of the processor's queue, so
        # the "different queues" scenario was never actually exercised.
        client._processor.error_queue = MagicMock(
            return_value=False
        )
        self.assertRaises(InvalidConfigurationException,
                          client._validate_components)
class WatchdogObserverTestCase(TestCase):
    """Unit tests for Client._start_observer/_stop_observer.

    The watchdog directory observer is replaced with a MagicMock so only
    the start/stop decision logic is exercised.
    """

    def setUp(self):
        self.client = Client(MagicMock())
        self.client.directory_observer = MagicMock()

    def test_start_schedules_obeserver_if_watch_path(self):
        """A truthy watch path schedules and starts the observer."""
        self.client._watch_path = randint(50, 1000)
        self.client._start_observer()
        self.client.directory_observer.schedule.assert_called_once_with(
            self.client, self.client._watch_path, recursive=False
        )
        self.client.directory_observer.start.assert_called_once_with()

    def test_start_not_schedule_observer_if_none_watch_path(self):
        """With no watch path configured the observer is left untouched."""
        self.client._watch_path = None
        self.client._start_observer()
        self.assertEqual(self.client.directory_observer.schedule.call_count, 0)
        self.assertEqual(self.client.directory_observer.start.call_count, 0)

    def test_stop_stops_observer_if_watch_path_and_observer(self):
        """A live observer with a watch path gets stopped."""
        self.client.directory_observer.is_alive.return_value = True
        self.client._watch_path = True
        self.client._stop_observer()
        self.client.directory_observer.stop.assert_called_once_with()

    def test_stop_does_not_stop_observer_on_none(self):
        """No watch path means there is nothing to stop."""
        self.client._watch_path = None
        self.client._stop_observer()
        self.assertEqual(self.client.directory_observer.stop.call_count, 0)

    def test_stop_does_not_stop_on_dead(self):
        """An observer that is not alive is not stopped again."""
        self.client._watch_path = True
        self.client.directory_observer.is_alive.return_value = False
        self.client._stop_observer()
        self.assertEqual(self.client.directory_observer.stop.call_count, 0)
class ClientStartupTestCase(TestCase):
    """Tests for Client.start and Client._start_components."""

    def test_startup_functions_are_called(self):
        """start() validates components, installs signals, forks the process."""
        with patch('multiprocessing.Process.start') as mock_process_start:
            with patch('hermes.client.signal') as mock_signal:
                client = Client(MagicMock())
                client._validate_components = MagicMock()
                client.start()
                # Two handlers are installed (presumably SIGINT and SIGCHLD,
                # matching this module's imports - confirm in hermes.client).
                self.assertEqual(mock_signal.call_count, 2)
                client._validate_components.assert_called_once_with()
                mock_process_start.assert_called_once_with()

    def test_initial_start_components(self):
        """Components that are not alive get started."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = False
        client._listener = MagicMock()
        client._listener.is_alive.return_value = False
        client._start_components()
        client._listener.start.assert_called_once_with()
        client._processor.start.assert_called_once_with()

    def test_start_components_when_components_running(self):
        """Already-running components must not be started a second time."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = True
        client._listener = MagicMock()
        client._listener.is_alive.return_value = True
        client._start_components()
        self.assertEqual(client._listener.start.call_count, 0)
        self.assertEqual(client._processor.start.call_count, 0)

    def test_join_is_called_on_restart(self):
        """With restart=True, dead components with an ident are joined first."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._processor.is_alive.return_value = False
        client._processor.ident.return_value = True
        client._listener = MagicMock()
        client._listener.is_alive.return_value = False
        client._listener.ident.return_value = True
        client._start_components(restart=True)
        client._listener.join.assert_called_once_with()
        client._processor.join.assert_called_once_with()
class ClientShutdownTestCase(TestCase):
    """Tests for Client shutdown paths: _shutdown, _stop_components and the
    SIGTERM/SIGCHLD handlers."""

    def test_shutdown(self):
        """_shutdown stops components and the observer and clears the run flag."""
        client = Client(MagicMock())
        client.log = MagicMock()
        client._stop_components = MagicMock()
        client._stop_observer = MagicMock()
        client._should_run = True
        client._shutdown()
        client._stop_components.assert_called_once_with()
        client._stop_observer.assert_called_once_with()
        self.assertFalse(client._should_run)

    def test_stop_terminates(self):
        """Live components are terminated and then joined."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._listener = MagicMock()
        client._processor.ident.return_value = True
        client._listener.ident.return_value = True
        client._processor.is_alive.return_value = True
        client._listener.is_alive.return_value = True
        client._stop_components()
        client._processor.terminate.assert_called_once_with()
        client._listener.terminate.assert_called_once_with()
        client._listener.join.assert_called_once_with()
        client._processor.join.assert_called_once_with()

    def test_handle_terminate_when_same_process(self):
        """The handler shuts down directly when it fires in the client process."""
        with patch('hermes.client.Client.ident',
                   new_callable=PropertyMock) as mock_ident:
            client = Client(MagicMock())
            client._shutdown = MagicMock()
            mock_ident.return_value = getpid()
            client._handle_terminate(None, None)
            client._shutdown.assert_called_once_with()

    def test_handle_terminate_when_different_process(self):
        """From another process the handler only signals via the exit queue."""
        with patch('hermes.client.Client.ident',
                   new_callable=PropertyMock) as mock_ident:
            client = Client(MagicMock())
            client._exit_queue = MagicMock()
            client._shutdown = MagicMock()
            current_pid = getpid()
            # Pretend the client lives in a different process.
            mock_ident.return_value = current_pid + 1
            client._handle_terminate(None, None)
            client._exit_queue.put_nowait.assert_called_once_with(True)

    def test_handle_sigchld_when_should_not_run(self):
        """SIGCHLD is a no-op once the client has been told to stop."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client._should_run = False
        client._handle_sigchld(None, None)
        self.assertEqual(
            client._processor.error_queue.get_nowait.call_count, 0
        )

    def test_handle_sigchld_when_expected_error_and_terminate(self):
        """An expected TERMINATE error re-runs the role-based procedure."""
        client = Client(MagicMock())
        client._processor = MagicMock()
        client.execute_role_based_procedure = MagicMock()
        client._processor.error_queue.get_nowait.return_value = (
            True, TERMINATE
        )
        client._should_run = True
        client._exception_raised = False
        client._handle_sigchld(SIGCHLD, None)
        client._processor.error_queue.get_nowait.assert_called_once_with()
        self.assertTrue(client._exception_raised)
        client.execute_role_based_procedure.assert_called_once_with()

    def test_handle_sigchld_when_not_expected(self):
        """An unexpected TERMINATE error shuts the client down."""
        client = Client(MagicMock())
        client.log = MagicMock()
        client._processor = MagicMock()
        client._shutdown = MagicMock()
        client._processor.error_queue.get_nowait.return_value = (
            False, TERMINATE
        )
        client._should_run = True
        client._exception_raised = False
        client._handle_sigchld(SIGCHLD, None)
        client._processor.error_queue.get_nowait.assert_called_once_with()
        self.assertTrue(client._exception_raised)
        client._shutdown.assert_called_once_with()

    def test_handle_sigchld_when_queue_is_empty(self):
        """No queued error means a child was interrupted: restart components."""
        client = Client(MagicMock())
        client._start_components = MagicMock()
        client._processor = MagicMock()
        client._processor.error_queue.get_nowait.side_effect = Empty
        client._should_run = True
        client._exception_raised = False
        client._handle_sigchld(SIGCHLD, None)
        client._processor.error_queue.get_nowait.assert_called_once_with()
        self.assertFalse(client._exception_raised)
        self.assertTrue(client._child_interrupted)
        client._start_components.assert_called_once_with(restart=True)
class ClientRunProcedureTestCase(TestCase):
    """Tests for the Client.run main loop (signals, exit queue, interrupts)."""

    def test_initial_run_funcs(self):
        """run() installs the SIGCHLD handler and executes the role procedure."""
        with patch('hermes.log.get_logger'):
            with patch('hermes.client.signal') as mock_signal:
                with patch('select.select') as mock_select:
                    # Break out of the main loop on the first select() call.
                    mock_select.side_effect = Exception
                    client = Client(MagicMock())
                    client._start_observer = MagicMock()
                    client.execute_role_based_procedure = MagicMock()
                    self.assertRaises(Exception, client.run)
                    mock_signal.assert_called_once_with(
                        SIGCHLD, client._handle_sigchld
                    )
                    client.execute_role_based_procedure.assert_called_once_with()

    def test_role_based_procedures_are_called_outside_of_main_loop(self):
        """The role procedure runs before the loop and must not touch
        _exception_raised."""
        with patch('hermes.log.get_logger'):
            with patch('hermes.client.signal'):
                client = Client(MagicMock())
                random_raised = randint(1, 10000)
                client._exception_raised = random_raised
                client._start_observer = MagicMock()
                client.execute_role_based_procedure = MagicMock(
                    side_effect=Exception
                )
                self.assertRaises(Exception, client.run)
                client.execute_role_based_procedure.assert_called_once_with()
                # Raised value should be the same as that which we set
                self.assertEqual(client._exception_raised, random_raised)

    def test_client_calls_terminate_on_exit_queue(self):
        """A value on the exit queue makes run() terminate the client."""
        with patch('hermes.log.get_logger'):
            with patch('hermes.client.signal'):
                client = Client(MagicMock())
                client.execute_role_based_procedure = MagicMock()
                client._start_observer = MagicMock()
                client.terminate = MagicMock(side_effect=Exception)
                # Queue starts empty; put a sentinel so run() picks it up.
                self.assertRaises(Empty, client._exit_queue.get_nowait)
                client._exit_queue.put(True)
                self.assertRaises(Exception, client.run)
                client.terminate.assert_called_once_with()

    def test_client_sets_run_flag_on_interrupt(self):
        """An interrupted select() clears the _should_run flag."""
        with patch('hermes.log.get_logger'):
            with patch('select.select', side_effect=select_error):
                client = Client(MagicMock())
                client.execute_role_based_procedure = MagicMock()
                client.run()
                self.assertFalse(client._should_run)
class RoleBasedProceduresTestCase(TestCase):
    """Tests for Client.execute_role_based_procedure (master/slave/down)."""

    def test_when_server_is_master(self):
        """A master server (re)starts the components."""
        client = Client(MagicMock())
        client.log = MagicMock()
        client._start_components = MagicMock()
        client.master_pg_conn = MagicMock()
        client.master_pg_conn.is_server_master.return_value = True
        client.execute_role_based_procedure()
        client._start_components.assert_called_once_with(restart=True)

    def test_when_server_is_slave(self):
        """A slave server stops the components."""
        client = Client(MagicMock())
        client.log = MagicMock()
        client._stop_components = MagicMock()
        client.master_pg_conn = MagicMock()
        client.master_pg_conn.is_server_master.return_value = False
        client.execute_role_based_procedure()
        client._stop_components.assert_called_once_with()

    def test_when_server_is_down_and_no_backoff(self):
        """An unreachable server stops components and sleeps 1s before retry."""
        with patch('hermes.client.sleep') as mock_sleep:
            # Raise out of the retry loop so the test terminates.
            mock_sleep.side_effect = Exception('Break out of loop')
            client = Client(MagicMock())
            client.log = MagicMock()
            client._stop_components = MagicMock()
            client.master_pg_conn = MagicMock()
            client.master_pg_conn.is_server_master.side_effect = OperationalError
            self.assertRaises(Exception, client.execute_role_based_procedure)
            client._stop_components.assert_called_once_with()
            mock_sleep.assert_called_once_with(1)