| code (string, 22–1.05M chars) | apis (list, 1–3.31k items) | extract_api (string, 75–3.25M chars) |
|---|---|---|
# -*- coding: utf-8 -*-
"""
chemdataextractor.parse.battery_energy.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parser for battery energy values.
"""
import re
import logging
from lxml import etree
import traceback
from . import merge, join
from .base import BaseSentenceParser
from ..utils import first
from .cem import cem, chemical_label, lenient_chemical_label, solvent_name
from .common import lbrct, dt, rbrct, comma
from .elements import W, I, R, T, Optional, Any, OneOrMore, Not, ZeroOrMore, SkipTo
log = logging.getLogger(__name__)
delim = R(r'^[:;\.,]$')
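# Units grammar: energy per mass or volume, e.g. "Wh kg-1", "Wh g-1", "Wh L-1",
# or slash forms such as "Wh/kg".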
units = ((((W('Wh') | (W('W') + W('h'))) + R(r'^(k?g|m?(L|l))[\-–−]1$'))) | ((W('Wh') | (W('W') + W('h'))) + W('/') + R(r'^(k?g|m?(L|l))$')))('units').add_action(merge)
joined_range = R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?[\-–−~∼˜]\d+(\.\d+)?(\(\d\))?$')('value').add_action(join)
spaced_range = (R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') + Optional(units).hide() +
                (R(r'^[\-±–−~∼˜]$') + R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') | R(r'^[\+\-–−]\d+(\.\d+)?(\(\d\))?$')))('value').add_action(join)
to_range = (R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') + Optional(units).hide() +
            (I('to') + R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') | R(r'^[\+\-–−]\d+(\.\d+)?(\(\d\))?$')))('value').add_action(join)
and_range = (
ZeroOrMore(R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$') + Optional(units).hide() + Optional(comma).hide()) +
Optional(I('and') | comma).hide() + R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$'))('value').add_action(join)
range = (Optional(R(r'^[\-–−]$')) + (and_range | to_range | spaced_range | joined_range)).add_action(join)
value = (Optional(R(r'^[\-–−]$')) +
Optional(R(r'^[~∼˜\<\>\≤\≥]$')) +
    Optional(R(r'^[\-–−±∓⨤⨦]$')) +
R(r'^[\+\-–−]?\d+(\.\d+)?(\(\d\))?$')).add_action(join)
ordinal = T('JJ').add_action(join)
ener = (range | value | ordinal)('value')
cem_prefix = (
    Optional(I('oxidized')) +
cem('cem') +
Optional(I('battery')) +
Optional(delim).hide())
multi_cem = ZeroOrMore(cem_prefix + Optional(comma).hide()) + Optional(I('and') | comma).hide() + cem_prefix
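# multi_cem matches enumerations of compounds such as "A, B and C".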
ener_specifier = ((I('energy') | I('energies')) + Optional(I('density')))('specifier')
prefix = (
Optional(I('the') | I('a') | I('an') | I('its') | I('with')).hide() +
Optional(I('inherently')).hide() +
Optional(I('excellent') | I('high') | I('low') | I('stable') | I('superior') | I('maximum') | I('highest')).hide() +
Optional(I('initial')).hide() +
ener_specifier +
Optional(I('varies') + I('from')).hide() +
Optional(W('=') | W('~') | W('≈') | W('≃') | I('of') | I('was') | I('is') | I('at') | I('as') | I('near') | I('above') | I('below')).hide() +
Optional(I('reported') | I('determined') | I('measured') | I('calculated') | I('known')).hide() +
Optional(I('as') | (I('to') + I('be'))).hide() +
    Optional(I('in') + I('the') + I('range') | I('ranging')).hide() +
    Optional(I('of')).hide() +
    Optional(I('about') | I('from') | I('approximately') | I('around') | (I('high') + I('as')) | ((I('higher') | I('lower')) + I('than')) | (I('up') + I('to') | I('in') + I('excess') + I('of'))).hide())
ener_and_units = (
Optional(lbrct).hide() +
ener +
units +
Optional(rbrct).hide())('ener')
ener_specifier_and_value = Optional(prefix) + (Optional(delim).hide() + Optional(
lbrct | I('[')).hide() + ener + units + Optional(rbrct | I(']')).hide())('ener')
prefix_cem_value = (
Optional(prefix).hide() +
Optional(I('the') | I('a') | I('an') | I('these') | I('those') | I('this') | I('that')).hide() +
(multi_cem | cem_prefix | lenient_chemical_label) +
    Optional(lbrct + Optional(cem_prefix | lenient_chemical_label | multi_cem) + rbrct) +
Optional(I('is') | I('was') | I('were') | I('occurs') | I('of') | I('could') | I('can') | I('remained') | (I('can') + I('be') + I('assigned') + Optional(I('at') | I('to')))).hide() +
Optional(I('reach') | I('reaching') | I('observed') | I('determined') | I('measured') | I('calculated') | I('found') | I('increased') | I('expected')).hide() +
    Optional(I('in') + I('the') + I('range') + I('of') | I('ranging') + I('from') | I('as') | I('to') | I('to') + I('be') | I('about') | I('over') | (I('higher') | I('lower')) + I('than') | I('above')).hide() +
    Optional(lbrct).hide() + SkipTo(ener_specifier_and_value | ener_and_units) +
(ener_specifier_and_value | ener_and_units) +
Optional(rbrct).hide())('ener_phrase')
cem_prefix_value = (
((multi_cem | cem_prefix | lenient_chemical_label))
+ Optional(delim).hide()
+ Optional(I('that') | I('which') | I('was') | I('since') | I('the') | I('resulting') + I('in')).hide()
+ Optional(I('typically') | I('also')).hide()
+ Optional(prefix)
+ Optional(I('display') | I('displays') | I('exhibit') | I('exhibited') | I('exhibits') | I('exhibiting') | I('shows') | I('show') | I('showed') | I('gave') | I('demonstrate') | I('demonstrates') | I('are') | I('remains') | I('maintains') | I('delivered') | I('provided') |
I('undergo') | I('undergoes') | I('has') | I('have') | I('having') | I('determined') | I('with') | I('where') | I('orders') | I('were') | (I('is') + Optional(I('classified') + I('as')))).hide()
+ Optional((I('reported') + I('to') + I('have')) | I('at') | I('with')).hide()
+ Optional(lbrct).hide()
+ (ener_specifier_and_value | ener_and_units) + Optional(rbrct).hide()
+ Optional(I('can') + I('be') + I('achieved'))
)('ener_phrase')
prefix_value_cem = (
Optional(I('below') | I('at')).hide() +
Optional(prefix).hide() +
Optional(I('is') | I('were') | I('was') | I('are')).hide() +
(ener_specifier_and_value | ener_and_units) +
Optional(
Optional(I('has') + I('been') + I('found')) +
Optional(I('is') | I('were') | I('was') | I('are')) +
Optional(I('observed') | I('determined') | I('measured') | I('calculated') | I('reported'))).hide() +
Optional(ener_specifier_and_value | ener_and_units) +
Optional(I('in') | I('for') | I('of')).hide() +
Optional(I('the')).hide() +
Optional(R('^[:;,]$')).hide() +
Optional(lbrct).hide() +
Optional(I('of')).hide() + SkipTo(multi_cem | cem_prefix | lenient_chemical_label) +
(multi_cem | cem_prefix | lenient_chemical_label) +
Optional(rbrct).hide())('ener_phrase')
value_prefix_cem = (Optional(I('of')).hide() +
(ener_specifier_and_value | ener_and_units) +
Optional(delim).hide() +
Optional(I('which') | I('that')).hide() +
Optional(I('has') +
I('been') | I('was') | I('is') | I('were')).hide() +
Optional(I('found') | I('observed') | I('measured') | I('calculated') | I('determined')).hide() +
Optional(I('likely') | I('close') | (I('can') +
I('be'))).hide() +
Optional(I('corresponds') | I('associated')).hide() +
Optional(I('to') +
I('be') | I('with') | I('is') | I('as')).hide() +
Optional(I('the')).hide() +
ener_specifier +
Optional(I('of') | I('in')).hide() +
(multi_cem | cem_prefix | lenient_chemical_label))('ener_phrase')
cem_value_prefix = ((multi_cem | cem_prefix | lenient_chemical_label)
    + Optional((I('is') | I('was') | I('were')) + Optional(I('reported') | I('found') | I('calculated') | I('measured') | I('shown')) + Optional(I('to'))).hide()
+ Optional(I('display') | I('displays') | I('exhibit') | I('exhibits') | I('exhibiting') | I('shows') | I('show') | I('demonstrate') | I('demonstrates') |
I('undergo') | I('undergoes') | I('has') | I('have') | I('having') | I('determined') | I('with') | I('where') | I('orders') | (I('is') + Optional(I('classified') + I('as')))).hide()
+ Optional(I('the') | I('a') | I('an')).hide()
+ Optional(I('value') | I('values')).hide()
+ Optional(I('varies') + I('from')).hide()
+ Optional(W('=') | W('~') | W('≈') | W('≃') | I('was') | I('is') | I('at') | I('as') | I('near') | I('above') | I('below')).hide()
+ Optional(I('in') + I('the') + I('range') | I('ranging')).hide()
    + Optional(I('of') | I('about') | I('from') | I('approximately') | I('around') | (I('high') + I('as')) | ((I('higher') | I('lower')) + I('than'))).hide()
+ (ener_specifier_and_value | ener_and_units)
+ Optional(I('as') | I('of') | I('for')).hide()
+ Optional(I('its') | I('their') | I('the')).hide() + ener_specifier)('ener_phrase')
bc = (
value_prefix_cem
| cem_value_prefix
| cem_prefix_value
| prefix_cem_value
| prefix_value_cem
)
def print_tree(trees):
    """Debug helper: print a parse result tree and its XML serialisation."""
    print(trees)
    try:
        print(etree.tostring(trees))
    except BaseException:
        print('no tree')
class EnergyParser(BaseSentenceParser):
""""""
root = bc
    def interpret(self, result, start, end):
        compound = self.model.fields['compound'].model_class()
        raw_value = first(result.xpath('./ener/value/text()'))
        raw_units = first(result.xpath('./ener/units/text()'))
        try:
            specifier = ' '.join(
                i for i in first(result.xpath('./specifier')).itertext())
        except BaseException:
            specifier = ''
        # print_tree(first(result.xpath('.')))
        battery_energy = self.model(raw_value=raw_value,
                                    raw_units=raw_units,
                                    specifier=specifier,
                                    value=self.extract_value(raw_value),
                                    error=self.extract_error(raw_value),
                                    units=self.extract_units(raw_units),
                                    )
        cem_lists = []
        for cem_el in result.xpath('./cem'):
            if cem_el is not None:
                log.debug(etree.tostring(cem_el))
                cem_lists.append(''.join(cem_el.xpath('./names/text()')))
                # Only attach a compound when a chemical name is present in
                # the tree; records without one keep compound unset.
                battery_energy.compound = compound
                battery_energy.compound.names = cem_lists
                battery_energy.compound.labels = cem_el.xpath('./labels/text()')
        log.debug(battery_energy.serialize())
        yield battery_energy
# except TypeError as e:
# print('==========Error===============')
# traceback.print_exc()
# log.debug(e)
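# Minimal usage sketch (illustrative, not part of this module): assumes a
# BatteryEnergy model whose `parsers` list includes EnergyParser, in the usual
# ChemDataExtractor pipeline.
# from chemdataextractor import Document
# doc = Document('The cathode delivered an energy density of 210 Wh kg-1.')
# doc.models = [BatteryEnergy]
# print(doc.records.serialize())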
|
[
"lxml.etree.tostring",
"logging.getLogger"
] |
[((508, 535), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (525, 535), False, 'import logging\n'), ((9043, 9064), 'lxml.etree.tostring', 'etree.tostring', (['trees'], {}), '(trees)\n', (9057, 9064), False, 'from lxml import etree\n'), ((10461, 10483), 'lxml.etree.tostring', 'etree.tostring', (['cem_el'], {}), '(cem_el)\n', (10475, 10483), False, 'from lxml import etree\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from . import ResourceConfigTestCase
from .. import BaseModel
Base = declarative_base(cls=BaseModel)
class SomeModel(Base):
id = Column(Integer, primary_key=True)
unique = Column(String, unique=True)
non_unique = Column(String)
class TestDatasource(ResourceConfigTestCase):
def test_set_source_to_model_name(self):
endpoint_def = self._render(SomeModel)
self.assertEqual(endpoint_def['datasource']['source'], 'SomeModel')
def test_projection_for_regular_columns(self):
endpoint_def = self._render(SomeModel)
self.assertEqual(endpoint_def['datasource']['projection'], {
'_etag': 0,
'id': 1,
'unique': 1,
'non_unique': 1,
})
def test_projection_with_custom_automatically_handled_fields(self):
self._created = '_date_created'
self._updated = '_last_updated'
self._etag = 'non_unique'
endpoint_def = self._render(SomeModel)
self.assertEqual(endpoint_def['datasource']['projection'], {
'non_unique': 0,
'id': 1,
'unique': 1,
})
|
[
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column"
] |
[((239, 270), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {'cls': 'BaseModel'}), '(cls=BaseModel)\n', (255, 270), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((305, 338), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (311, 338), False, 'from sqlalchemy import Column, Integer, String\n'), ((352, 379), 'sqlalchemy.Column', 'Column', (['String'], {'unique': '(True)'}), '(String, unique=True)\n', (358, 379), False, 'from sqlalchemy import Column, Integer, String\n'), ((397, 411), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (403, 411), False, 'from sqlalchemy import Column, Integer, String\n')]
|
import random
import copy
class Jeu:
def __init__(self, lignes):
self.allumettes = []
self.lignes = lignes
nombreAllumettes = 1
for i in range(lignes):
self.allumettes.append(nombreAllumettes)
nombreAllumettes += 2
def __str__(self):
lstGraphique = []
for i in range(len(self.allumettes)):
lstGraphique.append(
str(i) + "] " + " " * (self.lignes - 1 - i) + "|" * self.allumettes[i]
)
return "\n".join(lstGraphique) + "\n" + "-" * 10
def retire(self, ligne, nombre):
if len(self.allumettes) - 1 < ligne or ligne < 0 or nombre < 1:
return False
if self.allumettes[ligne] < nombre:
return False
self.allumettes[ligne] -= nombre
return True
def testValide(self, ligne, nombre):
if len(self.allumettes) - 1 < ligne or ligne < 0 or nombre < 1:
return False
if self.allumettes[ligne] < nombre:
return False
if (
self.totalAllumettes() - nombre < 1
        ):  # check that the move does not make us lose
return False
return True
def totalAllumettes(self):
return sum(self.allumettes)
def checkPosition(self):
        # convert the matchstick counts to binary strings
listBinaire = []
for i in self.allumettes:
listBinaire.append(str(bin(i))[2:])
print(listBinaire)
maxLen = max([len(i) for i in listBinaire])
finalList = [i.zfill(maxLen) for i in listBinaire]
print(finalList)
sommeColonnes = []
for i in range(len(finalList[0])):
sommeColonnes.append(sum(int(j[i]) for j in finalList))
print(sommeColonnes)
return all([i % 2 == 0 for i in sommeColonnes])
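    # Note: "every binary column sum is even" is exactly the zero Nim-sum
    # condition; equivalently, functools.reduce(operator.xor, self.allumettes) == 0.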
class Partie:
def __init__(self, aleatoire=False):
self.aleatoire = aleatoire
print("\n" * 20 + "-" * 20)
print("Bienvenue dans le jeu de Marienbad")
print("-" * 20)
print(
"Un nombre aléatoire de lignes entre 3 et 6 va être choisi pour commencer le jeu."
)
nbLignes = random.randint(3, 6)
self.jeu = Jeu(nbLignes)
print(self.jeu)
print("Un joueur aléatoire entre le joueur et le robot va commencer la partie.")
self.joueurActuel = random.choice(["joueur", "robot"])
print(f"Le hasard a décidé : c'est le {self.joueurActuel} qui va commencer.")
def checkFin(self):
if self.jeu.totalAllumettes() == 0:
return "zero"
elif self.jeu.totalAllumettes() == 1:
return "un"
else:
return False
def tour(self):
while not self.checkFin():
if self.joueurActuel == "joueur":
                nbLigne = int(
                    input(
                        "Enter the number of the row you want to remove matchsticks from: "
                    )
                )
                nbAllumette = int(
                    input(
                        "Enter how many matchsticks to remove from that row: "
                    )
                )
                if self.jeu.retire(nbLigne, nbAllumette):
                    print(
                        f"The player removes {nbAllumette} matchsticks from row {nbLigne}."
                    )
                    print(self.jeu)
                    self.joueurActuel = "robot"
                else:
                    print(
                        "Sorry, your input is invalid (either the row number is wrong, or you are trying to remove more matchsticks than the row contains)."
                    )
else:
if self.aleatoire:
listeIndexDisponibles = [
i
for i in range(len(self.jeu.allumettes))
if self.jeu.allumettes[i] > 0
]
nbLigne = random.choice(listeIndexDisponibles)
nbAllumette = random.randint(1, self.jeu.allumettes[nbLigne])
self.jeu.retire(nbLigne, nbAllumette)
                    print(
                        f"The robot removes {nbAllumette} matchsticks from row {nbLigne}."
                    )
                    print(self.jeu)
                    self.joueurActuel = "player"
else:
                    positionsValides = []
                    # try every possible move and keep the valid ones
                    for i in range(len(self.jeu.allumettes)):  # row number
                        for j in range(
                            1, self.jeu.allumettes[i] + 1
                        ):  # number of matchsticks
if self.jeu.testValide(i, j):
positionsValides.append((i, j))
print(positionsValides)
position = None
                    for i in positionsValides:
                        # copy the game with the copy module so we do not
                        # mutate the real board through shared references
                        jeuCopy = copy.deepcopy(self.jeu)
jeuCopy.retire(i[0], i[1])
good = jeuCopy.checkPosition()
if good:
position = i
break
                    if position is None:
                        print("No winning move is possible. The robot has lost.")
                        return
                    else:
                        nbLigne, nbAllumette = position
                        self.jeu.retire(nbLigne, nbAllumette)
                        print(
                            f"The robot removes {nbAllumette} matchsticks from row {nbLigne}."
                        )
                        print(self.jeu)
                        self.joueurActuel = "player"
                # the move is applied to a copy of the board and we check
                # whether the resulting position is winning; if so, we play
                # it on the real board, otherwise we try the next move
                # TODO: fix the win condition, because it was implemented the wrong way around
state = self.checkFin()
if state == "zero":
            print(
                f"There are no matchsticks left on the board: the {'robot' if self.joueurActuel == 'player' else 'player'} has won!"
            )
        else:
            print(
                f"There is only one matchstick left on the board: the {'robot' if self.joueurActuel == 'player' else 'player'} has lost!"
            )
p = Partie(aleatoire=False)
p.tour()
|
[
"copy.deepcopy",
"random.choice",
"random.randint"
] |
[((2202, 2222), 'random.randint', 'random.randint', (['(3)', '(6)'], {}), '(3, 6)\n', (2216, 2222), False, 'import random\n'), ((2399, 2433), 'random.choice', 'random.choice', (["['joueur', 'robot']"], {}), "(['joueur', 'robot'])\n", (2412, 2433), False, 'import random\n'), ((4062, 4098), 'random.choice', 'random.choice', (['listeIndexDisponibles'], {}), '(listeIndexDisponibles)\n', (4075, 4098), False, 'import random\n'), ((4133, 4180), 'random.randint', 'random.randint', (['(1)', 'self.jeu.allumettes[nbLigne]'], {}), '(1, self.jeu.allumettes[nbLigne])\n', (4147, 4180), False, 'import random\n'), ((5173, 5196), 'copy.deepcopy', 'copy.deepcopy', (['self.jeu'], {}), '(self.jeu)\n', (5186, 5196), False, 'import copy\n')]
|
import ctypes
# *************************************************
#* sql.py
#*
#* These should be consistent with the MS version.
#*
#*************************************************#
#ifndef __SQL_H
#define __SQL_H
# ***************************
#* default to 3.51 declare something else before here and you get a whole new ball of wax
#**************************#
#ifndef ODBCVER
ODBCVER = 0x0380
#endif
#ifndef __SQLTYPES_H
#include "sqltypes.h"
#endif
#ifdef __cplusplus
# extern "C" {
#endif
# ***************************
# * some ret values
# **************************#
SQL_NULL_DATA = (-1)
SQL_DATA_AT_EXEC = (-2)
SQL_SUCCESS = 0
SQL_SUCCESS_WITH_INFO = 1
#if (ODBCVER >= 0x0300)
SQL_NO_DATA = 100
#endif
SQL_ERROR = (-1)
SQL_INVALID_HANDLE = (-2)
SQL_STILL_EXECUTING = 2
SQL_NEED_DATA = 99
# SQL_SUCCEEDED(rc): (((rc)&(~1))==0)
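# A Python equivalent of the C macro above (not part of the original header
# translation; added for convenience):
def SQL_SUCCEEDED(rc):
    return (rc & ~1) == 0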
# ***************************
#* use these to indicate string termination to some function
#**************************#
SQL_NTS = (-3)
SQL_NTSL = (-3)
# maximum message length #
SQL_MAX_MESSAGE_LENGTH = 512
# date/time length constants #
#if (ODBCVER >= 0x0300)
SQL_DATE_LEN = 10
SQL_TIME_LEN = 8 # add P+1 if precision is nonzero #
SQL_TIMESTAMP_LEN = 19 # add P+1 if precision is nonzero #
#endif
# handle type identifiers #
#if (ODBCVER >= 0x0300)
SQL_HANDLE_ENV = 1
SQL_HANDLE_DBC = 2
SQL_HANDLE_STMT = 3
SQL_HANDLE_DESC = 4
#endif
# environment attribute #
#if (ODBCVER >= 0x0300)
SQL_ATTR_OUTPUT_NTS = 10001
#endif
# connection attributes #
#if (ODBCVER >= 0x0300)
SQL_ATTR_AUTO_IPD = 10001
SQL_ATTR_METADATA_ID = 10014
#endif # ODBCVER >= 0x0300 #
# statement attributes #
#if (ODBCVER >= 0x0300)
SQL_ATTR_APP_ROW_DESC = 10010
SQL_ATTR_APP_PARAM_DESC = 10011
SQL_ATTR_IMP_ROW_DESC = 10012
SQL_ATTR_IMP_PARAM_DESC = 10013
SQL_ATTR_CURSOR_SCROLLABLE = (-1)
SQL_ATTR_CURSOR_SENSITIVITY = (-2)
#endif
# SQL_ATTR_CURSOR_SCROLLABLE values #
#if (ODBCVER >= 0x0300)
SQL_NONSCROLLABLE = 0
SQL_SCROLLABLE = 1
#endif # ODBCVER >= 0x0300 #
# identifiers of fields in the SQL descriptor #
#if (ODBCVER >= 0x0300)
SQL_DESC_COUNT = 1001
SQL_DESC_TYPE = 1002
SQL_DESC_LENGTH = 1003
SQL_DESC_OCTET_LENGTH_PTR = 1004
SQL_DESC_PRECISION = 1005
SQL_DESC_SCALE = 1006
SQL_DESC_DATETIME_INTERVAL_CODE = 1007
SQL_DESC_NULLABLE = 1008
SQL_DESC_INDICATOR_PTR = 1009
SQL_DESC_DATA_PTR = 1010
SQL_DESC_NAME = 1011
SQL_DESC_UNNAMED = 1012
SQL_DESC_OCTET_LENGTH = 1013
SQL_DESC_ALLOC_TYPE = 1099
#endif
# identifiers of fields in the diagnostics area #
#if (ODBCVER >= 0x0300)
SQL_DIAG_RETURNCODE = 1
SQL_DIAG_NUMBER = 2
SQL_DIAG_ROW_COUNT = 3
SQL_DIAG_SQLSTATE = 4
SQL_DIAG_NATIVE = 5
SQL_DIAG_MESSAGE_TEXT = 6
SQL_DIAG_DYNAMIC_FUNCTION = 7
SQL_DIAG_CLASS_ORIGIN = 8
SQL_DIAG_SUBCLASS_ORIGIN = 9
SQL_DIAG_CONNECTION_NAME = 10
SQL_DIAG_SERVER_NAME = 11
SQL_DIAG_DYNAMIC_FUNCTION_CODE = 12
#endif
# dynamic function codes #
#if (ODBCVER >= 0x0300)
SQL_DIAG_ALTER_DOMAIN = 3
SQL_DIAG_ALTER_TABLE = 4
SQL_DIAG_CALL = 7
SQL_DIAG_CREATE_ASSERTION = 6
SQL_DIAG_CREATE_CHARACTER_SET = 8
SQL_DIAG_CREATE_COLLATION = 10
SQL_DIAG_CREATE_DOMAIN = 23
SQL_DIAG_CREATE_INDEX = (-1)
SQL_DIAG_CREATE_SCHEMA = 64
SQL_DIAG_CREATE_TABLE = 77
SQL_DIAG_CREATE_TRANSLATION = 79
SQL_DIAG_CREATE_VIEW = 84
SQL_DIAG_DELETE_WHERE = 19
SQL_DIAG_DROP_ASSERTION = 24
SQL_DIAG_DROP_CHARACTER_SET = 25
SQL_DIAG_DROP_COLLATION = 26
SQL_DIAG_DROP_DOMAIN = 27
SQL_DIAG_DROP_INDEX = (-2)
SQL_DIAG_DROP_SCHEMA = 31
SQL_DIAG_DROP_TABLE = 32
SQL_DIAG_DROP_TRANSLATION = 33
SQL_DIAG_DROP_VIEW = 36
SQL_DIAG_DYNAMIC_DELETE_CURSOR = 38
SQL_DIAG_DYNAMIC_UPDATE_CURSOR = 81
SQL_DIAG_GRANT = 48
SQL_DIAG_INSERT = 50
SQL_DIAG_REVOKE = 59
SQL_DIAG_SELECT_CURSOR = 85
SQL_DIAG_UNKNOWN_STATEMENT = 0
SQL_DIAG_UPDATE_WHERE = 82
#endif # ODBCVER >= 0x0300 #
# SQL data type codes #
SQL_UNKNOWN_TYPE = 0
SQL_CHAR = 1
SQL_NUMERIC = 2
SQL_DECIMAL = 3
SQL_INTEGER = 4
SQL_SMALLINT = 5
SQL_FLOAT = 6
SQL_REAL = 7
SQL_DOUBLE = 8
#if (ODBCVER >= 0x0300)
SQL_DATETIME = 9
#endif
SQL_VARCHAR = 12
SQL_WCHAR = -8
SQL_WVARCHAR = -9
SQL_WLONGVARCHAR = -10
ALL_SQL_CHAR = (SQL_CHAR, SQL_WCHAR, SQL_VARCHAR, SQL_WVARCHAR,
SQL_WLONGVARCHAR)
SQL_BIGINT = -5
# One-parameter shortcuts for date/time data types #
#if (ODBCVER >= 0x0300)
SQL_TYPE_DATE = 91
SQL_TYPE_TIME = 92
SQL_TYPE_TIMESTAMP = 93
#endif
# Statement attribute values for cursor sensitivity #
#if (ODBCVER >= 0x0300)
SQL_UNSPECIFIED = 0
SQL_INSENSITIVE = 1
SQL_SENSITIVE = 2
#endif
# GetTypeInfo() request for all data types #
SQL_ALL_TYPES = 0
# Default conversion code for SQLBindCol(), SQLBindParam() and SQLGetData() #
#if (ODBCVER >= 0x0300)
SQL_DEFAULT = 99
#endif
# SQLGetData() code indicating that the application row descriptor
#* specifies the data type #
#if (ODBCVER >= 0x0300)
SQL_ARD_TYPE = (-99)
#endif
# SQL date/time type subcodes #
#if (ODBCVER >= 0x0300)
SQL_CODE_DATE = 1
SQL_CODE_TIME = 2
SQL_CODE_TIMESTAMP = 3
#endif
# CLI option values #
#if (ODBCVER >= 0x0300)
SQL_FALSE = 0
SQL_TRUE = 1
#endif
# values of NULLABLE field in descriptor #
SQL_NO_NULLS = 0
SQL_NULLABLE = 1
# Value returned by SQLGetTypeInfo() to denote that it is
#* not known whether or not a data type supports null values. #
SQL_NULLABLE_UNKNOWN = 2
# Values returned by SQLGetTypeInfo() to show WHERE clause
#* supported #
#if (ODBCVER >= 0x0300)
SQL_PRED_NONE = 0
SQL_PRED_CHAR = 1
SQL_PRED_BASIC = 2
#endif
# values of UNNAMED field in descriptor #
#if (ODBCVER >= 0x0300)
SQL_NAMED = 0
SQL_UNNAMED = 1
#endif
# values of ALLOC_TYPE field in descriptor #
#if (ODBCVER >= 0x0300)
SQL_DESC_ALLOC_AUTO = 1
SQL_DESC_ALLOC_USER = 2
#endif
# FreeStmt() options #
SQL_CLOSE = 0
SQL_DROP = 1
SQL_UNBIND = 2
SQL_RESET_PARAMS = 3
# Codes used for FetchOrientation in SQLFetchScroll(),
# and in SQLDataSources()
#
SQL_FETCH_NEXT = 1
SQL_FETCH_FIRST = 2
# Other codes used for FetchOrientation in SQLFetchScroll() #
SQL_FETCH_LAST = 3
SQL_FETCH_PRIOR = 4
SQL_FETCH_ABSOLUTE = 5
SQL_FETCH_RELATIVE = 6
# SQLEndTran() options #
SQL_COMMIT = 0
SQL_ROLLBACK = 1
# null handles returned by SQLAllocHandle() #
SQL_NULL_HENV = 0
SQL_NULL_HDBC = 0
SQL_NULL_HSTMT = 0
#if (ODBCVER >= 0x0300)
SQL_NULL_HDESC = 0
SQL_NULL_DESC = 0
#endif
# null handle used in place of parent handle when allocating HENV #
#if (ODBCVER >= 0x0300)
SQL_NULL_HANDLE = 0
#endif
# Values that may appear in the result set of SQLSpecialColumns() #
SQL_SCOPE_CURROW = 0
SQL_SCOPE_TRANSACTION = 1
SQL_SCOPE_SESSION = 2
SQL_PC_UNKNOWN = 0
#if (ODBCVER >= 0x0300)
SQL_PC_NON_PSEUDO = 1
#endif
SQL_PC_PSEUDO = 2
# Reserved value for the IdentifierType argument of SQLSpecialColumns() #
#if (ODBCVER >= 0x0300)
SQL_ROW_IDENTIFIER = 1
#endif
# Reserved values for UNIQUE argument of SQLStatistics() #
SQL_INDEX_UNIQUE = 0
SQL_INDEX_ALL = 1
# Values that may appear in the result set of SQLStatistics() #
SQL_INDEX_CLUSTERED = 1
SQL_INDEX_HASHED = 2
SQL_INDEX_OTHER = 3
# SQLGetFunctions() values to identify ODBC APIs #
SQL_API_SQLALLOCCONNECT = 1
SQL_API_SQLALLOCENV = 2
#if (ODBCVER >= 0x0300)
SQL_API_SQLALLOCHANDLE = 1001
#endif
SQL_API_SQLALLOCSTMT = 3
SQL_API_SQLBINDCOL = 4
#if (ODBCVER >= 0x0300)
SQL_API_SQLBINDPARAM = 1002
#endif
SQL_API_SQLCANCEL = 5
#if (ODBCVER >= 0x0300)
SQL_API_SQLCLOSECURSOR = 1003
SQL_API_SQLCOLATTRIBUTE = 6
#endif
SQL_API_SQLCOLUMNS = 40
SQL_API_SQLCONNECT = 7
#if (ODBCVER >= 0x0300)
SQL_API_SQLCOPYDESC = 1004
#endif
SQL_API_SQLDATASOURCES = 57
SQL_API_SQLDESCRIBECOL = 8
SQL_API_SQLDISCONNECT = 9
#if (ODBCVER >= 0x0300)
SQL_API_SQLENDTRAN = 1005
#endif
SQL_API_SQLERROR = 10
SQL_API_SQLEXECDIRECT = 11
SQL_API_SQLEXECUTE = 12
SQL_API_SQLFETCH = 13
#if (ODBCVER >= 0x0300)
SQL_API_SQLFETCHSCROLL = 1021
#endif
SQL_API_SQLFREECONNECT = 14
SQL_API_SQLFREEENV = 15
#if (ODBCVER >= 0x0300)
SQL_API_SQLFREEHANDLE = 1006
#endif
SQL_API_SQLFREESTMT = 16
#if (ODBCVER >= 0x0300)
SQL_API_SQLGETCONNECTATTR = 1007
#endif
SQL_API_SQLGETCONNECTOPTION = 42
SQL_API_SQLGETCURSORNAME = 17
SQL_API_SQLGETDATA = 43
#if (ODBCVER >= 0x0300)
SQL_API_SQLGETDESCFIELD = 1008
SQL_API_SQLGETDESCREC = 1009
SQL_API_SQLGETDIAGFIELD = 1010
SQL_API_SQLGETDIAGREC = 1011
SQL_API_SQLGETENVATTR = 1012
#endif
SQL_API_SQLGETFUNCTIONS = 44
SQL_API_SQLGETINFO = 45
#if (ODBCVER >= 0x0300)
SQL_API_SQLGETSTMTATTR = 1014
#endif
SQL_API_SQLGETSTMTOPTION = 46
SQL_API_SQLGETTYPEINFO = 47
SQL_API_SQLNUMRESULTCOLS = 18
SQL_API_SQLPARAMDATA = 48
SQL_API_SQLPREPARE = 19
SQL_API_SQLPUTDATA = 49
SQL_API_SQLROWCOUNT = 20
#if (ODBCVER >= 0x0300)
SQL_API_SQLSETCONNECTATTR = 1016
#endif
SQL_API_SQLSETCONNECTOPTION = 50
SQL_API_SQLSETCURSORNAME = 21
#if (ODBCVER >= 0x0300)
SQL_API_SQLSETDESCFIELD = 1017
SQL_API_SQLSETDESCREC = 1018
SQL_API_SQLSETENVATTR = 1019
#endif
SQL_API_SQLSETPARAM = 22
#if (ODBCVER >= 0x0300)
SQL_API_SQLSETSTMTATTR = 1020
#endif
SQL_API_SQLSETSTMTOPTION = 51
SQL_API_SQLSPECIALCOLUMNS = 52
SQL_API_SQLSTATISTICS = 53
SQL_API_SQLTABLES = 54
SQL_API_SQLTRANSACT = 23
#if (ODBCVER >= 0x0380)
SQL_API_SQLCANCELHANDLE = 1022
#endif
# Information requested by SQLGetInfo() #
#if (ODBCVER >= 0x0300)
SQL_MAX_DRIVER_CONNECTIONS = 0
SQL_MAXIMUM_DRIVER_CONNECTIONS = SQL_MAX_DRIVER_CONNECTIONS
SQL_MAX_CONCURRENT_ACTIVITIES = 1
SQL_MAXIMUM_CONCURRENT_ACTIVITIES = SQL_MAX_CONCURRENT_ACTIVITIES
#endif
SQL_DATA_SOURCE_NAME = 2
SQL_FETCH_DIRECTION = 8
SQL_SERVER_NAME = 13
SQL_SEARCH_PATTERN_ESCAPE = 14
SQL_DBMS_NAME = 17
SQL_DBMS_VER = 18
SQL_ACCESSIBLE_TABLES = 19
SQL_ACCESSIBLE_PROCEDURES = 20
SQL_CURSOR_COMMIT_BEHAVIOR = 23
SQL_DATA_SOURCE_READ_ONLY = 25
SQL_DEFAULT_TXN_ISOLATION = 26
SQL_IDENTIFIER_CASE = 28
SQL_IDENTIFIER_QUOTE_CHAR = 29
SQL_MAX_COLUMN_NAME_LEN = 30
SQL_MAXIMUM_COLUMN_NAME_LENGTH = SQL_MAX_COLUMN_NAME_LEN
SQL_MAX_CURSOR_NAME_LEN = 31
SQL_MAXIMUM_CURSOR_NAME_LENGTH = SQL_MAX_CURSOR_NAME_LEN
SQL_MAX_SCHEMA_NAME_LEN = 32
SQL_MAXIMUM_SCHEMA_NAME_LENGTH = SQL_MAX_SCHEMA_NAME_LEN
SQL_MAX_CATALOG_NAME_LEN = 34
SQL_MAXIMUM_CATALOG_NAME_LENGTH = SQL_MAX_CATALOG_NAME_LEN
SQL_MAX_TABLE_NAME_LEN = 35
SQL_SCROLL_CONCURRENCY = 43
SQL_TXN_CAPABLE = 46
SQL_TRANSACTION_CAPABLE = SQL_TXN_CAPABLE
SQL_USER_NAME = 47
SQL_TXN_ISOLATION_OPTION = 72
SQL_TRANSACTION_ISOLATION_OPTION = SQL_TXN_ISOLATION_OPTION
SQL_INTEGRITY = 73
SQL_GETDATA_EXTENSIONS = 81
SQL_NULL_COLLATION = 85
SQL_ALTER_TABLE = 86
SQL_ORDER_BY_COLUMNS_IN_SELECT = 90
SQL_SPECIAL_CHARACTERS = 94
SQL_MAX_COLUMNS_IN_GROUP_BY = 97
SQL_MAXIMUM_COLUMNS_IN_GROUP_BY = SQL_MAX_COLUMNS_IN_GROUP_BY
SQL_MAX_COLUMNS_IN_INDEX = 98
SQL_MAXIMUM_COLUMNS_IN_INDEX = SQL_MAX_COLUMNS_IN_INDEX
SQL_MAX_COLUMNS_IN_ORDER_BY = 99
SQL_MAXIMUM_COLUMNS_IN_ORDER_BY = SQL_MAX_COLUMNS_IN_ORDER_BY
SQL_MAX_COLUMNS_IN_SELECT = 100
SQL_MAXIMUM_COLUMNS_IN_SELECT = SQL_MAX_COLUMNS_IN_SELECT
SQL_MAX_COLUMNS_IN_TABLE = 101
SQL_MAX_INDEX_SIZE = 102
SQL_MAXIMUM_INDEX_SIZE = SQL_MAX_INDEX_SIZE
SQL_MAX_ROW_SIZE = 104
SQL_MAXIMUM_ROW_SIZE = SQL_MAX_ROW_SIZE
SQL_MAX_STATEMENT_LEN = 105
SQL_MAXIMUM_STATEMENT_LENGTH = SQL_MAX_STATEMENT_LEN
SQL_MAX_TABLES_IN_SELECT = 106
SQL_MAXIMUM_TABLES_IN_SELECT = SQL_MAX_TABLES_IN_SELECT
SQL_MAX_USER_NAME_LEN = 107
SQL_MAXIMUM_USER_NAME_LENGTH = SQL_MAX_USER_NAME_LEN
#if (ODBCVER >= 0x0300)
SQL_OJ_CAPABILITIES = 115
SQL_OUTER_JOIN_CAPABILITIES = SQL_OJ_CAPABILITIES
#endif # ODBCVER >= 0x0300 #
#if (ODBCVER >= 0x0300)
SQL_XOPEN_CLI_YEAR = 10000
SQL_CURSOR_SENSITIVITY = 10001
SQL_DESCRIBE_PARAMETER = 10002
SQL_CATALOG_NAME = 10003
SQL_COLLATION_SEQ = 10004
SQL_MAX_IDENTIFIER_LEN = 10005
SQL_MAXIMUM_IDENTIFIER_LENGTH = SQL_MAX_IDENTIFIER_LEN
#endif # ODBCVER >= 0x0300 #
# SQL_ALTER_TABLE bitmasks #
#if (ODBCVER >= 0x0200)
SQL_AT_ADD_COLUMN = 0x00000001
SQL_AT_DROP_COLUMN = 0x00000002
#endif # ODBCVER >= 0x0200 #
#if (ODBCVER >= 0x0300)
SQL_AT_ADD_CONSTRAINT = 0x00000008
#endif # ODBCVER >= 0x0300 #
# SQL_ASYNC_MODE values #
#if (ODBCVER >= 0x0300)
SQL_AM_NONE = 0
SQL_AM_CONNECTION = 1
SQL_AM_STATEMENT = 2
#endif
# SQL_CURSOR_COMMIT_BEHAVIOR values #
SQL_CB_DELETE = 0
SQL_CB_CLOSE = 1
SQL_CB_PRESERVE = 2
# SQL_FETCH_DIRECTION bitmasks #
SQL_FD_FETCH_NEXT = 0x00000001
SQL_FD_FETCH_FIRST = 0x00000002
SQL_FD_FETCH_LAST = 0x00000004
SQL_FD_FETCH_PRIOR = 0x00000008
SQL_FD_FETCH_ABSOLUTE = 0x00000010
SQL_FD_FETCH_RELATIVE = 0x00000020
# SQL_GETDATA_EXTENSIONS bitmasks #
SQL_GD_ANY_COLUMN = 0x00000001
SQL_GD_ANY_ORDER = 0x00000002
# SQL_IDENTIFIER_CASE values #
SQL_IC_UPPER = 1
SQL_IC_LOWER = 2
SQL_IC_SENSITIVE = 3
SQL_IC_MIXED = 4
# SQL_OJ_CAPABILITIES bitmasks #
# NB: this means 'outer join', not what you may be thinking #
#if (ODBCVER >= 0x0201)
SQL_OJ_LEFT = 0x00000001
SQL_OJ_RIGHT = 0x00000002
SQL_OJ_FULL = 0x00000004
SQL_OJ_NESTED = 0x00000008
SQL_OJ_NOT_ORDERED = 0x00000010
SQL_OJ_INNER = 0x00000020
SQL_OJ_ALL_COMPARISON_OPS = 0x00000040
#endif
# SQL_SCROLL_CONCURRENCY bitmasks #
SQL_SCCO_READ_ONLY = 0x00000001
SQL_SCCO_LOCK = 0x00000002
SQL_SCCO_OPT_ROWVER = 0x00000004
SQL_SCCO_OPT_VALUES = 0x00000008
# SQL_TXN_CAPABLE values #
SQL_TC_NONE = 0
SQL_TC_DML = 1
SQL_TC_ALL = 2
SQL_TC_DDL_COMMIT = 3
SQL_TC_DDL_IGNORE = 4
# SQL_TXN_ISOLATION_OPTION bitmasks #
SQL_TXN_READ_UNCOMMITTED = 0x00000001
SQL_TRANSACTION_READ_UNCOMMITTED = SQL_TXN_READ_UNCOMMITTED
SQL_TXN_READ_COMMITTED = 0x00000002
SQL_TRANSACTION_READ_COMMITTED = SQL_TXN_READ_COMMITTED
SQL_TXN_REPEATABLE_READ = 0x00000004
SQL_TRANSACTION_REPEATABLE_READ = SQL_TXN_REPEATABLE_READ
SQL_TXN_SERIALIZABLE = 0x00000008
SQL_TRANSACTION_SERIALIZABLE = SQL_TXN_SERIALIZABLE
# SQL_NULL_COLLATION values #
SQL_NC_HIGH = 0
SQL_NC_LOW = 1
# sqlext.h
SQL_ACCESS_MODE = 101
SQL_AUTOCOMMIT = 102
SQL_LOGIN_TIMEOUT = 103
SQL_OPT_TRACE = 104
SQL_OPT_TRACEFILE = 105
SQL_TRANSLATE_DLL = 106
SQL_TRANSLATE_OPTION = 107
SQL_TXN_ISOLATION = 108
SQL_CURRENT_QUALIFIER = 109
SQL_ODBC_CURSORS = 110
SQL_QUIET_MODE = 111
SQL_PACKET_SIZE = 112
# connection attributes with new names #
SQL_ATTR_ACCESS_MODE = SQL_ACCESS_MODE
SQL_ATTR_AUTOCOMMIT = SQL_AUTOCOMMIT
SQL_ATTR_CONNECTION_TIMEOUT = 113
SQL_ATTR_CURRENT_CATALOG = SQL_CURRENT_QUALIFIER
SQL_ATTR_DISCONNECT_BEHAVIOR = 114
SQL_ATTR_ENLIST_IN_DTC = 1207
SQL_ATTR_ENLIST_IN_XA = 1208
SQL_ATTR_LOGIN_TIMEOUT = SQL_LOGIN_TIMEOUT
SQL_ATTR_ODBC_CURSORS = SQL_ODBC_CURSORS
SQL_ATTR_PACKET_SIZE = SQL_PACKET_SIZE
SQL_ATTR_QUIET_MODE = SQL_QUIET_MODE
SQL_ATTR_TRACE = SQL_OPT_TRACE
SQL_ATTR_TRACEFILE = SQL_OPT_TRACEFILE
SQL_ATTR_TRANSLATE_LIB = SQL_TRANSLATE_DLL
SQL_ATTR_TRANSLATE_OPTION = SQL_TRANSLATE_OPTION
SQL_ATTR_TXN_ISOLATION = SQL_TXN_ISOLATION
# /* SQL_AUTOCOMMIT options */
SQL_AUTOCOMMIT_OFF = ctypes.c_ulong(0)
SQL_AUTOCOMMIT_ON = ctypes.c_ulong(1)
SQL_AUTOCOMMIT_DEFAULT = SQL_AUTOCOMMIT_ON
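# Note: these c_ulong values are intended to be passed (via ctypes) as the
# attribute value of an ODBC SQLSetConnectAttr(..., SQL_ATTR_AUTOCOMMIT, ...) call.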
# /* whether an attribute is a pointer or not */
SQL_IS_POINTER = (-4)
SQL_IS_UINTEGER = (-5)
SQL_IS_INTEGER = (-6)
SQL_IS_USMALLINT = (-7)
SQL_IS_SMALLINT = (-8)
# /* statement attributes for ODBC 3.0 */
SQL_QUERY_TIMEOUT = 0
SQL_MAX_ROWS = 1
SQL_NOSCAN = 2
SQL_MAX_LENGTH = 3
SQL_ASYNC_ENABLE = 4  # /* same as SQL_ATTR_ASYNC_ENABLE */
SQL_BIND_TYPE = 5
SQL_CURSOR_TYPE = 6
SQL_CONCURRENCY = 7
SQL_KEYSET_SIZE = 8
SQL_ROWSET_SIZE = 9
SQL_SIMULATE_CURSOR = 10
SQL_RETRIEVE_DATA = 11
SQL_USE_BOOKMARKS = 12
SQL_GET_BOOKMARK = 13
SQL_ROW_NUMBER = 14
SQL_ATTR_ASYNC_ENABLE = 4
SQL_ATTR_CONCURRENCY = SQL_CONCURRENCY
SQL_ATTR_CURSOR_TYPE = SQL_CURSOR_TYPE
SQL_ATTR_ENABLE_AUTO_IPD = 15
SQL_ATTR_FETCH_BOOKMARK_PTR = 16
SQL_ATTR_KEYSET_SIZE = SQL_KEYSET_SIZE
SQL_ATTR_MAX_LENGTH = SQL_MAX_LENGTH
SQL_ATTR_MAX_ROWS = SQL_MAX_ROWS
SQL_ATTR_NOSCAN = SQL_NOSCAN
SQL_ATTR_PARAM_BIND_OFFSET_PTR = 17
SQL_ATTR_PARAM_BIND_TYPE = 18
SQL_ATTR_PARAM_OPERATION_PTR = 19
SQL_ATTR_PARAM_STATUS_PTR = 20
SQL_ATTR_PARAMS_PROCESSED_PTR = 21
SQL_ATTR_PARAMSET_SIZE = 22
SQL_ATTR_QUERY_TIMEOUT = SQL_QUERY_TIMEOUT
SQL_ATTR_RETRIEVE_DATA = SQL_RETRIEVE_DATA
SQL_ATTR_ROW_BIND_OFFSET_PTR = 23
SQL_ATTR_ROW_BIND_TYPE = SQL_BIND_TYPE
SQL_ATTR_ROW_NUMBER = SQL_ROW_NUMBER
SQL_ATTR_ROW_OPERATION_PTR = 24
SQL_ATTR_ROW_STATUS_PTR = 25
SQL_ATTR_ROWS_FETCHED_PTR = 26
SQL_ATTR_ROW_ARRAY_SIZE = 27
SQL_ATTR_SIMULATE_CURSOR = SQL_SIMULATE_CURSOR
SQL_ATTR_USE_BOOKMARKS = SQL_USE_BOOKMARKS
|
[
"ctypes.c_ulong"
] |
[((14583, 14600), 'ctypes.c_ulong', 'ctypes.c_ulong', (['(0)'], {}), '(0)\n', (14597, 14600), False, 'import ctypes\n'), ((14621, 14638), 'ctypes.c_ulong', 'ctypes.c_ulong', (['(1)'], {}), '(1)\n', (14635, 14638), False, 'import ctypes\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
from einops.layers.torch import Rearrange, Reduce
from einops import rearrange
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def pool2x2(x):
return nn.MaxPool2d(kernel_size=2, stride=2)(x)
def upsample2(x):
return F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class FeatureTunk(nn.Module):
def __init__(self, pretrained=True):
super(FeatureTunk, self).__init__()
self.rgb_extractor = BasicBlock(3, 3)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
# self.dense121 = torchvision.models.densenet.densenet121(pretrained=pretrained).features
# self.dense121.conv0 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# self.resnet18 = torchvision.models.resnet.resnet18(pretrained=pretrained)
# self.resnet18.fc = nn.Linear(512, 64)
self.mobilenetv3_feat = torchvision.models.mobilenet.mobilenet_v3_small(pretrained=pretrained).features
        # original MobileNetV3 head: AdaptiveAvgPool2d(1)
self.mobilenetv3_avgpool = nn.AdaptiveAvgPool2d(4)
        # original MobileNetV3 classifier input size: 576
self.mobilenetv3_classifier = nn.Sequential(
nn.Linear(48, 256),
nn.Hardswish(inplace=True),
nn.Dropout(p=0.2, inplace=True),
nn.Linear(256, 64),
)
def forward(self, x):
# x = self.mobilenetv3_feat(self.rgb_extractor(x))
x = self.rgb_extractor(x)
x = self.mobilenetv3_avgpool(x)
x = torch.flatten(x, 1)
x = self.mobilenetv3_classifier(x)
return x
class MyNetWork(nn.Module):
def __init__(self, output):
super(MyNetWork, self).__init__()
self.feature_tunk = FeatureTunk()
self.linear1 = nn.Linear(12, 256)
self.linear2 = nn.Linear(256, 128)
self.linear3 = nn.Linear(128, 64)
self.selu = nn.SELU()
self.linear_output = nn.Linear(128, output)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='selu')
def forward(self, x):
rgb_img = x[:, :-12]
aux_obs = x[:, -12:]
# 1 * 8 * 8 feat
rgb_img = rearrange(rgb_img, 'b (h w c) -> b c h w', h = 128, w = 128, c = 3)
feat_out = self.feature_tunk(rgb_img)
aux_obs_out = self.linear1(aux_obs)
aux_obs_out = self.selu(aux_obs_out)
aux_obs_out = self.linear2(aux_obs_out)
aux_obs_out = self.selu(aux_obs_out)
aux_obs_out = self.linear3(aux_obs_out)
aux_obs_out = self.selu(aux_obs_out)
output = torch.cat((aux_obs_out, feat_out), dim=1)
output = self.linear_output(output)
return output
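# Quick shape check (illustrative; assumes a flattened 128x128x3 RGB image
# concatenated with 12 auxiliary features, matching forward() above):
# net = MyNetWork(output=6)
# x = torch.rand(2, 128 * 128 * 3 + 12)
# net(x).shape  # -> torch.Size([2, 6])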
|
[
"torch.flatten",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_",
"torchvision.models.mobilenet.mobilenet_v3_small",
"torch.nn.Conv2d",
"torch.nn.Hardswish",
"torch.cat",
"torch.nn.init.zeros_",
"torch.nn.BatchNorm2d",
"torch.nn.init.normal_",
"torch.nn.SELU",
"einops.rearrange",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.interpolate",
"torch.nn.init.ones_"
] |
[((281, 370), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (290, 370), True, 'import torch.nn as nn\n'), ((473, 547), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n', (482, 547), True, 'import torch.nn as nn\n'), ((649, 718), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x, scale_factor=2, mode='bilinear', align_corners=True)\n", (662, 718), True, 'import torch.nn.functional as F\n'), ((577, 614), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (589, 614), True, 'import torch.nn as nn\n'), ((937, 959), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (951, 959), True, 'import torch.nn as nn\n'), ((980, 1001), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (987, 1001), True, 'import torch.nn as nn\n'), ((1066, 1088), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (1080, 1088), True, 'import torch.nn as nn\n'), ((2678, 2701), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(4)'], {}), '(4)\n', (2698, 2701), True, 'import torch.nn as nn\n'), ((3108, 3127), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (3121, 3127), False, 'import torch\n'), ((3368, 3386), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(256)'], {}), '(12, 256)\n', (3377, 3386), True, 'import torch.nn as nn\n'), ((3410, 3429), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (3419, 3429), True, 'import torch.nn as nn\n'), ((3453, 3471), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (3462, 3471), True, 'import torch.nn as nn\n'), ((3492, 3501), 'torch.nn.SELU', 'nn.SELU', ([], {}), '()\n', (3499, 3501), True, 'import torch.nn as nn\n'), ((3532, 3554), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'output'], {}), '(128, output)\n', (3541, 3554), True, 'import torch.nn as nn\n'), ((3846, 3907), 'einops.rearrange', 'rearrange', (['rgb_img', '"""b (h w c) -> b c h w"""'], {'h': '(128)', 'w': '(128)', 'c': '(3)'}), "(rgb_img, 'b (h w c) -> b c h w', h=128, w=128, c=3)\n", (3855, 3907), False, 'from einops import rearrange\n'), ((4254, 4295), 'torch.cat', 'torch.cat', (['(aux_obs_out, feat_out)'], {'dim': '(1)'}), '((aux_obs_out, feat_out), dim=1)\n', (4263, 4295), False, 'import torch\n'), ((2543, 2613), 'torchvision.models.mobilenet.mobilenet_v3_small', 'torchvision.models.mobilenet.mobilenet_v3_small', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (2590, 2613), False, 'import torchvision\n'), ((2789, 2807), 'torch.nn.Linear', 'nn.Linear', (['(48)', '(256)'], {}), '(48, 256)\n', (2798, 2807), True, 'import torch.nn as nn\n'), ((2821, 2847), 'torch.nn.Hardswish', 'nn.Hardswish', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2833, 2847), True, 'import torch.nn as nn\n'), ((2861, 2892), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.2)', 'inplace': '(True)'}), '(p=0.2, inplace=True)\n', (2871, 2892), True, 'import torch.nn as nn\n'), ((2906, 2924), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (2915, 2924), True, 'import torch.nn as nn\n'), ((1760, 
1809), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""'}), "(m.weight, mode='fan_out')\n", (1783, 1809), True, 'import torch.nn as nn\n'), ((3646, 3716), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""selu"""'}), "(m.weight, mode='fan_out', nonlinearity='selu')\n", (3669, 3716), True, 'import torch.nn as nn\n'), ((1869, 1891), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (1883, 1891), True, 'import torch.nn as nn\n'), ((1972, 1995), 'torch.nn.init.ones_', 'nn.init.ones_', (['m.weight'], {}), '(m.weight)\n', (1985, 1995), True, 'import torch.nn as nn\n'), ((2012, 2034), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (2026, 2034), True, 'import torch.nn as nn\n'), ((2094, 2128), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (2109, 2128), True, 'import torch.nn as nn\n'), ((2145, 2167), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (2159, 2167), True, 'import torch.nn as nn\n')]
|
from ogs6py import ogs
model = ogs.OGS(PROJECT_FILE="test.prj")
model.geo.add_geom(filename="square_1x1.gml")
model.mesh.add_mesh(filename="square_1x1_quad_1e2.vtu")
model.processes.set_process(name="SD",
type="SMALL_DEFORMATION",
integration_order="2",
solid_density="rho_sr",
specific_body_force="0 0")
model.processes.set_constitutive_relation(type="LinearElasticIsotropic",
youngs_modulus="E",
poissons_ratio="nu")
model.processes.add_process_variable(process_variable="process_variable",
process_variable_name="displacement")
model.processes.add_process_variable(secondary_variable="sigma",
output_name="sigma")
model.timeloop.add_process(process="SD",
nonlinear_solver_name="basic_newton",
convergence_type="DeltaX",
norm_type="NORM2",
abstol="1e-15",
time_discretization="BackwardEuler")
model.timeloop.set_stepping(process="SD", type="FixedTimeStepping",
t_initial="0",
t_end="1",
repeat="4",
delta_t="0.25")
model.timeloop.add_output(type="VTK",
prefix="blubb",
repeat="1",
each_steps="10",
variables=["displacement", "sigma"])
model.parameters.add_parameter(name="E", type="Constant", value="1")
model.parameters.add_parameter(name="nu", type="Constant", value="0.3")
model.parameters.add_parameter(name="rho_sr", type="Constant", value="1")
model.parameters.add_parameter(name="displacement0",
type="Constant",
values="0 0")
model.parameters.add_parameter(name="dirichlet0", type="Constant", value="0")
model.parameters.add_parameter(name="dirichlet1", type="Constant", value="0.05")
model.processvars.set_ic(process_variable_name="displacement",
components="2",
order="1",
initial_condition="displacement0")
model.processvars.add_bc(process_variable_name="displacement",
geometrical_set="square_1x1_geometry",
geometry="left",
type="Dirichlet",
component="0",
parameter="dirichlet0")
model.processvars.add_bc(process_variable_name="displacement",
geometrical_set="square_1x1_geometry",
geometry="bottom",
type="Dirichlet",
component="1",
parameter="dirichlet0")
model.processvars.add_bc(process_variable_name="displacement",
geometrical_set="square_1x1_geometry",
geometry="top",
type="Dirichlet",
component="1",
parameter="dirichlet1")
model.nonlinsolvers.add_non_lin_solver(name="basic_newton",
type="Newton",
max_iter="4",
linear_solver="general_linear_solver")
model.linsolvers.add_lin_solver(name="general_linear_solver",
kind="lis",
solver_type="cg",
precon_type="jacobi",
max_iteration_step="10000",
error_tolerance="1e-16")
model.linsolvers.add_lin_solver(name="general_linear_solver",
kind="eigen",
solver_type="CG",
precon_type="DIAGONAL",
max_iteration_step="10000",
error_tolerance="1e-16")
model.linsolvers.add_lin_solver(name="general_linear_solver",
kind="petsc",
prefix="sd",
solver_type="cg",
precon_type="bjacobi",
max_iteration_step="10000",
error_tolerance="1e-16")
model.write_input()
model.run_model(logfile="test.log")
# to select a directory containing an OGS executable:
# model.run_model(path="/path_to_ogs_bin_dir/")
|
[
"ogs6py.ogs.OGS"
] |
[((32, 64), 'ogs6py.ogs.OGS', 'ogs.OGS', ([], {'PROJECT_FILE': '"""test.prj"""'}), "(PROJECT_FILE='test.prj')\n", (39, 64), False, 'from ogs6py import ogs\n')]
|
from rpython.rlib import rwin32
from rpython.rlib.rarithmetic import r_uint
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.tool import rffi_platform
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from pypy.interpreter.error import oefmt, wrap_windowserror
from pypy.interpreter.function import StaticMethod
from pypy.interpreter.gateway import interp2app, unwrap_spec
from _multiprocess.interp_connection import w_handle
CONSTANTS = """
PIPE_ACCESS_INBOUND PIPE_ACCESS_DUPLEX
GENERIC_READ GENERIC_WRITE OPEN_EXISTING
PIPE_TYPE_MESSAGE PIPE_READMODE_MESSAGE PIPE_WAIT
PIPE_UNLIMITED_INSTANCES
NMPWAIT_WAIT_FOREVER
ERROR_PIPE_CONNECTED ERROR_SEM_TIMEOUT ERROR_PIPE_BUSY
ERROR_NO_SYSTEM_RESOURCES ERROR_BROKEN_PIPE ERROR_MORE_DATA
ERROR_ALREADY_EXISTS ERROR_NO_DATA
""".split()
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
includes = ['windows.h'],
libraries = ['kernel32'],
)
for name in CONSTANTS:
locals()[name] = rffi_platform.ConstantInteger(name)
config = rffi_platform.configure(CConfig)
globals().update(config)
def handle_w(space, w_handle):
return rffi.cast(rwin32.HANDLE, space.int_w(w_handle))
_CreateNamedPipe = rwin32.winexternal(
'CreateNamedPipeA', [
rwin32.LPCSTR,
rwin32.DWORD, rwin32.DWORD, rwin32.DWORD,
rwin32.DWORD, rwin32.DWORD, rwin32.DWORD,
rffi.VOIDP],
rwin32.HANDLE,
save_err=rffi.RFFI_SAVE_LASTERROR)
_ConnectNamedPipe = rwin32.winexternal(
'ConnectNamedPipe', [rwin32.HANDLE, rffi.VOIDP], rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_SetNamedPipeHandleState = rwin32.winexternal(
'SetNamedPipeHandleState', [
rwin32.HANDLE,
rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_WaitNamedPipe = rwin32.winexternal(
'WaitNamedPipeA', [rwin32.LPCSTR, rwin32.DWORD],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_PeekNamedPipe = rwin32.winexternal(
'PeekNamedPipe', [
rwin32.HANDLE,
rffi.VOIDP,
rwin32.DWORD,
rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_CreateFile = rwin32.winexternal(
'CreateFileA', [
rwin32.LPCSTR,
rwin32.DWORD, rwin32.DWORD, rffi.VOIDP,
rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE],
rwin32.HANDLE,
save_err=rffi.RFFI_SAVE_LASTERROR)
_WriteFile = rwin32.winexternal(
'WriteFile', [
rwin32.HANDLE,
rffi.VOIDP, rwin32.DWORD,
rwin32.LPDWORD, rffi.VOIDP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_ReadFile = rwin32.winexternal(
'ReadFile', [
rwin32.HANDLE,
rffi.VOIDP, rwin32.DWORD,
rwin32.LPDWORD, rffi.VOIDP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
_ExitProcess = rwin32.winexternal(
'ExitProcess', [rffi.UINT], lltype.Void,
save_err=rffi.RFFI_SAVE_LASTERROR)
_GetTickCount = rwin32.winexternal(
'GetTickCount', [], rwin32.DWORD)
_Sleep = rwin32.winexternal(
'Sleep', [rwin32.DWORD], lltype.Void)
def CloseHandle(space, w_handle):
handle = handle_w(space, w_handle)
if not rwin32.CloseHandle(handle):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
def GetLastError(space):
"""NOTE: don't use this. See issue #2658"""
return space.newint(rwin32.GetLastError_saved())
# __________________________________________________________
# functions for the "win32" namespace
@unwrap_spec(name='text', openmode=r_uint, pipemode=r_uint, maxinstances=r_uint,
outputsize=r_uint, inputsize=r_uint, timeout=r_uint)
def CreateNamedPipe(space, name, openmode, pipemode, maxinstances,
outputsize, inputsize, timeout, w_security):
security = space.int_w(w_security)
if security:
raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
handle = _CreateNamedPipe(
name, openmode, pipemode, maxinstances,
outputsize, inputsize, timeout, rffi.NULL)
if handle == rwin32.INVALID_HANDLE_VALUE:
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
return w_handle(space, handle)
def ConnectNamedPipe(space, w_handle, w_overlapped):
handle = handle_w(space, w_handle)
overlapped = space.int_w(w_overlapped)
if overlapped:
raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
if not _ConnectNamedPipe(handle, rffi.NULL):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
def SetNamedPipeHandleState(space, w_handle, w_pipemode, w_maxinstances,
w_timeout):
handle = handle_w(space, w_handle)
state = lltype.malloc(rffi.CArrayPtr(rffi.UINT).TO, 3, flavor='raw')
statep = lltype.malloc(rffi.CArrayPtr(rffi.UINTP).TO, 3, flavor='raw',
zero=True)
try:
if not space.is_w(w_pipemode, space.w_None):
state[0] = rffi.cast(rffi.UINT, space.uint_w(w_pipemode))
statep[0] = rffi.ptradd(state, 0)
if not space.is_w(w_maxinstances, space.w_None):
state[1] = rffi.cast(rffi.UINT, space.uint_w(w_maxinstances))
statep[1] = rffi.ptradd(state, 1)
if not space.is_w(w_timeout, space.w_None):
state[2] = rffi.cast(rffi.UINT, space.uint_w(w_timeout))
statep[2] = rffi.ptradd(state, 2)
if not _SetNamedPipeHandleState(handle, statep[0], statep[1],
statep[2]):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
finally:
lltype.free(state, flavor='raw')
lltype.free(statep, flavor='raw')
@unwrap_spec(name='text', timeout=r_uint)
def WaitNamedPipe(space, name, timeout):
# Careful: zero means "default value specified by CreateNamedPipe()"
if not _WaitNamedPipe(name, timeout):
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
@unwrap_spec(filename='fsencode', access=r_uint, share=r_uint,
disposition=r_uint, flags=r_uint)
def CreateFile(space, filename, access, share, w_security,
disposition, flags, w_templatefile):
security = space.int_w(w_security)
templatefile = space.int_w(w_templatefile)
if security or templatefile:
raise oefmt(space.w_NotImplementedError, "expected a NULL pointer")
handle = _CreateFile(filename, access, share, rffi.NULL,
disposition, flags, rwin32.NULL_HANDLE)
if handle == rwin32.INVALID_HANDLE_VALUE:
raise wrap_windowserror(space, rwin32.lastSavedWindowsError())
return w_handle(space, handle)
@unwrap_spec(code=r_uint)
def ExitProcess(space, code):
_ExitProcess(code)
def win32_namespace(space):
"NOT_RPYTHON"
w_win32 = space.call_function(space.w_type,
space.wrap("win32"),
space.newtuple([]),
space.newdict())
# constants
for name in CONSTANTS:
space.setattr(w_win32,
space.wrap(name),
space.wrap(config[name]))
space.setattr(w_win32,
space.wrap('NULL'),
space.newint(0))
# functions
for name in ['CloseHandle', 'GetLastError', 'CreateFile',
'CreateNamedPipe', 'ConnectNamedPipe',
'SetNamedPipeHandleState', 'WaitNamedPipe',
'ExitProcess',
]:
function = globals()[name]
w_function = space.wrap(interp2app(function))
w_method = space.wrap(StaticMethod(w_function))
space.setattr(w_win32, space.wrap(name), w_method)
return w_win32
|
[
"rpython.rtyper.tool.rffi_platform.configure",
"pypy.interpreter.gateway.interp2app",
"_multiprocess.interp_connection.w_handle",
"rpython.rtyper.lltypesystem.rffi.ptradd",
"rpython.translator.tool.cbuild.ExternalCompilationInfo",
"pypy.interpreter.gateway.unwrap_spec",
"rpython.rlib.rwin32.lastSavedWindowsError",
"rpython.rlib.rwin32.winexternal",
"rpython.rtyper.lltypesystem.lltype.free",
"pypy.interpreter.function.StaticMethod",
"pypy.interpreter.error.oefmt",
"rpython.rlib.rwin32.GetLastError_saved",
"rpython.rlib.rwin32.CloseHandle",
"rpython.rtyper.lltypesystem.rffi.CArrayPtr",
"rpython.rtyper.tool.rffi_platform.ConstantInteger"
] |
[((1098, 1130), 'rpython.rtyper.tool.rffi_platform.configure', 'rffi_platform.configure', (['CConfig'], {}), '(CConfig)\n', (1121, 1130), False, 'from rpython.rtyper.tool import rffi_platform\n'), ((1267, 1478), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""CreateNamedPipeA"""', '[rwin32.LPCSTR, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD,\n rwin32.DWORD, rwin32.DWORD, rffi.VOIDP]', 'rwin32.HANDLE'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('CreateNamedPipeA', [rwin32.LPCSTR, rwin32.DWORD, rwin32\n .DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rwin32.DWORD, rffi.\n VOIDP], rwin32.HANDLE, save_err=rffi.RFFI_SAVE_LASTERROR)\n", (1285, 1478), False, 'from rpython.rlib import rwin32\n'), ((1536, 1656), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""ConnectNamedPipe"""', '[rwin32.HANDLE, rffi.VOIDP]', 'rwin32.BOOL'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('ConnectNamedPipe', [rwin32.HANDLE, rffi.VOIDP], rwin32.\n BOOL, save_err=rffi.RFFI_SAVE_LASTERROR)\n", (1554, 1656), False, 'from rpython.rlib import rwin32\n'), ((1689, 1857), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""SetNamedPipeHandleState"""', '[rwin32.HANDLE, rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD]', 'rwin32.BOOL'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('SetNamedPipeHandleState', [rwin32.HANDLE, rwin32.\n LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], rwin32.BOOL, save_err=rffi.\n RFFI_SAVE_LASTERROR)\n", (1707, 1857), False, 'from rpython.rlib import rwin32\n'), ((1896, 2016), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""WaitNamedPipeA"""', '[rwin32.LPCSTR, rwin32.DWORD]', 'rwin32.BOOL'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('WaitNamedPipeA', [rwin32.LPCSTR, rwin32.DWORD], rwin32.\n BOOL, save_err=rffi.RFFI_SAVE_LASTERROR)\n", (1914, 2016), False, 'from rpython.rlib import rwin32\n'), ((2043, 2226), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""PeekNamedPipe"""', '[rwin32.HANDLE, rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rwin32.LPDWORD,\n rwin32.LPDWORD]', 'rwin32.BOOL'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('PeekNamedPipe', [rwin32.HANDLE, rffi.VOIDP, rwin32.\n DWORD, rwin32.LPDWORD, rwin32.LPDWORD, rwin32.LPDWORD], rwin32.BOOL,\n save_err=rffi.RFFI_SAVE_LASTERROR)\n", (2061, 2226), False, 'from rpython.rlib import rwin32\n'), ((2279, 2472), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""CreateFileA"""', '[rwin32.LPCSTR, rwin32.DWORD, rwin32.DWORD, rffi.VOIDP, rwin32.DWORD,\n rwin32.DWORD, rwin32.HANDLE]', 'rwin32.HANDLE'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('CreateFileA', [rwin32.LPCSTR, rwin32.DWORD, rwin32.\n DWORD, rffi.VOIDP, rwin32.DWORD, rwin32.DWORD, rwin32.HANDLE], rwin32.\n HANDLE, save_err=rffi.RFFI_SAVE_LASTERROR)\n", (2297, 2472), False, 'from rpython.rlib import rwin32\n'), ((2515, 2674), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""WriteFile"""', '[rwin32.HANDLE, rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rffi.VOIDP]', 'rwin32.BOOL'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('WriteFile', [rwin32.HANDLE, rffi.VOIDP, rwin32.DWORD,\n rwin32.LPDWORD, rffi.VOIDP], rwin32.BOOL, save_err=rffi.RFFI_SAVE_LASTERROR\n )\n", (2533, 2674), False, 'from rpython.rlib import rwin32\n'), ((2717, 2875), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""ReadFile"""', '[rwin32.HANDLE, rffi.VOIDP, rwin32.DWORD, rwin32.LPDWORD, rffi.VOIDP]', 'rwin32.BOOL'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('ReadFile', [rwin32.HANDLE, rffi.VOIDP, 
rwin32.DWORD,\n rwin32.LPDWORD, rffi.VOIDP], rwin32.BOOL, save_err=rffi.RFFI_SAVE_LASTERROR\n )\n", (2735, 2875), False, 'from rpython.rlib import rwin32\n'), ((2921, 3020), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""ExitProcess"""', '[rffi.UINT]', 'lltype.Void'], {'save_err': 'rffi.RFFI_SAVE_LASTERROR'}), "('ExitProcess', [rffi.UINT], lltype.Void, save_err=rffi.\n RFFI_SAVE_LASTERROR)\n", (2939, 3020), False, 'from rpython.rlib import rwin32\n'), ((3042, 3094), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""GetTickCount"""', '[]', 'rwin32.DWORD'], {}), "('GetTickCount', [], rwin32.DWORD)\n", (3060, 3094), False, 'from rpython.rlib import rwin32\n'), ((3110, 3166), 'rpython.rlib.rwin32.winexternal', 'rwin32.winexternal', (['"""Sleep"""', '[rwin32.DWORD]', 'lltype.Void'], {}), "('Sleep', [rwin32.DWORD], lltype.Void)\n", (3128, 3166), False, 'from rpython.rlib import rwin32\n'), ((3586, 3723), 'pypy.interpreter.gateway.unwrap_spec', 'unwrap_spec', ([], {'name': '"""text"""', 'openmode': 'r_uint', 'pipemode': 'r_uint', 'maxinstances': 'r_uint', 'outputsize': 'r_uint', 'inputsize': 'r_uint', 'timeout': 'r_uint'}), "(name='text', openmode=r_uint, pipemode=r_uint, maxinstances=\n r_uint, outputsize=r_uint, inputsize=r_uint, timeout=r_uint)\n", (3597, 3723), False, 'from pypy.interpreter.gateway import interp2app, unwrap_spec\n'), ((5787, 5827), 'pypy.interpreter.gateway.unwrap_spec', 'unwrap_spec', ([], {'name': '"""text"""', 'timeout': 'r_uint'}), "(name='text', timeout=r_uint)\n", (5798, 5827), False, 'from pypy.interpreter.gateway import interp2app, unwrap_spec\n'), ((6057, 6157), 'pypy.interpreter.gateway.unwrap_spec', 'unwrap_spec', ([], {'filename': '"""fsencode"""', 'access': 'r_uint', 'share': 'r_uint', 'disposition': 'r_uint', 'flags': 'r_uint'}), "(filename='fsencode', access=r_uint, share=r_uint, disposition=\n r_uint, flags=r_uint)\n", (6068, 6157), False, 'from pypy.interpreter.gateway import interp2app, unwrap_spec\n'), ((6755, 6779), 'pypy.interpreter.gateway.unwrap_spec', 'unwrap_spec', ([], {'code': 'r_uint'}), '(code=r_uint)\n', (6766, 6779), False, 'from pypy.interpreter.gateway import interp2app, unwrap_spec\n'), ((896, 967), 'rpython.translator.tool.cbuild.ExternalCompilationInfo', 'ExternalCompilationInfo', ([], {'includes': "['windows.h']", 'libraries': "['kernel32']"}), "(includes=['windows.h'], libraries=['kernel32'])\n", (919, 967), False, 'from rpython.translator.tool.cbuild import ExternalCompilationInfo\n'), ((4256, 4279), '_multiprocess.interp_connection.w_handle', 'w_handle', (['space', 'handle'], {}), '(space, handle)\n', (4264, 4279), False, 'from _multiprocess.interp_connection import w_handle\n'), ((6729, 6752), '_multiprocess.interp_connection.w_handle', 'w_handle', (['space', 'handle'], {}), '(space, handle)\n', (6737, 6752), False, 'from _multiprocess.interp_connection import w_handle\n'), ((1052, 1087), 'rpython.rtyper.tool.rffi_platform.ConstantInteger', 'rffi_platform.ConstantInteger', (['name'], {}), '(name)\n', (1081, 1087), False, 'from rpython.rtyper.tool import rffi_platform\n'), ((3257, 3283), 'rpython.rlib.rwin32.CloseHandle', 'rwin32.CloseHandle', (['handle'], {}), '(handle)\n', (3275, 3283), False, 'from rpython.rlib import rwin32\n'), ((3455, 3482), 'rpython.rlib.rwin32.GetLastError_saved', 'rwin32.GetLastError_saved', ([], {}), '()\n', (3480, 3482), False, 'from rpython.rlib import rwin32\n'), ((3934, 3995), 'pypy.interpreter.error.oefmt', 'oefmt', (['space.w_NotImplementedError', '"""expected a NULL 
pointer"""'], {}), "(space.w_NotImplementedError, 'expected a NULL pointer')\n", (3939, 3995), False, 'from pypy.interpreter.error import oefmt, wrap_windowserror\n'), ((4449, 4510), 'pypy.interpreter.error.oefmt', 'oefmt', (['space.w_NotImplementedError', '"""expected a NULL pointer"""'], {}), "(space.w_NotImplementedError, 'expected a NULL pointer')\n", (4454, 4510), False, 'from pypy.interpreter.error import oefmt, wrap_windowserror\n'), ((5710, 5742), 'rpython.rtyper.lltypesystem.lltype.free', 'lltype.free', (['state'], {'flavor': '"""raw"""'}), "(state, flavor='raw')\n", (5721, 5742), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((5751, 5784), 'rpython.rtyper.lltypesystem.lltype.free', 'lltype.free', (['statep'], {'flavor': '"""raw"""'}), "(statep, flavor='raw')\n", (5762, 5784), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((6410, 6471), 'pypy.interpreter.error.oefmt', 'oefmt', (['space.w_NotImplementedError', '"""expected a NULL pointer"""'], {}), "(space.w_NotImplementedError, 'expected a NULL pointer')\n", (6415, 6471), False, 'from pypy.interpreter.error import oefmt, wrap_windowserror\n'), ((3324, 3354), 'rpython.rlib.rwin32.lastSavedWindowsError', 'rwin32.lastSavedWindowsError', ([], {}), '()\n', (3352, 3354), False, 'from rpython.rlib import rwin32\n'), ((4212, 4242), 'rpython.rlib.rwin32.lastSavedWindowsError', 'rwin32.lastSavedWindowsError', ([], {}), '()\n', (4240, 4242), False, 'from rpython.rlib import rwin32\n'), ((4599, 4629), 'rpython.rlib.rwin32.lastSavedWindowsError', 'rwin32.lastSavedWindowsError', ([], {}), '()\n', (4627, 4629), False, 'from rpython.rlib import rwin32\n'), ((4810, 4835), 'rpython.rtyper.lltypesystem.rffi.CArrayPtr', 'rffi.CArrayPtr', (['rffi.UINT'], {}), '(rffi.UINT)\n', (4824, 4835), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((4884, 4910), 'rpython.rtyper.lltypesystem.rffi.CArrayPtr', 'rffi.CArrayPtr', (['rffi.UINTP'], {}), '(rffi.UINTP)\n', (4898, 4910), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((5126, 5147), 'rpython.rtyper.lltypesystem.rffi.ptradd', 'rffi.ptradd', (['state', '(0)'], {}), '(state, 0)\n', (5137, 5147), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((5303, 5324), 'rpython.rtyper.lltypesystem.rffi.ptradd', 'rffi.ptradd', (['state', '(1)'], {}), '(state, 1)\n', (5314, 5324), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((5470, 5491), 'rpython.rtyper.lltypesystem.rffi.ptradd', 'rffi.ptradd', (['state', '(2)'], {}), '(state, 2)\n', (5481, 5491), False, 'from rpython.rtyper.lltypesystem import lltype, rffi\n'), ((6023, 6053), 'rpython.rlib.rwin32.lastSavedWindowsError', 'rwin32.lastSavedWindowsError', ([], {}), '()\n', (6051, 6053), False, 'from rpython.rlib import rwin32\n'), ((6685, 6715), 'rpython.rlib.rwin32.lastSavedWindowsError', 'rwin32.lastSavedWindowsError', ([], {}), '()\n', (6713, 6715), False, 'from rpython.rlib import rwin32\n'), ((7665, 7685), 'pypy.interpreter.gateway.interp2app', 'interp2app', (['function'], {}), '(function)\n', (7675, 7685), False, 'from pypy.interpreter.gateway import interp2app, unwrap_spec\n'), ((7717, 7741), 'pypy.interpreter.function.StaticMethod', 'StaticMethod', (['w_function'], {}), '(w_function)\n', (7729, 7741), False, 'from pypy.interpreter.function import StaticMethod\n'), ((5657, 5687), 'rpython.rlib.rwin32.lastSavedWindowsError', 'rwin32.lastSavedWindowsError', ([], {}), '()\n', (5685, 5687), False, 'from rpython.rlib import rwin32\n')]
|
import socket
import threading
import platform
import os
import sys
from queue import Queue
from user import User
import pickle
from datetime import datetime
import time
import math
class Client(object):
def __init__(self):
self.host = ''
self.command_port = 9999
self.data_port = 9998
self.command_sock = None
self.data_sock = None
self.clear_cmd = ''
self.user_details = User()
self.thread_queue = Queue()
self.NUMBER_OF_THREAD = 2
self.NUMBER_OF_JOB = 2
self.JOB_LIST = [1, 2]
def set_server_add(self,address):
self.host = address
def create_socket(self):
try:
self.command_sock = socket.socket()
except Exception as e:
            print('Problem occurred while creating socket : \n{0}\n'.format(e))
def connect_command_socket(self):
try:
self.command_sock.connect((self.host, self.command_port))
print('You are now connected to server.')
except Exception as e:
            print('Problem occurred while connecting command socket : \n{0}\n'.format(e))
def connect_data_socket(self):
try:
self.data_sock = socket.socket()
self.data_sock.connect((self.host, self.data_port))
            print('Data connection established.\n')
except Exception as e:
            print('Problem occurred while connecting data socket : \n{0}\n'.format(e))
def user_interface(self):
if platform.system() == 'Windows':
self.clear_cmd = 'cls'
else:
self.clear_cmd = 'clear'
while True:
os.system(self.clear_cmd)
user_choice = input('\t\tWelcome To Pluto\n\n1.Log In\n2.Sign Up\n3.Exit\n\nEnter your choice : ')
if user_choice == '1':
self.connect_data_socket()
self.signin_user()
elif user_choice == '2':
self.connect_data_socket()
self.register_user()
elif user_choice == '3':
self.command_sock.close()
os.system('kill '+str(os.getpid()))
break
else:
                _ = input('\nInvalid choice, please select a valid option. Press enter to continue.\n')
    def receive_command(self):
while True:
try:
command = str(self.command_sock.recv(1024), 'utf-8')
self.command_sock.send(str.encode('.'))
                # print('Command data received.\n')
except Exception as e:
                print('Problem occurred while collecting command data : \n{0}\n'.format(e))
def register_user(self):
while True:
try:
os.system(self.clear_cmd)
userObj = User()
userObj.set_user_details()
byte_stream = pickle.dumps(('1',userObj))
self.data_sock.send(byte_stream)
print('Data sent to server.')
byte_stream = self.data_sock.recv(102480)
data = pickle.loads(byte_stream)
if data[0] == '1':
                    print('User ID already exists.')
_=input('Press enter key to continue')
elif data[0] == '2':
print('User registration successful.')
_=input('Press enter key to continue')
byte_stream = pickle.dumps(('3',userObj))
self.data_sock.send(byte_stream)
self.data_sock.close()
break
except Exception as e:
                print('Problem occurred while sending data : \n{0}\n'.format(e))
_=input('Press enter key to continue')
break
def signin_user(self):
while True:
try:
os.system(self.clear_cmd)
userObj = User()
userObj.get_user_details()
byte_stream = pickle.dumps(('2',userObj))
self.data_sock.send(byte_stream)
print('Data sent to server.')
byte_stream = self.data_sock.recv(202480)
data = pickle.loads(byte_stream)
if data[0] == '1':
print('Welcome User.')
_=input('Press enter key to continue')
self.user_details.name = data[1][2]
self.user_details.password = data[1][1]
self.user_details.userid = data[1][0]
self.user_details.group_list = data[1][3]
self.user_details.log_file = data[1][4]
self.write_log_file('User signin.')
self.home_page()
byte_stream = pickle.dumps(('3',userObj))
self.data_sock.send(byte_stream)
self.data_sock.close()
break
elif data[0] == '2':
print('Invalid user name or password.')
_=input('Press enter key to continue or enter "b" to go back : ')
if _ == 'b':
userObj = User()
byte_stream = pickle.dumps(('3',userObj))
self.data_sock.send(byte_stream)
self.data_sock.close()
break
except Exception as e:
                print('Problem occurred while logging in : \n{0}\n'.format(e))
_=input('Press enter key to continue')
break
def home_page(self):
while True:
try:
os.system(self.clear_cmd)
mopt = input('\t\tPluto Home\n\nName : {0}\t\tUser name : {1}\n\n1.Sign Out\n2.Groups\n3.Messages\n4.Check log\n\nEnter your choice : '.format(self.user_details.name, self.user_details.userid))
if mopt == '1':
self.write_log_file('User logout.')
break
elif mopt== '2':
while True:
os.system(self.clear_cmd)
gopt = input('\t\tPluto Groups\n\nName : {0}\t\tUser name : {1}\n\n1.Go back\n2.Current groups\n3.Join group\n4.Leave group\n\nEnter your choice : '.format(self.user_details.name, self.user_details.userid))
if gopt=='1':
break
if gopt=='2':
print('\nCurrent Group List\n{0}'.format(self.user_details.group_list))
_ = input('\nPress enter key to continue.')
elif gopt=='3':
self.show_groups()
_ = input('\nPress enter key to continue.')
elif gopt == '4':
self.leave_group()
elif mopt == '3':
self.send_messages()
elif mopt == '4':
self.read_log_file()
except Exception as e:
                print('Problem occurred while opening home page\n{0}\n'.format(e))
def show_groups(self):
try:
userObj = User()
byte_stream = pickle.dumps(('4',userObj))
self.data_sock.send(byte_stream)
byte_stream = self.data_sock.recv(102480)
group_data = pickle.loads(byte_stream)
while True:
os.system(self.clear_cmd)
print('\t\tAll Groups\n\n')
for i,group in enumerate(group_data[0,:]):
if i==0 or i==1:
print('{0} : Private(group_id)'.format(i))
else:
print('{0} : {1}({2})'.format(i,group[0],group[1]))
_ = input('\nEnter group number you want to join or enter "b" to go back : ')
if _ == 'b':
break
else:
try:
group_num = int(_)
if group_num>=0 and group_num < len(group_data[0]):
if group_num == 0 or group_num == 1:
                                _ = input('\nYou are not allowed to join this group. Press enter to continue')
else:
byte_stream = pickle.dumps(('5', group_data[0,group_num,1], self.user_details.userid))
self.data_sock.send(byte_stream)
byte_stream = self.data_sock.recv(102480)
info = pickle.loads(byte_stream)
if info == '1':
self.user_details.group_list.append(group_data[0,group_num,1])
self.write_log_file('User joined a group '+str(group_data[0,group_num,1])+'.')
print('\nYou have joined a new group.')
elif info == '2':
print('\nYou are already present in this group.')
_ = input('\nPress enter to continue')
except Exception as e:
pass
except Exception as e:
            print('Problem occurred while fetching group data \n{0}\n'.format(e))
def leave_group(self):
while True:
os.system(self.clear_cmd)
print('\t\tAll Groups\n\n')
for i,group in enumerate(self.user_details.group_list):
print('{0} : {1}'.format(i,group))
_ = input('\nEnter group number you want to leave or enter "b" to go back : ')
if _ == 'b':
break
else:
try:
group_num = int(_)
if group_num>=0 and group_num < len(self.user_details.group_list):
byte_stream = pickle.dumps(('6', self.user_details.group_list[group_num], self.user_details.userid))
self.data_sock.send(byte_stream)
byte_stream = self.data_sock.recv(102480)
info = pickle.loads(byte_stream)
if info == '1':
self.write_log_file('User left the group '+str(self.user_details.group_list[group_num])+'.')
del self.user_details.group_list[group_num]
print('\nThis group is removed from your list.')
else:
                            print('\nProblem occurred')
_ = input('\nPress enter key to continue')
except Exception as e:
pass
def send_messages(self):
while True:
try:
os.system(self.clear_cmd)
print('\t\tPluto Message\n\n')
for i,group in enumerate(self.user_details.group_list):
print('{0} : {1}'.format(i,group))
_ = input('\nEnter group number to open message box or enter "b" to go back : ')
if _ == 'b':
break
else:
messopt = int(_)
if messopt>=0 and messopt<len(self.user_details.group_list):
group_name = self.user_details.group_list[messopt]
                        self.write_log_file('User entered chat group '+group_name+'.')
while True:
os.system(self.clear_cmd)
print('\t\tGroup chat : {0}\n\t(To send file write following command)\n\t(in place of message : send -f filename.extn)\n\t(to download file use command : get -f filename.extn)'.format(group_name))
byte_stream = pickle.dumps(('8', group_name))
self.data_sock.send(byte_stream)
byte_stream = self.data_sock.recv(202480)
data = pickle.loads(byte_stream)
for item in data:
print(item)
print('')
user_mess = input('Enter your message (or type exit to go back) : ')
if user_mess == 'exit':
break
if len(user_mess)>=1:
mess_list = user_mess.split(' ')
if 'send' in mess_list and '-f' in mess_list:
try:
while True:
byte_stream = pickle.dumps(('10', mess_list[2], group_name, self.user_details.userid))
self.data_sock.send(byte_stream)
time.sleep(0.001)
d_sock = socket.socket()
d_sock.connect((self.host,9997))
d_sock.send(pickle.dumps(os.path.getsize(mess_list[2])))
file = open(mess_list[2], 'rb')
chunk = file.read(1024)
print('\n Sending file, please wait.')
while chunk:
print('#',end='')
d_sock.send(chunk)
chunk = file.read(1024)
file.close()
d_sock.send(b'done')
d_sock.close()
self.user_details.log_file.append('('+str(datetime.today())+') # '+'User send a file '+mess_list[2]+' to group '+group_name)
break
except Exception as e:
_ = input('\nInvalid file name, press enter to continue')
elif 'get' in mess_list and '-f' in mess_list:
try:
byte_stream = pickle.dumps(('11', mess_list[2], self.user_details.userid))
self.data_sock.send(byte_stream)
d_sock = socket.socket()
d_sock.bind(('',9997))
d_sock.listen()
d_conn,d_add = d_sock.accept()
sdata = pickle.loads(d_conn.recv(102400))
filesize = 0
file = open(mess_list[2], "wb")
print(sdata)
while filesize<=sdata:
chunk = d_conn.recv(1024)
filesize += 1024
print('#',end='')
file.write(chunk)
file.close()
d_conn.close()
d_sock.close()
_ = input('\nFile transfer complete, press enter key to continue\n')
self.user_details.log_file.append('('+str(datetime.today())+') # '+'User downloaded a file '+mess_list[2]+' from group '+group_name)
except (KeyboardInterrupt,SystemExit,Exception):
                                        _ = input('\nProblem occurred, press enter to continue')
else:
user_mess = '('+self.user_details.userid+') # '+user_mess
byte_stream = pickle.dumps(('9', group_name, user_mess))
self.data_sock.send(byte_stream)
# _ = input('Press enter key to continue')
except:
                print('Problem occurred while sending message.')
def write_log_file(self, log_text):
try:
log_text = '('+str(datetime.today())+') # '+log_text
self.user_details.log_file.append(log_text)
byte_stream = pickle.dumps(('7', self.user_details.userid, log_text))
self.data_sock.send(byte_stream)
except Exception as e:
            print('Problem occurred while writing log file. \n{0}\n'.format(e))
_ = input('Press enter key to continue')
def read_log_file(self):
try:
os.system(self.clear_cmd)
print('\t\tUser log file\n\n')
for item in self.user_details.log_file:
print(item)
except Exception as e:
            print('Problem occurred while reading log file. \n{0}\n'.format(e))
_ = input('Press enter key to continue')
def create_thread(self):
for _ in range(self.NUMBER_OF_THREAD):
t = threading.Thread(target=self.work_thread)
t.daemon = True
t.start()
def create_jobs(self):
for jobs in self.JOB_LIST:
self.thread_queue.put(jobs)
self.thread_queue.join()
def work_thread(self):
while True:
job_number = self.thread_queue.get()
if job_number == 1:
self.user_interface()
if job_number == 2:
                self.receive_command()
self.thread_queue.task_done()
if __name__ == '__main__':
clientObj = Client()
clientObj.set_server_add('10.1.139.241')
clientObj.create_socket()
clientObj.connect_command_socket()
clientObj.create_thread()
clientObj.create_jobs()
|
[
"pickle.loads",
"threading.Thread",
"os.getpid",
"pickle.dumps",
"datetime.datetime.today",
"os.path.getsize",
"socket.socket",
"os.system",
"time.sleep",
"platform.system",
"queue.Queue",
"user.User"
] |
[((390, 396), 'user.User', 'User', ([], {}), '()\n', (394, 396), False, 'from user import User\n'), ((419, 426), 'queue.Queue', 'Queue', ([], {}), '()\n', (424, 426), False, 'from queue import Queue\n'), ((620, 635), 'socket.socket', 'socket.socket', ([], {}), '()\n', (633, 635), False, 'import socket\n'), ((1041, 1056), 'socket.socket', 'socket.socket', ([], {}), '()\n', (1054, 1056), False, 'import socket\n'), ((1286, 1303), 'platform.system', 'platform.system', ([], {}), '()\n', (1301, 1303), False, 'import platform\n'), ((1397, 1422), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (1406, 1422), False, 'import os\n'), ((5493, 5499), 'user.User', 'User', ([], {}), '()\n', (5497, 5499), False, 'from user import User\n'), ((5517, 5545), 'pickle.dumps', 'pickle.dumps', (["('4', userObj)"], {}), "(('4', userObj))\n", (5529, 5545), False, 'import pickle\n'), ((5642, 5667), 'pickle.loads', 'pickle.loads', (['byte_stream'], {}), '(byte_stream)\n', (5654, 5667), False, 'import pickle\n'), ((7055, 7080), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (7064, 7080), False, 'import os\n'), ((11787, 11842), 'pickle.dumps', 'pickle.dumps', (["('7', self.user_details.userid, log_text)"], {}), "(('7', self.user_details.userid, log_text))\n", (11799, 11842), False, 'import pickle\n'), ((12053, 12078), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (12062, 12078), False, 'import os\n'), ((12383, 12424), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.work_thread'}), '(target=self.work_thread)\n', (12399, 12424), False, 'import threading\n'), ((2247, 2272), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (2256, 2272), False, 'import os\n'), ((2287, 2293), 'user.User', 'User', ([], {}), '()\n', (2291, 2293), False, 'from user import User\n'), ((2343, 2371), 'pickle.dumps', 'pickle.dumps', (["('1', userObj)"], {}), "(('1', userObj))\n", (2355, 2371), False, 'import pickle\n'), ((2499, 2524), 'pickle.loads', 'pickle.loads', (['byte_stream'], {}), '(byte_stream)\n', (2511, 2524), False, 'import pickle\n'), ((3063, 3088), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (3072, 3088), False, 'import os\n'), ((3103, 3109), 'user.User', 'User', ([], {}), '()\n', (3107, 3109), False, 'from user import User\n'), ((3159, 3187), 'pickle.dumps', 'pickle.dumps', (["('2', userObj)"], {}), "(('2', userObj))\n", (3171, 3187), False, 'import pickle\n'), ((3315, 3340), 'pickle.loads', 'pickle.loads', (['byte_stream'], {}), '(byte_stream)\n', (3327, 3340), False, 'import pickle\n'), ((4347, 4372), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (4356, 4372), False, 'import os\n'), ((5687, 5712), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (5696, 5712), False, 'import os\n'), ((8077, 8102), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (8086, 8102), False, 'import os\n'), ((3739, 3767), 'pickle.dumps', 'pickle.dumps', (["('3', userObj)"], {}), "(('3', userObj))\n", (3751, 3767), False, 'import pickle\n'), ((2762, 2790), 'pickle.dumps', 'pickle.dumps', (["('3', userObj)"], {}), "(('3', userObj))\n", (2774, 2790), False, 'import pickle\n'), ((7452, 7543), 'pickle.dumps', 'pickle.dumps', (["('6', self.user_details.group_list[group_num], self.user_details.userid)"], {}), "(('6', self.user_details.group_list[group_num], self.\n user_details.userid))\n", (7464, 7543), False, 'import pickle\n'), ((7639, 
7664), 'pickle.loads', 'pickle.loads', (['byte_stream'], {}), '(byte_stream)\n', (7651, 7664), False, 'import pickle\n'), ((4019, 4025), 'user.User', 'User', ([], {}), '()\n', (4023, 4025), False, 'from user import User\n'), ((4046, 4074), 'pickle.dumps', 'pickle.dumps', (["('3', userObj)"], {}), "(('3', userObj))\n", (4058, 4074), False, 'import pickle\n'), ((4687, 4712), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (4696, 4712), False, 'import os\n'), ((8604, 8629), 'os.system', 'os.system', (['self.clear_cmd'], {}), '(self.clear_cmd)\n', (8613, 8629), False, 'import os\n'), ((8855, 8886), 'pickle.dumps', 'pickle.dumps', (["('8', group_name)"], {}), "(('8', group_name))\n", (8867, 8886), False, 'import pickle\n'), ((8990, 9015), 'pickle.loads', 'pickle.loads', (['byte_stream'], {}), '(byte_stream)\n', (9002, 9015), False, 'import pickle\n'), ((11689, 11705), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (11703, 11705), False, 'from datetime import datetime\n'), ((6309, 6383), 'pickle.dumps', 'pickle.dumps', (["('5', group_data[0, group_num, 1], self.user_details.userid)"], {}), "(('5', group_data[0, group_num, 1], self.user_details.userid))\n", (6321, 6383), False, 'import pickle\n'), ((6488, 6513), 'pickle.loads', 'pickle.loads', (['byte_stream'], {}), '(byte_stream)\n', (6500, 6513), False, 'import pickle\n'), ((1773, 1784), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1782, 1784), False, 'import os\n'), ((11427, 11469), 'pickle.dumps', 'pickle.dumps', (["('9', group_name, user_mess)"], {}), "(('9', group_name, user_mess))\n", (11439, 11469), False, 'import pickle\n'), ((9385, 9457), 'pickle.dumps', 'pickle.dumps', (["('10', mess_list[2], group_name, self.user_details.userid)"], {}), "(('10', mess_list[2], group_name, self.user_details.userid))\n", (9397, 9457), False, 'import pickle\n'), ((9513, 9530), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (9523, 9530), False, 'import time\n'), ((9551, 9566), 'socket.socket', 'socket.socket', ([], {}), '()\n', (9564, 9566), False, 'import socket\n'), ((10370, 10430), 'pickle.dumps', 'pickle.dumps', (["('11', mess_list[2], self.user_details.userid)"], {}), "(('11', mess_list[2], self.user_details.userid))\n", (10382, 10430), False, 'import pickle\n'), ((10493, 10508), 'socket.socket', 'socket.socket', ([], {}), '()\n', (10506, 10508), False, 'import socket\n'), ((9659, 9688), 'os.path.getsize', 'os.path.getsize', (['mess_list[2]'], {}), '(mess_list[2])\n', (9674, 9688), False, 'import os\n'), ((10076, 10092), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (10090, 10092), False, 'from datetime import datetime\n'), ((11109, 11125), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (11123, 11125), False, 'from datetime import datetime\n')]
|
import bpy
import sys
from mathutils import Vector
import numpy as np
from abc import ABC, ABCMeta, abstractmethod
# Scene parameters
SCALE_FACTOR = 0.05 # factor used to scale an object when close enough to target
ROTATION_VEC = (3.0, 0., 0.) # rotation vector applied to an object when close enough to target
ORIGINAL_ROTATION_VEC = (0., 0., 0.) # original rotation vector for an object
DIST_LIM = 10.5 # distance threshold for which an object is influenced by a target
TARGET_SPEED = 1.5 # speed at which the target is moving in the scene
# TODO
# Still some confusing naming around different "object" types.
# One cleanup option would be to embed everything in this class.
# The major thing to take into consideration is the difference between an actual 3D blender object
# and a basic data structure with object info. For example with the current duplication method a Blender
# object shares the same mesh data with all its duplicates.
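# A hedged illustration of the duplication caveat above (a sketch only, not used by
# this script): bpy's Object.copy() clones the object wrapper while both objects keep
# pointing at the same mesh datablock, so an independent mesh needs an explicit
# data.copy(). The helper name is made up for illustration.
def _copy_with_own_mesh(blender_obj, link_to_scene=True):
    dup = blender_obj.copy()
    dup.data = blender_obj.data.copy()  # give the duplicate its own mesh datablock
    if link_to_scene:
        bpy.context.scene.collection.objects.link(dup)
    return dup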
class Object(ABC):
sierpinski_scale = None # scale to use during a recursive "sierpinski" step
def __init__(self, radius: float, location: tuple, name: str):
"""
Base utility class for a 3D object
:param radius:
:param location:
:param name:
"""
self.loc = location
self.radius = radius
self.name = name
self.verts = []
self.edges = []
self.faces = []
self.mesh_data = None
self.obj = None
def _init_mesh(self):
mesh_data = bpy.data.meshes.new("{}_mesh_data".format(self.name))
mesh_data.from_pydata(self.verts, self.edges, self.faces)
mesh_data.update()
self.mesh_data = mesh_data
def _init_obj(self, link=True):
self.obj = bpy.data.objects.new(self.name, self.mesh_data)
if link:
scene = bpy.context.scene
scene.collection.objects.link(self.obj)
@staticmethod
def scale_objects(object: dict, grid_val, scale_factor=SCALE_FACTOR):
obj = object['object']
origin_scale = object['orgn_scale']
# grid value 1, object should end up with original size
if grid_val:
if obj.scale != origin_scale:
obj.scale = origin_scale
# grid value 0, object should end up scaled
else:
scaled_val = origin_scale * scale_factor
if obj.scale != scaled_val:
obj.scale = scaled_val
# keyframe change
obj.keyframe_insert("scale")
@staticmethod
def rotate_objects(object: dict, grid_val, rotation_vec=ROTATION_VEC, original_rot_vec=ORIGINAL_ROTATION_VEC):
obj = object['object']
# grid value 1, object should end up with original size
if grid_val:
if obj.rotation_euler != original_rot_vec:
obj.rotation_euler = original_rot_vec
# grid value 0, object should end up scaled
else:
if obj.rotation_euler != rotation_vec:
obj.rotation_euler = rotation_vec
# keyframe change
obj.keyframe_insert("rotation_euler")
@classmethod
def obj_replication(cls, obj: dict, max_depth: int):
"""Entry point to manage Replication-Shrink for a target object"""
object = cls(radius=obj['radius'], location=obj['location'])
obj['object'] = object.obj
sub_objs = [obj]
for i in range(max_depth):
new_sub_objs = []
for sub_obj in sub_objs:
new_sub_objs.extend(cls.replicate_shrink_step(sub_obj, i + 1))
# delete original
objs = bpy.data.objects
objs.remove(sub_obj['object'], True)
sub_objs = new_sub_objs
# Scale mesh data (all copies should follow)
for v in sub_objs[0]['object'].data.vertices:
v.co *= cls.sierpinski_scale
# Just at this point link object to scene
for sub_obj in sub_objs:
scene = bpy.context.scene
scene.collection.objects.link(sub_obj['object'])
return sub_objs
@classmethod
@abstractmethod
def replicate_shrink_step(cls, obj: dict, max_depth: int):
"""Replicates (mesh copy) the given object using "sierpinski" logic
all the resulting sub-objects are then returned"""
pass
class Cube(Object):
sierpinski_scale = 1/3
def __init__(self, radius: float, location: tuple):
super().__init__(radius=radius, location=location, name='cube')
loc = location
self.verts = [
(loc[0]+radius, loc[1]+radius, loc[2]-radius),
(loc[0]+radius, loc[1]-radius, loc[2]-radius),
(loc[0]-radius, loc[1]-radius, loc[2]-radius),
(loc[0]-radius, loc[1]+radius, loc[2]-radius),
(loc[0]+radius, loc[1]+radius, loc[2]+radius),
(loc[0]+radius, loc[1]-radius, loc[2]+radius),
(loc[0]-radius, loc[1]-radius, loc[2]+radius),
(loc[0]-radius, loc[1]+radius, loc[2]+radius),
]
self.faces = [
(0, 1, 2, 3),
(4, 7, 6, 5),
(0, 4, 5, 1),
(1, 5, 6, 2),
(2, 6, 7, 3),
(4, 0, 3, 7)
]
self._init_mesh()
self._init_obj()
@classmethod
def replicate_shrink_step(cls, cube: dict, max_depth: int):
radius = cube['radius']
loc = cube['location']
cube_obj = cube['object']
sub_cubes = []
# amount of shifting for the center of new object
center_shift = radius * (2 / 3)
for i, x in enumerate(np.linspace(loc[0] - center_shift, loc[0] + center_shift, 3)):
for j, y in enumerate(np.linspace(loc[1] - center_shift, loc[1] + center_shift, 3)):
for k, z in enumerate(np.linspace(loc[2] - center_shift, loc[2] + center_shift, 3)):
if i == j == 1 or j == k == 1 or k == i == 1:
continue
else:
cube_copy = cube_obj.copy()
# obj scaling (different from mesh one)
# keeps original dimensions, so need to keep track of depth
# cube_copy.scale = Vector((1 / 3**depth, 1 / 3**depth, 1 / 3**depth))
cube_copy.location = (x, y, z)
new_cube = {
'radius': radius * cls.sierpinski_scale,
'location': (x, y, z),
'object': cube_copy,
'orgn_scale': cube_copy.scale.copy()
}
sub_cubes.append(new_cube)
return sub_cubes
class Pyramid(Object):
sierpinski_scale = 1 / 2
def __init__(self, radius: float, location: tuple):
super().__init__(radius=radius, location=location, name='pyramid')
loc = location
self.verts = [
(loc[0]+radius, loc[1]+radius, loc[2]-radius),
(loc[0]+radius, loc[1]-radius, loc[2]-radius),
(loc[0]-radius, loc[1]-radius, loc[2]-radius),
(loc[0]-radius, loc[1]+radius, loc[2]-radius),
(loc[0], loc[1], loc[2]+radius),
]
self.faces = [
(0, 1, 2, 3),
(0, 1, 4),
(1, 2, 4),
(2, 3, 4),
(3, 0, 4),
]
self.sierpinski_scale = 1 / 2
self._init_mesh()
self._init_obj()
@classmethod
def replicate_shrink_step(cls, pyramid: dict, depth: int):
radius = pyramid['radius']
loc = pyramid['location']
pyramid_object = pyramid['object']
sub_pyramids = []
# amount of shifting for the center of new object
center_shift = radius / 2
# define the five locations for the five new sub-pyramids
new_loc_top = (loc[0], loc[1], loc[2] + radius)
new_loc_1 = (loc[0] + center_shift, loc[1] + center_shift, loc[2])
new_loc_2 = (loc[0] - center_shift, loc[1] + center_shift, loc[2])
new_loc_3 = (loc[0] + center_shift, loc[1] - center_shift, loc[2])
new_loc_4 = (loc[0] - center_shift, loc[1] - center_shift, loc[2])
new_locs = [new_loc_top, new_loc_1, new_loc_2, new_loc_3, new_loc_4]
for new_loc in new_locs:
pyramid_copy = pyramid_object.copy()
# obj scaling (different from mesh one)
# keeps original dimensions, so need to keep track of depth
# pyramid_copy.scale = Vector((1 / 2**depth, 1 / 2**depth, 1 / 2**depth))
pyramid_copy.location = new_loc
new_pyramid = {
'radius': radius * cls.sierpinski_scale,
'location': new_loc,
'object': pyramid_copy,
'orgn_scale': pyramid_copy.scale.copy()
}
sub_pyramids.append(new_pyramid)
return sub_pyramids
def update_grid(objs, target):
target_loc = target.location
for obj in objs:
dist = np.linalg.norm(np.array(target_loc) - np.array(obj['location']))
if dist < DIST_LIM:
Object.scale_objects(obj, 0)
else:
Object.scale_objects(obj, 1)
# test method to move a target object along an axis
# to trigger updates to the sierpinski sub-objects in the scene
def move_target(target):
(x, y, z) = target.location
target.location = (x + np.random.rand() - 0.5,
y + np.random.rand() - 0.5,
z - TARGET_SPEED + np.random.rand() - 0.5)
target.keyframe_insert("location")
# handler called at every frame change
def frame_handler(scene, objs, target, num_frames_change):
frame = scene.frame_current
# When reaching final frame, clear handlers
if frame >= bpy.context.scene.frame_end:
bpy.app.handlers.frame_change_pre.clear()
elif (frame % num_frames_change) == 0:
move_target(target)
# update grid
update_grid(objs, target)
def main(_):
NUM_FRAMES_CHANGE = 2 # higher values enable a more fluid transformation of objects, as frames between
# keyframings interpolate the object modification taking place.
bpy.ops.mesh.primitive_ico_sphere_add(
subdivisions=4,
radius=0.3,
location=(0, 0, 30))
#target.keyframe_insert("location")
target = bpy.context.scene.objects['Empty']
obj = {
'location': (0, 0, 0),
'radius': 15,
}
objs = Cube.obj_replication(obj, max_depth=3)
#objs = Pyramid.obj_replication(obj, max_depth=3)
bpy.app.handlers.frame_change_pre.clear()
bpy.app.handlers.frame_change_pre.append(lambda x: frame_handler(x, objs, target, NUM_FRAMES_CHANGE))
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"numpy.random.rand",
"bpy.ops.mesh.primitive_ico_sphere_add",
"numpy.array",
"bpy.data.objects.new",
"numpy.linspace",
"bpy.app.handlers.frame_change_pre.clear"
] |
[((10220, 10311), 'bpy.ops.mesh.primitive_ico_sphere_add', 'bpy.ops.mesh.primitive_ico_sphere_add', ([], {'subdivisions': '(4)', 'radius': '(0.3)', 'location': '(0, 0, 30)'}), '(subdivisions=4, radius=0.3, location=\n (0, 0, 30))\n', (10257, 10311), False, 'import bpy\n'), ((10648, 10689), 'bpy.app.handlers.frame_change_pre.clear', 'bpy.app.handlers.frame_change_pre.clear', ([], {}), '()\n', (10687, 10689), False, 'import bpy\n'), ((1746, 1793), 'bpy.data.objects.new', 'bpy.data.objects.new', (['self.name', 'self.mesh_data'], {}), '(self.name, self.mesh_data)\n', (1766, 1793), False, 'import bpy\n'), ((9855, 9896), 'bpy.app.handlers.frame_change_pre.clear', 'bpy.app.handlers.frame_change_pre.clear', ([], {}), '()\n', (9894, 9896), False, 'import bpy\n'), ((5621, 5681), 'numpy.linspace', 'np.linspace', (['(loc[0] - center_shift)', '(loc[0] + center_shift)', '(3)'], {}), '(loc[0] - center_shift, loc[0] + center_shift, 3)\n', (5632, 5681), True, 'import numpy as np\n'), ((5718, 5778), 'numpy.linspace', 'np.linspace', (['(loc[1] - center_shift)', '(loc[1] + center_shift)', '(3)'], {}), '(loc[1] - center_shift, loc[1] + center_shift, 3)\n', (5729, 5778), True, 'import numpy as np\n'), ((9066, 9086), 'numpy.array', 'np.array', (['target_loc'], {}), '(target_loc)\n', (9074, 9086), True, 'import numpy as np\n'), ((9089, 9114), 'numpy.array', 'np.array', (["obj['location']"], {}), "(obj['location'])\n", (9097, 9114), True, 'import numpy as np\n'), ((9442, 9458), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9456, 9458), True, 'import numpy as np\n'), ((9493, 9509), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9507, 9509), True, 'import numpy as np\n'), ((9559, 9575), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9573, 9575), True, 'import numpy as np\n'), ((5819, 5879), 'numpy.linspace', 'np.linspace', (['(loc[2] - center_shift)', '(loc[2] + center_shift)', '(3)'], {}), '(loc[2] - center_shift, loc[2] + center_shift, 3)\n', (5830, 5879), True, 'import numpy as np\n')]
|
from typing import Callable, List, Optional, TypeVar
from reactivex import Observable, abc, compose
from reactivex import operators as ops
from reactivex import typing
_T = TypeVar("_T")
def buffer_with_time_(
timespan: typing.RelativeTime,
timeshift: Optional[typing.RelativeTime] = None,
scheduler: Optional[abc.SchedulerBase] = None,
) -> Callable[[Observable[_T]], Observable[List[_T]]]:
if not timeshift:
timeshift = timespan
return compose(
ops.window_with_time(timespan, timeshift, scheduler),
ops.flat_map(ops.to_list()),
)
__all__ = ["buffer_with_time_"]
|
[
"typing.TypeVar",
"reactivex.operators.window_with_time",
"reactivex.operators.to_list"
] |
[((175, 188), 'typing.TypeVar', 'TypeVar', (['"""_T"""'], {}), "('_T')\n", (182, 188), False, 'from typing import Callable, List, Optional, TypeVar\n'), ((488, 540), 'reactivex.operators.window_with_time', 'ops.window_with_time', (['timespan', 'timeshift', 'scheduler'], {}), '(timespan, timeshift, scheduler)\n', (508, 540), True, 'from reactivex import operators as ops\n'), ((563, 576), 'reactivex.operators.to_list', 'ops.to_list', ([], {}), '()\n', (574, 576), True, 'from reactivex import operators as ops\n')]
|
"""
WIDGETMARK is a Performance Analysis Framework for GUI components
based on user defined use cases. Use Cases can be defined in classes
contained in separate files and are derived from widgetmark.UseCase.
These files will be searched recursively starting from the directory
widgetmark was started in. The default naming pattern is "bench_*.py".
"""
# TODO: print it somewhere
title = "\n".join([
"",
" _ _ _ _",
" (_) | | | | | |",
"__ ___ __| | __ _ ___| |_ _ __ ___ __ _ _ __| | __",
"\\ \\ /\\ / / |/ _` |/ _` |/ _ \\ __| '_ ` _ \\ / _` | '__| |/ /",
" \\ V V /| | (_| | (_| | __/ |_| | | | | | (_| | | | <",
" \\_/\\_/ |_|\\__,_|\\__, |\\___|\\__|_| |_| |_|\\__,_|_| |_|\\_\\",
" __/ |",
" |___/",
"",
])
import signal
import logging
import argparse
from contextlib import contextmanager
import sys
from typing import Dict, Tuple, Union, List, cast
from snakeviz.cli import main as snakviz_main
from .loader import BenchmarkLoader, FileLocation
from .cli_view import PrintView, Color
logger = logging.getLogger(__name__)
class ArgumentNamespace(argparse.Namespace):
"""This class is only for auto completion purposes."""
profile_output: str = "./profiles/"
should_profile: bool = False
pattern: str = ""
locations: List[FileLocation] = []
loglevel: str = ""
visualize: bool = False
class CLI:
_args: Dict[Tuple[str, ...], Dict[str, Union[str, bool, List[str]]]] = {
("-o", "--profile-output"): {
"dest": "profile_output",
"metavar": "PROFILE_FILES_LOCATION",
"default": ArgumentNamespace.profile_output,
"help": "Specify in which directory the profile files "
"should be saved in. The default is a directory "
"'profiles' in your current working directory. ",
},
("-p", "--profile"): {
"dest": "should_profile",
"action": "store_true",
"help": "If set, WidgetMark will create a profile using "
"the Python module cProfile to gain more insights "
"into performance of a widget. Activating profile "
"will introduce a small additional overhead which "
"will decrease performance. ",
},
("locations",): {
"nargs": "*",
"help": "File or folder where the benchmark use cases are "
"located. It is additionally possible to specify "
"for passed files, which UseCases should be executed "
"by adding its name as 'folder/file.py::UseCaseName'. "
"UseCaseName can be a regular expression as well "
"'folder/file.py::.*Case.*'.",
},
("--pattern",): {
"dest": "pattern",
"metavar": "USE_CASE_FILE_NAME_PATTERN",
"help": "File Pattern for the Benchmark Files. Wildcards "
"are used in glob.glob(), e.g. 'bench_*.py'.",
},
("--visualize", ): {
"dest": "visualize",
"action": "store_true",
"help": "Start SNAKEVIZ with the profile output directory. "
"SNAKEVIZ allows you to conveniently explore the "
"recorded profiles in you web browser. To learn more "
"about its usage and features, visit "
"http://jiffyclub.github.io/snakeviz/",
},
("--loglevel",): {
"dest": "loglevel",
"metavar": "LOGGING_MODULE_LEVEL",
"choices": ["CRITICAL",
"ERROR",
"WARNING",
"INFO",
"DEBUG",
"NOTSET"],
"help": "Set the Loggers Level to the passed one.",
},
}
"""Command Line Arguments."""
def __init__(self):
"""
Command Line Interface for the WidgetMark Framework, which allows
using the BenchmarkLoader class from command line.
"""
self._args: ArgumentNamespace = self._setup_command_line_args()
self._loader = BenchmarkLoader(locations=self._args.locations,
file_pattern=self._args.pattern,
with_profiler=self._args.should_profile)
def exec(self):
"""Load the Benchmark Files and Execute them"""
PrintView.print_divider(message="WIDGET-MARK",
colors=Color.BOLD)
with interrupt_signal_handled(_during_exec):
self._loader.load()
self._loader.run()
PrintView.print_results(results=self._loader.results,
profile_output=self._args.profile_output)
PrintView.print_divider(colors=Color.BOLD)
if self._args.should_profile:
if self._args.visualize:
global profile_location
                # snakeviz expects the directory where the profile files were written
                profile_location = self._args.profile_output
                with interrupt_signal_handled(_during_snake_viz):
                    snakviz_main([self._args.profile_output])
            else:
                print(f"To view your profiles, run"
                      f" 'snakeviz {self._args.profile_output}'.")
PrintView.print_divider(message="END OF SESSION",
colors=Color.BOLD)
def _setup_command_line_args(self) -> ArgumentNamespace:
"""Set up, parse and handle command line arguments."""
parser = argparse.ArgumentParser(description=__doc__)
for keys, values in self._args.items():
parser.add_argument(*keys, **values) # type: ignore
args = parser.parse_args()
# Log Level for the Application
if args.loglevel:
CLI._set_log_level(args.loglevel)
del args.loglevel
# File Locations
if not args.locations:
args.locations = BenchmarkLoader.DEFAULT_LOCATION
for index, location in enumerate(args.locations):
args.locations[index] = FileLocation(location=location)
# Global Benchmark file Pattern
if not args.pattern:
args.pattern = BenchmarkLoader.DEFAULT_PATTERN
return cast(ArgumentNamespace, args)
@staticmethod
def _set_log_level(log_level: str):
"""Set the logging packages log level to the passed level."""
import logging
level = getattr(logging, log_level)
logging.basicConfig(level=level)
# ~~~~~~~~~~~~~~~~~~~~~~~~~ Interrupt Signal Handling ~~~~~~~~~~~~~~~~~~~~~~~~~
@contextmanager
def interrupt_signal_handled(new):
"""Context manager for handling keyboard interrupt signals."""
old = signal.signal(signal.SIGINT, new)
try:
yield new
finally:
signal.signal(signal.SIGINT, old)
def _during_exec(_signal, _frame):
print()
PrintView.print_divider(message="SESSION INTERRUPTED",
colors=Color.BOLD)
sys.exit(0)
profile_location = ""
def _during_snake_viz(_signal, _frame):
print(f"\nTo reopen snakeviz, run 'snakeviz {profile_location}'.")
PrintView.print_divider(message="END OF SESSION",
colors=Color.BOLD)
sys.exit(0)
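# Hedged example invocation assembled from the argparse options defined above
# (the "widgetmark" executable name is an assumption about how this CLI is exposed):
#
#     widgetmark benchmarks/ --pattern "bench_*.py" --profile -o ./profiles/ --visualize --loglevel INFO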
|
[
"snakeviz.cli.main",
"argparse.ArgumentParser",
"logging.basicConfig",
"typing.cast",
"logging.getLogger",
"signal.signal",
"sys.exit"
] |
[((1181, 1208), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1198, 1208), False, 'import logging\n'), ((6929, 6962), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'new'], {}), '(signal.SIGINT, new)\n', (6942, 6962), False, 'import signal\n'), ((7204, 7215), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7212, 7215), False, 'import sys\n'), ((7458, 7469), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7466, 7469), False, 'import sys\n'), ((5728, 5772), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (5751, 5772), False, 'import argparse\n'), ((6450, 6479), 'typing.cast', 'cast', (['ArgumentNamespace', 'args'], {}), '(ArgumentNamespace, args)\n', (6454, 6479), False, 'from typing import Dict, Tuple, Union, List, cast\n'), ((6684, 6716), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level'}), '(level=level)\n', (6703, 6716), False, 'import logging\n'), ((7011, 7044), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'old'], {}), '(signal.SIGINT, old)\n', (7024, 7044), False, 'import signal\n'), ((5282, 5323), 'snakeviz.cli.main', 'snakviz_main', (['[self._args.should_profile]'], {}), '([self._args.should_profile])\n', (5294, 5323), True, 'from snakeviz.cli import main as snakviz_main\n')]
|
# Generated by Django 3.0.6 on 2020-11-14 06:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('security', '0007_sudotasks_effective_days'),
]
operations = [
migrations.CreateModel(
name='Groups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Hosts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='UsersGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='security.Groups')),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='security.Users')),
],
),
migrations.CreateModel(
name='SudoConfigs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('host', models.CharField(default='ALL', max_length=20)),
('run_as', models.CharField(default='ALL', max_length=255)),
('commands', models.TextField()),
('command_alias', models.CharField(max_length=255, null=True)),
('args', models.CharField(max_length=255, null=True)),
('group_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='security.Groups')),
],
),
migrations.CreateModel(
name='HostsGroups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='security.Groups')),
('host_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='security.Hosts')),
],
),
]
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((370, 463), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (386, 463), False, 'from django.db import migrations, models\n'), ((487, 532), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (503, 532), False, 'from django.db import migrations, models\n'), ((663, 756), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (679, 756), False, 'from django.db import migrations, models\n'), ((780, 825), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (796, 825), False, 'from django.db import migrations, models\n'), ((956, 1049), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (972, 1049), False, 'from django.db import migrations, models\n'), ((1073, 1118), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (1089, 1118), False, 'from django.db import migrations, models\n'), ((1254, 1347), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1270, 1347), False, 'from django.db import migrations, models\n'), ((1375, 1464), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""security.Groups"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'security.Groups')\n", (1392, 1464), False, 'from django.db import migrations, models\n'), ((1490, 1578), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""security.Users"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'security.Users')\n", (1507, 1578), False, 'from django.db import migrations, models\n'), ((1710, 1803), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1726, 1803), False, 'from django.db import migrations, models\n'), ((1827, 1873), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""ALL"""', 'max_length': '(20)'}), "(default='ALL', max_length=20)\n", (1843, 1873), False, 'from django.db import migrations, models\n'), ((1903, 1950), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""ALL"""', 'max_length': '(255)'}), "(default='ALL', max_length=255)\n", (1919, 1950), False, 'from django.db import migrations, models\n'), ((1982, 2000), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1998, 2000), False, 'from django.db import migrations, models\n'), ((2037, 2080), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 
'(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (2053, 2080), False, 'from django.db import migrations, models\n'), ((2108, 2151), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (2124, 2151), False, 'from django.db import migrations, models\n'), ((2183, 2272), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""security.Groups"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'security.Groups')\n", (2200, 2272), False, 'from django.db import migrations, models\n'), ((2404, 2497), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2420, 2497), False, 'from django.db import migrations, models\n'), ((2525, 2614), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""security.Groups"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'security.Groups')\n", (2542, 2614), False, 'from django.db import migrations, models\n'), ((2640, 2728), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""security.Hosts"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'security.Hosts')\n", (2657, 2728), False, 'from django.db import migrations, models\n')]
|
# Author: <NAME>
# Imperial College London
# August, 2015
#
# see license file in project root directory
import os
import time
import shutil
from reportlab.pdfgen import canvas
import malpem.mytools
def take_screenshot(output_file, parameters):
# DEFINITIONS
binary_display = os.path.join(malpem.mytools.__malpem_path__, "lib", "irtk", "display")
png_na = os.path.join(malpem.mytools.__malpem_path__, "etc", "screenshotNA.png")
# END DEFINITIONS
task_name = "Taking Screenshot"
start_time = malpem.mytools.start_task(task_name)
parameters += " -offscreen " + output_file
malpem.mytools.execute_cmd(binary_display, parameters, "")
if not os.path.isfile(output_file):
print("WARNING: Couldn't create screenshot (" + output_file + "): perhaps offscreen rendering failed")
shutil.copyfile(png_na, output_file)
malpem.mytools.ensure_file(output_file, "")
malpem.mytools.finished_task(start_time, task_name)
def calculate_volume(input_seg, output_file):
# DEFINITIONS
binary_volumes = os.path.join(malpem.mytools.__malpem_path__, "lib", "irtk", "cl_compute_volume")
# END DEFINITIONS
task_name = "Calculating volumes"
start_time = malpem.mytools.start_task(task_name)
malpem.mytools.execute_cmd(binary_volumes, input_seg + " > " + output_file, "")
malpem.mytools.ensure_file(output_file, "")
malpem.mytools.finished_task(start_time, task_name)
def create_report(input_file, input_mask, input_seg_malpem, output_report, output_dir):
# DEFINITIONS
structure_names = os.path.join(malpem.mytools.__malpem_path__, "etc", "nmm_info.csv")
structure_lut = os.path.join(malpem.mytools.__malpem_path__, "etc", "lut.csv")
# PAPER SIZE
left_start = 50
top_start = 800
right_stop = 550
# NMM STRUCTURE IDs FOR RESPECTIVE TISSUE CLASSES
vent_list = [1, 2, 21, 22, 23, 24]
cgm_list = range(41, 139, 1)
dgm_list = range(1, 41, 1)
wm_list = [12, 13, 16, 17]
other_list = [14, 15, 18, 33, 34]
for i in wm_list:
dgm_list.remove(i)
for i in vent_list:
dgm_list.remove(i)
for i in other_list:
dgm_list.remove(i)
total_brain_string = "[1-138]"
vent_string = malpem.mytools.get_id_string(vent_list)
cgm_string = malpem.mytools.get_id_string(cgm_list)
dgm_string = malpem.mytools.get_id_string(dgm_list)
wm_string = malpem.mytools.get_id_string(wm_list)
other_string = malpem.mytools.get_id_string(other_list)
# END DEFINITIONS
# CALCULATE STRUCTURAL VOLUMES
report_dir = os.path.join(output_dir, "report")
malpem.mytools.check_ex_dir(report_dir)
malpem_volume_file = os.path.join(report_dir, malpem.mytools.basename(output_report) + "_MALPEM_raw.csv")
malpem_report_file = os.path.join(report_dir, malpem.mytools.basename(output_report) + "_MALPEM_Report.csv")
calculate_volume(input_seg_malpem, malpem_volume_file)
f = open(structure_names)
s_id = []
s_short = []
s_long = []
s_side = []
s_volumes = []
for row in f:
arr = row.split(',')
s_id.append(arr[0])
s_short.append(arr[1])
s_long.append(arr[2])
s_side.append(arr[3])
f.close()
f = open(malpem_volume_file)
for row in f:
arr = row.split(',')
for i in range(len(arr)):
s_volumes.append(arr[i].lstrip())
f.close()
total_brain_volume = 0
cgm_volume = 0
dgm_volume = 0
vent_volume = 0
wm_volume = 0
other_volume = 0
for i in range(1, 139, 1):
if i in vent_list:
vent_volume += float(s_volumes[i])
if i in cgm_list:
cgm_volume += float(s_volumes[i])
if i in dgm_list:
dgm_volume += float(s_volumes[i])
if i in wm_list:
wm_volume += float(s_volumes[i])
if i in other_list:
other_volume += float(s_volumes[i])
total_brain_volume = other_volume + vent_volume + cgm_volume + dgm_volume + wm_volume
# END
# TAKE RELEVANT SCREENSHOTS
screenshots_malpem = []
screenshots_malpem.append(os.path.join(report_dir, malpem.mytools.basename(output_report) + "_MALPEM_xy.png"))
screenshots_malpem.append(os.path.join(report_dir, malpem.mytools.basename(output_report) + "_MALPEM_xz.png"))
screenshots_malpem.append(os.path.join(report_dir, malpem.mytools.basename(output_report) + "_MALPEM_yz.png"))
screenshots_mask = []
screenshots_mask.append(os.path.join(report_dir, malpem.mytools.basename(output_report) + "_mask_xy.png"))
screenshots_mask.append(os.path.join(report_dir, malpem.mytools.basename(output_report) + "_mask_xz.png"))
screenshots_mask.append(os.path.join(report_dir, malpem.mytools.basename(output_report) + "_mask_yz.png"))
take_screenshot(screenshots_malpem[0], input_file + " -seg " + input_seg_malpem + " -lut " + structure_lut + " -xy -res 2")
take_screenshot(screenshots_malpem[1], input_file + " -seg " + input_seg_malpem + " -lut " + structure_lut + " -xz -res 2")
take_screenshot(screenshots_malpem[2], input_file + " -seg " + input_seg_malpem + " -lut " + structure_lut + " -yz -res 2")
take_screenshot(screenshots_mask[0], input_file + " " + input_mask + " -scontour -xy -res 2")
take_screenshot(screenshots_mask[1], input_file + " " + input_mask + " -scontour -xz -res 2")
take_screenshot(screenshots_mask[2], input_file + " " + input_mask + " -scontour -yz -res 2")
# END
# CREATE CSV FILES
f = open(malpem_report_file, 'wb+')
f.write("ID,Structure,Volume [ml]\n")
f.write(total_brain_string + "," + "TotalBrain" + "," + str(total_brain_volume/1000) + "\n")
f.write(vent_string + "," + "Ventricle" + "," + str(vent_volume/1000) + "\n")
f.write(dgm_string + "," + "NonCortical" + "," + str(dgm_volume/1000) + "\n")
f.write(cgm_string + "," + "Cortical" + "," + str(cgm_volume/1000) + "\n")
f.write(wm_string + "," + "WhiteMatter" + "," + str(wm_volume/1000) + "\n")
f.write(other_string + "," + "Other" + "," + str(other_volume/1000) + "\n")
for i in range(0, 139, 1):
f.write(str(i) + "," + str(s_short[i]) + "," + str(float(s_volumes[i])/1000) + "\n")
f.close()
# END
## START CREATING ACTUAL REPORT
c = canvas.Canvas(output_report)
c.line(left_start, top_start, right_stop, top_start)
c.drawString(left_start, top_start-10, "Report based on MALPEM pipeline")
c.line(left_start, top_start-15, right_stop, top_start-15)
c.drawString(left_start, top_start-30, "File: " + input_file)
c.drawString(left_start, top_start-50, "Date/Time: " + time.strftime("%c"))
c.drawString(left_start, top_start-80, "Volumetric Summary")
c.drawString(left_start + 150, top_start-80, "[ml]")
c.drawString(left_start + 230, top_start-80, "rel. to total")
c.drawString(left_start + 310, top_start-80, "IDs")
c.line(left_start, top_start-85, right_stop, top_start-85)
c.drawString(left_start, top_start-100, "total brain volume: ")
c.drawString(left_start + 150, top_start-100, str(total_brain_volume/1000))
c.drawString(left_start + 230, top_start-100, str(round(100*total_brain_volume/total_brain_volume, 2)) + "%")
c.drawString(left_start + 310, top_start-100, total_brain_string)
c.drawString(left_start, top_start-120, "ventricle volume: ")
c.drawString(left_start + 150, top_start-120, str(vent_volume/1000))
c.drawString(left_start + 230, top_start-120, str(round(100*vent_volume/total_brain_volume, 2)) + "%")
c.drawString(left_start + 310, top_start-120, vent_string)
c.drawString(left_start, top_start-140, "noncortical GM volume: ")
c.drawString(left_start + 150, top_start-140, str(dgm_volume/1000))
c.drawString(left_start + 230, top_start-140, str(round(100*dgm_volume/total_brain_volume, 2)) + "%")
c.drawString(left_start + 310, top_start-140, dgm_string)
c.drawString(left_start, top_start-160, "cortical GM volume: ")
c.drawString(left_start + 150, top_start-160, str(cgm_volume/1000))
c.drawString(left_start + 230, top_start-160, str(round(100*cgm_volume/total_brain_volume, 2)) + "%")
c.drawString(left_start + 310, top_start-160, cgm_string)
c.drawString(left_start, top_start-180, "white matter volume: ")
c.drawString(left_start + 150, top_start-180, str(wm_volume/1000))
c.drawString(left_start + 230, top_start-180, str(round(100*wm_volume/total_brain_volume, 2)) + "%")
c.drawString(left_start + 310, top_start-180, wm_string)
c.drawString(left_start, top_start-200, "other volume: ")
c.drawString(left_start + 150, top_start-200, str(other_volume/1000))
c.drawString(left_start + 230, top_start-200, str(round(100*other_volume/total_brain_volume, 2)) + "%")
c.drawString(left_start + 310, top_start-200, other_string)
img_width = (right_stop-left_start-10)/3
c.drawString(left_start, top_start-240, "brain extraction (pincram)")
c.drawInlineImage(screenshots_mask[0], left_start, top_start-240-5-img_width, img_width, img_width)
c.drawInlineImage(screenshots_mask[1], left_start+165, top_start-240-5-img_width, img_width, img_width)
c.drawInlineImage(screenshots_mask[2], left_start+330, top_start-240-5-img_width, img_width, img_width)
c.drawString(left_start, top_start-240-1*(20+img_width), "whole brain segmentation (MALPEM with "
"Neuromorphometrics atlas)")
c.drawInlineImage(screenshots_malpem[0], left_start, top_start-240-1*(20+img_width)-5-img_width, img_width, img_width)
c.drawInlineImage(screenshots_malpem[1], left_start+165, top_start-240-1*(20+img_width)-5-img_width, img_width, img_width)
c.drawInlineImage(screenshots_malpem[2], left_start+330, top_start-240-1*(20+img_width)-5-img_width, img_width, img_width)
c.showPage()
cur_line = 0
cur_col = 0
for i in range(len(s_volumes)):
if i == 0:
c.drawString(left_start, top_start-10, "Neuromorphometrics Volumes [ml]")
c.line(left_start, top_start-15, right_stop, top_start-15)
cur_line = 2
cur_col = 0
if i == 139:
c.showPage()
c.line(left_start, top_start-15, right_stop, top_start-15)
cur_line = 2
cur_col = 0
c.drawString(left_start + cur_col * 280, top_start-10-cur_line*10, "(" + s_id[i] + ") ")
c.drawString(left_start + cur_col * 280 + 30, top_start-10-cur_line*10, str(float(s_volumes[i])/1000))
c.drawString(left_start + cur_col * 280 + 90, top_start-10-cur_line*10, "(" + s_short[i] + ")")
cur_line += 1
if cur_line > 72:
if cur_col >= 1:
cur_col = 0
c.showPage()
else:
cur_col += 1
cur_line = 2
cur_line = 0
for i in range(139):
if i == 0:
c.showPage()
c.drawString(left_start, top_start-10, "Dictionary of Neuromorphometrics Structure Names "
"(remapped IDs from original Atlas)")
c.line(left_start, top_start-15, right_stop, top_start-15)
cur_line += 2
c.line(left_start, top_start-10-cur_line*10+10, right_stop, top_start-10-cur_line*10+10)
c.drawString(left_start, top_start-10-cur_line*10, "Non-Cortical Structures")
c.line(left_start, top_start-10-cur_line*10-5, right_stop, top_start-10-cur_line*10-5)
cur_line += 2
if i == 41:
cur_line += 1
c.line(left_start, top_start-10-cur_line*10+10, right_stop, top_start-10-cur_line*10+10)
c.drawString(left_start, top_start-10-cur_line*10, "Cortical Structures")
c.line(left_start, top_start-10-cur_line*10-5, right_stop, top_start-10-cur_line*10-5)
cur_line += 2
if i == 139:
if cur_line > 0:
c.showPage()
cur_line = 0
c.drawString(left_start, top_start-10-cur_line*10, s_id[i])
c.drawString(left_start+30, top_start-10-cur_line*10, s_short[i])
if i > 40:
c.drawString(left_start+130, top_start-10-cur_line*10, s_long[i])
cur_line += 1
if cur_line > 72:
c.showPage()
cur_line = 0
c.save()
|
[
"time.strftime",
"reportlab.pdfgen.canvas.Canvas",
"os.path.isfile",
"shutil.copyfile",
"os.path.join"
] |
[((308, 378), 'os.path.join', 'os.path.join', (['malpem.mytools.__malpem_path__', '"""lib"""', '"""irtk"""', '"""display"""'], {}), "(malpem.mytools.__malpem_path__, 'lib', 'irtk', 'display')\n", (320, 378), False, 'import os\n'), ((392, 463), 'os.path.join', 'os.path.join', (['malpem.mytools.__malpem_path__', '"""etc"""', '"""screenshotNA.png"""'], {}), "(malpem.mytools.__malpem_path__, 'etc', 'screenshotNA.png')\n", (404, 463), False, 'import os\n'), ((1069, 1154), 'os.path.join', 'os.path.join', (['malpem.mytools.__malpem_path__', '"""lib"""', '"""irtk"""', '"""cl_compute_volume"""'], {}), "(malpem.mytools.__malpem_path__, 'lib', 'irtk', 'cl_compute_volume'\n )\n", (1081, 1154), False, 'import os\n'), ((1577, 1644), 'os.path.join', 'os.path.join', (['malpem.mytools.__malpem_path__', '"""etc"""', '"""nmm_info.csv"""'], {}), "(malpem.mytools.__malpem_path__, 'etc', 'nmm_info.csv')\n", (1589, 1644), False, 'import os\n'), ((1665, 1727), 'os.path.join', 'os.path.join', (['malpem.mytools.__malpem_path__', '"""etc"""', '"""lut.csv"""'], {}), "(malpem.mytools.__malpem_path__, 'etc', 'lut.csv')\n", (1677, 1727), False, 'import os\n'), ((2573, 2607), 'os.path.join', 'os.path.join', (['output_dir', '"""report"""'], {}), "(output_dir, 'report')\n", (2585, 2607), False, 'import os\n'), ((6249, 6277), 'reportlab.pdfgen.canvas.Canvas', 'canvas.Canvas', (['output_report'], {}), '(output_report)\n', (6262, 6277), False, 'from reportlab.pdfgen import canvas\n'), ((696, 723), 'os.path.isfile', 'os.path.isfile', (['output_file'], {}), '(output_file)\n', (710, 723), False, 'import os\n'), ((844, 880), 'shutil.copyfile', 'shutil.copyfile', (['png_na', 'output_file'], {}), '(png_na, output_file)\n', (859, 880), False, 'import shutil\n'), ((6601, 6620), 'time.strftime', 'time.strftime', (['"""%c"""'], {}), "('%c')\n", (6614, 6620), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-17 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
from calvin.utilities.calvinlogger import get_actor_logger
_log = get_actor_logger(__name__)
class ImageSource(Actor):
"""
When token on input, get an image.
Inputs:
trigger: anything
Outputs:
b64image: generated image
"""
@manage(exclude=["_cam"])
def init(self):
self.setup()
def setup(self):
self._cam = calvinsys.open(self, "image.source")
def did_migrate(self):
self.setup()
def will_end(self):
calvinsys.close(self._cam)
@stateguard(lambda self: calvinsys.can_read(self._cam))
@condition(action_output=['b64image'])
def send_image(self):
image = calvinsys.read(self._cam)
return (image, )
@stateguard(lambda self: calvinsys.can_write(self._cam))
@condition(action_input=['trigger'])
def fetch_image(self, trigger):
calvinsys.write(self._cam, None)
action_priority = (fetch_image, send_image)
requires = ['image.source']
test_calvinsys = {'image.source': {'read': [1,0,1,0,0,1,0,1],
'write': [None, None, None, None]}}
test_set = [
{
'inports': {'trigger': [True, 1, "a", 0]},
'outports': {'b64image': [1,0,1,0,0,1,0,1]}
}
]
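# A CalvinScript deployment sketch (illustrative only; the actor namespaces
# and the trigger actor used here are assumptions, adjust them to the actors
# actually installed in the store):
#
#   tick : std.Trigger(tick=1, data=true)
#   cam  : media.ImageSource()
#   tick.data > cam.trigger
#   cam.b64image > ...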
|
[
"calvin.actor.actor.calvinsys.write",
"calvin.actor.actor.calvinsys.can_write",
"calvin.actor.actor.calvinsys.open",
"calvin.actor.actor.manage",
"calvin.utilities.calvinlogger.get_actor_logger",
"calvin.actor.actor.calvinsys.read",
"calvin.actor.actor.calvinsys.close",
"calvin.actor.actor.calvinsys.can_read",
"calvin.actor.actor.condition"
] |
[((755, 781), 'calvin.utilities.calvinlogger.get_actor_logger', 'get_actor_logger', (['__name__'], {}), '(__name__)\n', (771, 781), False, 'from calvin.utilities.calvinlogger import get_actor_logger\n'), ((953, 977), 'calvin.actor.actor.manage', 'manage', ([], {'exclude': "['_cam']"}), "(exclude=['_cam'])\n", (959, 977), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n'), ((1273, 1310), 'calvin.actor.actor.condition', 'condition', ([], {'action_output': "['b64image']"}), "(action_output=['b64image'])\n", (1282, 1310), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n'), ((1471, 1506), 'calvin.actor.actor.condition', 'condition', ([], {'action_input': "['trigger']"}), "(action_input=['trigger'])\n", (1480, 1506), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n'), ((1061, 1097), 'calvin.actor.actor.calvinsys.open', 'calvinsys.open', (['self', '"""image.source"""'], {}), "(self, 'image.source')\n", (1075, 1097), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n'), ((1180, 1206), 'calvin.actor.actor.calvinsys.close', 'calvinsys.close', (['self._cam'], {}), '(self._cam)\n', (1195, 1206), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n'), ((1353, 1378), 'calvin.actor.actor.calvinsys.read', 'calvinsys.read', (['self._cam'], {}), '(self._cam)\n', (1367, 1378), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n'), ((1551, 1583), 'calvin.actor.actor.calvinsys.write', 'calvinsys.write', (['self._cam', 'None'], {}), '(self._cam, None)\n', (1566, 1583), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n'), ((1237, 1266), 'calvin.actor.actor.calvinsys.can_read', 'calvinsys.can_read', (['self._cam'], {}), '(self._cam)\n', (1255, 1266), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n'), ((1434, 1464), 'calvin.actor.actor.calvinsys.can_write', 'calvinsys.can_write', (['self._cam'], {}), '(self._cam)\n', (1453, 1464), False, 'from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys\n')]
|
from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
import glob
import os
import textwrap
required_conan_version = ">=1.43.0"
class CapnprotoConan(ConanFile):
name = "capnproto"
description = "Cap'n Proto serialization/RPC system."
license = "MIT"
topics = ("capnproto", "serialization", "rpc")
homepage = "https://capnproto.org"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_openssl": [True, False],
"with_zlib": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_openssl": True,
"with_zlib": True,
}
generators = "cmake", "cmake_find_package"
_cmake = None
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
@property
def _is_msvc(self):
return str(self.settings.compiler) in ["Visual Studio", "msvc"]
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "15",
"gcc": "5",
"clang": "5",
"apple-clang": "5.1",
}
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if tools.Version(self.version) < "0.8.0":
del self.options.with_zlib
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
if self.options.with_openssl:
self.requires("openssl/1.1.1m")
if self.options.get_safe("with_zlib"):
self.requires("zlib/1.2.11")
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 14)
minimum_version = self._minimum_compilers_version.get(str(self.settings.compiler), False)
if not minimum_version:
self.output.warn("Cap'n Proto requires C++14. Your compiler is unknown. Assuming it supports C++14.")
elif tools.Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration("Cap'n Proto requires C++14, which your compiler does not support.")
if self._is_msvc and self.options.shared:
raise ConanInvalidConfiguration("Cap'n Proto doesn't support shared libraries for Visual Studio")
if self.settings.os == "Windows" and tools.Version(self.version) < "0.8.0" and self.options.with_openssl:
raise ConanInvalidConfiguration("Cap'n Proto doesn't support OpenSSL on Windows pre 0.8.0")
def build_requirements(self):
if self.settings.os != "Windows":
self.build_requires("libtool/2.4.6")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["BUILD_TESTING"] = False
self._cmake.definitions["EXTERNAL_CAPNP"] = False
self._cmake.definitions["CAPNP_LITE"] = False
self._cmake.definitions["WITH_OPENSSL"] = self.options.with_openssl
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def _configure_autotools(self):
if self._autotools:
return self._autotools
args = [
"--enable-shared" if self.options.shared else "--disable-shared",
"--disable-static" if self.options.shared else "--enable-static",
"--with-openssl" if self.options.with_openssl else "--without-openssl",
"--enable-reflection",
]
if tools.Version(self.version) >= "0.8.0":
args.append("--with-zlib" if self.options.with_zlib else "--without-zlib")
self._autotools = AutoToolsBuildEnvironment(self)
# Fix rpath on macOS
if self.settings.os == "Macos":
self._autotools.link_flags.append("-Wl,-rpath,@loader_path/../lib")
self._autotools.configure(args=args)
return self._autotools
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.os == "Windows":
cmake = self._configure_cmake()
cmake.build()
else:
with tools.chdir(os.path.join(self._source_subfolder, "c++")):
self.run("{} -fiv".format(tools.get_env("AUTORECONF")))
# relocatable shared libs on macOS
tools.replace_in_file("configure", "-install_name \\$rpath/", "-install_name @rpath/")
# avoid SIP issues on macOS when dependencies are shared
if tools.is_apple_os(self.settings.os):
libpaths = ":".join(self.deps_cpp_info.lib_paths)
tools.replace_in_file(
"configure",
"#! /bin/sh\n",
"#! /bin/sh\nexport DYLD_LIBRARY_PATH={}:$DYLD_LIBRARY_PATH\n".format(libpaths),
)
autotools = self._configure_autotools()
autotools.make()
@property
def _cmake_folder(self):
return os.path.join("lib", "cmake", "CapnProto")
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
if self.settings.os == "Windows":
cmake = self._configure_cmake()
cmake.install()
else:
with tools.chdir(os.path.join(self._source_subfolder, "c++")):
autotools = self._configure_autotools()
autotools.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.la")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
for cmake_file in glob.glob(os.path.join(self.package_folder, self._cmake_folder, "*")):
if os.path.basename(cmake_file) != "CapnProtoMacros.cmake":
os.remove(cmake_file)
# inject mandatory variables so that CAPNP_GENERATE_CPP function can
# work in a robust way (build from source or from pre build package)
find_execs = textwrap.dedent("""\
if(CMAKE_CROSSCOMPILING)
find_program(CAPNP_EXECUTABLE capnp PATHS ENV PATH NO_DEFAULT_PATH)
find_program(CAPNPC_CXX_EXECUTABLE capnpc-c++ PATHS ENV PATH NO_DEFAULT_PATH)
endif()
if(NOT CAPNP_EXECUTABLE)
set(CAPNP_EXECUTABLE "${CMAKE_CURRENT_LIST_DIR}/../../../bin/capnp${CMAKE_EXECUTABLE_SUFFIX}")
endif()
if(NOT CAPNPC_CXX_EXECUTABLE)
set(CAPNPC_CXX_EXECUTABLE "${CMAKE_CURRENT_LIST_DIR}/../../../bin/capnpc-c++${CMAKE_EXECUTABLE_SUFFIX}")
endif()
set(CAPNP_INCLUDE_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}/../../../include")
function(CAPNP_GENERATE_CPP SOURCES HEADERS)
""")
tools.replace_in_file(os.path.join(self.package_folder, self._cmake_folder, "CapnProtoMacros.cmake"),
"function(CAPNP_GENERATE_CPP SOURCES HEADERS)",
find_execs)
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "CapnProto")
capnprotomacros = os.path.join(self._cmake_folder, "CapnProtoMacros.cmake")
self.cpp_info.set_property("cmake_build_modules", [capnprotomacros])
components = [
{"name": "capnp", "requires": ["kj"]},
{"name": "capnp-json", "requires": ["capnp", "kj"]},
{"name": "capnp-rpc", "requires": ["capnp", "kj", "kj-async"]},
{"name": "capnpc", "requires": ["capnp", "kj"]},
{"name": "kj", "requires": []},
{"name": "kj-async", "requires": ["kj"]},
{"name": "kj-http", "requires": ["kj", "kj-async"]},
{"name": "kj-test", "requires": ["kj"]},
]
if self.options.get_safe("with_zlib"):
components.append({"name": "kj-gzip", "requires": ["kj", "kj-async", "zlib::zlib"]})
if self.options.with_openssl:
components.append({"name": "kj-tls", "requires": ["kj", "kj-async", "openssl::openssl"]})
if tools.Version(self.version) >= "0.9.0":
components.append({
"name": "capnp-websocket",
"requires": ["capnp", "capnp-rpc", "kj-http", "kj-async", "kj"],
})
for component in components:
self._register_component(component)
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["capnpc"].system_libs = ["pthread"]
self.cpp_info.components["kj"].system_libs = ["pthread"]
self.cpp_info.components["kj-async"].system_libs = ["pthread"]
elif self.settings.os == "Windows":
self.cpp_info.components["kj-async"].system_libs = ["ws2_32"]
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH env var with: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.names["cmake_find_package"] = "CapnProto"
self.cpp_info.names["cmake_find_package_multi"] = "CapnProto"
self.cpp_info.components["kj"].build_modules = [capnprotomacros]
def _register_component(self, component):
name = component["name"]
self.cpp_info.components[name].set_property("cmake_target_name", "CapnProto::{}".format(name))
self.cpp_info.components[name].builddirs.append(self._cmake_folder)
self.cpp_info.components[name].set_property("pkg_config_name", name)
self.cpp_info.components[name].libs = [name]
self.cpp_info.components[name].requires = component["requires"]
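# A local-build sketch, not part of the recipe: with a conan v1 client
# (matching required_conan_version above) the package can be created with,
# for example:
#   conan create . capnproto/0.9.1@ -o capnproto:with_openssl=True
# The version and option values here are illustrative only.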
|
[
"conans.tools.get",
"textwrap.dedent",
"os.remove",
"conans.tools.replace_in_file",
"os.path.basename",
"conans.tools.Version",
"conans.tools.patch",
"conans.CMake",
"conans.tools.check_min_cppstd",
"conans.errors.ConanInvalidConfiguration",
"conans.tools.get_env",
"conans.AutoToolsBuildEnvironment",
"os.path.join",
"conans.tools.is_apple_os"
] |
[((3166, 3277), 'conans.tools.get', 'tools.get', ([], {'destination': 'self._source_subfolder', 'strip_root': '(True)'}), "(**self.conan_data['sources'][self.version], destination=self.\n    _source_subfolder, strip_root=True)\n", (3175, 3277), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((3401, 3412), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (3406, 3412), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((4317, 4348), 'conans.AutoToolsBuildEnvironment', 'AutoToolsBuildEnvironment', (['self'], {}), '(self)\n', (4342, 4348), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((5729, 5770), 'os.path.join', 'os.path.join', (['"""lib"""', '"""cmake"""', '"""CapnProto"""'], {}), "('lib', 'cmake', 'CapnProto')\n", (5741, 5770), False, 'import os\n'), ((6709, 7479), 'textwrap.dedent', 'textwrap.dedent', (['"""        if(CMAKE_CROSSCOMPILING)\n            find_program(CAPNP_EXECUTABLE capnp PATHS ENV PATH NO_DEFAULT_PATH)\n            find_program(CAPNPC_CXX_EXECUTABLE capnpc-c++ PATHS ENV PATH NO_DEFAULT_PATH)\n        endif()\n        if(NOT CAPNP_EXECUTABLE)\n            set(CAPNP_EXECUTABLE "${CMAKE_CURRENT_LIST_DIR}/../../../bin/capnp${CMAKE_EXECUTABLE_SUFFIX}")\n        endif()\n        if(NOT CAPNPC_CXX_EXECUTABLE)\n            set(CAPNPC_CXX_EXECUTABLE "${CMAKE_CURRENT_LIST_DIR}/../../../bin/capnpc-c++${CMAKE_EXECUTABLE_SUFFIX}")\n        endif()\n        set(CAPNP_INCLUDE_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}/../../../include")\n        function(CAPNP_GENERATE_CPP SOURCES HEADERS)\n        """'], {}), '(\n    """        if(CMAKE_CROSSCOMPILING)\n            find_program(CAPNP_EXECUTABLE capnp PATHS ENV PATH NO_DEFAULT_PATH)\n            find_program(CAPNPC_CXX_EXECUTABLE capnpc-c++ PATHS ENV PATH NO_DEFAULT_PATH)\n        endif()\n        if(NOT CAPNP_EXECUTABLE)\n            set(CAPNP_EXECUTABLE "${CMAKE_CURRENT_LIST_DIR}/../../../bin/capnp${CMAKE_EXECUTABLE_SUFFIX}")\n        endif()\n        if(NOT CAPNPC_CXX_EXECUTABLE)\n            set(CAPNPC_CXX_EXECUTABLE "${CMAKE_CURRENT_LIST_DIR}/../../../bin/capnpc-c++${CMAKE_EXECUTABLE_SUFFIX}")\n        endif()\n        set(CAPNP_INCLUDE_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}/../../../include")\n        function(CAPNP_GENERATE_CPP SOURCES HEADERS)\n        """\n    )\n', (6724, 7479), False, 'import textwrap\n'), ((7824, 7881), 'os.path.join', 'os.path.join', (['self._cmake_folder', '"""CapnProtoMacros.cmake"""'], {}), "(self._cmake_folder, 'CapnProtoMacros.cmake')\n", (7836, 7881), False, 'import os\n'), ((9463, 9503), 'os.path.join', 'os.path.join', (['self.package_folder', '"""bin"""'], {}), "(self.package_folder, 'bin')\n", (9475, 9503), False, 'import os\n'), ((1703, 1730), 'conans.tools.Version', 'tools.Version', (['self.version'], {}), '(self.version)\n', (1716, 1730), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((2163, 2195), 'conans.tools.check_min_cppstd', 'tools.check_min_cppstd', (['self', '(14)'], {}), '(self, 14)\n', (2185, 2195), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((2699, 2795), 'conans.errors.ConanInvalidConfiguration', 'ConanInvalidConfiguration', (['"""Cap\'n Proto doesn\'t support shared libraries for Visual Studio"""'], {}), '(\n    "Cap\'n Proto doesn\'t support shared libraries for Visual Studio")\n', (2724, 2795), False, 'from conans.errors import ConanInvalidConfiguration\n'), ((2923, 3013), 'conans.errors.ConanInvalidConfiguration', 'ConanInvalidConfiguration', (['"""Cap\'n Proto doesn\'t support OpenSSL on Windows pre 0.8.0"""'], {}), '(\n    "Cap\'n Proto doesn\'t support OpenSSL on Windows pre 0.8.0")\n', (2948, 3013), False, 'from conans.errors import ConanInvalidConfiguration\n'), ((4164, 4191), 'conans.tools.Version', 'tools.Version', (['self.version'], {}), '(self.version)\n', (4177, 4191), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((4687, 4707), 'conans.tools.patch', 'tools.patch', ([], {}), '(**patch)\n', (4698, 4707), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((6272, 6325), 'os.path.join', 'os.path.join', (['self.package_folder', '"""lib"""', '"""pkgconfig"""'], {}), "(self.package_folder, 'lib', 'pkgconfig')\n", (6284, 6325), False, 'import os\n'), ((6363, 6421), 'os.path.join', 'os.path.join', (['self.package_folder', 'self._cmake_folder', '"""*"""'], {}), "(self.package_folder, self._cmake_folder, '*')\n", (6375, 6421), False, 'import os\n'), ((7502, 7580), 'os.path.join', 'os.path.join', (['self.package_folder', 'self._cmake_folder', '"""CapnProtoMacros.cmake"""'], {}), "(self.package_folder, self._cmake_folder, 'CapnProtoMacros.cmake')\n", (7514, 7580), False, 'import os\n'), ((8757, 8784), 'conans.tools.Version', 'tools.Version', (['self.version'], {}), '(self.version)\n', (8770, 8784), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((2453, 2498), 'conans.tools.Version', 'tools.Version', (['self.settings.compiler.version'], {}), '(self.settings.compiler.version)\n', (2466, 2498), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((2536, 2635), 'conans.errors.ConanInvalidConfiguration', 'ConanInvalidConfiguration', (['"""Cap\'n Proto requires C++14, which your compiler does not support."""'], {}), '(\n    "Cap\'n Proto requires C++14, which your compiler does not support.")\n', (2561, 2635), False, 'from conans.errors import ConanInvalidConfiguration\n'), ((2836, 2863), 'conans.tools.Version', 'tools.Version', (['self.version'], {}), '(self.version)\n', (2849, 2863), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((5048, 5138), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['"""configure"""', '"""-install_name \\\\$rpath/"""', '"""-install_name @rpath/"""'], {}), "('configure', '-install_name \\\\$rpath/',\n    '-install_name @rpath/')\n", (5069, 5138), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((5227, 5262), 'conans.tools.is_apple_os', 'tools.is_apple_os', (['self.settings.os'], {}), '(self.settings.os)\n', (5244, 5262), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n'), ((6202, 6242), 'os.path.join', 'os.path.join', (['self.package_folder', '"""lib"""'], {}), "(self.package_folder, 'lib')\n", (6214, 6242), False, 'import os\n'), ((6439, 6467), 'os.path.basename', 'os.path.basename', (['cmake_file'], {}), '(cmake_file)\n', (6455, 6467), False, 'import os\n'), ((6512, 6533), 'os.remove', 'os.remove', (['cmake_file'], {}), '(cmake_file)\n', (6521, 6533), False, 'import os\n'), ((4863, 4906), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""c++"""'], {}), "(self._source_subfolder, 'c++')\n", (4875, 4906), False, 'import os\n'), ((6025, 6068), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""c++"""'], {}), "(self._source_subfolder, 'c++')\n", (6037, 6068), False, 'import os\n'), ((4951, 4978), 'conans.tools.get_env', 'tools.get_env', (['"""AUTORECONF"""'], {}), "('AUTORECONF')\n", (4964, 4978), False, 'from conans import ConanFile, CMake, tools, AutoToolsBuildEnvironment\n')]
|
import dataclasses
from enum import Enum
from random import randint, shuffle
from typing import List
class NotGoatDoorException(BaseException):
pass
@dataclasses.dataclass
class Statistics:
wins: int
loses: int
overall: int
win_ratio: float
lose_ratio: float
class MontyHallChoices(Enum):
GOAT_1 = 'GOAT 1'
GOAT_2 = 'GOAT 2'
AUTO = 'Audi RS6'
@classmethod
def choices(cls) -> List[str]:
return list(i.value for i in cls)
class MontyHallSimulator:
def __init__(self, iterations: int = 10000) -> None:
self.choices = MontyHallChoices.choices()
self.door_amount = 3
self.counter = 0
self.iterations = iterations
self.is_turned = False
self.random_choice = False
def startup(self) -> None:
for _ in range(self.iterations):
random_index = randint(1, self.door_amount)
choice = self.choices.pop(random_index-1)
random_index_between_two = randint(1, len(self.choices))
if self.choices[random_index_between_two-1] == MontyHallChoices.AUTO.value:
stayed = self.choices.pop(random_index_between_two - 1)
door_with_goat = self.choices[0]
else:
door_with_goat = self.choices.pop(random_index_between_two - 1)
stayed = self.choices[0]
if door_with_goat not in [MontyHallChoices.GOAT_1.value, MontyHallChoices.GOAT_2.value]:
raise NotGoatDoorException
            # Record both options before any switch; building this list after
            # reassigning choice would leave random mode choosing between two
            # copies of the same door.
            random_list = [choice, stayed]
            if self.is_turned:
                choice = stayed
            if self.random_choice:
                idx = randint(0, 1)
                choice = random_list[idx]
if choice == MontyHallChoices.AUTO.value:
self.counter += 1
self.__reset()
def get_statistics(self) -> Statistics:
loses = self.iterations - self.counter
win_ratio = self.counter / self.iterations
lose_ratio = loses / self.iterations
return Statistics(
overall=self.iterations,
wins=self.counter,
loses=loses,
win_ratio=win_ratio,
lose_ratio=lose_ratio,
)
def __reset(self) -> None:
self.choices = MontyHallChoices.choices()
shuffle(self.choices)
def reset_attributes(self) -> None:
self.counter = 0
self.__reset()
def switch_mode(self) -> None:
self.is_turned = not self.is_turned
def random_choice_mode(self) -> None:
self.random_choice = not self.random_choice
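# A minimal usage sketch (added for illustration, not part of the original
# module): play the same number of games staying and then switching; the win
# ratios should land near 1/3 and 2/3 respectively.
if __name__ == '__main__':
    simulator = MontyHallSimulator(iterations=10000)
    simulator.startup()                      # always stay with the first pick
    print('stay:  ', simulator.get_statistics())
    simulator.reset_attributes()
    simulator.switch_mode()                  # always switch doors
    simulator.startup()
    print('switch:', simulator.get_statistics())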
|
[
"random.shuffle",
"random.randint"
] |
[((2332, 2353), 'random.shuffle', 'shuffle', (['self.choices'], {}), '(self.choices)\n', (2339, 2353), False, 'from random import randint, shuffle\n'), ((872, 900), 'random.randint', 'randint', (['(1)', 'self.door_amount'], {}), '(1, self.door_amount)\n', (879, 900), False, 'from random import randint, shuffle\n'), ((1683, 1696), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1690, 1696), False, 'from random import randint, shuffle\n')]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import functools
from inspect import signature
def expected_type(*type_args, **type_kwargs):
def decorator(fn):
@functools.wraps(fn)
def checker(*args, **kwargs):
fn_signature = signature(fn)
input_value = fn_signature.bind(*args, **kwargs).arguments
require_type = fn_signature.bind_partial(*type_args, **type_kwargs).arguments
for name, value in input_value.items():
if name in require_type and not isinstance(value, require_type[name]):
raise TypeError('%s must be %s, but got %s' % (name, require_type[name], type(value)))
return fn(*args, **kwargs)
return checker
return decorator
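# A small usage sketch (demo() is hypothetical, added only to illustrate the
# decorator): type specs are bound to parameter names positionally and by
# keyword, then each call argument is checked with isinstance.
#
# @expected_type(int, str, z=float)
# def demo(x, y, z=1.0):
#     return '%s/%d/%.1f' % (y, x, z)
#
# demo(2, 'a')      # ok
# demo('2', 'a')    # TypeError: x must be <class 'int'>, but got <class 'str'>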
|
[
"inspect.signature",
"functools.wraps"
] |
[((174, 193), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (189, 193), False, 'import functools\n'), ((259, 272), 'inspect.signature', 'signature', (['fn'], {}), '(fn)\n', (268, 272), False, 'from inspect import signature\n')]
|
"""Flask plugin. Includes a path helper that allows you to pass a view
function to `path`. Inspects URL rules and view docstrings.
Passing a view function::
from flask import Flask
app = Flask(__name__)
@app.route('/gists/<gist_id>')
def gist_detail(gist_id):
'''Gist detail view.
---
x-extension: metadata
get:
responses:
200:
schema:
$ref: '#/definitions/Gist'
'''
return 'detail for gist {}'.format(gist_id)
with app.test_request_context():
spec.path(view=gist_detail)
print(spec.to_dict()['paths'])
# {'/gists/{gist_id}': {'get': {'responses': {200: {'schema': {'$ref': '#/definitions/Gist'}}}},
# 'x-extension': 'metadata'}}
Passing a method view function::
from flask import Flask
from flask.views import MethodView
app = Flask(__name__)
class GistApi(MethodView):
'''Gist API.
---
x-extension: metadata
'''
def get(self):
'''Gist view
---
responses:
200:
schema:
$ref: '#/definitions/Gist'
'''
pass
def post(self):
pass
method_view = GistApi.as_view('gists')
app.add_url_rule("/gists", view_func=method_view)
with app.test_request_context():
spec.path(view=method_view)
# Alternatively, pass in an app object as a kwarg
# spec.path(view=method_view, app=app)
print(spec.to_dict()['paths'])
# {'/gists': {'get': {'responses': {200: {'schema': {'$ref': '#/definitions/Gist'}}}},
# 'post': {},
# 'x-extension': 'metadata'}}
"""
import re
from flask import current_app
from flask.views import MethodView
from apispec import BasePlugin, yaml_utils
from apispec.exceptions import APISpecError
# from flask-restplus
RE_URL = re.compile(r"<(?:[^:<>]+:)?([^<>]+)>")
class FlaskPlugin(BasePlugin):
"""APISpec plugin for Flask"""
@staticmethod
def flaskpath2openapi(path):
"""Convert a Flask URL rule to an OpenAPI-compliant path.
:param str path: Flask path template.
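        Example: '/gists/<int:gist_id>' becomes '/gists/{gist_id}'.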
"""
return RE_URL.sub(r"{\1}", path)
@staticmethod
def _rule_for_view(view, app=None):
if app is None:
app = current_app
view_funcs = app.view_functions
endpoint = None
for ept, view_func in view_funcs.items():
if view_func == view:
endpoint = ept
if not endpoint:
raise APISpecError(f"Could not find endpoint for view {view}")
# WARNING: Assume 1 rule per view function for now
rule = app.url_map._rules_by_endpoint[endpoint][0]
return rule
def path_helper(self, operations, *, view, app=None, **kwargs):
"""Path helper that allows passing a Flask view function."""
rule = self._rule_for_view(view, app=app)
operations.update(yaml_utils.load_operations_from_docstring(view.__doc__))
if hasattr(view, "view_class") and issubclass(view.view_class, MethodView):
for method in view.methods:
if method in rule.methods:
method_name = method.lower()
method = getattr(view.view_class, method_name)
operations[method_name] = yaml_utils.load_yaml_from_docstring(
method.__doc__
)
return self.flaskpath2openapi(rule.rule)
|
[
"apispec.exceptions.APISpecError",
"apispec.yaml_utils.load_yaml_from_docstring",
"apispec.yaml_utils.load_operations_from_docstring",
"re.compile"
] |
[((1976, 2013), 're.compile', 're.compile', (['"""<(?:[^:<>]+:)?([^<>]+)>"""'], {}), "('<(?:[^:<>]+:)?([^<>]+)>')\n", (1986, 2013), False, 'import re\n'), ((2637, 2693), 'apispec.exceptions.APISpecError', 'APISpecError', (['f"""Could not find endpoint for view {view}"""'], {}), "(f'Could not find endpoint for view {view}')\n", (2649, 2693), False, 'from apispec.exceptions import APISpecError\n'), ((3047, 3102), 'apispec.yaml_utils.load_operations_from_docstring', 'yaml_utils.load_operations_from_docstring', (['view.__doc__'], {}), '(view.__doc__)\n', (3088, 3102), False, 'from apispec import BasePlugin, yaml_utils\n'), ((3433, 3484), 'apispec.yaml_utils.load_yaml_from_docstring', 'yaml_utils.load_yaml_from_docstring', (['method.__doc__'], {}), '(method.__doc__)\n', (3468, 3484), False, 'from apispec import BasePlugin, yaml_utils\n')]
|
# stdlib imports
import os
from collections import OrderedDict
from datetime import datetime, timedelta
import logging
# third party imports
import numpy as np
from obspy.core.trace import Stats
# local imports
from gmprocess.io.seedname import get_channel_name, get_units_type
from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS
from gmprocess.core.stationstream import StationStream
from gmprocess.io.utils import is_binary
DATE_FMT = "%Y/%m/%d-%H:%M:%S.%f"
GMT_OFFSET = 8 * 3600 # CWB data is in local time, GMT +8
HDR_ROWS = 22
COLWIDTH = 10
NCOLS = 4
def is_cwb(filename, config=None):
"""Check to see if file is a Taiwan Central Weather Bureau strong motion
file.
Args:
filename (str):
Path to possible CWB data file.
config (dict):
Dictionary containing configuration.
Returns:
bool: True if CWB, False otherwise.
"""
logging.debug("Checking if format is cwb.")
if is_binary(filename):
return False
try:
f = open(filename, "rt", encoding="utf-8")
line = f.readline()
f.close()
if line.startswith("#Earthquake Information"):
return True
except UnicodeDecodeError:
return False
return False
def read_cwb(filename, config=None, **kwargs):
"""Read Taiwan Central Weather Bureau strong motion file.
Args:
filename (str):
Path to possible CWB data file.
config (dict):
Dictionary containing configuration.
kwargs (ref):
Other arguments will be ignored.
Returns:
Stream: Obspy Stream containing three channels of acceleration
data (cm/s**2).
"""
logging.debug("Starting read_cwb.")
if not is_cwb(filename, config):
raise Exception(f"{filename} is not a valid CWB strong motion data file.")
f = open(filename, "rt", encoding="utf-8")
# according to the powers that defined the Network.Station.Channel.Location
# "standard", Location is a two character field. Most data providers,
# including CWB here, don't provide this. We'll flag it as "--".
data = np.genfromtxt(
filename, skip_header=HDR_ROWS, delimiter=[COLWIDTH] * NCOLS
) # time, Z, NS, EW
hdr = _get_header_info(f, data)
f.close()
head, tail = os.path.split(filename)
hdr["standard"]["source_file"] = tail or os.path.basename(head)
hdr_z = hdr.copy()
hdr_z["channel"] = get_channel_name(
hdr["sampling_rate"], is_acceleration=True, is_vertical=True, is_north=False
)
hdr_z["standard"]["horizontal_orientation"] = np.nan
hdr_z["standard"]["vertical_orientation"] = np.nan
hdr_z["standard"]["units_type"] = get_units_type(hdr_z["channel"])
hdr_h1 = hdr.copy()
hdr_h1["channel"] = get_channel_name(
hdr["sampling_rate"], is_acceleration=True, is_vertical=False, is_north=True
)
hdr_h1["standard"]["horizontal_orientation"] = np.nan
hdr_h1["standard"]["vertical_orientation"] = np.nan
hdr_h1["standard"]["units_type"] = get_units_type(hdr_h1["channel"])
hdr_h2 = hdr.copy()
hdr_h2["channel"] = get_channel_name(
hdr["sampling_rate"], is_acceleration=True, is_vertical=False, is_north=False
)
hdr_h2["standard"]["horizontal_orientation"] = np.nan
hdr_h2["standard"]["vertical_orientation"] = np.nan
hdr_h2["standard"]["units_type"] = get_units_type(hdr_h2["channel"])
stats_z = Stats(hdr_z)
stats_h1 = Stats(hdr_h1)
stats_h2 = Stats(hdr_h2)
response = {"input_units": "counts", "output_units": "cm/s^2"}
trace_z = StationTrace(data=data[:, 1], header=stats_z)
trace_z.setProvenance("remove_response", response)
trace_h1 = StationTrace(data=data[:, 2], header=stats_h1)
trace_h1.setProvenance("remove_response", response)
trace_h2 = StationTrace(data=data[:, 3], header=stats_h2)
trace_h2.setProvenance("remove_response", response)
stream = StationStream([trace_z, trace_h1, trace_h2])
return [stream]
def _get_header_info(file, data):
"""Return stats structure from various headers.
Output is a dictionary like this:
- network (str): Always TW
- station (str)
- channel (str)
- location (str): Default is '--'
- starttime (datetime)
- duration (float)
- sampling_rate (float)
- delta (float)
- npts (int)
- coordinates:
- latitude (float)
- longitude (float)
- elevation (float): Default is np.nan
- standard (Defaults are either np.nan or '')
- horizontal_orientation (float): Rotation from north (degrees)
- instrument_period (float): Period of sensor (Hz)
- instrument_damping (float): Fraction of critical
- process_time (datetime): Reported date of processing
- process_level: Either 'V0', 'V1', 'V2', or 'V3'
- station_name (str): Long form station description
- sensor_serial_number (str): Reported sensor serial
- instrument (str)
- comments (str): Processing comments
- structure_type (str)
- corner_frequency (float): Sensor corner frequency (Hz)
- units (str)
- source (str): Network source description
- source_format (str): Always cwb
- format_specific
- dc_offset_z (float)
- dc_offset_h1 (float)
- dc_offset_h2 (float)
Args:
file (TextIOWrapper): File object containing data
data (ndarray): Array of strong motion data
Returns:
dictionary: Dictionary of header/metadata information
"""
hdr = OrderedDict()
coordinates = {}
standard = {}
format_specific = {}
hdr["location"] = "--"
    while True:
        line = file.readline()
        if not line:
            # guard against malformed files missing the "#Data" terminator
            break
if line.startswith("#StationCode"):
hdr["station"] = line.split(":")[1].strip()
logging.debug(f"station: {hdr['station']}")
if line.startswith("#StationName"):
standard["station_name"] = line.split(":")[1].strip()
logging.debug(f"station_name: {standard['station_name']}")
if line.startswith("#StationLongitude"):
coordinates["longitude"] = float(line.split(":")[1].strip())
if line.startswith("#StationLatitude"):
coordinates["latitude"] = float(line.split(":")[1].strip())
if line.startswith("#StartTime"):
timestr = ":".join(line.split(":")[1:]).strip()
hdr["starttime"] = datetime.strptime(timestr, DATE_FMT)
if line.startswith("#RecordLength"):
hdr["duration"] = float(line.split(":")[1].strip())
if line.startswith("#SampleRate"):
hdr["sampling_rate"] = int(line.split(":")[1].strip())
if line.startswith("#InstrumentKind"):
standard["instrument"] = line.split(":")[1].strip()
if line.startswith("#AmplitudeMAX. U:"):
format_specific["dc_offset_z"] = float(line.split("~")[1])
if line.startswith("#AmplitudeMAX. N:"):
format_specific["dc_offset_h1"] = float(line.split("~")[1])
if line.startswith("#AmplitudeMAX. E:"):
format_specific["dc_offset_h2"] = float(line.split("~")[1])
if line.startswith("#Data"):
break
# correct start time to GMT
hdr["starttime"] = hdr["starttime"] - timedelta(seconds=GMT_OFFSET)
nrows, _ = data.shape
# Add some optional information to the header
hdr["network"] = "TW"
hdr["delta"] = 1 / hdr["sampling_rate"]
hdr["calib"] = 1.0
standard["units_type"] = "acc"
standard["units"] = "cm/s^2"
hdr["source"] = "Taiwan Central Weather Bureau"
hdr["npts"] = nrows
secs = int(data[-1, 0])
microsecs = int((data[-1, 0] - secs) * 1e6)
hdr["endtime"] = hdr["starttime"] + timedelta(seconds=secs, microseconds=microsecs)
# Set defaults
logging.warning("Setting elevation to 0.0")
coordinates["elevation"] = 0.0
if "longitude" not in coordinates:
coordinates["longitude"] = np.nan
if "latitude" not in coordinates:
coordinates["latitude"] = np.nan
standard["instrument_period"] = np.nan
standard["instrument_damping"] = np.nan
standard["process_time"] = ""
standard["process_level"] = PROCESS_LEVELS["V1"]
standard["sensor_serial_number"] = ""
standard["comments"] = ""
standard["structure_type"] = ""
standard["corner_frequency"] = np.nan
standard["source"] = (
"Taiwan Strong Motion Instrumentation Program " + "via Central Weather Bureau"
)
standard["source_format"] = "cwb"
# these fields can be used for instrument correction
# when data is in counts
standard["instrument_sensitivity"] = np.nan
standard["volts_to_counts"] = np.nan
if "station_name" not in standard:
standard["station_name"] = ""
if "instrument" not in standard:
standard["instrument"] = ""
if "dc_offset_z" not in format_specific:
format_specific["dc_offset_z"] = np.nan
if "dc_offset_h2" not in format_specific:
format_specific["dc_offset_h2"] = np.nan
if "dc_offset_h1" not in format_specific:
format_specific["dc_offset_h1"] = np.nan
# Set dictionary
hdr["standard"] = standard
hdr["coordinates"] = coordinates
hdr["format_specific"] = format_specific
return hdr
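# A usage sketch (the path is hypothetical): read_cwb() returns a list holding
# one StationStream with the vertical and two horizontal acceleration traces.
#
#   streams = read_cwb('/path/to/cwb_record.txt')
#   for trace in streams[0]:
#       print(trace.id, trace.stats.sampling_rate, trace.stats.npts)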
|
[
"logging.debug",
"gmprocess.core.stationtrace.StationTrace",
"obspy.core.trace.Stats",
"os.path.basename",
"logging.warning",
"gmprocess.core.stationstream.StationStream",
"gmprocess.io.seedname.get_channel_name",
"numpy.genfromtxt",
"datetime.datetime.strptime",
"gmprocess.io.utils.is_binary",
"datetime.timedelta",
"collections.OrderedDict",
"gmprocess.io.seedname.get_units_type",
"os.path.split"
] |
[((924, 967), 'logging.debug', 'logging.debug', (['"""Checking if format is cwb."""'], {}), "('Checking if format is cwb.')\n", (937, 967), False, 'import logging\n'), ((975, 994), 'gmprocess.io.utils.is_binary', 'is_binary', (['filename'], {}), '(filename)\n', (984, 994), False, 'from gmprocess.io.utils import is_binary\n'), ((1721, 1756), 'logging.debug', 'logging.debug', (['"""Starting read_cwb."""'], {}), "('Starting read_cwb.')\n", (1734, 1756), False, 'import logging\n'), ((2160, 2235), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'skip_header': 'HDR_ROWS', 'delimiter': '([COLWIDTH] * NCOLS)'}), '(filename, skip_header=HDR_ROWS, delimiter=[COLWIDTH] * NCOLS)\n', (2173, 2235), True, 'import numpy as np\n'), ((2338, 2361), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (2351, 2361), False, 'import os\n'), ((2477, 2576), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["hdr['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(True)', 'is_north': '(False)'}), "(hdr['sampling_rate'], is_acceleration=True, is_vertical=\n    True, is_north=False)\n", (2493, 2576), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((2736, 2768), 'gmprocess.io.seedname.get_units_type', 'get_units_type', (["hdr_z['channel']"], {}), "(hdr_z['channel'])\n", (2750, 2768), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((2818, 2917), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["hdr['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(False)', 'is_north': '(True)'}), "(hdr['sampling_rate'], is_acceleration=True, is_vertical=\n    False, is_north=True)\n", (2834, 2917), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((3080, 3113), 'gmprocess.io.seedname.get_units_type', 'get_units_type', (["hdr_h1['channel']"], {}), "(hdr_h1['channel'])\n", (3094, 3113), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((3163, 3263), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["hdr['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(False)', 'is_north': '(False)'}), "(hdr['sampling_rate'], is_acceleration=True, is_vertical=\n    False, is_north=False)\n", (3179, 3263), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((3426, 3459), 'gmprocess.io.seedname.get_units_type', 'get_units_type', (["hdr_h2['channel']"], {}), "(hdr_h2['channel'])\n", (3440, 3459), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((3475, 3487), 'obspy.core.trace.Stats', 'Stats', (['hdr_z'], {}), '(hdr_z)\n', (3480, 3487), False, 'from obspy.core.trace import Stats\n'), ((3503, 3516), 'obspy.core.trace.Stats', 'Stats', (['hdr_h1'], {}), '(hdr_h1)\n', (3508, 3516), False, 'from obspy.core.trace import Stats\n'), ((3532, 3545), 'obspy.core.trace.Stats', 'Stats', (['hdr_h2'], {}), '(hdr_h2)\n', (3537, 3545), False, 'from obspy.core.trace import Stats\n'), ((3628, 3673), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', ([], {'data': 'data[:, 1]', 'header': 'stats_z'}), '(data=data[:, 1], header=stats_z)\n', (3640, 3673), False, 'from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS\n'), ((3745, 3791), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', ([], {'data': 'data[:, 2]', 'header': 'stats_h1'}), '(data=data[:, 2], header=stats_h1)\n', (3757, 3791), False, 'from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS\n'), ((3864, 3910), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', ([], {'data': 'data[:, 3]', 'header': 'stats_h2'}), '(data=data[:, 3], header=stats_h2)\n', (3876, 3910), False, 'from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS\n'), ((3981, 4025), 'gmprocess.core.stationstream.StationStream', 'StationStream', (['[trace_z, trace_h1, trace_h2]'], {}), '([trace_z, trace_h1, trace_h2])\n', (3994, 4025), False, 'from gmprocess.core.stationstream import StationStream\n'), ((5592, 5605), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5603, 5605), False, 'from collections import OrderedDict\n'), ((7846, 7889), 'logging.warning', 'logging.warning', (['"""Setting elevation to 0.0"""'], {}), "('Setting elevation to 0.0')\n", (7861, 7889), False, 'import logging\n'), ((2407, 2429), 'os.path.basename', 'os.path.basename', (['head'], {}), '(head)\n', (2423, 2429), False, 'import os\n'), ((7315, 7344), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'GMT_OFFSET'}), '(seconds=GMT_OFFSET)\n', (7324, 7344), False, 'from datetime import datetime, timedelta\n'), ((7774, 7821), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'secs', 'microseconds': 'microsecs'}), '(seconds=secs, microseconds=microsecs)\n', (7783, 7821), False, 'from datetime import datetime, timedelta\n'), ((5856, 5899), 'logging.debug', 'logging.debug', (['f"""station: {hdr[\'station\']}"""'], {}), '(f"station: {hdr[\'station\']}")\n', (5869, 5899), False, 'import logging\n'), ((6022, 6080), 'logging.debug', 'logging.debug', (['f"""station_name: {standard[\'station_name\']}"""'], {}), '(f"station_name: {standard[\'station_name\']}")\n', (6035, 6080), False, 'import logging\n'), ((6456, 6492), 'datetime.datetime.strptime', 'datetime.strptime', (['timestr', 'DATE_FMT'], {}), '(timestr, DATE_FMT)\n', (6473, 6492), False, 'from datetime import datetime, timedelta\n')]
|
#!/usr/bin/env python
import sys
import inspect
import re
import optparse
import vtk
from pydoc import classname
'''
This is a translation from the TCL code of the same name.
'''
vtk.vtkObject.GlobalWarningDisplayOff()
def RedirectVTKMessages():
'''
Can be used to redirect VTK related error messages to a
file.
'''
log = vtk.vtkFileOutputWindow()
log.SetFlush(1)
log.AppendOff()
log.SetFileName('TestSetGet-err.log')
log.SetInstance(log)
commonExceptions = set([
"vtkDistributedDataFilter", # core dump
"vtkDataEncoder", # Use after free error.
"vtkWebApplication", # Thread issues - calls vtkDataEncoder
"vtkView", # vtkView.SetRepresentation(None) fails
"vtkGenericAttributeCollection", # Assert error
"vtkOverlappingAMR",
# These give an HDF5 no file name error.
"vtkAMRFlashParticlesReader",
"vtkAMREnzoParticlesReader",
"vtkAMRFlashReader",
# core dump in release mode, issue is vtkChartMatrix.GetChart() & vtkScatterPlotMatrix.SetActivePlot()
"vtkChartMatrix",
"vtkScatterPlotMatrix"
])
classLinuxExceptions = set([
"vtkAMREnzoReader" # core dump
])
# In the case of Windows, these classes cause a crash.
classWindowsExceptions = set([
"vtkWin32VideoSource", # Update() works the first time but a subsequent run calls up the webcam which crashes on close.
"vtkCMLMoleculeReader",
"vtkCPExodusIIInSituReader",
"vtkMINCImageWriter",
"vtkQtInitialization"
])
classExceptions = set()
# This will hold the classes to be tested.
vtkClasses = set()
classNames = None
classesTested = set()
# Keep a record of the classes tested.
nonexistentClasses = set()
abstractClasses = set()
noConcreteImplementation = set()
noObserver = set()
setGetWorked = set()
setGetFailed = set()
#------------------------
# These variables are used for further record keeping
# should users wish to investigate or debug further.
noGetSetPairs = set()
# These will be a list of the get/set functions keyed on class name.
getParameterFail = dict()
setParameterFail = dict()
#------------------------
# Controls the verbosity of the output.
verbose = False
class ErrorObserver:
'''
See: http://public.kitware.com/pipermail/vtkusers/2012-June/074703.html
'''
def __init__(self):
self.__ErrorOccurred = False
self.__ErrorMessage = None
self.CallDataType = 'string0'
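        # 'string0' asks VTK to pass the event's message string as the
        # third argument of __call__ (see the thread referenced above)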
def __call__(self, obj, event, message):
self.__ErrorOccurred = True
self.__ErrorMessage = message
def ErrorOccurred(self):
occ = self.__ErrorOccurred
self.__ErrorOccurred = False
return occ
def ErrorMessage(self):
return self.__ErrorMessage
def GetVTKClasses():
'''
:return: a set of all the VTK classes.
'''
# This pattern will get the name and type of the member in the vtk classes.
pattern = r'\<vtkclass (.*)\.(.*)\>'
regEx = re.compile(pattern)
    vtkClasses = inspect.getmembers(
        vtk, lambda m: inspect.isclass(m) and not inspect.isabstract(m))
res = set()
for name, obj in vtkClasses:
result = re.match(regEx, repr(obj))
if result:
res.add(result.group(2))
return res
def GetVTKClassGroups(vtkClasses, subStr):
'''
:param: vtkClasses - the set of VTK classes
:param: subStr - the substring for the VTK class to match e.g Readers
:return: a set of all the VTK classes that are e.g. Readers.
'''
res = set()
for obj in list(vtkClasses):
if obj.find(subStr) > -1:
res.add(obj)
return res
def FilterClasses(allClasses, filter):
'''
    :param: vtkClasses - the set of VTK classes
    :param: filter - a list of substrings of classes to be removed
:return: a set of all the VTK classes that do not have the substrings
in their names.
'''
res = allClasses
for f in filter:
c = GetVTKClassGroups(allClasses, f)
res = res - c
return res
def TestOne(cname):
'''
Test the named class looking for complementary Set/Get pairs to test.
The test is essentially this: Set(Get()).
Some classes will generate a TypeError or an AttributeError,
    in this case, the name of the class is stored in the global set
    abstractClasses or noConcreteImplementation and the corresponding
    status code below is returned.
    If Set... or Get... fails, a record of this is kept in the global
    dictionaries setParameterFail and getParameterFail.
Return values:
0: If not any of the Set(Get()) tests work.
1: If at least one of Set(Get()) tests works.
2: No observer could be added.
3: If it is an abstract class.
4: No concrete implementation.
5: Class does not exist.
:param: cname - the name of the class to be tested.
:return: One of the above return values.
'''
try:
b = getattr(vtk, cname)()
e = ErrorObserver()
try:
b.AddObserver('ErrorEvent', e)
except AttributeError:
noObserver.add(cname)
return 2
except:
raise
        getPattern = r'(^Get(.*)$)'
        getRegEx = re.compile(getPattern)
        setPattern = r'(^Set(.*)$)'
        setRegEx = re.compile(setPattern)
methods = [method for method in dir(getattr(vtk, cname))
if callable(getattr(getattr(vtk, cname), method))]
# Partition the Set/Get functions into separate sets.
gets = set()
sets = set()
for m in methods:
result = re.match(getRegEx, m)
if result:
gets.add(result.group(2))
result = re.match(setRegEx, m)
if result:
sets.add(result.group(2))
ok = False
# These are our Set... Get... functions.
matchingGetSet = set.intersection(gets, sets)
setGetStatus = False
if matchingGetSet:
for m in set.intersection(gets, sets):
# Get...() with no parameters.
try:
x = getattr(b, 'Get' + m)()
try:
if type(x) is tuple:
x = list(x)
getattr(b, 'Set' + m)(*x)
else:
getattr(b, 'Set' + m)(x)
setGetStatus |= True
except TypeError:
# The parameter passed is of the wrong type
# or wrong number of parameters.
try:
value = setParameterFail[cname]
value.add('Set' + m)
setParameterFail[cname] = value
except KeyError:
# Key is not present
setParameterFail[cname] = set(['Set' + m])
except Exception as err:
print(cname + 'Set' + m + ' ' + str(err))
except TypeError:
# Get...() takes 1 or more arguments.
try:
value = getParameterFail[cname]
value.add('Get' + m)
getParameterFail[cname] = value
except KeyError:
# Key is not present
getParameterFail[cname] = set(['Get' + m])
except Exception as err:
print(cname + 'Get' + m + ' ' + str(err))
ok = setGetStatus
else:
noGetSetPairs.add(cname)
b = None
if ok:
setGetWorked.add(cname)
return 1
setGetFailed.add(cname)
return 0
except TypeError:
# Trapping abstract classes.
abstractClasses.add(cname)
return 3
except NotImplementedError:
# No concrete implementation
noConcreteImplementation.add(cname)
return 4
except AttributeError:
# Class does not exist
nonexistentClasses.add(cname)
return 5
except:
raise
def TestSetGet(batch, batchNo=0, batchSize=0):
'''
Test Set/Get pairs in each batch.
If at least one of the Set(Get()) tests pass, the name of the
class is added to the global variable setGetWorked otherwise
to the global variable setGetFailed.
:param: batch - the set of classes to be tested.
:param: batchNo - if the set of classes is a subgroup then this
is the index of the subgroup.
:param: batchSize - if the set of classes is a subgroup then this
is the size of the subgroup.
'''
baseIdx = batchNo * batchSize
idx = baseIdx
for a in batch:
batchIdx = idx - baseIdx
# res = " Testing -- {:4d} - {:s}".format(idx,a)
# There is no format method in Python 2.5
res = " Testing -- %4d - %s" % (idx, a)
if (batchIdx < len(batch) - 1):
# nextRes = " Next -- {:4d} - {:s}".format(idx + 1,list(batch)[batchIdx +1])
nextRes = " Next -- %4d - %s" % (idx + 1, list(batch)[batchIdx + 1])
else:
nextRes = "No next"
# if verbose:
# print(res, nextRes)
classesTested.add(a)
ok = TestOne(a)
if ok == 0:
if verbose:
print(res + ' - Fail')
elif ok == 1:
if verbose:
print(res + ' - Ok')
elif ok == 2:
if verbose:
print(res + ' - no observer could be added.')
elif ok == 3:
if verbose:
print(res + ' - is Abstract')
elif ok == 4:
if verbose:
print(res + ' - No concrete implementation')
elif ok == 5:
if verbose:
print(res + ' - Does not exist')
else:
if verbose:
print(res + ' - Unknown status')
idx += 1
# print(nextRes)
def BatchTest(vtkClasses, batchNo, batchSize):
'''
Batch the classes into groups of batchSize.
:param: batchNo - if the set of classes is a subgroup then this
is the index of the subgroup.
:param: batchSize - if the set of classes is a subgroup then this
is the size of the subgroup.
'''
idx = 0
    total = 0
batch = set()
for a in vtkClasses:
        currentBatchNo = idx // batchSize
if currentBatchNo == batchNo:
batch.add(a)
total += 1
if total == batchSize:
TestSetGet(batch, batchNo, batchSize)
batch = set()
total = 0
idx += 1
if batch:
TestSetGet(batch, batchNo, batchSize)
def PrintResultSummary():
print('-' * 40)
print('Set(Get()) worked: %i' % len(setGetWorked))
print('Set(Get()) failed: %i' % len(setGetFailed))
print('Abstract classes: %i' % len(abstractClasses))
print('Non-existent classes: %i' % len(nonexistentClasses))
print('No concrete implementation: %i' % len(noConcreteImplementation))
print('No observer could be added: %i' % len(noObserver))
print('-' * 40)
print('Total number of classes tested: %i' % len(classesTested))
print('-' * 40)
print('Excluded from testing: %i' % len(classExceptions))
print('-' * 40)
def ProgramOptions():
desc = """
%prog Tests each VTK class for Set(Get()) where Get() has no parameters.
"""
parser = optparse.OptionParser(description=desc)
parser.set_defaults(verbose=False)
parser.add_option('-c', '--classnames',
help='The name of the class or a list of classes in quotes separated by commas.',
type='string',
dest='classnames',
default=None,
action='store')
parser.add_option('-q', '--quiet',
help='Do not print status messages to stdout (default)',
dest='verbose',
action="store_false")
parser.add_option('-v', '--verbose',
help='Print status messages to stdout',
dest='verbose',
action="store_true")
(opts, args) = parser.parse_args()
return (True, opts)
def CheckPythonVersion(ver):
'''
Check the Python version.
:param: ver - the minimum required version number as hexadecimal.
    :return: True if the Python version is greater than or equal to ver.
'''
if sys.hexversion < ver:
return False
return True
def main(argv=None):
if not CheckPythonVersion(0x02060000):
print('This program requires Python 2.6 or greater.')
return
global classExceptions
global vtkClasses
global classNames
global verbose
if argv is None:
argv = sys.argv
(res, opts) = ProgramOptions()
if opts.classnames:
cn = [x.strip() for x in opts.classnames.split(',')]
classNames = set(cn)
if opts.verbose:
verbose = opts.verbose
print('CTEST_FULL_OUTPUT (Avoid ctest truncation of output)')
# RedirectVTKMessages()
if classNames:
TestSetGet(classNames)
else:
classExceptions = commonExceptions.union(classLinuxExceptions)
classExceptions = classExceptions.union(classWindowsExceptions)
vtkClasses = GetVTKClasses()
# filter = ['Reader', 'Writer', 'Array_I', 'Qt']
# vtkClasses = FilterClasses(vtkClasses, filter)
vtkClasses = vtkClasses - classExceptions
TestSetGet(vtkClasses)
# In Windows
# 0-10, 10-17, 17-18, 18-23 in steps of 100 work but not if called
# in a loop.
# intervals = [[0,10]] # [[0,10]], [10,17], [17,18], [18,20]]
# for j in intervals:
# for i in range(j[0], j[1]):
# BatchTest(vtkClasses, i, 100)
# print(vtkClasses)
PrintResultSummary()
if __name__ == '__main__':
sys.exit(main())
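# Typical invocations, using the options defined in ProgramOptions() above
# (the script file name is assumed here):
#   python TestSetGet.py -v
#   python TestSetGet.py -c "vtkSphereSource, vtkCellArray" -v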
|
[
"optparse.OptionParser",
"re.compile",
"re.match",
"vtk.vtkFileOutputWindow",
"vtk.vtkObject.GlobalWarningDisplayOff",
"inspect.getmembers"
] |
[((185, 224), 'vtk.vtkObject.GlobalWarningDisplayOff', 'vtk.vtkObject.GlobalWarningDisplayOff', ([], {}), '()\n', (222, 224), False, 'import vtk\n'), ((349, 374), 'vtk.vtkFileOutputWindow', 'vtk.vtkFileOutputWindow', ([], {}), '()\n', (372, 374), False, 'import vtk\n'), ((2847, 2866), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (2857, 2866), False, 'import re\n'), ((2884, 2951), 'inspect.getmembers', 'inspect.getmembers', (['vtk', '(inspect.isclass and not inspect.isabstract)'], {}), '(vtk, inspect.isclass and not inspect.isabstract)\n', (2902, 2951), False, 'import inspect\n'), ((11454, 11493), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'description': 'desc'}), '(description=desc)\n', (11475, 11493), False, 'import optparse\n'), ((5068, 5090), 're.compile', 're.compile', (['getPattern'], {}), '(getPattern)\n', (5078, 5090), False, 'import re\n'), ((5146, 5168), 're.compile', 're.compile', (['setPattern'], {}), '(setPattern)\n', (5156, 5168), False, 'import re\n'), ((5457, 5478), 're.match', 're.match', (['getRegEx', 'm'], {}), '(getRegEx, m)\n', (5465, 5478), False, 'import re\n'), ((5565, 5586), 're.match', 're.match', (['setRegEx', 'm'], {}), '(setRegEx, m)\n', (5573, 5586), False, 'import re\n')]
|
from web_scraping_utils import *
from postgresql_login import postgresql_login
from sqlalchemy import create_engine
year = '2021'
month = 'Abril'
table_name = 'usd_values'
def load_data_into_postgresql_table(df):
# Prepare login info
postgresql_str = 'postgresql://{user}:{password}@{host}:{port}/{dbname}'\
.format(**postgresql_login)
# Establish connection to postgresql database
engine = create_engine(postgresql_str)
conn = engine.connect()
# Load dataframe to postgresql table
df.to_sql(table_name, conn, if_exists='append', index=False)
print('Data loaded to {} table in {} database...'.format(table_name, postgresql_login['dbname']))
conn.close()
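# Illustrative note (not part of the original script): `postgresql_login` is
# expected to be a dict supplying the keys used in the connection string above,
# e.g. (hypothetical values):
#   postgresql_login = {'user': 'postgres', 'password': 'secret',
#                       'host': 'localhost', 'port': 5432, 'dbname': 'finance'}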
if __name__ == "__main__":
if table_name == "usd_values":
data = extract_usd_values_from_sii(year, month)
elif table_name == "uf_values":
data = extract_uf_values_from_sii(year, month)
load_data_into_postgresql_table(data)
|
[
"sqlalchemy.create_engine"
] |
[((414, 443), 'sqlalchemy.create_engine', 'create_engine', (['postgresql_str'], {}), '(postgresql_str)\n', (427, 443), False, 'from sqlalchemy import create_engine\n')]
|
import numpy as np
import gym
from gym import spaces
import math
MAX_MARCH = 20
EPSILON = 0.1
DEG_TO_RAD = 0.0174533
WINDOW_SIZE = [300, 300]
#
# Objects
#
def generate_box(pos=None, size=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False,
is_visible=True, is_obstacle=True):
'''
Generate a box with width and height drawn randomly uniformly from size[0] to size[1]
if inside_window is True, we force the box to stay inside the window
'''
box_size = np.random.uniform([size[0], size[0]], [size[1], size[1]])
if pos is None:
if inside_window:
pos = np.random.uniform([box_size[0], box_size[1]],
[WINDOW_SIZE[0] - box_size[0], WINDOW_SIZE[1] - box_size[1]])
else:
pos = np.random.uniform(WINDOW_SIZE)
if inside_window:
return Box(pos, box_size, color=color, is_goal=is_goal)
else:
return Box(pos, box_size, color=color, is_goal=is_goal)
def generate_circle(pos=None, radius=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False,
is_visible=True, is_obstacle=True):
circ_rad = np.random.uniform(radius[0], radius[1])
if pos is None:
if inside_window:
pos = np.random.uniform([circ_rad, circ_rad], [WINDOW_SIZE[0]-circ_rad, WINDOW_SIZE[1]-circ_rad])
else:
pos = np.random.uniform(WINDOW_SIZE)
if inside_window:
return Circle(pos, circ_rad, color=color, is_goal=is_goal)
else:
return Circle(pos, circ_rad, color=color, is_goal=is_goal)
def dist(v):
'''calculate length of vector'''
return np.linalg.norm(v)
class Circle():
def __init__(self, center, radius, color=(255, 255, 255), is_goal=False, is_visible=True,
is_obstacle=True):
self.center = center
self.radius = radius
self.color = color
self.is_goal = is_goal
self.is_visible = is_visible
self.is_obstacle = is_obstacle
self.objects_type = 'circle'
def sdf(self, p):
return dist(self.center - p) - self.radius
def draw(self):
pygame.draw.circle(display, self.color, self.center, self.radius)
class Box():
def __init__(self, center, size, color=(255, 255, 255), is_goal=False, is_visible=True,
is_obstacle=True):
self.center = center
self.size = size #this is a size 2 array for length and height
self.color = color
self.rect = pygame.Rect(center-size, size*2)
self.is_goal = is_goal
self.is_visible = is_visible
self.is_obstacle = is_obstacle
self.objects_type = 'box'
def sdf(self, p):
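        # Signed distance to an axis-aligned box: `offset` measures how far p
        # lies beyond each face (negative components mean p is inside along
        # that axis). Clipping at zero splits the result into the outside part
        # (unsigned_dist) and the inside part (dist_inside_box <= 0), giving
        # the standard 2D box SDF.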
offset = np.abs(p-self.center) - self.size
unsigned_dist = dist(np.clip(offset, 0, np.inf))
dist_inside_box = np.max(np.clip(offset, -np.inf, 0))
return unsigned_dist + dist_inside_box
def draw(self):
pygame.draw.rect(display, self.color, self.rect)
#
# Character Class
#
class Ray():
def __init__(self, start, angle, color='white', render_march=False):
'''
Ray for ray marching
if render_march is True, then we render the sdf circles used to calculate march
'''
self.start = start
self.angle = angle
self.color = color
self.render_march = render_march
self.touched_obj = None
self.obj_dist = np.inf
self.sdf = None
def update(self, start=None, angle=None):
'''
update position and angle, perform march, determine object and distance
'''
if start is not None:
self.start = start
if angle is not None:
self.angle = angle
self.march()
def march(self):
'''
perform ray march, find collision with object
'''
depth = 0
p = self.start
for i in range(MAX_MARCH):
dist, obj = self.sdf(p)
depth += dist
if self.render_march:
pygame.draw.circle(display, (255, 255, 255, 0.3), p, dist, width=1)
if dist < EPSILON:
self.touched_obj = obj
self.obj_dist = depth
return depth, obj
else:
p = p + np.array([np.cos(self.angle), np.sin(self.angle)]) * dist
self.touched_obj = obj
self.obj_dist = depth
return depth, obj
def draw(self):
end = self.start + np.array([np.cos(self.angle), np.sin(self.angle)]) * self.obj_dist
pygame.draw.line(display, self.color, self.start, end)
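# Note on Ray.march above (illustrative comment, not part of the original
# file): each iteration queries the scene SDF at the current point and steps
# the ray forward by exactly that distance, the largest step guaranteed not to
# overshoot any object ("sphere tracing"). Marching stops after MAX_MARCH
# iterations or once the SDF falls below EPSILON, and the hit object and total
# depth are recorded on the ray.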
class Character:
    def __init__(self, pos=None, angle=0, color='yellow', size=5,
                 fov=120*DEG_TO_RAD, num_rays=30, render_rays=True, max_depth=424):
'''
Generate a character that can move through the window
        pos: starting position (defaults to the center of the window)
angle: starting angle (radians) angle always takes on values from -pi to pi
color: color
size: size
fov: range of angles character can see using rays
num_rays: fidelity of depth perception
        render_rays: whether or not to draw the character's rays
'''
        if pos is None:
            # avoid sharing a mutable default list across Character instances
            pos = [WINDOW_SIZE[0]/2, WINDOW_SIZE[1]/2]
        self.pos = pos
self.angle = (angle + np.pi) % (2*np.pi) - np.pi
self.color = color
self.size = size
self.fov = fov
self.ray_splits = fov / num_rays
self.render_rays = render_rays
self.num_rays = num_rays
self.max_depth = max_depth
self.obstacle_sdf = None
self.visible_sdf = None
self.rays = []
fov_start = self.angle - self.fov/2
for i in range(num_rays):
self.rays.append(Ray(self.pos, fov_start + i*self.ray_splits))
# print(len(self.rays))
# print(self.num_rays)
def update_sdf_funcs(self, obstacle_sdf, visible_sdf):
'''
Update the current held sdf functions which allow the character
to calculate distance to objects and for rays
'''
self.obstacle_sdf = obstacle_sdf
self.visible_sdf = visible_sdf
fov_start = self.angle - self.fov/2
for i in range(self.num_rays):
self.rays[i].sdf = visible_sdf
self.rays[i].update(start=self.pos, angle=fov_start + i*self.ray_splits)
def update_rays(self):
'''
update the angle of the rays using own position and angle
'''
fov_start = self.angle - self.fov/2
for i in range(self.num_rays):
self.rays[i].update(start=self.pos, angle=fov_start + i*self.ray_splits)
def draw_rays(self):
'''
draw the rays coming from character
'''
for ray in self.rays:
ray.draw()
def draw(self):
'''
draw the character
'''
point1 = [self.pos[0] - (math.cos(self.angle+0.3))*self.size,
self.pos[1] - (math.sin(self.angle+0.3))*self.size]
point2 = [self.pos[0] - math.cos(self.angle)*self.size*.8, self.pos[1] - math.sin(self.angle)*self.size*.8]
point3 = [self.pos[0] - (math.cos(self.angle-0.3))*self.size,
self.pos[1] - (math.sin(self.angle-0.3))*self.size]
pygame.draw.polygon(
display,
self.color,
[self.pos, point1, point2, point3, self.pos]
)
if self.render_rays:
self.draw_rays()
def move(self, speed=0.5):
'''
move in the faced direction with number of pixels of speed
collision detection uses the same ray marching algorithm
after moving, update the rays
'''
collide_with_object = self.march_collision_detection(speed)
if collide_with_object is False:
self.pos[0] += math.cos(self.angle) * speed
self.pos[1] += math.sin(self.angle) * speed
else:
#collided with object, move with the given depth
dist_to_obj = collide_with_object[0]
self.pos[0] += math.cos(self.angle) * dist_to_obj
self.pos[1] += math.sin(self.angle) * dist_to_obj
self.update_rays()
return collide_with_object
def march_collision_detection(self, max_dist):
'''
perform ray march, used for collision detection. The max_dist is the speed we are
moving at. If the max_dist exceeds the sdf (i.e., we are colliding with an object),
then return the distance to the collided object
If sdf exceeds max_dist, then we have not collided on our path, so return False
(i.e., no object hit)
returns:
False - if no object collided with
dist, obj - if colliding with an object, return the distance that we are allowed to
travel and the object
'''
depth = 0
p = self.pos
for i in range(MAX_MARCH):
dist, obj = self.obstacle_sdf(p)
if dist < EPSILON:
#we have collided before passing the requisite distance
return depth-2*EPSILON, obj
if depth + dist > max_dist:
#we have enough room to move on the desired path
return False
else:
#we continue the march
depth += dist
p = p + np.array([np.cos(self.angle), np.sin(self.angle)]) * dist
return depth, obj
def rotate(self, angle=0.05):
self.angle += angle
self.angle = (self.angle + np.pi) % (2*np.pi) - np.pi
self.update_rays()
def ray_obs(self):
'''
Get all rays and their distances to objects
normalize_depth: divide depth readings by value
'''
ray_colors = []
ray_depths = []
for ray in self.rays:
# ray_colors.append(colors_dict[ray.touched_obj.color])
ray_colors.append(ray.touched_obj.color)
ray_depths.append(ray.obj_dist)
# if normalize_depth:
# ray_depths = np.array(ray_depths) / normalize_depth
# else:
# ray_depths = np.array(ray_depths)
ray_colors = np.array(ray_colors)
# background_colors = np.full(ray_colors.shape, 0)
ray_depths = np.clip(ray_depths, 0, self.max_depth) / self.max_depth
visual = (1 - ray_depths.reshape(-1, 1)) * ray_colors / 255
# return ray_depths, ray_colors
return visual
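# Note on Character.ray_obs above (illustrative comment, not part of the
# original file): each ray contributes one RGB row scaled by
# (1 - depth / max_depth), so closer objects appear brighter; the result is a
# num_rays x 3 array of values in [0, 1].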
def randomize_location_and_angle(character, goal=None, world_size=[300, 300], sdf_func=None, sep=True):
'''
create a random location and start direction for the character
noting that we do not allow spawning into objects
sep: if set to True, we will make sure character has a minimum distance away
from the goal that is at least half the max distance possible from goal
to end of window
'''
#max distance from goal to end of window
    max_goal_sep = dist(np.max([np.array(world_size) - goal.center, goal.center], axis=0))
searching = True
while searching:
        pos = np.random.uniform(world_size)
goal_sep = dist(goal.center - pos)
if sdf_func(pos)[0] > 0 and (not sep or goal_sep > max_goal_sep / 2):
#position is okay
searching = False
character.pos = pos
character.angle = np.random.uniform(6.28)
# character.pos = np.array([100, 100])
# character.angle = 0
character.update_rays()
#
# Nav Environments
#
class GeneralNav(gym.Env):
metadata = {"render.modes": ['rgb_array', 'human'], 'video.frames_per_second': 24}
def __init__(self, num_rays=30, max_steps=200, num_objects=5,
rew_structure='dist', give_heading=0, verbose=0, flat=True,
world_gen_func=None, world_gen_params={}, world_size=[300, 300], skeleton=True):
'''
General Nav environment which can be used to test some general pygame things and see
that all of the object and distance detection things are working
When inheriting, should make sure to change the functions
step(), reset(), get_observation(), generate_world()
rew_structure: 'dist' - reward given based on distance to goal
'goal' - reward only given when goal reached
give_heading: whether to additionally give a distance and direction to goal
flat: whether to give observations in a flattened state
world_gen_func: a function can be passed to manually create a world
using some other rules. Note that it needs to generate objects, a goal, and
set the agent position and heading
The character will be passed as the argument
'''
super(GeneralNav, self).__init__()
if 'pygame' not in globals():
global pygame
import pygame
if not skeleton:
print('generating general')
self.total_rewards = 0
self.give_heading = give_heading
self.flat = flat
if give_heading:
self.observation_space = spaces.Box(low=0, high=1, shape=((num_rays + 1)*3,))
else:
# self.observation_space = spaces.Box(low=0, high=1, shape=(num_rays*2,), dtype=np.float)
self.observation_space = spaces.Box(low=0, high=1, shape=(num_rays*3,))
        self.action_space = spaces.Discrete(4)  # turn left, move forward, turn right, or no-op
self.max_steps = max_steps
self.current_steps = 0
self.character = Character(max_depth=dist(world_size))
self.num_objects = num_objects
self.num_rays = num_rays
self.rew_structure = rew_structure
self.verbose = verbose
self.objects = []
self.world_gen_func = world_gen_func
self.world_gen_params = world_gen_params
self.world_size = world_size
if self.world_gen_func is None:
self.generate_world()
# randomize_location_and_angle(self.character)
else:
self.world_gen_func(self.character, **self.world_gen_params)
def step(self, action):
reward = -1
collide_with_object = False
done = False
info = {}
if action == 0:
self.character.rotate(-0.1)
if action == 1:
collide_with_object = self.character.move(10)
if action == 2:
self.character.rotate(0.1)
if action == 3:
pass
if self.rew_structure == 'dist':
            goal = self.objects[-1]
dist_to_goal = np.clip(dist(goal.center - self.character.pos), 0, 1000) / 1000
reward = float(-dist_to_goal)
if collide_with_object is not False:
obj = collide_with_object[1]
if obj.is_goal:
if self.verbose:
print('goal reached!')
reward = float(100)
done = True
else:
# reward = -10
reward = float(-1)
observation = self.get_observation()
if self.current_steps > self.max_steps:
done = True
self.current_steps += 1
self.total_rewards += reward
if done and self.verbose:
print('done, total_reward:{}'.format(self.total_rewards))
return observation, reward, done, info
def get_observation(self):
# ray_depths, ray_colors = self.character.ray_obs()
# return np.append(ray_depths, ray_colors)
if self.give_heading > 0:
#tell where the goal is distance and heading
ray_obs = self.character.ray_obs()
            goal = self.objects[-1]
dist_to_goal = np.clip(dist(goal.center - self.character.pos), 0, 1000) / 1000
heading = goal.center - self.character.pos
heading = np.arctan2(heading[1], heading[0])
if self.give_heading == 1:
#only give distance to goal
obs = np.vstack([ray_obs, [dist_to_goal, 0, 0]])
elif self.give_heading == 2:
#give distance and angle to goal
obs = np.vstack([ray_obs, [dist_to_goal, heading/3.14, 0]])
elif self.give_heading == 3:
#give distance and angle to goal and current agent angle
obs = np.vstack([ray_obs, [dist_to_goal, heading/3.14, self.character.angle]])
if self.flat:
return np.array(obs.reshape(-1), dtype='float')
else:
return np.array(obs, dtype='float')
else:
if self.flat:
return np.array(self.character.ray_obs().reshape(-1), dtype='float')
else:
return np.array(self.character.ray_obs(), dtype='float')
def reset(self):
self.generate_world()
def generate_walls(self):
self.objects.append(Box(np.array([0, 0]), np.array([1, self.world_size[1]]), color=(0, 255, 0)))
self.objects.append(Box(np.array([0, 0]), np.array([self.world_size[0], 1]), color=(0, 255, 0)))
self.objects.append(Box(np.array([0, self.world_size[1]]), np.array([self.world_size[0], 1]), color=(0, 255, 0)))
self.objects.append(Box(np.array([self.world_size[0], 0]), np.array([1, self.world_size[1]]), color=(0, 255, 0)))
def generate_box(self, pos=None, size=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False,
is_visible=True, is_obstacle=True):
'''
Generate a box with width and height drawn randomly uniformly from size[0] to size[1]
if inside_window is True, we force the box to stay inside the window
'''
box_size = np.random.uniform([size[0], size[0]], [size[1], size[1]])
if pos is None:
if inside_window:
pos = np.random.uniform([box_size[0], box_size[1]],
[self.world_size[0] - box_size[0], self.world_size[1] - box_size[1]])
else:
pos = np.random.uniform(self.world_size)
if inside_window:
return Box(pos, box_size, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
else:
return Box(pos, box_size, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
def generate_circle(self, pos=None, radius=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False,
is_visible=True, is_obstacle=True):
circ_rad = np.random.uniform(radius[0], radius[1])
if pos is None:
if inside_window:
pos = np.random.uniform([circ_rad, circ_rad], [self.world_size[0]-circ_rad, self.world_size[1]-circ_rad])
else:
pos = np.random.uniform(self.world_size)
if inside_window:
return Circle(pos, circ_rad, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
else:
return Circle(pos, circ_rad, color=color, is_goal=is_goal, is_visible=is_visible, is_obstacle=is_obstacle)
def generate_world(self):
'''
World generation should end up with a list of objects as self.objects
Should end by calling
self.generate_walls (optional to include walls)
self.visible_objects, self.obstacles = self.decompose_objects(self.objects)
obstacle_sdf = self.get_sdf_func('obstacle')
visible_sdf = self.get_sdf_func('visible')
self.character.update_sdf_funcs(obstacle_sdf, visible_sdf)
'''
boxes = [self.generate_box() for i in range(5)]
circles = [self.generate_circle() for i in range(5)]
self.objects = boxes + circles
self.generate_walls()
self.visible_objects, self.obstacles, self.all_objects = self.decompose_objects(self.objects)
obstacle_sdf = self.get_sdf_func('obstacle')
visible_sdf = self.get_sdf_func('visible')
self.character.update_sdf_funcs(obstacle_sdf, visible_sdf)
def decompose_objects(self, objects):
'''
Take a list of objects and turn them into a dictionary
of usable pieces
        We need two lists: one for visible objects (which vision rays
        use for collision detection), and one for obstacle objects
        (which the player uses for collision detection).
        Goals are not inherently obstacles, so when making a goal, make sure
        to decide if it should have vision/collision detection included
'''
type_box = type(generate_box())
type_circle = type(generate_circle())
visible_objects = {'box_centers': [], 'box_sizes': [], 'boxes': [],
'circle_centers': [], 'circle_radii': [], 'circles': []}
obstacles = {'box_centers': [], 'box_sizes': [], 'boxes': [],
'circle_centers': [], 'circle_radii': [], 'circles': []}
all_objects = {'box_centers': [], 'box_sizes': [], 'boxes': [],
'circle_centers': [], 'circle_radii': [], 'circles': []}
for obj in objects:
if type(obj) == type_box:
all_objects['box_centers'].append(obj.center)
all_objects['box_sizes'].append(obj.size)
all_objects['boxes'].append(obj)
if obj.is_visible:
visible_objects['box_centers'].append(obj.center)
visible_objects['box_sizes'].append(obj.size)
visible_objects['boxes'].append(obj)
if obj.is_obstacle:
obstacles['box_centers'].append(obj.center)
obstacles['box_sizes'].append(obj.size)
obstacles['boxes'].append(obj)
elif type(obj) == type_circle:
all_objects['circle_centers'].append(obj.center)
all_objects['circle_radii'].append(obj.radius)
all_objects['circles'].append(obj)
if obj.is_visible:
visible_objects['circle_centers'].append(obj.center)
visible_objects['circle_radii'].append(obj.radius)
visible_objects['circles'].append(obj)
if obj.is_obstacle:
obstacles['circle_centers'].append(obj.center)
obstacles['circle_radii'].append(obj.radius)
obstacles['circles'].append(obj)
else:
raise Exception('Invalid object not of type box or circle in objects')
for key in visible_objects:
if key not in ['boxes', 'circles']:
visible_objects[key] = np.array(visible_objects[key])
for key in obstacles:
if key not in ['boxes', 'circles']:
obstacles[key] = np.array(obstacles[key])
return visible_objects, obstacles, all_objects
def box_sdfs(self, p, objects):
'''
        compute the sdf values for all boxes using entries from the objects dict:
box_centers
box_sizes
both are m x 2 arrays with each row representing a box
'''
box_centers = objects['box_centers']
box_sizes = objects['box_sizes']
if len(box_centers) > 0:
offset = np.abs(p - box_centers) - box_sizes
unsigned_dist = np.linalg.norm(np.clip(offset, 0, np.inf), axis=1)
dist_inside_box = np.max(np.clip(offset, -np.inf, 0), axis=1)
dists = unsigned_dist + dist_inside_box
return dists
else:
return np.array([])
def circle_sdfs(self, p, objects):
'''
        compute the sdf values for all circles using entries from the objects dict:
circle_centers (m x 2 array)
circle_radii (m x 1 array)
both arrays are 2 dimensional
'''
circle_centers = objects['circle_centers']
circle_radii = objects['circle_radii']
if len(circle_centers) > 0:
return np.linalg.norm((circle_centers - p), axis=1) - circle_radii
else:
return np.array([])
def scene_sdf(self, p, objects):
'''
Perform an sdf on the objects passed
The objects passed should be those generated by the decompose_objects
function
'''
box_dists = self.box_sdfs(p, objects)
circle_dists = self.circle_sdfs(p, objects)
dists = np.append(box_dists, circle_dists)
min_dist = np.min(dists)
obj_index = np.argmin(dists)
obj_select_list = objects['boxes'] + objects['circles']
        return min_dist, obj_select_list[obj_index]
def get_sdf_func(self, typ='visible'):
'''
Get an sdf function to be passed down to the character and rays
'''
if typ == 'visible':
def sdf(p):
return self.scene_sdf(p, self.visible_objects)
return sdf
elif typ == 'obstacle':
def sdf(p):
return self.scene_sdf(p, self.obstacles)
return sdf
elif typ == 'all':
def sdf(p):
return self.scene_sdf(p, self.all_objects)
return sdf
else:
raise Exception('Invalid object type for sdf generator')
def render(self, mode='rgb_array'):
'''
Render out the scene using pygame. If mode=='human', render it to the screen
Otherwise only return an rgb_array of pixel colors using pygame
'''
if 'screen' not in globals():
pygame.init()
if mode == 'human':
globals()['screen'] = pygame.display.set_mode([self.world_size[0], self.world_size[1] + 10])
globals()['display'] = pygame.Surface([self.world_size[0], self.world_size[1] + 10])
display.fill((0, 0, 0))
self.character.draw()
self.draw_character_view()
for obj in self.objects:
obj.draw()
if mode == 'human':
screen.blit(display, (0, 0))
pygame.display.update()
if mode == 'rgb_array':
return pygame.surfarray.pixels3d(display)
def draw_character_view(self):
length = self.world_size[0] / self.num_rays
colors = self.character.ray_obs() * 255
for i in range(self.num_rays):
rect = pygame.Rect([i * length, 300, length, 10])
pygame.draw.rect(display, colors[i], rect)
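# Illustrative note (not part of the original file): GeneralNav.render draws
# the scene plus a bottom strip visualizing the agent's ray observations; with
# mode='rgb_array' it returns the pygame pixel buffer instead of updating a
# window.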
class MorrisNav(GeneralNav):
metadata = {"render.modes": ['rgb_array', 'human'], 'video.frames_per_second': 24}
def __init__(self, num_rays=30, max_steps=None, give_heading=0, verbose=0,
platform_visible=False, ep_struct=1, platform_size=10, world_size=[300, 300],
platform_randomization=1, platform_randomization_spread=20,
global_cues=1, platform_fixed_duration=10, character_sep=False,
reward_shift=0, platform_reward=100):
'''
rew_structure: 'dist' - reward given based on distance to goal
'goal' - reward only given when goal reached
give_heading: whether to additionally give a distance and direction to goal
platform_visible: whether platform should be made visible
max_steps: how many steps an episode should last - default depends on episode structure
!!
ep_struct: important variable about what kind of test we will perform
1: the platform position does not reset between episodes, episodes are 200 steps max
2: the platform position resets each episode, and if the agent stays on a platform
for a while, rewards will be given and position reset
(implement later)
3: agent must stay on platform for 5 timesteps before reward is given and
episode resets
4: agent must explicitly perform an action to say when it is on the platform (not implemented)
!!
plaform_randomization: how the platform position will be randomized
1: fixed positions in one of four quadrants
2: some spot randomized close to the quadrant spots (given by platform_randomization_spread)
3: uniform random
global_cues: what global cues will be provided to the agent (not implemented)
1: all walls colored differently
2: all walls white with a "poster" hanging up
observation space: each ray gives an rgb value depending on distance from object, so this
gives num_rays*3 observations. Additionally a flag will be on/off depending on whether
the agent is currently on a platform
        platform_fixed_duration: once the agent reaches the platform, it will no longer be allowed to
move forward, only rotate (mimic the "stay on platform and look around" phase). This controls
how many timesteps this happens for
character_sep: whether character should be forced to a randomized position far from platform
reward_shift: value the reward should be centered on (e.g., -1 will make every time step give
-1 reward, vs. 0 where the goal gives 1 reward)
'''
super(MorrisNav, self).__init__()
if 'pygame' not in globals():
global pygame
import pygame
self.total_rewards = 0
self.give_heading = give_heading
self.ep_struct = ep_struct
self.platform_visible = platform_visible
self.platform_size = platform_size
self.platform_randomization = platform_randomization
self.platform_randomization_spread = platform_randomization_spread
self.world_size = world_size
self.global_cues = global_cues
self.platform_fixed_duration = platform_fixed_duration
self.character_sep = character_sep
self.reward_shift = reward_shift
self.platform_reward = platform_reward
self.num_rays = num_rays
if give_heading:
self.observation_space = spaces.Box(low=0, high=1, shape=((num_rays + 1)*3 + 1,))
else:
# self.observation_space = spaces.Box(low=0, high=1, shape=(num_rays*2,), dtype=np.float)
self.observation_space = spaces.Box(low=0, high=1, shape=(num_rays*3 + 1,))
        self.action_space = spaces.Discrete(4)  # turn left, move forward, turn right, or no-op
if max_steps is None:
if ep_struct == 1 or ep_struct == 3:
self.max_steps = 200
if ep_struct == 2:
self.max_steps = 1000
else:
self.max_steps = max_steps
self.current_steps = 0
self.duration_on_platform = 0
self.on_platform = False
self.character = Character(max_depth=dist(world_size))
self.verbose = verbose
self.objects = []
self.goal = None
self.generate_world()
def generate_world(self):
self.objects = []
if self.platform_randomization < 3:
quadrant_locations = np.array([self.world_size[0] / 4, self.world_size[1] / 4])
multipliers = np.array([1, 3])
randoms = np.random.choice(2, size=(2))
multipliers = multipliers[randoms] #get how much the x/y values should be multiplied by
pos = quadrant_locations * multipliers
if self.platform_randomization == 2:
#add a spread to the platform location from quadrant position
pos += np.random.uniform(-self.platform_randomization_spread, self.platform_randomization_spread,
size=(2))
elif self.platform_randomization == 3:
pos = None
platform = self.generate_box(pos=pos, size=[self.platform_size, self.platform_size], is_goal=True,
is_visible=self.platform_visible, is_obstacle=False)
self.objects.append(platform)
self.goal = platform
self.generate_walls()
self.visible_objects, self.obstacles, self.all_objects = self.decompose_objects(self.objects)
obstacle_sdf = self.get_sdf_func('obstacle')
visible_sdf = self.get_sdf_func('visible')
self.character.update_sdf_funcs(obstacle_sdf, visible_sdf)
def generate_walls(self):
if self.global_cues == 1:
self.objects.append(Box(np.array([0, 0]), np.array([1, self.world_size[1]]), color=(255, 0, 0)))
self.objects.append(Box(np.array([0, 0]), np.array([self.world_size[0], 1]), color=(0, 255, 0)))
self.objects.append(Box(np.array([0, self.world_size[1]]), np.array([self.world_size[0], 1]), color=(0, 0, 255)))
self.objects.append(Box(np.array([self.world_size[0], 0]), np.array([1, self.world_size[1]]), color=(255, 255, 255)))
elif self.global_cues == 2:
pass
def step(self, action):
reward = 0
collide_with_object = False
done = False
info = {}
if action == 0:
self.character.rotate(-0.1)
if action == 1:
if self.ep_struct >= 3 or not self.on_platform:
#if on the platform, must now be fixed onto it
collide_with_object = self.character.move(3)
if action == 2:
self.character.rotate(0.1)
if action == 3:
pass
# if collide_with_object is not False:
# obj = collide_with_object[1]
if self.on_platform:
self.duration_on_platform += 1
if self.ep_struct <= 2:
reward = self.platform_reward
if self.duration_on_platform >= self.platform_fixed_duration:
if self.ep_struct == 1:
#resetting episode in ep_struct 1
done = True
elif self.ep_struct == 2:
#only reset position in ep_struct 2, episode concludes at end of time
self.reset_character()
elif self.ep_struct == 3:
reward = self.platform_reward
done = True
observation = self.get_observation()
if self.current_steps > self.max_steps:
done = True
reward += self.reward_shift
self.current_steps += 1
self.total_rewards += reward
if done and self.verbose:
print('done, total_reward:{}'.format(self.total_rewards))
return observation, reward, done, info
def get_observation(self):
'''
Get observation reading the colors of the rays and also whether on platform or not
'''
# ray_depths, ray_colors = self.character.ray_obs()
# return np.append(ray_depths, ray_colors)
self.on_platform = np.all(np.abs(self.goal.center - self.character.pos) < self.goal.size)
if self.give_heading > 0:
raise Exception('Not implemented a give_heading > 0 condition for observation')
#tell where the goal is distance and heading
ray_obs = self.character.ray_obs()
goal = self.goal
dist_to_goal = np.clip(dist(goal.center - self.character.pos), 0, 1000) / 1000
heading = goal.center - self.character.pos
heading = np.arctan2(heading[1], heading[0])
if self.give_heading == 1:
#only give distance to goal
obs = np.vstack([ray_obs, [dist_to_goal, 0, 0]])
elif self.give_heading == 2:
#give distance and angle to goal
obs = np.vstack([ray_obs, [dist_to_goal, heading/3.14, 0]])
elif self.give_heading == 3:
#give distance and angle to goal and current agent angle
obs = np.vstack([ray_obs, [dist_to_goal, heading/3.14, self.character.angle]])
#!! Add code to show when on top of platform
if self.flat:
return np.array(obs.reshape(-1), dtype='float')
else:
return np.array(obs, dtype='float')
else:
obs = np.array(self.character.ray_obs().reshape(-1), dtype='float')
obs = np.append(obs, np.array([self.on_platform * 1]))
return obs
def reset(self):
if self.ep_struct == 2:
self.generate_world()
observation = self.get_observation()
self.current_steps = 0
self.total_rewards = 0
self.on_platform = False
self.duration_on_platform = 0
randomize_location_and_angle(self.character, self.goal, self.world_size, self.get_sdf_func('all'), self.character_sep)
return observation
def reset_character(self):
'''
Reset position of the character, used for ep_struct 2
'''
self.on_platform = False
self.duration_on_platform = 0
randomize_location_and_angle(self.character, self.goal, self.world_size, self.get_sdf_func('all'), self.character_sep)
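# Illustrative usage sketch (not part of the original file): a short random
# rollout in the MorrisNav environment. Assumes pygame and gym are installed;
# the episode ends once the agent has stayed on the platform long enough or
# max_steps elapse.
if __name__ == '__main__':
    env = MorrisNav(num_rays=30, ep_struct=1, verbose=1)
    obs = env.reset()
    for _ in range(100):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            break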
|
[
"pygame.draw.line",
"numpy.abs",
"numpy.arctan2",
"pygame.Rect",
"gym.spaces.Discrete",
"numpy.argmin",
"numpy.clip",
"pygame.display.update",
"numpy.sin",
"numpy.linalg.norm",
"pygame.display.set_mode",
"numpy.append",
"math.cos",
"numpy.random.choice",
"pygame.draw.polygon",
"pygame.Surface",
"pygame.draw.rect",
"pygame.init",
"math.sin",
"numpy.min",
"numpy.cos",
"numpy.vstack",
"numpy.random.uniform",
"pygame.draw.circle",
"numpy.array",
"gym.spaces.Box",
"pygame.surfarray.pixels3d"
] |
[((505, 562), 'numpy.random.uniform', 'np.random.uniform', (['[size[0], size[0]]', '[size[1], size[1]]'], {}), '([size[0], size[0]], [size[1], size[1]])\n', (522, 562), True, 'import numpy as np\n'), ((1185, 1224), 'numpy.random.uniform', 'np.random.uniform', (['radius[0]', 'radius[1]'], {}), '(radius[0], radius[1])\n', (1202, 1224), True, 'import numpy as np\n'), ((1677, 1694), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (1691, 1694), True, 'import numpy as np\n'), ((11818, 11841), 'numpy.random.uniform', 'np.random.uniform', (['(6.28)'], {}), '(6.28)\n', (11835, 11841), True, 'import numpy as np\n'), ((2183, 2248), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'self.color', 'self.center', 'self.radius'], {}), '(display, self.color, self.center, self.radius)\n', (2201, 2248), False, 'import pygame\n'), ((2546, 2582), 'pygame.Rect', 'pygame.Rect', (['(center - size)', '(size * 2)'], {}), '(center - size, size * 2)\n', (2557, 2582), False, 'import pygame\n'), ((3001, 3049), 'pygame.draw.rect', 'pygame.draw.rect', (['display', 'self.color', 'self.rect'], {}), '(display, self.color, self.rect)\n', (3017, 3049), False, 'import pygame\n'), ((4682, 4736), 'pygame.draw.line', 'pygame.draw.line', (['display', 'self.color', 'self.start', 'end'], {}), '(display, self.color, self.start, end)\n', (4698, 4736), False, 'import pygame\n'), ((7508, 7598), 'pygame.draw.polygon', 'pygame.draw.polygon', (['display', 'self.color', '[self.pos, point1, point2, point3, self.pos]'], {}), '(display, self.color, [self.pos, point1, point2, point3,\n self.pos])\n', (7527, 7598), False, 'import pygame\n'), ((10599, 10619), 'numpy.array', 'np.array', (['ray_colors'], {}), '(ray_colors)\n', (10607, 10619), True, 'import numpy as np\n'), ((11546, 11576), 'numpy.random.uniform', 'np.random.uniform', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (11563, 11576), True, 'import numpy as np\n'), ((18446, 18503), 'numpy.random.uniform', 'np.random.uniform', (['[size[0], size[0]]', '[size[1], size[1]]'], {}), '([size[0], size[0]], [size[1], size[1]])\n', (18463, 18503), True, 'import numpy as np\n'), ((19281, 19320), 'numpy.random.uniform', 'np.random.uniform', (['radius[0]', 'radius[1]'], {}), '(radius[0], radius[1])\n', (19298, 19320), True, 'import numpy as np\n'), ((25254, 25288), 'numpy.append', 'np.append', (['box_dists', 'circle_dists'], {}), '(box_dists, circle_dists)\n', (25263, 25288), True, 'import numpy as np\n'), ((25308, 25321), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (25314, 25321), True, 'import numpy as np\n'), ((25342, 25358), 'numpy.argmin', 'np.argmin', (['dists'], {}), '(dists)\n', (25351, 25358), True, 'import numpy as np\n'), ((31271, 31289), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4)'], {}), '(4)\n', (31286, 31289), False, 'from gym import spaces\n'), ((627, 738), 'numpy.random.uniform', 'np.random.uniform', (['[box_size[0], box_size[1]]', '[WINDOW_SIZE[0] - box_size[0], WINDOW_SIZE[1] - box_size[1]]'], {}), '([box_size[0], box_size[1]], [WINDOW_SIZE[0] - box_size[0],\n WINDOW_SIZE[1] - box_size[1]])\n', (644, 738), True, 'import numpy as np\n'), ((805, 835), 'numpy.random.uniform', 'np.random.uniform', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (822, 835), True, 'import numpy as np\n'), ((1289, 1389), 'numpy.random.uniform', 'np.random.uniform', (['[circ_rad, circ_rad]', '[WINDOW_SIZE[0] - circ_rad, WINDOW_SIZE[1] - circ_rad]'], {}), '([circ_rad, circ_rad], [WINDOW_SIZE[0] - circ_rad, \n WINDOW_SIZE[1] - circ_rad])\n', (1306, 1389), True, 'import numpy as np\n'), 
((1413, 1443), 'numpy.random.uniform', 'np.random.uniform', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (1430, 1443), True, 'import numpy as np\n'), ((2768, 2791), 'numpy.abs', 'np.abs', (['(p - self.center)'], {}), '(p - self.center)\n', (2774, 2791), True, 'import numpy as np\n'), ((2831, 2857), 'numpy.clip', 'np.clip', (['offset', '(0)', 'np.inf'], {}), '(offset, 0, np.inf)\n', (2838, 2857), True, 'import numpy as np\n'), ((2892, 2919), 'numpy.clip', 'np.clip', (['offset', '(-np.inf)', '(0)'], {}), '(offset, -np.inf, 0)\n', (2899, 2919), True, 'import numpy as np\n'), ((10700, 10738), 'numpy.clip', 'np.clip', (['ray_depths', '(0)', 'self.max_depth'], {}), '(ray_depths, 0, self.max_depth)\n', (10707, 10738), True, 'import numpy as np\n'), ((13942, 13960), 'gym.spaces.Discrete', 'spaces.Discrete', (['(4)'], {}), '(4)\n', (13957, 13960), False, 'from gym import spaces\n'), ((16523, 16557), 'numpy.arctan2', 'np.arctan2', (['heading[1]', 'heading[0]'], {}), '(heading[1], heading[0])\n', (16533, 16557), True, 'import numpy as np\n'), ((24390, 24402), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (24398, 24402), True, 'import numpy as np\n'), ((24907, 24919), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (24915, 24919), True, 'import numpy as np\n'), ((25447, 25460), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (25453, 25460), True, 'import numpy as np\n'), ((26423, 26436), 'pygame.init', 'pygame.init', ([], {}), '()\n', (26434, 26436), False, 'import pygame\n'), ((26613, 26674), 'pygame.Surface', 'pygame.Surface', (['[self.world_size[0], self.world_size[1] + 10]'], {}), '([self.world_size[0], self.world_size[1] + 10])\n', (26627, 26674), False, 'import pygame\n'), ((26920, 26943), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (26941, 26943), False, 'import pygame\n'), ((27008, 27042), 'pygame.surfarray.pixels3d', 'pygame.surfarray.pixels3d', (['display'], {}), '(display)\n', (27033, 27042), False, 'import pygame\n'), ((27245, 27287), 'pygame.Rect', 'pygame.Rect', (['[i * length, 300, length, 10]'], {}), '([i * length, 300, length, 10])\n', (27256, 27287), False, 'import pygame\n'), ((27300, 27342), 'pygame.draw.rect', 'pygame.draw.rect', (['display', 'colors[i]', 'rect'], {}), '(display, colors[i], rect)\n', (27316, 27342), False, 'import pygame\n'), ((30973, 31031), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '((num_rays + 1) * 3 + 1,)'}), '(low=0, high=1, shape=((num_rays + 1) * 3 + 1,))\n', (30983, 31031), False, 'from gym import spaces\n'), ((31183, 31235), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(num_rays * 3 + 1,)'}), '(low=0, high=1, shape=(num_rays * 3 + 1,))\n', (31193, 31235), False, 'from gym import spaces\n'), ((32105, 32163), 'numpy.array', 'np.array', (['[self.world_size[0] / 4, self.world_size[1] / 4]'], {}), '([self.world_size[0] / 4, self.world_size[1] / 4])\n', (32113, 32163), True, 'import numpy as np\n'), ((32190, 32206), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (32198, 32206), True, 'import numpy as np\n'), ((32229, 32256), 'numpy.random.choice', 'np.random.choice', (['(2)'], {'size': '(2)'}), '(2, size=2)\n', (32245, 32256), True, 'import numpy as np\n'), ((36497, 36531), 'numpy.arctan2', 'np.arctan2', (['heading[1]', 'heading[0]'], {}), '(heading[1], heading[0])\n', (36507, 36531), True, 'import numpy as np\n'), ((4140, 4207), 'pygame.draw.circle', 'pygame.draw.circle', (['display', '(255, 255, 255, 0.3)', 'p', 'dist'], {'width': '(1)'}), 
'(display, (255, 255, 255, 0.3), p, dist, width=1)\n', (4158, 4207), False, 'import pygame\n'), ((8078, 8098), 'math.cos', 'math.cos', (['self.angle'], {}), '(self.angle)\n', (8086, 8098), False, 'import math\n'), ((8134, 8154), 'math.sin', 'math.sin', (['self.angle'], {}), '(self.angle)\n', (8142, 8154), False, 'import math\n'), ((8327, 8347), 'math.cos', 'math.cos', (['self.angle'], {}), '(self.angle)\n', (8335, 8347), False, 'import math\n'), ((8389, 8409), 'math.sin', 'math.sin', (['self.angle'], {}), '(self.angle)\n', (8397, 8409), False, 'import math\n'), ((13645, 13699), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '((num_rays + 1) * 3,)'}), '(low=0, high=1, shape=((num_rays + 1) * 3,))\n', (13655, 13699), False, 'from gym import spaces\n'), ((13863, 13911), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(num_rays * 3,)'}), '(low=0, high=1, shape=(num_rays * 3,))\n', (13873, 13911), False, 'from gym import spaces\n'), ((16664, 16706), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, 0, 0]]'], {}), '([ray_obs, [dist_to_goal, 0, 0]])\n', (16673, 16706), True, 'import numpy as np\n'), ((17251, 17279), 'numpy.array', 'np.array', (['obs'], {'dtype': '"""float"""'}), "(obs, dtype='float')\n", (17259, 17279), True, 'import numpy as np\n'), ((17637, 17653), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (17645, 17653), True, 'import numpy as np\n'), ((17655, 17688), 'numpy.array', 'np.array', (['[1, self.world_size[1]]'], {}), '([1, self.world_size[1]])\n', (17663, 17688), True, 'import numpy as np\n'), ((17742, 17758), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (17750, 17758), True, 'import numpy as np\n'), ((17760, 17793), 'numpy.array', 'np.array', (['[self.world_size[0], 1]'], {}), '([self.world_size[0], 1])\n', (17768, 17793), True, 'import numpy as np\n'), ((17847, 17880), 'numpy.array', 'np.array', (['[0, self.world_size[1]]'], {}), '([0, self.world_size[1]])\n', (17855, 17880), True, 'import numpy as np\n'), ((17882, 17915), 'numpy.array', 'np.array', (['[self.world_size[0], 1]'], {}), '([self.world_size[0], 1])\n', (17890, 17915), True, 'import numpy as np\n'), ((17969, 18002), 'numpy.array', 'np.array', (['[self.world_size[0], 0]'], {}), '([self.world_size[0], 0])\n', (17977, 18002), True, 'import numpy as np\n'), ((18004, 18037), 'numpy.array', 'np.array', (['[1, self.world_size[1]]'], {}), '([1, self.world_size[1]])\n', (18012, 18037), True, 'import numpy as np\n'), ((18580, 18699), 'numpy.random.uniform', 'np.random.uniform', (['[box_size[0], box_size[1]]', '[self.world_size[0] - box_size[0], self.world_size[1] - box_size[1]]'], {}), '([box_size[0], box_size[1]], [self.world_size[0] -\n box_size[0], self.world_size[1] - box_size[1]])\n', (18597, 18699), True, 'import numpy as np\n'), ((18778, 18812), 'numpy.random.uniform', 'np.random.uniform', (['self.world_size'], {}), '(self.world_size)\n', (18795, 18812), True, 'import numpy as np\n'), ((19397, 19505), 'numpy.random.uniform', 'np.random.uniform', (['[circ_rad, circ_rad]', '[self.world_size[0] - circ_rad, self.world_size[1] - circ_rad]'], {}), '([circ_rad, circ_rad], [self.world_size[0] - circ_rad, \n self.world_size[1] - circ_rad])\n', (19414, 19505), True, 'import numpy as np\n'), ((19537, 19571), 'numpy.random.uniform', 'np.random.uniform', (['self.world_size'], {}), '(self.world_size)\n', (19554, 19571), True, 'import numpy as np\n'), ((23474, 23504), 'numpy.array', 'np.array', (['visible_objects[key]'], {}), 
'(visible_objects[key])\n', (23482, 23504), True, 'import numpy as np\n'), ((23616, 23640), 'numpy.array', 'np.array', (['obstacles[key]'], {}), '(obstacles[key])\n', (23624, 23640), True, 'import numpy as np\n'), ((24091, 24114), 'numpy.abs', 'np.abs', (['(p - box_centers)'], {}), '(p - box_centers)\n', (24097, 24114), True, 'import numpy as np\n'), ((24170, 24196), 'numpy.clip', 'np.clip', (['offset', '(0)', 'np.inf'], {}), '(offset, 0, np.inf)\n', (24177, 24196), True, 'import numpy as np\n'), ((24243, 24270), 'numpy.clip', 'np.clip', (['offset', '(-np.inf)', '(0)'], {}), '(offset, -np.inf, 0)\n', (24250, 24270), True, 'import numpy as np\n'), ((24814, 24856), 'numpy.linalg.norm', 'np.linalg.norm', (['(circle_centers - p)'], {'axis': '(1)'}), '(circle_centers - p, axis=1)\n', (24828, 24856), True, 'import numpy as np\n'), ((26507, 26577), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[self.world_size[0], self.world_size[1] + 10]'], {}), '([self.world_size[0], self.world_size[1] + 10])\n', (26530, 26577), False, 'import pygame\n'), ((32586, 32689), 'numpy.random.uniform', 'np.random.uniform', (['(-self.platform_randomization_spread)', 'self.platform_randomization_spread'], {'size': '(2)'}), '(-self.platform_randomization_spread, self.\n platform_randomization_spread, size=2)\n', (32603, 32689), True, 'import numpy as np\n'), ((35997, 36042), 'numpy.abs', 'np.abs', (['(self.goal.center - self.character.pos)'], {}), '(self.goal.center - self.character.pos)\n', (36003, 36042), True, 'import numpy as np\n'), ((36638, 36680), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, 0, 0]]'], {}), '([ray_obs, [dist_to_goal, 0, 0]])\n', (36647, 36680), True, 'import numpy as np\n'), ((37282, 37310), 'numpy.array', 'np.array', (['obs'], {'dtype': '"""float"""'}), "(obs, dtype='float')\n", (37290, 37310), True, 'import numpy as np\n'), ((37451, 37483), 'numpy.array', 'np.array', (['[self.on_platform * 1]'], {}), '([self.on_platform * 1])\n', (37459, 37483), True, 'import numpy as np\n'), ((7126, 7152), 'math.cos', 'math.cos', (['(self.angle + 0.3)'], {}), '(self.angle + 0.3)\n', (7134, 7152), False, 'import math\n'), ((7197, 7223), 'math.sin', 'math.sin', (['(self.angle + 0.3)'], {}), '(self.angle + 0.3)\n', (7205, 7223), False, 'import math\n'), ((7383, 7409), 'math.cos', 'math.cos', (['(self.angle - 0.3)'], {}), '(self.angle - 0.3)\n', (7391, 7409), False, 'import math\n'), ((7454, 7480), 'math.sin', 'math.sin', (['(self.angle - 0.3)'], {}), '(self.angle - 0.3)\n', (7462, 7480), False, 'import math\n'), ((11429, 11450), 'numpy.array', 'np.array', (['WINDOW_SIZE'], {}), '(WINDOW_SIZE)\n', (11437, 11450), True, 'import numpy as np\n'), ((16819, 16874), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, heading / 3.14, 0]]'], {}), '([ray_obs, [dist_to_goal, heading / 3.14, 0]])\n', (16828, 16874), True, 'import numpy as np\n'), ((33506, 33522), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (33514, 33522), True, 'import numpy as np\n'), ((33524, 33557), 'numpy.array', 'np.array', (['[1, self.world_size[1]]'], {}), '([1, self.world_size[1]])\n', (33532, 33557), True, 'import numpy as np\n'), ((33615, 33631), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (33623, 33631), True, 'import numpy as np\n'), ((33633, 33666), 'numpy.array', 'np.array', (['[self.world_size[0], 1]'], {}), '([self.world_size[0], 1])\n', (33641, 33666), True, 'import numpy as np\n'), ((33724, 33757), 'numpy.array', 'np.array', (['[0, self.world_size[1]]'], {}), '([0, 
self.world_size[1]])\n', (33732, 33757), True, 'import numpy as np\n'), ((33759, 33792), 'numpy.array', 'np.array', (['[self.world_size[0], 1]'], {}), '([self.world_size[0], 1])\n', (33767, 33792), True, 'import numpy as np\n'), ((33850, 33883), 'numpy.array', 'np.array', (['[self.world_size[0], 0]'], {}), '([self.world_size[0], 0])\n', (33858, 33883), True, 'import numpy as np\n'), ((33885, 33918), 'numpy.array', 'np.array', (['[1, self.world_size[1]]'], {}), '([1, self.world_size[1]])\n', (33893, 33918), True, 'import numpy as np\n'), ((36793, 36848), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, heading / 3.14, 0]]'], {}), '([ray_obs, [dist_to_goal, heading / 3.14, 0]])\n', (36802, 36848), True, 'import numpy as np\n'), ((4617, 4635), 'numpy.cos', 'np.cos', (['self.angle'], {}), '(self.angle)\n', (4623, 4635), True, 'import numpy as np\n'), ((4637, 4655), 'numpy.sin', 'np.sin', (['self.angle'], {}), '(self.angle)\n', (4643, 4655), True, 'import numpy as np\n'), ((7266, 7286), 'math.cos', 'math.cos', (['self.angle'], {}), '(self.angle)\n', (7274, 7286), False, 'import math\n'), ((7315, 7335), 'math.sin', 'math.sin', (['self.angle'], {}), '(self.angle)\n', (7323, 7335), False, 'import math\n'), ((17009, 17083), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, heading / 3.14, self.character.angle]]'], {}), '([ray_obs, [dist_to_goal, heading / 3.14, self.character.angle]])\n', (17018, 17083), True, 'import numpy as np\n'), ((36983, 37057), 'numpy.vstack', 'np.vstack', (['[ray_obs, [dist_to_goal, heading / 3.14, self.character.angle]]'], {}), '([ray_obs, [dist_to_goal, heading / 3.14, self.character.angle]])\n', (36992, 37057), True, 'import numpy as np\n'), ((4403, 4421), 'numpy.cos', 'np.cos', (['self.angle'], {}), '(self.angle)\n', (4409, 4421), True, 'import numpy as np\n'), ((4423, 4441), 'numpy.sin', 'np.sin', (['self.angle'], {}), '(self.angle)\n', (4429, 4441), True, 'import numpy as np\n'), ((9727, 9745), 'numpy.cos', 'np.cos', (['self.angle'], {}), '(self.angle)\n', (9733, 9745), True, 'import numpy as np\n'), ((9747, 9765), 'numpy.sin', 'np.sin', (['self.angle'], {}), '(self.angle)\n', (9753, 9765), True, 'import numpy as np\n')]
|
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""An implementation of the FedProx algorithm.
Based on the paper:
"Federated Optimization in Heterogeneous Networks" by <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, and <NAME>. MLSys 2020.
See https://arxiv.org/abs/1812.06127 for the full paper.
"""
import collections
from typing import Callable, Optional, Union
import tensorflow as tf
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import mean
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import dataset_reduce
from tensorflow_federated.python.learning.optimizers import optimizer as optimizer_base
from tensorflow_federated.python.learning.templates import client_works
from tensorflow_federated.python.learning.templates import composers
from tensorflow_federated.python.learning.templates import distributors
from tensorflow_federated.python.learning.templates import finalizers
from tensorflow_federated.python.learning.templates import learning_process
from tensorflow_federated.python.tensorflow_libs import tensor_utils
def build_proximal_client_update_with_tff_optimizer(
model_fn,
proximal_strength: float,
use_experimental_simulation_loop: bool = False):
"""Creates client update logic in FedProx using a TFF optimizer.
In contrast to using a `tf.keras.optimizers.Optimizer`, we avoid creating
`tf.Variable`s associated with the optimizer state within the scope of the
client work, as they are not necessary. This also means that the client's
model weights are updated by computing `optimizer.next` and then assigning
the result to the model weights (while a `tf.keras.optimizers.Optimizer` will
modify the model weight in place using `optimizer.apply_gradients`).
Args:
model_fn: A no-arg callable returning a `tff.learning.Model`.
proximal_strength: A nonnegative float representing the parameter of
FedProx's regularization term. When set to `0`, the client update reduces
to that of FedAvg. Higher values prevent clients from moving too far from
the server model during local training.
use_experimental_simulation_loop: Controls the reduce loop function for the
input dataset. An experimental reduce loop is used for simulation.
Returns:
A `tf.function`.
"""
model = model_fn()
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
use_experimental_simulation_loop)
@tf.function
def client_update(optimizer, initial_weights, data):
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
initial_weights)
def reduce_fn(state, batch):
"""Trains a `tff.learning.Model` on a batch of data."""
num_examples_sum, optimizer_state = state
with tf.GradientTape() as tape:
output = model.forward_pass(batch, training=True)
gradients = tape.gradient(output.loss, model_weights.trainable)
proximal_delta = tf.nest.map_structure(tf.subtract,
model_weights.trainable,
initial_weights.trainable)
proximal_term = tf.nest.map_structure(lambda x: proximal_strength * x,
proximal_delta)
gradients = tf.nest.map_structure(tf.add, gradients, proximal_term)
optimizer_state, updated_weights = optimizer.next(optimizer_state,
model_weights.trainable,
gradients)
tf.nest.map_structure(lambda a, b: a.assign(b), model_weights.trainable,
updated_weights)
if output.num_examples is None:
num_examples_sum += tf.shape(output.predictions, out_type=tf.int64)[0]
else:
num_examples_sum += tf.cast(output.num_examples, tf.int64)
return num_examples_sum, optimizer_state
def initial_state_for_reduce_fn():
trainable_tensor_specs = tf.nest.map_structure(
lambda v: tf.TensorSpec(v.shape, v.dtype), model_weights.trainable)
return tf.zeros(
shape=[],
dtype=tf.int64), optimizer.initialize(trainable_tensor_specs)
num_examples, _ = dataset_reduce_fn(
reduce_fn, data, initial_state_fn=initial_state_for_reduce_fn)
client_update = tf.nest.map_structure(tf.subtract,
initial_weights.trainable,
model_weights.trainable)
model_output = model.report_local_outputs()
stat_output = collections.OrderedDict(num_examples=num_examples)
# TODO(b/122071074): Consider moving this functionality into
# tff.federated_mean?
client_update, has_non_finite_delta = (
tensor_utils.zero_all_if_any_non_finite(client_update))
# Zero out the weight if there are any non-finite values.
if has_non_finite_delta > 0:
client_weight = tf.constant(0.0)
else:
client_weight = tf.cast(num_examples, tf.float32)
return client_works.ClientResult(
update=client_update,
update_weight=client_weight), model_output, stat_output
return client_update
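# Note on the proximal term above (illustrative comment, not part of the
# original module): FedProx perturbs each client gradient with
# proximal_strength * (w_client - w_server), penalizing drift from the server
# model. A minimal standalone sketch of that adjustment, assuming nested
# structures of tf.Tensors:
#
#   delta = tf.nest.map_structure(tf.subtract, client_weights, server_weights)
#   proximal = tf.nest.map_structure(lambda d: proximal_strength * d, delta)
#   gradients = tf.nest.map_structure(tf.add, gradients, proximal)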
def build_proximal_client_update_with_keras_optimizer(
model_fn,
proximal_strength: float,
use_experimental_simulation_loop: bool = False):
"""Creates client update logic in FedProx using a `tf.keras` optimizer.
In contrast to using a `tff.learning.optimizers.Optimizer`, we have to
maintain `tf.Variable`s associated with the optimizer state within the scope
of the client work. Additionally, the client model weights are modified in
place by using `optimizer.apply_gradients`).
Args:
model_fn: A no-arg callable returning a `tff.learning.Model`.
proximal_strength: A nonnegative float representing the parameter of
FedProx's regularization term. When set to `0`, the client update reduces
to that of FedAvg. Higher values prevent clients from moving too far from
the server model during local training.
use_experimental_simulation_loop: Controls the reduce loop function for the
input dataset. An experimental reduce loop is used for simulation.
Returns:
A `tf.function`.
"""
model = model_fn()
dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
use_experimental_simulation_loop)
@tf.function
def client_update(optimizer, initial_weights, data):
model_weights = model_utils.ModelWeights.from_model(model)
tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
initial_weights)
def reduce_fn(num_examples_sum, batch):
"""Trains a `tff.learning.Model` on a batch of data."""
with tf.GradientTape() as tape:
output = model.forward_pass(batch, training=True)
gradients = tape.gradient(output.loss, model_weights.trainable)
proximal_delta = tf.nest.map_structure(tf.subtract,
model_weights.trainable,
initial_weights.trainable)
proximal_term = tf.nest.map_structure(lambda x: proximal_strength * x,
proximal_delta)
gradients = tf.nest.map_structure(tf.add, gradients, proximal_term)
grads_and_vars = zip(gradients, model_weights.trainable)
optimizer.apply_gradients(grads_and_vars)
# TODO(b/199782787): Add a unit test for a model that does not compute
# `num_examples` in its forward pass.
if output.num_examples is None:
num_examples_sum += tf.shape(output.predictions, out_type=tf.int64)[0]
else:
num_examples_sum += tf.cast(output.num_examples, tf.int64)
return num_examples_sum
def initial_state_for_reduce_fn():
return tf.zeros(shape=[], dtype=tf.int64)
num_examples = dataset_reduce_fn(
reduce_fn, data, initial_state_fn=initial_state_for_reduce_fn)
client_update = tf.nest.map_structure(tf.subtract,
initial_weights.trainable,
model_weights.trainable)
model_output = model.report_local_outputs()
stat_output = collections.OrderedDict(num_examples=num_examples)
# TODO(b/122071074): Consider moving this functionality into
# tff.federated_mean?
client_update, has_non_finite_delta = (
tensor_utils.zero_all_if_any_non_finite(client_update))
# Zero out the weight if there are any non-finite values.
if has_non_finite_delta > 0:
client_weight = tf.constant(0.0)
else:
client_weight = tf.cast(num_examples, tf.float32)
return client_works.ClientResult(
update=client_update,
update_weight=client_weight), model_output, stat_output
return client_update
def build_fed_prox_client_work(
model_fn: Callable[[], model_lib.Model],
proximal_strength: float,
optimizer_fn: Union[optimizer_base.Optimizer,
Callable[[], tf.keras.optimizers.Optimizer]],
use_experimental_simulation_loop: bool = False
) -> client_works.ClientWorkProcess:
"""Creates a `ClientWorkProcess` for federated averaging.
This client work is constructed in slightly different manners depending on
whether `optimizer_fn` is a `tff.learning.optimizers.Optimizer`, or a no-arg
callable returning a `tf.keras.optimizers.Optimizer`.
If it is a `tff.learning.optimizers.Optimizer`, we avoid creating
`tf.Variable`s associated with the optimizer state within the scope of the
client work, as they are not necessary. This also means that the client's
model weights are updated by computing `optimizer.next` and then assigning
the result to the model weights (while a `tf.keras.optimizers.Optimizer` will
modify the model weight in place using `optimizer.apply_gradients`).
Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This method
      must *not* capture TensorFlow tensors or variables and use them. The model
      must be constructed entirely from scratch on each invocation; returning
      the same pre-constructed model on each call will result in an error.
proximal_strength: A nonnegative float representing the parameter of
FedProx's regularization term. When set to `0`, the algorithm reduces to
FedAvg. Higher values prevent clients from moving too far from the server
model during local training.
optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg callable
that returns a `tf.keras.Optimizer`.
use_experimental_simulation_loop: Controls the reduce loop function for
      the input dataset. An experimental reduce loop is used for simulation. It is
currently necessary to set this flag to True for performant GPU
simulations.
Returns:
A `ClientWorkProcess`.
"""
with tf.Graph().as_default():
# Wrap model construction in a graph to avoid polluting the global context
# with variables created for this model.
model = model_fn()
data_type = computation_types.SequenceType(model.input_spec)
weights_type = model_utils.weights_type_from_model(model)
if isinstance(optimizer_fn, optimizer_base.Optimizer):
@computations.tf_computation(weights_type, data_type)
def client_update_computation(initial_model_weights, dataset):
client_update = build_proximal_client_update_with_tff_optimizer(
model_fn, proximal_strength, use_experimental_simulation_loop)
return client_update(optimizer_fn, initial_model_weights, dataset)
else:
@computations.tf_computation(weights_type, data_type)
def client_update_computation(initial_model_weights, dataset):
optimizer = optimizer_fn()
client_update = build_proximal_client_update_with_keras_optimizer(
model_fn, proximal_strength, use_experimental_simulation_loop)
return client_update(optimizer, initial_model_weights, dataset)
@computations.federated_computation
def init_fn():
return intrinsics.federated_value((), placements.SERVER)
@computations.federated_computation(
init_fn.type_signature.result, computation_types.at_clients(weights_type),
computation_types.at_clients(data_type))
def next_fn(state, weights, client_data):
client_result, model_outputs, stat_output = intrinsics.federated_map(
client_update_computation, (weights, client_data))
train_metrics = model.federated_output_computation(model_outputs)
stat_metrics = intrinsics.federated_sum(stat_output)
measurements = intrinsics.federated_zip(
collections.OrderedDict(train=train_metrics, stat=stat_metrics))
return measured_process.MeasuredProcessOutput(state, client_result,
measurements)
return client_works.ClientWorkProcess(init_fn, next_fn)
DEFAULT_SERVER_OPTIMIZER_FN = lambda: tf.keras.optimizers.SGD(learning_rate=1.0)
def build_example_weighted_fed_prox_process(
model_fn: Callable[[], model_lib.Model],
proximal_strength: float,
client_optimizer_fn: Union[optimizer_base.Optimizer,
Callable[[], tf.keras.optimizers.Optimizer]],
server_optimizer_fn: Union[optimizer_base.Optimizer, Callable[
[], tf.keras.optimizers.Optimizer]] = DEFAULT_SERVER_OPTIMIZER_FN,
distributor: Optional[distributors.DistributionProcess] = None,
model_update_aggregation_factory: Optional[
factory.WeightedAggregationFactory] = None,
use_experimental_simulation_loop: bool = False
) -> learning_process.LearningProcess:
"""Builds a learning process that performs the FedProx algorithm.
This function creates a `LearningProcess` that performs example-weighted
FedProx on client models. This algorithm behaves the same as federated
averaging, except that it uses a proximal regularization term that encourages
clients to not drift too far from the server model.
The iterative process has the following methods inherited from
`tff.learning.templates.LearningProcess`:
* `initialize`: A `tff.Computation` with the functional type signature
`( -> S@SERVER)`, where `S` is a `LearningAlgorithmState` representing the
initial state of the server.
* `next`: A `tff.Computation` with the functional type signature
`(<S@SERVER, {B*}@CLIENTS> -> <L@SERVER>)` where `S` is a
`LearningAlgorithmState` whose type matches the output of `initialize`
and `{B*}@CLIENTS` represents the client datasets. The output `L`
contains the updated server state, as well as metrics that are the result
of `tff.learning.Model.federated_output_computation` during client
training, and any other metrics from broadcast and aggregation processes.
* `report`: A `tff.Computation` with type signature `( -> M@SERVER)`, where
`M` represents the type of the model weights used during training.
Each time the `next` method is called, the server model is broadcast to each
client using a broadcast function. For each client, local training is
performed using `client_optimizer_fn`. Each client computes the difference
between the client model after training and the initial broadcast model.
These model deltas are then aggregated at the server using a weighted
  aggregation function. Clients are weighted by the number of examples they see
  throughout local training. The aggregate model delta is applied at the server
using a server optimizer, as in the FedOpt framework proposed in
[Reddi et al., 2021](https://arxiv.org/abs/2003.00295).
Note: The default server optimizer function is `tf.keras.optimizers.SGD`
with a learning rate of 1.0, which corresponds to adding the model delta to
the current server model. This recovers the original FedProx algorithm in
[Li et al., 2020](https://arxiv.org/abs/1812.06127). More
sophisticated federated averaging procedures may use different learning rates
or server optimizers.
Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This method
      must *not* capture TensorFlow tensors or variables and use them. The model
      must be constructed entirely from scratch on each invocation; returning
      the same pre-constructed model on each call will result in an error.
proximal_strength: A nonnegative float representing the parameter of
FedProx's regularization term. When set to `0`, the algorithm reduces to
FedAvg. Higher values prevent clients from moving too far from the server
model during local training.
client_optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg
callable that returns a `tf.keras.Optimizer`.
server_optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg
callable that returns a `tf.keras.Optimizer`. By default, this uses
`tf.keras.optimizers.SGD` with a learning rate of 1.0.
distributor: An optional `DistributionProcess` that broadcasts the model
weights on the server to the clients. If set to `None`, the distributor is
constructed via `distributors.build_broadcast_process`.
model_update_aggregation_factory: An optional
`tff.aggregators.WeightedAggregationFactory` used to aggregate client
updates on the server. If `None`, this is set to
`tff.aggregators.MeanFactory`.
use_experimental_simulation_loop: Controls the reduce loop function for
      the input dataset. An experimental reduce loop is used for simulation. It is
currently necessary to set this flag to True for performant GPU
simulations.
Returns:
A `LearningProcess`.
Raises:
    ValueError: If `proximal_strength` is not a nonnegative float.
"""
if not isinstance(proximal_strength, float) or proximal_strength < 0.0:
raise ValueError(
'proximal_strength must be a nonnegative float, found {}'.format(
proximal_strength))
py_typecheck.check_callable(model_fn)
@computations.tf_computation()
def initial_model_weights_fn():
return model_utils.ModelWeights.from_model(model_fn())
model_weights_type = initial_model_weights_fn.type_signature.result
if distributor is None:
distributor = distributors.build_broadcast_process(model_weights_type)
if model_update_aggregation_factory is None:
model_update_aggregation_factory = mean.MeanFactory()
py_typecheck.check_type(model_update_aggregation_factory,
factory.WeightedAggregationFactory)
aggregator = model_update_aggregation_factory.create(
model_weights_type.trainable, computation_types.TensorType(tf.float32))
process_signature = aggregator.next.type_signature
input_client_value_type = process_signature.parameter[1]
result_server_value_type = process_signature.result[1]
if input_client_value_type.member != result_server_value_type.member:
raise TypeError('`model_update_aggregation_factory` does not produce a '
'compatible `AggregationProcess`. The processes must '
'retain the type structure of the inputs on the '
f'server, but got {input_client_value_type.member} != '
f'{result_server_value_type.member}.')
client_work = build_fed_prox_client_work(model_fn, proximal_strength,
client_optimizer_fn,
use_experimental_simulation_loop)
finalizer = finalizers.build_apply_optimizer_finalizer(
server_optimizer_fn, model_weights_type)
return composers.compose_learning_process(initial_model_weights_fn,
distributor, client_work,
aggregator, finalizer)
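# A minimal usage sketch of the process built above. `my_model_fn` and
# `federated_train_data` are placeholders the caller must supply, and the
# attributes on the output follow the `tff.learning.templates.LearningProcess`
# contract described in the docstring; this is illustrative only, not part of
# the library surface.
#
#   fedprox_process = build_example_weighted_fed_prox_process(
#       model_fn=my_model_fn,
#       proximal_strength=0.1,
#       client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.01))
#   state = fedprox_process.initialize()
#   for _ in range(5):
#       output = fedprox_process.next(state, federated_train_data)
#       state = output.state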
|
[
"tensorflow_federated.python.learning.templates.client_works.ClientWorkProcess",
"tensorflow.keras.optimizers.SGD",
"tensorflow_federated.python.learning.templates.distributors.build_broadcast_process",
"tensorflow.nest.map_structure",
"tensorflow_federated.python.common_libs.py_typecheck.check_callable",
"tensorflow_federated.python.common_libs.py_typecheck.check_type",
"tensorflow_federated.python.tensorflow_libs.tensor_utils.zero_all_if_any_non_finite",
"tensorflow_federated.python.core.api.computations.tf_computation",
"tensorflow_federated.python.learning.model_utils.ModelWeights.from_model",
"tensorflow.TensorSpec",
"tensorflow.cast",
"tensorflow_federated.python.core.impl.types.computation_types.at_clients",
"tensorflow_federated.python.core.impl.federated_context.intrinsics.federated_sum",
"tensorflow_federated.python.core.impl.federated_context.intrinsics.federated_value",
"tensorflow_federated.python.aggregators.mean.MeanFactory",
"tensorflow.constant",
"tensorflow.Graph",
"tensorflow_federated.python.learning.model_utils.weights_type_from_model",
"tensorflow_federated.python.learning.templates.composers.compose_learning_process",
"tensorflow_federated.python.core.templates.measured_process.MeasuredProcessOutput",
"tensorflow_federated.python.core.impl.types.computation_types.TensorType",
"tensorflow_federated.python.core.impl.federated_context.intrinsics.federated_map",
"tensorflow.shape",
"tensorflow.zeros",
"collections.OrderedDict",
"tensorflow_federated.python.learning.templates.client_works.ClientResult",
"tensorflow_federated.python.learning.framework.dataset_reduce.build_dataset_reduce_fn",
"tensorflow_federated.python.learning.templates.finalizers.build_apply_optimizer_finalizer",
"tensorflow.GradientTape",
"tensorflow_federated.python.core.impl.types.computation_types.SequenceType"
] |
[((3621, 3693), 'tensorflow_federated.python.learning.framework.dataset_reduce.build_dataset_reduce_fn', 'dataset_reduce.build_dataset_reduce_fn', (['use_experimental_simulation_loop'], {}), '(use_experimental_simulation_loop)\n', (3659, 3693), False, 'from tensorflow_federated.python.learning.framework import dataset_reduce\n'), ((7617, 7689), 'tensorflow_federated.python.learning.framework.dataset_reduce.build_dataset_reduce_fn', 'dataset_reduce.build_dataset_reduce_fn', (['use_experimental_simulation_loop'], {}), '(use_experimental_simulation_loop)\n', (7655, 7689), False, 'from tensorflow_federated.python.learning.framework import dataset_reduce\n'), ((12380, 12428), 'tensorflow_federated.python.core.impl.types.computation_types.SequenceType', 'computation_types.SequenceType', (['model.input_spec'], {}), '(model.input_spec)\n', (12410, 12428), False, 'from tensorflow_federated.python.core.impl.types import computation_types\n'), ((12446, 12488), 'tensorflow_federated.python.learning.model_utils.weights_type_from_model', 'model_utils.weights_type_from_model', (['model'], {}), '(model)\n', (12481, 12488), False, 'from tensorflow_federated.python.learning import model_utils\n'), ((14127, 14175), 'tensorflow_federated.python.learning.templates.client_works.ClientWorkProcess', 'client_works.ClientWorkProcess', (['init_fn', 'next_fn'], {}), '(init_fn, next_fn)\n', (14157, 14175), False, 'from tensorflow_federated.python.learning.templates import client_works\n'), ((14216, 14258), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(1.0)'}), '(learning_rate=1.0)\n', (14239, 14258), True, 'import tensorflow as tf\n'), ((19222, 19259), 'tensorflow_federated.python.common_libs.py_typecheck.check_callable', 'py_typecheck.check_callable', (['model_fn'], {}), '(model_fn)\n', (19249, 19259), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((19264, 19293), 'tensorflow_federated.python.core.api.computations.tf_computation', 'computations.tf_computation', ([], {}), '()\n', (19291, 19293), False, 'from tensorflow_federated.python.core.api import computations\n'), ((19668, 19766), 'tensorflow_federated.python.common_libs.py_typecheck.check_type', 'py_typecheck.check_type', (['model_update_aggregation_factory', 'factory.WeightedAggregationFactory'], {}), '(model_update_aggregation_factory, factory.\n WeightedAggregationFactory)\n', (19691, 19766), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((20748, 20835), 'tensorflow_federated.python.learning.templates.finalizers.build_apply_optimizer_finalizer', 'finalizers.build_apply_optimizer_finalizer', (['server_optimizer_fn', 'model_weights_type'], {}), '(server_optimizer_fn,\n model_weights_type)\n', (20790, 20835), False, 'from tensorflow_federated.python.learning.templates import finalizers\n'), ((20848, 20961), 'tensorflow_federated.python.learning.templates.composers.compose_learning_process', 'composers.compose_learning_process', (['initial_model_weights_fn', 'distributor', 'client_work', 'aggregator', 'finalizer'], {}), '(initial_model_weights_fn, distributor,\n client_work, aggregator, finalizer)\n', (20882, 20961), False, 'from tensorflow_federated.python.learning.templates import composers\n'), ((3792, 3834), 'tensorflow_federated.python.learning.model_utils.ModelWeights.from_model', 'model_utils.ModelWeights.from_model', (['model'], {}), '(model)\n', (3827, 3834), False, 'from tensorflow_federated.python.learning import model_utils\n'), ((5678, 5769), 
'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['tf.subtract', 'initial_weights.trainable', 'model_weights.trainable'], {}), '(tf.subtract, initial_weights.trainable, model_weights\n .trainable)\n', (5699, 5769), True, 'import tensorflow as tf\n'), ((5915, 5965), 'collections.OrderedDict', 'collections.OrderedDict', ([], {'num_examples': 'num_examples'}), '(num_examples=num_examples)\n', (5938, 5965), False, 'import collections\n'), ((6110, 6164), 'tensorflow_federated.python.tensorflow_libs.tensor_utils.zero_all_if_any_non_finite', 'tensor_utils.zero_all_if_any_non_finite', (['client_update'], {}), '(client_update)\n', (6149, 6164), False, 'from tensorflow_federated.python.tensorflow_libs import tensor_utils\n'), ((7788, 7830), 'tensorflow_federated.python.learning.model_utils.ModelWeights.from_model', 'model_utils.ModelWeights.from_model', (['model'], {}), '(model)\n', (7823, 7830), False, 'from tensorflow_federated.python.learning import model_utils\n'), ((9304, 9395), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['tf.subtract', 'initial_weights.trainable', 'model_weights.trainable'], {}), '(tf.subtract, initial_weights.trainable, model_weights\n .trainable)\n', (9325, 9395), True, 'import tensorflow as tf\n'), ((9541, 9591), 'collections.OrderedDict', 'collections.OrderedDict', ([], {'num_examples': 'num_examples'}), '(num_examples=num_examples)\n', (9564, 9591), False, 'import collections\n'), ((9736, 9790), 'tensorflow_federated.python.tensorflow_libs.tensor_utils.zero_all_if_any_non_finite', 'tensor_utils.zero_all_if_any_non_finite', (['client_update'], {}), '(client_update)\n', (9775, 9790), False, 'from tensorflow_federated.python.tensorflow_libs import tensor_utils\n'), ((12553, 12605), 'tensorflow_federated.python.core.api.computations.tf_computation', 'computations.tf_computation', (['weights_type', 'data_type'], {}), '(weights_type, data_type)\n', (12580, 12605), False, 'from tensorflow_federated.python.core.api import computations\n'), ((12905, 12957), 'tensorflow_federated.python.core.api.computations.tf_computation', 'computations.tf_computation', (['weights_type', 'data_type'], {}), '(weights_type, data_type)\n', (12932, 12957), False, 'from tensorflow_federated.python.core.api import computations\n'), ((13341, 13390), 'tensorflow_federated.python.core.impl.federated_context.intrinsics.federated_value', 'intrinsics.federated_value', (['()', 'placements.SERVER'], {}), '((), placements.SERVER)\n', (13367, 13390), False, 'from tensorflow_federated.python.core.impl.federated_context import intrinsics\n'), ((13651, 13726), 'tensorflow_federated.python.core.impl.federated_context.intrinsics.federated_map', 'intrinsics.federated_map', (['client_update_computation', '(weights, client_data)'], {}), '(client_update_computation, (weights, client_data))\n', (13675, 13726), False, 'from tensorflow_federated.python.core.impl.federated_context import intrinsics\n'), ((13825, 13862), 'tensorflow_federated.python.core.impl.federated_context.intrinsics.federated_sum', 'intrinsics.federated_sum', (['stat_output'], {}), '(stat_output)\n', (13849, 13862), False, 'from tensorflow_federated.python.core.impl.federated_context import intrinsics\n'), ((13992, 14066), 'tensorflow_federated.python.core.templates.measured_process.MeasuredProcessOutput', 'measured_process.MeasuredProcessOutput', (['state', 'client_result', 'measurements'], {}), '(state, client_result, measurements)\n', (14030, 14066), False, 'from tensorflow_federated.python.core.templates import 
measured_process\n'), ((13468, 13510), 'tensorflow_federated.python.core.impl.types.computation_types.at_clients', 'computation_types.at_clients', (['weights_type'], {}), '(weights_type)\n', (13496, 13510), False, 'from tensorflow_federated.python.core.impl.types import computation_types\n'), ((13518, 13557), 'tensorflow_federated.python.core.impl.types.computation_types.at_clients', 'computation_types.at_clients', (['data_type'], {}), '(data_type)\n', (13546, 13557), False, 'from tensorflow_federated.python.core.impl.types import computation_types\n'), ((19503, 19559), 'tensorflow_federated.python.learning.templates.distributors.build_broadcast_process', 'distributors.build_broadcast_process', (['model_weights_type'], {}), '(model_weights_type)\n', (19539, 19559), False, 'from tensorflow_federated.python.learning.templates import distributors\n'), ((19647, 19665), 'tensorflow_federated.python.aggregators.mean.MeanFactory', 'mean.MeanFactory', ([], {}), '()\n', (19663, 19665), False, 'from tensorflow_federated.python.aggregators import mean\n'), ((19880, 19920), 'tensorflow_federated.python.core.impl.types.computation_types.TensorType', 'computation_types.TensorType', (['tf.float32'], {}), '(tf.float32)\n', (19908, 19920), False, 'from tensorflow_federated.python.core.impl.types import computation_types\n'), ((4279, 4370), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['tf.subtract', 'model_weights.trainable', 'initial_weights.trainable'], {}), '(tf.subtract, model_weights.trainable, initial_weights\n .trainable)\n', (4300, 4370), True, 'import tensorflow as tf\n'), ((4478, 4548), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda x: proximal_strength * x)', 'proximal_delta'], {}), '(lambda x: proximal_strength * x, proximal_delta)\n', (4499, 4548), True, 'import tensorflow as tf\n'), ((4611, 4666), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['tf.add', 'gradients', 'proximal_term'], {}), '(tf.add, gradients, proximal_term)\n', (4632, 4666), True, 'import tensorflow as tf\n'), ((6283, 6299), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (6294, 6299), True, 'import tensorflow as tf\n'), ((6332, 6365), 'tensorflow.cast', 'tf.cast', (['num_examples', 'tf.float32'], {}), '(num_examples, tf.float32)\n', (6339, 6365), True, 'import tensorflow as tf\n'), ((6378, 6454), 'tensorflow_federated.python.learning.templates.client_works.ClientResult', 'client_works.ClientResult', ([], {'update': 'client_update', 'update_weight': 'client_weight'}), '(update=client_update, update_weight=client_weight)\n', (6403, 6454), False, 'from tensorflow_federated.python.learning.templates import client_works\n'), ((8238, 8329), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['tf.subtract', 'model_weights.trainable', 'initial_weights.trainable'], {}), '(tf.subtract, model_weights.trainable, initial_weights\n .trainable)\n', (8259, 8329), True, 'import tensorflow as tf\n'), ((8437, 8507), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda x: proximal_strength * x)', 'proximal_delta'], {}), '(lambda x: proximal_strength * x, proximal_delta)\n', (8458, 8507), True, 'import tensorflow as tf\n'), ((8570, 8625), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['tf.add', 'gradients', 'proximal_term'], {}), '(tf.add, gradients, proximal_term)\n', (8591, 8625), True, 'import tensorflow as tf\n'), ((9139, 9173), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[]', 'dtype': 'tf.int64'}), '(shape=[], dtype=tf.int64)\n', 
(9147, 9173), True, 'import tensorflow as tf\n'), ((9909, 9925), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (9920, 9925), True, 'import tensorflow as tf\n'), ((9958, 9991), 'tensorflow.cast', 'tf.cast', (['num_examples', 'tf.float32'], {}), '(num_examples, tf.float32)\n', (9965, 9991), True, 'import tensorflow as tf\n'), ((10004, 10080), 'tensorflow_federated.python.learning.templates.client_works.ClientResult', 'client_works.ClientResult', ([], {'update': 'client_update', 'update_weight': 'client_weight'}), '(update=client_update, update_weight=client_weight)\n', (10029, 10080), False, 'from tensorflow_federated.python.learning.templates import client_works\n'), ((13916, 13979), 'collections.OrderedDict', 'collections.OrderedDict', ([], {'train': 'train_metrics', 'stat': 'stat_metrics'}), '(train=train_metrics, stat=stat_metrics)\n', (13939, 13979), False, 'import collections\n'), ((4100, 4117), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4115, 4117), True, 'import tensorflow as tf\n'), ((5171, 5209), 'tensorflow.cast', 'tf.cast', (['output.num_examples', 'tf.int64'], {}), '(output.num_examples, tf.int64)\n', (5178, 5209), True, 'import tensorflow as tf\n'), ((5443, 5477), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[]', 'dtype': 'tf.int64'}), '(shape=[], dtype=tf.int64)\n', (5451, 5477), True, 'import tensorflow as tf\n'), ((8059, 8076), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8074, 8076), True, 'import tensorflow as tf\n'), ((9016, 9054), 'tensorflow.cast', 'tf.cast', (['output.num_examples', 'tf.int64'], {}), '(output.num_examples, tf.int64)\n', (9023, 9054), True, 'import tensorflow as tf\n'), ((12194, 12204), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12202, 12204), True, 'import tensorflow as tf\n'), ((5080, 5127), 'tensorflow.shape', 'tf.shape', (['output.predictions'], {'out_type': 'tf.int64'}), '(output.predictions, out_type=tf.int64)\n', (5088, 5127), True, 'import tensorflow as tf\n'), ((5372, 5403), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['v.shape', 'v.dtype'], {}), '(v.shape, v.dtype)\n', (5385, 5403), True, 'import tensorflow as tf\n'), ((8925, 8972), 'tensorflow.shape', 'tf.shape', (['output.predictions'], {'out_type': 'tf.int64'}), '(output.predictions, out_type=tf.int64)\n', (8933, 8972), True, 'import tensorflow as tf\n')]
|
from shapely import geometry
from shapely.geometry import shape, Point
import geohash as gh
import numpy as np
import pandas as pd
import sys
import datetime
import random
from random import choices
# get train acc and test acc
# Train acc from 2017 to 2019 May
#Test acc from 2019 May to Dec 2019
def traintestdataAcc(data, city):
train_data_2017_acc = data.loc[(data['UJAHR'] == 2017)]
train_data_2018_acc = data.loc[
(data['UJAHR'] == 2018)]
train_data_2019_acc = data.loc[
(data['UMONAT'] <= 5) & (data['UJAHR'] == 2019)]
frames = [train_data_2017_acc, train_data_2018_acc,train_data_2019_acc]
train_data_acc = pd.concat(frames)
train_data_acc.to_csv('../../../data_preprocessing/data/regions/'+city+'/train_acc.csv',index=False)
test_data_acc = data.loc[
(data['UMONAT'] > 5) & (data['UJAHR'] == 2019)]
test_data_acc.to_csv('../../../data_preprocessing/data/regions/'+city+'/test_acc.csv',index=False)
return train_data_acc,test_data_acc
def random_latlong(geohash):
dic = gh.bbox(geohash)
# getting min, max lat/lng
min_lng = dic.get('w')
min_lat = dic.get('s')
max_lng = dic.get('e')
max_lat = dic.get('n')
# generate random float between [min_lng, max_lng)
long = np.random.uniform(min_lng, max_lng)
# generate random float between [min_lat, max_lat)
lat = np.random.uniform(min_lat, max_lat)
return lat, long
def dow(date):
dayNumber = date.weekday()
day = -1
if dayNumber == 6:
day = 1
else:
day = dayNumber + 2
return day
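# Sanity-check examples for `dow` (dates chosen for illustration): it maps
# Python's Monday=0..Sunday=6 weekday numbering to the scheme used by the
# accident data, where Sunday=1 and Saturday=7 (assumed UWOCHENTAG convention).
#   dow(datetime.date(2019, 1, 6))  # a Sunday -> 1
#   dow(datetime.date(2019, 1, 7))  # a Monday -> 2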
def find_t_nonACC(t):
tm = str(t)
dateTimesplit = tm.split(' ')
dateFind = dateTimesplit[0]
timeFind = dateTimesplit[1]
datesplit = dateFind.split('-')
timesplit = timeFind.split(':')
frmt = '%Y-%m-%d'
datsend = datetime.datetime.strptime(dateFind, frmt)
dayofweek = dow(datsend)
year, month, day = datesplit[0], datesplit[1], datesplit[2]
month = int(month)
hour = int(timesplit[0])
return year, month, dayofweek, hour
def randomtimes(geohash, stime, etime, n):
frmt = '%d-%m-%Y %H:%M:%S'
stime = datetime.datetime.strptime(stime, frmt)
etime = datetime.datetime.strptime(etime, frmt)
td = etime - stime
k = []
t = random.random() * td + stime
year, month, dayofweek, hour = find_t_nonACC(t)
year = int(year)
lat, long = random_latlong(geohash)
return True, lat, long, year, month, dayofweek,hour
def trainNonacc(hann_grid_zeroacc,train,city):
t = []
a=[]
no_of_acc=len(train.index)
print('no of acc=',no_of_acc)
no_of_nonacc=no_of_acc*3
print('no of non acc in train=',no_of_nonacc)
    # keep sampling until the requested number of unique non-accident cases is reached
    count = 0
    while count < no_of_nonacc:
        geohashVal=hann_grid_zeroacc['geohash'].values # 153m x153 m all geohashes
        geoSelect=choices(geohashVal) # select one geohash with replacement
        bol, lat, long, year, month, dayofweek,hour = randomtimes(geoSelect[0], '01-01-2017 00:00:00',
                                                               '31-05-2019 23:00:00', count)
        if bol and [year, month, dayofweek,hour] not in t:
            p = (year, month, dayofweek,hour)
            k = (geoSelect[0], lat, long, year, month, dayofweek,hour)
            a.append(k)
            t.append(p)
            count = count + 1
        else:
            continue
dt = pd.DataFrame(a)
dt.columns = ['geohash', 'non_acclat', 'non_acclong', 'UJAHR', 'UMONAT', 'UWOCHENTAG','hour']
dt['UMONAT'] = dt["UMONAT"].astype(str).astype(int)
dt['UJAHR'] = dt["UJAHR"].astype(str).astype(int)
train_non_acc_data=dt.loc[((dt['UJAHR']==2017) & (dt['UMONAT']<=12)|(dt['UJAHR']==2018) & (dt['UMONAT']<=12) | ((dt['UJAHR']==2019) & (dt['UMONAT']<=5)))]
train_non_acc_data.to_csv('../../../data_preprocessing/data/regions/'+city+'/train_nonaccdata.csv', index=False)
def testNonacc(hann_grid_zeroacc,test,city):
a=[]
t=[]
no_of_acc=len(test.index)
print('no of acc=',no_of_acc)
no_of_nonacc=no_of_acc*3
print('no of non acc in test=',no_of_nonacc)
    # keep sampling until the requested number of unique non-accident cases is reached
    count = 0
    while count < no_of_nonacc:
        geohashVal=hann_grid_zeroacc['geohash'].values # 153m x153 m all geohashes
        geoSelect=choices(geohashVal) # select one geohash with replacement
        bol, lat, long, year, month, dayofweek,hour = randomtimes(geoSelect[0], '01-06-2019 00:00:00',
                                                               '31-12-2019 23:00:00', count)
        if bol and [year, month, dayofweek,hour] not in t:
            p = (year, month, dayofweek,hour)
            k = (geoSelect[0], lat, long, year, month, dayofweek,hour)
            a.append(k)
            t.append(p)
            count = count + 1
        else:
            continue
dt = pd.DataFrame(a)
dt.columns = ['geohash', 'non_acclat', 'non_acclong', 'UJAHR', 'UMONAT', 'UWOCHENTAG','hour']
dt['UMONAT'] = dt["UMONAT"].astype(str).astype(int)
dt['UJAHR'] = dt["UJAHR"].astype(str).astype(int)
test_data = dt.loc[(dt['UMONAT'] > 5) & (dt['UJAHR'] == 2019)]
test_data.to_csv('../../../data_preprocessing/data/regions/'+city+'/test_nonaccdata.csv', index=False)
if __name__ == "__main__":
cities = ['LS/hannover']#,'Bayern/munich','Bayern/nurenberg']
for city in cities:
region_grid=pd.read_csv('../../../data_preprocessing/data/regions/'+city+'/numberofGridRegionGeo7.csv',header=0)
region_selectedWithacc=pd.read_csv('../../../data_preprocessing/data/regions/'+city+'/acc_threeYear_hannover.csv',header=0)
train,test=traintestdataAcc(region_selectedWithacc, city)
# non acc cases generation
trainNonacc(region_grid,train,city)
testNonacc(region_grid,test,city)
print('finished for city=',city)
|
[
"pandas.DataFrame",
"numpy.random.uniform",
"pandas.read_csv",
"random.choices",
"random.random",
"datetime.datetime.strptime",
"geohash.bbox",
"pandas.concat"
] |
[((692, 709), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (701, 709), True, 'import pandas as pd\n'), ((1090, 1106), 'geohash.bbox', 'gh.bbox', (['geohash'], {}), '(geohash)\n', (1097, 1106), True, 'import geohash as gh\n'), ((1312, 1347), 'numpy.random.uniform', 'np.random.uniform', (['min_lng', 'max_lng'], {}), '(min_lng, max_lng)\n', (1329, 1347), True, 'import numpy as np\n'), ((1413, 1448), 'numpy.random.uniform', 'np.random.uniform', (['min_lat', 'max_lat'], {}), '(min_lat, max_lat)\n', (1430, 1448), True, 'import numpy as np\n'), ((1870, 1912), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dateFind', 'frmt'], {}), '(dateFind, frmt)\n', (1896, 1912), False, 'import datetime\n'), ((2186, 2225), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['stime', 'frmt'], {}), '(stime, frmt)\n', (2212, 2225), False, 'import datetime\n'), ((2238, 2277), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['etime', 'frmt'], {}), '(etime, frmt)\n', (2264, 2277), False, 'import datetime\n'), ((3415, 3430), 'pandas.DataFrame', 'pd.DataFrame', (['a'], {}), '(a)\n', (3427, 3430), True, 'import pandas as pd\n'), ((4811, 4826), 'pandas.DataFrame', 'pd.DataFrame', (['a'], {}), '(a)\n', (4823, 4826), True, 'import pandas as pd\n'), ((2868, 2887), 'random.choices', 'choices', (['geohashVal'], {}), '(geohashVal)\n', (2875, 2887), False, 'from random import choices\n'), ((4264, 4283), 'random.choices', 'choices', (['geohashVal'], {}), '(geohashVal)\n', (4271, 4283), False, 'from random import choices\n'), ((5366, 5475), 'pandas.read_csv', 'pd.read_csv', (["('../../../data_preprocessing/data/regions/' + city +\n '/numberofGridRegionGeo7.csv')"], {'header': '(0)'}), "('../../../data_preprocessing/data/regions/' + city +\n '/numberofGridRegionGeo7.csv', header=0)\n", (5377, 5475), True, 'import pandas as pd\n'), ((5499, 5608), 'pandas.read_csv', 'pd.read_csv', (["('../../../data_preprocessing/data/regions/' + city +\n '/acc_threeYear_hannover.csv')"], {'header': '(0)'}), "('../../../data_preprocessing/data/regions/' + city +\n '/acc_threeYear_hannover.csv', header=0)\n", (5510, 5608), True, 'import pandas as pd\n'), ((2320, 2335), 'random.random', 'random.random', ([], {}), '()\n', (2333, 2335), False, 'import random\n')]
|
"""
The trainer class.
Library: TensorFlow 2.2.0, PyTorch 1.5.1
Author: <NAME>
Email: <EMAIL>
"""
from __future__ import absolute_import, division, print_function
# explicit imports for the torch/numpy symbols used directly in this module
import numpy as np
import torch
from util.validation import *
from util.logger import *
try:
from tqdm import tqdm
from tqdm import trange
except ImportError:
print("tqdm and trange not found, disabling progress bars")
    def tqdm(iterable, *args, **kwargs):
        # fallback: ignore progress-bar options and return the iterable unchanged
        return iterable
    def trange(*args, **kwargs):
        # fallback: ignore progress-bar options and behave like range()
        return range(*args)
TQDM_COLS = 80
def cross_entropy2d(input, target):
# input: (n, c, h, w), target: (n, h, w)
n, c, h, w = input.size()
# input: (n*h*w, c)
input = input.transpose(1, 2).transpose(2, 3).contiguous()
input = input[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
input = input.view(-1, c)
# target: (n*h*w,)
mask = target >= 0.0
target = target[mask]
func_loss = torch.nn.CrossEntropyLoss()
loss = func_loss(input, target)
return loss
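# A minimal smoke-test sketch for cross_entropy2d (shapes and label range are
# illustrative only; negative labels mark pixels excluded from the loss):
#   scores = torch.randn(2, 3, 4, 5)           # (n, c, h, w) class scores
#   labels = torch.randint(-1, 3, (2, 4, 5))   # (n, h, w), -1 = ignore
#   loss = cross_entropy2d(scores, labels)     # scalar averaged over valid pixels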
class Trainer(object):
def __init__(self, model, optimizer, logger, num_epochs, train_loader,
test_loader=None,
epoch=0,
log_batch_stride=30,
check_point_epoch_stride=60,
scheduler=None):
"""
        :param model: A network model to train.
        :param optimizer: An optimizer.
        :param logger: The logger for writing results to TensorBoard.
        :param num_epochs: the total number of training epochs.
        :param train_loader: pytorch's DataLoader
        :param test_loader: pytorch's DataLoader
        :param epoch: the start epoch number.
        :param log_batch_stride: how many batches to skip between log writes in the batch loop.
        :param check_point_epoch_stride: how many epochs to skip between model checkpoints in the epoch loop.
        :param scheduler: optimizer scheduler for adjusting the learning rate.
"""
self.cuda = torch.cuda.is_available()
self.model = model
self.optim = optimizer
self.logger = logger
self.train_loader = train_loader
self.test_loader = test_loader
self.num_epoches = num_epochs
self.check_point_step = check_point_epoch_stride
self.log_batch_stride = log_batch_stride
self.scheduler = scheduler
self.epoch = epoch
def train(self):
if not next(self.model.parameters()).is_cuda and self.cuda:
raise ValueError("A model should be set via .cuda() before constructing optimizer.")
for epoch in trange(self.epoch, self.num_epoches,
position=0,
desc='Train', ncols=TQDM_COLS):
self.epoch = epoch
# train
self._train_epoch()
# step forward to reduce the learning rate in the optimizer.
if self.scheduler:
self.scheduler.step()
# model checkpoints
if epoch%self.check_point_step == 0:
self.logger.save_model_and_optimizer(self.model,
self.optim,
'epoch_{}'.format(epoch))
def evaluate(self):
num_batches = len(self.test_loader)
self.model.eval()
with torch.no_grad():
for n_batch, (sample_batched) in tqdm(enumerate(self.test_loader),
total=num_batches,
leave=False,
desc="Valid epoch={}".format(self.epoch),
ncols=TQDM_COLS):
self._eval_batch(sample_batched, n_batch, num_batches)
def _train_epoch(self):
num_batches = len(self.train_loader)
if self.test_loader:
dataloader_iterator = iter(self.test_loader)
for n_batch, (sample_batched) in tqdm(enumerate(self.train_loader),
total=num_batches,
leave=False,
desc="Train epoch={}".format(self.epoch),
ncols=TQDM_COLS):
self.model.train()
data = sample_batched['image']
target = sample_batched['annotation']
if self.cuda:
data, target = data.cuda(), target.cuda()
self.optim.zero_grad()
torch.cuda.empty_cache()
score = self.model(data)
loss = cross_entropy2d(score, target)
loss_data = loss.data.item()
if np.isnan(loss_data):
raise ValueError('loss is nan while training')
loss.backward()
self.optim.step()
if n_batch%self.log_batch_stride != 0:
continue
self.logger.store_checkpoint_var('img_width', data.shape[3])
self.logger.store_checkpoint_var('img_height', data.shape[2])
self.model.img_width = data.shape[3]
self.model.img_height = data.shape[2]
#write logs to Tensorboard.
lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
lbl_true = target.data.cpu().numpy()
acc, acc_cls, mean_iou, fwavacc = \
label_accuracy_score(lbl_true, lbl_pred, n_class=score.shape[1])
self.logger.log_train(loss, 'loss', self.epoch, n_batch, num_batches)
self.logger.log_train(acc, 'acc', self.epoch, n_batch, num_batches)
self.logger.log_train(acc_cls, 'acc_cls', self.epoch, n_batch, num_batches)
self.logger.log_train(mean_iou, 'mean_iou', self.epoch, n_batch, num_batches)
self.logger.log_train(fwavacc, 'fwavacc', self.epoch, n_batch, num_batches)
#write result images when starting epoch.
if n_batch == 0:
log_img = self.logger.concatenate_images([lbl_pred, lbl_true], input_axis='byx')
log_img = self.logger.concatenate_images([log_img, data.cpu().numpy()[:, :, :, :]])
self.logger.log_images_train(log_img, self.epoch, n_batch, num_batches,
nrows=data.shape[0])
#if the trainer has the test loader, it evaluates the model using the test data.
if self.test_loader:
self.model.eval()
with torch.no_grad():
try:
sample_batched = next(dataloader_iterator)
except StopIteration:
dataloader_iterator = iter(self.test_loader)
sample_batched = next(dataloader_iterator)
self._eval_batch(sample_batched, n_batch, num_batches)
def _eval_batch(self, sample_batched, n_batch, num_batches):
data = sample_batched['image']
target = sample_batched['annotation']
if self.cuda:
data, target = data.cuda(), target.cuda()
torch.cuda.empty_cache()
score = self.model(data)
loss = cross_entropy2d(score, target)
loss_data = loss.data.item()
if np.isnan(loss_data):
raise ValueError('loss is nan while training')
lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
lbl_true = target.data.cpu().numpy()
acc, acc_cls, mean_iou, fwavacc = \
label_accuracy_score(lbl_true, lbl_pred, n_class=score.shape[1])
self.logger.log_test(loss, 'loss', self.epoch, n_batch, num_batches)
self.logger.log_test(acc, 'acc', self.epoch, n_batch, num_batches)
self.logger.log_test(acc_cls, 'acc_cls', self.epoch, n_batch, num_batches)
self.logger.log_test(mean_iou, 'mean_iou', self.epoch, n_batch, num_batches)
self.logger.log_test(fwavacc, 'fwavacc', self.epoch, n_batch, num_batches)
if n_batch == 0:
log_img = self.logger.concatenate_images([lbl_pred, lbl_true], input_axis='byx')
log_img = self.logger.concatenate_images([log_img, data.cpu().numpy()[:, :, :, :]])
self.logger.log_images_test(log_img, self.epoch, n_batch, num_batches,
nrows=data.shape[0])
def _write_img(self, score, target, input_img, n_batch, num_batches):
lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
lbl_true = target.data.cpu().numpy()
log_img = self.logger.concatenate_images([lbl_pred, lbl_true], input_axis='byx')
log_img = self.logger.concatenate_images([log_img, input_img.cpu().numpy()[:, :, :, :]])
self.logger.log_images(log_img, self.epoch, n_batch, num_batches, nrows=log_img.shape[0])
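# A minimal usage sketch. ``net``, ``opt``, ``tb_logger``, ``train_dl`` and
# ``val_dl`` are placeholders the caller must supply; the logger object must
# implement the ``log_*``, ``save_model_and_optimizer`` and
# ``concatenate_images`` methods used above.
#   trainer = Trainer(model=net.cuda(), optimizer=opt, logger=tb_logger,
#                     num_epochs=100, train_loader=train_dl, test_loader=val_dl)
#   trainer.train()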
|
[
"tqdm.trange"
] |
[((2478, 2557), 'tqdm.trange', 'trange', (['self.epoch', 'self.num_epoches'], {'position': '(0)', 'desc': '"""Train"""', 'ncols': 'TQDM_COLS'}), "(self.epoch, self.num_epoches, position=0, desc='Train', ncols=TQDM_COLS)\n", (2484, 2557), False, 'from tqdm import trange\n')]
|
"""Data type class for the variables containing all the methods related to data types."""
from enum import Enum, auto
from typing import Any
class DataType(Enum):
"""Data type of a variable in the driver."""
String = auto()
RawString = auto()
Integer = auto()
IntegerExt = auto()
Boolean = auto()
Float = auto()
FloatExt = auto()
Enum = auto()
EnumExt = auto()
StringList = auto()
RawStringList = auto()
IntegerList = auto()
IntegerExtList = auto()
BooleanList = auto()
FloatList = auto()
FloatExtList = auto()
EnumList = auto()
EnumExtList = auto()
@property
def is_list(self) -> bool:
"""Returns True, if the data type is a list."""
return self in frozenset(
{
DataType.StringList,
DataType.RawStringList,
DataType.IntegerList,
DataType.IntegerExtList,
DataType.BooleanList,
DataType.FloatList,
DataType.FloatExtList,
DataType.EnumList,
DataType.EnumExtList
})
@property
def is_scalar(self) -> bool:
"""Returns True, if the data type is a scalar."""
return not self.is_list
@property
def is_scalar_enum(self) -> bool:
"""Returns True, if the data type is a scalar enum or enum_ext."""
return self == DataType.Enum or self == DataType.EnumExt
@property
def is_list_enum(self) -> bool:
"""Returns True, if the data type is a list enum or list enum_ext."""
return self == DataType.EnumList or self == DataType.EnumExtList
@property
def is_enum(self) -> bool:
"""Returns True, if the data type is enum or enum array - including the extended."""
return self in [DataType.Enum, DataType.EnumExt, DataType.EnumList, DataType.EnumExtList]
@property
def is_raw_string(self) -> bool:
"""Returns True for raw string and raw string list."""
return self == DataType.RawString or self == DataType.RawStringList
@property
def is_boolean(self) -> bool:
"""Returns True for boolean and boolean list."""
return self == DataType.Boolean or self == DataType.BooleanList
@property
def is_string(self) -> bool:
"""Returns True for string and string list."""
return self == DataType.String or self == DataType.StringList
@property
def element_type(self):
"""For lists, the property returns type of the element.
For scalars, it returns the same type."""
if self.is_scalar:
return self
elif self == DataType.StringList:
return DataType.String
        elif self == DataType.RawStringList:
            return DataType.RawString
elif self == DataType.BooleanList:
return DataType.Boolean
elif self == DataType.IntegerList:
return DataType.Integer
elif self == DataType.IntegerExtList:
return DataType.IntegerExt
elif self == DataType.FloatList:
return DataType.Float
elif self == DataType.FloatExtList:
return DataType.FloatExt
elif self == DataType.EnumList:
return DataType.Enum
elif self == DataType.EnumExtList:
return DataType.EnumExt
def get_default_value(self, enm: Enum = None) -> Any:
"""Returns default value for the current type.
        If the data type is Enum or EnumExt, you have to provide the enum class."""
if self.is_list:
return []
if self == DataType.RawString:
return ''
elif self == DataType.String:
return ''
elif self == DataType.Boolean:
return False
elif self == DataType.Integer:
return 0
elif self == DataType.IntegerExt:
return 0
elif self == DataType.Float:
return 0.0
elif self == DataType.FloatExt:
return 0.0
elif self == DataType.Enum:
return enm(0)
elif self == DataType.EnumExt:
return enm(0)
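# A small self-check illustrating `element_type` and `get_default_value`; the
# `Color` enum below is a made-up example, not part of the driver's data model.
if __name__ == "__main__":
    class Color(Enum):
        RED = 0
        GREEN = 1

    assert DataType.FloatList.element_type == DataType.Float
    assert DataType.StringList.get_default_value() == []
    assert DataType.Enum.get_default_value(Color) is Color.RED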
|
[
"enum.auto"
] |
[((222, 228), 'enum.auto', 'auto', ([], {}), '()\n', (226, 228), False, 'from enum import Enum, auto\n'), ((242, 248), 'enum.auto', 'auto', ([], {}), '()\n', (246, 248), False, 'from enum import Enum, auto\n'), ((260, 266), 'enum.auto', 'auto', ([], {}), '()\n', (264, 266), False, 'from enum import Enum, auto\n'), ((281, 287), 'enum.auto', 'auto', ([], {}), '()\n', (285, 287), False, 'from enum import Enum, auto\n'), ((299, 305), 'enum.auto', 'auto', ([], {}), '()\n', (303, 305), False, 'from enum import Enum, auto\n'), ((315, 321), 'enum.auto', 'auto', ([], {}), '()\n', (319, 321), False, 'from enum import Enum, auto\n'), ((334, 340), 'enum.auto', 'auto', ([], {}), '()\n', (338, 340), False, 'from enum import Enum, auto\n'), ((349, 355), 'enum.auto', 'auto', ([], {}), '()\n', (353, 355), False, 'from enum import Enum, auto\n'), ((367, 373), 'enum.auto', 'auto', ([], {}), '()\n', (371, 373), False, 'from enum import Enum, auto\n'), ((388, 394), 'enum.auto', 'auto', ([], {}), '()\n', (392, 394), False, 'from enum import Enum, auto\n'), ((412, 418), 'enum.auto', 'auto', ([], {}), '()\n', (416, 418), False, 'from enum import Enum, auto\n'), ((434, 440), 'enum.auto', 'auto', ([], {}), '()\n', (438, 440), False, 'from enum import Enum, auto\n'), ((459, 465), 'enum.auto', 'auto', ([], {}), '()\n', (463, 465), False, 'from enum import Enum, auto\n'), ((481, 487), 'enum.auto', 'auto', ([], {}), '()\n', (485, 487), False, 'from enum import Enum, auto\n'), ((501, 507), 'enum.auto', 'auto', ([], {}), '()\n', (505, 507), False, 'from enum import Enum, auto\n'), ((524, 530), 'enum.auto', 'auto', ([], {}), '()\n', (528, 530), False, 'from enum import Enum, auto\n'), ((543, 549), 'enum.auto', 'auto', ([], {}), '()\n', (547, 549), False, 'from enum import Enum, auto\n'), ((565, 571), 'enum.auto', 'auto', ([], {}), '()\n', (569, 571), False, 'from enum import Enum, auto\n')]
|
#!/usr/bin/env python
from __future__ import unicode_literals
import traceback
import pwd
import os
from os.path import join, isfile
import subprocess
import importlib
# Local imports
from .base_evaluator import BaseEvaluator
from .file_utils import copy_files, delete_files
class JavaCodeEvaluator(BaseEvaluator):
"""Tests the Java code obtained from Code Server"""
def __init__(self, metadata, test_case_data):
self.files = []
self.compiled_user_answer = None
self.compiled_test_code = None
self.submit_code_path = ""
self.user_output_path = ""
self.ref_output_path = ""
# Set metadata values
self.user_answer = metadata.get('user_answer')
self.file_paths = metadata.get('file_paths')
self.partial_grading = metadata.get('partial_grading')
# Set test case data values
self.test_case = test_case_data.get('test_case')
self.weight = test_case_data.get('weight')
def teardown(self):
# Delete the created file.
if os.path.exists(self.submit_code_path):
os.remove(self.submit_code_path)
if os.path.exists(self.user_output_path):
os.remove(self.user_output_path)
if os.path.exists(self.ref_output_path):
os.remove(self.ref_output_path)
if os.path.exists(self.test_code_path):
os.remove(self.test_code_path)
if self.files:
delete_files(self.files)
def get_commands(self, clean_ref_code_path, user_code_directory):
        compile_command = 'javac {0}'.format(self.submit_code_path)
compile_main = ('javac {0} -classpath '
'{1} -d {2}').format(clean_ref_code_path,
user_code_directory,
user_code_directory)
return compile_command, compile_main
def set_file_paths(self, directory, file_name):
output_path = "{0}{1}.class".format(directory, file_name)
return output_path
def compile_code(self):
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
# create student code and moderator code file
self.submit_code_path = self.create_submit_code_file('Test.java')
self.test_code_path = self.create_submit_code_file('main.java')
self.write_to_submit_code_file(self.submit_code_path,
self.user_answer
)
self.write_to_submit_code_file(self.test_code_path, self.test_case)
clean_ref_code_path = self.test_code_path
if self.file_paths:
self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
msg = "No file at %s or Incorrect path" % clean_ref_code_path
return False, msg
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
user_code_directory = os.getcwd() + '/'
ref_file_name = (clean_ref_code_path.split('/')[-1]).split('.')[0]
self.user_output_path = self.set_file_paths(user_code_directory,
'Test'
)
self.ref_output_path = self.set_file_paths(user_code_directory,
ref_file_name
)
compile_command, self.compile_main = self.get_commands(
clean_ref_code_path,
user_code_directory
)
self.run_command_args = "java -cp {0} {1}".format(
user_code_directory,
ref_file_name
)
self.compiled_user_answer = self._run_command(compile_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
self.compiled_test_code = self._run_command(self.compile_main,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
return self.compiled_user_answer, self.compiled_test_code
def check_code(self):
""" Function validates student code using instructor code as
        reference. The first argument, ref_code_path, is the path to the
        instructor code; it is assumed to have executable permission.
        The second argument, submit_code_path, is the path to the student
        code; it is also assumed to have executable permission.
Returns
--------
returns (True, "Correct answer") : If the student function returns
expected output when called by reference code.
returns (False, error_msg): If the student function fails to return
expected output when called by reference code.
Returns (False, error_msg): If mandatory arguments are not files or
if the required permissions are not given to the file(s).
"""
success = False
mark_fraction = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
# Only if compilation is successful, the program is executed
# And tested with testcases
if stdnt_stderr == '':
proc, main_out, main_err = self.compiled_test_code
main_err = self._remove_null_substitute_char(main_err)
if main_err == '':
ret = self._run_command(self.run_command_args, shell=True,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, None
mark_fraction = 1.0 if self.partial_grading else 0.0
else:
err = stdout + "\n" + stderr
else:
err = "Error:"
try:
error_lines = main_err.splitlines()
for e in error_lines:
if ':' in e:
err = err + "\n" + e.split(":", 1)[1]
else:
err = err + "\n" + e
except:
err = err + "\n" + main_err
else:
err = "Compilation Error:"
try:
error_lines = stdnt_stderr.splitlines()
for e in error_lines:
if ':' in e:
err = err + "\n" + e.split(":", 1)[1]
else:
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
return success, err, mark_fraction
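# Return-value sketch for check_code above:
#   (True, None, mark_fraction)             # student code compiled and the test run exited with 0
#   (False, stdout + "\n" + stderr, 0.0)    # the test run exited with a non-zero status
#   (False, "Error: ...", 0.0)              # the test (moderator) code failed to compile
#   (False, "Compilation Error: ...", 0.0)  # the student code failed to compile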
|
[
"os.getcwd",
"os.remove",
"os.path.isfile",
"os.path.exists"
] |
[((1049, 1086), 'os.path.exists', 'os.path.exists', (['self.submit_code_path'], {}), '(self.submit_code_path)\n', (1063, 1086), False, 'import os\n'), ((1144, 1181), 'os.path.exists', 'os.path.exists', (['self.user_output_path'], {}), '(self.user_output_path)\n', (1158, 1181), False, 'import os\n'), ((1239, 1275), 'os.path.exists', 'os.path.exists', (['self.ref_output_path'], {}), '(self.ref_output_path)\n', (1253, 1275), False, 'import os\n'), ((1332, 1367), 'os.path.exists', 'os.path.exists', (['self.test_code_path'], {}), '(self.test_code_path)\n', (1346, 1367), False, 'import os\n'), ((1100, 1132), 'os.remove', 'os.remove', (['self.submit_code_path'], {}), '(self.submit_code_path)\n', (1109, 1132), False, 'import os\n'), ((1195, 1227), 'os.remove', 'os.remove', (['self.user_output_path'], {}), '(self.user_output_path)\n', (1204, 1227), False, 'import os\n'), ((1289, 1320), 'os.remove', 'os.remove', (['self.ref_output_path'], {}), '(self.ref_output_path)\n', (1298, 1320), False, 'import os\n'), ((1381, 1411), 'os.remove', 'os.remove', (['self.test_code_path'], {}), '(self.test_code_path)\n', (1390, 1411), False, 'import os\n'), ((2758, 2785), 'os.path.isfile', 'isfile', (['clean_ref_code_path'], {}), '(clean_ref_code_path)\n', (2764, 2785), False, 'from os.path import join, isfile\n'), ((2918, 2947), 'os.path.isfile', 'isfile', (['self.submit_code_path'], {}), '(self.submit_code_path)\n', (2924, 2947), False, 'from os.path import join, isfile\n'), ((3098, 3109), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3107, 3109), False, 'import os\n')]
|
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 <NAME>.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import os
import warnings
import unittest
from . import parser_test_case
from pygccxml import utils
class Test(parser_test_case.parser_test_case_t):
def test(self):
path = os.path.normpath("/mypath/folder1/folder2/folder3")
dirs = [
os.path.normpath("/mypath/folder1/folder2/"),
os.path.normpath("/mypath3/folder1/folder2/folder3"),
os.path.normpath("home"),
os.path.normpath("/test/test1/mypath")]
self.assertTrue(utils.utils.contains_parent_dir(path, dirs))
dirs = [os.path.normpath("/home"), os.path.normpath("/mypath/test/")]
self.assertFalse(utils.utils.contains_parent_dir(path, dirs))
def test_deprecation_wrapper(self):
"""
        The DeprecationWrapper is not part of the public API.
We still need to test it.
"""
a = utils.utils.DeprecationWrapper(
DeprecatedClass,
"DeprecatedClass",
"NewClass",
"1.9.0")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
a()
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "deprecated" in str(w[-1].message)
class DeprecatedClass(object):
"""
An empty class used for testing purposes.
"""
pass
def create_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run(create_suite())
if __name__ == "__main__":
run_suite()
|
[
"warnings.simplefilter",
"pygccxml.utils.utils.DeprecationWrapper",
"unittest.TestSuite",
"pygccxml.utils.utils.contains_parent_dir",
"unittest.TextTestRunner",
"unittest.makeSuite",
"warnings.catch_warnings",
"os.path.normpath"
] |
[((1594, 1614), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (1612, 1614), False, 'import unittest\n'), ((375, 426), 'os.path.normpath', 'os.path.normpath', (['"""/mypath/folder1/folder2/folder3"""'], {}), "('/mypath/folder1/folder2/folder3')\n", (391, 426), False, 'import os\n'), ((1052, 1143), 'pygccxml.utils.utils.DeprecationWrapper', 'utils.utils.DeprecationWrapper', (['DeprecatedClass', '"""DeprecatedClass"""', '"""NewClass"""', '"""1.9.0"""'], {}), "(DeprecatedClass, 'DeprecatedClass',\n 'NewClass', '1.9.0')\n", (1082, 1143), False, 'from pygccxml import utils\n'), ((1633, 1657), 'unittest.makeSuite', 'unittest.makeSuite', (['Test'], {}), '(Test)\n', (1651, 1657), False, 'import unittest\n'), ((456, 500), 'os.path.normpath', 'os.path.normpath', (['"""/mypath/folder1/folder2/"""'], {}), "('/mypath/folder1/folder2/')\n", (472, 500), False, 'import os\n'), ((514, 566), 'os.path.normpath', 'os.path.normpath', (['"""/mypath3/folder1/folder2/folder3"""'], {}), "('/mypath3/folder1/folder2/folder3')\n", (530, 566), False, 'import os\n'), ((580, 604), 'os.path.normpath', 'os.path.normpath', (['"""home"""'], {}), "('home')\n", (596, 604), False, 'import os\n'), ((618, 656), 'os.path.normpath', 'os.path.normpath', (['"""/test/test1/mypath"""'], {}), "('/test/test1/mypath')\n", (634, 656), False, 'import os\n'), ((683, 726), 'pygccxml.utils.utils.contains_parent_dir', 'utils.utils.contains_parent_dir', (['path', 'dirs'], {}), '(path, dirs)\n', (714, 726), False, 'from pygccxml import utils\n'), ((745, 770), 'os.path.normpath', 'os.path.normpath', (['"""/home"""'], {}), "('/home')\n", (761, 770), False, 'import os\n'), ((772, 805), 'os.path.normpath', 'os.path.normpath', (['"""/mypath/test/"""'], {}), "('/mypath/test/')\n", (788, 805), False, 'import os\n'), ((833, 876), 'pygccxml.utils.utils.contains_parent_dir', 'utils.utils.contains_parent_dir', (['path', 'dirs'], {}), '(path, dirs)\n', (864, 876), False, 'from pygccxml import utils\n'), ((1202, 1238), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1225, 1238), False, 'import warnings\n'), ((1257, 1288), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (1278, 1288), False, 'import warnings\n'), ((1699, 1735), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1722, 1735), False, 'import unittest\n')]
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import logging
import os
import subprocess
import random
import cv2
import numpy as np
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
from cuberite_process import CuberiteProcess
from repo import repo_home
logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
def to_unit_vec(yaw, pitch):
pitch *= 3.14159 / 180
yaw *= 3.14159 / 180
return np.array(
[-1 * np.cos(pitch) * np.sin(yaw), -1 * np.sin(pitch), np.cos(pitch) * np.cos(yaw)]
)
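# Note: with yaw = 0 and pitch = 0 the vector above is approximately [0, 0, 1],
# so `camera = focus - look_xyz * distance` in render() places the camera
# `distance` blocks away from the focus point, opposite the viewing direction.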
def ground_height(blocks):
dirt_pct = np.mean(np.mean(blocks[:, :, :, 0] == 2, axis=1), axis=1)
if (dirt_pct > 0.25).any():
return np.argmax(dirt_pct)
return None
def change_block(schematic, b):
x, y, z = b
## change to red wool
schematic[y][z][x][0] = 35
schematic[y][z][x][1] = 14
def render(npy_p2b, out_dir, port, spp, img_size, mn=None):
npy_file = (
os.path.expanduser("~")
+ "/minecraft_houses/"
+ ".".join(npy_p2b.split(".")[1:-2])
+ "/schematic.npy"
)
schematic = np.load(npy_file)
print(schematic.shape)
house_name = os.path.basename(os.path.dirname(npy_file))
p2b = np.load(npy_p2b)
# remove blocks below ground-level
g = ground_height(schematic)
schematic = schematic[(g or 0) :, :, :, :]
ys, zs, xs = np.nonzero(schematic[:, :, :, 0] > 0)
xmid, ymid, zmid = np.mean(xs), np.mean(ys), np.mean(zs)
focus = np.array([xmid, ymid + 63, zmid]) # TODO: +63 only works for flat_world seed=0w
yaw, distance = list(map(int, npy_p2b.split(".")[-2].split("_")))
look = [yaw, 0]
look_xyz = to_unit_vec(*look)
camera = focus - (look_xyz * distance)
if mn == [0, 0]:
M, N = p2b.shape[:2]
while True:
m = random.randint(0, M - 1)
n = random.randint(0, N - 1)
if p2b[m][n][0] != -1:
break
else:
m, n = mn
print("Select pixel at {}".format((m, n)))
print("Mapped block {}".format(p2b[m][n]))
change_block(schematic, p2b[m][n])
logging.info("Launching cuberite at port {}".format(port))
p = CuberiteProcess(
"flat_world", seed=0, game_mode="creative", place_blocks_yzx=schematic, port=port
)
logging.info("Destroying cuberite at port {}".format(port))
p.destroy()
world_dir = os.path.join(p.workdir, "world")
render_view_bin = os.path.join(repo_home, "bin/render_view")
assert os.path.isfile(
render_view_bin
), "{} not found.\n\nTry running: make render_view".format(render_view_bin)
procs = []
chunky_id = "{}_{}".format(yaw, distance)
out_file = "{}/chunky_verify.{}.{}.png".format(out_dir, house_name, chunky_id)
call = [
str(a)
for a in [
"python3",
"{}/python/minecraft_render/render.py".format(repo_home),
"--world",
world_dir,
"--out",
out_file,
"--camera",
*camera,
"--look",
yaw,
0,
"--size",
*img_size,
"--spp",
spp,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
for p in procs:
p.wait()
## draw the sampled pixel for a better view
img = cv2.imread(out_file)
cv2.circle(img, (n, m), 2, (255, 0, 0))
cv2.imwrite(out_file, img)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("npy_p2b")
parser.add_argument(
"--out-dir", "-o", required=True, help="Directory in which to write vision files"
)
parser.add_argument("--spp", type=int, default=25, help="samples per pixel")
parser.add_argument("--port", type=int, default=25565)
parser.add_argument("--size", type=int, nargs=2, default=[300, 225])
parser.add_argument("--mn", type=int, nargs=2, default=[0, 0])
args = parser.parse_args()
render(args.npy_p2b, args.out_dir, args.port, args.spp, args.size, args.mn)
|
[
"numpy.load",
"argparse.ArgumentParser",
"numpy.argmax",
"os.path.isfile",
"numpy.mean",
"numpy.sin",
"os.path.join",
"random.randint",
"cuberite_process.CuberiteProcess",
"cv2.imwrite",
"os.path.dirname",
"subprocess.Popen",
"cv2.circle",
"os.path.realpath",
"numpy.cos",
"logging.basicConfig",
"sys.path.insert",
"numpy.nonzero",
"cv2.imread",
"numpy.array",
"os.path.expanduser",
"logging.getLogger"
] |
[((249, 279), 'sys.path.insert', 'sys.path.insert', (['(0)', 'python_dir'], {}), '(0, python_dir)\n', (264, 279), False, 'import sys\n'), ((354, 424), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s [%(levelname)s]: %(message)s"""'}), "(format='%(asctime)s [%(levelname)s]: %(message)s')\n", (373, 424), False, 'import logging\n'), ((1231, 1248), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (1238, 1248), True, 'import numpy as np\n'), ((1347, 1363), 'numpy.load', 'np.load', (['npy_p2b'], {}), '(npy_p2b)\n', (1354, 1363), True, 'import numpy as np\n'), ((1501, 1538), 'numpy.nonzero', 'np.nonzero', (['(schematic[:, :, :, 0] > 0)'], {}), '(schematic[:, :, :, 0] > 0)\n', (1511, 1538), True, 'import numpy as np\n'), ((1613, 1646), 'numpy.array', 'np.array', (['[xmid, ymid + 63, zmid]'], {}), '([xmid, ymid + 63, zmid])\n', (1621, 1646), True, 'import numpy as np\n'), ((2307, 2409), 'cuberite_process.CuberiteProcess', 'CuberiteProcess', (['"""flat_world"""'], {'seed': '(0)', 'game_mode': '"""creative"""', 'place_blocks_yzx': 'schematic', 'port': 'port'}), "('flat_world', seed=0, game_mode='creative',\n place_blocks_yzx=schematic, port=port)\n", (2322, 2409), False, 'from cuberite_process import CuberiteProcess\n'), ((2517, 2549), 'os.path.join', 'os.path.join', (['p.workdir', '"""world"""'], {}), "(p.workdir, 'world')\n", (2529, 2549), False, 'import os\n'), ((2573, 2615), 'os.path.join', 'os.path.join', (['repo_home', '"""bin/render_view"""'], {}), "(repo_home, 'bin/render_view')\n", (2585, 2615), False, 'import os\n'), ((2627, 2658), 'os.path.isfile', 'os.path.isfile', (['render_view_bin'], {}), '(render_view_bin)\n', (2641, 2658), False, 'import os\n'), ((3503, 3523), 'cv2.imread', 'cv2.imread', (['out_file'], {}), '(out_file)\n', (3513, 3523), False, 'import cv2\n'), ((3528, 3567), 'cv2.circle', 'cv2.circle', (['img', '(n, m)', '(2)', '(255, 0, 0)'], {}), '(img, (n, m), 2, (255, 0, 0))\n', (3538, 3567), False, 'import cv2\n'), ((3572, 3598), 'cv2.imwrite', 'cv2.imwrite', (['out_file', 'img'], {}), '(out_file, img)\n', (3583, 3598), False, 'import cv2\n'), ((3641, 3666), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3664, 3666), False, 'import argparse\n'), ((220, 246), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (236, 246), False, 'import os\n'), ((425, 444), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (442, 444), False, 'import logging\n'), ((723, 763), 'numpy.mean', 'np.mean', (['(blocks[:, :, :, 0] == 2)'], {'axis': '(1)'}), '(blocks[:, :, :, 0] == 2, axis=1)\n', (730, 763), True, 'import numpy as np\n'), ((820, 839), 'numpy.argmax', 'np.argmax', (['dirt_pct'], {}), '(dirt_pct)\n', (829, 839), True, 'import numpy as np\n'), ((1310, 1335), 'os.path.dirname', 'os.path.dirname', (['npy_file'], {}), '(npy_file)\n', (1325, 1335), False, 'import os\n'), ((1563, 1574), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (1570, 1574), True, 'import numpy as np\n'), ((1576, 1587), 'numpy.mean', 'np.mean', (['ys'], {}), '(ys)\n', (1583, 1587), True, 'import numpy as np\n'), ((1589, 1600), 'numpy.mean', 'np.mean', (['zs'], {}), '(zs)\n', (1596, 1600), True, 'import numpy as np\n'), ((3382, 3404), 'subprocess.Popen', 'subprocess.Popen', (['call'], {}), '(call)\n', (3398, 3404), False, 'import subprocess\n'), ((1950, 1974), 'random.randint', 'random.randint', (['(0)', '(M - 1)'], {}), '(0, M - 1)\n', (1964, 1974), False, 'import random\n'), ((1991, 2015), 'random.randint', 
'random.randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (2005, 2015), False, 'import random\n'), ((603, 614), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (609, 614), True, 'import numpy as np\n'), ((621, 634), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (627, 634), True, 'import numpy as np\n'), ((636, 649), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (642, 649), True, 'import numpy as np\n'), ((652, 663), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (658, 663), True, 'import numpy as np\n'), ((1081, 1104), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1099, 1104), False, 'import os\n'), ((587, 600), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (593, 600), True, 'import numpy as np\n')]
|
from .util import *
from .srs import *
from .geom import *
from .raster import *
from .vector import *
from .extent import Extent
from io import BytesIO
MaskAndExtent = namedtuple("MaskAndExtent", "mask extent id")
class RegionMask(object):
"""The RegionMask object represents a given region and exposes methods allowing
for easy manipulation of geospatial data around that region.
RegionMask objects are defined by providing a polygon (either via a vector
file, an ogr.Geometry, or a Well-Known-Text (WKT) string), a projection system
to work in, and an extent and pixel resolution to create a matrix mask (i.e.
boolean values) of.
* The extent of the generated mask matrix is the tightest fit around the region
      in units of the pixel resolution. However, the extent can be defined explicitly
if desired
* The region can be manipulated as a vector polygon via the ".geometry"
      attribute, which exposes the geometry as an ogr.Geometry. To incorporate this
      into other vector-handling libraries it is suggested to use the
".ExportToWkt()" method available via OGR.
* The region can be manipulated as a raster matrix via the ".mask" attribute
which exposes the mask as a boolean numpy.ndarray
* Any raster source can be easily warped onto the region-mask's extent,
projection, and resolution via the ".warp" method
* Any vector source can be rasterized onto the region-mask's extent, projection,
and resolution via the ".rasterize" method
* The default mask set-up is defined by the constant members: DEFAULT_SRS,
DEFAULT_RES, and DEFAULT_PAD
Initializers:
-------------
* RegionMask(...)
- This is not the preferred way
* RegionMask.fromVector( ... )
* RegionMask.fromVectorFeature( ... )
* RegionMask.fromGeom( ... )
* RegionMask.fromMask( ... )
* RegionMask.load( ... )
- This function tries to determine which of the other initializers
should be used based off the input
"""
DEFAULT_SRS = 'europe_m'
DEFAULT_RES = 100
DEFAULT_PAD = None
def __init__(s, extent, pixelRes, mask=None, geom=None, attributes=None, **kwargs):
"""The default constructor for RegionMask objects. Creates a RegionMask
directly from a matrix mask and a given extent (and optionally a geometry).
Pixel resolution is calculated in accordance with the shape of the mask
mask and the provided extent
* Generally one should use the '.load' or else one of the '.fromXXX'
methods to create RegionMasks
Parameters:
-----------
extent : Extent object
The geospatial context of the region mask
* The extent must fit the given pixel sizes
* All computations using the RegionMask will be evaluated within this
spatial context
pixelRes : float or tuple
The RegionMask's native pixel size(s)
* If float : A pixel size to apply to both the X and Y dimension
* If (float float) : An X-dimension and Y-dimension pixel size
* All computations using the RegionMask will generate results in
reference to these pixel sizes (i.e. either at this resolution or
at some scaling of this resolution)
mask : numpy-ndarray
            A mask over the context area defining which pixels are inside the region
and which are outside
* Must be a 2-Dimensional bool-matrix describing the region, where:
- 0/False -> "not in the region"
- 1/True -> "Inside the region"
* Either a mask or a geometry must be given, but not both
        geom : ogr-Geometry
A geometric representation of the RegionMask's region
* Either a mask or a geometry must be given, but not both
attributes : dict
Keyword attributes and values to carry along with the RegionMask
"""
# Check for bad input
if mask is None and geom is None: raise GeoKitRegionMaskError("Either mask or geom should be defined")
if not kwargs.get("mask_plus_geom_is_okay",False):
if not mask is None and not geom is None: raise GeoKitRegionMaskError("mask and geom cannot be defined simultaneously")
# Set basic values
s.extent = extent
s.srs = extent.srs
if s.srs is None:
raise GeoKitRegionMaskError("Extent SRS cannot be None")
# Set Pixel Size)
if not extent.fitsResolution(pixelRes):
raise GeoKitRegionMaskError("The given extent does not fit the given pixelRes")
try:
pixelWidth, pixelHeight = pixelRes
except:
pixelWidth, pixelHeight = pixelRes, pixelRes
s.pixelWidth = abs(pixelWidth)
s.pixelHeight = abs(pixelHeight)
if( s.pixelHeight == s.pixelWidth ):
s._pixelRes = s.pixelHeight
else:
s._pixelRes = None
# set height and width
## It turns out that I can't set these values here, since sometimes gdal
## functions can add an extra row (due to float comparison issues?) when
## warping and rasterizing
s.width = None #int(np.round((s.extent.xMax-s.extent.xMin)/s.pixelWidth))
s.height = None #int(np.round((s.extent.yMax-s.extent.yMin)/s.pixelHeight))
# Set mask
s._mask = mask
if not mask is None: # test the mask
# test type
if(mask.dtype != "bool" and mask.dtype != "uint8" ):
raise GeoKitRegionMaskError("Mask must be bool type")
if(mask.dtype == "uint8"):
mask = mask.astype("bool")
if not np.isclose(extent.xMin+pixelWidth*mask.shape[1], extent.xMax) or not np.isclose(extent.yMin+pixelHeight*mask.shape[0], extent.yMax):
raise GeoKitRegionMaskError("Extent and pixels sizes do not correspond to mask shape")
# Set geometry
if not geom is None: # test the geometry
if not isinstance(geom, ogr.Geometry):
raise GeoKitRegionMaskError("geom is not an ogr.Geometry object")
s._geometry = geom.Clone()
gSRS = geom.GetSpatialReference()
if gSRS is None:
raise GeoKitRegionMaskError("geom does not have an srs")
if not gSRS.IsSame(s.srs): transform(s._geometry, toSRS=s.srs, fromSRS=gSRS)
else:
s._geometry = None
# Set other containers
s._vector = None
s._vectorPath = None
# set attributes
s.attributes = {} if attributes is None else attributes
@staticmethod
def fromMask(extent, mask, attributes=None):
"""Make a RegionMask directly from amask matrix and extent
Note:
-----
Pixel sizes are calculated from the extent boundaries and mask dimensional
sizes
Parameters:
-----------
extent : Extent object
The geospatial context of the region mask
* The extent must fit the given pixel sizes
* All computations using the RegionMask will be evaluated within this
spatial context
mask : numpy-ndarray
            A mask over the context area defining which pixels are inside the region
and which are outside
* Must be a 2-Dimensional bool-matrix describing the region, where:
- 0/False -> "not in the region"
- 1/True -> "Inside the region"
attributes : dict
Keyword attributes and values to carry along with the RegionMask
Returns:
--------
RegionMask
"""
# get pixelWidth and pixelHeight
pixelWidth = (extent.xMax-extent.xMin)/(mask.shape[1])
pixelHeight = (extent.yMax-extent.yMin)/(mask.shape[0])
return RegionMask(extent=extent, pixelRes=(pixelWidth, pixelHeight), mask=mask, attributes=attributes)
@staticmethod
def fromGeom(geom, pixelRes=DEFAULT_RES, srs=DEFAULT_SRS, extent=None, padExtent=DEFAULT_PAD, attributes=None, **k):
"""Make a RasterMask from a given geometry
Parameters:
-----------
        geom : ogr-Geometry or str
A geometric representation of the RegionMask's region
* If a string is given, geokit.geom.convertWKT(geom, srs) is called
to convert it to an ogr.Geometry
pixelRes : float or tuple
The RegionMask's native pixel resolution(s)
* If float : A pixel size to apply to both the X and Y dimension
* If (float float) : An X-dimension and Y-dimension pixel size
srs : Anything acceptable to geokit.srs.loadSRS()
The srs context of the generated RegionMask object
* This srs is superseded by the srs in an explicitly defined extent
* The default srs EPSG3035 is only valid for a European context
extent : Extent object
The geospatial context of the generated region mask
* The extent must fit the given pixel sizes
padExtent : float; optional
An amount by which to pad the extent before generating the RegionMask
attributes : dict
Keyword attributes and values to carry along with the RegionMask
Returns:
--------
RegionMask
"""
srs = loadSRS(srs)
# make sure we have a geometry with an srs
if( isinstance(geom, str)):
geom = convertWKT(geom, srs)
geom = geom.Clone() # clone to make sure we're free of outside dependencies
# set extent (if not given)
if extent is None:
extent = Extent.fromGeom(geom).castTo(srs).pad(padExtent).fit(pixelRes)
else:
if not extent.srs.IsSame(srs):
raise GeoKitRegionMaskError("The given srs does not match the extent's srs")
#extent = extent.pad(padExtent)
# make a RegionMask object
return RegionMask(extent=extent, pixelRes=pixelRes, geom=geom, attributes=attributes)
@staticmethod
def fromVector(source, where=None, geom=None, pixelRes=DEFAULT_RES, srs=DEFAULT_SRS, extent=None, padExtent=DEFAULT_PAD, limitOne=True, **kwargs):
"""Make a RasterMask from a given vector source
Note:
-----
Be careful when creating a RegionMask over a large area (such as a country)!
        Using the default pixel size over such a large area can
easily consume your system's memory
Parameters:
-----------
source : Anything acceptable by loadVector()
The vector data source to read from
where : str, int; optional
If string -> An SQL-like where statement to apply to the source
If int -> The feature's ID within the dataset
            * Feature attribute names do not need quotes
* String values should be wrapped in 'single quotes'
Example: If the source vector has a string attribute called "ISO" and
            an integer attribute called "POP", you could use....
where = "ISO='DEU' AND POP>1000"
geom : ogr.Geometry; optional
The geometry to search with
* All features are extracted which touch this geometry
pixelRes : float or tuple
The RegionMask's native pixel resolution(s)
* If float : A pixel size to apply to both the X and Y dimension
* If (float float) : An X-dimension and Y-dimension pixel size
srs : Anything acceptable to geokit.srs.loadSRS()
The srs context of the generated RegionMask object
* This srs is superseded by the srs in an explicitly defined extent
* The default srs EPSG3035 is only valid for a European context
extent : Extent object
The geospatial context of the generated region mask
* The extent must fit the given pixel sizes
* If not specified, the entire extent of the vector file is assumed
padExtent : float; optional
An amount by which to pad the extent before generating the RegionMask
limitOne : bool; optional
Whether or not to allow more than one feature to be extracted
Returns:
--------
RegionMask
"""
# Get all geoms which fit the search criteria
if isinstance(where, int):
geom,attr = extractFeature(source=source, where=where, srs=srs)
else:
ftrs = list(extractFeatures(source=source, where=where, srs=srs, asPandas=False))
if len(ftrs) ==0: raise GeoKitRegionMaskError("Zero features found")
elif len(ftrs) == 1:
geom = ftrs[0].geom
attr = ftrs[0].attr
else:
if limitOne: raise GeoKitRegionMaskError("Multiple fetures found. If you are okay with this, set 'limitOne' to False")
geom = flatten([f.geom for f in ftrs])
attr = None
# Done!
return RegionMask.fromGeom(geom, extent=extent, pixelRes=pixelRes, attributes=attr, padExtent=padExtent, srs=srs, **kwargs)
@staticmethod
def load(region, **kwargs):
"""Tries to initialize and return a RegionMask in the most appropriate way.
Note:
-----
If 'region' input is...
* Already a RegionMask, simply return it
* A file path, use RegionMask.fromVector
* An OGR Geometry object, assume is it to be loaded by RegionMask.fromGeom
* A NumPy array, assume is it to be loaded by RegionMask.fromMask
- An 'extent' input must also be given
Parameters:
-----------
region : Can be RegionMask, str, ogr.Geometry, numpy.ndarray
The shape defining the region over which to build the RegionMask
* See the note above
where : str, int; optional
If string -> An SQL-like where statement to apply to the source
If int -> The feature's ID within the dataset
            * Feature attribute names do not need quotes
* String values should be wrapped in 'single quotes'
Example: If the source vector has a string attribute called "ISO" and
            an integer attribute called "POP", you could use....
where = "ISO='DEU' AND POP>1000"
geom : ogr.Geometry; optional
The geometry to search with
* All features are extracted which touch this geometry
pixelRes : float or tuple
The RegionMask's native pixel resolution(s)
* If float : A pixel size to apply to both the X and Y dimension
* If (float float) : An X-dimension and Y-dimension pixel size
srs : Anything acceptable to geokit.srs.loadSRS()
The srs context of the generated RegionMask object
* This srs is superseded by the srs in an explicitly defined extent
* The default srs EPSG3035 is only valid for a European context
extent : Extent object
The geospatial context of the generated region mask
* The extent must fit the given pixel sizes
* If not specified, the entire extent of the vector file is assumed
padExtent : float; optional
An amount by which to pad the extent before generating the RegionMask
"""
if isinstance(region, RegionMask): return region
elif isinstance( region, str): return RegionMask.fromVector(region, **kwargs)
elif isinstance(region, ogr.Geometry): return RegionMask.fromGeom(region, **kwargs)
elif isinstance(region, np.ndarray): return RegionMask.fromMask(region, **kwargs)
else:
raise GeoKitRegionMaskError("Could not understand region input")
@property
def pixelRes(s):
"""The RegionMask's pixel size.
!!Only available when pixelWidth equals pixelHeight!!"""
if s._pixelRes is None:
raise GeoKitRegionMaskError("pixelRes only accessable when pixelWidth equals pixelHeight")
return s._pixelRes
def buildMask(s, **kwargs):
"""Explicitly build the RegionMask's mask matrix.
* The 'width' and 'height' attributes for the RegionMask are also set
when this function is called
* All kwargs are passed on to a call to geokit.vector.rasterize()
"""
if s._geometry is None:
raise GeoKitRegionMaskError("Cannot build mask when geometry is None")
s._mask = None
        s._mask = s.rasterize(s.vectorPath, applyMask=False, **kwargs).astype(bool)
s.height, s.width = s._mask.shape
@property
def mask(s):
"""The RegionMask's mask array as an 2-dimensional boolean numpy array.
* If no mask was given at the time of the RegionMask's creation, then a
mask will be generated on first access to the 'mask' property
* The mask can be rebuilt in a customized way using the
RegionMask.buildMask() function
"""
if(s._mask is None): s.buildMask()
return s._mask
@property
def area(s):
return s.mask.sum()*s.pixelWidth*s.pixelHeight
def buildGeometry(s):
"""Explicitly build the RegionMask's geometry"""
if s._mask is None:
raise GeoKitRegionMaskError("Cannot build geometry when mask is None")
s._geometry = None
s._geometry = polygonizeMask( s.mask, bounds=s.extent.xyXY, srs=s.extent.srs, flat=True )
@property
def geometry(s):
"""Fetches a clone of the RegionMask's geometry as an OGR Geometry object
* If a geometry was not provided when the RegionMask was initialized,
then one will be generated from the RegionMask's mask matrix in the
RegionMask's extent
        * The geometry can always be deleted and rebuilt using the
          RegionMask.buildGeometry() function
"""
if(s._geometry is None): s.buildGeometry()
return s._geometry.Clone()
@property
def vectorPath(s):
"""Returns a path to a vector path on disc which is built only once"""
if(s._vectorPath is None):
s._vectorPath = s._tempFile(ext=".shp")
createVector(s.geometry, output=s._vectorPath)
return s._vectorPath
@property
def vector(s):
"""Returns a vector saved in memory which is built only once"""
if(s._vector is None):
s._vector = quickVector(s.geometry)
return s._vector
def _repr_svg_(s):
if(not hasattr(s,"svg")):
f = BytesIO()
import matplotlib.pyplot as plt
plt.figure(figsize=(4,4))
ax = plt.subplot(111)
r= s.drawSelf(ax=ax)
ax.set_aspect('equal')
ax.autoscale(enable=True)
ax.axis('off')
plt.tight_layout()
plt.savefig(f, format="svg", dpi=100)
plt.close()
f.seek(0)
s.svg = f.read().decode('ascii')
return s.svg
def _tempFile(s, head="tmp", ext=".tif"):
"""***RM INTERNAL***
Use this to create a temporary file associated with the RegionMask which
will be deleted when the RM goes out of scope.
        !! BEWARE OF EXTERNAL DEPENDENCIES WHEN THE RM IS GOING OUT OF SCOPE,
THIS WILL CAUSE A LOT OF ISSUES !!
"""
if(not hasattr(s,"_TMPDIR")):
# Create a temporary directory to use with this shape (and associated processes)
s._TMPDIR = TemporaryDirectory()
return NamedTemporaryFile(suffix=ext, prefix=head, dir=s._TMPDIR.name, delete=True).name
def __del__(s):
if(hasattr(s, "_TMPDIR")): s._TMPDIR.cleanup()
def _resolve(s, div):
if(div<0): div = 1.0/abs(int(div))
return (s.pixelWidth/div, s.pixelHeight/div)
def applyMask(s, mat, noData=0):
"""Shortcut to apply the RegionMask's mask to an array. Mainly intended
for internal use
        * When the passed matrix does not have the same dimensions as the
          RegionMask's mask, it is assumed that the mask needs to be scaled so
          that the matrix dimensions match
* The RM's mask can only be scaled UP, and the given matrix's dimensions
          must be multiples of the mask's dimensions
Parameters:
-----------
mat : np.ndarray
The matrix to apply the mask to
            * Must have dimensions equal to, or multiples of, the mask's
noData : float
The no-data value to set into matrix's values which are not within
the region
Returns:
--------
numpy.ndarray
"""
if(noData is None): noData=0
# Get size
Y,X = mat.shape
# make output array
out = np.array(mat)
# Apply mask
if( s.mask.shape == mat.shape ): # matrix dimensions coincide with mask's data
out[~s.mask] = noData
elif( Y>s.height and X>s.width ):
if( not Y%s.height==0 or not X%s.width==0 ):
raise GeoKitRegionMaskError("Matrix dimensions must be multiples of mask dimensions")
yScale = Y//s.height
xScale = X//s.width
scaledMask = scaleMatrix(s.mask, (yScale,xScale))
sel = np.where(~scaledMask)
out[sel] = noData
else:
raise GeoKitRegionMaskError("Could not map mask onto matrix")
return out
#######################################################################################
## Raw value processor
def _returnBlank(s, resolutionDiv=1, forceMaskShape=False, applyMask=True, noData=None, **kwargs):
# make output
if not forceMaskShape and resolutionDiv > 1:
yN = s.mask.shape[0]*int(resolutionDiv)
xN = s.mask.shape[1]*int(resolutionDiv)
output = np.zeros( (yN,xN) )
else:
output = np.zeros(s.mask.shape)
# apply mask, maybe
if applyMask:
output = s.applyMask(output, noData)
# Done
return output
def indicateValues(s, source, value, buffer=None, resolutionDiv=1, forceMaskShape=False, applyMask=True, noData=None, resampleAlg='bilinear', **kwargs):
"""
Indicates those pixels in the RegionMask which correspond to a particular
value, or range of values, from a given raster datasource
Returns a matrix matching the RegionMask's mask dimensions wherein 0 means
the pixels is not included in the indicated set, and 1 meaning the pixel
is included in the indicated set. Intermediate values are also possible.
This results from a scenario when the datasource's resolution does not
line up perfectly with the RegionMask's resolution and, as a result, a
RegionMask pixel overlaps multiple datasource pixels which are not all
indicated (or not-indicated).
* Value processing is performed BEFORE a warp takes place
* Output from the warp is clipped to values between 0 and 1
* If a boolean matrix is desired of the result, use "result > 0.5"
Parameters:
-----------
source : str or gdal.Dataset
The raster datasource to indicate from
value : float or tuple
The value or range of values to indicate
* If float : The exact value to accept
              - May cause issues due to float comparison. Using an
integer is usually better
* If [int/float, ...] : The exact values to accept
* If (float, float) : The inclusive Min and Max values to accept
- None refers to no bound
- Ex. (None, 5) -> "Indicate all values equal to and below 5"
buffer : float; optional
A buffer region to add around the indicated pixels
* Units are in the RegionMask's srs
* The buffering occurs AFTER the indication and warping step and
so it may not represent the original dataset exactly
- Buffering can be made more accurate by increasing the
'resolutionDiv' input
resolutionDiv : int
The factor by which to divide the RegionMask's native resolution
* This is useful if you need to represent very fine details
resampleAlg : str; optional
The resampling algorithm to use when warping values
* Knowing which option to use can have significant impacts!
            * Options are: 'near', 'bilinear', 'cubic', 'average'
forceMaskShape : bool
If True, forces the returned matrix to have the same dimension as
the RegionMask's mask regardless of the 'resolutionDiv' argument
applyMask : bool
When True, the RegionMask's mask will be applied to the outputData
as described by RegionMask.applyMask
noData : numeric
The noData value to use when applying the mask
kwargs -- Passed on to RegionMask.warp()
* Most notably: 'resampleAlg'
Returns:
--------
numpy.ndarray
"""
# Unpack value
valueMin,valueMax = None, None
valueEquals = None
valueList = None
if isinstance(value, tuple):
valueMin,valueMax = value
elif isinstance(value, list):
valueList = value
else:
valueEquals = value
# make processor
def processor(data):
## Find nan values, maybe
if(not noData is None):
nodat = np.isnan(data)
## Do processing
if(not valueEquals is None):
output = data == valueEquals
elif(valueList is not None):
output = np.isin(data, valueList)
else:
output = np.ones(data.shape, dtype="bool")
if(not valueMin is None):
np.logical_and(data >= valueMin, output, output)
if(not valueMax is None):
np.logical_and(data <= valueMax, output, output)
## Fill nan values, maybe
if(not noData is None):
output[nodat] = noData
## Done!
return output
# Do processing
newDS = s.extent.mutateRaster(source, processor=processor, dtype="bool", noData=noData, matchContext=False)
# Warp onto region
final = s.warp(newDS, dtype="float32", resolutionDiv=resolutionDiv, resampleAlg=resampleAlg,
applyMask=False, noData=noData, returnMatrix=True, **kwargs)
# Check for results
if not (final > 0).any():
# no results were found
return s._returnBlank(resolutionDiv=resolutionDiv, forceMaskShape=forceMaskShape,
applyMask=applyMask, noData=noData)
# Apply a buffer if requested
if not buffer is None:
geoms = s.polygonizeMask(final>0.5, flat=False)
if len(geoms)>0:
geoms = [g.Buffer(buffer) for g in geoms]
areaDS = createVector(geoms)
final = s.rasterize( areaDS, dtype="float32", bands=[1], burnValues=[1], resolutionDiv=resolutionDiv,
applyMask=False, noData=noData)
else:
# no results were found
return s._returnBlank(resolutionDiv=resolutionDiv, forceMaskShape=forceMaskShape,
applyMask=applyMask, noData=noData)
        # apply a threshold in case of funky warping issues
final[final>1.0] = 1
final[final<0.0] = 0
# Make sure we have the mask's shape
if forceMaskShape:
if resolutionDiv > 1:
final = scaleMatrix(final, -1*resolutionDiv)
# Apply mask?
if applyMask: final = s.applyMask(final, noData)
# Return result
return final
#######################################################################################
## Vector feature indicator
def indicateFeatures(s, source, where=None, buffer=None, bufferMethod='geom', resolutionDiv=1, forceMaskShape=False, applyMask=True, noData=0, **kwargs):
"""
Indicates the RegionMask pixels which are found within the features (or
a subset of the features) contained in a given vector datasource
* A Rasterization is performed from the input data set to the
RegionMask's mask.
-See geokit.vector.rasterize or, more specifically gdal.RasterizeOptions
kwargs for more info on how to control the rasterization step
Parameters:
-----------
source : str or gdal.Dataset
The vector datasource to indicate from
where : str; optional
An SQL-style filtering string
* Can be used to filter the input source according to their attributes
* For tips, see "http://www.gdal.org/ogr_sql.html"
Ex:
where="eye_color='Green' AND IQ>90"
buffer : float; optional
A buffer region to add around the indicated pixels
* Units are in the RegionMask's srs
bufferMethod : str; optional
            An indicator determining the method to use when buffering
* Options are: 'geom' and 'area'
* If 'geom', the function will attempt to grow each of the geometries
directly using the ogr library
- This can fail sometimes when the geometries are particularly
complex or if some of the geometries are not valid (as in, they
have self-intersections)
* If 'area', the function will first rasterize the raw geometries and
will then apply the buffer to the indicated pixels
- This is the safer option although is not as accurate as the 'geom'
option since it does not capture the exact edges of the geometries
- This method can be made more accurate by increasing the
'resolutionDiv' input
resolutionDiv : int; optional
The factor by which to divide the RegionMask's native resolution
* This is useful if you need to represent very fine details
forceMaskShape : bool; optional
If True, forces the returned matrix to have the same dimension as
the RegionMask's mask regardless of the 'resolutionDiv' argument
applyMask : bool; optional
When True, the RegionMask's mask will be applied to the outputData
as described by RegionMask.applyMask
noData : numeric
The noData value to use when applying the mask
kwargs -- Passed on to RegionMask.rasterize()
* Most notably: 'allTouched'
Returns:
--------
numpy.ndarray
"""
# Ensure path to dataSet exists
source = loadVector(source)
# Do we need to buffer?
if buffer==0: buffer=None
if not buffer is None and bufferMethod == 'geom':
def doBuffer(ftr): return {'geom':ftr.geom.Buffer(buffer)}
source = s.mutateVector(source, where=where, processor=doBuffer, matchContext=True, keepAttributes=False, _slim=True)
where=None # Set where to None since the filtering has already been done
if source is None: # this happens when the returned dataset is empty
return s._returnBlank(resolutionDiv=resolutionDiv, forceMaskShape=forceMaskShape,
applyMask=applyMask, noData=noData, **kwargs)
# Do rasterize
final = s.rasterize( source, dtype='float32', value=1, where=where, resolutionDiv=resolutionDiv,
applyMask=False, noData=noData)
# Check for results
if not (final > 0).any():
# no results were found
return s._returnBlank(resolutionDiv=resolutionDiv, forceMaskShape=forceMaskShape,
applyMask=applyMask, noData=noData)
# maybe we want to do the other buffer method
if not buffer is None and bufferMethod == 'area':
geoms = polygonizeMask(final>0.5, bounds=s.extent, srs=s.srs, flat=False)
if len(geoms)>0:
geoms = [g.Buffer(buffer) for g in geoms]
dataSet = createVector(geoms)
final = s.rasterize( dataSet, dtype="float32", bands=[1], burnValues=[1], resolutionDiv=resolutionDiv,
applyMask=False, noData=noData)
else:
return s._returnBlank(resolutionDiv=resolutionDiv, forceMaskShape=forceMaskShape,
applyMask=applyMask, noData=noData)
# Make sure we have the mask's shape
if forceMaskShape:
if resolutionDiv > 1:
final = scaleMatrix(final, -1*resolutionDiv)
# Apply mask?
if applyMask: final = s.applyMask(final, noData)
# Return
return final
#######################################################################################
## Vector feature indicator
def indicateGeoms(s, geom, **kwargs):
"""
Convenience wrapper to indicate values found within a geometry (or a
list of geometries)
* Simply creates a new vector source from the given geometry and then
calls RegionMask.indicateFeatures
* All keywords are passed on to RegionMask.indicateFeatures
"""
# Make a vector dataset
ds = quickVector(geom)
# Indicate features
return s.indicateFeatures(ds, **kwargs)
#######################################################################################
## Make a sub region generator
def subRegions(s, gridSize, asMaskAndExtent=False):
"""Generate a number of sub regions on a grid which combine into the total
RegionMask area
"""
# get useful matrix info
yN, xN = s.mask.shape
pixelGridSize = int(gridSize/min(s.pixelWidth, s.pixelHeight))
# Make grid areas
count = 0
for ys in range(0, yN, pixelGridSize):
yn = min(yN, ys+pixelGridSize)
yMax = s.extent.yMax - ys*s.pixelHeight
yMin = s.extent.yMax - yn*s.pixelHeight
for xs in range(0, xN, pixelGridSize):
xn = min(xN, xs+pixelGridSize)
xMin = s.extent.xMin + xs*s.pixelWidth
xMax = s.extent.xMin + xn*s.pixelWidth
sectionMask = s.mask[ys:yn, xs:xn]
if not sectionMask.any(): continue
sectionExtent = Extent( xMin,yMin,xMax,yMax, srs=s.srs ).fit((s.pixelWidth, s.pixelHeight))
if asMaskAndExtent:
yield MaskAndExtent( sectionMask, sectionExtent, count)
else:
yield RegionMask.fromMask(sectionExtent, sectionMask, dict(id=count))
count+=1
#############################################################################
## CONVENIENCE WRAPPERS
def drawMask( s, ax=None, **kwargs):
"""Convenience wrapper around geokit.util.drawImage which plots the
RegionMask's mask over the RegionMask's context.
* See geokit.util.drawImage for more info on argument options
* Unless specified, the plotting extent is set to the RegionMask's extent
- This only plays a role when generating a new axis
"""
xlim = kwargs.pop("xlim", (s.extent.xMin, s.extent.xMax))
ylim = kwargs.pop("ylim", (s.extent.yMin, s.extent.yMax))
return drawImage( s.mask, ax=ax, xlim=xlim, ylim=ylim, **kwargs )
def drawImage( s, matrix, ax=None, drawSelf=True, **kwargs):
"""Convenience wrapper around geokit.util.drawImage which plots matrix data
which is assumed to match the boundaries of the RegionMask
* See geokit.util.drawImage for more info on argument options
* Unless specified, the plotting extent is set to the RegionMask's extent
- This only plays a role when generating a new axis
"""
xlim = kwargs.pop("xlim", (s.extent.xMin, s.extent.xMax))
ylim = kwargs.pop("ylim", (s.extent.yMin, s.extent.yMax))
ax = drawImage( matrix, ax=ax, xlim=xlim, ylim=ylim, **kwargs )
if drawSelf:
s.drawSelf( ax=ax, fc='None', ec='k', linewidth=2)
return ax
def drawGeoms( s, geoms, ax=None, drawSelf=True, **kwargs):
"""Convenience wrapper around geokit.geom.drawGeoms which plots geometries
which are then plotted within the context of the RegionMask
* See geokit.geom.drawGeoms for more info on argument options
* Geometries are always plotted in the RegionMask's SRS
* Unless specified, x and y limits are set to the RegionMask's extent
- This only plays a role when generating a new axis
"""
xlim = kwargs.pop("xlim", (s.extent.xMin, s.extent.xMax))
ylim = kwargs.pop("ylim", (s.extent.yMin, s.extent.yMax))
ax = drawGeoms( geoms, ax=ax, srs=s.srs, xlim=xlim, ylim=ylim, **kwargs )
if drawSelf:
s.drawSelf( ax=ax, fc='None', ec='k', linewidth=2)
return ax
def drawSelf( s, ax=None, **kwargs):
"""Convenience wrapper around geokit.geom.drawGeoms which plots the
RegionMask's geometry
* See geokit.geom.drawGeoms for more info on argument options
* Geometry are always plotted in the RegionMask's SRS
* Unless specified, x and y limits are set to the RegionMask's extent
- This only plays a role when generating a new axis
"""
xlim = kwargs.pop("xlim", (s.extent.xMin, s.extent.xMax))
ylim = kwargs.pop("ylim", (s.extent.yMin, s.extent.yMax))
return drawGeoms( s.geometry, ax=ax, srs=s.srs, xlim=xlim, ylim=ylim, **kwargs )
def drawRaster( s, source, ax=None, drawSelf=True, **kwargs):
"""Convenience wrapper around geokit.raster.drawRaster which plots a raster
dataset within the context of the RegionMask
* See geokit.raster.drawRaster for more info on argument options
* The raster is always warped to the RegionMask's SRS
* Unless specified, x and y limits are set to the RegionMask's extent
- This only plays a role when generating a new axis
"""
xlim = kwargs.pop("xlim", (s.extent.xMin, s.extent.xMax))
ylim = kwargs.pop("ylim", (s.extent.yMin, s.extent.yMax))
        ax = drawRaster( source, ax=ax, srs=s.srs, xlim=xlim, ylim=ylim, **kwargs )
if drawSelf:
s.drawSelf( ax=ax, fc='None', ec='k', linewidth=2)
return ax
def createRaster(s, output=None, resolutionDiv=1, **kwargs):
"""Convenience wrapper for geokit.raster.createRaster which sets 'srs',
'bounds', 'pixelWidth', and 'pixelHeight' inputs
Parameters:
-----------
output : str; optional
A path to an output file to write to
resolutionDiv : int
The factor by which to divide the RegionMask's native resolution
* This is useful if you need to represent very fine details
**kwargs:
All other keywargs are passed on to geokit.raster.createRaster()
* See below for argument descriptions
Returns:
--------
* If 'output' is None: gdal.Dataset
* If 'output' is a string: None
"""
pW, pH = s._resolve(resolutionDiv)
return s.extent.createRaster( pixelWidth=pW, pixelHeight=pH, output=output, **kwargs)
def warp(s, source, output=None, resolutionDiv=1, returnMatrix=True, applyMask=True, noData=None, resampleAlg='bilinear', **kwargs):
"""Convenience wrapper for geokit.raster.warp() which automatically sets
'srs', 'bounds', 'pixelWidth', and 'pixelHeight' inputs
Note:
-----
When creating an 'in memory' raster vs one which is saved to disk, a slightly
different algorithm is used which can sometimes add an extra row of pixels. Be
aware of this if you intend to compare value-matricies directly from rasters
generated with this function.
Parameters:
-----------
source : str
The path to the raster file to warp
output : str; optional
A path to an output file to write to
resampleAlg : str; optional
The resampling algorithm to use when warping values
* Knowing which option to use can have significant impacts!
            * Options are: 'near', 'bilinear', 'cubic', 'average'
resolutionDiv : int
The factor by which to divide the RegionMask's native resolution
* This is useful if you need to represent very fine details
        returnMatrix : bool
When True, the resulting raster's matrix is return
* Should have the same dimensions as the RegionMask's mask matrix
applyMask : bool
When True, the RegionMask's mask will be applied to the outputData
as described by RegionMask.applyMask
noData : numeric
The noData value to use when applying the mask
**kwargs:
All other keywargs are passed on to geokit.raster.warp()
Returns:
--------
* If 'output' is None: gdal.Dataset
* If 'output' is a string: None
"""
pW, pH = s._resolve(resolutionDiv)
# do warp
if returnMatrix:
newDS = s.extent.warp(source=source, pixelWidth=pW, pixelHeight=pH, resampleAlg=resampleAlg, output=output, **kwargs)
else:
if applyMask:
if "cutline" in kwargs:
raise GeoKitRegionMaskError("Cannot apply both a cutline and the mask when returning the warped dataset")
newDS = s.extent.warp(source=source, pixelWidth=pW, pixelHeight=pH, resampleAlg=resampleAlg, cutline=s.vector, output=output, **kwargs)
else:
newDS = s.extent.warp(source=source, pixelWidth=pW, pixelHeight=pH, resampleAlg=resampleAlg, output=output, **kwargs)
if not returnMatrix: return newDS
# Read array
if newDS is None: newDS = output
final = extractMatrix(newDS)
# clean up
del newDS
# Apply mask, maybe
if(applyMask):
final = s.applyMask(final, noData)
# Return
return final
def rasterize(s, source, output=None, resolutionDiv=1, returnMatrix=True, applyMask=True, noData=None, **kwargs):
"""Convenience wrapper for geokit.vector.rasterize() which automatically
sets the 'srs', 'bounds', 'pixelWidth', and 'pixelHeight' inputs
Note:
-----
When creating an 'in memory' raster vs one which is saved to disk, a slightly
different algorithm is used which can sometimes add an extra row of pixels. Be
aware of this if you intend to compare value-matricies directly from rasters
generated with this function.
Parameters:
-----------
source : str
The path to the vector file to load
output : str; optional
A path to an output file to write to
resolutionDiv : int; optional
The factor by which to divide the RegionMask's native resolution
* This is useful if you need to represent very fine details
        returnMatrix : bool; optional
When True, the resulting raster's matrix is return
* Should have the same dimensions as the RegionMask's mask matrix
applyMask : bool; optional
When True, the RegionMask's mask will be applied to the outputData
as described by RegionMask.applyMask
noData : numeric; optional
The noData value to use when applying the mask
**kwargs:
All other keywargs are passed on to geokit.vector.rasterize()
Returns:
--------
* If 'output' is None: gdal.Dataset
* If 'output' is a string: None
"""
pW, pH = s._resolve(resolutionDiv)
# do rasterization
if returnMatrix:
newDS = s.extent.rasterize(source=source, pixelWidth=pW, pixelHeight=pH, output=output, **kwargs)
else:
if applyMask:
if "cutline" in kwargs:
raise GeoKitRegionMaskError("Cannot apply both a cutline and the mask when returning the rasterized dataset")
newDS = s.extent.rasterize(source=source, pixelWidth=pW, pixelHeight=pH, cutline=s.vectorPath, output=output, **kwargs)
else:
newDS = s.extent.rasterize(source=source, pixelWidth=pW, pixelHeight=pH, output=output, **kwargs)
if not returnMatrix: return newDS
# Read array
if newDS is None: newDS = output
final = extractMatrix(newDS)
# clean up
del newDS
# Apply mask, maybe
if(applyMask):
final = s.applyMask(final, noData)
# Return
return final
def extractFeatures(s, source, **kwargs):
"""Convenience wrapper for geokit.vector.extractFeatures() by setting the
'geom' input to the RegionMask's geometry
Parameters:
-----------
source : str
The path to the vector file to load
**kwargs:
All other keyword arguments are passed on to vector.extractFeatures()
Returns:
--------
* If asPandas is True: pandas.DataFrame or pandas.Series
* If asPandas is False: generator
"""
return extractFeatures( source=source, geom=s.geometry, **kwargs )
def mutateVector(s, source, matchContext=False, **kwargs):
"""Convenience wrapper for geokit.vector.mutateVector which automatically
sets 'srs' and 'geom' inputs to the RegionMask's srs and geometry
* The RegionMask's geometry is always used to select features within the
source. If you need a broader scope, try using the RegionMask's extent's
version of this function
Note:
-----
If this is called without any arguments except for a source, it serves
to clip the vector source around the RegionMask
Parameters:
-----------
source : Anything acceptable to geokit.vector.loadVector()
The source to clip
matchContext : bool; optional
* If True, transforms all geometries to the RegionMask's srs before
mutating
* If False, only selects the geometries which touch the RegionMask
**kwargs:
All other keyword arguments are passed to geokit.vector.mutateVector
Returns:
--------
* If 'output' is None: gdal.Dataset
* If 'output' is a string: None
"""
# Get the working srs
if not matchContext:
vinfo = vectorInfo( source )
ext = s.extent.castTo(vinfo.srs)
else:
ext = s.extent
# mutate the source
return mutateVector(source, srs=ext.srs, geom=s.geometry, **kwargs)
def mutateRaster(s, source, matchContext=True, warpArgs=None, applyMask=True, processor=None, resampleAlg="bilinear", **mutateArgs):
"""Convenience wrapper for geokit.vector.mutateRaster which automatically
sets 'bounds'. It also warps the raster to the RegionMask's area
and srs before mutating
Note:
-----
If this is called without any arguments except for a source, it serves
to clip the raster source around the RegionMask, therefore performing
the same function as RegionMask.warp(..., returnMatrix=False)
Parameters:
-----------
source : Anything acceptable to geokit.raster.loadRaster()
The source to mutate
matchContext : bool; optional
* If True, Warp to the RegionMask's boundaries, srs and pixel size
before mutating
* If False, only warp to the RegionMask's boundaries, but keep its
srs and resolution intact
resampleAlg : str; optional
The resampling algorithm to use when warping values
* Knowing which option to use can have significant impacts!
            * Options are: 'near', 'bilinear', 'cubic', 'average'
warpArgs : dict; optional
Arguments to apply to the warping step
* See geokit.raster.warp()
processor - function; optional
The function performing the mutation of the raster's data
            * The function will take a single argument (a 2D numpy.ndarray)
* The function must return a numpy.ndarray of the same size as the input
* The return type must also be containable within a Float32 (int and
boolean is okay)
* See example in geokit.raster.mutateRaster for more info
applyMask : bool; optional
When True, the RegionMask's mask will be applied to the outputData
as described by RegionMask.applyMask
        **mutateArgs:
            All other keyword arguments are passed to geokit.raster.mutateRaster
Returns:
--------
* If 'output' is None: gdal.Dataset
* If 'output' is a string: None
"""
        output = mutateArgs.pop("output", None)
if warpArgs is None: warpArgs = {}
# Do the warp and mutation
if matchContext:
source = s.warp(source, returnMatrix=False, applyMask=applyMask, resampleAlg=resampleAlg, **warpArgs)
if processor is None: return source
else: return mutateRaster(source, output=output, **mutateArgs)
else:
if applyMask:
if "cutline" in warpArgs:
raise GeoKitRegionMaskError("Cannot apply both a cutline and the mask during prewarping")
warpArgs["cutline"] = s.vector
return s.extent.mutateRaster( source, matchContext=False, warpArgs=warpArgs, resampleAlg=resampleAlg, **mutateArgs)
def polygonizeMatrix(s, matrix, flat=False, shrink=True, _raw=False):
"""Convenience wrapper for geokit.geom.polygonizeMatrix which autmatically
sets the 'bounds' and 'srs' inputs. The matrix data is assumed to span the
RegionMask exactly.
Each unique-valued group of pixels will be converted to a geometry
Parameters:
-----------
matrix : matrix_like
The matrix which will be turned into a geometry set
* Must be 2 dimensional
* Must be integer or boolean type
flat : bool
If True, flattens the resulting geometries which share a contiguous matrix
value into a single geometry object
shrink : bool
If True, shrink all geoms by a tiny amount in order to avoid geometry
overlapping issues
* The total amount shrunk should be very very small
* Generally this should be left as True unless it is ABSOLUTELY
necessary to maintain the same area
Returns:
--------
pandas.DataFrame -> With columns:
'geom' -> The contiguous-valued geometries
'value' -> The value for each geometry
"""
return polygonizeMatrix(matrix, bounds=s.extent.xyXY, srs=s.srs, flat=flat, shrink=shrink, _raw=_raw)
def polygonizeMask(s, mask, bounds=None, srs=None, flat=True, shrink=True):
"""Convenience wrapper for geokit.geom.polygonizeMask which autmatically
sets the 'bounds' and 'srs' inputs. The mask data is assumed to span the
RegionMask exactly
Each True-valued group of pixels will be converted to a geometry
Parameters:
-----------
mask : matrix_like
The mask which will be turned into a geometry set
* Must be 2 dimensional
* Must be boolean type
* True values are interpreted as 'in the geometry'
flat : bool
If True, flattens the resulting geometries into a single geometry
shrink : bool
If True, shrink all geoms by a tiny amount in order to avoid geometry
overlapping issues
* The total amount shrunk should be very very small
* Generally this should be left as True unless it is ABSOLUTELY
            necessary to maintain the same area
Returns:
--------
If 'flat' is True: ogr.Geometry
else: [ogr.Geometry, ]
"""
return polygonizeMask(mask, bounds=s.extent.xyXY, srs=s.srs, flat=flat, shrink=shrink)
|
[
"matplotlib.pyplot.subplot",
"io.BytesIO",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] |
[((19065, 19074), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (19072, 19074), False, 'from io import BytesIO\n'), ((19132, 19158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (19142, 19158), True, 'import matplotlib.pyplot as plt\n'), ((19175, 19191), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (19186, 19191), True, 'import matplotlib.pyplot as plt\n'), ((19339, 19357), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19355, 19357), True, 'import matplotlib.pyplot as plt\n'), ((19370, 19407), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f'], {'format': '"""svg"""', 'dpi': '(100)'}), "(f, format='svg', dpi=100)\n", (19381, 19407), True, 'import matplotlib.pyplot as plt\n'), ((19420, 19431), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19429, 19431), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from . import utils
from .user import User
from .reaction import Reaction
from .object import Object
from .calls import CallMessage
import re
from .enums import MessageType, try_enum
class Message:
"""Represents a message from Discord.
There should be no need to create one of these manually.
Attributes
-----------
edited_timestamp : Optional[datetime.datetime]
A naive UTC datetime object containing the edited time of the message.
timestamp : datetime.datetime
A naive UTC datetime object containing the time the message was created.
tts : bool
Specifies if the message was done with text-to-speech.
type: :class:`MessageType`
The type of message. In most cases this should not be checked, but it is helpful
in cases where it might be a system message for :attr:`system_content`.
author
A :class:`Member` that sent the message. If :attr:`channel` is a
private channel, then it is a :class:`User` instead.
content : str
The actual contents of the message.
nonce
The value used by the discord server and the client to verify that the message is successfully sent.
        This is typically not important.
embeds : list
A list of embedded objects. The elements are objects that meet oEmbed's specification_.
.. _specification: http://oembed.com/
channel
The :class:`Channel` that the message was sent from.
Could be a :class:`PrivateChannel` if it's a private message.
In :issue:`very rare cases <21>` this could be a :class:`Object` instead.
For the sake of convenience, this :class:`Object` instance has an attribute ``is_private`` set to ``True``.
server : Optional[:class:`Server`]
The server that the message belongs to. If not applicable (i.e. a PM) then it's None instead.
call: Optional[:class:`CallMessage`]
The call that the message refers to. This is only applicable to messages of type
:attr:`MessageType.call`.
mention_everyone : bool
Specifies if the message mentions everyone.
.. note::
This does not check if the ``@everyone`` text is in the message itself.
Rather this boolean indicates if the ``@everyone`` text is in the message
**and** it did end up mentioning everyone.
mentions: list
A list of :class:`Member` that were mentioned. If the message is in a private message
then the list will be of :class:`User` instead. For messages that are not of type
:attr:`MessageType.default`\, this array can be used to aid in system messages.
For more information, see :attr:`system_content`.
.. warning::
The order of the mentions list is not in any particular order so you should
not rely on it. This is a discord limitation, not one with the library.
channel_mentions : list
A list of :class:`Channel` that were mentioned. If the message is in a private message
then the list is always empty.
role_mentions : list
A list of :class:`Role` that were mentioned. If the message is in a private message
then the list is always empty.
id : str
The message ID.
attachments : list
A list of attachments given to a message.
pinned: bool
Specifies if the message is currently pinned.
reactions : List[:class:`Reaction`]
Reactions to a message. Reactions can be either custom emoji or standard unicode emoji.
"""
__slots__ = [ 'edited_timestamp', 'timestamp', 'tts', 'content', 'channel',
'mention_everyone', 'embeds', 'id', 'mentions', 'author',
'channel_mentions', 'server', '_raw_mentions', 'attachments',
'_clean_content', '_raw_channel_mentions', 'nonce', 'pinned',
'role_mentions', '_raw_role_mentions', 'type', 'call',
'_system_content', 'reactions' ]
def __init__(self, **kwargs):
self.reactions = kwargs.pop('reactions')
for reaction in self.reactions:
reaction.message = self
self._update(**kwargs)
def _update(self, **data):
# at the moment, the timestamps seem to be naive so they have no time zone and operate on UTC time.
# we can use this to our advantage to use strptime instead of a complicated parsing routine.
# example timestamp: 2015-08-21T12:03:45.782000+00:00
# sometimes the .%f modifier is missing
self.edited_timestamp = utils.parse_time(data.get('edited_timestamp'))
self.timestamp = utils.parse_time(data.get('timestamp'))
self.tts = data.get('tts', False)
self.pinned = data.get('pinned', False)
self.content = data.get('content')
self.mention_everyone = data.get('mention_everyone')
self.embeds = data.get('embeds')
self.id = data.get('id')
self.channel = data.get('channel')
self.author = User(**data.get('author', {}))
self.nonce = data.get('nonce')
self.attachments = data.get('attachments')
self.type = try_enum(MessageType, data.get('type'))
self._handle_upgrades(data.get('channel_id'))
self._handle_mentions(data.get('mentions', []), data.get('mention_roles', []))
self._handle_call(data.get('call'))
# clear the cached properties
cached = filter(lambda attr: attr[0] == '_', self.__slots__)
for attr in cached:
try:
delattr(self, attr)
except AttributeError:
pass
def _handle_mentions(self, mentions, role_mentions):
self.mentions = []
self.channel_mentions = []
self.role_mentions = []
if getattr(self.channel, 'is_private', True):
self.mentions = [User(**m) for m in mentions]
return
if self.server is not None:
for mention in mentions:
id_search = mention.get('id')
member = self.server.get_member(id_search)
if member is not None:
self.mentions.append(member)
it = filter(None, map(lambda m: self.server.get_channel(m), self.raw_channel_mentions))
self.channel_mentions = utils._unique(it)
for role_id in role_mentions:
role = utils.get(self.server.roles, id=role_id)
if role is not None:
self.role_mentions.append(role)
def _handle_call(self, call):
if call is None or self.type is not MessageType.call:
self.call = None
return
# we get the participant source from the mentions array or
# the author
participants = []
for uid in call.get('participants', []):
if uid == self.author.id:
participants.append(self.author)
else:
user = utils.find(lambda u: u.id == uid, self.mentions)
if user is not None:
participants.append(user)
call['participants'] = participants
self.call = CallMessage(message=self, **call)
@utils.cached_slot_property('_raw_mentions')
def raw_mentions(self):
"""A property that returns an array of user IDs matched with
the syntax of <@user_id> in the message content.
This allows you receive the user IDs of mentioned users
even in a private message context.
"""
return re.findall(r'<@!?([0-9]+)>', self.content)
@utils.cached_slot_property('_raw_channel_mentions')
def raw_channel_mentions(self):
"""A property that returns an array of channel IDs matched with
the syntax of <#channel_id> in the message content.
"""
return re.findall(r'<#([0-9]+)>', self.content)
@utils.cached_slot_property('_raw_role_mentions')
def raw_role_mentions(self):
"""A property that returns an array of role IDs matched with
the syntax of <@&role_id> in the message content.
"""
return re.findall(r'<@&([0-9]+)>', self.content)
@utils.cached_slot_property('_clean_content')
def clean_content(self):
"""A property that returns the content in a "cleaned up"
manner. This basically means that mentions are transformed
into the way the client shows it. e.g. ``<#id>`` will transform
into ``#name``.
This will also transform @everyone and @here mentions into
non-mentions.
"""
transformations = {
re.escape('<#{0.id}>'.format(channel)): '#' + channel.name
for channel in self.channel_mentions
}
mention_transforms = {
re.escape('<@{0.id}>'.format(member)): '@' + member.display_name
for member in self.mentions
}
# add the <@!user_id> cases as well..
second_mention_transforms = {
re.escape('<@!{0.id}>'.format(member)): '@' + member.display_name
for member in self.mentions
}
transformations.update(mention_transforms)
transformations.update(second_mention_transforms)
if self.server is not None:
role_transforms = {
re.escape('<@&{0.id}>'.format(role)): '@' + role.name
for role in self.role_mentions
}
transformations.update(role_transforms)
def repl(obj):
return transformations.get(re.escape(obj.group(0)), '')
pattern = re.compile('|'.join(transformations.keys()))
result = pattern.sub(repl, self.content)
transformations = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
def repl2(obj):
return transformations.get(obj.group(0), '')
pattern = re.compile('|'.join(transformations.keys()))
return pattern.sub(repl2, result)
def _handle_upgrades(self, channel_id):
self.server = None
if isinstance(self.channel, Object):
return
if self.channel is None:
if channel_id is not None:
self.channel = Object(id=channel_id)
self.channel.is_private = True
return
if not self.channel.is_private:
self.server = self.channel.server
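            # Prefer the server Member object over the plain User so
            # server-specific state (nickname, roles) is available on author.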
found = self.server.get_member(self.author.id)
if found is not None:
self.author = found
@utils.cached_slot_property('_system_content')
def system_content(self):
"""A property that returns the content that is rendered
regardless of the :attr:`Message.type`.
In the case of :attr:`MessageType.default`\, this just returns the
regular :attr:`Message.content`. Otherwise this returns an English
message denoting the contents of the system message.
"""
if self.type is MessageType.default:
return self.content
if self.type is MessageType.pins_add:
return '{0.name} pinned a message to this channel.'.format(self.author)
if self.type is MessageType.recipient_add:
return '{0.name} added {1.name} to the group.'.format(self.author, self.mentions[0])
if self.type is MessageType.recipient_remove:
return '{0.name} removed {1.name} from the group.'.format(self.author, self.mentions[0])
if self.type is MessageType.channel_name_change:
return '{0.author.name} changed the channel name: {0.content}'.format(self)
if self.type is MessageType.channel_icon_change:
return '{0.author.name} changed the channel icon.'.format(self)
if self.type is MessageType.call:
# we're at the call message type now, which is a bit more complicated.
# we can make the assumption that Message.channel is a PrivateChannel
# with the type ChannelType.group or ChannelType.private
call_ended = self.call.ended_timestamp is not None
if self.channel.me in self.call.participants:
return '{0.author.name} started a call.'.format(self)
elif call_ended:
return 'You missed a call from {0.author.name}'.format(self)
else:
return '{0.author.name} started a call \N{EM DASH} Join the call.'.format(self)
|
[
"re.findall"
] |
[((8639, 8680), 're.findall', 're.findall', (['"""<@!?([0-9]+)>"""', 'self.content'], {}), "('<@!?([0-9]+)>', self.content)\n", (8649, 8680), False, 'import re\n'), ((8935, 8974), 're.findall', 're.findall', (['"""<#([0-9]+)>"""', 'self.content'], {}), "('<#([0-9]+)>', self.content)\n", (8945, 8974), False, 'import re\n'), ((9218, 9258), 're.findall', 're.findall', (['"""<@&([0-9]+)>"""', 'self.content'], {}), "('<@&([0-9]+)>', self.content)\n", (9228, 9258), False, 'import re\n')]
|
import os
from typing import Any, Dict, Optional
from airflow.hooks.filesystem import FSHook
from airflow.sensors.filesystem import FileSensor
from airflow.utils.context import Context
from astronomer.providers.core.triggers.filesystem import FileTrigger
class FileSensorAsync(FileSensor):
"""
Waits for a file or folder to land in a filesystem using async.
If the path given is a directory then this sensor will only return true if
    any files exist inside it (either directly, or within a subdirectory).
    :param fs_conn_id: reference to the File (path) connection id
:param filepath: File or folder name (relative to the base path set within the connection), can
be a glob.
:param recursive: when set to ``True``, enables recursive directory matching behavior of
``**`` in glob filepath parameter. Defaults to ``False``.
"""
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the trigger."""
if not self.poke(context=context):
hook = FSHook(self.fs_conn_id)
basepath = hook.get_path()
full_path = os.path.join(basepath, self.filepath)
self.log.info("Poking for file %s", full_path)
self.defer(
timeout=self.execution_timeout,
trigger=FileTrigger(
filepath=full_path,
recursive=self.recursive,
poll_interval=self.poke_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context: Dict[str, Any], event: Optional[Dict[str, Any]]) -> None:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
self.log.info("%s completed successfully.", self.task_id)
|
[
"airflow.hooks.filesystem.FSHook",
"astronomer.providers.core.triggers.filesystem.FileTrigger",
"os.path.join"
] |
[((1058, 1081), 'airflow.hooks.filesystem.FSHook', 'FSHook', (['self.fs_conn_id'], {}), '(self.fs_conn_id)\n', (1064, 1081), False, 'from airflow.hooks.filesystem import FSHook\n'), ((1145, 1182), 'os.path.join', 'os.path.join', (['basepath', 'self.filepath'], {}), '(basepath, self.filepath)\n', (1157, 1182), False, 'import os\n'), ((1339, 1435), 'astronomer.providers.core.triggers.filesystem.FileTrigger', 'FileTrigger', ([], {'filepath': 'full_path', 'recursive': 'self.recursive', 'poll_interval': 'self.poke_interval'}), '(filepath=full_path, recursive=self.recursive, poll_interval=\n self.poke_interval)\n', (1350, 1435), False, 'from astronomer.providers.core.triggers.filesystem import FileTrigger\n')]
|
from os import chdir, getcwd
from pytest import fixture
from detox_bridge import node
@fixture
def node_environment(tmpdir):
old = getcwd()
chdir(str(tmpdir))
open(".nvmrc", "w").write("v15.2.1")
yield node
chdir(old)
@fixture
def node_server(node_environment):
with node.start() as connection:
yield connection
|
[
"os.getcwd",
"os.chdir",
"detox_bridge.node.start"
] |
[((139, 147), 'os.getcwd', 'getcwd', ([], {}), '()\n', (145, 147), False, 'from os import chdir, getcwd\n'), ((231, 241), 'os.chdir', 'chdir', (['old'], {}), '(old)\n', (236, 241), False, 'from os import chdir, getcwd\n'), ((297, 309), 'detox_bridge.node.start', 'node.start', ([], {}), '()\n', (307, 309), False, 'from detox_bridge import node\n')]
|
import memtkinter as tk
from memtkinter.megawidgets import SettingsViewer
if __name__ == '__main__':
root = tk.Tk(keytype=tk.HKCU, filepath='./test.xml', name='test')
sv = SettingsViewer(root)
root.mainloop()
|
[
"memtkinter.megawidgets.SettingsViewer",
"memtkinter.Tk"
] |
[((114, 172), 'memtkinter.Tk', 'tk.Tk', ([], {'keytype': 'tk.HKCU', 'filepath': '"""./test.xml"""', 'name': '"""test"""'}), "(keytype=tk.HKCU, filepath='./test.xml', name='test')\n", (119, 172), True, 'import memtkinter as tk\n'), ((183, 203), 'memtkinter.megawidgets.SettingsViewer', 'SettingsViewer', (['root'], {}), '(root)\n', (197, 203), False, 'from memtkinter.megawidgets import SettingsViewer\n')]
|
import urllib2
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
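    """Redirect handler that records the HTTP status code of each redirect on the returned response."""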
def http_error_301(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
result.status = code
return result
|
[
"urllib2.HTTPRedirectHandler.http_error_302",
"urllib2.HTTPRedirectHandler.http_error_301"
] |
[((150, 227), 'urllib2.HTTPRedirectHandler.http_error_301', 'urllib2.HTTPRedirectHandler.http_error_301', (['self', 'req', 'fp', 'code', 'msg', 'headers'], {}), '(self, req, fp, code, msg, headers)\n', (192, 227), False, 'import urllib2\n'), ((355, 432), 'urllib2.HTTPRedirectHandler.http_error_302', 'urllib2.HTTPRedirectHandler.http_error_302', (['self', 'req', 'fp', 'code', 'msg', 'headers'], {}), '(self, req, fp, code, msg, headers)\n', (397, 432), False, 'import urllib2\n')]
|
import pytest
skip = False
if not skip:
@pytest.fixture(scope="module", params=["primary_assembly", "toplevel"])
def assembly(request):
return request.param
@pytest.fixture(scope="module", params=["98", None])
def release_version(request):
return request.param
@pytest.fixture(scope="module", params=["hard", "soft", "unmasked"])
def masking(request):
return request.param
def test_ensembl_genome_download_links(assembly, masking, release_version, ensembl):
"""Test Ensembl links with various options
    These genomes are hosted on ftp.ensembl.org.
    Vertebrates are downloaded over HTTP.
"""
mask = masking if masking != "unmasked" else "none"
toplevel = False if assembly == "primary_assembly" else True
version = release_version
assert ensembl.get_genome_download_link(
"GRCh38.p13", mask=mask, toplevel=toplevel, version=version
)
def test_ensemblgenomes_genome_download_links(masking, ensembl):
"""Test Ensembl FTP links for various genomes
These genomes are hosted on ftp.ensemblgenomes.org.
"""
mask = masking if masking != "unmasked" else "none"
for genome in ["Amel_HAv3.1", "ASM23943v1"]:
assert ensembl.get_genome_download_link(genome, mask=mask)
def test_ucsc_genome_download_links(masking, ucsc):
"""Test UCSC HTTP links for various genomes
Also test masking (unmasked should be ignored)."""
for genome in ["sacCer3", "hg38"]:
assert ucsc.get_genome_download_link(genome, mask=masking)
def test_ncbi_genome_download_links(masking, ncbi):
"""Test NCBI HTTPS links for various genomes
Also test masking (should be ignored).
These genomes are hosted on ftp://ftp.ncbi.nlm.nih.gov."""
for genome in ["Charlie1.0", "GRCh38.p13"]:
assert ncbi.get_genome_download_link(genome, mask=masking)
|
[
"pytest.fixture"
] |
[((47, 118), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': "['primary_assembly', 'toplevel']"}), "(scope='module', params=['primary_assembly', 'toplevel'])\n", (61, 118), False, 'import pytest\n'), ((181, 232), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': "['98', None]"}), "(scope='module', params=['98', None])\n", (195, 232), False, 'import pytest\n'), ((302, 369), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': "['hard', 'soft', 'unmasked']"}), "(scope='module', params=['hard', 'soft', 'unmasked'])\n", (316, 369), False, 'import pytest\n')]
|
import unittest
import numpy as np
from numpy import pi
from flow.core.experiment import SumoExperiment
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.vehicles import Vehicles
from flow.controllers.car_following_models import *
from flow.controllers.lane_change_controllers import StaticLaneChanger
from flow.scenarios.loop_merges.gen import LoopMergesGenerator
from flow.scenarios.loop_merges.loop_merges_scenario import LoopMergesScenario
from flow.envs.loop_merges import SimpleLoopMergesEnvironment
def loop_merge_exp_setup(vehicles=None):
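    """Build the SUMO parameters, environment, and loop-merges scenario used by
    the tests below. If no vehicles container is supplied, a default fleet of
    ten IDM-controlled vehicles (five on the ring, five merging) is created."""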
sumo_params = SumoParams(time_step=0.1,
human_speed_mode="no_collide",
sumo_binary="sumo")
if vehicles is None:
vehicles = Vehicles()
vehicles.add_vehicles(veh_id="idm",
acceleration_controller=(IDMController, {}),
lane_change_controller=(StaticLaneChanger, {}),
num_vehicles=5)
vehicles.add_vehicles(veh_id="merge-idm",
acceleration_controller=(IDMController, {}),
lane_change_controller=(StaticLaneChanger, {}),
num_vehicles=5)
additional_env_params = {"target_velocity": 8, "max-deacc": -6,
"max-acc": 3}
env_params = EnvParams(additional_params=additional_env_params)
additional_net_params = {"merge_in_length": 200, "merge_in_angle": pi / 9,
"merge_out_length": 200,
"merge_out_angle": pi * 17 / 9,
"ring_radius": 200 / (2 * pi), "resolution": 40,
"lanes": 1, "speed_limit": 30}
net_params = NetParams(no_internal_links=False,
additional_params=additional_net_params)
initial_config = InitialConfig(spacing="custom",
additional_params={"merge_bunching": 0})
scenario = LoopMergesScenario("loop-merges", LoopMergesGenerator, vehicles,
net_params,
initial_config=initial_config)
env = SimpleLoopMergesEnvironment(env_params, sumo_params, scenario)
return env, scenario
class TestLoopMerges(unittest.TestCase):
"""
Tests the loop_merges generator, scenario, and environment.
"""
def setUp(self):
# create the environment and scenario classes for a ring road
self.env, scenario = loop_merge_exp_setup()
# instantiate an experiment class
self.exp = SumoExperiment(self.env, scenario)
def tearDown(self):
# terminate the traci instance
self.env.terminate()
# free up used memory
self.env = None
self.exp = None
def test_it_runs(self):
"""
Tests that the loop merges experiment runs, and vehicles do not exit
the network.
"""
self.exp.run(1, 10)
def test_gen_custom_start_pos(self):
"""
        Tests that vehicles with the prefix "merge" are in the merge_in lane, and
all other vehicles are in the ring road.
"""
# reset the environment to ensure all vehicles are at their starting
# positions
self.env.reset()
ids = self.env.vehicles.get_ids()
# collect the starting edges of all vehicles
merge_starting_edges = []
other_starting_edges = []
for veh_id in ids:
if veh_id[:5] == "merge":
merge_starting_edges.append(self.env.vehicles.get_edge(veh_id))
else:
other_starting_edges.append(self.env.vehicles.get_edge(veh_id))
# ensure that all vehicles are starting in the edges they should be in
expected_merge_starting_edges = ["merge_in"]
self.assertTrue(
np.all([merge_starting_edges[i] in expected_merge_starting_edges
for i in range(len(merge_starting_edges))]))
self.assertTrue(
np.all([other_starting_edges[i] not in expected_merge_starting_edges
for i in range(len(other_starting_edges))]))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"flow.core.params.EnvParams",
"flow.core.params.SumoParams",
"flow.core.vehicles.Vehicles",
"flow.envs.loop_merges.SimpleLoopMergesEnvironment",
"flow.core.experiment.SumoExperiment",
"flow.core.params.InitialConfig",
"flow.core.params.NetParams",
"flow.scenarios.loop_merges.loop_merges_scenario.LoopMergesScenario"
] |
[((610, 686), 'flow.core.params.SumoParams', 'SumoParams', ([], {'time_step': '(0.1)', 'human_speed_mode': '"""no_collide"""', 'sumo_binary': '"""sumo"""'}), "(time_step=0.1, human_speed_mode='no_collide', sumo_binary='sumo')\n", (620, 686), False, 'from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\n'), ((1422, 1472), 'flow.core.params.EnvParams', 'EnvParams', ([], {'additional_params': 'additional_env_params'}), '(additional_params=additional_env_params)\n', (1431, 1472), False, 'from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\n'), ((1823, 1898), 'flow.core.params.NetParams', 'NetParams', ([], {'no_internal_links': '(False)', 'additional_params': 'additional_net_params'}), '(no_internal_links=False, additional_params=additional_net_params)\n', (1832, 1898), False, 'from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\n'), ((1948, 2020), 'flow.core.params.InitialConfig', 'InitialConfig', ([], {'spacing': '"""custom"""', 'additional_params': "{'merge_bunching': 0}"}), "(spacing='custom', additional_params={'merge_bunching': 0})\n", (1961, 2020), False, 'from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\n'), ((2072, 2183), 'flow.scenarios.loop_merges.loop_merges_scenario.LoopMergesScenario', 'LoopMergesScenario', (['"""loop-merges"""', 'LoopMergesGenerator', 'vehicles', 'net_params'], {'initial_config': 'initial_config'}), "('loop-merges', LoopMergesGenerator, vehicles, net_params,\n initial_config=initial_config)\n", (2090, 2183), False, 'from flow.scenarios.loop_merges.loop_merges_scenario import LoopMergesScenario\n'), ((2259, 2321), 'flow.envs.loop_merges.SimpleLoopMergesEnvironment', 'SimpleLoopMergesEnvironment', (['env_params', 'sumo_params', 'scenario'], {}), '(env_params, sumo_params, scenario)\n', (2286, 2321), False, 'from flow.envs.loop_merges import SimpleLoopMergesEnvironment\n'), ((4293, 4308), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4306, 4308), False, 'import unittest\n'), ((790, 800), 'flow.core.vehicles.Vehicles', 'Vehicles', ([], {}), '()\n', (798, 800), False, 'from flow.core.vehicles import Vehicles\n'), ((2676, 2710), 'flow.core.experiment.SumoExperiment', 'SumoExperiment', (['self.env', 'scenario'], {}), '(self.env, scenario)\n', (2690, 2710), False, 'from flow.core.experiment import SumoExperiment\n')]
|
from typing import Optional, Union
import itertools as it
from collections import OrderedDict
import numpy as np
import pandas as pd
from ConfigSpace import ConfigurationSpace
class fANOVA:
def __init__(
self,
X: Union[pd.DataFrame, np.ndarray],
Y,
configspace: ConfigurationSpace,
seed=0,
num_trees=16,
bootstrapping=True,
points_per_tree=-1,
ratio_features: float = 7 / 10,
min_samples_split=0,
min_samples_leaf=0,
max_depth=64,
cutoffs=(-np.inf, np.inf),
instance_features: Optional[np.ndarray] = None,
pca_components: Optional[int] = None,
):
"""
Calculate and provide midpoints and sizes from the forest's
split values in order to get the marginals
Parameters
------------
X: matrix with the features, either a np.array or a pd.DataFrame (numerically encoded)
Y: vector with the response values (numerically encoded)
configspace : ConfigSpace instantiation
num_trees: number of trees in the forest to be fit
seed: seed for the forests randomness
bootstrapping: whether to bootstrap the data for each tree or not
points_per_tree: number of points used for each tree
(only subsampling if bootstrapping is false)
ratio_features: number of features to be used at each split, default is 70%
min_samples_split: minimum number of samples required to attempt to split
min_samples_leaf: minimum number of samples required in a leaf
max_depth: maximal depth of each tree in the forest
cutoffs: tuple of (lower, upper), all values outside this range will be
mapped to either the lower or the upper bound. (See:
"Generalized Functional ANOVA Diagnostics for High Dimensional
Functions of Dependent Variables" by Hooker.)
"""
self.cs = configspace
self.cs_params = self.cs.get_hyperparameters()
self.num_dims = len(self.cs_params)
self.num_trees = num_trees
from deepcave.evaluators.epm.fanova_forest import fANOVAForest
self.forest = fANOVAForest(
configspace=configspace,
seed=seed,
num_trees=num_trees,
bootstrapping=bootstrapping,
points_per_tree=points_per_tree,
ratio_features=ratio_features,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
max_depth=max_depth,
cutoffs=cutoffs,
instance_features=instance_features,
pca_components=pca_components,
)
self.forest.train(X, Y)
def quantify_importance(
self, dims, depth=1, sort=True
) -> dict[tuple, tuple[float, float, float, float]]:
"""
Inputs:
`depth`: How often dims should be combined.
Returns:
ordered dict on total importance
Dict[Tuple[dim_names] -> (
mean_fractions_individual,
mean_fractions_total,
std_fractions_individual,
std_fractions_total
)]
"""
if type(dims[0]) == str:
idx = []
for i, param in enumerate(dims):
idx.append(self.cs.get_idx_by_hyperparameter_name(param))
dimensions = idx
# make sure that all the V_U values are computed for each tree
else:
dimensions = dims
vu_individual, vu_total = self.forest.compute_marginals(dimensions, depth)
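        # vu_individual/vu_total map each tuple of (sub-)dimensions to its
        # per-tree variance contribution.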
importance_dict = {}
for k in range(1, len(dimensions) + 1):
if k > depth:
break
for sub_dims in it.combinations(dimensions, k):
if type(dims[0]) == str:
dim_names = []
for j, dim in enumerate(sub_dims):
dim_names.append(self.cs.get_hyperparameter_by_idx(dim))
dim_names = tuple(dim_names)
importance_dict[dim_names] = {}
else:
importance_dict[sub_dims] = {}
# clean here to catch zero variance in a trees
non_zero_idx = np.nonzero(
[self.forest.trees_total_variance[t] for t in range(self.num_trees)]
)
if len(non_zero_idx[0]) == 0:
raise RuntimeError("Encountered zero total variance in all trees.")
fractions_total = np.array(
[
vu_total[sub_dims][t] / self.forest.trees_total_variance[t]
for t in non_zero_idx[0]
]
)
fractions_individual = np.array(
[
vu_individual[sub_dims][t] / self.forest.trees_total_variance[t]
for t in non_zero_idx[0]
]
)
if type(dims[0]) == str:
sub_dims = dim_names
importance_dict[sub_dims] = (
np.mean(fractions_individual),
np.mean(fractions_total),
np.std(fractions_individual),
np.std(fractions_total),
)
if sort:
sorted_importance_dict = {
k: v for k, v in sorted(importance_dict.items(), key=lambda item: item[1][1])
}
return sorted_importance_dict
return importance_dict
def marginal_mean_variance_for_values(self, dimlist, values_to_predict):
"""
Returns the marginal of selected parameters for specific values
Parameters
----------
dimlist: list
Contains the indices of ConfigSpace for the selected parameters
(starts with 0)
values_to_predict: list
Contains the values to be predicted
Returns
-------
tuple
marginal mean prediction and corresponding variance estimate
"""
        sample = np.full(self.num_dims, np.nan, dtype=float)  # np.float was removed from NumPy; self.num_dims is set in __init__
for i in range(len(dimlist)):
sample[dimlist[i]] = values_to_predict[i]
return self.forest.forest.marginal_mean_variance_prediction(sample)
def get_most_important_pairwise_marginals(self, params=None, n=10):
"""
Returns the n most important pairwise marginals from the whole ConfigSpace
Parameters
----------
params: list of strings or ints
If specified, limit analysis to those parameters. If ints, interpreting as indices from ConfigurationSpace
n: int
The number of most relevant pairwise marginals that will be returned
Returns
-------
list:
Contains the n most important pairwise marginals
"""
self.tot_imp_dict = OrderedDict()
pairwise_marginals = []
if params is None:
            dimensions = range(self.num_dims)
else:
if type(params[0]) == str:
idx = []
for i, param in enumerate(params):
idx.append(self.cs.get_idx_by_hyperparameter_name(param))
dimensions = idx
else:
dimensions = params
# pairs = it.combinations(dimensions,2)
        pairs = list(it.combinations(dimensions, 2))
        if params:
            n = len(pairs)
for combi in pairs:
            pairwise_marginal_performance = self.quantify_importance(combi, depth=2)  # depth=2 so the pair itself is evaluated
            tot_imp = pairwise_marginal_performance[combi][0]  # mean individual importance fraction
combi_names = [self.cs_params[combi[0]].name, self.cs_params[combi[1]].name]
pairwise_marginals.append((tot_imp, combi_names[0], combi_names[1]))
pairwise_marginal_performance = sorted(pairwise_marginals, reverse=True)
for marginal, p1, p2 in pairwise_marginal_performance[:n]:
self.tot_imp_dict[(p1, p2)] = marginal
return self.tot_imp_dict
def get_triple_marginals(self, params=None):
"""
        Returns the most important triple marginals for the given parameters
Parameters
----------
params: list
The parameters
Returns
-------
list:
Contains most important triple marginals
"""
self.tot_imp_dict = OrderedDict()
triple_marginals = []
if len(params) < 3:
raise RuntimeError(
"Number of parameters have to be greater than %i. At least 3 parameters needed"
% len(params)
)
if type(params[0]) == str:
idx = []
for i, param in enumerate(params):
idx.append(self.cs.get_idx_by_hyperparameter_name(param))
dimensions = idx
else:
dimensions = params
        triplets = list(it.combinations(dimensions, 3))
for combi in triplets:
            triple_marginal_performance = self.quantify_importance(combi, depth=3)  # depth=3 so the triple itself is evaluated
            tot_imp = triple_marginal_performance[combi][0]  # mean individual importance fraction
combi_names = [
self.cs_params[combi[0]].name,
self.cs_params[combi[1]].name,
self.cs_params[combi[2]].name,
]
triple_marginals.append((tot_imp, combi_names[0], combi_names[1], combi_names[2]))
triple_marginal_performance = sorted(triple_marginals, reverse=True)
if params:
            triple_marginal_performance = triple_marginal_performance[: len(triplets)]
for marginal, p1, p2, p3 in triple_marginal_performance:
self.tot_imp_dict[(p1, p2, p3)] = marginal
return self.tot_imp_dict
if __name__ == "__main__":
import sys
sys.path.insert(0, "../../")
import ConfigSpace
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
Constant,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
)
cs = CS.ConfigurationSpace(seed=1234)
alpha = CSH.UniformFloatHyperparameter(name="alpha", lower=0, upper=1)
beta = CSH.UniformFloatHyperparameter(name="beta", lower=0, upper=1)
gamma = CSH.UniformFloatHyperparameter(name="gamma", lower=0, upper=1)
gamma1 = CSH.UniformFloatHyperparameter(name="gamma1", lower=0, upper=1)
gamma2 = CSH.UniformFloatHyperparameter(name="gamma2", lower=0, upper=1)
gamma3 = CSH.UniformFloatHyperparameter(name="gamma3", lower=0, upper=1)
# Constants do not work
# gamma = CSH.Constant(name='gamma', value=1)
cs.add_hyperparameters([alpha, beta, gamma])
X = []
Y = []
for config in cs.sample_configuration(100):
cost = np.random.randn()
encoded = config.get_array()
X.append(encoded)
Y.append(cost)
X = np.array(X)
Y = np.array(Y)
conditional = {}
impute_values = {}
for idx, hp in enumerate(cs.get_hyperparameters()):
if idx not in conditional:
parents = cs.get_parents_of(hp.name)
if len(parents) == 0:
conditional[idx] = False
else:
conditional[idx] = True
if isinstance(hp, CategoricalHyperparameter):
impute_values[idx] = len(hp.choices)
elif isinstance(hp, (UniformFloatHyperparameter, UniformIntegerHyperparameter)):
impute_values[idx] = -1
elif isinstance(hp, Constant):
impute_values[idx] = 1
else:
raise ValueError
if conditional[idx] is True:
nonfinite_mask = ~np.isfinite(X[:, idx])
X[nonfinite_mask, idx] = impute_values[idx]
# f = fANOVA(X, Y, cs)
# imp = f.quantify_importance(cs.get_hyperparameter_names()[:3], depth=1)
# print(imp)
f = fANOVA(X, Y, cs)
    imp = f.quantify_importance(cs.get_hyperparameter_names(), depth=1, sort=False)
print(imp)
|
[
"numpy.full",
"ConfigSpace.ConfigurationSpace",
"numpy.random.randn",
"numpy.std",
"sys.path.insert",
"numpy.isfinite",
"deepcave.evaluators.epm.fanova_forest.fANOVAForest",
"itertools.combinations",
"numpy.mean",
"numpy.array",
"ConfigSpace.hyperparameters.UniformFloatHyperparameter",
"collections.OrderedDict"
] |
[((10094, 10122), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../"""'], {}), "(0, '../../')\n", (10109, 10122), False, 'import sys\n'), ((10434, 10466), 'ConfigSpace.ConfigurationSpace', 'CS.ConfigurationSpace', ([], {'seed': '(1234)'}), '(seed=1234)\n', (10455, 10466), True, 'import ConfigSpace as CS\n'), ((10480, 10542), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""alpha"""', 'lower': '(0)', 'upper': '(1)'}), "(name='alpha', lower=0, upper=1)\n", (10510, 10542), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10554, 10615), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""beta"""', 'lower': '(0)', 'upper': '(1)'}), "(name='beta', lower=0, upper=1)\n", (10584, 10615), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10628, 10690), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""gamma"""', 'lower': '(0)', 'upper': '(1)'}), "(name='gamma', lower=0, upper=1)\n", (10658, 10690), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10704, 10767), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""gamma1"""', 'lower': '(0)', 'upper': '(1)'}), "(name='gamma1', lower=0, upper=1)\n", (10734, 10767), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10781, 10844), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""gamma2"""', 'lower': '(0)', 'upper': '(1)'}), "(name='gamma2', lower=0, upper=1)\n", (10811, 10844), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((10858, 10921), 'ConfigSpace.hyperparameters.UniformFloatHyperparameter', 'CSH.UniformFloatHyperparameter', ([], {'name': '"""gamma3"""', 'lower': '(0)', 'upper': '(1)'}), "(name='gamma3', lower=0, upper=1)\n", (10888, 10921), True, 'import ConfigSpace.hyperparameters as CSH\n'), ((11252, 11263), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (11260, 11263), True, 'import numpy as np\n'), ((11272, 11283), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (11280, 11283), True, 'import numpy as np\n'), ((2241, 2597), 'deepcave.evaluators.epm.fanova_forest.fANOVAForest', 'fANOVAForest', ([], {'configspace': 'configspace', 'seed': 'seed', 'num_trees': 'num_trees', 'bootstrapping': 'bootstrapping', 'points_per_tree': 'points_per_tree', 'ratio_features': 'ratio_features', 'min_samples_split': 'min_samples_split', 'min_samples_leaf': 'min_samples_leaf', 'max_depth': 'max_depth', 'cutoffs': 'cutoffs', 'instance_features': 'instance_features', 'pca_components': 'pca_components'}), '(configspace=configspace, seed=seed, num_trees=num_trees,\n bootstrapping=bootstrapping, points_per_tree=points_per_tree,\n ratio_features=ratio_features, min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf, max_depth=max_depth, cutoffs=cutoffs,\n instance_features=instance_features, pca_components=pca_components)\n', (2253, 2597), False, 'from deepcave.evaluators.epm.fanova_forest import fANOVAForest\n'), ((6309, 6353), 'numpy.full', 'np.full', (['self.n_dims', 'np.nan'], {'dtype': 'np.float'}), '(self.n_dims, np.nan, dtype=np.float)\n', (6316, 6353), True, 'import numpy as np\n'), ((7135, 7148), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7146, 7148), False, 'from collections import OrderedDict\n'), ((8674, 8687), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8685, 8687), False, 'from collections import OrderedDict\n'), ((11138, 11155), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (11153, 11155), True, 'import numpy as np\n'), ((3906, 3936), 'itertools.combinations', 'it.combinations', (['dimensions', 'k'], {}), '(dimensions, k)\n', (3921, 3936), True, 'import itertools as it\n'), ((4707, 4809), 'numpy.array', 'np.array', (['[(vu_total[sub_dims][t] / self.forest.trees_total_variance[t]) for t in\n non_zero_idx[0]]'], {}), '([(vu_total[sub_dims][t] / self.forest.trees_total_variance[t]) for\n t in non_zero_idx[0]])\n', (4715, 4809), True, 'import numpy as np\n'), ((4951, 5059), 'numpy.array', 'np.array', (['[(vu_individual[sub_dims][t] / self.forest.trees_total_variance[t]) for t in\n non_zero_idx[0]]'], {}), '([(vu_individual[sub_dims][t] / self.forest.trees_total_variance[t]\n ) for t in non_zero_idx[0]])\n', (4959, 5059), True, 'import numpy as np\n'), ((7623, 7653), 'itertools.combinations', 'it.combinations', (['dimensions', '(2)'], {}), '(dimensions, 2)\n', (7638, 7653), True, 'import itertools as it\n'), ((9203, 9233), 'itertools.combinations', 'it.combinations', (['dimensions', '(3)'], {}), '(dimensions, 3)\n', (9218, 9233), True, 'import itertools as it\n'), ((12080, 12102), 'numpy.isfinite', 'np.isfinite', (['X[:, idx]'], {}), '(X[:, idx])\n', (12091, 12102), True, 'import numpy as np\n'), ((5311, 5340), 'numpy.mean', 'np.mean', (['fractions_individual'], {}), '(fractions_individual)\n', (5318, 5340), True, 'import numpy as np\n'), ((5362, 5386), 'numpy.mean', 'np.mean', (['fractions_total'], {}), '(fractions_total)\n', (5369, 5386), True, 'import numpy as np\n'), ((5408, 5436), 'numpy.std', 'np.std', (['fractions_individual'], {}), '(fractions_individual)\n', (5414, 5436), True, 'import numpy as np\n'), ((5458, 5481), 'numpy.std', 'np.std', (['fractions_total'], {}), '(fractions_total)\n', (5464, 5481), True, 'import numpy as np\n')]
|
import os
import sys
sys.path.append(os.getcwd())
import pickle
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import restools
from papers.none2021_predicting_transition_using_reservoir_computing.extensions import relaminarisation_time, \
survival_function, laminarization_probability
from comsdk.research import Research
from comsdk.comaux import load_from_json, find_all_files_by_named_regexp
from reducedmodels.transition_to_turbulence import MoehlisFaisstEckhardtModel
def build_relaminarisation_times_from_tasks(res, tasks):
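    """Collect the relaminarisation time of every stored trajectory in the given
    tasks, falling back to the final simulation time if no relaminarisation
    event is detected."""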
relam_times = []
for t_i in range(len(tasks)):
task_path = res.get_task_path(tasks[t_i])
with open(os.path.join(task_path, 'inputs.json'), 'r') as f:
inputs = json.load(f)
        m = MoehlisFaisstEckhardtModel(Re=inputs['re'], L_x=inputs['l_x'] * np.pi, L_z=inputs['l_z'] * np.pi)
filename_and_params = find_all_files_by_named_regexp(r'^(?P<num>\d+)$', task_path)
file_paths = [os.path.join(task_path, filename) for filename, params in filename_and_params]
for file_path in file_paths:
with open(file_path, 'rb') as f:
data_ = pickle.load(f)
t = data_['time']
ts = data_['timeseries']
ke = m.kinetic_energy(ts)
rt = relaminarisation_time(ke, T=1000, debug=False)
relam_times.append(rt if rt is not None else t[-1])
return relam_times
#def lam_probability(res, task):
# with open(os.path.join(res.get_task_path(task), f'trajectories_for_laminarisation_probability_for_true_Moehlis_model_{i}'), 'rb') as f:
# data = pickle.load(f)
# task_path = res.get_task_path(task)
# for energy_i in range(len(data['energy_levels'])):
# for rp_i in range(len(data['rps'][energy_i])):
# with open(os.path.join(task_path, 'inputs.json'), 'r') as f:
# inputs = json.load(f)
# m = MoehlisFaisstEckhardtModel(Re=inputs['re'], L_x=inputs['l_x'] * np.pi, L_z=inputs['l_z'] * np.pi)
# filename_and_params = find_all_files_by_named_regexp(r'^(?P<num>\d+)$', task_path)
# file_paths = [os.path.join(task_path, filename) for filename, params in filename_and_params]
# p_lams = np.zeros(len(file_paths))
# for i, file_path in enumerate(file_paths):
# with open(file_path, 'rb') as f:
# data_ = pickle.load(f)
# t = data_['time']
# ts = data_['timeseries']
# ke = m.kinetic_energy(ts)
# N = len(trajs[i])
# N_lam = 0
# for ens_member in trajs[i]:
# ke = m.kinetic_energy(ens_member[:-1, :])
# if np.all(ke > 10):
# N_lam += 1
# p_lams[i] = N_lam / N
## rt = relaminarisation_time(ke, T=1000, debug=False)
## relam_times.append(rt if rt is not None else t[-1])
## for i in range(len(trajs)):
# return p_lams
if __name__ == '__main__':
plt.style.use('resources/default.mplstyle')
res_id = 'RC_MOEHLIS'
res = Research.open(res_id)
fig, ax = plt.subplots(figsize=(10, 6))
# (85, 87) for predictions with noise
# (88,) for predictions without noise
for tasks, color, label in zip(((83, 86), (85, 87)), ('#ccdeea', 'tab:blue'), ('Truth', 'Prediction')):
m = MoehlisFaisstEckhardtModel(Re=500, L_x=1.75 * np.pi, L_z=1.2 * np.pi)
true_lam_probability = []
trajectory_global_i = 1
trajectory_numbers = []
rps = []
task_inputs = []
for task in tasks:
with open(os.path.join(res.get_task_path(task), 'inputs.json'), 'r') as f:
inputs = json.load(f)
task_inputs.append(inputs)
energy_levels = task_inputs[0]['energy_levels']
for energy_i in range(len(task_inputs[0]['energy_levels'])):
print(f'Reading energy level #{energy_i}: {task_inputs[0]["energy_levels"][energy_i]}')
trajs = []
for task, inputs in zip(tasks, task_inputs):
for rp_i in range(len(inputs['rps'][energy_i])):
with open(os.path.join(res.get_task_path(task), str(inputs['trajectory_numbers'][energy_i][rp_i])), 'rb') as f:
data = pickle.load(f)
trajs.append(data['timeseries'])
true_lam_probability.append(laminarization_probability(m, trajs))
#true_lam_probability.append(trajs)
# energy_levels = np.concatenate(energy_levels)
#true_lam_probability = np.concatenate(true_lam_probability)
ax.plot(energy_levels, true_lam_probability, 'o--', color=color, markersize=12, label=label)
# with open(os.path.join(res.local_research_path, 'laminarisation_probability_for_ESN_20_energy_levels_and_50_rps_per_each'), 'rb') as f:
# data = pickle.load(f)
# energy_levels = data['energy_levels']
# predicted_lam_probability = data['lam_probability']
# ax.plot(energy_levels, predicted_lam_probability, 'o--', color='tab:blue', markersize=12, label='Prediction')
# m = MoehlisFaisstEckhardtModel(Re=500, L_x=1.75 * np.pi, L_z=1.2 * np.pi)
# energy_levels = []
# energy_levels_bckp = []
# true_lam_probability = []
# trajectory_numbers = []
# trajectory_global_i = 1
# rps_bckp = []
# for i in [1, 2]:
# with open(os.path.join(res.get_task_path(83), f'trajectories_for_laminarisation_probability_for_true_Moehlis_model_{i}'), 'rb') as f:
# data = pickle.load(f)
# for energy_i in range(len(data['energy_levels'])):
# trajectory_numbers.append([])
# energy_levels_bckp.append(data['energy_levels'][energy_i])
# rps_bckp.append([rp.tolist() for rp in data['rps'][energy_i]])
# for rp_i in range(len(data['rps'][energy_i])):
# trajectory_numbers[-1].append(trajectory_global_i)
# #with open(os.path.join(res.get_task_path(83), str(trajectory_global_i)), 'wb') as f_ts:
# # n_steps = len(data['trajectories'][energy_i][rp_i])
# # t = np.linspace(0, n_steps, n_steps)
# # ts = data['trajectories'][energy_i][rp_i]
# # pickle.dump({'time': t, 'timeseries': ts}, f_ts)
# #trajectory_global_i += 1
# energy_levels.append(data['energy_levels'])
# true_lam_probability.append(laminarization_probability(m, data['trajectories']))
# with open('rps_bckp.json', 'w') as f:
# json.dump(rps_bckp, f)
# energy_levels = np.concatenate(energy_levels)
# true_lam_probability = np.concatenate(true_lam_probability)
# ax.plot(energy_levels, true_lam_probability, 'o--', color='#ccdeea', markersize=12, label='Truth')
ax.set_xlabel(r'$E$')
ax.set_ylabel(r'$P_{lam}(E)$')
ax.set_xscale('log')
ax.legend(fontsize=16)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(14)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
ax.grid()
plt.tight_layout()
plt.savefig('p_lam_esn.eps', dpi=200)
plt.show()
|
[
"json.load",
"matplotlib.pyplot.show",
"os.path.join",
"os.getcwd",
"comsdk.comaux.find_all_files_by_named_regexp",
"reducedmodels.transition_to_turbulence.MoehlisFaisstEckhardtModel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.style.use",
"comsdk.research.Research.open",
"papers.none2021_predicting_transition_using_reservoir_computing.extensions.relaminarisation_time",
"pickle.load",
"papers.none2021_predicting_transition_using_reservoir_computing.extensions.laminarization_probability",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] |
[((37, 48), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46, 48), False, 'import os\n'), ((2927, 2970), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""resources/default.mplstyle"""'], {}), "('resources/default.mplstyle')\n", (2940, 2970), True, 'import matplotlib.pyplot as plt\n'), ((3008, 3029), 'comsdk.research.Research.open', 'Research.open', (['res_id'], {}), '(res_id)\n', (3021, 3029), False, 'from comsdk.research import Research\n'), ((3045, 3074), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (3057, 3074), True, 'import matplotlib.pyplot as plt\n'), ((7017, 7035), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7033, 7035), True, 'import matplotlib.pyplot as plt\n'), ((7040, 7077), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""p_lam_esn.eps"""'], {'dpi': '(200)'}), "('p_lam_esn.eps', dpi=200)\n", (7051, 7077), True, 'import matplotlib.pyplot as plt\n'), ((7082, 7092), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7090, 7092), True, 'import matplotlib.pyplot as plt\n'), ((914, 974), 'comsdk.comaux.find_all_files_by_named_regexp', 'find_all_files_by_named_regexp', (['"""^(?P<num>\\\\d+)$"""', 'task_path'], {}), "('^(?P<num>\\\\d+)$', task_path)\n", (944, 974), False, 'from comsdk.comaux import load_from_json, find_all_files_by_named_regexp\n'), ((3279, 3348), 'reducedmodels.transition_to_turbulence.MoehlisFaisstEckhardtModel', 'MoehlisFaisstEckhardtModel', ([], {'Re': '(500)', 'L_x': '(1.75 * np.pi)', 'L_z': '(1.2 * np.pi)'}), '(Re=500, L_x=1.75 * np.pi, L_z=1.2 * np.pi)\n', (3305, 3348), False, 'from reducedmodels.transition_to_turbulence import MoehlisFaisstEckhardtModel\n'), ((767, 779), 'json.load', 'json.load', (['f'], {}), '(f)\n', (776, 779), False, 'import json\n'), ((796, 888), 'reducedmodels.transition_to_turbulence.MoehlisFaisstEckhardtModel', 'MoehlisFaisstEckhardtModel', ([], {'Re': 're', 'L_x': "(inputs['l_x'] * np.pi)", 'L_z': "(inputs['l_z'] * np.pi)"}), "(Re=re, L_x=inputs['l_x'] * np.pi, L_z=inputs[\n 'l_z'] * np.pi)\n", (822, 888), False, 'from reducedmodels.transition_to_turbulence import MoehlisFaisstEckhardtModel\n'), ((997, 1030), 'os.path.join', 'os.path.join', (['task_path', 'filename'], {}), '(task_path, filename)\n', (1009, 1030), False, 'import os\n'), ((1327, 1373), 'papers.none2021_predicting_transition_using_reservoir_computing.extensions.relaminarisation_time', 'relaminarisation_time', (['ke'], {'T': '(1000)', 'debug': '(False)'}), '(ke, T=1000, debug=False)\n', (1348, 1373), False, 'from papers.none2021_predicting_transition_using_reservoir_computing.extensions import relaminarisation_time, survival_function, laminarization_probability\n'), ((695, 733), 'os.path.join', 'os.path.join', (['task_path', '"""inputs.json"""'], {}), "(task_path, 'inputs.json')\n", (707, 733), False, 'import os\n'), ((1182, 1196), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1193, 1196), False, 'import pickle\n'), ((3628, 3640), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3637, 3640), False, 'import json\n'), ((4329, 4365), 'papers.none2021_predicting_transition_using_reservoir_computing.extensions.laminarization_probability', 'laminarization_probability', (['m', 'trajs'], {}), '(m, trajs)\n', (4355, 4365), False, 'from papers.none2021_predicting_transition_using_reservoir_computing.extensions import relaminarisation_time, survival_function, laminarization_probability\n'), ((4217, 4231), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4228, 4231), False, 'import pickle\n')]
|
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.forms import forms
from django.forms.models import modelformset_factory, formset_factory
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext, loader
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from catnap.rest_views import (JsonEmitterMixin, AutoContentTypeMixin,
RestView, ListView, DetailView)
from rest_resources import UserResource, DeckResource
import models
class MyRestView(JsonEmitterMixin, AutoContentTypeMixin, RestView):
'''
Our JSON-formatted response base class.
'''
content_type_template_string = 'application/vnd.catnap-test.{0}+json'
#@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MyRestView, self).dispatch(*args, **kwargs)
# Resource views
class EntryPoint(MyRestView):
'''
Entry-point to our REST API.
This view's URL is the only one that clients should need to know,
and the only one that should be documented in the API!
'''
def get(self, request):
'''
List the available top-level resource URLs.
'''
context = {
'deck_list_url': reverse('api-deck_list'),
#'users': reverse('rest-users'),
}
return self.render_to_response(context)
class DeckList(ListView, MyRestView):
'''
List of decks.
'''
content_subtype = 'DeckList'
resource = DeckResource
def get_queryset(self):
return models.Deck.objects.order_by('name')
class Deck(DetailView, MyRestView):
'''
Detail view of a single deck.
'''
content_subtype = 'Deck'
def get_object(self):
        return get_object_or_404(models.Deck, pk=self.kwargs.get('pk'))
|
[
"models.Deck.objects.order_by",
"django.core.urlresolvers.reverse"
] |
[((1698, 1734), 'models.Deck.objects.order_by', 'models.Deck.objects.order_by', (['"""name"""'], {}), "('name')\n", (1726, 1734), False, 'import models\n'), ((1389, 1413), 'django.core.urlresolvers.reverse', 'reverse', (['"""api-deck_list"""'], {}), "('api-deck_list')\n", (1396, 1413), False, 'from django.core.urlresolvers import reverse\n')]
|
#!/usr/bin/env python
import functools
import os.path
import random
import sys
import xml.etree.ElementTree
import numpy as np
import matplotlib.pyplot as plt
import skimage.data
import cv2
import PIL.Image
import pickle
def load_pascal_occluder(pascal_voc_root_path):
occluders = []
structuring_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
annotation_paths = list_filepaths(os.path.join(pascal_voc_root_path, 'Annotations'))
for annotation_path in annotation_paths:
xml_root = xml.etree.ElementTree.parse(annotation_path).getroot()
is_segmented = (xml_root.find('segmented').text != '0')
if not is_segmented:
continue
boxes = []
for i_obj, obj in enumerate(xml_root.findall('object')):
is_person = (obj.find('name').text == 'person')
is_difficult = (obj.find('difficult').text != '0')
is_truncated = (obj.find('truncated').text != '0')
if not is_difficult and not is_truncated:
bndbox = obj.find('bndbox')
box = [int(bndbox.find(s).text) for s in ['xmin', 'ymin', 'xmax', 'ymax']]
boxes.append((i_obj, box))
if not boxes:
continue
im_filename = xml_root.find('filename').text
seg_filename = im_filename.replace('jpg', 'png')
im_path = os.path.join(pascal_voc_root_path, 'JPEGImages', im_filename)
seg_path = os.path.join(pascal_voc_root_path, 'SegmentationObject', seg_filename)
im = np.asarray(PIL.Image.open(im_path))
labels = np.asarray(PIL.Image.open(seg_path))
for i_obj, (xmin, ymin, xmax, ymax) in boxes:
object_mask = (labels[ymin:ymax, xmin:xmax] == i_obj + 1).astype(np.uint8) * 255
object_image = im[ymin:ymax, xmin:xmax]
if cv2.countNonZero(object_mask) < 500:
# Ignore small objects
continue
# Reduce the opacity of the mask along the border for smoother blending
eroded = cv2.erode(object_mask, structuring_element)
object_mask[eroded < object_mask] = 192
object_with_mask = np.concatenate([object_image, object_mask[..., np.newaxis]], axis=-1)
if object_with_mask.size == 0:
continue
# Downscale for efficiency
object_with_mask = resize_by_factor(object_with_mask, 0.5)
occluders.append(object_with_mask)
print("total # of occluders: ", len(occluders))
return occluders
def load_coco_person_occluder(data_path, data_split):
img_dir_path = os.path.join(data_path, f'{data_split}2017')
part_seg_path = os.path.join(data_path, 'densepose_output', 'DensePose_maskRCNN_output')
dp_dict = load_dp_result(part_seg_path, data_split)
print("loaded dp result..., total imgs: ", len(dp_dict.keys()))
from densepose.data.structures import DensePoseResult
from timer import Timer
load_timer = Timer()
occluders = []
structuring_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
for img_name in dp_dict.keys():
img_path = os.path.join(img_dir_path, img_name)
load_timer.tic()
img = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
img = img[:, :, ::-1].copy()
# img = np.asarray(PIL.Image.open(img_path))
load_timer.toc()
dp_outputs = dp_dict[img_name]
for output in dp_outputs:
encoded_dp = output['dp']
iuv_arr = DensePoseResult.decode_png_data(*encoded_dp)
_, h, w = iuv_arr.shape
dp_bbox = output['bbox']
xmin, ymin = int(dp_bbox[0] + 0.5), int(dp_bbox[1] + 0.5)
xmax, ymax = xmin+w, ymin+h
object_mask = (iuv_arr[0] != 0).astype(np.uint8) * 255
object_image = img[ymin:ymax, xmin:xmax]
if cv2.countNonZero(object_mask) < 5000:
# Ignore small objects or low resolution objects
continue
# Reduce the opacity of the mask along the border for smoother blending
eroded = cv2.erode(object_mask, structuring_element)
object_mask[eroded < object_mask] = 192
object_with_mask = np.concatenate([object_image, object_mask[..., np.newaxis]], axis=-1)
if object_with_mask.size == 0:
continue
# Downscale for efficiency
object_with_mask = resize_by_factor(object_with_mask, 0.5)
occluders.append(object_with_mask)
if len(occluders) > 5000:
break
print("img load time: ", load_timer.total_time)
print("total # of occluders: ", len(occluders))
return occluders
def load_dp_result(part_seg_path, data_split):
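    """Group pickled DensePose detections with score > 0.5 by image file name."""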
print(f'Load DensePose Result of COCO {data_split} set')
data_path = os.path.join(part_seg_path, f'coco_{data_split}.pkl')
with open(data_path, 'rb') as f:
raw_data_list = pickle.load(f)
data_dict = {}
for rd in raw_data_list:
key = rd['file_name'].split('/')[-1]
scores = rd['scores']
pred_data_list = []
for idx in range(len(scores)):
if scores[idx] > 0.5:
pred_data = {}
pred_data['bbox'] = rd['pred_boxes_XYXY'][idx]
pred_data['dp'] = rd['pred_densepose'].results[idx]
pred_data_list.append(pred_data)
data_dict[key] = pred_data_list
return data_dict
def occlude_with_objects(im, occluders):
"""Returns an augmented version of `im`, containing some occluders from the Pascal VOC dataset."""
result = im.copy()
width_height = np.asarray([im.shape[1], im.shape[0]])
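    # Paste between one and four occluders (randint's upper bound is exclusive).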
count = np.random.randint(1, 5)
for _ in range(count):
occluder = random.choice(occluders)
im_scale_factor = min(width_height) / max(occluder.shape[:2])
random_scale_factor = np.random.uniform(0.2, 0.5)
scale_factor = random_scale_factor * im_scale_factor
try:
occluder = resize_by_factor(occluder, scale_factor)
except Exception as e:
print("error")
continue
# center = np.random.uniform([0, 0], width_height)
center = np.random.uniform(width_height/8, width_height/8*7)
paste_over(im_src=occluder, im_dst=result, center=center)
return result
def paste_over(im_src, im_dst, center):
"""Pastes `im_src` onto `im_dst` at a specified position, with alpha blending, in place.
Locations outside the bounds of `im_dst` are handled as expected (only a part or none of
`im_src` becomes visible).
Args:
        im_src: The RGBA image to be pasted onto `im_dst`. Its size can be arbitrary.
            Its alpha channel (0-255) controls the blending at each pixel:
            larger values mean more visibility for `im_src`.
        im_dst: The target image.
center: coordinates in `im_dst` where the center of `im_src` should be placed.
"""
width_height_src = np.asarray([im_src.shape[1], im_src.shape[0]])
width_height_dst = np.asarray([im_dst.shape[1], im_dst.shape[0]])
center = np.round(center).astype(np.int32)
raw_start_dst = center - width_height_src // 2
raw_end_dst = raw_start_dst + width_height_src
start_dst = np.clip(raw_start_dst, 0, width_height_dst)
end_dst = np.clip(raw_end_dst, 0, width_height_dst)
region_dst = im_dst[start_dst[1]:end_dst[1], start_dst[0]:end_dst[0]]
start_src = start_dst - raw_start_dst
end_src = width_height_src + (end_dst - raw_end_dst)
region_src = im_src[start_src[1]:end_src[1], start_src[0]:end_src[0]]
color_src = region_src[..., 0:3]
alpha = region_src[..., 3:].astype(np.float32)/255
im_dst[start_dst[1]:end_dst[1], start_dst[0]:end_dst[0]] = (
alpha * color_src + (1 - alpha) * region_dst)
return im_dst
def resize_by_factor(im, factor):
"""Returns a copy of `im` resized by `factor`, using bilinear interp for up and area interp
for downscaling.
"""
new_size = tuple(np.round(np.array([im.shape[1], im.shape[0]]) * factor).astype(int))
interp = cv2.INTER_LINEAR if factor > 1.0 else cv2.INTER_AREA
return cv2.resize(im, new_size, fx=factor, fy=factor, interpolation=interp)
def list_filepaths(dirpath):
names = os.listdir(dirpath)
paths = [os.path.join(dirpath, name) for name in names]
return sorted(filter(os.path.isfile, paths))
def main():
"""Demo of how to use the code"""
# path = 'something/something/VOCtrainval_11-May-2012/VOCdevkit/VOC2012'
path = sys.argv[1]
print('Loading occluders from Pascal VOC dataset...')
occluders = load_pascal_occluder(pascal_voc_root_path=path)
print('Found {} suitable objects'.format(len(occluders)))
original_im = cv2.resize(skimage.data.astronaut(), (256, 256))
fig, axarr = plt.subplots(3, 3, figsize=(7, 7))
for ax in axarr.ravel():
occluded_im = occlude_with_objects(original_im, occluders)
ax.imshow(occluded_im, interpolation="none")
ax.axis('off')
fig.tight_layout(h_pad=0)
# plt.savefig('examples.jpg', dpi=150, bbox_inches='tight')
plt.show()
if __name__ == '__main__':
dp_path = '/home/redarknight/projects/detectron2/projects/DensePose/'
sys.path.insert(0, dp_path)
occluder = load_coco_person_occluder('/media/disk2/hongsuk/data/COCO/2017/', data_split='train')
# img = occlude_with_objects(dummy, occluder)
|
[
"numpy.clip",
"numpy.random.randint",
"pickle.load",
"cv2.erode",
"numpy.round",
"timer.Timer",
"matplotlib.pyplot.subplots",
"cv2.resize",
"matplotlib.pyplot.show",
"cv2.countNonZero",
"numpy.asarray",
"numpy.concatenate",
"numpy.random.uniform",
"densepose.data.structures.DensePoseResult.decode_png_data",
"cv2.getStructuringElement",
"sys.path.insert",
"random.choice",
"cv2.imread",
"numpy.array"
] |
[((319, 371), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(8, 8)'], {}), '(cv2.MORPH_ELLIPSE, (8, 8))\n', (344, 371), False, 'import cv2\n'), ((2989, 2996), 'timer.Timer', 'Timer', ([], {}), '()\n', (2994, 2996), False, 'from timer import Timer\n'), ((3043, 3095), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(8, 8)'], {}), '(cv2.MORPH_ELLIPSE, (8, 8))\n', (3068, 3095), False, 'import cv2\n'), ((5689, 5727), 'numpy.asarray', 'np.asarray', (['[im.shape[1], im.shape[0]]'], {}), '([im.shape[1], im.shape[0]])\n', (5699, 5727), True, 'import numpy as np\n'), ((5740, 5763), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (5757, 5763), True, 'import numpy as np\n'), ((7079, 7125), 'numpy.asarray', 'np.asarray', (['[im_src.shape[1], im_src.shape[0]]'], {}), '([im_src.shape[1], im_src.shape[0]])\n', (7089, 7125), True, 'import numpy as np\n'), ((7149, 7195), 'numpy.asarray', 'np.asarray', (['[im_dst.shape[1], im_dst.shape[0]]'], {}), '([im_dst.shape[1], im_dst.shape[0]])\n', (7159, 7195), True, 'import numpy as np\n'), ((7363, 7406), 'numpy.clip', 'np.clip', (['raw_start_dst', '(0)', 'width_height_dst'], {}), '(raw_start_dst, 0, width_height_dst)\n', (7370, 7406), True, 'import numpy as np\n'), ((7421, 7462), 'numpy.clip', 'np.clip', (['raw_end_dst', '(0)', 'width_height_dst'], {}), '(raw_end_dst, 0, width_height_dst)\n', (7428, 7462), True, 'import numpy as np\n'), ((8274, 8342), 'cv2.resize', 'cv2.resize', (['im', 'new_size'], {'fx': 'factor', 'fy': 'factor', 'interpolation': 'interp'}), '(im, new_size, fx=factor, fy=factor, interpolation=interp)\n', (8284, 8342), False, 'import cv2\n'), ((8939, 8973), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(7, 7)'}), '(3, 3, figsize=(7, 7))\n', (8951, 8973), True, 'import matplotlib.pyplot as plt\n'), ((9245, 9255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9253, 9255), True, 'import matplotlib.pyplot as plt\n'), ((9363, 9390), 'sys.path.insert', 'sys.path.insert', (['(0)', 'dp_path'], {}), '(0, dp_path)\n', (9378, 9390), False, 'import sys\n'), ((3227, 3297), 'cv2.imread', 'cv2.imread', (['img_path', '(cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)'], {}), '(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n', (3237, 3297), False, 'import cv2\n'), ((4989, 5003), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5000, 5003), False, 'import pickle\n'), ((5811, 5835), 'random.choice', 'random.choice', (['occluders'], {}), '(occluders)\n', (5824, 5835), False, 'import random\n'), ((5937, 5964), 'numpy.random.uniform', 'np.random.uniform', (['(0.2)', '(0.5)'], {}), '(0.2, 0.5)\n', (5954, 5964), True, 'import numpy as np\n'), ((6260, 6317), 'numpy.random.uniform', 'np.random.uniform', (['(width_height / 8)', '(width_height / 8 * 7)'], {}), '(width_height / 8, width_height / 8 * 7)\n', (6277, 6317), True, 'import numpy as np\n'), ((2051, 2094), 'cv2.erode', 'cv2.erode', (['object_mask', 'structuring_element'], {}), '(object_mask, structuring_element)\n', (2060, 2094), False, 'import cv2\n'), ((2178, 2247), 'numpy.concatenate', 'np.concatenate', (['[object_image, object_mask[..., np.newaxis]]'], {'axis': '(-1)'}), '([object_image, object_mask[..., np.newaxis]], axis=-1)\n', (2192, 2247), True, 'import numpy as np\n'), ((3548, 3592), 'densepose.data.structures.DensePoseResult.decode_png_data', 'DensePoseResult.decode_png_data', (['*encoded_dp'], {}), '(*encoded_dp)\n', (3579, 3592), False, 'from densepose.data.structures import DensePoseResult\n'), ((4146, 4189), 'cv2.erode', 'cv2.erode', (['object_mask', 'structuring_element'], {}), '(object_mask, structuring_element)\n', (4155, 4189), False, 'import cv2\n'), ((4273, 4342), 'numpy.concatenate', 'np.concatenate', (['[object_image, object_mask[..., np.newaxis]]'], {'axis': '(-1)'}), '([object_image, object_mask[..., np.newaxis]], axis=-1)\n', (4287, 4342), True, 'import numpy as np\n'), ((7210, 7226), 'numpy.round', 'np.round', (['center'], {}), '(center)\n', (7218, 7226), True, 'import numpy as np\n'), ((1844, 1873), 'cv2.countNonZero', 'cv2.countNonZero', (['object_mask'], {}), '(object_mask)\n', (1860, 1873), False, 'import cv2\n'), ((3912, 3941), 'cv2.countNonZero', 'cv2.countNonZero', (['object_mask'], {}), '(object_mask)\n', (3928, 3941), False, 'import cv2\n'), ((8137, 8173), 'numpy.array', 'np.array', (['[im.shape[1], im.shape[0]]'], {}), '([im.shape[1], im.shape[0]])\n', (8145, 8173), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import csv
import sys
from lib.config import get_or_create as get_or_create_config
from lib.rest import REST_CONFIG_FIELDS, api_get, api_get_paged
config = get_or_create_config('config.ini', 'bamboo', REST_CONFIG_FIELDS)
auth = (config['username'], config['password'])
base_path = config['url']
writer = csv.writer(sys.stdout)
def date_to_sheets_format(iso_date):
return iso_date.replace('T', ' ').split('+', 1)[0].rstrip('Z')
def seconds_to_sheets_time(time_secs):
return time_secs / (24 * 3600)
def result_row(last_result):
plan_key = last_result['plan']['master']['key'] if 'master' in last_result['plan'] else last_result['plan']['key']
plan_name = last_result['plan']['master']['shortName'] if 'master' in last_result['plan'] else last_result['plan']['shortName']
branch_key = last_result['plan']['key']
branch_name = last_result['plan']['shortName'] if 'master' in last_result['plan'] else ''
return [plan_key, plan_name, branch_key, branch_name, last_result['plan']['enabled'], last_result['buildResultKey'], date_to_sheets_format(last_result['buildCompletedDate']), seconds_to_sheets_time(last_result['buildDurationInSeconds']), last_result['lifeCycleState'], last_result['successful'], last_result['buildReason']]
def latest_result_row(results):
if len(results) > 0:
return result_row(results[0])
else:
return None
for response_json in api_get_paged('/plan', {'expand': 'plans.plan.branches'}, 'plans', base_path=base_path, auth=auth):
plans = response_json['plan']
for plan in plans:
branch_keys = [plan['key']] + [branch['key'] for branch in plan['branches']['branch']]
for branch_key in branch_keys:
result_resp = api_get('/result/%s' % branch_key, {'expand': 'results.result.plan'}, base_path=base_path, auth=auth).json()
latest_row = latest_result_row(result_resp['results']['result'])
if latest_row is not None:
writer.writerow(latest_row)
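# --- Hedged example (not part of the original script) ---
# Based on the keys read above (``username``, ``password``, ``url``), the
# ``[bamboo]`` section of ``config.ini`` would look roughly like the sketch
# below; the exact fields in REST_CONFIG_FIELDS and how ``lib.rest`` joins the
# base URL are assumptions, not taken from this file.
#
#   [bamboo]
#   url = https://bamboo.example.com
#   username = build-reader
#   password = secret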
|
[
"lib.rest.api_get_paged",
"lib.config.get_or_create",
"csv.writer",
"lib.rest.api_get"
] |
[((177, 241), 'lib.config.get_or_create', 'get_or_create_config', (['"""config.ini"""', '"""bamboo"""', 'REST_CONFIG_FIELDS'], {}), "('config.ini', 'bamboo', REST_CONFIG_FIELDS)\n", (197, 241), True, 'from lib.config import get_or_create as get_or_create_config\n'), ((325, 347), 'csv.writer', 'csv.writer', (['sys.stdout'], {}), '(sys.stdout)\n', (335, 347), False, 'import csv\n'), ((1423, 1525), 'lib.rest.api_get_paged', 'api_get_paged', (['"""/plan"""', "{'expand': 'plans.plan.branches'}", '"""plans"""'], {'base_path': 'base_path', 'auth': 'auth'}), "('/plan', {'expand': 'plans.plan.branches'}, 'plans',\n base_path=base_path, auth=auth)\n", (1436, 1525), False, 'from lib.rest import REST_CONFIG_FIELDS, api_get, api_get_paged\n'), ((1740, 1845), 'lib.rest.api_get', 'api_get', (["('/result/%s' % branch_key)", "{'expand': 'results.result.plan'}"], {'base_path': 'base_path', 'auth': 'auth'}), "('/result/%s' % branch_key, {'expand': 'results.result.plan'},\n base_path=base_path, auth=auth)\n", (1747, 1845), False, 'from lib.rest import REST_CONFIG_FIELDS, api_get, api_get_paged\n')]
|
from starfish.core.experiment.builder import build_image, TileFetcher
from starfish.core.experiment.builder.defaultproviders import OnesTile, tile_fetcher_factory
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.types import Axes
def synthetic_stack(
num_round: int = 4,
num_ch: int = 4,
num_z: int = 12,
tile_height: int = 50,
tile_width: int = 40,
tile_fetcher: TileFetcher = None,
) -> ImageStack:
"""generate a synthetic ImageStack
Returns
-------
ImageStack :
        imagestack containing a tensor whose default shape is (4, 4, 12, 50, 40)
and whose default values are all 1.
"""
if tile_fetcher is None:
tile_fetcher = tile_fetcher_factory(
OnesTile,
False,
{Axes.Y: tile_height, Axes.X: tile_width},
)
collection = build_image(
range(1),
range(num_round),
range(num_ch),
range(num_z),
tile_fetcher,
)
tileset = list(collection.all_tilesets())[0][1]
return ImageStack.from_tileset(tileset)
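# --- Hedged usage sketch (not part of the original module) ---
# A minimal call producing a small stack; the keyword names come from the
# signature above, and inspecting the result via ``print`` is purely
# illustrative.
if __name__ == "__main__":
    stack = synthetic_stack(num_round=2, num_ch=3, num_z=4, tile_height=30, tile_width=20)
    print(stack)  # expected to describe a (2, 3, 4, 30, 20)-shaped ImageStack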
|
[
"starfish.core.imagestack.imagestack.ImageStack.from_tileset",
"starfish.core.experiment.builder.defaultproviders.tile_fetcher_factory"
] |
[((1086, 1118), 'starfish.core.imagestack.imagestack.ImageStack.from_tileset', 'ImageStack.from_tileset', (['tileset'], {}), '(tileset)\n', (1109, 1118), False, 'from starfish.core.imagestack.imagestack import ImageStack\n'), ((746, 831), 'starfish.core.experiment.builder.defaultproviders.tile_fetcher_factory', 'tile_fetcher_factory', (['OnesTile', '(False)', '{Axes.Y: tile_height, Axes.X: tile_width}'], {}), '(OnesTile, False, {Axes.Y: tile_height, Axes.X: tile_width}\n )\n', (766, 831), False, 'from starfish.core.experiment.builder.defaultproviders import OnesTile, tile_fetcher_factory\n')]
|
# Generated by Django 4.0.2 on 2022-02-06 08:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('twilioconfig', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PhoneOwnership',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
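# Note (editorial, not part of the generated migration): the PhoneNumberField
# above is provided by the third-party ``django-phonenumber-field`` package,
# so that package must be installed for this migration to import. Assuming a
# standard Django project, the migration is applied with:
#
#   python manage.py migrate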
|
[
"django.db.models.BigAutoField",
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey"
] |
[((264, 321), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (295, 321), False, 'from django.db import migrations, models\n'), ((502, 598), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (521, 598), False, 'from django.db import migrations, models\n'), ((727, 823), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (744, 823), False, 'from django.db import migrations, models\n')]
|
from __future__ import print_function
import numpy as np
import pandas as pd
import numpy.testing as npt
import pytest
import os
from collections import OrderedDict
import lifetimes.estimation as estimation
import lifetimes.utils as utils
from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations,\
load_transaction_data
@pytest.fixture
def cdnow_customers():
return load_cdnow_summary()
PATH_SAVE_MODEL = './base_fitter.pkl'
PATH_SAVE_BGNBD_MODEL = './betageo_fitter.pkl'
class TestBaseFitter():
def test_repr(self):
base_fitter = estimation.BaseFitter()
assert repr(base_fitter) == '<lifetimes.BaseFitter>'
base_fitter.params_ = dict(x=12.3, y=42)
base_fitter.data = np.array([1, 2, 3])
assert repr(base_fitter) == '<lifetimes.BaseFitter: fitted with 3 subjects, x: 12.30, y: 42.00>'
base_fitter.data = None
assert repr(base_fitter) == '<lifetimes.BaseFitter: x: 12.30, y: 42.00>'
def test_unload_params(self):
base_fitter = estimation.BaseFitter()
with pytest.raises(ValueError):
base_fitter._unload_params()
base_fitter.params_ = dict(x=12.3, y=42)
npt.assert_array_almost_equal([12.3, 42], base_fitter._unload_params('x', 'y'))
def test_save_load_model(self):
base_fitter = estimation.BaseFitter()
base_fitter.save_model(PATH_SAVE_MODEL)
assert os.path.exists(PATH_SAVE_MODEL) == True
base_fitter_saved = estimation.BaseFitter()
base_fitter_saved.load_model(PATH_SAVE_MODEL)
assert repr(base_fitter) == repr(base_fitter_saved)
os.remove(PATH_SAVE_MODEL)
class TestBetaGeoBetaBinomFitter():
@pytest.fixture()
def donations(self):
return load_donations()
def test_params_out_is_close_to_Hardie_paper(self, donations):
donations = donations
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
expected = np.array([1.204, 0.750, 0.657, 2.783])
npt.assert_array_almost_equal(expected, np.array(bbtf._unload_params('alpha','beta','gamma','delta')),
decimal=2)
def test_prob_alive_is_close_to_Hardie_paper_table_6(self, donations):
"""Table 6: P(Alive in 2002) as a Function of Recency and Frequency"""
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
bbtf.data['prob_alive'] = bbtf.conditional_probability_alive(1, donations['frequency'], donations['recency'], donations['periods'])
# Expected probabilities for last year 1995-0 repeat, 1999-2 repeat, 2001-6 repeat
expected = np.array([0.11, 0.59, 0.93])
prob_list = np.zeros(3)
prob_list[0] = (bbtf.data[(bbtf.data['frequency'] == 0) & (bbtf.data['recency'] == 0)]['prob_alive'])
prob_list[1] = (bbtf.data[(bbtf.data['frequency'] == 2) & (bbtf.data['recency'] == 4)]['prob_alive'])
prob_list[2] = (bbtf.data[(bbtf.data['frequency'] == 6) & (bbtf.data['recency'] == 6)]['prob_alive'])
npt.assert_array_almost_equal(expected, prob_list, decimal=2)
def test_conditional_expectation_returns_same_value_as_Hardie_excel_sheet(self, donations):
"""
Total from Hardie's Conditional Expectations (II) sheet.
http://brucehardie.com/notes/010/BGBB_2011-01-20_XLSX.zip
"""
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
pred_purchases = bbtf.conditional_expected_number_of_purchases_up_to_time(5, donations['frequency'], donations['recency'], donations['periods']) * donations['weights']
expected = 12884.2 # Sum of column F Exp Tot
npt.assert_almost_equal(expected, pred_purchases.sum(), decimal=0)
def test_expected_purchases_in_n_periods_returns_same_value_as_Hardie_excel_sheet(self, donations):
"""Total expected from Hardie's In-Sample Fit sheet."""
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
expected = np.array([3454.9, 1253.1]) # Cells C18 and C24
estimated = bbtf.expected_number_of_transactions_in_first_n_periods(6).loc[[0,6]].values.flatten()
npt.assert_almost_equal(expected, estimated, decimal=0)
def test_fit_with_index(self, donations):
bbtf = estimation.BetaGeoBetaBinomFitter()
index = range(len(donations), 0, -1)
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
index=index
)
assert (bbtf.data.index == index).all() == True
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
index=None
)
assert (bbtf.data.index == index).all() == False
def test_fit_with_and_without_weights(self, donations):
exploded_dataset = pd.DataFrame(columns=['frequency', 'recency', 'periods'])
for _, row in donations.iterrows():
exploded_dataset = exploded_dataset.append(
pd.DataFrame(
[[row['frequency'], row['recency'], row['periods']]] * row['weights'],
columns = ['frequency', 'recency', 'periods']
))
exploded_dataset = exploded_dataset.astype(np.int64)
assert exploded_dataset.shape[0] == donations['weights'].sum()
bbtf_noweights = estimation.BetaGeoBetaBinomFitter()
bbtf_noweights.fit(
exploded_dataset['frequency'],
exploded_dataset['recency'],
exploded_dataset['periods'],
)
bbtf = estimation.BetaGeoBetaBinomFitter()
bbtf.fit(
donations['frequency'],
donations['recency'],
donations['periods'],
donations['weights'],
)
npt.assert_array_almost_equal(
np.array(bbtf_noweights._unload_params('alpha','beta','gamma','delta')),
np.array(bbtf._unload_params('alpha','beta','gamma','delta')),
decimal=4
)
class TestGammaGammaFitter():
@pytest.fixture()
def cdnow_customers_with_monetary_value(self):
return load_cdnow_summary_data_with_monetary_value()
def test_params_out_is_close_to_Hardie_paper(self, cdnow_customers_with_monetary_value):
returning_cdnow_customers_with_monetary_value = cdnow_customers_with_monetary_value[
cdnow_customers_with_monetary_value['frequency'] > 0
]
ggf = estimation.GammaGammaFitter()
ggf.fit(
returning_cdnow_customers_with_monetary_value['frequency'],
returning_cdnow_customers_with_monetary_value['monetary_value'],
iterative_fitting=3
)
expected = np.array([6.25, 3.74, 15.44])
npt.assert_array_almost_equal(expected, np.array(ggf._unload_params('p', 'q', 'v')), decimal=2)
def test_conditional_expected_average_profit(self, cdnow_customers_with_monetary_value):
ggf = estimation.GammaGammaFitter()
ggf.params_ = OrderedDict({'p':6.25, 'q':3.74, 'v':15.44})
summary = cdnow_customers_with_monetary_value.head(10)
estimates = ggf.conditional_expected_average_profit(summary['frequency'], summary['monetary_value'])
expected = np.array([24.65, 18.91, 35.17, 35.17, 35.17, 71.46, 18.91, 35.17, 27.28, 35.17]) # from Hardie spreadsheet http://brucehardie.com/notes/025/
npt.assert_allclose(estimates.values, expected, atol=0.1)
def test_customer_lifetime_value_with_bgf(self, cdnow_customers_with_monetary_value):
ggf = estimation.GammaGammaFitter()
ggf.params_ = OrderedDict({'p':6.25, 'q':3.74, 'v':15.44})
bgf = estimation.BetaGeoFitter()
bgf.fit(cdnow_customers_with_monetary_value['frequency'],
cdnow_customers_with_monetary_value['recency'],
cdnow_customers_with_monetary_value['T'],
iterative_fitting=3)
ggf_clv = ggf.customer_lifetime_value(
bgf,
cdnow_customers_with_monetary_value['frequency'],
cdnow_customers_with_monetary_value['recency'],
cdnow_customers_with_monetary_value['T'],
cdnow_customers_with_monetary_value['monetary_value']
)
utils_clv = utils._customer_lifetime_value(
bgf,
cdnow_customers_with_monetary_value['frequency'],
cdnow_customers_with_monetary_value['recency'],
cdnow_customers_with_monetary_value['T'],
ggf.conditional_expected_average_profit(cdnow_customers_with_monetary_value['frequency'],
cdnow_customers_with_monetary_value['monetary_value'])
)
npt.assert_equal(ggf_clv.values, utils_clv.values)
def test_fit_with_index(self, cdnow_customers_with_monetary_value):
returning_cdnow_customers_with_monetary_value = cdnow_customers_with_monetary_value[
cdnow_customers_with_monetary_value['frequency'] > 0
]
ggf = estimation.GammaGammaFitter()
index = range(len(returning_cdnow_customers_with_monetary_value), 0, -1)
ggf.fit(
returning_cdnow_customers_with_monetary_value['frequency'],
returning_cdnow_customers_with_monetary_value['monetary_value'],
iterative_fitting=1,
index=index
)
assert (ggf.data.index == index).all() == True
ggf = estimation.GammaGammaFitter()
ggf.fit(
returning_cdnow_customers_with_monetary_value['frequency'],
returning_cdnow_customers_with_monetary_value['monetary_value'],
iterative_fitting=1,
index=None
)
assert (ggf.data.index == index).all() == False
def test_params_out_is_close_to_Hardie_paper_with_q_constraint(self, cdnow_customers_with_monetary_value):
returning_cdnow_customers_with_monetary_value = cdnow_customers_with_monetary_value[
cdnow_customers_with_monetary_value['frequency'] > 0
]
ggf = estimation.GammaGammaFitter()
ggf.fit(
returning_cdnow_customers_with_monetary_value['frequency'],
returning_cdnow_customers_with_monetary_value['monetary_value'],
iterative_fitting=3,
q_constraint=True
)
expected = np.array([6.25, 3.74, 15.44])
npt.assert_array_almost_equal(expected, np.array(ggf._unload_params('p', 'q', 'v')), decimal=2)
def test_negative_log_likelihood_is_inf_when_q_constraint_true_and_q_lt_one(self):
frequency = 25
avg_monetary_value = 100
ggf = estimation.GammaGammaFitter()
assert np.isinf(ggf._negative_log_likelihood([6.25, -3.75, 15.44], frequency, avg_monetary_value, q_constraint=True))
class TestParetoNBDFitter():
def test_overflow_error(self):
ptf = estimation.ParetoNBDFitter()
params = np.array([10.465, 7.98565181e-03, 3.0516, 2.820])
freq = np.array([400., 500., 500.])
rec = np.array([5., 1., 4.])
age = np.array([6., 37., 37.])
assert all([r < 0 and not np.isinf(r) and not pd.isnull(r)
for r in ptf._log_A_0(params, freq, rec, age)])
def test_sum_of_scalar_inputs_to_negative_log_likelihood_is_equal_to_array(self):
ptf = estimation.ParetoNBDFitter
x = np.array([1, 3])
t_x = np.array([2, 2])
weights = np.array([1., 1.])
t = np.array([5, 6])
params = [1, 1, 1, 1]
assert ptf()._negative_log_likelihood(params, np.array([x[0]]), np.array([t_x[0]]), np.array([t[0]]), weights[0], 0) \
+ ptf()._negative_log_likelihood(params, np.array([x[1]]), np.array([t_x[1]]), np.array([t[1]]), weights[0], 0) \
== 2 * ptf()._negative_log_likelihood(params, x, t_x, t, weights, 0)
def test_params_out_is_close_to_Hardie_paper(self, cdnow_customers):
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
expected = np.array([ 0.553, 10.578, 0.606, 11.669])
npt.assert_array_almost_equal(expected, np.array(ptf._unload_params('r', 'alpha', 's', 'beta')), decimal=2)
def test_expectation_returns_same_value_as_R_BTYD(self, cdnow_customers):
""" From https://cran.r-project.org/web/packages/BTYD/BTYD.pdf """
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], tol=1e-6)
expected = np.array([0.00000000, 0.05077821, 0.09916088, 0.14542507, 0.18979930,
0.23247466, 0.27361274, 0.31335159, 0.35181024, 0.38909211])
actual = ptf.expected_number_of_purchases_up_to_time(range(10))
npt.assert_allclose(expected, actual, atol=0.01)
def test_conditional_expectation_returns_same_value_as_R_BTYD(self, cdnow_customers):
""" From https://cran.r-project.org/web/packages/BTYD/vignettes/BTYD-walkthrough.pdf """
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
x = 26.00
t_x = 30.86
T = 31
t = 52
expected = 25.46
actual = ptf.conditional_expected_number_of_purchases_up_to_time(t, x, t_x, T)
assert abs(expected - actual) < 0.01
def test_conditional_expectation_underflow(self):
""" Test a pair of inputs for the ParetoNBD ptf.conditional_expected_number_of_purchases_up_to_time().
For a small change in the input, the result shouldn't change dramatically -- however, if the
function doesn't guard against numeric underflow, this change in input will result in an
underflow error.
"""
ptf = estimation.ParetoNBDFitter()
alpha = 10.58
beta = 11.67
r = 0.55
s = 0.61
ptf.params_ = OrderedDict({'alpha':alpha, 'beta':beta, 'r':r, 's':s})
# small change in inputs
left = ptf.conditional_expected_number_of_purchases_up_to_time(10, 132, 200, 200) # 6.2060517889632418
right = ptf.conditional_expected_number_of_purchases_up_to_time(10, 133, 200, 200) # 6.2528722475748113
assert abs(left - right) < 0.05
def test_conditional_probability_alive_is_between_0_and_1(self, cdnow_customers):
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for freq in np.arange(0, 100, 10.):
for recency in np.arange(0, 100, 10.):
for t in np.arange(recency, 100, 10.):
assert 0.0 <= ptf.conditional_probability_alive(freq, recency, t) <= 1.0
def test_conditional_probability_alive(self, cdnow_customers):
"""
Target taken from page 8,
https://cran.r-project.org/web/packages/BTYD/vignettes/BTYD-walkthrough.pdf
"""
ptf = estimation.ParetoNBDFitter()
ptf.params_ = OrderedDict(
zip(['r', 'alpha', 's', 'beta'],
[0.5534, 10.5802, 0.6061, 11.6562]))
p_alive = ptf.conditional_probability_alive(26.00, 30.86, 31.00)
assert abs(p_alive - 0.9979) < 0.001
def test_conditional_probability_alive_overflow_error(self):
ptf = estimation.ParetoNBDFitter()
ptf.params_ = OrderedDict(
zip(['r', 'alpha', 's', 'beta'],
[10.465, 7.98565181e-03, 3.0516, 2.820]))
freq = np.array([40., 50., 50.])
rec = np.array([5., 1., 4.])
age = np.array([6., 37., 37.])
assert all([r <= 1 and r >= 0 and not np.isinf(r) and not pd.isnull(r)
for r in ptf.conditional_probability_alive(freq, rec, age)])
def test_conditional_probability_alive_matrix(self, cdnow_customers):
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
Z = ptf.conditional_probability_alive_matrix()
max_t = int(ptf.data['T'].max())
for t_x in range(Z.shape[0]):
for x in range(Z.shape[1]):
assert Z[t_x][x] == ptf.conditional_probability_alive(x, t_x, max_t)
def test_fit_with_index(self, cdnow_customers):
ptf = estimation.ParetoNBDFitter()
index = range(len(cdnow_customers), 0, -1)
ptf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=index
)
assert (ptf.data.index == index).all() == True
ptf = estimation.ParetoNBDFitter()
ptf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=None
)
assert (ptf.data.index == index).all() == False
def test_conditional_probability_of_n_purchases_up_to_time_is_between_0_and_1(self, cdnow_customers):
"""
Due to the large parameter space we take a random subset.
"""
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for freq in np.random.choice(100, 5):
for recency in np.random.choice(100, 5):
for age in recency + np.random.choice(100, 5):
for t in np.random.choice(100, 5):
for n in np.random.choice(10, 5):
assert (
0.0
<= ptf.conditional_probability_of_n_purchases_up_to_time(n, t, freq, recency, age)
<= 1.0
)
def test_conditional_probability_of_n_purchases_up_to_time_adds_up_to_1(self, cdnow_customers):
"""
Due to the large parameter space we take a random subset. We also restrict our limits to keep the number of
values of n for which the probability needs to be calculated to a sane level.
"""
ptf = estimation.ParetoNBDFitter()
ptf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for freq in np.random.choice(10, 5):
for recency in np.random.choice(9, 5):
for age in np.random.choice(np.arange(recency, 10, 1), 5):
for t in 1 + np.random.choice(9, 5):
npt.assert_almost_equal(
np.sum([
ptf.conditional_probability_of_n_purchases_up_to_time(n, t, freq, recency, age)
for n in np.arange(0, 20, 1)
]),
1.0,
decimal=2
)
def test_fit_with_and_without_weights(self, cdnow_customers):
original_dataset_with_weights = cdnow_customers.copy()
original_dataset_with_weights = original_dataset_with_weights.groupby(['frequency', 'recency', 'T']).size()
original_dataset_with_weights = original_dataset_with_weights.reset_index()
original_dataset_with_weights = original_dataset_with_weights.rename(columns={0:'weights'})
pnbd_noweights = estimation.ParetoNBDFitter()
pnbd_noweights.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
)
pnbd = estimation.ParetoNBDFitter()
pnbd.fit(
original_dataset_with_weights['frequency'],
original_dataset_with_weights['recency'],
original_dataset_with_weights['T'],
original_dataset_with_weights['weights'],
)
npt.assert_array_almost_equal(
np.array(pnbd_noweights._unload_params('r', 'alpha', 's', 'beta')),
np.array(pnbd._unload_params('r', 'alpha', 's', 'beta')),
decimal=2
)
class TestBetaGeoFitter():
def test_sum_of_scalar_inputs_to_negative_log_likelihood_is_equal_to_array(self):
bgf = estimation.BetaGeoFitter
x = np.array([1, 3])
t_x = np.array([2, 2])
t = np.array([5, 6])
weights = np.array([1])
params = [1, 1, 1, 1]
assert bgf._negative_log_likelihood(params, x[0], np.array([t_x[0]]), np.array([t[0]]), weights[0], 0) \
+ bgf._negative_log_likelihood(params, x[1], np.array([t_x[1]]), np.array([t[1]]), weights[0], 0) \
== 2 * bgf._negative_log_likelihood(params, x, t_x, t, weights, 0)
def test_params_out_is_close_to_Hardie_paper(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
expected = np.array([0.243, 4.414, 0.793, 2.426])
npt.assert_array_almost_equal(expected, np.array(bfg._unload_params('r', 'alpha', 'a', 'b')), decimal=3)
def test_conditional_expectation_returns_same_value_as_Hardie_excel_sheet(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
x = 2
t_x = 30.43
T = 38.86
t = 39
expected = 1.226
actual = bfg.conditional_expected_number_of_purchases_up_to_time(t, x, t_x, T)
assert abs(expected - actual) < 0.001
def test_expectation_returns_same_value_Hardie_excel_sheet(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], tol=1e-6)
times = np.array([0.1429, 1.0, 3.00, 31.8571, 32.00, 78.00])
expected = np.array([0.0078 ,0.0532 ,0.1506 ,1.0405,1.0437, 1.8576])
actual = bfg.expected_number_of_purchases_up_to_time(times)
npt.assert_array_almost_equal(actual, expected, decimal=3)
def test_conditional_probability_alive_returns_1_if_no_repeat_purchases(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
assert bfg.conditional_probability_alive(0, 1, 1) == 1.0
def test_conditional_probability_alive_is_between_0_and_1(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for i in range(0, 100, 10):
for j in range(0, 100, 10):
for k in range(j, 100, 10):
assert 0 <= bfg.conditional_probability_alive(i, j, k) <= 1.0
def test_fit_method_allows_for_better_accuracy_by_using_iterative_fitting(self, cdnow_customers):
bfg1 = estimation.BetaGeoFitter()
bfg2 = estimation.BetaGeoFitter()
np.random.seed(0)
bfg1.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
np.random.seed(0)
bfg2.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
assert bfg1._negative_log_likelihood_ >= bfg2._negative_log_likelihood_
def test_penalizer_term_will_shrink_coefs_to_0(self, cdnow_customers):
bfg_no_penalizer = estimation.BetaGeoFitter()
bfg_no_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
params_1 = np.array(list(bfg_no_penalizer.params_.values()))
bfg_with_penalizer = estimation.BetaGeoFitter(penalizer_coef=0.1)
bfg_with_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
params_2 = np.array(list(bfg_with_penalizer.params_.values()))
assert np.all(params_2 < params_1)
bfg_with_more_penalizer = estimation.BetaGeoFitter(penalizer_coef=10)
bfg_with_more_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
params_3 = np.array(list(bfg_with_more_penalizer.params_.values()))
assert np.all(params_3 < params_2)
def test_conditional_probability_alive_matrix(self, cdnow_customers):
bfg = estimation.BetaGeoFitter()
bfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
Z = bfg.conditional_probability_alive_matrix()
max_t = int(bfg.data['T'].max())
assert Z[0][0] == 1
for t_x in range(Z.shape[0]):
for x in range(Z.shape[1]):
assert Z[t_x][x] == bfg.conditional_probability_alive(x, t_x, max_t)
def test_probability_of_n_purchases_up_to_time_same_as_R_BTYD(self):
""" See https://cran.r-project.org/web/packages/BTYD/BTYD.pdf """
bgf = estimation.BetaGeoFitter()
bgf.params_ = OrderedDict({'r':0.243, 'alpha':4.414, 'a':0.793, 'b':2.426})
# probability that a customer will make 10 repeat transactions in the
# time interval (0,2]
expected = 1.07869e-07
actual = bgf.probability_of_n_purchases_up_to_time(2,10)
assert abs(expected - actual) < 10e-5
# probability that a customer will make no repeat transactions in the
# time interval (0,39]
expected = 0.5737864
actual = bgf.probability_of_n_purchases_up_to_time(39,0)
assert abs(expected - actual) < 10e-5
# PMF
expected = np.array([0.0019995214, 0.0015170236, 0.0011633150, 0.0009003148, 0.0007023638,
0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.0002222260])
actual = np.array([bgf.probability_of_n_purchases_up_to_time(30,n) for n in range(11,21)])
npt.assert_array_almost_equal(expected, actual, decimal=5)
def test_scaling_inputs_gives_same_or_similar_results(self, cdnow_customers):
bgf = estimation.BetaGeoFitter()
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
scale = 10
bgf_with_large_inputs = estimation.BetaGeoFitter()
bgf_with_large_inputs.fit(cdnow_customers['frequency'], scale * cdnow_customers['recency'], scale * cdnow_customers['T'], iterative_fitting=2)
assert bgf_with_large_inputs._scale < 1.
assert abs(bgf_with_large_inputs.conditional_probability_alive(1, scale * 1, scale * 2) - bgf.conditional_probability_alive(1, 1, 2)) < 10e-5
assert abs(bgf_with_large_inputs.conditional_probability_alive(1, scale * 2, scale * 10) - bgf.conditional_probability_alive(1, 2, 10)) < 10e-5
def test_save_load_bgnbd(self, cdnow_customers):
"""Test saving and loading model for BG/NBD."""
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
bgf.save_model(PATH_SAVE_BGNBD_MODEL)
bgf_new = estimation.BetaGeoFitter()
bgf_new.load_model(PATH_SAVE_BGNBD_MODEL)
assert bgf_new.__dict__['penalizer_coef'] == bgf.__dict__['penalizer_coef']
assert bgf_new.__dict__['_scale'] == bgf.__dict__['_scale']
assert bgf_new.__dict__['params_'] == bgf.__dict__['params_']
assert bgf_new.__dict__['_negative_log_likelihood_'] == bgf.__dict__['_negative_log_likelihood_']
assert (bgf_new.__dict__['data'] == bgf.__dict__['data']).all().all()
assert bgf_new.__dict__['predict'](1, 1, 2, 5) == bgf.__dict__['predict'](1, 1, 2, 5)
assert bgf_new.expected_number_of_purchases_up_to_time(1) == bgf.expected_number_of_purchases_up_to_time(1)
# remove saved model
os.remove(PATH_SAVE_BGNBD_MODEL)
def test_save_load_bgnbd_no_data(self, cdnow_customers):
"""Test saving and loading model for BG/NBD without data."""
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
bgf.save_model(PATH_SAVE_BGNBD_MODEL, save_data=False)
bgf_new = estimation.BetaGeoFitter()
bgf_new.load_model(PATH_SAVE_BGNBD_MODEL)
assert bgf_new.__dict__['penalizer_coef'] == bgf.__dict__['penalizer_coef']
assert bgf_new.__dict__['_scale'] == bgf.__dict__['_scale']
assert bgf_new.__dict__['params_'] == bgf.__dict__['params_']
assert bgf_new.__dict__['_negative_log_likelihood_'] == bgf.__dict__['_negative_log_likelihood_']
assert bgf_new.__dict__['predict'](1, 1, 2, 5) == bgf.__dict__['predict'](1, 1, 2, 5)
assert bgf_new.expected_number_of_purchases_up_to_time(1) == bgf.expected_number_of_purchases_up_to_time(1)
assert bgf_new.__dict__['data'] is None
# remove saved model
os.remove(PATH_SAVE_BGNBD_MODEL)
def test_save_load_bgnbd_no_generate_data(self, cdnow_customers):
"""Test saving and loading model for BG/NBD without generate_new_data method."""
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
bgf.save_model(PATH_SAVE_BGNBD_MODEL, save_generate_data_method=False)
bgf_new = estimation.BetaGeoFitter()
bgf_new.load_model(PATH_SAVE_BGNBD_MODEL)
assert bgf_new.__dict__['penalizer_coef'] == bgf.__dict__['penalizer_coef']
assert bgf_new.__dict__['_scale'] == bgf.__dict__['_scale']
assert bgf_new.__dict__['params_'] == bgf.__dict__['params_']
assert bgf_new.__dict__['_negative_log_likelihood_'] == bgf.__dict__['_negative_log_likelihood_']
assert bgf_new.__dict__['predict'](1, 1, 2, 5) == bgf.__dict__['predict'](1, 1, 2, 5)
assert bgf_new.expected_number_of_purchases_up_to_time(1) == bgf.expected_number_of_purchases_up_to_time(1)
assert bgf_new.__dict__['generate_new_data'] is None
# remove saved model
os.remove(PATH_SAVE_BGNBD_MODEL)
def test_save_load_bgnbd_no_data_replace_with_empty_str(self, cdnow_customers):
"""Test saving and loading model for BG/NBD without data with replaced value empty str."""
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
bgf.save_model(PATH_SAVE_BGNBD_MODEL, save_data=False, values_to_save=[''])
bgf_new = estimation.BetaGeoFitter()
bgf_new.load_model(PATH_SAVE_BGNBD_MODEL)
assert bgf_new.__dict__['penalizer_coef'] == bgf.__dict__['penalizer_coef']
assert bgf_new.__dict__['_scale'] == bgf.__dict__['_scale']
assert bgf_new.__dict__['params_'] == bgf.__dict__['params_']
assert bgf_new.__dict__['_negative_log_likelihood_'] == bgf.__dict__['_negative_log_likelihood_']
assert bgf_new.__dict__['predict'](1, 1, 2, 5) == bgf.__dict__['predict'](1, 1, 2, 5)
assert bgf_new.expected_number_of_purchases_up_to_time(1) == bgf.expected_number_of_purchases_up_to_time(1)
        assert bgf_new.__dict__['data'] == ''
# remove saved model
os.remove(PATH_SAVE_BGNBD_MODEL)
def test_fit_with_index(self, cdnow_customers):
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
index = range(len(cdnow_customers), 0, -1)
bgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=index
)
assert (bgf.data.index == index).all() == True
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=None
)
assert (bgf.data.index == index).all() == False
def test_no_runtime_warnings_high_frequency(self, cdnow_customers):
old_settings = np.seterr(all='raise')
bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=None
)
p_alive = bgf.conditional_probability_alive(frequency=1000, recency=10, T=100)
np.seterr(**old_settings)
assert p_alive == 0.
def test_using_weights_col_gives_correct_results(self, cdnow_customers):
cdnow_customers_weights = cdnow_customers.copy()
cdnow_customers_weights['weights'] = 1.0
cdnow_customers_weights = cdnow_customers_weights.groupby(['frequency', 'recency', 'T'])['weights'].sum()
cdnow_customers_weights = cdnow_customers_weights.reset_index()
assert (cdnow_customers_weights['weights'] > 1).any()
bgf_weights = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf_weights.fit(
cdnow_customers_weights['frequency'],
cdnow_customers_weights['recency'],
cdnow_customers_weights['T'],
weights=cdnow_customers_weights['weights']
)
bgf_no_weights = estimation.BetaGeoFitter(penalizer_coef=0.0)
bgf_no_weights.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T']
)
npt.assert_almost_equal(
np.array(bgf_no_weights._unload_params('r', 'alpha', 'a', 'b')),
np.array(bgf_weights._unload_params('r', 'alpha', 'a', 'b')),
decimal=4)
class TestModifiedBetaGammaFitter():
def test_sum_of_scalar_inputs_to_negative_log_likelihood_is_equal_to_array(self):
mbgf = estimation.ModifiedBetaGeoFitter
x = np.array([1, 3])
t_x = np.array([2, 2])
t = np.array([5, 6])
weights=np.array([1, 1])
params = [1, 1, 1, 1]
assert mbgf._negative_log_likelihood(params, np.array([x[0]]), np.array([t_x[0]]), np.array([t[0]]), weights[0], 0) \
+ mbgf._negative_log_likelihood(params, np.array([x[1]]), np.array([t_x[1]]), np.array([t[1]]), weights[0], 0) \
== 2 * mbgf._negative_log_likelihood(params, x, t_x, t, weights, 0)
def test_params_out_is_close_to_BTYDplus(self, cdnow_customers):
""" See https://github.com/mplatzer/BTYDplus """
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
expected = np.array([0.525, 6.183, 0.891, 1.614])
npt.assert_array_almost_equal(expected, np.array(mbfg._unload_params('r', 'alpha', 'a', 'b')), decimal=3)
def test_conditional_expectation_returns_same_value_as_Hardie_excel_sheet(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
x = 2
t_x = 30.43
T = 38.86
t = 39
expected = 1.226
actual = mbfg.conditional_expected_number_of_purchases_up_to_time(t, x, t_x, T)
assert abs(expected - actual) < 0.05
def test_expectation_returns_same_value_Hardie_excel_sheet(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], tol=1e-6, iterative_fitting=3)
times = np.array([0.1429, 1.0, 3.00, 31.8571, 32.00, 78.00])
expected = np.array([0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576])
actual = mbfg.expected_number_of_purchases_up_to_time(times)
npt.assert_allclose(actual, expected, rtol=0.05)
def test_conditional_probability_alive_returns_lessthan_1_if_no_repeat_purchases(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
assert mbfg.conditional_probability_alive(0, 1, 1) < 1.0
def test_conditional_probability_alive_is_between_0_and_1(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
for i in range(0, 100, 10):
for j in range(0, 100, 10):
for k in range(j, 100, 10):
assert 0 <= mbfg.conditional_probability_alive(i, j, k) <= 1.0
def test_fit_method_allows_for_better_accuracy_by_using_iterative_fitting(self, cdnow_customers):
mbfg1 = estimation.ModifiedBetaGeoFitter()
mbfg2 = estimation.ModifiedBetaGeoFitter()
np.random.seed(0)
mbfg1.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
np.random.seed(0)
mbfg2.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=5)
assert mbfg1._negative_log_likelihood_ >= mbfg2._negative_log_likelihood_
def test_penalizer_term_will_shrink_coefs_to_0(self, cdnow_customers):
mbfg_no_penalizer = estimation.ModifiedBetaGeoFitter()
mbfg_no_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
params_1 = np.array(list(mbfg_no_penalizer.params_.values()))
mbfg_with_penalizer = estimation.ModifiedBetaGeoFitter(penalizer_coef=0.1)
mbfg_with_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=3)
params_2 = np.array(list(mbfg_with_penalizer.params_.values()))
assert params_2.sum() < params_1.sum()
mbfg_with_more_penalizer = estimation.ModifiedBetaGeoFitter(penalizer_coef=1.)
mbfg_with_more_penalizer.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'], iterative_fitting=5)
params_3 = np.array(list(mbfg_with_more_penalizer.params_.values()))
assert params_3.sum() < params_2.sum()
def test_conditional_probability_alive_matrix(self, cdnow_customers):
mbfg = estimation.ModifiedBetaGeoFitter()
mbfg.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
Z = mbfg.conditional_probability_alive_matrix()
max_t = int(mbfg.data['T'].max())
for t_x in range(Z.shape[0]):
for x in range(Z.shape[1]):
assert Z[t_x][x] == mbfg.conditional_probability_alive(x, t_x, max_t)
def test_probability_of_n_purchases_up_to_time_same_as_R_BTYD(self):
""" See https://cran.r-project.org/web/packages/BTYD/BTYD.pdf """
mbgf = estimation.ModifiedBetaGeoFitter()
mbgf.params_ = OrderedDict({'r':0.243, 'alpha':4.414, 'a':0.793, 'b':2.426})
# probability that a customer will make 10 repeat transactions in the
# time interval (0,2]
expected = 1.07869e-07
actual = mbgf.probability_of_n_purchases_up_to_time(2, 10)
assert abs(expected - actual) < 10e-5
# PMF
expected = np.array([0.0019995214, 0.0015170236, 0.0011633150, 0.0009003148, 0.0007023638,
0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.0002222260])
actual = np.array([mbgf.probability_of_n_purchases_up_to_time(30, n) for n in range(11, 21)])
npt.assert_allclose(expected, actual, rtol=0.5)
def test_scaling_inputs_gives_same_or_similar_results(self, cdnow_customers):
mbgf = estimation.ModifiedBetaGeoFitter()
mbgf.fit(cdnow_customers['frequency'], cdnow_customers['recency'], cdnow_customers['T'])
scale = 10.
mbgf_with_large_inputs = estimation.ModifiedBetaGeoFitter()
mbgf_with_large_inputs.fit(cdnow_customers['frequency'], scale * cdnow_customers['recency'], scale * cdnow_customers['T'], iterative_fitting=2)
assert mbgf_with_large_inputs._scale < 1.
assert abs(mbgf_with_large_inputs.conditional_probability_alive(1, scale * 1, scale * 2) - mbgf.conditional_probability_alive(1, 1, 2)) < 10e-2
assert abs(mbgf_with_large_inputs.conditional_probability_alive(1, scale * 2, scale * 10) - mbgf.conditional_probability_alive(1, 2, 10)) < 10e-2
def test_mgbf_does_not_hang_for_small_datasets_but_can_be_improved_with_iterative_fitting(self, cdnow_customers):
reduced_dataset = cdnow_customers.iloc[:2]
mbfg1 = estimation.ModifiedBetaGeoFitter()
mbfg2 = estimation.ModifiedBetaGeoFitter()
np.random.seed(0)
mbfg1.fit(reduced_dataset['frequency'], reduced_dataset['recency'], reduced_dataset['T'])
np.random.seed(0)
mbfg2.fit(reduced_dataset['frequency'], reduced_dataset['recency'], reduced_dataset['T'], iterative_fitting=10)
assert mbfg1._negative_log_likelihood_ >= mbfg2._negative_log_likelihood_
def test_purchase_predictions_do_not_differ_much_if_looking_at_hourly_or_daily_frequencies(self):
transaction_data = load_transaction_data(parse_dates=['date'])
daily_summary = utils.summary_data_from_transaction_data(transaction_data, 'id', 'date', observation_period_end=max(transaction_data.date), freq='D')
hourly_summary = utils.summary_data_from_transaction_data(transaction_data, 'id', 'date', observation_period_end=max(transaction_data.date), freq='h')
thirty_days = 30
hours_in_day = 24
mbfg = estimation.ModifiedBetaGeoFitter()
np.random.seed(0)
mbfg.fit(daily_summary['frequency'], daily_summary['recency'], daily_summary['T'])
thirty_day_prediction_from_daily_data = mbfg.expected_number_of_purchases_up_to_time(thirty_days)
np.random.seed(0)
mbfg.fit(hourly_summary['frequency'], hourly_summary['recency'], hourly_summary['T'])
thirty_day_prediction_from_hourly_data = mbfg.expected_number_of_purchases_up_to_time(thirty_days * hours_in_day)
npt.assert_almost_equal(thirty_day_prediction_from_daily_data, thirty_day_prediction_from_hourly_data)
def test_fit_with_index(self, cdnow_customers):
mbgf = estimation.ModifiedBetaGeoFitter()
index = range(len(cdnow_customers), 0, -1)
mbgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=index
)
assert (mbgf.data.index == index).all() == True
mbgf = estimation.ModifiedBetaGeoFitter()
mbgf.fit(
cdnow_customers['frequency'],
cdnow_customers['recency'],
cdnow_customers['T'],
index=None
)
assert (mbgf.data.index == index).all() == False
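# --- Hedged usage sketch (not part of the test suite) ---
# The tests above exercise the fitters against the CDNOW summary data; a
# minimal stand-alone fit, using only calls that already appear in this file,
# looks roughly like this:
#
#   customers = load_cdnow_summary()
#   bgf = estimation.BetaGeoFitter(penalizer_coef=0.0)
#   bgf.fit(customers['frequency'], customers['recency'], customers['T'])
#   print(bgf.expected_number_of_purchases_up_to_time(39))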
|
[
"lifetimes.datasets.load_cdnow_summary",
"os.remove",
"numpy.random.seed",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"lifetimes.datasets.load_cdnow_summary_data_with_monetary_value",
"pandas.DataFrame",
"numpy.testing.assert_almost_equal",
"lifetimes.estimation.ModifiedBetaGeoFitter",
"os.path.exists",
"pytest.raises",
"numpy.testing.assert_equal",
"numpy.random.choice",
"numpy.testing.assert_allclose",
"lifetimes.estimation.GammaGammaFitter",
"lifetimes.datasets.load_donations",
"pytest.fixture",
"numpy.isinf",
"lifetimes.estimation.BaseFitter",
"lifetimes.datasets.load_transaction_data",
"numpy.all",
"numpy.seterr",
"numpy.zeros",
"lifetimes.estimation.BetaGeoBetaBinomFitter",
"lifetimes.estimation.BetaGeoFitter",
"pandas.isnull",
"numpy.array",
"collections.OrderedDict",
"lifetimes.estimation.ParetoNBDFitter"
] |
[((433, 453), 'lifetimes.datasets.load_cdnow_summary', 'load_cdnow_summary', ([], {}), '()\n', (451, 453), False, 'from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations, load_transaction_data\n'), ((1745, 1761), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1759, 1761), False, 'import pytest\n'), ((6830, 6846), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (6844, 6846), False, 'import pytest\n'), ((614, 637), 'lifetimes.estimation.BaseFitter', 'estimation.BaseFitter', ([], {}), '()\n', (635, 637), True, 'import lifetimes.estimation as estimation\n'), ((775, 794), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (783, 794), True, 'import numpy as np\n'), ((1070, 1093), 'lifetimes.estimation.BaseFitter', 'estimation.BaseFitter', ([], {}), '()\n', (1091, 1093), True, 'import lifetimes.estimation as estimation\n'), ((1371, 1394), 'lifetimes.estimation.BaseFitter', 'estimation.BaseFitter', ([], {}), '()\n', (1392, 1394), True, 'import lifetimes.estimation as estimation\n'), ((1527, 1550), 'lifetimes.estimation.BaseFitter', 'estimation.BaseFitter', ([], {}), '()\n', (1548, 1550), True, 'import lifetimes.estimation as estimation\n'), ((1674, 1700), 'os.remove', 'os.remove', (['PATH_SAVE_MODEL'], {}), '(PATH_SAVE_MODEL)\n', (1683, 1700), False, 'import os\n'), ((1802, 1818), 'lifetimes.datasets.load_donations', 'load_donations', ([], {}), '()\n', (1816, 1818), False, 'from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations, load_transaction_data\n'), ((1932, 1967), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (1965, 1967), True, 'import lifetimes.estimation as estimation\n'), ((2153, 2190), 'numpy.array', 'np.array', (['[1.204, 0.75, 0.657, 2.783]'], {}), '([1.204, 0.75, 0.657, 2.783])\n', (2161, 2190), True, 'import numpy as np\n'), ((2524, 2559), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (2557, 2559), True, 'import lifetimes.estimation as estimation\n'), ((2978, 3006), 'numpy.array', 'np.array', (['[0.11, 0.59, 0.93]'], {}), '([0.11, 0.59, 0.93])\n', (2986, 3006), True, 'import numpy as np\n'), ((3027, 3038), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3035, 3038), True, 'import numpy as np\n'), ((3377, 3438), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['expected', 'prob_list'], {'decimal': '(2)'}), '(expected, prob_list, decimal=2)\n', (3406, 3438), True, 'import numpy.testing as npt\n'), ((3707, 3742), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (3740, 3742), True, 'import lifetimes.estimation as estimation\n'), ((4398, 4433), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (4431, 4433), True, 'import lifetimes.estimation as estimation\n'), ((4619, 4645), 'numpy.array', 'np.array', (['[3454.9, 1253.1]'], {}), '([3454.9, 1253.1])\n', (4627, 4645), True, 'import numpy as np\n'), ((4781, 4836), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['expected', 'estimated'], {'decimal': '(0)'}), '(expected, estimated, decimal=0)\n', (4804, 4836), True, 'import numpy.testing as npt\n'), ((4900, 4935), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (4933, 4935), True, 'import lifetimes.estimation as estimation\n'), ((5243, 
5278), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (5276, 5278), True, 'import lifetimes.estimation as estimation\n'), ((5615, 5672), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['frequency', 'recency', 'periods']"}), "(columns=['frequency', 'recency', 'periods'])\n", (5627, 5672), True, 'import pandas as pd\n'), ((6147, 6182), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (6180, 6182), True, 'import lifetimes.estimation as estimation\n'), ((6362, 6397), 'lifetimes.estimation.BetaGeoBetaBinomFitter', 'estimation.BetaGeoBetaBinomFitter', ([], {}), '()\n', (6395, 6397), True, 'import lifetimes.estimation as estimation\n'), ((6913, 6958), 'lifetimes.datasets.load_cdnow_summary_data_with_monetary_value', 'load_cdnow_summary_data_with_monetary_value', ([], {}), '()\n', (6956, 6958), False, 'from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations, load_transaction_data\n'), ((7235, 7264), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (7262, 7264), True, 'import lifetimes.estimation as estimation\n'), ((7492, 7521), 'numpy.array', 'np.array', (['[6.25, 3.74, 15.44]'], {}), '([6.25, 3.74, 15.44])\n', (7500, 7521), True, 'import numpy as np\n'), ((7735, 7764), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (7762, 7764), True, 'import lifetimes.estimation as estimation\n'), ((7787, 7834), 'collections.OrderedDict', 'OrderedDict', (["{'p': 6.25, 'q': 3.74, 'v': 15.44}"], {}), "({'p': 6.25, 'q': 3.74, 'v': 15.44})\n", (7798, 7834), False, 'from collections import OrderedDict\n'), ((8024, 8109), 'numpy.array', 'np.array', (['[24.65, 18.91, 35.17, 35.17, 35.17, 71.46, 18.91, 35.17, 27.28, 35.17]'], {}), '([24.65, 18.91, 35.17, 35.17, 35.17, 71.46, 18.91, 35.17, 27.28, 35.17]\n )\n', (8032, 8109), True, 'import numpy as np\n'), ((8174, 8231), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['estimates.values', 'expected'], {'atol': '(0.1)'}), '(estimates.values, expected, atol=0.1)\n', (8193, 8231), True, 'import numpy.testing as npt\n'), ((8338, 8367), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (8365, 8367), True, 'import lifetimes.estimation as estimation\n'), ((8390, 8437), 'collections.OrderedDict', 'OrderedDict', (["{'p': 6.25, 'q': 3.74, 'v': 15.44}"], {}), "({'p': 6.25, 'q': 3.74, 'v': 15.44})\n", (8401, 8437), False, 'from collections import OrderedDict\n'), ((8450, 8476), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (8474, 8476), True, 'import lifetimes.estimation as estimation\n'), ((9536, 9586), 'numpy.testing.assert_equal', 'npt.assert_equal', (['ggf_clv.values', 'utils_clv.values'], {}), '(ggf_clv.values, utils_clv.values)\n', (9552, 9586), True, 'import numpy.testing as npt\n'), ((9843, 9872), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (9870, 9872), True, 'import lifetimes.estimation as estimation\n'), ((10257, 10286), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (10284, 10286), True, 'import lifetimes.estimation as estimation\n'), ((10869, 10898), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (10896, 10898), True, 'import lifetimes.estimation as estimation\n'), ((11157, 11186), 'numpy.array', 
'np.array', (['[6.25, 3.74, 15.44]'], {}), '([6.25, 3.74, 15.44])\n', (11165, 11186), True, 'import numpy as np\n'), ((11449, 11478), 'lifetimes.estimation.GammaGammaFitter', 'estimation.GammaGammaFitter', ([], {}), '()\n', (11476, 11478), True, 'import lifetimes.estimation as estimation\n'), ((11688, 11716), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (11714, 11716), True, 'import lifetimes.estimation as estimation\n'), ((11734, 11781), 'numpy.array', 'np.array', (['[10.465, 0.00798565181, 3.0516, 2.82]'], {}), '([10.465, 0.00798565181, 3.0516, 2.82])\n', (11742, 11781), True, 'import numpy as np\n'), ((11799, 11830), 'numpy.array', 'np.array', (['[400.0, 500.0, 500.0]'], {}), '([400.0, 500.0, 500.0])\n', (11807, 11830), True, 'import numpy as np\n'), ((11842, 11867), 'numpy.array', 'np.array', (['[5.0, 1.0, 4.0]'], {}), '([5.0, 1.0, 4.0])\n', (11850, 11867), True, 'import numpy as np\n'), ((11879, 11906), 'numpy.array', 'np.array', (['[6.0, 37.0, 37.0]'], {}), '([6.0, 37.0, 37.0])\n', (11887, 11906), True, 'import numpy as np\n'), ((12179, 12195), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (12187, 12195), True, 'import numpy as np\n'), ((12210, 12226), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (12218, 12226), True, 'import numpy as np\n'), ((12245, 12265), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (12253, 12265), True, 'import numpy as np\n'), ((12276, 12292), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (12284, 12292), True, 'import numpy as np\n'), ((12745, 12773), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (12771, 12773), True, 'import lifetimes.estimation as estimation\n'), ((12910, 12950), 'numpy.array', 'np.array', (['[0.553, 10.578, 0.606, 11.669]'], {}), '([0.553, 10.578, 0.606, 11.669])\n', (12918, 12950), True, 'import numpy as np\n'), ((13236, 13264), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (13262, 13264), True, 'import lifetimes.estimation as estimation\n'), ((13391, 13518), 'numpy.array', 'np.array', (['[0.0, 0.05077821, 0.09916088, 0.14542507, 0.1897993, 0.23247466, 0.27361274,\n 0.31335159, 0.35181024, 0.38909211]'], {}), '([0.0, 0.05077821, 0.09916088, 0.14542507, 0.1897993, 0.23247466, \n 0.27361274, 0.31335159, 0.35181024, 0.38909211])\n', (13399, 13518), True, 'import numpy as np\n'), ((13614, 13662), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {'atol': '(0.01)'}), '(expected, actual, atol=0.01)\n', (13633, 13662), True, 'import numpy.testing as npt\n'), ((13865, 13893), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (13891, 13893), True, 'import lifetimes.estimation as estimation\n'), ((14643, 14671), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (14669, 14671), True, 'import lifetimes.estimation as estimation\n'), ((14771, 14830), 'collections.OrderedDict', 'OrderedDict', (["{'alpha': alpha, 'beta': beta, 'r': r, 's': s}"], {}), "({'alpha': alpha, 'beta': beta, 'r': r, 's': s})\n", (14782, 14830), False, 'from collections import OrderedDict\n'), ((15225, 15253), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (15251, 15253), True, 'import lifetimes.estimation as estimation\n'), ((15371, 15394), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10.0)'], {}), '(0, 100, 10.0)\n', (15380, 15394), True, 'import 
numpy as np\n'), ((15818, 15846), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (15844, 15846), True, 'import lifetimes.estimation as estimation\n'), ((16178, 16206), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (16204, 16206), True, 'import lifetimes.estimation as estimation\n'), ((16360, 16388), 'numpy.array', 'np.array', (['[40.0, 50.0, 50.0]'], {}), '([40.0, 50.0, 50.0])\n', (16368, 16388), True, 'import numpy as np\n'), ((16400, 16425), 'numpy.array', 'np.array', (['[5.0, 1.0, 4.0]'], {}), '([5.0, 1.0, 4.0])\n', (16408, 16425), True, 'import numpy as np\n'), ((16437, 16464), 'numpy.array', 'np.array', (['[6.0, 37.0, 37.0]'], {}), '([6.0, 37.0, 37.0])\n', (16445, 16464), True, 'import numpy as np\n'), ((16711, 16739), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (16737, 16739), True, 'import lifetimes.estimation as estimation\n'), ((17163, 17191), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (17189, 17191), True, 'import lifetimes.estimation as estimation\n'), ((17480, 17508), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (17506, 17508), True, 'import lifetimes.estimation as estimation\n'), ((17942, 17970), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (17968, 17970), True, 'import lifetimes.estimation as estimation\n'), ((18088, 18112), 'numpy.random.choice', 'np.random.choice', (['(100)', '(5)'], {}), '(100, 5)\n', (18104, 18112), True, 'import numpy as np\n'), ((18941, 18969), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (18967, 18969), True, 'import lifetimes.estimation as estimation\n'), ((19087, 19110), 'numpy.random.choice', 'np.random.choice', (['(10)', '(5)'], {}), '(10, 5)\n', (19103, 19110), True, 'import numpy as np\n'), ((20140, 20168), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (20166, 20168), True, 'import lifetimes.estimation as estimation\n'), ((20339, 20367), 'lifetimes.estimation.ParetoNBDFitter', 'estimation.ParetoNBDFitter', ([], {}), '()\n', (20365, 20367), True, 'import lifetimes.estimation as estimation\n'), ((20992, 21008), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (21000, 21008), True, 'import numpy as np\n'), ((21023, 21039), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (21031, 21039), True, 'import numpy as np\n'), ((21052, 21068), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (21060, 21068), True, 'import numpy as np\n'), ((21087, 21100), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (21095, 21100), True, 'import numpy as np\n'), ((21523, 21549), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (21547, 21549), True, 'import lifetimes.estimation as estimation\n'), ((21686, 21724), 'numpy.array', 'np.array', (['[0.243, 4.414, 0.793, 2.426]'], {}), '([0.243, 4.414, 0.793, 2.426])\n', (21694, 21724), True, 'import numpy as np\n'), ((21955, 21981), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (21979, 21981), True, 'import lifetimes.estimation as estimation\n'), ((22405, 22431), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (22429, 22431), True, 'import lifetimes.estimation as estimation\n'), ((22555, 22604), 'numpy.array', 'np.array', (['[0.1429, 1.0, 3.0, 31.8571, 
32.0, 78.0]'], {}), '([0.1429, 1.0, 3.0, 31.8571, 32.0, 78.0])\n', (22563, 22604), True, 'import numpy as np\n'), ((22627, 22685), 'numpy.array', 'np.array', (['[0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576]'], {}), '([0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576])\n', (22635, 22685), True, 'import numpy as np\n'), ((22761, 22819), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['actual', 'expected'], {'decimal': '(3)'}), '(actual, expected, decimal=3)\n', (22790, 22819), True, 'import numpy.testing as npt\n'), ((22935, 22961), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (22959, 22961), True, 'import lifetimes.estimation as estimation\n'), ((23226, 23252), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (23250, 23252), True, 'import lifetimes.estimation as estimation\n'), ((23671, 23697), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (23695, 23697), True, 'import lifetimes.estimation as estimation\n'), ((23713, 23739), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (23737, 23739), True, 'import lifetimes.estimation as estimation\n'), ((23749, 23766), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (23763, 23766), True, 'import numpy as np\n'), ((23873, 23890), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (23887, 23890), True, 'import numpy as np\n'), ((24193, 24219), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (24217, 24219), True, 'import lifetimes.estimation as estimation\n'), ((24428, 24472), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.1)'}), '(penalizer_coef=0.1)\n', (24452, 24472), True, 'import lifetimes.estimation as estimation\n'), ((24670, 24697), 'numpy.all', 'np.all', (['(params_2 < params_1)'], {}), '(params_2 < params_1)\n', (24676, 24697), True, 'import numpy as np\n'), ((24733, 24776), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(10)'}), '(penalizer_coef=10)\n', (24757, 24776), True, 'import lifetimes.estimation as estimation\n'), ((24984, 25011), 'numpy.all', 'np.all', (['(params_3 < params_2)'], {}), '(params_3 < params_2)\n', (24990, 25011), True, 'import numpy as np\n'), ((25102, 25128), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (25126, 25128), True, 'import lifetimes.estimation as estimation\n'), ((25676, 25702), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (25700, 25702), True, 'import lifetimes.estimation as estimation\n'), ((25725, 25790), 'collections.OrderedDict', 'OrderedDict', (["{'r': 0.243, 'alpha': 4.414, 'a': 0.793, 'b': 2.426}"], {}), "({'r': 0.243, 'alpha': 4.414, 'a': 0.793, 'b': 2.426})\n", (25736, 25790), False, 'from collections import OrderedDict\n'), ((26319, 26477), 'numpy.array', 'np.array', (['[0.0019995214, 0.0015170236, 0.001163315, 0.0009003148, 0.0007023638, \n 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.000222226]'], {}), '([0.0019995214, 0.0015170236, 0.001163315, 0.0009003148, \n 0.0007023638, 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, \n 0.000222226])\n', (26327, 26477), True, 'import numpy as np\n'), ((26606, 26664), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['expected', 'actual'], {'decimal': '(5)'}), '(expected, actual, decimal=5)\n', (26635, 
26664), True, 'import numpy.testing as npt\n'), ((26762, 26788), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (26786, 26788), True, 'import lifetimes.estimation as estimation\n'), ((26936, 26962), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (26960, 26962), True, 'import lifetimes.estimation as estimation\n'), ((27590, 27634), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (27614, 27634), True, 'import lifetimes.estimation as estimation\n'), ((27796, 27822), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (27820, 27822), True, 'import lifetimes.estimation as estimation\n'), ((28526, 28558), 'os.remove', 'os.remove', (['PATH_SAVE_BGNBD_MODEL'], {}), '(PATH_SAVE_BGNBD_MODEL)\n', (28535, 28558), False, 'import os\n'), ((28704, 28748), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (28728, 28748), True, 'import lifetimes.estimation as estimation\n'), ((28927, 28953), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (28951, 28953), True, 'import lifetimes.estimation as estimation\n'), ((29628, 29660), 'os.remove', 'os.remove', (['PATH_SAVE_BGNBD_MODEL'], {}), '(PATH_SAVE_BGNBD_MODEL)\n', (29637, 29660), False, 'import os\n'), ((29835, 29879), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (29859, 29879), True, 'import lifetimes.estimation as estimation\n'), ((30074, 30100), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (30098, 30100), True, 'import lifetimes.estimation as estimation\n'), ((30788, 30820), 'os.remove', 'os.remove', (['PATH_SAVE_BGNBD_MODEL'], {}), '(PATH_SAVE_BGNBD_MODEL)\n', (30797, 30820), False, 'import os\n'), ((31019, 31063), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (31043, 31063), True, 'import lifetimes.estimation as estimation\n'), ((31263, 31289), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {}), '()\n', (31287, 31289), True, 'import lifetimes.estimation as estimation\n'), ((31962, 31994), 'os.remove', 'os.remove', (['PATH_SAVE_BGNBD_MODEL'], {}), '(PATH_SAVE_BGNBD_MODEL)\n', (31971, 31994), False, 'import os\n'), ((32062, 32106), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (32086, 32106), True, 'import lifetimes.estimation as estimation\n'), ((32395, 32439), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (32419, 32439), True, 'import lifetimes.estimation as estimation\n'), ((32758, 32780), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (32767, 32780), True, 'import numpy as np\n'), ((32795, 32839), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (32819, 32839), True, 'import lifetimes.estimation as estimation\n'), ((33102, 33127), 'numpy.seterr', 'np.seterr', ([], {}), '(**old_settings)\n', (33111, 33127), True, 'import numpy as np\n'), ((33612, 33656), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), 
'(penalizer_coef=0.0)\n', (33636, 33656), True, 'import lifetimes.estimation as estimation\n'), ((33914, 33958), 'lifetimes.estimation.BetaGeoFitter', 'estimation.BetaGeoFitter', ([], {'penalizer_coef': '(0.0)'}), '(penalizer_coef=0.0)\n', (33938, 33958), True, 'import lifetimes.estimation as estimation\n'), ((34501, 34517), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (34509, 34517), True, 'import numpy as np\n'), ((34532, 34548), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (34540, 34548), True, 'import numpy as np\n'), ((34561, 34577), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (34569, 34577), True, 'import numpy as np\n'), ((34594, 34610), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (34602, 34610), True, 'import numpy as np\n'), ((35114, 35148), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (35146, 35148), True, 'import lifetimes.estimation as estimation\n'), ((35286, 35324), 'numpy.array', 'np.array', (['[0.525, 6.183, 0.891, 1.614]'], {}), '([0.525, 6.183, 0.891, 1.614])\n', (35294, 35324), True, 'import numpy as np\n'), ((35557, 35591), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (35589, 35591), True, 'import lifetimes.estimation as estimation\n'), ((36017, 36051), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (36049, 36051), True, 'import lifetimes.estimation as estimation\n'), ((36197, 36246), 'numpy.array', 'np.array', (['[0.1429, 1.0, 3.0, 31.8571, 32.0, 78.0]'], {}), '([0.1429, 1.0, 3.0, 31.8571, 32.0, 78.0])\n', (36205, 36246), True, 'import numpy as np\n'), ((36269, 36327), 'numpy.array', 'np.array', (['[0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576]'], {}), '([0.0078, 0.0532, 0.1506, 1.0405, 1.0437, 1.8576])\n', (36277, 36327), True, 'import numpy as np\n'), ((36405, 36453), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['actual', 'expected'], {'rtol': '(0.05)'}), '(actual, expected, rtol=0.05)\n', (36424, 36453), True, 'import numpy.testing as npt\n'), ((36579, 36613), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (36611, 36613), True, 'import lifetimes.estimation as estimation\n'), ((36880, 36914), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (36912, 36914), True, 'import lifetimes.estimation as estimation\n'), ((37335, 37369), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (37367, 37369), True, 'import lifetimes.estimation as estimation\n'), ((37386, 37420), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (37418, 37420), True, 'import lifetimes.estimation as estimation\n'), ((37430, 37447), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (37444, 37447), True, 'import numpy as np\n'), ((37555, 37572), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (37569, 37572), True, 'import numpy as np\n'), ((37878, 37912), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (37910, 37912), True, 'import lifetimes.estimation as estimation\n'), ((38124, 38176), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {'penalizer_coef': '(0.1)'}), '(penalizer_coef=0.1)\n', (38156, 38176), True, 'import lifetimes.estimation as estimation\n'), 
((38465, 38517), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {'penalizer_coef': '(1.0)'}), '(penalizer_coef=1.0)\n', (38497, 38517), True, 'import lifetimes.estimation as estimation\n'), ((38869, 38903), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (38901, 38903), True, 'import lifetimes.estimation as estimation\n'), ((39427, 39461), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (39459, 39461), True, 'import lifetimes.estimation as estimation\n'), ((39485, 39550), 'collections.OrderedDict', 'OrderedDict', (["{'r': 0.243, 'alpha': 4.414, 'a': 0.793, 'b': 2.426}"], {}), "({'r': 0.243, 'alpha': 4.414, 'a': 0.793, 'b': 2.426})\n", (39496, 39550), False, 'from collections import OrderedDict\n'), ((39832, 39990), 'numpy.array', 'np.array', (['[0.0019995214, 0.0015170236, 0.001163315, 0.0009003148, 0.0007023638, \n 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.000222226]'], {}), '([0.0019995214, 0.0015170236, 0.001163315, 0.0009003148, \n 0.0007023638, 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, \n 0.000222226])\n', (39840, 39990), True, 'import numpy as np\n'), ((40122, 40169), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {'rtol': '(0.5)'}), '(expected, actual, rtol=0.5)\n', (40141, 40169), True, 'import numpy.testing as npt\n'), ((40268, 40302), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (40300, 40302), True, 'import lifetimes.estimation as estimation\n'), ((40453, 40487), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (40485, 40487), True, 'import lifetimes.estimation as estimation\n'), ((41183, 41217), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (41215, 41217), True, 'import lifetimes.estimation as estimation\n'), ((41234, 41268), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (41266, 41268), True, 'import lifetimes.estimation as estimation\n'), ((41278, 41295), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (41292, 41295), True, 'import numpy as np\n'), ((41403, 41420), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (41417, 41420), True, 'import numpy as np\n'), ((41753, 41796), 'lifetimes.datasets.load_transaction_data', 'load_transaction_data', ([], {'parse_dates': "['date']"}), "(parse_dates=['date'])\n", (41774, 41796), False, 'from lifetimes.datasets import load_cdnow_summary, load_cdnow_summary_data_with_monetary_value, load_donations, load_transaction_data\n'), ((42180, 42214), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (42212, 42214), True, 'import lifetimes.estimation as estimation\n'), ((42224, 42241), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (42238, 42241), True, 'import numpy as np\n'), ((42448, 42465), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (42462, 42465), True, 'import numpy as np\n'), ((42691, 42797), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['thirty_day_prediction_from_daily_data', 'thirty_day_prediction_from_hourly_data'], {}), '(thirty_day_prediction_from_daily_data,\n thirty_day_prediction_from_hourly_data)\n', (42714, 42797), True, 'import numpy.testing as npt\n'), ((42862, 42896), 
'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (42894, 42896), True, 'import lifetimes.estimation as estimation\n'), ((43188, 43222), 'lifetimes.estimation.ModifiedBetaGeoFitter', 'estimation.ModifiedBetaGeoFitter', ([], {}), '()\n', (43220, 43222), True, 'import lifetimes.estimation as estimation\n'), ((1107, 1132), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1120, 1132), False, 'import pytest\n'), ((1458, 1489), 'os.path.exists', 'os.path.exists', (['PATH_SAVE_MODEL'], {}), '(PATH_SAVE_MODEL)\n', (1472, 1489), False, 'import os\n'), ((15422, 15445), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10.0)'], {}), '(0, 100, 10.0)\n', (15431, 15445), True, 'import numpy as np\n'), ((18141, 18165), 'numpy.random.choice', 'np.random.choice', (['(100)', '(5)'], {}), '(100, 5)\n', (18157, 18165), True, 'import numpy as np\n'), ((19139, 19161), 'numpy.random.choice', 'np.random.choice', (['(9)', '(5)'], {}), '(9, 5)\n', (19155, 19161), True, 'import numpy as np\n'), ((5790, 5923), 'pandas.DataFrame', 'pd.DataFrame', (["([[row['frequency'], row['recency'], row['periods']]] * row['weights'])"], {'columns': "['frequency', 'recency', 'periods']"}), "([[row['frequency'], row['recency'], row['periods']]] * row[\n 'weights'], columns=['frequency', 'recency', 'periods'])\n", (5802, 5923), True, 'import pandas as pd\n'), ((15471, 15500), 'numpy.arange', 'np.arange', (['recency', '(100)', '(10.0)'], {}), '(recency, 100, 10.0)\n', (15480, 15500), True, 'import numpy as np\n'), ((12377, 12393), 'numpy.array', 'np.array', (['[x[0]]'], {}), '([x[0]])\n', (12385, 12393), True, 'import numpy as np\n'), ((12395, 12413), 'numpy.array', 'np.array', (['[t_x[0]]'], {}), '([t_x[0]])\n', (12403, 12413), True, 'import numpy as np\n'), ((12415, 12431), 'numpy.array', 'np.array', (['[t[0]]'], {}), '([t[0]])\n', (12423, 12431), True, 'import numpy as np\n'), ((12503, 12519), 'numpy.array', 'np.array', (['[x[1]]'], {}), '([x[1]])\n', (12511, 12519), True, 'import numpy as np\n'), ((12521, 12539), 'numpy.array', 'np.array', (['[t_x[1]]'], {}), '([t_x[1]])\n', (12529, 12539), True, 'import numpy as np\n'), ((12541, 12557), 'numpy.array', 'np.array', (['[t[1]]'], {}), '([t[1]])\n', (12549, 12557), True, 'import numpy as np\n'), ((18204, 18228), 'numpy.random.choice', 'np.random.choice', (['(100)', '(5)'], {}), '(100, 5)\n', (18220, 18228), True, 'import numpy as np\n'), ((18259, 18283), 'numpy.random.choice', 'np.random.choice', (['(100)', '(5)'], {}), '(100, 5)\n', (18275, 18283), True, 'import numpy as np\n'), ((19207, 19232), 'numpy.arange', 'np.arange', (['recency', '(10)', '(1)'], {}), '(recency, 10, 1)\n', (19216, 19232), True, 'import numpy as np\n'), ((21189, 21207), 'numpy.array', 'np.array', (['[t_x[0]]'], {}), '([t_x[0]])\n', (21197, 21207), True, 'import numpy as np\n'), ((21209, 21225), 'numpy.array', 'np.array', (['[t[0]]'], {}), '([t[0]])\n', (21217, 21225), True, 'import numpy as np\n'), ((21301, 21319), 'numpy.array', 'np.array', (['[t_x[1]]'], {}), '([t_x[1]])\n', (21309, 21319), True, 'import numpy as np\n'), ((21321, 21337), 'numpy.array', 'np.array', (['[t[1]]'], {}), '([t[1]])\n', (21329, 21337), True, 'import numpy as np\n'), ((34694, 34710), 'numpy.array', 'np.array', (['[x[0]]'], {}), '([x[0]])\n', (34702, 34710), True, 'import numpy as np\n'), ((34712, 34730), 'numpy.array', 'np.array', (['[t_x[0]]'], {}), '([t_x[0]])\n', (34720, 34730), True, 'import numpy as np\n'), ((34732, 34748), 'numpy.array', 'np.array', 
(['[t[0]]'], {}), '([t[0]])\n', (34740, 34748), True, 'import numpy as np\n'), ((34819, 34835), 'numpy.array', 'np.array', (['[x[1]]'], {}), '([x[1]])\n', (34827, 34835), True, 'import numpy as np\n'), ((34837, 34855), 'numpy.array', 'np.array', (['[t_x[1]]'], {}), '([t_x[1]])\n', (34845, 34855), True, 'import numpy as np\n'), ((34857, 34873), 'numpy.array', 'np.array', (['[t[1]]'], {}), '([t[1]])\n', (34865, 34873), True, 'import numpy as np\n'), ((11938, 11949), 'numpy.isinf', 'np.isinf', (['r'], {}), '(r)\n', (11946, 11949), True, 'import numpy as np\n'), ((11958, 11970), 'pandas.isnull', 'pd.isnull', (['r'], {}), '(r)\n', (11967, 11970), True, 'import pandas as pd\n'), ((16508, 16519), 'numpy.isinf', 'np.isinf', (['r'], {}), '(r)\n', (16516, 16519), True, 'import numpy as np\n'), ((16528, 16540), 'pandas.isnull', 'pd.isnull', (['r'], {}), '(r)\n', (16537, 16540), True, 'import pandas as pd\n'), ((18318, 18341), 'numpy.random.choice', 'np.random.choice', (['(10)', '(5)'], {}), '(10, 5)\n', (18334, 18341), True, 'import numpy as np\n'), ((19271, 19293), 'numpy.random.choice', 'np.random.choice', (['(9)', '(5)'], {}), '(9, 5)\n', (19287, 19293), True, 'import numpy as np\n'), ((19534, 19553), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(1)'], {}), '(0, 20, 1)\n', (19543, 19553), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright (C) ATHENA AUTHORS
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=too-few-public-methods, no-member, too-many-arguments, unused-argument
""" learning rate """
import tensorflow as tf
from ..utils.hparam import register_and_parse_hparams
class WarmUpLearningSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
""" WarmUp Learning rate schedule for Adam
Used as :
optimizer = tf.keras.optimizers.Adam(learning_rate = WarmUpLearningSchedule(512),
beta_1=0.9, beta_2=0.98, epsilon=1e-9)
Args :
        model_dim is the model dimension used to scale the learning rate
            (typically the Transformer hidden size)
        warmup_steps is the number of steps over which the learning rate
            ramps up to its peak
Returns:
return the learning rate
Idea from the paper: Attention Is All You Need
"""
def __init__(self, model_dim=512, warmup_steps=4000, k=1.0,
decay_steps=99999999, decay_rate=1.0):
super().__init__()
self.model_dim = tf.cast(model_dim, tf.float32)
self.warmup_steps = warmup_steps
self.k = k
self.decay_steps = tf.cast(decay_steps, tf.float32)
self.decay_rate = tf.cast(decay_rate, tf.float32)
def __call__(self, step):
step = tf.cast(step, tf.float32)
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
k = self.k * tf.cast(self.decay_rate ** (step // self.decay_steps), tf.float32)
return k * tf.math.rsqrt(self.model_dim) * tf.math.minimum(arg1, arg2)
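# For reference, the closed form implemented by __call__ above is
#     lr(step) = k * decay_rate ** (step // decay_steps)
#                  * model_dim ** -0.5
#                  * min(step ** -0.5, step * warmup_steps ** -1.5)
# so with decay_rate = 1.0 the learning rate peaks at step == warmup_steps.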
class WarmUpAdam(tf.keras.optimizers.Adam):
"""WarmUpAdam Implementation """
default_config = {
"d_model": 512,
"warmup_steps": 8000,
"k": 0.5,
"decay_steps": 100000,
"decay_rate": 1.0
}
def __init__(self, config=None, beta_1=0.9, beta_2=0.999, epsilon=1e-7,
amsgrad=False, name="WarmUpAdam", **kwargs):
self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)
super().__init__(
learning_rate=WarmUpLearningSchedule(
self.hparams.d_model,
self.hparams.warmup_steps,
self.hparams.k,
self.hparams.decay_steps,
self.hparams.decay_rate
),
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
amsgrad=amsgrad,
name=name,
)
class ExponentialDecayLearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
""" ExponentialDecayLearningRateSchedule
Used as :
optimizer = tf.keras.optimizers.Adam(
            learning_rate = ExponentialDecayLearningRateSchedule(0.01, 100))
    Args :
        initial_lr, decay_steps, decay_rate
    Returns:
        initial_lr * (decay_rate ** (step // decay_steps))
"""
def __init__(self, initial_lr=0.005, decay_steps=10000, decay_rate=0.5):
super().__init__()
self.initial_lr = initial_lr
self.decay_steps = tf.cast(decay_steps, tf.float32)
self.decay_rate = tf.cast(decay_rate, tf.float32)
def __call__(self, step):
step = tf.cast(step, tf.float32)
factor = tf.cast(self.decay_rate ** (step // self.decay_steps), tf.float32)
return self.initial_lr * factor
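# For reference: with the defaults (initial_lr=0.005, decay_steps=10000,
# decay_rate=0.5) this is a staircase schedule that halves the learning rate
# every 10000 steps: 0.005 for steps 0-9999, 0.0025 for steps 10000-19999, etc.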
class ExponentialDecayAdam(tf.keras.optimizers.Adam):
"""WarmUpAdam Implementation """
default_config = {
"initial_lr": 0.005,
"decay_steps": 10000,
"decay_rate": 0.5
}
def __init__(self, config=None, beta_1=0.9, beta_2=0.999, epsilon=1e-7,
                 amsgrad=False, name="ExponentialDecayAdam", **kwargs):
self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)
super().__init__(
learning_rate=ExponentialDecayLearningRateSchedule(
self.hparams.initial_lr,
self.hparams.decay_steps,
self.hparams.decay_rate
),
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
amsgrad=amsgrad,
name=name,
)
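# Minimal usage sketch (illustrative, not part of the original module; it
# assumes config accepts a plain dict, as register_and_parse_hparams suggests):
#
#     optimizer = WarmUpAdam(config={"d_model": 512, "warmup_steps": 8000})
#     model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")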
|
[
"tensorflow.cast",
"tensorflow.math.minimum",
"tensorflow.math.rsqrt"
] |
[((1604, 1634), 'tensorflow.cast', 'tf.cast', (['model_dim', 'tf.float32'], {}), '(model_dim, tf.float32)\n', (1611, 1634), True, 'import tensorflow as tf\n'), ((1722, 1754), 'tensorflow.cast', 'tf.cast', (['decay_steps', 'tf.float32'], {}), '(decay_steps, tf.float32)\n', (1729, 1754), True, 'import tensorflow as tf\n'), ((1781, 1812), 'tensorflow.cast', 'tf.cast', (['decay_rate', 'tf.float32'], {}), '(decay_rate, tf.float32)\n', (1788, 1812), True, 'import tensorflow as tf\n'), ((1859, 1884), 'tensorflow.cast', 'tf.cast', (['step', 'tf.float32'], {}), '(step, tf.float32)\n', (1866, 1884), True, 'import tensorflow as tf\n'), ((1900, 1919), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['step'], {}), '(step)\n', (1913, 1919), True, 'import tensorflow as tf\n'), ((3601, 3633), 'tensorflow.cast', 'tf.cast', (['decay_steps', 'tf.float32'], {}), '(decay_steps, tf.float32)\n', (3608, 3633), True, 'import tensorflow as tf\n'), ((3660, 3691), 'tensorflow.cast', 'tf.cast', (['decay_rate', 'tf.float32'], {}), '(decay_rate, tf.float32)\n', (3667, 3691), True, 'import tensorflow as tf\n'), ((3738, 3763), 'tensorflow.cast', 'tf.cast', (['step', 'tf.float32'], {}), '(step, tf.float32)\n', (3745, 3763), True, 'import tensorflow as tf\n'), ((3781, 3847), 'tensorflow.cast', 'tf.cast', (['(self.decay_rate ** (step // self.decay_steps))', 'tf.float32'], {}), '(self.decay_rate ** (step // self.decay_steps), tf.float32)\n', (3788, 3847), True, 'import tensorflow as tf\n'), ((1991, 2057), 'tensorflow.cast', 'tf.cast', (['(self.decay_rate ** (step // self.decay_steps))', 'tf.float32'], {}), '(self.decay_rate ** (step // self.decay_steps), tf.float32)\n', (1998, 2057), True, 'import tensorflow as tf\n'), ((2110, 2137), 'tensorflow.math.minimum', 'tf.math.minimum', (['arg1', 'arg2'], {}), '(arg1, arg2)\n', (2125, 2137), True, 'import tensorflow as tf\n'), ((2078, 2107), 'tensorflow.math.rsqrt', 'tf.math.rsqrt', (['self.model_dim'], {}), '(self.model_dim)\n', (2091, 2107), True, 'import tensorflow as tf\n')]
|
import setuptools
import versioneer
short_description = "A distributed compute and database platform for quantum chemistry."
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except FileNotFoundError:
long_description = short_description
if __name__ == "__main__":
setuptools.setup(
name="qcfractal",
description=short_description,
author="The QCArchive Development Team",
author_email="<EMAIL>",
url="https://github.com/molssi/qcfractal",
license="BSD-3C",
include_package_data=True,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=setuptools.find_packages(),
python_requires=">=3.7",
install_requires=[
# Core dependencies
"numpy >=1.17",
"msgpack >=0.6.1",
"tornado",
"requests",
"pyyaml >=5.1",
"pydantic >=1.4.0",
# Security dependencies
"bcrypt",
"cryptography",
# Storage dependencies
"sqlalchemy >=1.3,<1.4",
"alembic",
"psycopg2 >=2.7",
# QCPortal dependencies
"tqdm",
"plotly >=4.0.0",
"pandas",
"h5py",
"pyarrow >=0.15.0",
# 'double-conversion >=3.0.0',
# QCArchive depends
"qcengine==0.22",
"qcelemental==0.24",
],
entry_points={
"console_scripts": [
"qcfractal-server=qcfractal.cli.qcfractal_server:main",
"qcfractal-manager=qcfractal.cli.qcfractal_manager:main",
],
"pytest11": ["qcfractal_testing=qcfractal.testing"],
},
extras_require={
"api_logging": ["geoip2"],
"docs": [
"sphinx==1.2.3", # autodoc was broken in 1.3.1
"sphinxcontrib-napoleon",
"sphinx_rtd_theme",
"numpydoc",
],
"lint": ["black", "isort"],
"tests": ["pytest", "pytest-cov", "requests-mock"],
},
tests_require=["pytest", "pytest-cov"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
],
zip_safe=True,
long_description=long_description,
long_description_content_type="text/markdown",
)
|
[
"versioneer.get_version",
"setuptools.find_packages",
"versioneer.get_cmdclass"
] |
[((608, 632), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (630, 632), False, 'import versioneer\n'), ((651, 676), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (674, 676), False, 'import versioneer\n'), ((695, 721), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (719, 721), False, 'import setuptools\n')]
|
import json
import logging
import urllib3
from uuid import uuid4
import nexus
logger = logging.getLogger()
logger.setLevel(logging.INFO)
http = urllib3.PoolManager()
CFN_SUCCESS = "SUCCESS"
CFN_FAILED = "FAILED"
def handler(event, context):
def cfn_error(message=None):
logger.error("| cfn_error: %s" % message)
cfn_send(event, context, CFN_FAILED, reason=message)
try:
logger.info(event)
# cloudformation request type (create/update/delete)
request_type = event['RequestType']
# extract resource properties
props = event['ResourceProperties']
old_props = event.get('OldResourceProperties', {})
if request_type == "Create":
physical_id = f"nexus.on.aws.{str(uuid4())}"
else:
physical_id = event.get('PhysicalResourceId', None)
if not physical_id:
cfn_error("invalid request: request type is '%s' but 'PhysicalResourceId' is not defined" % request_type)
return
if request_type != "Delete":
username = props['Username']
password = props['Password']
endpoint = props['Endpoint']
blobstoreName = props['BlobStoreName'] if 'BlobStoreName' in props else 's3-blobsstore'
bucketName = props['S3BucketName']
nexusHelper = nexus.Nexus(username=username, password=password, endpoint=endpoint)
nexusHelper.deleteAllRepos()
nexusHelper.removeDefaultFileBlobstore()
nexusHelper.createS3Blobstore(blobstoreName, bucketName, '-1')
cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id)
except KeyError as e:
cfn_error(f"invalid request. Missing key {str(e)}")
except Exception as e:
logger.exception(str(e))
cfn_error(str(e))
# sends a response to cloudformation
def cfn_send(event, context, responseStatus, responseData={}, physicalResourceId=None, noEcho=False, reason=None):
responseUrl = event['ResponseURL']
logger.info(responseUrl)
responseBody = {}
responseBody['Status'] = responseStatus
responseBody['Reason'] = reason or ('See the details in CloudWatch Log Stream: ' + context.log_stream_name)
responseBody['PhysicalResourceId'] = physicalResourceId or context.log_stream_name
responseBody['StackId'] = event['StackId']
responseBody['RequestId'] = event['RequestId']
responseBody['LogicalResourceId'] = event['LogicalResourceId']
responseBody['NoEcho'] = noEcho
responseBody['Data'] = responseData
body = json.dumps(responseBody)
logger.info("| response body:\n" + body)
headers = {
'content-type' : '',
'content-length' : str(len(body))
}
try:
response = http.request('PUT',
responseUrl,
body=body,
headers=headers,
retries=False)
logger.info("| status code: " + str(response.status))
except Exception as e:
logger.error("| unable to send response to CloudFormation")
logger.exception(e)
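# For reference, cfn_send PUTs a JSON document of the following shape to the
# pre-signed ResponseURL provided by CloudFormation (values are illustrative):
#
#     {
#         "Status": "SUCCESS",
#         "Reason": "See the details in CloudWatch Log Stream: ...",
#         "PhysicalResourceId": "nexus.on.aws.<uuid>",
#         "StackId": "...",
#         "RequestId": "...",
#         "LogicalResourceId": "...",
#         "NoEcho": false,
#         "Data": {}
#     }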
|
[
"uuid.uuid4",
"nexus.Nexus",
"json.dumps",
"urllib3.PoolManager",
"logging.getLogger"
] |
[((88, 107), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (105, 107), False, 'import logging\n'), ((145, 166), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (164, 166), False, 'import urllib3\n'), ((2595, 2619), 'json.dumps', 'json.dumps', (['responseBody'], {}), '(responseBody)\n', (2605, 2619), False, 'import json\n'), ((1366, 1434), 'nexus.Nexus', 'nexus.Nexus', ([], {'username': 'username', 'password': 'password', 'endpoint': 'endpoint'}), '(username=username, password=password, endpoint=endpoint)\n', (1377, 1434), False, 'import nexus\n'), ((766, 773), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (771, 773), False, 'from uuid import uuid4\n')]
|
# Copyright (c) 2019, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import typing as ty
import numpy as np
import fire_rs.firemodel.propagation
import fire_rs.geodata.environment
import fire_rs.geodata.geo_data
import fire_rs.geodata.wildfire
class RealWildfire:
"""Generate wildfire maps by combining ignition, propagation and weather changes operations"""
def __init__(self, start_timestamp: datetime.datetime,
environment: fire_rs.firemodel.propagation.Environment):
self._environment = environment
self.start_timestamp = start_timestamp
self._current_time = self.start_timestamp
# Pending ignitions
# type: ty.MutableMapping[float, ty.Tuple[int, int]]
self._pending_ignitions = {}
self._perimeter = None
self._fire_map = fire_rs.firemodel.propagation.empty_firemap(self._environment.raster)
self._action_log = []
def ignite(self, position: ty.Union[ty.Tuple[float, float], fire_rs.geodata.geo_data.Point]):
"""Set some location on fire"""
c = self._environment.raster.array_index(position)
self._pending_ignitions[c] = self._current_time.timestamp()
self._fire_map["ignition"][c] = self._pending_ignitions[c]
self._action_log.append(
(self._current_time, "{} position {} ".format("Ignite", str(position))))
def change_wind(self, speed, direction):
self._environment.update_area_wind(speed, direction)
self._action_log.append(
(self._current_time,
"{} to {} km/h {} °".format("Set Wind", str(speed), str(direction / np.pi * 180))))
def propagate(self, duration: datetime.timedelta):
if self._perimeter:
self._pending_ignitions = {**self._pending_ignitions, **self._perimeter.cells}
old_fire_map = self._fire_map.clone()
# First propagation
fireprop = fire_rs.firemodel.propagation.FirePropagation(self._environment)
# Mark burnt cells, so fire do not propagate over them again
mask = np.where(
(old_fire_map.data["ignition"] > 0) & (old_fire_map.data["ignition"] < np.inf))
if self._perimeter:
mask = np.where(self._perimeter.area_array | np.isfinite(self._perimeter.array))
fireprop.prop_data.data["ignition"][mask] = np.NaN
for k, v in self._pending_ignitions.items():
fireprop.set_ignition_cell((k[0], k[1], v))
fireprop.propagate((self._current_time + duration).timestamp())
# remove pending ignitions
self._pending_ignitions = {}
# Store firemap
self._fire_map = fireprop.ignitions()
# Fuse the old firemap wih the new one
self._fire_map.data["ignition"][mask] = old_fire_map["ignition"][mask]
# Advance time
self._current_time += duration
# Calculate perimeter
self._perimeter = fire_rs.geodata.wildfire.Perimeter(self._fire_map,
self.current_time.timestamp())
self._action_log.append(
(self._current_time,
"{} for {}".format("Propagate", str(duration))))
@property
def action_log(self) -> ty.Sequence[ty.Tuple[datetime.datetime, str]]:
return self._action_log
@property
def current_time(self) -> datetime.datetime:
return self._current_time
@property
def current_perimeter(self) -> ty.Optional[fire_rs.geodata.wildfire.Perimeter]:
return self._perimeter
@property
def fire_map(self) -> fire_rs.geodata.geo_data.GeoData:
return self._fire_map
def perimeter(self, threshold_time: ty.Union[datetime.datetime, float]):
t = threshold_time.timestamp() if isinstance(threshold_time,
datetime.datetime) else threshold_time
return fire_rs.geodata.wildfire.Perimeter(self._fire_map, t)
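# Note on units: the "ignition" layer of the fire map stores ignition times as
# POSIX timestamps (seconds since the epoch), which is why ignite() and
# propagate() write datetime.timestamp() values into the raster and why
# perimeter() accepts either a datetime or such a float as its threshold.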
if __name__ == "__main__":
import matplotlib.pyplot as plt
import fire_rs.geodata.display
# Test realwildfire
time = datetime.datetime.now()
area = [[480060.0, 485060.0], [6210074.0, 6215074.0]]
speed = 1.
direction = 0.
world = fire_rs.geodata.environment.World()
env = fire_rs.firemodel.propagation.Environment(area, speed, direction, world=world)
rw = RealWildfire(time, env)
actions = [(rw.ignite, ((area[0][0] + 1000.0, area[1][0] + 1000.),)),
(rw.propagate, (datetime.timedelta(minutes=30.),)),
(rw.change_wind, (3, np.pi / 4)),
(rw.propagate, (datetime.timedelta(minutes=31.),)),
(rw.change_wind, (3, np.pi / 2)),
(rw.ignite, ((area[0][0] + 3000.0, area[1][0] + 3000.),)),
(rw.propagate, (datetime.timedelta(minutes=32.),)),
(rw.change_wind, (3, 0.)),
(rw.propagate, (datetime.timedelta(minutes=33.),)),
(rw.change_wind, (3, np.pi / 4)),
(rw.propagate, (datetime.timedelta(minutes=34.),)),
(rw.change_wind, (3, np.pi / 2)),
(rw.propagate, (datetime.timedelta(minutes=35.),))
]
fig = plt.figure()
ax = fig.gca()
for action in actions:
fig.clear()
ax = fig.gca()
if len(action[1]) == 0:
action[0]()
else:
action[0](*action[1])
v_min = np.nanmin(rw.fire_map.data["ignition"][np.isfinite(rw.fire_map.data["ignition"])])
v_max = np.nanmax(rw.fire_map.data["ignition"][np.isfinite(rw.fire_map.data["ignition"])])
fig.colorbar(ax.matshow(rw.fire_map.data["ignition"], vmin=v_min, vmax=v_max),
format=fire_rs.geodata.display.SecondDateFormatter('%d/%m/%y %H:%M'), )
if rw.current_perimeter:
# if rw.current_perimeter.area_array is not None:
# ax.matshow(rw.current_perimeter.area_array)
fig.colorbar(
ax.matshow(rw.current_perimeter.array, cmap="Reds", vmin=v_min, vmax=v_max),
format=fire_rs.geodata.display.SecondDateFormatter('%d/%m/%y %H:%M'))
fig.show()
print("bye")
|
[
"numpy.isfinite",
"matplotlib.pyplot.figure",
"numpy.where",
"datetime.timedelta",
"datetime.datetime.now"
] |
[((5389, 5412), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5410, 5412), False, 'import datetime\n'), ((6491, 6503), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6501, 6503), True, 'import matplotlib.pyplot as plt\n'), ((3361, 3454), 'numpy.where', 'np.where', (["((old_fire_map.data['ignition'] > 0) & (old_fire_map.data['ignition'] < np.inf)\n )"], {}), "((old_fire_map.data['ignition'] > 0) & (old_fire_map.data[\n 'ignition'] < np.inf))\n", (3369, 3454), True, 'import numpy as np\n'), ((5781, 5813), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(30.0)'}), '(minutes=30.0)\n', (5799, 5813), False, 'import datetime\n'), ((5897, 5929), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(31.0)'}), '(minutes=31.0)\n', (5915, 5929), False, 'import datetime\n'), ((6087, 6119), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(32.0)'}), '(minutes=32.0)\n', (6105, 6119), False, 'import datetime\n'), ((6196, 6228), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(33.0)'}), '(minutes=33.0)\n', (6214, 6228), False, 'import datetime\n'), ((6312, 6344), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(34.0)'}), '(minutes=34.0)\n', (6330, 6344), False, 'import datetime\n'), ((6428, 6460), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(35.0)'}), '(minutes=35.0)\n', (6446, 6460), False, 'import datetime\n'), ((6755, 6796), 'numpy.isfinite', 'np.isfinite', (["rw.fire_map.data['ignition']"], {}), "(rw.fire_map.data['ignition'])\n", (6766, 6796), True, 'import numpy as np\n'), ((6854, 6895), 'numpy.isfinite', 'np.isfinite', (["rw.fire_map.data['ignition']"], {}), "(rw.fire_map.data['ignition'])\n", (6865, 6895), True, 'import numpy as np\n'), ((3548, 3582), 'numpy.isfinite', 'np.isfinite', (['self._perimeter.array'], {}), '(self._perimeter.array)\n', (3559, 3582), True, 'import numpy as np\n')]
|
import os
from datetime import date
from typing import List
import requests
from bs4 import BeautifulSoup
from context_types import (
HttpRequestParameters,
SearchContext,
SearchRequest,
SearchResult,
SearchResultPage,
)
from document_utilities import DocUtil
from utilities import Folders
def make_http_request_parameters(
search_request: SearchRequest, page: int
) -> HttpRequestParameters:
# url_semantic_distance = 'https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=semantic+distance&btnG=' # semantic distance
# related = 'https://scholar.google.com/scholar?q=related:8y78kUMDHwkJ:scholar.google.com/&scioq=semantic+distance&hl=en&as_sdt=0,5'
# url_semantic_distance_wordnet = 'https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=semantic+distance+wordnet&btnG=' # semantic distance wordnet
# related2 = 'https://scholar.google.com/scholar?q=related:8y78kUMDHwkJ:scholar.google.com/&scioq=semantic+distance&hl=en&as_sdt=0,5'
# url3 = "https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=%22semantic+distance%22++in+wordnet&btnG=&oq=" # "semantic distance" in wordnet
# related3 = 'https://scholar.google.com/scholar?q=related:7FXqMoX8luQJ:scholar.google.com/&scioq=%22semantic+distance%22++in+wordnet&hl=en&as_sdt=0,5'
if page == 0:
url = "https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q="
for idx, term in enumerate(search_request.terms):
if idx == 0:
url = url + term
else:
url = url + "+" + term
idx = idx + 1
url = url + "&btnG="
else:
url = "https://scholar.google.com/scholar?start=" + str(page * 10) + "&q="
for idx, term in enumerate(search_request.terms):
if idx == 0:
url = url + term
else:
url = url + "+" + term
idx = idx + 1
url = url + "&hl=en&as_sdt=0,5"
return HttpRequestParameters(page, url)
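# For example, a SearchRequest whose terms are ["semantic", "distance"] yields
#   page 0: https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=semantic+distance&btnG=
#   page 1: https://scholar.google.com/scholar?start=10&q=semantic+distance&hl=en&as_sdt=0,5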
def download_page(http_request_parameters: HttpRequestParameters, filepath: str):
print("---------------------------------------")
print("page", http_request_parameters.page)
print("== HEADERS ==")
print(http_request_parameters.headers)
print("== REQUEST URL ==")
print(http_request_parameters.url)
response = requests.get(
http_request_parameters.url, headers=http_request_parameters.headers
)
print("== RESPONSE ===")
print(response)
print("---------------------------------------")
if response.status_code != 200:
return
print("saving response to:", filepath)
with open(filepath, "a", encoding="utf-8") as fh:
fh.write(response.text)
def process_page(
http_request_parameters: HttpRequestParameters,
filepath: str,
) -> SearchResultPage:
print("reading response file %s" % filepath)
print()
search_results = []
with open(filepath, "r", encoding="utf-8") as fh:
page = BeautifulSoup(fh.read(), "lxml")
groups = page.find_all(attrs={"class": "gs_r gs_or gs_scl"})
# print ('number of result groups =', len(groups))
# print('----------------------------------------')
for idx, group in enumerate(groups):
# print('processing group', idx)
# print(group)
# print('GROUP ----------------------------------------')
entity_gs_ri = group.find(attrs={"class": "gs_ri"})
entity_gs_ri_anchors = entity_gs_ri.find_all(
"a", href=True
) # get first href
authorship = entity_gs_ri.find("div", attrs={"class": "gs_a"}).text
# bug in .text if text is xml/html
# workaround: manually convert to text
abstractHtml = entity_gs_ri.find("div", attrs={"class": "gs_rs"})
abstract = DocUtil.html_to_text(str(abstractHtml))
citations = 0
related = ""
cited_tag = "Cited by"
related_tag = "Related articles"
for anchor in entity_gs_ri_anchors:
txt = anchor.text
if txt.startswith(cited_tag):
citations = int(txt.replace(cited_tag, ""))
if txt.startswith(related_tag):
related = anchor["href"]
authors_publication = authorship.split("-", 1)
authors = authors_publication[0].strip()
publication = authors_publication[1].strip()
result = SearchResult(authors, publication)
result.title = entity_gs_ri.a.text
result.site_url = entity_gs_ri_anchors[0]["href"]
result.abstract = abstract
result.citations = citations
result.related = related
entity_gs_or_ggsm = group.find(attrs={"class": "gs_or_ggsm"})
if entity_gs_or_ggsm:
entity_gs_or_ggsm_anchors = entity_gs_or_ggsm.find_all("a", href=True)
result.pdf_url = entity_gs_or_ggsm_anchors[0]["href"]
# print (result.to_json_formatted())
result.year = DocUtil.get_year(publication)
current_year = date.today().year
citation_divisor = current_year - result.year
if (citation_divisor) < 1:
citation_divisor = 1
result.citation_weight = result.citations / citation_divisor
search_results.append(result)
# print('ENTITY ----------------------------------------')
# for entity in entities:
# print(entity)
# print('ENTITY ----------------------------------------')
# print('----------------------------------------')
return SearchResultPage(http_request_parameters, filepath, search_results)
def search_scholar(
search_request: SearchRequest, num_pages: int
) -> List[SearchResultPage]:
if not search_request:
print("ERROR: no search request specified")
return []
if not search_request.terms:
print("ERROR: no search terms specified")
return []
search_result_pages: List[SearchResultPage] = []
for page in range(0, num_pages):
response_filepath = (
Folders.responses()
+ search_request.source_name
+ "_"
+ search_request.identifier_hash
+ "_"
+ str(page)
+ ".html"
)
http_request_parameters = make_http_request_parameters(search_request, page)
if not os.path.isfile(response_filepath):
download_page(http_request_parameters, response_filepath)
search_result_page = process_page(http_request_parameters, response_filepath)
search_result_pages.append(search_result_page)
return search_result_pages
# =============================================================================
# def main():
# # TODO: parse quoted strings
# n = len(sys.argv)
# if (n > 1):
# terms = []
# n = n -1
# for i in range (0, n):
# terms.append(sys.argv[i + 1])
# scholar_search(terms, 3)
# else:
# print("Usage: %s [search terms]" % sys.argv[0])
# if __name__ == "__main__":
# main()
|
[
"utilities.Folders.responses",
"context_types.HttpRequestParameters",
"document_utilities.DocUtil.get_year",
"datetime.date.today",
"os.path.isfile",
"context_types.SearchResult",
"requests.get",
"context_types.SearchResultPage"
] |
[((2015, 2047), 'context_types.HttpRequestParameters', 'HttpRequestParameters', (['page', 'url'], {}), '(page, url)\n', (2036, 2047), False, 'from context_types import HttpRequestParameters, SearchContext, SearchRequest, SearchResult, SearchResultPage\n'), ((2402, 2489), 'requests.get', 'requests.get', (['http_request_parameters.url'], {'headers': 'http_request_parameters.headers'}), '(http_request_parameters.url, headers=http_request_parameters.\n headers)\n', (2414, 2489), False, 'import requests\n'), ((5903, 5970), 'context_types.SearchResultPage', 'SearchResultPage', (['http_request_parameters', 'filepath', 'search_results'], {}), '(http_request_parameters, filepath, search_results)\n', (5919, 5970), False, 'from context_types import HttpRequestParameters, SearchContext, SearchRequest, SearchResult, SearchResultPage\n'), ((4650, 4684), 'context_types.SearchResult', 'SearchResult', (['authors', 'publication'], {}), '(authors, publication)\n', (4662, 4684), False, 'from context_types import HttpRequestParameters, SearchContext, SearchRequest, SearchResult, SearchResultPage\n'), ((5270, 5299), 'document_utilities.DocUtil.get_year', 'DocUtil.get_year', (['publication'], {}), '(publication)\n', (5286, 5299), False, 'from document_utilities import DocUtil\n'), ((6738, 6771), 'os.path.isfile', 'os.path.isfile', (['response_filepath'], {}), '(response_filepath)\n', (6752, 6771), False, 'import os\n'), ((5328, 5340), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5338, 5340), False, 'from datetime import date\n'), ((6427, 6446), 'utilities.Folders.responses', 'Folders.responses', ([], {}), '()\n', (6444, 6446), False, 'from utilities import Folders\n')]
|
# encoding: utf-8
"""
trace.py
Created by <NAME> on 2009-09-06.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import StringIO
import traceback
def trace ():
buff = StringIO.StringIO()
traceback.print_exc(file=buff)
r = buff.getvalue()
buff.close()
return r
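# Typical use: call trace() inside an except block to obtain the formatted
# traceback of the exception currently being handled as a string.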
|
[
"traceback.print_exc",
"StringIO.StringIO"
] |
[((186, 205), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (203, 205), False, 'import StringIO\n'), ((207, 237), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'buff'}), '(file=buff)\n', (226, 237), False, 'import traceback\n')]
|
#runas import numpy as np; n = 20; a = np.arange(n*n*n).reshape((n,n,n)).astype(np.uint8); b = 2. ; goodExpoMeasure(a, b)
#pythran export goodExpoMeasure(uint8[][][], float)
import numpy
def goodExpoMeasure(inRGB, sigma):
'''
Compute the good exposition image quality measure on 1 input image.
'''
R = inRGB[0,:,:].astype(numpy.float64)
G = inRGB[1,:,:].astype(numpy.float64)
B = inRGB[2,:,:].astype(numpy.float64)
goodExpoR = numpy.exp(- ((R - 128)**2) / sigma)
goodExpoG = numpy.exp(- ((G - 128)**2) / sigma)
goodExpoB = numpy.exp(- ((B - 128)**2) / sigma)
goodExpo = goodExpoR * goodExpoG * goodExpoB
goodExpo = (numpy.round(goodExpo, 2) * (2**8-1)).astype(numpy.uint8)
return goodExpo
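# Worked example of the measure: each channel value c is weighted by
# exp(-(c - 128)**2 / sigma), which equals 1.0 at mid-grey (c == 128) and decays
# for under- or over-exposed pixels; the three channel maps are multiplied and
# rescaled to uint8 in [0, 255].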
|
[
"numpy.round",
"numpy.exp"
] |
[((455, 489), 'numpy.exp', 'numpy.exp', (['(-(R - 128) ** 2 / sigma)'], {}), '(-(R - 128) ** 2 / sigma)\n', (464, 489), False, 'import numpy\n'), ((507, 541), 'numpy.exp', 'numpy.exp', (['(-(G - 128) ** 2 / sigma)'], {}), '(-(G - 128) ** 2 / sigma)\n', (516, 541), False, 'import numpy\n'), ((559, 593), 'numpy.exp', 'numpy.exp', (['(-(B - 128) ** 2 / sigma)'], {}), '(-(B - 128) ** 2 / sigma)\n', (568, 593), False, 'import numpy\n'), ((662, 686), 'numpy.round', 'numpy.round', (['goodExpo', '(2)'], {}), '(goodExpo, 2)\n', (673, 686), False, 'import numpy\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 12:27:27 2020
@author: panton01
"""
### ----------------- IMPORTS ----------------- ###
import json
import matplotlib.pyplot as plt
from matplotlib.widgets import Button, SpanSelector, TextBox
from user_gui.user_verify import UserVerify
### ------------------------------------------ ###
if __name__ == '__main__' :
# Load config file
try:
        with open('config.json', 'r') as config_file:
            config = json.loads(config_file.read())
except Exception as err:
raise FileNotFoundError(f"Unable to read the config file.\n{err}")
# Get variables from config dictionary
input_path = config['main_path']
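    # Example of the minimal config.json this script expects (only 'main_path'
    # is read here; the path value below is a placeholder):
    # {
    #     "main_path": "/path/to/data/folder"
    # }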
# Create instance
obj = UserVerify(input_path)
file_id = obj.select_file() # user file selection
data, idx_bounds = obj.main_func(file_id) # get data and seizure index
if idx_bounds is not False:
if idx_bounds.shape[0] == 0: # check for zero seizures
obj.save_emptyidx(data.shape[0],file_id)
else: # otherwise proceed with gui creation
# get gui
from user_gui.verify_gui import matplotGui,fig,ax
fig.suptitle('To Submit Press Enter; To Select Drag Mouse Pointer : '+file_id, fontsize=12)
# init object
callback = matplotGui(data,idx_bounds,obj, file_id)
# add buttons
axprev = plt.axes([0.625, 0.05, 0.13, 0.075]) # previous
bprev = Button(axprev, 'Previous: <')
bprev.on_clicked(callback.previous)
axnext = plt.axes([0.765, 0.05, 0.13, 0.075]) # next
bnext = Button(axnext, 'Next: >')
bnext.on_clicked(callback.forward)
axaccept = plt.axes([0.125, 0.05, 0.13, 0.075]) # accept
baccept = Button(axaccept, 'Accept: y')
baccept.on_clicked(callback.accept)
axreject = plt.axes([0.265, 0.05, 0.13, 0.075]) # reject
breject = Button(axreject, 'Reject: n')
breject.on_clicked(callback.reject)
axbox = plt.axes([0.5, 0.055, 0.05, 0.05]) # seizure number
text_box = TextBox(axbox, 'Szr #', initial='0')
text_box.on_submit(callback.submit)
# add key press
idx_out = fig.canvas.mpl_connect('key_press_event', callback.keypress)
# set useblit True on gtkagg for enhanced performance
span = SpanSelector(ax, callback.onselect, 'horizontal', useblit=True,
rectprops=dict(alpha=0.5, facecolor='red'))
plt.show()
|
[
"matplotlib.pyplot.show",
"json.loads",
"matplotlib.pyplot.axes",
"matplotlib.widgets.TextBox",
"user_gui.verify_gui.fig.suptitle",
"matplotlib.widgets.Button",
"user_gui.verify_gui.matplotGui",
"user_gui.user_verify.UserVerify",
"user_gui.verify_gui.fig.canvas.mpl_connect"
] |
[((713, 735), 'user_gui.user_verify.UserVerify', 'UserVerify', (['input_path'], {}), '(input_path)\n', (723, 735), False, 'from user_gui.user_verify import UserVerify\n'), ((472, 490), 'json.loads', 'json.loads', (['config'], {}), '(config)\n', (482, 490), False, 'import json\n'), ((1184, 1281), 'user_gui.verify_gui.fig.suptitle', 'fig.suptitle', (["('To Submit Press Enter; To Select Drag Mouse Pointer : ' + file_id)"], {'fontsize': '(12)'}), "('To Submit Press Enter; To Select Drag Mouse Pointer : ' +\n file_id, fontsize=12)\n", (1196, 1281), False, 'from user_gui.verify_gui import matplotGui, fig, ax\n'), ((1341, 1383), 'user_gui.verify_gui.matplotGui', 'matplotGui', (['data', 'idx_bounds', 'obj', 'file_id'], {}), '(data, idx_bounds, obj, file_id)\n', (1351, 1383), False, 'from user_gui.verify_gui import matplotGui, fig, ax\n'), ((1442, 1478), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.625, 0.05, 0.13, 0.075]'], {}), '([0.625, 0.05, 0.13, 0.075])\n', (1450, 1478), True, 'import matplotlib.pyplot as plt\n'), ((1510, 1539), 'matplotlib.widgets.Button', 'Button', (['axprev', '"""Previous: <"""'], {}), "(axprev, 'Previous: <')\n", (1516, 1539), False, 'from matplotlib.widgets import Button, SpanSelector, TextBox\n'), ((1609, 1645), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.765, 0.05, 0.13, 0.075]'], {}), '([0.765, 0.05, 0.13, 0.075])\n', (1617, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1698), 'matplotlib.widgets.Button', 'Button', (['axnext', '"""Next: >"""'], {}), "(axnext, 'Next: >')\n", (1679, 1698), False, 'from matplotlib.widgets import Button, SpanSelector, TextBox\n'), ((1769, 1805), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.125, 0.05, 0.13, 0.075]'], {}), '([0.125, 0.05, 0.13, 0.075])\n', (1777, 1805), True, 'import matplotlib.pyplot as plt\n'), ((1837, 1866), 'matplotlib.widgets.Button', 'Button', (['axaccept', '"""Accept: y"""'], {}), "(axaccept, 'Accept: y')\n", (1843, 1866), False, 'from matplotlib.widgets import Button, SpanSelector, TextBox\n'), ((1938, 1974), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.265, 0.05, 0.13, 0.075]'], {}), '([0.265, 0.05, 0.13, 0.075])\n', (1946, 1974), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2035), 'matplotlib.widgets.Button', 'Button', (['axreject', '"""Reject: n"""'], {}), "(axreject, 'Reject: n')\n", (2012, 2035), False, 'from matplotlib.widgets import Button, SpanSelector, TextBox\n'), ((2104, 2138), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.5, 0.055, 0.05, 0.05]'], {}), '([0.5, 0.055, 0.05, 0.05])\n', (2112, 2138), True, 'import matplotlib.pyplot as plt\n'), ((2179, 2215), 'matplotlib.widgets.TextBox', 'TextBox', (['axbox', '"""Szr #"""'], {'initial': '"""0"""'}), "(axbox, 'Szr #', initial='0')\n", (2186, 2215), False, 'from matplotlib.widgets import Button, SpanSelector, TextBox\n'), ((2327, 2387), 'user_gui.verify_gui.fig.canvas.mpl_connect', 'fig.canvas.mpl_connect', (['"""key_press_event"""', 'callback.keypress'], {}), "('key_press_event', callback.keypress)\n", (2349, 2387), False, 'from user_gui.verify_gui import matplotGui, fig, ax\n'), ((2622, 2632), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2630, 2632), True, 'import matplotlib.pyplot as plt\n')]
|
from .models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
password = serializers.CharField(max_length=20, min_length=8, trim_whitespace=False, write_only=True)
class Meta:
model = User
fields = ('id', 'nickname', 'username', 'email', 'password')
# serializer's default `create` method will call `model.objects.create`
# method to create new instance, override to create user correctly.
def create(self, validated_data):
return User.objects.create_user(**validated_data)
# since the password cannot be changed directly
# override to update user correctly
def update(self, instance, validated_data):
if 'password' in validated_data:
instance.set_password(validated_data['password'])
instance.nickname = validated_data.get('nickname', instance.nickname)
instance.save()
return instance
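# Note: set_password() hashes the raw password before storing it, which is why
# update() cannot simply assign validated_data['password'] to the field.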
|
[
"rest_framework.serializers.CharField"
] |
[((131, 225), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(20)', 'min_length': '(8)', 'trim_whitespace': '(False)', 'write_only': '(True)'}), '(max_length=20, min_length=8, trim_whitespace=False,\n write_only=True)\n', (152, 225), False, 'from rest_framework import serializers\n')]
|
from __future__ import division, print_function
import numpy as np
from dipy.denoise.nlmeans_block import nlmeans_block
def non_local_means(arr, sigma, mask=None, patch_radius=1, block_radius=5,
rician=True):
r""" Non-local means for denoising 3D and 4D images, using
blockwise averaging approach
Parameters
----------
arr : 3D or 4D ndarray
The array to be denoised
    mask : 3D ndarray, optional
        Mask over the spatial dimensions of ``arr``; if None, a mask covering
        the whole volume is used.
sigma : float
standard deviation of the noise estimated from the data
patch_radius : int
patch size is ``2 x patch_radius + 1``. Default is 1.
block_radius : int
block size is ``2 x block_radius + 1``. Default is 5.
rician : boolean
If True the noise is estimated as Rician, otherwise Gaussian noise
is assumed.
Returns
-------
denoised_arr : ndarray
the denoised ``arr`` which has the same shape as ``arr``.
References
----------
.. [Coupe08] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, C.
Barillot, An Optimized Blockwise Non Local Means Denoising
Filter for 3D Magnetic Resonance Images, IEEE Transactions on
Medical Imaging, 27(4):425-441, 2008
.. [Coupe11] <NAME>, <NAME>, <NAME>, <NAME>.
Adaptive Multiresolution Non-Local Means Filter for 3D MR Image
Denoising IET Image Processing, Institution of Engineering and
Technology, 2011
"""
if not np.isscalar(sigma) and not sigma.shape == (1, ):
raise ValueError("Sigma input needs to be of type float", sigma)
if mask is None and arr.ndim > 2:
mask = np.ones((arr.shape[0], arr.shape[1], arr.shape[2]), dtype='f8')
else:
mask = np.ascontiguousarray(mask, dtype='f8')
if mask.ndim != 3:
raise ValueError('mask needs to be a 3D ndarray', mask.shape)
if arr.ndim == 3:
return np.array(nlmeans_block(
np.double(arr),
mask,
patch_radius,
block_radius,
sigma,
np.int(rician))).astype(arr.dtype)
elif arr.ndim == 4:
denoised_arr = np.zeros_like(arr)
for i in range(arr.shape[-1]):
denoised_arr[..., i] = np.array(nlmeans_block(np.double(
arr[..., i]), mask, patch_radius, block_radius, sigma,
np.int(rician))).astype(arr.dtype)
return denoised_arr
else:
raise ValueError("Only 3D or 4D array are supported!", arr.shape)
|
[
"numpy.zeros_like",
"numpy.double",
"numpy.isscalar",
"numpy.ones",
"numpy.int",
"numpy.ascontiguousarray"
] |
[((1669, 1732), 'numpy.ones', 'np.ones', (['(arr.shape[0], arr.shape[1], arr.shape[2])'], {'dtype': '"""f8"""'}), "((arr.shape[0], arr.shape[1], arr.shape[2]), dtype='f8')\n", (1676, 1732), True, 'import numpy as np\n'), ((1758, 1796), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['mask'], {'dtype': '"""f8"""'}), "(mask, dtype='f8')\n", (1778, 1796), True, 'import numpy as np\n'), ((1494, 1512), 'numpy.isscalar', 'np.isscalar', (['sigma'], {}), '(sigma)\n', (1505, 1512), True, 'import numpy as np\n'), ((2164, 2182), 'numpy.zeros_like', 'np.zeros_like', (['arr'], {}), '(arr)\n', (2177, 2182), True, 'import numpy as np\n'), ((1965, 1979), 'numpy.double', 'np.double', (['arr'], {}), '(arr)\n', (1974, 1979), True, 'import numpy as np\n'), ((2082, 2096), 'numpy.int', 'np.int', (['rician'], {}), '(rician)\n', (2088, 2096), True, 'import numpy as np\n'), ((2280, 2302), 'numpy.double', 'np.double', (['arr[..., i]'], {}), '(arr[..., i])\n', (2289, 2302), True, 'import numpy as np\n'), ((2378, 2392), 'numpy.int', 'np.int', (['rician'], {}), '(rician)\n', (2384, 2392), True, 'import numpy as np\n')]
|
from setuptools import setup, find_packages
setup(
    name='pyRandomWalk',
    version='0.0.1',
    # packages live under src/, so setuptools also needs the directory mapping
    package_dir={'': 'src'},
    packages=find_packages(where='src'),
    install_requires=['numpy', 'pandas', 'little_helpers']
)
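# Note (hedged): find_packages(where='src') only *discovers* packages; the
# package_dir={'': 'src'} mapping above is what tells setuptools to install
# them from src/, so both are needed for the src/ layout to work, e.g. with
# `pip install .` or `pip install -e .`.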
|
[
"setuptools.find_packages"
] |
[((111, 137), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (124, 137), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python
"""
Python package for the fuzzware emulator.
"""
import os
import subprocess
import sys
from distutils.command.build import build
from setuptools import setup
class Build(build):
"""Customized setuptools build command - builds native unicorn bindings on build."""
def run(self):
        make_command = ["make", "-C", "fuzzware_harness/native", "clean", "all"]
        if subprocess.call(make_command) != 0:
sys.exit(-1)
build.run(self)
def get_packages(rel_dir):
packages = [rel_dir]
for x in os.walk(rel_dir):
# break into parts
base = list(os.path.split(x[0]))
if base[0] == "":
del base[0]
for mod_name in x[1]:
packages.append(".".join(base + [mod_name]))
return packages
setup(name='fuzzware_harness',
version='0.1',
description='This is the Python library and native modules for the Fuzzware emulation component',
author='<NAME>, <NAME>',
author_email='<EMAIL>, <EMAIL>',
url='https://github.com/RUB-SysSec',
packages=get_packages('fuzzware_harness'), requires=['PyYAML','intelhex', 'monkeyhex'],
include_package_data=True,
cmdclass = {
'build': Build,
},
entry_points = {
'console_scripts': [
'fuzzware_harness = fuzzware_harness.harness:main',
]
}
)
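# Hedged usage note (illustrative): installing with `pip install .` (or running
# `python setup.py build`) triggers the custom Build command above, which
# shells out to make for the native harness before the normal setuptools build.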
|
[
"distutils.command.build.build.run",
"os.walk",
"subprocess.call",
"os.path.split",
"sys.exit"
] |
[((564, 580), 'os.walk', 'os.walk', (['rel_dir'], {}), '(rel_dir)\n', (571, 580), False, 'import os\n'), ((482, 497), 'distutils.command.build.build.run', 'build.run', (['self'], {}), '(self)\n', (491, 497), False, 'from distutils.command.build import build\n'), ((411, 442), 'subprocess.call', 'subprocess.call', (['protoc_command'], {}), '(protoc_command)\n', (426, 442), False, 'import subprocess\n'), ((461, 473), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (469, 473), False, 'import sys\n'), ((629, 648), 'os.path.split', 'os.path.split', (['x[0]'], {}), '(x[0])\n', (642, 648), False, 'import os\n')]
|
'''
Created on 2 Dec 2017
@author: julianporter
'''
from .findSource import findFiles
from setuptools import Command
import shutil
import os
class Cleaner(Command) :
description='Perform a true deep clean, removing distributions, builds and egg-info'
user_options=[]
def __init__(self,dist,**kwargs):
super(Cleaner,self).__init__(dist,**kwargs)
def initialize_options(self):
self.directories=[]
self.files=[]
def finalize_options(self):
self.directories=['build','dist','geoconv.egg-info']
self.files=[]
def run(self):
for directory in self.directories:
try:
shutil.rmtree(directory)
print(f"{directory} deleted")
except FileNotFoundError:
print(f"{directory} does not exist, so not deleted...")
except Exception as e:
print(f"{e.__class__.__name__} : {e}")
        for file in self.files:
            try:
                os.remove(file)
                print(f"{file} deleted")
            except FileNotFoundError:
                print(f"{file} does not exist, so not deleted...")
            except Exception as e:
                print(f"{e.__class__.__name__} : {e}")
if __name__ == '__main__':
    # guard the destructive cleanup so importing this module has no side effects
    objects = findFiles('/opt/git/OSGridConverter/cpp/', pattern=r'^.*\.o$')
    print('Files are:')
    for o in objects:
        print(f'  {o}')
    for o in objects:
        print(f'Removing {o}')
        os.remove(o)
|
[
"shutil.rmtree",
"os.remove"
] |
[((1543, 1555), 'os.remove', 'os.remove', (['o'], {}), '(o)\n', (1552, 1555), False, 'import os\n'), ((700, 724), 'shutil.rmtree', 'shutil.rmtree', (['directory'], {}), '(directory)\n', (713, 724), False, 'import shutil\n'), ((1036, 1051), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1045, 1051), False, 'import os\n')]
|
from aoc import AOC
aoc = AOC(year=__year__, day=__day__)
data = aoc.load()
|
[
"aoc.AOC"
] |
[((27, 58), 'aoc.AOC', 'AOC', ([], {'year': '__year__', 'day': '__day__'}), '(year=__year__, day=__day__)\n', (30, 58), False, 'from aoc import AOC\n')]
|
# Copyright (c) 2018, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import smach
import time
class WaitTimeState(smach.State):
"""
Waits for a few (wait_time_sec) seconds before transfer to next state.
"""
def __init__(self, wait_time_sec=3):
smach.State.__init__(self, outcomes=["next"])
self._wait_time_sec = wait_time_sec
def execute(self, userdata):
del userdata
time.sleep(self._wait_time_sec)
return "next"
class ResetRetryCounter(smach.State):
"""
Resets retry counter, name of the counter is given by retry_counter_name.
"""
def __init__(self, retry_counter_name, reset_value):
assert isinstance(retry_counter_name, str)
assert isinstance(reset_value, int)
smach.State.__init__(
self, outcomes=["next"], output_keys=[retry_counter_name])
self._retry_counter_name = retry_counter_name
self._reset_value = reset_value
def execute(self, userdata):
setattr(userdata, self._retry_counter_name, self._reset_value)
return "next"
class DecreaseAndTestRetry(smach.State):
"""
Decreases retry counter, name of the counter is given by retry_counter_name.
When reaches 0, give up retrying.
"""
def __init__(self, retry_counter_name):
assert isinstance(retry_counter_name, str)
smach.State.__init__(
self, outcomes=["continue", "give_up"],
input_keys=[retry_counter_name], output_keys=[retry_counter_name])
self._retry_counter_name = retry_counter_name
def execute(self, userdata):
if getattr(userdata, self._retry_counter_name) > 0:
setattr(userdata, self._retry_counter_name,
getattr(userdata, self._retry_counter_name) - 1)
return "continue"
return "give_up"
class SetVariables(smach.State):
"""
Sets userdata.
"""
def __init__(self, var_dict):
assert isinstance(var_dict, dict)
for key in var_dict.keys():
assert isinstance(key, str)
smach.State.__init__(self, outcomes=["next"], output_keys=var_dict.keys())
        self._var_dict = var_dict
    def execute(self, userdata):
        for k, v in self._var_dict.items():
setattr(userdata, k, v)
return "next"
class VariableSwitch(smach.State):
"""
Tests userdata.
"""
def __init__(self, var_name, var_values):
assert isinstance(var_name, str)
assert isinstance(var_values, list)
        for value in var_values:
            assert isinstance(value, str)
smach.State.__init__(
self, outcomes=var_values+["_other"], input_keys=[var_name])
self._var_name = var_name
self._var_values = var_values
def execute(self, userdata):
if getattr(userdata, self._var_name) in self._var_values:
return getattr(userdata, self._var_name)
return "_other"
class BypassState(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=["next"])
def execute(self, userdata):
return "next"
|
[
"smach.State.__init__",
"time.sleep"
] |
[((1771, 1816), 'smach.State.__init__', 'smach.State.__init__', (['self'], {'outcomes': "['next']"}), "(self, outcomes=['next'])\n", (1791, 1816), False, 'import smach\n'), ((1911, 1942), 'time.sleep', 'time.sleep', (['self._wait_time_sec'], {}), '(self._wait_time_sec)\n', (1921, 1942), False, 'import time\n'), ((2236, 2315), 'smach.State.__init__', 'smach.State.__init__', (['self'], {'outcomes': "['next']", 'output_keys': '[retry_counter_name]'}), "(self, outcomes=['next'], output_keys=[retry_counter_name])\n", (2256, 2315), False, 'import smach\n'), ((2793, 2925), 'smach.State.__init__', 'smach.State.__init__', (['self'], {'outcomes': "['continue', 'give_up']", 'input_keys': '[retry_counter_name]', 'output_keys': '[retry_counter_name]'}), "(self, outcomes=['continue', 'give_up'], input_keys=[\n retry_counter_name], output_keys=[retry_counter_name])\n", (2813, 2925), False, 'import smach\n'), ((3924, 4012), 'smach.State.__init__', 'smach.State.__init__', (['self'], {'outcomes': "(var_values + ['_other'])", 'input_keys': '[var_name]'}), "(self, outcomes=var_values + ['_other'], input_keys=[\n var_name])\n", (3944, 4012), False, 'import smach\n'), ((4301, 4346), 'smach.State.__init__', 'smach.State.__init__', (['self'], {'outcomes': "['next']"}), "(self, outcomes=['next'])\n", (4321, 4346), False, 'import smach\n')]
|
from django.shortcuts import render
from .models import *
from .forms import *
# Create your views here.
def homepage(request):
# get top five blogs here
latest_article = Article.objects.order_by('-article_published_time')[:5]
page_name = 'ME_KUN_HAN'
context = {'latest_article': latest_article, 'page_name':page_name}
return render(request, 'blog/index.html', context)
def page_list(request, page_list_num):
# get the blog begin with blog_begin_num
i_page_num = int(page_list_num)
list_article = Article.objects.order_by('-article_published_time')[(i_page_num - 1) * 2:(i_page_num - 1) * 2 + 2]
# get the number of all pages.
# PS: the max size of blogs in one page is 5
if i_page_num < 2:
previous = 1
else:
previous = i_page_num - 1
count = Article.objects.all().count()
print("the i_page_num is ", i_page_num, " the article's count is ", count)
if i_page_num * 2 >= count:
next_page = i_page_num
else:
next_page = i_page_num + 1
list_page = [previous, next_page, count]
context = {
'list_article': list_article,
'list_page': list_page,
}
return render(request, 'blog/pagelist.html', context)
def article_page(request, article_file):
# get comments here
return render(request, 'blog/BlogPages/' + article_file + '.html')
def leave_message(request):
if request.method == 'POST':
contact_message = ContactMeMessage(request.POST)
if contact_message.is_valid():
print('the contact message is valid')
name = request.POST['customer_name']
email = request.POST['email_address']
phone = request.POST['phone_number']
message = request.POST['message']
print(name, email, phone, message)
return render(request, 'blog/leave_message_result.html')
# static files
def aboutMe(request):
return render(request, 'blog/about.html')
def contact(request):
return render(request, 'blog/contact.html')
def player(request):
return render(request, 'blog/player.html')
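# Hedged wiring sketch (illustrative; route patterns are assumptions, not taken
# from the project's actual urls.py):
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^$', homepage),
#     url(r'^page/(?P<page_list_num>[0-9]+)/$', page_list),
#     url(r'^article/(?P<article_file>[\w-]+)/$', article_page),
# ]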
|
[
"django.shortcuts.render"
] |
[((346, 389), 'django.shortcuts.render', 'render', (['request', '"""blog/index.html"""', 'context'], {}), "(request, 'blog/index.html', context)\n", (352, 389), False, 'from django.shortcuts import render\n'), ((1180, 1226), 'django.shortcuts.render', 'render', (['request', '"""blog/pagelist.html"""', 'context'], {}), "(request, 'blog/pagelist.html', context)\n", (1186, 1226), False, 'from django.shortcuts import render\n'), ((1306, 1365), 'django.shortcuts.render', 'render', (['request', "('blog/BlogPages/' + article_file + '.html')"], {}), "(request, 'blog/BlogPages/' + article_file + '.html')\n", (1312, 1365), False, 'from django.shortcuts import render\n'), ((1827, 1876), 'django.shortcuts.render', 'render', (['request', '"""blog/leave_message_result.html"""'], {}), "(request, 'blog/leave_message_result.html')\n", (1833, 1876), False, 'from django.shortcuts import render\n'), ((1927, 1961), 'django.shortcuts.render', 'render', (['request', '"""blog/about.html"""'], {}), "(request, 'blog/about.html')\n", (1933, 1961), False, 'from django.shortcuts import render\n'), ((1997, 2033), 'django.shortcuts.render', 'render', (['request', '"""blog/contact.html"""'], {}), "(request, 'blog/contact.html')\n", (2003, 2033), False, 'from django.shortcuts import render\n'), ((2068, 2103), 'django.shortcuts.render', 'render', (['request', '"""blog/player.html"""'], {}), "(request, 'blog/player.html')\n", (2074, 2103), False, 'from django.shortcuts import render\n')]
|
import os
import re
from pdb import set_trace
from os.path import join
def convert_split(in_path, out_path, name2dir, subdir):
    """Rewrite each line of a split file so its image path points into subdir."""
    with open(in_path, "r") as f:
        lines = f.readlines()
    lines_new = []
    for line in lines:
        category_name = re.match(r'([a-zA-Z_]*)_[0-9]*.jpg [0-9]*', line)[1]
        lines_new.append(line.replace(
            category_name, join(subdir, name2dir[category_name], category_name)))
    with open(out_path, "w") as f:
        f.writelines(lines_new)
def main():
    data_root = os.path.expanduser("~/dataset/pets/train")
    # map each category name to the directory that holds its images
    name2dir = {}
    for dir_name in os.listdir(data_root):
        category_name = re.match(r'([a-zA-Z_]*)_[0-9]*.jpg',
                                 os.listdir(join(data_root, dir_name))[0])
        name2dir[category_name[1]] = dir_name
    # the train/val splits resolve into train/, the test split into val/
    convert_split("split/Pet37/Pet37_train.txt",
                  "split/Pet37/Pet37_train_new.txt", name2dir, "train")
    convert_split("split/Pet37/Pet37_val.txt",
                  "split/Pet37/Pet37_val_new.txt", name2dir, "train")
    convert_split("split/Pet37/Pet37_test.txt",
                  "split/Pet37/Pet37_test_new.txt", name2dir, "val")
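# Hedged example of the rewrite (file names illustrative): a split line such as
#   Abyssinian_1.jpg 0
# becomes
#   train/<category_dir>/Abyssinian_1.jpg 0
# where <category_dir> is looked up via name2dir.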
if __name__ == "__main__":
main()
|
[
"re.match",
"os.path.expanduser",
"os.listdir",
"os.path.join"
] |
[((108, 150), 'os.path.expanduser', 'os.path.expanduser', (['"""~/dataset/pets/train"""'], {}), "('~/dataset/pets/train')\n", (126, 150), False, 'import os\n'), ((190, 211), 'os.listdir', 'os.listdir', (['data_root'], {}), '(data_root)\n', (200, 211), False, 'import os\n'), ((518, 566), 're.match', 're.match', (['"""([a-zA-Z_]*)_[0-9]*.jpg [0-9]*"""', 'line'], {}), "('([a-zA-Z_]*)_[0-9]*.jpg [0-9]*', line)\n", (526, 566), False, 'import re\n'), ((983, 1031), 're.match', 're.match', (['"""([a-zA-Z_]*)_[0-9]*.jpg [0-9]*"""', 'line'], {}), "('([a-zA-Z_]*)_[0-9]*.jpg [0-9]*', line)\n", (991, 1031), False, 'import re\n'), ((1447, 1495), 're.match', 're.match', (['"""([a-zA-Z_]*)_[0-9]*.jpg [0-9]*"""', 'line'], {}), "('([a-zA-Z_]*)_[0-9]*.jpg [0-9]*', line)\n", (1455, 1495), False, 'import re\n'), ((667, 722), 'os.path.join', 'join', (['"""train"""', 'name2dir[catergory_name]', 'catergory_name'], {}), "('train', name2dir[catergory_name], catergory_name)\n", (671, 722), False, 'from os.path import join\n'), ((1132, 1187), 'os.path.join', 'join', (['"""train"""', 'name2dir[catergory_name]', 'catergory_name'], {}), "('train', name2dir[catergory_name], catergory_name)\n", (1136, 1187), False, 'from os.path import join\n'), ((1596, 1649), 'os.path.join', 'join', (['"""val"""', 'name2dir[catergory_name]', 'catergory_name'], {}), "('val', name2dir[catergory_name], catergory_name)\n", (1600, 1649), False, 'from os.path import join\n'), ((287, 307), 'os.path.join', 'join', (['data_root', 'dir'], {}), '(data_root, dir)\n', (291, 307), False, 'from os.path import join\n')]
|
# $Id: Box.py,v 1.1.2.1 2007/01/06 10:44:59 marcusva Exp $
#
# Copyright (c) 2007, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""A container, which allows absolute positioning of its widgets."""
from Container import Container
from Constants import *
import base
class Box (Container):
"""Box (width, height) -> Box
A container widget which allows absolute positioning of its widgets.
The Box widget places its attached children relative to it own
topleft coordinates but does not layout them. Instead they are
positioned absolutely, which includes possible overlapping blits
outside of the visible Box area.
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
"""
def __init__ (self, width, height):
Container.__init__ (self)
self.minsize = width, height
def set_focus (self, focus=True):
"""B.set_focus (focus=True) -> None
Overrides the set_focus() behaviour for the Box.
The Box class is not focusable by default. It is a layout
class for other widgets, so it does not need to get the input
focus and thus it will return false without doing anything.
"""
return False
def draw_bg (self):
"""B.draw_bg () -> None
Draws the Box background surface and returns it.
Creates the visible surface of the Box and returns it to the
caller.
"""
return base.GlobalStyle.engine.draw_box (self)
def draw (self):
"""B.draw () -> None
Draws the Box surface and places its children on it.
"""
Container.draw (self)
blit = self.image.blit
for widget in self.children:
blit (widget.image, widget.rect)
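# Hedged usage sketch (illustrative; assumes the surrounding ocempgui widget
# set, e.g. a renderer and some child widget with an add_child/topleft API):
# box = Box (200, 100)
# widget.topleft = 40, 20   # absolute position, relative to the Box's topleft
# box.add_child (widget)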
|
[
"Container.Container.__init__",
"Container.Container.draw",
"base.GlobalStyle.engine.draw_box"
] |
[((2084, 2108), 'Container.Container.__init__', 'Container.__init__', (['self'], {}), '(self)\n', (2102, 2108), False, 'from Container import Container\n'), ((2754, 2792), 'base.GlobalStyle.engine.draw_box', 'base.GlobalStyle.engine.draw_box', (['self'], {}), '(self)\n', (2786, 2792), False, 'import base\n'), ((2927, 2947), 'Container.Container.draw', 'Container.draw', (['self'], {}), '(self)\n', (2941, 2947), False, 'from Container import Container\n')]
|
# api: python
# title: faulthandler
# description: capture fatal errors / memory fauls / Gtk and threading bugs
# version: -1
# type: io
# category: debug
# priority: development
#
# Debug Gtk/glibs/python/threading crashes.
#
# * Gdk:ERROR:/build/buildd/gtk+2.0-2.24.23/gdk/gdkregion-generic.c:1110:miUnionNonO:
# assertion failed: (y1 < y2)
# * foobar: double free or corruption (fasttop): 0x...
import faulthandler
faulthandler.enable()
# file=open("/tmp/st2.log", "a+"), all_threads=True
class dev_faulthandler(object):
    def __init__(self, *x, **kw):
pass
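# Hedged usage note (illustrative): for a persistent crash log rather than
# stderr, the standard library also accepts e.g.
# faulthandler.enable(file=open("/tmp/crash.log", "a"), all_threads=True)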
|
[
"faulthandler.enable"
] |
[((422, 443), 'faulthandler.enable', 'faulthandler.enable', ([], {}), '()\n', (441, 443), False, 'import faulthandler\n')]
|
def set(username):
import requests
from bs4 import BeautifulSoup
global user
global url
global response
global soup
user = username
url = "https://github.com/"+user
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
user_tg = soup.find('span', class_='p-nickname vcard-username d-block')
    if user_tg is None:
        return "invalid username " + username
def name():
full_name = soup.find('span', class_='p-name vcard-fullname d-block overflow-hidden')
return full_name.text.strip()
def username():
user = soup.find('span', class_='p-nickname vcard-username d-block')
return user.text.strip()
def bio():
bio = soup.find('div', class_='p-note user-profile-bio mb-3 js-user-profile-bio f4')
    if bio is not None and bio.div is not None:
return bio.div.text
def followers():
people = soup.find_all('span', class_='text-bold color-text-primary')
if len(people) != 0:
return people[0].text
def following():
people = soup.find_all('span', class_='text-bold color-text-primary')
if len(people) != 0:
return people[1].text
def star():
people = soup.find_all('span', class_='text-bold color-text-primary')
if len(people) != 0:
return people[2].text
def organization():
org = soup.find('span', class_='p-org')
    if org is not None:
return org.text
def location():
loc = soup.find('span', class_='p-label')
    if loc is not None:
return loc.text
def website():
website = soup.find('li', class_='vcard-detail pt-1 css-truncate css-truncate-target')
    if website is not None:
return website.a['href']
def count_repositories():
repo = soup.find('div', class_='UnderlineNav width-full box-shadow-none').find_all('span')
return repo[0].text
def count_projects():
proj = soup.find('div', class_='UnderlineNav width-full box-shadow-none').find_all('span')
return proj[1].text
def info():
print("Name:",name())
print("Username:",username())
print("Bio:",bio())
print("Followers:",followers())
print("Following:",following())
print("Stars:",star())
print("Organization:",organization())
print("Location:",location())
print("Website:",website())
print("Repositories:",count_repositories())
print("Projects:",count_projects())
def get(username):
try:
import requests
from bs4 import BeautifulSoup
url = 'https://github.com/'+username
response = requests.get(url)
if response.status_code != 200:
print("Searching Failed!")
else:
soup = BeautifulSoup(response.text, 'html.parser')
user_tg = soup.find('span', class_='p-nickname vcard-username d-block')
            if user_tg is not None:
user = user_tg.text.strip()
name_tg = soup.find('span', class_='p-name vcard-fullname d-block overflow-hidden')
name = name_tg.text.strip()
bio_tg = soup.find('div', class_='p-note user-profile-bio mb-3 js-user-profile-bio f4')
bio = ''
                if bio_tg is not None and bio_tg.div is not None:
bio = bio_tg.div.text
public_tg = soup.find_all('span', class_='text-bold color-text-primary')
followers = 0
followings = 0
stars = 0
if len(public_tg) != 0:
followers = public_tg[0].text
followings = public_tg[1].text
stars = public_tg[2].text
org_tg = soup.find('span', class_='p-org')
organization = ''
                if org_tg is not None:
organization = org_tg.text
loc_tg = soup.find('span', class_='p-label')
location = ''
                if loc_tg is not None:
location = loc_tg.text
web_tg = soup.find('li', class_='vcard-detail pt-1 css-truncate css-truncate-target')
website = ''
                if web_tg is not None:
website = web_tg.a['href']
repo_proj_tg = soup.find('div', class_='UnderlineNav width-full box-shadow-none').find_all('span')
repositories = repo_proj_tg[0].text
projects = repo_proj_tg[1].text
return {
"name": name,
"username": user,
"bio": bio,
"followers": followers,
"followings": followings,
"stars": stars,
"organization": organization,
"location": location,
"website": website,
"repositories": repositories,
"projects": projects
}
else:
return "invalid username "+username
except requests.exceptions.RequestException:
print("Connection Error")
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((212, 229), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (224, 229), False, 'import requests\n'), ((241, 284), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (254, 284), False, 'from bs4 import BeautifulSoup\n'), ((2519, 2536), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2531, 2536), False, 'import requests\n'), ((2658, 2701), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (2671, 2701), False, 'from bs4 import BeautifulSoup\n')]
|
"""
Class for the products produced by Acme Corportation.
"""
from random import randint
class Product:
"""Product class parameters:
name(=None)
price(=10)
weight(=20)
flammability(=0.5)
Methods:
stealability (Determines likelihood of product theft.)
explode (Likelihood the product will explode)
"""
    def __init__(self, name=None, price=10, weight=20, flammability=0.5,
                 identifier=None):
        self.name = name
        self.price = price
        self.weight = weight
        self.flammability = flammability
        # a randint() default argument is evaluated only once, at class
        # definition time, so every instance would share one id; draw it here
        self.identifier = identifier if identifier is not None else randint(1000000, 10000000)
def stealability(self):
"""Determines likelihood of product theft."""
self.stealable = self.price/self.weight
# print (self.price, self.weight, self.stealable)
if self.stealable < 0.5:
print("Not so stealable...")
elif self.stealable >= .5 and self.stealable < 1.0:
return("Kinda stealable.")
else:
return("Very stealable!")
def explode(self):
"""Likelihood the product will explode"""
self.expl = self.weight*self.flammability
# print(self.expl)
if self.expl < 10:
return("...fizzle")
        elif self.expl >= 10 and self.expl < 50:
return("...boom!")
else:
return("...BABOOM!!")
class BoxingGlove(Product):
"""A special specific thing which adds punch!"""
weight = 10
    def __init__(self, name=None, price=10, weight=10, flammability=0.5,
                 identifier=None):
super().__init__(name=name, price=price, weight=weight,
flammability=flammability, identifier=identifier)
def explode(self):
"""Gloves don't explode"""
return("...it's a glove.")
def punch(self, weight=10):
"""How hard is that punch?"""
if self.weight < 5:
return("That tickles.")
        elif self.weight >= 5 and self.weight < 15:
return("Hey that hurt!")
else:
return("OUCH!")
|
[
"random.randint"
] |
[((441, 467), 'random.randint', 'randint', (['(1000000)', '(10000000)'], {}), '(1000000, 10000000)\n', (448, 467), False, 'from random import randint\n'), ((1564, 1590), 'random.randint', 'randint', (['(1000000)', '(10000000)'], {}), '(1000000, 10000000)\n', (1571, 1590), False, 'from random import randint\n')]
|
import random
from locust import User, task, between
from locust_task import MilvusTask
from client import MilvusClient
from milvus import DataType
connection_type = "single"
host = "192.168.1.6"
port = 19530
collection_name = "create_collection_hello"
dim = 128
nb = 50000
m = MilvusClient(host=host, port=port, collection_name=collection_name)
# m.clean_db()
m.create_collection(dim, data_type=DataType.FLOAT_VECTOR, auto_id=True, other_fields=None)
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
entities = m.generate_entities(vectors)
class FlushTask(User):
wait_time = between(0.001, 0.002)
if connection_type == "single":
client = MilvusTask(m=m)
else:
client = MilvusTask(host=host, port=port, collection_name=collection_name)
@task(1)
def insert(self):
self.client.insert(entities)
# @task(1)
# def create_partition(self):
# tag = 'tag_'.join(random.choice(string.ascii_letters) for _ in range(8))
# self.client.create_partition(tag, collection_name)
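# Hedged run note (illustrative; assumes a reachable Milvus at the host/port
# above and that locust is installed):
#   locust -f this_file.py --headless -u 4 -r 2 --run-time 1m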
|
[
"locust_task.MilvusTask",
"client.MilvusClient",
"random.random",
"locust.between",
"locust.task"
] |
[((279, 346), 'client.MilvusClient', 'MilvusClient', ([], {'host': 'host', 'port': 'port', 'collection_name': 'collection_name'}), '(host=host, port=port, collection_name=collection_name)\n', (291, 346), False, 'from client import MilvusClient\n'), ((603, 624), 'locust.between', 'between', (['(0.001)', '(0.002)'], {}), '(0.001, 0.002)\n', (610, 624), False, 'from locust import User, task, between\n'), ((793, 800), 'locust.task', 'task', (['(1)'], {}), '(1)\n', (797, 800), False, 'from locust import User, task, between\n'), ((465, 480), 'random.random', 'random.random', ([], {}), '()\n', (478, 480), False, 'import random\n'), ((678, 693), 'locust_task.MilvusTask', 'MilvusTask', ([], {'m': 'm'}), '(m=m)\n', (688, 693), False, 'from locust_task import MilvusTask\n'), ((721, 786), 'locust_task.MilvusTask', 'MilvusTask', ([], {'host': 'host', 'port': 'port', 'collection_name': 'collection_name'}), '(host=host, port=port, collection_name=collection_name)\n', (731, 786), False, 'from locust_task import MilvusTask\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a ResNet model on the ImageNet dataset using custom training loops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from official.vision.image_classification import imagenet_preprocessing
from official.vision.image_classification import common
from official.vision.image_classification import resnet_model
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
from official.utils.misc import model_helpers
flags.DEFINE_boolean(name='use_tf_function', default=True,
help='Wrap the train and test step inside a '
'tf.function.')
flags.DEFINE_boolean(name='single_l2_loss_op', default=False,
help='Calculate L2_loss on concatenated weights, '
'instead of using Keras per-layer L2 loss.')
def build_stats(train_result, eval_result, time_callback):
"""Normalizes and returns dictionary of stats.
Args:
train_result: The final loss at training time.
eval_result: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
time_callback: Time tracking callback instance.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_result:
stats['eval_loss'] = eval_result[0]
stats['eval_acc'] = eval_result[1]
stats['train_loss'] = train_result[0]
stats['train_acc'] = train_result[1]
if time_callback:
timestamp_log = time_callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = time_callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
time_callback.batch_size * time_callback.log_steps *
(len(time_callback.timestamp_log) - 1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
def get_input_dataset(flags_obj, strategy):
"""Returns the test and train input datasets."""
dtype = flags_core.get_tf_dtype(flags_obj)
use_dataset_fn = isinstance(strategy, tf.distribute.experimental.TPUStrategy)
batch_size = flags_obj.batch_size
if use_dataset_fn:
if batch_size % strategy.num_replicas_in_sync != 0:
raise ValueError(
'Batch size must be divisible by number of replicas : {}'.format(
strategy.num_replicas_in_sync))
# As auto rebatching is not supported in
# `experimental_distribute_datasets_from_function()` API, which is
# required when cloning dataset to multiple workers in eager mode,
# we use per-replica batch size.
batch_size = int(batch_size / strategy.num_replicas_in_sync)
if flags_obj.use_synthetic_data:
input_fn = common.get_synth_input_fn(
height=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
width=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
num_channels=imagenet_preprocessing.NUM_CHANNELS,
num_classes=imagenet_preprocessing.NUM_CLASSES,
dtype=dtype,
drop_remainder=True)
else:
input_fn = imagenet_preprocessing.input_fn
def _train_dataset_fn(ctx=None):
train_ds = input_fn(
is_training=True,
data_dir=flags_obj.data_dir,
batch_size=batch_size,
parse_record_fn=imagenet_preprocessing.parse_record,
datasets_num_private_threads=flags_obj.datasets_num_private_threads,
dtype=dtype,
input_context=ctx,
drop_remainder=True)
return train_ds
if strategy:
if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
train_ds = strategy.experimental_distribute_datasets_from_function(_train_dataset_fn)
else:
train_ds = strategy.experimental_distribute_dataset(_train_dataset_fn())
else:
train_ds = _train_dataset_fn()
test_ds = None
if not flags_obj.skip_eval:
def _test_data_fn(ctx=None):
test_ds = input_fn(
is_training=False,
data_dir=flags_obj.data_dir,
batch_size=batch_size,
parse_record_fn=imagenet_preprocessing.parse_record,
dtype=dtype,
input_context=ctx)
return test_ds
if strategy:
if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
test_ds = strategy.experimental_distribute_datasets_from_function(
_test_data_fn)
else:
test_ds = strategy.experimental_distribute_dataset(_test_data_fn())
else:
test_ds = _test_data_fn()
return train_ds, test_ds
def get_num_train_iterations(flags_obj):
"""Returns the number of training steps, train and test epochs."""
train_steps = (
imagenet_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)
train_epochs = flags_obj.train_epochs
if flags_obj.train_steps:
train_steps = min(flags_obj.train_steps, train_steps)
train_epochs = 1
eval_steps = (
imagenet_preprocessing.NUM_IMAGES['validation'] // flags_obj.batch_size)
return train_steps, train_epochs, eval_steps
def _steps_to_run(steps_in_current_epoch, steps_per_epoch, steps_per_loop):
"""Calculates steps to run on device."""
if steps_per_loop <= 0:
raise ValueError('steps_per_loop should be positive integer.')
if steps_per_loop == 1:
return steps_per_loop
return min(steps_per_loop, steps_per_epoch - steps_in_current_epoch)
def run(flags_obj):
"""Run ResNet ImageNet training and eval loop using custom training loops.
Args:
flags_obj: An object containing parsed flag values.
Raises:
ValueError: If fp16 is passed as it is not currently supported.
Returns:
Dictionary of training and eval stats.
"""
keras_utils.set_session_config(
enable_eager=flags_obj.enable_eager,
enable_xla=flags_obj.enable_xla)
dtype = flags_core.get_tf_dtype(flags_obj)
if dtype == tf.float16:
policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
'mixed_float16')
tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
elif dtype == tf.bfloat16:
policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
'mixed_bfloat16')
tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
# This only affects GPU.
common.set_cudnn_batchnorm_mode()
# TODO(anj-s): Set data_format without using Keras.
data_format = flags_obj.data_format
if data_format is None:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
tf.keras.backend.set_image_data_format(data_format)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=flags_obj.num_gpus,
all_reduce_alg=flags_obj.all_reduce_alg,
num_packs=flags_obj.num_packs,
tpu_address=flags_obj.tpu)
train_ds, test_ds = get_input_dataset(flags_obj, strategy)
per_epoch_steps, train_epochs, eval_steps = get_num_train_iterations(
flags_obj)
steps_per_loop = min(flags_obj.steps_per_loop, per_epoch_steps)
logging.info("Training %d epochs, each epoch has %d steps, "
"total steps: %d; Eval %d steps",
train_epochs, per_epoch_steps, train_epochs * per_epoch_steps,
eval_steps)
time_callback = keras_utils.TimeHistory(flags_obj.batch_size,
flags_obj.log_steps)
with distribution_utils.get_strategy_scope(strategy):
resnet_model.change_keras_layer(flags_obj.use_tf_keras_layers)
model = resnet_model.resnet50(
num_classes=imagenet_preprocessing.NUM_CLASSES,
batch_size=flags_obj.batch_size,
use_l2_regularizer=not flags_obj.single_l2_loss_op)
lr_schedule = common.PiecewiseConstantDecayWithWarmup(
batch_size=flags_obj.batch_size,
epoch_size=imagenet_preprocessing.NUM_IMAGES['train'],
warmup_epochs=common.LR_SCHEDULE[0][1],
boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]),
multipliers=list(p[0] for p in common.LR_SCHEDULE),
compute_lr_on_cpu=True)
optimizer = common.get_optimizer(lr_schedule)
if dtype == tf.float16:
loss_scale = flags_core.get_loss_scale(flags_obj, default_for_fp16=128)
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
optimizer, loss_scale)
elif flags_obj.fp16_implementation == 'graph_rewrite':
# `dtype` is still float32 in this case. We built the graph in float32 and
# let the graph rewrite change parts of it float16.
if not flags_obj.use_tf_function:
raise ValueError('--fp16_implementation=graph_rewrite requires '
'--use_tf_function to be true')
loss_scale = flags_core.get_loss_scale(flags_obj, default_for_fp16=128)
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimizer, loss_scale)
current_step = 0
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir)
if latest_checkpoint:
checkpoint.restore(latest_checkpoint)
logging.info("Load checkpoint %s", latest_checkpoint)
current_step = optimizer.iterations.numpy()
train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
'training_accuracy', dtype=tf.float32)
test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
'test_accuracy', dtype=tf.float32)
trainable_variables = model.trainable_variables
def step_fn(inputs):
"""Per-Replica StepFn."""
images, labels = inputs
with tf.GradientTape() as tape:
logits = model(images, training=True)
prediction_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, logits)
loss = tf.reduce_sum(prediction_loss) * (1.0/ flags_obj.batch_size)
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
if flags_obj.single_l2_loss_op:
l2_loss = resnet_model.L2_WEIGHT_DECAY * 2 * tf.add_n([
tf.nn.l2_loss(v)
for v in trainable_variables
if 'bn' not in v.name
])
loss += (l2_loss / num_replicas)
else:
loss += (tf.reduce_sum(model.losses) / num_replicas)
# Scale the loss
if flags_obj.dtype == "fp16":
loss = optimizer.get_scaled_loss(loss)
grads = tape.gradient(loss, trainable_variables)
# Unscale the grads
if flags_obj.dtype == "fp16":
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, trainable_variables))
train_loss.update_state(loss)
training_accuracy.update_state(labels, logits)
@tf.function
def train_steps(iterator, steps):
"""Performs distributed training steps in a loop."""
for _ in tf.range(steps):
strategy.experimental_run_v2(step_fn, args=(next(iterator),))
def train_single_step(iterator):
if strategy:
strategy.experimental_run_v2(step_fn, args=(next(iterator),))
else:
return step_fn(next(iterator))
def test_step(iterator):
"""Evaluation StepFn."""
def step_fn(inputs):
images, labels = inputs
logits = model(images, training=False)
loss = tf.keras.losses.sparse_categorical_crossentropy(labels,
logits)
loss = tf.reduce_sum(loss) * (1.0/ flags_obj.batch_size)
test_loss.update_state(loss)
test_accuracy.update_state(labels, logits)
if strategy:
strategy.experimental_run_v2(step_fn, args=(next(iterator),))
else:
step_fn(next(iterator))
if flags_obj.use_tf_function:
train_single_step = tf.function(train_single_step)
test_step = tf.function(test_step)
if flags_obj.enable_tensorboard:
summary_writer = tf.summary.create_file_writer(flags_obj.model_dir)
else:
summary_writer = None
train_iter = iter(train_ds)
time_callback.on_train_begin()
for epoch in range(current_step // per_epoch_steps, train_epochs):
train_loss.reset_states()
training_accuracy.reset_states()
steps_in_current_epoch = 0
while steps_in_current_epoch < per_epoch_steps:
time_callback.on_batch_begin(
steps_in_current_epoch+epoch*per_epoch_steps)
steps = _steps_to_run(steps_in_current_epoch, per_epoch_steps,
steps_per_loop)
if steps == 1:
train_single_step(train_iter)
else:
# Converts steps to a Tensor to avoid tf.function retracing.
train_steps(train_iter, tf.convert_to_tensor(steps, dtype=tf.int32))
time_callback.on_batch_end(
steps_in_current_epoch+epoch*per_epoch_steps)
steps_in_current_epoch += steps
logging.info('Training loss: %s, accuracy: %s at epoch %d',
train_loss.result().numpy(),
training_accuracy.result().numpy(),
epoch + 1)
if (not flags_obj.skip_eval and
(epoch + 1) % flags_obj.epochs_between_evals == 0):
test_loss.reset_states()
test_accuracy.reset_states()
test_iter = iter(test_ds)
for _ in range(eval_steps):
test_step(test_iter)
logging.info('Test loss: %s, accuracy: %s%% at epoch: %d',
test_loss.result().numpy(),
test_accuracy.result().numpy(),
epoch + 1)
if flags_obj.enable_checkpoint_and_export:
checkpoint_name = checkpoint.save(
os.path.join(flags_obj.model_dir,
'model.ckpt-{}'.format(epoch + 1)))
logging.info('Saved checkpoint to %s', checkpoint_name)
if summary_writer:
current_steps = steps_in_current_epoch + (epoch * per_epoch_steps)
with summary_writer.as_default():
tf.summary.scalar('train_loss', train_loss.result(), current_steps)
tf.summary.scalar(
'train_accuracy', training_accuracy.result(), current_steps)
tf.summary.scalar('eval_loss', test_loss.result(), current_steps)
tf.summary.scalar(
'eval_accuracy', test_accuracy.result(), current_steps)
time_callback.on_train_end()
if summary_writer:
summary_writer.close()
eval_result = None
train_result = None
if not flags_obj.skip_eval:
eval_result = [test_loss.result().numpy(),
test_accuracy.result().numpy()]
train_result = [train_loss.result().numpy(),
training_accuracy.result().numpy()]
stats = build_stats(train_result, eval_result, time_callback)
return stats
def main(_):
model_helpers.apply_clean(flags.FLAGS)
with logger.benchmark_context(flags.FLAGS):
stats = run(flags.FLAGS)
logging.info('Run stats:\n%s', stats)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
common.define_keras_flags()
app.run(main)
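# Hedged run note (illustrative; flag names come from the shared official-models
# flag definitions, paths are assumptions):
#   python this_script.py --data_dir=/data/imagenet --model_dir=/tmp/resnet \
#       --batch_size=256 --train_epochs=90 --distribution_strategy=mirrored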
|
[
"tensorflow.reduce_sum",
"tensorflow.distribute.get_strategy",
"official.utils.misc.keras_utils.set_session_config",
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer",
"tensorflow.train.experimental.enable_mixed_precision_graph_rewrite",
"official.vision.image_classification.resnet_model.resnet50",
"absl.logging.info",
"tensorflow.train.latest_checkpoint",
"official.vision.image_classification.common.define_keras_flags",
"absl.flags.DEFINE_boolean",
"absl.logging.set_verbosity",
"official.utils.flags.core.get_loss_scale",
"tensorflow.compat.v2.keras.mixed_precision.experimental.Policy",
"tensorflow.train.Checkpoint",
"tensorflow.test.is_built_with_cuda",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"official.utils.flags.core.get_tf_dtype",
"official.vision.image_classification.resnet_model.change_keras_layer",
"tensorflow.nn.l2_loss",
"tensorflow.range",
"official.utils.misc.distribution_utils.get_distribution_strategy",
"official.utils.misc.model_helpers.apply_clean",
"official.utils.misc.keras_utils.TimeHistory",
"official.vision.image_classification.common.get_optimizer",
"official.vision.image_classification.common.get_synth_input_fn",
"tensorflow.keras.backend.set_image_data_format",
"tensorflow.convert_to_tensor",
"official.utils.misc.distribution_utils.get_strategy_scope",
"official.vision.image_classification.common.set_cudnn_batchnorm_mode",
"absl.app.run",
"official.utils.logs.logger.benchmark_context",
"tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy",
"tensorflow.function",
"tensorflow.summary.create_file_writer",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.GradientTape"
] |
[((1407, 1529), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', ([], {'name': '"""use_tf_function"""', 'default': '(True)', 'help': '"""Wrap the train and test step inside a tf.function."""'}), "(name='use_tf_function', default=True, help=\n 'Wrap the train and test step inside a tf.function.')\n", (1427, 1529), False, 'from absl import flags\n'), ((1570, 1734), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', ([], {'name': '"""single_l2_loss_op"""', 'default': '(False)', 'help': '"""Calculate L2_loss on concatenated weights, instead of using Keras per-layer L2 loss."""'}), "(name='single_l2_loss_op', default=False, help=\n 'Calculate L2_loss on concatenated weights, instead of using Keras per-layer L2 loss.'\n )\n", (1590, 1734), False, 'from absl import flags\n'), ((2918, 2952), 'official.utils.flags.core.get_tf_dtype', 'flags_core.get_tf_dtype', (['flags_obj'], {}), '(flags_obj)\n', (2941, 2952), True, 'from official.utils.flags import core as flags_core\n'), ((6526, 6630), 'official.utils.misc.keras_utils.set_session_config', 'keras_utils.set_session_config', ([], {'enable_eager': 'flags_obj.enable_eager', 'enable_xla': 'flags_obj.enable_xla'}), '(enable_eager=flags_obj.enable_eager,\n enable_xla=flags_obj.enable_xla)\n', (6556, 6630), False, 'from official.utils.misc import keras_utils\n'), ((6651, 6685), 'official.utils.flags.core.get_tf_dtype', 'flags_core.get_tf_dtype', (['flags_obj'], {}), '(flags_obj)\n', (6674, 6685), True, 'from official.utils.flags import core as flags_core\n'), ((7102, 7135), 'official.vision.image_classification.common.set_cudnn_batchnorm_mode', 'common.set_cudnn_batchnorm_mode', ([], {}), '()\n', (7133, 7135), False, 'from official.vision.image_classification import common\n'), ((7366, 7417), 'tensorflow.keras.backend.set_image_data_format', 'tf.keras.backend.set_image_data_format', (['data_format'], {}), '(data_format)\n', (7404, 7417), True, 'import tensorflow as tf\n'), ((7432, 7672), 'official.utils.misc.distribution_utils.get_distribution_strategy', 'distribution_utils.get_distribution_strategy', ([], {'distribution_strategy': 'flags_obj.distribution_strategy', 'num_gpus': 'flags_obj.num_gpus', 'all_reduce_alg': 'flags_obj.all_reduce_alg', 'num_packs': 'flags_obj.num_packs', 'tpu_address': 'flags_obj.tpu'}), '(distribution_strategy=\n flags_obj.distribution_strategy, num_gpus=flags_obj.num_gpus,\n all_reduce_alg=flags_obj.all_reduce_alg, num_packs=flags_obj.num_packs,\n tpu_address=flags_obj.tpu)\n', (7476, 7672), False, 'from official.utils.misc import distribution_utils\n'), ((7910, 8091), 'absl.logging.info', 'logging.info', (['"""Training %d epochs, each epoch has %d steps, total steps: %d; Eval %d steps"""', 'train_epochs', 'per_epoch_steps', '(train_epochs * per_epoch_steps)', 'eval_steps'], {}), "(\n 'Training %d epochs, each epoch has %d steps, total steps: %d; Eval %d steps'\n , train_epochs, per_epoch_steps, train_epochs * per_epoch_steps, eval_steps\n )\n", (7922, 8091), False, 'from absl import logging\n'), ((8144, 8210), 'official.utils.misc.keras_utils.TimeHistory', 'keras_utils.TimeHistory', (['flags_obj.batch_size', 'flags_obj.log_steps'], {}), '(flags_obj.batch_size, flags_obj.log_steps)\n', (8167, 8210), False, 'from official.utils.misc import keras_utils\n'), ((15802, 15840), 'official.utils.misc.model_helpers.apply_clean', 'model_helpers.apply_clean', (['flags.FLAGS'], {}), '(flags.FLAGS)\n', (15827, 15840), False, 'from official.utils.misc import model_helpers\n'), ((15918, 15955), 'absl.logging.info', 'logging.info', 
(['"""Run stats:\n%s"""', 'stats'], {}), "('Run stats:\\n%s', stats)\n", (15930, 15955), False, 'from absl import logging\n'), ((15987, 16022), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (16008, 16022), False, 'from absl import logging\n'), ((16025, 16052), 'official.vision.image_classification.common.define_keras_flags', 'common.define_keras_flags', ([], {}), '()\n', (16050, 16052), False, 'from official.vision.image_classification import common\n'), ((16055, 16068), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (16062, 16068), False, 'from absl import app\n'), ((3633, 3903), 'official.vision.image_classification.common.get_synth_input_fn', 'common.get_synth_input_fn', ([], {'height': 'imagenet_preprocessing.DEFAULT_IMAGE_SIZE', 'width': 'imagenet_preprocessing.DEFAULT_IMAGE_SIZE', 'num_channels': 'imagenet_preprocessing.NUM_CHANNELS', 'num_classes': 'imagenet_preprocessing.NUM_CLASSES', 'dtype': 'dtype', 'drop_remainder': '(True)'}), '(height=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,\n width=imagenet_preprocessing.DEFAULT_IMAGE_SIZE, num_channels=\n imagenet_preprocessing.NUM_CHANNELS, num_classes=imagenet_preprocessing\n .NUM_CLASSES, dtype=dtype, drop_remainder=True)\n', (3658, 3903), False, 'from official.vision.image_classification import common\n'), ((6725, 6796), 'tensorflow.compat.v2.keras.mixed_precision.experimental.Policy', 'tf.compat.v2.keras.mixed_precision.experimental.Policy', (['"""mixed_float16"""'], {}), "('mixed_float16')\n", (6779, 6796), True, 'import tensorflow as tf\n'), ((6810, 6876), 'tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy', 'tf.compat.v2.keras.mixed_precision.experimental.set_policy', (['policy'], {}), '(policy)\n', (6868, 6876), True, 'import tensorflow as tf\n'), ((8261, 8308), 'official.utils.misc.distribution_utils.get_strategy_scope', 'distribution_utils.get_strategy_scope', (['strategy'], {}), '(strategy)\n', (8298, 8308), False, 'from official.utils.misc import distribution_utils\n'), ((8314, 8376), 'official.vision.image_classification.resnet_model.change_keras_layer', 'resnet_model.change_keras_layer', (['flags_obj.use_tf_keras_layers'], {}), '(flags_obj.use_tf_keras_layers)\n', (8345, 8376), False, 'from official.vision.image_classification import resnet_model\n'), ((8389, 8552), 'official.vision.image_classification.resnet_model.resnet50', 'resnet_model.resnet50', ([], {'num_classes': 'imagenet_preprocessing.NUM_CLASSES', 'batch_size': 'flags_obj.batch_size', 'use_l2_regularizer': '(not flags_obj.single_l2_loss_op)'}), '(num_classes=imagenet_preprocessing.NUM_CLASSES,\n batch_size=flags_obj.batch_size, use_l2_regularizer=not flags_obj.\n single_l2_loss_op)\n', (8410, 8552), False, 'from official.vision.image_classification import resnet_model\n'), ((8952, 8985), 'official.vision.image_classification.common.get_optimizer', 'common.get_optimizer', (['lr_schedule'], {}), '(lr_schedule)\n', (8972, 8985), False, 'from official.vision.image_classification import common\n'), ((9798, 9851), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'model', 'optimizer': 'optimizer'}), '(model=model, optimizer=optimizer)\n', (9817, 9851), True, 'import tensorflow as tf\n'), ((9876, 9923), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['flags_obj.model_dir'], {}), '(flags_obj.model_dir)\n', (9902, 9923), True, 'import tensorflow as tf\n'), ((10122, 10175), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""train_loss"""'], 
{'dtype': 'tf.float32'}), "('train_loss', dtype=tf.float32)\n", (10143, 10175), True, 'import tensorflow as tf\n'), ((10200, 10286), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', (['"""training_accuracy"""'], {'dtype': 'tf.float32'}), "('training_accuracy', dtype=tf.\n float32)\n", (10242, 10286), True, 'import tensorflow as tf\n'), ((10307, 10359), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""test_loss"""'], {'dtype': 'tf.float32'}), "('test_loss', dtype=tf.float32)\n", (10328, 10359), True, 'import tensorflow as tf\n'), ((10380, 10457), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', (['"""test_accuracy"""'], {'dtype': 'tf.float32'}), "('test_accuracy', dtype=tf.float32)\n", (10422, 10457), True, 'import tensorflow as tf\n'), ((15848, 15885), 'official.utils.logs.logger.benchmark_context', 'logger.benchmark_context', (['flags.FLAGS'], {}), '(flags.FLAGS)\n', (15872, 15885), False, 'from official.utils.logs import logger\n'), ((6919, 6991), 'tensorflow.compat.v2.keras.mixed_precision.experimental.Policy', 'tf.compat.v2.keras.mixed_precision.experimental.Policy', (['"""mixed_bfloat16"""'], {}), "('mixed_bfloat16')\n", (6973, 6991), True, 'import tensorflow as tf\n'), ((7005, 7071), 'tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy', 'tf.compat.v2.keras.mixed_precision.experimental.set_policy', (['policy'], {}), '(policy)\n', (7063, 7071), True, 'import tensorflow as tf\n'), ((7313, 7341), 'tensorflow.test.is_built_with_cuda', 'tf.test.is_built_with_cuda', ([], {}), '()\n', (7339, 7341), True, 'import tensorflow as tf\n'), ((9034, 9092), 'official.utils.flags.core.get_loss_scale', 'flags_core.get_loss_scale', (['flags_obj'], {'default_for_fp16': '(128)'}), '(flags_obj, default_for_fp16=128)\n', (9059, 9092), True, 'from official.utils.flags import core as flags_core\n'), ((9111, 9190), 'tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer', 'tf.keras.mixed_precision.experimental.LossScaleOptimizer', (['optimizer', 'loss_scale'], {}), '(optimizer, loss_scale)\n', (9167, 9190), True, 'import tensorflow as tf\n'), ((10000, 10053), 'absl.logging.info', 'logging.info', (['"""Load checkpoint %s"""', 'latest_checkpoint'], {}), "('Load checkpoint %s', latest_checkpoint)\n", (10012, 10053), False, 'from absl import logging\n'), ((11869, 11884), 'tensorflow.range', 'tf.range', (['steps'], {}), '(steps)\n', (11877, 11884), True, 'import tensorflow as tf\n'), ((12791, 12821), 'tensorflow.function', 'tf.function', (['train_single_step'], {}), '(train_single_step)\n', (12802, 12821), True, 'import tensorflow as tf\n'), ((12840, 12862), 'tensorflow.function', 'tf.function', (['test_step'], {}), '(test_step)\n', (12851, 12862), True, 'import tensorflow as tf\n'), ((12924, 12974), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['flags_obj.model_dir'], {}), '(flags_obj.model_dir)\n', (12953, 12974), True, 'import tensorflow as tf\n'), ((9589, 9647), 'official.utils.flags.core.get_loss_scale', 'flags_core.get_loss_scale', (['flags_obj'], {'default_for_fp16': '(128)'}), '(flags_obj, default_for_fp16=128)\n', (9614, 9647), True, 'from official.utils.flags import core as flags_core\n'), ((9666, 9751), 'tensorflow.train.experimental.enable_mixed_precision_graph_rewrite', 'tf.train.experimental.enable_mixed_precision_graph_rewrite', (['optimizer', 'loss_scale'], {}), '(optimizer,\n loss_scale)\n', (9724, 9751), True, 'import 
tensorflow as tf\n'), ((10619, 10636), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10634, 10636), True, 'import tensorflow as tf\n'), ((10719, 10782), 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'tf.keras.losses.sparse_categorical_crossentropy', (['labels', 'logits'], {}), '(labels, logits)\n', (10766, 10782), True, 'import tensorflow as tf\n'), ((12316, 12379), 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'tf.keras.losses.sparse_categorical_crossentropy', (['labels', 'logits'], {}), '(labels, logits)\n', (12363, 12379), True, 'import tensorflow as tf\n'), ((14768, 14823), 'absl.logging.info', 'logging.info', (['"""Saved checkpoint to %s"""', 'checkpoint_name'], {}), "('Saved checkpoint to %s', checkpoint_name)\n", (14780, 14823), False, 'from absl import logging\n'), ((10811, 10841), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['prediction_loss'], {}), '(prediction_loss)\n', (10824, 10841), True, 'import tensorflow as tf\n'), ((10895, 10923), 'tensorflow.distribute.get_strategy', 'tf.distribute.get_strategy', ([], {}), '()\n', (10921, 10923), True, 'import tensorflow as tf\n'), ((12458, 12477), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (12471, 12477), True, 'import tensorflow as tf\n'), ((11252, 11279), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['model.losses'], {}), '(model.losses)\n', (11265, 11279), True, 'import tensorflow as tf\n'), ((13706, 13749), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['steps'], {'dtype': 'tf.int32'}), '(steps, dtype=tf.int32)\n', (13726, 13749), True, 'import tensorflow as tf\n'), ((11066, 11082), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (11079, 11082), True, 'import tensorflow as tf\n')]
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Optional
import mxnet as mx
import numpy as np
import sockeye.constants as C
logger = logging.getLogger(__name__)
def get_initializer(default_init_type: str, default_init_scale: float, default_init_xavier_rand_type: str,
default_init_xavier_factor_type: str, embed_init_type: str, embed_init_sigma: float,
rnn_init_type: str) -> mx.initializer.Initializer:
"""
Returns a mixed MXNet initializer.
:param default_init_type: The default weight initializer type.
:param default_init_scale: The scale used for default weight initialization (only used with uniform initialization).
:param default_init_xavier_rand_type: Xavier random number generator type.
:param default_init_xavier_factor_type: Xavier factor type.
:param embed_init_type: Embedding matrix initialization type.
:param embed_init_sigma: Sigma for normal initialization of embedding matrix.
:param rnn_init_type: Initialization type for RNN h2h matrices.
:return: Mixed initializer.
"""
# default initializer
if default_init_type == C.INIT_XAVIER:
default_init = [(C.DEFAULT_INIT_PATTERN,
mx.init.Xavier(rnd_type=default_init_xavier_rand_type,
factor_type=default_init_xavier_factor_type,
magnitude=default_init_scale))]
elif default_init_type == C.INIT_UNIFORM:
default_init = [(C.DEFAULT_INIT_PATTERN, mx.init.Uniform(scale=default_init_scale))]
else:
raise ValueError("Unknown default initializer %s." % default_init_type)
# embedding initializer
if embed_init_type == C.EMBED_INIT_NORMAL:
embed_init = [(C.EMBED_INIT_PATTERN, mx.init.Normal(sigma=embed_init_sigma))]
elif embed_init_type == C.EMBED_INIT_DEFAULT:
embed_init = []
else:
raise ValueError('Unknown embedding initializer: %s' % embed_init_type)
# rnn initializer
if rnn_init_type == C.RNN_INIT_ORTHOGONAL:
rnn_init = [(C.RNN_INIT_PATTERN, mx.initializer.Orthogonal())]
elif rnn_init_type == C.RNN_INIT_ORTHOGONAL_STACKED:
rnn_init = [(C.RNN_INIT_PATTERN, StackedOrthogonalInit(scale=1.0, rand_type="eye"))]
elif rnn_init_type == C.RNN_INIT_DEFAULT:
rnn_init = []
else:
raise ValueError('Unknown RNN initializer: %s' % rnn_init_type)
params_init_pairs = embed_init + rnn_init + default_init
return mx.initializer.Mixed(*zip(*params_init_pairs))
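# --- Illustrative usage sketch (added for clarity; not part of the original module). It only
# uses the constant names already referenced above (sockeye.constants as C) and the function
# defined above; the numeric values are made-up example values, not recommended settings. ---
def _example_mixed_initializer() -> mx.initializer.Initializer:
    """Hypothetical example: Xavier default init, normal embedding init, orthogonal RNN init."""
    return get_initializer(default_init_type=C.INIT_XAVIER,
                           default_init_scale=3.0,
                           default_init_xavier_rand_type='uniform',
                           default_init_xavier_factor_type='avg',
                           embed_init_type=C.EMBED_INIT_NORMAL,
                           embed_init_sigma=0.01,
                           rnn_init_type=C.RNN_INIT_ORTHOGONAL)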
@mx.init.register
class StackedOrthogonalInit(mx.initializer.Initializer):
"""
    Initializes the weight as an orthogonal matrix. Here we assume that the weight consists of stacked square matrices of
the same size.
For example one could have 3 (2,2) matrices resulting in a (6,2) matrix. This situation arises in RNNs when one
wants to perform multiple h2h transformations in a single matrix multiplication.
Reference:
Exact solutions to the nonlinear dynamics of learning in deep linear neural networks
arXiv preprint arXiv:1312.6120 (2013).
:param scale: Scaling factor of weight.
:param rand_type: use "uniform" or "normal" random number to initialize weight.
"eye" simply sets the matrix to an identity matrix.
"""
def __init__(self, scale=1.414, rand_type="uniform"):
super().__init__()
self.scale = scale
self.rand_type = rand_type
def _init_weight(self, sym_name, arr):
assert len(arr.shape) == 2, "Only 2d weight matrices supported."
base_dim = arr.shape[1]
stacked_dim = arr.shape[0] # base_dim * num_sub_matrices
assert stacked_dim % base_dim == 0, \
"Dim1 must be a multiple of dim2 (as weight = stacked square matrices)."
num_sub_matrices = stacked_dim // base_dim
logger.info("Initializing weight %s (shape=%s, num_sub_matrices=%d) with an orthogonal weight matrix.",
sym_name, arr.shape, num_sub_matrices)
for mat_idx in range(0, num_sub_matrices):
if self.rand_type == "uniform":
tmp = np.random.uniform(-1.0, 1.0, (base_dim, base_dim))
_, __, q = np.linalg.svd(tmp)
elif self.rand_type == "normal":
tmp = np.random.normal(0.0, 1.0, (base_dim, base_dim))
_, __, q = np.linalg.svd(tmp)
elif self.rand_type == "eye":
q = np.eye(base_dim)
else:
raise ValueError("unknown rand_type %s" % self.rand_type)
q = self.scale * q
arr[mat_idx * base_dim:mat_idx * base_dim + base_dim] = q
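# --- Illustrative sketch (added for clarity; not part of the original module). It mirrors the
# idea described in the class docstring above using plain numpy: stack several square
# orthogonal blocks into one (num_sub_matrices * base_dim, base_dim) weight and verify that
# each block is orthogonal. ---
if __name__ == '__main__':
    base_dim, num_sub_matrices = 2, 3
    weight = np.empty((base_dim * num_sub_matrices, base_dim))
    for i in range(num_sub_matrices):
        _, __, q = np.linalg.svd(np.random.normal(0.0, 1.0, (base_dim, base_dim)))
        weight[i * base_dim:(i + 1) * base_dim] = q
    for i in range(num_sub_matrices):
        block = weight[i * base_dim:(i + 1) * base_dim]
        # block.T @ block should be (numerically) the identity matrix for every stacked block.
        assert np.allclose(block.T @ block, np.eye(base_dim))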
|
[
"mxnet.init.Uniform",
"numpy.random.uniform",
"mxnet.initializer.Orthogonal",
"numpy.linalg.svd",
"mxnet.init.Xavier",
"numpy.random.normal",
"numpy.eye",
"mxnet.init.Normal",
"logging.getLogger"
] |
[((689, 716), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (706, 716), False, 'import logging\n'), ((1780, 1914), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'rnd_type': 'default_init_xavier_rand_type', 'factor_type': 'default_init_xavier_factor_type', 'magnitude': 'default_init_scale'}), '(rnd_type=default_init_xavier_rand_type, factor_type=\n default_init_xavier_factor_type, magnitude=default_init_scale)\n', (1794, 1914), True, 'import mxnet as mx\n'), ((2342, 2380), 'mxnet.init.Normal', 'mx.init.Normal', ([], {'sigma': 'embed_init_sigma'}), '(sigma=embed_init_sigma)\n', (2356, 2380), True, 'import mxnet as mx\n'), ((2658, 2685), 'mxnet.initializer.Orthogonal', 'mx.initializer.Orthogonal', ([], {}), '()\n', (2683, 2685), True, 'import mxnet as mx\n'), ((4712, 4762), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(base_dim, base_dim)'], {}), '(-1.0, 1.0, (base_dim, base_dim))\n', (4729, 4762), True, 'import numpy as np\n'), ((4790, 4808), 'numpy.linalg.svd', 'np.linalg.svd', (['tmp'], {}), '(tmp)\n', (4803, 4808), True, 'import numpy as np\n'), ((2087, 2128), 'mxnet.init.Uniform', 'mx.init.Uniform', ([], {'scale': 'default_init_scale'}), '(scale=default_init_scale)\n', (2102, 2128), True, 'import mxnet as mx\n'), ((4876, 4924), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(base_dim, base_dim)'], {}), '(0.0, 1.0, (base_dim, base_dim))\n', (4892, 4924), True, 'import numpy as np\n'), ((4952, 4970), 'numpy.linalg.svd', 'np.linalg.svd', (['tmp'], {}), '(tmp)\n', (4965, 4970), True, 'import numpy as np\n'), ((5033, 5049), 'numpy.eye', 'np.eye', (['base_dim'], {}), '(base_dim)\n', (5039, 5049), True, 'import numpy as np\n')]
|
from PIL import Image
image = Image.open('monro.jpg')
if image.mode != "RGB":
image = image.convert("RGB")
red, green, blue = image.split()
offset_pix = 50
coordinates_left_offset = (offset_pix, 0, image.width, image.height)
coordinates_bothsides_offset = (offset_pix*0.5, 0, image.width-offset_pix*0.5, image.height)
coordinates_right_offset = (0, 0, image.width-offset_pix, image.height)
cropped_left_red = red.crop(coordinates_left_offset)
cropped_both_red = red.crop(coordinates_bothsides_offset)
cropped_right_red = red.crop(coordinates_right_offset)
offset_red = Image.blend(cropped_right_red, cropped_both_red, 0.28)
cropped_right_blue = blue.crop(coordinates_right_offset)
cropped_left_blue = blue.crop(coordinates_left_offset)
cropped_both_blue = blue.crop(coordinates_bothsides_offset)
offset_blue = Image.blend(cropped_left_blue, cropped_both_blue, 0.72)
cropped_both_green = green.crop(coordinates_bothsides_offset)
new_image = Image.merge("RGB", (offset_red, offset_blue, cropped_both_green))
new_image.save('new_image.jpg')
new_image.thumbnail((80, 80), reducing_gap=3.0)
new_image.save('avatar.jpg')
|
[
"PIL.Image.blend",
"PIL.Image.merge",
"PIL.Image.open"
] |
[((31, 54), 'PIL.Image.open', 'Image.open', (['"""monro.jpg"""'], {}), "('monro.jpg')\n", (41, 54), False, 'from PIL import Image\n'), ((577, 631), 'PIL.Image.blend', 'Image.blend', (['cropped_right_red', 'cropped_both_red', '(0.28)'], {}), '(cropped_right_red, cropped_both_red, 0.28)\n', (588, 631), False, 'from PIL import Image\n'), ((819, 874), 'PIL.Image.blend', 'Image.blend', (['cropped_left_blue', 'cropped_both_blue', '(0.72)'], {}), '(cropped_left_blue, cropped_both_blue, 0.72)\n', (830, 874), False, 'from PIL import Image\n'), ((951, 1016), 'PIL.Image.merge', 'Image.merge', (['"""RGB"""', '(offset_red, offset_blue, cropped_both_green)'], {}), "('RGB', (offset_red, offset_blue, cropped_both_green))\n", (962, 1016), False, 'from PIL import Image\n')]
|
from colorama import Fore, Back
from data_storing.assets.common import Timespan, MeasureUnit
import fundamentals.miscellaneous as fund_utils
from utilities.exchange_rates import Exchange
from utilities.common_methods import Methods as methods
from utilities import log
from utilities.common_methods import getDebugInfo
from utilities.globals import websites
from datetime import datetime, date
from data_storing.assets.database_manager import DatabaseManager as db_mngr
from data_storing.assets.tables import Equity
def get_last_financial_statement(financial_statements):
try:
last_financial_statement = None
last_year = 0
for financial_statement in financial_statements:
if financial_statement.period_length == Timespan.annual and \
financial_statement.period_ending.year > last_year:
last_financial_statement = financial_statement
last_year = financial_statement.period_ending.year
return last_financial_statement
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
def get_annual_financial_statement(financial_statements, year):
try:
financial_statement_of_interest = None
for financial_statement in financial_statements:
if financial_statement.period_ending.year == year and \
financial_statement.period_length == Timespan.annual:
financial_statement_of_interest = financial_statement
break
return financial_statement_of_interest
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
def get_measure_unit_multiplier(measure_unit):
try:
multiplier = None
if measure_unit == MeasureUnit.billion:
multiplier = 1000000000
elif measure_unit == MeasureUnit.million:
multiplier = 1000000
elif measure_unit == MeasureUnit.thousand:
multiplier = 1000
return multiplier
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
def get_exchange_rate(input_var, equity=None):
"""
The method returns the exchange rate of the equity's country with respect to the USD
    @param input_var it could be the equity to get the exchange rate from, or it could be the currency itself.
    @param equity passed in case the input_var is empty, so that we can use the overview currency
@return the exchange rate which was found.
"""
try:
# if the input_var is the equity get it from the stock exchange country
if isinstance(input_var, Equity):
currency = Exchange.country.get(input_var.country)
        # otherwise the input_var is a currency; use it directly, but if it is empty fall back to the equity's overview currency.
elif isinstance(input_var, str):
currency = input_var
if not currency and equity is not None:
currency = methods.validate(equity.overview.currency)
else:
raise Exception("Not correct input to the method get_exchange_rate")
if currency is None:
return None
exchange_rate = 1
if currency != 'USD':
exchange_rate = Exchange.get_rate(currency)
return exchange_rate
except Exception as e:
str_input_var = str(input_var)
info = str(equity)
log.error(f"There is a problem in the code!: input_var = {str_input_var} and {info} {e}\n{getDebugInfo()}")
return None
def is_equity_undesirable(equity):
"""
    The method returns true if the equity is of an undesirable type
@param equity the equity to investigate.
@return true if the equity is of the undesirable type
"""
try:
# Do not include financial sector
#
sector = equity.sector.lower()
industry = equity.industry.lower()
equity_type = equity.equity_type.lower()
if sector in fund_utils.gv.sector_to_avoid or \
industry in fund_utils.gv.industry_to_avoid or \
equity_type in fund_utils.gv.equity_type_to_avoid:
return True
else:
return False
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
def get_dates_for_period_in_days(num_days, year, month):
"""
Given the number of days, the year and the month it returns the dates in between
@param num_days the number of days for the period
@param year the year of interest
@param month the month when to start taking the date.
@return the range of period, starting date, and ending dates
"""
try:
dates = {}
if not year:
dates['end_date'] = datetime.now().date()
else:
dates['end_date'] = date(year + 1, month - 1, 1) # self.year, self.month
dates['start_date'] = methods.backward_days(dates['end_date'], num_days)
return dates
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
def get_testing_year_date_range(year, month):
"""
    Given the year and month in input it returns the range of dates for the investing year.
    @param year the year of interest
    @param month the month when to start taking the dates.
@return the range of period, starting date, and ending dates
"""
try:
dates = {}
if not year:
dates['end_date'] = datetime.now().date()
dates['start_date'] = methods.backward_days(dates['end_date'], 365)
else:
dates['start_date'] = date(year + 1, month, 1)
dates['end_date'] = methods.forward_days(dates['start_date'], 364 + 0) # 28
#dates['start_date_str'] = dates['start_date'].strftime("%m/%d/%Y")
#dates['end_date_str'] = dates['end_date'].strftime("%m/%d/%Y")
return dates
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
def get_next_month_date_given_date(input_date):
"""
    The method takes a date in input and returns the corresponding date in the following month.
@param input_date the date in input to modify
@return the input date of the following month
"""
next_month_date = None
try:
next_month_date = input_date.replace(month=input_date.month + 1)
except ValueError:
if input_date.month == 12:
next_month_date = input_date.replace(year=input_date.year + 1, month=1)
else:
# next month is too short to have "same date"
# pick your own heuristic, or re-raise the exception:
raise Exception("Something wrong with the date to calculate the following month")
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
finally:
return next_month_date
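# --- Illustrative sketch (added for clarity; not part of the original module). The helper below
# is hypothetical and is never called automatically; it only shows the behaviour described in
# the docstring above, using the `date` class already imported at the top of this file. ---
def _demo_get_next_month_date():
    # Simple month increment: November 15th becomes December 15th.
    assert get_next_month_date_given_date(date(2020, 11, 15)) == date(2020, 12, 15)
    # December rolls over into January of the following year.
    assert get_next_month_date_given_date(date(2020, 12, 15)) == date(2021, 1, 15)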
def get_color_and_keyword_gain_loss(delta_capital):
try:
if delta_capital > 0:
color_back = Back.GREEN
color_fore = Fore.BLACK
keyword = "gained"
elif delta_capital < 0:
color_back = Back.RED
color_fore = Fore.WHITE
keyword = "lost"
else: # The same
color_back = ""
color_fore = ""
keyword = "stable"
return color_fore, color_back, keyword
except Exception as e:
log.error(f"There is a problem in the code!: {e}\n{getDebugInfo()}")
######################################
# Code for getting the equity prices #
######################################
def open_browser_driver():
"""
    The method opens the browser driver so that it is ready to scrape data from the website
@return the scraping object with the driver of the browser in it.
"""
try:
# scrape the prices only if we do not have them.
from website_scraping.investing_dot_com.scraping_prices import ScrapingPrices
scraping_prices = ScrapingPrices()
scraping_prices.instantiate_driver(invisible=False)
scraping_prices.add_cookies(websites.investing_dot_com)
return scraping_prices
except Exception as e:
log.error(f"There is a problem opening the browser driver: {e}\n{getDebugInfo()}")
def scrape_monthly_prices(scraping_prices, equity):
"""
    The method accesses the investing website page of the equity to extract recent monthly prices
@param scraping_prices the scraping object with the driver of the browser in it.
@param equity the equity to extract the prices
@return Nothing
"""
try:
scraping_prices.retrieve_monthly_company_historical_data(equity, starting_date="01/01/2014")
except Exception as e:
log.error(f"There is a problem extracting the equity prices: {e}\n{getDebugInfo()}")
def scrape_prices(decile_dictionary):
"""
    Method used to scrape the prices from the website; it checks if the prices are already available and, if not, it scrapes them.
@param decile_dictionary the dictionary of the equity to get the prices.
@return Nothing
"""
try:
# needed to scrape the missing equity prices.
scraping_prices = open_browser_driver()
counter = 0
for equity_id, equity_score in decile_dictionary.items():
counter += 1
print(f"counter = {counter}")
equity = db_mngr.query_equity_by_id(equity_id=equity_id)
if equity.prices:
from datetime import date
# oldest = min(equity.prices, key=lambda price: price.day)
newest_date = max(equity.prices, key=lambda price: price.day)
current_month = date(date.today().year, date.today().month, 1)
if current_month > newest_date.day:
# newest_date = newest.day.strftime("1/%-m/%Y")
# today_date = date.now().strftime("1/%-m/%Y")
scrape_monthly_prices(scraping_prices, equity)
else:
scrape_monthly_prices(scraping_prices, equity)
except Exception as e:
log.error(f"There is a problem extracting the prices: {e}\n{getDebugInfo()}")
#######
# END #
#######
|
[
"website_scraping.investing_dot_com.scraping_prices.ScrapingPrices",
"utilities.common_methods.getDebugInfo",
"datetime.date",
"data_storing.assets.database_manager.DatabaseManager.query_equity_by_id",
"utilities.common_methods.Methods.validate",
"utilities.exchange_rates.Exchange.country.get",
"datetime.date.today",
"utilities.exchange_rates.Exchange.get_rate",
"utilities.common_methods.Methods.forward_days",
"utilities.common_methods.Methods.backward_days",
"datetime.datetime.now"
] |
[((4965, 5015), 'utilities.common_methods.Methods.backward_days', 'methods.backward_days', (["dates['end_date']", 'num_days'], {}), "(dates['end_date'], num_days)\n", (4986, 5015), True, 'from utilities.common_methods import Methods as methods\n'), ((8010, 8026), 'website_scraping.investing_dot_com.scraping_prices.ScrapingPrices', 'ScrapingPrices', ([], {}), '()\n', (8024, 8026), False, 'from website_scraping.investing_dot_com.scraping_prices import ScrapingPrices\n'), ((2701, 2740), 'utilities.exchange_rates.Exchange.country.get', 'Exchange.country.get', (['input_var.country'], {}), '(input_var.country)\n', (2721, 2740), False, 'from utilities.exchange_rates import Exchange\n'), ((3291, 3318), 'utilities.exchange_rates.Exchange.get_rate', 'Exchange.get_rate', (['currency'], {}), '(currency)\n', (3308, 3318), False, 'from utilities.exchange_rates import Exchange\n'), ((4881, 4909), 'datetime.date', 'date', (['(year + 1)', '(month - 1)', '(1)'], {}), '(year + 1, month - 1, 1)\n', (4885, 4909), False, 'from datetime import date\n'), ((5551, 5596), 'utilities.common_methods.Methods.backward_days', 'methods.backward_days', (["dates['end_date']", '(365)'], {}), "(dates['end_date'], 365)\n", (5572, 5596), True, 'from utilities.common_methods import Methods as methods\n'), ((5645, 5669), 'datetime.date', 'date', (['(year + 1)', 'month', '(1)'], {}), '(year + 1, month, 1)\n', (5649, 5669), False, 'from datetime import date\n'), ((5702, 5752), 'utilities.common_methods.Methods.forward_days', 'methods.forward_days', (["dates['start_date']", '(364 + 0)'], {}), "(dates['start_date'], 364 + 0)\n", (5722, 5752), True, 'from utilities.common_methods import Methods as methods\n'), ((9407, 9454), 'data_storing.assets.database_manager.DatabaseManager.query_equity_by_id', 'db_mngr.query_equity_by_id', ([], {'equity_id': 'equity_id'}), '(equity_id=equity_id)\n', (9433, 9454), True, 'from data_storing.assets.database_manager import DatabaseManager as db_mngr\n'), ((3014, 3056), 'utilities.common_methods.Methods.validate', 'methods.validate', (['equity.overview.currency'], {}), '(equity.overview.currency)\n', (3030, 3056), True, 'from utilities.common_methods import Methods as methods\n'), ((4813, 4827), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4825, 4827), False, 'from datetime import datetime, date\n'), ((5495, 5509), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5507, 5509), False, 'from datetime import datetime, date\n'), ((1105, 1119), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (1117, 1119), False, 'from utilities.common_methods import getDebugInfo\n'), ((1670, 1684), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (1682, 1684), False, 'from utilities.common_methods import getDebugInfo\n'), ((2132, 2146), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (2144, 2146), False, 'from utilities.common_methods import getDebugInfo\n'), ((3539, 3553), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (3551, 3553), False, 'from utilities.common_methods import getDebugInfo\n'), ((4340, 4354), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (4352, 4354), False, 'from utilities.common_methods import getDebugInfo\n'), ((5123, 5137), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (5135, 5137), False, 'from utilities.common_methods import getDebugInfo\n'), ((6015, 6029), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', 
([], {}), '()\n', (6027, 6029), False, 'from utilities.common_methods import getDebugInfo\n'), ((6854, 6868), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (6866, 6868), False, 'from utilities.common_methods import getDebugInfo\n'), ((7490, 7504), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (7502, 7504), False, 'from utilities.common_methods import getDebugInfo\n'), ((8282, 8296), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (8294, 8296), False, 'from utilities.common_methods import getDebugInfo\n'), ((8831, 8845), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (8843, 8845), False, 'from utilities.common_methods import getDebugInfo\n'), ((9719, 9731), 'datetime.date.today', 'date.today', ([], {}), '()\n', (9729, 9731), False, 'from datetime import date\n'), ((9738, 9750), 'datetime.date.today', 'date.today', ([], {}), '()\n', (9748, 9750), False, 'from datetime import date\n'), ((10192, 10206), 'utilities.common_methods.getDebugInfo', 'getDebugInfo', ([], {}), '()\n', (10204, 10206), False, 'from utilities.common_methods import getDebugInfo\n')]
|
import uuid
import cherrypy
from sqlalchemy.orm import Query
from deli_counter.http.mounts.root.routes.v1.auth.z.validation_models.roles import ResponseRole, RequestCreateRole, \
ParamsRole, ParamsListRole, RoleType
from ingredients_db.models.authz import AuthZRole
from ingredients_http.request_methods import RequestMethods
from ingredients_http.route import Route
from ingredients_http.router import Router
# TODO: add ability to add policies to a role
# Add ability to list policies in a role
# These roles cannot be added, deleted or modified
protected_roles = [
"admin",
"viewer",
"default_member",
"default_service_account"
]
class AuthZRoleRouter(Router):
def __init__(self):
super().__init__('roles')
@Route(methods=[RequestMethods.POST])
@cherrypy.tools.model_in(cls=RequestCreateRole)
@cherrypy.tools.model_out(cls=ResponseRole)
def create(self):
request: RequestCreateRole = cherrypy.request.model
project = None
if request.type == RoleType.PROJECT:
self.mount.validate_project_scope()
self.mount.enforce_policy("roles:create:project")
project = cherrypy.request.project
else:
self.mount.enforce_policy("roles:create:global")
with cherrypy.request.db_session() as session:
role = session.query(AuthZRole).filter(AuthZRole.name == request.name).first()
if role is not None:
raise cherrypy.HTTPError(409, 'A role with the requested name already exists.')
if request.name.lower() in protected_roles:
raise cherrypy.HTTPError(400, "Cannot create a protected role with the name of " + request.name)
role = AuthZRole()
role.name = request.name
role.description = request.description
if project is not None:
role.project_id = project.id
session.add(role)
session.commit()
session.refresh(role)
return ResponseRole.from_database(role)
@Route('{role_id}')
@cherrypy.tools.model_params(cls=ParamsRole)
@cherrypy.tools.model_out(cls=ResponseRole)
@cherrypy.tools.resource_object(id_param="role_id", cls=AuthZRole)
@cherrypy.tools.enforce_policy(policy_name="roles:get")
def get(self, role_id):
return ResponseRole.from_database(cherrypy.request.resource_object)
@Route()
@cherrypy.tools.model_params(cls=ParamsListRole)
@cherrypy.tools.model_out_pagination(cls=ResponseRole)
@cherrypy.tools.enforce_policy(policy_name="roles:list")
def list(self, type: RoleType, limit: int, marker: uuid.UUID):
if type == RoleType.GLOBAL:
starting_query = Query(AuthZRole).filter(AuthZRole.project_id == None) # noqa: E711
else:
self.mount.validate_project_scope()
starting_query = Query(AuthZRole).filter(AuthZRole.project_id == cherrypy.request.project.id)
return self.paginate(AuthZRole, ResponseRole, limit, marker, starting_query=starting_query)
@Route('{role_id}', methods=[RequestMethods.DELETE])
@cherrypy.tools.model_params(cls=ParamsRole)
@cherrypy.tools.resource_object(id_param="role_id", cls=AuthZRole)
def delete(self, role_id):
cherrypy.response.status = 204
# Fix for https://github.com/cherrypy/cherrypy/issues/1657
del cherrypy.response.headers['Content-Type']
with cherrypy.request.db_session() as session:
role: AuthZRole = session.merge(cherrypy.request.resource_object, load=False)
if role.project_id is not None:
self.mount.validate_project_scope()
self.mount.enforce_policy("roles:delete:project")
if role.project_id != cherrypy.request.project.id:
raise cherrypy.HTTPError(401, "Cannot delete a role in another project.")
else:
self.mount.enforce_policy("roles:delete:global")
if role.name in protected_roles:
raise cherrypy.HTTPError(400, "Cannot delete a protected role with the name of " + role.name)
# TODO: check if role is in use
session.delete(role)
session.commit()
|
[
"cherrypy.tools.model_params",
"deli_counter.http.mounts.root.routes.v1.auth.z.validation_models.roles.ResponseRole.from_database",
"cherrypy.tools.model_out",
"cherrypy.tools.resource_object",
"ingredients_http.route.Route",
"cherrypy.tools.enforce_policy",
"cherrypy.request.db_session",
"ingredients_db.models.authz.AuthZRole",
"sqlalchemy.orm.Query",
"cherrypy.HTTPError",
"cherrypy.tools.model_out_pagination",
"cherrypy.tools.model_in"
] |
[((754, 790), 'ingredients_http.route.Route', 'Route', ([], {'methods': '[RequestMethods.POST]'}), '(methods=[RequestMethods.POST])\n', (759, 790), False, 'from ingredients_http.route import Route\n'), ((796, 842), 'cherrypy.tools.model_in', 'cherrypy.tools.model_in', ([], {'cls': 'RequestCreateRole'}), '(cls=RequestCreateRole)\n', (819, 842), False, 'import cherrypy\n'), ((848, 890), 'cherrypy.tools.model_out', 'cherrypy.tools.model_out', ([], {'cls': 'ResponseRole'}), '(cls=ResponseRole)\n', (872, 890), False, 'import cherrypy\n'), ((2071, 2089), 'ingredients_http.route.Route', 'Route', (['"""{role_id}"""'], {}), "('{role_id}')\n", (2076, 2089), False, 'from ingredients_http.route import Route\n'), ((2095, 2138), 'cherrypy.tools.model_params', 'cherrypy.tools.model_params', ([], {'cls': 'ParamsRole'}), '(cls=ParamsRole)\n', (2122, 2138), False, 'import cherrypy\n'), ((2144, 2186), 'cherrypy.tools.model_out', 'cherrypy.tools.model_out', ([], {'cls': 'ResponseRole'}), '(cls=ResponseRole)\n', (2168, 2186), False, 'import cherrypy\n'), ((2192, 2257), 'cherrypy.tools.resource_object', 'cherrypy.tools.resource_object', ([], {'id_param': '"""role_id"""', 'cls': 'AuthZRole'}), "(id_param='role_id', cls=AuthZRole)\n", (2222, 2257), False, 'import cherrypy\n'), ((2263, 2317), 'cherrypy.tools.enforce_policy', 'cherrypy.tools.enforce_policy', ([], {'policy_name': '"""roles:get"""'}), "(policy_name='roles:get')\n", (2292, 2317), False, 'import cherrypy\n'), ((2428, 2435), 'ingredients_http.route.Route', 'Route', ([], {}), '()\n', (2433, 2435), False, 'from ingredients_http.route import Route\n'), ((2441, 2488), 'cherrypy.tools.model_params', 'cherrypy.tools.model_params', ([], {'cls': 'ParamsListRole'}), '(cls=ParamsListRole)\n', (2468, 2488), False, 'import cherrypy\n'), ((2494, 2547), 'cherrypy.tools.model_out_pagination', 'cherrypy.tools.model_out_pagination', ([], {'cls': 'ResponseRole'}), '(cls=ResponseRole)\n', (2529, 2547), False, 'import cherrypy\n'), ((2553, 2608), 'cherrypy.tools.enforce_policy', 'cherrypy.tools.enforce_policy', ([], {'policy_name': '"""roles:list"""'}), "(policy_name='roles:list')\n", (2582, 2608), False, 'import cherrypy\n'), ((3083, 3134), 'ingredients_http.route.Route', 'Route', (['"""{role_id}"""'], {'methods': '[RequestMethods.DELETE]'}), "('{role_id}', methods=[RequestMethods.DELETE])\n", (3088, 3134), False, 'from ingredients_http.route import Route\n'), ((3140, 3183), 'cherrypy.tools.model_params', 'cherrypy.tools.model_params', ([], {'cls': 'ParamsRole'}), '(cls=ParamsRole)\n', (3167, 3183), False, 'import cherrypy\n'), ((3189, 3254), 'cherrypy.tools.resource_object', 'cherrypy.tools.resource_object', ([], {'id_param': '"""role_id"""', 'cls': 'AuthZRole'}), "(id_param='role_id', cls=AuthZRole)\n", (3219, 3254), False, 'import cherrypy\n'), ((2032, 2064), 'deli_counter.http.mounts.root.routes.v1.auth.z.validation_models.roles.ResponseRole.from_database', 'ResponseRole.from_database', (['role'], {}), '(role)\n', (2058, 2064), False, 'from deli_counter.http.mounts.root.routes.v1.auth.z.validation_models.roles import ResponseRole, RequestCreateRole, ParamsRole, ParamsListRole, RoleType\n'), ((2361, 2421), 'deli_counter.http.mounts.root.routes.v1.auth.z.validation_models.roles.ResponseRole.from_database', 'ResponseRole.from_database', (['cherrypy.request.resource_object'], {}), '(cherrypy.request.resource_object)\n', (2387, 2421), False, 'from deli_counter.http.mounts.root.routes.v1.auth.z.validation_models.roles import ResponseRole, RequestCreateRole, ParamsRole, 
ParamsListRole, RoleType\n'), ((1288, 1317), 'cherrypy.request.db_session', 'cherrypy.request.db_session', ([], {}), '()\n', (1315, 1317), False, 'import cherrypy\n'), ((1740, 1751), 'ingredients_db.models.authz.AuthZRole', 'AuthZRole', ([], {}), '()\n', (1749, 1751), False, 'from ingredients_db.models.authz import AuthZRole\n'), ((3459, 3488), 'cherrypy.request.db_session', 'cherrypy.request.db_session', ([], {}), '()\n', (3486, 3488), False, 'import cherrypy\n'), ((1476, 1549), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(409)', '"""A role with the requested name already exists."""'], {}), "(409, 'A role with the requested name already exists.')\n", (1494, 1549), False, 'import cherrypy\n'), ((1629, 1723), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(400)', "('Cannot create a protected role with the name of ' + request.name)"], {}), "(400, 'Cannot create a protected role with the name of ' +\n request.name)\n", (1647, 1723), False, 'import cherrypy\n'), ((4066, 4157), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(400)', "('Cannot delete a protected role with the name of ' + role.name)"], {}), "(400, 'Cannot delete a protected role with the name of ' +\n role.name)\n", (4084, 4157), False, 'import cherrypy\n'), ((2741, 2757), 'sqlalchemy.orm.Query', 'Query', (['AuthZRole'], {}), '(AuthZRole)\n', (2746, 2757), False, 'from sqlalchemy.orm import Query\n'), ((2900, 2916), 'sqlalchemy.orm.Query', 'Query', (['AuthZRole'], {}), '(AuthZRole)\n', (2905, 2916), False, 'from sqlalchemy.orm import Query\n'), ((3847, 3914), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(401)', '"""Cannot delete a role in another project."""'], {}), "(401, 'Cannot delete a role in another project.')\n", (3865, 3914), False, 'import cherrypy\n')]
|
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.mail import Mail
app = Flask(__name__)
app.config.from_object('notejam.config.Config')
db = SQLAlchemy(app)
from models import *
db.init_app(app)
db.create_all()
db.session.commit()
login_manager = LoginManager()
login_manager.login_view = "signin"
login_manager.init_app(app)
mail = Mail()
mail.init_app(app)
from notejam import views
|
[
"flask.ext.sqlalchemy.SQLAlchemy",
"flask.Flask",
"flask.ext.mail.Mail",
"flask.ext.login.LoginManager"
] |
[((158, 173), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (163, 173), False, 'from flask import Flask\n'), ((227, 242), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (237, 242), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n'), ((336, 350), 'flask.ext.login.LoginManager', 'LoginManager', ([], {}), '()\n', (348, 350), False, 'from flask.ext.login import LoginManager\n'), ((423, 429), 'flask.ext.mail.Mail', 'Mail', ([], {}), '()\n', (427, 429), False, 'from flask.ext.mail import Mail\n')]
|
#!/usr/bin/env python3
from __future__ import print_function
import os,argparse,logging,pdb,io,copy
import numpy as np
import time
import shutil
import molecules as rxmol
import chemfiles as rxccfile
def matchdihd(dihd,func):
a=(dihd[1].atomtype==func.a or func.a=='*')
b=(dihd[2].atomtype==func.b or func.b=='*')
c=(dihd[3].atomtype==func.c or func.c=='*')
d=(dihd[4].atomtype==func.d or func.d=='*')
forward=a and b and c and d
a=(dihd[1].atomtype==func.d or func.d=='*')
b=(dihd[2].atomtype==func.c or func.c=='*')
c=(dihd[3].atomtype==func.b or func.b=='*')
d=(dihd[4].atomtype==func.a or func.a=='*')
backward=a and b and c and d
if forward or backward:
return True
else:
return False
def matchbond(bond,func):
a=(bond[1].atomtype==func.a or func.a=='*')
b=(bond[2].atomtype==func.b or func.b=='*')
forward=a and b
a=(bond[1].atomtype==func.b or func.b=='*')
b=(bond[2].atomtype==func.a or func.a=='*')
backward=a and b
if forward or backward:
return True
else:
return False
def matchangle(angle,func):
a=(angle[1].atomtype==func.a or func.a=='*')
b=(angle[2].atomtype==func.b or func.b=='*')
c=(angle[3].atomtype==func.c or func.c=='*')
forward=a and b and c
a=(angle[1].atomtype==func.c or func.c=='*')
b=(angle[2].atomtype==func.b or func.b=='*')
c=(angle[3].atomtype==func.a or func.a=='*')
backward=a and b and c
if forward or backward:
return True
else:
return False
def katachi(mmresult,loopid,opt,convthreshold): # mmresult: phf_result_mmxxx without extension
# Def read initial:
foldername = 'katachi_'+mmresult.split('_result')[0]
try:
shutil.rmtree(foldername)
except:
pass
os.mkdir(foldername)
os.chdir(foldername)
shutil.copy(os.path.join('..',mmresult+'.com'), '.')
init=rxccfile.File(mmresult)
stdgeom=rxmol.Molecule('stdgeom')
init.com.read()
xyz=io.StringIO(init.com.xyz)
stdgeom.readfromxyz(xyz)
stdgeom.readchargefromlist(init.com.atomchargelist)
stdgeom.readtypefromlist(init.com.atomtypelist)
stdgeom.readconnectivity(init.com.connectivity)
L=[]
L.extend(stdgeom.dihdlist.values())
L.extend(stdgeom.anglelist.values())
L.extend(stdgeom.bondlist.values())
nozomuL=[]
nozomuL.extend(init.com.nozomudihdfunc)
nozomuL.extend(init.com.nozomuanglefunc)
nozomuL.extend(init.com.nozomubondfunc)
stdL=[]
stdL.extend(init.com.nozomuanglefunc)
stdL.extend(init.com.nozomubondfunc)
# Def iteration (stdgeom,currentfile):
def iteration(stdgeom,currentfile,opt,loopid,convthreshold):
os.system('cp '+currentfile.comname+' MM0.com')
with open('MM0.com','r') as f:
dihds=''
for line in f:
if line.find('AmbTrs')>=0:
dihds+=line
while True:
currentfile=rxccfile.File('MM'+str(loopid))
if opt=='opt':
os.system('sed -i "s/#p opt=(nomicro,cartesian) /#p /g" '+currentfile.comname)
os.system('sed -i "s/#p/#p opt=(nomicro,cartesian)/g" '+currentfile.comname)
os.system('sed -i "/freq/d" '+currentfile.comname)
os.system('sed -i "/chk/d" '+currentfile.comname)
elif opt=='calcall':
os.system('sed -i "s/#p opt=(nomicro,cartesian,tight,calcall) /#p /g" '+currentfile.comname)
os.system('sed -i "s/#p opt=(nomicro,cartesian,tight,calcall) /#p /g" '+currentfile.comname)
os.system('sed -i "s/#p/#p opt=(nomicro,cartesian,tight,calcall)/g" '+currentfile.comname)
os.system('sed -i "/freq/d" '+currentfile.comname)
os.system('sed -i "/chk/d" '+currentfile.comname)
# if loopid>1000:
# raise StopIteration
currentfile.com.read()
try:
currentfile.com.rung09()
currentfile.com.isover()
currentfile.runformchk()
ifstop=True
os.system('rm '+currentfile.chkname+' '+currentfile.logname)
except:
logging.error("Calculation failed, try again.")
try:
currentfile.com.rung09()
currentfile.com.isover()
currentfile.runformchk()
ifstop=True
os.system('rm '+currentfile.chkname+' '+currentfile.logname)
except:
logging.critical('Calculation still failed, continue...')
currentfile.runformchk()
os.system('rm '+currentfile.chkname+' '+currentfile.logname)
ifstop=False
# logging.info('minimum max2 is chosen from loop'+str(minmax2loop))
# os.system('cp MM'+str(minmax2loop)+'.com ../'+mmresult[0:3]+'amd'+mmresult[3:]+'.com')
# os.system('sed -i "s/#p opt=(verytight,z-matrix,calcall)/#p /g" ../*.com')
# os.system('sed -i "/chk/d" ../*.com')
# return
currentfile.fchk.read()
currentgeom=rxmol.Molecule('currentgeom')
currentgeom.readfromxyz(io.StringIO(currentfile.fchk.xyz))
currentgeom.readchargefromlist(currentfile.com.atomchargelist)
currentgeom.readtypefromlist(currentfile.com.atomtypelist)
currentgeom.readconnectivity(currentfile.com.connectivity)
for angle in currentgeom.anglelist.values():
for anglefunc in currentfile.com.nozomuanglefunc:
if matchangle(angle,anglefunc):
angle.nozomufunc=anglefunc
for bond in currentgeom.bondlist.values():
for bondfunc in currentfile.com.nozomubondfunc:
if matchbond(bond,bondfunc):
bond.nozomufunc=bondfunc
# reassign current eqvalue
for nozomufunc in currentfile.com.nozomuanglefunc:
eq=0
i=0
for angle in currentgeom.anglelist.values():
if angle.nozomufunc==nozomufunc:
eq+=angle.anglevalue
i+=1
nozomufunc.eqvalue=eq/i
for nozomufunc in currentfile.com.nozomubondfunc:
eq=0
i=0
for bond in currentgeom.bondlist.values():
if bond.nozomufunc==nozomufunc:
eq+=bond.length
i+=1
nozomufunc.eqvalue=eq/i
currentL=[]
currentL.extend(currentfile.com.nozomuanglefunc)
currentL.extend(currentfile.com.nozomubondfunc)
for item1 in currentL:
for item2 in currentL:
if item1.value==item2.value and item1.repr!=item2.repr:
item1.eqvalue=(item1.eqvalue+item2.eqvalue)/2
item2.eqvalue=item1.eqvalue
logging.debug('Averaged old eqvalue '+item1.repr+' and '+item2.repr+' '+str(item2.eqvalue))
delta1=[x.eqvalue-y.eqvalue for x,y in zip(stdL,currentL) if x.type=='bond']
delta2=[x.eqvalue-y.eqvalue for x,y in zip(stdL,currentL) if x.type=='angle']
max1=sorted(delta1,key=abs,reverse=True)[0]
max2=sorted(delta2,key=abs,reverse=True)[0]
try:
if abs(max2)<abs(minmax2):
minmax2=abs(max2)
minmax2loop=loopid
except:
minmax2=abs(max2)
minmax2loop=loopid
if loopid-minmax2loop>convthreshold:
logging.info('Stopped for convergence: max Delta2 do not decrease in '+str(convthreshold)+' cycles')
if opt=='calcall':
logging.info('minimum max2 is chosen from loop'+str(minmax2loop))
os.system('cp MM'+str(minmax2loop)+'.com ../katachi_'+mmresult+'.com')
os.system('sed -i "s/#p opt=(nomicro,cartesian,tight,calcall)/#p freq/g" ../*.com')
os.system('sed -i "/chk/d" ../*.com')
return loopid
else:
opt='calcall'
minmax2=100
minmax2loop=loopid
logging.info('------------------------------')
logging.info('Loop '+str(loopid)+' keyword '+opt+': max bond delta: '+str(max1)+' max angle delta: '+str(max2))
logging.info('MinMax2 is '+str(minmax2)+' at loop '+str(minmax2loop))
logging.info('------------------------------')
if abs(max1)<0.0001 and abs(max2)<0.01:
if opt=='opt':
opt='calcall'
logging.info('-------------------------------------------')
logging.info('opt converged at '+str(max1)+' '+str(max2)+' '+str(loopid))
logging.info('-------------------------------------------')
minmax2=100
minmax2loop=loopid+1
elif opt=='calcall':
logging.info('-------------------------------------------')
logging.info('calcall converged at '+str(max1)+' '+str(max2)+' '+str(loopid))
logging.info('-------------------------------------------')
os.system('cp '+currentfile.comname+' ../katachi_'+mmresult+'.com')
os.system('sed -i "s/#p opt=(nomicro,cartesian,tight,calcall)/#p freq/g" ../*.com')
os.system('sed -i "/chk/d" ../*.com')
if ifstop:
return loopid
delta=[x.eqvalue-y.eqvalue for x,y in zip(stdL,currentL)]
try:
type(last)
except UnboundLocalError:
if loopid!=0:
lastfile=rxccfile.File('MM'+str(loopid-1))
lastfile.com.read()
lastL=[]
lastL.extend(lastfile.com.nozomuanglefunc)
lastL.extend(lastfile.com.nozomubondfunc)
last=lastL
else:
last=copy.deepcopy(stdL)
for now,std,former,delt in zip(currentL,stdL,last,delta):
if former.eqvalue+delt>0 and former.eqvalue+delt<180:
now.eqvalue=former.eqvalue+delt
if now.type=='bond' and abs(delt)>0.0001:
logging.warning(now.repr+' '+str(delt))
if now.type=='angle' and abs(delt)>0.01:
logging.warning(now.repr+' '+str(delt))
for item1 in currentL:
for item2 in currentL:
if item1.value==item2.value and item1.repr!=item2.repr:
item1.eqvalue=(item1.eqvalue+item2.eqvalue)/2
item2.eqvalue=item1.eqvalue
logging.debug('Averaged new eqvalue '+item1.repr+' and '+item2.repr+' '+str(item2.eqvalue))
last=copy.deepcopy(currentL)
finalxyz=''
for atom in stdgeom:
finalxyz+=atom.atomsym+'-'+atom.atomtype+'-'+'{:<9.6f}'.format(float(atom.atomcharge))+' '+' '.join(["{: .12f}".format(x) for x in atom.coords])+'\n'
finalhead=currentfile.com.commandline+'\nfinal\n\n'+str(currentfile.fchk.totalcharge)+' '+str(currentfile.fchk.multiplicity)+'\n'+finalxyz+'\n'+currentfile.com.connectivity+'\n'
finaltail=''
finaltail+=dihds
for item in currentfile.com.nozomuanglefunc:
finaltail+='HrmBnd1 '+item.repr+' '
parm="{: .3f}".format(item.value)
finaltail+=' '+parm+' {: .4f}'.format(item.eqvalue)+'\n'
for item in currentfile.com.nozomubondfunc:
finaltail+='HrmStr1 '+item.repr+' '
parm="{: .3f}".format(item.value)
finaltail+=' '+parm+' {: .5f}'.format(item.eqvalue)+'\n'
for addfunc in currentfile.com.additionfunc:
finaltail+=addfunc.content
for nozovdw in currentfile.com.nozomuvdw:
finaltail+=nozovdw.content
finaltail+='\n\n'
loopid+=1
with open('MM'+str(loopid)+'.com','w') as f:
f.write(finalhead+finaltail)
del currentgeom
del currentfile
id = iteration(stdgeom,init,opt,loopid,convthreshold)
return id
if __name__=='__main__':
# Parse Input
start = time.perf_counter()
parser=argparse.ArgumentParser()
parser.add_argument('mmresult',help="parameterized result MM file.")
parser.add_argument('loopid',help="loopid",default=0)
parser.add_argument('opt',help='opt or calcall',default='opt')
parser.add_argument('convthreshold',help='convergence threshold',default=10)
args=parser.parse_args()
mmresult=args.mmresult
mmresult=mmresult[:mmresult.find('.')]
loopid=int(args.loopid)
convthreshold=int(args.convthreshold)
opt=args.opt
if loopid==0:
logging.basicConfig(filename=args.mmresult+'.katachiout',level=logging.DEBUG,filemode='w')
else:
logging.basicConfig(filename=args.mmresult+'.katachiout',level=logging.DEBUG)
console=logging.StreamHandler()
console.setLevel(logging.INFO)
formatter=logging.Formatter('%(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
id = katachi(mmresult,loopid,opt,convthreshold)
id = str(id)
stop = time.perf_counter()
t = stop - start
os.system('echo "'+str(t)+' '+id+ '" >> ~/tkatachi')
|
[
"os.mkdir",
"io.StringIO",
"chemfiles.File",
"copy.deepcopy",
"argparse.ArgumentParser",
"logging.basicConfig",
"molecules.Molecule",
"logging.error",
"logging.StreamHandler",
"time.perf_counter",
"os.system",
"logging.Formatter",
"logging.info",
"logging.critical",
"shutil.rmtree",
"os.path.join",
"os.chdir",
"logging.getLogger"
] |
[((1800, 1820), 'os.mkdir', 'os.mkdir', (['foldername'], {}), '(foldername)\n', (1808, 1820), False, 'import os, argparse, logging, pdb, io, copy\n'), ((1825, 1845), 'os.chdir', 'os.chdir', (['foldername'], {}), '(foldername)\n', (1833, 1845), False, 'import os, argparse, logging, pdb, io, copy\n'), ((1913, 1936), 'chemfiles.File', 'rxccfile.File', (['mmresult'], {}), '(mmresult)\n', (1926, 1936), True, 'import chemfiles as rxccfile\n'), ((1949, 1974), 'molecules.Molecule', 'rxmol.Molecule', (['"""stdgeom"""'], {}), "('stdgeom')\n", (1963, 1974), True, 'import molecules as rxmol\n'), ((2003, 2028), 'io.StringIO', 'io.StringIO', (['init.com.xyz'], {}), '(init.com.xyz)\n', (2014, 2028), False, 'import os, argparse, logging, pdb, io, copy\n'), ((12744, 12763), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (12761, 12763), False, 'import time\n'), ((12775, 12800), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12798, 12800), False, 'import os, argparse, logging, pdb, io, copy\n'), ((13491, 13514), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (13512, 13514), False, 'import os, argparse, logging, pdb, io, copy\n'), ((13564, 13612), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)-8s %(message)s"""'], {}), "('%(levelname)-8s %(message)s')\n", (13581, 13612), False, 'import os, argparse, logging, pdb, io, copy\n'), ((13778, 13797), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13795, 13797), False, 'import time\n'), ((1745, 1770), 'shutil.rmtree', 'shutil.rmtree', (['foldername'], {}), '(foldername)\n', (1758, 1770), False, 'import shutil\n'), ((1862, 1899), 'os.path.join', 'os.path.join', (['""".."""', "(mmresult + '.com')"], {}), "('..', mmresult + '.com')\n", (1874, 1899), False, 'import os, argparse, logging, pdb, io, copy\n'), ((2710, 2761), 'os.system', 'os.system', (["('cp ' + currentfile.comname + ' MM0.com')"], {}), "('cp ' + currentfile.comname + ' MM0.com')\n", (2719, 2761), False, 'import os, argparse, logging, pdb, io, copy\n'), ((13292, 13391), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "(args.mmresult + '.katachiout')", 'level': 'logging.DEBUG', 'filemode': '"""w"""'}), "(filename=args.mmresult + '.katachiout', level=logging.\n DEBUG, filemode='w')\n", (13311, 13391), False, 'import os, argparse, logging, pdb, io, copy\n'), ((13401, 13486), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "(args.mmresult + '.katachiout')", 'level': 'logging.DEBUG'}), "(filename=args.mmresult + '.katachiout', level=logging.DEBUG\n )\n", (13420, 13486), False, 'import os, argparse, logging, pdb, io, copy\n'), ((5242, 5271), 'molecules.Molecule', 'rxmol.Molecule', (['"""currentgeom"""'], {}), "('currentgeom')\n", (5256, 5271), True, 'import molecules as rxmol\n'), ((8491, 8537), 'logging.info', 'logging.info', (['"""------------------------------"""'], {}), "('------------------------------')\n", (8503, 8537), False, 'import os, argparse, logging, pdb, io, copy\n'), ((8757, 8803), 'logging.info', 'logging.info', (['"""------------------------------"""'], {}), "('------------------------------')\n", (8769, 8803), False, 'import os, argparse, logging, pdb, io, copy\n'), ((11235, 11258), 'copy.deepcopy', 'copy.deepcopy', (['currentL'], {}), '(currentL)\n', (11248, 11258), False, 'import os, argparse, logging, pdb, io, copy\n'), ((13653, 13674), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (13670, 13674), False, 'import os, argparse, logging, pdb, io, 
copy\n'), ((3039, 3124), 'os.system', 'os.system', (['(\'sed -i "s/#p opt=(nomicro,cartesian) /#p /g" \' + currentfile.comname)'], {}), '(\'sed -i "s/#p opt=(nomicro,cartesian) /#p /g" \' + currentfile.comname\n )\n', (3048, 3124), False, 'import os, argparse, logging, pdb, io, copy\n'), ((3134, 3212), 'os.system', 'os.system', (['(\'sed -i "s/#p/#p opt=(nomicro,cartesian)/g" \' + currentfile.comname)'], {}), '(\'sed -i "s/#p/#p opt=(nomicro,cartesian)/g" \' + currentfile.comname)\n', (3143, 3212), False, 'import os, argparse, logging, pdb, io, copy\n'), ((3227, 3279), 'os.system', 'os.system', (['(\'sed -i "/freq/d" \' + currentfile.comname)'], {}), '(\'sed -i "/freq/d" \' + currentfile.comname)\n', (3236, 3279), False, 'import os, argparse, logging, pdb, io, copy\n'), ((3294, 3345), 'os.system', 'os.system', (['(\'sed -i "/chk/d" \' + currentfile.comname)'], {}), '(\'sed -i "/chk/d" \' + currentfile.comname)\n', (3303, 3345), False, 'import os, argparse, logging, pdb, io, copy\n'), ((4121, 4187), 'os.system', 'os.system', (["('rm ' + currentfile.chkname + ' ' + currentfile.logname)"], {}), "('rm ' + currentfile.chkname + ' ' + currentfile.logname)\n", (4130, 4187), False, 'import os, argparse, logging, pdb, io, copy\n'), ((5308, 5341), 'io.StringIO', 'io.StringIO', (['currentfile.fchk.xyz'], {}), '(currentfile.fchk.xyz)\n', (5319, 5341), False, 'import os, argparse, logging, pdb, io, copy\n'), ((3393, 3491), 'os.system', 'os.system', (['(\'sed -i "s/#p opt=(nomicro,cartesian,tight,calcall) /#p /g" \' +\n currentfile.comname)'], {}), '(\'sed -i "s/#p opt=(nomicro,cartesian,tight,calcall) /#p /g" \' +\n currentfile.comname)\n', (3402, 3491), False, 'import os, argparse, logging, pdb, io, copy\n'), ((3502, 3600), 'os.system', 'os.system', (['(\'sed -i "s/#p opt=(nomicro,cartesian,tight,calcall) /#p /g" \' +\n currentfile.comname)'], {}), '(\'sed -i "s/#p opt=(nomicro,cartesian,tight,calcall) /#p /g" \' +\n currentfile.comname)\n', (3511, 3600), False, 'import os, argparse, logging, pdb, io, copy\n'), ((3611, 3707), 'os.system', 'os.system', (['(\'sed -i "s/#p/#p opt=(nomicro,cartesian,tight,calcall)/g" \' + currentfile.\n comname)'], {}), '(\'sed -i "s/#p/#p opt=(nomicro,cartesian,tight,calcall)/g" \' +\n currentfile.comname)\n', (3620, 3707), False, 'import os, argparse, logging, pdb, io, copy\n'), ((3718, 3770), 'os.system', 'os.system', (['(\'sed -i "/freq/d" \' + currentfile.comname)'], {}), '(\'sed -i "/freq/d" \' + currentfile.comname)\n', (3727, 3770), False, 'import os, argparse, logging, pdb, io, copy\n'), ((3785, 3836), 'os.system', 'os.system', (['(\'sed -i "/chk/d" \' + currentfile.comname)'], {}), '(\'sed -i "/chk/d" \' + currentfile.comname)\n', (3794, 3836), False, 'import os, argparse, logging, pdb, io, copy\n'), ((4218, 4265), 'logging.error', 'logging.error', (['"""Calculation failed, try again."""'], {}), "('Calculation failed, try again.')\n", (4231, 4265), False, 'import os, argparse, logging, pdb, io, copy\n'), ((8174, 8262), 'os.system', 'os.system', (['"""sed -i "s/#p opt=(nomicro,cartesian,tight,calcall)/#p freq/g" ../*.com"""'], {}), '(\n \'sed -i "s/#p opt=(nomicro,cartesian,tight,calcall)/#p freq/g" ../*.com\')\n', (8183, 8262), False, 'import os, argparse, logging, pdb, io, copy\n'), ((8278, 8315), 'os.system', 'os.system', (['"""sed -i "/chk/d" ../*.com"""'], {}), '(\'sed -i "/chk/d" ../*.com\')\n', (8287, 8315), False, 'import os, argparse, logging, pdb, io, copy\n'), ((8942, 9001), 'logging.info', 'logging.info', 
(['"""-------------------------------------------"""'], {}), "('-------------------------------------------')\n", (8954, 9001), False, 'import os, argparse, logging, pdb, io, copy\n'), ((9116, 9175), 'logging.info', 'logging.info', (['"""-------------------------------------------"""'], {}), "('-------------------------------------------')\n", (9128, 9175), False, 'import os, argparse, logging, pdb, io, copy\n'), ((4474, 4540), 'os.system', 'os.system', (["('rm ' + currentfile.chkname + ' ' + currentfile.logname)"], {}), "('rm ' + currentfile.chkname + ' ' + currentfile.logname)\n", (4483, 4540), False, 'import os, argparse, logging, pdb, io, copy\n'), ((9306, 9365), 'logging.info', 'logging.info', (['"""-------------------------------------------"""'], {}), "('-------------------------------------------')\n", (9318, 9365), False, 'import os, argparse, logging, pdb, io, copy\n'), ((9484, 9543), 'logging.info', 'logging.info', (['"""-------------------------------------------"""'], {}), "('-------------------------------------------')\n", (9496, 9543), False, 'import os, argparse, logging, pdb, io, copy\n'), ((9564, 9639), 'os.system', 'os.system', (["('cp ' + currentfile.comname + ' ../katachi_' + mmresult + '.com')"], {}), "('cp ' + currentfile.comname + ' ../katachi_' + mmresult + '.com')\n", (9573, 9639), False, 'import os, argparse, logging, pdb, io, copy\n'), ((9652, 9740), 'os.system', 'os.system', (['"""sed -i "s/#p opt=(nomicro,cartesian,tight,calcall)/#p freq/g" ../*.com"""'], {}), '(\n \'sed -i "s/#p opt=(nomicro,cartesian,tight,calcall)/#p freq/g" ../*.com\')\n', (9661, 9740), False, 'import os, argparse, logging, pdb, io, copy\n'), ((9756, 9793), 'os.system', 'os.system', (['"""sed -i "/chk/d" ../*.com"""'], {}), '(\'sed -i "/chk/d" ../*.com\')\n', (9765, 9793), False, 'import os, argparse, logging, pdb, io, copy\n'), ((10382, 10401), 'copy.deepcopy', 'copy.deepcopy', (['stdL'], {}), '(stdL)\n', (10395, 10401), False, 'import os, argparse, logging, pdb, io, copy\n'), ((4579, 4636), 'logging.critical', 'logging.critical', (['"""Calculation still failed, continue..."""'], {}), "('Calculation still failed, continue...')\n", (4595, 4636), False, 'import os, argparse, logging, pdb, io, copy\n'), ((4702, 4768), 'os.system', 'os.system', (["('rm ' + currentfile.chkname + ' ' + currentfile.logname)"], {}), "('rm ' + currentfile.chkname + ' ' + currentfile.logname)\n", (4711, 4768), False, 'import os, argparse, logging, pdb, io, copy\n')]
|
"uxml2dict setup module."
def main():
from setuptools import setup
from uxml2dict import Xml2Dict as x2d
install_requires = ["microapp>=0.2.3", "xmltodict"]
setup(
name=x2d._name_,
version=x2d._version_,
description=x2d._description_,
long_description=x2d._long_description_,
author=x2d._author_,
author_email=x2d._author_email_,
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="microapp uxml2dict",
include_package_data=True,
install_requires=install_requires,
packages=["uxml2dict"],
entry_points={"microapp.apps": "uxml2dict = uxml2dict"},
project_urls={
"Bug Reports": "https://github.com/grnydawn/uxml2dict/issues",
"Source": "https://github.com/grnydawn/uxml2dict",
}
)
if __name__ == '__main__':
import multiprocessing
multiprocessing.freeze_support()
main()
|
[
"multiprocessing.freeze_support",
"setuptools.setup"
] |
[((177, 1087), 'setuptools.setup', 'setup', ([], {'name': 'x2d._name_', 'version': 'x2d._version_', 'description': 'x2d._description_', 'long_description': 'x2d._long_description_', 'author': 'x2d._author_', 'author_email': 'x2d._author_email_', 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8']", 'keywords': '"""microapp uxml2dict"""', 'include_package_data': '(True)', 'install_requires': 'install_requires', 'packages': "['uxml2dict']", 'entry_points': "{'microapp.apps': 'uxml2dict = uxml2dict'}", 'project_urls': "{'Bug Reports': 'https://github.com/grnydawn/uxml2dict/issues', 'Source':\n 'https://github.com/grnydawn/uxml2dict'}"}), "(name=x2d._name_, version=x2d._version_, description=x2d._description_,\n long_description=x2d._long_description_, author=x2d._author_,\n author_email=x2d._author_email_, classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'], keywords='microapp uxml2dict',\n include_package_data=True, install_requires=install_requires, packages=\n ['uxml2dict'], entry_points={'microapp.apps': 'uxml2dict = uxml2dict'},\n project_urls={'Bug Reports':\n 'https://github.com/grnydawn/uxml2dict/issues', 'Source':\n 'https://github.com/grnydawn/uxml2dict'})\n", (182, 1087), False, 'from setuptools import setup\n'), ((1345, 1377), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (1375, 1377), False, 'import multiprocessing\n')]
|
import sqlite3
conn = sqlite3.connect('NewDB.db')
cursor = conn.cursor()
table ="""CREATE TABLE STUDENT(NAME VARCHAR(255), CLASS VARCHAR(255), SECTION VARCHAR(255));"""
cursor.execute(table)
cursor.execute('''INSERT INTO STUDENT VALUES ('John', '8', 'A')''')
cursor.execute('''INSERT INTO STUDENT VALUES ('Adam', '8', 'B')''')
cursor.execute('''INSERT INTO STUDENT VALUES ('Ann', '10', 'A')''')
print("Data Inserted in the table: ")
data = cursor.execute('''SELECT * FROM STUDENT''')
for row in data:
print(row)
conn.commit()
conn.close()
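# Added illustrative sketch (not part of the original snippet): queries can take
# parameters via "?" placeholders instead of interpolating values into SQL strings.
with sqlite3.connect('NewDB.db') as conn2:
    for row in conn2.execute('SELECT * FROM STUDENT WHERE CLASS = ?', ('8',)):
        print(row)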
|
[
"sqlite3.connect"
] |
[((26, 53), 'sqlite3.connect', 'sqlite3.connect', (['"""NewDB.db"""'], {}), "('NewDB.db')\n", (41, 53), False, 'import sqlite3\n')]
|
#!/usr/bin/env python
import io
import os
import re
from setuptools import setup
classifiers =[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Visualization",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
]
def _read(*parts, **kwargs):
filepath = os.path.join(os.path.dirname(__file__), *parts)
encoding = kwargs.pop('encoding', 'utf-8')
with io.open(filepath, encoding=encoding) as fh:
text = fh.read()
return text
def get_version():
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
_read('svgpath2mpl.py'),
re.MULTILINE).group(1)
return version
def get_long_description():
return _read('README.rst')
install_requires = ['numpy', 'matplotlib']
tests_require = ['nose']
setup(
name='svgpath2mpl',
author='<NAME>',
author_email='<EMAIL>',
version=get_version(),
license='BSD',
description='SVG path parser for matplotlib',
long_description=get_long_description(),
keywords=['svg', 'path', 'matplotlib', 'plotting', 'visualization'],
url='https://github.com/nvictus/svgpath2mpl',
py_modules=['svgpath2mpl'],
zip_safe=False,
classifiers=classifiers,
install_requires=install_requires,
tests_require=tests_require
)
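# Illustrative note (added; not part of the original script): get_version() works by
# scanning svgpath2mpl.py for a line of the form
#     __version__ = '1.2.3'
# The MULTILINE regex above captures the quoted value without importing the package,
# so setup.py can be run before any of its dependencies are installed.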
|
[
"os.path.dirname",
"io.open"
] |
[((649, 674), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (664, 674), False, 'import os\n'), ((740, 776), 'io.open', 'io.open', (['filepath'], {'encoding': 'encoding'}), '(filepath, encoding=encoding)\n', (747, 776), False, 'import io\n')]
|
"""
Algorithm entry point.
Methods of the APPO class initiate all other components (rollout & policy workers and learners) in the main thread,
and then fork their separate processes.
All data structures that are shared between processes are also created during the construction of APPO.
This class contains the algorithm main loop. All the actual work is done in separate worker processes, so
the only task of the main loop is to collect summaries and stats from the workers and log/save them to disk.
Hyperparameters specific to policy gradient algorithms are defined in this file. See also algorithm.py.
"""
import json
import math
import multiprocessing
import os
import time
from collections import deque
from os.path import join
from queue import Empty
import numpy as np
import torch
from tensorboardX import SummaryWriter
from torch.multiprocessing import JoinableQueue as TorchJoinableQueue
from multi_sample_factory.algorithms.algorithm import ReinforcementLearningAlgorithm
from multi_sample_factory.algorithms.appo.actor_worker import ActorWorker
from multi_sample_factory.algorithms.appo.appo_utils import make_env_func, iterate_recursively, set_global_cuda_envvars
from multi_sample_factory.algorithms.appo.learner import LearnerWorker
from multi_sample_factory.algorithms.appo.policy_worker import PolicyWorker
from multi_sample_factory.algorithms.appo.population_based_training import PopulationBasedTraining
from multi_sample_factory.algorithms.appo.shared_buffers import SharedBuffers
from multi_sample_factory.algorithms.utils.algo_utils import EXTRA_PER_POLICY_SUMMARIES, EXTRA_EPISODIC_STATS_PROCESSING, \
ExperimentStatus
from multi_sample_factory.envs.env_utils import get_default_reward_shaping
from multi_sample_factory.utils.timing import Timing
from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, \
ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger
from multi_sample_factory.algorithms.utils.action_distributions import transform_action_space
if os.name == 'nt':
from multi_sample_factory.utils import Queue as MpQueue
else:
from faster_fifo import Queue as MpQueue
# noinspection PyUnresolvedReferences
import faster_fifo_reduction
torch.multiprocessing.set_sharing_strategy('file_system')
class APPO(ReinforcementLearningAlgorithm):
"""Async PPO."""
@classmethod
def add_cli_args(cls, parser):
p = parser
super().add_cli_args(p)
p.add_argument('--experiment_summaries_interval', default=20, type=int, help='How often in seconds we write avg. statistics about the experiment (reward, episode length, extra stats...)')
p.add_argument('--adam_eps', default=1e-6, type=float, help='Adam epsilon parameter (1e-8 to 1e-5 seem to reliably work okay, 1e-3 and up does not work)')
p.add_argument('--adam_beta1', default=0.9, type=float, help='Adam momentum decay coefficient')
p.add_argument('--adam_beta2', default=0.999, type=float, help='Adam second momentum decay coefficient')
p.add_argument('--gae_lambda', default=0.95, type=float, help='Generalized Advantage Estimation discounting (only used when V-trace is False')
p.add_argument(
'--rollout', default=32, type=int,
help='Length of the rollout from each environment in timesteps.'
'Once we collect this many timesteps on actor worker, we send this trajectory to the learner.'
'The length of the rollout will determine how many timesteps are used to calculate bootstrapped'
'Monte-Carlo estimates of discounted rewards, advantages, GAE, or V-trace targets. Shorter rollouts'
'reduce variance, but the estimates are less precise (bias vs variance tradeoff).'
'For RNN policies, this should be a multiple of --recurrence, so every rollout will be split'
'into (n = rollout / recurrence) segments for backpropagation. V-trace algorithm currently requires that'
                 'rollout == recurrence, which is what you want most of the time anyway.'
'Rollout length is independent from the episode length. Episode length can be both shorter or longer than'
'rollout, although for PBT training it is currently recommended that rollout << episode_len'
'(see function finalize_trajectory in actor_worker.py)',
)
p.add_argument('--num_workers', default=multiprocessing.cpu_count(), type=int, help='Number of parallel environment workers. Should be less than num_envs and should divide num_envs')
p.add_argument(
'--recurrence', default=32, type=int,
help='Trajectory length for backpropagation through time. If recurrence=1 there is no backpropagation through time, and experience is shuffled completely randomly'
'For V-trace recurrence should be equal to rollout length.',
)
p.add_argument('--use_rnn', default=True, type=str2bool, help='Whether to use RNN core in a policy or not')
p.add_argument('--rnn_type', default='gru', choices=['gru', 'lstm'], type=str, help='Type of RNN cell to use if use_rnn is True')
p.add_argument('--rnn_num_layers', default=1, type=int, help='Number of RNN layers to use if use_rnn is True')
p.add_argument('--ppo_clip_ratio', default=0.1, type=float, help='We use unbiased clip(x, 1+e, 1/(1+e)) instead of clip(x, 1+e, 1-e) in the paper')
p.add_argument('--ppo_clip_value', default=1.0, type=float, help='Maximum absolute change in value estimate until it is clipped. Sensitive to value magnitude')
p.add_argument('--batch_size', default=1024, type=int, help='Minibatch size for SGD')
p.add_argument(
'--num_batches_per_iteration', default=1, type=int,
help='How many minibatches we collect before training on the collected experience. It is generally recommended to set this to 1 for most experiments, because any higher value will increase the policy lag.'
'But in some specific circumstances it can be beneficial to have a larger macro-batch in order to shuffle and decorrelate the minibatches.'
'Here and throughout the codebase: macro batch is the portion of experience that learner processes per iteration (consisting of 1 or several minibatches)',
)
p.add_argument('--ppo_epochs', default=1, type=int, help='Number of training epochs before a new batch of experience is collected')
p.add_argument(
'--num_minibatches_to_accumulate', default=-1, type=int,
help='This parameter governs the maximum number of minibatches the learner can accumulate before further experience collection is stopped.'
'The default value (-1) will set this to 2 * num_batches_per_iteration, so if the experience collection is faster than the training,'
'the learner will accumulate enough minibatches for 2 iterations of training (but no more). This is a good balance between policy-lag and throughput.'
'When the limit is reached, the learner will notify the actor workers that they ought to stop the experience collection until accumulated minibatches'
'are processed. Set this parameter to 1 * num_batches_per_iteration to further reduce policy-lag.'
'If the experience collection is very non-uniform, increasing this parameter can increase overall throughput, at the cost of increased policy-lag.'
'A value of 0 is treated specially. This means the experience accumulation is turned off, and all experience collection will be halted during training.'
'This is the regime with potentially lowest policy-lag.'
'When this parameter is 0 and num_workers * num_envs_per_worker * rollout == num_batches_per_iteration * batch_size, the algorithm is similar to'
'regular synchronous PPO.',
)
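        # Illustrative arithmetic (added for clarity; not part of the original file):
        # with num_workers=16, num_envs_per_worker=2 and rollout=32, one sampling round
        # yields 16 * 2 * 32 = 1024 transitions, which exactly fills one minibatch when
        # batch_size=1024 and num_batches_per_iteration=1. In that configuration,
        # num_minibatches_to_accumulate=0 degenerates to regular synchronous PPO, as
        # described in the help text above.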
p.add_argument('--max_grad_norm', default=4.0, type=float, help='Max L2 norm of the gradient vector')
# components of the loss function
p.add_argument('--exploration_loss_coeff', default=0.003, type=float,
help='Coefficient for the exploration component of the loss function.')
p.add_argument('--value_loss_coeff', default=0.5, type=float, help='Coefficient for the critic loss')
p.add_argument('--kl_loss_coeff', default=0.0, type=float,
help='Coefficient for fixed KL loss (as used by Schulman et al. in https://arxiv.org/pdf/1707.06347.pdf). '
'Highly recommended for environments with continuous action spaces.',
)
p.add_argument('--exploration_loss', default='entropy', type=str, choices=['entropy', 'symmetric_kl'],
help='Usually the exploration loss is based on maximizing the entropy of the probability'
' distribution. Note that mathematically maximizing entropy of the categorical probability '
'distribution is exactly the same as minimizing the (regular) KL-divergence between'
' this distribution and a uniform prior. The downside of using the entropy term '
'(or regular asymmetric KL-divergence) is the fact that penalty does not increase as '
'probabilities of some actions approach zero. I.e. numerically, there is almost '
'no difference between an action distribution with a probability epsilon > 0 for '
'some action and an action distribution with a probability = zero for this action.'
                            ' For many tasks the first (epsilon) distribution is preferable because we keep some '
'(albeit small) amount of exploration, while the second distribution will never explore '
'this action ever again.'
'Unlike the entropy term, symmetric KL divergence between the action distribution '
'and a uniform prior approaches infinity when entropy of the distribution approaches zero,'
' so it can prevent the pathological situations where the agent stops exploring. '
'Empirically, symmetric KL-divergence yielded slightly better results on some problems.',
)
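        # Illustrative math (added for clarity; not part of the original file): for action
        # probabilities p over n actions and uniform prior u = 1/n,
        #   entropy loss      ~ -H(p) = sum_i p_i * log(p_i)
        #   symmetric KL loss ~ KL(p || u) + KL(u || p)
        # The second term contains sum_i (1/n) * log((1/n) / p_i), which diverges as any
        # p_i -> 0, so it keeps penalizing collapsed action distributions where the plain
        # entropy term flattens out.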
# APPO-specific
p.add_argument(
'--num_envs_per_worker', default=2, type=int,
help='Number of envs on a single CPU actor, in high-throughput configurations this should be in 10-30 range for Atari/VizDoom'
'Must be even for double-buffered sampling!',
)
p.add_argument(
'--worker_num_splits', default=2, type=int,
help='Typically we split a vector of envs into two parts for "double buffered" experience collection'
'Set this to 1 to disable double buffering. Set this to 3 for triple buffering!',
)
p.add_argument('--num_policies', default=1, type=int, help='Number of policies to train jointly')
p.add_argument('--policy_workers_per_policy', default=1, type=int, help='Number of policy workers that compute forward pass (per policy)')
p.add_argument(
'--max_policy_lag', default=10000, type=int,
help='Max policy lag in policy versions. Discard all experience that is older than this. This should be increased for configurations with multiple epochs of SGD because naturally'
'policy-lag may exceed this value.',
)
p.add_argument(
'--traj_buffers_excess_ratio', default=1.3, type=float,
help='Increase this value to make sure the system always has enough free trajectory buffers (can be useful when i.e. a lot of inactive agents in multi-agent envs)'
'Decrease this to 1.0 to save as much RAM as possible.',
)
p.add_argument(
'--decorrelate_experience_max_seconds', default=10, type=int,
help='Decorrelating experience serves two benefits. First: this is better for learning because samples from workers come from random moments in the episode, becoming more "i.i.d".'
'Second, and more important one: this is good for environments with highly non-uniform one-step times, including long and expensive episode resets. If experience is not decorrelated'
'then training batches will come in bursts e.g. after a bunch of environments finished resets and many iterations on the learner might be required,'
'which will increase the policy-lag of the new experience collected. The performance of the Sample Factory is best when experience is generated as more-or-less'
'uniform stream. Try increasing this to 100-200 seconds to smoothen the experience distribution in time right from the beginning (it will eventually spread out and settle anyway)',
)
p.add_argument(
'--decorrelate_envs_on_one_worker', default=True, type=str2bool,
help='In addition to temporal decorrelation of worker processes, also decorrelate envs within one worker process'
'For environments with a fixed episode length it can prevent the reset from happening in the same rollout for all envs simultaneously, which makes experience collection more uniform.',
)
p.add_argument('--with_vtrace', default=True, type=str2bool, help='Enables V-trace off-policy correction. If this is True, then GAE is not used')
p.add_argument('--vtrace_rho', default=1.0, type=float, help='rho_hat clipping parameter of the V-trace algorithm (importance sampling truncation)')
p.add_argument('--vtrace_c', default=1.0, type=float, help='c_hat clipping parameter of the V-trace algorithm. Low values for c_hat can reduce variance of the advantage estimates (similar to GAE lambda < 1)')
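        # Illustrative sketch (added for clarity; not part of the original file): with
        # V-trace disabled, advantages come from GAE,
        #   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
        #   A_t     = delta_t + gamma * gae_lambda * A_{t+1}
        # With V-trace enabled, per-step importance ratios pi(a|s) / mu(a|s) are clipped
        # at vtrace_rho (for the value targets) and at vtrace_c (for the trace
        # coefficients) before being folded into the off-policy value targets.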
p.add_argument(
'--set_workers_cpu_affinity', default=True, type=str2bool,
            help='Whether to assign workers to specific CPU cores or not. The logic is beneficial for most workloads because it prevents a lot of context switching.'
'However for some environments it can be better to disable it, to allow one worker to use all cores some of the time. This can be the case for some DMLab environments with very expensive episode reset'
'that can use parallel CPU cores for level generation.',
)
p.add_argument(
'--force_envs_single_thread', default=True, type=str2bool,
help='Some environments may themselves use parallel libraries such as OpenMP or MKL. Since we parallelize environments on the level of workers, there is no need to keep this parallel semantic.'
'This flag uses threadpoolctl to force libraries such as OpenMP and MKL to use only a single thread within the environment.'
'Default value (True) is recommended unless you are running fewer workers than CPU cores.',
)
p.add_argument('--reset_timeout_seconds', default=120, type=int, help='Fail worker on initialization if not a single environment was reset in this time (worker probably got stuck)')
p.add_argument('--default_niceness', default=0, type=int, help='Niceness of the highest priority process (the learner). Values below zero require elevated privileges.')
p.add_argument(
'--train_in_background_thread', default=True, type=str2bool,
help='Using background thread for training is faster and allows preparing the next batch while training is in progress.'
'Unfortunately debugging can become very tricky in this case. So there is an option to use only a single thread on the learner to simplify the debugging.',
)
p.add_argument('--learner_main_loop_num_cores', default=1, type=int, help='When batching on the learner is the bottleneck, increasing the number of cores PyTorch uses can improve the performance')
p.add_argument('--actor_worker_gpus', default=[], type=int, nargs='*', help='By default, actor workers only use CPUs. Changes this if e.g. you need GPU-based rendering on the actors')
# PBT stuff
p.add_argument('--with_pbt', default=False, type=str2bool, help='Enables population-based training basic features')
p.add_argument('--pbt_mix_policies_in_one_env', default=True, type=str2bool, help='For multi-agent envs, whether we mix different policies in one env.')
p.add_argument('--pbt_period_env_steps', default=int(5e6), type=int, help='Periodically replace the worst policies with the best ones and perturb the hyperparameters')
p.add_argument('--pbt_start_mutation', default=int(2e7), type=int, help='Allow initial diversification, start PBT after this many env steps')
        p.add_argument('--pbt_replace_fraction', default=0.3, type=float, help='A portion of policies performing worst to be replaced by better policies (rounded up)')
p.add_argument('--pbt_mutation_rate', default=0.15, type=float, help='Probability that a parameter mutates')
p.add_argument('--pbt_replace_reward_gap', default=0.1, type=float, help='Relative gap in true reward when replacing weights of the policy with a better performing one')
p.add_argument('--pbt_replace_reward_gap_absolute', default=1e-6, type=float, help='Absolute gap in true reward when replacing weights of the policy with a better performing one')
p.add_argument('--pbt_optimize_batch_size', default=False, type=str2bool, help='Whether to optimize batch size or not (experimental)')
p.add_argument(
'--pbt_target_objective', default='true_reward', type=str,
help='Policy stat to optimize with PBT. true_reward (default) is equal to raw env reward if not specified, but can also be any other per-policy stat.'
'For DMlab-30 use value "dmlab_target_objective" (which is capped human normalized score)',
)
# CPC|A options
        p.add_argument('--use_cpc', default=False, type=str2bool, help='Use CPC|A as an auxiliary loss during learning')
p.add_argument('--cpc_forward_steps', default=8, type=int, help='Number of forward prediction steps for CPC')
p.add_argument('--cpc_time_subsample', default=6, type=int, help='Number of timesteps to sample from each batch. This should be less than recurrence to decorrelate experience.')
p.add_argument('--cpc_forward_subsample', default=2, type=int, help='Number of forward steps to sample for loss computation. This should be less than cpc_forward_steps to decorrelate gradients.')
# debugging options
p.add_argument('--benchmark', default=False, type=str2bool, help='Benchmark mode')
p.add_argument('--sampler_only', default=False, type=str2bool, help='Do not send experience to the learner, measuring sampling throughput')
def __init__(self, cfg):
super().__init__(cfg)
# we should not use CUDA in the main thread, only on the workers
set_global_cuda_envvars(cfg)
tmp_env = make_env_func(self.cfg, env_config=None)
self.obs_space = tmp_env.observation_space
self.action_space = transform_action_space(tmp_env.action_space)
self.num_agents = tmp_env.num_agents
self.reward_shaping_scheme = None
if self.cfg.with_pbt:
self.reward_shaping_scheme = get_default_reward_shaping(tmp_env)
tmp_env.close()
# shared memory allocation
self.traj_buffers = SharedBuffers(self.cfg, self.num_agents, self.obs_space, self.action_space)
self.actor_workers = None
self.report_queue = MpQueue(40 * 1000 * 1000)
self.policy_workers = dict()
self.policy_queues = dict()
self.learner_workers = dict()
self.workers_by_handle = None
self.policy_inputs = [[] for _ in range(self.cfg.num_policies)]
self.policy_outputs = dict()
for worker_idx in range(self.cfg.num_workers):
for split_idx in range(self.cfg.worker_num_splits):
self.policy_outputs[(worker_idx, split_idx)] = dict()
self.policy_avg_stats = dict()
self.policy_lag = [dict() for _ in range(self.cfg.num_policies)]
self.last_timing = dict()
self.env_steps = dict()
self.samples_collected = [0 for _ in range(self.cfg.num_policies)]
self.total_env_steps_since_resume = 0
# currently this applies only to the current run, not experiment as a whole
# to change this behavior we'd need to save the state of the main loop to a filesystem
self.total_train_seconds = 0
self.last_report = time.time()
self.last_experiment_summaries = 0
self.report_interval = 5.0 # sec
self.experiment_summaries_interval = self.cfg.experiment_summaries_interval # sec
self.avg_stats_intervals = (2, 12, 60) # 10 seconds, 1 minute, 5 minutes
self.fps_stats = deque([], maxlen=max(self.avg_stats_intervals))
self.throughput_stats = [deque([], maxlen=5) for _ in range(self.cfg.num_policies)]
self.avg_stats = dict()
self.stats = dict() # regular (non-averaged) stats
self.writers = dict()
writer_keys = list(range(self.cfg.num_policies))
for key in writer_keys:
summary_dir = join(summaries_dir(experiment_dir(cfg=self.cfg)), str(key))
summary_dir = ensure_dir_exists(summary_dir)
self.writers[key] = SummaryWriter(summary_dir, flush_secs=20)
self.pbt = PopulationBasedTraining(self.cfg, self.reward_shaping_scheme, self.writers)
def _cfg_dict(self):
if isinstance(self.cfg, dict):
return self.cfg
else:
return vars(self.cfg)
def _save_cfg(self):
cfg_dict = self._cfg_dict()
with open(cfg_file(self.cfg), 'w') as json_file:
json.dump(cfg_dict, json_file, indent=2)
def initialize(self):
self._save_cfg()
save_git_diff(experiment_dir(cfg=self.cfg))
init_file_logger(experiment_dir(self.cfg))
def finalize(self):
pass
def create_actor_worker(self, idx, actor_queue):
learner_queues = {p: w.task_queue for p, w in self.learner_workers.items()}
return ActorWorker(
self.cfg, self.obs_space, self.action_space, self.num_agents, idx, self.traj_buffers,
task_queue=actor_queue, policy_queues=self.policy_queues,
report_queue=self.report_queue, learner_queues=learner_queues,
)
# noinspection PyProtectedMember
def init_subset(self, indices, actor_queues):
"""
Initialize a subset of actor workers (rollout workers) and wait until the first reset() is completed for all
envs on these workers.
This function will retry if the worker process crashes during the initial reset.
:param indices: indices of actor workers to initialize
:param actor_queues: task queues corresponding to these workers
:return: initialized workers
"""
reset_timelimit_seconds = self.cfg.reset_timeout_seconds # fail worker if not a single env was reset in that time
workers = dict()
last_env_initialized = dict()
for i in indices:
w = self.create_actor_worker(i, actor_queues[i])
w.init()
w.request_reset()
workers[i] = w
last_env_initialized[i] = time.time()
total_num_envs = self.cfg.num_workers * self.cfg.num_envs_per_worker
envs_initialized = [0] * self.cfg.num_workers
workers_finished = set()
while len(workers_finished) < len(workers):
failed_worker = -1
try:
report = self.report_queue.get(timeout=1.0)
if 'initialized_env' in report:
worker_idx, split_idx, env_i = report['initialized_env']
last_env_initialized[worker_idx] = time.time()
envs_initialized[worker_idx] += 1
log.debug(
'Progress for %d workers: %d/%d envs initialized...',
len(indices), sum(envs_initialized), total_num_envs,
)
elif 'finished_reset' in report:
workers_finished.add(report['finished_reset'])
elif 'critical_error' in report:
failed_worker = report['critical_error']
except Empty:
pass
for worker_idx, w in workers.items():
if worker_idx in workers_finished:
continue
time_passed = time.time() - last_env_initialized[worker_idx]
timeout = time_passed > reset_timelimit_seconds
if timeout or failed_worker == worker_idx or not w.process.is_alive():
envs_initialized[worker_idx] = 0
log.error('Worker %d is stuck or failed (%.3f). Reset!', w.worker_idx, time_passed)
log.debug('Status: %r', w.process.is_alive())
stuck_worker = w
stuck_worker.process.kill()
new_worker = self.create_actor_worker(worker_idx, actor_queues[worker_idx])
new_worker.init()
new_worker.request_reset()
last_env_initialized[worker_idx] = time.time()
workers[worker_idx] = new_worker
del stuck_worker
return workers.values()
# noinspection PyUnresolvedReferences
def init_workers(self):
"""
Initialize all types of workers and start their worker processes.
"""
actor_queues = [MpQueue(2 * 1000 * 1000) for _ in range(self.cfg.num_workers)]
policy_worker_queues = dict()
for policy_id in range(self.cfg.num_policies):
policy_worker_queues[policy_id] = []
for i in range(self.cfg.policy_workers_per_policy):
policy_worker_queues[policy_id].append(TorchJoinableQueue())
log.info('Initializing learners...')
policy_locks = [multiprocessing.Lock() for _ in range(self.cfg.num_policies)]
resume_experience_collection_cv = [multiprocessing.Condition() for _ in range(self.cfg.num_policies)]
learner_idx = 0
for policy_id in range(self.cfg.num_policies):
learner_worker = LearnerWorker(
learner_idx, policy_id, self.cfg, self.obs_space, self.action_space,
self.report_queue, policy_worker_queues[policy_id], self.traj_buffers,
policy_locks[policy_id], resume_experience_collection_cv[policy_id],
)
learner_worker.start_process()
learner_worker.init()
self.learner_workers[policy_id] = learner_worker
learner_idx += 1
log.info('Initializing policy workers...')
for policy_id in range(self.cfg.num_policies):
self.policy_workers[policy_id] = []
policy_queue = MpQueue()
self.policy_queues[policy_id] = policy_queue
for i in range(self.cfg.policy_workers_per_policy):
policy_worker = PolicyWorker(
i, policy_id, self.cfg, self.obs_space, self.action_space, self.traj_buffers,
policy_queue, actor_queues, self.report_queue, policy_worker_queues[policy_id][i],
policy_locks[policy_id], resume_experience_collection_cv[policy_id],
)
self.policy_workers[policy_id].append(policy_worker)
policy_worker.start_process()
log.info('Initializing actors...')
# We support actor worker initialization in groups, which can be useful for some envs that
# e.g. crash when too many environments are being initialized in parallel.
# Currently the limit is not used since it is not required for any envs supported out of the box,
# so we parallelize initialization as hard as we can.
# If this is required for your environment, perhaps a better solution would be to use global locks,
# like FileLock (see doom_gym.py)
self.actor_workers = []
max_parallel_init = int(1e9) # might be useful to limit this for some envs
worker_indices = list(range(self.cfg.num_workers))
for i in range(0, self.cfg.num_workers, max_parallel_init):
workers = self.init_subset(worker_indices[i:i + max_parallel_init], actor_queues)
self.actor_workers.extend(workers)
def init_pbt(self):
if self.cfg.with_pbt:
self.pbt.init(self.learner_workers, self.actor_workers)
def finish_initialization(self):
"""Wait until policy workers are fully initialized."""
for policy_id, workers in self.policy_workers.items():
for w in workers:
log.debug('Waiting for policy worker %d-%d to finish initialization...', policy_id, w.worker_idx)
w.init()
log.debug('Policy worker %d-%d initialized!', policy_id, w.worker_idx)
def update_env_steps_actor(self):
for w in self.actor_workers:
w.update_env_steps(self.env_steps)
def process_report(self, report):
"""Process stats from various types of workers."""
if 'policy_id' in report:
policy_id = report['policy_id']
if 'learner_env_steps' in report:
if policy_id in self.env_steps:
delta = report['learner_env_steps'] - self.env_steps[policy_id]
self.total_env_steps_since_resume += delta
self.env_steps[policy_id] = report['learner_env_steps']
if 'episodic' in report:
s = report['episodic']
for _, key, value in iterate_recursively(s):
if key not in self.policy_avg_stats:
self.policy_avg_stats[key] = [deque(maxlen=self.cfg.stats_avg) for _ in range(self.cfg.num_policies)]
self.policy_avg_stats[key][policy_id].append(value)
for extra_stat_func in EXTRA_EPISODIC_STATS_PROCESSING:
extra_stat_func(policy_id, key, value, self.cfg)
if 'train' in report:
self.report_train_summaries(report['train'], policy_id)
if 'samples' in report:
self.samples_collected[policy_id] += report['samples']
if 'timing' in report:
for k, v in report['timing'].items():
if k not in self.avg_stats:
self.avg_stats[k] = deque([], maxlen=50)
self.avg_stats[k].append(v)
if 'stats' in report:
self.stats.update(report['stats'])
def report(self):
"""
Called periodically (every X seconds, see report_interval).
Print experiment stats (FPS, avg rewards) to console and dump TF summaries collected from workers to disk.
"""
if len(self.env_steps) < self.cfg.num_policies:
return
now = time.time()
self.fps_stats.append((now, self.total_env_steps_since_resume))
if len(self.fps_stats) <= 1:
return
fps = []
for avg_interval in self.avg_stats_intervals:
past_moment, past_frames = self.fps_stats[max(0, len(self.fps_stats) - 1 - avg_interval)]
fps.append((self.total_env_steps_since_resume - past_frames) / (now - past_moment))
sample_throughput = dict()
for policy_id in range(self.cfg.num_policies):
self.throughput_stats[policy_id].append((now, self.samples_collected[policy_id]))
if len(self.throughput_stats[policy_id]) > 1:
past_moment, past_samples = self.throughput_stats[policy_id][0]
sample_throughput[policy_id] = (self.samples_collected[policy_id] - past_samples) / (now - past_moment)
else:
sample_throughput[policy_id] = math.nan
total_env_steps = sum(self.env_steps.values())
self.print_stats(fps, sample_throughput, total_env_steps)
if time.time() - self.last_experiment_summaries > self.experiment_summaries_interval:
self.report_experiment_summaries(fps[0], sample_throughput)
self.last_experiment_summaries = time.time()
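    # Illustrative arithmetic (added for clarity; not part of the original file):
    # report() runs roughly every report_interval=5 s, so avg_stats_intervals=(2, 12, 60)
    # correspond to sliding windows of ~10 s, ~60 s and ~300 s. Each FPS value is
    # (total_env_steps_now - total_env_steps_at_window_start) / elapsed_seconds,
    # computed from the (timestamp, step_count) pairs stored in self.fps_stats.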
def print_stats(self, fps, sample_throughput, total_env_steps):
fps_str = []
for interval, fps_value in zip(self.avg_stats_intervals, fps):
fps_str.append(f'{int(interval * self.report_interval)} sec: {fps_value:.1f}')
fps_str = f'({", ".join(fps_str)})'
samples_per_policy = ', '.join([f'{p}: {s:.1f}' for p, s in sample_throughput.items()])
lag_stats = self.policy_lag[0]
lag = AttrDict()
for key in ['min', 'avg', 'max']:
lag[key] = lag_stats.get(f'version_diff_{key}', -1)
policy_lag_str = f'min: {lag.min:.1f}, avg: {lag.avg:.1f}, max: {lag.max:.1f}'
log.debug(
'Fps is %s. Total num frames: %d. Throughput: %s. Samples: %d. Policy #0 lag: (%s)',
fps_str, total_env_steps, samples_per_policy, sum(self.samples_collected), policy_lag_str,
)
if 'reward' in self.policy_avg_stats:
policy_reward_stats = []
for policy_id in range(self.cfg.num_policies):
reward_stats = self.policy_avg_stats['reward'][policy_id]
if len(reward_stats) > 0:
policy_reward_stats.append((policy_id, f'{np.mean(reward_stats):.3f}'))
log.debug('Avg episode reward: %r', policy_reward_stats)
def report_train_summaries(self, stats, policy_id):
#if(self.learner_workers[0].get_rank != 0):
# return
for key, scalar in stats.items():
self.writers[policy_id].add_scalar(f'train/{key}', scalar, self.env_steps[policy_id])
if 'version_diff' in key:
self.policy_lag[policy_id][key] = scalar
def report_experiment_summaries(self, fps, sample_throughput):
#if(self.learner_workers[0].get_rank != 0):
# return
memory_mb = memory_consumption_mb()
default_policy = 0
for policy_id, env_steps in self.env_steps.items():
if policy_id == default_policy:
self.writers[policy_id].add_scalar('0_aux/_fps', fps, env_steps)
self.writers[policy_id].add_scalar('0_aux/master_process_memory_mb', float(memory_mb), env_steps)
for key, value in self.avg_stats.items():
if len(value) >= value.maxlen or (len(value) > 10 and self.total_train_seconds > 300):
self.writers[policy_id].add_scalar(f'stats/{key}', np.mean(value), env_steps)
for key, value in self.stats.items():
self.writers[policy_id].add_scalar(f'stats/{key}', value, env_steps)
if not math.isnan(sample_throughput[policy_id]):
self.writers[policy_id].add_scalar('0_aux/_sample_throughput', sample_throughput[policy_id], env_steps)
for key, stat in self.policy_avg_stats.items():
if len(stat[policy_id]) >= stat[policy_id].maxlen or (len(stat[policy_id]) > 10 and self.total_train_seconds > 300):
stat_value = np.mean(stat[policy_id])
writer = self.writers[policy_id]
# custom summaries have their own sections in tensorboard
if '/' in key:
avg_tag = key
min_tag = f'{key}_min'
max_tag = f'{key}_max'
else:
avg_tag = f'0_aux/avg_{key}'
min_tag = f'0_aux/avg_{key}_min'
max_tag = f'0_aux/avg_{key}_max'
writer.add_scalar(avg_tag, float(stat_value), env_steps)
# for key stats report min/max as well
if key in ('reward', 'true_reward', 'len'):
writer.add_scalar(min_tag, float(min(stat[policy_id])), env_steps)
writer.add_scalar(max_tag, float(max(stat[policy_id])), env_steps)
for extra_summaries_func in EXTRA_PER_POLICY_SUMMARIES:
extra_summaries_func(policy_id, self.policy_avg_stats, env_steps, self.writers[policy_id], self.cfg)
def _should_end_training(self):
end = len(self.env_steps) > 0 and all(s > self.cfg.train_for_env_steps for s in self.env_steps.values())
end |= self.total_train_seconds > self.cfg.train_for_seconds
if self.cfg.benchmark:
end |= self.total_env_steps_since_resume >= int(2e6)
end |= sum(self.samples_collected) >= int(1e6)
return end
def run(self):
"""
This function contains the main loop of the algorithm, as well as initialization/cleanup code.
:return: ExperimentStatus (SUCCESS, FAILURE, INTERRUPTED). Useful in testing.
"""
status = ExperimentStatus.SUCCESS
if os.path.isfile(done_filename(self.cfg)):
log.warning('Training already finished! Remove "done" file to continue training')
return status
self.init_workers()
self.init_pbt()
self.finish_initialization()
log.info('Collecting experience...')
timing = Timing()
with timing.timeit('experience'):
# noinspection PyBroadException
try:
while not self._should_end_training():
try:
reports = self.report_queue.get_many(timeout=0.1)
for report in reports:
self.process_report(report)
except Empty:
pass
if time.time() - self.last_report > self.report_interval:
self.report()
now = time.time()
self.total_train_seconds += now - self.last_report
self.last_report = now
self.update_env_steps_actor()
self.pbt.update(self.env_steps, self.policy_avg_stats)
except Exception:
log.exception('Exception in driver loop')
status = ExperimentStatus.FAILURE
except KeyboardInterrupt:
log.warning('Keyboard interrupt detected in driver loop, exiting...')
status = ExperimentStatus.INTERRUPTED
for learner in self.learner_workers.values():
# timeout is needed here because some environments may crash on KeyboardInterrupt (e.g. VizDoom)
# Therefore the learner train loop will never do another iteration and will never save the model.
# This is not an issue with normal exit, e.g. due to desired number of frames reached.
learner.save_model(timeout=5.0)
all_workers = self.actor_workers
for workers in self.policy_workers.values():
all_workers.extend(workers)
all_workers.extend(self.learner_workers.values())
child_processes = list_child_processes()
time.sleep(0.1)
log.debug('Closing workers...')
for i, w in enumerate(all_workers):
w.close()
time.sleep(0.01)
for i, w in enumerate(all_workers):
w.join()
log.debug('Workers joined!')
# VizDoom processes often refuse to die for an unidentified reason, so we're force killing them with a hack
kill_processes(child_processes)
fps = self.total_env_steps_since_resume / timing.experience
log.info('Collected %r, FPS: %.1f', self.env_steps, fps)
log.info('Timing: %s', timing)
if self._should_end_training():
with open(done_filename(self.cfg), 'w') as fobj:
fobj.write(f'{self.env_steps}')
time.sleep(0.5)
log.info('Done!')
return status
|
[
"multi_sample_factory.utils.utils.log.error",
"multi_sample_factory.utils.utils.cfg_file",
"multiprocessing.Lock",
"numpy.mean",
"multi_sample_factory.utils.utils.list_child_processes",
"collections.deque",
"multiprocessing.cpu_count",
"multi_sample_factory.algorithms.appo.population_based_training.PopulationBasedTraining",
"multi_sample_factory.algorithms.appo.appo_utils.set_global_cuda_envvars",
"multi_sample_factory.algorithms.utils.action_distributions.transform_action_space",
"torch.multiprocessing.set_sharing_strategy",
"multi_sample_factory.utils.utils.memory_consumption_mb",
"multi_sample_factory.utils.utils.experiment_dir",
"multiprocessing.Condition",
"multi_sample_factory.envs.env_utils.get_default_reward_shaping",
"multi_sample_factory.utils.utils.log.exception",
"multi_sample_factory.utils.utils.ensure_dir_exists",
"json.dump",
"multi_sample_factory.algorithms.appo.actor_worker.ActorWorker",
"multi_sample_factory.algorithms.appo.appo_utils.make_env_func",
"multi_sample_factory.utils.utils.log.info",
"multi_sample_factory.algorithms.appo.appo_utils.iterate_recursively",
"math.isnan",
"multi_sample_factory.algorithms.appo.policy_worker.PolicyWorker",
"multi_sample_factory.algorithms.appo.shared_buffers.SharedBuffers",
"time.sleep",
"multi_sample_factory.utils.utils.kill_processes",
"multi_sample_factory.utils.utils.done_filename",
"multi_sample_factory.algorithms.appo.learner.LearnerWorker",
"tensorboardX.SummaryWriter",
"multi_sample_factory.utils.utils.log.warning",
"faster_fifo.Queue",
"multi_sample_factory.utils.timing.Timing",
"time.time",
"torch.multiprocessing.JoinableQueue",
"multi_sample_factory.utils.utils.log.debug",
"multi_sample_factory.utils.utils.AttrDict"
] |
[((2328, 2385), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (2370, 2385), False, 'import torch\n'), ((19399, 19427), 'multi_sample_factory.algorithms.appo.appo_utils.set_global_cuda_envvars', 'set_global_cuda_envvars', (['cfg'], {}), '(cfg)\n', (19422, 19427), False, 'from multi_sample_factory.algorithms.appo.appo_utils import make_env_func, iterate_recursively, set_global_cuda_envvars\n'), ((19447, 19487), 'multi_sample_factory.algorithms.appo.appo_utils.make_env_func', 'make_env_func', (['self.cfg'], {'env_config': 'None'}), '(self.cfg, env_config=None)\n', (19460, 19487), False, 'from multi_sample_factory.algorithms.appo.appo_utils import make_env_func, iterate_recursively, set_global_cuda_envvars\n'), ((19567, 19611), 'multi_sample_factory.algorithms.utils.action_distributions.transform_action_space', 'transform_action_space', (['tmp_env.action_space'], {}), '(tmp_env.action_space)\n', (19589, 19611), False, 'from multi_sample_factory.algorithms.utils.action_distributions import transform_action_space\n'), ((19896, 19971), 'multi_sample_factory.algorithms.appo.shared_buffers.SharedBuffers', 'SharedBuffers', (['self.cfg', 'self.num_agents', 'self.obs_space', 'self.action_space'], {}), '(self.cfg, self.num_agents, self.obs_space, self.action_space)\n', (19909, 19971), False, 'from multi_sample_factory.algorithms.appo.shared_buffers import SharedBuffers\n'), ((20036, 20061), 'faster_fifo.Queue', 'MpQueue', (['(40 * 1000 * 1000)'], {}), '(40 * 1000 * 1000)\n', (20043, 20061), True, 'from faster_fifo import Queue as MpQueue\n'), ((21058, 21069), 'time.time', 'time.time', ([], {}), '()\n', (21067, 21069), False, 'import time\n'), ((21945, 22020), 'multi_sample_factory.algorithms.appo.population_based_training.PopulationBasedTraining', 'PopulationBasedTraining', (['self.cfg', 'self.reward_shaping_scheme', 'self.writers'], {}), '(self.cfg, self.reward_shaping_scheme, self.writers)\n', (21968, 22020), False, 'from multi_sample_factory.algorithms.appo.population_based_training import PopulationBasedTraining\n'), ((22681, 22913), 'multi_sample_factory.algorithms.appo.actor_worker.ActorWorker', 'ActorWorker', (['self.cfg', 'self.obs_space', 'self.action_space', 'self.num_agents', 'idx', 'self.traj_buffers'], {'task_queue': 'actor_queue', 'policy_queues': 'self.policy_queues', 'report_queue': 'self.report_queue', 'learner_queues': 'learner_queues'}), '(self.cfg, self.obs_space, self.action_space, self.num_agents,\n idx, self.traj_buffers, task_queue=actor_queue, policy_queues=self.\n policy_queues, report_queue=self.report_queue, learner_queues=\n learner_queues)\n', (22692, 22913), False, 'from multi_sample_factory.algorithms.appo.actor_worker import ActorWorker\n'), ((26522, 26558), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Initializing learners..."""'], {}), "('Initializing learners...')\n", (26530, 26558), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((27327, 27369), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Initializing policy workers..."""'], {}), "('Initializing policy workers...')\n", (27335, 27369), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, 
list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((28111, 28145), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Initializing actors..."""'], {}), "('Initializing actors...')\n", (28119, 28145), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((31577, 31588), 'time.time', 'time.time', ([], {}), '()\n', (31586, 31588), False, 'import time\n'), ((33297, 33307), 'multi_sample_factory.utils.utils.AttrDict', 'AttrDict', ([], {}), '()\n', (33305, 33307), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((34675, 34698), 'multi_sample_factory.utils.utils.memory_consumption_mb', 'memory_consumption_mb', ([], {}), '()\n', (34696, 34698), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((37877, 37913), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Collecting experience..."""'], {}), "('Collecting experience...')\n", (37885, 37913), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((37932, 37940), 'multi_sample_factory.utils.timing.Timing', 'Timing', ([], {}), '()\n', (37938, 37940), False, 'from multi_sample_factory.utils.timing import Timing\n'), ((39731, 39753), 'multi_sample_factory.utils.utils.list_child_processes', 'list_child_processes', ([], {}), '()\n', (39751, 39753), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((39763, 39778), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (39773, 39778), False, 'import time\n'), ((39787, 39818), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Closing workers..."""'], {}), "('Closing workers...')\n", (39796, 39818), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((39987, 40015), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Workers joined!"""'], {}), "('Workers joined!')\n", (39996, 40015), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40141, 40172), 'multi_sample_factory.utils.utils.kill_processes', 'kill_processes', (['child_processes'], {}), '(child_processes)\n', (40155, 40172), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, 
list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40250, 40306), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Collected %r, FPS: %.1f"""', 'self.env_steps', 'fps'], {}), "('Collected %r, FPS: %.1f', self.env_steps, fps)\n", (40258, 40306), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40315, 40345), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Timing: %s"""', 'timing'], {}), "('Timing: %s', timing)\n", (40323, 40345), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40505, 40520), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (40515, 40520), False, 'import time\n'), ((40529, 40546), 'multi_sample_factory.utils.utils.log.info', 'log.info', (['"""Done!"""'], {}), "('Done!')\n", (40537, 40546), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((19771, 19806), 'multi_sample_factory.envs.env_utils.get_default_reward_shaping', 'get_default_reward_shaping', (['tmp_env'], {}), '(tmp_env)\n', (19797, 19806), False, 'from multi_sample_factory.envs.env_utils import get_default_reward_shaping\n'), ((21437, 21456), 'collections.deque', 'deque', (['[]'], {'maxlen': '(5)'}), '([], maxlen=5)\n', (21442, 21456), False, 'from collections import deque\n'), ((21820, 21850), 'multi_sample_factory.utils.utils.ensure_dir_exists', 'ensure_dir_exists', (['summary_dir'], {}), '(summary_dir)\n', (21837, 21850), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((21883, 21924), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['summary_dir'], {'flush_secs': '(20)'}), '(summary_dir, flush_secs=20)\n', (21896, 21924), False, 'from tensorboardX import SummaryWriter\n'), ((22293, 22333), 'json.dump', 'json.dump', (['cfg_dict', 'json_file'], {'indent': '(2)'}), '(cfg_dict, json_file, indent=2)\n', (22302, 22333), False, 'import json\n'), ((22408, 22436), 'multi_sample_factory.utils.utils.experiment_dir', 'experiment_dir', ([], {'cfg': 'self.cfg'}), '(cfg=self.cfg)\n', (22422, 22436), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((22463, 22487), 'multi_sample_factory.utils.utils.experiment_dir', 'experiment_dir', (['self.cfg'], {}), '(self.cfg)\n', (22477, 22487), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((23861, 23872), 'time.time', 'time.time', ([], {}), '()\n', (23870, 23872), False, 'import time\n'), ((26166, 26190), 
'faster_fifo.Queue', 'MpQueue', (['(2 * 1000 * 1000)'], {}), '(2 * 1000 * 1000)\n', (26173, 26190), True, 'from faster_fifo import Queue as MpQueue\n'), ((26583, 26605), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (26603, 26605), False, 'import multiprocessing\n'), ((26688, 26715), 'multiprocessing.Condition', 'multiprocessing.Condition', ([], {}), '()\n', (26713, 26715), False, 'import multiprocessing\n'), ((26864, 27101), 'multi_sample_factory.algorithms.appo.learner.LearnerWorker', 'LearnerWorker', (['learner_idx', 'policy_id', 'self.cfg', 'self.obs_space', 'self.action_space', 'self.report_queue', 'policy_worker_queues[policy_id]', 'self.traj_buffers', 'policy_locks[policy_id]', 'resume_experience_collection_cv[policy_id]'], {}), '(learner_idx, policy_id, self.cfg, self.obs_space, self.\n action_space, self.report_queue, policy_worker_queues[policy_id], self.\n traj_buffers, policy_locks[policy_id], resume_experience_collection_cv[\n policy_id])\n', (26877, 27101), False, 'from multi_sample_factory.algorithms.appo.learner import LearnerWorker\n'), ((27501, 27510), 'faster_fifo.Queue', 'MpQueue', ([], {}), '()\n', (27508, 27510), True, 'from faster_fifo import Queue as MpQueue\n'), ((32838, 32849), 'time.time', 'time.time', ([], {}), '()\n', (32847, 32849), False, 'import time\n'), ((34094, 34150), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Avg episode reward: %r"""', 'policy_reward_stats'], {}), "('Avg episode reward: %r', policy_reward_stats)\n", (34103, 34150), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((37632, 37655), 'multi_sample_factory.utils.utils.done_filename', 'done_filename', (['self.cfg'], {}), '(self.cfg)\n', (37645, 37655), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((37670, 37756), 'multi_sample_factory.utils.utils.log.warning', 'log.warning', (['"""Training already finished! Remove "done" file to continue training"""'], {}), '(\n \'Training already finished! 
Remove "done" file to continue training\')\n', (37681, 37756), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((39897, 39913), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (39907, 39913), False, 'import time\n'), ((4567, 4594), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4592, 4594), False, 'import multiprocessing\n'), ((22242, 22260), 'multi_sample_factory.utils.utils.cfg_file', 'cfg_file', (['self.cfg'], {}), '(self.cfg)\n', (22250, 22260), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((27665, 27919), 'multi_sample_factory.algorithms.appo.policy_worker.PolicyWorker', 'PolicyWorker', (['i', 'policy_id', 'self.cfg', 'self.obs_space', 'self.action_space', 'self.traj_buffers', 'policy_queue', 'actor_queues', 'self.report_queue', 'policy_worker_queues[policy_id][i]', 'policy_locks[policy_id]', 'resume_experience_collection_cv[policy_id]'], {}), '(i, policy_id, self.cfg, self.obs_space, self.action_space,\n self.traj_buffers, policy_queue, actor_queues, self.report_queue,\n policy_worker_queues[policy_id][i], policy_locks[policy_id],\n resume_experience_collection_cv[policy_id])\n', (27677, 27919), False, 'from multi_sample_factory.algorithms.appo.policy_worker import PolicyWorker\n'), ((29364, 29465), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Waiting for policy worker %d-%d to finish initialization..."""', 'policy_id', 'w.worker_idx'], {}), "('Waiting for policy worker %d-%d to finish initialization...',\n policy_id, w.worker_idx)\n", (29373, 29465), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((29503, 29573), 'multi_sample_factory.utils.utils.log.debug', 'log.debug', (['"""Policy worker %d-%d initialized!"""', 'policy_id', 'w.worker_idx'], {}), "('Policy worker %d-%d initialized!', policy_id, w.worker_idx)\n", (29512, 29573), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((30302, 30324), 'multi_sample_factory.algorithms.appo.appo_utils.iterate_recursively', 'iterate_recursively', (['s'], {}), '(s)\n', (30321, 30324), False, 'from multi_sample_factory.algorithms.appo.appo_utils import make_env_func, iterate_recursively, set_global_cuda_envvars\n'), ((32638, 32649), 'time.time', 'time.time', ([], {}), '()\n', (32647, 32649), False, 'import time\n'), ((35457, 35497), 'math.isnan', 'math.isnan', (['sample_throughput[policy_id]'], {}), '(sample_throughput[policy_id])\n', (35467, 35497), False, 'import math\n'), ((21753, 21781), 'multi_sample_factory.utils.utils.experiment_dir', 'experiment_dir', ([], {'cfg': 'self.cfg'}), '(cfg=self.cfg)\n', (21767, 21781), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, 
list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((24381, 24392), 'time.time', 'time.time', ([], {}), '()\n', (24390, 24392), False, 'import time\n'), ((25091, 25102), 'time.time', 'time.time', ([], {}), '()\n', (25100, 25102), False, 'import time\n'), ((25364, 25451), 'multi_sample_factory.utils.utils.log.error', 'log.error', (['"""Worker %d is stuck or failed (%.3f). Reset!"""', 'w.worker_idx', 'time_passed'], {}), "('Worker %d is stuck or failed (%.3f). Reset!', w.worker_idx,\n time_passed)\n", (25373, 25451), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((25837, 25848), 'time.time', 'time.time', ([], {}), '()\n', (25846, 25848), False, 'import time\n'), ((26491, 26511), 'torch.multiprocessing.JoinableQueue', 'TorchJoinableQueue', ([], {}), '()\n', (26509, 26511), True, 'from torch.multiprocessing import JoinableQueue as TorchJoinableQueue\n'), ((31113, 31133), 'collections.deque', 'deque', (['[]'], {'maxlen': '(50)'}), '([], maxlen=50)\n', (31118, 31133), False, 'from collections import deque\n'), ((35846, 35870), 'numpy.mean', 'np.mean', (['stat[policy_id]'], {}), '(stat[policy_id])\n', (35853, 35870), True, 'import numpy as np\n'), ((38824, 38865), 'multi_sample_factory.utils.utils.log.exception', 'log.exception', (['"""Exception in driver loop"""'], {}), "('Exception in driver loop')\n", (38837, 38865), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((38970, 39039), 'multi_sample_factory.utils.utils.log.warning', 'log.warning', (['"""Keyboard interrupt detected in driver loop, exiting..."""'], {}), "('Keyboard interrupt detected in driver loop, exiting...')\n", (38981, 39039), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((40409, 40432), 'multi_sample_factory.utils.utils.done_filename', 'done_filename', (['self.cfg'], {}), '(self.cfg)\n', (40422, 40432), False, 'from multi_sample_factory.utils.utils import summaries_dir, experiment_dir, log, str2bool, memory_consumption_mb, cfg_file, ensure_dir_exists, list_child_processes, kill_processes, AttrDict, done_filename, save_git_diff, init_file_logger\n'), ((38512, 38523), 'time.time', 'time.time', ([], {}), '()\n', (38521, 38523), False, 'import time\n'), ((30437, 30469), 'collections.deque', 'deque', ([], {'maxlen': 'self.cfg.stats_avg'}), '(maxlen=self.cfg.stats_avg)\n', (30442, 30469), False, 'from collections import deque\n'), ((35266, 35280), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (35273, 35280), True, 'import numpy as np\n'), ((38388, 38399), 'time.time', 'time.time', ([], {}), '()\n', (38397, 38399), False, 'import time\n'), ((34052, 34073), 'numpy.mean', 'np.mean', (['reward_stats'], {}), '(reward_stats)\n', (34059, 34073), True, 'import numpy as np\n')]
|
"""
Module description:
"""
__version__ = '0.3.1'
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
from types import SimpleNamespace
import typing as t
import numpy as np
import logging as pylog
from elliot.utils import logging
from hyperopt import STATUS_OK
class ModelCoordinator(object):
"""
    This class handles the selection of hyperparameters for the hyperparameter tuning performed with HyperOpt.
"""
def __init__(self, data_objs, base: SimpleNamespace, params, model_class: t.ClassVar, test_fold_index: int):
"""
The constructor creates a Placeholder of the recommender model.
:param base: a SimpleNamespace that contains the configuration (main level) options
:param params: a SimpleNamespace that contains the hyper-parameters of the model
:param model_class: the class of the recommendation model
"""
self.logger = logging.get_logger(self.__class__.__name__, pylog.CRITICAL if base.config_test else pylog.DEBUG)
self.data_objs = data_objs
self.base = base
self.params = params
self.model_class = model_class
self.test_fold_index = test_fold_index
self.model_config_index = 0
def objective(self, args):
"""
This function respect the signature, and the return format required for HyperOpt optimization
:param args: a Dictionary that contains the new hyper-parameter values that will be used in the current run
:return: it returns a Dictionary with loss, and status being required by HyperOpt,
and params, and results being required by the framework
"""
sampled_namespace = SimpleNamespace(**args)
model_params = SimpleNamespace(**self.params[0].__dict__)
self.logger.info("Hyperparameter tuning exploration:")
for (k, v) in sampled_namespace.__dict__.items():
model_params.__setattr__(k, v)
self.logger.info(f"{k} set to {model_params.__getattribute__(k)}")
losses = []
results = []
for trainval_index, data_obj in enumerate(self.data_objs):
self.logger.info(f"Exploration: Hyperparameter exploration number {self.model_config_index+1}")
self.logger.info(f"Exploration: Test Fold exploration number {self.test_fold_index+1}")
self.logger.info(f"Exploration: Train-Validation Fold exploration number {trainval_index+1}")
model = self.model_class(data=data_obj, config=self.base, params=model_params)
model.train()
losses.append(model.get_loss())
results.append(model.get_results())
self.model_config_index += 1
loss = np.average(losses)
results = self._average_results(results)
return {
'loss': loss,
'status': STATUS_OK,
'params': model.get_params(),
'val_results': {k: result_dict["val_results"] for k, result_dict in results.items()},
'val_statistical_results': {k: result_dict["val_statistical_results"] for k, result_dict in model.get_results().items()},
'test_results': {k: result_dict["test_results"] for k, result_dict in results.items()},
'test_statistical_results': {k: result_dict["test_statistical_results"] for k, result_dict in model.get_results().items()},
'name': model.name
}
def single(self):
"""
This function respect the signature, and the return format required for HyperOpt optimization
:param args: a Dictionary that contains the new hyper-parameter values that will be used in the current run
:return: it returns a Dictionary with loss, and status being required by HyperOpt,
and params, and results being required by the framework
"""
self.logger.info("Hyperparameters:")
for k, v in self.params.__dict__.items():
self.logger.info(f"{k} set to {v}")
losses = []
results = []
for trainval_index, data_obj in enumerate(self.data_objs):
self.logger.info(f"Exploration: Test Fold exploration number {self.test_fold_index+1}")
self.logger.info(f"Exploration: Train-Validation Fold exploration number {trainval_index+1}")
model = self.model_class(data=data_obj, config=self.base, params=self.params)
model.train()
losses.append(model.get_loss())
results.append(model.get_results())
loss = np.average(losses)
results = self._average_results(results)
return {
'loss': loss,
'status': STATUS_OK,
'params': model.get_params(),
'val_results': {k: result_dict["val_results"] for k, result_dict in results.items()},
'val_statistical_results': {k: result_dict["val_statistical_results"] for k, result_dict in model.get_results().items()},
'test_results': {k: result_dict["test_results"] for k, result_dict in results.items()},
'test_statistical_results': {k: result_dict["test_statistical_results"] for k, result_dict in model.get_results().items()},
'name': model.name
}
@staticmethod
def _average_results(results_list):
ks = list(results_list[0].keys())
eval_result_types = ["val_results", "test_results"]
metrics = list(results_list[0][ks[0]]["val_results"].keys())
return {k: {type_: {metric: np.average([fold_result[k][type_][metric]
for fold_result in results_list])
for metric in metrics}
for type_ in eval_result_types}
for k in ks}
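# A hedged usage sketch of how `objective` plugs into a HyperOpt search
# (assumptions: `space`, `data_objs`, `base`, `params` and `MyModel` are
# illustrative names built elsewhere, not part of this module):
#
#     from hyperopt import fmin, tpe, Trials
#
#     coordinator = ModelCoordinator(data_objs, base, params, MyModel, test_fold_index=0)
#     trials = Trials()
#     best = fmin(fn=coordinator.objective, space=space, algo=tpe.suggest,
#                 max_evals=20, trials=trials)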
|
[
"numpy.average",
"types.SimpleNamespace",
"elliot.utils.logging.get_logger"
] |
[((916, 1017), 'elliot.utils.logging.get_logger', 'logging.get_logger', (['self.__class__.__name__', '(pylog.CRITICAL if base.config_test else pylog.DEBUG)'], {}), '(self.__class__.__name__, pylog.CRITICAL if base.\n config_test else pylog.DEBUG)\n', (934, 1017), False, 'from elliot.utils import logging\n'), ((1681, 1704), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**args)\n', (1696, 1704), False, 'from types import SimpleNamespace\n'), ((1728, 1770), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**self.params[0].__dict__)\n', (1743, 1770), False, 'from types import SimpleNamespace\n'), ((2701, 2719), 'numpy.average', 'np.average', (['losses'], {}), '(losses)\n', (2711, 2719), True, 'import numpy as np\n'), ((4500, 4518), 'numpy.average', 'np.average', (['losses'], {}), '(losses)\n', (4510, 4518), True, 'import numpy as np\n'), ((5462, 5537), 'numpy.average', 'np.average', (['[fold_result[k][type_][metric] for fold_result in results_list]'], {}), '([fold_result[k][type_][metric] for fold_result in results_list])\n', (5472, 5537), True, 'import numpy as np\n')]
|
import tkinter as tk
from scanner import scanner
from multiprocessing import Process
import os
window = tk.Tk()
window.title("Scanner")
window.rowconfigure([0,1,2,3,4],minsize=50)
window.columnconfigure([0,1,3],minsize=50)
global plist
plist = []
def Scan(sig):
if sig == 1:
target = str(targetEntry.get())
start = int(startPort.get())
        try:
            end = int(endPort.get())
        except ValueError:
            # end port left blank or invalid: scan only the start port
            end = start
p = Process(target=scanner,args=(target,start,end))
plist.append(p)
p.start()
if sig == 0:
print("Killing all Processes...")
for p in plist:
p.kill()
print("Done")
os.system("clear")
label1 = tk.Label(text="Enter Target Name/IP: ")
targetEntry = tk.Entry()
label2 = tk.Label(text="Enter Start Port: ")
startPort = tk.Entry()
label3 = tk.Label(text="Enter End Port (Optional): ")
endPort = tk.Entry()
label4 = tk.Label(text="Kill Process: ")
killprocess = tk.Entry()
start = tk.Button(text="Start Scan",command=lambda:Scan(1))
# stop = tk.Button(text="Stop Scan",command=lambda:Scan(0))
killall = tk.Button(text="Kill All",command=lambda:Scan(0))
label1.grid(row=0,column=0)
targetEntry.grid(row=0,column=1)
label2.grid(row=1,column=0)
startPort.grid(row=1,column=1)
label3.grid(row=2,column=0)
endPort.grid(row=2,column=1)
start.grid(row=3,column=0)
# stop.grid(row=3,column=1)
# label4.grid(row=4,column=0)
# killprocess.grid(row=4,column=1)
killall.grid(row=3,column=2)
window.mainloop()
|
[
"tkinter.Entry",
"os.system",
"multiprocessing.Process",
"tkinter.Label",
"tkinter.Tk"
] |
[((105, 112), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (110, 112), True, 'import tkinter as tk\n'), ((714, 753), 'tkinter.Label', 'tk.Label', ([], {'text': '"""Enter Target Name/IP: """'}), "(text='Enter Target Name/IP: ')\n", (722, 753), True, 'import tkinter as tk\n'), ((768, 778), 'tkinter.Entry', 'tk.Entry', ([], {}), '()\n', (776, 778), True, 'import tkinter as tk\n'), ((788, 823), 'tkinter.Label', 'tk.Label', ([], {'text': '"""Enter Start Port: """'}), "(text='Enter Start Port: ')\n", (796, 823), True, 'import tkinter as tk\n'), ((836, 846), 'tkinter.Entry', 'tk.Entry', ([], {}), '()\n', (844, 846), True, 'import tkinter as tk\n'), ((856, 900), 'tkinter.Label', 'tk.Label', ([], {'text': '"""Enter End Port (Optional): """'}), "(text='Enter End Port (Optional): ')\n", (864, 900), True, 'import tkinter as tk\n'), ((911, 921), 'tkinter.Entry', 'tk.Entry', ([], {}), '()\n', (919, 921), True, 'import tkinter as tk\n'), ((931, 962), 'tkinter.Label', 'tk.Label', ([], {'text': '"""Kill Process: """'}), "(text='Kill Process: ')\n", (939, 962), True, 'import tkinter as tk\n'), ((977, 987), 'tkinter.Entry', 'tk.Entry', ([], {}), '()\n', (985, 987), True, 'import tkinter as tk\n'), ((461, 511), 'multiprocessing.Process', 'Process', ([], {'target': 'scanner', 'args': '(target, start, end)'}), '(target=scanner, args=(target, start, end))\n', (468, 511), False, 'from multiprocessing import Process\n'), ((685, 703), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (694, 703), False, 'import os\n')]
|
import os, sys
import torch
from torch.utils.data import Dataset
import imageio as io
import cv2
from sklearn.model_selection import StratifiedKFold
import numpy as np
from numpy.lib.stride_tricks import as_strided
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
def imread(p):
img = io.imread(p)
# opencv faster
img = cv2.resize(img, (224, 224),
interpolation=cv2.INTER_CUBIC)
#img = imresize(img, (224, 224), 3)
img = img.astype('float32')/255.0
img -= mean
img /= std
return np.transpose(img, (2, 0, 1))
class ImageList(Dataset):
def __init__(self, root, videos):
self.root = root
self.videos = videos
def __getitem__(self, index):
vid = self.videos[index] # path to video folder (of images)
path = os.path.join(self.root, vid)
img_list = os.listdir(path)
img_list = [os.path.join(path, name)
for name in sorted(img_list)]
video = np.stack([imread(p) for p in img_list])
return torch.from_numpy(video)
def __len__(self):
return len(self.videos)
class VideoList(Dataset):
def __init__(self, root, videos, for_train=False, seq_length=16):
self.root = root
self.videos = videos
self.for_train = for_train
self.seq_length = seq_length
# pick randomly 1 sequence per video to train,
# pick evenly 20 sequences per video to validate/test
def __getitem__(self, index):
name, c = self.videos[index]
path = os.path.join(self.root, name + '.npy')
feat = np.load(path)
n, d = feat.shape # d=2048
if self.for_train:
start = np.random.randint(0, n-self.seq_length)
feat = feat[start:start+self.seq_length]
feat = feat[None, ...] # RxLxD, R = 1
frame_indexes = np.array(range(start, start+self.seq_length))
frame_indexes = frame_indexes[None, ...]
else:
R = 20 # Sample the 20 sequences
S = (n-self.seq_length) // (R-1)
indexes = np.array(range(n))
sn, sd = feat.strides
i_sn, = indexes.strides
feat = as_strided(feat, shape=(R, self.seq_length, d), strides=(S*sn, sn, sd))
indexes = as_strided(indexes, shape=(R, self.seq_length), strides=(S*i_sn, i_sn))
feat = np.ascontiguousarray(feat) # RxLxD, R = 20
frame_indexes = np.ascontiguousarray(indexes) # RxL
return feat, c, name, frame_indexes
def __len__(self):
return len(self.videos)
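# A small worked example of the `as_strided` windowing used above (illustrative,
# not part of the original module): with n=10 frames, seq_length=4 and R=3,
# S = (10 - 4) // (3 - 1) = 3, so the three windows start at frames 0, 3 and 6:
#
#     feat = np.arange(20, dtype=np.float32).reshape(10, 2)  # n=10, d=2
#     sn, sd = feat.strides
#     windows = as_strided(feat, shape=(3, 4, 2), strides=(3 * sn, sn, sd))
#     assert (windows[1] == feat[3:7]).all()  # windows[r] == feat[r*S : r*S+4]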
def collate(batch):
x, y, z, w = zip(*batch)
x = torch.cat([torch.from_numpy(a) for a in x]) # (bR)xLxD
w = torch.cat([torch.from_numpy(a) for a in w]) # (bR)xL
x = x.permute(1, 0, 2).contiguous() # Lx(bR)xD
y = torch.LongTensor(y)
return x, y, z, w
def class_dict(ids_file):
class2idx = {}
with open(ids_file) as f:
for line in f:
c, name = line.split()
class2idx[name] = int(c) - 1
return class2idx
def video_list(data_file, class2idx):
data = []
with open(data_file) as f:
for line in f:
name = line.split()[0]
name = os.path.splitext(name)[0]
c = name.split('/')[0]
c = class2idx[c]
data.append((name, c))
return data
def train_split(data, n_splits=5, select=0, seed=2017):
labels = np.array([d[1] for d in data])
rng = np.random.RandomState(seed)
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=rng)
cv = list(skf.split(labels, labels))
train_index, valid_index = cv[select]
train_data = [data[idx] for idx in train_index]
valid_data = [data[idx] for idx in valid_index]
return train_data, valid_data
|
[
"numpy.load",
"torch.LongTensor",
"imageio.imread",
"numpy.ascontiguousarray",
"numpy.transpose",
"numpy.random.RandomState",
"numpy.random.randint",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"numpy.lib.stride_tricks.as_strided",
"os.path.splitext",
"os.path.join",
"os.listdir",
"cv2.resize",
"torch.from_numpy"
] |
[((300, 312), 'imageio.imread', 'io.imread', (['p'], {}), '(p)\n', (309, 312), True, 'import imageio as io\n'), ((343, 401), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (224, 224), interpolation=cv2.INTER_CUBIC)\n', (353, 401), False, 'import cv2\n'), ((534, 562), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (546, 562), True, 'import numpy as np\n'), ((2859, 2878), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (2875, 2878), False, 'import torch\n'), ((3473, 3503), 'numpy.array', 'np.array', (['[d[1] for d in data]'], {}), '([d[1] for d in data])\n', (3481, 3503), True, 'import numpy as np\n'), ((3514, 3541), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3535, 3541), True, 'import numpy as np\n'), ((3552, 3618), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': 'rng'}), '(n_splits=n_splits, shuffle=True, random_state=rng)\n', (3567, 3618), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((800, 828), 'os.path.join', 'os.path.join', (['self.root', 'vid'], {}), '(self.root, vid)\n', (812, 828), False, 'import os, sys\n'), ((848, 864), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (858, 864), False, 'import os, sys\n'), ((1027, 1050), 'torch.from_numpy', 'torch.from_numpy', (['video'], {}), '(video)\n', (1043, 1050), False, 'import torch\n'), ((1526, 1564), 'os.path.join', 'os.path.join', (['self.root', "(name + '.npy')"], {}), "(self.root, name + '.npy')\n", (1538, 1564), False, 'import os, sys\n'), ((1581, 1594), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1588, 1594), True, 'import numpy as np\n'), ((885, 909), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (897, 909), False, 'import os, sys\n'), ((1680, 1721), 'numpy.random.randint', 'np.random.randint', (['(0)', '(n - self.seq_length)'], {}), '(0, n - self.seq_length)\n', (1697, 1721), True, 'import numpy as np\n'), ((2210, 2283), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['feat'], {'shape': '(R, self.seq_length, d)', 'strides': '(S * sn, sn, sd)'}), '(feat, shape=(R, self.seq_length, d), strides=(S * sn, sn, sd))\n', (2220, 2283), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((2304, 2377), 'numpy.lib.stride_tricks.as_strided', 'as_strided', (['indexes'], {'shape': '(R, self.seq_length)', 'strides': '(S * i_sn, i_sn)'}), '(indexes, shape=(R, self.seq_length), strides=(S * i_sn, i_sn))\n', (2314, 2377), False, 'from numpy.lib.stride_tricks import as_strided\n'), ((2396, 2422), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['feat'], {}), '(feat)\n', (2416, 2422), True, 'import numpy as np\n'), ((2467, 2496), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['indexes'], {}), '(indexes)\n', (2487, 2496), True, 'import numpy as np\n'), ((2674, 2693), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (2690, 2693), False, 'import torch\n'), ((2740, 2759), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (2756, 2759), False, 'import torch\n'), ((3261, 3283), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (3277, 3283), False, 'import os, sys\n')]
|
import time
from pytest import fixture
class MockHttpRequest:
_encoding = None
_upload_handlers = []
def __init__(self):
self.GET = {}
self.POST = {}
self.COOKIES = {}
self.META = {}
self.FILES = {}
self.path = ''
self.path_info = ''
self.method = None
self.resolver_match = None
self.content_type = None
self.content_params = None
class MockHttpResponse:
status_code = 200
def __init__(self):
pass
class MockHttpResponse4xx(MockHttpResponse):
status_code = 400
class MockHttpResponse5xx(MockHttpResponse):
status_code = 500
class TestMiddleware:
start_time = 1587450924.562038
finish_time = 1587451302.836959
put_metrics_calls = 0
time_call_counts = 0
def put_metric_2xx_handler(self, *args):
if self.put_metrics_calls != 1:
assert args == ('2xx', 'responses-2xx-quantity', 1, 'Count')
else:
assert args == ('request-time-handler', 'execution-time', (self.finish_time - self.start_time) * 1e6,
'Microseconds')
self.put_metrics_calls += 1
return None
def put_metric_4xx_handler(self, *args):
if self.put_metrics_calls != 1:
assert args == ('4xx', 'responses-4xx-quantity', 1, 'Count')
else:
assert args == ('request-time-handler', 'execution-time', (self.finish_time - self.start_time) * 1e6,
'Microseconds')
self.put_metrics_calls += 1
return None
def put_metric_5xx_handler(self, *args):
if self.put_metrics_calls != 1:
assert args == ('5xx', 'responses-5xx-quantity', 1, 'Count')
else:
assert args == ('request-time-handler', 'execution-time', (self.finish_time - self.start_time) * 1e6,
'Microseconds')
self.put_metrics_calls += 1
return None
def _mock_time(self):
if self.time_call_counts == 0:
mock_time = self.start_time
else:
mock_time = self.finish_time
self.time_call_counts += 1
return mock_time
@fixture
def init_env_variables(self, monkeypatch):
monkeypatch.setenv('CLOUDWATCH__METRICS_NAMESPACE', 'test-namespace')
monkeypatch.setenv('CLOUDWATCH__METRICS_ENABLE', 'True')
monkeypatch.setenv('CLOUDWATCH__METRICS_BUFFER_SIZE', '2')
monkeypatch.setenv('CLOUDWATCH__METRICS_FLUSH_TIMEOUT', '10')
monkeypatch.setattr(time, 'time', lambda: 1587450924.562038)
@fixture
def time_patching(self, monkeypatch):
monkeypatch.setattr(time, 'time', self._mock_time)
def test_middleware_2xx_response(self, monkeypatch, init_env_variables, time_patching):
from cloudwatch_metrics.metric_recorder import CloudwatchMetricRecorder
from cloudwatch_metrics.metric_middleware import CloudWatchMiddleware
self.time_call_counts = 0
monkeypatch.setattr(CloudwatchMetricRecorder, 'put_metric', self.put_metric_2xx_handler)
request = MockHttpRequest()
response = MockHttpResponse()
middleware = CloudWatchMiddleware(lambda x: response)
mw_response = middleware.__call__(request)
assert response == mw_response
assert response.status_code == mw_response.status_code
self.put_metrics_calls = 0
def test_middleware_4xx_response(self, monkeypatch, init_env_variables, time_patching):
from cloudwatch_metrics.metric_recorder import CloudwatchMetricRecorder
from cloudwatch_metrics.metric_middleware import CloudWatchMiddleware
self.time_call_counts = 0
monkeypatch.setattr(CloudwatchMetricRecorder, 'put_metric', self.put_metric_4xx_handler)
request = MockHttpRequest()
response = MockHttpResponse4xx()
middleware = CloudWatchMiddleware(lambda x: response)
mw_response = middleware.__call__(request)
assert response == mw_response
assert response.status_code == mw_response.status_code
self.put_metrics_calls = 0
def test_middleware_5xx_response(self, monkeypatch, init_env_variables, time_patching):
from cloudwatch_metrics.metric_recorder import CloudwatchMetricRecorder
from cloudwatch_metrics.metric_middleware import CloudWatchMiddleware
self.time_call_counts = 0
monkeypatch.setattr(CloudwatchMetricRecorder, 'put_metric', self.put_metric_5xx_handler)
request = MockHttpRequest()
response = MockHttpResponse5xx()
middleware = CloudWatchMiddleware(lambda x: response)
mw_response = middleware.__call__(request)
assert response == mw_response
assert response.status_code == mw_response.status_code
self.put_metrics_calls = 0
|
[
"cloudwatch_metrics.metric_middleware.CloudWatchMiddleware"
] |
[((3191, 3231), 'cloudwatch_metrics.metric_middleware.CloudWatchMiddleware', 'CloudWatchMiddleware', (['(lambda x: response)'], {}), '(lambda x: response)\n', (3211, 3231), False, 'from cloudwatch_metrics.metric_middleware import CloudWatchMiddleware\n'), ((3902, 3942), 'cloudwatch_metrics.metric_middleware.CloudWatchMiddleware', 'CloudWatchMiddleware', (['(lambda x: response)'], {}), '(lambda x: response)\n', (3922, 3942), False, 'from cloudwatch_metrics.metric_middleware import CloudWatchMiddleware\n'), ((4613, 4653), 'cloudwatch_metrics.metric_middleware.CloudWatchMiddleware', 'CloudWatchMiddleware', (['(lambda x: response)'], {}), '(lambda x: response)\n', (4633, 4653), False, 'from cloudwatch_metrics.metric_middleware import CloudWatchMiddleware\n')]
|
import os
import numpy as np
import scipy.sparse
import psutil
def getMemUsageOfCurProcess_MiB(field='rss'):
    # return the memory usage of the current process in MiB
process = psutil.Process(os.getpid())
mem = getattr(process.memory_info(), field)
mem_MiB = mem / float(2 ** 20)
return mem_MiB
def calcObjSize_MiB(arr):
if hasattr(arr, "__dict__"):
arr = arr.__dict__
MiB_PER_BYTE = 1.0 / float(2**20)
if isinstance(arr, np.ndarray):
return arr.nbytes * MiB_PER_BYTE
elif isinstance(arr, scipy.sparse.csr_matrix):
nbyt = arr.data.nbytes + arr.indices.nbytes + arr.indptr.nbytes
return nbyt * MiB_PER_BYTE
elif isinstance(arr, dict):
total = 0
for key in arr:
total += calcObjSize_MiB(arr[key])
return total
else:
return 0
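# A quick worked check (illustrative): a float64 array of 2**18 elements takes
# 2**18 * 8 bytes = 2 MiB, so calcObjSize_MiB should report exactly 2.0:
#
#     arr = np.zeros(2 ** 18)
#     assert calcObjSize_MiB(arr) == 2.0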
|
[
"os.getpid"
] |
[((175, 186), 'os.getpid', 'os.getpid', ([], {}), '()\n', (184, 186), False, 'import os\n')]
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from ai.h2o.sparkling.ExternalBackendConf import ExternalBackendConf
from ai.h2o.sparkling.Initializer import Initializer
from ai.h2o.sparkling.InternalBackendConf import InternalBackendConf
from ai.h2o.sparkling.SharedBackendConf import SharedBackendConf
from pyspark.ml.util import _jvm
class H2OConf(SharedBackendConf, InternalBackendConf, ExternalBackendConf):
def __init__(self, spark=None):
try:
if spark is not None:
warnings.warn(
"Constructor H2OConf(spark) with spark argument is deprecated. Please use just H2OConf(). "
"The argument will be removed in release 3.32.")
Initializer.load_sparkling_jar()
self._jconf = _jvm().org.apache.spark.h2o.H2OConf()
except:
raise
|
[
"warnings.warn",
"pyspark.ml.util._jvm",
"ai.h2o.sparkling.Initializer.Initializer.load_sparkling_jar"
] |
[((1475, 1507), 'ai.h2o.sparkling.Initializer.Initializer.load_sparkling_jar', 'Initializer.load_sparkling_jar', ([], {}), '()\n', (1505, 1507), False, 'from ai.h2o.sparkling.Initializer import Initializer\n'), ((1267, 1428), 'warnings.warn', 'warnings.warn', (['"""Constructor H2OConf(spark) with spark argument is deprecated. Please use just H2OConf(). The argument will be removed in release 3.32."""'], {}), "(\n 'Constructor H2OConf(spark) with spark argument is deprecated. Please use just H2OConf(). The argument will be removed in release 3.32.'\n )\n", (1280, 1428), False, 'import warnings\n'), ((1534, 1540), 'pyspark.ml.util._jvm', '_jvm', ([], {}), '()\n', (1538, 1540), False, 'from pyspark.ml.util import _jvm\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A JWT HMAC key manager."""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
from typing import Text, Type
from tink.proto import jwt_hmac_pb2
from tink.proto import tink_pb2
from tink import core
from tink.cc.pybind import tink_bindings
from tink.jwt import _jwt_error
from tink.jwt import _jwt_format
from tink.jwt import _jwt_mac
from tink.jwt import _jwt_validator
from tink.jwt import _raw_jwt
from tink.jwt import _verified_jwt
_JWT_HMAC_KEY_TYPE = 'type.googleapis.com/google.crypto.tink.JwtHmacKey'
_ALGORITHM_STRING = {
jwt_hmac_pb2.HS256: 'HS256',
jwt_hmac_pb2.HS384: 'HS384',
jwt_hmac_pb2.HS512: 'HS512'
}
class _JwtHmac(_jwt_mac.JwtMac):
"""Interface for authenticating and verifying JWT with JWS MAC."""
def __init__(self, cc_mac: tink_bindings.Mac, algorithm: Text):
self._cc_mac = cc_mac
self._algorithm = algorithm
@core.use_tink_errors
def _compute_mac(self, data: bytes) -> bytes:
return self._cc_mac.compute_mac(data)
@core.use_tink_errors
def _verify_mac(self, mac_value: bytes, data: bytes) -> None:
self._cc_mac.verify_mac(mac_value, data)
def compute_mac_and_encode(self, raw_jwt: _raw_jwt.RawJwt) -> Text:
"""Computes a MAC and encodes the token."""
unsigned = _jwt_format.create_unsigned_compact(self._algorithm,
raw_jwt.json_payload())
return _jwt_format.create_signed_compact(unsigned,
self._compute_mac(unsigned))
def verify_mac_and_decode(
self, compact: Text,
validator: _jwt_validator.JwtValidator) -> _verified_jwt.VerifiedJwt:
"""Verifies, validates and decodes a MACed compact JWT token."""
parts = _jwt_format.split_signed_compact(compact)
unsigned_compact, json_header, json_payload, mac = parts
self._verify_mac(mac, unsigned_compact)
_jwt_format.validate_header(json_header, self._algorithm)
raw_jwt = _raw_jwt.RawJwt.from_json_payload(json_payload)
_jwt_validator.validate(validator, raw_jwt)
return _verified_jwt.VerifiedJwt._create(raw_jwt) # pylint: disable=protected-access
class MacCcToPyJwtMacKeyManager(core.KeyManager[_jwt_mac.JwtMac]):
"""Transforms C++ KeyManager into a Python KeyManager."""
def __init__(self):
self._cc_key_manager = tink_bindings.MacKeyManager.from_cc_registry(
'type.googleapis.com/google.crypto.tink.JwtHmacKey')
def primitive_class(self) -> Type[_jwt_mac.JwtMac]:
return _jwt_mac.JwtMac
@core.use_tink_errors
def primitive(self, key_data: tink_pb2.KeyData) -> _jwt_mac.JwtMac:
if key_data.type_url != _JWT_HMAC_KEY_TYPE:
raise _jwt_error.JwtInvalidError('Invalid key data key type')
jwt_hmac_key = jwt_hmac_pb2.JwtHmacKey.FromString(key_data.value)
algorithm = _ALGORITHM_STRING[jwt_hmac_key.algorithm]
cc_mac = self._cc_key_manager.primitive(key_data.SerializeToString())
return _JwtHmac(cc_mac, algorithm)
def key_type(self) -> Text:
return self._cc_key_manager.key_type()
@core.use_tink_errors
def new_key_data(self,
key_template: tink_pb2.KeyTemplate) -> tink_pb2.KeyData:
data = self._cc_key_manager.new_key_data(key_template.SerializeToString())
return tink_pb2.KeyData.FromString(data)
def register():
tink_bindings.register_jwt()
core.Registry.register_key_manager(
MacCcToPyJwtMacKeyManager(), new_key_allowed=True)
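# A hedged usage sketch (assumptions: `handle` is a tink KeysetHandle created
# elsewhere from a JWT HMAC key template, and `raw_jwt`/`validator` are a
# RawJwt and JwtValidator built via the public tink.jwt APIs; all names below
# are illustrative):
#
#     register()
#     jwt_mac = handle.primitive(_jwt_mac.JwtMac)
#     compact = jwt_mac.compute_mac_and_encode(raw_jwt)
#     verified = jwt_mac.verify_mac_and_decode(compact, validator)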
|
[
"tink.jwt._jwt_error.JwtInvalidError",
"tink.proto.tink_pb2.KeyData.FromString",
"tink.cc.pybind.tink_bindings.register_jwt",
"tink.cc.pybind.tink_bindings.MacKeyManager.from_cc_registry",
"tink.jwt._jwt_format.split_signed_compact",
"tink.jwt._raw_jwt.RawJwt.from_json_payload",
"tink.proto.jwt_hmac_pb2.JwtHmacKey.FromString",
"tink.jwt._jwt_validator.validate",
"tink.jwt._jwt_format.validate_header",
"tink.jwt._verified_jwt.VerifiedJwt._create"
] |
[((3973, 4001), 'tink.cc.pybind.tink_bindings.register_jwt', 'tink_bindings.register_jwt', ([], {}), '()\n', (3999, 4001), False, 'from tink.cc.pybind import tink_bindings\n'), ((2400, 2441), 'tink.jwt._jwt_format.split_signed_compact', '_jwt_format.split_signed_compact', (['compact'], {}), '(compact)\n', (2432, 2441), False, 'from tink.jwt import _jwt_format\n'), ((2551, 2608), 'tink.jwt._jwt_format.validate_header', '_jwt_format.validate_header', (['json_header', 'self._algorithm'], {}), '(json_header, self._algorithm)\n', (2578, 2608), False, 'from tink.jwt import _jwt_format\n'), ((2623, 2670), 'tink.jwt._raw_jwt.RawJwt.from_json_payload', '_raw_jwt.RawJwt.from_json_payload', (['json_payload'], {}), '(json_payload)\n', (2656, 2670), False, 'from tink.jwt import _raw_jwt\n'), ((2675, 2718), 'tink.jwt._jwt_validator.validate', '_jwt_validator.validate', (['validator', 'raw_jwt'], {}), '(validator, raw_jwt)\n', (2698, 2718), False, 'from tink.jwt import _jwt_validator\n'), ((2730, 2772), 'tink.jwt._verified_jwt.VerifiedJwt._create', '_verified_jwt.VerifiedJwt._create', (['raw_jwt'], {}), '(raw_jwt)\n', (2763, 2772), False, 'from tink.jwt import _verified_jwt\n'), ((2988, 3090), 'tink.cc.pybind.tink_bindings.MacKeyManager.from_cc_registry', 'tink_bindings.MacKeyManager.from_cc_registry', (['"""type.googleapis.com/google.crypto.tink.JwtHmacKey"""'], {}), "(\n 'type.googleapis.com/google.crypto.tink.JwtHmacKey')\n", (3032, 3090), False, 'from tink.cc.pybind import tink_bindings\n'), ((3407, 3457), 'tink.proto.jwt_hmac_pb2.JwtHmacKey.FromString', 'jwt_hmac_pb2.JwtHmacKey.FromString', (['key_data.value'], {}), '(key_data.value)\n', (3441, 3457), False, 'from tink.proto import jwt_hmac_pb2\n'), ((3919, 3952), 'tink.proto.tink_pb2.KeyData.FromString', 'tink_pb2.KeyData.FromString', (['data'], {}), '(data)\n', (3946, 3952), False, 'from tink.proto import tink_pb2\n'), ((3332, 3387), 'tink.jwt._jwt_error.JwtInvalidError', '_jwt_error.JwtInvalidError', (['"""Invalid key data key type"""'], {}), "('Invalid key data key type')\n", (3358, 3387), False, 'from tink.jwt import _jwt_error\n')]
|
import json
#reading
stringOfJsonData = '{"name": "Bob", "isCat": true, "miceCaught": 0, "felineIQ": null}'
jsonDataAsPythonValue = json.loads(stringOfJsonData)
print(jsonDataAsPythonValue)
#alt print
print("\n")
print("Again with Dumps() \n")
print(json.dumps(jsonDataAsPythonValue))
#writing
#formatting
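# A minimal sketch of the writing/formatting steps hinted at above (the
# filename is illustrative): json.dump() serializes straight to a file object,
# and indent/sort_keys control the formatting.
#
#     with open('example.json', 'w') as f:
#         json.dump(jsonDataAsPythonValue, f, indent=4, sort_keys=True)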
|
[
"json.loads",
"json.dumps"
] |
[((137, 165), 'json.loads', 'json.loads', (['stringOfJsonData'], {}), '(stringOfJsonData)\n', (147, 165), False, 'import json\n'), ((262, 295), 'json.dumps', 'json.dumps', (['jsonDataAsPythonValue'], {}), '(jsonDataAsPythonValue)\n', (272, 295), False, 'import json\n')]
|
import asyncio
import aiohttp
import discord
from async_rediscache import RedisSession
from botcore import StartupError
from botcore.site_api import APIClient
from discord.ext import commands
import bot
from bot import constants
from bot.bot import Bot
from bot.log import get_logger, setup_sentry
setup_sentry()
LOCALHOST = "127.0.0.1"
async def _create_redis_session() -> RedisSession:
"""Create and connect to a redis session."""
redis_session = RedisSession(
address=(constants.Redis.host, constants.Redis.port),
password=constants.Redis.password,
minsize=1,
maxsize=20,
use_fakeredis=constants.Redis.use_fakeredis,
global_namespace="bot",
)
try:
await redis_session.connect()
except OSError as e:
raise StartupError(e)
return redis_session
async def main() -> None:
"""Entry async method for starting the bot."""
statsd_url = constants.Stats.statsd_host
if constants.DEBUG_MODE:
# Since statsd is UDP, there are no errors for sending to a down port.
# For this reason, setting the statsd host to 127.0.0.1 for development
# will effectively disable stats.
statsd_url = LOCALHOST
allowed_roles = list({discord.Object(id_) for id_ in constants.MODERATION_ROLES})
intents = discord.Intents.all()
intents.presences = False
intents.dm_typing = False
intents.dm_reactions = False
intents.invites = False
intents.webhooks = False
intents.integrations = False
async with aiohttp.ClientSession() as session:
bot.instance = Bot(
guild_id=constants.Guild.id,
http_session=session,
redis_session=await _create_redis_session(),
statsd_url=statsd_url,
command_prefix=commands.when_mentioned_or(constants.Bot.prefix),
activity=discord.Game(name=f"Commands: {constants.Bot.prefix}help"),
case_insensitive=True,
max_messages=10_000,
allowed_mentions=discord.AllowedMentions(everyone=False, roles=allowed_roles),
intents=intents,
allowed_roles=list({discord.Object(id_) for id_ in constants.MODERATION_ROLES}),
api_client=APIClient(
site_api_url=f"{constants.URLs.site_api_schema}{constants.URLs.site_api}",
site_api_token=constants.Keys.site_api,
),
)
async with bot.instance as _bot:
await _bot.start(constants.Bot.token)
try:
asyncio.run(main())
except StartupError as e:
message = "Unknown Startup Error Occurred."
if isinstance(e.exception, (aiohttp.ClientConnectorError, aiohttp.ServerDisconnectedError)):
message = "Could not connect to site API. Is it running?"
elif isinstance(e.exception, OSError):
message = "Could not connect to Redis. Is it running?"
# The exception is logged with an empty message so the actual message is visible at the bottom
log = get_logger("bot")
log.fatal("", exc_info=e.exception)
log.fatal(message)
exit(69)
|
[
"async_rediscache.RedisSession",
"discord.AllowedMentions",
"discord.ext.commands.when_mentioned_or",
"bot.log.setup_sentry",
"discord.Object",
"aiohttp.ClientSession",
"bot.log.get_logger",
"botcore.site_api.APIClient",
"discord.Game",
"botcore.StartupError",
"discord.Intents.all"
] |
[((301, 315), 'bot.log.setup_sentry', 'setup_sentry', ([], {}), '()\n', (313, 315), False, 'from bot.log import get_logger, setup_sentry\n'), ((462, 665), 'async_rediscache.RedisSession', 'RedisSession', ([], {'address': '(constants.Redis.host, constants.Redis.port)', 'password': 'constants.Redis.password', 'minsize': '(1)', 'maxsize': '(20)', 'use_fakeredis': 'constants.Redis.use_fakeredis', 'global_namespace': '"""bot"""'}), "(address=(constants.Redis.host, constants.Redis.port), password\n =constants.Redis.password, minsize=1, maxsize=20, use_fakeredis=\n constants.Redis.use_fakeredis, global_namespace='bot')\n", (474, 665), False, 'from async_rediscache import RedisSession\n'), ((1324, 1345), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (1343, 1345), False, 'import discord\n'), ((1545, 1568), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1566, 1568), False, 'import aiohttp\n'), ((2996, 3013), 'bot.log.get_logger', 'get_logger', (['"""bot"""'], {}), "('bot')\n", (3006, 3013), False, 'from bot.log import get_logger, setup_sentry\n'), ((797, 812), 'botcore.StartupError', 'StartupError', (['e'], {}), '(e)\n', (809, 812), False, 'from botcore import StartupError\n'), ((1250, 1269), 'discord.Object', 'discord.Object', (['id_'], {}), '(id_)\n', (1264, 1269), False, 'import discord\n'), ((1803, 1851), 'discord.ext.commands.when_mentioned_or', 'commands.when_mentioned_or', (['constants.Bot.prefix'], {}), '(constants.Bot.prefix)\n', (1829, 1851), False, 'from discord.ext import commands\n'), ((1874, 1932), 'discord.Game', 'discord.Game', ([], {'name': 'f"""Commands: {constants.Bot.prefix}help"""'}), "(name=f'Commands: {constants.Bot.prefix}help')\n", (1886, 1932), False, 'import discord\n'), ((2031, 2091), 'discord.AllowedMentions', 'discord.AllowedMentions', ([], {'everyone': '(False)', 'roles': 'allowed_roles'}), '(everyone=False, roles=allowed_roles)\n', (2054, 2091), False, 'import discord\n'), ((2238, 2371), 'botcore.site_api.APIClient', 'APIClient', ([], {'site_api_url': 'f"""{constants.URLs.site_api_schema}{constants.URLs.site_api}"""', 'site_api_token': 'constants.Keys.site_api'}), "(site_api_url=\n f'{constants.URLs.site_api_schema}{constants.URLs.site_api}',\n site_api_token=constants.Keys.site_api)\n", (2247, 2371), False, 'from botcore.site_api import APIClient\n'), ((2154, 2173), 'discord.Object', 'discord.Object', (['id_'], {}), '(id_)\n', (2168, 2173), False, 'import discord\n')]
|
import tkinter as tk
class GUI:
def __init__(self, master):
self.table = [ ]
self.number_table = [ ]
for i in range(0, 9):
cols = [ ]
for j in range(0, 9):
e = tk.Entry(master, width=5, font=60)
e.grid(row=i, column=j)
cols.append(e)
self.table.append(cols)
self.text = tk.StringVar()
self.text.set("")
self.solve_b = tk.Button(master, text="Solve", command=setup, bg='black', fg='white').grid(row=9, column=0, columnspan=9, sticky=tk.NSEW)
self.clear_b = tk.Button(master, text="Clear", command=self.clear, bg='black', fg='white').grid(row=10, column=0, columnspan=9, sticky=tk.NSEW)
self.label = tk.Label(master, textvariable=self.text).grid(row=11, column=0, columnspan=9, sticky=tk.NSEW)
def extract_numbers(self):
self.number_table.clear()
for _rows in self.table:
temp = []
for _item in _rows:
number = _item.get()
if number != "":
number = int(_item.get())
temp.append(number)
self.number_table.append(temp)
def label_text(self, text):
self.text.set(text)
def draw_board(self, y, x):
self.table[ y ][ x ].delete(0, "end")
self.table[ y ][ x ].insert(0, self.number_table[ y ][ x ])
def clear(self):
for i in range(9):
for j in range(9):
self.table[ i ][ j ].delete(0, "end")
self.label_text("Cleared")
def full_check(y, x):
if row_check(y) and column_check(x) and square_check(x, y):
return True
else:
return False
def row_check(y):
temp_list = [j for j in app.number_table[y] if j != '']
if len(temp_list) == len(set(temp_list)):
check = True
else:
check = False
return check
def column_check(x):
column = []
for j in range(9):
column.append(app.number_table[j][x])
temp_list = [j for j in column if j != '']
if len(temp_list) == len(set(temp_list)):
check = True
else:
check = False
return check
def square_check(x, y):
square = []
def quad_check(num):
if num < 3:
x_multiplier = 0
elif num < 6:
x_multiplier = 1
else:
x_multiplier = 2
return x_multiplier
x_cord = quad_check(x) * 3
y_cord = quad_check(y) * 3
for f in range(3):
for j in range(3):
temp = app.number_table[y_cord + j][x_cord + f]
square.append(temp)
temp_list = [a for a in square if a != '']
if len(temp_list) == len(set(temp_list)):
check = True
else:
check = False
return check
def setup():
app.extract_numbers()
init_check = True
for i in range(9):
for j in range(9):
if not full_check(i, j):
init_check = False
if init_check:
static_table = [ ]
for rows in app.number_table:
static_table_row = [ ]
for item in rows:
static_table_row.append(item)
static_table.append(static_table_row)
backtracking(static_table)
else:
app.label_text("Unsolvable")
def backtracking(static_table):
main_check_bool = False
i = 0
direction = 0
while not main_check_bool:
if static_table[divmod(i, 9)[0]][divmod(i, 9)[1]] == "":
if app.number_table[divmod(i, 9)[0]][divmod(i, 9)[1]] == "":
app.number_table[divmod(i, 9)[0]][divmod(i, 9)[1]] = 1
app.draw_board(divmod(i, 9)[0], divmod(i, 9)[1])
if full_check(divmod(i, 9)[0], divmod(i, 9)[1]):
direction = 1
i = i + 1
else:
app.number_table[divmod(i, 9)[0]][divmod(i, 9)[1]] = app.number_table[divmod(i, 9)[0]][divmod(i, 9)[1]] + 1
app.draw_board(divmod(i, 9)[0], divmod(i, 9)[1])
if app.number_table[divmod(i, 9)[0]][divmod(i, 9)[1]] > 9:
app.number_table[divmod(i, 9)[0]][divmod(i, 9)[1]] = ""
app.draw_board(divmod(i, 9)[0], divmod(i, 9)[1])
direction = -1
i = i - 1
else:
if full_check(divmod(i, 9)[0], divmod(i, 9)[1]):
direction = 1
i = i + 1
else:
if direction < 0:
i = i - 1
elif direction > 0:
i = i + 1
if i == 81:
main_check_bool = True
app.label_text("Solved")
if __name__ == "__main__":
window = tk.Tk()
window.title("SUDOKO")
window.iconbitmap('sudoku.ico')
app = GUI(window)
tk.mainloop()
|
[
"tkinter.StringVar",
"tkinter.mainloop",
"tkinter.Button",
"tkinter.Entry",
"tkinter.Label",
"tkinter.Tk"
] |
[((4737, 4744), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (4742, 4744), True, 'import tkinter as tk\n'), ((4834, 4847), 'tkinter.mainloop', 'tk.mainloop', ([], {}), '()\n', (4845, 4847), True, 'import tkinter as tk\n'), ((391, 405), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (403, 405), True, 'import tkinter as tk\n'), ((229, 263), 'tkinter.Entry', 'tk.Entry', (['master'], {'width': '(5)', 'font': '(60)'}), '(master, width=5, font=60)\n', (237, 263), True, 'import tkinter as tk\n'), ((455, 525), 'tkinter.Button', 'tk.Button', (['master'], {'text': '"""Solve"""', 'command': 'setup', 'bg': '"""black"""', 'fg': '"""white"""'}), "(master, text='Solve', command=setup, bg='black', fg='white')\n", (464, 525), True, 'import tkinter as tk\n'), ((601, 676), 'tkinter.Button', 'tk.Button', (['master'], {'text': '"""Clear"""', 'command': 'self.clear', 'bg': '"""black"""', 'fg': '"""white"""'}), "(master, text='Clear', command=self.clear, bg='black', fg='white')\n", (610, 676), True, 'import tkinter as tk\n'), ((751, 791), 'tkinter.Label', 'tk.Label', (['master'], {'textvariable': 'self.text'}), '(master, textvariable=self.text)\n', (759, 791), True, 'import tkinter as tk\n')]
|
from factory import fuzzy
from factory.django import DjangoModelFactory
from tidings.models import Watch
class WatchFactory(DjangoModelFactory):
class Meta:
model = Watch
event_type = "fooevent"
is_active = True
secret = fuzzy.FuzzyText(length=10)
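# A minimal usage sketch (assumption: executed inside a configured Django test
# environment; the event type below is illustrative):
#
#     watch = WatchFactory(event_type="barevent")
#     assert len(watch.secret) == 10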
|
[
"factory.fuzzy.FuzzyText"
] |
[((248, 274), 'factory.fuzzy.FuzzyText', 'fuzzy.FuzzyText', ([], {'length': '(10)'}), '(length=10)\n', (263, 274), False, 'from factory import fuzzy\n')]
|
#
# This file is part of GreatFET
#
from __future__ import print_function
import sys
from warnings import warn
from ..interface import GreatFETInterface
from ..support.bits import bits
from ..protocol.jtag_svf import SVFParser, SVFEventHandler
class JTAGPatternError(IOError):
""" Class for errors that come from a JTAG read not matching the expected response. """
def __init__(self, message, result):
self.result = result
super(JTAGPatternError, self).__init__(message)
# FIXME: should this be an instance of a 'target' class?
class JTAGDevice(GreatFETInterface):
""" Class representing a single device on a JTAG scan chain. """
DESCRIPTION = "no description available"
# A list of supported IDCODEs for the relevant class.
# Used unless the supports_idcode() method is overridden.
SUPPORTED_IDCODES = []
# A list of any GreatFET subcommands that are useful for driving this target;
# for informational use.
SUPPORTED_CONSOLE_COMMANDS = []
@classmethod
def from_idcode(cls, idcode, position_in_chain=0):
""" Attempts to create a JTAGDevice object that fits the provided IDCODE. """
# Assume the generic device class is the most appropriate class for the device, initially.
most_appropriate_class = cls
        # Search each imported subclass for one that supports the given IDCODE.
for subclass in cls.__subclasses__():
if subclass.supports_idcode(idcode):
most_appropriate_class = subclass
break
# Finally, create an instance of the most appropriate class for this object.
instance = object.__new__(most_appropriate_class)
most_appropriate_class.__init__(instance, idcode, position_in_chain)
return instance
@classmethod
def supports_idcode(cls, idcode):
"""
Returns true iff this class supports the given IDCODE.
This default implementation uses SUPPORTED_IDCODES, but subclasses can override this
for more nuanced behavior.
"""
return idcode in cls.SUPPORTED_IDCODES
@classmethod
def supported_console_commands(cls):
""" Returns a list of GreatFET subcommands that provide access to the given class. """
return cls.SUPPORTED_CONSOLE_COMMANDS
def idcode(self):
""" Returns this device's IDCODE. """
return self._idcode
def description(self):
""" Returns a short description of the device. """
return self.DESCRIPTION
def __init__(self, idcode, position_in_chain):
self._idcode = idcode
class JTAGChain(GreatFETInterface):
""" Class representing a JTAG scan-chain interface. """
# Short name for this type of interface.
INTERFACE_SHORT_NAME = "jtag"
#
# Simple mapping that captures the various TAP FSM states.
# Names from the JTAG SVF specification are used directly, so we can easily parse SVF files.
#
STATE_PROGRESSIONS = {
'RESET': {0: 'IDLE', 1: 'RESET' },
'IDLE': {0: 'IDLE', 1: 'DRSELECT' },
# Data register path.
'DRSELECT': {0: 'DRCAPTURE', 1: 'IRSELECT' },
'DRCAPTURE': {0: 'DRSHIFT', 1: 'DREXIT1' },
        'DRSHIFT':   {0: 'DRSHIFT',   1: 'DREXIT1'  },
'DREXIT1': {0: 'DRPAUSE', 1: 'DRUPDATE' },
'DRPAUSE': {0: 'DRPAUSE', 1: 'DREXIT2' },
'DREXIT2': {0: 'DRSHIFT', 1: 'DRUPDATE' },
'DRUPDATE': {0: 'IDLE', 1: 'DRSELECT' },
# Instruction register path.
'IRSELECT': {0: 'IRCAPTURE', 1: 'RESET' },
'IRCAPTURE': {0: 'IRSHIFT', 1: 'IREXIT1' },
'IRSHIFT': {0: 'IRSHIFT', 1: 'IREXIT1' },
'IREXIT1': {0: 'IRPAUSE', 1: 'IRUPDATE' },
'IRPAUSE': {0: 'IRPAUSE', 1: 'IREXIT2' },
'IREXIT2': {0: 'IRSHIFT', 1: 'IRUPDATE' },
'IRUPDATE': {0: 'IDLE', 1: 'DRSELECT' },
}
def __init__(self, board, max_frequency=405e3):
""" Creates a new JTAG scan-chain interface.
        Parameters:
            board -- the GreatFET board we're working with.
            max_frequency -- the maximum frequency at which we should attempt to scan out data
"""
# Grab our JTAG API object.
self.api = board.apis.jtag
# Assume we're starting our chain in 'IDLE'.
self.state = 'IDLE'
# Configure our chain to run at the relevant frequency.
self.frequency = int(max_frequency)
self.max_bits_per_scan = self.api.configure(self.frequency)
def set_frequency(self, max_frequency):
""" Sets the operating frequency of future transactions on this JTAG chain. """
self.frequency = int(max_frequency)
self.api.configure(self.frequency)
def _progress_state(self, tms_value):
""" Adjusts our internal model of the TAP FSM to account for an applied TMS value. """
# Normalize our state to always be 1 or 0.
tms_value = 1 if tms_value else 0
# Move our state to the next state per our TAP FSM.
self.state = self.STATE_PROGRESSIONS[self.state][tms_value]
def pulse_tms(self, cycles=1, asserted=True):
""" Asserts or de-asserts TMS for the given number of cycles; used for navigating the TAP FSM. """
# Run the clock for a single cycle, with TMS asserted each time.
for _ in range(cycles):
self.api.run_clock(1, asserted)
self._progress_state(asserted)
def initialize_chain(self):
""" Put the scan chain into its initial state, allowing fresh JTAG communications. """
# Pulse the TMS line five times -- this brings us into the TEST_RESET state, which resets the test logic.
self.pulse_tms(5)
# We now should know that we're in the RESET state.
assert(self.state == 'RESET')
def _receive_data(self, bits_to_scan, advance_state=False):
""" Performs a raw scan-in of data, and returns the result. """
# Perform our actual data scan-in.
# TODO: break larger-than-maximum transactions into smaller ones.
result = self.api.scan_in(bits_to_scan, advance_state)
# Once we're complete, advance our state, if necessary.
if advance_state:
self._progress_state(True)
return result
def _pad_data_to_length(self, length_in_bits, data=None):
""" Pads a given data set to a given length, in bits. """
# Compute how many bytes we need the data to be.
target_length_bytes = (length_in_bits + 7) // 8
# If our data doesn't need padding, return it directly.
if data and (len(data) >= target_length_bytes):
return data
# Create a mutable array of data; and add any data we have.
padded = bytearray()
if data:
padded.extend(data)
# Figure out how much padding we need.
padding_necessary = target_length_bytes - len(padded)
padded.extend("b\0" * padding_necessary)
# Return our padded data.
return padded
def _transmit_data(self, bits_to_scan, data=None, advance_state=False):
""" Performs a raw scan-out of data, discarding any result. """
# Pad our data to the relevant length.
        data = self._pad_data_to_length(bits_to_scan, data)
# Perform our actual data scan-in.
# TODO: break larger-than-maximum transactions into smaller ones.
self.api.scan_out(bits_to_scan, advance_state, data)
# Once we're complete, advance our state, if necessary.
if advance_state:
self._progress_state(True)
def _scan_data(self, bits_to_scan, byte_data, advance_state=False):
""" Performs a raw scan-in of data, and returns the result. """
# Perform our actual data scan-in.
# TODO: break larger-than-maximum transactions into smaller ones.
result = self.api.scan(bits_to_scan, advance_state, byte_data)
# Once we're complete, advance our state, if necessary.
if advance_state:
self._progress_state(True)
return result
def _next_hop_towards(self, state):
""" Identify the next TMS value we should apply to move towards the given state. """
# Special case: if we're headed to RESET, then our next hop is always 1.
if state == 'RESET':
return 1
# Special case: if we're in the Select-DR state, we'll steer either towards the instruction column ('1')
# or data column ('0') based on the target state.
if self.state == 'DRSELECT':
return 1 if 'IR' in state else 0
# Grab the next states for TMS values of one and zero.
next_states = self.STATE_PROGRESSIONS[self.state]
# We'll apply a simple heuristic to advance through the TAP FSM.
        # First, we'll identify if providing a '1' would cause us to loop back towards the current state,
# which will occur if we'd stay in the same state with a '1', or if we'd move out of the core FSM.
towards_one_would_loop = (next_states[1] == self.state) or (next_states[1] == 'RESET')
# Next, we'll apply the following simple heuristics:
# - If pulsing clock with TMS=0 would land us in the right state, do so.
        #  - If pulsing clock with TMS=1 would cause us to self-loop, pulse clock with TMS=0.
# - Otherwise, pulse clock with TMS=1, as TMS=1 generally moves us through the TAP FSM.
target_state_is_towards_zero = (next_states[0] == state)
return 0 if (target_state_is_towards_zero or towards_one_would_loop) else 1
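    # Worked example for _next_hop_towards (illustrative): steering from IDLE
    # towards DRSHIFT yields TMS hops 1 (IDLE -> DRSELECT), 0 (DRSELECT ->
    # DRCAPTURE) and 0 (DRCAPTURE -> DRSHIFT), per STATE_PROGRESSIONS above.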
def _ensure_in_state(self, state):
"""
Ensures the JTAG TAP FSM is in the given state.
If we're not; progresses the TAP FSM by pulsing TMS until we reach the relevant state.
"""
# Progress through the TAP FSM until we're in the right state.
while self.state != state:
# Identify the direction we'll need to move in order to move closer to our target state...
next_hop = self._next_hop_towards(state)
# ... and apply it.
self.pulse_tms(asserted=next_hop)
def move_to_state(self, state_name):
""" Moves the JTAG scan chain to the relevant state.
Parameters:
state_name: The target state to wind up in, as a string. States are accepted in the format
defined in the JTAG SVF standard, and thus should be one of:
"RESET", "IDLE", "DRSELECT", "DRCAPTURE", "DRSHIFT", "DREXIT1", "DRPAUSE",
"DREXIT2", "DRUPDATE", "IRSELECT", "IRCAPTURE", "IRSHIFT", "IREXIT1", "IRPAUSE",
"IREXIT2", "IRUPDATE"
"""
self._ensure_in_state(state_name.strip())
def _shift_while_in_state(self, state, tdi=None, length=None, ignore_response=False, advance_state=False, byteorder='big'):
""" Shifts data through the chain while in the given state. """
# Normalize our data into a bitstring type that we can easily work with.
# This both ensures we have a known format; and implicitly handles things like padding.
if tdi:
data_bits = bits(tdi, length, byteorder=byteorder)
# Convert from our raw data to the format we'll need to send down to the device.
bit_length = len(data_bits)
data_bytes = data_bits.to_bytes(byteorder='big')
else:
if length is None:
raise ValueError("either TDI or length must be provided!")
bit_length = length
# Move into our shift-DR state.
self._ensure_in_state(state)
# Finally, issue the transaction itself.
if tdi and ignore_response:
self._transmit_data(bit_length, data_bytes, advance_state)
return None
elif tdi:
result = self._scan_data(bit_length, data_bytes, advance_state)
else:
result = self._receive_data(bit_length, advance_state)
# Return our data, converted back up to bits.
return bits(result, bit_length)
def _validate_response(self, response_bits, tdo=None, mask=None):
""" Validates the response provided by a _shift_while_in_state call, in the traditional JTAG SVF form. """
# If we don't have any data to validate against, vacuously succeed.
if (not tdo) or (not response_bits):
return
# If we have a mask, mask both the TDO value and response, and then compare.
masked_response = mask & response_bits if mask else response_bits
masked_tdo = mask & tdo if mask else tdo
if masked_response != masked_tdo:
raise JTAGPatternError("Scan result did not match expected pattern: {} != {} (expected)!".format(
masked_response, masked_tdo), response_bits)
def shift_data(self, tdi=None, length=None, tdo=None, mask=None,
ignore_response=False, advance_state=False, byteorder='big'):
""" Shifts data through the scan-chain's data register.
Parameters:
tdi -- The bits to be scanned out via TDI. Can be a support.bits() object, a string of 1's and 0's,
an integer, or bytes. If this is an integer or bytes object, the length argument must be provided.
If omitted or None, a string of all zeroes will be used,
length -- The length of the transaction to be performed, in bits. This can be longer than the TDI data;
in which case the transmission will be padded with zeroes.
tdo -- The expected data to be received from the scan operation. If this is provided, the read result
will be compared to this data (optionally masked by mask), and an exception will be thrown if
the data doesn't match this value. Designed to behave like the SVF TDO field.
mask -- If provided, the given tdo argument will be masked, such that only bits corresponding to a '1'
                               in this mask argument are considered when checking against 'tdo'. This is the behavior defined
in the SVF standard; see it for more information.
            ignore_response -- If set, the returned response will always be empty, and tdo and mask will be ignored.
                               This allows for a slight performance optimization, as we don't have to shuttle data back.
byteorder -- The byteorder to consider the tdi value in; if bytes are provided.
Returns the bits read, or None if the response is ignored.
"""
# Perform the core shift, and gather the response.
response = self._shift_while_in_state('DRSHIFT', tdi=tdi, length=length, ignore_response=ignore_response,
advance_state=advance_state, byteorder=byteorder)
# Validate our response against any provided constraints.
self._validate_response(response, tdo=tdo, mask=mask)
return response
def shift_instruction(self, tdi=None, length=None, tdo=None, mask=None,
ignore_response=False, advance_state=False, byteorder='big'):
""" Shifts data through the chain's instruction register.
Parameters:
tdi -- The bits to be scanned out via TDI. Can be a support.bits() object, a string of 1's and 0's,
an integer, or bytes. If this is an integer or bytes object, the length argument must be provided.
If omitted or None, a string of all zeroes will be used,
length -- The length of the transaction to be performed, in bits. This can be longer than the TDI data;
in which case the transmission will be padded with zeroes.
tdo -- The expected data to be received from the scan operation. If this is provided, the read result
will be compared to this data (optionally masked by mask), and an exception will be thrown if
the data doesn't match this value. Designed to behave like the SVF TDO field.
mask -- If provided, the given tdo argument will be masked, such that only bits corresponding to a '1'
                               in this mask argument are considered when checking against 'tdo'. This is the behavior defined
in the SVF standard; see it for more information.
            ignore_response -- If set, the returned response will always be empty, and tdo and mask will be ignored.
                               This allows for a slight performance optimization, as we don't have to shuttle data back.
byteorder -- The byteorder to consider the tdi value in; if bytes are provided.
Returns the bits read, or None if the response is ignored.
"""
# Perform the core shift, and gather the response.
response = self._shift_while_in_state('IRSHIFT', tdi=tdi, length=length, ignore_response=ignore_response,
advance_state=advance_state, byteorder=byteorder)
# Validate our response against any provided constraints.
self._validate_response(response, tdo=tdo, mask=mask)
return response
def run_test(self, cycles, from_state='IDLE', end_state=None):
""" Places the device into the RUNTEST/IDLE (or provided) state, and pulses the JTAG clock.
        Parameters:
cycles -- The number of cycles for which the device should remain in the given state.
from_state -- The state in which the cycles should be spent; defaults to IDLE.
end_state -- The state in which the device should be placed after the test is complete.
"""
if from_state:
self.move_to_state(from_state)
self.api.run_clock(cycles, False, timeout=0)
        if end_state:
self.move_to_state(end_state)
def _create_device_for_idcode(self, idcode, position_in_chain):
""" Creates a JTAGDevice object for the relevant idcode. """
return JTAGDevice.from_idcode(idcode, position_in_chain)
def enumerate(self, return_idcodes=False):
""" Initializes the JTAG TAP FSM, and attempts to identify all connected devices.
Parameters:
return_idcodes -- If true, this method will return a list of IDCodes rather than JTAGDevice objects.
Returns a list of JTAGDevices (return_idcodes=False) or JTAG IDCODES (return_idcodes=True).
"""
devices = []
# Place the JTAG TAP FSM into its initial state, so we can perform enumeration.
self.initialize_chain()
# Resetting the TAP FSM also automatically loaded the instruction register with the IDCODE
# instruction, and accordingly filled the chain of data registers with each device's IDCODE.
# We can accordingly just scan out the data using shift_data.
# Once we (re-)initialize the chain, each device automatically loads the IDCODE instruction
# for execution. This means that if we just scan in data, we'll receive each device's IDCODE,
# followed by a null terminator (32 bits of zeroes).
position_in_chain = 0
while True:
# Attempt to read a 32-bit IDCODE from the device.
raw_idcode = self.shift_data(length=32)
idcode = int.from_bytes(raw_idcode, byteorder='little')
# If our IDCODE is all 1's, and we have no devices, we seem to be stuck at one.
# Warn the user.
if idcode == 0xFFFFFFFF and not devices:
warn("TDI appears to be stuck at '1'. Check your wiring?")
# If we've received our null IDCODE, we've finished enumerating the chain.
# We'll also treat an all-1's IDCODE as a terminator, as this invalid IDCODE occurs
# if TDI is stuck-at-one.
if idcode in (0x00000000, 0xFFFFFFFF):
self.pulse_tms(asserted=True)
break
if return_idcodes:
devices.append(idcode)
else:
devices.append(self._create_device_for_idcode(idcode, position_in_chain))
position_in_chain += 1
return devices
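# A hedged bring-up sketch for enumerate(); `jtag` stands in for an instance
# of this class.
#
#     for position, idcode in enumerate(jtag.enumerate(return_idcodes=True)):
#         print("device {}: IDCODE {:08x}".format(position, idcode))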
def play_svf_instructions(self, svf_string, log_function=None, error_log_function=print):
""" Executes a string of JTAG SVF instructions, strumming the relevant scan chain.
svf_string -- A string containing valid JTAG SVF instructions to be executed.
log_function -- If provided, this function will be called with verbose operation information.
error_log_function -- This function will be used to print information about errors that occur.
"""
# Create the parser that will run our SVF file, and run our SVF.
parser = SVFParser(svf_string, GreatfetSVFEventHandler(self, log_function, error_log_function))
parser.parse_file()
def play_svf_file(self, svf_file, log_function=None, error_log_function=print):
""" Executes the JTAG SVF instructions from the given file.
svf_file -- A filename or file object pointing to a JTAG SVF file.
log_function -- If provided, this function will be called with verbose operation information.
error_log_function -- This function will be used to print information about errors that occur.
"""
close_after = False
if isinstance(svf_file, str):
svf_file = open(svf_file, 'r')
close_after = True
self.play_svf_instructions(svf_file.read(), log_function=log_function, error_log_function=error_log_function)
if close_after:
svf_file.close()
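# A small playback sketch using the helpers above; the filename is a
# placeholder and `jtag` is assumed to be an instance of the interface class.
#
#     jtag.play_svf_file("configure_target.svf", log_function=print)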
class GreatfetSVFEventHandler(SVFEventHandler):
""" SVF event handler that delegates handling of SVF instructions to a GreatFET JTAG interface. """
def __init__(self, interface, verbose_log_function=None, error_log_function=print):
""" Creates a new SVF event handler.
Parameters:
interface: The GreatFET JTAG interface that will execute our JTAG commands.
verbose_log_function: If provided, called with verbose operation information.
error_log_function: Used to report error information; defaults to print.
"""
if verbose_log_function is None:
verbose_log_function = lambda string : None
if error_log_function is None:
error_log_function = print
self.interface = interface
self.log = verbose_log_function
self.log_error = error_log_function
# Assume that after a data / instruction shift operation that we'll
# wind up in the IDLE state, per the SVF standard. The SVF file can
# override these defaults.
self.end_dr_state = 'IDLE'
self.end_ir_state = 'IDLE'
# By default, don't have any headers or trailers for IR or DR shifts.
# The SVF can override these using the HDR/TDR/HIR/TIR instructions.
nullary_padding = {'tdi': bits(), 'tdo': bits(), 'mask': bits(), }
self.dr_header = nullary_padding.copy()
self.dr_trailer = nullary_padding.copy()
self.ir_header = nullary_padding.copy()
self.ir_trailer = nullary_padding.copy()
# Store default masks for our ShiftIR and ShiftDR instructions.
self.last_dr_mask = None
self.last_dr_smask = None
self.ir_mask = None
self.ir_smask = None
def svf_frequency(self, frequency):
"""Called when the ``FREQUENCY`` command is encountered."""
self.log (" -- FREQUENCY set to {}".format(frequency))
self.interface.set_frequency(frequency)
def svf_trst(self, mode):
"""Called when the ``TRST`` command is encountered."""
warn('SVF provided TRST command; but this implementation does not yet support driving the TRST line')
def svf_state(self, state, path):
"""Called when the ``STATE`` command is encountered."""
# Visit each state in any intermediate paths provided...
if path:
for intermediate in path:
self.log("STATE; Moving through {}.".format(intermediate))
self.interface.move_to_state(intermediate)
# ... ensuring we end up in the relevant state.
self.log("Moving to {} STATE.".format(state))
self.interface.move_to_state(state)
def svf_endir(self, state):
"""Called when the ``ENDIR`` command is encountered."""
self.log("Moving to {} after each Shift-IR.".format(state))
self.end_ir_state = state
def svf_enddr(self, state):
"""Called when the ``ENDDR`` command is encountered."""
self.log("Moving to {} after each Shift-DR.".format(state))
self.end_dr_state = state
def svf_hir(self, **header):
"""Called when the ``HIR`` command is encountered."""
self.log("Applying Shift-IR prefix. ")
self.ir_header = header
def svf_tir(self, **trailer):
"""Called when the ``TIR`` command is encountered."""
self.log("Applying Shift-IR suffix. ")
self.ir_trailer = trailer
def svf_hdr(self, **header):
"""Called when the ``HDR`` command is encountered."""
self.log("Applying Shift-DR header. ")
self.dr_header = header
def svf_tdr(self, **trailer):
"""Called when the ``TDR`` command is encountered."""
self.log("Applying Shift-DR suffix. ")
self.dr_trailer = trailer
def svf_sir(self, **data):
"""Called when the ``SIR`` command is encountered."""
# Append our header and trailer to each of our arguments.
arguments = {}
for arg, value in data.items():
header = self.ir_header[arg] if (arg in self.ir_header) else bits()
trailer = self.ir_trailer[arg] if (arg in self.ir_trailer) else bits()
arguments[arg] = (header + value + trailer) if value else None
if data['mask']:
self.ir_mask = data['mask']
if data['smask']:
self.ir_smask = data['smask']
self.log("Performing SHIFT-IR:")
self.log( "out: {}".format(arguments['tdi']))
self.log( "expected: {}".format(arguments['tdo']))
self.log( "mask: {}".format(arguments['tdo']))
try:
result = self.interface.shift_instruction(tdi=arguments['tdi'], tdo=arguments['tdo'], mask=arguments['mask'])
except JTAGPatternError as e:
self.log( "in: {} [FAIL]\n".format(e.result))
self.log_error("\n\n<!> Failure while performing SHIFT-IR: \n " + str(e))
raise
self.log( "in: {} [OK]\n".format(result))
def svf_sdr(self, **data):
"""Called when the ``SDR`` command is encountered."""
# Append our header and trailer to each of our arguments.
arguments = {}
for arg, value in data.items():
header = self.dr_header[arg] if (arg in self.dr_header) else bits()
trailer = self.dr_trailer[arg] if (arg in self.dr_trailer) else bits()
arguments[arg] = (header + value + trailer) if value else None
if data['mask']:
self.last_dr_mask = data['mask']
if data['smask']:
self.last_dr_smask = data['smask']
self.log("Performing SHIFT-DR:")
self.log( "out: {}".format(arguments['tdi']))
self.log( "expected: {}".format(arguments['tdo']))
self.log( "mask: {}".format(arguments['tdo']))
try:
result = self.interface.shift_data(tdi=arguments['tdi'], tdo=arguments['tdo'], mask=arguments['mask'])
except JTAGPatternError as e:
self.log( "in: {} [FAIL]\n".format(e.result))
self.log_error("\n\n<!> Failure while performing SHIFT-DR: \n " + str(e))
raise
self.log( "in: {} [OK]\n".format(result))
def svf_runtest(self, run_state, run_count, run_clock, min_time, max_time, end_state):
"""Called when the ``RUNTEST`` command is encountered."""
self.log("Running test for {} cycles.".format(run_count))
self.interface.run_test(run_count, from_state=run_state, end_state=end_state)
def svf_piomap(self, mapping):
"""Called when the ``PIOMAP`` command is encountered."""
raise NotImplementedError("This implementation does not yet support PIOMAP.")
def svf_pio(self, vector):
"""Called when the ``PIO`` command is encountered."""
raise NotImplementedError("This implementation does not yet support PIO.")
|
[
"warnings.warn"
] |
[((23663, 23774), 'warnings.warn', 'warn', (['"""SVF provided TRST command; but this implementation does not yet support driving the TRST line"""'], {}), "(\n 'SVF provided TRST command; but this implementation does not yet support driving the TRST line'\n )\n", (23667, 23774), False, 'from warnings import warn\n'), ((19657, 19715), 'warnings.warn', 'warn', (['"""TDI appears to be stuck at \'1\'. Check your wiring?"""'], {}), '("TDI appears to be stuck at \'1\'. Check your wiring?")\n', (19661, 19715), False, 'from warnings import warn\n')]
|
import matplotlib
matplotlib.use('Agg')
from Swing.util.BoxPlot import BoxPlot
from matplotlib.backends.backend_pdf import PdfPages
from scipy import stats
import pdb
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
import time
from Swing.util.mplstyle import style1
import seaborn as sns
from palettable.colorbrewer.qualitative import Set1_3
def get_df(df, fp, min_lag, max_lag, td_window):
new_df = df[(df['file_path'] == fp) & (df['min_lag'] == min_lag) & (df['max_lag'] == max_lag) & (df['td_window'] == td_window)]
return new_df
def read_tdr_results(folder_list, folder_str):
agg_df = pd.DataFrame()
for input_folder in folder_list:
for file_path in os.listdir(input_folder):
if folder_str in file_path:
df = pd.read_csv(input_folder+file_path,sep='\t', engine='python')
# check if the columns are misaligned.
if type(df['permutation_n'].iloc[0]) is str:
new_col = df.columns.tolist()
new_col.pop(0)
new_df = df.iloc[:,0:len(df.iloc[0])-1]
new_df.columns = new_col
df=new_df
agg_df = agg_df.append(df)
return agg_df
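# A hedged note on the expected inputs: each matching file is assumed to be a
# tab-separated table with at least the columns used below ('file_path',
# 'min_lag', 'max_lag', 'td_window', 'permutation_n', 'aupr', 'auroc'), e.g.
#
#     agg = read_tdr_results(["/some/results/dir/"], folder_str="2017-09")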
input_folder_list = ["/projects/p20519/roller_output/gnw/RandomForest/"]
test_statistic = ['aupr', 'auroc']
save_tag = "window_scan"
n_trials = 100
start = time.time()
agg_df = read_tdr_results(input_folder_list, folder_str = "2017-09")
#agg_df.to_pickle("Dionesus_window_scan.pkl")
#agg_df = pd.read_pickle("Dionesus_window_scan.pkl")
end = time.time()
stat = 'aupr'
network_list = agg_df['file_path'].unique().tolist()
window_sizes = range(1,22)
outer_list = []
overall_df = pd.DataFrame()
for td_window in window_sizes:
inner_list = []
for network in network_list:
baseline = get_df(agg_df, network, 0, 0, 21)
if len(baseline) == 0:
continue
if 21-td_window > 2:
max_lag = 3
else:
max_lag = 21-td_window
if (td_window == 21):
min_lag = 0
max_lag = 0
else:
min_lag = 1
comparisons = get_df(agg_df, network, min_lag, max_lag, td_window)
if len(comparisons) == 0:
continue
stat = 'aupr'
baseline_mean=baseline[stat].mean()
comparisons['percent_{}'.format(stat)] = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
stat = 'auroc'
baseline_mean=baseline[stat].mean()
comparisons['percent_{}'.format(stat)] = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
overall_df = overall_df.append(comparisons.iloc[0:50,:], ignore_index = True)
outer_list.append(inner_list)
stat = 'percent_aupr'
colors = []
for w in range(1, 21):
test_data = overall_df[overall_df.td_window == w]
baseline = overall_df[overall_df.td_window == 21]
baseline_mean = baseline[stat].mean()
diff = np.mean(test_data[stat])-baseline_mean
if stats.ttest_ind(test_data[stat], baseline[stat])[1] < 0.05:
if diff > 0:
colors.append(Set1_3.mpl_colors[0])
else:
colors.append(Set1_3.mpl_colors[1])
else:
colors.append('grey')
fig, ax = plt.subplots(figsize=(11,7))
sns.boxplot(ax = ax, data = overall_df, x = 'td_window', y = 'percent_aupr', palette=colors)
xlabs = ax.get_xticks()
ax.set_xticklabels(['{:d}'.format(int(x) + 1) for x in xlabs])
ax.set_ylabel('Percent Difference AUPR')
ax.set_xlabel('Window Size')
fig.savefig('RandomForest_10_AUPR_window_scan.png')
fig, ax = plt.subplots(figsize=(11,7))
sns.boxplot(ax = ax, data = overall_df, x = 'td_window', y = 'percent_auroc', palette=colors)
xlabs = ax.get_xticks()
ax.set_xticklabels(['{:d}'.format(int(x) + 1) for x in xlabs])
ax.set_ylabel('Percent Difference AUROC')
ax.set_xlabel('Window Size')
fig.savefig('RandomForest_10_AUROC_window_scan.png')
|
[
"pandas.DataFrame",
"pandas.read_csv",
"scipy.stats.ttest_ind",
"time.time",
"seaborn.boxplot",
"matplotlib.use",
"numpy.mean",
"matplotlib.pyplot.subplots",
"os.listdir"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((1417, 1428), 'time.time', 'time.time', ([], {}), '()\n', (1426, 1428), False, 'import time\n'), ((1603, 1614), 'time.time', 'time.time', ([], {}), '()\n', (1612, 1614), False, 'import time\n'), ((1740, 1754), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1752, 1754), True, 'import pandas as pd\n'), ((3270, 3299), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(11, 7)'}), '(figsize=(11, 7))\n', (3282, 3299), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3387), 'seaborn.boxplot', 'sns.boxplot', ([], {'ax': 'ax', 'data': 'overall_df', 'x': '"""td_window"""', 'y': '"""percent_aupr"""', 'palette': 'colors'}), "(ax=ax, data=overall_df, x='td_window', y='percent_aupr',\n palette=colors)\n", (3310, 3387), True, 'import seaborn as sns\n'), ((3605, 3634), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(11, 7)'}), '(figsize=(11, 7))\n', (3617, 3634), True, 'import matplotlib.pyplot as plt\n'), ((3634, 3723), 'seaborn.boxplot', 'sns.boxplot', ([], {'ax': 'ax', 'data': 'overall_df', 'x': '"""td_window"""', 'y': '"""percent_auroc"""', 'palette': 'colors'}), "(ax=ax, data=overall_df, x='td_window', y='percent_auroc',\n palette=colors)\n", (3645, 3723), True, 'import seaborn as sns\n'), ((650, 664), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (662, 664), True, 'import pandas as pd\n'), ((727, 751), 'os.listdir', 'os.listdir', (['input_folder'], {}), '(input_folder)\n', (737, 751), False, 'import os\n'), ((2982, 3006), 'numpy.mean', 'np.mean', (['test_data[stat]'], {}), '(test_data[stat])\n', (2989, 3006), True, 'import numpy as np\n'), ((3028, 3076), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['test_data[stat]', 'baseline[stat]'], {}), '(test_data[stat], baseline[stat])\n', (3043, 3076), False, 'from scipy import stats\n'), ((812, 876), 'pandas.read_csv', 'pd.read_csv', (['(input_folder + file_path)'], {'sep': '"""\t"""', 'engine': '"""python"""'}), "(input_folder + file_path, sep='\\t', engine='python')\n", (823, 876), True, 'import pandas as pd\n')]
|
"""
It runs some tests on the source code, ranging from formatting
to type checking with mypy.
"""
from pathlib import Path
import subprocess
import shlex
import black # type: ignore[import]
import sys
import os
here = Path(os.path.abspath(__file__)).parent
all_scripts = [here / "corpe.py", here / "tests.py"]
all_scripts.extend(
here / "src" / script
for script in os.listdir(here / "src")
if script.endswith(".py")
)
MyPy_SHOW_ERROR_CODES: bool = True
def echo_and_call(cmd: list[str]) -> None:
print(f"[CMD] {shlex.join(cmd)}")
subprocess.call(cmd)
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.argv.append("-full")
full = "-full" in sys.argv or "-f" in sys.argv
if "-format" in sys.argv or full:
for script in all_scripts:
path = here / script
if black.format_file_in_place(
path, False, black.FileMode(), black.WriteBack.YES
):
print(f"Formatted file: {script}")
else:
print(f"Skipping file {script} as it is already formatted")
if "-mypy" in sys.argv or full:
cmd = [sys.executable, "-m", "mypy"]
cmd.extend(str(here / script) for script in all_scripts)
if MyPy_SHOW_ERROR_CODES:
cmd.append("--show-error-codes")
echo_and_call(cmd)
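# A hedged usage sketch (assuming this file is saved as tests.py):
#
#     python tests.py            # no arguments implies -full: black, then mypy
#     python tests.py -format    # formatting only
#     python tests.py -mypy      # type checking only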
|
[
"black.FileMode",
"os.path.abspath",
"sys.argv.append",
"shlex.join",
"subprocess.call",
"os.listdir"
] |
[((557, 577), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (572, 577), False, 'import subprocess\n'), ((227, 252), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (242, 252), False, 'import os\n'), ((642, 666), 'sys.argv.append', 'sys.argv.append', (['"""-full"""'], {}), "('-full')\n", (657, 666), False, 'import sys\n'), ((378, 402), 'os.listdir', 'os.listdir', (["(here / 'src')"], {}), "(here / 'src')\n", (388, 402), False, 'import os\n'), ((534, 549), 'shlex.join', 'shlex.join', (['cmd'], {}), '(cmd)\n', (544, 549), False, 'import shlex\n'), ((898, 914), 'black.FileMode', 'black.FileMode', ([], {}), '()\n', (912, 914), False, 'import black\n')]
|
# -*- coding: utf-8 -*-
from conans import CMake, ConanFile, tools
import os
class VulkanHppConan(ConanFile):
name = "vulkan_hpp"
version = "1.1.107"
license = "Apache-2.0"
author = "bincrafters <<EMAIL>>"
url = "https://github.com/bincrafters-conan-vulkan_hpp"
homepage = "https://github.com/KhronosGroup/Vulkan-Hpp"
description = "Open-Source Vulkan C++ API"
topics = ("vulkan", "khronos", "graphics", "api", "c++", )
exports = ["LICENSE.md", ]
_source_subfolder = "source_subfolder"
_generator_git_revision = "7900c655f3e2be62fa8dd25e09eae1170c76cfa2"
scm = {
"type": "git",
"url": "https://github.com/KhronosGroup/Vulkan-Hpp.git",
"subfolder": _source_subfolder,
"revision": _generator_git_revision,
}
generators = "cmake",
def build_requirements(self):
self.build_requires("tinyxml2/7.0.1@nicolastagliani/stable")
def requirements(self):
self.requires("vulkan_headers/{}@{}/{}".format(self.version, self.user, self.channel))
def source(self):
cmakelists = os.path.join(self._source_subfolder, "CMakeLists.txt")
tools.replace_in_file(cmakelists,
"project(VulkanHppGenerator)",
"project(VulkanHppGenerator)\n"
"include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n"
"conan_basic_setup(TARGETS)")
tools.replace_in_file(cmakelists,
"${TINYXML2_SOURCES}",
"")
tools.replace_in_file(cmakelists,
"${TINYXML2_HEADERS}",
"")
tools.save_append(cmakelists, "\n\ntarget_link_libraries(VulkanHppGenerator PUBLIC CONAN_PKG::tinyxml2)\n")
generator_src = os.path.join(self._source_subfolder, "VulkanHppGenerator.cpp")
tools.replace_path_in_file(generator_src,
"VULKAN_HPP_FILE",
"destfilename")
tools.replace_path_in_file(generator_src,
" std::string filename = (argc == 1) ? VK_SPEC : argv[1];",
" std::string filename = (argc < 2) ? VK_SPEC : argv[1];\n"
" std::string destfilename = (argc < 3) ? VULKAN_HPP_FILE : argv[2];")
def build(self):
cmake = CMake(self)
cmake.configure(source_dir=os.path.join(self.source_folder, self._source_subfolder))
cmake.build()
generator_exe = os.path.join(self.build_folder, "bin", "VulkanHppGenerator{}".format(".exe" if tools.os_info.is_windows else "",))
vk_xml_path = os.path.join(self.deps_user_info["vulkan_headers"].VULKAN_REGISTRY_PATH, "vk.xml")
vulkan_hpp_path = os.path.join(self.build_folder, "vulkan.hpp")
self.run("{} {} {}".format(generator_exe, vk_xml_path, vulkan_hpp_path))
def package(self):
self.copy("vulkan.hpp", src=self.build_folder, dst=os.path.join("include", "vulkan"))
self.copy("LICENSE.md", dst="licenses")
|
[
"os.path.join",
"conans.tools.replace_in_file",
"conans.CMake",
"conans.tools.save_append",
"conans.tools.replace_path_in_file"
] |
[((1095, 1149), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""CMakeLists.txt"""'], {}), "(self._source_subfolder, 'CMakeLists.txt')\n", (1107, 1149), False, 'import os\n'), ((1158, 1343), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['cmakelists', '"""project(VulkanHppGenerator)"""', '"""project(VulkanHppGenerator)\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup(TARGETS)"""'], {}), '(cmakelists, \'project(VulkanHppGenerator)\',\n """project(VulkanHppGenerator)\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup(TARGETS)"""\n )\n', (1179, 1343), False, 'from conans import CMake, ConanFile, tools\n'), ((1468, 1528), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['cmakelists', '"""${TINYXML2_SOURCES}"""', '""""""'], {}), "(cmakelists, '${TINYXML2_SOURCES}', '')\n", (1489, 1528), False, 'from conans import CMake, ConanFile, tools\n'), ((1597, 1657), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['cmakelists', '"""${TINYXML2_HEADERS}"""', '""""""'], {}), "(cmakelists, '${TINYXML2_HEADERS}', '')\n", (1618, 1657), False, 'from conans import CMake, ConanFile, tools\n'), ((1726, 1843), 'conans.tools.save_append', 'tools.save_append', (['cmakelists', '"""\n\ntarget_link_libraries(VulkanHppGenerator PUBLIC CONAN_PKG::tinyxml2)\n"""'], {}), '(cmakelists,\n """\n\ntarget_link_libraries(VulkanHppGenerator PUBLIC CONAN_PKG::tinyxml2)\n"""\n )\n', (1743, 1843), False, 'from conans import CMake, ConanFile, tools\n'), ((1859, 1921), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""VulkanHppGenerator.cpp"""'], {}), "(self._source_subfolder, 'VulkanHppGenerator.cpp')\n", (1871, 1921), False, 'import os\n'), ((1930, 2006), 'conans.tools.replace_path_in_file', 'tools.replace_path_in_file', (['generator_src', '"""VULKAN_HPP_FILE"""', '"""destfilename"""'], {}), "(generator_src, 'VULKAN_HPP_FILE', 'destfilename')\n", (1956, 2006), False, 'from conans import CMake, ConanFile, tools\n'), ((2085, 2339), 'conans.tools.replace_path_in_file', 'tools.replace_path_in_file', (['generator_src', '""" std::string filename = (argc == 1) ? VK_SPEC : argv[1];"""', '""" std::string filename = (argc < 2) ? VK_SPEC : argv[1];\n std::string destfilename = (argc < 3) ? VULKAN_HPP_FILE : argv[2];"""'], {}), '(generator_src,\n \' std::string filename = (argc == 1) ? VK_SPEC : argv[1];\',\n """ std::string filename = (argc < 2) ? VK_SPEC : argv[1];\n std::string destfilename = (argc < 3) ? VULKAN_HPP_FILE : argv[2];"""\n )\n', (2111, 2339), False, 'from conans import CMake, ConanFile, tools\n'), ((2470, 2481), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (2475, 2481), False, 'from conans import CMake, ConanFile, tools\n'), ((2759, 2845), 'os.path.join', 'os.path.join', (["self.deps_user_info['vulkan_headers'].VULKAN_REGISTRY_PATH", '"""vk.xml"""'], {}), "(self.deps_user_info['vulkan_headers'].VULKAN_REGISTRY_PATH,\n 'vk.xml')\n", (2771, 2845), False, 'import os\n'), ((2868, 2913), 'os.path.join', 'os.path.join', (['self.build_folder', '"""vulkan.hpp"""'], {}), "(self.build_folder, 'vulkan.hpp')\n", (2880, 2913), False, 'import os\n'), ((2517, 2573), 'os.path.join', 'os.path.join', (['self.source_folder', 'self._source_subfolder'], {}), '(self.source_folder, self._source_subfolder)\n', (2529, 2573), False, 'import os\n'), ((3078, 3111), 'os.path.join', 'os.path.join', (['"""include"""', '"""vulkan"""'], {}), "('include', 'vulkan')\n", (3090, 3111), False, 'import os\n')]
|
# coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dlxapi.configuration import Configuration
class FieldTagAddedEvent(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'portfolio_id': 'str',
'tag_parent_name': 'str',
'tag_id': 'str',
'tag_color': 'str',
'name': 'str',
'id': 'str',
'tag_name': 'str',
'tag_parent_id': 'str',
'portfolio_plan': 'PortfolioPlan'
}
attribute_map = {
'portfolio_id': 'portfolioId',
'tag_parent_name': 'tagParentName',
'tag_id': 'tagId',
'tag_color': 'tagColor',
'name': 'name',
'id': 'id',
'tag_name': 'tagName',
'tag_parent_id': 'tagParentId',
'portfolio_plan': 'portfolioPlan'
}
def __init__(self, portfolio_id=None, tag_parent_name=None, tag_id=None, tag_color=None, name=None, id=None, tag_name=None, tag_parent_id=None, portfolio_plan=None, _configuration=None): # noqa: E501
"""FieldTagAddedEvent - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._portfolio_id = None
self._tag_parent_name = None
self._tag_id = None
self._tag_color = None
self._name = None
self._id = None
self._tag_name = None
self._tag_parent_id = None
self._portfolio_plan = None
self.discriminator = None
if portfolio_id is not None:
self.portfolio_id = portfolio_id
if tag_parent_name is not None:
self.tag_parent_name = tag_parent_name
if tag_id is not None:
self.tag_id = tag_id
if tag_color is not None:
self.tag_color = tag_color
if name is not None:
self.name = name
if id is not None:
self.id = id
if tag_name is not None:
self.tag_name = tag_name
if tag_parent_id is not None:
self.tag_parent_id = tag_parent_id
if portfolio_plan is not None:
self.portfolio_plan = portfolio_plan
@property
def portfolio_id(self):
"""Gets the portfolio_id of this FieldTagAddedEvent. # noqa: E501
:return: The portfolio_id of this FieldTagAddedEvent. # noqa: E501
:rtype: str
"""
return self._portfolio_id
@portfolio_id.setter
def portfolio_id(self, portfolio_id):
"""Sets the portfolio_id of this FieldTagAddedEvent.
:param portfolio_id: The portfolio_id of this FieldTagAddedEvent. # noqa: E501
:type: str
"""
self._portfolio_id = portfolio_id
@property
def tag_parent_name(self):
"""Gets the tag_parent_name of this FieldTagAddedEvent. # noqa: E501
:return: The tag_parent_name of this FieldTagAddedEvent. # noqa: E501
:rtype: str
"""
return self._tag_parent_name
@tag_parent_name.setter
def tag_parent_name(self, tag_parent_name):
"""Sets the tag_parent_name of this FieldTagAddedEvent.
:param tag_parent_name: The tag_parent_name of this FieldTagAddedEvent. # noqa: E501
:type: str
"""
self._tag_parent_name = tag_parent_name
@property
def tag_id(self):
"""Gets the tag_id of this FieldTagAddedEvent. # noqa: E501
:return: The tag_id of this FieldTagAddedEvent. # noqa: E501
:rtype: str
"""
return self._tag_id
@tag_id.setter
def tag_id(self, tag_id):
"""Sets the tag_id of this FieldTagAddedEvent.
:param tag_id: The tag_id of this FieldTagAddedEvent. # noqa: E501
:type: str
"""
self._tag_id = tag_id
@property
def tag_color(self):
"""Gets the tag_color of this FieldTagAddedEvent. # noqa: E501
:return: The tag_color of this FieldTagAddedEvent. # noqa: E501
:rtype: str
"""
return self._tag_color
@tag_color.setter
def tag_color(self, tag_color):
"""Sets the tag_color of this FieldTagAddedEvent.
:param tag_color: The tag_color of this FieldTagAddedEvent. # noqa: E501
:type: str
"""
self._tag_color = tag_color
@property
def name(self):
"""Gets the name of this FieldTagAddedEvent. # noqa: E501
:return: The name of this FieldTagAddedEvent. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FieldTagAddedEvent.
:param name: The name of this FieldTagAddedEvent. # noqa: E501
:type: str
"""
self._name = name
@property
def id(self):
"""Gets the id of this FieldTagAddedEvent. # noqa: E501
:return: The id of this FieldTagAddedEvent. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this FieldTagAddedEvent.
:param id: The id of this FieldTagAddedEvent. # noqa: E501
:type: str
"""
self._id = id
@property
def tag_name(self):
"""Gets the tag_name of this FieldTagAddedEvent. # noqa: E501
:return: The tag_name of this FieldTagAddedEvent. # noqa: E501
:rtype: str
"""
return self._tag_name
@tag_name.setter
def tag_name(self, tag_name):
"""Sets the tag_name of this FieldTagAddedEvent.
:param tag_name: The tag_name of this FieldTagAddedEvent. # noqa: E501
:type: str
"""
self._tag_name = tag_name
@property
def tag_parent_id(self):
"""Gets the tag_parent_id of this FieldTagAddedEvent. # noqa: E501
:return: The tag_parent_id of this FieldTagAddedEvent. # noqa: E501
:rtype: str
"""
return self._tag_parent_id
@tag_parent_id.setter
def tag_parent_id(self, tag_parent_id):
"""Sets the tag_parent_id of this FieldTagAddedEvent.
:param tag_parent_id: The tag_parent_id of this FieldTagAddedEvent. # noqa: E501
:type: str
"""
self._tag_parent_id = tag_parent_id
@property
def portfolio_plan(self):
"""Gets the portfolio_plan of this FieldTagAddedEvent. # noqa: E501
:return: The portfolio_plan of this FieldTagAddedEvent. # noqa: E501
:rtype: PortfolioPlan
"""
return self._portfolio_plan
@portfolio_plan.setter
def portfolio_plan(self, portfolio_plan):
"""Sets the portfolio_plan of this FieldTagAddedEvent.
:param portfolio_plan: The portfolio_plan of this FieldTagAddedEvent. # noqa: E501
:type: PortfolioPlan
"""
self._portfolio_plan = portfolio_plan
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FieldTagAddedEvent, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FieldTagAddedEvent):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FieldTagAddedEvent):
return True
return self.to_dict() != other.to_dict()
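# A brief usage sketch for this generated model; the field values are
# placeholders:
#
#     event = FieldTagAddedEvent(portfolio_id='p-1', tag_name='Urgent')
#     event.to_dict()   # plain dict of all attributes (unset ones are None)
#     repr(event)       # pprint-formatted string via to_str()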
|
[
"six.iteritems",
"dlxapi.configuration.Configuration"
] |
[((7649, 7682), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (7662, 7682), False, 'import six\n'), ((1772, 1787), 'dlxapi.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1785, 1787), False, 'from dlxapi.configuration import Configuration\n')]
|
''' This is the main entry point for the probe
It will:
1. Read configuration from its database (probably a text file to start with)
2. Execute measurements as required (ping first, we'll add module support later, maybe)
3. Make those measurements available using prometheus style metrics on :9091/metrics (by default)
Later:
- Support configuration of additional nodes through the web api :9091/config
'''
# Definitions...
hostname = 'localhost'
webserver_port = 9091
# Set up a webserver so our metrics can be scraped...
from prometheus_client import start_http_server, Summary
import random
import time
# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')
# Decorate function with metric.
@REQUEST_TIME.time()
def process_request(t):
"""A dummy function that takes some time."""
time.sleep(t)
if __name__ == '__main__':
# Start up the server to expose the metrics.
start_http_server(webserver_port)
# Generate some requests.
while True:
process_request(random.random())
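# Once running, the metrics can be scraped over HTTP, e.g.:
#
#     curl http://localhost:9091/metrics
#
# The Summary above contributes request_processing_seconds_count and
# request_processing_seconds_sum to that output.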
|
[
"random.random",
"time.sleep",
"prometheus_client.start_http_server",
"prometheus_client.Summary"
] |
[((683, 753), 'prometheus_client.Summary', 'Summary', (['"""request_processing_seconds"""', '"""Time spent processing request"""'], {}), "('request_processing_seconds', 'Time spent processing request')\n", (690, 753), False, 'from prometheus_client import start_http_server, Summary\n'), ((886, 899), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (896, 899), False, 'import time\n'), ((981, 1004), 'prometheus_client.start_http_server', 'start_http_server', (['(8000)'], {}), '(8000)\n', (998, 1004), False, 'from prometheus_client import start_http_server, Summary\n'), ((1075, 1090), 'random.random', 'random.random', ([], {}), '()\n', (1088, 1090), False, 'import random\n')]
|
from enum import Enum
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
class ChoiceEnum(Enum):
@classmethod
def choices(cls):
return tuple((item.name, item.value) for item in cls)
class Category(ChoiceEnum):
company = 'Company satisfaction'
personal = 'Personal satisfaction'
team = 'Team satisfaction'
class Question(models.Model):
class Meta:
abstract = True
question_string = models.CharField(max_length=1000, null=False)
category = models.CharField(max_length=10, choices=Category.choices(),
default=Category.personal.name)
class RangeMarkQuestion(Question):
mark = models.PositiveIntegerField(validators=[
MinValueValidator(1), MaxValueValidator(10)
], null=True)
class BooleanMarkQuestion(Question):
mark = models.NullBooleanField(null=True)
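# Illustration of the ChoiceEnum helper above:
#
#     Category.choices()
#     # -> (('company', 'Company satisfaction'),
#     #     ('personal', 'Personal satisfaction'),
#     #     ('team', 'Team satisfaction'))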
|
[
"django.db.models.CharField",
"django.db.models.NullBooleanField",
"django.core.validators.MinValueValidator",
"django.core.validators.MaxValueValidator"
] |
[((484, 529), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'null': '(False)'}), '(max_length=1000, null=False)\n', (500, 529), False, 'from django.db import models\n'), ((878, 912), 'django.db.models.NullBooleanField', 'models.NullBooleanField', ([], {'null': '(True)'}), '(null=True)\n', (901, 912), False, 'from django.db import models\n'), ((766, 786), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(1)'], {}), '(1)\n', (783, 786), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((788, 809), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(10)'], {}), '(10)\n', (805, 809), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n')]
|
# Copyright 2020 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5.models.mesh_transformer."""
from absl.testing import absltest
import t5.data
from t5.data import test_utils
from t5.models import mesh_transformer
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
tf.disable_v2_behavior()
tf.enable_eager_execution()
class MeshDatasetFnsTest(test_utils.FakeMixtureTest):
def check_ds_shape(self, ds, sequence_length):
for k, v in tf.data.get_output_shapes(ds).items():
feat = k.split("_")[0]
if len(v) == 0: # pylint:disable=g-explicit-length-test
expected_shape = []
elif feat in sequence_length:
expected_shape = [sequence_length[feat]]
else:
expected_shape = [None]
self.assertEqual(expected_shape, v.as_list())
def verify_mesh_dataset_fn(self, mixture_name, train, use_cached):
if train:
dataset_fn = mesh_transformer.mesh_train_dataset_fn
split = tfds.Split.TRAIN
else:
dataset_fn = mesh_transformer.mesh_eval_dataset_fn
split = tfds.Split.VALIDATION
vocabulary = t5.data.MixtureRegistry.get(mixture_name).get_vocabulary()
sequence_length = {"inputs": 13, "targets": 13}
output = dataset_fn(
mixture_name,
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=split,
use_cached=use_cached)
if train:
ds = output
self.check_ds_shape(ds, sequence_length)
# Materialize a few batches to test for errors.
list(zip(range(10), tfds.as_numpy(ds)))
else:
self.assertLen(output, 1)
output = output[0]
(name, dsfn, postprocess_fn, metric_fns) = output
self.assertEqual("cached_task" if use_cached else "uncached_task", name)
ds = dsfn()
self.check_ds_shape(ds, sequence_length)
# No postprocess_fn is supplied so it should function as a pass-through
self.assertEqual("test", postprocess_fn("test"))
# test_utils task has empty metric_fns list
self.assertEqual([], metric_fns)
# Materialize the full dataset to test for errors.
list(tfds.as_numpy(ds))
def test_mesh_train_dataset_fn(self):
self.verify_mesh_dataset_fn(
mixture_name="cached_mixture", train=True, use_cached=True,
)
self.verify_mesh_dataset_fn(
mixture_name="uncached_mixture", train=True, use_cached=False,
)
def test_mesh_eval_dataset_fn(self):
self.verify_mesh_dataset_fn(
mixture_name="cached_mixture", train=False, use_cached=True,
)
self.verify_mesh_dataset_fn(
mixture_name="uncached_mixture", train=False, use_cached=False,
)
def test_maybe_shuffle_and_subsample_dataset_no_shuffle(self):
ds = tf.data.Dataset.range(100)
num_eval_examples = 10
shuffle_eval_examples = False
num_repeat = 2
ds = mesh_transformer.maybe_shuffle_and_subsample_dataset(
ds, num_eval_examples, shuffle_eval_examples)
ds = ds.repeat(num_repeat)
list_examples = list(tfds.as_numpy(ds))
# Assert on the number of examples.
self.assertLen(list_examples, num_eval_examples * num_repeat)
# Since `shuffle_eval_examples` is false, we will get the same examples
# repeated `num_repeat` times.
# Ex: [0, 1, 2, 3, 0, 1, 2, 3]
self.assertEqual(list_examples, list(range(num_eval_examples)) * num_repeat)
def test_maybe_shuffle_and_subsample_dataset_shuffle(self):
ds = tf.data.Dataset.range(100)
num_eval_examples = 10
shuffle_eval_examples = True
num_repeat = 2
ds = mesh_transformer.maybe_shuffle_and_subsample_dataset(
ds, num_eval_examples, shuffle_eval_examples,
num_repeat * num_eval_examples) # shuffle buffer size.
ds = ds.repeat(num_repeat)
list_examples = list(tfds.as_numpy(ds))
# With high probability, not every slice of `num_eval_examples` in
# `list_examples` will be the same.
self.assertNotEqual(list_examples[:num_eval_examples],
list_examples[num_eval_examples:2 * num_eval_examples])
if __name__ == "__main__":
absltest.main()
|
[
"absl.testing.absltest.main",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.compat.v1.data.Dataset.range",
"t5.models.mesh_transformer.maybe_shuffle_and_subsample_dataset",
"tensorflow_datasets.as_numpy",
"tensorflow.compat.v1.data.get_output_shapes",
"tensorflow.compat.v1.disable_v2_behavior"
] |
[((814, 838), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (836, 838), True, 'import tensorflow.compat.v1 as tf\n'), ((839, 866), 'tensorflow.compat.v1.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (864, 866), True, 'import tensorflow.compat.v1 as tf\n'), ((4611, 4626), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4624, 4626), False, 'from absl.testing import absltest\n'), ((3259, 3285), 'tensorflow.compat.v1.data.Dataset.range', 'tf.data.Dataset.range', (['(100)'], {}), '(100)\n', (3280, 3285), True, 'import tensorflow.compat.v1 as tf\n'), ((3376, 3478), 't5.models.mesh_transformer.maybe_shuffle_and_subsample_dataset', 'mesh_transformer.maybe_shuffle_and_subsample_dataset', (['ds', 'num_eval_examples', 'shuffle_eval_examples'], {}), '(ds, num_eval_examples,\n shuffle_eval_examples)\n', (3428, 3478), False, 'from t5.models import mesh_transformer\n'), ((3966, 3992), 'tensorflow.compat.v1.data.Dataset.range', 'tf.data.Dataset.range', (['(100)'], {}), '(100)\n', (3987, 3992), True, 'import tensorflow.compat.v1 as tf\n'), ((4082, 4216), 't5.models.mesh_transformer.maybe_shuffle_and_subsample_dataset', 'mesh_transformer.maybe_shuffle_and_subsample_dataset', (['ds', 'num_eval_examples', 'shuffle_eval_examples', '(num_repeat * num_eval_examples)'], {}), '(ds, num_eval_examples,\n shuffle_eval_examples, num_repeat * num_eval_examples)\n', (4134, 4216), False, 'from t5.models import mesh_transformer\n'), ((3541, 3558), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (3554, 3558), True, 'import tensorflow_datasets as tfds\n'), ((4311, 4328), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (4324, 4328), True, 'import tensorflow_datasets as tfds\n'), ((989, 1018), 'tensorflow.compat.v1.data.get_output_shapes', 'tf.data.get_output_shapes', (['ds'], {}), '(ds)\n', (1014, 1018), True, 'import tensorflow.compat.v1 as tf\n'), ((2648, 2665), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (2661, 2665), True, 'import tensorflow_datasets as tfds\n'), ((2071, 2088), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (2084, 2088), True, 'import tensorflow_datasets as tfds\n')]
|
# coding=utf-8
from django import forms
from modules.employee_management.employee_info.models import Employee
from modules.payroll_manage.payroll_detail.models import *
from modules.project_manage.models import Project
from modules.social_security.social_security_detail.models import *
SOCIAL_SECURITY_BALANCE = (
('1', u'平衡'),
('2', u'盈余'),
('3', u'亏损'),
)
# Social security audit
class SocialSecurityAudit(models.Model):
name = models.CharField(u"姓名", max_length=255)
identity_card_number = models.CharField(u"身份证号", max_length=18)
# The employee foreign key pulls in: hire date, departure date, project name, department, and project lead
employee = models.ForeignKey(Employee, verbose_name=u"员工编号", blank=True, null=True)
social_security_date = models.DateField(u"社保月份")
social_security_billing = models.PositiveIntegerField(u"社保结算", blank=True, null=True)
social_security_outlay = models.PositiveIntegerField(u"社保支出", blank=True, null=True)
social_security_balance = models.CharField(u"社保平衡", max_length=1, choices=SOCIAL_SECURITY_BALANCE)
provident_fund_billing = models.PositiveIntegerField(u"公积金结算", blank=True, null=True)
provident_fund_outlay = models.PositiveIntegerField(u"公积金支出", blank=True, null=True)
provident_fund_balance = models.CharField(u"公积金平衡", max_length=1, choices=SOCIAL_SECURITY_BALANCE)
remark = models.CharField(u"备注", max_length=255, blank=True, null=True)
def __str__(self):
return self.remark
class Meta:
verbose_name = u"社保审核"
ordering = ['-id'] # descending by id
permissions = (
("browse_socialsecurityaudit", u"浏览 社保审核"),
("export_socialsecurityaudit", u"导出 社保审核"),
)
class SocialSecurityAuditForm(forms.ModelForm):
identity_card_number = forms.ChoiceField(label=u'身份证号')
def __init__(self, *args, **kwargs):
super(SocialSecurityAuditForm, self).__init__(*args, **kwargs)
# Assemble the ID-number choices: show every ID card number found in the same month's social security detail and payroll summary detail records, without duplicates
socialsecuritydetail_id = SocialSecurityDetail.objects.values_list("identity_card_number", flat=True)
payrolldetail_id = PayrollDetail.objects.values_list("identity_card_number", flat=True)
identity_card_number_list = list(set(list(socialsecuritydetail_id) + list(payrolldetail_id)))
self.fields['identity_card_number'].choices = ((x, x) for x in identity_card_number_list)
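# e.g. merging ID numbers ['A', 'B'] and ['B', 'C'] above yields choices drawn
# from {'A', 'B', 'C'}; note that set() does not preserve input order.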
class Meta:
model = SocialSecurityAudit
fields = ['identity_card_number', 'social_security_date', 'remark']
|
[
"django.forms.ChoiceField"
] |
[((1616, 1648), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'label': 'u"""身份证号"""'}), "(label=u'身份证号')\n", (1633, 1648), False, 'from django import forms\n')]
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Get the Savu version
"""
# from pkg_resources import get_distribution
#
# __version__ = get_distribution('savu').version
import os
path = os.path.abspath(os.path.dirname(__file__))
thepath = path + '/../install/'
thepath = thepath if os.path.exists(thepath) else path + '/install/'
with open(thepath + 'latest_version.txt', 'r') as f:
version_file = f.readline().strip()
__version__ = version_file.split('savu_v')[1].split('/')[0]
__install__ = 'install/' + version_file.split('/')[0]
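# For example, a latest_version.txt whose first line is 'savu_v2.4/...' yields
# __version__ == '2.4' and __install__ == 'install/savu_v2.4'.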
|
[
"os.path.dirname",
"os.path.exists"
] |
[((751, 776), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (766, 776), False, 'import os\n'), ((831, 854), 'os.path.exists', 'os.path.exists', (['thepath'], {}), '(thepath)\n', (845, 854), False, 'import os\n')]
|
#!/usr/bin/env python3
# zulip-send -- Sends a message to the specified recipients.
import argparse
import logging
import sys
from typing import Any, Dict
import zulip
logging.basicConfig()
log = logging.getLogger("zulip-send")
def do_send_message(client: zulip.Client, message_data: Dict[str, Any]) -> bool:
"""Sends a message and optionally prints status about the same."""
if message_data["type"] == "stream":
log.info(
'Sending message to stream "%s", subject "%s"... '
% (message_data["to"], message_data["subject"])
)
else:
log.info("Sending message to {}... ".format(message_data["to"]))
response = client.send_message(message_data)
if response["result"] == "success":
log.info("Message sent.")
return True
else:
log.error(response["msg"])
return False
def main() -> int:
usage = """zulip-send [options] [recipient...]
Sends a message to specified recipients.
Examples: zulip-send --stream denmark --subject castle -m "Something is rotten in the state of Denmark."
zulip-send <EMAIL> <EMAIL> -m "Conscience doth make cowards of us all."
Specify your Zulip API credentials and server in a ~/.zuliprc file or using the options.
"""
parser = zulip.add_default_arguments(argparse.ArgumentParser(usage=usage))
parser.add_argument(
"recipients", nargs="*", help="email addresses of the recipients of the message"
)
parser.add_argument(
"-m", "--message", help="Specifies the message to send, prevents interactive prompting."
)
group = parser.add_argument_group("Stream parameters")
group.add_argument(
"-s",
"--stream",
dest="stream",
action="store",
help="Allows the user to specify a stream for the message.",
)
group.add_argument(
"-S",
"--subject",
dest="subject",
action="store",
help="Allows the user to specify a subject for the message.",
)
options = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.INFO)
# Sanity check user data
if len(options.recipients) != 0 and (options.stream or options.subject):
parser.error("You cannot specify both a username and a stream/subject.")
if len(options.recipients) == 0 and (bool(options.stream) != bool(options.subject)):
parser.error("Stream messages must have a subject")
if len(options.recipients) == 0 and not (options.stream and options.subject):
parser.error("You must specify a stream/subject or at least one recipient.")
client = zulip.init_from_options(options)
if not options.message:
options.message = sys.stdin.read()
if options.stream:
message_data = {
"type": "stream",
"content": options.message,
"subject": options.subject,
"to": options.stream,
}
else:
message_data = {
"type": "private",
"content": options.message,
"to": options.recipients,
}
if not do_send_message(client, message_data):
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
|
[
"zulip.init_from_options",
"argparse.ArgumentParser",
"logging.basicConfig",
"sys.stdin.read",
"logging.getLogger"
] |
[((171, 192), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (190, 192), False, 'import logging\n'), ((200, 231), 'logging.getLogger', 'logging.getLogger', (['"""zulip-send"""'], {}), "('zulip-send')\n", (217, 231), False, 'import logging\n'), ((2668, 2700), 'zulip.init_from_options', 'zulip.init_from_options', (['options'], {}), '(options)\n', (2691, 2700), False, 'import zulip\n'), ((1329, 1365), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (1352, 1365), False, 'import argparse\n'), ((2756, 2772), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (2770, 2772), False, 'import sys\n'), ((2108, 2127), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2125, 2127), False, 'import logging\n')]
|