max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
ProjectEuler/9.py
|
RobVor/Python
| 0
|
6626551
|
"""A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2."""
def Pythagorean_Triplet(N):
for a in range(1,N):
for b in range(1,N):
c = 1000 - a - b
if c > 0:
if c*c==a*a+b*b:
print(a*b*c)
break
Pythagorean_Triplet(1000)
|
"""A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2."""
def Pythagorean_Triplet(N):
for a in range(1,N):
for b in range(1,N):
c = 1000 - a - b
if c > 0:
if c*c==a*a+b*b:
print(a*b*c)
break
Pythagorean_Triplet(1000)
|
en
| 0.743266
|
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which, a^2 + b^2 = c^2 For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
| 4.377424
| 4
|
marmot/representations/tests/test_word_qe_representation_generator.py
|
qe-team/marmot
| 19
|
6626552
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import yaml
import marmot
from marmot.representations.word_qe_representation_generator import WordQERepresentationGenerator
from marmot.experiment.import_utils import build_object
def join_with_module_path(loader, node):
    """Custom YAML '!join' tag handler.

    Resolves the node's scalar value as a path relative to the directory
    of the marmot.representations.tests package.
    """
    base_dir = os.path.dirname(marmot.representations.tests.__file__)
    relative_path = loader.construct_scalar(node)
    return os.path.join(base_dir, relative_path)
## register the tag handler
yaml.add_constructor('!join', join_with_module_path)
class WordQERepresentationGeneratorTests(unittest.TestCase):
    """Tests for WordQERepresentationGenerator over the dev.* fixture files."""

    def setUp(self):
        module_path = os.path.dirname(__file__)
        self.module_path = module_path
        test_config = os.path.join(module_path, 'test_config.yaml')
        with open(test_config, "r") as cfg_file:
            # Explicit Loader: bare yaml.load(stream) is deprecated and
            # unsafe on untrusted input. yaml.Loader is where the '!join'
            # constructor registered at module level lives, so the custom
            # tag keeps resolving.
            self.config = yaml.load(cfg_file, Loader=yaml.Loader)
        self.target_file = os.path.join(module_path, 'test_data/dev.target')
        self.source_file = os.path.join(module_path, 'test_data/dev.source')
        self.tags_file = os.path.join(module_path, 'test_data/dev.target.tags')

    def test_generator(self):
        """Representations built directly from files have aligned fields."""
        generator = WordQERepresentationGenerator(self.source_file, self.target_file, self.tags_file)
        data_obj = generator.generate()
        self._check_data_obj(data_obj)

    def test_load_from_config(self):
        """Representations built via the config loader behave the same."""
        generator = build_object(self.config['representations']['training'][1])
        data_obj = generator.generate()
        self._check_data_obj(data_obj)

    def _check_data_obj(self, data_obj):
        """Shared assertions: required keys present, parallel lengths.

        The original duplicated these five asserts in both tests, and the
        final target-vs-tags length check was already implied by the
        three-way equality chain.
        """
        self.assertIn('target', data_obj)
        self.assertIn('source', data_obj)
        self.assertIn('tags', data_obj)
        self.assertTrue(len(data_obj['target']) == len(data_obj['source']) == len(data_obj['tags']))

# TODO: test that tokenization happens like we expect
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import yaml
import marmot
from marmot.representations.word_qe_representation_generator import WordQERepresentationGenerator
from marmot.experiment.import_utils import build_object
def join_with_module_path(loader, node):
    """Custom YAML '!join' tag handler.

    Resolves the node's scalar value as a path relative to the directory
    of the marmot.representations.tests package.
    """
    base_dir = os.path.dirname(marmot.representations.tests.__file__)
    relative_path = loader.construct_scalar(node)
    return os.path.join(base_dir, relative_path)
## register the tag handler
yaml.add_constructor('!join', join_with_module_path)
class WordQERepresentationGeneratorTests(unittest.TestCase):
    """Tests for WordQERepresentationGenerator over the dev.* fixture files."""

    def setUp(self):
        module_path = os.path.dirname(__file__)
        self.module_path = module_path
        test_config = os.path.join(module_path, 'test_config.yaml')
        with open(test_config, "r") as cfg_file:
            # Explicit Loader: bare yaml.load(stream) is deprecated and
            # unsafe on untrusted input. yaml.Loader is where the '!join'
            # constructor registered at module level lives, so the custom
            # tag keeps resolving.
            self.config = yaml.load(cfg_file, Loader=yaml.Loader)
        self.target_file = os.path.join(module_path, 'test_data/dev.target')
        self.source_file = os.path.join(module_path, 'test_data/dev.source')
        self.tags_file = os.path.join(module_path, 'test_data/dev.target.tags')

    def test_generator(self):
        """Representations built directly from files have aligned fields."""
        generator = WordQERepresentationGenerator(self.source_file, self.target_file, self.tags_file)
        data_obj = generator.generate()
        self._check_data_obj(data_obj)

    def test_load_from_config(self):
        """Representations built via the config loader behave the same."""
        generator = build_object(self.config['representations']['training'][1])
        data_obj = generator.generate()
        self._check_data_obj(data_obj)

    def _check_data_obj(self, data_obj):
        """Shared assertions: required keys present, parallel lengths.

        The original duplicated these five asserts in both tests, and the
        final target-vs-tags length check was already implied by the
        three-way equality chain.
        """
        self.assertIn('target', data_obj)
        self.assertIn('source', data_obj)
        self.assertIn('tags', data_obj)
        self.assertTrue(len(data_obj['target']) == len(data_obj['source']) == len(data_obj['tags']))

# TODO: test that tokenization happens like we expect
if __name__ == '__main__':
    unittest.main()
|
en
| 0.63436
|
#!/usr/bin/python # -*- coding: utf-8 -*- define custom tag handler to join paths with the path of the marmot module ## register the tag handler # TODO: test that tokenization happens like we expect
| 2.502521
| 3
|
verification/referee.py
|
CheckiO-Missions/checkio-mission-family-gifts
| 0
|
6626553
|
<reponame>CheckiO-Missions/checkio-mission-family-gifts
from checkio.signals import ON_CONNECT
from checkio import api
from checkio.referees.io import CheckiOReferee
from tests import TESTS
cover = """def cover(f, data):
return f(set(str(x) for x in data[0]), tuple(set(str(n) for n in coup) for coup in data[1]))
"""
ERR_REPEAT = "Every person should be able to give to a different" \
" person than he offered the past years"
ERR_COUPLE = "Couples should not give to one another"
ERR_COUNT = "You can find {} chain(s)."
ERR_TYPE = "Wrong result type. " \
"The longest list of gift chains as a list/tuple of lists/tuples with strings."
ERR_WRONG_NAMES = "Wrong Family names"
def checker(data, user_result):
total = data[0]
family = set(data[1][0])
couples = tuple(set(x) for x in data[1][1])
if (not isinstance(user_result, (list, tuple)) or
any(not isinstance(chain, (list, tuple)) for chain in user_result)):
return False, ERR_TYPE
if len(user_result) < total:
return False, ERR_COUNT.format(total)
gifted = set()
for chain in user_result:
if not isinstance(chain, (list, tuple)) or not all(isinstance(el, str) for el in chain):
return False, ERR_TYPE
if set(chain) != family or len(chain) != len(family):
return False, ERR_WRONG_NAMES
for f, s in zip(chain, chain[1:] + [chain[0]]):
if {f, s} in couples:
return False, ERR_COUPLE
if (f, s) in gifted:
return False, ERR_REPEAT
gifted.add((f, s))
return True, "Ok"
# Wire the referee into CheckiO: on client connect, run TESTS against the
# user's "find_chains" function, first wrapping it with `cover` (same code
# for both Python runtimes) to normalize input types, then grading each
# attempt with `checker`.
api.add_listener(
    ON_CONNECT,
    CheckiOReferee(
        tests=TESTS,
        cover_code={
            'python-27': cover,
            'python-3': cover
        },
        checker=checker,
        function_name="find_chains"
        # add_allowed_modules=[],
        # add_close_builtins=[],
        # remove_allowed_modules=[]
    ).on_ready)
|
from checkio.signals import ON_CONNECT
from checkio import api
from checkio.referees.io import CheckiOReferee
from tests import TESTS
cover = """def cover(f, data):
return f(set(str(x) for x in data[0]), tuple(set(str(n) for n in coup) for coup in data[1]))
"""
ERR_REPEAT = "Every person should be able to give to a different" \
" person than he offered the past years"
ERR_COUPLE = "Couples should not give to one another"
ERR_COUNT = "You can find {} chain(s)."
ERR_TYPE = "Wrong result type. " \
"The longest list of gift chains as a list/tuple of lists/tuples with strings."
ERR_WRONG_NAMES = "Wrong Family names"
def checker(data, user_result):
total = data[0]
family = set(data[1][0])
couples = tuple(set(x) for x in data[1][1])
if (not isinstance(user_result, (list, tuple)) or
any(not isinstance(chain, (list, tuple)) for chain in user_result)):
return False, ERR_TYPE
if len(user_result) < total:
return False, ERR_COUNT.format(total)
gifted = set()
for chain in user_result:
if not isinstance(chain, (list, tuple)) or not all(isinstance(el, str) for el in chain):
return False, ERR_TYPE
if set(chain) != family or len(chain) != len(family):
return False, ERR_WRONG_NAMES
for f, s in zip(chain, chain[1:] + [chain[0]]):
if {f, s} in couples:
return False, ERR_COUPLE
if (f, s) in gifted:
return False, ERR_REPEAT
gifted.add((f, s))
return True, "Ok"
# Wire the referee into CheckiO: on client connect, run TESTS against the
# user's "find_chains" function, first wrapping it with `cover` (same code
# for both Python runtimes) to normalize input types, then grading each
# attempt with `checker`.
api.add_listener(
    ON_CONNECT,
    CheckiOReferee(
        tests=TESTS,
        cover_code={
            'python-27': cover,
            'python-3': cover
        },
        checker=checker,
        function_name="find_chains"
        # add_allowed_modules=[],
        # add_close_builtins=[],
        # remove_allowed_modules=[]
    ).on_ready)
|
en
| 0.306534
|
def cover(f, data): return f(set(str(x) for x in data[0]), tuple(set(str(n) for n in coup) for coup in data[1])) # add_allowed_modules=[], # add_close_builtins=[], # remove_allowed_modules=[]
| 2.862688
| 3
|
det3d/datasets/pipelines/__init__.py
|
reinforcementdriving/CIA-SSD
| 382
|
6626554
|
from .compose import Compose
from .formating import Reformat
# from .loading import LoadAnnotations, LoadImageFromFile, LoadProposals
from .loading import *
from .test_aug import MultiScaleFlipAug
from .transforms import (
Expand,
MinIoURandomCrop,
Normalize,
Pad,
PhotoMetricDistortion,
RandomCrop,
RandomFlip,
Resize,
SegResizeFlipPadRescale,
)
from .preprocess_v4 import Preprocess, Voxelization, AssignTarget
# Public API of det3d.datasets.pipelines.
# NOTE(review): "to_tensor", "ToTensor", "ImageToTensor", "ToDataContainer",
# "Transpose", "Collect", "LoadImageAnnotations", "LoadImageFromFile" and
# "LoadProposals" are presumably provided by `from .loading import *` above
# -- confirm that module actually exports them all.
__all__ = [
    "Compose",
    "Reformat",  # imported from .formating above but was missing from this list
    "to_tensor",
    "ToTensor",
    "ImageToTensor",
    "ToDataContainer",
    "Transpose",
    "Collect",
    "LoadImageAnnotations",
    "LoadImageFromFile",
    "LoadProposals",
    "MultiScaleFlipAug",
    "Resize",
    "RandomFlip",
    "Pad",
    "RandomCrop",
    "Normalize",
    "SegResizeFlipPadRescale",
    "MinIoURandomCrop",
    "Expand",
    "PhotoMetricDistortion",
    "Preprocess",
    "Voxelization",
    "AssignTarget",
]
|
from .compose import Compose
from .formating import Reformat
# from .loading import LoadAnnotations, LoadImageFromFile, LoadProposals
from .loading import *
from .test_aug import MultiScaleFlipAug
from .transforms import (
Expand,
MinIoURandomCrop,
Normalize,
Pad,
PhotoMetricDistortion,
RandomCrop,
RandomFlip,
Resize,
SegResizeFlipPadRescale,
)
from .preprocess_v4 import Preprocess, Voxelization, AssignTarget
# Public API of det3d.datasets.pipelines.
# NOTE(review): "to_tensor", "ToTensor", "ImageToTensor", "ToDataContainer",
# "Transpose", "Collect", "LoadImageAnnotations", "LoadImageFromFile" and
# "LoadProposals" are presumably provided by `from .loading import *` above
# -- confirm that module actually exports them all.
__all__ = [
    "Compose",
    "Reformat",  # imported from .formating above but was missing from this list
    "to_tensor",
    "ToTensor",
    "ImageToTensor",
    "ToDataContainer",
    "Transpose",
    "Collect",
    "LoadImageAnnotations",
    "LoadImageFromFile",
    "LoadProposals",
    "MultiScaleFlipAug",
    "Resize",
    "RandomFlip",
    "Pad",
    "RandomCrop",
    "Normalize",
    "SegResizeFlipPadRescale",
    "MinIoURandomCrop",
    "Expand",
    "PhotoMetricDistortion",
    "Preprocess",
    "Voxelization",
    "AssignTarget",
]
|
en
| 0.428708
|
# from .loading import LoadAnnotations, LoadImageFromFile, LoadProposals
| 1.447724
| 1
|
meteocalc/windchill.py
|
malexer/meteocalc
| 20
|
6626555
|
<filename>meteocalc/windchill.py<gh_stars>10-100
"""Module for calculation of Wind chill.
Wind-chill or windchill (popularly wind chill factor) is the lowering of
body temperature due to the passing-flow of lower-temperature air.
Wind chill numbers are always lower than the air temperature for values
where the formula is valid.
When the apparent temperature is higher than the air temperature,
the heat index is used instead.
Check wikipedia for more info:
https://en.wikipedia.org/wiki/Wind_chill
Formula details:
https://www.wpc.ncep.noaa.gov/html/windchill.shtml
"""
from .temperature import Temp, F
def wind_chill(temperature, wind_speed):
    """Calculate Wind Chill (feels like temperature) based on NOAA.

    The result is a Temp whose default unit is Fahrenheit (used when cast
    to int/float); convert with its Celsius (Temp.c) or Kelvin (Temp.k)
    properties. The NOAA formula is only defined for air temperatures at
    or below 50 F combined with wind speeds above 3 mph.

    :param temperature: temperature value in Fahrenheit or Temp instance.
    :type temperature: int, float, Temp
    :param wind_speed: wind speed in mph
    :type wind_speed: int, float
    :returns: Wind chill value
    :rtype: Temp
    :raises ValueError: when outside the formula's validity range.
    """
    air_temp = temperature.f if isinstance(temperature, Temp) else temperature
    if air_temp > 50 or wind_speed <= 3:
        raise ValueError(
            "Wind Chill Temperature is only defined for temperatures at"
            " or below 50 F and wind speeds above 3 mph.")
    # Hoist the shared V**0.16 term out of the polynomial.
    wind_factor = wind_speed ** 0.16
    feels_like = (35.74 + 0.6215 * air_temp
                  - 35.75 * wind_factor
                  + 0.4275 * air_temp * wind_factor)
    return Temp(feels_like, unit=F)
|
<filename>meteocalc/windchill.py<gh_stars>10-100
"""Module for calculation of Wind chill.
Wind-chill or windchill (popularly wind chill factor) is the lowering of
body temperature due to the passing-flow of lower-temperature air.
Wind chill numbers are always lower than the air temperature for values
where the formula is valid.
When the apparent temperature is higher than the air temperature,
the heat index is used instead.
Check wikipedia for more info:
https://en.wikipedia.org/wiki/Wind_chill
Formula details:
https://www.wpc.ncep.noaa.gov/html/windchill.shtml
"""
from .temperature import Temp, F
def wind_chill(temperature, wind_speed):
    """Calculate Wind Chill (feels like temperature) based on NOAA.

    The result is a Temp whose default unit is Fahrenheit (used when cast
    to int/float); convert with its Celsius (Temp.c) or Kelvin (Temp.k)
    properties. The NOAA formula is only defined for air temperatures at
    or below 50 F combined with wind speeds above 3 mph.

    :param temperature: temperature value in Fahrenheit or Temp instance.
    :type temperature: int, float, Temp
    :param wind_speed: wind speed in mph
    :type wind_speed: int, float
    :returns: Wind chill value
    :rtype: Temp
    :raises ValueError: when outside the formula's validity range.
    """
    air_temp = temperature.f if isinstance(temperature, Temp) else temperature
    if air_temp > 50 or wind_speed <= 3:
        raise ValueError(
            "Wind Chill Temperature is only defined for temperatures at"
            " or below 50 F and wind speeds above 3 mph.")
    # Hoist the shared V**0.16 term out of the polynomial.
    wind_factor = wind_speed ** 0.16
    feels_like = (35.74 + 0.6215 * air_temp
                  - 35.75 * wind_factor
                  + 0.4275 * air_temp * wind_factor)
    return Temp(feels_like, unit=F)
|
en
| 0.750588
|
Module for calculation of Wind chill. Wind-chill or windchill (popularly wind chill factor) is the lowering of body temperature due to the passing-flow of lower-temperature air. Wind chill numbers are always lower than the air temperature for values where the formula is valid. When the apparent temperature is higher than the air temperature, the heat index is used instead. Check wikipedia for more info: https://en.wikipedia.org/wiki/Wind_chill Formula details: https://www.wpc.ncep.noaa.gov/html/windchill.shtml Calculate Wind Chill (feels like temperature) based on NOAA. Default unit for resulting Temp value is Fahrenheit and it will be used in case of casting to int/float. Use Temp properties to convert result to Celsius (Temp.c) or Kelvin (Temp.k). Wind Chill Temperature is only defined for temperatures at or below 50 F and wind speeds above 3 mph. :param temperature: temperature value in Fahrenheit or Temp instance. :type temperature: int, float, Temp :param wind_speed: wind speed in mph :type wind_speed: int, float :returns: Wind chill value :rtype: Temp
| 4.059145
| 4
|
iotbx/examples/pdb_hierarchy.py
|
dperl-sol/cctbx_project
| 155
|
6626556
|
from __future__ import absolute_import, division, print_function
import iotbx.pdb
import random
import sys
def run(args):
  """Demonstrate the iotbx.pdb hierarchy API on each PDB file in *args*.

  For every file name: parse it, print overall counts, walk the three
  hierarchy "views" (atom_group-, conformer-, and residue_group-based),
  trace a random atom back to its parents, mutate the hierarchy in
  place, and re-export it as PDB text and as the file "junk.pdb".
  """
  for file_name in args:
    pdb_inp = iotbx.pdb.input(file_name=file_name)
    #
    hierarchy = pdb_inp.construct_hierarchy()
    #
    hierarchy.overall_counts().show()
    #
    print("""
# Primary "view" of hierarchy:
# model, chain, residue_group, atom_group, atom""")
    for model in hierarchy.models():
      print('model: "%s"' % model.id)
      for chain in model.chains():
        print('chain: "%s"' % chain.id)
        for residue_group in chain.residue_groups():
          print(' residue_group: resseq="%s" icode="%s"' % (
            residue_group.resseq, residue_group.icode))
          for atom_group in residue_group.atom_groups():
            print(' atom_group: altloc="%s" resname="%s"' % (
              atom_group.altloc, atom_group.resname))
            for atom in atom_group.atoms():
              print(' ', atom.format_atom_record())
              print(" atom.xyz: ", atom.xyz)
              print(" atom.occ: ", atom.occ)
              print(" atom.b: ", atom.b)
              print(' atom.segid: "%s"' % atom.segid)
    #
    print("""
# Secondary (read-only) "view" of the hierarchy:
# model, chain, conformer, residue, atom""")
    for model in hierarchy.models():
      print('model: "%s"' % model.id)
      for chain in model.chains():
        print('chain: "%s"' % chain.id)
        for conformer in chain.conformers():
          print(' conformer: "%s"' % conformer.altloc)
          for residue in conformer.residues():
            print(' residue: resname="%s" resseq="%s" icode="%s"' % (
              residue.resname, residue.resseq, residue.icode))
            for atom in residue.atoms():
              print(' ', atom.format_atom_record())
    #
    print("""
# Special case: if there are no alt. conf. you can eliminate one
# level of the hierarchy (which may be more intuitive at first).""")
    for model in hierarchy.models():
      print('model: "%s"' % model.id)
      for chain in model.chains():
        print('chain: "%s"' % chain.id)
        # The next line will fail (AssertionError) if there are alt. conf.
        for residue in chain.residues():
          print(' residue: resname="%s" resseq="%s" icode="%s"' % (
            residue.resname, residue.resseq, residue.icode))
          for atom in residue.atoms():
            print(' ', atom.format_atom_record())
    #
    print("""
# A third "view" of the hierarchy:
# model, chain, residue_group, conformer, residue, atom
# This is useful for handling all conformers of a given residue_group
# together.
# All meaningful PDB files will only have one residue per conformer.""")
    for model in hierarchy.models():
      print('model: "%s"' % model.id)
      for chain in model.chains():
        print('chain: "%s"' % chain.id)
        for residue_group in chain.residue_groups():
          print(' residue_group: resseq="%s" icode="%s"' % (
            residue_group.resseq, residue_group.icode))
          for conformer in residue_group.conformers():
            print(' conformer: altloc="%s"' % (
              conformer.altloc))
            residue = conformer.only_residue()
            print(' residue: resname="%s"' % residue.resname)
            for atom in residue.atoms():
              print(' ', atom.format_atom_record())
    #
    # Pick a random atom and trace back to its parents.
    # (each time you run the script the result is different!)
    pdb_atoms = hierarchy.atoms()
    atom = random.choice(pdb_atoms)
    atom_group = atom.parent()
    residue_group = atom_group.parent()
    chain = residue_group.parent()
    model = chain.parent()
    root = model.parent()
    #
    # To expose a bit how it works internally:
    # - root is a reference to the original hierarchy:
    assert root.is_similar_hierarchy(other=hierarchy)
    # - it actually is a reference pointing to the same piece of memory
    assert root.memory_id() == hierarchy.memory_id()
    #
    # Modify arbitrarily.
    atom.name = "XY"
    atom_group.altloc = "Z"
    atom_group.resname = "NOP"
    residue_group.resseq = "9999"
    residue_group.icode = "I"
    chain.id = "Q"
    model.id = "9"
    #
    # Add an atom to the atom_group
    atom = iotbx.pdb.hierarchy.atom()
    atom.name = "NEW"
    atom_group.append_atom(atom=atom)
    # need for more complicated functions such as selections
    hierarchy.reset_atom_i_seqs()
    #
    print("""
# Format entire hierarchy as pdb string and pdb file.""")
    print(hierarchy.as_pdb_string(append_end=True))
    # Overwrites "junk.pdb" in the current working directory on every run.
    hierarchy.write_pdb_file(file_name="junk.pdb")
if (__name__ == "__main__"):
  run(sys.argv[1:])
|
from __future__ import absolute_import, division, print_function
import iotbx.pdb
import random
import sys
def run(args):
  """Demonstrate the iotbx.pdb hierarchy API on each PDB file in *args*.

  For every file name: parse it, print overall counts, walk the three
  hierarchy "views" (atom_group-, conformer-, and residue_group-based),
  trace a random atom back to its parents, mutate the hierarchy in
  place, and re-export it as PDB text and as the file "junk.pdb".
  """
  for file_name in args:
    pdb_inp = iotbx.pdb.input(file_name=file_name)
    #
    hierarchy = pdb_inp.construct_hierarchy()
    #
    hierarchy.overall_counts().show()
    #
    print("""
# Primary "view" of hierarchy:
# model, chain, residue_group, atom_group, atom""")
    for model in hierarchy.models():
      print('model: "%s"' % model.id)
      for chain in model.chains():
        print('chain: "%s"' % chain.id)
        for residue_group in chain.residue_groups():
          print(' residue_group: resseq="%s" icode="%s"' % (
            residue_group.resseq, residue_group.icode))
          for atom_group in residue_group.atom_groups():
            print(' atom_group: altloc="%s" resname="%s"' % (
              atom_group.altloc, atom_group.resname))
            for atom in atom_group.atoms():
              print(' ', atom.format_atom_record())
              print(" atom.xyz: ", atom.xyz)
              print(" atom.occ: ", atom.occ)
              print(" atom.b: ", atom.b)
              print(' atom.segid: "%s"' % atom.segid)
    #
    print("""
# Secondary (read-only) "view" of the hierarchy:
# model, chain, conformer, residue, atom""")
    for model in hierarchy.models():
      print('model: "%s"' % model.id)
      for chain in model.chains():
        print('chain: "%s"' % chain.id)
        for conformer in chain.conformers():
          print(' conformer: "%s"' % conformer.altloc)
          for residue in conformer.residues():
            print(' residue: resname="%s" resseq="%s" icode="%s"' % (
              residue.resname, residue.resseq, residue.icode))
            for atom in residue.atoms():
              print(' ', atom.format_atom_record())
    #
    print("""
# Special case: if there are no alt. conf. you can eliminate one
# level of the hierarchy (which may be more intuitive at first).""")
    for model in hierarchy.models():
      print('model: "%s"' % model.id)
      for chain in model.chains():
        print('chain: "%s"' % chain.id)
        # The next line will fail (AssertionError) if there are alt. conf.
        for residue in chain.residues():
          print(' residue: resname="%s" resseq="%s" icode="%s"' % (
            residue.resname, residue.resseq, residue.icode))
          for atom in residue.atoms():
            print(' ', atom.format_atom_record())
    #
    print("""
# A third "view" of the hierarchy:
# model, chain, residue_group, conformer, residue, atom
# This is useful for handling all conformers of a given residue_group
# together.
# All meaningful PDB files will only have one residue per conformer.""")
    for model in hierarchy.models():
      print('model: "%s"' % model.id)
      for chain in model.chains():
        print('chain: "%s"' % chain.id)
        for residue_group in chain.residue_groups():
          print(' residue_group: resseq="%s" icode="%s"' % (
            residue_group.resseq, residue_group.icode))
          for conformer in residue_group.conformers():
            print(' conformer: altloc="%s"' % (
              conformer.altloc))
            residue = conformer.only_residue()
            print(' residue: resname="%s"' % residue.resname)
            for atom in residue.atoms():
              print(' ', atom.format_atom_record())
    #
    # Pick a random atom and trace back to its parents.
    # (each time you run the script the result is different!)
    pdb_atoms = hierarchy.atoms()
    atom = random.choice(pdb_atoms)
    atom_group = atom.parent()
    residue_group = atom_group.parent()
    chain = residue_group.parent()
    model = chain.parent()
    root = model.parent()
    #
    # To expose a bit how it works internally:
    # - root is a reference to the original hierarchy:
    assert root.is_similar_hierarchy(other=hierarchy)
    # - it actually is a reference pointing to the same piece of memory
    assert root.memory_id() == hierarchy.memory_id()
    #
    # Modify arbitrarily.
    atom.name = "XY"
    atom_group.altloc = "Z"
    atom_group.resname = "NOP"
    residue_group.resseq = "9999"
    residue_group.icode = "I"
    chain.id = "Q"
    model.id = "9"
    #
    # Add an atom to the atom_group
    atom = iotbx.pdb.hierarchy.atom()
    atom.name = "NEW"
    atom_group.append_atom(atom=atom)
    # need for more complicated functions such as selections
    hierarchy.reset_atom_i_seqs()
    #
    print("""
# Format entire hierarchy as pdb string and pdb file.""")
    print(hierarchy.as_pdb_string(append_end=True))
    # Overwrites "junk.pdb" in the current working directory on every run.
    hierarchy.write_pdb_file(file_name="junk.pdb")
if (__name__ == "__main__"):
  run(sys.argv[1:])
|
en
| 0.857442
|
# # # # Primary "view" of hierarchy: # model, chain, residue_group, atom_group, atom # # Secondary (read-only) "view" of the hierarchy: # model, chain, conformer, residue, atom # # Special case: if there are no alt. conf. you can eliminate one # level of the hierarchy (which may be more intuitive at first). # The next line will fail (AssertionError) if there are alt. conf. # # A third "view" of the hierarchy: # model, chain, residue_group, conformer, residue, atom # This is useful for handling all conformers of a given residue_group # together. # All meaningful PDB files will only have one residue per conformer. # # Pick a random atom and trace back to its parents. # (each time you run the script the result is different!) # # To expose a bit how it works internally: # - root is a reference to the original hierarchy: # - it actually is a reference pointing to the same piece of memory # # Modify arbitrarily. # # Add an atom to the atom_group # need for more complicated functions such as selections # # Format entire hierarchy as pdb string and pdb file.
| 2.198142
| 2
|
glance_store/tests/functional/swift/test_functional_swift.py
|
mail2nsrajesh/glance_store
| 0
|
6626557
|
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import random
import time
from oslo_config import cfg
import swiftclient
from glance_store.tests.functional import base
# Global oslo.config registry; the swift_store_* overrides below configure
# the glance_store swift backend for the whole test process.
CONF = cfg.CONF
# Make log output visible when the functional tests run standalone.
logging.basicConfig()
class TestSwift(base.BaseFunctionalTests):
    """Functional tests for the glance_store swift backend.

    Credentials are read from the [admin] section of the functional-test
    config file, and the relevant swift_store_* options are overridden on
    the global CONF for the duration of the process. Each test runs
    against a randomly-named container that tearDown() deletes again.
    """

    def __init__(self, *args, **kwargs):
        super(TestSwift, self).__init__('swift', *args, **kwargs)
        self.auth = self.config.get('admin', 'auth_address')
        user = self.config.get('admin', 'user')
        self.key = self.config.get('admin', 'key')
        self.region = self.config.get('admin', 'region')
        # 'user' has the form "tenant:username"; both halves are needed
        # separately for the cleanup connection in tearDown().
        self.tenant, self.username = user.split(':')
        CONF.set_override('swift_store_user',
                          user,
                          group='glance_store')
        CONF.set_override('swift_store_auth_address',
                          self.auth,
                          group='glance_store')
        CONF.set_override('swift_store_key',
                          self.key,
                          group='glance_store')
        # The original set swift_store_create_container_on_put twice with
        # the same value; the redundant duplicate was dropped.
        CONF.set_override('swift_store_create_container_on_put',
                          True,
                          group='glance_store')
        CONF.set_override('swift_store_region',
                          self.region,
                          group='glance_store')

    def setUp(self):
        # Randomized container name (suffix 0-999) so repeated runs don't
        # collide with leftovers from earlier runs.
        self.container = ("glance_store_container_" +
                          str(int(random.random() * 1000)))
        CONF.set_override('swift_store_container',
                          self.container,
                          group='glance_store')
        super(TestSwift, self).setUp()

    def tearDown(self):
        # Best-effort cleanup with up to 3 attempts and a growing sleep
        # between them; only the final failure is propagated.
        for x in range(1, 4):
            time.sleep(x)
            try:
                swift = swiftclient.client.Connection(auth_version='2',
                                                      user=self.username,
                                                      key=self.key,
                                                      tenant_name=self.tenant,
                                                      authurl=self.auth)
                _, objects = swift.get_container(self.container)
                for obj in objects:
                    swift.delete_object(self.container, obj.get('name'))
                swift.delete_container(self.container)
            except Exception:
                if x < 3:
                    pass
                else:
                    raise
            else:
                break
        super(TestSwift, self).tearDown()
|
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import random
import time
from oslo_config import cfg
import swiftclient
from glance_store.tests.functional import base
# Global oslo.config registry; the swift_store_* overrides below configure
# the glance_store swift backend for the whole test process.
CONF = cfg.CONF
# Make log output visible when the functional tests run standalone.
logging.basicConfig()
class TestSwift(base.BaseFunctionalTests):
    """Functional tests for the glance_store swift backend.

    Credentials are read from the [admin] section of the functional-test
    config file, and the relevant swift_store_* options are overridden on
    the global CONF for the duration of the process. Each test runs
    against a randomly-named container that tearDown() deletes again.
    """

    def __init__(self, *args, **kwargs):
        super(TestSwift, self).__init__('swift', *args, **kwargs)
        self.auth = self.config.get('admin', 'auth_address')
        user = self.config.get('admin', 'user')
        self.key = self.config.get('admin', 'key')
        self.region = self.config.get('admin', 'region')
        # 'user' has the form "tenant:username"; both halves are needed
        # separately for the cleanup connection in tearDown().
        self.tenant, self.username = user.split(':')
        CONF.set_override('swift_store_user',
                          user,
                          group='glance_store')
        CONF.set_override('swift_store_auth_address',
                          self.auth,
                          group='glance_store')
        CONF.set_override('swift_store_key',
                          self.key,
                          group='glance_store')
        # The original set swift_store_create_container_on_put twice with
        # the same value; the redundant duplicate was dropped.
        CONF.set_override('swift_store_create_container_on_put',
                          True,
                          group='glance_store')
        CONF.set_override('swift_store_region',
                          self.region,
                          group='glance_store')

    def setUp(self):
        # Randomized container name (suffix 0-999) so repeated runs don't
        # collide with leftovers from earlier runs.
        self.container = ("glance_store_container_" +
                          str(int(random.random() * 1000)))
        CONF.set_override('swift_store_container',
                          self.container,
                          group='glance_store')
        super(TestSwift, self).setUp()

    def tearDown(self):
        # Best-effort cleanup with up to 3 attempts and a growing sleep
        # between them; only the final failure is propagated.
        for x in range(1, 4):
            time.sleep(x)
            try:
                swift = swiftclient.client.Connection(auth_version='2',
                                                      user=self.username,
                                                      key=self.key,
                                                      tenant_name=self.tenant,
                                                      authurl=self.auth)
                _, objects = swift.get_container(self.container)
                for obj in objects:
                    swift.delete_object(self.container, obj.get('name'))
                swift.delete_container(self.container)
            except Exception:
                if x < 3:
                    pass
                else:
                    raise
            else:
                break
        super(TestSwift, self).tearDown()
|
en
| 0.846256
|
# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
| 1.666386
| 2
|
tests/garage/torch/algos/test_pearl.py
|
cnheider/garage
| 0
|
6626558
|
"""This script is a test that fails when PEARL performance is too low."""
import pickle
import pytest
from garage.envs import MetaWorldSetTaskEnv, normalize, PointEnv
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import PEARL
from garage.torch.algos.pearl import PEARLWorker
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import (ContextConditionedPolicy,
TanhGaussianMLPPolicy)
from garage.torch.q_functions import ContinuousMLPQFunction
from garage.trainer import Trainer
from tests.fixtures import snapshot_config
# MuJoCo is an optional dependency: skip this whole test module (rather
# than error at collection time) when mujoco_py is missing or fails to
# import for any other reason (e.g. a license/key problem).
try:
    # pylint: disable=unused-import
    import mujoco_py  # noqa: F401
except ImportError:
    pytest.skip('To use mujoco-based features, please install garage[mujoco].',
                allow_module_level=True)
except Exception:  # pylint: disable=broad-except
    pytest.skip(
        'Skipping tests, failed to import mujoco. Do you have a '
        'valid mujoco key installed?',
        allow_module_level=True)
# Imported after the guard above; skipped via isort so the check runs first.
import metaworld  # isort:skip
@pytest.mark.mujoco
class TestPEARL:
"""Test class for PEARL."""
@pytest.mark.large
def test_pearl_ml1_push(self):
"""Test PEARL with ML1 Push environment."""
params = dict(seed=1,
num_epochs=1,
num_train_tasks=5,
latent_size=7,
encoder_hidden_sizes=[10, 10, 10],
net_size=30,
meta_batch_size=16,
num_steps_per_epoch=40,
num_initial_steps=40,
num_tasks_sample=15,
num_steps_prior=15,
num_extra_rl_steps_posterior=15,
batch_size=256,
embedding_batch_size=8,
embedding_mini_batch_size=8,
reward_scale=10.,
use_information_bottleneck=True,
use_next_obs_in_context=False,
use_gpu=False)
net_size = params['net_size']
set_seed(params['seed'])
# create multi-task environment and sample tasks
ml1 = metaworld.ML1('push-v1')
train_env = MetaWorldSetTaskEnv(ml1, 'train')
env_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=train_env,
wrapper=lambda env, _: normalize(env))
env = env_sampler.sample(params['num_train_tasks'])
test_env = MetaWorldSetTaskEnv(ml1, 'test')
test_env_sampler = SetTaskSampler(
MetaWorldSetTaskEnv,
env=test_env,
wrapper=lambda env, _: normalize(env))
augmented_env = PEARL.augment_env_spec(env[0](), params['latent_size'])
qf = ContinuousMLPQFunction(
env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), params['latent_size'], 'vf')
vf = ContinuousMLPQFunction(
env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=params['num_train_tasks'],
latent_dim=params['latent_size'],
encoder_hidden_sizes=params['encoder_hidden_sizes'],
test_env_sampler=test_env_sampler,
meta_batch_size=params['meta_batch_size'],
num_steps_per_epoch=params['num_steps_per_epoch'],
num_initial_steps=params['num_initial_steps'],
num_tasks_sample=params['num_tasks_sample'],
num_steps_prior=params['num_steps_prior'],
num_extra_rl_steps_posterior=params[
'num_extra_rl_steps_posterior'],
batch_size=params['batch_size'],
embedding_batch_size=params['embedding_batch_size'],
embedding_mini_batch_size=params['embedding_mini_batch_size'],
reward_scale=params['reward_scale'],
)
set_gpu_mode(params['use_gpu'], gpu_id=0)
if params['use_gpu']:
pearl.to()
trainer = Trainer(snapshot_config)
trainer.setup(algo=pearl,
env=env[0](),
sampler_cls=LocalSampler,
n_workers=1,
worker_class=PEARLWorker)
trainer.train(n_epochs=params['num_epochs'],
batch_size=params['batch_size'])
def test_pickling(self):
"""Test pickle and unpickle."""
net_size = 10
env_sampler = SetTaskSampler(PointEnv)
env = env_sampler.sample(5)
test_env_sampler = SetTaskSampler(PointEnv)
augmented_env = PEARL.augment_env_spec(env[0](), 5)
qf = ContinuousMLPQFunction(
env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), 5, 'vf')
vf = ContinuousMLPQFunction(
env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(env=env,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=5,
num_test_tasks=5,
latent_dim=5,
encoder_hidden_sizes=[10, 10],
test_env_sampler=test_env_sampler)
# This line is just to improve coverage
pearl.to()
pickled = pickle.dumps(pearl)
unpickled = pickle.loads(pickled)
assert hasattr(unpickled, '_replay_buffers')
assert hasattr(unpickled, '_context_replay_buffers')
assert unpickled._is_resuming
|
"""This script is a test that fails when PEARL performance is too low."""
import pickle
import pytest
from garage.envs import MetaWorldSetTaskEnv, normalize, PointEnv
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import PEARL
from garage.torch.algos.pearl import PEARLWorker
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import (ContextConditionedPolicy,
TanhGaussianMLPPolicy)
from garage.torch.q_functions import ContinuousMLPQFunction
from garage.trainer import Trainer
from tests.fixtures import snapshot_config
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
import metaworld # isort:skip
@pytest.mark.mujoco
class TestPEARL:
"""Test class for PEARL."""
@pytest.mark.large
def test_pearl_ml1_push(self):
"""Test PEARL with ML1 Push environment."""
params = dict(seed=1,
num_epochs=1,
num_train_tasks=5,
latent_size=7,
encoder_hidden_sizes=[10, 10, 10],
net_size=30,
meta_batch_size=16,
num_steps_per_epoch=40,
num_initial_steps=40,
num_tasks_sample=15,
num_steps_prior=15,
num_extra_rl_steps_posterior=15,
batch_size=256,
embedding_batch_size=8,
embedding_mini_batch_size=8,
reward_scale=10.,
use_information_bottleneck=True,
use_next_obs_in_context=False,
use_gpu=False)
net_size = params['net_size']
set_seed(params['seed'])
# create multi-task environment and sample tasks
ml1 = metaworld.ML1('push-v1')
train_env = MetaWorldSetTaskEnv(ml1, 'train')
env_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=train_env,
wrapper=lambda env, _: normalize(env))
env = env_sampler.sample(params['num_train_tasks'])
test_env = MetaWorldSetTaskEnv(ml1, 'test')
test_env_sampler = SetTaskSampler(
MetaWorldSetTaskEnv,
env=test_env,
wrapper=lambda env, _: normalize(env))
augmented_env = PEARL.augment_env_spec(env[0](), params['latent_size'])
qf = ContinuousMLPQFunction(
env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), params['latent_size'], 'vf')
vf = ContinuousMLPQFunction(
env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=params['num_train_tasks'],
latent_dim=params['latent_size'],
encoder_hidden_sizes=params['encoder_hidden_sizes'],
test_env_sampler=test_env_sampler,
meta_batch_size=params['meta_batch_size'],
num_steps_per_epoch=params['num_steps_per_epoch'],
num_initial_steps=params['num_initial_steps'],
num_tasks_sample=params['num_tasks_sample'],
num_steps_prior=params['num_steps_prior'],
num_extra_rl_steps_posterior=params[
'num_extra_rl_steps_posterior'],
batch_size=params['batch_size'],
embedding_batch_size=params['embedding_batch_size'],
embedding_mini_batch_size=params['embedding_mini_batch_size'],
reward_scale=params['reward_scale'],
)
set_gpu_mode(params['use_gpu'], gpu_id=0)
if params['use_gpu']:
pearl.to()
trainer = Trainer(snapshot_config)
trainer.setup(algo=pearl,
env=env[0](),
sampler_cls=LocalSampler,
n_workers=1,
worker_class=PEARLWorker)
trainer.train(n_epochs=params['num_epochs'],
batch_size=params['batch_size'])
def test_pickling(self):
"""Test pickle and unpickle."""
net_size = 10
env_sampler = SetTaskSampler(PointEnv)
env = env_sampler.sample(5)
test_env_sampler = SetTaskSampler(PointEnv)
augmented_env = PEARL.augment_env_spec(env[0](), 5)
qf = ContinuousMLPQFunction(
env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), 5, 'vf')
vf = ContinuousMLPQFunction(
env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(env=env,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=5,
num_test_tasks=5,
latent_dim=5,
encoder_hidden_sizes=[10, 10],
test_env_sampler=test_env_sampler)
# This line is just to improve coverage
pearl.to()
pickled = pickle.dumps(pearl)
unpickled = pickle.loads(pickled)
assert hasattr(unpickled, '_replay_buffers')
assert hasattr(unpickled, '_context_replay_buffers')
assert unpickled._is_resuming
|
en
| 0.872055
|
This script is a test that fails when PEARL performance is too low. # pylint: disable=unused-import # noqa: F401 # pylint: disable=broad-except # isort:skip Test class for PEARL. Test PEARL with ML1 Push environment. # create multi-task environment and sample tasks Test pickle and unpickle. # This line is just to improve coverage
| 2.156543
| 2
|
decisiontree/migrations/0005_auto_20180808_1125.py
|
datamade/rapidsms-decisiontree-app
| 1
|
6626559
|
<filename>decisiontree/migrations/0005_auto_20180808_1125.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-08 11:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('decisiontree', '0004_answer_color'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(help_text='The text to send to the user, e.g., a question, completion text.', max_length=500, verbose_name='message text')),
('error_response', models.CharField(blank=True, help_text='Optional error message if the user does not send a valid response.', max_length=160)),
],
),
migrations.RemoveField(
model_name='tree',
name='completion_text',
),
migrations.RemoveField(
model_name='treestate',
name='question',
),
migrations.AlterField(
model_name='answer',
name='type',
field=models.CharField(choices=[('A', 'Exact Match'), ('R', 'Regular Expression'), ('C', 'Custom Logic')], max_length=1),
),
migrations.AlterField(
model_name='session',
name='num_tries',
field=models.PositiveIntegerField(help_text='The number of times the user has tried to respond to the current message.'),
),
migrations.AlterField(
model_name='session',
name='state',
field=models.ForeignKey(blank=True, help_text='None if the session is complete.', null=True, on_delete=django.db.models.deletion.CASCADE, to='decisiontree.TreeState'),
),
migrations.AlterField(
model_name='transition',
name='next_state',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='next_state', to='decisiontree.TreeState'),
preserve_default=False,
),
migrations.AlterField(
model_name='tree',
name='root_state',
field=models.ForeignKey(help_text='The first Message sent when this Tree is triggered, which may lead to many more.', on_delete=django.db.models.deletion.CASCADE, related_name='tree_set', to='decisiontree.TreeState'),
),
migrations.AlterField(
model_name='tree',
name='trigger',
field=models.CharField(help_text='The incoming message which triggers this Tree.', max_length=30, unique=True, verbose_name='Keyword'),
),
migrations.AlterField(
model_name='treestate',
name='num_retries',
field=models.PositiveIntegerField(blank=True, help_text="The number of tries the user has to get out of this state. If empty, there is no limit. When the number of retries is hit, the user's session will be terminated.", null=True),
),
migrations.DeleteModel(
name='Question',
),
migrations.AddField(
model_name='treestate',
name='message',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='decisiontree.Message'),
preserve_default=False,
),
]
|
<filename>decisiontree/migrations/0005_auto_20180808_1125.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-08 11:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('decisiontree', '0004_answer_color'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(help_text='The text to send to the user, e.g., a question, completion text.', max_length=500, verbose_name='message text')),
('error_response', models.CharField(blank=True, help_text='Optional error message if the user does not send a valid response.', max_length=160)),
],
),
migrations.RemoveField(
model_name='tree',
name='completion_text',
),
migrations.RemoveField(
model_name='treestate',
name='question',
),
migrations.AlterField(
model_name='answer',
name='type',
field=models.CharField(choices=[('A', 'Exact Match'), ('R', 'Regular Expression'), ('C', 'Custom Logic')], max_length=1),
),
migrations.AlterField(
model_name='session',
name='num_tries',
field=models.PositiveIntegerField(help_text='The number of times the user has tried to respond to the current message.'),
),
migrations.AlterField(
model_name='session',
name='state',
field=models.ForeignKey(blank=True, help_text='None if the session is complete.', null=True, on_delete=django.db.models.deletion.CASCADE, to='decisiontree.TreeState'),
),
migrations.AlterField(
model_name='transition',
name='next_state',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='next_state', to='decisiontree.TreeState'),
preserve_default=False,
),
migrations.AlterField(
model_name='tree',
name='root_state',
field=models.ForeignKey(help_text='The first Message sent when this Tree is triggered, which may lead to many more.', on_delete=django.db.models.deletion.CASCADE, related_name='tree_set', to='decisiontree.TreeState'),
),
migrations.AlterField(
model_name='tree',
name='trigger',
field=models.CharField(help_text='The incoming message which triggers this Tree.', max_length=30, unique=True, verbose_name='Keyword'),
),
migrations.AlterField(
model_name='treestate',
name='num_retries',
field=models.PositiveIntegerField(blank=True, help_text="The number of tries the user has to get out of this state. If empty, there is no limit. When the number of retries is hit, the user's session will be terminated.", null=True),
),
migrations.DeleteModel(
name='Question',
),
migrations.AddField(
model_name='treestate',
name='message',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='decisiontree.Message'),
preserve_default=False,
),
]
|
en
| 0.786689
|
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-08-08 11:25
| 1.77309
| 2
|
qtgmc_modern/filters/__init__.py
|
Ichunjo/secret-project
| 1
|
6626560
|
"""
Interface implementing various vapoursynth filters
"""
# flake8: noqa
from ._deinterlacers import *
from ._denoisers import *
from ._conv import *
from ._mvtools import *
|
"""
Interface implementing various vapoursynth filters
"""
# flake8: noqa
from ._deinterlacers import *
from ._denoisers import *
from ._conv import *
from ._mvtools import *
|
en
| 0.544107
|
Interface implementing various vapoursynth filters # flake8: noqa
| 0.997325
| 1
|
02. Algorithms/04. Recursion/Problems/08. Factorial of N/Solution.py
|
dr490n1s3d-3d8/Data-Structures-and-Algorithms
| 1
|
6626561
|
# Factorial of a number using recursion
def recur_factorial(n):
if n == 1:
return n
else:
return n*recur_factorial(n-1)
num = 7
# check if the number is negative
if num < 0:
print("Sorry, factorial does not exist for negative numbers")
elif num == 0:
print("The factorial of 0 is 1")
else:
print("The factorial of", num, "is", recur_factorial(num))
|
# Factorial of a number using recursion
def recur_factorial(n):
if n == 1:
return n
else:
return n*recur_factorial(n-1)
num = 7
# check if the number is negative
if num < 0:
print("Sorry, factorial does not exist for negative numbers")
elif num == 0:
print("The factorial of 0 is 1")
else:
print("The factorial of", num, "is", recur_factorial(num))
|
en
| 0.598742
|
# Factorial of a number using recursion # check if the number is negative
| 4.315842
| 4
|
utils/function.py
|
gabriel-tjio/ASH
| 0
|
6626562
|
<reponame>gabriel-tjio/ASH
import torch
def calc_mean_std(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def calc_mean_std_gcn(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, int(C/16), -1).var(dim=2) + eps
feat_var = feat_var.repeat(1,16,1).view(N,C,-1)
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, int(C/16), -1).mean(dim=2).repeat(1,16,1).view(N,C,-1)
feat_mean = feat_mean.view(N, C, 1, 1)
return feat_mean, feat_std
def calc_mean_std_gcn32(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, int(C/32), -1).var(dim=2) + eps
feat_var = feat_var.repeat(1,32,1).view(N,C,-1)
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, int(C/32), -1).mean(dim=2).repeat(1,32,1).view(N,C,-1)
feat_mean = feat_mean.view(N, C, 1, 1)
return feat_mean, feat_std
def adaptive_instance_normalization(content_feat, style_feat):
assert (content_feat.size()[:2] == style_feat.size()[:2])
size = content_feat.size()
style_mean, style_std = calc_mean_std(style_feat)
# arbitrary mask
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
def adaptive_group_normalization(content_feat, style_feat):
assert (content_feat.size()[:2] == style_feat.size()[:2])
size = content_feat.size()
style_mean, style_std = calc_mean_std_gcn(style_feat)
# arbitrary mask
content_mean, content_std = calc_mean_std_gcn(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
def _calc_feat_flatten_mean_std(feat):
# takes 3D feat (C, H, W), return mean and std of array within channels
assert (feat.size()[0] == 3)
assert (isinstance(feat, torch.FloatTensor))
feat_flatten = feat.view(3, -1)
mean = feat_flatten.mean(dim=-1, keepdim=True)
std = feat_flatten.std(dim=-1, keepdim=True)
return feat_flatten, mean, std
def _mat_sqrt(x):
U, D, V = torch.svd(x)
return torch.mm(torch.mm(U, D.pow(0.5).diag()), V.t())
def coral(source, target):
# assume both source and target are 3D array (C, H, W)
# Note: flatten -> f
source_f, source_f_mean, source_f_std = _calc_feat_flatten_mean_std(source)
source_f_norm = (source_f - source_f_mean.expand_as(
source_f)) / source_f_std.expand_as(source_f)
source_f_cov_eye = \
torch.mm(source_f_norm, source_f_norm.t()) + torch.eye(3)
target_f, target_f_mean, target_f_std = _calc_feat_flatten_mean_std(target)
target_f_norm = (target_f - target_f_mean.expand_as(
target_f)) / target_f_std.expand_as(target_f)
target_f_cov_eye = \
torch.mm(target_f_norm, target_f_norm.t()) + torch.eye(3)
source_f_norm_transfer = torch.mm(
_mat_sqrt(target_f_cov_eye),
torch.mm(torch.inverse(_mat_sqrt(source_f_cov_eye)),
source_f_norm)
)
source_f_transfer = source_f_norm_transfer * \
target_f_std.expand_as(source_f_norm) + \
target_f_mean.expand_as(source_f_norm)
return source_f_transfer.view(source.size())
|
import torch
def calc_mean_std(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def calc_mean_std_gcn(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, int(C/16), -1).var(dim=2) + eps
feat_var = feat_var.repeat(1,16,1).view(N,C,-1)
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, int(C/16), -1).mean(dim=2).repeat(1,16,1).view(N,C,-1)
feat_mean = feat_mean.view(N, C, 1, 1)
return feat_mean, feat_std
def calc_mean_std_gcn32(feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, int(C/32), -1).var(dim=2) + eps
feat_var = feat_var.repeat(1,32,1).view(N,C,-1)
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, int(C/32), -1).mean(dim=2).repeat(1,32,1).view(N,C,-1)
feat_mean = feat_mean.view(N, C, 1, 1)
return feat_mean, feat_std
def adaptive_instance_normalization(content_feat, style_feat):
assert (content_feat.size()[:2] == style_feat.size()[:2])
size = content_feat.size()
style_mean, style_std = calc_mean_std(style_feat)
# arbitrary mask
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
def adaptive_group_normalization(content_feat, style_feat):
assert (content_feat.size()[:2] == style_feat.size()[:2])
size = content_feat.size()
style_mean, style_std = calc_mean_std_gcn(style_feat)
# arbitrary mask
content_mean, content_std = calc_mean_std_gcn(content_feat)
normalized_feat = (content_feat - content_mean.expand(
size)) / content_std.expand(size)
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
def _calc_feat_flatten_mean_std(feat):
# takes 3D feat (C, H, W), return mean and std of array within channels
assert (feat.size()[0] == 3)
assert (isinstance(feat, torch.FloatTensor))
feat_flatten = feat.view(3, -1)
mean = feat_flatten.mean(dim=-1, keepdim=True)
std = feat_flatten.std(dim=-1, keepdim=True)
return feat_flatten, mean, std
def _mat_sqrt(x):
U, D, V = torch.svd(x)
return torch.mm(torch.mm(U, D.pow(0.5).diag()), V.t())
def coral(source, target):
# assume both source and target are 3D array (C, H, W)
# Note: flatten -> f
source_f, source_f_mean, source_f_std = _calc_feat_flatten_mean_std(source)
source_f_norm = (source_f - source_f_mean.expand_as(
source_f)) / source_f_std.expand_as(source_f)
source_f_cov_eye = \
torch.mm(source_f_norm, source_f_norm.t()) + torch.eye(3)
target_f, target_f_mean, target_f_std = _calc_feat_flatten_mean_std(target)
target_f_norm = (target_f - target_f_mean.expand_as(
target_f)) / target_f_std.expand_as(target_f)
target_f_cov_eye = \
torch.mm(target_f_norm, target_f_norm.t()) + torch.eye(3)
source_f_norm_transfer = torch.mm(
_mat_sqrt(target_f_cov_eye),
torch.mm(torch.inverse(_mat_sqrt(source_f_cov_eye)),
source_f_norm)
)
source_f_transfer = source_f_norm_transfer * \
target_f_std.expand_as(source_f_norm) + \
target_f_mean.expand_as(source_f_norm)
return source_f_transfer.view(source.size())
|
en
| 0.83043
|
# eps is a small value added to the variance to avoid divide-by-zero. # eps is a small value added to the variance to avoid divide-by-zero. # eps is a small value added to the variance to avoid divide-by-zero. # arbitrary mask # arbitrary mask # takes 3D feat (C, H, W), return mean and std of array within channels # assume both source and target are 3D array (C, H, W) # Note: flatten -> f
| 2.697952
| 3
|
ichnaea/data/station.py
|
JaredKerim-Mozilla/ichnaea
| 0
|
6626563
|
from collections import defaultdict
from ichnaea.data.base import DataTask
from ichnaea.geocalc import (
distance,
range_to_points,
)
from ichnaea.models import (
Cell,
CellArea,
CellBlacklist,
CellObservation,
Wifi,
WifiBlacklist,
WifiObservation,
)
from ichnaea import util
class StationRemover(DataTask):
def __init__(self, task, session, pipe):
DataTask.__init__(self, task, session)
self.pipe = pipe
class CellRemover(StationRemover):
def remove(self, cell_keys):
cells_removed = 0
changed_areas = set()
data_queue = self.task.app.data_queues['update_cellarea']
for key in cell_keys:
query = Cell.querykey(self.session, key)
cells_removed += query.delete()
changed_areas.add(CellArea.to_hashkey(key))
if changed_areas:
data_queue.enqueue(changed_areas, pipe=self.pipe)
return cells_removed
class WifiRemover(StationRemover):
def remove(self, wifi_keys):
query = Wifi.querykeys(self.session, wifi_keys)
length = query.delete(synchronize_session=False)
return length
class StationUpdater(DataTask):
MAX_OLD_OBSERVATIONS = 1000
def __init__(self, task, session, pipe,
remove_task=None, update_task=None):
DataTask.__init__(self, task, session)
self.pipe = pipe
self.remove_task = remove_task
self.updated_areas = set()
self.update_task = update_task
self.data_queue = self.task.app.data_queues[self.queue_name]
def calculate_new_position(self, station, observations):
# This function returns True if the station was found to be moving.
length = len(observations)
latitudes = [obs.lat for obs in observations]
longitudes = [obs.lon for obs in observations]
new_lat = sum(latitudes) / length
new_lon = sum(longitudes) / length
if station.lat and station.lon:
latitudes.append(station.lat)
longitudes.append(station.lon)
existing_station = True
else:
station.lat = new_lat
station.lon = new_lon
existing_station = False
# calculate extremes of observations, existing location estimate
# and existing extreme values
def extreme(vals, attr, function):
new = function(vals)
old = getattr(station, attr, None)
if old is not None:
return function(new, old)
else:
return new
min_lat = extreme(latitudes, 'min_lat', min)
min_lon = extreme(longitudes, 'min_lon', min)
max_lat = extreme(latitudes, 'max_lat', max)
max_lon = extreme(longitudes, 'max_lon', max)
# calculate sphere-distance from opposite corners of
# bounding box containing current location estimate
# and new observations; if too big, station is moving
box_dist = distance(min_lat, min_lon, max_lat, max_lon)
if existing_station:
if box_dist > self.max_dist_km:
# Signal a moving station and return early without updating
# the station since it will be deleted by caller momentarily
return True
# limit the maximum weight of the old station estimate
old_weight = min(station.total_measures,
self.MAX_OLD_OBSERVATIONS)
new_weight = old_weight + length
station.lat = ((station.lat * old_weight) +
(new_lat * length)) / new_weight
station.lon = ((station.lon * old_weight) +
(new_lon * length)) / new_weight
# increase total counter, new isn't used
station.total_measures = station.total_measures + length
# update max/min lat/lon columns
station.min_lat = min_lat
station.min_lon = min_lon
station.max_lat = max_lat
station.max_lon = max_lon
# give radio-range estimate between extreme values and centroid
ctr = (station.lat, station.lon)
points = [(min_lat, min_lon),
(min_lat, max_lon),
(max_lat, min_lon),
(max_lat, max_lon)]
station.range = range_to_points(ctr, points) * 1000.0
station.modified = util.utcnow()
def blacklist_stations(self, stations):
moving_keys = []
utcnow = util.utcnow()
for station in stations:
station_key = self.blacklist_model.to_hashkey(station)
query = self.blacklist_model.querykey(self.session, station_key)
blacklisted_station = query.first()
moving_keys.append(station_key)
if blacklisted_station:
blacklisted_station.time = utcnow
blacklisted_station.count += 1
else:
blacklisted_station = self.blacklist_model(
time=utcnow,
count=1,
**station_key.__dict__)
self.session.add(blacklisted_station)
if moving_keys:
self.stats_client.incr(
'items.blacklisted.%s_moving' % self.station_type,
len(moving_keys))
self.remove_task.delay(moving_keys)
def update(self, batch=10):
all_observations = self.data_queue.dequeue(batch=batch)
station_obs = defaultdict(list)
for obs in all_observations:
station_obs[self.station_model.to_hashkey(obs)].append(obs)
if not station_obs:
return (0, 0)
stations = list(self.station_model.iterkeys(
self.session, list(station_obs.keys())))
if not stations: # pragma: no cover
# TODO: This task depends on the station records to be
# pre-created, move that logic into this task later on.
return (0, 0)
moving_stations = set()
for station in stations:
observations = station_obs.get(station.hashkey())
if observations:
moving = self.calculate_new_position(station, observations)
if moving:
moving_stations.add(station)
# track potential updates to dependent areas
self.add_area_update(station)
self.queue_area_updates()
if moving_stations:
self.blacklist_stations(moving_stations)
if self.data_queue.enough_data(batch=batch): # pragma: no cover
self.update_task.apply_async(
kwargs={'batch': batch},
countdown=2,
expires=10)
return (len(stations), len(moving_stations))
def add_area_update(self, station):
pass
def queue_area_updates(self):
pass
class CellUpdater(StationUpdater):
blacklist_model = CellBlacklist
max_dist_km = 150
observation_model = CellObservation
queue_name = 'update_cell'
station_model = Cell
station_type = 'cell'
def add_area_update(self, station):
self.updated_areas.add(CellArea.to_hashkey(station))
def queue_area_updates(self):
if self.updated_areas:
data_queue = self.task.app.data_queues['update_cellarea']
data_queue.enqueue(self.updated_areas, pipe=self.pipe)
class WifiUpdater(StationUpdater):
blacklist_model = WifiBlacklist
max_dist_km = 5
observation_model = WifiObservation
queue_name = 'update_wifi'
station_model = Wifi
station_type = 'wifi'
|
from collections import defaultdict
from ichnaea.data.base import DataTask
from ichnaea.geocalc import (
distance,
range_to_points,
)
from ichnaea.models import (
Cell,
CellArea,
CellBlacklist,
CellObservation,
Wifi,
WifiBlacklist,
WifiObservation,
)
from ichnaea import util
class StationRemover(DataTask):
def __init__(self, task, session, pipe):
DataTask.__init__(self, task, session)
self.pipe = pipe
class CellRemover(StationRemover):
def remove(self, cell_keys):
cells_removed = 0
changed_areas = set()
data_queue = self.task.app.data_queues['update_cellarea']
for key in cell_keys:
query = Cell.querykey(self.session, key)
cells_removed += query.delete()
changed_areas.add(CellArea.to_hashkey(key))
if changed_areas:
data_queue.enqueue(changed_areas, pipe=self.pipe)
return cells_removed
class WifiRemover(StationRemover):
def remove(self, wifi_keys):
query = Wifi.querykeys(self.session, wifi_keys)
length = query.delete(synchronize_session=False)
return length
class StationUpdater(DataTask):
    """Consumes queued observations and folds them into station records.

    Subclasses supply the model classes (``station_model``,
    ``blacklist_model``), the queue name and ``max_dist_km`` — the
    movement threshold beyond which a station is treated as mobile and
    blacklisted.
    """
    # Cap on how much weight the existing estimate carries versus new data.
    MAX_OLD_OBSERVATIONS = 1000
    def __init__(self, task, session, pipe,
                 remove_task=None, update_task=None):
        # remove_task: celery task used to delete moving stations.
        # update_task: this task itself, rescheduled when work remains.
        DataTask.__init__(self, task, session)
        self.pipe = pipe
        self.remove_task = remove_task
        self.updated_areas = set()
        self.update_task = update_task
        self.data_queue = self.task.app.data_queues[self.queue_name]
    def calculate_new_position(self, station, observations):
        # This function returns True if the station was found to be moving.
        length = len(observations)
        latitudes = [obs.lat for obs in observations]
        longitudes = [obs.lon for obs in observations]
        new_lat = sum(latitudes) / length
        new_lon = sum(longitudes) / length
        if station.lat and station.lon:
            # Fold the current estimate into the extremes calculation below.
            latitudes.append(station.lat)
            longitudes.append(station.lon)
            existing_station = True
        else:
            # First position fix for this station.
            station.lat = new_lat
            station.lon = new_lon
            existing_station = False
        # calculate extremes of observations, existing location estimate
        # and existing extreme values
        def extreme(vals, attr, function):
            # Combine the new extreme of *vals* with the stored extreme
            # (station.<attr>), if any, using min or max.
            new = function(vals)
            old = getattr(station, attr, None)
            if old is not None:
                return function(new, old)
            else:
                return new
        min_lat = extreme(latitudes, 'min_lat', min)
        min_lon = extreme(longitudes, 'min_lon', min)
        max_lat = extreme(latitudes, 'max_lat', max)
        max_lon = extreme(longitudes, 'max_lon', max)
        # calculate sphere-distance from opposite corners of
        # bounding box containing current location estimate
        # and new observations; if too big, station is moving
        box_dist = distance(min_lat, min_lon, max_lat, max_lon)
        if existing_station:
            if box_dist > self.max_dist_km:
                # Signal a moving station and return early without updating
                # the station since it will be deleted by caller momentarily
                return True
            # limit the maximum weight of the old station estimate
            old_weight = min(station.total_measures,
                             self.MAX_OLD_OBSERVATIONS)
            new_weight = old_weight + length
            # Weighted average of old estimate and new observations.
            station.lat = ((station.lat * old_weight) +
                           (new_lat * length)) / new_weight
            station.lon = ((station.lon * old_weight) +
                           (new_lon * length)) / new_weight
        # increase total counter, new isn't used
        station.total_measures = station.total_measures + length
        # update max/min lat/lon columns
        station.min_lat = min_lat
        station.min_lon = min_lon
        station.max_lat = max_lat
        station.max_lon = max_lon
        # give radio-range estimate between extreme values and centroid
        ctr = (station.lat, station.lon)
        points = [(min_lat, min_lon),
                  (min_lat, max_lon),
                  (max_lat, min_lon),
                  (max_lat, max_lon)]
        # range_to_points returns km; stored range is in meters.
        station.range = range_to_points(ctr, points) * 1000.0
        station.modified = util.utcnow()
    def blacklist_stations(self, stations):
        """Record *stations* as moving in the blacklist table and
        schedule their removal via ``remove_task``."""
        moving_keys = []
        utcnow = util.utcnow()
        for station in stations:
            station_key = self.blacklist_model.to_hashkey(station)
            query = self.blacklist_model.querykey(self.session, station_key)
            blacklisted_station = query.first()
            moving_keys.append(station_key)
            if blacklisted_station:
                # Seen moving before: refresh timestamp, bump counter.
                blacklisted_station.time = utcnow
                blacklisted_station.count += 1
            else:
                blacklisted_station = self.blacklist_model(
                    time=utcnow,
                    count=1,
                    **station_key.__dict__)
                self.session.add(blacklisted_station)
        if moving_keys:
            self.stats_client.incr(
                'items.blacklisted.%s_moving' % self.station_type,
                len(moving_keys))
            self.remove_task.delay(moving_keys)
    def update(self, batch=10):
        """Dequeue up to *batch* observations, group them per station and
        update each station's position estimate.

        Returns a ``(stations_updated, stations_moving)`` tuple.
        """
        all_observations = self.data_queue.dequeue(batch=batch)
        station_obs = defaultdict(list)
        for obs in all_observations:
            station_obs[self.station_model.to_hashkey(obs)].append(obs)
        if not station_obs:
            return (0, 0)
        stations = list(self.station_model.iterkeys(
            self.session, list(station_obs.keys())))
        if not stations:  # pragma: no cover
            # TODO: This task depends on the station records to be
            # pre-created, move that logic into this task later on.
            return (0, 0)
        moving_stations = set()
        for station in stations:
            observations = station_obs.get(station.hashkey())
            if observations:
                moving = self.calculate_new_position(station, observations)
                if moving:
                    moving_stations.add(station)
                # track potential updates to dependent areas
                self.add_area_update(station)
        self.queue_area_updates()
        if moving_stations:
            self.blacklist_stations(moving_stations)
        if self.data_queue.enough_data(batch=batch):  # pragma: no cover
            # More queued work left: reschedule ourselves shortly.
            self.update_task.apply_async(
                kwargs={'batch': batch},
                countdown=2,
                expires=10)
        return (len(stations), len(moving_stations))
    def add_area_update(self, station):
        # Hook for subclasses that track dependent area updates.
        pass
    def queue_area_updates(self):
        # Hook for subclasses that flush tracked area updates.
        pass
class CellUpdater(StationUpdater):
    """Station updater specialized for cell networks."""
    blacklist_model = CellBlacklist
    max_dist_km = 150
    observation_model = CellObservation
    queue_name = 'update_cell'
    station_model = Cell
    station_type = 'cell'
    def add_area_update(self, station):
        """Remember the area this cell belongs to for a later refresh."""
        area_key = CellArea.to_hashkey(station)
        self.updated_areas.add(area_key)
    def queue_area_updates(self):
        """Flush the collected area keys onto the cell-area queue."""
        if not self.updated_areas:
            return
        queue = self.task.app.data_queues['update_cellarea']
        queue.enqueue(self.updated_areas, pipe=self.pipe)
class WifiUpdater(StationUpdater):
    """Station updater specialized for wifi networks."""
    blacklist_model = WifiBlacklist
    # Wifis are blacklisted after far smaller movements than cells.
    max_dist_km = 5
    observation_model = WifiObservation
    queue_name = 'update_wifi'
    station_model = Wifi
    station_type = 'wifi'
|
en
| 0.871855
|
# This function returns True if the station was found to be moving. # calculate extremes of observations, existing location estimate # and existing extreme values # calculate sphere-distance from opposite corners of # bounding box containing current location estimate # and new observations; if too big, station is moving # Signal a moving station and return early without updating # the station since it will be deleted by caller momentarily # limit the maximum weight of the old station estimate # increase total counter, new isn't used # update max/min lat/lon columns # give radio-range estimate between extreme values and centroid # pragma: no cover # TODO: This task depends on the station records to be # pre-created, move that logic into this task later on. # track potential updates to dependent areas # pragma: no cover
| 2.275813
| 2
|
trecs/tests/test_PopularityRecommender.py
|
amywinecoff/t-recs
| 0
|
6626564
|
from trecs.models import PopularityRecommender
from trecs.components import Creators
import numpy as np
import pytest
import test_helpers
class TestPopularityRecommender:
    """Unit tests for trecs' PopularityRecommender model."""
    def test_default(self):
        """Default construction fills in users, items and scores."""
        c = PopularityRecommender()
        test_helpers.assert_correct_num_users(c.num_users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(c.num_items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
    def test_arguments(self, items=None, users=None):
        """Explicit num_users/num_items are respected."""
        if items is None:
            items = np.random.randint(1, 1000)
        if users is None:
            users = np.random.randint(1, 100)
        # init with given arguments
        c = PopularityRecommender(num_users=users, num_items=items)
        test_helpers.assert_correct_num_users(users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
    def test_partial_arguments(self, items=None, users=None):
        """Each of num_users/num_items may also be given alone."""
        # init with partially given arguments
        if items is None:
            items = np.random.randint(1, 1000)
        if users is None:
            users = np.random.randint(1, 100)
        c = PopularityRecommender(num_users=users)
        test_helpers.assert_correct_num_users(users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(c.num_items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
        c = PopularityRecommender(num_items=items)
        test_helpers.assert_correct_num_users(c.num_users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
        c = PopularityRecommender(num_users=users, num_items=items)
        test_helpers.assert_correct_num_users(users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
    def test_representations(self, item_repr=None, user_repr=None):
        """User/item representation matrices are stored unchanged."""
        if item_repr is None:
            items = np.random.randint(5, 1000)
            item_repr = np.random.random(size=(1, items))
        if user_repr is None or user_repr.shape[1] != item_repr.shape[0]:
            users = np.random.randint(5, 100)
            user_repr = np.random.randint(10, size=(users, 1))
        c = PopularityRecommender(item_representation=item_repr)
        test_helpers.assert_correct_num_users(c.num_users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(item_repr.shape[1], c, c.items_hat.shape[1])
        test_helpers.assert_equal_arrays(item_repr, c.items_hat)
        test_helpers.assert_not_none(c.predicted_scores)
        c = PopularityRecommender(user_representation=user_repr)
        test_helpers.assert_correct_num_users(user_repr.shape[0], c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(c.num_items, c, c.items_hat.shape[1])
        test_helpers.assert_equal_arrays(user_repr, c.users_hat)
        test_helpers.assert_not_none(c.predicted_scores)
        c = PopularityRecommender(user_representation=user_repr, item_representation=item_repr)
        test_helpers.assert_correct_num_users(user_repr.shape[0], c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(item_repr.shape[1], c, c.items_hat.shape[1])
        test_helpers.assert_equal_arrays(user_repr, c.users_hat)
        test_helpers.assert_equal_arrays(item_repr, c.items_hat)
        test_helpers.assert_not_none(c.predicted_scores)
    def test_wrong_representation(
        self, user_repr=None, item_repr=None, bad_user_repr=None, bad_item_repr=None
    ):
        """Mismatched representation shapes raise ValueError."""
        if item_repr is None:
            items = np.random.randint(1000)
            item_repr = np.random.random(size=(1, items))
        if user_repr is None or user_repr.shape[1] != item_repr.shape[0]:
            users = np.random.randint(100)
            user_repr = np.random.randint(10, size=(users, 1))
        if bad_user_repr is None or bad_user_repr.shape[1] == item_repr.shape[0]:
            # |A| shouldn't match item_repr.shape[0]
            bad_user_repr = np.random.randint(10, size=(user_repr.shape[0], user_repr.shape[1] + 2))
        if bad_item_repr is None or bad_item_repr.shape[0] == user_repr.shape[1]:
            # |A| shouldn't match user_repr.shape[1]
            bad_item_repr = np.random.random(size=(item_repr.shape[0] + 1, item_repr.shape[1]))
        with pytest.raises(ValueError):
            c = PopularityRecommender(
                user_representation=bad_user_repr, item_representation=item_repr
            )
        with pytest.raises(ValueError):
            c = PopularityRecommender(
                user_representation=user_repr, item_representation=bad_item_repr
            )
    def test_additional_params(self, num_items_per_iter=None):
        """num_items_per_iter is stored on the model."""
        if num_items_per_iter is None:
            num_items_per_iter = np.random.randint(5, 100)
        c = PopularityRecommender(verbose=False, num_items_per_iter=num_items_per_iter)
        assert num_items_per_iter == c.num_items_per_iter
        # also check other params
        test_helpers.assert_correct_num_users(c.num_users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(c.num_items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
    def test_seeding(self, seed=None, items=None, users=None):
        """Equal seeds yield identical state, measurements and history."""
        if seed is None:
            seed = np.random.randint(100000)
        s1 = PopularityRecommender(seed=seed, record_base_state=True)
        s2 = PopularityRecommender(seed=seed, record_base_state=True)
        test_helpers.assert_equal_arrays(s1.items_hat, s2.items_hat)
        test_helpers.assert_equal_arrays(s1.users_hat, s2.users_hat)
        s1.run(timesteps=5)
        s2.run(timesteps=5)
        # check that measurements are the same
        meas1 = s1.get_measurements()
        meas2 = s2.get_measurements()
        test_helpers.assert_equal_measurements(meas1, meas2)
        systate1 = s1.get_system_state()
        systate2 = s2.get_system_state()
        test_helpers.assert_equal_system_state(systate1, systate2)
        if items is None:
            items = np.random.randint(20, 1000)
        if users is None:
            users = np.random.randint(20, 100)
        s1 = PopularityRecommender(
            seed=seed, num_users=users, num_items=items, record_base_state=True
        )
        s2 = PopularityRecommender(
            seed=seed, num_users=users, num_items=items, record_base_state=True
        )
        test_helpers.assert_equal_arrays(s1.items_hat, s2.items_hat)
        test_helpers.assert_equal_arrays(s1.users_hat, s2.users_hat)
        s1.run(timesteps=5)
        s2.run(timesteps=5)
        # check that measurements are the same
        meas1 = s1.get_measurements()
        meas2 = s2.get_measurements()
        test_helpers.assert_equal_measurements(meas1, meas2)
        systate1 = s1.get_system_state()
        systate2 = s2.get_system_state()
        test_helpers.assert_equal_system_state(systate1, systate2)
    def test_recommendations(self):
        """Training drives recommendations toward the most popular item."""
        num_users = 5
        num_items = 5
        num_attr = 5
        users = np.eye(num_users)  # 5 users, 5 attributes
        items = np.zeros((num_attr, num_items))  # 5 items, 5 attributes
        items[:, 0] = 10  # this item will be most desirable to users
        model = PopularityRecommender(
            actual_user_representation=users,
            actual_item_representation=items,
            num_items_per_iter=num_items,
        )
        init_pred_scores = np.copy(model.predicted_scores)
        # after one iteration of training, the most popular item
        # should be the item at index 0
        model.run(1)
        # assert new scores have changed
        trained_preds = np.copy(model.predicted_scores)
        with pytest.raises(AssertionError):
            test_helpers.assert_equal_arrays(init_pred_scores, trained_preds)
        # assert that recommendations are now "perfect"
        model.num_items_per_iter = 1
        recommendations = model.recommend()
        correct_rec = np.array([[0], [0], [0], [0], [0]])
        test_helpers.assert_equal_arrays(recommendations, correct_rec)
        # assert that items_hat and users_hat are as expected
        user_rep = np.ones(num_users).reshape(-1, 1)
        test_helpers.assert_equal_arrays(model.users_hat, user_rep)
        item_rep = np.zeros(num_items).reshape(1, -1)
        item_rep[0, 0] = 5  # all users should have interacted with this item
        test_helpers.assert_equal_arrays(model.items_hat, item_rep)
        # new model that only shows 2 items per iteration
        model = PopularityRecommender(
            actual_user_representation=users, actual_item_representation=items, num_items_per_iter=2
        )
        model.run(5)  # run for 5 timesteps
        # assert that recommendations are now "perfect"
        model.num_items_per_iter = 1
        recommendations = model.recommend()
        most_popular = np.argmax(model.items_hat)  # extract most popular item
        correct_rec = np.ones(num_users).reshape(-1, 1) * most_popular
        test_helpers.assert_equal_arrays(recommendations, correct_rec)
    def test_creator_items(self):
        """Creators with probability 1 add 50 items on the first step."""
        users = np.random.randint(10, size=(100, 10))
        items = np.random.randint(2, size=(10, 100))
        creator_profiles = Creators(
            np.random.uniform(size=(50, 10)), creation_probability=1.0
        )  # 50 creator profiles
        p = PopularityRecommender(
            actual_user_representation=users,
            actual_item_representation=items,
            creators=creator_profiles,
        )
        p.run(1, repeated_items=True)
        assert p.items.shape[1] == 150  # 50 new items
        assert p.items_hat.shape[1] == 150
        assert p.users.state_history[-1].shape[1] == 150
|
from trecs.models import PopularityRecommender
from trecs.components import Creators
import numpy as np
import pytest
import test_helpers
class TestPopularityRecommender:
    """Unit tests for trecs' PopularityRecommender model."""
    def test_default(self):
        """Default construction fills in users, items and scores."""
        c = PopularityRecommender()
        test_helpers.assert_correct_num_users(c.num_users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(c.num_items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
    def test_arguments(self, items=None, users=None):
        """Explicit num_users/num_items are respected."""
        if items is None:
            items = np.random.randint(1, 1000)
        if users is None:
            users = np.random.randint(1, 100)
        # init with given arguments
        c = PopularityRecommender(num_users=users, num_items=items)
        test_helpers.assert_correct_num_users(users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
    def test_partial_arguments(self, items=None, users=None):
        """Each of num_users/num_items may also be given alone."""
        # init with partially given arguments
        if items is None:
            items = np.random.randint(1, 1000)
        if users is None:
            users = np.random.randint(1, 100)
        c = PopularityRecommender(num_users=users)
        test_helpers.assert_correct_num_users(users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(c.num_items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
        c = PopularityRecommender(num_items=items)
        test_helpers.assert_correct_num_users(c.num_users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
        c = PopularityRecommender(num_users=users, num_items=items)
        test_helpers.assert_correct_num_users(users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
    def test_representations(self, item_repr=None, user_repr=None):
        """User/item representation matrices are stored unchanged."""
        if item_repr is None:
            items = np.random.randint(5, 1000)
            item_repr = np.random.random(size=(1, items))
        if user_repr is None or user_repr.shape[1] != item_repr.shape[0]:
            users = np.random.randint(5, 100)
            user_repr = np.random.randint(10, size=(users, 1))
        c = PopularityRecommender(item_representation=item_repr)
        test_helpers.assert_correct_num_users(c.num_users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(item_repr.shape[1], c, c.items_hat.shape[1])
        test_helpers.assert_equal_arrays(item_repr, c.items_hat)
        test_helpers.assert_not_none(c.predicted_scores)
        c = PopularityRecommender(user_representation=user_repr)
        test_helpers.assert_correct_num_users(user_repr.shape[0], c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(c.num_items, c, c.items_hat.shape[1])
        test_helpers.assert_equal_arrays(user_repr, c.users_hat)
        test_helpers.assert_not_none(c.predicted_scores)
        c = PopularityRecommender(user_representation=user_repr, item_representation=item_repr)
        test_helpers.assert_correct_num_users(user_repr.shape[0], c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(item_repr.shape[1], c, c.items_hat.shape[1])
        test_helpers.assert_equal_arrays(user_repr, c.users_hat)
        test_helpers.assert_equal_arrays(item_repr, c.items_hat)
        test_helpers.assert_not_none(c.predicted_scores)
    def test_wrong_representation(
        self, user_repr=None, item_repr=None, bad_user_repr=None, bad_item_repr=None
    ):
        """Mismatched representation shapes raise ValueError."""
        if item_repr is None:
            items = np.random.randint(1000)
            item_repr = np.random.random(size=(1, items))
        if user_repr is None or user_repr.shape[1] != item_repr.shape[0]:
            users = np.random.randint(100)
            user_repr = np.random.randint(10, size=(users, 1))
        if bad_user_repr is None or bad_user_repr.shape[1] == item_repr.shape[0]:
            # |A| shouldn't match item_repr.shape[0]
            bad_user_repr = np.random.randint(10, size=(user_repr.shape[0], user_repr.shape[1] + 2))
        if bad_item_repr is None or bad_item_repr.shape[0] == user_repr.shape[1]:
            # |A| shouldn't match user_repr.shape[1]
            bad_item_repr = np.random.random(size=(item_repr.shape[0] + 1, item_repr.shape[1]))
        with pytest.raises(ValueError):
            c = PopularityRecommender(
                user_representation=bad_user_repr, item_representation=item_repr
            )
        with pytest.raises(ValueError):
            c = PopularityRecommender(
                user_representation=user_repr, item_representation=bad_item_repr
            )
    def test_additional_params(self, num_items_per_iter=None):
        """num_items_per_iter is stored on the model."""
        if num_items_per_iter is None:
            num_items_per_iter = np.random.randint(5, 100)
        c = PopularityRecommender(verbose=False, num_items_per_iter=num_items_per_iter)
        assert num_items_per_iter == c.num_items_per_iter
        # also check other params
        test_helpers.assert_correct_num_users(c.num_users, c, c.users_hat.shape[0])
        test_helpers.assert_correct_num_items(c.num_items, c, c.items_hat.shape[1])
        test_helpers.assert_not_none(c.predicted_scores)
    def test_seeding(self, seed=None, items=None, users=None):
        """Equal seeds yield identical state, measurements and history."""
        if seed is None:
            seed = np.random.randint(100000)
        s1 = PopularityRecommender(seed=seed, record_base_state=True)
        s2 = PopularityRecommender(seed=seed, record_base_state=True)
        test_helpers.assert_equal_arrays(s1.items_hat, s2.items_hat)
        test_helpers.assert_equal_arrays(s1.users_hat, s2.users_hat)
        s1.run(timesteps=5)
        s2.run(timesteps=5)
        # check that measurements are the same
        meas1 = s1.get_measurements()
        meas2 = s2.get_measurements()
        test_helpers.assert_equal_measurements(meas1, meas2)
        systate1 = s1.get_system_state()
        systate2 = s2.get_system_state()
        test_helpers.assert_equal_system_state(systate1, systate2)
        if items is None:
            items = np.random.randint(20, 1000)
        if users is None:
            users = np.random.randint(20, 100)
        s1 = PopularityRecommender(
            seed=seed, num_users=users, num_items=items, record_base_state=True
        )
        s2 = PopularityRecommender(
            seed=seed, num_users=users, num_items=items, record_base_state=True
        )
        test_helpers.assert_equal_arrays(s1.items_hat, s2.items_hat)
        test_helpers.assert_equal_arrays(s1.users_hat, s2.users_hat)
        s1.run(timesteps=5)
        s2.run(timesteps=5)
        # check that measurements are the same
        meas1 = s1.get_measurements()
        meas2 = s2.get_measurements()
        test_helpers.assert_equal_measurements(meas1, meas2)
        systate1 = s1.get_system_state()
        systate2 = s2.get_system_state()
        test_helpers.assert_equal_system_state(systate1, systate2)
    def test_recommendations(self):
        """Training drives recommendations toward the most popular item."""
        num_users = 5
        num_items = 5
        num_attr = 5
        users = np.eye(num_users)  # 5 users, 5 attributes
        items = np.zeros((num_attr, num_items))  # 5 items, 5 attributes
        items[:, 0] = 10  # this item will be most desirable to users
        model = PopularityRecommender(
            actual_user_representation=users,
            actual_item_representation=items,
            num_items_per_iter=num_items,
        )
        init_pred_scores = np.copy(model.predicted_scores)
        # after one iteration of training, the most popular item
        # should be the item at index 0
        model.run(1)
        # assert new scores have changed
        trained_preds = np.copy(model.predicted_scores)
        with pytest.raises(AssertionError):
            test_helpers.assert_equal_arrays(init_pred_scores, trained_preds)
        # assert that recommendations are now "perfect"
        model.num_items_per_iter = 1
        recommendations = model.recommend()
        correct_rec = np.array([[0], [0], [0], [0], [0]])
        test_helpers.assert_equal_arrays(recommendations, correct_rec)
        # assert that items_hat and users_hat are as expected
        user_rep = np.ones(num_users).reshape(-1, 1)
        test_helpers.assert_equal_arrays(model.users_hat, user_rep)
        item_rep = np.zeros(num_items).reshape(1, -1)
        item_rep[0, 0] = 5  # all users should have interacted with this item
        test_helpers.assert_equal_arrays(model.items_hat, item_rep)
        # new model that only shows 2 items per iteration
        model = PopularityRecommender(
            actual_user_representation=users, actual_item_representation=items, num_items_per_iter=2
        )
        model.run(5)  # run for 5 timesteps
        # assert that recommendations are now "perfect"
        model.num_items_per_iter = 1
        recommendations = model.recommend()
        most_popular = np.argmax(model.items_hat)  # extract most popular item
        correct_rec = np.ones(num_users).reshape(-1, 1) * most_popular
        test_helpers.assert_equal_arrays(recommendations, correct_rec)
    def test_creator_items(self):
        """Creators with probability 1 add 50 items on the first step."""
        users = np.random.randint(10, size=(100, 10))
        items = np.random.randint(2, size=(10, 100))
        creator_profiles = Creators(
            np.random.uniform(size=(50, 10)), creation_probability=1.0
        )  # 50 creator profiles
        p = PopularityRecommender(
            actual_user_representation=users,
            actual_item_representation=items,
            creators=creator_profiles,
        )
        p.run(1, repeated_items=True)
        assert p.items.shape[1] == 150  # 50 new items
        assert p.items_hat.shape[1] == 150
        assert p.users.state_history[-1].shape[1] == 150
|
en
| 0.852394
|
# init with given arguments # init with partially given arguments # |A| shouldn't match item_repr.shape[0] # |A| shouldn't match user_repr.shape[1] # also check other params # check that measurements are the same # check that measurements are the same # 5 users, 5 attributes # 5 items, 5 attributes # this item will be most desirable to users # after one iteration of training, the most popular item # should be the item at index 0 # assert new scores have changed # assert that recommendations are now "perfect" # assert that items_hat and users_hat are as expected # all users should have interacted with this item # new model that only shows 2 items per iteration # run for 5 timesteps # assert that recommendations are now "perfect" # extract most popular item # 50 creator profiles # 50 new items
| 2.231647
| 2
|
Chapter04/chapter-4/codeblock/cb-mnist.py
|
PacktPublishing/Deep-Learning-Essentials
| 24
|
6626565
|
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def cnn_model_fn(features, labels, mode):
    """Estimator model_fn for a small MNIST CNN (conv-pool x2, FC, dropout).

    NOTE(review): only a TRAIN EstimatorSpec is returned; EVAL and PREDICT
    modes are not handled -- confirm this is intentional for the example.
    """
    # Input Layer
    # Reshape flat feature vectors to NHWC with one channel (28x28 MNIST).
    INPUT = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Conv-1 Layer
    CONV1 = tf.layers.conv2d(
        inputs=INPUT,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pool-1 Layer
    POOL1 = tf.layers.max_pooling2d(inputs=CONV1, pool_size=[2, 2], strides=2)
    # Conv-2 Layer
    CONV2 = tf.layers.conv2d(
        inputs=POOL1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pool-2 Layer
    POOL2 = tf.layers.max_pooling2d(inputs=CONV2, pool_size=[2, 2], strides=2)
    # Pool-2 Flattened Layer
    # Two 2x2 poolings shrink 28x28 to 7x7, with 64 channels.
    POOL2_FLATTENED = tf.reshape(POOL2, [-1, 7 * 7 * 64])
    FC1 = tf.layers.dense(inputs=POOL2_FLATTENED, units=1024, activation=tf.nn.relu)
    # Dropout Layer (active only during training)
    DROPOUT = tf.layers.dropout(
        inputs=FC1, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)
    FC2 = tf.layers.dense(inputs=DROPOUT, units=10)
    # Calculate Loss (for both TRAIN and EVAL modes)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=FC2)
    # Configure the Training Op (for TRAIN mode)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
if __name__ == "__main__":
    # Load MNIST via the (deprecated) tf.contrib helper.
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    features = mnist.train.images
    labels = np.asarray(mnist.train.labels, dtype=np.int32)
    # Shuffled, endlessly repeating input pipeline of 100-image batches.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": features},
        y=labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
    # Train for a fixed 20k steps; checkpoints go to model_dir.
    mnist_classifier.train(input_fn=train_input_fn,steps=20000)
|
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def cnn_model_fn(features, labels, mode):
    """Estimator model_fn for a small MNIST CNN (conv-pool x2, FC, dropout).

    NOTE(review): only a TRAIN EstimatorSpec is returned; EVAL and PREDICT
    modes are not handled -- confirm this is intentional for the example.
    """
    # Input Layer
    # Reshape flat feature vectors to NHWC with one channel (28x28 MNIST).
    INPUT = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Conv-1 Layer
    CONV1 = tf.layers.conv2d(
        inputs=INPUT,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pool-1 Layer
    POOL1 = tf.layers.max_pooling2d(inputs=CONV1, pool_size=[2, 2], strides=2)
    # Conv-2 Layer
    CONV2 = tf.layers.conv2d(
        inputs=POOL1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pool-2 Layer
    POOL2 = tf.layers.max_pooling2d(inputs=CONV2, pool_size=[2, 2], strides=2)
    # Pool-2 Flattened Layer
    # Two 2x2 poolings shrink 28x28 to 7x7, with 64 channels.
    POOL2_FLATTENED = tf.reshape(POOL2, [-1, 7 * 7 * 64])
    FC1 = tf.layers.dense(inputs=POOL2_FLATTENED, units=1024, activation=tf.nn.relu)
    # Dropout Layer (active only during training)
    DROPOUT = tf.layers.dropout(
        inputs=FC1, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)
    FC2 = tf.layers.dense(inputs=DROPOUT, units=10)
    # Calculate Loss (for both TRAIN and EVAL modes)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=FC2)
    # Configure the Training Op (for TRAIN mode)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
if __name__ == "__main__":
    # Load MNIST via the (deprecated) tf.contrib helper.
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    features = mnist.train.images
    labels = np.asarray(mnist.train.labels, dtype=np.int32)
    # Shuffled, endlessly repeating input pipeline of 100-image batches.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": features},
        y=labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
    # Train for a fixed 20k steps; checkpoints go to model_dir.
    mnist_classifier.train(input_fn=train_input_fn,steps=20000)
|
en
| 0.670345
|
# Input Layer # Conv-1 Layer # Pool-1 Layer # Conv-2 Layer # Pool-2 Layer # Pool-2 Flattened Layer # Dropout Layer # Calculate Loss (for both TRAIN and EVAL modes) # Configure the Training Op (for TRAIN mode)
| 3.091342
| 3
|
pywslegislature/services/__init__.py
|
JacksonMaxfield/wa_legislature
| 5
|
6626566
|
<filename>pywslegislature/services/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Services module for pywslegislature."""
# include all exampleParameters
from .exampleParameters import ExampleParameters # noqa: F401, F403
# import all services
from .amendmentService import AmendmentService # noqa: F401, F403
from .committeeService import CommitteeService # noqa: F401, F403
from .committeeActionService import CommitteeActionService # noqa: F401, F403
from .committeeMeetingsService import CommitteeMeetingService # noqa: F401, F403
from .legislationService import LegislationService # noqa: F401, F403
from .legislativeDocumentService import LegislativeDocumentService # noqa: F401, F403
from .rcwSiteAffectedService import RCWCiteAffectedService # noqa: F401, F403
from .sessionLawService import SessionLawService # noqa: F401, F403
from .sponsorService import SponsorService # noqa: F401, F403
|
<filename>pywslegislature/services/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Services module for pywslegislature."""
# include all exampleParameters
from .exampleParameters import ExampleParameters # noqa: F401, F403
# import all services
from .amendmentService import AmendmentService # noqa: F401, F403
from .committeeService import CommitteeService # noqa: F401, F403
from .committeeActionService import CommitteeActionService # noqa: F401, F403
from .committeeMeetingsService import CommitteeMeetingService # noqa: F401, F403
from .legislationService import LegislationService # noqa: F401, F403
from .legislativeDocumentService import LegislativeDocumentService # noqa: F401, F403
from .rcwSiteAffectedService import RCWCiteAffectedService # noqa: F401, F403
from .sessionLawService import SessionLawService # noqa: F401, F403
from .sponsorService import SponsorService # noqa: F401, F403
|
en
| 0.331874
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Services module for pywslegislature. # include all exampleParameters # noqa: F401, F403 # import all services # noqa: F401, F403 # noqa: F401, F403 # noqa: F401, F403 # noqa: F401, F403 # noqa: F401, F403 # noqa: F401, F403 # noqa: F401, F403 # noqa: F401, F403 # noqa: F401, F403
| 1.449699
| 1
|
yann/modules/shape.py
|
michalwols/yann
| 32
|
6626567
|
from torch.nn import Module
from ..exceptions import ShapeInferenceError
class Reshape(Module):
    """Base module applying the tensor method named by ``method``
    (set by subclasses) with the dims captured at construction time."""
    method = None
    def __init__(self, *dims):
        """Store the target dimensions for the reshaping call."""
        super(Reshape, self).__init__()
        self.dims = dims
    def forward(self, input):
        """Dispatch to ``input.<method>(*self.dims)``."""
        reshape_op = getattr(input, self.method)
        return reshape_op(*self.dims)
    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Serialize only the stored dims (this module has no parameters)."""
        return {'dims': self.dims}
    def load_state_dict(self, state_dict, strict=True):
        """Restore dims from a dict produced by :meth:`state_dict`."""
        self.dims = state_dict['dims']
class Squeeze(Reshape):
    """Reshape module calling ``tensor.squeeze(*dims)``."""
    method = 'squeeze'
class Permute(Reshape):
    """Reshape module calling ``tensor.permute(*dims)``."""
    method = 'permute'
class Transpose(Reshape):
    """Reshape module calling ``tensor.transpose(*dims)``."""
    method = 'transpose'
class View(Reshape):
    """Reshape module calling ``tensor.view(*dims)``."""
    method = 'view'
class Flatten(Reshape):
    """Collapse every dimension after the batch dimension."""
    def forward(self, input, *rest):
        """Return *input* viewed as (batch, -1); extra args are ignored."""
        batch = input.size(0)
        return input.view(batch, -1)
def flatten(input):
    """Collapse every dimension of *input* after the first (batch) dim."""
    batch_size = input.size(0)
    return input.view(batch_size, -1)
class FlattenSequences(Module):
    """Module wrapper around :func:`flatten_sequence`."""
    def forward(self, input, *rest):
        # Extra positional args are accepted but ignored.
        return flatten_sequence(input)
def flatten_sequence(seq_batch):
    """Merge the (seq_len, batch) leading dims of *seq_batch* into one
    flat batch dimension, flattening whatever trails them."""
    size = seq_batch.size()
    merged = size[0] * size[1]
    return seq_batch.view(merged, -1)
class Infer(Module):
    """Lazily instantiate *cls* once the input feature size is known.

    The wrapped module is built on the first forward pass, using
    ``x.shape[shape_dim]`` as its first constructor argument, followed
    by any extra args/kwargs captured at construction time.
    """
    def __init__(self, cls, *args, **kwargs):
        super(Infer, self).__init__()
        # Which input dimension carries the feature size (default: dim 1).
        self.shape_dim = kwargs.pop('shape_dim', 1)
        self.cls = cls
        self.args = args
        self.kwargs = kwargs
        self.module = None
    @property
    def was_inferred(self):
        """True once the wrapped module has been instantiated."""
        return self.module is not None
    def forward(self, x):
        if self.module is None:
            try:
                self.module = self.cls(x.shape[self.shape_dim], *self.args, **self.kwargs)
            except IndexError as e:
                # Chain the original IndexError so the root cause survives.
                raise ShapeInferenceError(
                    f"Improper shape dim ({self.shape_dim}) selected for "
                    f"{self.cls} with input of shape {x.shape}") from e
        return self.module(x)
    @classmethod
    def shed(cls, module):
        # TODO: modify the model to drop the Infer nodes and replace them with the initialized module
        raise NotImplementedError()
|
from torch.nn import Module
from ..exceptions import ShapeInferenceError
class Reshape(Module):
    """Base module applying the tensor method named by ``method``
    (set by subclasses) with the dims captured at construction time."""
    method = None
    def __init__(self, *dims):
        """Store the target dimensions for the reshaping call."""
        super(Reshape, self).__init__()
        self.dims = dims
    def forward(self, input):
        """Dispatch to ``input.<method>(*self.dims)``."""
        reshape_op = getattr(input, self.method)
        return reshape_op(*self.dims)
    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Serialize only the stored dims (this module has no parameters)."""
        return {'dims': self.dims}
    def load_state_dict(self, state_dict, strict=True):
        """Restore dims from a dict produced by :meth:`state_dict`."""
        self.dims = state_dict['dims']
class Squeeze(Reshape):
    """Reshape module calling ``tensor.squeeze(*dims)``."""
    method = 'squeeze'
class Permute(Reshape):
    """Reshape module calling ``tensor.permute(*dims)``."""
    method = 'permute'
class Transpose(Reshape):
    """Reshape module calling ``tensor.transpose(*dims)``."""
    method = 'transpose'
class View(Reshape):
    """Reshape module calling ``tensor.view(*dims)``."""
    method = 'view'
class Flatten(Reshape):
    """Collapse every dimension after the batch dimension."""
    def forward(self, input, *rest):
        """Return *input* viewed as (batch, -1); extra args are ignored."""
        batch = input.size(0)
        return input.view(batch, -1)
def flatten(input):
    """Collapse every dimension of *input* after the first (batch) dim."""
    batch_size = input.size(0)
    return input.view(batch_size, -1)
class FlattenSequences(Module):
    """Module wrapper around :func:`flatten_sequence`."""
    def forward(self, input, *rest):
        # Extra positional args are accepted but ignored.
        return flatten_sequence(input)
def flatten_sequence(seq_batch):
    """Merge the (seq_len, batch) leading dims of *seq_batch* into one
    flat batch dimension, flattening whatever trails them."""
    size = seq_batch.size()
    merged = size[0] * size[1]
    return seq_batch.view(merged, -1)
class Infer(Module):
    """Lazily instantiate *cls* once the input feature size is known.

    The wrapped module is built on the first forward pass, using
    ``x.shape[shape_dim]`` as its first constructor argument, followed
    by any extra args/kwargs captured at construction time.
    """
    def __init__(self, cls, *args, **kwargs):
        super(Infer, self).__init__()
        # Which input dimension carries the feature size (default: dim 1).
        self.shape_dim = kwargs.pop('shape_dim', 1)
        self.cls = cls
        self.args = args
        self.kwargs = kwargs
        self.module = None
    @property
    def was_inferred(self):
        """True once the wrapped module has been instantiated."""
        return self.module is not None
    def forward(self, x):
        if self.module is None:
            try:
                self.module = self.cls(x.shape[self.shape_dim], *self.args, **self.kwargs)
            except IndexError as e:
                # Chain the original IndexError so the root cause survives.
                raise ShapeInferenceError(
                    f"Improper shape dim ({self.shape_dim}) selected for "
                    f"{self.cls} with input of shape {x.shape}") from e
        return self.module(x)
    @classmethod
    def shed(cls, module):
        # TODO: modify the model to drop the Infer nodes and replace them with the initialized module
        raise NotImplementedError()
|
en
| 0.694703
|
# TODO: modify the model to drop the Infer nodes and replace them with the initialized module
| 2.303735
| 2
|
propnet/web/app.py
|
dwinston/propnet
| 0
|
6626568
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
from dash.dependencies import Input, Output, State
from propnet import log_stream
from propnet.web.layouts_models import model_layout, models_index
from propnet.web.layouts_symbols import symbol_layout, symbols_index
from propnet.web.layouts_plot import plot_layout
from propnet.web.layouts_home import home_layout
from propnet.web.layouts_interactive import interactive_layout
from propnet.web.layouts_explore import explore_layout
from mp_dash_components import GraphComponent
from propnet.web.utils import parse_path
from flask_caching import Cache
import logging
log = logging.getLogger(__name__)
# TODO: Fix math rendering
app = dash.Dash()
server = app.server
app.config.supress_callback_exceptions = True # TODO: remove this?
app.scripts.config.serve_locally = True
app.title = "propnet"
route = dcc.Location(id='url', refresh=False)
cache = Cache(app.server, config={
'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '.tmp'
})
layout_menu = html.Div(
children=[dcc.Link('What is propnet?', href='/home'),
html.Span(' • '),
dcc.Link('Explore', href='/explore'),
html.Span(' • '),
#dcc.Link('Models', href='/model'),
#html.Span(' • '),
#dcc.Link('Properties', href='/property'),
#html.Span(' • '),
dcc.Link('Generate', href='/generate'),
html.Span(' • '),
dcc.Link('Plot', href='/plot')
])
# header
app.layout = html.Div(
children=[route,
html.Div([html.H3(app.title), layout_menu, html.Br()],
style={'textAlign': 'center'}),
html.Div(id='page-content'),
# hidden table to make sure table component loads
# (Dash limitation; may be removed in future)
html.Div(children=[dt.DataTable(rows=[{}]), GraphComponent(graph={'nodes':[], 'edges': []})],
style={'display': 'none'})],
style={'marginLeft': 200, 'marginRight': 200, 'marginTop': 30})
# standard Dash css, fork this for a custom theme
# we real web devs now
app.css.append_css(
{'external_url': 'https://codepen.io/mkhorton-the-reactor/pen/oQbddV.css'})
# app.css.append_css(
# {'external_url': 'https://codepen.io/montoyjh/pen/YjPKae.css'})
app.css.append_css(
{'external_url': 'https://codepen.io/mikesmith1611/pen/QOKgpG.css'})
PLOT_LAYOUT = plot_layout(app)
INTERACTIVE_LAYOUT = interactive_layout(app)
EXPLORE_LAYOUT = explore_layout(app)
# routing, current routes defined are:
# / for home page
# /model for model summary
# /model/model_name for information on that model
# /property for property summary
# /property/property_name for information on that property
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
"""
Args:
pathname:
Returns:
"""
path_info = parse_path(pathname)
if path_info:
if path_info['mode'] == 'model':
if path_info['value']:
return model_layout(path_info['value'])
else:
return models_index
elif path_info['mode'] == 'property':
if path_info['value']:
property_name = path_info['value']
return symbol_layout(property_name)
else:
return symbols_index()
elif path_info['mode'] == 'explore':
return EXPLORE_LAYOUT
elif path_info['mode'] == 'plot':
return PLOT_LAYOUT
elif path_info['mode'] == 'generate':
return INTERACTIVE_LAYOUT
elif path_info['mode'] == 'home':
return home_layout()
else:
return '404'
else:
return home_layout()
if __name__ == '__main__':
app.run_server(debug=True)
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
from dash.dependencies import Input, Output, State
from propnet import log_stream
from propnet.web.layouts_models import model_layout, models_index
from propnet.web.layouts_symbols import symbol_layout, symbols_index
from propnet.web.layouts_plot import plot_layout
from propnet.web.layouts_home import home_layout
from propnet.web.layouts_interactive import interactive_layout
from propnet.web.layouts_explore import explore_layout
from mp_dash_components import GraphComponent
from propnet.web.utils import parse_path
from flask_caching import Cache
import logging
log = logging.getLogger(__name__)
# TODO: Fix math rendering
app = dash.Dash()
server = app.server
app.config.supress_callback_exceptions = True # TODO: remove this?
app.scripts.config.serve_locally = True
app.title = "propnet"
route = dcc.Location(id='url', refresh=False)
cache = Cache(app.server, config={
'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '.tmp'
})
layout_menu = html.Div(
children=[dcc.Link('What is propnet?', href='/home'),
html.Span(' • '),
dcc.Link('Explore', href='/explore'),
html.Span(' • '),
#dcc.Link('Models', href='/model'),
#html.Span(' • '),
#dcc.Link('Properties', href='/property'),
#html.Span(' • '),
dcc.Link('Generate', href='/generate'),
html.Span(' • '),
dcc.Link('Plot', href='/plot')
])
# header
app.layout = html.Div(
children=[route,
html.Div([html.H3(app.title), layout_menu, html.Br()],
style={'textAlign': 'center'}),
html.Div(id='page-content'),
# hidden table to make sure table component loads
# (Dash limitation; may be removed in future)
html.Div(children=[dt.DataTable(rows=[{}]), GraphComponent(graph={'nodes':[], 'edges': []})],
style={'display': 'none'})],
style={'marginLeft': 200, 'marginRight': 200, 'marginTop': 30})
# standard Dash css, fork this for a custom theme
# we real web devs now
app.css.append_css(
{'external_url': 'https://codepen.io/mkhorton-the-reactor/pen/oQbddV.css'})
# app.css.append_css(
# {'external_url': 'https://codepen.io/montoyjh/pen/YjPKae.css'})
app.css.append_css(
{'external_url': 'https://codepen.io/mikesmith1611/pen/QOKgpG.css'})
PLOT_LAYOUT = plot_layout(app)
INTERACTIVE_LAYOUT = interactive_layout(app)
EXPLORE_LAYOUT = explore_layout(app)
# routing, current routes defined are:
# / for home page
# /model for model summary
# /model/model_name for information on that model
# /property for property summary
# /property/property_name for information on that property
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
"""
Args:
pathname:
Returns:
"""
path_info = parse_path(pathname)
if path_info:
if path_info['mode'] == 'model':
if path_info['value']:
return model_layout(path_info['value'])
else:
return models_index
elif path_info['mode'] == 'property':
if path_info['value']:
property_name = path_info['value']
return symbol_layout(property_name)
else:
return symbols_index()
elif path_info['mode'] == 'explore':
return EXPLORE_LAYOUT
elif path_info['mode'] == 'plot':
return PLOT_LAYOUT
elif path_info['mode'] == 'generate':
return INTERACTIVE_LAYOUT
elif path_info['mode'] == 'home':
return home_layout()
else:
return '404'
else:
return home_layout()
if __name__ == '__main__':
app.run_server(debug=True)
|
en
| 0.518335
|
# TODO: Fix math rendering # TODO: remove this? #dcc.Link('Models', href='/model'), #html.Span(' • '), #dcc.Link('Properties', href='/property'), #html.Span(' • '), # header # hidden table to make sure table component loads # (Dash limitation; may be removed in future) # standard Dash css, fork this for a custom theme # we real web devs now # app.css.append_css( # {'external_url': 'https://codepen.io/montoyjh/pen/YjPKae.css'}) # routing, current routes defined are: # / for home page # /model for model summary # /model/model_name for information on that model # /property for property summary # /property/property_name for information on that property Args: pathname: Returns:
| 2.006684
| 2
|
movimentacoes/models.py
|
robsonleal/challenge_backend
| 0
|
6626569
|
from django.db import models
from datetime import datetime
# Create your models here.
class Receita(models.Model):
descricao = models.CharField(max_length=100)
valor = models.CharField(max_length=10)
data = models.DateField(default=datetime.now, blank=True)
class Despesa(models.Model):
descricao = models.CharField(max_length=100)
valor = models.CharField(max_length=10)
data = models.DateField(default=datetime.now, blank=True)
|
from django.db import models
from datetime import datetime
# Create your models here.
class Receita(models.Model):
descricao = models.CharField(max_length=100)
valor = models.CharField(max_length=10)
data = models.DateField(default=datetime.now, blank=True)
class Despesa(models.Model):
descricao = models.CharField(max_length=100)
valor = models.CharField(max_length=10)
data = models.DateField(default=datetime.now, blank=True)
|
en
| 0.963489
|
# Create your models here.
| 2.462155
| 2
|
simpleflow/format.py
|
David-Wobrock/simpleflow
| 69
|
6626570
|
<gh_stars>10-100
import os
from sqlite3 import OperationalError
from uuid import uuid4
import lazy_object_proxy
from diskcache import Cache
from simpleflow import constants, logger, storage
from simpleflow.settings import SIMPLEFLOW_ENABLE_DISK_CACHE
from simpleflow.utils import json_dumps, json_loads_or_raw
JUMBO_FIELDS_MEMORY_CACHE = {}
class JumboTooLargeError(ValueError):
pass
def _jumbo_fields_bucket():
# wrapped into a function so easier to override for tests
bucket = os.getenv("SIMPLEFLOW_JUMBO_FIELDS_BUCKET")
if not bucket:
return
# trim trailing / if there, would provoke double slashes down the road
if bucket.endswith("/"):
bucket = bucket[:-1]
return bucket
def decode(content, parse_json=True, use_proxy=True):
if content is None:
return content
if content.startswith(constants.JUMBO_FIELDS_PREFIX):
def unwrap():
location, _size = content.split()
value = _pull_jumbo_field(location)
if parse_json:
return json_loads_or_raw(value)
return value
if use_proxy:
return lazy_object_proxy.Proxy(unwrap)
return unwrap()
if parse_json:
return json_loads_or_raw(content)
return content
def encode(message, max_length, allow_jumbo_fields=True):
if not message:
return message
can_use_jumbo_fields = allow_jumbo_fields and _jumbo_fields_bucket()
if len(message) > max_length:
if not can_use_jumbo_fields:
_log_message_too_long(message)
raise JumboTooLargeError("Message too long ({} chars)".format(len(message)))
if len(message) > constants.JUMBO_FIELDS_MAX_SIZE:
_log_message_too_long(message)
raise JumboTooLargeError(
"Message too long even for a jumbo field ({} chars)".format(
len(message)
)
)
jumbo_signature = _push_jumbo_field(message)
if len(jumbo_signature) > max_length:
raise JumboTooLargeError(
"Jumbo field signature is longer than the max allowed length "
"for this field: {} ; reduce jumbo bucket length?".format(
jumbo_signature
)
)
return jumbo_signature
return message
def _get_cached(path):
# 1/ memory cache
if path in JUMBO_FIELDS_MEMORY_CACHE:
return JUMBO_FIELDS_MEMORY_CACHE[path]
# 2/ disk cache
if SIMPLEFLOW_ENABLE_DISK_CACHE:
try:
# NB: this cache may also be triggered on activity workers, where it's not that
# useful. The performance hit should be minimal. To be improved later.
# NB2: cache has to be lazily instantiated here, cache objects do not survive forks,
# see DiskCache docs.
cache = Cache(constants.CACHE_DIR)
# generate a dedicated cache key because this cache may be shared with other
# features of simpleflow at some point
cache_key = "jumbo_fields/" + path.split("/")[-1]
if cache_key in cache:
logger.debug(
"diskcache: getting key={} from cache_dir={}".format(
cache_key, constants.CACHE_DIR
)
)
return cache[cache_key]
except OperationalError:
logger.warning("diskcache: got an OperationalError, skipping cache usage")
# nothing to return, but better be explicit here
return
def _set_cached(path, content):
# 1/ memory cache
JUMBO_FIELDS_MEMORY_CACHE[path] = content
# 2/ disk cache
if SIMPLEFLOW_ENABLE_DISK_CACHE:
try:
cache = Cache(constants.CACHE_DIR)
cache_key = "jumbo_fields/" + path.split("/")[-1]
logger.debug(
"diskcache: setting key={} on cache_dir={}".format(
cache_key, constants.CACHE_DIR
)
)
cache.set(cache_key, content, expire=3 * constants.HOUR)
except OperationalError:
logger.warning(
"diskcache: got an OperationalError on write, skipping cache write"
)
def _push_jumbo_field(message):
size = len(message)
uuid = str(uuid4())
bucket_with_dir = _jumbo_fields_bucket()
if "/" in bucket_with_dir:
bucket, directory = _jumbo_fields_bucket().split("/", 1)
path = "{}/{}".format(directory, uuid)
else:
bucket = bucket_with_dir
path = uuid
storage.push_content(bucket, path, message)
_set_cached(path, message)
return "{}{}/{} {}".format(constants.JUMBO_FIELDS_PREFIX, bucket, path, size)
def _pull_jumbo_field(location):
bucket, path = location.replace(constants.JUMBO_FIELDS_PREFIX, "").split("/", 1)
cached_value = _get_cached(path)
if cached_value:
return cached_value
content = storage.pull_content(bucket, path)
_set_cached(path, content)
return content
def _log_message_too_long(message):
if len(message) > constants.MAX_LOG_FIELD:
message = "{} <...truncated to {} chars>".format(
message[: constants.MAX_LOG_FIELD], constants.MAX_LOG_FIELD
)
logger.error("Message too long, will raise: {}".format(message))
# A few helpers to wrap common SWF fields
def details(message):
return encode(message, constants.MAX_DETAILS_LENGTH)
def execution_context(message):
return encode(message, constants.MAX_EXECUTION_CONTEXT_LENGTH)
def heartbeat_details(message):
return encode(message, constants.MAX_HEARTBEAT_DETAILS_LENGTH)
def identity(message):
# we don't allow the use of jumbo fields for identity because it's guaranteed
# to change on every task, and we fear it makes the decider too slow
# NB: this should be revisited / questionned later, maybe not such a problem?
return encode(message, constants.MAX_IDENTITY_LENGTH, allow_jumbo_fields=False)
def input(message):
return encode(json_dumps(message), constants.MAX_INPUT_LENGTH)
def reason(message):
return encode(message, constants.MAX_REASON_LENGTH)
def result(message):
return encode(json_dumps(message), constants.MAX_RESULT_LENGTH)
def control(message):
return encode(json_dumps(message), constants.MAX_CONTROL_LENGTH)
|
import os
from sqlite3 import OperationalError
from uuid import uuid4
import lazy_object_proxy
from diskcache import Cache
from simpleflow import constants, logger, storage
from simpleflow.settings import SIMPLEFLOW_ENABLE_DISK_CACHE
from simpleflow.utils import json_dumps, json_loads_or_raw
JUMBO_FIELDS_MEMORY_CACHE = {}
class JumboTooLargeError(ValueError):
pass
def _jumbo_fields_bucket():
# wrapped into a function so easier to override for tests
bucket = os.getenv("SIMPLEFLOW_JUMBO_FIELDS_BUCKET")
if not bucket:
return
# trim trailing / if there, would provoke double slashes down the road
if bucket.endswith("/"):
bucket = bucket[:-1]
return bucket
def decode(content, parse_json=True, use_proxy=True):
if content is None:
return content
if content.startswith(constants.JUMBO_FIELDS_PREFIX):
def unwrap():
location, _size = content.split()
value = _pull_jumbo_field(location)
if parse_json:
return json_loads_or_raw(value)
return value
if use_proxy:
return lazy_object_proxy.Proxy(unwrap)
return unwrap()
if parse_json:
return json_loads_or_raw(content)
return content
def encode(message, max_length, allow_jumbo_fields=True):
if not message:
return message
can_use_jumbo_fields = allow_jumbo_fields and _jumbo_fields_bucket()
if len(message) > max_length:
if not can_use_jumbo_fields:
_log_message_too_long(message)
raise JumboTooLargeError("Message too long ({} chars)".format(len(message)))
if len(message) > constants.JUMBO_FIELDS_MAX_SIZE:
_log_message_too_long(message)
raise JumboTooLargeError(
"Message too long even for a jumbo field ({} chars)".format(
len(message)
)
)
jumbo_signature = _push_jumbo_field(message)
if len(jumbo_signature) > max_length:
raise JumboTooLargeError(
"Jumbo field signature is longer than the max allowed length "
"for this field: {} ; reduce jumbo bucket length?".format(
jumbo_signature
)
)
return jumbo_signature
return message
def _get_cached(path):
# 1/ memory cache
if path in JUMBO_FIELDS_MEMORY_CACHE:
return JUMBO_FIELDS_MEMORY_CACHE[path]
# 2/ disk cache
if SIMPLEFLOW_ENABLE_DISK_CACHE:
try:
# NB: this cache may also be triggered on activity workers, where it's not that
# useful. The performance hit should be minimal. To be improved later.
# NB2: cache has to be lazily instantiated here, cache objects do not survive forks,
# see DiskCache docs.
cache = Cache(constants.CACHE_DIR)
# generate a dedicated cache key because this cache may be shared with other
# features of simpleflow at some point
cache_key = "jumbo_fields/" + path.split("/")[-1]
if cache_key in cache:
logger.debug(
"diskcache: getting key={} from cache_dir={}".format(
cache_key, constants.CACHE_DIR
)
)
return cache[cache_key]
except OperationalError:
logger.warning("diskcache: got an OperationalError, skipping cache usage")
# nothing to return, but better be explicit here
return
def _set_cached(path, content):
# 1/ memory cache
JUMBO_FIELDS_MEMORY_CACHE[path] = content
# 2/ disk cache
if SIMPLEFLOW_ENABLE_DISK_CACHE:
try:
cache = Cache(constants.CACHE_DIR)
cache_key = "jumbo_fields/" + path.split("/")[-1]
logger.debug(
"diskcache: setting key={} on cache_dir={}".format(
cache_key, constants.CACHE_DIR
)
)
cache.set(cache_key, content, expire=3 * constants.HOUR)
except OperationalError:
logger.warning(
"diskcache: got an OperationalError on write, skipping cache write"
)
def _push_jumbo_field(message):
size = len(message)
uuid = str(uuid4())
bucket_with_dir = _jumbo_fields_bucket()
if "/" in bucket_with_dir:
bucket, directory = _jumbo_fields_bucket().split("/", 1)
path = "{}/{}".format(directory, uuid)
else:
bucket = bucket_with_dir
path = uuid
storage.push_content(bucket, path, message)
_set_cached(path, message)
return "{}{}/{} {}".format(constants.JUMBO_FIELDS_PREFIX, bucket, path, size)
def _pull_jumbo_field(location):
bucket, path = location.replace(constants.JUMBO_FIELDS_PREFIX, "").split("/", 1)
cached_value = _get_cached(path)
if cached_value:
return cached_value
content = storage.pull_content(bucket, path)
_set_cached(path, content)
return content
def _log_message_too_long(message):
if len(message) > constants.MAX_LOG_FIELD:
message = "{} <...truncated to {} chars>".format(
message[: constants.MAX_LOG_FIELD], constants.MAX_LOG_FIELD
)
logger.error("Message too long, will raise: {}".format(message))
# A few helpers to wrap common SWF fields
def details(message):
return encode(message, constants.MAX_DETAILS_LENGTH)
def execution_context(message):
return encode(message, constants.MAX_EXECUTION_CONTEXT_LENGTH)
def heartbeat_details(message):
return encode(message, constants.MAX_HEARTBEAT_DETAILS_LENGTH)
def identity(message):
# we don't allow the use of jumbo fields for identity because it's guaranteed
# to change on every task, and we fear it makes the decider too slow
# NB: this should be revisited / questionned later, maybe not such a problem?
return encode(message, constants.MAX_IDENTITY_LENGTH, allow_jumbo_fields=False)
def input(message):
return encode(json_dumps(message), constants.MAX_INPUT_LENGTH)
def reason(message):
return encode(message, constants.MAX_REASON_LENGTH)
def result(message):
return encode(json_dumps(message), constants.MAX_RESULT_LENGTH)
def control(message):
return encode(json_dumps(message), constants.MAX_CONTROL_LENGTH)
|
en
| 0.889103
|
# wrapped into a function so easier to override for tests # trim trailing / if there, would provoke double slashes down the road # 1/ memory cache # 2/ disk cache # NB: this cache may also be triggered on activity workers, where it's not that # useful. The performance hit should be minimal. To be improved later. # NB2: cache has to be lazily instantiated here, cache objects do not survive forks, # see DiskCache docs. # generate a dedicated cache key because this cache may be shared with other # features of simpleflow at some point # nothing to return, but better be explicit here # 1/ memory cache # 2/ disk cache # A few helpers to wrap common SWF fields # we don't allow the use of jumbo fields for identity because it's guaranteed # to change on every task, and we fear it makes the decider too slow # NB: this should be revisited / questionned later, maybe not such a problem?
| 2.137801
| 2
|
conclusions/6-graphs/GraphQuiz-solution.py
|
balajisomasale/Udacity_Data-Structures-and-algorithms
| 0
|
6626571
|
<filename>conclusions/6-graphs/GraphQuiz-solution.py
def get_edge_list(self):
edge_list = []
for edge_object in self.edges:
edge = (edge_object.value, edge_object.node_from.value, edge_object.node_to.value)
edge_list.append(edge)
return edge_list
def get_adjacency_list(self):
max_index = self.find_max_index()
adjacency_list = [None] * (max_index + 1)
for edge_object in self.edges:
if adjacency_list[edge_object.node_from.value]:
adjacency_list[edge_object.node_from.value].append((edge_object.node_to.value, edge_object.value))
else:
adjacency_list[edge_object.node_from.value] = [(edge_object.node_to.value, edge_object.value)]
return adjacency_list
def get_adjacency_matrix(self):
max_index = self.find_max_index()
adjacency_matrix = [[0 for i in range(max_index + 1)] for j in range(max_index + 1)]
for edge_object in self.edges:
adjacency_matrix[edge_object.node_from.value][edge_object.node_to.value] = edge_object.value
return adjacency_matrix
def find_max_index(self):
max_index = -1
if len(self.nodes):
for node in self.nodes:
if node.value > max_index:
max_index = node.value
return max_index
|
<filename>conclusions/6-graphs/GraphQuiz-solution.py
def get_edge_list(self):
edge_list = []
for edge_object in self.edges:
edge = (edge_object.value, edge_object.node_from.value, edge_object.node_to.value)
edge_list.append(edge)
return edge_list
def get_adjacency_list(self):
max_index = self.find_max_index()
adjacency_list = [None] * (max_index + 1)
for edge_object in self.edges:
if adjacency_list[edge_object.node_from.value]:
adjacency_list[edge_object.node_from.value].append((edge_object.node_to.value, edge_object.value))
else:
adjacency_list[edge_object.node_from.value] = [(edge_object.node_to.value, edge_object.value)]
return adjacency_list
def get_adjacency_matrix(self):
max_index = self.find_max_index()
adjacency_matrix = [[0 for i in range(max_index + 1)] for j in range(max_index + 1)]
for edge_object in self.edges:
adjacency_matrix[edge_object.node_from.value][edge_object.node_to.value] = edge_object.value
return adjacency_matrix
def find_max_index(self):
max_index = -1
if len(self.nodes):
for node in self.nodes:
if node.value > max_index:
max_index = node.value
return max_index
|
none
| 1
| 3.365783
| 3
|
|
my_vim_files/python27/Lib/test/test_unicode_file.py
|
satsaeid/dotfiles
| 0
|
6626572
|
<reponame>satsaeid/dotfiles<filename>my_vim_files/python27/Lib/test/test_unicode_file.py
# Test some Unicode file name semantics
# We dont test many operations on files other than
# that their names can be used with Unicode characters.
import os, glob, time, shutil
import unicodedata
import unittest
from test.test_support import run_unittest, TESTFN_UNICODE
from test.test_support import TESTFN_ENCODING, TESTFN_UNICODE_UNENCODEABLE
try:
TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
except (UnicodeError, TypeError):
# Either the file system encoding is None, or the file name
# cannot be encoded in the file system encoding.
raise unittest.SkipTest("No Unicode filesystem semantics on this platform.")
if TESTFN_ENCODED.decode(TESTFN_ENCODING) != TESTFN_UNICODE:
# The file system encoding does not support Latin-1
# (which test_support assumes), so try the file system
# encoding instead.
import sys
try:
TESTFN_UNICODE = unicode("@test-\xe0\xf2", sys.getfilesystemencoding())
TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
if '?' in TESTFN_ENCODED:
# MBCS will not report the error properly
raise UnicodeError, "mbcs encoding problem"
except (UnicodeError, TypeError):
raise unittest.SkipTest("Cannot find a suiteable filename.")
if TESTFN_ENCODED.decode(TESTFN_ENCODING) != TESTFN_UNICODE:
raise unittest.SkipTest("Cannot find a suitable filename.")
def remove_if_exists(filename):
if os.path.exists(filename):
os.unlink(filename)
class TestUnicodeFiles(unittest.TestCase):
# The 'do_' functions are the actual tests. They generally assume the
# file already exists etc.
# Do all the tests we can given only a single filename. The file should
# exist.
def _do_single(self, filename):
self.assertTrue(os.path.exists(filename))
self.assertTrue(os.path.isfile(filename))
self.assertTrue(os.access(filename, os.R_OK))
self.assertTrue(os.path.exists(os.path.abspath(filename)))
self.assertTrue(os.path.isfile(os.path.abspath(filename)))
self.assertTrue(os.access(os.path.abspath(filename), os.R_OK))
os.chmod(filename, 0777)
os.utime(filename, None)
os.utime(filename, (time.time(), time.time()))
# Copy/rename etc tests using the same filename
self._do_copyish(filename, filename)
# Filename should appear in glob output
self.assertTrue(
os.path.abspath(filename)==os.path.abspath(glob.glob(filename)[0]))
# basename should appear in listdir.
path, base = os.path.split(os.path.abspath(filename))
if isinstance(base, str):
base = base.decode(TESTFN_ENCODING)
file_list = os.listdir(path)
# listdir() with a unicode arg may or may not return Unicode
# objects, depending on the platform.
if file_list and isinstance(file_list[0], str):
file_list = [f.decode(TESTFN_ENCODING) for f in file_list]
# Normalize the unicode strings, as round-tripping the name via the OS
# may return a different (but equivalent) value.
base = unicodedata.normalize("NFD", base)
file_list = [unicodedata.normalize("NFD", f) for f in file_list]
self.assertIn(base, file_list)
# Do as many "equivalancy' tests as we can - ie, check that although we
# have different types for the filename, they refer to the same file.
def _do_equivalent(self, filename1, filename2):
# Note we only check "filename1 against filename2" - we don't bother
# checking "filename2 against 1", as we assume we are called again with
# the args reversed.
self.assertTrue(type(filename1)!=type(filename2),
"No point checking equivalent filenames of the same type")
# stat and lstat should return the same results.
self.assertEqual(os.stat(filename1),
os.stat(filename2))
self.assertEqual(os.lstat(filename1),
os.lstat(filename2))
# Copy/rename etc tests using equivalent filename
self._do_copyish(filename1, filename2)
# Tests that copy, move, etc one file to another.
def _do_copyish(self, filename1, filename2):
# Should be able to rename the file using either name.
self.assertTrue(os.path.isfile(filename1)) # must exist.
os.rename(filename1, filename2 + ".new")
self.assertTrue(os.path.isfile(filename1+".new"))
os.rename(filename1 + ".new", filename2)
self.assertTrue(os.path.isfile(filename2))
shutil.copy(filename1, filename2 + ".new")
os.unlink(filename1 + ".new") # remove using equiv name.
# And a couple of moves, one using each name.
shutil.move(filename1, filename2 + ".new")
self.assertTrue(not os.path.exists(filename2))
shutil.move(filename1 + ".new", filename2)
self.assertTrue(os.path.exists(filename1))
# Note - due to the implementation of shutil.move,
# it tries a rename first. This only fails on Windows when on
# different file systems - and this test can't ensure that.
# So we test the shutil.copy2 function, which is the thing most
# likely to fail.
shutil.copy2(filename1, filename2 + ".new")
os.unlink(filename1 + ".new")
def _do_directory(self, make_name, chdir_name, encoded):
cwd = os.getcwd()
if os.path.isdir(make_name):
os.rmdir(make_name)
os.mkdir(make_name)
try:
os.chdir(chdir_name)
try:
if not encoded:
cwd_result = os.getcwdu()
name_result = make_name
else:
cwd_result = os.getcwd().decode(TESTFN_ENCODING)
name_result = make_name.decode(TESTFN_ENCODING)
cwd_result = unicodedata.normalize("NFD", cwd_result)
name_result = unicodedata.normalize("NFD", name_result)
self.assertEqual(os.path.basename(cwd_result),name_result)
finally:
os.chdir(cwd)
finally:
os.rmdir(make_name)
# The '_test' functions 'entry points with params' - ie, what the
# top-level 'test' functions would be if they could take params
def _test_single(self, filename):
remove_if_exists(filename)
f = file(filename, "w")
f.close()
try:
self._do_single(filename)
finally:
os.unlink(filename)
self.assertTrue(not os.path.exists(filename))
# and again with os.open.
f = os.open(filename, os.O_CREAT)
os.close(f)
try:
self._do_single(filename)
finally:
os.unlink(filename)
def _test_equivalent(self, filename1, filename2):
remove_if_exists(filename1)
self.assertTrue(not os.path.exists(filename2))
f = file(filename1, "w")
f.close()
try:
self._do_equivalent(filename1, filename2)
finally:
os.unlink(filename1)
# The 'test' functions are unittest entry points, and simply call our
# _test functions with each of the filename combinations we wish to test
def test_single_files(self):
self._test_single(TESTFN_ENCODED)
self._test_single(TESTFN_UNICODE)
if TESTFN_UNICODE_UNENCODEABLE is not None:
self._test_single(TESTFN_UNICODE_UNENCODEABLE)
def test_equivalent_files(self):
self._test_equivalent(TESTFN_ENCODED, TESTFN_UNICODE)
self._test_equivalent(TESTFN_UNICODE, TESTFN_ENCODED)
def test_directories(self):
# For all 'equivalent' combinations:
# Make dir with encoded, chdir with unicode, checkdir with encoded
# (or unicode/encoded/unicode, etc
ext = ".dir"
self._do_directory(TESTFN_ENCODED+ext, TESTFN_ENCODED+ext, True)
self._do_directory(TESTFN_ENCODED+ext, TESTFN_UNICODE+ext, True)
self._do_directory(TESTFN_UNICODE+ext, TESTFN_ENCODED+ext, False)
self._do_directory(TESTFN_UNICODE+ext, TESTFN_UNICODE+ext, False)
# Our directory name that can't use a non-unicode name.
if TESTFN_UNICODE_UNENCODEABLE is not None:
self._do_directory(TESTFN_UNICODE_UNENCODEABLE+ext,
TESTFN_UNICODE_UNENCODEABLE+ext,
False)
def test_main():
run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
# Test some Unicode file name semantics
# We dont test many operations on files other than
# that their names can be used with Unicode characters.
import os, glob, time, shutil
import unicodedata
import unittest
from test.test_support import run_unittest, TESTFN_UNICODE
from test.test_support import TESTFN_ENCODING, TESTFN_UNICODE_UNENCODEABLE
try:
TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
except (UnicodeError, TypeError):
# Either the file system encoding is None, or the file name
# cannot be encoded in the file system encoding.
raise unittest.SkipTest("No Unicode filesystem semantics on this platform.")
if TESTFN_ENCODED.decode(TESTFN_ENCODING) != TESTFN_UNICODE:
# The file system encoding does not support Latin-1
# (which test_support assumes), so try the file system
# encoding instead.
import sys
try:
TESTFN_UNICODE = unicode("@test-\xe0\xf2", sys.getfilesystemencoding())
TESTFN_ENCODED = TESTFN_UNICODE.encode(TESTFN_ENCODING)
if '?' in TESTFN_ENCODED:
# MBCS will not report the error properly
raise UnicodeError, "mbcs encoding problem"
except (UnicodeError, TypeError):
raise unittest.SkipTest("Cannot find a suiteable filename.")
if TESTFN_ENCODED.decode(TESTFN_ENCODING) != TESTFN_UNICODE:
raise unittest.SkipTest("Cannot find a suitable filename.")
def remove_if_exists(filename):
if os.path.exists(filename):
os.unlink(filename)
class TestUnicodeFiles(unittest.TestCase):
    """Exercise filesystem operations using Unicode and byte-encoded names.

    Each check is run for a Unicode filename, its encoded (byte-string)
    spelling, and -- where the platform supports it -- a name that cannot
    be encoded at all.
    """
    # The 'do_' functions are the actual tests. They generally assume the
    # file already exists etc.
    # Do all the tests we can given only a single filename. The file should
    # exist.
    def _do_single(self, filename):
        """Check stat/access/chmod/utime/glob/listdir behaviour for one name."""
        self.assertTrue(os.path.exists(filename))
        self.assertTrue(os.path.isfile(filename))
        self.assertTrue(os.access(filename, os.R_OK))
        self.assertTrue(os.path.exists(os.path.abspath(filename)))
        self.assertTrue(os.path.isfile(os.path.abspath(filename)))
        self.assertTrue(os.access(os.path.abspath(filename), os.R_OK))
        os.chmod(filename, 0777)
        os.utime(filename, None)
        os.utime(filename, (time.time(), time.time()))
        # Copy/rename etc tests using the same filename
        self._do_copyish(filename, filename)
        # Filename should appear in glob output
        self.assertTrue(
            os.path.abspath(filename)==os.path.abspath(glob.glob(filename)[0]))
        # basename should appear in listdir.
        path, base = os.path.split(os.path.abspath(filename))
        if isinstance(base, str):
            base = base.decode(TESTFN_ENCODING)
        file_list = os.listdir(path)
        # listdir() with a unicode arg may or may not return Unicode
        # objects, depending on the platform.
        if file_list and isinstance(file_list[0], str):
            file_list = [f.decode(TESTFN_ENCODING) for f in file_list]
        # Normalize the unicode strings, as round-tripping the name via the OS
        # may return a different (but equivalent) value.
        base = unicodedata.normalize("NFD", base)
        file_list = [unicodedata.normalize("NFD", f) for f in file_list]
        self.assertIn(base, file_list)
    # Do as many "equivalancy' tests as we can - ie, check that although we
    # have different types for the filename, they refer to the same file.
    def _do_equivalent(self, filename1, filename2):
        """Check two differently-typed spellings refer to the same file."""
        # Note we only check "filename1 against filename2" - we don't bother
        # checking "filename2 against 1", as we assume we are called again with
        # the args reversed.
        self.assertTrue(type(filename1)!=type(filename2),
                        "No point checking equivalent filenames of the same type")
        # stat and lstat should return the same results.
        self.assertEqual(os.stat(filename1),
                         os.stat(filename2))
        self.assertEqual(os.lstat(filename1),
                         os.lstat(filename2))
        # Copy/rename etc tests using equivalent filename
        self._do_copyish(filename1, filename2)
    # Tests that copy, move, etc one file to another.
    def _do_copyish(self, filename1, filename2):
        """Rename/copy/move the file between two (possibly equivalent) names."""
        # Should be able to rename the file using either name.
        self.assertTrue(os.path.isfile(filename1)) # must exist.
        os.rename(filename1, filename2 + ".new")
        self.assertTrue(os.path.isfile(filename1+".new"))
        os.rename(filename1 + ".new", filename2)
        self.assertTrue(os.path.isfile(filename2))
        shutil.copy(filename1, filename2 + ".new")
        os.unlink(filename1 + ".new") # remove using equiv name.
        # And a couple of moves, one using each name.
        shutil.move(filename1, filename2 + ".new")
        self.assertTrue(not os.path.exists(filename2))
        shutil.move(filename1 + ".new", filename2)
        self.assertTrue(os.path.exists(filename1))
        # Note - due to the implementation of shutil.move,
        # it tries a rename first. This only fails on Windows when on
        # different file systems - and this test can't ensure that.
        # So we test the shutil.copy2 function, which is the thing most
        # likely to fail.
        shutil.copy2(filename1, filename2 + ".new")
        os.unlink(filename1 + ".new")
    def _do_directory(self, make_name, chdir_name, encoded):
        """mkdir with one name, chdir with another, verify getcwd round-trips."""
        cwd = os.getcwd()
        if os.path.isdir(make_name):
            os.rmdir(make_name)
        os.mkdir(make_name)
        try:
            os.chdir(chdir_name)
            try:
                if not encoded:
                    cwd_result = os.getcwdu()
                    name_result = make_name
                else:
                    cwd_result = os.getcwd().decode(TESTFN_ENCODING)
                    name_result = make_name.decode(TESTFN_ENCODING)
                # Normalize before comparing: the OS may hand back an
                # equivalent but differently-composed Unicode form.
                cwd_result = unicodedata.normalize("NFD", cwd_result)
                name_result = unicodedata.normalize("NFD", name_result)
                self.assertEqual(os.path.basename(cwd_result),name_result)
            finally:
                os.chdir(cwd)
        finally:
            os.rmdir(make_name)
    # The '_test' functions 'entry points with params' - ie, what the
    # top-level 'test' functions would be if they could take params
    def _test_single(self, filename):
        """Create *filename* (via file() and os.open) and run _do_single."""
        remove_if_exists(filename)
        f = file(filename, "w")
        f.close()
        try:
            self._do_single(filename)
        finally:
            os.unlink(filename)
        self.assertTrue(not os.path.exists(filename))
        # and again with os.open.
        f = os.open(filename, os.O_CREAT)
        os.close(f)
        try:
            self._do_single(filename)
        finally:
            os.unlink(filename)
    def _test_equivalent(self, filename1, filename2):
        """Create filename1 and run the equivalence checks against filename2."""
        remove_if_exists(filename1)
        self.assertTrue(not os.path.exists(filename2))
        f = file(filename1, "w")
        f.close()
        try:
            self._do_equivalent(filename1, filename2)
        finally:
            os.unlink(filename1)
    # The 'test' functions are unittest entry points, and simply call our
    # _test functions with each of the filename combinations we wish to test
    def test_single_files(self):
        """Run single-name checks for encoded, Unicode and unencodable names."""
        self._test_single(TESTFN_ENCODED)
        self._test_single(TESTFN_UNICODE)
        if TESTFN_UNICODE_UNENCODEABLE is not None:
            self._test_single(TESTFN_UNICODE_UNENCODEABLE)
    def test_equivalent_files(self):
        """Run the equivalence checks in both argument orders."""
        self._test_equivalent(TESTFN_ENCODED, TESTFN_UNICODE)
        self._test_equivalent(TESTFN_UNICODE, TESTFN_ENCODED)
    def test_directories(self):
        """Run the directory checks for all name-type combinations."""
        # For all 'equivalent' combinations:
        # Make dir with encoded, chdir with unicode, checkdir with encoded
        # (or unicode/encoded/unicode, etc
        ext = ".dir"
        self._do_directory(TESTFN_ENCODED+ext, TESTFN_ENCODED+ext, True)
        self._do_directory(TESTFN_ENCODED+ext, TESTFN_UNICODE+ext, True)
        self._do_directory(TESTFN_UNICODE+ext, TESTFN_ENCODED+ext, False)
        self._do_directory(TESTFN_UNICODE+ext, TESTFN_UNICODE+ext, False)
        # Our directory name that can't use a non-unicode name.
        if TESTFN_UNICODE_UNENCODEABLE is not None:
            self._do_directory(TESTFN_UNICODE_UNENCODEABLE+ext,
                               TESTFN_UNICODE_UNENCODEABLE+ext,
                               False)
def test_main():
    """Entry point used by regrtest: run every test in this module."""
    run_unittest(__name__)
if __name__ == "__main__":
    test_main()
|
en
| 0.877249
|
# Test some Unicode file name semantics # We dont test many operations on files other than # that their names can be used with Unicode characters. # Either the file system encoding is None, or the file name # cannot be encoded in the file system encoding. # The file system encoding does not support Latin-1 # (which test_support assumes), so try the file system # encoding instead. # MBCS will not report the error properly # The 'do_' functions are the actual tests. They generally assume the # file already exists etc. # Do all the tests we can given only a single filename. The file should # exist. # Copy/rename etc tests using the same filename # Filename should appear in glob output # basename should appear in listdir. # listdir() with a unicode arg may or may not return Unicode # objects, depending on the platform. # Normalize the unicode strings, as round-tripping the name via the OS # may return a different (but equivalent) value. # Do as many "equivalancy' tests as we can - ie, check that although we # have different types for the filename, they refer to the same file. # Note we only check "filename1 against filename2" - we don't bother # checking "filename2 against 1", as we assume we are called again with # the args reversed. # stat and lstat should return the same results. # Copy/rename etc tests using equivalent filename # Tests that copy, move, etc one file to another. # Should be able to rename the file using either name. # must exist. # remove using equiv name. # And a couple of moves, one using each name. # Note - due to the implementation of shutil.move, # it tries a rename first. This only fails on Windows when on # different file systems - and this test can't ensure that. # So we test the shutil.copy2 function, which is the thing most # likely to fail. # The '_test' functions 'entry points with params' - ie, what the # top-level 'test' functions would be if they could take params # and again with os.open. 
# The 'test' functions are unittest entry points, and simply call our # _test functions with each of the filename combinations we wish to test # For all 'equivalent' combinations: # Make dir with encoded, chdir with unicode, checkdir with encoded # (or unicode/encoded/unicode, etc # Our directory name that can't use a non-unicode name.
| 3.045214
| 3
|
homeassistant/components/cover/garadget.py
|
TastyPi/home-assistant
| 13
|
6626573
|
"""
Platform for the garadget cover component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/garadget/
"""
import logging
import voluptuous as vol
import requests
from homeassistant.components.cover import CoverDevice, PLATFORM_SCHEMA
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.const import CONF_DEVICE, CONF_USERNAME, CONF_PASSWORD,\
CONF_ACCESS_TOKEN, CONF_NAME, STATE_UNKNOWN, STATE_CLOSED, STATE_OPEN,\
CONF_COVERS
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = 'Garadget'
# Extra state attributes exposed via device_state_attributes.
ATTR_SIGNAL_STRENGTH = "wifi signal strength (dB)"
ATTR_TIME_IN_STATE = "time in state"
ATTR_SENSOR_STRENGTH = "sensor reflection rate"
ATTR_AVAILABLE = "available"
# Door states reported by Garadget beyond the generic open/closed constants.
STATE_OPENING = "opening"
STATE_CLOSING = "closing"
STATE_STOPPED = "stopped"
STATE_OFFLINE = "offline"
# Map raw status strings from the Garadget API onto Home Assistant states.
STATES_MAP = {
    "open": STATE_OPEN,
    "opening": STATE_OPENING,
    "closed": STATE_CLOSED,
    "closing": STATE_CLOSING,
    "stopped": STATE_STOPPED
}
# Validation of the user's configuration
COVER_SCHEMA = vol.Schema({
    vol.Optional(CONF_DEVICE): cv.string,
    vol.Optional(CONF_USERNAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_ACCESS_TOKEN): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_COVERS): vol.Schema({cv.slug: COVER_SCHEMA}),
})
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Demo covers."""
    device_map = config.get(CONF_COVERS, {})
    _LOGGER.debug(device_map)

    def _device_args(dev_id, conf):
        # Collapse one config entry into the kwargs GaradgetCover expects.
        return {
            "name": conf.get(CONF_NAME),
            "device_id": conf.get(CONF_DEVICE, dev_id),
            "username": conf.get(CONF_USERNAME),
            "password": conf.get(CONF_PASSWORD),
            "access_token": conf.get(CONF_ACCESS_TOKEN)
        }

    add_devices([GaradgetCover(hass, _device_args(dev_id, conf))
                 for dev_id, conf in device_map.items()])
class GaradgetCover(CoverDevice):
    """Representation of a Garadget garage-door cover.

    Talks to the Particle cloud REST API; if no access token is supplied
    in the configuration, one is requested with the username/password and
    revoked again when the object is destroyed.
    """

    # pylint: disable=no-self-use, too-many-instance-attributes
    def __init__(self, hass, args):
        """Initialize the cover."""
        self.particle_url = 'https://api.particle.io'
        self.hass = hass
        self._name = args['name']
        self.device_id = args['device_id']
        self.access_token = args['access_token']
        # BUGFIX: this was 'self.obtained_token', but __del__ reads
        # '_obtained_token', raising AttributeError whenever a token was
        # supplied in the config (so the attribute was never set later).
        self._obtained_token = False
        self._username = args['username']
        self._password = args['password']
        self._state = STATE_UNKNOWN
        self.time_in_state = None
        self.signal = None
        self.sensor = None
        self._unsub_listener_cover = None
        self._available = True
        if self.access_token is None:
            self.access_token = self.get_token()
            self._obtained_token = True
        # Lets try to get the configured name if not provided.
        try:
            if self._name is None:
                doorconfig = self._get_variable("doorConfig")
                if doorconfig["nme"] is not None:
                    self._name = doorconfig["nme"]
            self.update()
        except requests.exceptions.ConnectionError as ex:
            _LOGGER.error('Unable to connect to server: %(reason)s',
                          dict(reason=ex))
            self._state = STATE_OFFLINE
            self._available = False
            self._name = DEFAULT_NAME
        except KeyError as ex:
            _LOGGER.warning('Garadget device %(device)s seems to be offline',
                            dict(device=self.device_id))
            self._name = DEFAULT_NAME
            self._state = STATE_OFFLINE
            self._available = False

    def __del__(self):
        """Try to remove token."""
        # Only revoke tokens we created ourselves in get_token().
        if self._obtained_token is True:
            if self.access_token is not None:
                self.remove_token()

    @property
    def name(self):
        """Return the name of the cover."""
        return self._name

    @property
    def should_poll(self):
        """Polling is required to refresh the door status."""
        return True

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        data = {}
        if self.signal is not None:
            data[ATTR_SIGNAL_STRENGTH] = self.signal
        if self.time_in_state is not None:
            data[ATTR_TIME_IN_STATE] = self.time_in_state
        if self.sensor is not None:
            data[ATTR_SENSOR_STRENGTH] = self.sensor
        if self.access_token is not None:
            data[CONF_ACCESS_TOKEN] = self.access_token
        return data

    @property
    def is_closed(self):
        """Return if the cover is closed (None when the state is unknown)."""
        if self._state == STATE_UNKNOWN:
            return None
        return self._state == STATE_CLOSED

    def get_token(self):
        """Get new token for usage during this session."""
        args = {
            'grant_type': 'password',
            'username': self._username,
            'password': self._password
        }
        url = '{}/oauth/token'.format(self.particle_url)
        ret = requests.post(url,
                            auth=('particle', 'particle'),
                            data=args)
        return ret.json()['access_token']

    def remove_token(self):
        """Remove authorization token from API."""
        ret = requests.delete('{}/v1/access_tokens/{}'.format(
            self.particle_url,
            self.access_token),
                              auth=(self._username, self._password))
        return ret.text

    def _start_watcher(self, command):
        """Start polling the device state while a command is in flight."""
        _LOGGER.debug("Starting Watcher for command: %s ", command)
        if self._unsub_listener_cover is None:
            self._unsub_listener_cover = track_utc_time_change(
                self.hass, self._check_state)

    def _check_state(self, now):
        """Check the state of the service during an operation."""
        self.update()
        self.update_ha_state()

    def close_cover(self):
        """Close the cover."""
        # BUGFIX: compare against STATE_CLOSED ("closed") -- the previous
        # literal "close" never matched a mapped state, so a redundant
        # close command was sent even when the door was already closed.
        if self._state not in [STATE_CLOSED, STATE_CLOSING]:
            ret = self._put_command("setState", "close")
            self._start_watcher('close')
            return ret.get('return_value') == 1

    def open_cover(self):
        """Open the cover."""
        if self._state not in [STATE_OPEN, STATE_OPENING]:
            ret = self._put_command("setState", "open")
            self._start_watcher('open')
            return ret.get('return_value') == 1

    def stop_cover(self):
        """Stop the door where it is."""
        if self._state not in [STATE_STOPPED]:
            ret = self._put_command("setState", "stop")
            self._start_watcher('stop')
            # Use .get() like the other commands so a missing key does
            # not raise inside the service call.
            return ret.get('return_value') == 1

    def update(self):
        """Get updated status from API."""
        try:
            status = self._get_variable("doorStatus")
            _LOGGER.debug("Current Status: %s", status['status'])
            self._state = STATES_MAP.get(status['status'], STATE_UNKNOWN)
            self.time_in_state = status['time']
            self.signal = status['signal']
            self.sensor = status['sensor']
            # BUGFIX: was misspelled 'self._availble', so the entity never
            # recovered its availability after a successful poll.
            self._available = True
        except requests.exceptions.ConnectionError as ex:
            _LOGGER.error('Unable to connect to server: %(reason)s',
                          dict(reason=ex))
            self._state = STATE_OFFLINE
            self._available = False
        except KeyError as ex:
            _LOGGER.warning('Garadget device %(device)s seems to be offline',
                            dict(device=self.device_id))
            self._state = STATE_OFFLINE
            self._available = False
        # Stop the transition watcher once the door settles.
        if self._state not in [STATE_CLOSING, STATE_OPENING]:
            if self._unsub_listener_cover is not None:
                self._unsub_listener_cover()
                self._unsub_listener_cover = None

    def _get_variable(self, var):
        """Fetch a device variable and parse its 'k=v|k=v' payload."""
        url = '{}/v1/devices/{}/{}?access_token={}'.format(
            self.particle_url,
            self.device_id,
            var,
            self.access_token,
        )
        ret = requests.get(url)
        result = {}
        for pairs in ret.json()['result'].split('|'):
            key = pairs.split('=')
            result[key[0]] = key[1]
        return result

    def _put_command(self, func, arg=None):
        """Send commands to API."""
        params = {'access_token': self.access_token}
        if arg:
            params['command'] = arg
        url = '{}/v1/devices/{}/{}'.format(
            self.particle_url,
            self.device_id,
            func)
        ret = requests.post(url, data=params)
        return ret.json()
|
"""
Platform for the garadget cover component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/garadget/
"""
import logging
import voluptuous as vol
import requests
from homeassistant.components.cover import CoverDevice, PLATFORM_SCHEMA
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.const import CONF_DEVICE, CONF_USERNAME, CONF_PASSWORD,\
CONF_ACCESS_TOKEN, CONF_NAME, STATE_UNKNOWN, STATE_CLOSED, STATE_OPEN,\
CONF_COVERS
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = 'Garadget'
ATTR_SIGNAL_STRENGTH = "wifi signal strength (dB)"
ATTR_TIME_IN_STATE = "time in state"
ATTR_SENSOR_STRENGTH = "sensor reflection rate"
ATTR_AVAILABLE = "available"
STATE_OPENING = "opening"
STATE_CLOSING = "closing"
STATE_STOPPED = "stopped"
STATE_OFFLINE = "offline"
STATES_MAP = {
"open": STATE_OPEN,
"opening": STATE_OPENING,
"closed": STATE_CLOSED,
"closing": STATE_CLOSING,
"stopped": STATE_STOPPED
}
# Validation of the user's configuration
COVER_SCHEMA = vol.Schema({
vol.Optional(CONF_DEVICE): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): vol.Schema({cv.slug: COVER_SCHEMA}),
})
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Demo covers."""
covers = []
devices = config.get(CONF_COVERS, {})
_LOGGER.debug(devices)
for device_id, device_config in devices.items():
args = {
"name": device_config.get(CONF_NAME),
"device_id": device_config.get(CONF_DEVICE, device_id),
"username": device_config.get(CONF_USERNAME),
"password": device_config.get(CONF_PASSWORD),
"access_token": device_config.get(CONF_ACCESS_TOKEN)
}
covers.append(GaradgetCover(hass, args))
add_devices(covers)
class GaradgetCover(CoverDevice):
"""Representation of a demo cover."""
# pylint: disable=no-self-use, too-many-instance-attributes
def __init__(self, hass, args):
"""Initialize the cover."""
self.particle_url = 'https://api.particle.io'
self.hass = hass
self._name = args['name']
self.device_id = args['device_id']
self.access_token = args['access_token']
self.obtained_token = False
self._username = args['username']
self._password = args['password']
self._state = STATE_UNKNOWN
self.time_in_state = None
self.signal = None
self.sensor = None
self._unsub_listener_cover = None
self._available = True
if self.access_token is None:
self.access_token = self.get_token()
self._obtained_token = True
# Lets try to get the configured name if not provided.
try:
if self._name is None:
doorconfig = self._get_variable("doorConfig")
if doorconfig["nme"] is not None:
self._name = doorconfig["nme"]
self.update()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error('Unable to connect to server: %(reason)s',
dict(reason=ex))
self._state = STATE_OFFLINE
self._available = False
self._name = DEFAULT_NAME
except KeyError as ex:
_LOGGER.warning('Garadget device %(device)s seems to be offline',
dict(device=self.device_id))
self._name = DEFAULT_NAME
self._state = STATE_OFFLINE
self._available = False
def __del__(self):
"""Try to remove token."""
if self._obtained_token is True:
if self.access_token is not None:
self.remove_token()
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return True
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the device state attributes."""
data = {}
if self.signal is not None:
data[ATTR_SIGNAL_STRENGTH] = self.signal
if self.time_in_state is not None:
data[ATTR_TIME_IN_STATE] = self.time_in_state
if self.sensor is not None:
data[ATTR_SENSOR_STRENGTH] = self.sensor
if self.access_token is not None:
data[CONF_ACCESS_TOKEN] = self.access_token
return data
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._state == STATE_UNKNOWN:
return None
else:
return self._state == STATE_CLOSED
def get_token(self):
"""Get new token for usage during this session."""
args = {
'grant_type': 'password',
'username': self._username,
'password': self._password
}
url = '{}/oauth/token'.format(self.particle_url)
ret = requests.post(url,
auth=('particle', 'particle'),
data=args)
return ret.json()['access_token']
def remove_token(self):
"""Remove authorization token from API."""
ret = requests.delete('{}/v1/access_tokens/{}'.format(
self.particle_url,
self.access_token),
auth=(self._username, self._password))
return ret.text
def _start_watcher(self, command):
"""Start watcher."""
_LOGGER.debug("Starting Watcher for command: %s ", command)
if self._unsub_listener_cover is None:
self._unsub_listener_cover = track_utc_time_change(
self.hass, self._check_state)
def _check_state(self, now):
"""Check the state of the service during an operation."""
self.update()
self.update_ha_state()
def close_cover(self):
"""Close the cover."""
if self._state not in ["close", "closing"]:
ret = self._put_command("setState", "close")
self._start_watcher('close')
return ret.get('return_value') == 1
def open_cover(self):
"""Open the cover."""
if self._state not in ["open", "opening"]:
ret = self._put_command("setState", "open")
self._start_watcher('open')
return ret.get('return_value') == 1
def stop_cover(self):
"""Stop the door where it is."""
if self._state not in ["stopped"]:
ret = self._put_command("setState", "stop")
self._start_watcher('stop')
return ret['return_value'] == 1
def update(self):
"""Get updated status from API."""
try:
status = self._get_variable("doorStatus")
_LOGGER.debug("Current Status: %s", status['status'])
self._state = STATES_MAP.get(status['status'], STATE_UNKNOWN)
self.time_in_state = status['time']
self.signal = status['signal']
self.sensor = status['sensor']
self._availble = True
except requests.exceptions.ConnectionError as ex:
_LOGGER.error('Unable to connect to server: %(reason)s',
dict(reason=ex))
self._state = STATE_OFFLINE
except KeyError as ex:
_LOGGER.warning('Garadget device %(device)s seems to be offline',
dict(device=self.device_id))
self._state = STATE_OFFLINE
if self._state not in [STATE_CLOSING, STATE_OPENING]:
if self._unsub_listener_cover is not None:
self._unsub_listener_cover()
self._unsub_listener_cover = None
def _get_variable(self, var):
"""Get latest status."""
url = '{}/v1/devices/{}/{}?access_token={}'.format(
self.particle_url,
self.device_id,
var,
self.access_token,
)
ret = requests.get(url)
result = {}
for pairs in ret.json()['result'].split('|'):
key = pairs.split('=')
result[key[0]] = key[1]
return result
def _put_command(self, func, arg=None):
"""Send commands to API."""
params = {'access_token': self.access_token}
if arg:
params['command'] = arg
url = '{}/v1/devices/{}/{}'.format(
self.particle_url,
self.device_id,
func)
ret = requests.post(url, data=params)
return ret.json()
|
en
| 0.819768
|
Platform for the garadget cover component. For more details about this platform, please refer to the documentation https://home-assistant.io/components/garadget/ # Validation of the user's configuration Setup the Demo covers. Representation of a demo cover. # pylint: disable=no-self-use, too-many-instance-attributes Initialize the cover. # Lets try to get the configured name if not provided. Try to remove token. Return the name of the cover. No polling needed for a demo cover. Return True if entity is available. Return the device state attributes. Return if the cover is closed. Get new token for usage during this session. Remove authorization token from API. Start watcher. Check the state of the service during an operation. Close the cover. Open the cover. Stop the door where it is. Get updated status from API. Get latest status. Send commands to API.
| 1.975181
| 2
|
Regression/prepare_file_travis.py
|
ruohai0925/artemis
| 0
|
6626574
|
<reponame>ruohai0925/artemis
# Copyright 2018-2019 <NAME>, <NAME>, <NAME>
# <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This script modifies `WarpX-test.ini` (which is used for nightly builds)
# and creates the file `travis-test.ini` (which is used for continous
# integration on TravisCI (https://travis-ci.org/)
# The subtests that are selected are controlled by WARPX_TEST_DIM
# The architecture (CPU/GPU) is selected by WARPX_TEST_ARCH
import re
import os
# Get relevant environment variables
arch = os.environ.get('WARPX_TEST_ARCH', 'CPU')
ci_regular_cartesian_2d = os.environ.get('WARPX_CI_REGULAR_CARTESIAN_2D') == 'TRUE'
ci_regular_cartesian_3d = os.environ.get('WARPX_CI_REGULAR_CARTESIAN_3D') == 'TRUE'
ci_psatd = os.environ.get('WARPX_CI_PSATD', 'TRUE') == 'TRUE'
ci_python_main = os.environ.get('WARPX_CI_PYTHON_MAIN') == 'TRUE'
ci_single_precision = os.environ.get('WARPX_CI_SINGLE_PRECISION') == 'TRUE'
ci_rz_or_nompi = os.environ.get('WARPX_CI_RZ_OR_NOMPI') == 'TRUE'
ci_qed = os.environ.get('WARPX_CI_QED') == 'TRUE'
ci_eb = os.environ.get('WARPX_CI_EB') == 'TRUE'
ci_openpmd = os.environ.get('WARPX_CI_OPENPMD') == 'TRUE'
ci_ccache = os.environ.get('WARPX_CI_CCACHE') == 'TRUE'
ci_num_make_jobs = os.environ.get('WARPX_CI_NUM_MAKE_JOBS', None)
# Find the directory in which the tests should be run
current_dir = os.getcwd()
test_dir = re.sub('warpx/Regression', '', current_dir )
with open('WarpX-tests.ini') as f:
text = f.read()
# Replace default folder name
text = re.sub('/home/regtester/AMReX_RegTesting', test_dir, text)
# Remove the web directory
text = re.sub('[\w\-\/]*/web', '', text)
# Add doComparison = 0 for each test
text = re.sub( '\[(?P<name>.*)\]\nbuildDir = ',
'[\g<name>]\ndoComparison = 0\nbuildDir = ', text )
# Change compile options when running on GPU
if arch == 'GPU':
text = re.sub( 'addToCompileString =',
'addToCompileString = USE_GPU=TRUE USE_OMP=FALSE USE_ACC=TRUE ', text)
text = re.sub( 'COMP\s*=.*', 'COMP = pgi', text )
print('Compiling for %s' %arch)
# Extra dependencies
if ci_openpmd:
text = re.sub('addToCompileString =',
'addToCompileString = USE_OPENPMD=TRUE ', text)
# always build with PSATD support (runtime controlled if used)
if ci_psatd:
text = re.sub('addToCompileString =',
'addToCompileString = USE_PSATD=TRUE ', text)
text = re.sub('USE_PSATD=FALSE',
'', text)
# Ccache
if ci_ccache:
text = re.sub('addToCompileString =',
'addToCompileString = USE_CCACHE=TRUE ', text)
# Add runtime option: crash for unused variables
text = re.sub('runtime_params =',
'runtime_params = amrex.abort_on_unused_inputs=1 ',
text)
# Use less/more cores for compiling, e.g. public CI only provides 2 cores
if ci_num_make_jobs is not None:
text = re.sub( 'numMakeJobs = \d+', 'numMakeJobs = {}'.format(ci_num_make_jobs), text )
# Use only 1 OMP thread for running
text = re.sub( 'numthreads = \d+', 'numthreads = 1', text)
# Prevent emails from being sent
text = re.sub( 'sendEmailWhenFail = 1', 'sendEmailWhenFail = 0', text )
# Remove Python test (does not compile)
text = re.sub( '\[Python_Langmuir\]\n(.+\n)*', '', text)
# Remove Langmuir_x/y/z test (too long; not that useful)
text = re.sub( '\[Langmuir_[xyz]\]\n(.+\n)*', '', text)
# Select the tests to be run
# --------------------------
# - Extract test blocks (they are identified by the fact that they contain "inputFile")
select_test_regex = r'(\[(.+\n)*inputFile(.+\n)*)'
test_blocks = [ match[0] for match in re.findall(select_test_regex, text) ]
# - Remove the test blocks from `text` (only the selected ones will be added back)
text = re.sub( select_test_regex, '', text )
def select_tests(blocks, match_string_list, do_test):
    """Filter test blocks from WarpX-tests.ini by substring matches.

    Parameters
    ----------
    blocks : list of str
        Test blocks (ini sections) to filter.
    match_string_list : list of str
        Substrings to match against each block.
    do_test : bool
        If True, keep only blocks containing every substring;
        if False, keep only blocks containing none of them.

    Returns
    -------
    list of str
        The filtered list of blocks.
    """
    if do_test not in [True, False]:
        raise ValueError("do_test must be True or False")
    # One pass per substring; idiomatic membership tests replace the
    # previous '(do_test == False)' / 'not x in y' forms, and the two
    # near-identical loops are merged.
    for match_string in match_string_list:
        if do_test:
            print('Selecting tests with ' + match_string)
            blocks = [block for block in blocks if match_string in block]
        else:
            print('Selecting tests without ' + match_string)
            blocks = [block for block in blocks if match_string not in block]
    return blocks
if ci_regular_cartesian_2d:
test_blocks = select_tests(test_blocks, ['dim = 2'], True)
test_blocks = select_tests(test_blocks, ['USE_RZ=TRUE'], False)
test_blocks = select_tests(test_blocks, ['PYTHON_MAIN=TRUE'], False)
test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], False)
test_blocks = select_tests(test_blocks, ['useMPI = 0'], False)
test_blocks = select_tests(test_blocks, ['QED=TRUE'], False)
test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], False)
if ci_regular_cartesian_3d:
test_blocks = select_tests(test_blocks, ['dim = 2'], False)
test_blocks = select_tests(test_blocks, ['USE_RZ=TRUE'], False)
test_blocks = select_tests(test_blocks, ['PYTHON_MAIN=TRUE'], False)
test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], False)
test_blocks = select_tests(test_blocks, ['useMPI = 0'], False)
test_blocks = select_tests(test_blocks, ['QED=TRUE'], False)
test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], False)
if ci_python_main:
test_blocks = select_tests(test_blocks, ['PYTHON_MAIN=TRUE'], True)
if ci_single_precision:
test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], True)
if ci_rz_or_nompi:
test_blocks = select_tests(test_blocks, ['PYTHON_MAIN=TRUE'], False)
block1 = select_tests(test_blocks, ['USE_RZ=TRUE'], True)
block2 = select_tests(test_blocks, ['useMPI = 0'], True)
test_blocks = block1 + block2
if ci_qed:
test_blocks = select_tests(test_blocks, ['QED=TRUE'], True)
if ci_eb:
test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], True)
# - Add the selected test blocks to the text
text = text + '\n' + '\n'.join(test_blocks)
with open('travis-tests.ini', 'w') as f:
f.write(text)
|
# Copyright 2018-2019 <NAME>, <NAME>, <NAME>
# <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
# This script modifies `WarpX-test.ini` (which is used for nightly builds)
# and creates the file `travis-test.ini` (which is used for continous
# integration on TravisCI (https://travis-ci.org/)
# The subtests that are selected are controlled by WARPX_TEST_DIM
# The architecture (CPU/GPU) is selected by WARPX_TEST_ARCH
import re
import os
# Get relevant environment variables
arch = os.environ.get('WARPX_TEST_ARCH', 'CPU')
ci_regular_cartesian_2d = os.environ.get('WARPX_CI_REGULAR_CARTESIAN_2D') == 'TRUE'
ci_regular_cartesian_3d = os.environ.get('WARPX_CI_REGULAR_CARTESIAN_3D') == 'TRUE'
ci_psatd = os.environ.get('WARPX_CI_PSATD', 'TRUE') == 'TRUE'
ci_python_main = os.environ.get('WARPX_CI_PYTHON_MAIN') == 'TRUE'
ci_single_precision = os.environ.get('WARPX_CI_SINGLE_PRECISION') == 'TRUE'
ci_rz_or_nompi = os.environ.get('WARPX_CI_RZ_OR_NOMPI') == 'TRUE'
ci_qed = os.environ.get('WARPX_CI_QED') == 'TRUE'
ci_eb = os.environ.get('WARPX_CI_EB') == 'TRUE'
ci_openpmd = os.environ.get('WARPX_CI_OPENPMD') == 'TRUE'
ci_ccache = os.environ.get('WARPX_CI_CCACHE') == 'TRUE'
ci_num_make_jobs = os.environ.get('WARPX_CI_NUM_MAKE_JOBS', None)
# Find the directory in which the tests should be run
current_dir = os.getcwd()
test_dir = re.sub('warpx/Regression', '', current_dir )
with open('WarpX-tests.ini') as f:
text = f.read()
# Replace default folder name
text = re.sub('/home/regtester/AMReX_RegTesting', test_dir, text)
# Remove the web directory
text = re.sub('[\w\-\/]*/web', '', text)
# Add doComparison = 0 for each test
text = re.sub( '\[(?P<name>.*)\]\nbuildDir = ',
'[\g<name>]\ndoComparison = 0\nbuildDir = ', text )
# Change compile options when running on GPU
if arch == 'GPU':
text = re.sub( 'addToCompileString =',
'addToCompileString = USE_GPU=TRUE USE_OMP=FALSE USE_ACC=TRUE ', text)
text = re.sub( 'COMP\s*=.*', 'COMP = pgi', text )
print('Compiling for %s' %arch)
# Extra dependencies
if ci_openpmd:
text = re.sub('addToCompileString =',
'addToCompileString = USE_OPENPMD=TRUE ', text)
# always build with PSATD support (runtime controlled if used)
if ci_psatd:
text = re.sub('addToCompileString =',
'addToCompileString = USE_PSATD=TRUE ', text)
text = re.sub('USE_PSATD=FALSE',
'', text)
# Ccache
if ci_ccache:
text = re.sub('addToCompileString =',
'addToCompileString = USE_CCACHE=TRUE ', text)
# Add runtime option: crash for unused variables
text = re.sub('runtime_params =',
'runtime_params = amrex.abort_on_unused_inputs=1 ',
text)
# Use less/more cores for compiling, e.g. public CI only provides 2 cores
if ci_num_make_jobs is not None:
text = re.sub( 'numMakeJobs = \d+', 'numMakeJobs = {}'.format(ci_num_make_jobs), text )
# Use only 1 OMP thread for running
text = re.sub( 'numthreads = \d+', 'numthreads = 1', text)
# Prevent emails from being sent
text = re.sub( 'sendEmailWhenFail = 1', 'sendEmailWhenFail = 0', text )
# Remove Python test (does not compile)
text = re.sub( '\[Python_Langmuir\]\n(.+\n)*', '', text)
# Remove Langmuir_x/y/z test (too long; not that useful)
text = re.sub( '\[Langmuir_[xyz]\]\n(.+\n)*', '', text)
# Select the tests to be run
# --------------------------
# - Extract test blocks (they are identified by the fact that they contain "inputFile")
select_test_regex = r'(\[(.+\n)*inputFile(.+\n)*)'
test_blocks = [ match[0] for match in re.findall(select_test_regex, text) ]
# - Remove the test blocks from `text` (only the selected ones will be added back)
text = re.sub( select_test_regex, '', text )
def select_tests(blocks, match_string_list, do_test):
    """Filter WarpX-tests.ini test blocks according to *do_test*.

    Parameters
    ----------
    blocks : list of str
        Test-block strings extracted from WarpX-tests.ini.
    match_string_list : list of str
        Substrings matched against each block.
    do_test : bool
        True  -> keep only blocks containing every match string;
        False -> keep only blocks containing none of the match strings.

    Returns
    -------
    list of str
        The filtered list of test blocks.
    """
    if do_test not in (True, False):
        raise ValueError("do_test must be True or False")
    # One pass per match string; the keep/drop predicate is the only
    # difference between the two modes, so share the loop.
    for match_string in match_string_list:
        if do_test:
            print('Selecting tests with ' + match_string)
            blocks = [block for block in blocks if match_string in block]
        else:
            print('Selecting tests without ' + match_string)
            blocks = [block for block in blocks if match_string not in block]
    return blocks
if ci_regular_cartesian_2d:
test_blocks = select_tests(test_blocks, ['dim = 2'], True)
test_blocks = select_tests(test_blocks, ['USE_RZ=TRUE'], False)
test_blocks = select_tests(test_blocks, ['PYTHON_MAIN=TRUE'], False)
test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], False)
test_blocks = select_tests(test_blocks, ['useMPI = 0'], False)
test_blocks = select_tests(test_blocks, ['QED=TRUE'], False)
test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], False)
if ci_regular_cartesian_3d:
test_blocks = select_tests(test_blocks, ['dim = 2'], False)
test_blocks = select_tests(test_blocks, ['USE_RZ=TRUE'], False)
test_blocks = select_tests(test_blocks, ['PYTHON_MAIN=TRUE'], False)
test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], False)
test_blocks = select_tests(test_blocks, ['useMPI = 0'], False)
test_blocks = select_tests(test_blocks, ['QED=TRUE'], False)
test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], False)
if ci_python_main:
test_blocks = select_tests(test_blocks, ['PYTHON_MAIN=TRUE'], True)
if ci_single_precision:
test_blocks = select_tests(test_blocks, ['PRECISION=FLOAT', 'USE_SINGLE_PRECISION_PARTICLES=TRUE'], True)
if ci_rz_or_nompi:
test_blocks = select_tests(test_blocks, ['PYTHON_MAIN=TRUE'], False)
block1 = select_tests(test_blocks, ['USE_RZ=TRUE'], True)
block2 = select_tests(test_blocks, ['useMPI = 0'], True)
test_blocks = block1 + block2
if ci_qed:
test_blocks = select_tests(test_blocks, ['QED=TRUE'], True)
if ci_eb:
test_blocks = select_tests(test_blocks, ['USE_EB=TRUE'], True)
# - Add the selected test blocks to the text
text = text + '\n' + '\n'.join(test_blocks)
with open('travis-tests.ini', 'w') as f:
f.write(text)
|
en
| 0.8177
|
# Copyright 2018-2019 <NAME>, <NAME>, <NAME> # <NAME> # # This file is part of WarpX. # # License: BSD-3-Clause-LBNL # This script modifies `WarpX-test.ini` (which is used for nightly builds) # and creates the file `travis-test.ini` (which is used for continous # integration on TravisCI (https://travis-ci.org/) # The subtests that are selected are controlled by WARPX_TEST_DIM # The architecture (CPU/GPU) is selected by WARPX_TEST_ARCH # Get relevant environment variables # Find the directory in which the tests should be run # Replace default folder name # Remove the web directory # Add doComparison = 0 for each test # Change compile options when running on GPU # Extra dependencies # always build with PSATD support (runtime controlled if used) # Ccache # Add runtime option: crash for unused variables # Use less/more cores for compiling, e.g. public CI only provides 2 cores # Use only 1 OMP thread for running # Prevent emails from being sent # Remove Python test (does not compile) # Remove Langmuir_x/y/z test (too long; not that useful) # Select the tests to be run # -------------------------- # - Extract test blocks (they are identified by the fact that they contain "inputFile") # - Remove the test blocks from `text` (only the selected ones will be added back) Remove or keep tests from list in WarpX-tests.ini according to do_test variable # - Add the selected test blocks to the text
| 1.765017
| 2
|
ex/ex100.py
|
Ozcry/PythonExercicio
| 0
|
6626575
|
'''Faça um programa que tenha uma lista chamada números e duas funções chamadas sorteia() e somaPar(). A primeira função
vai sortear 5 números e vai colocá-los dentro da lista e a segunda função vai mostrar a soma entre todos os valores
PARES sorteados pela função anterior.'''
from random import randint
from time import sleep
def sorteia(num):
    """Draw 5 random integers in [0, 9], append them to *num* and echo them.

    The 0.5 s pause between draws is purely cosmetic (animated console
    output); the caller receives the drawn values through *num*, which is
    mutated in place.
    """
    print('\033[1;33m-=\033[m' * 20)
    print('\033[34mSorteando 5 valores da lista: \033[m', end='')
    for _ in range(5):
        valor = randint(0, 9)  # randint already returns int; no cast needed
        num.append(valor)
        print(valor, end=' ')
        sleep(0.5)
    print('\033[1;32mPRONTO!\033[m')
def somapar(num):
    """Print the sum of the even values contained in *num*."""
    soma = sum(valor for valor in num if valor % 2 == 0)
    print(f'\033[35mSomando os valores pares de\033[m {num}\033[35m, temos \033[m{soma}')
print('\033[1;33m-=\033[m' * 20)
print('\033[1;32mFIM\033[m')
lst = []
sorteia(lst)
somapar(lst)
|
'''Faça um programa que tenha uma lista chamada números e duas funções chamadas sorteia() e somaPar(). A primeira função
vai sortear 5 números e vai colocá-los dentro da lista e a segunda função vai mostrar a soma entre todos os valores
PARES sorteados pela função anterior.'''
from random import randint
from time import sleep
def sorteia(num):
print('\033[1;33m-=\033[m' * 20)
print('\033[34mSorteando 5 valores da lista: \033[m', end='')
for c in range(0, 5):
b = int(randint(0, 9))
num.append(b)
print(b, end=' ')
sleep(0.5)
print('\033[1;32mPRONTO!\033[m')
def somapar(num):
soma = 0
for c in num:
if c % 2 == 0:
soma += c
print(f'\033[35mSomando os valores pares de\033[m {num}\033[35m, temos \033[m{soma}')
print('\033[1;33m-=\033[m' * 20)
print('\033[1;32mFIM\033[m')
lst = []
sorteia(lst)
somapar(lst)
|
pt
| 0.992826
|
Faça um programa que tenha uma lista chamada números e duas funções chamadas sorteia() e somaPar(). A primeira função vai sortear 5 números e vai colocá-los dentro da lista e a segunda função vai mostrar a soma entre todos os valores PARES sorteados pela função anterior.
| 4.143519
| 4
|
source/tool/tuner.py
|
douglasresende/lambda-deep-learning-demo
| 80
|
6626576
|
import os
import random
import importlib
from source.tool import config_parser
CONVERT_STR2NUM = ["piecewise_lr_decay", "piecewise_boundaries"]
def type_convert(v):
    """Convert string *v* to int, float, or leave it as str.

    Tries the most specific type first: a value that parses as an integer
    is returned as int, a value that only parses as a float (e.g. "1.5",
    "1e3", "inf") as float, and anything else is returned unchanged.

    The previous implementation routed any float-parsable string with no
    '.' (such as "1e3") to int() and crashed with ValueError; EAFP
    conversion handles every case uniformly.
    """
    try:
        return int(v)
    except ValueError:
        pass
    try:
        return float(v)
    except ValueError:
        return v
def excute(app_config, runner_config, callback_config,
           inputter_config, modeler_config,
           inputter_module, modeler_module,
           runner_module,
           callback_names):
    """Wire up the application components and run it.

    Dynamically imports the configured augmenter, encoder, network and
    callback modules, builds the inputter / modeler / runner objects via
    each module's build() factory, then starts the runner.

    NOTE(review): the name is presumably a typo for "execute"; kept as-is
    because train() and eval() call it by this name.
    """
    # The augmenter is optional: an empty/None config entry disables it.
    augmenter = (None if not inputter_config.augmenter else
                 importlib.import_module(
                     "source.augmenter." + inputter_config.augmenter))
    # The encoder is optional too; only loaded when the config declares one.
    encoder = (None if not hasattr(inputter_config, 'encode_method') else
               importlib.import_module(
                   "source.network.encoder." + inputter_config.encode_method))
    net = importlib.import_module("source.network." + modeler_config.network)
    # Each callback module exposes a build(config) factory.
    callbacks = []
    for name in callback_names:
        callback = importlib.import_module(
            "source.callback." + name).build(
            callback_config)
        callbacks.append(callback)
    # Inputter.build has a different arity depending on whether an encoder
    # is in play, hence the branch.
    if encoder:
        inputter = inputter_module.build(
            inputter_config, augmenter, encoder)
    else:
        inputter = inputter_module.build(
            inputter_config, augmenter)
    modeler = modeler_module.build(
        modeler_config, net)
    runner = runner_module.build(
        runner_config, inputter, modeler, callbacks)
    # Run application
    runner.run()
def train(app_config,
          runner_config,
          callback_config,
          inputter_config,
          modeler_config,
          inputter_module,
          modeler_module,
          runner_module):
    """Switch every component into training mode and run the pipeline."""
    runner_config.reduce_ops = runner_config.train_reduce_ops
    # All mode-aware config objects must agree on the mode string.
    for cfg in (runner_config, callback_config, inputter_config, modeler_config):
        cfg.mode = "train"
    inputter_config.dataset_meta = inputter_config.train_dataset_meta
    excute(app_config,
           runner_config,
           callback_config,
           inputter_config,
           modeler_config,
           inputter_module,
           modeler_module,
           runner_module,
           callback_config.train_callbacks)
def eval(app_config,
         runner_config,
         callback_config,
         inputter_config,
         modeler_config,
         inputter_module,
         modeler_module,
         runner_module):
    """Switch every component into evaluation mode and run the pipeline.

    NOTE: shadows the builtin eval(); the name is kept for interface
    compatibility with existing callers.
    """
    runner_config.reduce_ops = runner_config.eval_reduce_ops
    # All mode-aware config objects must agree on the mode string.
    for cfg in (runner_config, callback_config, inputter_config, modeler_config):
        cfg.mode = "eval"
    # A single pass over the evaluation split is enough.
    inputter_config.epochs = 1
    # Evaluation may use a split different from training, but should never
    # use the test set; the split comes from the config's eval_dataset_meta.
    inputter_config.dataset_meta = inputter_config.eval_dataset_meta
    excute(app_config,
           runner_config,
           callback_config,
           inputter_config,
           modeler_config,
           inputter_module,
           modeler_module,
           runner_module,
           callback_config.eval_callbacks)
def update(app_config, runner_config, callback_config, inputter_config, modeler_config, field, value):
    """Set *field* to *value* on every config object that already defines it.

    Returns the five config objects as a list, in the same order they were
    passed in, so callers can re-unpack them.
    """
    configs = [app_config, runner_config,
               callback_config, inputter_config, modeler_config]
    for cfg in (c for c in configs if hasattr(c, field)):
        setattr(cfg, field, value)
    return configs
def tune(app_config, runner_config, callback_config,
         inputter_config, modeler_config,
         inputter_module, modeler_module,
         runner_module):
    """Random-search hyper-parameter tuning: run train + eval per trial.

    Reads a YAML tuning config with three sections:
      * num_trials  -- number of distinct trials to run
      * fixedparams -- values applied to every trial (e.g. epochs reset)
      * hyperparams -- "generate" fields sampled log-uniformly (10**x with
        x uniform in a [low, high] exponent range); "select" fields chosen
        uniformly from a comma-separated list.
    Each trial trains and evaluates in its own model directory whose name
    encodes the sampled values.
    """
    # Parse config file
    tune_config = config_parser.yaml_parse(modeler_config.tune_config_path)
    # Setup the tuning jobs
    num_trials = tune_config["num_trials"]
    dir_ori = os.path.join(callback_config.model_dir, "tune", "trial")
    t = 0
    while t < num_trials:
        dir_update = dir_ori
        # Update fixed params (epochs needs to be reset)
        for field in tune_config["fixedparams"].keys():
            value = tune_config["fixedparams"][field]
            if field in CONVERT_STR2NUM:
                # These fields are comma-separated number lists in the YAML.
                value = list(map(float, tune_config["fixedparams"][field].split(",")))
            app_config, runner_config, callback_config, inputter_config, modeler_config = \
                update(app_config, runner_config, callback_config, inputter_config, modeler_config, field, value)
        # Update hyper parameter
        for sample_type in tune_config["hyperparams"].keys():
            for field in tune_config["hyperparams"][sample_type].keys():
                if sample_type == "generate":
                    # Log-uniform sample: 10**x with x uniform in [low, high].
                    values = list(
                        map(float,
                            tune_config["hyperparams"][sample_type][field].split(",")))
                    v = 10 ** random.uniform(values[0], values[1])
                    app_config, runner_config, callback_config, inputter_config, modeler_config = \
                        update(app_config, runner_config, callback_config, inputter_config, modeler_config, field, v)
                    dir_update = dir_update + "_" + field + "_" + "{0:.5f}".format(v)
                elif sample_type == "select":
                    # Pick one listed value, converted to int/float/str.
                    values = tune_config["hyperparams"][sample_type][field].split(",")
                    v = type_convert(random.choice(values))
                    app_config, runner_config, callback_config, inputter_config, modeler_config = \
                        update(app_config, runner_config, callback_config, inputter_config, modeler_config, field, v)
                    dir_update = dir_update + "_" + field + "_" + str(v)
        if not os.path.isdir(dir_update):
            # Only count trials whose directory is new: an existing directory
            # means this exact parameter combination was already tried.
            # NOTE(review): if every sampled combination already exists, this
            # loop never terminates -- confirm that is acceptable.
            callback_config.model_dir = dir_update
            train(app_config,
                  runner_config,
                  callback_config,
                  inputter_config,
                  modeler_config,
                  inputter_module,
                  modeler_module,
                  runner_module)
            eval(app_config,
                 runner_config,
                 callback_config,
                 inputter_config,
                 modeler_config,
                 inputter_module,
                 modeler_module,
                 runner_module)
            t = t + 1
|
import os
import random
import importlib
from source.tool import config_parser
CONVERT_STR2NUM = ["piecewise_lr_decay", "piecewise_boundaries"]
def type_convert(v):
""" convert value to int, float or str"""
try:
float(v)
tp = 1 if v.count(".") == 0 else 2
except ValueError:
tp = -1
if tp == 1:
return int(v)
elif tp == 2:
return float(v)
elif tp == -1:
return v
else:
assert False, "Unknown type for hyper parameter: {}".format(tp)
def excute(app_config, runner_config, callback_config,
inputter_config, modeler_config,
inputter_module, modeler_module,
runner_module,
callback_names):
augmenter = (None if not inputter_config.augmenter else
importlib.import_module(
"source.augmenter." + inputter_config.augmenter))
encoder = (None if not hasattr(inputter_config, 'encode_method') else
importlib.import_module(
"source.network.encoder." + inputter_config.encode_method))
net = importlib.import_module("source.network." + modeler_config.network)
callbacks = []
for name in callback_names:
callback = importlib.import_module(
"source.callback." + name).build(
callback_config)
callbacks.append(callback)
if encoder:
inputter = inputter_module.build(
inputter_config, augmenter, encoder)
else:
inputter = inputter_module.build(
inputter_config, augmenter)
modeler = modeler_module.build(
modeler_config, net)
runner = runner_module.build(
runner_config, inputter, modeler, callbacks)
# Run application
runner.run()
def train(app_config,
runner_config,
callback_config,
inputter_config,
modeler_config,
inputter_module,
modeler_module,
runner_module):
runner_config.reduce_ops = runner_config.train_reduce_ops
runner_config.mode = "train"
callback_config.mode = "train"
inputter_config.mode = "train"
modeler_config.mode = "train"
inputter_config.dataset_meta = inputter_config.train_dataset_meta
excute(app_config,
runner_config,
callback_config,
inputter_config,
modeler_config,
inputter_module,
modeler_module,
runner_module,
callback_config.train_callbacks)
def eval(app_config,
runner_config,
callback_config,
inputter_config,
modeler_config,
inputter_module,
modeler_module,
runner_module):
runner_config.reduce_ops = runner_config.eval_reduce_ops
runner_config.mode = "eval"
callback_config.mode = "eval"
inputter_config.mode = "eval"
modeler_config.mode = "eval"
inputter_config.epochs = 1
# Optional: use a different split for evaluation
# Should not use testing dataset
# inputter_config.dataset_meta = \
# os.path.expanduser(config.eval_dataset_meta)
inputter_config.dataset_meta = inputter_config.eval_dataset_meta
excute(app_config,
runner_config,
callback_config,
inputter_config,
modeler_config,
inputter_module,
modeler_module,
runner_module,
callback_config.eval_callbacks)
def update(app_config, runner_config, callback_config, inputter_config, modeler_config, field, value):
configs = [app_config, runner_config, callback_config, inputter_config, modeler_config]
for config in configs:
if hasattr(config, field):
setattr(config, field, value)
return configs
def tune(app_config, runner_config, callback_config,
inputter_config, modeler_config,
inputter_module, modeler_module,
runner_module):
# Parse config file
tune_config = config_parser.yaml_parse(modeler_config.tune_config_path)
# Setup the tuning jobs
num_trials = tune_config["num_trials"]
dir_ori = os.path.join(callback_config.model_dir, "tune", "trial")
t = 0
while t < num_trials:
dir_update = dir_ori
# Update fixed params (epochs needs to be reset)
for field in tune_config["fixedparams"].keys():
value = tune_config["fixedparams"][field]
if field in CONVERT_STR2NUM:
value = list(map(float, tune_config["fixedparams"][field].split(",")))
app_config, runner_config, callback_config, inputter_config, modeler_config = \
update(app_config, runner_config, callback_config, inputter_config, modeler_config, field, value)
# Update hyper parameter
for sample_type in tune_config["hyperparams"].keys():
for field in tune_config["hyperparams"][sample_type].keys():
if sample_type == "generate":
values = list(
map(float,
tune_config["hyperparams"][sample_type][field].split(",")))
v = 10 ** random.uniform(values[0], values[1])
app_config, runner_config, callback_config, inputter_config, modeler_config = \
update(app_config, runner_config, callback_config, inputter_config, modeler_config, field, v)
dir_update = dir_update + "_" + field + "_" + "{0:.5f}".format(v)
elif sample_type == "select":
values = tune_config["hyperparams"][sample_type][field].split(",")
v = type_convert(random.choice(values))
app_config, runner_config, callback_config, inputter_config, modeler_config = \
update(app_config, runner_config, callback_config, inputter_config, modeler_config, field, v)
dir_update = dir_update + "_" + field + "_" + str(v)
if not os.path.isdir(dir_update):
callback_config.model_dir = dir_update
train(app_config,
runner_config,
callback_config,
inputter_config,
modeler_config,
inputter_module,
modeler_module,
runner_module)
eval(app_config,
runner_config,
callback_config,
inputter_config,
modeler_config,
inputter_module,
modeler_module,
runner_module)
t = t + 1
|
en
| 0.348751
|
convert value to int, float or str # Run application # Optional: use a different split for evaluation # Should not use testing dataset # inputter_config.dataset_meta = \ # os.path.expanduser(config.eval_dataset_meta) # Parse config file # Setup the tuning jobs # Update fixed params (epochs needs to be reset) # Update hyper parameter
| 2.727533
| 3
|
rl-multilayer/setup.py
|
hammer-wang/oml-ppo
| 8
|
6626577
|
<gh_stars>1-10
from setuptools import setup
setup(name='RLMultilayer',
version='0.0.1',
install_requires=['gym', 'tmm'] # And any other dependencies
)
|
from setuptools import setup
setup(name='RLMultilayer',
version='0.0.1',
install_requires=['gym', 'tmm'] # And any other dependencies
)
|
en
| 0.390392
|
# And any other dependencies
| 1.133809
| 1
|
mqtt/mqtt.py
|
robinvanemden/sensors
| 3
|
6626578
|
<reponame>robinvanemden/sensors
"""
JADS 2020 Data-Driven Food Value Chain course
Introduction to Sensors
Minimal MQTT client demo - demonstrates ease of use.
Makes use of the open http://www.mqtt-dashboard.com/index.html
And yes, globals are evil ;)
"""
import threading
import paho.mqtt.client as mqtt
import os
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: greet the user and subscribe to the chat topic.

    `topic` and `publisher_name` are module-level globals set by config()
    before the client connects.
    """
    global topic
    global publisher_name
    print("Welcome " + publisher_name + ", you're connected to " + topic + "\n")
    print("Type 'q' to exit the chat.\n")
    client.subscribe(topic)
def on_message(client, userdata, msg):
    """MQTT message callback: print "<sender>:<text>" for other users' messages.

    Payloads are expected in the form "<sender>,<text>".  Messages without
    a comma (e.g. from unrelated clients on the public broker) are ignored
    instead of raising IndexError.  The user's own messages are suppressed.
    """
    global publisher_name
    incoming_message = msg.payload.decode()
    splitted_msg = [x.strip() for x in incoming_message.split(',', 1)]
    if len(splitted_msg) < 2:
        return  # malformed payload: no "<sender>,<text>" separator
    sender_name = splitted_msg[0]
    if sender_name != publisher_name:
        print(sender_name + ":" + splitted_msg[1])
def publish():
    """Read chat lines from stdin and publish them until the user quits.

    Implemented as a loop rather than the original tail-recursion so that
    long chat sessions cannot exhaust the interpreter's recursion limit.
    Typing any quit keyword hard-exits the whole process (os._exit), which
    also stops the receive thread.
    """
    global publisher_name
    global topic
    while True:
        new_msg = input()
        if new_msg in ("quit", "q", "exit", "quit()"):
            os._exit(1)
        client.publish(topic, publisher_name + "," + new_msg)
def receive():
client.on_connect = on_connect
client.on_message = on_message
client.loop_forever()
def config():
    """Prompt until an alphabetic username is entered; return the loading banner."""
    global publisher_name
    global topic
    publisher_name = input("Enter your username: ")
    while not publisher_name.isalpha():
        print("Please enter characters A-Z only")
        publisher_name = input("Enter your username: ")
    return "Loading chat (" + topic + ")..."
topic = "jads/intro-to-sensors"
print(config())
client = mqtt.Client()
client.connect("broker.hivemq.com", 1883, 60)
publish_thread = threading.Thread(target=publish)
receive_thread = threading.Thread(target=receive)
publish_thread.start()
receive_thread.start()
|
"""
JADS 2020 Data-Driven Food Value Chain course
Introduction to Sensors
Minimal MQTT client demo - demonstrates ease of use.
Makes use of the open http://www.mqtt-dashboard.com/index.html
And yes, globals are evil ;)
"""
import threading
import paho.mqtt.client as mqtt
import os
def on_connect(client, userdata, flags, rc):
global topic
global publisher_name
print("Welcome " + publisher_name + ", you're connected to " + topic + "\n")
print("Type 'q' to exit the chat.\n")
client.subscribe(topic)
def on_message(client, userdata, msg):
global publisher_name
incoming_message = msg.payload.decode()
splitted_msg = [x.strip() for x in incoming_message.split(',', 1)]
sender_name = splitted_msg[0]
if sender_name != publisher_name:
print(sender_name + ":" + splitted_msg[1])
def publish():
global publisher_name
global topic
new_msg = input()
if new_msg == "quit" or new_msg == "q" or new_msg == "exit" or new_msg == "quit()":
os._exit(1)
client.publish(topic, publisher_name + "," + new_msg)
return publish()
def receive():
client.on_connect = on_connect
client.on_message = on_message
client.loop_forever()
def config():
global publisher_name
global topic
while True:
publisher_name = input("Enter your username: ")
if publisher_name.isalpha():
break
print("Please enter characters A-Z only")
return "Loading chat (" + topic + ")..."
topic = "jads/intro-to-sensors"
print(config())
client = mqtt.Client()
client.connect("broker.hivemq.com", 1883, 60)
publish_thread = threading.Thread(target=publish)
receive_thread = threading.Thread(target=receive)
publish_thread.start()
receive_thread.start()
|
en
| 0.610516
|
JADS 2020 Data-Driven Food Value Chain course Introduction to Sensors Minimal MQTT client demo - demonstrates ease of use. Makes use of the open http://www.mqtt-dashboard.com/index.html And yes, globals are evil ;)
| 2.960768
| 3
|
components/fuselage/primitives/fcone.py
|
skilkis/KBE
| 6
|
6626579
|
<filename>components/fuselage/primitives/fcone.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Required ParaPy Modules
from parapy.geom import *
from parapy.core import *
# Required Modules
from fframe import *
from mframe import *
from directories import *
from math import sqrt
__author__ = ["<NAME>"]
__all__ = ["FCone"]
class FCone(GeomBase):
    """ This creates the nose or tailcone for the fuselage.

    The cone is lofted from a supporting frame to a single tip point: the
    frame curve and two interpolated guide curves (vertical and horizontal)
    are split at the tip, one half-shell is built from filled surfaces, and
    the other half is obtained by mirroring.

    :returns: Nose and/or tailcone for fuselage

    :param support_frame: This is an FFrame Class
    :type support_frame: FFrame
    """

    __initargs__ = ["support_frame", "side_tangent", "top_tangent", "direction", "tip_point_z"]
    __icon__ = os.path.join(DIRS['ICON_DIR'], 'cone.png')

    # A parameter for debugging, turns the visibility of miscellaneous parts ON/OFF
    __show_primitives = False  # type: bool

    # Frame the cone is grown from (nose or tail frame of the fuselage)
    support_frame = Input(FFrame(width=1.0, height=0.5), validator=val.IsInstance((FFrame, MFrame)))
    # Tangent of the horizontal guide curve at the frame's side point
    side_tangent = Input(Vector(-0.88, -0.65, 0), validator=val.IsInstance(Vector))
    # Tangent of the vertical guide curve at the frame's top point
    top_tangent = Input(Vector(0.8851351164623547, 0, 0.46533410105554684), validator=val.IsInstance(Vector))
    # Build direction along x: 'x' grows toward +x, 'x_' toward -x
    direction = Input('x_', validator=val.OneOf(["x", "x_"]))
    slenderness_ratio = Input(1, validator=val.Range(0, 1.5))  # Nose-cone length / frame diagonal
    transparency = Input(None)

    @Attribute
    def length(self):
        """Cone length: slenderness_ratio times the frame's diagonal."""
        diagonal = sqrt((self.support_frame.height ** 2) + (self.support_frame.width ** 2))
        return self.slenderness_ratio * diagonal

    @Attribute
    def build_direction(self):
        """Sign of the build direction along x: -1 for 'x_', +1 for 'x'."""
        value = (-1 if self.direction == 'x_' else 1)
        return value

    @Attribute
    def side_tangent_reflected(self):
        """side_tangent mirrored across the x-z symmetry plane (y negated)."""
        x = self.side_tangent.x
        y = self.side_tangent.y
        z = self.side_tangent.z
        return Vector(x, -y, z)

    @Attribute
    def tip_point(self):
        """Point where both guide curves meet, offset `length` along x from the frame."""
        support_position = self.support_frame.position
        support_mid_point = self.support_frame.spline_points[1]
        # delta_z = self.build_direction * (self.side_tangent.z / self.side_tangent.x) * self.length
        return Point(support_position.x + (self.build_direction * self.length),
                     support_position.y, support_mid_point.z)

    @Attribute
    def guides(self):
        """Frame, vertical and horizontal guide curves, each split at the tip point."""
        start_frame = self.support_frame
        points = start_frame.spline_points

        frame_curve = self.support_frame.curve
        frame_curve_split = SplitCurve(curve_in=frame_curve, tool=points[1]).edges

        v_curve = InterpolatedCurve(points=[points[0], self.tip_point, points[2]],
                                    tangents=[Vector(self.build_direction, 0, 0),  # Bottom forced Horizontal
                                              Vector(0, 0, 1),  # Mid-Point Vector (Forced z+ from sign convention)
                                              self.top_tangent])
        v_curve_split = SplitCurve(curve_in=v_curve, tool=self.tip_point).edges

        h_curve = InterpolatedCurve(points=[points[1], self.tip_point, points[3]],
                                    tangents=[self.side_tangent,
                                              Vector(0, -1, 0),  # Mid-Point Vector (Forced y-)
                                              self.side_tangent_reflected])
        h_curve_split = SplitCurve(curve_in=h_curve, tool=self.tip_point).edges

        return {'f_curve': frame_curve_split, 'v_curve': v_curve_split, 'h_curve': h_curve_split}

    # --- Output Surface: ---------------------------------------------------------------------------------------------

    @Part
    def cone(self):
        # The visible result: both half-shells sewn into one shell.
        return SewnShell([self.cone_right, self.cone_left], transparency=self.transparency)

    # --- Primitives: -------------------------------------------------------------------------------------------------

    @Part(in_tree=__show_primitives)
    def filled_top(self):
        return FilledSurface(curves=[self.guides['f_curve'][1], self.guides['v_curve'][1].reversed,
                                     self.guides['h_curve'][0].reversed])

    @Part(in_tree=__show_primitives)
    def filled_bot(self):
        return FilledSurface(curves=[self.guides['h_curve'][0], self.guides['v_curve'][0].reversed,
                                     self.guides['f_curve'][0]])

    @Part(in_tree=__show_primitives)
    def cone_right(self):
        return SewnShell([self.filled_top, self.filled_bot])

    @Part(in_tree=__show_primitives)
    def cone_left(self):
        # Mirror the right half across the x-z plane to complete the cone.
        return MirroredShape(shape_in=self.cone_right,
                             reference_point=self.position,
                             vector1=self.position.Vx_,
                             vector2=self.position.Vz)
if __name__ == '__main__':
from parapy.gui import display
obj = FCone()
display(obj)
|
<filename>components/fuselage/primitives/fcone.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Required ParaPy Modules
from parapy.geom import *
from parapy.core import *
# Required Modules
from fframe import *
from mframe import *
from directories import *
from math import sqrt
__author__ = ["<NAME>"]
__all__ = ["FCone"]
class FCone(GeomBase):
""" This creates the nose or tailcone for the fuselage.
:returns: Nose and/or tailcone for fuselage
:param support_frame: This is an FFrame Class
:type support_frame: FFrame
"""
__initargs__ = ["support_frame", "side_tangent", "top_tangent", "direction", "tip_point_z"]
__icon__ = os.path.join(DIRS['ICON_DIR'], 'cone.png')
# A parameter for debugging, turns the visibility of miscellaneous parts ON/OFF
__show_primitives = False # type: bool
support_frame = Input(FFrame(width=1.0, height=0.5), validator=val.IsInstance((FFrame, MFrame))) #
side_tangent = Input(Vector(-0.88, -0.65, 0), validator=val.IsInstance(Vector))
top_tangent = Input(Vector(0.8851351164623547, 0, 0.46533410105554684), validator=val.IsInstance(Vector))
direction = Input('x_', validator=val.OneOf(["x", "x_"]))
slenderness_ratio = Input(1, validator=val.Range(0, 1.5)) # Nose-cone length / frame diagonal
transparency = Input(None)
@Attribute
def length(self):
diagonal = sqrt((self.support_frame.height ** 2) + (self.support_frame.width ** 2))
return self.slenderness_ratio * diagonal
@Attribute
def build_direction(self):
value = (-1 if self.direction == 'x_' else 1)
return value
@Attribute
def side_tangent_reflected(self):
x = self.side_tangent.x
y = self.side_tangent.y
z = self.side_tangent.z
return Vector(x, -y, z)
@Attribute
def tip_point(self):
support_position = self.support_frame.position
support_mid_point = self.support_frame.spline_points[1]
# delta_z = self.build_direction * (self.side_tangent.z / self.side_tangent.x) * self.length
return Point(support_position.x + (self.build_direction * self.length),
support_position.y, support_mid_point.z)
@Attribute
def guides(self):
start_frame = self.support_frame
points = start_frame.spline_points
frame_curve = self.support_frame.curve
frame_curve_split = SplitCurve(curve_in=frame_curve, tool=points[1]).edges
v_curve = InterpolatedCurve(points=[points[0], self.tip_point, points[2]],
tangents=[Vector(self.build_direction, 0, 0), # Bottom forced Horizontal
Vector(0, 0, 1), # Mid-Point Vector (Forced z+ from sign convention)
self.top_tangent])
v_curve_split = SplitCurve(curve_in=v_curve, tool=self.tip_point).edges
h_curve = InterpolatedCurve(points=[points[1], self.tip_point, points[3]],
tangents=[self.side_tangent,
Vector(0, -1, 0), # Mid-Point Vector (Forced y-)
self.side_tangent_reflected])
h_curve_split = SplitCurve(curve_in=h_curve, tool=self.tip_point).edges
return {'f_curve': frame_curve_split, 'v_curve': v_curve_split, 'h_curve': h_curve_split}
# --- Output Surface: ---------------------------------------------------------------------------------------------
@Part
def cone(self):
return SewnShell([self.cone_right, self.cone_left], transparency=self.transparency)
# --- Primitives: -------------------------------------------------------------------------------------------------
@Part(in_tree=__show_primitives)
def filled_top(self):
return FilledSurface(curves=[self.guides['f_curve'][1], self.guides['v_curve'][1].reversed,
self.guides['h_curve'][0].reversed])
@Part(in_tree=__show_primitives)
def filled_bot(self):
return FilledSurface(curves=[self.guides['h_curve'][0], self.guides['v_curve'][0].reversed,
self.guides['f_curve'][0]])
@Part(in_tree=__show_primitives)
def cone_right(self):
return SewnShell([self.filled_top, self.filled_bot])
@Part(in_tree=__show_primitives)
def cone_left(self):
return MirroredShape(shape_in=self.cone_right,
reference_point=self.position,
vector1=self.position.Vx_,
vector2=self.position.Vz)
if __name__ == '__main__':
from parapy.gui import display
obj = FCone()
display(obj)
|
en
| 0.517934
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # Required ParaPy Modules # Required Modules This creates the nose or tailcone for the fuselage. :returns: Nose and/or tailcone for fuselage :param support_frame: This is an FFrame Class :type support_frame: FFrame # A parameter for debugging, turns the visibility of miscellaneous parts ON/OFF # type: bool # # Nose-cone length / frame diagonal # delta_z = self.build_direction * (self.side_tangent.z / self.side_tangent.x) * self.length # Bottom forced Horizontal # Mid-Point Vector (Forced z+ from sign convention) # Mid-Point Vector (Forced y-) # --- Output Surface: --------------------------------------------------------------------------------------------- # --- Primitives: -------------------------------------------------------------------------------------------------
| 2.321459
| 2
|
post/admin.py
|
abdukhashimov/django-rest-blog-2
| 0
|
6626580
|
<reponame>abdukhashimov/django-rest-blog-2
from django.contrib import admin
from post.models import Post, Category, Tag
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for Post: the author is set automatically on save."""

    # 'author' is intentionally absent: it is stamped in save_model.
    fields = ('title', 'thumbnail', 'content', 'tag', 'category')

    def save_model(self, request, obj, form, change):
        # Record the logged-in admin user as the author instead of exposing
        # the field in the admin form.
        obj.author = request.user
        return super().save_model(request, obj, form, change)
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
|
from django.contrib import admin
from post.models import Post, Category, Tag
class PostAdmin(admin.ModelAdmin):
fields = ('title', 'thumbnail', 'content', 'tag', 'category')
def save_model(self, request, obj, form, change):
obj.author = request.user
return super().save_model(request, obj, form, change)
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
|
none
| 1
| 2.114059
| 2
|
|
eSports/users/views.py
|
bdg-python-team1/Tournament_Project
| 0
|
6626581
|
from django.shortcuts import render, redirect
from .models import Profile
from .forms import UserRegisterForm, ProfileUpdateForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.views import View
from django.utils.decorators import method_decorator
from django.contrib.auth.models import User
# def register(request):
# registered = False
# if request.method == 'POST':
# user_form = UserRegisterForm(data=request.POST)
# profile_form = ProfileUpdateForm(data=request.POST)
#
# if user_form.is_valid() and profile_form.is_valid():
# user = user_form.save()
# user.set_password(<PASSWORD>)
# user.save()
# profile = profile_form.save(commit=False)
# profile.user = user
# if 'picture' in request.FILES:
# profile.picture = request.FILES['picture']
# profile.save()
# registered = True
# else:
# print(user_form.errors, profile_form.errors)
#
# else:
# user_form = UserRegisterForm()
# profile_form = ProfileUpdateForm()
# return render(request, 'users/register.html',
# {'user_form': user_form, 'profile_form': profile_form, 'registered': registered})
#
#
# def user_login(request):
#
# if request.method == 'POST':
# username = request.POST.get('username')
# password = request.POST.get('password')
# user = authenticate(username=username, password=password)
# if user:
# if user.is_active:
# login(request, user)
# return redirect(reverse('home'))
# else:
# return HttpResponse("Your eSports account is disabled.")
# else:
# print("Invalid login details: {0}, {1}".format(username, password))
# return HttpResponse("Invalid login details supplied.")
# else:
# return render(request, 'registration/login.html', {})
#
# @login_required
# def user_logout(request):
# logout(request)
# return redirect(reverse('auth_login'))
@login_required
def register_profile(request):
    """Create a Profile for the logged-in user.

    GET renders an empty form; POST validates the submitted form, attaches
    the current user to the new profile and redirects home.  An invalid
    form is re-rendered (its errors are also printed to the console for
    debugging).
    """
    form = ProfileUpdateForm()
    if request.method == 'POST':
        form = ProfileUpdateForm(request.POST, request.FILES)
        if form.is_valid():
            user_profile = form.save(commit=False)
            # The form does not carry the user; bind it before saving.
            user_profile.user = request.user
            user_profile.save()
            return redirect('home')
        else:
            print(form.errors)
    context_dict = {'form': form}
    return render(request, 'contest/profile_registration.html', context_dict)
class ProfileView(View):
    """Display (GET) and update (POST) a single user's profile page."""

    def get_user_details(self, username):
        """Resolve *username* to ``(User, Profile, blank form)``.

        Returns ``None`` when no such user exists; callers rely on the
        resulting TypeError from tuple-unpacking to detect that case.
        """
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return None
        profile = Profile.objects.get_or_create(user=user)[0]
        return user, profile, ProfileUpdateForm()

    @method_decorator(login_required)
    def get(self, request, username):
        try:
            (user, profile, form) = self.get_user_details(username)
        except TypeError:
            # Unknown username: get_user_details returned None.
            return redirect('home')
        return render(request, 'contest/profile.html',
                      {'userprofile': profile,
                       'selecteduser': user,
                       'form': form})

    @method_decorator(login_required)
    def post(self, request, username):
        try:
            (user, profile, _) = self.get_user_details(username)
        except TypeError:
            return redirect('home')
        form = ProfileUpdateForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            form.save(commit=True)
            return redirect('profile', user.username)
        print(form.errors)
        return render(request, 'contest/profile.html',
                      {'userprofile': profile,
                       'selecteduser': user,
                       'form': form})
class ListProfilesView(View):
    """Render a page listing every Profile in the system."""

    @method_decorator(login_required)
    def get(self, request):
        context = {'userprofile_list': Profile.objects.all()}
        return render(request, 'contest/list_profiles.html', context)
|
from django.shortcuts import render, redirect
from .models import Profile
from .forms import UserRegisterForm, ProfileUpdateForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.views import View
from django.utils.decorators import method_decorator
from django.contrib.auth.models import User
# def register(request):
# registered = False
# if request.method == 'POST':
# user_form = UserRegisterForm(data=request.POST)
# profile_form = ProfileUpdateForm(data=request.POST)
#
# if user_form.is_valid() and profile_form.is_valid():
# user = user_form.save()
# user.set_password(<PASSWORD>)
# user.save()
# profile = profile_form.save(commit=False)
# profile.user = user
# if 'picture' in request.FILES:
# profile.picture = request.FILES['picture']
# profile.save()
# registered = True
# else:
# print(user_form.errors, profile_form.errors)
#
# else:
# user_form = UserRegisterForm()
# profile_form = ProfileUpdateForm()
# return render(request, 'users/register.html',
# {'user_form': user_form, 'profile_form': profile_form, 'registered': registered})
#
#
# def user_login(request):
#
# if request.method == 'POST':
# username = request.POST.get('username')
# password = request.POST.get('password')
# user = authenticate(username=username, password=password)
# if user:
# if user.is_active:
# login(request, user)
# return redirect(reverse('home'))
# else:
# return HttpResponse("Your eSports account is disabled.")
# else:
# print("Invalid login details: {0}, {1}".format(username, password))
# return HttpResponse("Invalid login details supplied.")
# else:
# return render(request, 'registration/login.html', {})
#
# @login_required
# def user_logout(request):
# logout(request)
# return redirect(reverse('auth_login'))
@login_required
def register_profile(request):
    """Let the logged-in user create their profile.

    GET renders an empty ProfileUpdateForm. A valid POST attaches the new
    profile to ``request.user`` and redirects home; an invalid POST
    re-renders the bound form so its errors are shown.
    """
    if request.method == 'POST':
        form = ProfileUpdateForm(request.POST, request.FILES)
        if form.is_valid():
            profile = form.save(commit=False)
            profile.user = request.user
            profile.save()
            return redirect('home')
        print(form.errors)
    else:
        form = ProfileUpdateForm()
    return render(request, 'contest/profile_registration.html', {'form': form})
class ProfileView(View):
    """Display (GET) and update (POST) a single user's profile page."""

    def get_user_details(self, username):
        """Resolve *username* to ``(User, Profile, blank form)``.

        Returns ``None`` when no such user exists; callers rely on the
        resulting TypeError from tuple-unpacking to detect that case.
        """
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return None
        profile = Profile.objects.get_or_create(user=user)[0]
        return user, profile, ProfileUpdateForm()

    @method_decorator(login_required)
    def get(self, request, username):
        try:
            (user, profile, form) = self.get_user_details(username)
        except TypeError:
            # Unknown username: get_user_details returned None.
            return redirect('home')
        return render(request, 'contest/profile.html',
                      {'userprofile': profile,
                       'selecteduser': user,
                       'form': form})

    @method_decorator(login_required)
    def post(self, request, username):
        try:
            (user, profile, _) = self.get_user_details(username)
        except TypeError:
            return redirect('home')
        form = ProfileUpdateForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            form.save(commit=True)
            return redirect('profile', user.username)
        print(form.errors)
        return render(request, 'contest/profile.html',
                      {'userprofile': profile,
                       'selecteduser': user,
                       'form': form})
class ListProfilesView(View):
    """Render a page listing every Profile in the system."""

    @method_decorator(login_required)
    def get(self, request):
        context = {'userprofile_list': Profile.objects.all()}
        return render(request, 'contest/list_profiles.html', context)
|
en
| 0.42662
|
# def register(request): # registered = False # if request.method == 'POST': # user_form = UserRegisterForm(data=request.POST) # profile_form = ProfileUpdateForm(data=request.POST) # # if user_form.is_valid() and profile_form.is_valid(): # user = user_form.save() # user.set_password(<PASSWORD>) # user.save() # profile = profile_form.save(commit=False) # profile.user = user # if 'picture' in request.FILES: # profile.picture = request.FILES['picture'] # profile.save() # registered = True # else: # print(user_form.errors, profile_form.errors) # # else: # user_form = UserRegisterForm() # profile_form = ProfileUpdateForm() # return render(request, 'users/register.html', # {'user_form': user_form, 'profile_form': profile_form, 'registered': registered}) # # # def user_login(request): # # if request.method == 'POST': # username = request.POST.get('username') # password = request.POST.get('password') # user = authenticate(username=username, password=password) # if user: # if user.is_active: # login(request, user) # return redirect(reverse('home')) # else: # return HttpResponse("Your eSports account is disabled.") # else: # print("Invalid login details: {0}, {1}".format(username, password)) # return HttpResponse("Invalid login details supplied.") # else: # return render(request, 'registration/login.html', {}) # # @login_required # def user_logout(request): # logout(request) # return redirect(reverse('auth_login'))
| 2.16989
| 2
|
paxos/practical.py
|
hnfgns/paxos
| 1
|
6626582
|
<reponame>hnfgns/paxos
'''
This module builds upon the essential Paxos implementation and adds
functionality required for most practical uses of the algorithm.
'''
from paxos import essential
from paxos.essential import ProposalID
class Messenger (essential.Messenger):
    def send_prepare_nack(self, to_uid, proposal_id, promised_id):
        '''
        Sends a Prepare Nack message for the proposal to the specified node.
        '''
    def send_accept_nack(self, to_uid, proposal_id, promised_id):
        '''
        Sends an Accept! Nack message for the proposal to the specified node.
        '''
    def on_leadership_acquired(self):
        '''
        Called when leadership has been acquired. This is not a guaranteed
        position. Another node may assume leadership at any time and it's
        even possible that another may have successfully done so before this
        callback is executed. Use this method with care.
        The safe way to guarantee leadership is to use a full Paxos instance
        with the resolution value being the UID of the leader node. To avoid
        potential issues arising from timing and/or failure, the election
        result may be restricted to a certain time window. Prior to the end of
        the window the leader may attempt to re-elect itself to extend its
        term in office.
        '''
class Proposer (essential.Proposer):
    '''
    This class extends the functionality of the essential Proposer
    implementation by tracking whether the proposer believes itself to
    be the current leader of the Paxos instance. It also supports a flag
    to disable active participation in the Paxos instance.

    The 'leader' attribute is a boolean value indicating the Proposer's
    belief in whether or not it is the current leader. As the documentation
    for the Messenger.on_leadership_acquired() method describes, multiple
    nodes may simultaneously believe themselves to be the leader.

    The 'active' attribute is a boolean value indicating whether or not
    the Proposer should send outgoing messages (defaults to True). Setting
    this attribute to false places the Proposer in a "passive" mode where
    it processes all incoming messages but drops all messages it would
    otherwise send.
    '''

    leader = False
    active = True

    def set_proposal(self, value):
        '''
        Sets the proposal value for this node iff this node is not already aware of
        another proposal having already been accepted.
        '''
        if self.proposed_value is None:
            self.proposed_value = value

            if self.leader and self.active:
                self.messenger.send_accept(self.proposal_id, value)

    def prepare(self, increment_proposal_number=True):
        '''
        Sends a prepare request to all Acceptors as the first step in
        attempting to acquire leadership of the Paxos instance. If the
        'increment_proposal_number' argument is True (the default), the
        proposal id will be set higher than that of any previously observed
        proposal id. Otherwise the previously used proposal id will simply be
        retransmitted.
        '''
        if increment_proposal_number:
            self.leader = False
            self.promises_rcvd = set()
            # Use the ProposalID namedtuple rather than a bare tuple so that
            # attribute access (proposal_id.number, as used in
            # observe_proposal) works uniformly on every proposal id.
            self.proposal_id = ProposalID(self.next_proposal_number, self.proposer_uid)

            self.next_proposal_number += 1

        if self.active:
            self.messenger.send_prepare(self.proposal_id)

    def observe_proposal(self, from_uid, proposal_id):
        '''
        Optional method used to update the proposal counter as proposals are
        seen on the network. When co-located with Acceptors and/or Learners,
        this method may be used to avoid a message delay when attempting to
        assume leadership (guaranteed NACK if the proposal number is too low).
        '''
        if from_uid != self.proposer_uid:
            if proposal_id >= ProposalID(self.next_proposal_number, self.proposer_uid):
                self.next_proposal_number = proposal_id.number + 1

    def recv_prepare_nack(self, from_uid, proposal_id, promised_id):
        '''
        Called when an explicit NACK is sent in response to a prepare message.
        '''
        # The promised id tells us the number we must exceed on the next try.
        self.observe_proposal(from_uid, promised_id)

    def recv_accept_nack(self, from_uid, proposal_id, promised_id):
        '''
        Called when an explicit NACK is sent in response to an accept message.
        '''

    def resend_accept(self):
        '''
        Retransmits an Accept! message iff this node is the leader and has
        a proposal value.
        '''
        if self.leader and self.proposed_value and self.active:
            self.messenger.send_accept(self.proposal_id, self.proposed_value)

    def recv_promise(self, from_uid, proposal_id, prev_accepted_id, prev_accepted_value):
        '''
        Called when a Promise message is received from the network.
        '''
        self.observe_proposal(from_uid, proposal_id)

        # Ignore the promise if we already lead, it concerns a stale
        # proposal, or this Acceptor has already promised for this proposal.
        if self.leader or proposal_id != self.proposal_id or from_uid in self.promises_rcvd:
            return

        self.promises_rcvd.add(from_uid)

        if prev_accepted_id > self.last_accepted_id:
            self.last_accepted_id = prev_accepted_id
            # If the Acceptor has already accepted a value, we MUST set our proposal
            # to that value. Otherwise, we may retain our current value.
            if prev_accepted_value is not None:
                self.proposed_value = prev_accepted_value

        if len(self.promises_rcvd) == self.quorum_size:
            self.leader = True

            self.messenger.on_leadership_acquired()

            if self.proposed_value is not None and self.active:
                self.messenger.send_accept(self.proposal_id, self.proposed_value)
class Acceptor (essential.Acceptor):
    '''
    Acceptors act as the fault-tolerant memory for Paxos. To ensure correctness
    in the presence of failure, Acceptors must be able to remember the promises
    they've made even in the event of power outages. Consequently, any changes
    to the promised_id, accepted_id, and/or accepted_value must be persisted to
    stable media prior to sending promise and accepted messages. After calling
    recv_prepare() and recv_accept_request(), the property
    'persistance_required' (note: the misspelling is part of the public API
    and is kept for backward compatibility) should be checked to see if
    persistence is required.

    Note that because Paxos permits any combination of dropped packets, not
    every promise/accepted message needs to be sent. This implementation only
    responds to the first prepare/accept_request message received and ignores
    all others until the Acceptor's values are persisted to stable media (which
    is typically a slow process). After saving the promised_id, accepted_id,
    and accepted_value variables, the "persisted" method must be called to send
    the pending promise and/or accepted messages.

    The 'active' attribute is a boolean value indicating whether or not
    the Acceptor should send outgoing messages (defaults to True). Setting
    this attribute to false places the Acceptor in a "passive" mode where
    it processes all incoming messages but drops all messages it would
    otherwise send.
    '''

    pending_promise = None   # None or the UID to send a promise message to
    pending_accepted = None  # None or the UID to send an accepted message to
    active = True

    @property
    def persistance_required(self):
        # True while a promise or accepted reply is deferred, waiting for the
        # application to persist state and call persisted().
        return self.pending_promise is not None or self.pending_accepted is not None

    def recover(self, promised_id, accepted_id, accepted_value):
        '''
        Restores state previously persisted to stable media (e.g. after a
        process restart).
        '''
        self.promised_id = promised_id
        self.accepted_id = accepted_id
        self.accepted_value = accepted_value

    def recv_prepare(self, from_uid, proposal_id):
        '''
        Called when a Prepare message is received from the network
        '''
        if proposal_id == self.promised_id:
            # Duplicate prepare message. No change in state is necessary so the response
            # may be sent immediately
            if self.active:
                self.messenger.send_promise(from_uid, proposal_id, self.accepted_id, self.accepted_value)

        elif proposal_id > self.promised_id:
            if self.pending_promise is None:
                self.promised_id = proposal_id
                if self.active:
                    # Defer the promise until the new promised_id has been
                    # persisted; persisted() will send it.
                    self.pending_promise = from_uid

        else:
            # Proposal is older than our current promise: explicit NACK.
            if self.active:
                self.messenger.send_prepare_nack(from_uid, proposal_id, self.promised_id)

    def recv_accept_request(self, from_uid, proposal_id, value):
        '''
        Called when an Accept! message is received from the network
        '''
        if proposal_id == self.accepted_id and value == self.accepted_value:
            # Duplicate accepted proposal. No change in state is necessary so the response
            # may be sent immediately
            if self.active:
                self.messenger.send_accepted(proposal_id, value)

        elif proposal_id >= self.promised_id:
            if self.pending_accepted is None:
                self.promised_id = proposal_id
                self.accepted_value = value
                self.accepted_id = proposal_id
                if self.active:
                    # Defer the accepted message until state is persisted;
                    # persisted() will send it.
                    self.pending_accepted = from_uid

        else:
            # Proposal is below our current promise: explicit NACK.
            if self.active:
                self.messenger.send_accept_nack(from_uid, proposal_id, self.promised_id)

    def persisted(self):
        '''
        This method sends any pending Promise and/or Accepted messages. Prior to
        calling this method, the application must ensure that the promised_id,
        accepted_id, and accepted_value variables have been persisted to stable
        media.
        '''
        if self.active:

            if self.pending_promise:
                self.messenger.send_promise(self.pending_promise,
                                            self.promised_id,
                                            self.accepted_id,
                                            self.accepted_value)

            if self.pending_accepted:
                self.messenger.send_accepted(self.accepted_id,
                                             self.accepted_value)

        self.pending_promise = None
        self.pending_accepted = None
class Learner (essential.Learner):
    '''
    No additional functionality is required beyond the essential Learner
    implementation.
    '''
class Node (Proposer, Acceptor, Learner):
    '''
    Convenience class for the common deployment model in which every node
    on the network plays all three Paxos roles: Proposer, Acceptor, and
    Learner.
    '''

    def __init__(self, messenger, node_uid, quorum_size):
        self.messenger   = messenger
        self.node_uid    = node_uid
        self.quorum_size = quorum_size

    @property
    def proposer_uid(self):
        # The Proposer mixin identifies this node by its network UID.
        return self.node_uid

    def change_quorum_size(self, quorum_size):
        self.quorum_size = quorum_size

    def recv_prepare(self, from_uid, proposal_id):
        # Piggy-back proposal observation on incoming Prepare messages so the
        # local proposal counter stays ahead of anything seen on the network.
        self.observe_proposal(from_uid, proposal_id)
        return super(Node, self).recv_prepare(from_uid, proposal_id)
|
'''
This module builds upon the essential Paxos implementation and adds
functionality required for most practical uses of the algorithm.
'''
from paxos import essential
from paxos.essential import ProposalID
class Messenger (essential.Messenger):
    def send_prepare_nack(self, to_uid, proposal_id, promised_id):
        '''
        Sends a Prepare Nack message for the proposal to the specified node.
        '''
    def send_accept_nack(self, to_uid, proposal_id, promised_id):
        '''
        Sends an Accept! Nack message for the proposal to the specified node.
        '''
    def on_leadership_acquired(self):
        '''
        Called when leadership has been acquired. This is not a guaranteed
        position. Another node may assume leadership at any time and it's
        even possible that another may have successfully done so before this
        callback is executed. Use this method with care.
        The safe way to guarantee leadership is to use a full Paxos instance
        with the resolution value being the UID of the leader node. To avoid
        potential issues arising from timing and/or failure, the election
        result may be restricted to a certain time window. Prior to the end of
        the window the leader may attempt to re-elect itself to extend its
        term in office.
        '''
class Proposer (essential.Proposer):
    '''
    This class extends the functionality of the essential Proposer
    implementation by tracking whether the proposer believes itself to
    be the current leader of the Paxos instance. It also supports a flag
    to disable active participation in the Paxos instance.

    The 'leader' attribute is a boolean value indicating the Proposer's
    belief in whether or not it is the current leader. As the documentation
    for the Messenger.on_leadership_acquired() method describes, multiple
    nodes may simultaneously believe themselves to be the leader.

    The 'active' attribute is a boolean value indicating whether or not
    the Proposer should send outgoing messages (defaults to True). Setting
    this attribute to false places the Proposer in a "passive" mode where
    it processes all incoming messages but drops all messages it would
    otherwise send.
    '''

    leader = False
    active = True

    def set_proposal(self, value):
        '''
        Sets the proposal value for this node iff this node is not already aware of
        another proposal having already been accepted.
        '''
        if self.proposed_value is None:
            self.proposed_value = value

            if self.leader and self.active:
                self.messenger.send_accept(self.proposal_id, value)

    def prepare(self, increment_proposal_number=True):
        '''
        Sends a prepare request to all Acceptors as the first step in
        attempting to acquire leadership of the Paxos instance. If the
        'increment_proposal_number' argument is True (the default), the
        proposal id will be set higher than that of any previously observed
        proposal id. Otherwise the previously used proposal id will simply be
        retransmitted.
        '''
        if increment_proposal_number:
            self.leader = False
            self.promises_rcvd = set()
            # Use the ProposalID namedtuple rather than a bare tuple so that
            # attribute access (proposal_id.number, as used in
            # observe_proposal) works uniformly on every proposal id.
            self.proposal_id = ProposalID(self.next_proposal_number, self.proposer_uid)

            self.next_proposal_number += 1

        if self.active:
            self.messenger.send_prepare(self.proposal_id)

    def observe_proposal(self, from_uid, proposal_id):
        '''
        Optional method used to update the proposal counter as proposals are
        seen on the network. When co-located with Acceptors and/or Learners,
        this method may be used to avoid a message delay when attempting to
        assume leadership (guaranteed NACK if the proposal number is too low).
        '''
        if from_uid != self.proposer_uid:
            if proposal_id >= ProposalID(self.next_proposal_number, self.proposer_uid):
                self.next_proposal_number = proposal_id.number + 1

    def recv_prepare_nack(self, from_uid, proposal_id, promised_id):
        '''
        Called when an explicit NACK is sent in response to a prepare message.
        '''
        # The promised id tells us the number we must exceed on the next try.
        self.observe_proposal(from_uid, promised_id)

    def recv_accept_nack(self, from_uid, proposal_id, promised_id):
        '''
        Called when an explicit NACK is sent in response to an accept message.
        '''

    def resend_accept(self):
        '''
        Retransmits an Accept! message iff this node is the leader and has
        a proposal value.
        '''
        if self.leader and self.proposed_value and self.active:
            self.messenger.send_accept(self.proposal_id, self.proposed_value)

    def recv_promise(self, from_uid, proposal_id, prev_accepted_id, prev_accepted_value):
        '''
        Called when a Promise message is received from the network.
        '''
        self.observe_proposal(from_uid, proposal_id)

        # Ignore the promise if we already lead, it concerns a stale
        # proposal, or this Acceptor has already promised for this proposal.
        if self.leader or proposal_id != self.proposal_id or from_uid in self.promises_rcvd:
            return

        self.promises_rcvd.add(from_uid)

        if prev_accepted_id > self.last_accepted_id:
            self.last_accepted_id = prev_accepted_id
            # If the Acceptor has already accepted a value, we MUST set our proposal
            # to that value. Otherwise, we may retain our current value.
            if prev_accepted_value is not None:
                self.proposed_value = prev_accepted_value

        if len(self.promises_rcvd) == self.quorum_size:
            self.leader = True

            self.messenger.on_leadership_acquired()

            if self.proposed_value is not None and self.active:
                self.messenger.send_accept(self.proposal_id, self.proposed_value)
class Acceptor (essential.Acceptor):
    '''
    Acceptors act as the fault-tolerant memory for Paxos. To ensure correctness
    in the presence of failure, Acceptors must be able to remember the promises
    they've made even in the event of power outages. Consequently, any changes
    to the promised_id, accepted_id, and/or accepted_value must be persisted to
    stable media prior to sending promise and accepted messages. After calling
    recv_prepare() and recv_accept_request(), the property
    'persistance_required' (note: the misspelling is part of the public API
    and is kept for backward compatibility) should be checked to see if
    persistence is required.

    Note that because Paxos permits any combination of dropped packets, not
    every promise/accepted message needs to be sent. This implementation only
    responds to the first prepare/accept_request message received and ignores
    all others until the Acceptor's values are persisted to stable media (which
    is typically a slow process). After saving the promised_id, accepted_id,
    and accepted_value variables, the "persisted" method must be called to send
    the pending promise and/or accepted messages.

    The 'active' attribute is a boolean value indicating whether or not
    the Acceptor should send outgoing messages (defaults to True). Setting
    this attribute to false places the Acceptor in a "passive" mode where
    it processes all incoming messages but drops all messages it would
    otherwise send.
    '''

    pending_promise = None   # None or the UID to send a promise message to
    pending_accepted = None  # None or the UID to send an accepted message to
    active = True

    @property
    def persistance_required(self):
        # True while a promise or accepted reply is deferred, waiting for the
        # application to persist state and call persisted().
        return self.pending_promise is not None or self.pending_accepted is not None

    def recover(self, promised_id, accepted_id, accepted_value):
        '''
        Restores state previously persisted to stable media (e.g. after a
        process restart).
        '''
        self.promised_id = promised_id
        self.accepted_id = accepted_id
        self.accepted_value = accepted_value

    def recv_prepare(self, from_uid, proposal_id):
        '''
        Called when a Prepare message is received from the network
        '''
        if proposal_id == self.promised_id:
            # Duplicate prepare message. No change in state is necessary so the response
            # may be sent immediately
            if self.active:
                self.messenger.send_promise(from_uid, proposal_id, self.accepted_id, self.accepted_value)

        elif proposal_id > self.promised_id:
            if self.pending_promise is None:
                self.promised_id = proposal_id
                if self.active:
                    # Defer the promise until the new promised_id has been
                    # persisted; persisted() will send it.
                    self.pending_promise = from_uid

        else:
            # Proposal is older than our current promise: explicit NACK.
            if self.active:
                self.messenger.send_prepare_nack(from_uid, proposal_id, self.promised_id)

    def recv_accept_request(self, from_uid, proposal_id, value):
        '''
        Called when an Accept! message is received from the network
        '''
        if proposal_id == self.accepted_id and value == self.accepted_value:
            # Duplicate accepted proposal. No change in state is necessary so the response
            # may be sent immediately
            if self.active:
                self.messenger.send_accepted(proposal_id, value)

        elif proposal_id >= self.promised_id:
            if self.pending_accepted is None:
                self.promised_id = proposal_id
                self.accepted_value = value
                self.accepted_id = proposal_id
                if self.active:
                    # Defer the accepted message until state is persisted;
                    # persisted() will send it.
                    self.pending_accepted = from_uid

        else:
            # Proposal is below our current promise: explicit NACK.
            if self.active:
                self.messenger.send_accept_nack(from_uid, proposal_id, self.promised_id)

    def persisted(self):
        '''
        This method sends any pending Promise and/or Accepted messages. Prior to
        calling this method, the application must ensure that the promised_id,
        accepted_id, and accepted_value variables have been persisted to stable
        media.
        '''
        if self.active:

            if self.pending_promise:
                self.messenger.send_promise(self.pending_promise,
                                            self.promised_id,
                                            self.accepted_id,
                                            self.accepted_value)

            if self.pending_accepted:
                self.messenger.send_accepted(self.accepted_id,
                                             self.accepted_value)

        self.pending_promise = None
        self.pending_accepted = None
class Learner (essential.Learner):
    '''
    No additional functionality is required beyond the essential Learner
    implementation.
    '''
class Node (Proposer, Acceptor, Learner):
    '''
    Convenience class for the common deployment model in which every node
    on the network plays all three Paxos roles: Proposer, Acceptor, and
    Learner.
    '''

    def __init__(self, messenger, node_uid, quorum_size):
        self.messenger   = messenger
        self.node_uid    = node_uid
        self.quorum_size = quorum_size

    @property
    def proposer_uid(self):
        # The Proposer mixin identifies this node by its network UID.
        return self.node_uid

    def change_quorum_size(self, quorum_size):
        self.quorum_size = quorum_size

    def recv_prepare(self, from_uid, proposal_id):
        # Piggy-back proposal observation on incoming Prepare messages so the
        # local proposal counter stays ahead of anything seen on the network.
        self.observe_proposal(from_uid, proposal_id)
        return super(Node, self).recv_prepare(from_uid, proposal_id)
|
en
| 0.874554
|
This module builds upon the essential Paxos implementation and adds functionality required for most practical uses of the algorithm. Sends a Prepare Nack message for the proposal to the specified node Sends a Accept! Nack message for the proposal to the specified node Called when leadership has been aquired. This is not a guaranteed position. Another node may assume leadership at any time and it's even possible that another may have successfully done so before this callback is exectued. Use this method with care. The safe way to guarantee leadership is to use a full Paxos instance whith the resolution value being the UID of the leader node. To avoid potential issues arising from timing and/or failure, the election result may be restricted to a certain time window. Prior to the end of the window the leader may attempt to re-elect itself to extend it's term in office. This class extends the functionality of the essential Proposer implementation by tracking whether the proposer believes itself to be the current leader of the Paxos instance. It also supports a flag to disable active paritcipation in the Paxos instance. The 'leader' attribute is a boolean value indicating the Proposer's belief in whether or not it is the current leader. As the documentation for the Messenger.on_leadership_acquired() method describes, multiple nodes may simultaneously believe themselves to be the leader. The 'active' attribute is a boolean value indicating whether or not the Proposer should send outgoing messages (defaults to True). Setting this attribute to false places the Proposer in a "passive" mode where it processes all incoming messages but drops all messages it would otherwise send. Sets the proposal value for this node iff this node is not already aware of another proposal having already been accepted. Sends a prepare request to all Acceptors as the first step in attempting to acquire leadership of the Paxos instance. 
If the 'increment_proposal_number' argument is True (the default), the proposal id will be set higher than that of any previous observed proposal id. Otherwise the previously used proposal id will simply be retransmitted. Optional method used to update the proposal counter as proposals are seen on the network. When co-located with Acceptors and/or Learners, this method may be used to avoid a message delay when attempting to assume leadership (guaranteed NACK if the proposal number is too low). Called when an explicit NACK is sent in response to a prepare message. Called when an explicit NACK is sent in response to an accept message Retransmits an Accept! message iff this node is the leader and has a proposal value Called when a Promise message is received from the network # If the Acceptor has already accepted a value, we MUST set our proposal # to that value. Otherwise, we may retain our current value. Acceptors act as the fault-tolerant memory for Paxos. To ensure correctness in the presense of failure, Acceptors must be able to remember the promises they've made even in the event of power outages. Consequently, any changes to the promised_id, accepted_id, and/or accepted_value must be persisted to stable media prior to sending promise and accepted messages. After calling the recv_prepare() and recv_accept_request(), the property 'persistence_required' should be checked to see if persistence is required. Note that because Paxos permits any combination of dropped packets, not every promise/accepted message needs to be sent. This implementation only responds to the first prepare/accept_request message received and ignores all others until the Acceptor's values are persisted to stable media (which is typically a slow process). After saving the promised_id, accepted_id, and accepted_value variables, the "persisted" method must be called to send the pending promise and/or accepted messages. 
The 'active' attribute is a boolean value indicating whether or not the Acceptor should send outgoing messages (defaults to True). Setting this attribute to false places the Acceptor in a "passive" mode where it processes all incoming messages but drops all messages it would otherwise send. # None or the UID to send a promise message to # None or the UID to send an accepted message to Called when a Prepare message is received from the network # Duplicate prepare message. No change in state is necessary so the response # may be sent immediately Called when an Accept! message is received from the network # Duplicate accepted proposal. No change in state is necessary so the response # may be sent immediately This method sends any pending Promise and/or Accepted messages. Prior to calling this method, the application must ensure that the promised_id accepted_id, and accepted_value variables have been persisted to stable media. No additional functionality required. This class supports the common model where each node on a network preforms all three Paxos roles, Proposer, Acceptor, and Learner.
| 2.847674
| 3
|
lib/datasets/tools/multilabel_list.py
|
Flsahkong/seeDiff
| 321
|
6626583
|
<reponame>Flsahkong/seeDiff
import os
import xml.etree.ElementTree as ET
import sys
argvs = sys.argv
def load_image_set_index(ref):
    """
    Load the image indexes listed in a dataset image-set file.

    Parameters
    ----------
    ref : str
        Path to a text file with one image index per line
        (e.g. VOCdevkit/VOC2007/ImageSets/Main/val.txt).

    Returns
    -------
    list of str
        One stripped string per input line.
    """
    # Keep the assert so existing callers that rely on AssertionError still
    # work. (The original os.path.join(ref) was a single-argument no-op and
    # has been dropped.)
    assert os.path.exists(ref), \
        'Path does not exist: {}'.format(ref)
    with open(ref) as f:
        return [line.strip() for line in f]
def load_pascal_annotation(ref_path, index):
    """
    Load the unique object class names annotated for one image in a
    PASCAL VOC style dataset.

    Parameters
    ----------
    ref_path : str
        Root of the VOC devkit year directory (must contain 'Annotations/').
    index : str
        Image index (file stem) whose annotation XML should be read.

    Returns
    -------
    list of str
        Unique lower-cased, stripped class names in first-seen order.
        (The original list(set(...)) made the order nondeterministic;
        dict.fromkeys keeps it stable across runs.)
    """
    filename = os.path.join(ref_path, 'Annotations', index + '.xml')
    tree = ET.parse(filename)
    names = (obj.find('name').text.lower().strip()
             for obj in tree.findall('object'))
    # dict preserves insertion order, giving a stable de-duplicated list.
    return list(dict.fromkeys(names))
# Build a multi-label list file: each output line is an image path followed
# by the unique class names present in that image.
# argv[1]: image-set file, argv[2]: VOC devkit root, argv[3]: output path.
indexes = load_image_set_index(argvs[1])
# Use a context manager so the output file is flushed and closed even on
# error (the original opened it and never closed it).
with open(argvs[3], 'w') as images_list:
    for index in indexes:
        objs = load_pascal_annotation(argvs[2], index)
        write_word = os.path.join('/research/masaito/detection_dataset/VOCdevkit/VOC2007/JPEGImages', index + '.jpg' + ' ')
        for name in objs:
            write_word = write_word + name + ' '
        images_list.write(write_word + '\n')
|
import os
import xml.etree.ElementTree as ET
import sys
argvs = sys.argv
def load_image_set_index(ref):
    """
    Load the image indexes listed in a dataset image-set file.

    Parameters
    ----------
    ref : str
        Path to a text file with one image index per line
        (e.g. VOCdevkit/VOC2007/ImageSets/Main/val.txt).

    Returns
    -------
    list of str
        One stripped string per input line.
    """
    # Keep the assert so existing callers that rely on AssertionError still
    # work. (The original os.path.join(ref) was a single-argument no-op and
    # has been dropped.)
    assert os.path.exists(ref), \
        'Path does not exist: {}'.format(ref)
    with open(ref) as f:
        return [line.strip() for line in f]
def load_pascal_annotation(ref_path, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(ref_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
obj_list = []
for ix, obj in enumerate(objs):
cls = obj.find('name').text.lower().strip()
obj_list.append(cls)
return list(set(obj_list))
indexes = load_image_set_index(argvs[1])
# Write one line per image: "<absolute jpg path> <class> <class> ...".
# Use a with-block so the output file is flushed and closed deterministically
# (the original opened the handle and never closed it).
with open(argvs[3], 'w') as images_list:
    for index in indexes:
        objs = load_pascal_annotation(argvs[2], index)
        write_word = os.path.join('/research/masaito/detection_dataset/VOCdevkit/VOC2007/JPEGImages', index + '.jpg' + ' ')
        for name in objs:
            write_word = write_word + name + ' '
        images_list.write(write_word + '\n')
|
en
| 0.778022
|
Load the indexes listed in this dataset's image set file. # Example path to image set file: # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt Load image and bounding boxes info from XML file in the PASCAL VOC format.
| 2.960005
| 3
|
exercicio14.py
|
juniooor/Exercicios-python
| 0
|
6626584
|
# Fishing-fine calculator: Sao Paulo state fishing rules allow 50 kg of fish;
# every kilo above that limit is fined at R$4.00. Reads the catch weight and
# reports the fine (or that no fine is due).
print('Programa para calcular multa de pesca')
peso = int(input('Digite quantos quilos de peixe tem: '))
taxa_por_quilo = 4  # fine rate, R$ per excess kilo
excesso = peso - 50
if excesso > 0:
    print('você vai pagar R${} de multa'.format(excesso * taxa_por_quilo))
else:
    print('você não precisa pagar taxa')
|
#<NAME>-Pescador, homem de bem, comprou um microcomputador para controlar o rendimento diário de seu trabalho. Toda vez que ele traz um peso de peixes maior que o estabelecido pelo regulamento de pesca do estado de São Paulo (50 quilos) deve pagar uma multa de R$ 4,00 por quilo excedente. João precisa que você faça um programa que leia a variável peso (peso de peixes) e calcule o excesso. Gravar na variável excesso a quantidade de quilos além do limite e na variável multa o valor da multa que João deverá pagar. Imprima os dados do programa com as mensagens adequadas.
print('Programa para calcular multa de pesca')
peso=int(input('Digite quantos quilos de peixe tem: '))
multa=4
if peso>50:
sobra=peso-50
multa=sobra*multa
print('você vai pagar R${} de multa'.format(multa))
else:
print('você não precisa pagar taxa')
|
pt
| 0.968845
|
#<NAME>-Pescador, homem de bem, comprou um microcomputador para controlar o rendimento diário de seu trabalho. Toda vez que ele traz um peso de peixes maior que o estabelecido pelo regulamento de pesca do estado de São Paulo (50 quilos) deve pagar uma multa de R$ 4,00 por quilo excedente. João precisa que você faça um programa que leia a variável peso (peso de peixes) e calcule o excesso. Gravar na variável excesso a quantidade de quilos além do limite e na variável multa o valor da multa que João deverá pagar. Imprima os dados do programa com as mensagens adequadas.
| 3.656868
| 4
|
Python_3/tests/ex_debug_test.py
|
waynegm/OpendTect-External-Attributes
| 22
|
6626585
|
<reponame>waynegm/OpendTect-External-Attributes<filename>Python_3/tests/ex_debug_test.py
# External Attribute Debug Test
#
import sys,os
import numpy as np
import web_pdb
#
# Make the parent directory importable so 'extattrib' resolves.
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
#
# Plugin declaration consumed by the extattrib framework: one input stream,
# processed serially.
xa.params = {
	'Inputs': ['Input1'],
	'Parallel' : False
}
#
def doCompute():
	#
	# Start debugging before computation starts
	#
	# Opens a web-based pdb session before the trace loop runs.
	web_pdb.set_trace()
	#
	# Framework-driven loop; the call order doInput -> read xa.Input ->
	# set xa.Output -> doOutput is presumably required by extattrib — confirm.
	while True:
		xa.doInput()
		inp = xa.Input['Input1'][0,0,:]
		#
		# Add some more local variables
		#
		inline = xa.TI['inl']
		crossline = xa.TI['crl']
		#
		# Pass the input trace through unchanged (identity attribute).
		xa.Output = inp
		xa.doOutput()
#
# Register the compute callback and hand control to the framework.
xa.doCompute = doCompute
#
xa.run(sys.argv[1:])
|
# External Attribute Debug Test
#
import sys,os
import numpy as np
import web_pdb
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
#
xa.params = {
'Inputs': ['Input1'],
'Parallel' : False
}
#
def doCompute():
#
# Start debugging before computation starts
#
web_pdb.set_trace()
#
while True:
xa.doInput()
inp = xa.Input['Input1'][0,0,:]
#
# Add some more local variables
#
inline = xa.TI['inl']
crossline = xa.TI['crl']
#
xa.Output = inp
xa.doOutput()
#
xa.doCompute = doCompute
#
xa.run(sys.argv[1:])
|
en
| 0.500944
|
# External Attribute Debug Test # # # # # # Start debugging before computation starts # # # # Add some more local variables # # # #
| 2.132638
| 2
|
pypeline/common/formats/newick.py
|
KHanghoj/epiPALEOMIX
| 2
|
6626586
|
#!/usr/bin/python
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Required due to use of NotImplementedError in setattr:
# pylint: disable=R0921
import re
from pypeline.common.utilities import \
safe_coerce_to_tuple, \
Immutable, \
TotallyOrdered
from pypeline.common.formats._graph import \
GraphError, \
_Graph
class NewickError(GraphError):
    """Base exception for Newick-format errors in this module."""
    pass
# Raised by Newick.from_string and the _parse_* / _tokenize helpers below.
class NewickParseError(NewickError):
    """Exception raised if errors occur during parsing
    of Newick strings."""
    pass
class Newick(TotallyOrdered, Immutable):
    """Immutable object representing a Newick node.
    Nodes are classified as either internal nodes (have children),
    or leaf nodes (does not have children). A node MUST either have
    1 or more child-nodes, or have a name and/or a length. This is to
    ensure that nodes can be represented in an unambigious manner
    using the Newick format.
    No assumptions are made about the type of the 'name' and the 'length'
    properties when simply parsing the tree, and these are simply converted
    into strings when the Newick string is generated. However, additional
    contraints apply when unrooting/rerooting trees (see below). """
    def __init__(self, name = None, length = None, children = None):
        """See class documentation for constraints."""
        # Normalise falsy values ("" / 0 / empty sequence) to None / ().
        name = name or None
        length = length or None
        children = tuple(children or ())
        # Pre-compute the hash once; the object is immutable afterwards.
        nw_hash = hash((name, length, children))
        Immutable.__init__(self,
                           name = name,
                           length = length,
                           children = children,
                           _hash = nw_hash)
        if not (self.children or self.name or self.length):
            raise NewickError("Leaf nodes MUST have either a name or a length")
        # Ensure that these values are hashable
        hash(self.name)
        hash(self.length)
        # _weight = number of direct children; used by __lt__ for sorting.
        weight = 0
        for child in self.children:
            if not isinstance(child, Newick):
                raise TypeError("Child nodes must be Newick nodes")
            weight += 1
        # Immutable blocks normal attribute assignment; bypass it deliberately.
        object.__setattr__(self, "_weight", weight)
    @property
    def is_leaf(self):
        """Returns true if the node is a leaf (has no children)."""
        return not self.children
    def get_leaf_nodes(self):
        """Returns iterable for leaf-nodes accessible from this node."""
        if not self.is_leaf:
            for child in self.children:
                for leaf in child.get_leaf_nodes():
                    yield leaf
        else:
            yield self
    def get_leaf_names(self):
        # Yield the 'name' of every leaf reachable from this node.
        for node in self.get_leaf_nodes():
            yield node.name
    def reroot_on_taxa(self, taxa):
        """Returns the Newick tree from this node, but rooted on the midpoint
        of the branch leading to one or more taxa. Note that the taxa are not
        required to form a clade. If the taxa do not form a monophyletic clade,
        then the outgroup will include more taxa than those passed to the
        function."""
        return _NewickGraph(self).reroot_on_taxa(taxa)
    def reroot_on_midpoint(self):
        """Returns the newick tree from this node, but rooted on the midpoint
        of the tree. That is to say that a root node is added at the exact
        midpoint of the longest path in the unrooted tree. If this midpoint
        lies at an existing internal node, then this node is made the root.
        Note that the sorting of nodes is not preserved, and that any
        uninformative nodes (lacking name/length, while connecting two
        other nodes, e.g. the old root) are spliced out.
        All nodes must have a length of zero or greater (no missing values
        are allowed), but note that rerooting behavior around nodes with
        length zero may yield unexpected results."""
        if len(list(self.get_leaf_nodes())) < 2:
            return self # No meaningful way to reroot such trees
        return _NewickGraph(self).reroot_on_midpoint()
    def add_support(self, bootstraps, fmt = "{Support}"):
        """Adds support values to the current tree, based on a set of trees containing
        the same taxa. It is assumed that the support trees represent unrooted or
        arbitarily rooted trees, and no weight is given to the rooted topology of these
        trees.
        The main tree should itself be rooted, and the the toplogy and ordering of this
        tree is preserved, with node-names updated using the formatting string 'fmt'.
        Formatting is carried out using str.format, with these fields:
          {Support}    -- The total number of trees in which a clade is supported.
          {Percentage} -- The percentage of trees in which a clade is supported (float).
          {Fraction}   -- The fraction of trees in which a clade is supported (float).
        For example, typical percentage support-values can be realized by setting 'fmt'
        to the value "{Percentage:.0f}" to produce integer values.
        """
        clade_counts = {}
        leaf_names_lst = list(self.get_leaf_names())
        leaf_names = frozenset(leaf_names_lst)
        # Duplicate leaf names would make clades (frozensets of names) ambiguous.
        if len(leaf_names) != len(leaf_names_lst):
            raise NewickError("Cannot add support values to trees with duplicate leaf names")
        bootstraps = safe_coerce_to_tuple(bootstraps)
        for support_tree in bootstraps:
            support_tree_names = frozenset(support_tree.get_leaf_names())
            if leaf_names != support_tree_names:
                raise NewickError("Support tree does not contain same set of leaf nodes")
            # Count how many support trees contain each clade.
            support_graph = _NewickGraph(support_tree)
            for clade in support_graph.get_clade_names():
                clade_counts[clade] = clade_counts.get(clade, 0) + 1
        return self._add_support(self, len(bootstraps), clade_counts, fmt)
    @classmethod
    def from_string(cls, string):
        """Parses a Newick string and returns a representation of the tree.
        See e.g. http://en.wikipedia.org/wiki/Newick_format
        Note that implicit nodes, such as (), (A,), and the like are not
        allowed, as they cannot always be represented/parsed in an unambigious
        manner. Thus all leaf nodes must have a name and/or a length."""
        tokens = _tokenize(string)
        # A leading "(" means a full tree; otherwise a single (leaf) node.
        if tokens and tokens[0] == "(":
            top_node = _parse_tokens(tokens)
        else:
            top_node = _parse_child(tokens)
        # After parsing, only the terminating ";" may remain.
        if tokens != [";"]:
            raise NewickParseError("Missing terminating semi-colon")
        return top_node
    def __lt__(self, other):
        """See TotallyOrdered"""
        if not isinstance(other, Newick):
            return NotImplemented
        # pylint: disable=W0212
        # Negated weight: nodes with more children sort first.
        return (-self._weight, self.name, self.length, self.children) \
            < (-other._weight, other.name, other.length, other.children)
    def __hash__(self):
        """Hashing function, see 'hash'."""
        return self._hash
    def __repr__(self):
        """Representation corresponds to the Newick string for the (sub)tree,
        which can be parsed by 'from_string'."""
        return "%s;" % (self._to_str(),)
    def _to_str(self):
        # Serialise this subtree (without the trailing ";") by appending
        # string fragments and joining once at the end.
        fields = []
        if self.children:
            fields.append("(")
            for child in self.children:
                fields.append(child._to_str()) # pylint: disable=W0212
                fields.append(",")
            fields.pop()  # drop the trailing comma after the last child
            fields.append(")")
        if self.name is not None:
            fields.append(str(self.name))
        if self.length is not None:
            fields.append(":")
            fields.append(str(self.length))
        return "".join(fields)
    def _add_support(self, node, total, clade_counts, fmt):
        """Recursively annotates a subtree with support values,
        excepting leaf nodes (where the name is preserved) and
        the root node (where the name is cleared)."""
        if node.is_leaf:
            return node
        # A clade is identified by the frozenset of its leaf names.
        clade = frozenset(leaf.name for leaf in node.get_leaf_nodes())
        support = clade_counts.get(clade, 0)
        # 'total or 1' guards against division by zero when no bootstraps given.
        name = fmt.format(Support = support,
                          Percentage = (support * 100.0) / (total or 1),
                          Fraction = (support * 1.0) / (total or 1))
        children = []
        for child in node.children:
            children.append(self._add_support(child, total, clade_counts, fmt))
        # The root node (node is self) keeps no support name.
        return Newick(name = (None if (node is self) else name),
                      length = node.length,
                      children = children)
################################################################################
################################################################################
## Functions related to NEWICK parsing
_TOKENIZER = re.compile("([():,;])")  # capture group keeps the structural Newick characters as tokens
_NODE_KEYS = frozenset(("name", "length", "children"))  # NOTE(review): unused in the visible code — confirm before removing
def _tokenize(string):
    """Split a Newick string into stripped, non-empty tokens.

    Structural characters "():,;" become individual tokens (kept by the
    capture group in _TOKENIZER); everything else is a name/length field.
    """
    stripped = (field.strip() for field in _TOKENIZER.split(string))
    return [field for field in stripped if field]
def _parse_tokens(tokens):
    # Parse a parenthesised group; 'tokens' is consumed destructively
    # (popped from the front) by this function and _parse_child.
    assert tokens and tokens[0] == "("
    tokens.pop(0)
    child, children = None, []
    while tokens and (tokens[0] not in ");"):
        if tokens[0] == ",":
            # A comma terminates the previous child; 'child' stays None for
            # implicit nodes like "(,)", caught below.
            children.append(child)
            tokens.pop(0)
        child = _parse_child(tokens)
    children.append(child)
    if any(child is None for child in children):
        raise NewickParseError("Implicit leaf nodes (no name OR length) are not allowed")
    elif not tokens or (tokens[0] != ")"):
        raise NewickParseError("Malformed Newick string, contains unbalanced parantheses")
    tokens.pop(0)
    # The closed group may itself carry a name/length suffix; parse it as a
    # child node that owns the collected children.
    return _parse_child(tokens, children = children)
def _parse_child(tokens, children = None):
    # A nested group starts a recursive descent into _parse_tokens.
    if tokens and tokens[0] == "(":
        return _parse_tokens(tokens)
    name, length = None, None
    # Consume "name" and/or ":length" fields until a structural token.
    while tokens and (tokens[0] not in ",);"):
        if (tokens[0] == ":"):
            if length is not None:
                raise NewickParseError("Node has multiple length values")
            tokens.pop(0)
            if tokens[0] in ",);":
                raise NewickParseError("Missing length value")
            length = tokens.pop(0).strip()
        else:
            name = tokens.pop(0).strip()
    # A node must be identifiable: name, length, or children (for groups).
    if not (name or length or children):
        raise NewickParseError("Parsing of implicit nodes not supported")
    return Newick(name = name,
                  length = length,
                  children = children)
################################################################################
################################################################################
## Class related to tree manipulations
class _NewickGraph(_Graph):
    # Mutable graph view of an (immutable) Newick tree; used to implement
    # the rerooting operations exposed on Newick.
    def __init__(self, node):
        _Graph.__init__(self)
        self._collect_names_and_blengths(node)
        self.prune_uninformative_nodes()
    def _collect_names_and_blengths(self, c_node):
        # Recursively register every node (keyed by its id()) and an edge to
        # each child carrying the child's branch length.
        c_node_id = id(c_node)
        self.set_name(c_node_id, c_node.name)
        for child in c_node.children:
            child_id = id(child)
            self.add_connection(c_node_id, child_id, child.length)
            self._collect_names_and_blengths(child)
    def rebuild_tree(self, parent_id, node_id):
        """Rebuilds a newick tree starting at a node with id
        'node_id' and a parent with id 'parent_id' (or the
        same value as 'node_id' if a root node)."""
        children = []
        for child_id in self.connections[node_id]:
            if child_id != parent_id:
                children.append(self.rebuild_tree(node_id, child_id))
        children.sort()
        blength = self.connections.get(parent_id).get(node_id)
        if isinstance(blength, float):
            # repr() preserves full float precision in the emitted string.
            blength = repr(blength)
        return Newick(name = self.names.get(node_id),
                      length = blength,
                      children = children)
|
#!/usr/bin/python
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Required due to use of NotImplementedError in setattr:
# pylint: disable=R0921
import re
from pypeline.common.utilities import \
safe_coerce_to_tuple, \
Immutable, \
TotallyOrdered
from pypeline.common.formats._graph import \
GraphError, \
_Graph
class NewickError(GraphError):
pass
class NewickParseError(NewickError):
"""Exception raised if errors occur during parsing
of Newick strings."""
pass
class Newick(TotallyOrdered, Immutable):
"""Immutable object representing a Newick node.
Nodes are classified as either internal nodes (have children),
or leaf nodes (does not have children). A node MUST either have
1 or more child-nodes, or have a name and/or a length. This is to
ensure that nodes can be represented in an unambigious manner
using the Newick format.
No assumptions are made about the type of the 'name' and the 'length'
properties when simply parsing the tree, and these are simply converted
into strings when the Newick string is generated. However, additional
contraints apply when unrooting/rerooting trees (see below). """
def __init__(self, name = None, length = None, children = None):
"""See class documentation for constraints."""
name = name or None
length = length or None
children = tuple(children or ())
nw_hash = hash((name, length, children))
Immutable.__init__(self,
name = name,
length = length,
children = children,
_hash = nw_hash)
if not (self.children or self.name or self.length):
raise NewickError("Leaf nodes MUST have either a name or a length")
# Ensure that these values are hashable
hash(self.name)
hash(self.length)
weight = 0
for child in self.children:
if not isinstance(child, Newick):
raise TypeError("Child nodes must be Newick nodes")
weight += 1
object.__setattr__(self, "_weight", weight)
@property
def is_leaf(self):
"""Returns true if the node is a leaf (has no children)."""
return not self.children
def get_leaf_nodes(self):
"""Returns iterable for leaf-nodes accessible from this node."""
if not self.is_leaf:
for child in self.children:
for leaf in child.get_leaf_nodes():
yield leaf
else:
yield self
def get_leaf_names(self):
for node in self.get_leaf_nodes():
yield node.name
def reroot_on_taxa(self, taxa):
"""Returns the Newick tree from this node, but rooted on the midpoint
of the branch leading to one or more taxa. Note that the taxa are not
required to form a clade. If the taxa do not form a monophyletic clade,
then the outgroup will include more taxa than those passed to the
function."""
return _NewickGraph(self).reroot_on_taxa(taxa)
def reroot_on_midpoint(self):
"""Returns the newick tree from this node, but rooted on the midpoint
of the tree. That is to say that a root node is added at the exact
midpoint of the longest path in the unrooted tree. If this midpoint
lies at an existing internal node, then this node is made the root.
Note that the sorting of nodes is not preserved, and that any
uninformative nodes (lacking name/length, while connecting two
other nodes, e.g. the old root) are spliced out.
All nodes must have a length of zero or greater (no missing values
are allowed), but note that rerooting behavior around nodes with
length zero may yield unexpected results."""
if len(list(self.get_leaf_nodes())) < 2:
return self # No meaningful way to reroot such trees
return _NewickGraph(self).reroot_on_midpoint()
def add_support(self, bootstraps, fmt = "{Support}"):
"""Adds support values to the current tree, based on a set of trees containing
the same taxa. It is assumed that the support trees represent unrooted or
arbitarily rooted trees, and no weight is given to the rooted topology of these
trees.
The main tree should itself be rooted, and the the toplogy and ordering of this
tree is preserved, with node-names updated using the formatting string 'fmt'.
Formatting is carried out using str.format, with these fields:
{Support} -- The total number of trees in which a clade is supported.
{Percentage} -- The percentage of trees in which a clade is supported (float).
{Fraction} -- The fraction of trees in which a clade is supported (float).
For example, typical percentage support-values can be realized by setting 'fmt'
to the value "{Percentage:.0f}" to produce integer values.
"""
clade_counts = {}
leaf_names_lst = list(self.get_leaf_names())
leaf_names = frozenset(leaf_names_lst)
if len(leaf_names) != len(leaf_names_lst):
raise NewickError("Cannot add support values to trees with duplicate leaf names")
bootstraps = safe_coerce_to_tuple(bootstraps)
for support_tree in bootstraps:
support_tree_names = frozenset(support_tree.get_leaf_names())
if leaf_names != support_tree_names:
raise NewickError("Support tree does not contain same set of leaf nodes")
support_graph = _NewickGraph(support_tree)
for clade in support_graph.get_clade_names():
clade_counts[clade] = clade_counts.get(clade, 0) + 1
return self._add_support(self, len(bootstraps), clade_counts, fmt)
@classmethod
def from_string(cls, string):
"""Parses a Newick string and returns a representation of the tree.
See e.g. http://en.wikipedia.org/wiki/Newick_format
Note that implicit nodes, such as (), (A,), and the like are not
allowed, as they cannot always be represented/parsed in an unambigious
manner. Thus all leaf nodes must have a name and/or a length."""
tokens = _tokenize(string)
if tokens and tokens[0] == "(":
top_node = _parse_tokens(tokens)
else:
top_node = _parse_child(tokens)
if tokens != [";"]:
raise NewickParseError("Missing terminating semi-colon")
return top_node
def __lt__(self, other):
"""See TotallyOrdered"""
if not isinstance(other, Newick):
return NotImplemented
# pylint: disable=W0212
return (-self._weight, self.name, self.length, self.children) \
< (-other._weight, other.name, other.length, other.children)
def __hash__(self):
"""Hashing function, see 'hash'."""
return self._hash
def __repr__(self):
"""Representation corresponds to the Newick string for the (sub)tree,
which can be parsed by 'from_string'."""
return "%s;" % (self._to_str(),)
def _to_str(self):
fields = []
if self.children:
fields.append("(")
for child in self.children:
fields.append(child._to_str()) # pylint: disable=W0212
fields.append(",")
fields.pop()
fields.append(")")
if self.name is not None:
fields.append(str(self.name))
if self.length is not None:
fields.append(":")
fields.append(str(self.length))
return "".join(fields)
def _add_support(self, node, total, clade_counts, fmt):
"""Recursively annotates a subtree with support values,
excepting leaf nodes (where the name is preserved) and
the root node (where the name is cleared)."""
if node.is_leaf:
return node
clade = frozenset(leaf.name for leaf in node.get_leaf_nodes())
support = clade_counts.get(clade, 0)
name = fmt.format(Support = support,
Percentage = (support * 100.0) / (total or 1),
Fraction = (support * 1.0) / (total or 1))
children = []
for child in node.children:
children.append(self._add_support(child, total, clade_counts, fmt))
return Newick(name = (None if (node is self) else name),
length = node.length,
children = children)
################################################################################
################################################################################
## Functions related to NEWICK parsing
_TOKENIZER = re.compile("([():,;])")
_NODE_KEYS = frozenset(("name", "length", "children"))
def _tokenize(string):
result = []
for field in _TOKENIZER.split(string):
field = field.strip()
if field:
result.append(field)
return result
def _parse_tokens(tokens):
assert tokens and tokens[0] == "("
tokens.pop(0)
child, children = None, []
while tokens and (tokens[0] not in ");"):
if tokens[0] == ",":
children.append(child)
tokens.pop(0)
child = _parse_child(tokens)
children.append(child)
if any(child is None for child in children):
raise NewickParseError("Implicit leaf nodes (no name OR length) are not allowed")
elif not tokens or (tokens[0] != ")"):
raise NewickParseError("Malformed Newick string, contains unbalanced parantheses")
tokens.pop(0)
return _parse_child(tokens, children = children)
def _parse_child(tokens, children = None):
if tokens and tokens[0] == "(":
return _parse_tokens(tokens)
name, length = None, None
while tokens and (tokens[0] not in ",);"):
if (tokens[0] == ":"):
if length is not None:
raise NewickParseError("Node has multiple length values")
tokens.pop(0)
if tokens[0] in ",);":
raise NewickParseError("Missing length value")
length = tokens.pop(0).strip()
else:
name = tokens.pop(0).strip()
if not (name or length or children):
raise NewickParseError("Parsing of implicit nodes not supported")
return Newick(name = name,
length = length,
children = children)
################################################################################
################################################################################
## Class related to tree manipulations
class _NewickGraph(_Graph):
def __init__(self, node):
_Graph.__init__(self)
self._collect_names_and_blengths(node)
self.prune_uninformative_nodes()
def _collect_names_and_blengths(self, c_node):
c_node_id = id(c_node)
self.set_name(c_node_id, c_node.name)
for child in c_node.children:
child_id = id(child)
self.add_connection(c_node_id, child_id, child.length)
self._collect_names_and_blengths(child)
def rebuild_tree(self, parent_id, node_id):
"""Rebuilds a newick tree starting at a node with id
'node_id' and a parent with id 'parent_id' (or the
same value as 'node_id' if a root node)."""
children = []
for child_id in self.connections[node_id]:
if child_id != parent_id:
children.append(self.rebuild_tree(node_id, child_id))
children.sort()
blength = self.connections.get(parent_id).get(node_id)
if isinstance(blength, float):
blength = repr(blength)
return Newick(name = self.names.get(node_id),
length = blength,
children = children)
|
en
| 0.815288
|
#!/usr/bin/python # # Copyright (c) 2012 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Required due to use of NotImplementedError in setattr: # pylint: disable=R0921 Exception raised if errors occur during parsing of Newick strings. Immutable object representing a Newick node. Nodes are classified as either internal nodes (have children), or leaf nodes (does not have children). A node MUST either have 1 or more child-nodes, or have a name and/or a length. This is to ensure that nodes can be represented in an unambigious manner using the Newick format. No assumptions are made about the type of the 'name' and the 'length' properties when simply parsing the tree, and these are simply converted into strings when the Newick string is generated. However, additional contraints apply when unrooting/rerooting trees (see below). See class documentation for constraints. # Ensure that these values are hashable Returns true if the node is a leaf (has no children). 
Returns iterable for leaf-nodes accessible from this node. Returns the Newick tree from this node, but rooted on the midpoint of the branch leading to one or more taxa. Note that the taxa are not required to form a clade. If the taxa do not form a monophyletic clade, then the outgroup will include more taxa than those passed to the function. Returns the newick tree from this node, but rooted on the midpoint of the tree. That is to say that a root node is added at the exact midpoint of the longest path in the unrooted tree. If this midpoint lies at an existing internal node, then this node is made the root. Note that the sorting of nodes is not preserved, and that any uninformative nodes (lacking name/length, while connecting two other nodes, e.g. the old root) are spliced out. All nodes must have a length of zero or greater (no missing values are allowed), but note that rerooting behavior around nodes with length zero may yield unexpected results. # No meaningful way to reroot such trees Adds support values to the current tree, based on a set of trees containing the same taxa. It is assumed that the support trees represent unrooted or arbitarily rooted trees, and no weight is given to the rooted topology of these trees. The main tree should itself be rooted, and the the toplogy and ordering of this tree is preserved, with node-names updated using the formatting string 'fmt'. Formatting is carried out using str.format, with these fields: {Support} -- The total number of trees in which a clade is supported. {Percentage} -- The percentage of trees in which a clade is supported (float). {Fraction} -- The fraction of trees in which a clade is supported (float). For example, typical percentage support-values can be realized by setting 'fmt' to the value "{Percentage:.0f}" to produce integer values. Parses a Newick string and returns a representation of the tree. See e.g. 
http://en.wikipedia.org/wiki/Newick_format Note that implicit nodes, such as (), (A,), and the like are not allowed, as they cannot always be represented/parsed in an unambigious manner. Thus all leaf nodes must have a name and/or a length. See TotallyOrdered # pylint: disable=W0212 Hashing function, see 'hash'. Representation corresponds to the Newick string for the (sub)tree, which can be parsed by 'from_string'. # pylint: disable=W0212 Recursively annotates a subtree with support values, excepting leaf nodes (where the name is preserved) and the root node (where the name is cleared). ################################################################################ ################################################################################ ## Functions related to NEWICK parsing ################################################################################ ################################################################################ ## Class related to tree manipulations Rebuilds a newick tree starting at a node with id 'node_id' and a parent with id 'parent_id' (or the same value as 'node_id' if a root node).
| 1.837702
| 2
|
R/init_data.py
|
Xiaojieqiu/scLVM
| 0
|
6626587
|
import sys
import scipy as SP
import pylab as PL
from matplotlib import cm
import h5py
#make sure your paths point to limix and scLVM directories
# NOTE(review): machine-specific absolute paths — adjust per installation.
limix_path = '/Users/florian/Code/python_code/limix-0.6.4/build/release.darwin/interfaces/python'
sclvm_path = '/Users/florian/Code/python_code/scPy/scLVM/'
sys.path.append(limix_path)
sys.path.append(sclvm_path)
#import scLVM
# Presumably a relative fallback for repository checkouts — confirm.
sys.path.append('./../scLVM')
from scLVM import scLVM
|
import sys
import scipy as SP
import pylab as PL
from matplotlib import cm
import h5py
#make sure your paths point to limix and scLVM directories
limix_path = '/Users/florian/Code/python_code/limix-0.6.4/build/release.darwin/interfaces/python'
sclvm_path = '/Users/florian/Code/python_code/scPy/scLVM/'
sys.path.append(limix_path)
sys.path.append(sclvm_path)
#import scLVM
sys.path.append('./../scLVM')
from scLVM import scLVM
|
en
| 0.847866
|
#make sure your paths point to limix and scLVM directories #import scLVM
| 1.35537
| 1
|
Ejercicio4/src/exercise4.py
|
NAL-GitHub-Octavio/TECMTY.TC1028.Python.py.Basicos
| 0
|
6626588
|
<gh_stars>0
def main():
#escribe tu código abajo de esta línea
main()
|
def main():
#escribe tu código abajo de esta línea
main()
|
es
| 0.99885
|
#escribe tu código abajo de esta línea
| 1.744954
| 2
|
ml/main.py
|
Ldude162/AP-CSP
| 1
|
6626589
|
<gh_stars>1-10
import pandas as pd
import numpy as np
'''
This is how I originally imported the data.
data = pd.read_csv('./ml/data.csv', sep=',', header=None)
ratings = pd.read_csv('./ml/ratings.csv', sep=',', header=None)
people = pd.read_csv('./ml/people.csv', sep=',', header=None)
movies = data.loc[1:57, 2]
ratings = ratings.astype(float)
ratings.to_numpy()
movies.to_frame()
'''
# Add numpy arrays here (ratings is just a rectangle of the numerical ratings, people is a row of initials and names, movies is a row of movie names):
'''
ratings = []
movies = []
people = []
'''
def guessRating(ratings, movies, people):
# using pandas because I feel more comfortable with it
result = pd.DataFrame()
usertable = pd.DataFrame(data=3, index=range(people.shape[1]), columns=range(people.shape[1]))
ratings = pd.DataFrame(ratings)
print(people)
# Uses a simplified version of collaborative filtering to determine which users like similar movies
for i, row in ratings.iterrows():
# sets "row" to a dataframe so I can iterate through it
row = row.to_frame()
for a, user in row.iterrows():
index = 0
# iterates through every user
for b, user2 in row.iterrows():
# makes sure it is not checking itself
if b == a:
index += 1
continue
# Makes sure that they actually rated the movie
elif ratings.at[i,b] < 0:
index += 1
continue
# If the rating is over 10, set it to 10
elif ratings.at[i,b] > 10:
ratings.at[i,b] = 10
# If the ratings are the same, add 2 to the similarity score
if ratings.at[i,b] == ratings.at[i,a]:
usertable.at[a,index] = usertable.at[a,index] + 2
# If they are within 2, add 1 to the similarity score
elif ratings.at[i,b] < ratings.at[i,a] and ratings.at[i,b] > ratings.at[i,a] - 2 or ratings.at[i,b] > ratings.at[i,a] and ratings.at[i,b] < ratings.at[i,a] + 2:
usertable.at[a,index] = usertable.at[a,index] + 1
# If they are far apart, subtract 1 from the similarity score
else:
usertable.at[a,index] = usertable.at[a,index] - 1
index += 1
# Iterates through every movie
for i, row in ratings.iterrows():
index = 0
row = row.to_frame()
# Creates the matches list, which will store the top 5 most similar people
matches = [(-500, 0),(-500, 0), (-500, 0),(-500, 0),(-500, 0)]
for c, row2 in usertable.iloc[0].items():
# Makes sure that it is not checking itself
if c == 0:
continue
# Makes sure that they actually rated the movie
elif ratings.at[i,c] < 0:
continue
# If the person is more similar than someone in the top five, adjust accordingly
if row2 > matches[0][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = matches[1]
matches[1] = matches[0]
matches[0] = (row2, c)
elif row2 > matches[1][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = matches[1]
matches[1] = (row2, c)
elif row2 > matches[2][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = (row2, c)
elif row2 > matches[3][0]:
matches[4] = matches[3]
matches[3] = (row2, c)
elif row2 > matches[4][0]:
matches[4] = (row2, c)
# Takes a weighted average of the top 5's ratings of the same movie
index2 = 5
weighted = [0,0,0,0,0]
number = 0
for d in matches:
# If less than 5 people rated the movie, it will ignore the placeholder values
if d[0] == -500:
continue
# Gets the person's rating of the movie
point = ratings.at[i,d[1]]
# Adds the rating to the weighted average
weighted[matches.index(d)] = point * index2
number += index2
index2 -= 1
# Divides the sum of the ratings by the number of times it was multiplied by to create the average
if number != 0:
result.at[i,0] = sum(weighted) / number
# If no one else rated the movie, guess the rating is 6
else:
result.at[i,0] = 6
# Does the same thing, but for the rest of the people
for a, user in row.iterrows():
matches = [(-500, 0),(-500, 0), (-500, 0),(-500, 0),(-500, 0)]
for c, row2 in usertable.iloc[a].items():
if c == a:
continue
elif ratings.at[i,c] < 0:
continue
if row2 > matches[0][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = matches[1]
matches[1] = matches[0]
matches[0] = (row2, c)
elif row2 > matches[1][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = matches[1]
matches[1] = (row2, c)
elif row2 > matches[2][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = (row2, c)
elif row2 > matches[3][0]:
matches[4] = matches[3]
matches[3] = (row2, c)
elif row2 > matches[4][0]:
matches[4] = (row2, c)
index2 = 5
weighted = [0,0,0,0,0]
number = 0
for d in matches:
if d[0] == -500:
continue
point = ratings.at[i,d[1]]
weighted[matches.index(d)] = point * index2
number += index2
index2 -= 1
if number != 0:
result.at[i,a] = sum(weighted) / number
else:
result.at[i,a] = 6
# Creates the dataframe that will be used to calculate the average difference between the predicted and actual ratings
difference = pd.DataFrame(data=0, index=range(ratings.shape[0]), columns=range(ratings.shape[1]))
# Iterates through every rating
for i, row in ratings.iterrows():
row = row.to_frame()
for a, column in row.iterrows():
# If the person has not rated the movie, it will not be included in the average
if ratings.at[i,a] < 0:
continue
# Calculates the difference between the predicted and actual rating
difference.at[i,a] = result.at[i,a] - ratings.at[i,a]
# Calculates the average difference between the predicted and actual ratings
mean = 0
index = 0
for i, row in difference.iterrows():
row = row.to_frame()
for a, column in row.iterrows():
# Ignores data points that aren't ratings
if difference.at[i,a] == 0:
continue
# Adds the difference to the mean
mean += abs(difference.at[i,a])
index += 1
# Calculates the average difference
mean = mean / index
# Prints out and writes the results to a file
print(result)
print(difference)
print(mean)
result.to_csv('result.csv')
difference.to_csv('difference.csv')
f = open('mean.txt', 'x')
f.write("mean:" + str(mean))
f.close()
# Calls the function
guessRating(ratings, movies, people)
|
import pandas as pd
import numpy as np
'''
This is how I originally imported the data.
data = pd.read_csv('./ml/data.csv', sep=',', header=None)
ratings = pd.read_csv('./ml/ratings.csv', sep=',', header=None)
people = pd.read_csv('./ml/people.csv', sep=',', header=None)
movies = data.loc[1:57, 2]
ratings = ratings.astype(float)
ratings.to_numpy()
movies.to_frame()
'''
# Add numpy arrays here (ratings is just a rectangle of the numerical ratings, people is a row of initials and names, movies is a row of movie names):
'''
ratings = []
movies = []
people = []
'''
def guessRating(ratings, movies, people):
# using pandas because I feel more comfortable with it
result = pd.DataFrame()
usertable = pd.DataFrame(data=3, index=range(people.shape[1]), columns=range(people.shape[1]))
ratings = pd.DataFrame(ratings)
print(people)
# Uses a simplified version of collaborative filtering to determine which users like similar movies
for i, row in ratings.iterrows():
# sets "row" to a dataframe so I can iterate through it
row = row.to_frame()
for a, user in row.iterrows():
index = 0
# iterates through every user
for b, user2 in row.iterrows():
# makes sure it is not checking itself
if b == a:
index += 1
continue
# Makes sure that they actually rated the movie
elif ratings.at[i,b] < 0:
index += 1
continue
# If the rating is over 10, set it to 10
elif ratings.at[i,b] > 10:
ratings.at[i,b] = 10
# If the ratings are the same, add 2 to the similarity score
if ratings.at[i,b] == ratings.at[i,a]:
usertable.at[a,index] = usertable.at[a,index] + 2
# If they are within 2, add 1 to the similarity score
elif ratings.at[i,b] < ratings.at[i,a] and ratings.at[i,b] > ratings.at[i,a] - 2 or ratings.at[i,b] > ratings.at[i,a] and ratings.at[i,b] < ratings.at[i,a] + 2:
usertable.at[a,index] = usertable.at[a,index] + 1
# If they are far apart, subtract 1 from the similarity score
else:
usertable.at[a,index] = usertable.at[a,index] - 1
index += 1
# Iterates through every movie
for i, row in ratings.iterrows():
index = 0
row = row.to_frame()
# Creates the matches list, which will store the top 5 most similar people
matches = [(-500, 0),(-500, 0), (-500, 0),(-500, 0),(-500, 0)]
for c, row2 in usertable.iloc[0].items():
# Makes sure that it is not checking itself
if c == 0:
continue
# Makes sure that they actually rated the movie
elif ratings.at[i,c] < 0:
continue
# If the person is more similar than someone in the top five, adjust accordingly
if row2 > matches[0][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = matches[1]
matches[1] = matches[0]
matches[0] = (row2, c)
elif row2 > matches[1][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = matches[1]
matches[1] = (row2, c)
elif row2 > matches[2][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = (row2, c)
elif row2 > matches[3][0]:
matches[4] = matches[3]
matches[3] = (row2, c)
elif row2 > matches[4][0]:
matches[4] = (row2, c)
# Takes a weighted average of the top 5's ratings of the same movie
index2 = 5
weighted = [0,0,0,0,0]
number = 0
for d in matches:
# If less than 5 people rated the movie, it will ignore the placeholder values
if d[0] == -500:
continue
# Gets the person's rating of the movie
point = ratings.at[i,d[1]]
# Adds the rating to the weighted average
weighted[matches.index(d)] = point * index2
number += index2
index2 -= 1
# Divides the sum of the ratings by the number of times it was multiplied by to create the average
if number != 0:
result.at[i,0] = sum(weighted) / number
# If no one else rated the movie, guess the rating is 6
else:
result.at[i,0] = 6
# Does the same thing, but for the rest of the people
for a, user in row.iterrows():
matches = [(-500, 0),(-500, 0), (-500, 0),(-500, 0),(-500, 0)]
for c, row2 in usertable.iloc[a].items():
if c == a:
continue
elif ratings.at[i,c] < 0:
continue
if row2 > matches[0][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = matches[1]
matches[1] = matches[0]
matches[0] = (row2, c)
elif row2 > matches[1][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = matches[1]
matches[1] = (row2, c)
elif row2 > matches[2][0]:
matches[4] = matches[3]
matches[3] = matches[2]
matches[2] = (row2, c)
elif row2 > matches[3][0]:
matches[4] = matches[3]
matches[3] = (row2, c)
elif row2 > matches[4][0]:
matches[4] = (row2, c)
index2 = 5
weighted = [0,0,0,0,0]
number = 0
for d in matches:
if d[0] == -500:
continue
point = ratings.at[i,d[1]]
weighted[matches.index(d)] = point * index2
number += index2
index2 -= 1
if number != 0:
result.at[i,a] = sum(weighted) / number
else:
result.at[i,a] = 6
# Creates the dataframe that will be used to calculate the average difference between the predicted and actual ratings
difference = pd.DataFrame(data=0, index=range(ratings.shape[0]), columns=range(ratings.shape[1]))
# Iterates through every rating
for i, row in ratings.iterrows():
row = row.to_frame()
for a, column in row.iterrows():
# If the person has not rated the movie, it will not be included in the average
if ratings.at[i,a] < 0:
continue
# Calculates the difference between the predicted and actual rating
difference.at[i,a] = result.at[i,a] - ratings.at[i,a]
# Calculates the average difference between the predicted and actual ratings
mean = 0
index = 0
for i, row in difference.iterrows():
row = row.to_frame()
for a, column in row.iterrows():
# Ignores data points that aren't ratings
if difference.at[i,a] == 0:
continue
# Adds the difference to the mean
mean += abs(difference.at[i,a])
index += 1
# Calculates the average difference
mean = mean / index
# Prints out and writes the results to a file
print(result)
print(difference)
print(mean)
result.to_csv('result.csv')
difference.to_csv('difference.csv')
f = open('mean.txt', 'x')
f.write("mean:" + str(mean))
f.close()
# Calls the function
guessRating(ratings, movies, people)
|
en
| 0.903597
|
This is how I originally imported the data. data = pd.read_csv('./ml/data.csv', sep=',', header=None) ratings = pd.read_csv('./ml/ratings.csv', sep=',', header=None) people = pd.read_csv('./ml/people.csv', sep=',', header=None) movies = data.loc[1:57, 2] ratings = ratings.astype(float) ratings.to_numpy() movies.to_frame() # Add numpy arrays here (ratings is just a rectangle of the numerical ratings, people is a row of initials and names, movies is a row of movie names): ratings = [] movies = [] people = [] # using pandas because I feel more comfortable with it # Uses a simplified version of collaborative filtering to determine which users like similar movies # sets "row" to a dataframe so I can iterate through it # iterates through every user # makes sure it is not checking itself # Makes sure that they actually rated the movie # If the rating is over 10, set it to 10 # If the ratings are the same, add 2 to the similarity score # If they are within 2, add 1 to the similarity score # If they are far apart, subtract 1 from the similarity score # Iterates through every movie # Creates the matches list, which will store the top 5 most similar people # Makes sure that it is not checking itself # Makes sure that they actually rated the movie # If the person is more similar than someone in the top five, adjust accordingly # Takes a weighted average of the top 5's ratings of the same movie # If less than 5 people rated the movie, it will ignore the placeholder values # Gets the person's rating of the movie # Adds the rating to the weighted average # Divides the sum of the ratings by the number of times it was multiplied by to create the average # If no one else rated the movie, guess the rating is 6 # Does the same thing, but for the rest of the people # Creates the dataframe that will be used to calculate the average difference between the predicted and actual ratings # Iterates through every rating # If the person has not rated the movie, it will not be included in the 
average # Calculates the difference between the predicted and actual rating # Calculates the average difference between the predicted and actual ratings # Ignores data points that aren't ratings # Adds the difference to the mean # Calculates the average difference # Prints out and writes the results to a file # Calls the function
| 3.802667
| 4
|
sort/performance.py
|
dsysoev/fun-with-algorithms
| 11
|
6626590
|
# coding: utf-8
import os
import timeit
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('seaborn')
import argparse
import tempfile
import pandas as pd
# noinspection PyUnresolvedReferences
from insertionsort import insertionsort
# noinspection PyUnresolvedReferences
from mergesort import mergesort
# noinspection PyUnresolvedReferences
from heapsort import heapsort
# noinspection PyUnresolvedReferences
from quicksort import quicksort
# noinspection PyUnresolvedReferences
from quicksort import quicksort_random
# noinspection PyUnresolvedReferences
from quicksort import quicksort_median
# noinspection PyUnresolvedReferences
from countingsort import countingsort
# noinspection PyUnresolvedReferences
from radixsort import radixsort
# noinspection PyUnresolvedReferences
from bucketsort import bucketsort
def get_performance_data():
skip_algorithm_list = []
data = {'numbers': []}
max_num = 2 ** (FLAGS.max_degree + 1)
for i in range(1, FLAGS.max_degree + 1):
n = 2 ** i
a = np.random.randint(-max_num, max_num, size=(n)).tolist()
data['numbers'].append(n)
for algorithm, desc in [
('insertionsort', 'insertion sort'),
('mergesort', 'merge sort'),
('heapsort', 'heap sort'),
('quicksort', 'quick sort'),
('countingsort', 'counting sort'),
('radixsort', 'radix sort'),
('bucketsort', 'bucket sort'),
]:
# skip slow algorithms and set NaN
if algorithm in skip_algorithm_list:
duration = float('NaN')
else:
duration = timeit.Timer(
algorithm + '({})'.format(a),
"""from __main__ import {}""".format(algorithm)
).timeit(number=100)
if desc not in data:
data[desc] = []
# if algorithm work more than max_duration_time
# add it to skip_algorithm_list
if duration > FLAGS.max_duration_time:
skip_algorithm_list.append(algorithm)
data[desc].append(duration)
return data
def read_df():
""" read results file and return dataframe """
# check results file
if not os.path.isfile(FLAGS.results_file):
raise IOError("No such file '{}'".format(FLAGS.results_file))
# read DataFrame
return pd.read_csv(FLAGS.results_file, index_col='numbers')
def plot_chart():
""" Read result file and plot chart """
results = read_df()
# plot chart
fig, (ax1, ax2) = plt.subplots(2)
for name in results.columns:
results[name].plot(ax=ax1)
for name in results.columns:
(results[name] / results.index).plot(ax=ax2)
ax1.set_title('Comparison of execution speed for sort algorithms (for 100 launch)')
ax1.set_ylabel('duration, s')
ax1.set_xscale('log')
ax2.set_ylabel('duration / length of array')
ax2.set_xscale('log')
ax2.set_yscale('log')
for ax in (ax1, ax2):
ax.set_xlabel('length of array')
ax.legend()
plt.show()
def main():
if FLAGS.force or not os.path.isfile(FLAGS.results_file):
if not os.path.isdir(os.path.dirname(FLAGS.results_file)):
# make folders in tmp
os.makedirs(os.path.dirname(FLAGS.results_file))
# get data
data = get_performance_data()
dataframe = pd.DataFrame(data).set_index('numbers')
# save it to tmp folder
dataframe.to_csv(FLAGS.results_file, header=True)
print('data saved to {} file'.format(FLAGS.results_file))
plot_chart()
if __name__ in "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--force', action='store_true')
parser.add_argument('--max_degree', type=int, default=13)
parser.add_argument('--max_duration_time', type=float, default=1.)
parser.add_argument(
'--results_file',
type=str,
default=os.path.join(tempfile.gettempdir(),
'fun-with-algorithms',
'sort.csv'),
help='File with results')
FLAGS, unparsed = parser.parse_known_args()
main()
|
# coding: utf-8
import os
import timeit
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('seaborn')
import argparse
import tempfile
import pandas as pd
# noinspection PyUnresolvedReferences
from insertionsort import insertionsort
# noinspection PyUnresolvedReferences
from mergesort import mergesort
# noinspection PyUnresolvedReferences
from heapsort import heapsort
# noinspection PyUnresolvedReferences
from quicksort import quicksort
# noinspection PyUnresolvedReferences
from quicksort import quicksort_random
# noinspection PyUnresolvedReferences
from quicksort import quicksort_median
# noinspection PyUnresolvedReferences
from countingsort import countingsort
# noinspection PyUnresolvedReferences
from radixsort import radixsort
# noinspection PyUnresolvedReferences
from bucketsort import bucketsort
def get_performance_data():
skip_algorithm_list = []
data = {'numbers': []}
max_num = 2 ** (FLAGS.max_degree + 1)
for i in range(1, FLAGS.max_degree + 1):
n = 2 ** i
a = np.random.randint(-max_num, max_num, size=(n)).tolist()
data['numbers'].append(n)
for algorithm, desc in [
('insertionsort', 'insertion sort'),
('mergesort', 'merge sort'),
('heapsort', 'heap sort'),
('quicksort', 'quick sort'),
('countingsort', 'counting sort'),
('radixsort', 'radix sort'),
('bucketsort', 'bucket sort'),
]:
# skip slow algorithms and set NaN
if algorithm in skip_algorithm_list:
duration = float('NaN')
else:
duration = timeit.Timer(
algorithm + '({})'.format(a),
"""from __main__ import {}""".format(algorithm)
).timeit(number=100)
if desc not in data:
data[desc] = []
# if algorithm work more than max_duration_time
# add it to skip_algorithm_list
if duration > FLAGS.max_duration_time:
skip_algorithm_list.append(algorithm)
data[desc].append(duration)
return data
def read_df():
""" read results file and return dataframe """
# check results file
if not os.path.isfile(FLAGS.results_file):
raise IOError("No such file '{}'".format(FLAGS.results_file))
# read DataFrame
return pd.read_csv(FLAGS.results_file, index_col='numbers')
def plot_chart():
""" Read result file and plot chart """
results = read_df()
# plot chart
fig, (ax1, ax2) = plt.subplots(2)
for name in results.columns:
results[name].plot(ax=ax1)
for name in results.columns:
(results[name] / results.index).plot(ax=ax2)
ax1.set_title('Comparison of execution speed for sort algorithms (for 100 launch)')
ax1.set_ylabel('duration, s')
ax1.set_xscale('log')
ax2.set_ylabel('duration / length of array')
ax2.set_xscale('log')
ax2.set_yscale('log')
for ax in (ax1, ax2):
ax.set_xlabel('length of array')
ax.legend()
plt.show()
def main():
if FLAGS.force or not os.path.isfile(FLAGS.results_file):
if not os.path.isdir(os.path.dirname(FLAGS.results_file)):
# make folders in tmp
os.makedirs(os.path.dirname(FLAGS.results_file))
# get data
data = get_performance_data()
dataframe = pd.DataFrame(data).set_index('numbers')
# save it to tmp folder
dataframe.to_csv(FLAGS.results_file, header=True)
print('data saved to {} file'.format(FLAGS.results_file))
plot_chart()
if __name__ in "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--force', action='store_true')
parser.add_argument('--max_degree', type=int, default=13)
parser.add_argument('--max_duration_time', type=float, default=1.)
parser.add_argument(
'--results_file',
type=str,
default=os.path.join(tempfile.gettempdir(),
'fun-with-algorithms',
'sort.csv'),
help='File with results')
FLAGS, unparsed = parser.parse_known_args()
main()
|
en
| 0.638377
|
# coding: utf-8 # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # skip slow algorithms and set NaN from __main__ import {} # if algorithm work more than max_duration_time # add it to skip_algorithm_list read results file and return dataframe # check results file # read DataFrame Read result file and plot chart # plot chart # make folders in tmp # get data # save it to tmp folder
| 2.447037
| 2
|
examples/ibllib/ephys_qc_raw.py
|
SebastianBruijns/ibllib
| 0
|
6626591
|
<reponame>SebastianBruijns/ibllib<gh_stars>0
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from ibllib.ephys import ephysqc
import alf.io
def _plot_spectra(outpath, typ, savefig=True):
spec = alf.io.load_object(outpath, '_iblqc_ephysSpectralDensity' + typ.upper())
sns.set_style("whitegrid")
plt.figure(figsize=[9, 4.5])
ax = plt.axes()
ax.plot(spec['freqs'], 20 * np.log10(spec['power'] + 1e-14),
linewidth=0.5, color=[0.5, 0.5, 0.5])
ax.plot(spec['freqs'], 20 * np.log10(np.median(spec['power'] + 1e-14, axis=1)), label='median')
ax.set_xlabel(r'Frequency (Hz)')
ax.set_ylabel(r'dB rel to $V^2.$Hz$^{-1}$')
if typ == 'ap':
ax.set_ylim([-275, -125])
elif typ == 'lf':
ax.set_ylim([-260, -60])
ax.legend()
if savefig:
plt.savefig(outpath / (typ + '_spec.png'), dpi=150)
def _plot_rmsmap(outfil, typ, savefig=True):
rmsmap = alf.io.load_object(outpath, '_iblqc_ephysTimeRms' + typ.upper())
plt.figure(figsize=[12, 4.5])
axim = plt.axes([0.2, 0.1, 0.7, 0.8])
axrms = plt.axes([0.05, 0.1, 0.15, 0.8])
axcb = plt.axes([0.92, 0.1, 0.02, 0.8])
axrms.plot(np.median(rmsmap['rms'], axis=0)[:-1] * 1e6, np.arange(1, rmsmap['rms'].shape[1]))
axrms.set_ylim(0, rmsmap['rms'].shape[1])
im = axim.imshow(20 * np.log10(rmsmap['rms'].T + 1e-15), aspect='auto', origin='lower',
extent=[rmsmap['timestamps'][0], rmsmap['timestamps'][-1],
0, rmsmap['rms'].shape[1]])
axim.set_xlabel(r'Time (s)')
axim.set_ylabel(r'Channel Number')
plt.colorbar(im, cax=axcb)
if typ == 'ap':
im.set_clim(-110, -90)
axrms.set_xlim(100, 0)
elif typ == 'lf':
im.set_clim(-100, -60)
axrms.set_xlim(500, 0)
axim.set_xlim(0, 4000)
if savefig:
plt.savefig(outpath / (typ + '_rms.png'), dpi=150)
if __name__ == "__main__":
fbin = Path('/mnt/s1/Data/Subjects/ZM_1735/2019-08-01/001/raw_ephys_data/probe_left/'
'_iblrig_ephysData.raw_g0_t0.imec.ap.bin')
ephysqc.extract_rmsmap(fbin) # make sure you send a path for the time being and not a string
typ = 'lf'
outpath = fbin.parent
_plot_spectra(outpath, typ)
_plot_rmsmap(outpath, typ)
|
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from ibllib.ephys import ephysqc
import alf.io
def _plot_spectra(outpath, typ, savefig=True):
spec = alf.io.load_object(outpath, '_iblqc_ephysSpectralDensity' + typ.upper())
sns.set_style("whitegrid")
plt.figure(figsize=[9, 4.5])
ax = plt.axes()
ax.plot(spec['freqs'], 20 * np.log10(spec['power'] + 1e-14),
linewidth=0.5, color=[0.5, 0.5, 0.5])
ax.plot(spec['freqs'], 20 * np.log10(np.median(spec['power'] + 1e-14, axis=1)), label='median')
ax.set_xlabel(r'Frequency (Hz)')
ax.set_ylabel(r'dB rel to $V^2.$Hz$^{-1}$')
if typ == 'ap':
ax.set_ylim([-275, -125])
elif typ == 'lf':
ax.set_ylim([-260, -60])
ax.legend()
if savefig:
plt.savefig(outpath / (typ + '_spec.png'), dpi=150)
def _plot_rmsmap(outfil, typ, savefig=True):
rmsmap = alf.io.load_object(outpath, '_iblqc_ephysTimeRms' + typ.upper())
plt.figure(figsize=[12, 4.5])
axim = plt.axes([0.2, 0.1, 0.7, 0.8])
axrms = plt.axes([0.05, 0.1, 0.15, 0.8])
axcb = plt.axes([0.92, 0.1, 0.02, 0.8])
axrms.plot(np.median(rmsmap['rms'], axis=0)[:-1] * 1e6, np.arange(1, rmsmap['rms'].shape[1]))
axrms.set_ylim(0, rmsmap['rms'].shape[1])
im = axim.imshow(20 * np.log10(rmsmap['rms'].T + 1e-15), aspect='auto', origin='lower',
extent=[rmsmap['timestamps'][0], rmsmap['timestamps'][-1],
0, rmsmap['rms'].shape[1]])
axim.set_xlabel(r'Time (s)')
axim.set_ylabel(r'Channel Number')
plt.colorbar(im, cax=axcb)
if typ == 'ap':
im.set_clim(-110, -90)
axrms.set_xlim(100, 0)
elif typ == 'lf':
im.set_clim(-100, -60)
axrms.set_xlim(500, 0)
axim.set_xlim(0, 4000)
if savefig:
plt.savefig(outpath / (typ + '_rms.png'), dpi=150)
if __name__ == "__main__":
fbin = Path('/mnt/s1/Data/Subjects/ZM_1735/2019-08-01/001/raw_ephys_data/probe_left/'
'_iblrig_ephysData.raw_g0_t0.imec.ap.bin')
ephysqc.extract_rmsmap(fbin) # make sure you send a path for the time being and not a string
typ = 'lf'
outpath = fbin.parent
_plot_spectra(outpath, typ)
_plot_rmsmap(outpath, typ)
|
en
| 0.938101
|
# make sure you send a path for the time being and not a string
| 2.349291
| 2
|
problems/leetcode/66.py
|
JayMonari/py-personal
| 0
|
6626592
|
<reponame>JayMonari/py-personal
from typing import List
def plus_one(digits: List[int]) -> List[int]:
remainder = False
for idx in reversed(range(len(digits))):
digits[idx] += 1
if digits[idx] != 10:
remainder = False
break
remainder = True
digits[idx] = 0
if remainder:
digits.insert(0, 1)
return digits
|
from typing import List
def plus_one(digits: List[int]) -> List[int]:
remainder = False
for idx in reversed(range(len(digits))):
digits[idx] += 1
if digits[idx] != 10:
remainder = False
break
remainder = True
digits[idx] = 0
if remainder:
digits.insert(0, 1)
return digits
|
none
| 1
| 3.573251
| 4
|
|
pyparrot/DroneVision.py
|
DavidMutchler/pyparrot
| 0
|
6626593
|
"""
DroneVision is separated from the main Mambo/Bebop class to enable the use of the drone without the FPV camera.
If you want to do vision processing, you will need to create a DroneVision object to capture the
video stream.
Note that this module relies on the opencv module and the ffmpeg program
Ffmpeg write the images out to the images directory and then they are read in from the user thread. The DroneVisionGUI
does not save copies of the images and instead shows you the images on the screen (they are saved to memory only).
While you can see the images in real-time from this program using VisionServer, if you need copies of the images,
you will want to use the ffmpeg approach. If you want a smaller delay on your image data for real-time control, you likely want
to use libvlc and DroneVisionGUI.
Author: <NAME>, <EMAIL>
"""
import cv2
import threading
import time
import subprocess
import os
from os.path import join
import inspect
from pyparrot.utils.NonBlockingStreamReader import NonBlockingStreamReader
class DroneVision:
    """
    Capture the FPV video stream of a Mambo or Bebop drone.

    ffmpeg writes the rtsp stream out as numbered png files in the images
    directory and a background thread reads them back in with opencv,
    keeping the most recent frames in a ring buffer.
    """

    def __init__(self, drone_object, is_bebop, buffer_size=200):
        """
        Setup your vision object and initialize your buffers. You won't start seeing pictures
        until you call open_video.

        :param drone_object: reference to the drone (mambo or bebop) object
        :param is_bebop: True if it is a bebop and false if it is a mambo
        :param buffer_size: number of frames to buffer in memory. Defaults to 200.
        """
        self.fps = 30
        self.buffer_size = buffer_size
        self.drone_object = drone_object
        self.is_bebop = is_bebop

        # ring buffer holding the last buffer_size frames (None until filled)
        self.buffer = [None] * buffer_size
        self.buffer_index = 0

        # thread for monitoring the vision (not started until open_video connects)
        self.vision_thread = threading.Thread(target=self._buffer_vision,
                                              args=(buffer_size, ))
        self.user_vision_thread = None
        self.vision_running = True

        # the vision thread reads the png files that ffmpeg writes; the files
        # are numbered starting at 1
        self.image_index = 1

    def set_user_callback_function(self, user_callback_function=None, user_callback_args=None):
        """
        Set the (optional) user callback function for handling the new vision frames. This is
        run in a separate thread that starts when you start the vision buffering.

        :param user_callback_function: function called each time a new frame arrives
        :param user_callback_args: arguments to the function
        :return:
        """
        self.user_vision_thread = threading.Thread(target=self._user_callback,
                                                   args=(user_callback_function, user_callback_args))

    def open_video(self):
        """
        Open the video stream using ffmpeg for capturing and processing. The address for the stream
        is the same for all Mambos and is documented here:

        http://forum.developer.parrot.com/t/streaming-address-of-mambo-fpv-for-videoprojection/6442/6

        Remember that this will only work if you have connected to the wifi for your mambo!

        Note that the old method tried to open the stream directly into opencv but there are known issues
        with rtsp streams in opencv. We bypassed opencv to use ffmpeg directly and then opencv is used to
        process the output of ffmpeg.

        :return: True if the vision opened correctly and False otherwise
        """
        # start the stream on the bebop
        if (self.is_bebop):
            self.drone_object.start_video_stream()

        # we have bypassed the old opencv VideoCapture method because it was unreliable for rtsp
        # get the path for the config files
        fullPath = inspect.getfile(DroneVision)
        shortPathIndex = fullPath.rfind("/")
        if (shortPathIndex == -1):
            # handle Windows paths
            shortPathIndex = fullPath.rfind("\\")
        print(shortPathIndex)
        shortPath = fullPath[0:shortPathIndex]
        self.imagePath = join(shortPath, "images")
        self.utilPath = join(shortPath, "utils")
        print(self.imagePath)
        print(self.utilPath)

        # the first step is to open the rtsp stream through ffmpeg first
        # this step creates a directory full of images, one per frame
        print("Opening ffmpeg")
        if (self.is_bebop):
            cmdStr = "ffmpeg -protocol_whitelist \"file,rtp,udp\" -i %s/bebop.sdp -r 30 image_" % self.utilPath + "%03d.png &"
            print(cmdStr)
            self.ffmpeg_process = \
                subprocess.Popen(cmdStr, shell=True, cwd=self.imagePath, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        else:
            self.ffmpeg_process = \
                subprocess.Popen("ffmpeg -i rtsp://192.168.99.1/media/stream2 -r 30 image_%03d.png &",
                                 shell=True, cwd=self.imagePath, stderr=subprocess.PIPE, stdout=subprocess.PIPE)

        # immediately start the vision buffering (before we even know if it succeeded since waiting puts us behind)
        self._start_video_buffering()

        # open non-blocking readers to look for errors or success
        print("Opening non-blocking readers")
        stderr_reader = NonBlockingStreamReader(self.ffmpeg_process.stderr)
        stdout_reader = NonBlockingStreamReader(self.ffmpeg_process.stdout)

        # look for success in the stdout
        # If it starts correctly, it will have the following output in the stdout
        #   Stream mapping:
        #   Stream #0:0 -> #0:0 (h264 (native) -> png (native))
        # if it fails, it has the following in stderr
        #   Output file #0 does not contain any stream
        # NOTE(review): if ffmpeg emits neither message this loop never exits --
        # a timeout may be worth adding; confirm against real ffmpeg output
        success = False
        while (not success):
            line = stderr_reader.readline()
            if (line is not None):
                line_str = line.decode("utf-8")
                print(line_str)
                if line_str.find("Stream #0:0 -> #0:0 (h264 (native) -> png (native))") > -1:
                    success = True
                    break

                if line_str.find("Output file #0 does not contain any stream") > -1:
                    print("Having trouble connecting to the camera 1. A reboot of the mambo may help.")
                    break

            line = stdout_reader.readline()
            if (line is not None):
                line_str = line.decode("utf-8")
                print(line_str)
                if line_str.find("Output file #0 does not contain any stream") > -1:
                    print("Having trouble connecting to the camera 2. A reboot of the mambo may help.")
                    break

                if line_str.find("Stream #0:0 -> #0:0 (h264 (native) -> png (native))") > -1:
                    success = True

        # cleanup our non-blocking readers no matter what happened
        stdout_reader.finish_reader()
        stderr_reader.finish_reader()

        # return whether or not it worked
        return success

    def _start_video_buffering(self):
        """
        If the video capture was successfully opened, then start the thread to buffer the stream.

        :return: Nothing
        """
        print("starting vision thread")
        self.vision_thread.start()

        if (self.user_vision_thread is not None):
            self.user_vision_thread.start()

    def _user_callback(self, user_vision_function, user_args):
        """
        Internal method to call the user vision functions.

        :param user_vision_function: user callback function to handle vision
        :param user_args: optional arguments to the user callback function
        :return:
        """
        while (self.vision_running):
            if (self.new_frame):
                user_vision_function(user_args)

                # reset the bit for a new frame
                self.new_frame = False

            # put the thread back to sleep for fps
            # sleeping shorter to ensure we stay caught up on frames
            time.sleep(1.0 / (3.0 * self.fps))

    def _buffer_vision(self, buffer_size):
        """
        Internal method to save valid video captures from the camera fps times a second.

        :param buffer_size: number of images to buffer (set in init)
        :return:
        """
        # start with no new data
        self.new_frame = False

        # when the method is first called, sometimes there is already data to catch up on
        # so find the latest image in the directory and set the index to that
        found_latest = False
        while (not found_latest):
            path = "%s/image_%03d.png" % (self.imagePath, self.image_index)
            # BUGFIX: the original tested "exists and NOT isfile", which is never
            # true for the png files ffmpeg writes, so the catch-up never advanced.
            # Skip past every image file that already exists on disk.
            if os.path.exists(path) and os.path.isfile(path):
                # just increment through it (don't save any of these first images)
                self.image_index = self.image_index + 1
            else:
                found_latest = True

        # run forever, trying to grab the latest image
        while (self.vision_running):
            # grab the latest image from the ffmpeg stream
            try:
                # make the name for the next image
                path = "%s/image_%03d.png" % (self.imagePath, self.image_index)

                if (not os.path.exists(path)) and (not os.path.isfile(path)):
                    # next image not written yet: sleep briefly instead of
                    # busy-spinning on the filesystem, then check again
                    time.sleep(1.0 / (2.0 * self.fps))
                    continue

                img = cv2.imread(path, 1)

                # sometimes cv2 returns a None object so skip putting those in the array
                if (img is not None):
                    self.image_index = self.image_index + 1

                    # got a new image, save it to the ring buffer directly
                    self.buffer_index += 1
                    self.buffer_index %= buffer_size
                    self.buffer[self.buffer_index] = img
                    self.new_frame = True

            except cv2.error:
                # assuming it was an empty/partial png, so back up and try again
                # NOTE(review): this decrements even though the index was not
                # advanced on this path -- confirm the intended recovery behavior
                self.image_index = self.image_index - 1
                continue

            # put the thread back to sleep for faster than fps to ensure we stay on top of the frames
            # coming in from ffmpeg
            time.sleep(1.0 / (2.0 * self.fps))

    def get_latest_valid_picture(self):
        """
        Return the latest valid image (from the buffer).

        :return: last valid image received from the Mambo
        """
        return self.buffer[self.buffer_index]

    def close_video(self):
        """
        Stop the vision processing and all its helper threads.
        """
        # the helper threads look for this variable to be true
        self.vision_running = False

        # kill the ffmpeg subprocess
        self.ffmpeg_process.kill()

        # send the command to kill the vision stream (bebop only)
        if (self.is_bebop):
            self.drone_object.stop_video_stream()
|
"""
DroneVision is separated from the main Mambo/Bebop class to enable the use of the drone without the FPV camera.
If you want to do vision processing, you will need to create a DroneVision object to capture the
video stream.
Note that this module relies on the opencv module and the ffmpeg program
Ffmpeg write the images out to the images directory and then they are read in from the user thread. The DroneVisionGUI
does not save copies of the images and instead shows you the images on the screen (they are saved to memory only).
While you can see the images in real-time from this program using VisionServer, if you need copies of the images,
you will want to use the ffmpeg approach. If you want a smaller delay on your image data for real-time control, you likely want
to use libvlc and DroneVisionGUI.
Author: <NAME>, <EMAIL>
"""
import cv2
import threading
import time
import subprocess
import os
from os.path import join
import inspect
from pyparrot.utils.NonBlockingStreamReader import NonBlockingStreamReader
class DroneVision:
    """
    Capture the FPV video stream of a Mambo or Bebop drone.

    ffmpeg writes the rtsp stream out as numbered png files in the images
    directory and a background thread reads them back in with opencv,
    keeping the most recent frames in a ring buffer.
    """

    def __init__(self, drone_object, is_bebop, buffer_size=200):
        """
        Setup your vision object and initialize your buffers. You won't start seeing pictures
        until you call open_video.

        :param drone_object: reference to the drone (mambo or bebop) object
        :param is_bebop: True if it is a bebop and false if it is a mambo
        :param buffer_size: number of frames to buffer in memory. Defaults to 200.
        """
        self.fps = 30
        self.buffer_size = buffer_size
        self.drone_object = drone_object
        self.is_bebop = is_bebop

        # ring buffer holding the last buffer_size frames (None until filled)
        self.buffer = [None] * buffer_size
        self.buffer_index = 0

        # thread for monitoring the vision (not started until open_video connects)
        self.vision_thread = threading.Thread(target=self._buffer_vision,
                                              args=(buffer_size, ))
        self.user_vision_thread = None
        self.vision_running = True

        # the vision thread reads the png files that ffmpeg writes; the files
        # are numbered starting at 1
        self.image_index = 1

    def set_user_callback_function(self, user_callback_function=None, user_callback_args=None):
        """
        Set the (optional) user callback function for handling the new vision frames. This is
        run in a separate thread that starts when you start the vision buffering.

        :param user_callback_function: function called each time a new frame arrives
        :param user_callback_args: arguments to the function
        :return:
        """
        self.user_vision_thread = threading.Thread(target=self._user_callback,
                                                   args=(user_callback_function, user_callback_args))

    def open_video(self):
        """
        Open the video stream using ffmpeg for capturing and processing. The address for the stream
        is the same for all Mambos and is documented here:

        http://forum.developer.parrot.com/t/streaming-address-of-mambo-fpv-for-videoprojection/6442/6

        Remember that this will only work if you have connected to the wifi for your mambo!

        Note that the old method tried to open the stream directly into opencv but there are known issues
        with rtsp streams in opencv. We bypassed opencv to use ffmpeg directly and then opencv is used to
        process the output of ffmpeg.

        :return: True if the vision opened correctly and False otherwise
        """
        # start the stream on the bebop
        if (self.is_bebop):
            self.drone_object.start_video_stream()

        # we have bypassed the old opencv VideoCapture method because it was unreliable for rtsp
        # get the path for the config files
        fullPath = inspect.getfile(DroneVision)
        shortPathIndex = fullPath.rfind("/")
        if (shortPathIndex == -1):
            # handle Windows paths
            shortPathIndex = fullPath.rfind("\\")
        print(shortPathIndex)
        shortPath = fullPath[0:shortPathIndex]
        self.imagePath = join(shortPath, "images")
        self.utilPath = join(shortPath, "utils")
        print(self.imagePath)
        print(self.utilPath)

        # the first step is to open the rtsp stream through ffmpeg first
        # this step creates a directory full of images, one per frame
        print("Opening ffmpeg")
        if (self.is_bebop):
            cmdStr = "ffmpeg -protocol_whitelist \"file,rtp,udp\" -i %s/bebop.sdp -r 30 image_" % self.utilPath + "%03d.png &"
            print(cmdStr)
            self.ffmpeg_process = \
                subprocess.Popen(cmdStr, shell=True, cwd=self.imagePath, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        else:
            self.ffmpeg_process = \
                subprocess.Popen("ffmpeg -i rtsp://192.168.99.1/media/stream2 -r 30 image_%03d.png &",
                                 shell=True, cwd=self.imagePath, stderr=subprocess.PIPE, stdout=subprocess.PIPE)

        # immediately start the vision buffering (before we even know if it succeeded since waiting puts us behind)
        self._start_video_buffering()

        # open non-blocking readers to look for errors or success
        print("Opening non-blocking readers")
        stderr_reader = NonBlockingStreamReader(self.ffmpeg_process.stderr)
        stdout_reader = NonBlockingStreamReader(self.ffmpeg_process.stdout)

        # look for success in the stdout
        # If it starts correctly, it will have the following output in the stdout
        #   Stream mapping:
        #   Stream #0:0 -> #0:0 (h264 (native) -> png (native))
        # if it fails, it has the following in stderr
        #   Output file #0 does not contain any stream
        # NOTE(review): if ffmpeg emits neither message this loop never exits --
        # a timeout may be worth adding; confirm against real ffmpeg output
        success = False
        while (not success):
            line = stderr_reader.readline()
            if (line is not None):
                line_str = line.decode("utf-8")
                print(line_str)
                if line_str.find("Stream #0:0 -> #0:0 (h264 (native) -> png (native))") > -1:
                    success = True
                    break

                if line_str.find("Output file #0 does not contain any stream") > -1:
                    print("Having trouble connecting to the camera 1. A reboot of the mambo may help.")
                    break

            line = stdout_reader.readline()
            if (line is not None):
                line_str = line.decode("utf-8")
                print(line_str)
                if line_str.find("Output file #0 does not contain any stream") > -1:
                    print("Having trouble connecting to the camera 2. A reboot of the mambo may help.")
                    break

                if line_str.find("Stream #0:0 -> #0:0 (h264 (native) -> png (native))") > -1:
                    success = True

        # cleanup our non-blocking readers no matter what happened
        stdout_reader.finish_reader()
        stderr_reader.finish_reader()

        # return whether or not it worked
        return success

    def _start_video_buffering(self):
        """
        If the video capture was successfully opened, then start the thread to buffer the stream.

        :return: Nothing
        """
        print("starting vision thread")
        self.vision_thread.start()

        if (self.user_vision_thread is not None):
            self.user_vision_thread.start()

    def _user_callback(self, user_vision_function, user_args):
        """
        Internal method to call the user vision functions.

        :param user_vision_function: user callback function to handle vision
        :param user_args: optional arguments to the user callback function
        :return:
        """
        while (self.vision_running):
            if (self.new_frame):
                user_vision_function(user_args)

                # reset the bit for a new frame
                self.new_frame = False

            # put the thread back to sleep for fps
            # sleeping shorter to ensure we stay caught up on frames
            time.sleep(1.0 / (3.0 * self.fps))

    def _buffer_vision(self, buffer_size):
        """
        Internal method to save valid video captures from the camera fps times a second.

        :param buffer_size: number of images to buffer (set in init)
        :return:
        """
        # start with no new data
        self.new_frame = False

        # when the method is first called, sometimes there is already data to catch up on
        # so find the latest image in the directory and set the index to that
        found_latest = False
        while (not found_latest):
            path = "%s/image_%03d.png" % (self.imagePath, self.image_index)
            # BUGFIX: the original tested "exists and NOT isfile", which is never
            # true for the png files ffmpeg writes, so the catch-up never advanced.
            # Skip past every image file that already exists on disk.
            if os.path.exists(path) and os.path.isfile(path):
                # just increment through it (don't save any of these first images)
                self.image_index = self.image_index + 1
            else:
                found_latest = True

        # run forever, trying to grab the latest image
        while (self.vision_running):
            # grab the latest image from the ffmpeg stream
            try:
                # make the name for the next image
                path = "%s/image_%03d.png" % (self.imagePath, self.image_index)

                if (not os.path.exists(path)) and (not os.path.isfile(path)):
                    # next image not written yet: sleep briefly instead of
                    # busy-spinning on the filesystem, then check again
                    time.sleep(1.0 / (2.0 * self.fps))
                    continue

                img = cv2.imread(path, 1)

                # sometimes cv2 returns a None object so skip putting those in the array
                if (img is not None):
                    self.image_index = self.image_index + 1

                    # got a new image, save it to the ring buffer directly
                    self.buffer_index += 1
                    self.buffer_index %= buffer_size
                    self.buffer[self.buffer_index] = img
                    self.new_frame = True

            except cv2.error:
                # assuming it was an empty/partial png, so back up and try again
                # NOTE(review): this decrements even though the index was not
                # advanced on this path -- confirm the intended recovery behavior
                self.image_index = self.image_index - 1
                continue

            # put the thread back to sleep for faster than fps to ensure we stay on top of the frames
            # coming in from ffmpeg
            time.sleep(1.0 / (2.0 * self.fps))

    def get_latest_valid_picture(self):
        """
        Return the latest valid image (from the buffer).

        :return: last valid image received from the Mambo
        """
        return self.buffer[self.buffer_index]

    def close_video(self):
        """
        Stop the vision processing and all its helper threads.
        """
        # the helper threads look for this variable to be true
        self.vision_running = False

        # kill the ffmpeg subprocess
        self.ffmpeg_process.kill()

        # send the command to kill the vision stream (bebop only)
        if (self.is_bebop):
            self.drone_object.stop_video_stream()
|
en
| 0.828027
|
DroneVision is separated from the main Mambo/Bebop class to enable the use of the drone without the FPV camera. If you want to do vision processing, you will need to create a DroneVision object to capture the video stream. Note that this module relies on the opencv module and the ffmpeg program Ffmpeg write the images out to the images directory and then they are read in from the user thread. The DroneVisionGUI does not save copies of the images and instead shows you the images on the screen (they are saved to memory only). While you can see the images in real-time from this program using VisionServer, if you need copies of the images, you will want to use the ffmpeg approach. If you want a smaller delay on your image data for real-time control, you likely want to use libvlc and DroneVisionGUI. Author: <NAME>, <EMAIL> Setup your vision object and initialize your buffers. You won't start seeing pictures until you call open_video. :param drone_object reference to the drone (mambo or bebop) object :param is_bebop: True if it is a bebop and false if it is a mambo :param buffer_size: number of frames to buffer in memory. Defaults to 10. # initialize a buffer (will contain the last buffer_size vision objects) # setup the thread for monitoring the vision (but don't start it until we connect in open_video) # the vision thread starts opencv on these files. That will happen inside the other thread # so here we just sent the image index to 1 ( to start) Set the (optional) user callback function for handling the new vision frames. This is run in a separate thread that starts when you start the vision buffering :param user_callback_function: function :param user_callback_args: arguments to the function :return: Open the video stream using ffmpeg for capturing and processing. 
The address for the stream is the same for all Mambos and is documented here: http://forum.developer.parrot.com/t/streaming-address-of-mambo-fpv-for-videoprojection/6442/6 Remember that this will only work if you have connected to the wifi for your mambo! Note that the old method tried to open the stream directly into opencv but there are known issues with rtsp streams in opencv. We bypassed opencv to use ffmpeg directly and then opencv is used to process the output of ffmpeg :return True if the vision opened correctly and False otherwise # start the stream on the bebop # we have bypassed the old opencv VideoCapture method because it was unreliable for rtsp # get the path for the config files # handle Windows paths # the first step is to open the rtsp stream through ffmpeg first # this step creates a directory full of images, one per frame # immediately start the vision buffering (before we even know if it succeeded since waiting puts us behind) # open non-blocking readers to look for errors or success # look for success in the stdout # If it starts correctly, it will have the following output in the stdout # Stream mapping: # Stream #0:0 -> #0:0 (h264 (native) -> png (native)) # if it fails, it has the following in stderr # Output file #0 does not contain any stream #0:0 -> #0:0 (h264 (native) -> png (native))") > -1: #0 does not contain any stream") > -1: #0 does not contain any stream") > -1: #0:0 -> #0:0 (h264 (native) -> png (native))") > -1: # cleanup our non-blocking readers no matter what happened # return whether or not it worked If the video capture was successfully opened, then start the thread to buffer the stream :return: Nothing Internal method to call the user vision functions :param user_vision_function: user callback function to handle vision :param user_args: optional arguments to the user callback function :return: #reset the bit for a new frame # put the thread back to sleep for fps # sleeping shorter to ensure we stay caught up on frames 
Internal method to save valid video captures from the camera fps times a second :param buffer_size: number of images to buffer (set in init) :return: # start with no new data # when the method is first called, sometimes there is already data to catch up on # so find the latest image in the directory and set the index to that # just increment through it (don't save any of these first images) # run forever, trying to grab the latest image # grab the latest image from the ffmpeg stream # make the name for the next image #print("File %s doesn't exist" % (path)) #print(os.listdir(self.imagePath)) # sometimes cv2 returns a None object so skip putting those in the array # got a new image, save it to the buffer directly #print video_frame #Assuming its an empty image, so decrement the index and try again. # print("Trying to read an empty png. Let's wait and try again.") # put the thread back to sleep for faster than fps to ensure we stay on top of the frames # coming in from ffmpeg Return the latest valid image (from the buffer) :return: last valid image received from the Mambo Stop the vision processing and all its helper threads # the helper threads look for this variable to be true # kill the ffmpeg subprocess # send the command to kill the vision stream (bebop only)
| 3.136504
| 3
|
example_ngd.py
|
maryamhgf/backpack
| 0
|
6626594
|
import torch
import torchvision
# The main BackPACK functionalities
from backpack import backpack, extend
# The diagonal GGN extension
# from backpack.extensions import DiagGGNMC
import torch.optim as optim
from backpack.extensions import TRIAL
from torchsummary import summary
import time
# fixing HTTPS issue on Colab
from six.moves import urllib
# Work around HTTP 403 errors when torchvision downloads MNIST on Colab:
# install a global urllib opener that sends a browser-style User-Agent.
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
# torch.set_default_dtype(torch.float64)
# This layer did not exist in Pytorch 1.0
# Hyperparameters
# 0: matmul
# 1: fft
# 2: conv2d
# -1: silent mode [only backpropagation]
# [7]: ordering test [v, n, v, n]
# 13: blocked version [v, n, n]
# 17: adding dropout in backward pass for large linear layers
# 666: using backpack for conv2d [not good, because of repeating]
# Execution mode for the TRIAL backpack extension (see the mode table above).
MODE = 7
print('Convolution mode is:')
if MODE == 0:
    print('MATMUL')
elif MODE == 1:
    print('FFT')
elif MODE == 2:
    print('CONV2D')
elif MODE == -1:
    print('Silent mode: no computation done in backward pass.')

# Training hyperparameters.
BATCH_SIZE = 64
EPOCHS = 1
PLOT = False  # when True, plot the NGD kernel at a few selected iterations
num_classes = 10
STEP_SIZE = 0.01
DAMPING = 1.0
MAX_ITER = 60000//BATCH_SIZE  # number of minibatches in one MNIST epoch

torch.manual_seed(0)

# bc = total number of Jacobian rows (one per sample/class pair)
bc = BATCH_SIZE * num_classes

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print('Selected Device:', device)
print('BATCH_SIZE:', BATCH_SIZE)

# Standard MNIST training loader (downloads into ./data on first run).
mnist_loader = torch.utils.data.dataloader.DataLoader(
    torchvision.datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                (0.1307,), (0.3081,)
            )
        ])),
    batch_size=BATCH_SIZE,
    shuffle=True
)

##### base model from backpack website:
model = torch.nn.Sequential(
    torch.nn.Conv2d(1, 50, 3, 1, padding = (1,1)),
    torch.nn.ReLU(),
    torch.nn.Conv2d(50, 5, 3, 1, padding = (1,1)),
    torch.nn.ReLU(),
    torch.nn.Flatten(),
    torch.nn.Linear(28*28*5, 20),
    torch.nn.ReLU(),
    torch.nn.Linear(20, 100),
    torch.nn.ReLU(),
    torch.nn.Linear(100, 10),
).to(device)

##### fully connected network. Test for linear timings.
# model = torch.nn.Sequential(
#     torch.nn.Flatten(),
#     torch.nn.Linear(784, 1000),
#     torch.nn.ReLU(),
#     torch.nn.Linear(1000, 1000),
#     torch.nn.ReLU(),
#     torch.nn.Linear(1000, 500),
#     torch.nn.ReLU(),
#     torch.nn.Linear(500, 10),
# ).to(device)

summary(model, ( 1, 28, 28))

loss_function = torch.nn.CrossEntropyLoss()
def get_accuracy(output, targets):
    """Fraction of rows in ``output`` whose argmax matches ``targets``."""
    predicted = output.argmax(dim=1, keepdim=True).view_as(targets)
    return predicted.eq(targets).float().mean().item()
# Custom NGD optimizer kept for reference; the experiment currently uses
# plain SGD below.
# class TrialOptimizer(torch.optim.Optimizer):
#     def __init__(self, parameters, step_size, damping):
#         super().__init__(
#             parameters,
#             dict(step_size=step_size, damping=damping)
#         )
#     def step(self):
#         for group in self.param_groups:
#             for p in group["params"]:
#                 step_direction = p.grad / (p.trial + group["damping"])
#                 p.data.add_(-group["step_size"], step_direction)
#         return loss

# Register the model and loss with backpack so the TRIAL extension can hook
# into the backward pass and populate param.trial.
extend(model)
extend(loss_function)

# optimizer = TrialOptimizer(
#     model.parameters(),
#     step_size=STEP_SIZE,
#     damping=DAMPING
# )
optimizer = optim.SGD(model.parameters(), lr=STEP_SIZE)
def get_diff(A, B):
    """Relative error ``||A - B|| / ||A||`` between two tensors."""
    denominator = torch.norm(A)
    return torch.norm(A - B) / denominator
def naive_seq():
    """Build JJ^T the naive way: one ``backward()`` per (class, sample) pair.

    Relies on the module-level ``output``, ``model``, ``BATCH_SIZE`` and
    ``num_classes``; returns the kernel divided by the batch size.
    """
    rows = []
    for class_idx in range(num_classes):
        for sample_idx in range(BATCH_SIZE):
            output[sample_idx, class_idx].backward(retain_graph=True)
            grads = []
            for _, param in model.named_parameters():
                grads.append(param.grad.view(1, -1))
                param.grad = None
            rows.append(torch.cat(grads, 1))
    jac = torch.cat(rows, 0)
    return torch.matmul(jac, jac.permute(1, 0)) / BATCH_SIZE
def naive_vmap():
    """Build JJ^T via ``torch.vmap`` over the identity matrix, one sample at
    a time.

    Relies on the module-level ``output``, ``model``, ``BATCH_SIZE`` and
    ``num_classes``; returns the kernel divided by the batch size.
    """
    identity = torch.eye(num_classes)
    per_sample = []

    def single_jacobian(v):
        # closes over the loop variable i from the enclosing scope
        grads = torch.autograd.grad(output[i, :], model.parameters(), v, retain_graph=True)
        flat = [g.view(-1) for g in grads]
        for _, param in model.named_parameters():
            param.grad = None
        return torch.cat(flat, 0)

    for i in range(BATCH_SIZE):
        per_sample.append(torch.vmap(single_jacobian)(identity))

    jac = torch.cat(per_sample, 0)
    jac = jac.reshape(BATCH_SIZE, num_classes, -1)
    jac = jac.permute(1, 0, 2)
    jac = jac.reshape(BATCH_SIZE * num_classes, -1)
    return torch.matmul(jac, jac.permute(1, 0)) / BATCH_SIZE
def optimal_JJT(RESHAPE):
    """Compute the NGD kernel JJ^T with the backpack TRIAL extension.

    Uses the module-level ``output``, ``y``, ``model``, ``loss_function``,
    ``BATCH_SIZE``, ``num_classes`` and ``MODE``.

    :param RESHAPE: if True, permute the per-layer kernel with a fixed index
        order (experimental ordering test; marked "not useful" by the author)
    :return: (JJT, per-layer list of [kernel, name], JJT_linear, JJT_conv) --
        the linear/conv accumulators are currently disabled (commented out),
        so the last two values are zero
    """
    jac_list = 0
    jac_list_linear = 0
    jac_list_conv = 0
    bc = BATCH_SIZE * num_classes
    L = []
    with backpack(TRIAL(MODE)):
        loss = loss_function(output, y)
        loss.backward(retain_graph=True)
    # after the backward pass, each parameter carries its TRIAL result
    for name, param in model.named_parameters():
        trial_vals = param.trial
        # print('var name and shape:', name,' ', param.shape)
        if RESHAPE: # not useful
            # trial_vals = trial_vals.permute(1, 0, 3, 2) # reshaping to [n, v, n, v]
            vs = [2, 1, 0, 7,6,5,4,3, 9, 8]
            # print(trial_vals.shape)
            trial_vals = trial_vals[vs, :, :, :]
            trial_vals = trial_vals[:, :, vs, :]
            # print(trial_vals.shape)
        L.append([trial_vals / BATCH_SIZE, name])
        # if '0' not in name and '2' not in name and '4' not in name :
        #     jac_list_linear += trial_vals.reshape(bc, bc)
        # else:
        #     jac_list_conv += trial_vals.reshape(bc, bc)
        jac_list += trial_vals.reshape(bc, bc)
        # release the TRIAL buffer so it is not reused next iteration
        param.trial = None
        # param.grad = None
    JJT = jac_list / BATCH_SIZE
    JJT_linear = jac_list_linear / BATCH_SIZE
    JJT_conv = jac_list_conv / BATCH_SIZE
    # if torch.allclose(JJT, JJT_conv + JJT_linear) == False:
    #     print('JJT:', JJT)
    #     print('JJT_conv:', JJT_conv)
    #     print('JJT_linear:', JJT_linear)
    return JJT, L, JJT_linear, JJT_conv
def optimal_JJT_blk():
    """Blocked variant of :func:`optimal_JJT`: assemble JJ^T as a
    block-diagonal matrix from the per-layer TRIAL results.

    Uses the module-level ``output``, ``y``, ``model``, ``loss_function``,
    ``BATCH_SIZE``, ``num_classes`` and ``MODE``.
    """
    jac_list = 0
    bc = BATCH_SIZE * num_classes
    # L = []
    with backpack(TRIAL(MODE)):
        loss = loss_function(output, y)
        loss.backward(retain_graph=True)
    # after the backward pass, each parameter carries its TRIAL result
    for name, param in model.named_parameters():
        trial_vals = param.trial
        # L.append([trial_vals / BATCH_SIZE, name])
        jac_list += torch.block_diag(*trial_vals)
        param.trial = None
    JJT = jac_list / BATCH_SIZE
    return JJT
# Per-sample logging (appended every 50 minibatches).
acc_list = []
time_list = []
loss_list = []
epoch_time_list = []

start_time= time.time()
for epoch in range(EPOCHS):
    start_time_epoch = time.time()
    for batch_idx, (x, y) in enumerate(mnist_loader):
        # y, indices = torch.sort(y)
        # x = x[indices, :, :, :]
        x, y = x.to(device), y.to(device)
        output = model(x)
        accuracy = get_accuracy(output, y)

        ######## calling individual function for JJT computation
        ### Our extension
        JJT_opt, L, JJT_linear, JJT_conv = optimal_JJT(False)
        # x = torch.ones(1, BATCH_SIZE, BATCH_SIZE)
        # x = x.repeat(num_classes, 1, 1)
        # eye_blk = torch.block_diag(*x)
        # JJT_opt_blk = JJT_opt * eye_blk
        # JJT_conv_blk = JJT_conv * eye_blk
        # JJT_fused = JJT_conv_blk + JJT_linear

        ### Blocked NGD version
        # start_time = time.time()
        # JJT_opt_blk = optimal_JJT_blk()
        # print(torch.norm(JJT_opt))
        # print(JJT_opt)
        # time_opt = time.time() - start_time

        # plotting NGD kernel for some iterations
        if PLOT and batch_idx in [2, 10, 50, 600] :
            # JJT_opt_blk = optimal_JJT_blk()
            JJT_opt, L, _, _ = optimal_JJT(True)
            # mask that keeps only the per-class diagonal blocks
            x = torch.ones(1, BATCH_SIZE, BATCH_SIZE)
            x = x.repeat(num_classes, 1, 1)
            eye_blk = torch.block_diag(*x)
            diff = JJT_opt - JJT_opt*eye_blk
            # u, s, vh = torch.linalg.svd(diff)
            # s_normal = torch.cumsum(s, dim = 0)/torch.sum(s)
            # print(s_normal.numpy())
            # fig, ax = plt.subplots()
            # im = ax.plot(s_normal)
            # print(s)
            # fig.colorbar(im, orientation='horizontal')
            # plt.show()
            # show the off-block-diagonal part of the kernel
            fig, ax = plt.subplots()
            im = ax.imshow(JJT_opt - JJT_opt*eye_blk , cmap='viridis')
            fig.colorbar(im, orientation='horizontal')
            plt.show()
            # fig.suptitle('NGD Kernel')
            if(1==1):
                # plot the per-layer kernels two at a time
                bc = BATCH_SIZE * num_classes
                for i in range(6):
                    c = i * 2
                    fig, axs = plt.subplots(1, 2)
                    for row in range(2):
                        ax = axs[row]
                        data = L[row + c][0].reshape(bc, bc)
                        print('name:', L[row + c][1])
                        print('max data:', torch.max(data))
                        print('min data:', torch.min(data))
                        print('average data:', torch.mean(data))
                        print('norm data:', torch.norm(data))
                        ax.set_title(L[row + c][1])
                        pcm = ax.imshow(data, cmap='viridis')
                        fig.colorbar(pcm, ax=ax)
                    plt.show()

        ### naive loop which is current PyTorch approach
        # start_time = time.time()
        # JJT_naive_seq = naive_seq()
        # print(torch.norm(JJT_naive_seq - JJT_opt)/(bc*bc))
        # time_seq = time.time() - start_time
        # print('naive:', JJT_naive_seq )

        ### vamp is slow and not worth it
        # start_time = time.time()
        # JJT_naive_vmap = naive_vmap()
        # time_vmap = time.time() - start_time

        # applying one step for optimization
        loss = loss_function(output, y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch_idx % 50 == 0:
            acc_list.append(accuracy)
            time_list.append(time.time() - start_time)
            loss_list.append(loss)
            # print('Seq vs vmap error:', get_diff(JJT_naive_seq, JJT_naive_vmap))
            # print('opt vs seq error:', get_diff(JJT_naive_seq, JJT_opt))
            # print('opt vs linear error:', get_diff(JJT_opt, JJT_linear))
            # print('opt vs conv error:', get_diff(JJT_opt, JJT_conv))
            # print('opt vs blocked error:', get_diff(JJT_opt, JJT_opt_blk))
            # print('opt vs fused error:', get_diff(JJT_opt, JJT_fused))
            # print(torch.allclose(JJT_naive_seq, JJT_opt) )
            # print('Jacobian Computation Time [Sequential]:', time_seq)
            # print('Jacobian Computation Time [Optimal]:', time_opt)
            # print('Jacobian Computation Time [VMAP]:', time_vmap)
            # print('Speedup over sequential:', time_seq/ time_opt)
            print('Elapsed time:', time.time() - start_time_epoch)
            print(
                "Iteration %3.d/%d " % (batch_idx, MAX_ITER) +
                "Minibatch Loss %.3f " % (loss) +
                "Accuracy %.0f" % (accuracy * 100) + "%"
            )
        if batch_idx >= MAX_ITER:
            break
    epoch_time = time.time() - start_time_epoch
    epoch_time_list.append(epoch_time)
    print('Elapsed time for epoch %d time: %.3f' % (epoch , epoch_time))

print('Epoch times : ', epoch_time_list)
print('Time(s) ACC. LOSS')
for i in range(len(time_list)):
    print('%.3f, %.3f, %.3f' %(time_list[i], acc_list[i], loss_list[i].item()))
|
import torch
import torchvision
# The main BackPACK functionalities
from backpack import backpack, extend
# The diagonal GGN extension
# from backpack.extensions import DiagGGNMC
import torch.optim as optim
from backpack.extensions import TRIAL
from torchsummary import summary
import time
# fixing HTTPS issue on Colab
from six.moves import urllib
# Work around HTTP 403 errors when torchvision downloads MNIST on Colab:
# install a global urllib opener that sends a browser-style User-Agent.
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
# torch.set_default_dtype(torch.float64)
# This layer did not exist in Pytorch 1.0
# Hyperparameters
# 0: matmul
# 1: fft
# 2: conv2d
# -1: silent mode [only backpropagation]
# [7]: ordering test [v, n, v, n]
# 13: blocked version [v, n, n]
# 17: adding dropout in backward pass for large linear layers
# 666: using backpack for conv2d [not good. becauseof repeating]
MODE = 7
print('Convolution mode is:')
if MODE == 0:
print('MATMUL')
elif MODE == 1:
print('FFT')
elif MODE == 2:
print('CONV2D')
elif MODE == -1:
print('Silent mode: no computation done in backward pass.')
BATCH_SIZE = 64
EPOCHS = 1
PLOT = False
num_classes = 10
STEP_SIZE = 0.01
DAMPING = 1.0
MAX_ITER = 60000//BATCH_SIZE
torch.manual_seed(0)
bc = BATCH_SIZE * num_classes
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print('Selected Device:', device)
print('BATCH_SIZE:', BATCH_SIZE)
mnist_loader = torch.utils.data.dataloader.DataLoader(
torchvision.datasets.MNIST(
'./data',
train=True,
download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,)
)
])),
batch_size=BATCH_SIZE,
shuffle=True
)
##### base model from backpack website:
model = torch.nn.Sequential(
torch.nn.Conv2d(1, 50, 3, 1, padding = (1,1)),
torch.nn.ReLU(),
torch.nn.Conv2d(50, 5, 3, 1, padding = (1,1)),
torch.nn.ReLU(),
torch.nn.Flatten(),
torch.nn.Linear(28*28*5, 20),
torch.nn.ReLU(),
torch.nn.Linear(20, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, 10),
).to(device)
##### fully connected network. Test for linear timings.
# model = torch.nn.Sequential(
# torch.nn.Flatten(),
# torch.nn.Linear(784, 1000),
# torch.nn.ReLU(),
# torch.nn.Linear(1000, 1000),
# torch.nn.ReLU(),
# torch.nn.Linear(1000, 500),
# torch.nn.ReLU(),
# torch.nn.Linear(500, 10),
# ).to(device)
summary(model, ( 1, 28, 28))
loss_function = torch.nn.CrossEntropyLoss()
def get_accuracy(output, targets):
    """Return the fraction of rows in ``output`` whose argmax equals ``targets``.

    Args:
        output: (N, C) tensor of class scores / logits.
        targets: (N,) tensor of integer class labels.

    Returns:
        float: mean accuracy in [0, 1].
    """
    pred = output.argmax(dim=1, keepdim=True).view_as(targets)
    correct = (pred == targets).float()
    return correct.mean().item()
# class TrialOptimizer(torch.optim.Optimizer):
# def __init__(self, parameters, step_size, damping):
# super().__init__(
# parameters,
# dict(step_size=step_size, damping=damping)
# )
# def step(self):
# for group in self.param_groups:
# for p in group["params"]:
# step_direction = p.grad / (p.trial + group["damping"])
# p.data.add_(-group["step_size"], step_direction)
# return loss
extend(model)
extend(loss_function)
# optimizer = TrialOptimizer(
# model.parameters(),
# step_size=STEP_SIZE,
# damping=DAMPING
# )
optimizer = optim.SGD(model.parameters(), lr=STEP_SIZE)
def get_diff(A, B):
    """Relative error ||A - B|| / ||A|| (Frobenius norm for matrices)."""
    return torch.norm(A - B) / torch.norm(A)
def naive_seq():
    """Build the per-sample Jacobian with one backward pass per
    (sample, class) pair and return the Gram matrix J J^T / BATCH_SIZE.

    Relies on the enclosing script's globals: ``output`` (the
    BATCH_SIZE x num_classes model output), ``model``, ``BATCH_SIZE``
    and ``num_classes``.  Rows are class-major:
    [class0 over batch, class1 over batch, ...].
    """
    jac_list = []
    for j in range(num_classes):
        for i in range(BATCH_SIZE):
            # One backward per scalar output; retain_graph keeps the graph
            # alive for the next (i, j) pair.
            output[i,j].backward(retain_graph=True)
            L = []
            for name, param in model.named_parameters():
                L.append(param.grad.view(1, -1))
                param.grad = None  # clear so gradients do not accumulate
            jac_list.append(torch.cat(L, 1))
    jac = torch.cat(jac_list, 0)
    JJT = torch.matmul(jac, jac.permute(1,0))/BATCH_SIZE
    return JJT
def naive_vmap():
    """Compute J J^T / BATCH_SIZE via ``torch.vmap`` over one-hot output
    directions (one vmapped VJP per sample).

    Uses the enclosing script's globals ``output``, ``model``,
    ``BATCH_SIZE`` and ``num_classes``.  Noted elsewhere in this script as
    slower than the sequential loop in practice.
    """
    I_N = torch.eye(num_classes)
    # torch._C._debug_only_display_vmap_fallback_warnings(True)
    L = []
    def get_jacobian(v):
        # VJP of sample i's full output row against every parameter,
        # in direction v (a one-hot row of I_N).
        j = torch.autograd.grad(output[i,:], model.parameters(), v, retain_graph = True)
        jac_persample = []
        for j_ in j:
            jac_persample.append(j_.view( -1))
        for name, param in model.named_parameters():
            param.grad = None
        return torch.cat(jac_persample, 0)
    for i in range(BATCH_SIZE):
        jacobian = torch.vmap(get_jacobian)(I_N)
        L.append(jacobian)
    jac = torch.cat(L, 0)
    # Reorder rows from sample-major to class-major so the result matches
    # the ordering used by naive_seq().
    jac = jac.reshape(BATCH_SIZE, num_classes, -1)
    jac = jac.permute(1, 0 , 2)
    jac = jac.reshape(BATCH_SIZE * num_classes, -1)
    JJT = torch.matmul(jac, jac.permute(1,0))/BATCH_SIZE
    return JJT
def optimal_JJT(RESHAPE):
    """Assemble the NGD Gram matrix J J^T from BackPACK's TRIAL extension.

    Runs one backward pass under ``backpack(TRIAL(MODE))`` and sums the
    per-parameter ``param.trial`` blocks into a single (bc, bc) matrix with
    bc = BATCH_SIZE * num_classes.  Uses the enclosing script's globals
    ``output``, ``y``, ``model``, ``loss_function``, ``BATCH_SIZE``,
    ``num_classes`` and ``MODE``.

    Args:
        RESHAPE: when True, permute rows/columns of each block with a
            hard-coded index list (an ordering experiment the author
            marked "not useful").

    Returns:
        (JJT, L, JJT_linear, JJT_conv) where L is a list of
        [block / BATCH_SIZE, parameter name] pairs for plotting.
        NOTE(review): the linear/conv accumulations are commented out
        below, so JJT_linear and JJT_conv are always 0.0 as returned.
    """
    jac_list = 0
    jac_list_linear = 0
    jac_list_conv = 0
    bc = BATCH_SIZE * num_classes
    L = []
    with backpack(TRIAL(MODE)):
        loss = loss_function(output, y)
        loss.backward(retain_graph=True)
    for name, param in model.named_parameters():
        trial_vals = param.trial
        # print('var name and shape:', name,' ', param.shape)
        if RESHAPE: # not useful
            # trial_vals = trial_vals.permute(1, 0, 3, 2) # reshaping to [n, v, n, v]
            # Hard-coded permutation of the first/third axes; appears to
            # assume num_classes == 10 -- TODO confirm intended ordering.
            vs = [2, 1, 0, 7,6,5,4,3, 9, 8]
            trial_vals = trial_vals[vs, :, :, :]
            trial_vals = trial_vals[:, :, vs, :]
        L.append([trial_vals / BATCH_SIZE, name])
        # if '0' not in name and '2' not in name and '4' not in name :
        #     jac_list_linear += trial_vals.reshape(bc, bc)
        # else:
        #     jac_list_conv += trial_vals.reshape(bc, bc)
        jac_list += trial_vals.reshape(bc, bc)
        param.trial = None  # free the extension buffer
        # param.grad = None
    JJT = jac_list / BATCH_SIZE
    JJT_linear = jac_list_linear / BATCH_SIZE
    JJT_conv = jac_list_conv / BATCH_SIZE
    return JJT, L, JJT_linear, JJT_conv
def optimal_JJT_blk():
    """Block-diagonal variant of :func:`optimal_JJT`.

    Instead of summing full (bc, bc) blocks, builds
    ``torch.block_diag(*param.trial)`` per parameter, which keeps only the
    per-class diagonal blocks of the NGD kernel.  Uses the same enclosing
    script globals as ``optimal_JJT``.
    """
    jac_list = 0
    bc = BATCH_SIZE * num_classes
    with backpack(TRIAL(MODE)):
        loss = loss_function(output, y)
        loss.backward(retain_graph=True)
    for name, param in model.named_parameters():
        trial_vals = param.trial
        jac_list += torch.block_diag(*trial_vals)
        param.trial = None  # free the extension buffer
    JJT = jac_list / BATCH_SIZE
    return JJT
acc_list = []
time_list = []
loss_list = []
epoch_time_list = []
start_time= time.time()
for epoch in range(EPOCHS):
start_time_epoch = time.time()
for batch_idx, (x, y) in enumerate(mnist_loader):
# y, indices = torch.sort(y)
# x = x[indices, :, :, :]
x, y = x.to(device), y.to(device)
output = model(x)
accuracy = get_accuracy(output, y)
######## calling individual function for JJT computation
### Our extension
JJT_opt, L, JJT_linear, JJT_conv = optimal_JJT(False)
# x = torch.ones(1, BATCH_SIZE, BATCH_SIZE)
# x = x.repeat(num_classes, 1, 1)
# eye_blk = torch.block_diag(*x)
# JJT_opt_blk = JJT_opt * eye_blk
# JJT_conv_blk = JJT_conv * eye_blk
# JJT_fused = JJT_conv_blk + JJT_linear
### Blocked NGD version
# start_time = time.time()
# JJT_opt_blk = optimal_JJT_blk()
# print(torch.norm(JJT_opt))
# print(JJT_opt)
# time_opt = time.time() - start_time
# plotting NGD kernel for some iterations
if PLOT and batch_idx in [2, 10, 50, 600] :
# JJT_opt_blk = optimal_JJT_blk()
JJT_opt, L, _, _ = optimal_JJT(True)
x = torch.ones(1, BATCH_SIZE, BATCH_SIZE)
x = x.repeat(num_classes, 1, 1)
eye_blk = torch.block_diag(*x)
diff = JJT_opt - JJT_opt*eye_blk
# u, s, vh = torch.linalg.svd(diff)
# s_normal = torch.cumsum(s, dim = 0)/torch.sum(s)
# print(s_normal.numpy())
# fig, ax = plt.subplots()
# im = ax.plot(s_normal)
# print(s)
# fig.colorbar(im, orientation='horizontal')
# plt.show()
fig, ax = plt.subplots()
im = ax.imshow(JJT_opt - JJT_opt*eye_blk , cmap='viridis')
fig.colorbar(im, orientation='horizontal')
plt.show()
# fig.suptitle('NGD Kernel')
if(1==1):
bc = BATCH_SIZE * num_classes
for i in range(6):
c = i * 2
fig, axs = plt.subplots(1, 2)
for row in range(2):
ax = axs[row]
data = L[row + c][0].reshape(bc, bc)
print('name:', L[row + c][1])
print('max data:', torch.max(data))
print('min data:', torch.min(data))
print('average data:', torch.mean(data))
print('norm data:', torch.norm(data))
ax.set_title(L[row + c][1])
pcm = ax.imshow(data, cmap='viridis')
fig.colorbar(pcm, ax=ax)
plt.show()
### naive loop which is current PyTorch approach
# start_time = time.time()
# JJT_naive_seq = naive_seq()
# print(torch.norm(JJT_naive_seq - JJT_opt)/(bc*bc))
# time_seq = time.time() - start_time
# print('naive:', JJT_naive_seq )
### vamp is slow and not worth it
# start_time = time.time()
# JJT_naive_vmap = naive_vmap()
# time_vmap = time.time() - start_time
# applying one step for optimization
loss = loss_function(output, y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if batch_idx % 50 == 0:
acc_list.append(accuracy)
time_list.append(time.time() - start_time)
loss_list.append(loss)
# print('Seq vs vmap error:', get_diff(JJT_naive_seq, JJT_naive_vmap))
# print('opt vs seq error:', get_diff(JJT_naive_seq, JJT_opt))
# print('opt vs linear error:', get_diff(JJT_opt, JJT_linear))
# print('opt vs conv error:', get_diff(JJT_opt, JJT_conv))
# print('opt vs blocked error:', get_diff(JJT_opt, JJT_opt_blk))
# print('opt vs fused error:', get_diff(JJT_opt, JJT_fused))
# print(torch.allclose(JJT_naive_seq, JJT_opt) )
# print('Jacobian Computation Time [Sequential]:', time_seq)
# print('Jacobian Computation Time [Optimal]:', time_opt)
# print('Jacobian Computation Time [VMAP]:', time_vmap)
# print('Speedup over sequential:', time_seq/ time_opt)
print('Elapsed time:', time.time() - start_time_epoch)
print(
"Iteration %3.d/%d " % (batch_idx, MAX_ITER) +
"Minibatch Loss %.3f " % (loss) +
"Accuracy %.0f" % (accuracy * 100) + "%"
)
if batch_idx >= MAX_ITER:
break
epoch_time = time.time() - start_time_epoch
epoch_time_list.append(epoch_time)
print('Elapsed time for epoch %d time: %.3f' % (epoch , epoch_time))
print('Epoch times : ', epoch_time_list)
print('Time(s) ACC. LOSS')
for i in range(len(time_list)):
print('%.3f, %.3f, %.3f' %(time_list[i], acc_list[i], loss_list[i].item()))
|
en
| 0.433603
|
# The main BackPACK functionalities # The diagonal GGN extension # from backpack.extensions import DiagGGNMC # fixing HTTPS issue on Colab # torch.set_default_dtype(torch.float64) # This layer did not exist in Pytorch 1.0 # Hyperparameters # 0: matmul # 1: fft # 2: conv2d # -1: silent mode [only backpropagation] # [7]: ordering test [v, n, v, n] # 13: blocked version [v, n, n] # 17: adding dropout in backward pass for large linear layers # 666: using backpack for conv2d [not good. becauseof repeating] ##### base model from backpack website: ##### fully connected network. Test for linear timings. # model = torch.nn.Sequential( # torch.nn.Flatten(), # torch.nn.Linear(784, 1000), # torch.nn.ReLU(), # torch.nn.Linear(1000, 1000), # torch.nn.ReLU(), # torch.nn.Linear(1000, 500), # torch.nn.ReLU(), # torch.nn.Linear(500, 10), # ).to(device) Helper function to print the accuracy # class TrialOptimizer(torch.optim.Optimizer): # def __init__(self, parameters, step_size, damping): # super().__init__( # parameters, # dict(step_size=step_size, damping=damping) # ) # def step(self): # for group in self.param_groups: # for p in group["params"]: # step_direction = p.grad / (p.trial + group["damping"]) # p.data.add_(-group["step_size"], step_direction) # return loss # optimizer = TrialOptimizer( # model.parameters(), # step_size=STEP_SIZE, # damping=DAMPING # ) returns relative error between A and B # return torch.norm(A - B)/torch.norm(A) # torch._C._debug_only_display_vmap_fallback_warnings(True) # print('var name and shape:', name,' ', param.shape) # not useful # trial_vals = trial_vals.permute(1, 0, 3, 2) # reshaping to [n, v, n, v] # print(trial_vals.shape) # print(trial_vals.shape) # if '0' not in name and '2' not in name and '4' not in name : # jac_list_linear += trial_vals.reshape(bc, bc) # else: # jac_list_conv += trial_vals.reshape(bc, bc) # param.grad = None # if torch.allclose(JJT, JJT_conv + JJT_linear) == False: # print('JJT:', JJT) # print('JJT_conv:', JJT_conv) # 
print('JJT_linear:', JJT_linear) # L = [] # L.append([trial_vals / BATCH_SIZE, name]) # y, indices = torch.sort(y) # x = x[indices, :, :, :] ######## calling individual function for JJT computation ### Our extension # x = torch.ones(1, BATCH_SIZE, BATCH_SIZE) # x = x.repeat(num_classes, 1, 1) # eye_blk = torch.block_diag(*x) # JJT_opt_blk = JJT_opt * eye_blk # JJT_conv_blk = JJT_conv * eye_blk # JJT_fused = JJT_conv_blk + JJT_linear ### Blocked NGD version # start_time = time.time() # JJT_opt_blk = optimal_JJT_blk() # print(torch.norm(JJT_opt)) # print(JJT_opt) # time_opt = time.time() - start_time # plotting NGD kernel for some iterations # JJT_opt_blk = optimal_JJT_blk() # u, s, vh = torch.linalg.svd(diff) # s_normal = torch.cumsum(s, dim = 0)/torch.sum(s) # print(s_normal.numpy()) # fig, ax = plt.subplots() # im = ax.plot(s_normal) # print(s) # fig.colorbar(im, orientation='horizontal') # plt.show() # fig.suptitle('NGD Kernel') ### naive loop which is current PyTorch approach # start_time = time.time() # JJT_naive_seq = naive_seq() # print(torch.norm(JJT_naive_seq - JJT_opt)/(bc*bc)) # time_seq = time.time() - start_time # print('naive:', JJT_naive_seq ) ### vamp is slow and not worth it # start_time = time.time() # JJT_naive_vmap = naive_vmap() # time_vmap = time.time() - start_time # applying one step for optimization # print('Seq vs vmap error:', get_diff(JJT_naive_seq, JJT_naive_vmap)) # print('opt vs seq error:', get_diff(JJT_naive_seq, JJT_opt)) # print('opt vs linear error:', get_diff(JJT_opt, JJT_linear)) # print('opt vs conv error:', get_diff(JJT_opt, JJT_conv)) # print('opt vs blocked error:', get_diff(JJT_opt, JJT_opt_blk)) # print('opt vs fused error:', get_diff(JJT_opt, JJT_fused)) # print(torch.allclose(JJT_naive_seq, JJT_opt) ) # print('Jacobian Computation Time [Sequential]:', time_seq) # print('Jacobian Computation Time [Optimal]:', time_opt) # print('Jacobian Computation Time [VMAP]:', time_vmap) # print('Speedup over sequential:', time_seq/ 
time_opt)
| 2.255942
| 2
|
odl_video/envs.py
|
mitodl/odl-video-service
| 3
|
6626595
|
<reponame>mitodl/odl-video-service<filename>odl_video/envs.py
"""Functions reading and parsing environment variables"""
from ast import literal_eval
import os
from django.core.exceptions import ImproperlyConfigured
class EnvironmentVariableParseException(ImproperlyConfigured):
    """Raised when an environment variable exists but its value cannot be
    parsed into the requested type (bool, int, or list of str)."""
def get_string(name, default):
    """
    Get an environment variable as a string.

    Args:
        name (str): An environment variable name
        default (str): Value returned when the variable is unset.

    Returns:
        str: The environment variable value, or ``default``.
    """
    value = os.environ.get(name)
    if value is None:
        return default
    return value
def get_bool(name, default):
    """
    Get an environment variable as a boolean.

    Accepts "true"/"false" in any letter case.

    Args:
        name (str): An environment variable name
        default (bool): Value returned when the variable is unset.

    Returns:
        bool: The parsed value.

    Raises:
        EnvironmentVariableParseException: if the value is neither
            "true" nor "false" (case-insensitive).
    """
    raw = os.environ.get(name)
    if raw is None:
        return default
    lowered = raw.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise EnvironmentVariableParseException(
        "Expected value in {name}={value} to be a boolean".format(
            name=name,
            value=raw,
        )
    )
def get_int(name, default):
    """
    Get an environment variable as an int.

    Args:
        name (str): An environment variable name
        default (int): Value returned when the variable is unset.

    Returns:
        int: The parsed value.

    Raises:
        EnvironmentVariableParseException: if the value is not a valid int.
    """
    raw = os.environ.get(name)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError as ex:
        raise EnvironmentVariableParseException(
            "Expected value in {name}={value} to be an int".format(
                name=name,
                value=raw,
            )
        ) from ex
def get_list_of_str(name, default):
    """
    Get an environment variable as a list of strings.

    The value is parsed with ``ast.literal_eval`` and must evaluate to a
    Python list whose elements are all ``str``.

    Args:
        name (str): An environment variable name
        default (list): Value returned when the variable is unset.

    Returns:
        list of str: The parsed list.

    Raises:
        EnvironmentVariableParseException: if parsing fails or the parsed
            value is not a list of str.
    """
    raw = os.environ.get(name)
    if raw is None:
        return default
    error = EnvironmentVariableParseException(
        "Expected value in {name}={value} to be a list of str".format(
            name=name,
            value=raw,
        )
    )
    try:
        parsed = literal_eval(raw)
    except (ValueError, SyntaxError) as ex:
        raise error from ex
    if not isinstance(parsed, list):
        raise error
    if not all(isinstance(item, str) for item in parsed):
        raise error
    return parsed
def get_any(name, default):
    """
    Get an environment variable as a bool, int, or a string.

    Parsers are tried from strictest to loosest: bool first, then int,
    finally falling back to the raw string.

    Args:
        name (str): An environment variable name
        default (any): Value returned when the variable is unset.

    Returns:
        any: The value parsed as a bool, int, or str.
    """
    for parser in (get_bool, get_int):
        try:
            return parser(name, default)
        except EnvironmentVariableParseException:
            continue
    return get_string(name, default)
def get_key(name, default):
    """
    Get an environment variable holding a private or public key.

    Keys are stored escaped (e.g. literal "\\n" for newlines); the value is
    unescaped and returned as a bytestring.

    Args:
        name (str): An environment variable name
        default (str): Value returned when the variable is unset.

    Returns:
        bytes: The unescaped key as bytes, or ``default`` unchanged when
        it is not a str.
    """
    value = os.environ.get(name, default)
    if not isinstance(value, str):
        return value
    # Undo backslash escapes ("\\n" -> newline), then re-encode to bytes.
    return value.encode().decode("unicode_escape").encode()
def parse_env(env_file):
    """
    Parse the env file and set the values in the runtime environment.

    Simplifies deployment by ensuring env values are present without
    duplicating across multiple locations and not requiring explicit
    sourcing.  Existing environment variables are never overwritten
    (``os.environ.setdefault``).  A missing file is silently ignored.

    Args:
        env_file (str): path to a file of ``key=value`` lines, optionally
            prefixed with ``export ``

    Returns:
        None: No return value
    """
    try:
        with open(env_file) as envsettings:
            for line in envsettings:
                line = line.rstrip("\n")
                # BUG FIX: the previous code used lstrip("export "), which
                # strips any of the characters e,x,p,o,r,t,space from the
                # left (so "extra=1" became "a=1").  Strip only the literal
                # "export " prefix instead.
                if line.startswith("export "):
                    line = line[len("export "):]
                k, v = line.split("=", maxsplit=1)
                os.environ.setdefault(k, v)
    except FileNotFoundError:
        pass
|
"""Functions reading and parsing environment variables"""
from ast import literal_eval
import os
from django.core.exceptions import ImproperlyConfigured
class EnvironmentVariableParseException(ImproperlyConfigured):
"""Environment variable was not parsed correctly"""
def get_string(name, default):
"""
Get an environment variable as a string.
Args:
name (str): An environment variable name
default (str): The default value to use if the environment variable doesn't exist.
Returns:
str:
The environment variable value, or the default
"""
return os.environ.get(name, default)
def get_bool(name, default):
"""
Get an environment variable as a boolean.
Args:
name (str): An environment variable name
default (bool): The default value to use if the environment variable doesn't exist.
Returns:
bool:
The environment variable value parsed as a bool
"""
value = os.environ.get(name)
if value is None:
return default
parsed_value = value.lower()
if parsed_value == "true":
return True
elif parsed_value == "false":
return False
raise EnvironmentVariableParseException(
"Expected value in {name}={value} to be a boolean".format(
name=name,
value=value,
)
)
def get_int(name, default):
"""
Get an environment variable as an int.
Args:
name (str): An environment variable name
default (int): The default value to use if the environment variable doesn't exist.
Returns:
int:
The environment variable value parsed as an int
"""
value = os.environ.get(name)
if value is None:
return default
try:
parsed_value = int(value)
except ValueError as ex:
raise EnvironmentVariableParseException(
"Expected value in {name}={value} to be an int".format(
name=name,
value=value,
)
) from ex
return parsed_value
def get_list_of_str(name, default):
"""
Get an environment variable as a list of strings.
Args:
name (str): An environment variable name
default (list): The default value to use if the environment variable doesn't exist.
Returns:
list of str:
The environment variable value parsed as a list of strings
"""
value = os.environ.get(name)
if value is None:
return default
parse_exception = EnvironmentVariableParseException(
"Expected value in {name}={value} to be a list of str".format(
name=name,
value=value,
)
)
try:
parsed_value = literal_eval(value)
except (ValueError, SyntaxError) as ex:
raise parse_exception from ex
if not isinstance(parsed_value, list):
raise parse_exception
for item in parsed_value:
if not isinstance(item, str):
raise parse_exception
return parsed_value
def get_any(name, default):
"""
Get an environment variable as a bool, int, or a string.
Args:
name (str): An environment variable name
default (any): The default value to use if the environment variable doesn't exist.
Returns:
any:
The environment variable value parsed as a bool, int, or a string
"""
try:
return get_bool(name, default)
except EnvironmentVariableParseException:
try:
return get_int(name, default)
except EnvironmentVariableParseException:
return get_string(name, default)
def get_key(name, default):
"""
Get an environment variable as a string representing a private or public key.
The difference is that keys are automatically escaped and they need to be unescaped and
encoded into bytestrings.
Args:
name (str): An environment variable name
default (str): The default value to use if the environment variable doesn't exist.
Returns:
bytes: The environment variable value, or the default as bytestring
"""
value = get_string(name, default)
if not isinstance(value, str):
return value
return value.encode().decode("unicode_escape").encode()
def parse_env(env_file):
    """
    Parse the env file and set the values in the runtime environment.

    Simplifies deployment by ensuring env values are present without
    duplicating across multiple locations and not requiring explicit
    sourcing.  Existing environment variables are never overwritten
    (``os.environ.setdefault``).  A missing file is silently ignored.

    Args:
        env_file (str): path to a file of ``key=value`` lines, optionally
            prefixed with ``export ``

    Returns:
        None: No return value
    """
    try:
        with open(env_file) as envsettings:
            for line in envsettings:
                line = line.rstrip("\n")
                # BUG FIX: lstrip("export ") strips a character set, not a
                # prefix (so "extra=1" became "a=1").  Remove only the
                # literal "export " prefix.
                if line.startswith("export "):
                    line = line[len("export "):]
                k, v = line.split("=", maxsplit=1)
                os.environ.setdefault(k, v)
    except FileNotFoundError:
        pass
|
en
| 0.755499
|
Functions reading and parsing environment variables Environment variable was not parsed correctly Get an environment variable as a string. Args: name (str): An environment variable name default (str): The default value to use if the environment variable doesn't exist. Returns: str: The environment variable value, or the default Get an environment variable as a boolean. Args: name (str): An environment variable name default (bool): The default value to use if the environment variable doesn't exist. Returns: bool: The environment variable value parsed as a bool Get an environment variable as an int. Args: name (str): An environment variable name default (int): The default value to use if the environment variable doesn't exist. Returns: int: The environment variable value parsed as an int Get an environment variable as a list of strings. Args: name (str): An environment variable name default (list): The default value to use if the environment variable doesn't exist. Returns: list of str: The environment variable value parsed as a list of strings Get an environment variable as a bool, int, or a string. Args: name (str): An environment variable name default (any): The default value to use if the environment variable doesn't exist. Returns: any: The environment variable value parsed as a bool, int, or a string Get an environment variable as a string representing a private or public key. The difference is that keys are automatically escaped and they need to be unescaped and encoded into bytestrings. Args: name (str): An environment variable name default (str): The default value to use if the environment variable doesn't exist. Returns: bytes: The environment variable value, or the default as bytestring Parse the env file and set the values in the runtime environment. Simplifies deployment by ensuring env values are present without duplicating across multiple locations and not requiring explicit sourcing. 
Args: env_file (str): path to a file consisting of key=value pairs Returns: None: No return value
| 3.761736
| 4
|
Beginner/URI_2311.py
|
rbshadow/Python_URI
| 3
|
6626596
|
<gh_stars>1-10
def math():
    """Solve URI/beecrowd 2311.

    For each of T test cases: read a name, a weight factor, and a list of
    scores; drop the single highest and single lowest score; print the name
    and the weighted sum of the remaining scores with two decimals.

    NOTE: the function name shadows the stdlib ``math`` module; kept for
    backward compatibility with existing callers.
    """
    test_case = int(input())
    for _ in range(test_case):  # loop index was unused
        name = input()
        degree = float(input())
        scores = list(map(float, input().strip().split()))
        # remove() drops only the first occurrence, matching the original
        # behavior when the max/min value is duplicated.
        scores.remove(max(scores))
        scores.remove(min(scores))
        # format() builtin instead of calling the __format__ dunder directly;
        # sum() instead of a manual accumulator loop.
        result = format(sum(scores) * degree, '.2f')
        print(name, result)


if __name__ == '__main__':
    math()
|
def math():
test_case = int(input())
for i in range(test_case):
count = 0
name = input()
degree = float(input())
lis_t = list(map(float, input().strip().split()))
maximum = max(lis_t)
minimum = min(lis_t)
lis_t.remove(maximum)
lis_t.remove(minimum)
for j in lis_t:
count += j
result = (count * degree).__format__('.2f')
print(name, result)
if __name__ == '__main__':
math()
|
none
| 1
| 3.597315
| 4
|
|
dis_cover/analysis/__init__.py
|
louismerlin/dis-cover
| 2
|
6626597
|
"""Method and class related to the analysis"""
from .analysis import analyze, CppClass
|
"""Method and class related to the analysis"""
from .analysis import analyze, CppClass
|
en
| 0.883281
|
Method and class related to the analysis
| 0.989775
| 1
|
lib/spack/spack/operating_systems/_operating_system.py
|
LiamBindle/spack
| 2,360
|
6626598
|
<gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.lang
import spack.util.spack_yaml as syaml
@llnl.util.lang.lazy_lexicographic_ordering
class OperatingSystem(object):
    """Base class for all the Operating Systems.

    On a multiple architecture machine, the architecture spec field can be
    set to build a package against any target and operating system present
    on the platform.  On Cray platforms, or any other architecture with
    distinct front- and back-end environments, the operating system decides
    how compilers are detected:

    1. Through the $PATH env variable (front-end detection)
    2. Through the module system. (back-end detection)

    For platforms such as linux and darwin, the operating system is
    autodetected.
    """

    def __init__(self, name, version):
        # Dashes are normalized to underscores so the OS token is a single
        # identifier-like string.
        self.name = name.replace('-', '_')
        self.version = str(version).replace('-', '_')

    def __str__(self):
        return "{0}{1}".format(self.name, self.version)

    def __repr__(self):
        return str(self)

    def _cmp_iter(self):
        # Comparison key consumed by lazy_lexicographic_ordering: compare
        # by name first, then version.
        yield self.name
        yield self.version

    def to_dict(self):
        return syaml.syaml_dict([
            ('name', self.name),
            ('version', self.version)
        ])
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.lang
import spack.util.spack_yaml as syaml
@llnl.util.lang.lazy_lexicographic_ordering
class OperatingSystem(object):
"""Base class for all the Operating Systems.
On a multiple architecture machine, the architecture spec field can be set to
build a package against any target and operating system that is present on the
platform. On Cray platforms or any other architecture that has different front
and back end environments, the operating system will determine the method of
compiler detection.
There are two different types of compiler detection:
1. Through the $PATH env variable (front-end detection)
2. Through the module system. (back-end detection)
Depending on which operating system is specified, the compiler will be detected
using one of those methods.
For platforms such as linux and darwin, the operating system is autodetected.
"""
def __init__(self, name, version):
self.name = name.replace('-', '_')
self.version = str(version).replace('-', '_')
def __str__(self):
return "%s%s" % (self.name, self.version)
def __repr__(self):
return self.__str__()
def _cmp_iter(self):
yield self.name
yield self.version
def to_dict(self):
return syaml.syaml_dict([
('name', self.name),
('version', self.version)
])
|
en
| 0.879129
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Base class for all the Operating Systems. On a multiple architecture machine, the architecture spec field can be set to build a package against any target and operating system that is present on the platform. On Cray platforms or any other architecture that has different front and back end environments, the operating system will determine the method of compiler detection. There are two different types of compiler detection: 1. Through the $PATH env variable (front-end detection) 2. Through the module system. (back-end detection) Depending on which operating system is specified, the compiler will be detected using one of those methods. For platforms such as linux and darwin, the operating system is autodetected.
| 2.204532
| 2
|
eth/tools/builder/chain/builders.py
|
gsalgado/py-evm
| 1
|
6626599
|
<reponame>gsalgado/py-evm
import functools
import time
from typing import (
cast,
Any,
Callable,
Dict,
Iterable,
Tuple,
Type,
Union,
)
from eth_utils.toolz import (
curry,
merge,
pipe,
)
from eth_typing import (
Address,
BlockNumber,
Hash32,
)
from eth_utils import (
to_dict,
to_tuple,
ValidationError,
)
from eth import constants
from eth.abc import (
AtomicDatabaseAPI,
BlockAPI,
BlockHeaderAPI,
ChainAPI,
MiningChainAPI,
VirtualMachineAPI,
)
from eth.db.atomic import AtomicDB
from eth.db.backends.memory import (
MemoryDB,
)
from eth.rlp.headers import (
HeaderParams,
)
from eth.tools.mining import POWMiningMixin
from eth.tools._utils.mappings import (
deep_merge,
)
from eth.tools._utils.normalization import (
normalize_state,
)
from eth.typing import (
AccountState,
GeneralState,
VMFork,
VMConfiguration,
)
from eth.validation import (
validate_vm_configuration,
)
from eth.vm.forks import (
FrontierVM,
HomesteadVM,
TangerineWhistleVM,
SpuriousDragonVM,
ByzantiumVM,
ConstantinopleVM,
PetersburgVM,
IstanbulVM,
)
def build(obj: Any, *applicators: Callable[..., Any]) -> Any:
    """
    Run ``obj`` through the series of applicator functions.

    If ``obj`` is a :class:`~eth.chains.base.BaseChain` instance, the
    applicators operate on a copy of the chain, leaving the provided
    instance unmutated.
    """
    if isinstance(obj, ChainAPI):
        return pipe(obj, copy(), *applicators)
    return pipe(obj, *applicators)
#
# Constructors (creation of chain classes)
#
@curry
def name(class_name: str, chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
    """Return a configured copy of ``chain_class`` renamed to ``class_name``."""
    return chain_class.configure(__name__=class_name)
@curry
def chain_id(chain_id: int, chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
    """Return a configured copy of ``chain_class`` with the given ``chain_id``."""
    return chain_class.configure(chain_id=chain_id)
@curry
def fork_at(vm_class: Type[VirtualMachineAPI],
            at_block: Union[int, BlockNumber],
            chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
    """
    Append ``(at_block, vm_class)`` to the chain's ``vm_configuration``.

    .. code-block:: python

        from eth.chains.base import MiningChain
        from eth.tools.builder.chain import build, fork_at

        FrontierOnlyChain = build(MiningChain, fork_at(FrontierVM, 0))

        # functionally equivalent to:
        class FrontierOnlyChain(MiningChain):
            vm_configuration = (
                (0, FrontierVM),
            )

    .. note:: This function is curriable.

    Pre-curried versions exist for each mainnet fork:
    :func:`~eth.tools.builder.chain.frontier_at`,
    :func:`~eth.tools.builder.chain.homestead_at`,
    :func:`~eth.tools.builder.chain.tangerine_whistle_at`,
    :func:`~eth.tools.builder.chain.spurious_dragon_at`,
    :func:`~eth.tools.builder.chain.byzantium_at`,
    :func:`~eth.tools.builder.chain.constantinople_at`,
    :func:`~eth.tools.builder.chain.petersburg_at`,
    :func:`~eth.tools.builder.chain.istanbul_at`, and
    :func:`~eth.tools.builder.chain.latest_mainnet_at` (latest mainnet VM).
    """
    if chain_class.vm_configuration is not None:
        existing = chain_class.vm_configuration
    else:
        existing = tuple()
    vm_configuration = existing + ((BlockNumber(at_block), vm_class),)
    validate_vm_configuration(vm_configuration)
    return chain_class.configure(vm_configuration=vm_configuration)
def _is_homestead(vm_class: Type[VirtualMachineAPI]) -> bool:
    # A VM counts as "homestead" only when it subclasses HomesteadVM while
    # not being one of the subsequent forks (which subclass HomesteadVM too).
    is_homestead_subclass = issubclass(vm_class, HomesteadVM)
    is_later_fork = issubclass(vm_class, TangerineWhistleVM)
    return is_homestead_subclass and not is_later_fork
@to_tuple
def _set_vm_dao_support_false(vm_configuration: VMConfiguration) -> Iterable[VMFork]:
    """Yield ``vm_configuration`` with DAO-fork support switched off on any
    Homestead VM; all other VM classes pass through unchanged."""
    for fork_block, vm_class in vm_configuration:
        if _is_homestead(vm_class):
            # only the Homestead VM carries the ``support_dao_fork`` flag
            yield fork_block, vm_class.configure(support_dao_fork=False)
        else:
            yield fork_block, vm_class
@curry
def disable_dao_fork(chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
    """
    Set the ``support_dao_fork`` flag to ``False`` on the
    :class:`~eth.vm.forks.homestead.HomesteadVM`.  Requires the presence of
    the :class:`~eth.vm.forks.homestead.HomesteadVM` in the
    ``vm_configuration``.
    """
    has_homestead_vm = any(
        _is_homestead(vm_class)
        for _, vm_class in chain_class.vm_configuration
    )
    if not has_homestead_vm:
        raise ValidationError("No HomesteadVM found in vm_configuration.")
    return chain_class.configure(
        vm_configuration=_set_vm_dao_support_false(chain_class.vm_configuration),
    )
@to_tuple
def _set_vm_dao_fork_block_number(dao_fork_block_number: BlockNumber,
                                  vm_configuration: VMConfiguration) -> Iterable[VMFork]:
    """Yield ``vm_configuration`` with DAO-fork support enabled and the fork
    block number set on any Homestead VM; other VM classes pass through."""
    for fork_block, vm_class in vm_configuration:
        if _is_homestead(vm_class):
            yield fork_block, vm_class.configure(
                support_dao_fork=True,
                _dao_fork_block_number=dao_fork_block_number,
            )
        else:
            yield fork_block, vm_class
@curry
def dao_fork_at(dao_fork_block_number: BlockNumber,
                chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
    """
    Set the block number on which the DAO fork will happen.  Requires that a
    version of the :class:`~eth.vm.forks.homestead.HomesteadVM` is present in
    the chain's ``vm_configuration``.
    """
    has_homestead_vm = any(
        _is_homestead(vm_class)
        for _, vm_class in chain_class.vm_configuration
    )
    if not has_homestead_vm:
        raise ValidationError("No HomesteadVM found in vm_configuration.")
    updated_configuration = _set_vm_dao_fork_block_number(
        dao_fork_block_number,
        chain_class.vm_configuration,
    )
    return chain_class.configure(vm_configuration=updated_configuration)
# Pre-curried ``fork_at`` helpers, one per mainnet fork (e.g. ``byzantium_at(0)``).
frontier_at = fork_at(FrontierVM)
homestead_at = fork_at(HomesteadVM)
tangerine_whistle_at = fork_at(TangerineWhistleVM)
spurious_dragon_at = fork_at(SpuriousDragonVM)
byzantium_at = fork_at(ByzantiumVM)
constantinople_at = fork_at(ConstantinopleVM)
petersburg_at = fork_at(PetersburgVM)
istanbul_at = fork_at(IstanbulVM)
# Bug fix: ``latest_mainnet_at`` is documented (see ``fork_at``) as tracking
# "whatever the latest mainnet VM is"; Istanbul is the newest VM imported by
# this module, but the alias still pointed at Petersburg.
latest_mainnet_at = istanbul_at
# Default genesis header parameters used by ``genesis`` when the caller does
# not supply explicit values.  Note: ``state_root`` is dropped when a genesis
# state is provided, and ``timestamp`` is filled in at runtime (see
# ``_get_default_genesis_params``).
GENESIS_DEFAULTS = cast(
    Tuple[Tuple[str, Union[BlockNumber, int, None, bytes, Address, Hash32]], ...],
    (
        ('difficulty', 1),
        ('extra_data', constants.GENESIS_EXTRA_DATA),
        ('gas_limit', constants.GENESIS_GAS_LIMIT),
        ('gas_used', 0),
        ('bloom', 0),
        ('mix_hash', constants.ZERO_HASH32),
        ('nonce', constants.GENESIS_NONCE),
        ('block_number', constants.GENESIS_BLOCK_NUMBER),
        ('parent_hash', constants.GENESIS_PARENT_HASH),
        ('receipt_root', constants.BLANK_ROOT_HASH),
        ('uncles_hash', constants.EMPTY_UNCLE_HASH),
        ('state_root', constants.BLANK_ROOT_HASH),
        ('transaction_root', constants.BLANK_ROOT_HASH),
    )
)
@to_dict
def _get_default_genesis_params(genesis_state: AccountState,
                                ) -> Iterable[Tuple[str, Union[BlockNumber, int, None, bytes, Address, Hash32]]]:  # noqa: E501
    """Yield the default genesis header params, honouring ``genesis_state``."""
    for key, value in GENESIS_DEFAULTS:
        if key == 'state_root' and genesis_state:
            # a genesis state was supplied; its derived state root wins
            continue
        yield key, value
    # the timestamp must reflect build time, so it cannot live in the defaults
    yield 'timestamp', int(time.time())
@to_tuple
def _mix_in_pow_mining(vm_configuration: VMConfiguration) -> Iterable[VMFork]:
    """Yield each (fork_block, vm_class) pair with ``POWMiningMixin`` mixed
    into the VM class (mixin listed first so its methods take precedence)."""
    for fork_block, vm_class in vm_configuration:
        # dynamically derive a new class carrying the same name as the original
        vm_class_with_pow_mining = type(vm_class.__name__, (POWMiningMixin, vm_class), {})
        yield fork_block, vm_class_with_pow_mining
@curry
def enable_pow_mining(chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
    """
    Inject on-demand generation of the proof of work mining seal on newly
    mined blocks into each of the chain's vms.
    """
    if not chain_class.vm_configuration:
        raise ValidationError("Chain class has no vm_configuration")
    mining_configuration = _mix_in_pow_mining(chain_class.vm_configuration)
    return chain_class.configure(vm_configuration=mining_configuration)
class NoChainSealValidationMixin:
    """Chain-level mixin that turns block seal validation into a no-op."""

    @classmethod
    def validate_seal(cls, block: BlockAPI) -> None:
        pass
class NoVMSealValidationMixin:
    """VM-level mixin that turns header seal validation into a no-op."""

    @classmethod
    def validate_seal(cls, header: BlockHeaderAPI) -> None:
        pass
@to_tuple
def _mix_in_disable_seal_validation(vm_configuration: VMConfiguration) -> Iterable[VMFork]:
    """Yield the VM configuration with seal validation disabled on every VM."""
    for fork_block, vm_class in vm_configuration:
        if issubclass(vm_class, NoVMSealValidationMixin):
            # seal validation is already disabled; reuse the class unchanged
            yield fork_block, vm_class
        else:
            no_seal_vm = type(
                vm_class.__name__,
                (NoVMSealValidationMixin, vm_class),
                {},
            )
            yield fork_block, no_seal_vm
@curry
def disable_pow_check(chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
    """
    Disable the proof of work validation check for each of the chain's vms.
    This allows for block mining without generation of the proof of work seal.
    .. note::
        blocks mined this way will not be importable on any chain that does not
        have proof of work disabled.
    """
    if not chain_class.vm_configuration:
        raise ValidationError("Chain class has no vm_configuration")
    if issubclass(chain_class, NoChainSealValidationMixin):
        # Seal validation already disabled, hence nothing to change
        chain_class_without_seal_validation = chain_class
    else:
        # derive a new chain class with the no-op seal validation mixed in;
        # the chain class is listed first so MRO keeps its other behavior
        chain_class_without_seal_validation = type(
            chain_class.__name__,
            (chain_class, NoChainSealValidationMixin),
            {},
        )
    # also disable seal validation on every VM in the configuration
    return chain_class_without_seal_validation.configure(  # type: ignore
        vm_configuration=_mix_in_disable_seal_validation(
            chain_class_without_seal_validation.vm_configuration  # type: ignore
        ),
    )
#
# Initializers (initialization of chain state and chain class instantiation)
#
def _fill_and_normalize_state(simple_state: GeneralState) -> AccountState:
    """Normalize ``simple_state`` and fill in zeroed account defaults."""
    normalized = normalize_state(simple_state)
    # one fresh defaults dict per account so nested dicts are never shared
    defaults = {
        address: {"balance": 0, "nonce": 0, "code": b"", "storage": {}}
        for address in normalized
    }
    return deep_merge(defaults, normalized)
@curry
def genesis(chain_class: ChainAPI,
            db: AtomicDatabaseAPI=None,
            params: Dict[str, HeaderParams]=None,
            state: GeneralState=None) -> ChainAPI:
    """
    Initialize the given chain class with the given genesis header parameters
    and chain state.

    :param chain_class: the chain class to instantiate
    :param db: backing database; a fresh in-memory ``AtomicDB`` when omitted
    :param params: genesis header overrides, merged over the defaults
    :param state: genesis account state, normalized/filled before use
    """
    if state is None:
        genesis_state: AccountState = {}
    else:
        genesis_state = _fill_and_normalize_state(state)
    # defaults omit `state_root` when a genesis state was provided
    genesis_params_defaults = _get_default_genesis_params(genesis_state)
    if params is None:
        genesis_params = genesis_params_defaults
    else:
        # caller-supplied params win over the defaults
        genesis_params = merge(genesis_params_defaults, params)
    if db is None:
        base_db: AtomicDatabaseAPI = AtomicDB()
    else:
        base_db = db
    return chain_class.from_genesis(base_db, genesis_params, genesis_state)
#
# Builders (build actual block chain)
#
@curry
def mine_block(chain: MiningChainAPI, **kwargs: Any) -> MiningChainAPI:
    """
    Mine a single new block on ``chain``.  Header parameters for the new
    block may be overridden via keyword arguments.
    """
    is_mining_chain = isinstance(chain, MiningChainAPI)
    if not is_mining_chain:
        raise ValidationError('`mine_block` may only be used on MiningChain instances')
    chain.mine_block(**kwargs)
    return chain
@curry
def mine_blocks(num_blocks: int, chain: MiningChainAPI) -> MiningChainAPI:
    """
    Variadic argument version of :func:`~eth.tools.builder.chain.mine_block`

    Mines ``num_blocks`` consecutive blocks with default header parameters.
    """
    if not isinstance(chain, MiningChainAPI):
        # bug fix: the original error message named `mine_block`
        raise ValidationError('`mine_blocks` may only be used on MiningChain instances')
    for _ in range(num_blocks):
        chain.mine_block()
    return chain
@curry
def import_block(block: BlockAPI, chain: ChainAPI) -> ChainAPI:
    """
    Import the provided ``block`` into the chain and return the chain so
    further builder functions can be applied.
    """
    chain.import_block(block)
    return chain
def import_blocks(*blocks: BlockAPI) -> Callable[[ChainAPI], ChainAPI]:
    """
    Variadic argument version of :func:`~eth.tools.builder.chain.import_block`
    """
    @functools.wraps(import_blocks)
    def _apply(chain: ChainAPI) -> ChainAPI:
        # import each block in the order it was given
        for block in blocks:
            chain.import_block(block)
        return chain
    return _apply
@curry
def copy(chain: MiningChainAPI) -> MiningChainAPI:
    """
    Make a copy of the chain at the given state.  Actions performed on the
    resulting chain will not affect the original chain.

    :raises ValidationError: if ``chain`` is not a ``MiningChain`` or is
        backed by an unsupported database type.
    """
    if not isinstance(chain, MiningChainAPI):
        # bug fix: the original error message referred to `at_block_number`
        raise ValidationError("`copy` may only be used with `MiningChain`")
    base_db = chain.chaindb.db
    if not isinstance(base_db, AtomicDB):
        raise ValidationError(f"Unsupported database type: {type(base_db)}")
    if isinstance(base_db.wrapped_db, MemoryDB):
        # copy the underlying key/value store so the chains do not share state
        db = AtomicDB(MemoryDB(base_db.wrapped_db.kv_store.copy()))
    else:
        raise ValidationError(f"Unsupported wrapped database: {type(base_db.wrapped_db)}")
    chain_copy = type(chain)(db, chain.header)
    return chain_copy
def chain_split(*splits: Iterable[Callable[..., Any]]) -> Callable[[ChainAPI], Iterable[ChainAPI]]:  # noqa: E501
    """
    Construct and execute multiple concurrent forks of the chain.
    Any number of forks may be executed.  For each fork, provide an iterable of
    commands.
    Returns the resulting chain objects for each fork, one per split, in the
    order the splits were given.
    .. code-block:: python
        chain_a, chain_b = build(
            mining_chain,
            chain_split(
                (mine_block(extra_data=b'chain-a'), mine_block()),
                (mine_block(extra_data=b'chain-b'), mine_block(), mine_block()),
            ),
        )
    """
    if not splits:
        raise ValidationError("Cannot use `chain_split` without providing at least one split")
    @functools.wraps(chain_split)
    @to_tuple
    def _chain_split(chain: ChainAPI) -> Iterable[ChainAPI]:
        for split_fns in splits:
            # ``build`` copies the chain first (see ``build``), so each split
            # runs against its own copy and the splits stay independent
            result = build(
                chain,
                *split_fns,
            )
            yield result
    return _chain_split
@curry
def at_block_number(block_number: Union[int, BlockNumber], chain: MiningChainAPI) -> MiningChainAPI:
    """
    Rewind the chain back to the given block number.  Calls to things like
    ``get_canonical_head`` will still return the canonical head of the chain,
    however, you can use ``mine_block`` to mine fork chains.
    """
    if not isinstance(chain, MiningChainAPI):
        # bug fix: the original message had an unbalanced quote ('MiningChain)
        raise ValidationError("`at_block_number` may only be used with `MiningChain`")
    at_block = chain.get_canonical_block_by_number(BlockNumber(block_number))
    db = chain.chaindb.db
    # note: the database is shared with the original chain (not copied)
    chain_at_block = type(chain)(db, chain.create_header_from_parent(at_block.header))
    return chain_at_block
|
import functools
import time
from typing import (
cast,
Any,
Callable,
Dict,
Iterable,
Tuple,
Type,
Union,
)
from eth_utils.toolz import (
curry,
merge,
pipe,
)
from eth_typing import (
Address,
BlockNumber,
Hash32,
)
from eth_utils import (
to_dict,
to_tuple,
ValidationError,
)
from eth import constants
from eth.abc import (
AtomicDatabaseAPI,
BlockAPI,
BlockHeaderAPI,
ChainAPI,
MiningChainAPI,
VirtualMachineAPI,
)
from eth.db.atomic import AtomicDB
from eth.db.backends.memory import (
MemoryDB,
)
from eth.rlp.headers import (
HeaderParams,
)
from eth.tools.mining import POWMiningMixin
from eth.tools._utils.mappings import (
deep_merge,
)
from eth.tools._utils.normalization import (
normalize_state,
)
from eth.typing import (
AccountState,
GeneralState,
VMFork,
VMConfiguration,
)
from eth.validation import (
validate_vm_configuration,
)
from eth.vm.forks import (
FrontierVM,
HomesteadVM,
TangerineWhistleVM,
SpuriousDragonVM,
ByzantiumVM,
ConstantinopleVM,
PetersburgVM,
IstanbulVM,
)
def build(obj: Any, *applicators: Callable[..., Any]) -> Any:
"""
Run the provided object through the series of applicator functions.
If ``obj`` is an instances of :class:`~eth.chains.base.BaseChain` the
applicators will be run on a copy of the chain and thus will not mutate the
provided chain instance.
"""
if isinstance(obj, ChainAPI):
return pipe(obj, copy(), *applicators)
else:
return pipe(obj, *applicators)
#
# Constructors (creation of chain classes)
#
@curry
def name(class_name: str, chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
"""
Assign the given name to the chain class.
"""
return chain_class.configure(__name__=class_name)
@curry
def chain_id(chain_id: int, chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
"""
Set the ``chain_id`` for the chain class.
"""
return chain_class.configure(chain_id=chain_id)
@curry
def fork_at(vm_class: Type[VirtualMachineAPI],
at_block: Union[int, BlockNumber],
chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
"""
Adds the ``vm_class`` to the chain's ``vm_configuration``.
.. code-block:: python
from eth.chains.base import MiningChain
from eth.tools.builder.chain import build, fork_at
FrontierOnlyChain = build(MiningChain, fork_at(FrontierVM, 0))
# these two classes are functionally equivalent.
class FrontierOnlyChain(MiningChain):
vm_configuration = (
(0, FrontierVM),
)
.. note:: This function is curriable.
The following pre-curried versions of this function are available as well,
one for each mainnet fork.
* :func:`~eth.tools.builder.chain.frontier_at`
* :func:`~eth.tools.builder.chain.homestead_at`
* :func:`~eth.tools.builder.chain.tangerine_whistle_at`
* :func:`~eth.tools.builder.chain.spurious_dragon_at`
* :func:`~eth.tools.builder.chain.byzantium_at`
* :func:`~eth.tools.builder.chain.constantinople_at`
* :func:`~eth.tools.builder.chain.petersburg_at`
* :func:`~eth.tools.builder.chain.istanbul_at`
* :func:`~eth.tools.builder.chain.latest_mainnet_at` - whatever the latest mainnet VM is
"""
if chain_class.vm_configuration is not None:
base_configuration = chain_class.vm_configuration
else:
base_configuration = tuple()
vm_configuration = base_configuration + ((BlockNumber(at_block), vm_class),)
validate_vm_configuration(vm_configuration)
return chain_class.configure(vm_configuration=vm_configuration)
def _is_homestead(vm_class: Type[VirtualMachineAPI]) -> bool:
if not issubclass(vm_class, HomesteadVM):
# It isn't a subclass of the HomesteadVM
return False
elif issubclass(vm_class, TangerineWhistleVM):
# It is a subclass of on of the subsequent forks
return False
else:
return True
@to_tuple
def _set_vm_dao_support_false(vm_configuration: VMConfiguration) -> Iterable[VMFork]:
for fork_block, vm_class in vm_configuration:
if _is_homestead(vm_class):
yield fork_block, vm_class.configure(support_dao_fork=False)
else:
yield fork_block, vm_class
@curry
def disable_dao_fork(chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
"""
Set the ``support_dao_fork`` flag to ``False`` on the
:class:`~eth.vm.forks.homestead.HomesteadVM`. Requires that presence of
the :class:`~eth.vm.forks.homestead.HomesteadVM` in the
``vm_configuration``
"""
homstead_vms_found = any(
_is_homestead(vm_class) for _, vm_class in chain_class.vm_configuration
)
if not homstead_vms_found:
raise ValidationError("No HomesteadVM found in vm_configuration.")
vm_configuration = _set_vm_dao_support_false(chain_class.vm_configuration)
return chain_class.configure(vm_configuration=vm_configuration)
@to_tuple
def _set_vm_dao_fork_block_number(dao_fork_block_number: BlockNumber,
vm_configuration: VMConfiguration) -> Iterable[VMFork]:
for fork_block, vm_class in vm_configuration:
if _is_homestead(vm_class):
yield fork_block, vm_class.configure(
support_dao_fork=True,
_dao_fork_block_number=dao_fork_block_number,
)
else:
yield fork_block, vm_class
@curry
def dao_fork_at(dao_fork_block_number: BlockNumber,
chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
"""
Set the block number on which the DAO fork will happen. Requires that a
version of the :class:`~eth.vm.forks.homestead.HomesteadVM` is present in
the chain's ``vm_configuration``
"""
homstead_vms_found = any(
_is_homestead(vm_class) for _, vm_class in chain_class.vm_configuration
)
if not homstead_vms_found:
raise ValidationError("No HomesteadVM found in vm_configuration.")
vm_configuration = _set_vm_dao_fork_block_number(
dao_fork_block_number,
chain_class.vm_configuration,
)
return chain_class.configure(vm_configuration=vm_configuration)
# Pre-curried ``fork_at`` helpers, one per mainnet fork (e.g. ``byzantium_at(0)``).
frontier_at = fork_at(FrontierVM)
homestead_at = fork_at(HomesteadVM)
tangerine_whistle_at = fork_at(TangerineWhistleVM)
spurious_dragon_at = fork_at(SpuriousDragonVM)
byzantium_at = fork_at(ByzantiumVM)
constantinople_at = fork_at(ConstantinopleVM)
petersburg_at = fork_at(PetersburgVM)
istanbul_at = fork_at(IstanbulVM)
# Bug fix: ``latest_mainnet_at`` is documented (see ``fork_at``) as tracking
# "whatever the latest mainnet VM is"; Istanbul is the newest VM imported by
# this module, but the alias still pointed at Petersburg.
latest_mainnet_at = istanbul_at
GENESIS_DEFAULTS = cast(
Tuple[Tuple[str, Union[BlockNumber, int, None, bytes, Address, Hash32]], ...],
(
('difficulty', 1),
('extra_data', constants.GENESIS_EXTRA_DATA),
('gas_limit', constants.GENESIS_GAS_LIMIT),
('gas_used', 0),
('bloom', 0),
('mix_hash', constants.ZERO_HASH32),
('nonce', constants.GENESIS_NONCE),
('block_number', constants.GENESIS_BLOCK_NUMBER),
('parent_hash', constants.GENESIS_PARENT_HASH),
('receipt_root', constants.BLANK_ROOT_HASH),
('uncles_hash', constants.EMPTY_UNCLE_HASH),
('state_root', constants.BLANK_ROOT_HASH),
('transaction_root', constants.BLANK_ROOT_HASH),
)
)
@to_dict
def _get_default_genesis_params(genesis_state: AccountState,
) -> Iterable[Tuple[str, Union[BlockNumber, int, None, bytes, Address, Hash32]]]: # noqa: E501
for key, value in GENESIS_DEFAULTS:
if key == 'state_root' and genesis_state:
# leave out the `state_root` if a genesis state was specified
pass
else:
yield key, value
yield 'timestamp', int(time.time()) # populate the timestamp value at runtime
@to_tuple
def _mix_in_pow_mining(vm_configuration: VMConfiguration) -> Iterable[VMFork]:
for fork_block, vm_class in vm_configuration:
vm_class_with_pow_mining = type(vm_class.__name__, (POWMiningMixin, vm_class), {})
yield fork_block, vm_class_with_pow_mining
@curry
def enable_pow_mining(chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
"""
Inject on demand generation of the proof of work mining seal on newly
mined blocks into each of the chain's vms.
"""
if not chain_class.vm_configuration:
raise ValidationError("Chain class has no vm_configuration")
vm_configuration = _mix_in_pow_mining(chain_class.vm_configuration)
return chain_class.configure(vm_configuration=vm_configuration)
class NoChainSealValidationMixin:
@classmethod
def validate_seal(cls, block: BlockAPI) -> None:
pass
class NoVMSealValidationMixin:
@classmethod
def validate_seal(cls, header: BlockHeaderAPI) -> None:
pass
@to_tuple
def _mix_in_disable_seal_validation(vm_configuration: VMConfiguration) -> Iterable[VMFork]:
for fork_block, vm_class in vm_configuration:
if issubclass(vm_class, NoVMSealValidationMixin):
# Seal validation already disabled, hence nothing to change
vm_class_without_seal_validation = vm_class
else:
vm_class_without_seal_validation = type(
vm_class.__name__,
(NoVMSealValidationMixin, vm_class),
{},
)
yield fork_block, vm_class_without_seal_validation
@curry
def disable_pow_check(chain_class: Type[ChainAPI]) -> Type[ChainAPI]:
"""
Disable the proof of work validation check for each of the chain's vms.
This allows for block mining without generation of the proof of work seal.
.. note::
blocks mined this way will not be importable on any chain that does not
have proof of work disabled.
"""
if not chain_class.vm_configuration:
raise ValidationError("Chain class has no vm_configuration")
if issubclass(chain_class, NoChainSealValidationMixin):
# Seal validation already disabled, hence nothing to change
chain_class_without_seal_validation = chain_class
else:
chain_class_without_seal_validation = type(
chain_class.__name__,
(chain_class, NoChainSealValidationMixin),
{},
)
return chain_class_without_seal_validation.configure( # type: ignore
vm_configuration=_mix_in_disable_seal_validation(
chain_class_without_seal_validation.vm_configuration # type: ignore
),
)
#
# Initializers (initialization of chain state and chain class instantiation)
#
def _fill_and_normalize_state(simple_state: GeneralState) -> AccountState:
base_state = normalize_state(simple_state)
defaults = {address: {
"balance": 0,
"nonce": 0,
"code": b"",
"storage": {},
} for address in base_state.keys()}
state = deep_merge(defaults, base_state)
return state
@curry
def genesis(chain_class: ChainAPI,
db: AtomicDatabaseAPI=None,
params: Dict[str, HeaderParams]=None,
state: GeneralState=None) -> ChainAPI:
"""
Initialize the given chain class with the given genesis header parameters
and chain state.
"""
if state is None:
genesis_state: AccountState = {}
else:
genesis_state = _fill_and_normalize_state(state)
genesis_params_defaults = _get_default_genesis_params(genesis_state)
if params is None:
genesis_params = genesis_params_defaults
else:
genesis_params = merge(genesis_params_defaults, params)
if db is None:
base_db: AtomicDatabaseAPI = AtomicDB()
else:
base_db = db
return chain_class.from_genesis(base_db, genesis_params, genesis_state)
#
# Builders (build actual block chain)
#
@curry
def mine_block(chain: MiningChainAPI, **kwargs: Any) -> MiningChainAPI:
"""
Mine a new block on the chain. Header parameters for the new block can be
overridden using keyword arguments.
"""
if not isinstance(chain, MiningChainAPI):
raise ValidationError('`mine_block` may only be used on MiningChain instances')
chain.mine_block(**kwargs)
return chain
@curry
def mine_blocks(num_blocks: int, chain: MiningChainAPI) -> MiningChainAPI:
    """
    Variadic argument version of :func:`~eth.tools.builder.chain.mine_block`

    Mines ``num_blocks`` consecutive blocks with default header parameters.
    """
    if not isinstance(chain, MiningChainAPI):
        # bug fix: the original error message named `mine_block`
        raise ValidationError('`mine_blocks` may only be used on MiningChain instances')
    for _ in range(num_blocks):
        chain.mine_block()
    return chain
@curry
def import_block(block: BlockAPI, chain: ChainAPI) -> ChainAPI:
"""
Import the provided ``block`` into the chain.
"""
chain.import_block(block)
return chain
def import_blocks(*blocks: BlockAPI) -> Callable[[ChainAPI], ChainAPI]:
"""
Variadic argument version of :func:`~eth.tools.builder.chain.import_block`
"""
@functools.wraps(import_blocks)
def _import_blocks(chain: ChainAPI) -> ChainAPI:
for block in blocks:
chain.import_block(block)
return chain
return _import_blocks
@curry
def copy(chain: MiningChainAPI) -> MiningChainAPI:
    """
    Make a copy of the chain at the given state.  Actions performed on the
    resulting chain will not affect the original chain.

    :raises ValidationError: if ``chain`` is not a ``MiningChain`` or is
        backed by an unsupported database type.
    """
    if not isinstance(chain, MiningChainAPI):
        # bug fix: the original error message referred to `at_block_number`
        raise ValidationError("`copy` may only be used with `MiningChain`")
    base_db = chain.chaindb.db
    if not isinstance(base_db, AtomicDB):
        raise ValidationError(f"Unsupported database type: {type(base_db)}")
    if isinstance(base_db.wrapped_db, MemoryDB):
        # copy the underlying key/value store so the chains do not share state
        db = AtomicDB(MemoryDB(base_db.wrapped_db.kv_store.copy()))
    else:
        raise ValidationError(f"Unsupported wrapped database: {type(base_db.wrapped_db)}")
    chain_copy = type(chain)(db, chain.header)
    return chain_copy
def chain_split(*splits: Iterable[Callable[..., Any]]) -> Callable[[ChainAPI], Iterable[ChainAPI]]: # noqa: E501
"""
Construct and execute multiple concurrent forks of the chain.
Any number of forks may be executed. For each fork, provide an iterable of
commands.
Returns the resulting chain objects for each fork.
.. code-block:: python
chain_a, chain_b = build(
mining_chain,
chain_split(
(mine_block(extra_data=b'chain-a'), mine_block()),
(mine_block(extra_data=b'chain-b'), mine_block(), mine_block()),
),
)
"""
if not splits:
raise ValidationError("Cannot use `chain_split` without providing at least one split")
@functools.wraps(chain_split)
@to_tuple
def _chain_split(chain: ChainAPI) -> Iterable[ChainAPI]:
for split_fns in splits:
result = build(
chain,
*split_fns,
)
yield result
return _chain_split
@curry
def at_block_number(block_number: Union[int, BlockNumber], chain: MiningChainAPI) -> MiningChainAPI:
    """
    Rewind the chain back to the given block number.  Calls to things like
    ``get_canonical_head`` will still return the canonical head of the chain,
    however, you can use ``mine_block`` to mine fork chains.
    """
    if not isinstance(chain, MiningChainAPI):
        # bug fix: the original message had an unbalanced quote ('MiningChain)
        raise ValidationError("`at_block_number` may only be used with `MiningChain`")
    at_block = chain.get_canonical_block_by_number(BlockNumber(block_number))
    db = chain.chaindb.db
    # note: the database is shared with the original chain (not copied)
    chain_at_block = type(chain)(db, chain.create_header_from_parent(at_block.header))
    return chain_at_block
|
en
| 0.672402
|
Run the provided object through the series of applicator functions. If ``obj`` is an instances of :class:`~eth.chains.base.BaseChain` the applicators will be run on a copy of the chain and thus will not mutate the provided chain instance. # # Constructors (creation of chain classes) # Assign the given name to the chain class. Set the ``chain_id`` for the chain class. Adds the ``vm_class`` to the chain's ``vm_configuration``. .. code-block:: python from eth.chains.base import MiningChain from eth.tools.builder.chain import build, fork_at FrontierOnlyChain = build(MiningChain, fork_at(FrontierVM, 0)) # these two classes are functionally equivalent. class FrontierOnlyChain(MiningChain): vm_configuration = ( (0, FrontierVM), ) .. note:: This function is curriable. The following pre-curried versions of this function are available as well, one for each mainnet fork. * :func:`~eth.tools.builder.chain.frontier_at` * :func:`~eth.tools.builder.chain.homestead_at` * :func:`~eth.tools.builder.chain.tangerine_whistle_at` * :func:`~eth.tools.builder.chain.spurious_dragon_at` * :func:`~eth.tools.builder.chain.byzantium_at` * :func:`~eth.tools.builder.chain.constantinople_at` * :func:`~eth.tools.builder.chain.petersburg_at` * :func:`~eth.tools.builder.chain.istanbul_at` * :func:`~eth.tools.builder.chain.latest_mainnet_at` - whatever the latest mainnet VM is # It isn't a subclass of the HomesteadVM # It is a subclass of on of the subsequent forks Set the ``support_dao_fork`` flag to ``False`` on the :class:`~eth.vm.forks.homestead.HomesteadVM`. Requires that presence of the :class:`~eth.vm.forks.homestead.HomesteadVM` in the ``vm_configuration`` Set the block number on which the DAO fork will happen. 
Requires that a version of the :class:`~eth.vm.forks.homestead.HomesteadVM` is present in the chain's ``vm_configuration`` # noqa: E501 # leave out the `state_root` if a genesis state was specified # populate the timestamp value at runtime Inject on demand generation of the proof of work mining seal on newly mined blocks into each of the chain's vms. # Seal validation already disabled, hence nothing to change Disable the proof of work validation check for each of the chain's vms. This allows for block mining without generation of the proof of work seal. .. note:: blocks mined this way will not be importable on any chain that does not have proof of work disabled. # Seal validation already disabled, hence nothing to change # type: ignore # type: ignore # # Initializers (initialization of chain state and chain class instantiation) # Initialize the given chain class with the given genesis header parameters and chain state. # # Builders (build actual block chain) # Mine a new block on the chain. Header parameters for the new block can be overridden using keyword arguments. Variadic argument version of :func:`~eth.tools.builder.chain.mine_block` Import the provided ``block`` into the chain. Variadic argument version of :func:`~eth.tools.builder.chain.import_block` Make a copy of the chain at the given state. Actions performed on the resulting chain will not affect the original chain. # noqa: E501 Construct and execute multiple concurrent forks of the chain. Any number of forks may be executed. For each fork, provide an iterable of commands. Returns the resulting chain objects for each fork. .. code-block:: python chain_a, chain_b = build( mining_chain, chain_split( (mine_block(extra_data=b'chain-a'), mine_block()), (mine_block(extra_data=b'chain-b'), mine_block(), mine_block()), ), ) Rewind the chain back to the given block number. 
Calls to things like ``get_canonical_head`` will still return the canonical head of the chain, however, you can use ``mine_block`` to mine fork chains.
| 1.948448
| 2
|
airflow_log_grepper/log_grepper.py
|
7yl4r/airflow_log_grepper
| 0
|
6626600
|
# script which outputs status overview of airflow worker logs searched for
# known error strings.
# good for use as a telegraf exec to monitor airflow via graphite/influxdb.
# Rather than using this with telegraf, a cronjob combined with the
# following format puts less strain on the db here (NOTE that the
# crontab period _must_ match the graphite retention schema (eg:
# `*.exec.per_ten_min.*` matches with `*/10 * * * * *`)):
import os
import re
import sys
import operator
import pprint
from glob import glob
import json
# shared pretty-printer for human-readable report output
pp = pprint.PrettyPrinter(indent=4)
# default airflow worker log directory (not referenced by the functions in
# this view — callers pass ``base_log_path`` explicitly; TODO confirm usage)
logdir = "/home/airflow/logs"
def matchwalk_re(regex, directory):
    '''Yield path/filenames under ``directory`` whose full path matches
    ``regex`` (an uncompiled regular expression searched, not matched).

    Adapted from https://stackoverflow.com/a/49681926/1483986
    '''
    pattern = re.compile(regex)
    for dirpath, _, filenames in os.walk(directory):
        for fname in filenames:
            if pattern.search(os.path.join(dirpath, fname)):
                yield '{0}{1}{2}'.format(dirpath, os.path.sep, fname)
def get_logfiles(base_log_path, dag_glob, task_glob):
    """
    Yield paths of airflow task-instance log files under ``base_log_path``.

    Expects the usual airflow layout:
        /logs/dag_id/task_id/{task_instance_dt}/{n}.log
    """
    full_glob = "{}/{}/{}/*/*.log".format(base_log_path, dag_glob, task_glob)
    print("grepping logs matching glob :\n\t{}".format(full_glob))
    yield from glob(full_glob)
def get_greps_from_config_json(json_config_fpath):
    """
    Load a grep-string config file for one DAG glob.

    The json config file should be named with a filename like
    `${dag_glob}.json` and map each task glob to a dict of
    {match_key: string_to_grep_for}, e.g.:
        {
            "task_glob_1": {
                "match_key_1": "string to grep for #1",
                "match_key_2": "other string to grep for"
            },
            "task_glob_2": {...}
        }
    (NOTE: downstream code (``get_grepped_log_counts``) iterates each value
    with ``.items()``, so the values must be dicts — the previous docstring
    showed lists of dicts, which would not work.)

    :returns: (dag_glob, greps_by_task_globs_dict)
    """
    # DAG glob comes from filename
    dag_glob = os.path.basename(json_config_fpath).replace(".json", "")
    with open(json_config_fpath) as json_file:
        greps_by_task_globs_dict = json.load(json_file)
    return dag_glob, greps_by_task_globs_dict
def progressbar(it, prefix="", size=60, file=sys.stdout):
    """
    Wrap a sized iterable, yielding its items while drawing an ASCII
    progress bar to ``file``.

    Based on https://stackoverflow.com/a/34482761/1483986
    """
    count = len(it)

    def draw(done):
        try:
            filled = int(size * done / count)
        except ZeroDivisionError:
            # empty iterable: nothing meaningful to scale against
            filled = count
        file.write(
            "%s[%s%s] %i/%i\r" % (prefix, "#" * filled, "." * (size - filled), done, count)
        )
        file.flush()

    draw(0)
    for index, item in enumerate(it):
        yield item
        draw(index + 1)
    file.write("\n")
    file.flush()
def get_grepped_log_counts(greps_json_file, base_log_path):
    """
    Classify task log files by grepping each one for configured strings.

    :param greps_json_file: path to a `${dag_glob}.json` config file
        (see ``get_greps_from_config_json``)
    :param base_log_path: root of the airflow log directory tree
    :returns: list of ``(classification_key, count)`` tuples sorted by count
        ascending.  NOTE: a sorted list of tuples, not a dict (the previous
        docstring said "sorted dict").
    """
    dag_glob, greps_by_task_globs = get_greps_from_config_json(greps_json_file)
    counts = {}
    # iterate over each task glob and its grep strings
    print("{} tasks glob strings found".format(len(greps_by_task_globs)))
    never_matched_files = []
    for task_glob, greps in greps_by_task_globs.items():
        print("\t{}".format(task_glob))
        for key in greps:
            assert key not in counts  # no duplicate keys across task globs!
            counts[key] = 0
        counts['success'] = 0
        counts['unmatched'] = 0
        print("{} grep strings for this task glob".format(len(greps)))
        # search this task's logfiles
        unmatched_files = []
        log_files = list(get_logfiles(base_log_path, dag_glob, task_glob))
        for i in progressbar(range(len(log_files))):
            log_file = log_files[i]
            matches = []
            # bug fix: read each file exactly once — the original re-opened
            # and fully re-read the file for every grep string and never
            # closed any of the file handles
            with open(log_file) as f:
                fstr = f.read()
            # special case for successful run:
            if fstr.strip().endswith("Command exited with return code 0"):
                counts['success'] += 1
                matches.append('success')
            for grep_key, grep_str in greps.items():
                if grep_str in fstr:
                    counts[grep_key] += 1
                    matches.append(grep_key)
            if len(matches) > 1:
                # multiple classifications: re-bin under a combined key
                for key in matches:
                    counts[key] -= 1
                multimatch_key = '_AND_'.join(matches)
                counts[multimatch_key] = counts.get(multimatch_key, 0) + 1
            elif not matches:
                unmatched_files.append(log_file.replace(base_log_path, ""))
        # keep unmatched_files from this search & previous task globs
        # (the original used a no-break `for/else`, which always runs)
        never_matched_files.extend(unmatched_files)
    if len(never_matched_files) > 0:
        print("{} UNMATCHED files! First 10:".format(
            len(never_matched_files)
        ))
        pp.pprint(never_matched_files[:10])
        counts['unmatched'] = len(never_matched_files)
    print("\n" + "-"*100)
    sorted_counts = sorted(counts.items(), key=operator.itemgetter(1))
    pp.pprint(sorted_counts)
    return sorted_counts
|
# script which outputs status overview of airflow worker logs searched for
# known error strings.
# good for use as a telegraf exec to monitor airflow via graphite/influxdb.
# Rather than using this with telegraf, a cronjob combined with the
# following format puts less strain on the db here (NOTE that the
# crontab period _must_ match the graphite retention schema (eg:
# `*.exec.per_ten_min.*` matches with `*/10 * * * * *`)):
import os
import re
import sys
import operator
import pprint
from glob import glob
import json
# Shared pretty-printer used for all report output below.
pp = pprint.PrettyPrinter(indent=4)
# NOTE(review): logdir appears unused in this chunk -- confirm before removing.
logdir = "/home/airflow/logs"
def matchwalk_re(regex, directory):
    """Yield full paths under *directory* whose path matches *regex*.

    Based on https://stackoverflow.com/a/49681926/1483986

    Args:
        regex: regular expression (string) searched against each full
            file path (``re.search`` semantics: match anywhere in path).
        directory: root directory to walk recursively.

    Yields:
        str: full path of each matching file.
    """
    pattern = re.compile(regex)
    for dirpath, _, filenames in os.walk(directory):
        for fname in filenames:
            full_path = os.path.join(dirpath, fname)
            # Yield the exact string that was tested against the pattern;
            # the old code tested os.path.join(...) but yielded a manually
            # sep-joined string, which could disagree on trailing separators.
            if pattern.search(full_path):
                yield full_path
def get_logfiles(base_log_path, dag_glob, task_glob):
    """Yield every airflow task-instance ``*.log`` file matching the globs.

    Assumes the usual airflow worker log layout:
        {base_log_path}/{dag_id}/{task_id}/{task_instance_dt}/{n}.log
    """
    pattern = "/".join([base_log_path, dag_glob, task_glob, "*", "*.log"])
    print("grepping logs matching glob :\n\t{}".format(pattern))
    yield from glob(pattern)
def get_greps_from_config_json(json_config_fpath):
    """Load a grep-config JSON file and the DAG glob encoded in its name.

    The config file must be named ``${dag_glob}.json`` and its body maps
    task globs to ``{match_key: grep_string}`` dicts::

        {
            "task_glob_1": {
                "match_key_1": "string to grep for #1",
                "match_key_2": "other string to grep for"
            },
            "task_glob_2": {...}
        }

    Returns:
        tuple: (dag_glob, greps_by_task_globs_dict)
    """
    # The DAG glob is carried by the filename itself.
    config_fname = os.path.basename(json_config_fpath)
    dag_glob = config_fname.replace(".json", "")
    with open(json_config_fpath) as config_fobj:
        task_glob_greps = json.load(config_fobj)
    return dag_glob, task_glob_greps
def progressbar(it, prefix="", size=60, file=sys.stdout):
    """Yield items from *it* while drawing an ASCII progress bar to *file*.

    Based on https://stackoverflow.com/a/34482761/1483986
    """
    total = len(it)

    def draw(done):
        try:
            filled = int(size * done / total)
        except ZeroDivisionError:
            # Empty iterable: total == 0, so the bar stays unfilled.
            filled = total
        bar = "#" * filled + "." * (size - filled)
        file.write("{}[{}] {}/{}\r".format(prefix, bar, done, total))
        file.flush()

    draw(0)
    for done_so_far, item in enumerate(it, start=1):
        yield item
        draw(done_so_far)
    file.write("\n")
    file.flush()
def get_grepped_log_counts(greps_json_file, base_log_path):
    """
    Grep every matching airflow log file for known strings and classify it.

    Each log file lands in exactly one bucket: 'success' (file ends with
    airflow's "Command exited with return code 0" line), one configured
    grep key, a combined '<a>_AND_<b>' bucket when several greps match,
    or 'unmatched'.

    Args:
        greps_json_file: path to a ``${dag_glob}.json`` config file
            (see get_greps_from_config_json for the expected format).
        base_log_path: root directory of the airflow worker logs.

    Returns:
        list of (classification, count) tuples sorted by ascending count.
    """
    dag_glob, greps_by_task_globs = get_greps_from_config_json(greps_json_file)
    # Fix: initialize the shared buckets once, OUTSIDE the task loop.
    # Previously they were reset to 0 for every task glob, which discarded
    # 'success' counts accumulated for earlier tasks.
    counts = {'success': 0, 'unmatched': 0}
    never_matched_files = []
    print("{} tasks glob strings found".format(len(greps_by_task_globs)))
    for task_glob, greps in greps_by_task_globs.items():
        print("\t{}".format(task_glob))
        for key in greps:
            assert key not in counts  # no duplicate keys!
            counts[key] = 0
        print("{} grep strings for this task glob".format(len(greps)))
        # search this task's logfiles
        log_files = list(get_logfiles(base_log_path, dag_glob, task_glob))
        for i in progressbar(range(len(log_files))):
            log_file = log_files[i]
            matches = []
            # Fix: read the file once and reuse the contents for every grep.
            # The old code re-opened (and never closed) the file for each
            # grep string on top of the initial read.
            with open(log_file) as log_fobj:
                fstr = log_fobj.read()
            # special case for successful run:
            if fstr.strip().endswith("Command exited with return code 0"):
                counts['success'] += 1
                matches.append('success')
            for grep_key, grep_str in greps.items():
                if grep_str in fstr:
                    counts[grep_key] += 1
                    matches.append(grep_key)
            if len(matches) > 1:
                # Multiple buckets matched: move the file into a combined
                # '<a>_AND_<b>' bucket instead of double counting it.
                for key in matches:
                    counts[key] -= 1
                multimatch_key = '_AND_'.join(matches)
                counts[multimatch_key] = counts.get(multimatch_key, 0) + 1
            elif not matches:
                never_matched_files.append(log_file.replace(base_log_path, ""))
    # (The original wrapped this in a no-op for/else; the else branch of a
    # break-less for always runs, so this is an unconditional extend.)
    if never_matched_files:
        print("{} UNMATCHED files! First 10:".format(
            len(never_matched_files)
        ))
        pp.pprint(never_matched_files[:10])
    counts['unmatched'] = len(never_matched_files)
    print("\n" + "-"*100)
    sorted_counts = sorted(counts.items(), key=operator.itemgetter(1))
    pp.pprint(sorted_counts)
    return sorted_counts
|
en
| 0.733921
|
# script which outputs status overview of airflow worker logs searched for # known error strings. # good for use as a telegraf exec to monitor airflow via graphite/influxdb. # Rather than using this with telegraf, a cronjob combined with the # following format puts less strain on the db here (NOTE that the # crontab period _must_ match the graphite retention schema (eg: # `*.exec.per_ten_min.*` matches with `*/10 * * * * *`)): Yield path/filenames matching some regular expression from https://stackoverflow.com/a/49681926/1483986 # else: # print(p) Returns iterator of files in airflow DAG log directory. Expects dir structure like: /logs/dag_id/task_id/{task_instance_dt}/{n}.log json config file should be named with a filename like `${dag_glob}.json` and look like: { "task_glob_1": [ { "match_key_1": "string to grep for #1", "match_key_2": "other string to grep for" } ], "task_glob_2": [{...}] } # DAG glob comes from filename ASCII progress bar based on https://stackoverflow.com/a/34482761/1483986 returns sorted dict of counts for all log classifications # iterate over each task # import pdb; pdb.set_trace() # no duplicate keys! # search this task's logfiles # grep the file for strings # print(files) #entry.name) # special case for successful run: # print(grep_key) # print('ERR: multiple matches!:{}'.format(matches)) # print(file) # matches < 1: # keep unmatched_files from this search & previous
| 2.452925
| 2
|
engage-analytics/lei.py
|
oliveriopt/mood-analytics
| 0
|
6626601
|
<reponame>oliveriopt/mood-analytics
#!/usr/bin/env python3
import time
import sys
import shutil
import logging.config
from src.lei.src.read_path import *
from src.lei.src.read_source_files import ImportLeiFile
from src.lei.src.read_transform_file import *
from src.lei.src.split_file import *
from src.lei.src.inject_sql import *
from src.utilities import set_log_level, get_project_path
# Logger
logging.config.fileConfig("%s/logging.ini" % get_project_path())
set_log_level()
start = time.time()  # wall-clock start; reported at the end of the run
# NOTE(review): a 100k recursion limit suggests deeply recursive JSON
# parsing downstream -- confirm it is still required.
sys.setrecursionlimit(100000)
path = str(TakePath.define_path())
### IMPORT FILE AND DOWNLOAD THE FILE TO /DATA/ FOLDER
pathFile_lei2 = ImportLeiFile.run_import_unzip_file(path, type_lei_file="lei2")
pathFile_rr = ImportLeiFile.run_import_unzip_file(path, type_lei_file="rr")
## SPLIT BIG FILE JSON
# The lei2 (entities) and rr (relationships) downloads are split into
# smaller numbered JSON chunks so they can be parsed one at a time.
Split.split_json(source_filepath=pathFile_lei2, dest_folder=path + cons.FOLDER_TO_STORAGE_DATA,
                 split_file_prefix=path + cons.FOLDER_TO_STORAGE_DATA \
                                   + cons.PREFIX_TEMPORARY_FILE, records_per_file=cons.REGISTERS_PER_FILE,
                 type_file="lei2")
Split.split_json(source_filepath=pathFile_rr, dest_folder=path + cons.FOLDER_TO_STORAGE_DATA,
                 split_file_prefix=path + cons.FOLDER_TO_STORAGE_DATA \
                                   + cons.PREFIX_TEMPORARY_FILE_RR, records_per_file=10000, type_file="rr")
# NOTE(review): [1:] drops the first sorted directory entry and the tail
# slice below assumes the "rr" chunks sort after the lei2 chunks --
# position-based filtering is fragile; confirm on the target system.
list_file = sorted(os.listdir(path + cons.FOLDER_TO_STORAGE_DATA))[1:]
list_file_rr = [s for s in list_file if "rr" in s]
list_file = list_file[0:len(list_file) - len(list_file_rr)]
# NOTE(review): empty root password on localhost -- confirm credentials
# come from environment/config in production.
conn = Connector(user="root", pw="", host="localhost", database=cons.DATABASE_NAME)
conn.open_connection()
### READ FILES JSON ALREADY SPLITTED
for i in list_file:
    first_step = False
    entity, other_names, other_add = ParserJSON.read_files(path + cons.FOLDER_TO_STORAGE_DATA + str(i))
    # Every chunk except "test_file_00" is flagged as a follow-up step --
    # presumably so the injector skips first-run-only work; TODO confirm.
    if i != "test_file_00": first_step = True
    InjectSQLtoDB.inject_to_db_entity(conn, entity, other_names, other_add, first_step)
for i in list_file_rr:
    relationships = ParserJSON.read_files_rr(path + cons.FOLDER_TO_STORAGE_DATA + str(i))
    InjectSQLtoDB.inject_to_db_rr(conn, relationships)
## DELETE TEMP FILE
shutil.rmtree("%s%s" % (path, cons.FOLDER_TO_STORAGE_DATA))
logger.info(msg="Process time: " + str((time.time() - start)) + " sec.")
conn.close_connection()
|
#!/usr/bin/env python3
import time
import sys
import shutil
import logging.config
from src.lei.src.read_path import *
from src.lei.src.read_source_files import ImportLeiFile
from src.lei.src.read_transform_file import *
from src.lei.src.split_file import *
from src.lei.src.inject_sql import *
from src.utilities import set_log_level, get_project_path
# Logger
logging.config.fileConfig("%s/logging.ini" % get_project_path())
set_log_level()
start = time.time()
sys.setrecursionlimit(100000)
path = str(TakePath.define_path())
### IMPORT FILE AND DOWNLOAD THE FILE TO /DATA/ FOLDER
pathFile_lei2 = ImportLeiFile.run_import_unzip_file(path, type_lei_file="lei2")
pathFile_rr = ImportLeiFile.run_import_unzip_file(path, type_lei_file="rr")
## SPLIT BIG FILE JSON
Split.split_json(source_filepath=pathFile_lei2, dest_folder=path + cons.FOLDER_TO_STORAGE_DATA,
split_file_prefix=path + cons.FOLDER_TO_STORAGE_DATA \
+ cons.PREFIX_TEMPORARY_FILE, records_per_file=cons.REGISTERS_PER_FILE,
type_file="lei2")
Split.split_json(source_filepath=pathFile_rr, dest_folder=path + cons.FOLDER_TO_STORAGE_DATA,
split_file_prefix=path + cons.FOLDER_TO_STORAGE_DATA \
+ cons.PREFIX_TEMPORARY_FILE_RR, records_per_file=10000, type_file="rr")
list_file = sorted(os.listdir(path + cons.FOLDER_TO_STORAGE_DATA))[1:]
list_file_rr = [s for s in list_file if "rr" in s]
list_file = list_file[0:len(list_file) - len(list_file_rr)]
conn = Connector(user="root", pw="", host="localhost", database=cons.DATABASE_NAME)
conn.open_connection()
### READ FILES JSON ALREADY SPLITTED
for i in list_file:
first_step = False
entity, other_names, other_add = ParserJSON.read_files(path + cons.FOLDER_TO_STORAGE_DATA + str(i))
if i != "test_file_00": first_step = True
InjectSQLtoDB.inject_to_db_entity(conn, entity, other_names, other_add, first_step)
for i in list_file_rr:
relationships = ParserJSON.read_files_rr(path + cons.FOLDER_TO_STORAGE_DATA + str(i))
InjectSQLtoDB.inject_to_db_rr(conn, relationships)
## DELETE TEMP FILE
shutil.rmtree("%s%s" % (path, cons.FOLDER_TO_STORAGE_DATA))
logger.info(msg="Process time: " + str((time.time() - start)) + " sec.")
conn.close_connection()
|
en
| 0.153477
|
#!/usr/bin/env python3 # Logger ### IMPORT FILE AND DOWNLOAD THE FILE TO /DATA/ FOLDER ## SPLIT BIG FILE JSON ### READ FILES JSON ALREADY SPLITTED ## DELETE TEMP FILE
| 2.222398
| 2
|
python/ray/tune/integration/mlflow.py
|
siddgoel/ray
| 22
|
6626602
|
from typing import Dict, Callable, Optional
import logging
import ray
from ray.tune.trainable import Trainable
from ray.tune.logger import Logger, LoggerCallback
from ray.tune.result import TRAINING_ITERATION, TIMESTEPS_TOTAL
from ray.tune.trial import Trial
from ray.util.annotations import Deprecated
from ray.util.ml_utils.mlflow import MLflowLoggerUtil
logger = logging.getLogger(__name__)
class MLflowLoggerCallback(LoggerCallback):
    """MLflow Logger to automatically log Tune results and config to MLflow.

    MLflow (https://mlflow.org) Tracking is an open source library for
    recording and querying experiments. This Ray Tune ``LoggerCallback``
    sends information (config parameters, training results & metrics,
    and artifacts) to MLflow for automatic experiment tracking.

    Args:
        tracking_uri: The tracking URI for where to manage experiments
            and runs. This can either be a local file path or a remote server.
            This arg gets passed directly to mlflow
            initialization. When using Tune in a multi-node setting, make sure
            to set this to a remote server and not a local file path.
        registry_uri: The registry URI that gets passed directly to
            mlflow initialization.
        experiment_name: The experiment name to use for this Tune run.
            If the experiment with the name already exists with MLflow,
            it will be reused. If not, a new experiment will be created with
            that name.
        tags: An optional dictionary of string keys and values to set
            as tags on the run
        save_artifact: If set to True, automatically save the entire
            contents of the Tune local_dir as an artifact to the
            corresponding run in MlFlow.

    Example:
        .. code-block:: python

            from ray.tune.integration.mlflow import MLflowLoggerCallback

            tags = { "user_name" : "John",
                     "git_commit_hash" : "abc123"}

            tune.run(
                train_fn,
                config={
                    # define search space here
                    "parameter_1": tune.choice([1, 2, 3]),
                    "parameter_2": tune.choice([4, 5, 6]),
                },
                callbacks=[MLflowLoggerCallback(
                    experiment_name="experiment1",
                    tags=tags,
                    save_artifact=True)])
    """

    def __init__(
        self,
        tracking_uri: Optional[str] = None,
        registry_uri: Optional[str] = None,
        experiment_name: Optional[str] = None,
        tags: Optional[Dict] = None,
        save_artifact: bool = False,
    ):
        # Settings are only stored here; they are applied to mlflow in
        # setup(), which runs once the Tune experiment starts.
        self.tracking_uri = tracking_uri
        self.registry_uri = registry_uri
        self.experiment_name = experiment_name
        self.tags = tags
        self.should_save_artifact = save_artifact
        # Thin wrapper around the mlflow client API.
        self.mlflow_util = MLflowLoggerUtil()
        if ray.util.client.ray.is_connected():
            logger.warning(
                "When using MLflowLoggerCallback with Ray Client, "
                "it is recommended to use a remote tracking "
                "server. If you are using a MLflow tracking server "
                "backed by the local filesystem, then it must be "
                "setup on the server side and not on the client "
                "side."
            )

    def setup(self, *args, **kwargs):
        # Setup the mlflow logging util.
        self.mlflow_util.setup_mlflow(
            tracking_uri=self.tracking_uri,
            registry_uri=self.registry_uri,
            experiment_name=self.experiment_name,
        )
        if self.tags is None:
            # Create empty dictionary for tags if not given explicitly
            self.tags = {}
        # Maps each Trial object to the MLflow run_id created for it.
        self._trial_runs = {}

    def log_trial_start(self, trial: "Trial"):
        # Create run if not already exists.
        if trial not in self._trial_runs:
            # Set trial name in tags
            tags = self.tags.copy()
            tags["trial_name"] = str(trial)
            run = self.mlflow_util.start_run(tags=tags, run_name=str(trial))
            self._trial_runs[trial] = run.info.run_id
        run_id = self._trial_runs[trial]
        # Log the config parameters.
        config = trial.config
        self.mlflow_util.log_params(run_id=run_id, params_to_log=config)

    def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
        # Prefer total timesteps as the metric step; fall back to the
        # training iteration when timesteps are absent (or zero/falsy).
        step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
        run_id = self._trial_runs[trial]
        self.mlflow_util.log_metrics(run_id=run_id, metrics_to_log=result, step=step)

    def log_trial_end(self, trial: "Trial", failed: bool = False):
        run_id = self._trial_runs[trial]
        # Log the artifact if set_artifact is set to True.
        if self.should_save_artifact:
            self.mlflow_util.save_artifacts(run_id=run_id, dir=trial.logdir)
        # Stop the run once trial finishes.
        status = "FINISHED" if not failed else "FAILED"
        self.mlflow_util.end_run(run_id=run_id, status=status)
@Deprecated
class MLflowLogger(Logger):
    """Deprecated ``Logger``-API MLflow logger.

    Requires the experiment configuration to have a MLflow Experiment ID
    or manually set the proper environment variables.
    """

    def _init(self):
        # The legacy Logger API is gone; fail loudly with a pointer to the
        # supported replacement.
        deprecation_message = (
            "The legacy MLflowLogger has been "
            "deprecated. Use the MLflowLoggerCallback "
            "instead."
        )
        raise DeprecationWarning(deprecation_message)
def mlflow_mixin(func: Callable):
    """mlflow_mixin

    MLflow (https://mlflow.org) Tracking is an open source library for
    recording and querying experiments. This Ray Tune Trainable mixin helps
    initialize the MLflow API for use with the ``Trainable`` class or the
    ``@mlflow_mixin`` function API. This mixin automatically configures MLflow
    and creates a run in the same process as each Tune trial. You can then
    use the mlflow API inside your training function and it will
    automatically get reported to the correct run.

    For basic usage, just prepend your training function with the
    ``@mlflow_mixin`` decorator:

    .. code-block:: python

        from ray.tune.integration.mlflow import mlflow_mixin

        @mlflow_mixin
        def train_fn(config):
            ...
            mlflow.log_metric(...)

    You can also use MlFlow's autologging feature if using a training
    framework like Pytorch Lightning, XGBoost, etc. More information can be
    found here
    (https://mlflow.org/docs/latest/tracking.html#automatic-logging).

    .. code-block:: python

        from ray.tune.integration.mlflow import mlflow_mixin

        @mlflow_mixin
        def train_fn(config):
            mlflow.autolog()
            xgboost_results = xgb.train(config, ...)

    The MlFlow configuration is done by passing a ``mlflow`` key to
    the ``config`` parameter of ``tune.run()`` (see example below).

    The content of the ``mlflow`` config entry is used to
    configure MlFlow. Here are the keys you can pass in to this config entry:

    Args:
        tracking_uri (str): The tracking URI for MLflow tracking. If using
            Tune in a multi-node setting, make sure to use a remote server for
            tracking.
        experiment_id (str): The id of an already created MLflow experiment.
            All logs from all trials in ``tune.run`` will be reported to this
            experiment. If this is not provided or the experiment with this
            id does not exist, you must provide an ``experiment_name``. This
            parameter takes precedence over ``experiment_name``.
        experiment_name (str): The name of an already existing MLflow
            experiment. All logs from all trials in ``tune.run`` will be
            reported to this experiment. If this is not provided, you must
            provide a valid ``experiment_id``.
        token (optional, str): A token to use for HTTP authentication when
            logging to a remote tracking server. This is useful when you
            want to log to a Databricks server, for example. This value will
            be used to set the MLFLOW_TRACKING_TOKEN environment variable on
            all the remote training processes.

    Example:
        .. code-block:: python

            from ray import tune
            from ray.tune.integration.mlflow import mlflow_mixin

            import mlflow

            # Create the MlFlow experiment.
            mlflow.create_experiment("my_experiment")

            @mlflow_mixin
            def train_fn(config):
                for i in range(10):
                    loss = config["a"] + config["b"]
                    mlflow.log_metric(key="loss", value=loss)
                tune.report(loss=loss, done=True)

            tune.run(
                train_fn,
                config={
                    # define search space here
                    "a": tune.choice([1, 2, 3]),
                    "b": tune.choice([4, 5, 6]),
                    # mlflow configuration
                    "mlflow": {
                        "experiment_name": "my_experiment",
                        "tracking_uri": mlflow.get_tracking_uri()
                    }
                })
    """
    if ray.util.client.ray.is_connected():
        logger.warning(
            "When using mlflow_mixin with Ray Client, "
            "it is recommended to use a remote tracking "
            "server. If you are using a MLflow tracking server "
            "backed by the local filesystem, then it must be "
            "setup on the server side and not on the client "
            "side."
        )
    # Chain with any mixins applied by earlier decorators instead of
    # overwriting them. NOTE(review): ``__mixins__`` is presumably consumed
    # when Tune wraps the function into a Trainable -- confirm.
    if hasattr(func, "__mixins__"):
        func.__mixins__ = func.__mixins__ + (MLflowTrainableMixin,)
    else:
        func.__mixins__ = (MLflowTrainableMixin,)
    return func
class MLflowTrainableMixin:
    """Trainable mixin that configures MLflow and opens one run per trial.

    Expects a ``mlflow`` entry in the trial ``config`` dict; see
    ``mlflow_mixin`` for the accepted keys. Raises ``ValueError`` when the
    mixin is applied to a non-``Trainable`` class or when the ``mlflow``
    config entry / ``tracking_uri`` is missing.
    """

    def __init__(self, config: Dict, *args, **kwargs):
        self.mlflow_util = MLflowLoggerUtil()
        if not isinstance(self, Trainable):
            raise ValueError(
                "The `MLflowTrainableMixin` can only be used as a mixin "
                "for `tune.Trainable` classes. Please make sure your "
                "class inherits from both. For example: "
                "`class YourTrainable(MLflowTrainableMixin)`."
            )
        super().__init__(config, *args, **kwargs)
        # Work on a copy so popping the mlflow entry below does not mutate
        # the caller's config dict.
        _config = config.copy()
        try:
            mlflow_config = _config.pop("mlflow").copy()
        except KeyError as e:
            raise ValueError(
                "MLflow mixin specified but no configuration has been passed. "
                "Make sure to include a `mlflow` key in your `config` dict "
                "containing at least a `tracking_uri` and either "
                "`experiment_name` or `experiment_id` specification."
            ) from e
        tracking_uri = mlflow_config.pop("tracking_uri", None)
        if tracking_uri is None:
            raise ValueError(
                "MLflow mixin specified but no "
                "tracking_uri has been "
                "passed in. Make sure to include a `mlflow` "
                "key in your `config` dict containing at "
                "least a `tracking_uri`"
            )
        # Set the tracking token if one is passed in.
        tracking_token = mlflow_config.pop("token", None)
        experiment_id = mlflow_config.pop("experiment_id", None)
        experiment_name = mlflow_config.pop("experiment_name", None)
        # This initialization happens in each of the Trainables/workers.
        # So we have to set `create_experiment_if_not_exists` to False.
        # Otherwise there might be race conditions when each worker tries to
        # create the same experiment.
        # For the mixin, the experiment must be created beforehand.
        self.mlflow_util.setup_mlflow(
            tracking_uri=tracking_uri,
            experiment_id=experiment_id,
            experiment_name=experiment_name,
            tracking_token=tracking_token,
            create_experiment_if_not_exists=False,
        )
        # NOTE(review): "/" is replaced, presumably to keep run names
        # path/URL safe -- confirm against the MLflow server in use.
        run_name = self.trial_name + "_" + self.trial_id
        run_name = run_name.replace("/", "_")
        self.mlflow_util.start_run(set_active=True, run_name=run_name)

    def stop(self):
        # Close the trial's active MLflow run when the Trainable stops.
        self.mlflow_util.end_run()
|
from typing import Dict, Callable, Optional
import logging
import ray
from ray.tune.trainable import Trainable
from ray.tune.logger import Logger, LoggerCallback
from ray.tune.result import TRAINING_ITERATION, TIMESTEPS_TOTAL
from ray.tune.trial import Trial
from ray.util.annotations import Deprecated
from ray.util.ml_utils.mlflow import MLflowLoggerUtil
logger = logging.getLogger(__name__)
class MLflowLoggerCallback(LoggerCallback):
"""MLflow Logger to automatically log Tune results and config to MLflow.
MLflow (https://mlflow.org) Tracking is an open source library for
recording and querying experiments. This Ray Tune ``LoggerCallback``
sends information (config parameters, training results & metrics,
and artifacts) to MLflow for automatic experiment tracking.
Args:
tracking_uri: The tracking URI for where to manage experiments
and runs. This can either be a local file path or a remote server.
This arg gets passed directly to mlflow
initialization. When using Tune in a multi-node setting, make sure
to set this to a remote server and not a local file path.
registry_uri: The registry URI that gets passed directly to
mlflow initialization.
experiment_name: The experiment name to use for this Tune run.
If the experiment with the name already exists with MLflow,
it will be reused. If not, a new experiment will be created with
that name.
tags: An optional dictionary of string keys and values to set
as tags on the run
save_artifact: If set to True, automatically save the entire
contents of the Tune local_dir as an artifact to the
corresponding run in MlFlow.
Example:
.. code-block:: python
from ray.tune.integration.mlflow import MLflowLoggerCallback
tags = { "user_name" : "John",
"git_commit_hash" : "abc123"}
tune.run(
train_fn,
config={
# define search space here
"parameter_1": tune.choice([1, 2, 3]),
"parameter_2": tune.choice([4, 5, 6]),
},
callbacks=[MLflowLoggerCallback(
experiment_name="experiment1",
tags=tags,
save_artifact=True)])
"""
def __init__(
self,
tracking_uri: Optional[str] = None,
registry_uri: Optional[str] = None,
experiment_name: Optional[str] = None,
tags: Optional[Dict] = None,
save_artifact: bool = False,
):
self.tracking_uri = tracking_uri
self.registry_uri = registry_uri
self.experiment_name = experiment_name
self.tags = tags
self.should_save_artifact = save_artifact
self.mlflow_util = MLflowLoggerUtil()
if ray.util.client.ray.is_connected():
logger.warning(
"When using MLflowLoggerCallback with Ray Client, "
"it is recommended to use a remote tracking "
"server. If you are using a MLflow tracking server "
"backed by the local filesystem, then it must be "
"setup on the server side and not on the client "
"side."
)
def setup(self, *args, **kwargs):
# Setup the mlflow logging util.
self.mlflow_util.setup_mlflow(
tracking_uri=self.tracking_uri,
registry_uri=self.registry_uri,
experiment_name=self.experiment_name,
)
if self.tags is None:
# Create empty dictionary for tags if not given explicitly
self.tags = {}
self._trial_runs = {}
def log_trial_start(self, trial: "Trial"):
# Create run if not already exists.
if trial not in self._trial_runs:
# Set trial name in tags
tags = self.tags.copy()
tags["trial_name"] = str(trial)
run = self.mlflow_util.start_run(tags=tags, run_name=str(trial))
self._trial_runs[trial] = run.info.run_id
run_id = self._trial_runs[trial]
# Log the config parameters.
config = trial.config
self.mlflow_util.log_params(run_id=run_id, params_to_log=config)
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
run_id = self._trial_runs[trial]
self.mlflow_util.log_metrics(run_id=run_id, metrics_to_log=result, step=step)
def log_trial_end(self, trial: "Trial", failed: bool = False):
run_id = self._trial_runs[trial]
# Log the artifact if set_artifact is set to True.
if self.should_save_artifact:
self.mlflow_util.save_artifacts(run_id=run_id, dir=trial.logdir)
# Stop the run once trial finishes.
status = "FINISHED" if not failed else "FAILED"
self.mlflow_util.end_run(run_id=run_id, status=status)
@Deprecated
class MLflowLogger(Logger):
"""MLflow logger using the deprecated Logger API.
Requires the experiment configuration to have a MLflow Experiment ID
or manually set the proper environment variables.
"""
def _init(self):
raise DeprecationWarning(
"The legacy MLflowLogger has been "
"deprecated. Use the MLflowLoggerCallback "
"instead."
)
def mlflow_mixin(func: Callable):
"""mlflow_mixin
MLflow (https://mlflow.org) Tracking is an open source library for
recording and querying experiments. This Ray Tune Trainable mixin helps
initialize the MLflow API for use with the ``Trainable`` class or the
``@mlflow_mixin`` function API. This mixin automatically configures MLflow
and creates a run in the same process as each Tune trial. You can then
use the mlflow API inside the your training function and it will
automatically get reported to the correct run.
For basic usage, just prepend your training function with the
``@mlflow_mixin`` decorator:
.. code-block:: python
from ray.tune.integration.mlflow import mlflow_mixin
@mlflow_mixin
def train_fn(config):
...
mlflow.log_metric(...)
You can also use MlFlow's autologging feature if using a training
framework like Pytorch Lightning, XGBoost, etc. More information can be
found here
(https://mlflow.org/docs/latest/tracking.html#automatic-logging).
.. code-block:: python
from ray.tune.integration.mlflow import mlflow_mixin
@mlflow_mixin
def train_fn(config):
mlflow.autolog()
xgboost_results = xgb.train(config, ...)
The MlFlow configuration is done by passing a ``mlflow`` key to
the ``config`` parameter of ``tune.run()`` (see example below).
The content of the ``mlflow`` config entry is used to
configure MlFlow. Here are the keys you can pass in to this config entry:
Args:
tracking_uri (str): The tracking URI for MLflow tracking. If using
Tune in a multi-node setting, make sure to use a remote server for
tracking.
experiment_id (str): The id of an already created MLflow experiment.
All logs from all trials in ``tune.run`` will be reported to this
experiment. If this is not provided or the experiment with this
id does not exist, you must provide an``experiment_name``. This
parameter takes precedence over ``experiment_name``.
experiment_name (str): The name of an already existing MLflow
experiment. All logs from all trials in ``tune.run`` will be
reported to this experiment. If this is not provided, you must
provide a valid ``experiment_id``.
token (optional, str): A token to use for HTTP authentication when
logging to a remote tracking server. This is useful when you
want to log to a Databricks server, for example. This value will
be used to set the MLFLOW_TRACKING_TOKEN environment variable on
all the remote training processes.
Example:
.. code-block:: python
from ray import tune
from ray.tune.integration.mlflow import mlflow_mixin
import mlflow
# Create the MlFlow expriment.
mlflow.create_experiment("my_experiment")
@mlflow_mixin
def train_fn(config):
for i in range(10):
loss = config["a"] + config["b"]
mlflow.log_metric(key="loss", value=loss)
tune.report(loss=loss, done=True)
tune.run(
train_fn,
config={
# define search space here
"a": tune.choice([1, 2, 3]),
"b": tune.choice([4, 5, 6]),
# mlflow configuration
"mlflow": {
"experiment_name": "my_experiment",
"tracking_uri": mlflow.get_tracking_uri()
}
})
"""
if ray.util.client.ray.is_connected():
logger.warning(
"When using mlflow_mixin with Ray Client, "
"it is recommended to use a remote tracking "
"server. If you are using a MLflow tracking server "
"backed by the local filesystem, then it must be "
"setup on the server side and not on the client "
"side."
)
if hasattr(func, "__mixins__"):
func.__mixins__ = func.__mixins__ + (MLflowTrainableMixin,)
else:
func.__mixins__ = (MLflowTrainableMixin,)
return func
class MLflowTrainableMixin:
    """Trainable mixin that configures MLflow and opens one run per trial.

    Connection settings are read from the ``"mlflow"`` key of the trial
    ``config``; an active MLflow run named after the trial is started on
    construction and ended by :meth:`stop`.
    """
    def __init__(self, config: Dict, *args, **kwargs):
        """Validate mixin usage, configure MLflow, and start the trial run.

        Args:
            config: Tune trial config. Must contain an ``"mlflow"`` dict
                with at least a ``tracking_uri`` entry; ``token``,
                ``experiment_id`` and ``experiment_name`` are optional.

        Raises:
            ValueError: If this class is not also a ``Trainable``, if the
                ``"mlflow"`` key is missing, or if no ``tracking_uri`` is
                provided.
        """
        self.mlflow_util = MLflowLoggerUtil()
        if not isinstance(self, Trainable):
            raise ValueError(
                "The `MLflowTrainableMixin` can only be used as a mixin "
                "for `tune.Trainable` classes. Please make sure your "
                "class inherits from both. For example: "
                "`class YourTrainable(MLflowTrainableMixin)`."
            )
        super().__init__(config, *args, **kwargs)
        # Work on copies so the pops below do not mutate the caller's config.
        _config = config.copy()
        try:
            mlflow_config = _config.pop("mlflow").copy()
        except KeyError as e:
            raise ValueError(
                "MLflow mixin specified but no configuration has been passed. "
                "Make sure to include a `mlflow` key in your `config` dict "
                "containing at least a `tracking_uri` and either "
                "`experiment_name` or `experiment_id` specification."
            ) from e
        tracking_uri = mlflow_config.pop("tracking_uri", None)
        if tracking_uri is None:
            raise ValueError(
                "MLflow mixin specified but no "
                "tracking_uri has been "
                "passed in. Make sure to include a `mlflow` "
                "key in your `config` dict containing at "
                "least a `tracking_uri`"
            )
        # Set the tracking token if one is passed in.
        tracking_token = mlflow_config.pop("token", None)
        experiment_id = mlflow_config.pop("experiment_id", None)
        experiment_name = mlflow_config.pop("experiment_name", None)
        # This initialization happens in each of the Trainables/workers.
        # So we have to set `create_experiment_if_not_exists` to False.
        # Otherwise there might be race conditions when each worker tries to
        # create the same experiment.
        # For the mixin, the experiment must be created beforehand.
        self.mlflow_util.setup_mlflow(
            tracking_uri=tracking_uri,
            experiment_id=experiment_id,
            experiment_name=experiment_name,
            tracking_token=tracking_token,
            create_experiment_if_not_exists=False,
        )
        # Run name combines trial name and id; "/" is replaced so the name
        # stays a single flat token.
        run_name = self.trial_name + "_" + self.trial_id
        run_name = run_name.replace("/", "_")
        self.mlflow_util.start_run(set_active=True, run_name=run_name)
    def stop(self):
        """End the active MLflow run when the trial stops."""
        self.mlflow_util.end_run()
|
en
| 0.718278
|
MLflow Logger to automatically log Tune results and config to MLflow. MLflow (https://mlflow.org) Tracking is an open source library for recording and querying experiments. This Ray Tune ``LoggerCallback`` sends information (config parameters, training results & metrics, and artifacts) to MLflow for automatic experiment tracking. Args: tracking_uri: The tracking URI for where to manage experiments and runs. This can either be a local file path or a remote server. This arg gets passed directly to mlflow initialization. When using Tune in a multi-node setting, make sure to set this to a remote server and not a local file path. registry_uri: The registry URI that gets passed directly to mlflow initialization. experiment_name: The experiment name to use for this Tune run. If the experiment with the name already exists with MLflow, it will be reused. If not, a new experiment will be created with that name. tags: An optional dictionary of string keys and values to set as tags on the run save_artifact: If set to True, automatically save the entire contents of the Tune local_dir as an artifact to the corresponding run in MlFlow. Example: .. code-block:: python from ray.tune.integration.mlflow import MLflowLoggerCallback tags = { "user_name" : "John", "git_commit_hash" : "abc123"} tune.run( train_fn, config={ # define search space here "parameter_1": tune.choice([1, 2, 3]), "parameter_2": tune.choice([4, 5, 6]), }, callbacks=[MLflowLoggerCallback( experiment_name="experiment1", tags=tags, save_artifact=True)]) # Setup the mlflow logging util. # Create empty dictionary for tags if not given explicitly # Create run if not already exists. # Set trial name in tags # Log the config parameters. # Log the artifact if set_artifact is set to True. # Stop the run once trial finishes. MLflow logger using the deprecated Logger API. Requires the experiment configuration to have a MLflow Experiment ID or manually set the proper environment variables. 
mlflow_mixin MLflow (https://mlflow.org) Tracking is an open source library for recording and querying experiments. This Ray Tune Trainable mixin helps initialize the MLflow API for use with the ``Trainable`` class or the ``@mlflow_mixin`` function API. This mixin automatically configures MLflow and creates a run in the same process as each Tune trial. You can then use the mlflow API inside the your training function and it will automatically get reported to the correct run. For basic usage, just prepend your training function with the ``@mlflow_mixin`` decorator: .. code-block:: python from ray.tune.integration.mlflow import mlflow_mixin @mlflow_mixin def train_fn(config): ... mlflow.log_metric(...) You can also use MlFlow's autologging feature if using a training framework like Pytorch Lightning, XGBoost, etc. More information can be found here (https://mlflow.org/docs/latest/tracking.html#automatic-logging). .. code-block:: python from ray.tune.integration.mlflow import mlflow_mixin @mlflow_mixin def train_fn(config): mlflow.autolog() xgboost_results = xgb.train(config, ...) The MlFlow configuration is done by passing a ``mlflow`` key to the ``config`` parameter of ``tune.run()`` (see example below). The content of the ``mlflow`` config entry is used to configure MlFlow. Here are the keys you can pass in to this config entry: Args: tracking_uri (str): The tracking URI for MLflow tracking. If using Tune in a multi-node setting, make sure to use a remote server for tracking. experiment_id (str): The id of an already created MLflow experiment. All logs from all trials in ``tune.run`` will be reported to this experiment. If this is not provided or the experiment with this id does not exist, you must provide an``experiment_name``. This parameter takes precedence over ``experiment_name``. experiment_name (str): The name of an already existing MLflow experiment. All logs from all trials in ``tune.run`` will be reported to this experiment. 
If this is not provided, you must provide a valid ``experiment_id``. token (optional, str): A token to use for HTTP authentication when logging to a remote tracking server. This is useful when you want to log to a Databricks server, for example. This value will be used to set the MLFLOW_TRACKING_TOKEN environment variable on all the remote training processes. Example: .. code-block:: python from ray import tune from ray.tune.integration.mlflow import mlflow_mixin import mlflow # Create the MlFlow expriment. mlflow.create_experiment("my_experiment") @mlflow_mixin def train_fn(config): for i in range(10): loss = config["a"] + config["b"] mlflow.log_metric(key="loss", value=loss) tune.report(loss=loss, done=True) tune.run( train_fn, config={ # define search space here "a": tune.choice([1, 2, 3]), "b": tune.choice([4, 5, 6]), # mlflow configuration "mlflow": { "experiment_name": "my_experiment", "tracking_uri": mlflow.get_tracking_uri() } }) # Set the tracking token if one is passed in. # This initialization happens in each of the Trainables/workers. # So we have to set `create_experiment_if_not_exists` to False. # Otherwise there might be race conditions when each worker tries to # create the same experiment. # For the mixin, the experiment must be created beforehand.
| 2.393143
| 2
|
meraki_sdk/models/two_four_ghz_settings_1_model.py
|
meraki/meraki-python-sdk
| 37
|
6626603
|
<reponame>meraki/meraki-python-sdk<filename>meraki_sdk/models/two_four_ghz_settings_1_model.py
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class TwoFourGhzSettings1Model(object):

    """Model for the 'TwoFourGhzSettings1' API schema.

    Represents the configurable settings of a network's 2.4GHz radio band.

    Attributes:
        max_power (int): Max transmit power (dBm); integer between 5 and 30.
        min_power (int): Min transmit power (dBm); integer between 5 and 30.
        min_bitrate (float): Min bitrate (Mbps); one of 1, 2, 5.5, 6, 9, 11,
            12, 18, 24, 36, 48 or 54.
        valid_auto_channels (list of int): Channels available to auto-channel
            selection; any of 1, 6 or 11.
        ax_enabled (bool): Whether the 802.11ax radio is enabled. When False,
            disabling band steering is recommended.
        rxsop (int): RX-SOP receive sensitivity level (dBm), -65 to -95;
            a null value resets it to the default.

    """

    # Maps Python attribute names to the JSON property names used by the API.
    _names = {
        "max_power": 'maxPower',
        "min_power": 'minPower',
        "min_bitrate": 'minBitrate',
        "valid_auto_channels": 'validAutoChannels',
        "ax_enabled": 'axEnabled',
        "rxsop": 'rxsop'
    }

    def __init__(self,
                 max_power=None,
                 min_power=None,
                 min_bitrate=None,
                 valid_auto_channels=None,
                 ax_enabled=None,
                 rxsop=None):
        """Initialize a TwoFourGhzSettings1Model with the given field values."""
        self.max_power = max_power
        self.min_power = min_power
        self.min_bitrate = min_bitrate
        self.valid_auto_channels = valid_auto_channels
        self.ax_enabled = ax_enabled
        self.rxsop = rxsop

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response dictionary.

        Args:
            dictionary (dict): Server response whose keys match the API
                property names; missing keys become None attributes.

        Returns:
            TwoFourGhzSettings1Model: A populated instance, or None when no
            dictionary is given.
        """
        if dictionary is None:
            return None
        # Resolve each API property through the name mapping; dict.get
        # yields None for any property the response omits.
        kwargs = {attr: dictionary.get(api_name)
                  for attr, api_name in cls._names.items()}
        return cls(**kwargs)
|
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class TwoFourGhzSettings1Model(object):
"""Implementation of the 'TwoFourGhzSettings1' model.
Settings related to 2.4Ghz band
Attributes:
max_power (int): Sets max power (dBm) of 2.4Ghz band. Can be integer
between 5 and 30.
min_power (int): Sets min power (dBm) of 2.4Ghz band. Can be integer
between 5 and 30.
min_bitrate (float): Sets min bitrate (Mbps) of 2.4Ghz band. Can be
one of '1', '2', '5.5', '6', '9', '11', '12', '18', '24', '36',
'48' or '54'.
valid_auto_channels (list of int): Sets valid auto channels for 2.4Ghz
band. Can be one of '1', '6' or '11'.
ax_enabled (bool): Determines whether ax radio on 2.4Ghz band is on or
off. Can be either true or false. If false, we highly recommend
disabling band steering.
rxsop (int): The RX-SOP level controls the sensitivity of the radio.
It is strongly recommended to use RX-SOP only after consulting
a wireless expert. RX-SOP can be configured in the range of -65 to
-95 (dBm). A value of null will reset this to the default.
"""
# Create a mapping from Model property names to API property names
_names = {
"max_power":'maxPower',
"min_power":'minPower',
"min_bitrate":'minBitrate',
"valid_auto_channels":'validAutoChannels',
"ax_enabled":'axEnabled',
"rxsop":'rxsop'
}
def __init__(self,
max_power=None,
min_power=None,
min_bitrate=None,
valid_auto_channels=None,
ax_enabled=None,
rxsop=None):
"""Constructor for the TwoFourGhzSettings1Model class"""
# Initialize members of the class
self.max_power = max_power
self.min_power = min_power
self.min_bitrate = min_bitrate
self.valid_auto_channels = valid_auto_channels
self.ax_enabled = ax_enabled
self.rxsop = rxsop
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
max_power = dictionary.get('maxPower')
min_power = dictionary.get('minPower')
min_bitrate = dictionary.get('minBitrate')
valid_auto_channels = dictionary.get('validAutoChannels')
ax_enabled = dictionary.get('axEnabled')
rxsop = dictionary.get('rxsop')
# Return an object of this model
return cls(max_power,
min_power,
min_bitrate,
valid_auto_channels,
ax_enabled,
rxsop)
|
en
| 0.747898
|
# -*- coding: utf-8 -*- meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ). Implementation of the 'TwoFourGhzSettings1' model.
Settings related to 2.4Ghz band
Attributes:
max_power (int): Sets max power (dBm) of 2.4Ghz band. Can be integer
between 5 and 30.
min_power (int): Sets min power (dBm) of 2.4Ghz band. Can be integer
between 5 and 30.
min_bitrate (float): Sets min bitrate (Mbps) of 2.4Ghz band. Can be
one of '1', '2', '5.5', '6', '9', '11', '12', '18', '24', '36',
'48' or '54'.
valid_auto_channels (list of int): Sets valid auto channels for 2.4Ghz
band. Can be one of '1', '6' or '11'.
ax_enabled (bool): Determines whether ax radio on 2.4Ghz band is on or
off. Can be either true or false. If false, we highly recommend
disabling band steering.
rxsop (int): The RX-SOP level controls the sensitivity of the radio.
It is strongly recommended to use RX-SOP only after consulting
a wireless expert. RX-SOP can be configured in the range of -65 to
-95 (dBm). A value of null will reset this to the default. # Create a mapping from Model property names to API property names Constructor for the TwoFourGhzSettings1Model class # Initialize members of the class Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class. # Extract variables from the dictionary # Return an object of this model
| 2.271135
| 2
|
statsmodels/iolib/tests/test_foreign.py
|
diego-mazon/statsmodels
| 0
|
6626604
|
<reponame>diego-mazon/statsmodels
"""
Tests for iolib/foreign.py
"""
import os
import warnings
from datetime import datetime
from io import BytesIO
from numpy.testing import assert_array_equal, assert_, assert_equal
import numpy as np
from pandas import DataFrame, isnull
import pandas.util.testing as ptesting
import pytest
from statsmodels.compat.python import asbytes
from statsmodels.iolib.foreign import (StataWriter, genfromdta,
_datetime_to_stata_elapsed, _stata_elapsed_date_to_datetime)
from statsmodels.datasets import macrodata
# Test precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
curdir = os.path.dirname(os.path.abspath(__file__))
def test_genfromdta():
    """genfromdta output matches the reference ndarray built with genfromtxt."""
    # Test genfromdta vs. results/macrodta.npy created with genfromtxt.
    # NOTE: Stata handles data very oddly. Round tripping from csv to dta
    # to ndarray 2710.349 (csv) -> 2510.2491 (stata) -> 2710.34912109375
    # (dta/ndarray)
    from .results.macrodata import macrodata_result as res2
    # genfromdta is deprecated, hence the expected FutureWarning.
    with pytest.warns(FutureWarning):
        res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta')
    assert_array_equal(res1 == res2, True)
def test_genfromdta_pandas():
    """genfromdta(pandas=True) matches macrodata loaded through pandas."""
    from pandas.util.testing import assert_frame_equal
    dta = macrodata.load_pandas().data
    curdir = os.path.dirname(os.path.abspath(__file__))
    with pytest.warns(FutureWarning):
        res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta',
                          pandas=True)
    # Compare as float so integer columns do not spoil frame equality.
    res1 = res1.astype(float)
    assert_frame_equal(res1, dta.astype(float))
def test_stata_writer_structured():
    """A structured ndarray round-trips through StataWriter/genfromdta."""
    buf = BytesIO()
    dta = macrodata.load(as_pandas=False).data
    dtype = dta.dtype
    # Force the first two columns to int so the dta types are exercised.
    dt = [('year', int), ('quarter', int)] + dtype.descr[2:]
    dta = dta.astype(np.dtype(dt))
    with pytest.warns(FutureWarning):
        writer = StataWriter(buf, dta)
    writer.write_file()
    buf.seek(0)
    with pytest.warns(FutureWarning):
        dta2 = genfromdta(buf)
    assert_array_equal(dta, dta2)
def test_stata_writer_array():
    """A plain 2-d array (DataFrame.values) round-trips through StataWriter."""
    buf = BytesIO()
    dta = macrodata.load(as_pandas=False).data
    dta = DataFrame.from_records(dta)
    dta.columns = ["v%d" % i for i in range(1,15)]
    with pytest.warns(FutureWarning):
        writer = StataWriter(buf, dta.values)
    writer.write_file()
    buf.seek(0)
    with pytest.warns(FutureWarning):
        dta2 = genfromdta(buf)
    # Compare as records since genfromdta returns a structured array.
    dta = dta.to_records(index=False)
    assert_array_equal(dta, dta2)
def test_missing_roundtrip():
    """NaN, inf and empty-string values survive a dta round trip as missing."""
    buf = BytesIO()
    dta = np.array([(np.nan, np.inf, "")],
                   dtype=[("double_miss", float),
                          ("float_miss", np.float32),
                          ("string_miss", "a1")])
    with pytest.warns(FutureWarning):
        writer = StataWriter(buf, dta)
    writer.write_file()
    buf.seek(0)
    with pytest.warns(FutureWarning):
        dta = genfromdta(buf, missing_flt=np.nan)
    assert_(isnull(dta[0][0]))
    assert_(isnull(dta[0][1]))
    assert_(dta[0][2] == asbytes(""))
    # A numeric sentinel value maps every missing entry to that value.
    with pytest.warns(FutureWarning):
        dta = genfromdta(os.path.join(curdir, "results/data_missing.dta"),
                         missing_flt=-999)
    assert_(np.all([dta[0][i] == -999 for i in range(5)]))
def test_stata_writer_pandas():
    """A pandas DataFrame round-trips through StataWriter without warnings."""
    buf = BytesIO()
    dta = macrodata.load_pandas().data
    dta4 = dta.copy()
    for col in ('year','quarter'):
        dta[col] = dta[col].astype(np.int64)
        dta4[col] = dta4[col].astype(np.int32)
    # dta is int64 'i8' given to Stata writer
    with pytest.warns(FutureWarning):
        writer = StataWriter(buf, dta)
    # Writing itself must not emit any additional warnings.
    with warnings.catch_warnings(record=True) as w:
        writer.write_file()
        assert len(w) == 0
    buf.seek(0)
    with pytest.warns(FutureWarning):
        dta2 = genfromdta(buf)
    dta5 = DataFrame.from_records(dta2)
    # dta2 is int32 'i4' returned from Stata reader
    if dta5.dtypes[1] is np.dtype('int64'):
        ptesting.assert_frame_equal(dta.reset_index(), dta5)
    else:
        # do not check index because it has different size, int32 versus int64
        ptesting.assert_frame_equal(dta4, dta5[dta5.columns[1:]])
def test_stata_writer_unicode():
    """Placeholder for a unicode round-trip test (not yet implemented)."""
    # make sure to test with characters outside the latin-1 encoding
    pass
def test_genfromdta_datetime():
    """Stata datetime columns decode correctly for ndarray and pandas output."""
    results = [(datetime(2006, 11, 19, 23, 13, 20), 1479596223000,
                datetime(2010, 1, 20), datetime(2010, 1, 8),
                datetime(2010, 1, 1), datetime(1974, 7, 1),
                datetime(2010, 1, 1), datetime(2010, 1, 1)),
               (datetime(1959, 12, 31, 20, 3, 20), -1479590,
                datetime(1953, 10, 2), datetime(1948, 6, 10),
                datetime(1955, 1, 1), datetime(1955, 7, 1),
                datetime(1955, 1, 1), datetime(2, 1, 1))]
    with pytest.warns(FutureWarning):
        dta = genfromdta(os.path.join(curdir,
                         "results/time_series_examples.dta"))
    assert_array_equal(dta[0].tolist(), results[0])
    assert_array_equal(dta[1].tolist(), results[1])
    # Same file through the pandas path must give identical rows.
    with warnings.catch_warnings(record=True):
        with pytest.warns(FutureWarning):
            dta = genfromdta(os.path.join(curdir,
                             "results/time_series_examples.dta"),
                             pandas=True)
    assert_array_equal(dta.iloc[0].tolist(), results[0])
    assert_array_equal(dta.iloc[1].tolist(), results[1])
def test_date_converters():
    """Round-trip Stata elapsed dates through datetime for every frequency."""
    # Representative elapsed values per Stata date format code.
    cases = {
        "tc": [-1479597200000, -1e6, -1e5, -100, 1e5, 1e6, 1479597200000],
        "td": [-1e5, -1200, -800, -365, -50, 0, 50, 365, 800, 1200, 1e5],
        "tw": [-1e4, -1e2, -53, -52, -51, 0, 51, 52, 53, 1e2, 1e4],
        "tm": [-1e4, -1e3, -100, -13, -12, -11, 0, 11, 12, 13, 100, 1e3, 1e4],
        "tq": [-100, -50, -5, -4, -3, 0, 3, 4, 5, 50, 100],
        "th": [-50, 40, 30, 10, 3, 2, 1, 0, 1, 2, 3, 10, 30, 40, 50],
        "ty": [1, 50, 500, 1000, 1500, 1975, 2075],
    }
    for fmt, values in cases.items():
        for value in values:
            # Converting to datetime and back must reproduce the input.
            as_date = _stata_elapsed_date_to_datetime(value, fmt)
            assert_equal(_datetime_to_stata_elapsed(as_date, fmt), value)
def test_datetime_roundtrip():
    """Datetime variables survive write/read with a 'tm' (monthly) format."""
    dta = np.array([(1, datetime(2010, 1, 1), 2),
                    (2, datetime(2010, 2, 1), 3),
                    (4, datetime(2010, 3, 1), 5)],
                   dtype=[('var1', float), ('var2', object), ('var3', float)])
    buf = BytesIO()
    with pytest.warns(FutureWarning):
        writer = StataWriter(buf, dta, {"var2" : "tm"})
    writer.write_file()
    buf.seek(0)
    with pytest.warns(FutureWarning):
        dta2 = genfromdta(buf)
    assert_equal(dta, dta2)
    # Same round trip through the pandas reader path.
    dta = DataFrame.from_records(dta)
    buf = BytesIO()
    with pytest.warns(FutureWarning):
        writer = StataWriter(buf, dta, {"var2" : "tm"})
    writer.write_file()
    buf.seek(0)
    with pytest.warns(FutureWarning):
        dta2 = genfromdta(buf, pandas=True)
    ptesting.assert_frame_equal(dta, dta2.drop('index', axis=1))
|
"""
Tests for iolib/foreign.py
"""
import os
import warnings
from datetime import datetime
from io import BytesIO
from numpy.testing import assert_array_equal, assert_, assert_equal
import numpy as np
from pandas import DataFrame, isnull
import pandas.util.testing as ptesting
import pytest
from statsmodels.compat.python import asbytes
from statsmodels.iolib.foreign import (StataWriter, genfromdta,
_datetime_to_stata_elapsed, _stata_elapsed_date_to_datetime)
from statsmodels.datasets import macrodata
# Test precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
curdir = os.path.dirname(os.path.abspath(__file__))
def test_genfromdta():
# Test genfromdta vs. results/macrodta.npy created with genfromtxt.
# NOTE: Stata handles data very oddly. Round tripping from csv to dta
# to ndarray 2710.349 (csv) -> 2510.2491 (stata) -> 2710.34912109375
# (dta/ndarray)
from .results.macrodata import macrodata_result as res2
with pytest.warns(FutureWarning):
res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta')
assert_array_equal(res1 == res2, True)
def test_genfromdta_pandas():
from pandas.util.testing import assert_frame_equal
dta = macrodata.load_pandas().data
curdir = os.path.dirname(os.path.abspath(__file__))
with pytest.warns(FutureWarning):
res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta',
pandas=True)
res1 = res1.astype(float)
assert_frame_equal(res1, dta.astype(float))
def test_stata_writer_structured():
buf = BytesIO()
dta = macrodata.load(as_pandas=False).data
dtype = dta.dtype
dt = [('year', int), ('quarter', int)] + dtype.descr[2:]
dta = dta.astype(np.dtype(dt))
with pytest.warns(FutureWarning):
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
with pytest.warns(FutureWarning):
dta2 = genfromdta(buf)
assert_array_equal(dta, dta2)
def test_stata_writer_array():
buf = BytesIO()
dta = macrodata.load(as_pandas=False).data
dta = DataFrame.from_records(dta)
dta.columns = ["v%d" % i for i in range(1,15)]
with pytest.warns(FutureWarning):
writer = StataWriter(buf, dta.values)
writer.write_file()
buf.seek(0)
with pytest.warns(FutureWarning):
dta2 = genfromdta(buf)
dta = dta.to_records(index=False)
assert_array_equal(dta, dta2)
def test_missing_roundtrip():
buf = BytesIO()
dta = np.array([(np.nan, np.inf, "")],
dtype=[("double_miss", float),
("float_miss", np.float32),
("string_miss", "a1")])
with pytest.warns(FutureWarning):
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
with pytest.warns(FutureWarning):
dta = genfromdta(buf, missing_flt=np.nan)
assert_(isnull(dta[0][0]))
assert_(isnull(dta[0][1]))
assert_(dta[0][2] == asbytes(""))
with pytest.warns(FutureWarning):
dta = genfromdta(os.path.join(curdir, "results/data_missing.dta"),
missing_flt=-999)
assert_(np.all([dta[0][i] == -999 for i in range(5)]))
def test_stata_writer_pandas():
buf = BytesIO()
dta = macrodata.load_pandas().data
dta4 = dta.copy()
for col in ('year','quarter'):
dta[col] = dta[col].astype(np.int64)
dta4[col] = dta4[col].astype(np.int32)
# dta is int64 'i8' given to Stata writer
with pytest.warns(FutureWarning):
writer = StataWriter(buf, dta)
with warnings.catch_warnings(record=True) as w:
writer.write_file()
assert len(w) == 0
buf.seek(0)
with pytest.warns(FutureWarning):
dta2 = genfromdta(buf)
dta5 = DataFrame.from_records(dta2)
# dta2 is int32 'i4' returned from Stata reader
if dta5.dtypes[1] is np.dtype('int64'):
ptesting.assert_frame_equal(dta.reset_index(), dta5)
else:
# do not check index because it has different size, int32 versus int64
ptesting.assert_frame_equal(dta4, dta5[dta5.columns[1:]])
def test_stata_writer_unicode():
# make sure to test with characters outside the latin-1 encoding
pass
def test_genfromdta_datetime():
results = [(datetime(2006, 11, 19, 23, 13, 20), 1479596223000,
datetime(2010, 1, 20), datetime(2010, 1, 8),
datetime(2010, 1, 1), datetime(1974, 7, 1),
datetime(2010, 1, 1), datetime(2010, 1, 1)),
(datetime(1959, 12, 31, 20, 3, 20), -1479590,
datetime(1953, 10, 2), datetime(1948, 6, 10),
datetime(1955, 1, 1), datetime(1955, 7, 1),
datetime(1955, 1, 1), datetime(2, 1, 1))]
with pytest.warns(FutureWarning):
dta = genfromdta(os.path.join(curdir,
"results/time_series_examples.dta"))
assert_array_equal(dta[0].tolist(), results[0])
assert_array_equal(dta[1].tolist(), results[1])
with warnings.catch_warnings(record=True):
with pytest.warns(FutureWarning):
dta = genfromdta(os.path.join(curdir,
"results/time_series_examples.dta"),
pandas=True)
assert_array_equal(dta.iloc[0].tolist(), results[0])
assert_array_equal(dta.iloc[1].tolist(), results[1])
def test_date_converters():
ms = [-1479597200000, -1e6, -1e5, -100, 1e5, 1e6, 1479597200000]
days = [-1e5, -1200, -800, -365, -50, 0, 50, 365, 800, 1200, 1e5]
weeks = [-1e4, -1e2, -53, -52, -51, 0, 51, 52, 53, 1e2, 1e4]
months = [-1e4, -1e3, -100, -13, -12, -11, 0, 11, 12, 13, 100, 1e3, 1e4]
quarter = [-100, -50, -5, -4, -3, 0, 3, 4, 5, 50, 100]
half = [-50, 40, 30, 10, 3, 2, 1, 0, 1, 2, 3, 10, 30, 40, 50]
year = [1, 50, 500, 1000, 1500, 1975, 2075]
for i in ms:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tc"), "tc"), i)
for i in days:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "td"), "td"), i)
for i in weeks:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tw"), "tw"), i)
for i in months:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tm"), "tm"), i)
for i in quarter:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tq"), "tq"), i)
for i in half:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "th"), "th"), i)
for i in year:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "ty"), "ty"), i)
def test_datetime_roundtrip():
dta = np.array([(1, datetime(2010, 1, 1), 2),
(2, datetime(2010, 2, 1), 3),
(4, datetime(2010, 3, 1), 5)],
dtype=[('var1', float), ('var2', object), ('var3', float)])
buf = BytesIO()
with pytest.warns(FutureWarning):
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
with pytest.warns(FutureWarning):
dta2 = genfromdta(buf)
assert_equal(dta, dta2)
dta = DataFrame.from_records(dta)
buf = BytesIO()
with pytest.warns(FutureWarning):
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
with pytest.warns(FutureWarning):
dta2 = genfromdta(buf, pandas=True)
ptesting.assert_frame_equal(dta, dta2.drop('index', axis=1))
|
en
| 0.80824
|
Tests for iolib/foreign.py # Test precisions # Test genfromdta vs. results/macrodta.npy created with genfromtxt. # NOTE: Stata handles data very oddly. Round tripping from csv to dta # to ndarray 2710.349 (csv) -> 2510.2491 (stata) -> 2710.34912109375 # (dta/ndarray) # dta is int64 'i8' given to Stata writer # dta2 is int32 'i4' returned from Stata reader # do not check index because it has different size, int32 versus int64 # make sure to test with characters outside the latin-1 encoding
| 1.960297
| 2
|
tests/infer/test_enum.py
|
adam-coogan/pyro
| 0
|
6626605
|
<filename>tests/infer/test_enum.py
from __future__ import absolute_import, division, print_function
import logging
import math
import os
import timeit
from collections import defaultdict
import pytest
import torch
from torch.autograd import grad
from torch.distributions import constraints, kl_divergence
import pyro
import pyro.distributions as dist
import pyro.optim
import pyro.poutine as poutine
from pyro.distributions.testing.rejection_gamma import ShapeAugmentedGamma
from pyro.infer import SVI, config_enumerate
from pyro.infer.enum import iter_discrete_traces
from pyro.infer.traceenum_elbo import TraceEnum_ELBO
from pyro.infer.util import LAST_CACHE_SIZE
from pyro.util import torch_isnan
from tests.common import assert_equal, skipif_param
try:
from contextlib import ExitStack # python 3
except ImportError:
from contextlib2 import ExitStack # python 2
logger = logging.getLogger(__name__)
def _skip_cuda(*args):
    """Wrap a param set so it is skipped when the CUDA_TEST env var is set."""
    return skipif_param(*args,
                        condition="CUDA_TEST" in os.environ,
                        reason="https://github.com/uber/pyro/issues/1380")
@pytest.mark.parametrize("depth", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
def test_iter_discrete_traces_order(depth, graph_type):
    """Enumeration yields 2**depth traces with sites in model order."""
    @config_enumerate
    def model(depth):
        for i in range(depth):
            pyro.sample("x{}".format(i), dist.Bernoulli(0.5))
    traces = list(iter_discrete_traces(graph_type, model, depth))
    assert len(traces) == 2 ** depth
    # Every trace must contain exactly the sample sites, in declaration order.
    for trace in traces:
        sites = [name for name, site in trace.nodes.items() if site["type"] == "sample"]
        assert sites == ["x{}".format(i) for i in range(depth)]
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
def test_iter_discrete_traces_scalar(graph_type):
    """A Bernoulli x Categorical model enumerates to 2 * len(probs) traces."""
    pyro.clear_param_store()
    @config_enumerate
    def model():
        p = pyro.param("p", torch.tensor(0.05))
        probs = pyro.param("probs", torch.tensor([0.1, 0.2, 0.3, 0.4]))
        x = pyro.sample("x", dist.Bernoulli(p))
        y = pyro.sample("y", dist.Categorical(probs))
        return dict(x=x, y=y)
    traces = list(iter_discrete_traces(graph_type, model))
    probs = pyro.param("probs")
    assert len(traces) == 2 * len(probs)
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("expand", [False, True])
def test_iter_discrete_traces_vector(expand, graph_type):
    """Vectorized enumeration honors `expand` and counts 2 * num_categories."""
    pyro.clear_param_store()
    @config_enumerate(expand=expand)
    def model():
        p = pyro.param("p", torch.tensor([0.05, 0.15]))
        probs = pyro.param("probs", torch.tensor([[0.1, 0.2, 0.3, 0.4],
                                                  [0.4, 0.3, 0.2, 0.1]]))
        with pyro.plate("plate", 2):
            x = pyro.sample("x", dist.Bernoulli(p))
            y = pyro.sample("y", dist.Categorical(probs))
        # With expand=True samples have full plate shape; otherwise a
        # broadcastable singleton shape.
        if expand:
            assert x.size() == (2,)
            assert y.size() == (2,)
        else:
            assert x.shape == (1,)
            assert y.shape == (1,)
        return dict(x=x, y=y)
    traces = list(iter_discrete_traces(graph_type, model))
    probs = pyro.param("probs")
    assert len(traces) == 2 * probs.size(-1)
# The usual dist.Bernoulli avoids NANs by clamping log prob. This unsafe version
# allows us to test additional NAN avoidance in _compute_dice_elbo().
class UnsafeBernoulli(dist.Bernoulli):
    """Bernoulli whose log_prob skips clamping, so extreme probs can yield
    non-finite values (used to test NAN avoidance downstream)."""
    def log_prob(self, value):
        i = value.long()
        j = torch.arange(len(self.probs), dtype=torch.long)
        # Select log(1-p) for value 0 and log(p) for value 1, per element.
        return torch.stack([(-self.probs).log1p(), self.probs.log()])[i, j]
@pytest.mark.parametrize('sample_shape', [(), (2,), (3, 4)])
def test_unsafe_bernoulli(sample_shape):
    """UnsafeBernoulli agrees with dist.Bernoulli on sampled (safe) values."""
    logits = torch.randn(10)
    p = dist.Bernoulli(logits=logits)
    q = UnsafeBernoulli(logits=logits)
    x = p.sample(sample_shape)
    assert_equal(p.log_prob(x), q.log_prob(x))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_avoid_nan(enumerate1):
    """ELBO values stay finite even when probabilities are exactly 0 or 1."""
    pyro.clear_param_store()
    def model():
        p = torch.tensor([0.0, 0.5, 1.0])
        with pyro.plate("batch", 3):
            pyro.sample("z", UnsafeBernoulli(p))
    @config_enumerate(default=enumerate1)
    def guide():
        p = pyro.param("p", torch.tensor([0.0, 0.5, 1.0], requires_grad=True))
        with pyro.plate("batch", 3):
            pyro.sample("z", UnsafeBernoulli(p))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    # All three ELBO entry points must avoid NAN despite unsafe log probs.
    loss = elbo.loss(model, guide)
    assert not math.isnan(loss), loss
    loss = elbo.differentiable_loss(model, guide)
    assert not torch_isnan(loss), loss
    loss = elbo.loss_and_grads(model, guide)
    assert not math.isnan(loss), loss
# A simple Gaussian mixture model, with no vectorization.
def gmm_model(data, verbose=False):
    """Non-vectorized 2-component Gaussian mixture model with per-datum z."""
    p = pyro.param("p", torch.tensor(0.3, requires_grad=True))
    scale = pyro.param("scale", torch.tensor(1.0, requires_grad=True))
    mus = torch.tensor([-1.0, 1.0])
    for i in pyro.plate("data", len(data)):
        # Discrete component assignment for datum i.
        z = pyro.sample("z_{}".format(i), dist.Bernoulli(p))
        z = z.long()
        if verbose:
            logger.debug("M{} z_{} = {}".format(" " * i, i, z.cpu().numpy()))
        pyro.sample("x_{}".format(i), dist.Normal(mus[z], scale), obs=data[i])
def gmm_guide(data, verbose=False):
    """Mean-field guide for gmm_model with a per-datum Bernoulli parameter."""
    for i in pyro.plate("data", len(data)):
        p = pyro.param("p_{}".format(i), torch.tensor(0.6, requires_grad=True))
        z = pyro.sample("z_{}".format(i), dist.Bernoulli(p))
        z = z.long()
        if verbose:
            logger.debug("G{} z_{} = {}".format(" " * i, i, z.cpu().numpy()))
@pytest.mark.parametrize("data_size", [1, 2, 3])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("model", [gmm_model, gmm_guide])
def test_gmm_iter_discrete_traces(data_size, graph_type, model):
    """Enumerating the non-vectorized GMM is exponential in data_size."""
    pyro.clear_param_store()
    data = torch.arange(0., float(data_size))
    model = config_enumerate(model)
    traces = list(iter_discrete_traces(graph_type, model, data=data, verbose=True))
    # This non-vectorized version is exponential in data_size:
    assert len(traces) == 2**data_size
# A Gaussian mixture model, with vectorized batching.
def gmm_batch_model(data):
    """Vectorized 2-component GMM: one-hot assignments over a data plate."""
    p = pyro.param("p", torch.tensor([0.3], requires_grad=True))
    # Build the full 2-way probability vector from the single free parameter.
    p = torch.cat([p, 1 - p])
    scale = pyro.param("scale", torch.tensor([1.0], requires_grad=True))
    mus = torch.tensor([-1.0, 1.0])
    with pyro.plate("data", len(data)) as batch:
        n = len(batch)
        z = pyro.sample("z", dist.OneHotCategorical(p).expand_by([n]))
        assert z.shape[-1] == 2
        # Select each datum's component mean via the one-hot code.
        loc = (z * mus).sum(-1)
        pyro.sample("x", dist.Normal(loc, scale.expand(n)), obs=data[batch])
def gmm_batch_guide(data):
    """Vectorized guide for gmm_batch_model with per-datum probabilities."""
    with pyro.plate("data", len(data)) as batch:
        n = len(batch)
        probs = pyro.param("probs", torch.ones(n, 1) * 0.6)
        probs = torch.cat([probs, 1 - probs], dim=1)
        z = pyro.sample("z", dist.OneHotCategorical(probs))
        assert z.shape[-1] == 2
@pytest.mark.parametrize("data_size", [1, 2, 3])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("model", [gmm_batch_model, gmm_batch_guide])
def test_gmm_batch_iter_discrete_traces(model, data_size, graph_type):
    """Parallel enumeration over a plate yields one trace per component."""
    pyro.clear_param_store()
    data = torch.arange(0., float(data_size))
    enumerated = config_enumerate(model)
    traces = list(iter_discrete_traces(graph_type, enumerated, data=data))
    # Vectorized enumeration is independent of data_size: trace count equals
    # the number of mixture components.
    assert len(traces) == 2
@pytest.mark.parametrize("model,guide", [
    (gmm_model, gmm_guide),
    (gmm_batch_model, gmm_batch_guide),
], ids=["single", "batch"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_svi_step_smoke(model, guide, enumerate1):
    """Smoke test: one SVI step runs under each enumeration strategy."""
    pyro.clear_param_store()
    data = torch.tensor([0.0, 1.0, 9.0])
    enumerated_guide = config_enumerate(guide, default=enumerate1)
    optimizer = pyro.optim.Adam({"lr": .001})
    loss_fn = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    svi = SVI(model, enumerated_guide, optimizer, loss=loss_fn)
    svi.step(data)
@pytest.mark.parametrize("model,guide", [
    (gmm_model, gmm_guide),
    (gmm_batch_model, gmm_batch_guide),
], ids=["single", "batch"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_differentiable_loss(model, guide, enumerate1):
    """Check differentiable_loss agrees with loss_and_grads in value and gradients."""
    pyro.clear_param_store()
    data = torch.tensor([0.0, 1.0, 9.0])
    guide = config_enumerate(guide, default=enumerate1)
    elbo = TraceEnum_ELBO(max_plate_nesting=1,
                          strict_enumeration_warning=any([enumerate1]))
    # Seed before each evaluation so both code paths draw identical samples.
    pyro.set_rng_seed(0)
    loss = elbo.differentiable_loss(model, guide, data)
    param_names = sorted(pyro.get_param_store().get_all_param_names())
    actual_loss = loss.item()
    actual_grads = grad(loss, [pyro.param(name).unconstrained() for name in param_names])
    # Reference path: loss_and_grads populates .grad on the unconstrained params.
    pyro.set_rng_seed(0)
    expected_loss = elbo.loss_and_grads(model, guide, data)
    expected_grads = [pyro.param(name).unconstrained().grad for name in param_names]
    assert_equal(actual_loss, expected_loss)
    for name, actual_grad, expected_grad in zip(param_names, actual_grads, expected_grads):
        assert_equal(actual_grad, expected_grad, msg='bad {} gradient. Expected:\n{}\nActual:\n{}'.format(
            name, expected_grad, actual_grad))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_svi_step_guide_uses_grad(enumerate1):
    """Smoke test: a guide that calls torch.autograd.grad internally still supports SVI steps."""
    data = torch.tensor([0., 1., 3.])
    def model():
        scale = pyro.param("scale")
        loc = pyro.sample("loc", dist.Normal(0., 10.))
        pyro.sample("b", dist.Bernoulli(0.5))
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.Normal(loc, scale), obs=data)
    @config_enumerate(default=enumerate1)
    def guide():
        p = pyro.param("p", torch.tensor(0.5), constraint=constraints.unit_interval)
        scale = pyro.param("scale", torch.tensor(1.0), constraint=constraints.positive)
        var = pyro.param("var", torch.tensor(1.0), constraint=constraints.positive)
        # The guide computes a Laplace-style estimate of loc by differentiating
        # the joint log-density; create_graph=True keeps the graph for the Hessian.
        x = torch.tensor(0., requires_grad=True)
        prior = dist.Normal(0., 10.).log_prob(x)
        likelihood = dist.Normal(x, scale).log_prob(data).sum()
        loss = -(prior + likelihood)
        g = grad(loss, [x], create_graph=True)[0]
        H = grad(g, [x], create_graph=True)[0]
        loc = x.detach() - g / H  # newton step
        pyro.sample("loc", dist.Normal(loc, var))
        pyro.sample("b", dist.Bernoulli(p))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    inference = SVI(model, guide, pyro.optim.Adam({}), elbo)
    inference.step()
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize("method", ["loss", "differentiable_loss", "loss_and_grads"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_elbo_bern(method, enumerate1, scale):
    """Compare ELBO loss/gradient against the analytic Bernoulli KL, under poutine.scale."""
    pyro.clear_param_store()
    # Exact enumeration needs one particle; Monte Carlo needs many and a loose tolerance.
    num_particles = 1 if enumerate1 else 10000
    prec = 0.001 if enumerate1 else 0.2
    q = pyro.param("q", torch.tensor(0.5, requires_grad=True))
    kl = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(0.25))
    @poutine.scale(scale=scale)
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(0.25).expand_by([num_particles]))
    @config_enumerate(default=enumerate1)
    @poutine.scale(scale=scale)
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(q).expand_by([num_particles]))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    if method == "loss":
        actual = elbo.loss(model, guide) / num_particles
        expected = kl.item() * scale
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected),
            "\n  actual = {}".format(actual),
        ]))
    else:
        # Gradient check: both methods should match d(kl)/dq scaled by `scale`.
        if method == "differentiable_loss":
            loss = elbo.differentiable_loss(model, guide)
            actual = grad(loss, [q])[0] / num_particles
        elif method == "loss_and_grads":
            elbo.loss_and_grads(model, guide)
            actual = q.grad / num_particles
        expected = grad(kl, [q])[0] * scale
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected.detach().cpu().numpy()),
            "\n  actual = {}".format(actual.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize("method", ["loss", "differentiable_loss", "loss_and_grads"])
@pytest.mark.parametrize("enumerate1", [None, "parallel"])
def test_elbo_normal(method, enumerate1):
    """Compare ELBO loss/gradient against the analytic Normal KL divergence."""
    pyro.clear_param_store()
    num_particles = 1 if enumerate1 else 10000
    prec = 0.01
    q = pyro.param("q", torch.tensor(1., requires_grad=True))
    kl = kl_divergence(dist.Normal(q, 1.), dist.Normal(0., 1.))
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Normal(0., 1.).expand_by([num_particles]))
    # num_samples triggers multi-sample Monte Carlo "enumeration" of the
    # continuous site when enumerate1 == "parallel".
    @config_enumerate(default=enumerate1, num_samples=20000)
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Normal(q, 1.).expand_by([num_particles]))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    if method == "loss":
        actual = elbo.loss(model, guide) / num_particles
        expected = kl.item()
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected),
            "\n  actual = {}".format(actual),
        ]))
    else:
        if method == "differentiable_loss":
            loss = elbo.differentiable_loss(model, guide)
            actual = grad(loss, [q])[0] / num_particles
        elif method == "loss_and_grads":
            elbo.loss_and_grads(model, guide)
            actual = q.grad / num_particles
        expected = grad(kl, [q])[0]
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected.detach().cpu().numpy()),
            "\n  actual = {}".format(actual.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize("enumerate1,num_samples1", [
    (None, None),
    ("sequential", None),
    ("parallel", None),
    ("parallel", 300),
])
@pytest.mark.parametrize("enumerate2,num_samples2", [
    (None, None),
    ("sequential", None),
    ("parallel", None),
    ("parallel", 300),
])
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_bern_bern(method, enumerate1, enumerate2, num_samples1, num_samples2):
    """ELBO over two independent Bernoullis matches the sum of analytic KLs,
    mixing exact enumeration with multi-sample Monte Carlo per site."""
    pyro.clear_param_store()
    if enumerate1 and enumerate2 and num_samples1 is None and num_samples2 is None:
        # Fully exact enumeration: one particle suffices, tight tolerance.
        num_particles = 1
        prec = 0.001
    else:
        # Keep total sample count roughly constant by dividing out per-site samples.
        num_particles = 2 * 300 * 300
        for n in [num_samples1, num_samples2]:
            if n is not None:
                num_particles = num_particles // n
        prec = 0.2
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    def model():
        pyro.sample("x1", dist.Bernoulli(0.2))
        pyro.sample("x2", dist.Bernoulli(0.4))
    def guide():
        q = pyro.param("q")
        pyro.sample("x1", dist.Bernoulli(q), infer={"enumerate": enumerate1, "num_samples": num_samples1})
        pyro.sample("x2", dist.Bernoulli(q), infer={"enumerate": enumerate2, "num_samples": num_samples2})
    kl = sum(kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p)) for p in [0.2, 0.4])
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(num_particles=num_particles,
                          vectorize_particles=True,
                          strict_enumeration_warning=any([enumerate1, enumerate2]))
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad
    assert_equal(actual_loss, expected_loss, prec=prec, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=prec, msg="".join([
        "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_berns(method, enumerate1, enumerate2, enumerate3):
    """ELBO over three independent Bernoullis matches the sum of analytic KLs
    under every combination of per-site enumeration strategies."""
    pyro.clear_param_store()
    # Exact only if all three sites are enumerated; otherwise fall back to MC.
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 10000
    prec = 0.001 if all([enumerate1, enumerate2, enumerate3]) else 0.1
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    def model():
        pyro.sample("x1", dist.Bernoulli(0.1))
        pyro.sample("x2", dist.Bernoulli(0.2))
        pyro.sample("x3", dist.Bernoulli(0.3))
    def guide():
        q = pyro.param("q")
        pyro.sample("x1", dist.Bernoulli(q), infer={"enumerate": enumerate1})
        pyro.sample("x2", dist.Bernoulli(q), infer={"enumerate": enumerate2})
        pyro.sample("x3", dist.Bernoulli(q), infer={"enumerate": enumerate3})
    kl = sum(kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p)) for p in [0.1, 0.2, 0.3])
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(num_particles=num_particles,
                          vectorize_particles=True,
                          strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad
    assert_equal(actual_loss, expected_loss, prec=prec, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=prec, msg="".join([
        "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("max_plate_nesting", [0, 1])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", ["sequential", "parallel"])
@pytest.mark.parametrize("enumerate3", ["sequential", "parallel"])
def test_elbo_categoricals(enumerate1, enumerate2, enumerate3, max_plate_nesting):
    """Exact ELBO over three Categoricals of different cardinalities matches
    the sum of analytic KL divergences, for both enumeration strategies."""
    pyro.clear_param_store()
    p1 = torch.tensor([0.6, 0.4])
    p2 = torch.tensor([0.3, 0.3, 0.4])
    p3 = torch.tensor([0.1, 0.2, 0.3, 0.4])
    q1 = pyro.param("q1", torch.tensor([0.4, 0.6], requires_grad=True))
    q2 = pyro.param("q2", torch.tensor([0.4, 0.3, 0.3], requires_grad=True))
    q3 = pyro.param("q3", torch.tensor([0.4, 0.3, 0.2, 0.1], requires_grad=True))
    def model():
        pyro.sample("x1", dist.Categorical(p1))
        pyro.sample("x2", dist.Categorical(p2))
        pyro.sample("x3", dist.Categorical(p3))
    def guide():
        pyro.sample("x1", dist.Categorical(pyro.param("q1")), infer={"enumerate": enumerate1})
        pyro.sample("x2", dist.Categorical(pyro.param("q2")), infer={"enumerate": enumerate2})
        pyro.sample("x3", dist.Categorical(pyro.param("q3")), infer={"enumerate": enumerate3})
    kl = (kl_divergence(dist.Categorical(q1), dist.Categorical(p1)) +
          kl_divergence(dist.Categorical(q2), dist.Categorical(p2)) +
          kl_divergence(dist.Categorical(q3), dist.Categorical(p3)))
    expected_loss = kl.item()
    expected_grads = grad(kl, [q1, q2, q3])
    elbo = TraceEnum_ELBO(max_plate_nesting=max_plate_nesting,
                          strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide)
    actual_grads = [q1.grad, q2.grad, q3.grad]
    assert_equal(actual_loss, expected_loss, prec=0.001, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    for actual_grad, expected_grad in zip(actual_grads, expected_grads):
        assert_equal(actual_grad, expected_grad, prec=0.001, msg="".join([
            "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
            "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize("enumerate1", [None, "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "parallel"])
@pytest.mark.parametrize("enumerate3", [None, "parallel"])
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_normals(method, enumerate1, enumerate2, enumerate3):
    """ELBO over three Normals matches the sum of analytic KLs when continuous
    sites use multi-sample "enumeration" (num_samples=10 per enumerated site)."""
    pyro.clear_param_store()
    # Scale particle count up by 10x for each site that is NOT multi-sampled.
    num_particles = 100 * 10 ** sum(1 for e in [enumerate1, enumerate2, enumerate3] if not e)
    prec = 0.1
    q = pyro.param("q", torch.tensor(0.0, requires_grad=True))
    def model():
        pyro.sample("x1", dist.Normal(0.25, 1.))
        pyro.sample("x2", dist.Normal(0.5, 1.))
        pyro.sample("x3", dist.Normal(1., 1.))
    def guide():
        q = pyro.param("q")
        pyro.sample("x1", dist.Normal(q, 1.), infer={"enumerate": enumerate1, "num_samples": 10})
        pyro.sample("x2", dist.Normal(q, 1.), infer={"enumerate": enumerate2, "num_samples": 10})
        pyro.sample("x3", dist.Normal(q, 1.), infer={"enumerate": enumerate3, "num_samples": 10})
    kl = sum(kl_divergence(dist.Normal(q, 1.), dist.Normal(p, 1.)) for p in [0.25, 0.5, 1.])
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(num_particles=num_particles,
                          vectorize_particles=True,
                          strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad
    assert_equal(actual_loss, expected_loss, prec=prec, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=prec, msg="".join([
        "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("plate_dim", [1, 2])
def test_elbo_plate(plate_dim, enumerate1, enumerate2):
    """ELBO with one site outside and one inside a vectorized plate equals
    (1 + plate_dim) * KL for a Bernoulli pair."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2]) else 10000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("y", dist.Bernoulli(p).expand_by([num_particles]))
            with pyro.plate("plate", plate_dim):
                pyro.sample("z", dist.Bernoulli(p).expand_by([plate_dim, num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("y", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            with pyro.plate("plate", plate_dim):
                pyro.sample("z", dist.Bernoulli(q).expand_by([plate_dim, num_particles]),
                            infer={"enumerate": enumerate2})
    # One KL term for "y" plus plate_dim identical terms for "z".
    kl = (1 + plate_dim) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("plate_dim", [1, 2])
def test_elbo_iplate(plate_dim, enumerate1, enumerate2):
    """ELBO with a sequential (iterated) plate equals (1 + plate_dim) * KL."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2]) else 20000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            # Sequential plate: one sample site per iteration index.
            for i in pyro.plate("plate", plate_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            for i in pyro.plate("plate", plate_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]),
                            infer={"enumerate": enumerate2})
    kl = (1 + plate_dim) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate4", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [2])
def test_elbo_plate_plate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3, enumerate4):
    """ELBO over sites outside, inside, and in the intersection of two nested
    vectorized plates equals (1 + outer + inner + outer*inner) * KL."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3, enumerate4]) else 100000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        d = dist.Bernoulli(p)
        context1 = pyro.plate("outer", outer_dim, dim=-1)
        context2 = pyro.plate("inner", inner_dim, dim=-2)
        pyro.sample("w", d)
        with context1:
            pyro.sample("x", d)
        with context2:
            pyro.sample("y", d)
        with context1, context2:
            pyro.sample("z", d)
    def guide():
        d = dist.Bernoulli(pyro.param("q"))
        context1 = pyro.plate("outer", outer_dim, dim=-1)
        context2 = pyro.plate("inner", inner_dim, dim=-2)
        pyro.sample("w", d, infer={"enumerate": enumerate1})
        with context1:
            pyro.sample("x", d, infer={"enumerate": enumerate2})
        with context2:
            pyro.sample("y", d, infer={"enumerate": enumerate3})
        with context1, context2:
            pyro.sample("z", d, infer={"enumerate": enumerate4})
    kl_node = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    kl = (1 + outer_dim + inner_dim + outer_dim * inner_dim) * kl_node
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    # BUGFIX: enumerate4 was missing from this any([...]), so a run where only
    # the "z" site is enumerated wrongly disabled the strict enumeration warning.
    elbo = TraceEnum_ELBO(num_particles=num_particles,
                          vectorize_particles=True,
                          strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3, enumerate4]))
    actual_loss = elbo.loss_and_grads(model, guide)
    actual_grad = pyro.param('q').grad
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [3])
def test_elbo_plate_iplate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3):
    """ELBO with a sequential plate nested inside a vectorized plate equals
    (1 + outer * (1 + inner)) * KL."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 100000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            with pyro.plate("outer", outer_dim):
                pyro.sample("y", dist.Bernoulli(p).expand_by([outer_dim, num_particles]))
                # Sequential inner plate inside the vectorized outer plate.
                for i in pyro.plate("inner", inner_dim):
                    pyro.sample("z_{}".format(i), dist.Bernoulli(p).expand_by([outer_dim, num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            with pyro.plate("outer", outer_dim):
                pyro.sample("y", dist.Bernoulli(q).expand_by([outer_dim, num_particles]),
                            infer={"enumerate": enumerate2})
                for i in pyro.plate("inner", inner_dim):
                    pyro.sample("z_{}".format(i), dist.Bernoulli(q).expand_by([outer_dim, num_particles]),
                                infer={"enumerate": enumerate3})
    kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [2])
def test_elbo_iplate_plate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3):
    """ELBO with a vectorized plate nested inside a sequential plate equals
    (1 + outer * (1 + inner)) * KL."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 50000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            # Reuse a single vectorized inner plate across sequential iterations.
            inner_plate = pyro.plate("inner", inner_dim)
            for i in pyro.plate("outer", outer_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]))
                with inner_plate:
                    pyro.sample("z_{}".format(i), dist.Bernoulli(p).expand_by([inner_dim, num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            inner_plate = pyro.plate("inner", inner_dim)
            for i in pyro.plate("outer", outer_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]),
                            infer={"enumerate": enumerate2})
                with inner_plate:
                    pyro.sample("z_{}".format(i), dist.Bernoulli(q).expand_by([inner_dim, num_particles]),
                                infer={"enumerate": enumerate3})
    kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [2])
def test_elbo_iplate_iplate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3):
    """ELBO with a sequential plate nested inside a sequential plate equals
    (1 + outer * (1 + inner)) * KL."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 150000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            # BUGFIX: the model previously sized "inner" with outer_dim and
            # iterated "outer" over inner_dim (swapped relative to the guide).
            # That only worked because both dims are parametrized to 2; the
            # plate sizes must match the guide's for the shared sample sites.
            inner_iplate = pyro.plate("inner", inner_dim)
            for i in pyro.plate("outer", outer_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]))
                for j in inner_iplate:
                    pyro.sample("z_{}_{}".format(i, j), dist.Bernoulli(p).expand_by([num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            inner_iplate = pyro.plate("inner", inner_dim)
            for i in pyro.plate("outer", outer_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]),
                            infer={"enumerate": enumerate2})
                for j in inner_iplate:
                    pyro.sample("z_{}_{}".format(i, j), dist.Bernoulli(q).expand_by([num_particles]),
                                infer={"enumerate": enumerate3})
    kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("pi1", [0.33, 0.43])
@pytest.mark.parametrize("pi2", [0.55, 0.27])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_non_mean_field_bern_bern_elbo_gradient(enumerate1, pi1, pi2):
    """Gradient check for a non-mean-field guide where z depends on y,
    against an analytically decomposed KL."""
    pyro.clear_param_store()
    num_particles = 1 if enumerate1 else 20000
    def model():
        with pyro.plate("particles", num_particles):
            y = pyro.sample("y", dist.Bernoulli(0.33).expand_by([num_particles]))
            pyro.sample("z", dist.Bernoulli(0.55 * y + 0.10))
    def guide():
        q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True))
        q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True))
        with pyro.plate("particles", num_particles):
            y = pyro.sample("y", dist.Bernoulli(q1).expand_by([num_particles]))
            pyro.sample("z", dist.Bernoulli(q2 * y + 0.10))
    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    elbo.loss_and_grads(model, config_enumerate(guide, default=enumerate1))
    actual_grad_q1 = pyro.param('q1').grad / num_particles
    actual_grad_q2 = pyro.param('q2').grad / num_particles
    logger.info("Computing analytic gradients")
    q1 = torch.tensor(pi1, requires_grad=True)
    q2 = torch.tensor(pi2, requires_grad=True)
    # KL(y) + E_y[ KL(z | y) ], split over the two values of y.
    elbo = kl_divergence(dist.Bernoulli(q1), dist.Bernoulli(0.33))
    elbo = elbo + q1 * kl_divergence(dist.Bernoulli(q2 + 0.10), dist.Bernoulli(0.65))
    elbo = elbo + (1.0 - q1) * kl_divergence(dist.Bernoulli(0.10), dist.Bernoulli(0.10))
    expected_grad_q1, expected_grad_q2 = grad(elbo, [q1, q2])
    prec = 0.03 if enumerate1 is None else 0.001
    assert_equal(actual_grad_q1, expected_grad_q1, prec=prec, msg="".join([
        "\nq1 expected = {}".format(expected_grad_q1.data.cpu().numpy()),
        "\nq1  actual = {}".format(actual_grad_q1.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_q2, expected_grad_q2, prec=prec, msg="".join([
        "\nq2 expected = {}".format(expected_grad_q2.data.cpu().numpy()),
        "\nq2  actual = {}".format(actual_grad_q2.data.cpu().numpy()),
    ]))
@pytest.mark.parametrize("pi1", [0.33, 0.44])
@pytest.mark.parametrize("pi2", [0.55, 0.39])
@pytest.mark.parametrize("pi3", [0.22, 0.29])
@pytest.mark.parametrize("enumerate1,num_samples", [
    (None, None),
    ("sequential", None),
    ("parallel", None),
    ("parallel", 2),
])
def test_non_mean_field_bern_normal_elbo_gradient(enumerate1, pi1, pi2, pi3, num_samples):
    """Gradient check for a non-mean-field Bernoulli-Normal model where a model
    parameter (q3) also appears in the likelihood, against an analytic KL.
    NOTE(review): num_samples is parametrized but not forwarded into the infer
    dict below — presumably intentional coverage of the fixture shape; verify.
    """
    pyro.clear_param_store()
    include_z = True
    num_particles = 10000
    def model():
        with pyro.plate("particles", num_particles):
            q3 = pyro.param("q3", torch.tensor(pi3, requires_grad=True))
            y = pyro.sample("y", dist.Bernoulli(q3).expand_by([num_particles]))
            if include_z:
                pyro.sample("z", dist.Normal(0.55 * y + q3, 1.0))
    def guide():
        q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True))
        q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True))
        with pyro.plate("particles", num_particles):
            y = pyro.sample("y", dist.Bernoulli(q1).expand_by([num_particles]), infer={"enumerate": enumerate1})
            if include_z:
                pyro.sample("z", dist.Normal(q2 * y + 0.10, 1.0))
    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    elbo.loss_and_grads(model, guide)
    actual_grad_q1 = pyro.param('q1').grad / num_particles
    if include_z:
        actual_grad_q2 = pyro.param('q2').grad / num_particles
    actual_grad_q3 = pyro.param('q3').grad / num_particles
    logger.info("Computing analytic gradients")
    q1 = torch.tensor(pi1, requires_grad=True)
    q2 = torch.tensor(pi2, requires_grad=True)
    q3 = torch.tensor(pi3, requires_grad=True)
    # KL(y) + E_y[ KL(z | y) ], split over the two values of y.
    elbo = kl_divergence(dist.Bernoulli(q1), dist.Bernoulli(q3))
    if include_z:
        elbo = elbo + q1 * kl_divergence(dist.Normal(q2 + 0.10, 1.0), dist.Normal(q3 + 0.55, 1.0))
        elbo = elbo + (1.0 - q1) * kl_divergence(dist.Normal(0.10, 1.0), dist.Normal(q3, 1.0))
        expected_grad_q1, expected_grad_q2, expected_grad_q3 = grad(elbo, [q1, q2, q3])
    else:
        expected_grad_q1, expected_grad_q3 = grad(elbo, [q1, q3])
    prec = 0.04 if enumerate1 is None else 0.02
    assert_equal(actual_grad_q1, expected_grad_q1, prec=prec, msg="".join([
        "\nq1 expected = {}".format(expected_grad_q1.data.cpu().numpy()),
        "\nq1   actual = {}".format(actual_grad_q1.data.cpu().numpy()),
    ]))
    if include_z:
        assert_equal(actual_grad_q2, expected_grad_q2, prec=prec, msg="".join([
            "\nq2 expected = {}".format(expected_grad_q2.data.cpu().numpy()),
            "\nq2   actual = {}".format(actual_grad_q2.data.cpu().numpy()),
        ]))
    assert_equal(actual_grad_q3, expected_grad_q3, prec=prec, msg="".join([
        "\nq3 expected = {}".format(expected_grad_q3.data.cpu().numpy()),
        "\nq3   actual = {}".format(actual_grad_q3.data.cpu().numpy()),
    ]))
@pytest.mark.parametrize("pi1", [0.33, 0.41])
@pytest.mark.parametrize("pi2", [0.44, 0.17])
@pytest.mark.parametrize("pi3", [0.22, 0.29])
def test_non_mean_field_normal_bern_elbo_gradient(pi1, pi2, pi3):
    """Self-consistency check: enumerated gradient estimates must match the
    plain Monte Carlo (non-enumerated) estimates for a Normal->Bernoulli chain."""
    def model(num_particles):
        with pyro.plate("particles", num_particles):
            q3 = pyro.param("q3", torch.tensor(pi3, requires_grad=True))
            q4 = pyro.param("q4", torch.tensor(0.5 * (pi1 + pi2), requires_grad=True))
            z = pyro.sample("z", dist.Normal(q3, 1.0).expand_by([num_particles]))
            # Sigmoid of z gates the Bernoulli probability.
            zz = torch.exp(z) / (1.0 + torch.exp(z))
            pyro.sample("y", dist.Bernoulli(q4 * zz))
    def guide(num_particles):
        q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True))
        q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True))
        with pyro.plate("particles", num_particles):
            z = pyro.sample("z", dist.Normal(q2, 1.0).expand_by([num_particles]))
            zz = torch.exp(z) / (1.0 + torch.exp(z))
            pyro.sample("y", dist.Bernoulli(q1 * zz))
    qs = ['q1', 'q2', 'q3', 'q4']
    results = {}
    # Run once per enumeration strategy (None is the Monte Carlo reference).
    for ed, num_particles in zip([None, 'parallel', 'sequential'], [30000, 20000, 20000]):
        pyro.clear_param_store()
        elbo = TraceEnum_ELBO(strict_enumeration_warning=any([ed]))
        elbo.loss_and_grads(model, config_enumerate(guide, default=ed), num_particles)
        results[str(ed)] = {}
        for q in qs:
            results[str(ed)]['actual_grad_%s' % q] = pyro.param(q).grad.detach().cpu().numpy() / num_particles
    prec = 0.03
    for ed in ['parallel', 'sequential']:
        logger.info('\n*** {} ***'.format(ed))
        for q in qs:
            logger.info("[{}] actual: {}".format(q, results[ed]['actual_grad_%s' % q]))
            assert_equal(results[ed]['actual_grad_%s' % q], results['None']['actual_grad_%s' % q], prec=prec,
                         msg="".join([
                             "\nexpected (MC estimate) = {}".format(results['None']['actual_grad_%s' % q]),
                             "\n  actual ({} estimate) = {}".format(ed, results[ed]['actual_grad_%s' % q]),
                         ]))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_elbo_rsvi(enumerate1):
    """Mixed discrete/continuous mean-field test (RSVI).

    The Bernoulli site "z" may be enumerated; the Gamma site "y" uses
    ShapeAugmentedGamma for reparameterized sampling.  Per-particle gradients
    of q and a are compared against analytic KL-divergence gradients.
    """
    pyro.clear_param_store()
    num_particles = 40000
    # Enumeration removes the discrete-site variance, so a tighter tolerance applies.
    prec = 0.01 if enumerate1 else 0.02
    q = pyro.param("q", torch.tensor(0.5, requires_grad=True))
    a = pyro.param("a", torch.tensor(1.5, requires_grad=True))
    # Analytic KLs whose gradients are the expected ELBO gradients.
    kl1 = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(0.25))
    kl2 = kl_divergence(dist.Gamma(a, 1.0), dist.Gamma(0.5, 1.0))

    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(0.25).expand_by([num_particles]))
            pyro.sample("y", dist.Gamma(0.50, 1.0).expand_by([num_particles]))

    @config_enumerate(default=enumerate1)
    def guide():
        q = pyro.param("q")
        a = pyro.param("a")
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(q).expand_by([num_particles]))
            pyro.sample("y", ShapeAugmentedGamma(a, torch.tensor(1.0)).expand_by([num_particles]))

    # any([enumerate1]) is False only when enumerate1 is None (no enumeration).
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    elbo.loss_and_grads(model, guide)

    actual_q = q.grad / num_particles
    expected_q = grad(kl1, [q])[0]
    assert_equal(actual_q, expected_q, prec=prec, msg="".join([
        "\nexpected q.grad = {}".format(expected_q.detach().cpu().numpy()),
        "\n actual q.grad = {}".format(actual_q.detach().cpu().numpy()),
    ]))
    actual_a = a.grad / num_particles
    expected_a = grad(kl2, [a])[0]
    assert_equal(actual_a, expected_a, prec=prec, msg="".join([
        "\nexpected a.grad= {}".format(expected_a.detach().cpu().numpy()),
        "\n actual a.grad = {}".format(actual_a.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate1,num_steps,expand", [
    ("sequential", 2, True),
    ("sequential", 2, False),
    ("sequential", 3, True),
    ("sequential", 3, False),
    ("parallel", 2, True),
    ("parallel", 2, False),
    ("parallel", 3, True),
    ("parallel", 3, False),
    ("parallel", 10, False),
    ("parallel", 20, False),
    _skip_cuda("parallel", 30, False),
])
def test_elbo_hmm_in_model(enumerate1, num_steps, expand):
    """HMM with Normal emissions; latent states enumerated in the guide.

    All observations are 1.0 and the mean-field guide is uniform, so the
    expected unconstrained gradients below have a simple closed form.
    """
    pyro.clear_param_store()
    data = torch.ones(num_steps)
    init_probs = torch.tensor([0.5, 0.5])

    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.9, 0.1], [0.1, 0.9]]),
                                      constraint=constraints.simplex)
        locs = pyro.param("obs_locs", torch.tensor([-1.0, 1.0]))
        scale = pyro.param("obs_scale", torch.tensor(1.0),
                           constraint=constraints.positive)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            # First step uses init_probs; later steps condition on previous state.
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Normal(locs[x], scale), obs=y)

    @config_enumerate(default=enumerate1, expand=expand)
    def guide(data):
        mean_field_probs = pyro.param("mean_field_probs", torch.ones(num_steps, 2) / 2,
                                      constraint=constraints.simplex)
        for i in pyro.markov(range(num_steps)):
            pyro.sample("x_{}".format(i), dist.Categorical(mean_field_probs[i]))

    elbo = TraceEnum_ELBO()
    elbo.loss_and_grads(model, guide, data)

    # Closed-form expected gradients in unconstrained space.
    expected_unconstrained_grads = {
        "transition_probs": torch.tensor([[0.2, -0.2], [-0.2, 0.2]]) * (num_steps - 1),
        "obs_locs": torch.tensor([-num_steps, 0]),
        "obs_scale": torch.tensor(-num_steps),
        "mean_field_probs": torch.tensor([[0.5, -0.5]] * num_steps),
    }

    for name, value in pyro.get_param_store().named_parameters():
        actual = value.grad
        expected = expected_unconstrained_grads[name]
        assert_equal(actual, expected, msg=''.join([
            '\nexpected {}.grad = {}'.format(name, expected.cpu().numpy()),
            '\n  actual {}.grad = {}'.format(name, actual.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize("enumerate1,num_steps,expand", [
    ("sequential", 2, True),
    ("sequential", 2, False),
    ("sequential", 3, True),
    ("sequential", 3, False),
    ("parallel", 2, True),
    ("parallel", 2, False),
    ("parallel", 3, True),
    ("parallel", 3, False),
    ("parallel", 10, False),
    ("parallel", 20, False),
    _skip_cuda("parallel", 30, False),
    _skip_cuda("parallel", 40, False),
    _skip_cuda("parallel", 50, False),
])
def test_elbo_hmm_in_guide(enumerate1, num_steps, expand):
    """HMM where the guide itself is an HMM sharing "transition_probs" with the
    model.  Gradients are compared against golden values recorded per chain
    length; lengths without golden values only exercise the code path.
    """
    pyro.clear_param_store()
    data = torch.ones(num_steps)
    init_probs = torch.tensor([0.5, 0.5])

    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=y)

    @config_enumerate(default=enumerate1, expand=expand)
    def guide(data):
        # Note: same param name as the model, so the guide shares the tensor.
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))

    elbo = TraceEnum_ELBO()
    elbo.loss_and_grads(model, guide, data)

    # These golden values simply test agreement between parallel and sequential.
    expected_grads = {
        2: {
            "transition_probs": [[0.1029949, -0.1029949], [0.1029949, -0.1029949]],
            "emission_probs": [[0.75, -0.75], [0.25, -0.25]],
        },
        3: {
            "transition_probs": [[0.25748726, -0.25748726], [0.25748726, -0.25748726]],
            "emission_probs": [[1.125, -1.125], [0.375, -0.375]],
        },
        10: {
            "transition_probs": [[1.64832076, -1.64832076], [1.64832076, -1.64832076]],
            "emission_probs": [[3.75, -3.75], [1.25, -1.25]],
        },
        20: {
            "transition_probs": [[3.70781687, -3.70781687], [3.70781687, -3.70781687]],
            "emission_probs": [[7.5, -7.5], [2.5, -2.5]],
        },
        22: {
            "transition_probs": [[4.11979618, -4.11979618], [4.11979618, -4.11979618]],
            "emission_probs": [[8.25, -8.25], [2.75, -2.75]],
        },
        30: {
            "transition_probs": [[5.76771452, -5.76771452], [5.76771452, -5.76771452]],
            "emission_probs": [[11.25, -11.25], [3.75, -3.75]],
        },
    }
    if num_steps not in expected_grads:
        # No golden values for this length; the run above is a smoke test only.
        return
    for name, value in pyro.get_param_store().named_parameters():
        actual = value.grad
        expected = torch.tensor(expected_grads[num_steps][name])
        assert_equal(actual, expected, msg=''.join([
            '\nexpected {}.grad = {}'.format(name, expected.cpu().numpy()),
            '\n  actual {}.grad = {}'.format(name, actual.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize('num_steps', [2, 3, 4, 5, 10, 20, _skip_cuda(30)])
def test_hmm_enumerate_model(num_steps):
    """Smoke test: fully model-side parallel enumeration of an HMM with an
    empty guide; only checks that differentiable_loss runs.
    """
    data = dist.Categorical(torch.tensor([0.5, 0.5])).sample((num_steps,))

    @config_enumerate(default="parallel")
    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = 0
        for t, y in pyro.markov(enumerate(data)):
            x = pyro.sample("x_{}".format(t), dist.Categorical(transition_probs[x]))
            pyro.sample("y_{}".format(t), dist.Categorical(emission_probs[x]), obs=y)
            # Log the enumerated shape of x at each step.
            logger.debug('{}\t{}'.format(t, tuple(x.shape)))

    def guide(data):
        pass

    elbo = TraceEnum_ELBO()
    elbo.differentiable_loss(model, guide, data)
@pytest.mark.parametrize('num_steps', [2, 3, 4, 5, 10, 20, _skip_cuda(30)])
def test_hmm_enumerate_model_and_guide(num_steps):
    """Smoke test: the guide enumerates the initial state "x" while the model
    enumerates all transition states; only checks that differentiable_loss runs.
    """
    data = dist.Categorical(torch.tensor([0.5, 0.5])).sample((num_steps,))

    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5])))
        logger.debug('-1\t{}'.format(tuple(x.shape)))
        for t, y in pyro.markov(enumerate(data)):
            x = pyro.sample("x_{}".format(t), dist.Categorical(transition_probs[x]),
                            infer={"enumerate": "parallel"})
            pyro.sample("y_{}".format(t), dist.Categorical(emission_probs[x]), obs=y)
            logger.debug('{}\t{}'.format(t, tuple(x.shape)))

    def guide(data):
        init_probs = pyro.param("init_probs",
                                torch.tensor([0.75, 0.25]),
                                constraint=constraints.simplex)
        pyro.sample("x", dist.Categorical(init_probs),
                    infer={"enumerate": "parallel"})

    elbo = TraceEnum_ELBO()
    elbo.differentiable_loss(model, guide, data)
def _check_loss_and_grads(expected_loss, actual_loss):
    """Assert that two differentiable loss tensors agree, both in value and in
    their gradients w.r.t. every parameter currently in the Pyro param store.

    Parameters whose gradient is ``None`` for either loss are skipped.
    """
    loss_msg = 'Expected:\n{}\nActual:\n{}'.format(expected_loss.detach().cpu().numpy(),
                                                   actual_loss.detach().cpu().numpy())
    assert_equal(actual_loss, expected_loss, msg=loss_msg)

    names = pyro.get_param_store().get_all_param_names()
    # Differentiate w.r.t. the unconstrained representation of each parameter.
    params = []
    for param_name in names:
        params.append(pyro.param(param_name).unconstrained())
    grads_actual = grad(actual_loss, params, allow_unused=True, retain_graph=True)
    grads_expected = grad(expected_loss, params, allow_unused=True, retain_graph=True)
    for param_name, g_actual, g_expected in zip(names, grads_actual, grads_expected):
        if g_actual is None or g_expected is None:
            # Parameter is unused by at least one of the graphs; nothing to compare.
            continue
        grad_msg = '{}\nExpected:\n{}\nActual:\n{}'.format(param_name,
                                                           g_expected.detach().cpu().numpy(),
                                                           g_actual.detach().cpu().numpy())
        assert_equal(g_actual, g_expected, msg=grad_msg)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_1(scale):
    """Model-side enumeration of a leaf variable "y" that nothing depends on:
    the auto model (with y) must match a hand model with y marginalized away
    entirely, since sum_y p(y|x) = 1.
    """
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([0.3, 0.7]),
               constraint=constraints.simplex)

    @poutine.scale(scale=scale)
    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        pyro.sample("y", dist.Categorical(probs_y[x]),
                    infer={"enumerate": "parallel"})
        pyro.sample("z", dist.Categorical(probs_z), obs=torch.tensor(0))

    @poutine.scale(scale=scale)
    def hand_model():
        # "y" omitted: it is a leaf, so marginalizing it is a no-op.
        probs_x = pyro.param("model_probs_x")
        probs_z = pyro.param("model_probs_z")
        pyro.sample("x", dist.Categorical(probs_x))
        pyro.sample("z", dist.Categorical(probs_z), obs=torch.tensor(0))

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_2(scale):
    """Model-side enumeration of an interior variable "y" on the chain
    x -> y -> z: the auto model must match a hand model where y is
    marginalized analytically via the matrix product probs_y @ probs_z.
    """
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)

    @poutine.scale(scale=scale)
    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        y = pyro.sample("y", dist.Categorical(probs_y[x]),
                        infer={"enumerate": "parallel"})
        pyro.sample("z", dist.Categorical(probs_z[y]), obs=torch.tensor(0))

    @poutine.scale(scale=scale)
    def hand_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        # Marginalize y by matrix multiplication: p(z|x) = sum_y p(y|x) p(z|y).
        probs_yz = probs_y.mm(probs_z)
        x = pyro.sample("x", dist.Categorical(probs_x))
        pyro.sample("z", dist.Categorical(probs_yz[x]), obs=torch.tensor(0))

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_3(scale):
    """Same chain x -> y -> z as test_elbo_enumerate_2, but only the y/z part
    of the model is wrapped in poutine.scale; checks scaling interacts
    correctly with model-side marginalization of y.
    """
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)

    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        # Only the downstream sites are scaled; "x" is not.
        with poutine.scale(scale=scale):
            y = pyro.sample("y", dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"})
            pyro.sample("z", dist.Categorical(probs_z[y]), obs=torch.tensor(0))

    def hand_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        # Analytic marginalization of y: p(z|x) = sum_y p(y|x) p(z|y).
        probs_yz = probs_y.mm(probs_z)
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            pyro.sample("z", dist.Categorical(probs_yz[x]), obs=torch.tensor(0))

    @config_enumerate(default="parallel")
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize('num_samples,num_masked',
                         [(1, 1), (2, 2), (3, 2)],
                         ids=["single", "batch", "masked"])
def test_elbo_enumerate_plate_1(num_samples, num_masked, scale):
    """Enumerated "y" outside a plate of observed "z"s; the plated auto model
    must match a hand model that uses a sequential plate over the (possibly
    masked-down) observations.
    """
    #            +---------+
    #  x ----> y ----> z   |
    #          |       N   |
    #          +---------+
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)

    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            y = pyro.sample("y", dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"})
            if num_masked == num_samples:
                with pyro.plate("data", len(data)):
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
            else:
                # Mask out the trailing samples beyond num_masked.
                with pyro.plate("data", len(data)):
                    with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                        pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)

    def hand_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            y = pyro.sample("y", dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"})
            # Sequential plate: only the unmasked observations appear at all.
            for i in pyro.plate("data", num_masked):
                pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])

    @config_enumerate(default="parallel")
    def guide(data):
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize('num_samples,num_masked',
                         [(1, 1), (2, 2), (3, 2)],
                         ids=["single", "batch", "masked"])
def test_elbo_enumerate_plate_2(num_samples, num_masked, scale):
    """Like plate_1 but the enumerated "y" is itself inside the data plate, so
    each datum gets its own y; compared against a sequential-plate hand model.
    """
    #      +-----------------+
    #  x ----> y ----> z     |
    #      |           N     |
    #      +-----------------+
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)

    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            with pyro.plate("data", len(data)):
                if num_masked == num_samples:
                    y = pyro.sample("y", dist.Categorical(probs_y[x]),
                                    infer={"enumerate": "parallel"})
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
                else:
                    # Mask out the trailing samples beyond num_masked.
                    with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                        y = pyro.sample("y", dist.Categorical(probs_y[x]),
                                        infer={"enumerate": "parallel"})
                        pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)

    def hand_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            # One y per datum, sequentially, covering only the unmasked part.
            for i in pyro.plate("data", num_masked):
                y = pyro.sample("y_{}".format(i), dist.Categorical(probs_y[x]),
                                infer={"enumerate": "parallel"})
                pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])

    @config_enumerate(default="parallel")
    def guide(data):
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))

    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize('num_samples,num_masked',
                         [(1, 1), (2, 2), (3, 2)],
                         ids=["single", "batch", "masked"])
def test_elbo_enumerate_plate_3(num_samples, num_masked, scale):
    """The whole chain x -> y -> z lives inside the data plate, for both model
    and guide; compared against fully sequential hand versions.
    """
    #  +-----------------------+
    #  | x ----> y ----> z     |
    #  |                 N     |
    #  +-----------------------+
    # This plate should remain unreduced since all enumeration is in a single plate.
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)

    @poutine.scale(scale=scale)
    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        with pyro.plate("data", len(data)):
            if num_masked == num_samples:
                x = pyro.sample("x", dist.Categorical(probs_x))
                y = pyro.sample("y", dist.Categorical(probs_y[x]),
                                infer={"enumerate": "parallel"})
                pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
            else:
                # Mask out the trailing samples beyond num_masked.
                with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                    x = pyro.sample("x", dist.Categorical(probs_x))
                    y = pyro.sample("y", dist.Categorical(probs_y[x]),
                                    infer={"enumerate": "parallel"})
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)

    @poutine.scale(scale=scale)
    @config_enumerate(default="parallel")
    def auto_guide(data):
        probs_x = pyro.param("guide_probs_x")
        with pyro.plate("data", len(data)):
            if num_masked == num_samples:
                pyro.sample("x", dist.Categorical(probs_x))
            else:
                with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                    pyro.sample("x", dist.Categorical(probs_x))

    @poutine.scale(scale=scale)
    def hand_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        for i in pyro.plate("data", num_masked):
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs_x))
            y = pyro.sample("y_{}".format(i), dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"})
            pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])

    @poutine.scale(scale=scale)
    @config_enumerate(default="parallel")
    def hand_guide(data):
        probs_x = pyro.param("guide_probs_x")
        for i in pyro.plate("data", num_masked):
            pyro.sample("x_{}".format(i), dist.Categorical(probs_x))

    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize('outer_obs,inner_obs',
                         [(False, True), (True, False), (True, True)])
def test_elbo_enumerate_plate_4(outer_obs, inner_obs, scale):
    """Enumerated "a" influencing observations both outside and inside a plate
    of size 2, toggled by the outer_obs/inner_obs flags.
    """
    #    a ---> outer_obs
    #      \
    #  +-----\------------------+
    #  |       \                |
    #  |    b ---> inner_obs   N=2 |
    #  +------------------------+
    # This tests two different observations, one outside and one inside an plate.
    pyro.param("probs_a", torch.tensor([0.4, 0.6]), constraint=constraints.simplex)
    pyro.param("probs_b", torch.tensor([0.6, 0.4]), constraint=constraints.simplex)
    pyro.param("locs", torch.tensor([-1., 1.]))
    pyro.param("scales", torch.tensor([1., 2.]), constraint=constraints.positive)
    outer_data = torch.tensor(2.0)
    inner_data = torch.tensor([0.5, 1.5])

    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        locs = pyro.param("locs")
        scales = pyro.param("scales")
        a = pyro.sample("a", dist.Categorical(probs_a),
                        infer={"enumerate": "parallel"})
        if outer_obs:
            pyro.sample("outer_obs", dist.Normal(0., scales[a]),
                        obs=outer_data)
        with pyro.plate("inner", 2):
            b = pyro.sample("b", dist.Categorical(probs_b),
                            infer={"enumerate": "parallel"})
            if inner_obs:
                pyro.sample("inner_obs", dist.Normal(locs[b], scales[a]),
                            obs=inner_data)

    @poutine.scale(scale=scale)
    def hand_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        locs = pyro.param("locs")
        scales = pyro.param("scales")
        a = pyro.sample("a", dist.Categorical(probs_a),
                        infer={"enumerate": "parallel"})
        if outer_obs:
            pyro.sample("outer_obs", dist.Normal(0., scales[a]),
                        obs=outer_data)
        # Sequential version of the "inner" plate.
        for i in pyro.plate("inner", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b),
                            infer={"enumerate": "parallel"})
            if inner_obs:
                pyro.sample("inner_obs_{}".format(i), dist.Normal(locs[b], scales[a]),
                            obs=inner_data[i])

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
def test_elbo_enumerate_plate_5():
    """Guide-enumerated "b" inside a plate while the model enumerates "a"
    outside it.  This configuration is currently unsupported: the vectorized
    version must raise a ValueError, so the final consistency check against
    the sequential version is intentionally unreachable.
    """
    #        Guide   Model
    #                  a
    #  +---------------|--+
    #  | M=2           V  |
    #  |       b ----> c  |
    #  +------------------+
    pyro.param("model_probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("model_probs_b",
               torch.tensor([0.6, 0.4]),
               constraint=constraints.simplex)
    pyro.param("model_probs_c",
               torch.tensor([[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]],
                             [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_b",
               torch.tensor([0.8, 0.2]),
               constraint=constraints.simplex)
    data = torch.tensor([1, 2])
    c_ind = torch.arange(3, dtype=torch.long)

    @config_enumerate(default="parallel")
    def model_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("b_axis", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            # Advanced indexing over (a, b) pairs; c_ind keeps the event dim.
            pyro.sample("c",
                        dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                        obs=data)

    @config_enumerate(default="parallel")
    def guide_plate():
        probs_b = pyro.param("guide_probs_b")
        with pyro.plate("b_axis", 2):
            pyro.sample("b", dist.Categorical(probs_b))

    @config_enumerate(default="parallel")
    def model_iplate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("b_axis", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            pyro.sample("c_{}".format(i),
                        dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                        obs=data[i])

    @config_enumerate(default="parallel")
    def guide_iplate():
        probs_b = pyro.param("guide_probs_b")
        for i in pyro.plate("b_axis", 2):
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b))

    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate, guide_iplate)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    with pytest.raises(ValueError, match="Expected model enumeration to be no more global than guide"):
        actual_loss = elbo.differentiable_loss(model_plate, guide_plate)
        # This never gets run because we don't support this yet.
        _check_loss_and_grads(expected_loss, actual_loss)
@pytest.mark.parametrize('enumerate1', ['parallel', 'sequential'])
def test_elbo_enumerate_plate_6(enumerate1):
    """Guide-enumerated "b" (outside the plate) feeding a plated observed "c"
    that also depends on model-enumerated "a".
    """
    #         Guide           Model
    #                +-------+
    #       b ----> c <---- a
    #                | M=2  |
    #                +-------+
    # This tests that sequential enumeration over b works, even though
    # model-side enumeration moves c into b's plate via contraction.
    pyro.param("model_probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("model_probs_b",
               torch.tensor([0.6, 0.4]),
               constraint=constraints.simplex)
    pyro.param("model_probs_c",
               torch.tensor([[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]],
                             [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_b",
               torch.tensor([0.8, 0.2]),
               constraint=constraints.simplex)
    data = torch.tensor([1, 2])
    c_ind = torch.arange(3, dtype=torch.long)

    @config_enumerate(default="parallel")
    def model_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b))
        with pyro.plate("b_axis", 2):
            # Advanced indexing over (a, b) pairs; c_ind keeps the event dim.
            pyro.sample("c",
                        dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                        obs=data)

    @config_enumerate(default="parallel")
    def model_iplate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b))
        for i in pyro.plate("b_axis", 2):
            pyro.sample("c_{}".format(i),
                        dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                        obs=data[i])

    @config_enumerate(default=enumerate1)
    def guide():
        probs_b = pyro.param("guide_probs_b")
        pyro.sample("b", dist.Categorical(probs_b))

    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    actual_loss = elbo.differentiable_loss(model_plate, guide)
    _check_loss_and_grads(expected_loss, actual_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plate_7(scale):
    """Mixed model- and guide-side enumeration over a plated chain.

    The vectorized (auto) model/guide pair must yield the same loss and
    gradients as the sequential (hand) pair.
    """
    #  Guide    Model
    #    a -----> b
    #    |        |
    #  +-|--------|----------------+
    #  | V        V                |
    #  | c -----> d -----> e   N=2 |
    #  +---------------------------+
    # This tests a mixture of model and guide enumeration.
    pyro.param("model_probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("model_probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_c",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_d",
               torch.tensor([[[0.4, 0.6], [0.3, 0.7]], [[0.3, 0.7], [0.2, 0.8]]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_e",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    # Fixed: was [0.35, 0.64], which sums to 0.99 and is not a simplex point;
    # the simplex constraint's stick-breaking transform renormalizes the last
    # coordinate to 0.65 anyway, so this literal is the value actually used.
    pyro.param("guide_probs_a",
               torch.tensor([0.35, 0.65]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_c",
               torch.tensor([[0., 1.], [1., 0.]]),  # deterministic
               constraint=constraints.simplex)
    d_ind = torch.arange(2, dtype=torch.long)

    @poutine.scale(scale=scale)
    def auto_model(data):
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        probs_d = pyro.param("model_probs_d")
        probs_e = pyro.param("model_probs_e")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b[a]),
                        infer={"enumerate": "parallel"})
        with pyro.plate("data", 2):
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
            # Advanced indexing over (b, c) pairs; d_ind keeps the event dim.
            d = pyro.sample("d",
                            dist.Categorical(probs_d[b.unsqueeze(-1), c.unsqueeze(-1), d_ind]),
                            infer={"enumerate": "parallel"})
            pyro.sample("obs", dist.Categorical(probs_e[d]), obs=data)

    @poutine.scale(scale=scale)
    def auto_guide(data):
        probs_a = pyro.param("guide_probs_a")
        probs_c = pyro.param("guide_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a),
                        infer={"enumerate": "parallel"})
        with pyro.plate("data", 2):
            pyro.sample("c", dist.Categorical(probs_c[a]))

    @poutine.scale(scale=scale)
    def hand_model(data):
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        probs_d = pyro.param("model_probs_d")
        probs_e = pyro.param("model_probs_e")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b[a]),
                        infer={"enumerate": "parallel"})
        for i in pyro.plate("data", 2):
            c = pyro.sample("c_{}".format(i), dist.Categorical(probs_c[a]))
            d = pyro.sample("d_{}".format(i),
                            dist.Categorical(probs_d[b.unsqueeze(-1), c.unsqueeze(-1), d_ind]),
                            infer={"enumerate": "parallel"})
            pyro.sample("obs_{}".format(i), dist.Categorical(probs_e[d]), obs=data[i])

    @poutine.scale(scale=scale)
    def hand_guide(data):
        probs_a = pyro.param("guide_probs_a")
        probs_c = pyro.param("guide_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a),
                        infer={"enumerate": "parallel"})
        for i in pyro.plate("data", 2):
            pyro.sample("c_{}".format(i), dist.Categorical(probs_c[a]))

    data = torch.tensor([0, 0])
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_1(scale):
    """Two unrelated plates of different sizes, each with an enumerated latent
    and an observed child; vectorized vs. sequential versions must agree.
    """
    #  +-----------------+
    #  | a ----> b   M=2 |
    #  +-----------------+
    #  +-----------------+
    #  | c ----> d   N=3 |
    #  +-----------------+
    # This tests two unrelated plates.
    # Each should remain uncontracted.
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([0.75, 0.25]),
               constraint=constraints.simplex)
    pyro.param("probs_d",
               torch.tensor([[0.4, 0.6], [0.3, 0.7]]),
               constraint=constraints.simplex)
    b_data = torch.tensor([0, 1])
    d_data = torch.tensor([0, 0, 1])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        with pyro.plate("a_axis", 2):
            a = pyro.sample("a", dist.Categorical(probs_a))
            pyro.sample("b", dist.Categorical(probs_b[a]), obs=b_data)
        with pyro.plate("c_axis", 3):
            c = pyro.sample("c", dist.Categorical(probs_c))
            pyro.sample("d", dist.Categorical(probs_d[c]), obs=d_data)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        for i in pyro.plate("a_axis", 2):
            a = pyro.sample("a_{}".format(i), dist.Categorical(probs_a))
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]), obs=b_data[i])
        for j in pyro.plate("c_axis", 3):
            c = pyro.sample("c_{}".format(j), dist.Categorical(probs_c))
            pyro.sample("d_{}".format(j), dist.Categorical(probs_d[c]), obs=d_data[j])

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_2(scale):
    """A single enumerated latent "a" feeding observations in two different
    plates; vectorized vs. sequential versions must agree.
    """
    #  +---------+       +---------+
    #  |     b <---- a ----> c     |
    #  | M=2     |       |     N=3 |
    #  +---------+       +---------+
    # This tests two different plates with recycled dimension.
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    b_data = torch.tensor([0, 1])
    c_data = torch.tensor([0, 0, 1])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("b_axis", 2):
            pyro.sample("b", dist.Categorical(probs_b[a]),
                        obs=b_data)
        with pyro.plate("c_axis", 3):
            pyro.sample("c", dist.Categorical(probs_c[a]),
                        obs=c_data)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("b_axis", 2):
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]),
                        obs=b_data[i])
        for j in pyro.plate("c_axis", 3):
            pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a]),
                        obs=c_data[j])

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_3(scale):
    """Nested plates below a single enumerated latent: both plate dims must be
    contracted in a single step when marginalizing `a`."""
    # +--------------------+
    # | +----------+ |
    # a -------> b | |
    # | | N=2 | |
    # | +----------+ M=2 |
    # +--------------------+
    # This is tests the case of multiple plate contractions in
    # a single step.
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    data = torch.tensor([[0, 1], [0, 0]])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            with pyro.plate("inner", 2):
                pyro.sample("b", dist.Categorical(probs_b[a]),
                            obs=data)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model():
        # Both plates unrolled; the inner plate object is created once and
        # iterated inside the outer loop.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            for j in inner:
                pyro.sample("b_{}_{}".format(i, j), dist.Categorical(probs_b[a]),
                            obs=data[i, j])
    def guide():
        pass
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_4(scale):
    """Chain a -> b -> c where b lives in the outer plate and c in the nested
    inner plate; vectorized and unrolled enumerations must agree."""
    # +--------------------+
    # | +----------+ |
    # a ----> b ----> c | |
    # | | N=2 | |
    # | M=2 +----------+ |
    # +--------------------+
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[0.4, 0.6], [0.3, 0.7]]),
               constraint=constraints.simplex)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
            with pyro.plate("inner", 2):
                pyro.sample("c", dist.Categorical(probs_c[b]), obs=data)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model(data):
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]))
            for j in inner:
                pyro.sample("c_{}_{}".format(i, j), dist.Categorical(probs_c[b]),
                            obs=data[i, j])
    def guide(data):
        pass
    data = torch.tensor([[0, 1], [0, 0]])
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    auto_loss = elbo.differentiable_loss(auto_model, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_5(scale):
    """Latent `a` outside both plates feeds `c` directly (as well as via `b`),
    so probs_c is indexed jointly by (a, b); auto vs. hand must agree."""
    # a
    # | \
    # +--|---\------------+
    # | V +-\--------+ |
    # | b ----> c | |
    # | | N=2 | |
    # | M=2 +----------+ |
    # +-------------------+
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[[0.4, 0.6], [0.3, 0.7]],
                             [[0.2, 0.8], [0.1, 0.9]]]),
               constraint=constraints.simplex)
    data = torch.tensor([[0, 1], [0, 0]])
    c_ind = torch.arange(2, dtype=torch.long)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
            with pyro.plate("inner", 2):
                # Advanced indexing probs_c[a, b, :] that broadcasts over the
                # enumeration dims of a and b.
                pyro.sample("c",
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]))
            for j in inner:
                pyro.sample("c_{}_{}".format(i, j),
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data[i, j])
    def guide():
        pass
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_6(scale):
    """Overlapping plates b_axis/c_axis with `d` inside both: any single plate
    may be vectorized, but vectorizing both is non-tree and must raise."""
    # +----------+
    # | M=2 |
    # a ----> b |
    # | | | |
    # +--|-------|--+ |
    # | V | V | |
    # | c ----> d | |
    # | | | |
    # | N=2 +------|---+
    # +-------------+
    # This tests different ways of mixing two independence contexts,
    # where each can be either sequential or vectorized plate.
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("probs_d",
               torch.tensor([[[0.4, 0.6], [0.3, 0.7]], [[0.3, 0.7], [0.2, 0.8]]]),
               constraint=constraints.simplex)
    d_ind = torch.arange(2, dtype=torch.long)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_iplate_iplate(data):
        # Both axes sequential (reference implementation).
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis]
        c = [pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis]
        for i in b_axis:
            for j in c_axis:
                pyro.sample("d_{}_{}".format(i, j),
                            dist.Categorical(probs_d[b[i].unsqueeze(-1), c[j].unsqueeze(-1), d_ind]),
                            obs=data[i, j])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_iplate_plate(data):
        # b_axis sequential, c_axis vectorized.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis]
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        for i in b_axis:
            with c_axis:
                pyro.sample("d_{}".format(i),
                            dist.Categorical(probs_d[b[i].unsqueeze(-1), c.unsqueeze(-1), d_ind]),
                            obs=data[i])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_plate_iplate(data):
        # b_axis vectorized, c_axis sequential.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        c = [pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis]
        with b_axis:
            for j in c_axis:
                pyro.sample("d_{}".format(j),
                            dist.Categorical(probs_d[b.unsqueeze(-1), c[j].unsqueeze(-1), d_ind]),
                            obs=data[:, j])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_plate_plate(data):
        # Both axes vectorized: this makes d depend on two overlapping plates
        # and is expected to be rejected as non-tree-structured below.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2, dim=-1)
        c_axis = pyro.plate("c_axis", 2, dim=-2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        with b_axis, c_axis:
            pyro.sample("d",
                        dist.Categorical(probs_d[b.unsqueeze(-1), c.unsqueeze(-1), d_ind]),
                        obs=data)
    def guide(data):
        pass
    # Check that either one of the sequential plates can be promoted to be vectorized.
    data = torch.tensor([[0, 1], [0, 0]])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    loss_iplate_iplate = elbo.differentiable_loss(model_iplate_iplate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    loss_plate_iplate = elbo.differentiable_loss(model_plate_iplate, guide, data)
    loss_iplate_plate = elbo.differentiable_loss(model_iplate_plate, guide, data)
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_iplate)
    _check_loss_and_grads(loss_iplate_iplate, loss_iplate_plate)
    # But promoting both to plates should result in an error.
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    with pytest.raises(NotImplementedError, match="Expected tree-structured plate nesting.*"):
        elbo.differentiable_loss(model_plate_plate, guide, data)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_7(scale):
    """Tree-structured variable dependencies but overlapping plate nesting:
    here ALL four plate combinations (including plate x plate) are valid and
    must produce the same loss, unlike test_elbo_enumerate_plates_6."""
    # +-------------+
    # | N=2 |
    # a -------> c |
    # | | | |
    # +--|----------|--+ |
    # | | | V | |
    # | V | e | |
    # | b ----> d | |
    # | | | |
    # | M=2 +---------|---+
    # +----------------+
    # This tests tree-structured dependencies among variables but
    # non-tree dependencies among plate nestings.
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("probs_d",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)
    pyro.param("probs_e",
               torch.tensor([[0.4, 0.6], [0.3, 0.7]]),
               constraint=constraints.simplex)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_iplate_iplate(data):
        # Both axes sequential (reference implementation).
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis]
        c = [pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis]
        for i in b_axis:
            for j in c_axis:
                pyro.sample("d_{}_{}".format(i, j), dist.Categorical(probs_d[b[i]]),
                            obs=data[i, j])
                pyro.sample("e_{}_{}".format(i, j), dist.Categorical(probs_e[c[j]]),
                            obs=data[i, j])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_iplate_plate(data):
        # b_axis sequential, c_axis vectorized.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis]
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        for i in b_axis:
            with c_axis:
                pyro.sample("d_{}".format(i), dist.Categorical(probs_d[b[i]]),
                            obs=data[i])
                pyro.sample("e_{}".format(i), dist.Categorical(probs_e[c]),
                            obs=data[i])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_plate_iplate(data):
        # b_axis vectorized, c_axis sequential.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        c = [pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis]
        with b_axis:
            for j in c_axis:
                pyro.sample("d_{}".format(j), dist.Categorical(probs_d[b]),
                            obs=data[:, j])
                pyro.sample("e_{}".format(j), dist.Categorical(probs_e[c[j]]),
                            obs=data[:, j])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_plate_plate(data):
        # Both axes vectorized on distinct tensor dims; valid here because
        # no single variable depends on both plates simultaneously.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2, dim=-1)
        c_axis = pyro.plate("c_axis", 2, dim=-2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        with b_axis, c_axis:
            pyro.sample("d", dist.Categorical(probs_d[b]), obs=data)
            pyro.sample("e", dist.Categorical(probs_e[c]), obs=data)
    def guide(data):
        pass
    # Check that any combination of sequential plates can be promoted to be vectorized.
    data = torch.tensor([[0, 1], [0, 0]])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    loss_iplate_iplate = elbo.differentiable_loss(model_iplate_iplate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    loss_plate_iplate = elbo.differentiable_loss(model_plate_iplate, guide, data)
    loss_iplate_plate = elbo.differentiable_loss(model_iplate_plate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    loss_plate_plate = elbo.differentiable_loss(model_plate_plate, guide, data)
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_iplate)
    _check_loss_and_grads(loss_iplate_iplate, loss_iplate_plate)
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_plate)
@pytest.mark.parametrize('guide_scale', [1])
@pytest.mark.parametrize('model_scale', [1])
@pytest.mark.parametrize('outer_vectorized,inner_vectorized,xfail',
                         [(False, True, False), (True, False, True), (True, True, True)],
                         ids=['iplate-plate', 'plate-iplate', 'plate-plate'])
def test_elbo_enumerate_plates_8(model_scale, guide_scale, inner_vectorized, outer_vectorized, xfail):
    """Guide samples `b` inside the outer plate. Vectorizing the outer plate
    in the model makes model enumeration more global than the guide's, which
    is expected to fail (xfail cases)."""
    # Guide Model
    # a
    # +-----------|--------+
    # | M=2 +---|------+ |
    # | | V N=2 | |
    # | b ----> c | |
    # | +----------+ |
    # +--------------------+
    pyro.param("model_probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("model_probs_b",
               torch.tensor([0.6, 0.4]),
               constraint=constraints.simplex)
    pyro.param("model_probs_c",
               torch.tensor([[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]],
                             [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_b",
               torch.tensor([0.8, 0.2]),
               constraint=constraints.simplex)
    data = torch.tensor([[0, 1], [0, 2]])
    c_ind = torch.arange(3, dtype=torch.long)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=model_scale)
    def model_plate_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            with pyro.plate("inner", 2):
                pyro.sample("c",
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data)
    @config_enumerate(default="parallel")
    @poutine.scale(scale=model_scale)
    def model_iplate_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            with inner:
                pyro.sample("c_{}".format(i),
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data[:, i])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=model_scale)
    def model_plate_iplate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            for j in pyro.plate("inner", 2):
                pyro.sample("c_{}".format(j),
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data[j])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=model_scale)
    def model_iplate_iplate():
        # Fully sequential reference model used to compute expected_loss.
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            for j in inner:
                pyro.sample("c_{}_{}".format(i, j),
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data[j, i])
    @config_enumerate(default="parallel")
    @poutine.scale(scale=guide_scale)
    def guide_plate():
        probs_b = pyro.param("guide_probs_b")
        with pyro.plate("outer", 2):
            pyro.sample("b", dist.Categorical(probs_b))
    @config_enumerate(default="parallel")
    @poutine.scale(scale=guide_scale)
    def guide_iplate():
        probs_b = pyro.param("guide_probs_b")
        for i in pyro.plate("outer", 2):
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate_iplate, guide_iplate)
    with ExitStack() as stack:
        # For xfail parametrizations we expect the enumeration-locality check
        # to reject the model/guide pair.
        if xfail:
            stack.enter_context(pytest.raises(
                ValueError,
                match="Expected model enumeration to be no more global than guide"))
        if inner_vectorized:
            if outer_vectorized:
                elbo = TraceEnum_ELBO(max_plate_nesting=2)
                actual_loss = elbo.differentiable_loss(model_plate_plate, guide_plate)
            else:
                elbo = TraceEnum_ELBO(max_plate_nesting=1)
                actual_loss = elbo.differentiable_loss(model_iplate_plate, guide_iplate)
        else:
            elbo = TraceEnum_ELBO(max_plate_nesting=1)
            actual_loss = elbo.differentiable_loss(model_plate_iplate, guide_plate)
        _check_loss_and_grads(expected_loss, actual_loss)
def test_elbo_scale():
    """Compare guide-side automatic enumeration of a mixture with explicit
    model-side marginalization via poutine.scale weighting."""
    # Consider a mixture model with two components, toggled by `which`.
    def component_model(data, which, suffix=""):
        # One Gaussian component; `which` selects the component location.
        loc = pyro.param("locs", torch.tensor([-1., 1.]))[which]
        with pyro.plate("data" + suffix, len(data)):
            pyro.sample("obs" + suffix, dist.Normal(loc, 1.), obs=data)
    pyro.param("mixture_probs", torch.tensor([0.25, 0.75]), constraint=constraints.simplex)
    # We can implement this in two ways.
    # First consider automatic enumeration in the guide.
    def auto_model(data):
        mixture_probs = pyro.param("mixture_probs")
        which = pyro.sample("which", dist.Categorical(mixture_probs))
        component_model(data, which)
    def auto_guide(data):
        mixture_probs = pyro.param("mixture_probs")
        pyro.sample("which", dist.Categorical(mixture_probs),
                    infer={"enumerate": "parallel"})
    # Second consider explicit enumeration in the model, where we
    # marginalize out the `which` variable by hand.
    def hand_model(data):
        mixture_probs = pyro.param("mixture_probs")
        for which in pyro.plate("which", len(mixture_probs)):
            # Weight each component's log-density by its mixture probability.
            with pyro.poutine.scale(scale=mixture_probs[which]):
                component_model(data, which, suffix="_{}".format(which))
    def hand_guide(data):
        pass
    data = dist.Normal(0., 2.).sample((3,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
def test_elbo_hmm_growth():
    """Check that enumeration cost for an HMM grows linearly with sequence
    length (via pyro.markov), not exponentially."""
    pyro.clear_param_store()
    init_probs = torch.tensor([0.5, 0.5])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    def model(data):
        # Standard 2-state HMM with categorical emissions.
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=y)
    @config_enumerate(default="parallel")
    def guide(data):
        # Mean-field-style HMM guide over the latent chain only.
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
    # GROWTH_SIZE env var controls the largest sequence length tested.
    sizes = range(2, 1 + int(os.environ.get('GROWTH_SIZE', 15)))
    costs = []
    times1 = []
    times2 = []
    for size in sizes:
        data = torch.ones(size)
        time0 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # compiles paths
        time1 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # reuses compiled path
        time2 = timeit.default_timer()
        times1.append(time1 - time0)
        times2.append(time2 - time1)
        costs.append(LAST_CACHE_SIZE[0])
    collated_costs = defaultdict(list)
    for counts in costs:
        for key, cost in counts.items():
            collated_costs[key].append(cost)
    logger.debug('\n'.join([
        'HMM Growth:',
        'sizes = {}'.format(repr(sizes)),
        'costs = {}'.format(repr(dict(collated_costs))),
        'times1 = {}'.format(repr(times1)),
        'times2 = {}'.format(repr(times2)),
    ]))
    for key, cost in collated_costs.items():
        dt = 3
        # Second difference at spacing dt must be zero, i.e. cost is an
        # affine (linear) function of sequence length over the tail.
        assert cost[-1 - dt - dt] - 2 * cost[-1 - dt] + cost[-1] == 0, \
            '{} cost is not linear'.format(key)
@pytest.mark.skipif("CUDA_TEST" in os.environ, reason="https://github.com/uber/pyro/issues/1380")
def test_elbo_dbn_growth():
    """Check that enumeration cost for a dynamic Bayes net grows linearly with
    sequence length under pyro.markov."""
    pyro.clear_param_store()
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    def model(data):
        uniform = torch.tensor([0.5, 0.5])
        probs_z = pyro.param("probs_z",
                             torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                             constraint=constraints.simplex)
        for i, z in pyro.markov(enumerate(data)):
            pyro.sample("x_{}".format(i), dist.Categorical(uniform))
            y = pyro.sample("y_{}".format(i), dist.Categorical(uniform))
            pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=z)
    @config_enumerate(default="parallel")
    def guide(data):
        probs_x = pyro.param("probs_x",
                             torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                             constraint=constraints.simplex)
        probs_y = pyro.param("probs_y",
                             torch.tensor([[[0.75, 0.25], [0.45, 0.55]],
                                           [[0.55, 0.45], [0.25, 0.75]]]),
                             constraint=constraints.simplex)
        # y depends on both the current x and the previous y, giving the
        # "dynamic Bayes net" coupling across time steps.
        x = 0
        y = 0
        for i in pyro.markov(range(len(data))):
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs_x[x]))
            y = pyro.sample("y_{}".format(i), dist.Categorical(probs_y[x, y]))
    # GROWTH_SIZE env var controls the largest sequence length tested.
    sizes = range(2, 1 + int(os.environ.get('GROWTH_SIZE', 15)))
    costs = []
    times1 = []
    times2 = []
    for size in sizes:
        data = torch.ones(size)
        time0 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # compiles paths
        time1 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # reuses compiled path
        time2 = timeit.default_timer()
        times1.append(time1 - time0)
        times2.append(time2 - time1)
        costs.append(LAST_CACHE_SIZE[0])
    collated_costs = defaultdict(list)
    for counts in costs:
        for key, cost in counts.items():
            collated_costs[key].append(cost)
    logger.debug('\n'.join([
        'DBN Growth:',
        'sizes = {}'.format(repr(sizes)),
        'costs = {}'.format(repr(dict(collated_costs))),
        'times1 = {}'.format(repr(times1)),
        'times2 = {}'.format(repr(times2)),
    ]))
    for key, cost in collated_costs.items():
        dt = 3
        # Second difference at spacing dt must be zero, i.e. cost is an
        # affine (linear) function of sequence length over the tail.
        assert cost[-1 - dt - dt] - 2 * cost[-1 - dt] + cost[-1] == 0, \
            '{} cost is not linear'.format(key)
@pytest.mark.parametrize("pi_a", [0.33])
@pytest.mark.parametrize("pi_b", [0.51, 0.77])
@pytest.mark.parametrize("pi_c", [0.37])
@pytest.mark.parametrize("N_b", [3, 4])
@pytest.mark.parametrize("N_c", [5, 6])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("expand", [True, False])
def test_bernoulli_pyramid_elbo_gradient(enumerate1, N_b, N_c, pi_a, pi_b, pi_c, expand):
    """Compare enumerated-ELBO gradients against analytic KL gradients for a
    three-level Bernoulli pyramid a -> b (plate N_b) -> c (plate N_c x N_b)."""
    pyro.clear_param_store()
    def model():
        a = pyro.sample("a", dist.Bernoulli(0.33))
        with pyro.plate("b_plate", N_b):
            b = pyro.sample("b", dist.Bernoulli(0.25 * a + 0.50))
            with pyro.plate("c_plate", N_c):
                pyro.sample("c", dist.Bernoulli(0.15 * a + 0.20 * b + 0.32))
    def guide():
        # Fully-factorized (mean-field) Bernoulli guide with learnable probs.
        qa = pyro.param("qa", torch.tensor(pi_a, requires_grad=True))
        qb = pyro.param("qb", torch.tensor(pi_b, requires_grad=True))
        qc = pyro.param("qc", torch.tensor(pi_c, requires_grad=True))
        pyro.sample("a", dist.Bernoulli(qa))
        with pyro.plate("b_plate", N_b):
            pyro.sample("b", dist.Bernoulli(qb).expand_by([N_b]))
            with pyro.plate("c_plate", N_c):
                pyro.sample("c", dist.Bernoulli(qc).expand_by([N_c, N_b]))
    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(max_plate_nesting=2,
                          strict_enumeration_warning=True)
    elbo.loss_and_grads(model, config_enumerate(guide, default=enumerate1, expand=expand))
    actual_grad_qa = pyro.param('qa').grad
    actual_grad_qb = pyro.param('qb').grad
    actual_grad_qc = pyro.param('qc').grad
    logger.info("Computing analytic gradients")
    qa = torch.tensor(pi_a, requires_grad=True)
    qb = torch.tensor(pi_b, requires_grad=True)
    qc = torch.tensor(pi_c, requires_grad=True)
    # Analytic ELBO as a sum of KL terms, conditioning on each configuration
    # of the upstream Bernoulli variables; model probs at a/b in {0,1} are
    # e.g. 0.25*1+0.50=0.75 and 0.15+0.20+0.32=0.67.
    elbo = kl_divergence(dist.Bernoulli(qa), dist.Bernoulli(0.33))
    elbo = elbo + N_b * qa * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.75))
    elbo = elbo + N_b * (1.0 - qa) * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.50))
    elbo = elbo + N_c * N_b * qa * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.67))
    elbo = elbo + N_c * N_b * (1.0 - qa) * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.52))
    elbo = elbo + N_c * N_b * qa * (1.0 - qb) * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.47))
    elbo = elbo + N_c * N_b * (1.0 - qa) * (1.0 - qb) * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.32))
    expected_grad_qa, expected_grad_qb, expected_grad_qc = grad(elbo, [qa, qb, qc])
    prec = 0.001
    assert_equal(actual_grad_qa, expected_grad_qa, prec=prec, msg="".join([
        "\nqa expected = {}".format(expected_grad_qa.data.cpu().numpy()),
        "\nqa  actual = {}".format(actual_grad_qa.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qb, expected_grad_qb, prec=prec, msg="".join([
        "\nqb expected = {}".format(expected_grad_qb.data.cpu().numpy()),
        "\nqb  actual = {}".format(actual_grad_qb.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qc, expected_grad_qc, prec=prec, msg="".join([
        "\nqc expected = {}".format(expected_grad_qc.data.cpu().numpy()),
        "\nqc  actual = {}".format(actual_grad_qc.data.cpu().numpy()),
    ]))
@pytest.mark.parametrize("pi_a", [0.33])
@pytest.mark.parametrize("pi_b", [0.51])
@pytest.mark.parametrize("pi_c", [0.37])
@pytest.mark.parametrize("pi_d", [0.29])
@pytest.mark.parametrize("b_factor", [0.03, 0.04])
@pytest.mark.parametrize("c_factor", [0.04, 0.06])
@pytest.mark.parametrize("d_offset", [0.32])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("expand", [True, False])
def test_bernoulli_non_tree_elbo_gradient(enumerate1, b_factor, c_factor, pi_a, pi_b, pi_c, pi_d,
                                          expand, d_offset, N_b=2, N_c=2):
    """Compare enumerated-ELBO gradients against analytic KL gradients for a
    non-tree Bernoulli DAG: c depends on both a and b; d depends on b and c."""
    pyro.clear_param_store()
    def model():
        a = pyro.sample("a", dist.Bernoulli(0.33))
        b = pyro.sample("b", dist.Bernoulli(0.25 * a + 0.50))
        c = pyro.sample("c", dist.Bernoulli(0.25 * a + 0.10 * b + 0.50))
        pyro.sample("d", dist.Bernoulli(b_factor * b + c_factor * c + d_offset))
    def guide():
        # Fully-factorized Bernoulli guide with learnable probabilities.
        qa = pyro.param("qa", torch.tensor(pi_a, requires_grad=True))
        qb = pyro.param("qb", torch.tensor(pi_b, requires_grad=True))
        qc = pyro.param("qc", torch.tensor(pi_c, requires_grad=True))
        qd = pyro.param("qd", torch.tensor(pi_d, requires_grad=True))
        pyro.sample("a", dist.Bernoulli(qa))
        pyro.sample("b", dist.Bernoulli(qb))
        pyro.sample("c", dist.Bernoulli(qc))
        pyro.sample("d", dist.Bernoulli(qd))
    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(max_plate_nesting=2,
                          strict_enumeration_warning=True)
    elbo.loss_and_grads(model, config_enumerate(guide, default=enumerate1, expand=expand))
    actual_grad_qa = pyro.param('qa').grad
    actual_grad_qb = pyro.param('qb').grad
    actual_grad_qc = pyro.param('qc').grad
    actual_grad_qd = pyro.param('qd').grad
    logger.info("Computing analytic gradients")
    qa = torch.tensor(pi_a, requires_grad=True)
    qb = torch.tensor(pi_b, requires_grad=True)
    qc = torch.tensor(pi_c, requires_grad=True)
    qd = torch.tensor(pi_d, requires_grad=True)
    # Analytic ELBO: a sum of KL terms over each configuration of the
    # upstream Bernoulli parents (e.g. p(c|a=1,b=1) = 0.25+0.10+0.50 = 0.85).
    elbo = kl_divergence(dist.Bernoulli(qa), dist.Bernoulli(0.33))
    elbo = elbo + qa * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.75))
    elbo = elbo + (1.0 - qa) * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.50))
    elbo = elbo + qa * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.85))
    elbo = elbo + (1.0 - qa) * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.60))
    elbo = elbo + qa * (1.0 - qb) * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.75))
    elbo = elbo + (1.0 - qa) * (1.0 - qb) * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.50))
    elbo = elbo + qb * qc * kl_divergence(dist.Bernoulli(qd), dist.Bernoulli(b_factor + c_factor + d_offset))
    elbo = elbo + (1.0 - qb) * qc * kl_divergence(dist.Bernoulli(qd), dist.Bernoulli(c_factor + d_offset))
    elbo = elbo + qb * (1.0 - qc) * kl_divergence(dist.Bernoulli(qd), dist.Bernoulli(b_factor + d_offset))
    elbo = elbo + (1.0 - qb) * (1.0 - qc) * kl_divergence(dist.Bernoulli(qd), dist.Bernoulli(d_offset))
    expected_grad_qa, expected_grad_qb, expected_grad_qc, expected_grad_qd = grad(elbo, [qa, qb, qc, qd])
    prec = 0.0001
    assert_equal(actual_grad_qa, expected_grad_qa, prec=prec, msg="".join([
        "\nqa expected = {}".format(expected_grad_qa.data.cpu().numpy()),
        "\nqa  actual = {}".format(actual_grad_qa.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qb, expected_grad_qb, prec=prec, msg="".join([
        "\nqb expected = {}".format(expected_grad_qb.data.cpu().numpy()),
        "\nqb  actual = {}".format(actual_grad_qb.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qc, expected_grad_qc, prec=prec, msg="".join([
        "\nqc expected = {}".format(expected_grad_qc.data.cpu().numpy()),
        "\nqc  actual = {}".format(actual_grad_qc.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qd, expected_grad_qd, prec=prec, msg="".join([
        "\nqd expected = {}".format(expected_grad_qd.data.cpu().numpy()),
        "\nqd  actual = {}".format(actual_grad_qd.data.cpu().numpy()),
    ]))
@pytest.mark.parametrize("gate", [0.1, 0.25, 0.5, 0.75, 0.9])
@pytest.mark.parametrize("rate", [0.1, 1., 3.])
def test_elbo_zip(gate, rate):
    """ZeroInflatedPoisson must match the equivalent enumerated mixture of a
    Delta-at-zero and a Poisson (via MaskedMixture) in loss and gradients."""
    # test for ZIP distribution
    def zip_model(data):
        gate = pyro.param("gate")
        rate = pyro.param("rate")
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.ZeroInflatedPoisson(gate, rate), obs=data)
    def composite_model(data):
        gate = pyro.param("gate")
        rate = pyro.param("rate")
        # dist1 = point mass at zero (the "inflated" component),
        # dist0 = the Poisson component; `mask` picks per-datum.
        dist1 = dist.Delta(torch.tensor(0.))
        dist0 = dist.Poisson(rate)
        with pyro.plate("data", len(data)):
            mask = pyro.sample("mask", dist.Bernoulli(gate), infer={"enumerate": "parallel"}).byte()
            pyro.sample("obs", dist.MaskedMixture(mask, dist0, dist1), obs=data)
    def guide(data):
        pass
    gate = pyro.param("gate", torch.tensor(gate), constraint=constraints.unit_interval)
    rate = pyro.param("rate", torch.tensor(rate), constraint=constraints.positive)
    data = torch.tensor([0., 1., 2.])
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    zip_loss = elbo.differentiable_loss(zip_model, guide, data)
    composite_loss = elbo.differentiable_loss(composite_model, guide, data)
    _check_loss_and_grads(zip_loss, composite_loss)
@pytest.mark.parametrize("mixture,scale", [
    (dist.MixtureOfDiagNormals, [[2., 1.], [1., 2], [4., 4.]]),
    (dist.MixtureOfDiagNormalsSharedCovariance, [2., 1.]),
])
def test_mixture_of_diag_normals(mixture, scale):
    """Fused mixture distributions must match a hand-enumerated Categorical+Normal model."""
    # K = 3, D = 2
    pyro.param("locs", torch.tensor([[0., 0.], [0., 1.], [0., 10.]]))
    pyro.param("coord_scale", torch.tensor(scale), constraint=constraints.positive)
    pyro.param("component_logits", torch.tensor([0., -1., 2.]))
    data = torch.tensor([[0., 0.], [1., 1.], [2., 3.], [1., 11.]])
    def auto_model():
        # Uses the built-in fused mixture distribution directly.
        locs = pyro.param("locs")
        coord_scale = pyro.param("coord_scale")
        component_logits = pyro.param("component_logits")
        with pyro.plate("data", len(data)):
            pyro.sample("obs", mixture(locs, coord_scale, component_logits), obs=data)
    def hand_model():
        # Enumerates the component index and masks out non-selected components.
        locs = pyro.param("locs")
        coord_scale = pyro.param("coord_scale")
        component_logits = pyro.param("component_logits")
        with pyro.plate("data", len(data), dim=-2):
            which = pyro.sample("mask", dist.Categorical(logits=component_logits),
                               infer={"enumerate": "parallel"})
            with pyro.plate("components", len(component_logits), dim=-1) as component_ind:
                with poutine.mask(mask=(which == component_ind)):
                    pyro.sample("obs", dist.Normal(locs, coord_scale).independent(1),
                                obs=data.unsqueeze(-2))
    def guide():
        pass
    elbo = TraceEnum_ELBO(max_plate_nesting=2, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize("Dist, prior", [
    (dist.Bernoulli, 0.2),
    (dist.Categorical, [0.2, 0.8]),
    (dist.Categorical, [0.2, 0.3, 0.5]),
    (dist.Categorical, [0.2, 0.3, 0.3, 0.2]),
    (dist.OneHotCategorical, [0.2, 0.8]),
    (dist.OneHotCategorical, [0.2, 0.3, 0.5]),
    (dist.OneHotCategorical, [0.2, 0.3, 0.3, 0.2]),
])
def test_compute_marginals_single(Dist, prior):
    """compute_marginals yields the exact posterior for a single discrete latent."""
    prior = torch.tensor(prior)
    data = torch.tensor([0., 0.1, 0.2, 0.9, 1.0, 1.1])
    @config_enumerate(default="parallel")
    def model():
        locs = torch.tensor([-1., 0., 1., 2.])
        x = pyro.sample("x", Dist(prior))
        # Normalize the sample to an integer index into `locs` for each family.
        if Dist is dist.Bernoulli:
            x = x.long()
        elif Dist is dist.OneHotCategorical:
            x = x.max(-1)[1]
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.Normal(locs[x], 1.), obs=data)
    # First compute marginals using an empty guide.
    def empty_guide():
        pass
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    marginals = elbo.compute_marginals(model, empty_guide)
    assert len(marginals) == 1
    assert type(marginals["x"]) is Dist
    probs = marginals["x"].probs
    assert probs.shape == prior.shape
    # Next insert the computed marginals in an enumerating guide
    # and ensure that they are exact, or at least locally optimal.
    pyro.param("probs", probs)
    @config_enumerate(default="parallel")
    def exact_guide():
        probs = pyro.param("probs")
        pyro.sample("x", Dist(probs))
    # At the exact posterior the ELBO gradient w.r.t. the guide params is zero.
    loss = elbo.differentiable_loss(model, exact_guide)
    assert_equal(grad(loss, [pyro.param("probs")])[0], torch.zeros_like(probs))
@pytest.mark.parametrize('ok,enumerate_guide,num_particles,vectorize_particles', [
    (True, None, 1, False),
    (False, "sequential", 1, False),
    (False, "parallel", 1, False),
    (False, None, 2, False),
    (False, None, 2, True),
])
def test_compute_marginals_restrictions(ok, enumerate_guide, num_particles, vectorize_particles):
    """compute_marginals is only supported with a non-enumerating guide and one particle."""
    @config_enumerate(default="parallel")
    def model():
        w = pyro.sample("w", dist.Bernoulli(0.1))
        x = pyro.sample("x", dist.Bernoulli(0.2))
        y = pyro.sample("y", dist.Bernoulli(0.3))
        z = pyro.sample("z", dist.Bernoulli(0.4))
        pyro.sample("obs", dist.Normal(0., 1.), obs=w + x + y + z)
    @config_enumerate(default=enumerate_guide)
    def guide():
        pyro.sample("w", dist.Bernoulli(0.4))
        pyro.sample("y", dist.Bernoulli(0.7))
    # Check that the ELBO works fine.
    elbo = TraceEnum_ELBO(max_plate_nesting=0,
                          num_particles=num_particles,
                          vectorize_particles=vectorize_particles)
    loss = elbo.loss(model, guide)
    assert not torch_isnan(loss)
    if ok:
        # Only model-side latents not appearing in the guide get marginals.
        marginals = elbo.compute_marginals(model, guide)
        assert set(marginals.keys()) == {"x", "z"}
    else:
        with pytest.raises(NotImplementedError, match="compute_marginals"):
            elbo.compute_marginals(model, guide)
@pytest.mark.parametrize('size', [1, 2, 3, 4, 10, 20, _skip_cuda(30)])
def test_compute_marginals_hmm(size):
    """Marginals in a two-state HMM interpolate monotonically between observed endpoints."""
    @config_enumerate(default="parallel")
    def model(data):
        transition_probs = torch.tensor([[0.75, 0.25], [0.25, 0.75]])
        emission_probs = torch.tensor([[0.75, 0.25], [0.25, 0.75]])
        x = torch.tensor(0)
        # pyro.markov lets enumeration reuse dimensions along the chain.
        for i in pyro.markov(range(len(data) + 1)):
            if i < len(data):
                x = pyro.sample("x_{}".format(i), dist.Categorical(transition_probs[x]))
                pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=data[i])
            else:
                # Pin the final (extra) state to 1.
                pyro.sample("x_{}".format(i), dist.Categorical(transition_probs[x]),
                            obs=torch.tensor(1))
    def guide(data):
        pass
    data = torch.zeros(size, dtype=torch.long)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    marginals = elbo.compute_marginals(model, guide, data)
    assert set(marginals.keys()) == {"x_{}".format(i) for i in range(size)}
    for i in range(size):
        d = marginals["x_{}".format(i)]
        assert d.batch_shape == ()
    # The x's should be monotonically increasing, since we've observed x[-1]==0
    # and x[size]==1, and since the y's are constant.
    for i in range(size - 1):
        d1 = marginals["x_{}".format(i)]
        d2 = marginals["x_{}".format(i + 1)]
        assert d1.probs[0] > d2.probs[0]
        assert d1.probs[1] < d2.probs[1]
@pytest.mark.parametrize("data", [
    [None, None],
    [torch.tensor(0.), None],
    [None, torch.tensor(0.)],
    [torch.tensor(0.), torch.tensor(0)],
])
def test_backwardsample_posterior_smoke(data):
    """sample_posterior returns observed values verbatim and scalar samples for latents."""
    @config_enumerate(default="parallel")
    def model(data):
        xs = list(data)
        zs = []
        for i in range(2):
            K = i + 2  # number of mixture components
            zs.append(pyro.sample("z_{}".format(i),
                                  dist.Categorical(torch.ones(K))))
            if i == 0:
                loc = pyro.param("loc", torch.randn(K))[zs[i]]
                xs[i] = pyro.sample("x_{}".format(i),
                                    dist.Normal(loc, 1.), obs=data[i])
            elif i == 1:
                logits = pyro.param("logits", torch.randn(K, 2))[zs[i]]
                xs[i] = pyro.sample("x_{}".format(i),
                                    dist.Categorical(logits=logits),
                                    obs=data[i])
        # Couples the two latents through a deterministic observed site.
        z12 = zs[0] + 2 * zs[1]
        pyro.sample("z_12", dist.Categorical(torch.arange(6.)), obs=z12)
        return xs, zs
    def guide(data):
        pass
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    xs, zs = elbo.sample_posterior(model, guide, data)
    for x, datum in zip(xs, data):
        # Observed data must be passed through unchanged (same object).
        assert datum is None or datum is x
    for z in zs:
        assert z.shape == ()
def test_backwardsample_posterior_2():
    """Posterior samples of x reflect the likelihood: P(z==x's preferred value) ~ 0.9."""
    num_particles = 10000
    @config_enumerate(default="parallel")
    def model(data):
        with pyro.plate("particles", num_particles):
            p_z = torch.tensor([0.1, 0.9])
            x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5])))
            z = pyro.sample("z", dist.Bernoulli(p_z[x]), obs=data)
        return x, z
    def guide(data):
        pass
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    x, z = elbo.sample_posterior(model, guide, data=torch.zeros(num_particles))
    # With z==0 observed, the posterior puts 0.9 mass on x==0 (where p_z[x]=0.1).
    expected = 0.9
    actual = (x.type_as(z) == z).float().mean().item()
    assert abs(expected - actual) < 0.05
def test_backwardsample_posterior_3():
    """Posterior correlations between two latents respond correctly to the observation."""
    num_particles = 10000
    @config_enumerate(default="parallel")
    def model(data):
        with pyro.plate("particles", num_particles):
            # z is likely 1 when x == y, likely 0 when x != y.
            p_z = torch.tensor([[0.9, 0.1], [0.1, 0.9]])
            x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5])))
            y = pyro.sample("y", dist.Categorical(torch.tensor([0.5, 0.5])))
            z = pyro.sample("z", dist.Bernoulli(p_z[x, y]), obs=data)
        return x, y, z
    def guide(data):
        pass
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    # Observing z==1 should make x==y likely...
    x, y, z = elbo.sample_posterior(model, guide, data=torch.ones(num_particles))
    expected = 0.9
    actual = (x == y).float().mean().item()
    assert abs(expected - actual) < 0.05
    # ...and observing z==0 should make x==y unlikely.
    x, y, z = elbo.sample_posterior(model, guide, data=torch.zeros(num_particles))
    expected = 0.1
    actual = (x == y).float().mean().item()
    assert abs(expected - actual) < 0.05
@pytest.mark.parametrize('ok,enumerate_guide,num_particles,vectorize_particles', [
    (True, None, 1, False),
    (False, "sequential", 1, False),
    (False, "parallel", 1, False),
    (False, None, 2, False),
    (False, None, 2, True),
])
def test_backwardsample_posterior_restrictions(ok, enumerate_guide, num_particles, vectorize_particles):
    """sample_posterior is only supported with a non-enumerating guide and one particle."""
    @config_enumerate(default="parallel")
    def model():
        w = pyro.sample("w", dist.Bernoulli(0.1))
        x = pyro.sample("x", dist.Bernoulli(0.2))
        y = pyro.sample("y", dist.Bernoulli(0.3))
        z = pyro.sample("z", dist.Bernoulli(0.4))
        pyro.sample("obs", dist.Normal(0., 1.), obs=w + x + y + z)
        return w, x, y, z
    @config_enumerate(default=enumerate_guide)
    def guide():
        pyro.sample("w", dist.Bernoulli(0.4))
        pyro.sample("y", dist.Bernoulli(0.7))
    # Check that the ELBO works fine.
    elbo = TraceEnum_ELBO(max_plate_nesting=0,
                          num_particles=num_particles,
                          vectorize_particles=vectorize_particles)
    loss = elbo.loss(model, guide)
    assert not torch_isnan(loss)
    if ok:
        w, x, y, z = elbo.sample_posterior(model, guide)
        assert w.shape == ()
        assert x.shape == ()
        assert y.shape == ()
        assert z.shape == ()
    else:
        with pytest.raises(NotImplementedError, match="sample_posterior"):
            elbo.sample_posterior(model, guide)
|
# ==== file boundary: tests/infer/test_enum.py ====
from __future__ import absolute_import, division, print_function
import logging
import math
import os
import timeit
from collections import defaultdict
import pytest
import torch
from torch.autograd import grad
from torch.distributions import constraints, kl_divergence
import pyro
import pyro.distributions as dist
import pyro.optim
import pyro.poutine as poutine
from pyro.distributions.testing.rejection_gamma import ShapeAugmentedGamma
from pyro.infer import SVI, config_enumerate
from pyro.infer.enum import iter_discrete_traces
from pyro.infer.traceenum_elbo import TraceEnum_ELBO
from pyro.infer.util import LAST_CACHE_SIZE
from pyro.util import torch_isnan
from tests.common import assert_equal, skipif_param
try:
from contextlib import ExitStack # python 3
except ImportError:
from contextlib2 import ExitStack # python 2
logger = logging.getLogger(__name__)
def _skip_cuda(*args):
    """Wrap parametrize value(s) so they are skipped when running under CUDA tests."""
    # The CUDA_TEST environment flag marks CUDA CI runs; see the linked issue.
    running_cuda_tests = "CUDA_TEST" in os.environ
    return skipif_param(
        *args,
        condition=running_cuda_tests,
        reason="https://github.com/uber/pyro/issues/1380",
    )
@pytest.mark.parametrize("depth", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
def test_iter_discrete_traces_order(depth, graph_type):
    """Enumeration covers all 2**depth branches and keeps sites in program order."""
    @config_enumerate
    def model(depth):
        for i in range(depth):
            pyro.sample("x{}".format(i), dist.Bernoulli(0.5))
    all_traces = list(iter_discrete_traces(graph_type, model, depth))
    # Each of the `depth` Bernoulli sites doubles the number of traces.
    assert len(all_traces) == 2 ** depth
    expected_sites = ["x{}".format(i) for i in range(depth)]
    for tr in all_traces:
        observed_sites = []
        for name, site in tr.nodes.items():
            if site["type"] == "sample":
                observed_sites.append(name)
        assert observed_sites == expected_sites
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
def test_iter_discrete_traces_scalar(graph_type):
    """Enumerating a Bernoulli and a Categorical yields 2 * num_categories traces."""
    pyro.clear_param_store()
    @config_enumerate
    def model():
        p = pyro.param("p", torch.tensor(0.05))
        probs = pyro.param("probs", torch.tensor([0.1, 0.2, 0.3, 0.4]))
        x = pyro.sample("x", dist.Bernoulli(p))
        y = pyro.sample("y", dist.Categorical(probs))
        return dict(x=x, y=y)
    traces = list(iter_discrete_traces(graph_type, model))
    probs = pyro.param("probs")
    # One trace per joint assignment of (x, y).
    assert len(traces) == 2 * len(probs)
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("expand", [False, True])
def test_iter_discrete_traces_vector(expand, graph_type):
    """Inside a plate, enumeration happens over values, not over plate elements."""
    pyro.clear_param_store()
    @config_enumerate(expand=expand)
    def model():
        p = pyro.param("p", torch.tensor([0.05, 0.15]))
        probs = pyro.param("probs", torch.tensor([[0.1, 0.2, 0.3, 0.4],
                                                  [0.4, 0.3, 0.2, 0.1]]))
        with pyro.plate("plate", 2):
            x = pyro.sample("x", dist.Bernoulli(p))
            y = pyro.sample("y", dist.Categorical(probs))
        # With expand=True enumerated values are broadcast to the plate size;
        # with expand=False they stay as a single unexpanded slice.
        if expand:
            assert x.size() == (2,)
            assert y.size() == (2,)
        else:
            assert x.shape == (1,)
            assert y.shape == (1,)
        return dict(x=x, y=y)
    traces = list(iter_discrete_traces(graph_type, model))
    probs = pyro.param("probs")
    # 2 Bernoulli values * num_categories, independent of the plate size.
    assert len(traces) == 2 * probs.size(-1)
# The usual dist.Bernoulli avoids NANs by clamping log prob. This unsafe version
# allows us to test additional NAN avoidance in _compute_dice_elbo().
class UnsafeBernoulli(dist.Bernoulli):
    """Bernoulli whose log_prob is computed without clamping, so it can emit -inf/NAN."""
    def log_prob(self, value):
        # Pick log(1-p) for value==0 and log(p) for value==1 by indexing a stacked
        # table; no clamping, so p in {0, 1} yields -inf for the impossible value.
        i = value.long()
        j = torch.arange(len(self.probs), dtype=torch.long)
        return torch.stack([(-self.probs).log1p(), self.probs.log()])[i, j]
@pytest.mark.parametrize('sample_shape', [(), (2,), (3, 4)])
def test_unsafe_bernoulli(sample_shape):
    """UnsafeBernoulli agrees with the safe Bernoulli on in-support samples."""
    logits = torch.randn(10)
    safe = dist.Bernoulli(logits=logits)
    unsafe = UnsafeBernoulli(logits=logits)
    draw = safe.sample(sample_shape)
    assert_equal(safe.log_prob(draw), unsafe.log_prob(draw))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_avoid_nan(enumerate1):
    """The ELBO must stay finite even when site log-probs contain -inf (p in {0,1})."""
    pyro.clear_param_store()
    def model():
        p = torch.tensor([0.0, 0.5, 1.0])
        with pyro.plate("batch", 3):
            pyro.sample("z", UnsafeBernoulli(p))
    @config_enumerate(default=enumerate1)
    def guide():
        p = pyro.param("p", torch.tensor([0.0, 0.5, 1.0], requires_grad=True))
        with pyro.plate("batch", 3):
            pyro.sample("z", UnsafeBernoulli(p))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    # All three loss entry points must avoid producing NAN.
    loss = elbo.loss(model, guide)
    assert not math.isnan(loss), loss
    loss = elbo.differentiable_loss(model, guide)
    assert not torch_isnan(loss), loss
    loss = elbo.loss_and_grads(model, guide)
    assert not math.isnan(loss), loss
# A simple Gaussian mixture model, with no vectorization.
def gmm_model(data, verbose=False):
    """Two-component Gaussian mixture model with one Bernoulli site per datum (no vectorization)."""
    p = pyro.param("p", torch.tensor(0.3, requires_grad=True))
    scale = pyro.param("scale", torch.tensor(1.0, requires_grad=True))
    mus = torch.tensor([-1.0, 1.0])
    for i in pyro.plate("data", len(data)):
        # z selects the mixture component for datum i.
        z = pyro.sample("z_{}".format(i), dist.Bernoulli(p))
        z = z.long()
        if verbose:
            logger.debug("M{} z_{} = {}".format(" " * i, i, z.cpu().numpy()))
        pyro.sample("x_{}".format(i), dist.Normal(mus[z], scale), obs=data[i])
def gmm_guide(data, verbose=False):
    """Mean-field guide for gmm_model: one learned Bernoulli site per datum."""
    for i in pyro.plate("data", len(data)):
        prob = pyro.param("p_{}".format(i), torch.tensor(0.6, requires_grad=True))
        assignment = pyro.sample("z_{}".format(i), dist.Bernoulli(prob)).long()
        if verbose:
            logger.debug("G{} z_{} = {}".format(" " * i, i, assignment.cpu().numpy()))
@pytest.mark.parametrize("data_size", [1, 2, 3])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("model", [gmm_model, gmm_guide])
def test_gmm_iter_discrete_traces(data_size, graph_type, model):
    """Sequential (non-vectorized) GMM enumeration produces 2**data_size traces."""
    pyro.clear_param_store()
    data = torch.arange(0., float(data_size))
    model = config_enumerate(model)
    traces = list(iter_discrete_traces(graph_type, model, data=data, verbose=True))
    # This non-vectorized version is exponential in data_size:
    assert len(traces) == 2**data_size
# A Gaussian mixture model, with vectorized batching.
def gmm_batch_model(data):
    """Two-component Gaussian mixture model with a single vectorized one-hot site."""
    p = pyro.param("p", torch.tensor([0.3], requires_grad=True))
    # Expand the single free parameter into a two-component simplex.
    p = torch.cat([p, 1 - p])
    scale = pyro.param("scale", torch.tensor([1.0], requires_grad=True))
    mus = torch.tensor([-1.0, 1.0])
    with pyro.plate("data", len(data)) as batch:
        n = len(batch)
        z = pyro.sample("z", dist.OneHotCategorical(p).expand_by([n]))
        assert z.shape[-1] == 2
        # One-hot z picks the component mean for each datum.
        loc = (z * mus).sum(-1)
        pyro.sample("x", dist.Normal(loc, scale.expand(n)), obs=data[batch])
def gmm_batch_guide(data):
    """Vectorized guide for gmm_batch_model with per-datum assignment probabilities."""
    with pyro.plate("data", len(data)) as batch:
        n = len(batch)
        probs = pyro.param("probs", torch.ones(n, 1) * 0.6)
        # Expand the single free column into a two-column simplex per datum.
        probs = torch.cat([probs, 1 - probs], dim=1)
        z = pyro.sample("z", dist.OneHotCategorical(probs))
        assert z.shape[-1] == 2
@pytest.mark.parametrize("data_size", [1, 2, 3])
@pytest.mark.parametrize("graph_type", ["flat", "dense"])
@pytest.mark.parametrize("model", [gmm_batch_model, gmm_batch_guide])
def test_gmm_batch_iter_discrete_traces(model, data_size, graph_type):
    """Vectorized GMM enumeration yields one trace per component, regardless of data size."""
    pyro.clear_param_store()
    data = torch.arange(0., float(data_size))
    model = config_enumerate(model)
    traces = list(iter_discrete_traces(graph_type, model, data=data))
    # This vectorized version is independent of data_size:
    assert len(traces) == 2
@pytest.mark.parametrize("model,guide", [
    (gmm_model, gmm_guide),
    (gmm_batch_model, gmm_batch_guide),
], ids=["single", "batch"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_svi_step_smoke(model, guide, enumerate1):
    """A single SVI step runs without error for every enumeration strategy."""
    pyro.clear_param_store()
    data = torch.tensor([0.0, 1.0, 9.0])
    enumerating_guide = config_enumerate(guide, default=enumerate1)
    adam = pyro.optim.Adam({"lr": .001})
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    svi = SVI(model, enumerating_guide, adam, loss=elbo)
    svi.step(data)
@pytest.mark.parametrize("model,guide", [
    (gmm_model, gmm_guide),
    (gmm_batch_model, gmm_batch_guide),
], ids=["single", "batch"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_differentiable_loss(model, guide, enumerate1):
    """differentiable_loss agrees with loss_and_grads on both the loss and all gradients."""
    pyro.clear_param_store()
    data = torch.tensor([0.0, 1.0, 9.0])
    guide = config_enumerate(guide, default=enumerate1)
    elbo = TraceEnum_ELBO(max_plate_nesting=1,
                          strict_enumeration_warning=any([enumerate1]))
    # Fix the RNG seed so both code paths see identical samples.
    pyro.set_rng_seed(0)
    loss = elbo.differentiable_loss(model, guide, data)
    param_names = sorted(pyro.get_param_store().get_all_param_names())
    actual_loss = loss.item()
    actual_grads = grad(loss, [pyro.param(name).unconstrained() for name in param_names])
    pyro.set_rng_seed(0)
    expected_loss = elbo.loss_and_grads(model, guide, data)
    expected_grads = [pyro.param(name).unconstrained().grad for name in param_names]
    assert_equal(actual_loss, expected_loss)
    for name, actual_grad, expected_grad in zip(param_names, actual_grads, expected_grads):
        assert_equal(actual_grad, expected_grad, msg='bad {} gradient. Expected:\n{}\nActual:\n{}'.format(
            name, expected_grad, actual_grad))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_svi_step_guide_uses_grad(enumerate1):
    """SVI must tolerate a guide that itself calls torch.autograd.grad (amortized Newton step)."""
    data = torch.tensor([0., 1., 3.])
    def model():
        scale = pyro.param("scale")
        loc = pyro.sample("loc", dist.Normal(0., 10.))
        pyro.sample("b", dist.Bernoulli(0.5))
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.Normal(loc, scale), obs=data)
    @config_enumerate(default=enumerate1)
    def guide():
        p = pyro.param("p", torch.tensor(0.5), constraint=constraints.unit_interval)
        scale = pyro.param("scale", torch.tensor(1.0), constraint=constraints.positive)
        var = pyro.param("var", torch.tensor(1.0), constraint=constraints.positive)
        # Compute the MAP estimate of loc via one Newton step inside the guide.
        x = torch.tensor(0., requires_grad=True)
        prior = dist.Normal(0., 10.).log_prob(x)
        likelihood = dist.Normal(x, scale).log_prob(data).sum()
        loss = -(prior + likelihood)
        g = grad(loss, [x], create_graph=True)[0]
        H = grad(g, [x], create_graph=True)[0]
        loc = x.detach() - g / H  # newton step
        pyro.sample("loc", dist.Normal(loc, var))
        pyro.sample("b", dist.Bernoulli(p))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    inference = SVI(model, guide, pyro.optim.Adam({}), elbo)
    inference.step()
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize("method", ["loss", "differentiable_loss", "loss_and_grads"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_elbo_bern(method, enumerate1, scale):
    """ELBO for a single Bernoulli equals the analytic scaled KL (exact when enumerating)."""
    pyro.clear_param_store()
    # Without enumeration, fall back to many Monte Carlo particles + loose tolerance.
    num_particles = 1 if enumerate1 else 10000
    prec = 0.001 if enumerate1 else 0.2
    q = pyro.param("q", torch.tensor(0.5, requires_grad=True))
    kl = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(0.25))
    @poutine.scale(scale=scale)
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(0.25).expand_by([num_particles]))
    @config_enumerate(default=enumerate1)
    @poutine.scale(scale=scale)
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(q).expand_by([num_particles]))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    if method == "loss":
        actual = elbo.loss(model, guide) / num_particles
        expected = kl.item() * scale
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected),
            "\n  actual = {}".format(actual),
        ]))
    else:
        if method == "differentiable_loss":
            loss = elbo.differentiable_loss(model, guide)
            actual = grad(loss, [q])[0] / num_particles
        elif method == "loss_and_grads":
            elbo.loss_and_grads(model, guide)
            actual = q.grad / num_particles
        expected = grad(kl, [q])[0] * scale
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected.detach().cpu().numpy()),
            "\n  actual = {}".format(actual.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize("method", ["loss", "differentiable_loss", "loss_and_grads"])
@pytest.mark.parametrize("enumerate1", [None, "parallel"])
def test_elbo_normal(method, enumerate1):
    """ELBO for a Normal guide matches the analytic KL, using multi-sample enumeration."""
    pyro.clear_param_store()
    num_particles = 1 if enumerate1 else 10000
    prec = 0.01
    q = pyro.param("q", torch.tensor(1., requires_grad=True))
    kl = kl_divergence(dist.Normal(q, 1.), dist.Normal(0., 1.))
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Normal(0., 1.).expand_by([num_particles]))
    # "parallel" + num_samples draws Monte Carlo samples for this continuous site.
    @config_enumerate(default=enumerate1, num_samples=20000)
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Normal(q, 1.).expand_by([num_particles]))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    if method == "loss":
        actual = elbo.loss(model, guide) / num_particles
        expected = kl.item()
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected),
            "\n  actual = {}".format(actual),
        ]))
    else:
        if method == "differentiable_loss":
            loss = elbo.differentiable_loss(model, guide)
            actual = grad(loss, [q])[0] / num_particles
        elif method == "loss_and_grads":
            elbo.loss_and_grads(model, guide)
            actual = q.grad / num_particles
        expected = grad(kl, [q])[0]
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected.detach().cpu().numpy()),
            "\n  actual = {}".format(actual.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize("enumerate1,num_samples1", [
    (None, None),
    ("sequential", None),
    ("parallel", None),
    ("parallel", 300),
])
@pytest.mark.parametrize("enumerate2,num_samples2", [
    (None, None),
    ("sequential", None),
    ("parallel", None),
    ("parallel", 300),
])
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_bern_bern(method, enumerate1, enumerate2, num_samples1, num_samples2):
    """Mixed enumeration/sampling over two Bernoulli sites still matches the analytic KL sum."""
    pyro.clear_param_store()
    if enumerate1 and enumerate2 and num_samples1 is None and num_samples2 is None:
        # Fully exhaustive enumeration: exact, single particle.
        num_particles = 1
        prec = 0.001
    else:
        # Budget particles so that total samples stays roughly constant across cases.
        num_particles = 2 * 300 * 300
        for n in [num_samples1, num_samples2]:
            if n is not None:
                num_particles = num_particles // n
        prec = 0.2
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    def model():
        pyro.sample("x1", dist.Bernoulli(0.2))
        pyro.sample("x2", dist.Bernoulli(0.4))
    def guide():
        q = pyro.param("q")
        pyro.sample("x1", dist.Bernoulli(q), infer={"enumerate": enumerate1, "num_samples": num_samples1})
        pyro.sample("x2", dist.Bernoulli(q), infer={"enumerate": enumerate2, "num_samples": num_samples2})
    kl = sum(kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p)) for p in [0.2, 0.4])
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(num_particles=num_particles,
                          vectorize_particles=True,
                          strict_enumeration_warning=any([enumerate1, enumerate2]))
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad
    assert_equal(actual_loss, expected_loss, prec=prec, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=prec, msg="".join([
        "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_berns(method, enumerate1, enumerate2, enumerate3):
    """Per-site enumeration choices over three Bernoullis all match the analytic KL sum."""
    pyro.clear_param_store()
    # Exact (one particle, tight tolerance) only when every site is enumerated.
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 10000
    prec = 0.001 if all([enumerate1, enumerate2, enumerate3]) else 0.1
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    def model():
        pyro.sample("x1", dist.Bernoulli(0.1))
        pyro.sample("x2", dist.Bernoulli(0.2))
        pyro.sample("x3", dist.Bernoulli(0.3))
    def guide():
        q = pyro.param("q")
        pyro.sample("x1", dist.Bernoulli(q), infer={"enumerate": enumerate1})
        pyro.sample("x2", dist.Bernoulli(q), infer={"enumerate": enumerate2})
        pyro.sample("x3", dist.Bernoulli(q), infer={"enumerate": enumerate3})
    kl = sum(kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p)) for p in [0.1, 0.2, 0.3])
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(num_particles=num_particles,
                          vectorize_particles=True,
                          strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad
    assert_equal(actual_loss, expected_loss, prec=prec, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=prec, msg="".join([
        "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("max_plate_nesting", [0, 1])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", ["sequential", "parallel"])
@pytest.mark.parametrize("enumerate3", ["sequential", "parallel"])
def test_elbo_categoricals(enumerate1, enumerate2, enumerate3, max_plate_nesting):
    """Enumerated Categoricals of different sizes match the analytic KL sum and gradients."""
    pyro.clear_param_store()
    p1 = torch.tensor([0.6, 0.4])
    p2 = torch.tensor([0.3, 0.3, 0.4])
    p3 = torch.tensor([0.1, 0.2, 0.3, 0.4])
    q1 = pyro.param("q1", torch.tensor([0.4, 0.6], requires_grad=True))
    q2 = pyro.param("q2", torch.tensor([0.4, 0.3, 0.3], requires_grad=True))
    q3 = pyro.param("q3", torch.tensor([0.4, 0.3, 0.2, 0.1], requires_grad=True))
    def model():
        pyro.sample("x1", dist.Categorical(p1))
        pyro.sample("x2", dist.Categorical(p2))
        pyro.sample("x3", dist.Categorical(p3))
    def guide():
        pyro.sample("x1", dist.Categorical(pyro.param("q1")), infer={"enumerate": enumerate1})
        pyro.sample("x2", dist.Categorical(pyro.param("q2")), infer={"enumerate": enumerate2})
        pyro.sample("x3", dist.Categorical(pyro.param("q3")), infer={"enumerate": enumerate3})
    kl = (kl_divergence(dist.Categorical(q1), dist.Categorical(p1)) +
          kl_divergence(dist.Categorical(q2), dist.Categorical(p2)) +
          kl_divergence(dist.Categorical(q3), dist.Categorical(p3)))
    expected_loss = kl.item()
    expected_grads = grad(kl, [q1, q2, q3])
    elbo = TraceEnum_ELBO(max_plate_nesting=max_plate_nesting,
                          strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide)
    actual_grads = [q1.grad, q2.grad, q3.grad]
    assert_equal(actual_loss, expected_loss, prec=0.001, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    for actual_grad, expected_grad in zip(actual_grads, expected_grads):
        assert_equal(actual_grad, expected_grad, prec=0.001, msg="".join([
            "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
            "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize("enumerate1", [None, "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "parallel"])
@pytest.mark.parametrize("enumerate3", [None, "parallel"])
@pytest.mark.parametrize("method", ["differentiable_loss", "loss_and_grads"])
def test_elbo_normals(method, enumerate1, enumerate2, enumerate3):
    """Multi-sample 'enumeration' of continuous sites still matches the analytic KL sum."""
    pyro.clear_param_store()
    # Grow the particle budget tenfold for each site that is not sampled in parallel.
    num_particles = 100 * 10 ** sum(1 for e in [enumerate1, enumerate2, enumerate3] if not e)
    prec = 0.1
    q = pyro.param("q", torch.tensor(0.0, requires_grad=True))
    def model():
        pyro.sample("x1", dist.Normal(0.25, 1.))
        pyro.sample("x2", dist.Normal(0.5, 1.))
        pyro.sample("x3", dist.Normal(1., 1.))
    def guide():
        q = pyro.param("q")
        pyro.sample("x1", dist.Normal(q, 1.), infer={"enumerate": enumerate1, "num_samples": 10})
        pyro.sample("x2", dist.Normal(q, 1.), infer={"enumerate": enumerate2, "num_samples": 10})
        pyro.sample("x3", dist.Normal(q, 1.), infer={"enumerate": enumerate3, "num_samples": 10})
    kl = sum(kl_divergence(dist.Normal(q, 1.), dist.Normal(p, 1.)) for p in [0.25, 0.5, 1.])
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(num_particles=num_particles,
                          vectorize_particles=True,
                          strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    if method == "differentiable_loss":
        loss = elbo.differentiable_loss(model, guide)
        actual_loss = loss.item()
        actual_grad = grad(loss, [q])[0]
    else:
        actual_loss = elbo.loss_and_grads(model, guide)
        actual_grad = q.grad
    assert_equal(actual_loss, expected_loss, prec=prec, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=prec, msg="".join([
        "\nexpected grads = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grads = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("plate_dim", [1, 2])
def test_elbo_plate(plate_dim, enumerate1, enumerate2):
    """A site inside a vectorized plate contributes plate_dim copies of its KL to the ELBO."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2]) else 10000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("y", dist.Bernoulli(p).expand_by([num_particles]))
            with pyro.plate("plate", plate_dim):
                pyro.sample("z", dist.Bernoulli(p).expand_by([plate_dim, num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("y", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            with pyro.plate("plate", plate_dim):
                pyro.sample("z", dist.Bernoulli(q).expand_by([plate_dim, num_particles]),
                            infer={"enumerate": enumerate2})
    # One copy of the KL for y, plate_dim copies for z.
    kl = (1 + plate_dim) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("plate_dim", [1, 2])
def test_elbo_iplate(plate_dim, enumerate1, enumerate2):
    """Sites inside a sequential plate contribute plate_dim copies of their KL to the ELBO."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2]) else 20000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            # Sequential plate: one distinct site per iteration.
            for i in pyro.plate("plate", plate_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            for i in pyro.plate("plate", plate_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]),
                            infer={"enumerate": enumerate2})
    kl = (1 + plate_dim) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate4", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [2])
def test_elbo_plate_plate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3, enumerate4):
    """Check loss and q-gradient with two vectorized plates (dims -1 and -2),
    with sites outside, inside each, and inside both plates, against the
    analytic (1 + outer + inner + outer*inner) * KL(Bern(q) || Bern(p))."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3, enumerate4]) else 100000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        d = dist.Bernoulli(p)
        context1 = pyro.plate("outer", outer_dim, dim=-1)
        context2 = pyro.plate("inner", inner_dim, dim=-2)
        pyro.sample("w", d)
        with context1:
            pyro.sample("x", d)
        with context2:
            pyro.sample("y", d)
        with context1, context2:
            pyro.sample("z", d)
    def guide():
        d = dist.Bernoulli(pyro.param("q"))
        context1 = pyro.plate("outer", outer_dim, dim=-1)
        context2 = pyro.plate("inner", inner_dim, dim=-2)
        pyro.sample("w", d, infer={"enumerate": enumerate1})
        with context1:
            pyro.sample("x", d, infer={"enumerate": enumerate2})
        with context2:
            pyro.sample("y", d, infer={"enumerate": enumerate3})
        with context1, context2:
            pyro.sample("z", d, infer={"enumerate": enumerate4})
    kl_node = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    # One term per sample site instance: w (1), x (outer), y (inner), z (outer*inner).
    kl = (1 + outer_dim + inner_dim + outer_dim * inner_dim) * kl_node
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    # vectorize_particles=True: the ELBO already averages over particles,
    # so no division by num_particles below.
    elbo = TraceEnum_ELBO(num_particles=num_particles,
                          vectorize_particles=True,
                          strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide)
    actual_grad = pyro.param('q').grad
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [3])
def test_elbo_plate_iplate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3):
    """Check loss and q-gradient with a vectorized outer plate containing a
    sequential inner plate (iplate), against the analytic
    (1 + outer * (1 + inner)) * KL(Bern(q) || Bern(p))."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 100000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            with pyro.plate("outer", outer_dim):
                pyro.sample("y", dist.Bernoulli(p).expand_by([outer_dim, num_particles]))
                for i in pyro.plate("inner", inner_dim):
                    pyro.sample("z_{}".format(i), dist.Bernoulli(p).expand_by([outer_dim, num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            with pyro.plate("outer", outer_dim):
                pyro.sample("y", dist.Bernoulli(q).expand_by([outer_dim, num_particles]),
                            infer={"enumerate": enumerate2})
                for i in pyro.plate("inner", inner_dim):
                    pyro.sample("z_{}".format(i), dist.Bernoulli(q).expand_by([outer_dim, num_particles]),
                                infer={"enumerate": enumerate3})
    # One KL term for "x", plus outer_dim terms each for "y" and for every z_i.
    kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [2])
def test_elbo_iplate_plate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3):
    """Check loss and q-gradient with a sequential outer plate (iplate)
    containing a vectorized inner plate, against the analytic
    (1 + outer * (1 + inner)) * KL(Bern(q) || Bern(p))."""
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 50000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            inner_plate = pyro.plate("inner", inner_dim)
            for i in pyro.plate("outer", outer_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]))
                with inner_plate:
                    pyro.sample("z_{}".format(i), dist.Bernoulli(p).expand_by([inner_dim, num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            inner_plate = pyro.plate("inner", inner_dim)
            for i in pyro.plate("outer", outer_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]),
                            infer={"enumerate": enumerate2})
                with inner_plate:
                    pyro.sample("z_{}".format(i), dist.Bernoulli(q).expand_by([inner_dim, num_particles]),
                                infer={"enumerate": enumerate3})
    # One KL term for "x", plus for each outer iteration: one y_i and inner_dim z_i's.
    kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate3", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate2", [None, "sequential", "parallel"])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
@pytest.mark.parametrize("inner_dim", [2])
@pytest.mark.parametrize("outer_dim", [2])
def test_elbo_iplate_iplate(outer_dim, inner_dim, enumerate1, enumerate2, enumerate3):
    """Check loss and q-gradient with two nested sequential plates (iplates)
    in both model and guide, against the analytic
    (1 + outer * (1 + inner)) * KL(Bern(q) || Bern(p)).

    Fix: the model previously swapped the two iplate sizes
    (``pyro.plate("inner", outer_dim)`` iterated by a loop over
    ``pyro.plate("outer", inner_dim)``), disagreeing with the guide and with
    the analytic KL below; this went unnoticed only because both dims are
    parametrized to 2. The model now mirrors the guide exactly.
    """
    pyro.clear_param_store()
    num_particles = 1 if all([enumerate1, enumerate2, enumerate3]) else 150000
    q = pyro.param("q", torch.tensor(0.75, requires_grad=True))
    p = 0.2693204236205713  # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(p).expand_by([num_particles]))
            inner_iplate = pyro.plate("inner", inner_dim)
            for i in pyro.plate("outer", outer_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(p).expand_by([num_particles]))
                for j in inner_iplate:
                    pyro.sample("z_{}_{}".format(i, j), dist.Bernoulli(p).expand_by([num_particles]))
    def guide():
        q = pyro.param("q")
        with pyro.plate("particles", num_particles):
            pyro.sample("x", dist.Bernoulli(q).expand_by([num_particles]),
                        infer={"enumerate": enumerate1})
            inner_iplate = pyro.plate("inner", inner_dim)
            for i in pyro.plate("outer", outer_dim):
                pyro.sample("y_{}".format(i), dist.Bernoulli(q).expand_by([num_particles]),
                            infer={"enumerate": enumerate2})
                for j in inner_iplate:
                    pyro.sample("z_{}_{}".format(i, j), dist.Bernoulli(q).expand_by([num_particles]),
                                infer={"enumerate": enumerate3})
    # One KL term for "x", plus for each outer iteration: one y_i and inner_dim z_i_j's.
    kl = (1 + outer_dim * (1 + inner_dim)) * kl_divergence(dist.Bernoulli(q), dist.Bernoulli(p))
    expected_loss = kl.item()
    expected_grad = grad(kl, [q])[0]
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1, enumerate2, enumerate3]))
    actual_loss = elbo.loss_and_grads(model, guide) / num_particles
    actual_grad = pyro.param('q').grad / num_particles
    assert_equal(actual_loss, expected_loss, prec=0.1, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))
    assert_equal(actual_grad, expected_grad, prec=0.1, msg="".join([
        "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
        "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("pi1", [0.33, 0.43])
@pytest.mark.parametrize("pi2", [0.55, 0.27])
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_non_mean_field_bern_bern_elbo_gradient(enumerate1, pi1, pi2):
    """Check q1/q2 gradients for a non-mean-field Bernoulli-Bernoulli guide,
    where the guide's z depends on its sampled y, against analytic gradients
    from the KL decomposition over the two y branches."""
    pyro.clear_param_store()
    num_particles = 1 if enumerate1 else 20000
    def model():
        with pyro.plate("particles", num_particles):
            y = pyro.sample("y", dist.Bernoulli(0.33).expand_by([num_particles]))
            pyro.sample("z", dist.Bernoulli(0.55 * y + 0.10))
    def guide():
        q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True))
        q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True))
        with pyro.plate("particles", num_particles):
            y = pyro.sample("y", dist.Bernoulli(q1).expand_by([num_particles]))
            pyro.sample("z", dist.Bernoulli(q2 * y + 0.10))
    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    elbo.loss_and_grads(model, config_enumerate(guide, default=enumerate1))
    actual_grad_q1 = pyro.param('q1').grad / num_particles
    actual_grad_q2 = pyro.param('q2').grad / num_particles
    logger.info("Computing analytic gradients")
    q1 = torch.tensor(pi1, requires_grad=True)
    q2 = torch.tensor(pi2, requires_grad=True)
    # Decompose the ELBO: KL at y, plus the z-KL conditioned on each y value
    # (y=1 gives success probs q2+0.10 vs 0.55+0.10; y=0 gives 0.10 vs 0.10).
    elbo = kl_divergence(dist.Bernoulli(q1), dist.Bernoulli(0.33))
    elbo = elbo + q1 * kl_divergence(dist.Bernoulli(q2 + 0.10), dist.Bernoulli(0.65))
    elbo = elbo + (1.0 - q1) * kl_divergence(dist.Bernoulli(0.10), dist.Bernoulli(0.10))
    expected_grad_q1, expected_grad_q2 = grad(elbo, [q1, q2])
    # Enumeration is exact, so a much tighter tolerance applies.
    prec = 0.03 if enumerate1 is None else 0.001
    assert_equal(actual_grad_q1, expected_grad_q1, prec=prec, msg="".join([
        "\nq1 expected = {}".format(expected_grad_q1.data.cpu().numpy()),
        "\nq1  actual = {}".format(actual_grad_q1.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_q2, expected_grad_q2, prec=prec, msg="".join([
        "\nq2 expected = {}".format(expected_grad_q2.data.cpu().numpy()),
        "\nq2  actual = {}".format(actual_grad_q2.data.cpu().numpy()),
    ]))
@pytest.mark.parametrize("pi1", [0.33, 0.44])
@pytest.mark.parametrize("pi2", [0.55, 0.39])
@pytest.mark.parametrize("pi3", [0.22, 0.29])
@pytest.mark.parametrize("enumerate1,num_samples", [
    (None, None),
    ("sequential", None),
    ("parallel", None),
    ("parallel", 2),
])
def test_non_mean_field_bern_normal_elbo_gradient(enumerate1, pi1, pi2, pi3, num_samples):
    """Check q1/q2/q3 gradients for a non-mean-field Bernoulli-Normal pair,
    where the Normal site z depends on the sampled Bernoulli y in both model
    and guide, against analytic gradients from the branch-wise KL sum.
    NOTE(review): `num_samples` is accepted for parametrization but unused in
    the body; `include_z` is hard-wired True, keeping the else-branch dead."""
    pyro.clear_param_store()
    include_z = True
    num_particles = 10000
    def model():
        with pyro.plate("particles", num_particles):
            q3 = pyro.param("q3", torch.tensor(pi3, requires_grad=True))
            y = pyro.sample("y", dist.Bernoulli(q3).expand_by([num_particles]))
            if include_z:
                pyro.sample("z", dist.Normal(0.55 * y + q3, 1.0))
    def guide():
        q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True))
        q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True))
        with pyro.plate("particles", num_particles):
            y = pyro.sample("y", dist.Bernoulli(q1).expand_by([num_particles]), infer={"enumerate": enumerate1})
            if include_z:
                pyro.sample("z", dist.Normal(q2 * y + 0.10, 1.0))
    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    elbo.loss_and_grads(model, guide)
    actual_grad_q1 = pyro.param('q1').grad / num_particles
    if include_z:
        actual_grad_q2 = pyro.param('q2').grad / num_particles
    actual_grad_q3 = pyro.param('q3').grad / num_particles
    logger.info("Computing analytic gradients")
    q1 = torch.tensor(pi1, requires_grad=True)
    q2 = torch.tensor(pi2, requires_grad=True)
    q3 = torch.tensor(pi3, requires_grad=True)
    # KL at y, plus the Normal KL at z conditioned on each y branch.
    elbo = kl_divergence(dist.Bernoulli(q1), dist.Bernoulli(q3))
    if include_z:
        elbo = elbo + q1 * kl_divergence(dist.Normal(q2 + 0.10, 1.0), dist.Normal(q3 + 0.55, 1.0))
        elbo = elbo + (1.0 - q1) * kl_divergence(dist.Normal(0.10, 1.0), dist.Normal(q3, 1.0))
        expected_grad_q1, expected_grad_q2, expected_grad_q3 = grad(elbo, [q1, q2, q3])
    else:
        expected_grad_q1, expected_grad_q3 = grad(elbo, [q1, q3])
    prec = 0.04 if enumerate1 is None else 0.02
    assert_equal(actual_grad_q1, expected_grad_q1, prec=prec, msg="".join([
        "\nq1 expected = {}".format(expected_grad_q1.data.cpu().numpy()),
        "\nq1   actual = {}".format(actual_grad_q1.data.cpu().numpy()),
    ]))
    if include_z:
        assert_equal(actual_grad_q2, expected_grad_q2, prec=prec, msg="".join([
            "\nq2 expected = {}".format(expected_grad_q2.data.cpu().numpy()),
            "\nq2   actual = {}".format(actual_grad_q2.data.cpu().numpy()),
        ]))
    assert_equal(actual_grad_q3, expected_grad_q3, prec=prec, msg="".join([
        "\nq3 expected = {}".format(expected_grad_q3.data.cpu().numpy()),
        "\nq3   actual = {}".format(actual_grad_q3.data.cpu().numpy()),
    ]))
@pytest.mark.parametrize("pi1", [0.33, 0.41])
@pytest.mark.parametrize("pi2", [0.44, 0.17])
@pytest.mark.parametrize("pi3", [0.22, 0.29])
def test_non_mean_field_normal_bern_elbo_gradient(pi1, pi2, pi3):
    """Check that enumerated ('sequential'/'parallel') gradient estimates for
    a Normal-into-Bernoulli non-mean-field pair agree with the plain Monte
    Carlo estimate (enumeration=None); no closed-form reference exists here,
    so the MC run serves as ground truth."""
    def model(num_particles):
        with pyro.plate("particles", num_particles):
            q3 = pyro.param("q3", torch.tensor(pi3, requires_grad=True))
            q4 = pyro.param("q4", torch.tensor(0.5 * (pi1 + pi2), requires_grad=True))
            z = pyro.sample("z", dist.Normal(q3, 1.0).expand_by([num_particles]))
            # Squash z through a sigmoid to obtain a valid Bernoulli probability.
            zz = torch.exp(z) / (1.0 + torch.exp(z))
            pyro.sample("y", dist.Bernoulli(q4 * zz))
    def guide(num_particles):
        q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True))
        q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True))
        with pyro.plate("particles", num_particles):
            z = pyro.sample("z", dist.Normal(q2, 1.0).expand_by([num_particles]))
            zz = torch.exp(z) / (1.0 + torch.exp(z))
            pyro.sample("y", dist.Bernoulli(q1 * zz))
    qs = ['q1', 'q2', 'q3', 'q4']
    results = {}
    for ed, num_particles in zip([None, 'parallel', 'sequential'], [30000, 20000, 20000]):
        pyro.clear_param_store()
        elbo = TraceEnum_ELBO(strict_enumeration_warning=any([ed]))
        elbo.loss_and_grads(model, config_enumerate(guide, default=ed), num_particles)
        results[str(ed)] = {}
        for q in qs:
            results[str(ed)]['actual_grad_%s' % q] = pyro.param(q).grad.detach().cpu().numpy() / num_particles
    prec = 0.03
    for ed in ['parallel', 'sequential']:
        logger.info('\n*** {} ***'.format(ed))
        for q in qs:
            logger.info("[{}] actual: {}".format(q, results[ed]['actual_grad_%s' % q]))
            assert_equal(results[ed]['actual_grad_%s' % q], results['None']['actual_grad_%s' % q], prec=prec,
                         msg="".join([
                             "\nexpected (MC estimate) = {}".format(results['None']['actual_grad_%s' % q]),
                             "\n  actual ({} estimate) = {}".format(ed, results[ed]['actual_grad_%s' % q]),
                         ]))
@pytest.mark.parametrize("enumerate1", [None, "sequential", "parallel"])
def test_elbo_rsvi(enumerate1):
    """Check q and a gradients when the guide mixes a (possibly enumerated)
    Bernoulli site with a reparametrized ShapeAugmentedGamma site (RSVI),
    against the analytic KLs for each site."""
    pyro.clear_param_store()
    num_particles = 40000
    prec = 0.01 if enumerate1 else 0.02
    q = pyro.param("q", torch.tensor(0.5, requires_grad=True))
    a = pyro.param("a", torch.tensor(1.5, requires_grad=True))
    kl1 = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(0.25))
    kl2 = kl_divergence(dist.Gamma(a, 1.0), dist.Gamma(0.5, 1.0))
    def model():
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(0.25).expand_by([num_particles]))
            pyro.sample("y", dist.Gamma(0.50, 1.0).expand_by([num_particles]))
    @config_enumerate(default=enumerate1)
    def guide():
        q = pyro.param("q")
        a = pyro.param("a")
        with pyro.plate("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(q).expand_by([num_particles]))
            pyro.sample("y", ShapeAugmentedGamma(a, torch.tensor(1.0)).expand_by([num_particles]))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=any([enumerate1]))
    elbo.loss_and_grads(model, guide)
    actual_q = q.grad / num_particles
    expected_q = grad(kl1, [q])[0]
    assert_equal(actual_q, expected_q, prec=prec, msg="".join([
        "\nexpected q.grad = {}".format(expected_q.detach().cpu().numpy()),
        "\n  actual q.grad = {}".format(actual_q.detach().cpu().numpy()),
    ]))
    actual_a = a.grad / num_particles
    expected_a = grad(kl2, [a])[0]
    assert_equal(actual_a, expected_a, prec=prec, msg="".join([
        "\nexpected a.grad= {}".format(expected_a.detach().cpu().numpy()),
        "\n  actual a.grad = {}".format(actual_a.detach().cpu().numpy()),
    ]))
@pytest.mark.parametrize("enumerate1,num_steps,expand", [
    ("sequential", 2, True),
    ("sequential", 2, False),
    ("sequential", 3, True),
    ("sequential", 3, False),
    ("parallel", 2, True),
    ("parallel", 2, False),
    ("parallel", 3, True),
    ("parallel", 3, False),
    ("parallel", 10, False),
    ("parallel", 20, False),
    _skip_cuda("parallel", 30, False),
])
def test_elbo_hmm_in_model(enumerate1, num_steps, expand):
    """Check parameter gradients for a Gaussian-emission HMM model paired
    with a mean-field categorical guide, against hard-coded expected
    unconstrained gradients (all observations equal 1)."""
    pyro.clear_param_store()
    data = torch.ones(num_steps)
    init_probs = torch.tensor([0.5, 0.5])
    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.9, 0.1], [0.1, 0.9]]),
                                      constraint=constraints.simplex)
        locs = pyro.param("obs_locs", torch.tensor([-1.0, 1.0]))
        scale = pyro.param("obs_scale", torch.tensor(1.0),
                           constraint=constraints.positive)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            # First step draws from init_probs; later steps from the transition row.
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Normal(locs[x], scale), obs=y)
    @config_enumerate(default=enumerate1, expand=expand)
    def guide(data):
        mean_field_probs = pyro.param("mean_field_probs", torch.ones(num_steps, 2) / 2,
                                      constraint=constraints.simplex)
        for i in pyro.markov(range(num_steps)):
            pyro.sample("x_{}".format(i), dist.Categorical(mean_field_probs[i]))
    elbo = TraceEnum_ELBO()
    elbo.loss_and_grads(model, guide, data)
    # Expected gradients are w.r.t. the unconstrained parametrization of each param.
    expected_unconstrained_grads = {
        "transition_probs": torch.tensor([[0.2, -0.2], [-0.2, 0.2]]) * (num_steps - 1),
        "obs_locs": torch.tensor([-num_steps, 0]),
        "obs_scale": torch.tensor(-num_steps),
        "mean_field_probs": torch.tensor([[0.5, -0.5]] * num_steps),
    }
    for name, value in pyro.get_param_store().named_parameters():
        actual = value.grad
        expected = expected_unconstrained_grads[name]
        assert_equal(actual, expected, msg=''.join([
            '\nexpected {}.grad = {}'.format(name, expected.cpu().numpy()),
            '\n  actual {}.grad = {}'.format(name, actual.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize("enumerate1,num_steps,expand", [
    ("sequential", 2, True),
    ("sequential", 2, False),
    ("sequential", 3, True),
    ("sequential", 3, False),
    ("parallel", 2, True),
    ("parallel", 2, False),
    ("parallel", 3, True),
    ("parallel", 3, False),
    ("parallel", 10, False),
    ("parallel", 20, False),
    _skip_cuda("parallel", 30, False),
    _skip_cuda("parallel", 40, False),
    _skip_cuda("parallel", 50, False),
])
def test_elbo_hmm_in_guide(enumerate1, num_steps, expand):
    """Check parameter gradients when the HMM latent chain lives in the guide
    (categorical emissions in the model), against golden values recorded to
    ensure sequential and parallel enumeration agree."""
    pyro.clear_param_store()
    data = torch.ones(num_steps)
    init_probs = torch.tensor([0.5, 0.5])
    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=y)
    @config_enumerate(default=enumerate1, expand=expand)
    def guide(data):
        # The guide shares the "transition_probs" param with the model.
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
    elbo = TraceEnum_ELBO()
    elbo.loss_and_grads(model, guide, data)
    # These golden values simply test agreement between parallel and sequential.
    expected_grads = {
        2: {
            "transition_probs": [[0.1029949, -0.1029949], [0.1029949, -0.1029949]],
            "emission_probs": [[0.75, -0.75], [0.25, -0.25]],
        },
        3: {
            "transition_probs": [[0.25748726, -0.25748726], [0.25748726, -0.25748726]],
            "emission_probs": [[1.125, -1.125], [0.375, -0.375]],
        },
        10: {
            "transition_probs": [[1.64832076, -1.64832076], [1.64832076, -1.64832076]],
            "emission_probs": [[3.75, -3.75], [1.25, -1.25]],
        },
        20: {
            "transition_probs": [[3.70781687, -3.70781687], [3.70781687, -3.70781687]],
            "emission_probs": [[7.5, -7.5], [2.5, -2.5]],
        },
        22: {
            "transition_probs": [[4.11979618, -4.11979618], [4.11979618, -4.11979618]],
            "emission_probs": [[8.25, -8.25], [2.75, -2.75]],
        },
        30: {
            "transition_probs": [[5.76771452, -5.76771452], [5.76771452, -5.76771452]],
            "emission_probs": [[11.25, -11.25], [3.75, -3.75]],
        },
    }
    # Step counts without golden values (e.g. 40, 50) run only as smoke tests.
    if num_steps not in expected_grads:
        return
    for name, value in pyro.get_param_store().named_parameters():
        actual = value.grad
        expected = torch.tensor(expected_grads[num_steps][name])
        assert_equal(actual, expected, msg=''.join([
            '\nexpected {}.grad = {}'.format(name, expected.cpu().numpy()),
            '\n  actual {}.grad = {}'.format(name, actual.detach().cpu().numpy()),
        ]))
@pytest.mark.parametrize('num_steps', [2, 3, 4, 5, 10, 20, _skip_cuda(30)])
def test_hmm_enumerate_model(num_steps):
    """Smoke test: a fully-enumerated HMM model with an empty guide must
    yield a differentiable loss without error (no value assertions)."""
    data = dist.Categorical(torch.tensor([0.5, 0.5])).sample((num_steps,))
    @config_enumerate(default="parallel")
    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = 0
        for t, y in pyro.markov(enumerate(data)):
            x = pyro.sample("x_{}".format(t), dist.Categorical(transition_probs[x]))
            pyro.sample("y_{}".format(t), dist.Categorical(emission_probs[x]), obs=y)
            logger.debug('{}\t{}'.format(t, tuple(x.shape)))
    def guide(data):
        pass
    elbo = TraceEnum_ELBO()
    elbo.differentiable_loss(model, guide, data)
@pytest.mark.parametrize('num_steps', [2, 3, 4, 5, 10, 20, _skip_cuda(30)])
def test_hmm_enumerate_model_and_guide(num_steps):
    """Smoke test: enumeration split across model (chain x_t) and guide
    (initial state x) must yield a differentiable loss without error."""
    data = dist.Categorical(torch.tensor([0.5, 0.5])).sample((num_steps,))
    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        # Initial state is sampled here but enumerated by the guide below.
        x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5])))
        logger.debug('-1\t{}'.format(tuple(x.shape)))
        for t, y in pyro.markov(enumerate(data)):
            x = pyro.sample("x_{}".format(t), dist.Categorical(transition_probs[x]),
                            infer={"enumerate": "parallel"})
            pyro.sample("y_{}".format(t), dist.Categorical(emission_probs[x]), obs=y)
            logger.debug('{}\t{}'.format(t, tuple(x.shape)))
    def guide(data):
        init_probs = pyro.param("init_probs",
                                torch.tensor([0.75, 0.25]),
                                constraint=constraints.simplex)
        pyro.sample("x", dist.Categorical(init_probs),
                    infer={"enumerate": "parallel"})
    elbo = TraceEnum_ELBO()
    elbo.differentiable_loss(model, guide, data)
def _check_loss_and_grads(expected_loss, actual_loss):
    """Assert that two differentiable loss tensors agree, and that their
    gradients w.r.t. every (unconstrained) parameter in the Pyro param
    store agree as well. Parameters unused by either loss are skipped."""
    assert_equal(actual_loss, expected_loss,
                 msg='Expected:\n{}\nActual:\n{}'.format(expected_loss.detach().cpu().numpy(),
                                                         actual_loss.detach().cpu().numpy()))
    param_names = pyro.get_param_store().get_all_param_names()
    leaves = [pyro.param(n).unconstrained() for n in param_names]
    grads_actual = grad(actual_loss, leaves, allow_unused=True, retain_graph=True)
    grads_expected = grad(expected_loss, leaves, allow_unused=True, retain_graph=True)
    for n, g_actual, g_expected in zip(param_names, grads_actual, grads_expected):
        if g_actual is None or g_expected is None:
            # This parameter did not enter one of the graphs; nothing to compare.
            continue
        assert_equal(g_actual, g_expected,
                     msg='{}\nExpected:\n{}\nActual:\n{}'.format(n,
                                                                 g_expected.detach().cpu().numpy(),
                                                                 g_actual.detach().cpu().numpy()))
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_1(scale):
    """Compare an auto-enumerated model containing an irrelevant enumerated
    site y (independent of the observed z) against a hand-written model with
    y removed; losses and gradients must match."""
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([0.3, 0.7]),
               constraint=constraints.simplex)
    @poutine.scale(scale=scale)
    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        # y is enumerated out by TraceEnum_ELBO and does not affect z.
        pyro.sample("y", dist.Categorical(probs_y[x]),
                    infer={"enumerate": "parallel"})
        pyro.sample("z", dist.Categorical(probs_z), obs=torch.tensor(0))
    @poutine.scale(scale=scale)
    def hand_model():
        probs_x = pyro.param("model_probs_x")
        probs_z = pyro.param("model_probs_z")
        pyro.sample("x", dist.Categorical(probs_x))
        pyro.sample("z", dist.Categorical(probs_z), obs=torch.tensor(0))
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_2(scale):
    """Compare auto-enumeration of an intermediate site y (x -> y -> z) against
    a hand-written model that marginalizes y analytically via the matrix
    product probs_y @ probs_z; losses and gradients must match."""
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)
    @poutine.scale(scale=scale)
    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        y = pyro.sample("y", dist.Categorical(probs_y[x]),
                        infer={"enumerate": "parallel"})
        pyro.sample("z", dist.Categorical(probs_z[y]), obs=torch.tensor(0))
    @poutine.scale(scale=scale)
    def hand_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        # Marginalize y by hand: p(z|x) = sum_y p(y|x) p(z|y).
        probs_yz = probs_y.mm(probs_z)
        x = pyro.sample("x", dist.Categorical(probs_x))
        pyro.sample("z", dist.Categorical(probs_yz[x]), obs=torch.tensor(0))
    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_3(scale):
    """Like test_elbo_enumerate_2, but the poutine.scale applies only to the
    enumerated y site and the observation z, not to x; losses and gradients
    must still match the hand-marginalized model."""
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)
    def auto_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            y = pyro.sample("y", dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"})
            pyro.sample("z", dist.Categorical(probs_z[y]), obs=torch.tensor(0))
    def hand_model():
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        # Marginalize y by hand: p(z|x) = sum_y p(y|x) p(z|y).
        probs_yz = probs_y.mm(probs_z)
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            pyro.sample("z", dist.Categorical(probs_yz[x]), obs=torch.tensor(0))
    @config_enumerate(default="parallel")
    def guide():
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))
    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize('num_samples,num_masked',
                         [(1, 1), (2, 2), (3, 2)],
                         ids=["single", "batch", "masked"])
def test_elbo_enumerate_plate_1(num_samples, num_masked, scale):
    """Compare a vectorized data plate over the observation z (optionally
    partially masked) against a hand-written sequential plate covering only
    the unmasked sites; losses and gradients must match."""
    #              +---------+
    #  x ----> y ----> z     |
    #              |       N |
    #              +---------+
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)
    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            y = pyro.sample("y", dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"})
            if num_masked == num_samples:
                with pyro.plate("data", len(data)):
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
            else:
                with pyro.plate("data", len(data)):
                    # Mask out the trailing samples beyond num_masked.
                    with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                        pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
    def hand_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            y = pyro.sample("y", dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"})
            # Sequential plate over only the unmasked observations.
            for i in pyro.plate("data", num_masked):
                pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])
    @config_enumerate(default="parallel")
    def guide(data):
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))
    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize('num_samples,num_masked',
                         [(1, 1), (2, 2), (3, 2)],
                         ids=["single", "batch", "masked"])
def test_elbo_enumerate_plate_2(num_samples, num_masked, scale):
    """Like test_elbo_enumerate_plate_1, but the enumerated site y is itself
    inside the data plate (one y per datum); compared against a hand-written
    sequential plate; losses and gradients must match."""
    #      +-----------------+
    #  x ----> y ----> z     |
    #      |               N |
    #      +-----------------+
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)
    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            with pyro.plate("data", len(data)):
                if num_masked == num_samples:
                    y = pyro.sample("y", dist.Categorical(probs_y[x]),
                                    infer={"enumerate": "parallel"})
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
                else:
                    # Mask out the trailing samples beyond num_masked.
                    with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                        y = pyro.sample("y", dist.Categorical(probs_y[x]),
                                        infer={"enumerate": "parallel"})
                        pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
    def hand_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        x = pyro.sample("x", dist.Categorical(probs_x))
        with poutine.scale(scale=scale):
            # Sequential plate over only the unmasked observations.
            for i in pyro.plate("data", num_masked):
                y = pyro.sample("y_{}".format(i), dist.Categorical(probs_y[x]),
                                infer={"enumerate": "parallel"})
                pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])
    @config_enumerate(default="parallel")
    def guide(data):
        probs_x = pyro.param("guide_probs_x")
        pyro.sample("x", dist.Categorical(probs_x))
    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize('num_samples,num_masked',
                         [(1, 1), (2, 2), (3, 2)],
                         ids=["single", "batch", "masked"])
def test_elbo_enumerate_plate_3(num_samples, num_masked, scale):
    """Guide and model operate entirely inside one plate.

    Model structure (z observed, N = num_samples)::

        +-----------------------+
        |  x ----> y ----> z    |
        |                     N |
        +-----------------------+

    This plate should remain unreduced since all enumeration is in a single
    plate.  The masked variants keep only the first num_masked data points
    active.
    """
    pyro.param("guide_probs_x",
               torch.tensor([0.1, 0.9]),
               constraint=constraints.simplex)
    pyro.param("model_probs_x",
               torch.tensor([0.4, 0.6]),
               constraint=constraints.simplex)
    pyro.param("model_probs_y",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_z",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)

    @poutine.scale(scale=scale)
    def auto_model(data):
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        with pyro.plate("data", len(data)):
            if num_masked == num_samples:
                x = pyro.sample("x", dist.Categorical(probs_x))
                y = pyro.sample("y", dist.Categorical(probs_y[x]),
                                infer={"enumerate": "parallel"})
                pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)
            else:
                # Only the first num_masked slots contribute to the ELBO.
                with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                    x = pyro.sample("x", dist.Categorical(probs_x))
                    y = pyro.sample("y", dist.Categorical(probs_y[x]),
                                    infer={"enumerate": "parallel"})
                    pyro.sample("z", dist.Categorical(probs_z[y]), obs=data)

    @poutine.scale(scale=scale)
    @config_enumerate(default="parallel")
    def auto_guide(data):
        probs_x = pyro.param("guide_probs_x")
        with pyro.plate("data", len(data)):
            if num_masked == num_samples:
                pyro.sample("x", dist.Categorical(probs_x))
            else:
                with poutine.mask(mask=torch.arange(num_samples) < num_masked):
                    pyro.sample("x", dist.Categorical(probs_x))

    @poutine.scale(scale=scale)
    def hand_model(data):
        # Unrolled reference model over the active data points only.
        probs_x = pyro.param("model_probs_x")
        probs_y = pyro.param("model_probs_y")
        probs_z = pyro.param("model_probs_z")
        for i in pyro.plate("data", num_masked):
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs_x))
            y = pyro.sample("y_{}".format(i), dist.Categorical(probs_y[x]),
                            infer={"enumerate": "parallel"})
            pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=data[i])

    @poutine.scale(scale=scale)
    @config_enumerate(default="parallel")
    def hand_guide(data):
        probs_x = pyro.param("guide_probs_x")
        for i in pyro.plate("data", num_masked):
            pyro.sample("x_{}".format(i), dist.Categorical(probs_x))

    data = dist.Categorical(torch.tensor([0.3, 0.7])).sample((num_samples,))
    # NOTE(review): strict_enumeration_warning is disabled here, presumably
    # because enumeration happens in the guide rather than the model — confirm.
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
@pytest.mark.parametrize('outer_obs,inner_obs',
                         [(False, True), (True, False), (True, True)])
def test_elbo_enumerate_plate_4(outer_obs, inner_obs, scale):
    r"""Enumerated latents with observations outside and inside a plate.

        a ---> outer_obs
         \
          +-----\------------------+
          |      \                 |
          |       b ---> inner_obs |  N=2
          +------------------------+

    This tests two different observations, one outside and one inside a
    plate; the boolean flags select which observation sites are present.
    """
    pyro.param("probs_a", torch.tensor([0.4, 0.6]), constraint=constraints.simplex)
    pyro.param("probs_b", torch.tensor([0.6, 0.4]), constraint=constraints.simplex)
    pyro.param("locs", torch.tensor([-1., 1.]))
    pyro.param("scales", torch.tensor([1., 2.]), constraint=constraints.positive)
    outer_data = torch.tensor(2.0)
    inner_data = torch.tensor([0.5, 1.5])

    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        locs = pyro.param("locs")
        scales = pyro.param("scales")
        a = pyro.sample("a", dist.Categorical(probs_a),
                        infer={"enumerate": "parallel"})
        if outer_obs:
            # Observation outside the plate depends only on a (via its scale).
            pyro.sample("outer_obs", dist.Normal(0., scales[a]),
                        obs=outer_data)
        with pyro.plate("inner", 2):
            b = pyro.sample("b", dist.Categorical(probs_b),
                            infer={"enumerate": "parallel"})
            if inner_obs:
                # In-plate observation depends on both a (scale) and b (loc).
                pyro.sample("inner_obs", dist.Normal(locs[b], scales[a]),
                            obs=inner_data)

    @poutine.scale(scale=scale)
    def hand_model():
        # Same model with the inner plate unrolled sequentially.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        locs = pyro.param("locs")
        scales = pyro.param("scales")
        a = pyro.sample("a", dist.Categorical(probs_a),
                        infer={"enumerate": "parallel"})
        if outer_obs:
            pyro.sample("outer_obs", dist.Normal(0., scales[a]),
                        obs=outer_data)
        for i in pyro.plate("inner", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b),
                            infer={"enumerate": "parallel"})
            if inner_obs:
                pyro.sample("inner_obs_{}".format(i), dist.Normal(locs[b], scales[a]),
                            obs=inner_data[i])

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
def test_elbo_enumerate_plate_5():
    """Model enumeration more global than guide enumeration must raise.

        Guide   Model
                  a
         +--------|---+
         | M=2    V   |
         |  b ----> c |
         +------------+

    The model enumerates a outside the plate while the guide enumerates b
    inside it.  This combination is unsupported; the vectorized version is
    expected to raise a ValueError, while the unrolled version still works.
    """
    pyro.param("model_probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("model_probs_b",
               torch.tensor([0.6, 0.4]),
               constraint=constraints.simplex)
    pyro.param("model_probs_c",
               torch.tensor([[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]],
                             [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_b",
               torch.tensor([0.8, 0.2]),
               constraint=constraints.simplex)
    data = torch.tensor([1, 2])
    # Index helper for advanced indexing into the 3-d table probs_c.
    c_ind = torch.arange(3, dtype=torch.long)

    @config_enumerate(default="parallel")
    def model_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("b_axis", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            pyro.sample("c",
                        dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                        obs=data)

    @config_enumerate(default="parallel")
    def guide_plate():
        probs_b = pyro.param("guide_probs_b")
        with pyro.plate("b_axis", 2):
            pyro.sample("b", dist.Categorical(probs_b))

    @config_enumerate(default="parallel")
    def model_iplate():
        # Unrolled model: reference that is computable today.
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("b_axis", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            pyro.sample("c_{}".format(i),
                        dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                        obs=data[i])

    @config_enumerate(default="parallel")
    def guide_iplate():
        probs_b = pyro.param("guide_probs_b")
        for i in pyro.plate("b_axis", 2):
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b))

    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate, guide_iplate)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    with pytest.raises(ValueError, match="Expected model enumeration to be no more global than guide"):
        actual_loss = elbo.differentiable_loss(model_plate, guide_plate)
        # This never gets run because we don't support this yet.
        _check_loss_and_grads(expected_loss, actual_loss)
@pytest.mark.parametrize('enumerate1', ['parallel', 'sequential'])
def test_elbo_enumerate_plate_6(enumerate1):
    """Guide-side enumeration of b with model-side contraction into a plate.

        Guide    Model
                 +-------+
          b ---->| c     |<---- a
                 |   M=2 |
                 +-------+

    This tests that sequential enumeration over b works, even though
    model-side enumeration moves c into b's plate via contraction.
    """
    pyro.param("model_probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("model_probs_b",
               torch.tensor([0.6, 0.4]),
               constraint=constraints.simplex)
    pyro.param("model_probs_c",
               torch.tensor([[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]],
                             [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_b",
               torch.tensor([0.8, 0.2]),
               constraint=constraints.simplex)
    data = torch.tensor([1, 2])
    # Index helper for advanced indexing into the 3-d table probs_c.
    c_ind = torch.arange(3, dtype=torch.long)

    @config_enumerate(default="parallel")
    def model_plate():
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b))
        with pyro.plate("b_axis", 2):
            pyro.sample("c",
                        dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                        obs=data)

    @config_enumerate(default="parallel")
    def model_iplate():
        # Unrolled reference model.
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b))
        for i in pyro.plate("b_axis", 2):
            pyro.sample("c_{}".format(i),
                        dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                        obs=data[i])

    @config_enumerate(default=enumerate1)
    def guide():
        # b is enumerated either in parallel or sequentially (parametrized).
        probs_b = pyro.param("guide_probs_b")
        pyro.sample("b", dist.Categorical(probs_b))

    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    actual_loss = elbo.differentiable_loss(model_plate, guide)
    _check_loss_and_grads(expected_loss, actual_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plate_7(scale):
    """Mixture of model-side and guide-side enumeration around a plate.

        Guide      Model
          a -----> b
          |        |
          +-|------|---------------+
          | V      V               |
          | c ---> d ---> e    N=2 |
          +------------------------+

    The guide enumerates a and samples c; the model enumerates b and d.
    """
    pyro.param("model_probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("model_probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_c",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_d",
               torch.tensor([[[0.4, 0.6], [0.3, 0.7]], [[0.3, 0.7], [0.2, 0.8]]]),
               constraint=constraints.simplex)
    pyro.param("model_probs_e",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_a",
               torch.tensor([0.35, 0.64]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_c",
               torch.tensor([[0., 1.], [1., 0.]]),  # deterministic
               constraint=constraints.simplex)
    # Index helper for advanced indexing into the 3-d table model_probs_d.
    d_ind = torch.arange(2, dtype=torch.long)

    @poutine.scale(scale=scale)
    def auto_model(data):
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        probs_d = pyro.param("model_probs_d")
        probs_e = pyro.param("model_probs_e")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b[a]),
                        infer={"enumerate": "parallel"})
        with pyro.plate("data", 2):
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
            d = pyro.sample("d",
                            dist.Categorical(probs_d[b.unsqueeze(-1), c.unsqueeze(-1), d_ind]),
                            infer={"enumerate": "parallel"})
            pyro.sample("obs", dist.Categorical(probs_e[d]), obs=data)

    @poutine.scale(scale=scale)
    def auto_guide(data):
        probs_a = pyro.param("guide_probs_a")
        probs_c = pyro.param("guide_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a),
                        infer={"enumerate": "parallel"})
        with pyro.plate("data", 2):
            pyro.sample("c", dist.Categorical(probs_c[a]))

    @poutine.scale(scale=scale)
    def hand_model(data):
        # Unrolled reference model.
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        probs_d = pyro.param("model_probs_d")
        probs_e = pyro.param("model_probs_e")
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = pyro.sample("b", dist.Categorical(probs_b[a]),
                        infer={"enumerate": "parallel"})
        for i in pyro.plate("data", 2):
            c = pyro.sample("c_{}".format(i), dist.Categorical(probs_c[a]))
            d = pyro.sample("d_{}".format(i),
                            dist.Categorical(probs_d[b.unsqueeze(-1), c.unsqueeze(-1), d_ind]),
                            infer={"enumerate": "parallel"})
            pyro.sample("obs_{}".format(i), dist.Categorical(probs_e[d]), obs=data[i])

    @poutine.scale(scale=scale)
    def hand_guide(data):
        # Unrolled reference guide.
        probs_a = pyro.param("guide_probs_a")
        probs_c = pyro.param("guide_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a),
                        infer={"enumerate": "parallel"})
        for i in pyro.plate("data", 2):
            pyro.sample("c_{}".format(i), dist.Categorical(probs_c[a]))

    data = torch.tensor([0, 0])
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_1(scale):
    # Two unrelated plates:
    #   +-----------------+   +-----------------+
    #   |  a ----> b  M=2 |   |  c ----> d  N=3 |
    #   +-----------------+   +-----------------+
    # Neither plate should be contracted away; the vectorized model and the
    # fully unrolled model must agree in loss and gradients.
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([0.75, 0.25]),
               constraint=constraints.simplex)
    pyro.param("probs_d",
               torch.tensor([[0.4, 0.6], [0.3, 0.7]]),
               constraint=constraints.simplex)
    b_data = torch.tensor([0, 1])
    d_data = torch.tensor([0, 0, 1])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model():
        # Vectorized: both chains live inside pyro.plate contexts.
        p_a, p_b = pyro.param("probs_a"), pyro.param("probs_b")
        p_c, p_d = pyro.param("probs_c"), pyro.param("probs_d")
        with pyro.plate("a_axis", 2):
            a = pyro.sample("a", dist.Categorical(p_a))
            pyro.sample("b", dist.Categorical(p_b[a]), obs=b_data)
        with pyro.plate("c_axis", 3):
            c = pyro.sample("c", dist.Categorical(p_c))
            pyro.sample("d", dist.Categorical(p_d[c]), obs=d_data)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model():
        # Sequential: the same model with both plates unrolled.
        p_a, p_b = pyro.param("probs_a"), pyro.param("probs_b")
        p_c, p_d = pyro.param("probs_c"), pyro.param("probs_d")
        for idx in pyro.plate("a_axis", 2):
            a = pyro.sample("a_{}".format(idx), dist.Categorical(p_a))
            pyro.sample("b_{}".format(idx), dist.Categorical(p_b[a]), obs=b_data[idx])
        for idx in pyro.plate("c_axis", 3):
            c = pyro.sample("c_{}".format(idx), dist.Categorical(p_c))
            pyro.sample("d_{}".format(idx), dist.Categorical(p_d[c]), obs=d_data[idx])

    def guide():
        pass

    auto_loss = TraceEnum_ELBO(max_plate_nesting=1).differentiable_loss(auto_model, guide)
    hand_loss = TraceEnum_ELBO(max_plate_nesting=0).differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_2(scale):
    # Two different plates fed by one shared latent, recycling a dimension:
    #   +---------+    +---------+
    #   | b <---- a ----> c      |
    #   |     M=2 |    |     N=3 |
    #   +---------+    +---------+
    # Vectorized and fully unrolled versions must agree.
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    b_data = torch.tensor([0, 1])
    c_data = torch.tensor([0, 0, 1])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model():
        p_a = pyro.param("probs_a")
        p_b = pyro.param("probs_b")
        p_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(p_a))
        with pyro.plate("b_axis", 2):
            pyro.sample("b", dist.Categorical(p_b[a]), obs=b_data)
        with pyro.plate("c_axis", 3):
            pyro.sample("c", dist.Categorical(p_c[a]), obs=c_data)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model():
        p_a = pyro.param("probs_a")
        p_b = pyro.param("probs_b")
        p_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(p_a))
        for idx in pyro.plate("b_axis", 2):
            pyro.sample("b_{}".format(idx), dist.Categorical(p_b[a]), obs=b_data[idx])
        for idx in pyro.plate("c_axis", 3):
            pyro.sample("c_{}".format(idx), dist.Categorical(p_c[a]), obs=c_data[idx])

    def guide():
        pass

    auto_loss = TraceEnum_ELBO(max_plate_nesting=1).differentiable_loss(auto_model, guide)
    hand_loss = TraceEnum_ELBO(max_plate_nesting=0).differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_3(scale):
    # One enumerated latent feeding observations in two nested plates:
    #   +--------------------+
    #   |     +----------+   |
    #   a ------> b      |   |
    #   |     |      N=2 |   |
    #   |     +----------+   |
    #   |                M=2 |
    #   +--------------------+
    # Tests multiple plate contractions happening in a single step.
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    data = torch.tensor([[0, 1], [0, 0]])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model():
        p_a = pyro.param("probs_a")
        p_b = pyro.param("probs_b")
        a = pyro.sample("a", dist.Categorical(p_a))
        with pyro.plate("outer", 2), pyro.plate("inner", 2):
            pyro.sample("b", dist.Categorical(p_b[a]), obs=data)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model():
        p_a = pyro.param("probs_a")
        p_b = pyro.param("probs_b")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(p_a))
        for row in pyro.plate("outer", 2):
            for col in inner:
                pyro.sample("b_{}_{}".format(row, col),
                            dist.Categorical(p_b[a]),
                            obs=data[row, col])

    def guide():
        pass

    auto_loss = TraceEnum_ELBO(max_plate_nesting=2).differentiable_loss(auto_model, guide)
    hand_loss = TraceEnum_ELBO(max_plate_nesting=0).differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_4(scale):
    # Chain a -> b -> c with b in the outer plate and c in both plates:
    #   +--------------------+
    #   |     +----------+   |
    #   a ----> b ----> c|   |
    #   |     |      N=2 |   |
    #   | M=2 +----------+   |
    #   +--------------------+
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[0.4, 0.6], [0.3, 0.7]]),
               constraint=constraints.simplex)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model(data):
        p_a = pyro.param("probs_a")
        p_b = pyro.param("probs_b")
        p_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(p_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(p_b[a]))
            with pyro.plate("inner", 2):
                pyro.sample("c", dist.Categorical(p_c[b]), obs=data)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model(data):
        p_a = pyro.param("probs_a")
        p_b = pyro.param("probs_b")
        p_c = pyro.param("probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(p_a))
        for row in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(row), dist.Categorical(p_b[a]))
            for col in inner:
                pyro.sample("c_{}_{}".format(row, col),
                            dist.Categorical(p_c[b]),
                            obs=data[row, col])

    def guide(data):
        pass

    data = torch.tensor([[0, 1], [0, 0]])
    auto_loss = TraceEnum_ELBO(max_plate_nesting=2).differentiable_loss(auto_model, guide, data)
    hand_loss = TraceEnum_ELBO(max_plate_nesting=0).differentiable_loss(hand_model, guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_5(scale):
    r"""Latent a feeds both a plate latent b and a doubly-plated observation c.

        a
        | \
        +--|---\------------+
        |  V    +-\-------+ |
        |  b ----> c      | |
        |  |          N=2 | |
        |  | M=2 +--------+ |
        +-------------------+
    """
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[[0.4, 0.6], [0.3, 0.7]],
                             [[0.2, 0.8], [0.1, 0.9]]]),
               constraint=constraints.simplex)
    data = torch.tensor([[0, 1], [0, 0]])
    # Index helper for advanced indexing into the 3-d table probs_c.
    c_ind = torch.arange(2, dtype=torch.long)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def auto_model():
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
            with pyro.plate("inner", 2):
                # c depends on both a and b via advanced indexing.
                pyro.sample("c",
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def hand_model():
        # Unrolled reference over both plates.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a]))
            for j in inner:
                pyro.sample("c_{}_{}".format(i, j),
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data[i, j])

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_6(scale):
    """Two overlapping independence contexts in all sequential/vectorized mixes.

            +----------+
            |      M=2 |
        a ----> b      |
        |   |   |      |
        +--|---|---+   |
        |  V   |   V   |
        |  c ----> d   |
        |  |   |   |   |
        |  N=2 +---|---+
        +----------+

    This tests different ways of mixing two independence contexts, where
    each can be either a sequential or a vectorized plate.  Promoting one
    of the two contexts to a vectorized plate must not change the loss;
    promoting both yields non-tree plate nesting and must raise.
    """
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("probs_d",
               torch.tensor([[[0.4, 0.6], [0.3, 0.7]], [[0.3, 0.7], [0.2, 0.8]]]),
               constraint=constraints.simplex)
    # Index helper for advanced indexing into the 3-d table probs_d.
    d_ind = torch.arange(2, dtype=torch.long)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_iplate_iplate(data):
        # Both contexts sequential (fully unrolled reference).
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis]
        c = [pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis]
        for i in b_axis:
            for j in c_axis:
                pyro.sample("d_{}_{}".format(i, j),
                            dist.Categorical(probs_d[b[i].unsqueeze(-1), c[j].unsqueeze(-1), d_ind]),
                            obs=data[i, j])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_iplate_plate(data):
        # b_axis sequential, c_axis vectorized.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis]
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        for i in b_axis:
            with c_axis:
                pyro.sample("d_{}".format(i),
                            dist.Categorical(probs_d[b[i].unsqueeze(-1), c.unsqueeze(-1), d_ind]),
                            obs=data[i])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_plate_iplate(data):
        # b_axis vectorized, c_axis sequential.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        c = [pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis]
        with b_axis:
            for j in c_axis:
                pyro.sample("d_{}".format(j),
                            dist.Categorical(probs_d[b.unsqueeze(-1), c[j].unsqueeze(-1), d_ind]),
                            obs=data[:, j])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_plate_plate(data):
        # Both contexts vectorized on distinct dims -> non-tree nesting.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        b_axis = pyro.plate("b_axis", 2, dim=-1)
        c_axis = pyro.plate("c_axis", 2, dim=-2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        with b_axis, c_axis:
            pyro.sample("d",
                        dist.Categorical(probs_d[b.unsqueeze(-1), c.unsqueeze(-1), d_ind]),
                        obs=data)

    def guide(data):
        pass

    # Check that either one of the sequential plates can be promoted to be vectorized.
    data = torch.tensor([[0, 1], [0, 0]])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    loss_iplate_iplate = elbo.differentiable_loss(model_iplate_iplate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    loss_plate_iplate = elbo.differentiable_loss(model_plate_iplate, guide, data)
    loss_iplate_plate = elbo.differentiable_loss(model_iplate_plate, guide, data)
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_iplate)
    _check_loss_and_grads(loss_iplate_iplate, loss_iplate_plate)
    # But promoting both to plates should result in an error.
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    with pytest.raises(NotImplementedError, match="Expected tree-structured plate nesting.*"):
        elbo.differentiable_loss(model_plate_plate, guide, data)
@pytest.mark.parametrize('scale', [1, 10])
def test_elbo_enumerate_plates_7(scale):
    """Tree-structured variables over non-tree plate nesting.

            +-------------+
            |         N=2 |
        a ------> c       |
        |   |     |       |
        +--|------|---+   |
        |  |      V   |   |
        |  V      e   |   |
        |  b ---> d   |   |
        |  |          |   |
        |  | M=2 +----|---+
        +--------+

    This tests tree-structured dependencies among variables but non-tree
    dependencies among plate nestings.  Unlike plates_6, here all four
    sequential/vectorized combinations are expected to agree.
    """
    pyro.param("probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("probs_b",
               torch.tensor([[0.6, 0.4], [0.4, 0.6]]),
               constraint=constraints.simplex)
    pyro.param("probs_c",
               torch.tensor([[0.75, 0.25], [0.55, 0.45]]),
               constraint=constraints.simplex)
    pyro.param("probs_d",
               torch.tensor([[0.3, 0.7], [0.2, 0.8]]),
               constraint=constraints.simplex)
    pyro.param("probs_e",
               torch.tensor([[0.4, 0.6], [0.3, 0.7]]),
               constraint=constraints.simplex)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_iplate_iplate(data):
        # Both contexts sequential (fully unrolled reference).
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis]
        c = [pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis]
        for i in b_axis:
            for j in c_axis:
                pyro.sample("d_{}_{}".format(i, j), dist.Categorical(probs_d[b[i]]),
                            obs=data[i, j])
                pyro.sample("e_{}_{}".format(i, j), dist.Categorical(probs_e[c[j]]),
                            obs=data[i, j])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_iplate_plate(data):
        # b_axis sequential, c_axis vectorized.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        b = [pyro.sample("b_{}".format(i), dist.Categorical(probs_b[a])) for i in b_axis]
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        for i in b_axis:
            with c_axis:
                pyro.sample("d_{}".format(i), dist.Categorical(probs_d[b[i]]),
                            obs=data[i])
                pyro.sample("e_{}".format(i), dist.Categorical(probs_e[c]),
                            obs=data[i])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_plate_iplate(data):
        # b_axis vectorized, c_axis sequential.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2)
        c_axis = pyro.plate("c_axis", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        c = [pyro.sample("c_{}".format(j), dist.Categorical(probs_c[a])) for j in c_axis]
        with b_axis:
            for j in c_axis:
                pyro.sample("d_{}".format(j), dist.Categorical(probs_d[b]),
                            obs=data[:, j])
                pyro.sample("e_{}".format(j), dist.Categorical(probs_e[c[j]]),
                            obs=data[:, j])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=scale)
    def model_plate_plate(data):
        # Both contexts vectorized on distinct dims.
        probs_a = pyro.param("probs_a")
        probs_b = pyro.param("probs_b")
        probs_c = pyro.param("probs_c")
        probs_d = pyro.param("probs_d")
        probs_e = pyro.param("probs_e")
        b_axis = pyro.plate("b_axis", 2, dim=-1)
        c_axis = pyro.plate("c_axis", 2, dim=-2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        with b_axis:
            b = pyro.sample("b", dist.Categorical(probs_b[a]))
        with c_axis:
            c = pyro.sample("c", dist.Categorical(probs_c[a]))
        with b_axis, c_axis:
            pyro.sample("d", dist.Categorical(probs_d[b]), obs=data)
            pyro.sample("e", dist.Categorical(probs_e[c]), obs=data)

    def guide(data):
        pass

    # Check that any combination of sequential plates can be promoted to be vectorized.
    data = torch.tensor([[0, 1], [0, 0]])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    loss_iplate_iplate = elbo.differentiable_loss(model_iplate_iplate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    loss_plate_iplate = elbo.differentiable_loss(model_plate_iplate, guide, data)
    loss_iplate_plate = elbo.differentiable_loss(model_iplate_plate, guide, data)
    elbo = TraceEnum_ELBO(max_plate_nesting=2)
    loss_plate_plate = elbo.differentiable_loss(model_plate_plate, guide, data)
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_iplate)
    _check_loss_and_grads(loss_iplate_iplate, loss_iplate_plate)
    _check_loss_and_grads(loss_iplate_iplate, loss_plate_plate)
@pytest.mark.parametrize('guide_scale', [1])
@pytest.mark.parametrize('model_scale', [1])
@pytest.mark.parametrize('outer_vectorized,inner_vectorized,xfail',
                         [(False, True, False), (True, False, True), (True, True, True)],
                         ids=['iplate-plate', 'plate-iplate', 'plate-plate'])
def test_elbo_enumerate_plates_8(model_scale, guide_scale, inner_vectorized, outer_vectorized, xfail):
    """Check vectorized-plate ELBOs against the fully sequential reference.

    The reference loss uses sequential plates everywhere
    (``model_iplate_iplate`` / ``guide_iplate``); each parametrization
    vectorizes a different combination of the two plates.  Cases where model
    enumeration would become more global than guide enumeration are expected
    to raise (``xfail``).
    """
    #    Guide              Model
    #                         a
    #      +-----------------|--------+
    #      | M=2     +-------|------+ |
    #      |         |       V  N=2 | |
    #      |     b ----> c          | |
    #      |         +--------------+ |
    #      +--------------------------+
    pyro.param("model_probs_a",
               torch.tensor([0.45, 0.55]),
               constraint=constraints.simplex)
    pyro.param("model_probs_b",
               torch.tensor([0.6, 0.4]),
               constraint=constraints.simplex)
    pyro.param("model_probs_c",
               torch.tensor([[[0.4, 0.5, 0.1], [0.3, 0.5, 0.2]],
                             [[0.3, 0.4, 0.3], [0.4, 0.4, 0.2]]]),
               constraint=constraints.simplex)
    pyro.param("guide_probs_b",
               torch.tensor([0.8, 0.2]),
               constraint=constraints.simplex)
    data = torch.tensor([[0, 1], [0, 2]])
    # Index vector used to advanced-index probs_c along its last dim.
    c_ind = torch.arange(3, dtype=torch.long)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=model_scale)
    def model_plate_plate():
        # Both plates vectorized.
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            with pyro.plate("inner", 2):
                pyro.sample("c",
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data)

    @config_enumerate(default="parallel")
    @poutine.scale(scale=model_scale)
    def model_iplate_plate():
        # Sequential outer plate, vectorized inner plate.
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            with inner:
                pyro.sample("c_{}".format(i),
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data[:, i])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=model_scale)
    def model_plate_iplate():
        # Vectorized outer plate, sequential inner plate.
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        a = pyro.sample("a", dist.Categorical(probs_a))
        with pyro.plate("outer", 2):
            b = pyro.sample("b", dist.Categorical(probs_b))
            for j in pyro.plate("inner", 2):
                pyro.sample("c_{}".format(j),
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data[j])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=model_scale)
    def model_iplate_iplate():
        # Fully sequential reference model.
        probs_a = pyro.param("model_probs_a")
        probs_b = pyro.param("model_probs_b")
        probs_c = pyro.param("model_probs_c")
        inner = pyro.plate("inner", 2)
        a = pyro.sample("a", dist.Categorical(probs_a))
        for i in pyro.plate("outer", 2):
            b = pyro.sample("b_{}".format(i), dist.Categorical(probs_b))
            for j in inner:
                pyro.sample("c_{}_{}".format(i, j),
                            dist.Categorical(probs_c[a.unsqueeze(-1), b.unsqueeze(-1), c_ind]),
                            obs=data[j, i])

    @config_enumerate(default="parallel")
    @poutine.scale(scale=guide_scale)
    def guide_plate():
        probs_b = pyro.param("guide_probs_b")
        with pyro.plate("outer", 2):
            pyro.sample("b", dist.Categorical(probs_b))

    @config_enumerate(default="parallel")
    @poutine.scale(scale=guide_scale)
    def guide_iplate():
        probs_b = pyro.param("guide_probs_b")
        for i in pyro.plate("outer", 2):
            pyro.sample("b_{}".format(i), dist.Categorical(probs_b))

    # Fully sequential pair is the ground truth.
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    expected_loss = elbo.differentiable_loss(model_iplate_iplate, guide_iplate)
    with ExitStack() as stack:
        if xfail:
            stack.enter_context(pytest.raises(
                ValueError,
                match="Expected model enumeration to be no more global than guide"))
        if inner_vectorized:
            if outer_vectorized:
                elbo = TraceEnum_ELBO(max_plate_nesting=2)
                actual_loss = elbo.differentiable_loss(model_plate_plate, guide_plate)
            else:
                elbo = TraceEnum_ELBO(max_plate_nesting=1)
                actual_loss = elbo.differentiable_loss(model_iplate_plate, guide_iplate)
        else:
            elbo = TraceEnum_ELBO(max_plate_nesting=1)
            actual_loss = elbo.differentiable_loss(model_plate_iplate, guide_plate)
    _check_loss_and_grads(expected_loss, actual_loss)
def test_elbo_scale():
    """Guide-side enumeration of a 2-component mixture must agree with manual
    marginalization implemented via ``poutine.scale`` in the model."""

    def component_model(data, which, suffix=""):
        # Likelihood of one mixture component, shared by both formulations.
        loc = pyro.param("locs", torch.tensor([-1., 1.]))[which]
        with pyro.plate("data" + suffix, len(data)):
            pyro.sample("obs" + suffix, dist.Normal(loc, 1.), obs=data)

    pyro.param("mixture_probs", torch.tensor([0.25, 0.75]), constraint=constraints.simplex)

    # Formulation 1: the component indicator is enumerated in the guide.
    def auto_model(data):
        weights = pyro.param("mixture_probs")
        component_model(data, pyro.sample("which", dist.Categorical(weights)))

    def auto_guide(data):
        weights = pyro.param("mixture_probs")
        pyro.sample("which", dist.Categorical(weights),
                    infer={"enumerate": "parallel"})

    # Formulation 2: marginalize `which` by hand, weighting each branch's
    # log-density by its mixture probability.
    def hand_model(data):
        weights = pyro.param("mixture_probs")
        for which in pyro.plate("which", len(weights)):
            with pyro.poutine.scale(scale=weights[which]):
                component_model(data, which, suffix="_{}".format(which))

    def hand_guide(data):
        pass

    data = dist.Normal(0., 2.).sample((3,))
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, auto_guide, data)
    hand_loss = elbo.differentiable_loss(hand_model, hand_guide, data)
    _check_loss_and_grads(hand_loss, auto_loss)
def test_elbo_hmm_growth():
    """Check that TraceEnum_ELBO bookkeeping grows only linearly with HMM length.

    Runs an enumerated 2-state HMM at increasing sequence lengths, records the
    cache-size counters from ``LAST_CACHE_SIZE``, and asserts each counter
    series is (eventually) linear in the length.
    """
    pyro.clear_param_store()
    init_probs = torch.tensor([0.5, 0.5])
    elbo = TraceEnum_ELBO(max_plate_nesting=0)

    def model(data):
        # 2-state HMM with learned transition and emission matrices.
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            # First step uses the init distribution, later steps condition on x.
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=y)

    @config_enumerate(default="parallel")
    def guide(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        x = None
        for i, y in pyro.markov(enumerate(data)):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))

    sizes = range(2, 1 + int(os.environ.get('GROWTH_SIZE', 15)))
    costs = []
    times1 = []
    times2 = []
    for size in sizes:
        data = torch.ones(size)
        time0 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # compiles paths
        time1 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # reuses compiled path
        time2 = timeit.default_timer()
        times1.append(time1 - time0)
        times2.append(time2 - time1)
        costs.append(LAST_CACHE_SIZE[0])
    # Re-group per-run counters into one series per counter key.
    collated_costs = defaultdict(list)
    for counts in costs:
        for key, cost in counts.items():
            collated_costs[key].append(cost)
    logger.debug('\n'.join([
        'HMM Growth:',
        'sizes = {}'.format(repr(sizes)),
        'costs = {}'.format(repr(dict(collated_costs))),
        'times1 = {}'.format(repr(times1)),
        'times2 = {}'.format(repr(times2)),
    ]))
    for key, cost in collated_costs.items():
        dt = 3
        # A zero second difference over stride dt means the tail is linear.
        assert cost[-1 - dt - dt] - 2 * cost[-1 - dt] + cost[-1] == 0, \
            '{} cost is not linear'.format(key)
@pytest.mark.skipif("CUDA_TEST" in os.environ, reason="https://github.com/uber/pyro/issues/1380")
def test_elbo_dbn_growth():
    """Check that TraceEnum_ELBO bookkeeping grows only linearly with the
    length of a dynamic Bayesian network (two coupled chains x, y)."""
    pyro.clear_param_store()
    elbo = TraceEnum_ELBO(max_plate_nesting=0)

    def model(data):
        uniform = torch.tensor([0.5, 0.5])
        probs_z = pyro.param("probs_z",
                             torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                             constraint=constraints.simplex)
        for i, z in pyro.markov(enumerate(data)):
            pyro.sample("x_{}".format(i), dist.Categorical(uniform))
            y = pyro.sample("y_{}".format(i), dist.Categorical(uniform))
            pyro.sample("z_{}".format(i), dist.Categorical(probs_z[y]), obs=z)

    @config_enumerate(default="parallel")
    def guide(data):
        probs_x = pyro.param("probs_x",
                             torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                             constraint=constraints.simplex)
        probs_y = pyro.param("probs_y",
                             torch.tensor([[[0.75, 0.25], [0.45, 0.55]],
                                           [[0.55, 0.45], [0.25, 0.75]]]),
                             constraint=constraints.simplex)
        x = 0
        y = 0
        for i in pyro.markov(range(len(data))):
            # y_i depends on both the current x and the previous y.
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs_x[x]))
            y = pyro.sample("y_{}".format(i), dist.Categorical(probs_y[x, y]))

    sizes = range(2, 1 + int(os.environ.get('GROWTH_SIZE', 15)))
    costs = []
    times1 = []
    times2 = []
    for size in sizes:
        data = torch.ones(size)
        time0 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # compiles paths
        time1 = timeit.default_timer()
        elbo.loss_and_grads(model, guide, data)  # reuses compiled path
        time2 = timeit.default_timer()
        times1.append(time1 - time0)
        times2.append(time2 - time1)
        costs.append(LAST_CACHE_SIZE[0])
    # Re-group per-run counters into one series per counter key.
    collated_costs = defaultdict(list)
    for counts in costs:
        for key, cost in counts.items():
            collated_costs[key].append(cost)
    logger.debug('\n'.join([
        'DBN Growth:',
        'sizes = {}'.format(repr(sizes)),
        'costs = {}'.format(repr(dict(collated_costs))),
        'times1 = {}'.format(repr(times1)),
        'times2 = {}'.format(repr(times2)),
    ]))
    for key, cost in collated_costs.items():
        dt = 3
        # A zero second difference over stride dt means the tail is linear.
        assert cost[-1 - dt - dt] - 2 * cost[-1 - dt] + cost[-1] == 0, \
            '{} cost is not linear'.format(key)
@pytest.mark.parametrize("pi_a", [0.33])
@pytest.mark.parametrize("pi_b", [0.51, 0.77])
@pytest.mark.parametrize("pi_c", [0.37])
@pytest.mark.parametrize("N_b", [3, 4])
@pytest.mark.parametrize("N_c", [5, 6])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("expand", [True, False])
def test_bernoulli_pyramid_elbo_gradient(enumerate1, N_b, N_c, pi_a, pi_b, pi_c, expand):
    """Compare enumerated-ELBO gradients against hand-derived analytic gradients
    for a 3-level Bernoulli pyramid: a -> b (plate N_b) -> c (plate N_c x N_b)."""
    pyro.clear_param_store()

    def model():
        a = pyro.sample("a", dist.Bernoulli(0.33))
        with pyro.plate("b_plate", N_b):
            b = pyro.sample("b", dist.Bernoulli(0.25 * a + 0.50))
            with pyro.plate("c_plate", N_c):
                pyro.sample("c", dist.Bernoulli(0.15 * a + 0.20 * b + 0.32))

    def guide():
        # Fully factorized guide with one scalar parameter per site.
        qa = pyro.param("qa", torch.tensor(pi_a, requires_grad=True))
        qb = pyro.param("qb", torch.tensor(pi_b, requires_grad=True))
        qc = pyro.param("qc", torch.tensor(pi_c, requires_grad=True))
        pyro.sample("a", dist.Bernoulli(qa))
        with pyro.plate("b_plate", N_b):
            pyro.sample("b", dist.Bernoulli(qb).expand_by([N_b]))
            with pyro.plate("c_plate", N_c):
                pyro.sample("c", dist.Bernoulli(qc).expand_by([N_c, N_b]))

    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(max_plate_nesting=2,
                          strict_enumeration_warning=True)
    elbo.loss_and_grads(model, config_enumerate(guide, default=enumerate1, expand=expand))
    actual_grad_qa = pyro.param('qa').grad
    actual_grad_qb = pyro.param('qb').grad
    actual_grad_qc = pyro.param('qc').grad

    logger.info("Computing analytic gradients")
    qa = torch.tensor(pi_a, requires_grad=True)
    qb = torch.tensor(pi_b, requires_grad=True)
    qc = torch.tensor(pi_c, requires_grad=True)
    # Closed-form ELBO: Bernoulli KL terms for each site, weighted by the
    # guide probability of each parent configuration and by plate sizes.
    elbo = kl_divergence(dist.Bernoulli(qa), dist.Bernoulli(0.33))
    elbo = elbo + N_b * qa * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.75))
    elbo = elbo + N_b * (1.0 - qa) * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.50))
    elbo = elbo + N_c * N_b * qa * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.67))
    elbo = elbo + N_c * N_b * (1.0 - qa) * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.52))
    elbo = elbo + N_c * N_b * qa * (1.0 - qb) * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.47))
    elbo = elbo + N_c * N_b * (1.0 - qa) * (1.0 - qb) * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.32))
    expected_grad_qa, expected_grad_qb, expected_grad_qc = grad(elbo, [qa, qb, qc])
    prec = 0.001
    assert_equal(actual_grad_qa, expected_grad_qa, prec=prec, msg="".join([
        "\nqa expected = {}".format(expected_grad_qa.data.cpu().numpy()),
        "\nqa  actual = {}".format(actual_grad_qa.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qb, expected_grad_qb, prec=prec, msg="".join([
        "\nqb expected = {}".format(expected_grad_qb.data.cpu().numpy()),
        "\nqb  actual = {}".format(actual_grad_qb.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qc, expected_grad_qc, prec=prec, msg="".join([
        "\nqc expected = {}".format(expected_grad_qc.data.cpu().numpy()),
        "\nqc  actual = {}".format(actual_grad_qc.data.cpu().numpy()),
    ]))
@pytest.mark.parametrize("pi_a", [0.33])
@pytest.mark.parametrize("pi_b", [0.51])
@pytest.mark.parametrize("pi_c", [0.37])
@pytest.mark.parametrize("pi_d", [0.29])
@pytest.mark.parametrize("b_factor", [0.03, 0.04])
@pytest.mark.parametrize("c_factor", [0.04, 0.06])
@pytest.mark.parametrize("d_offset", [0.32])
@pytest.mark.parametrize("enumerate1", ["sequential", "parallel"])
@pytest.mark.parametrize("expand", [True, False])
def test_bernoulli_non_tree_elbo_gradient(enumerate1, b_factor, c_factor, pi_a, pi_b, pi_c, pi_d,
                                          expand, d_offset, N_b=2, N_c=2):
    """Compare enumerated-ELBO gradients against analytic gradients for a
    non-tree Bernoulli graph: a -> {b, c}, b -> c, {b, c} -> d."""
    pyro.clear_param_store()

    def model():
        a = pyro.sample("a", dist.Bernoulli(0.33))
        b = pyro.sample("b", dist.Bernoulli(0.25 * a + 0.50))
        c = pyro.sample("c", dist.Bernoulli(0.25 * a + 0.10 * b + 0.50))
        pyro.sample("d", dist.Bernoulli(b_factor * b + c_factor * c + d_offset))

    def guide():
        # Fully factorized (mean-field) guide.
        qa = pyro.param("qa", torch.tensor(pi_a, requires_grad=True))
        qb = pyro.param("qb", torch.tensor(pi_b, requires_grad=True))
        qc = pyro.param("qc", torch.tensor(pi_c, requires_grad=True))
        qd = pyro.param("qd", torch.tensor(pi_d, requires_grad=True))
        pyro.sample("a", dist.Bernoulli(qa))
        pyro.sample("b", dist.Bernoulli(qb))
        pyro.sample("c", dist.Bernoulli(qc))
        pyro.sample("d", dist.Bernoulli(qd))

    logger.info("Computing gradients using surrogate loss")
    elbo = TraceEnum_ELBO(max_plate_nesting=2,
                          strict_enumeration_warning=True)
    elbo.loss_and_grads(model, config_enumerate(guide, default=enumerate1, expand=expand))
    actual_grad_qa = pyro.param('qa').grad
    actual_grad_qb = pyro.param('qb').grad
    actual_grad_qc = pyro.param('qc').grad
    actual_grad_qd = pyro.param('qd').grad

    logger.info("Computing analytic gradients")
    qa = torch.tensor(pi_a, requires_grad=True)
    qb = torch.tensor(pi_b, requires_grad=True)
    qc = torch.tensor(pi_c, requires_grad=True)
    qd = torch.tensor(pi_d, requires_grad=True)
    # Closed-form ELBO: per-site Bernoulli KL terms weighted by the guide
    # probability of each parent configuration.
    elbo = kl_divergence(dist.Bernoulli(qa), dist.Bernoulli(0.33))
    elbo = elbo + qa * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.75))
    elbo = elbo + (1.0 - qa) * kl_divergence(dist.Bernoulli(qb), dist.Bernoulli(0.50))
    elbo = elbo + qa * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.85))
    elbo = elbo + (1.0 - qa) * qb * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.60))
    elbo = elbo + qa * (1.0 - qb) * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.75))
    elbo = elbo + (1.0 - qa) * (1.0 - qb) * kl_divergence(dist.Bernoulli(qc), dist.Bernoulli(0.50))
    elbo = elbo + qb * qc * kl_divergence(dist.Bernoulli(qd), dist.Bernoulli(b_factor + c_factor + d_offset))
    elbo = elbo + (1.0 - qb) * qc * kl_divergence(dist.Bernoulli(qd), dist.Bernoulli(c_factor + d_offset))
    elbo = elbo + qb * (1.0 - qc) * kl_divergence(dist.Bernoulli(qd), dist.Bernoulli(b_factor + d_offset))
    elbo = elbo + (1.0 - qb) * (1.0 - qc) * kl_divergence(dist.Bernoulli(qd), dist.Bernoulli(d_offset))
    expected_grad_qa, expected_grad_qb, expected_grad_qc, expected_grad_qd = grad(elbo, [qa, qb, qc, qd])
    prec = 0.0001
    assert_equal(actual_grad_qa, expected_grad_qa, prec=prec, msg="".join([
        "\nqa expected = {}".format(expected_grad_qa.data.cpu().numpy()),
        "\nqa  actual = {}".format(actual_grad_qa.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qb, expected_grad_qb, prec=prec, msg="".join([
        "\nqb expected = {}".format(expected_grad_qb.data.cpu().numpy()),
        "\nqb  actual = {}".format(actual_grad_qb.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qc, expected_grad_qc, prec=prec, msg="".join([
        "\nqc expected = {}".format(expected_grad_qc.data.cpu().numpy()),
        "\nqc  actual = {}".format(actual_grad_qc.data.cpu().numpy()),
    ]))
    assert_equal(actual_grad_qd, expected_grad_qd, prec=prec, msg="".join([
        "\nqd expected = {}".format(expected_grad_qd.data.cpu().numpy()),
        "\nqd  actual = {}".format(actual_grad_qd.data.cpu().numpy()),
    ]))
@pytest.mark.parametrize("gate", [0.1, 0.25, 0.5, 0.75, 0.9])
@pytest.mark.parametrize("rate", [0.1, 1., 3.])
def test_elbo_zip(gate, rate):
    """A ZeroInflatedPoisson likelihood must give the same loss and grads as
    an explicitly enumerated Bernoulli-gated MaskedMixture of Delta(0) and
    Poisson."""
    # test for ZIP distribution
    def zip_model(data):
        gate = pyro.param("gate")
        rate = pyro.param("rate")
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.ZeroInflatedPoisson(gate, rate), obs=data)

    def composite_model(data):
        gate = pyro.param("gate")
        rate = pyro.param("rate")
        dist1 = dist.Delta(torch.tensor(0.))
        dist0 = dist.Poisson(rate)
        with pyro.plate("data", len(data)):
            # The Bernoulli mask selects between the Poisson component and
            # the point mass at zero for each datum.
            # NOTE(review): .byte() is deprecated in newer torch; presumably
            # a bool mask works too -- confirm the pinned torch/pyro versions
            # before changing.
            mask = pyro.sample("mask", dist.Bernoulli(gate), infer={"enumerate": "parallel"}).byte()
            pyro.sample("obs", dist.MaskedMixture(mask, dist0, dist1), obs=data)

    def guide(data):
        pass

    gate = pyro.param("gate", torch.tensor(gate), constraint=constraints.unit_interval)
    rate = pyro.param("rate", torch.tensor(rate), constraint=constraints.positive)
    data = torch.tensor([0., 1., 2.])
    elbo = TraceEnum_ELBO(max_plate_nesting=1, strict_enumeration_warning=False)
    zip_loss = elbo.differentiable_loss(zip_model, guide, data)
    composite_loss = elbo.differentiable_loss(composite_model, guide, data)
    _check_loss_and_grads(zip_loss, composite_loss)
@pytest.mark.parametrize("mixture,scale", [
    (dist.MixtureOfDiagNormals, [[2., 1.], [1., 2], [4., 4.]]),
    (dist.MixtureOfDiagNormalsSharedCovariance, [2., 1.]),
])
def test_mixture_of_diag_normals(mixture, scale):
    """Fused mixture-of-diagonal-normals distributions must match explicitly
    enumerating the component indicator and masking."""
    # K = 3, D = 2
    pyro.param("locs", torch.tensor([[0., 0.], [0., 1.], [0., 10.]]))
    pyro.param("coord_scale", torch.tensor(scale), constraint=constraints.positive)
    pyro.param("component_logits", torch.tensor([0., -1., 2.]))
    data = torch.tensor([[0., 0.], [1., 1.], [2., 3.], [1., 11.]])

    def auto_model():
        # Fused mixture distribution handles the components internally.
        locs = pyro.param("locs")
        coord_scale = pyro.param("coord_scale")
        component_logits = pyro.param("component_logits")
        with pyro.plate("data", len(data)):
            pyro.sample("obs", mixture(locs, coord_scale, component_logits), obs=data)

    def hand_model():
        # Enumerate the component indicator, then mask out all but the
        # enumerated component of each datum.
        locs = pyro.param("locs")
        coord_scale = pyro.param("coord_scale")
        component_logits = pyro.param("component_logits")
        with pyro.plate("data", len(data), dim=-2):
            which = pyro.sample("mask", dist.Categorical(logits=component_logits),
                                infer={"enumerate": "parallel"})
            with pyro.plate("components", len(component_logits), dim=-1) as component_ind:
                with poutine.mask(mask=(which == component_ind)):
                    # NOTE(review): .independent(1) is the legacy alias of
                    # .to_event(1) -- confirm pyro version before renaming.
                    pyro.sample("obs", dist.Normal(locs, coord_scale).independent(1),
                                obs=data.unsqueeze(-2))

    def guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=2, strict_enumeration_warning=False)
    auto_loss = elbo.differentiable_loss(auto_model, guide)
    hand_loss = elbo.differentiable_loss(hand_model, guide)
    _check_loss_and_grads(hand_loss, auto_loss)
@pytest.mark.parametrize("Dist, prior", [
    (dist.Bernoulli, 0.2),
    (dist.Categorical, [0.2, 0.8]),
    (dist.Categorical, [0.2, 0.3, 0.5]),
    (dist.Categorical, [0.2, 0.3, 0.3, 0.2]),
    (dist.OneHotCategorical, [0.2, 0.8]),
    (dist.OneHotCategorical, [0.2, 0.3, 0.5]),
    (dist.OneHotCategorical, [0.2, 0.3, 0.3, 0.2]),
])
def test_compute_marginals_single(Dist, prior):
    """compute_marginals() for a single latent must return the exact posterior:
    plugging the returned probs into an enumerating guide must give zero
    gradient of the ELBO."""
    prior = torch.tensor(prior)
    data = torch.tensor([0., 0.1, 0.2, 0.9, 1.0, 1.1])

    @config_enumerate(default="parallel")
    def model():
        locs = torch.tensor([-1., 0., 1., 2.])
        x = pyro.sample("x", Dist(prior))
        # Normalize the sampled value to an integer index into `locs`.
        if Dist is dist.Bernoulli:
            x = x.long()
        elif Dist is dist.OneHotCategorical:
            x = x.max(-1)[1]
        with pyro.plate("data", len(data)):
            pyro.sample("obs", dist.Normal(locs[x], 1.), obs=data)

    # First compute marginals using an empty guide.
    def empty_guide():
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    marginals = elbo.compute_marginals(model, empty_guide)
    assert len(marginals) == 1
    assert type(marginals["x"]) is Dist
    probs = marginals["x"].probs
    assert probs.shape == prior.shape

    # Next insert the computed marginals in an enumerating guide
    # and ensure that they are exact, or at least locally optimal.
    pyro.param("probs", probs)

    @config_enumerate(default="parallel")
    def exact_guide():
        probs = pyro.param("probs")
        pyro.sample("x", Dist(probs))

    loss = elbo.differentiable_loss(model, exact_guide)
    # At the exact posterior the ELBO is stationary in the guide probs.
    assert_equal(grad(loss, [pyro.param("probs")])[0], torch.zeros_like(probs))
@pytest.mark.parametrize('ok,enumerate_guide,num_particles,vectorize_particles', [
    (True, None, 1, False),
    (False, "sequential", 1, False),
    (False, "parallel", 1, False),
    (False, None, 2, False),
    (False, None, 2, True),
])
def test_compute_marginals_restrictions(ok, enumerate_guide, num_particles, vectorize_particles):
    """compute_marginals() is only supported for a non-enumerating guide with a
    single particle; every other configuration must raise NotImplementedError."""

    @config_enumerate(default="parallel")
    def model():
        total = 0.
        for site, p in [("w", 0.1), ("x", 0.2), ("y", 0.3), ("z", 0.4)]:
            total = total + pyro.sample(site, dist.Bernoulli(p))
        pyro.sample("obs", dist.Normal(0., 1.), obs=total)

    @config_enumerate(default=enumerate_guide)
    def guide():
        pyro.sample("w", dist.Bernoulli(0.4))
        pyro.sample("y", dist.Bernoulli(0.7))

    # The ELBO itself must be computable and finite in every configuration.
    elbo = TraceEnum_ELBO(max_plate_nesting=0,
                          num_particles=num_particles,
                          vectorize_particles=vectorize_particles)
    assert not torch_isnan(elbo.loss(model, guide))

    if not ok:
        with pytest.raises(NotImplementedError, match="compute_marginals"):
            elbo.compute_marginals(model, guide)
    else:
        # Only sites enumerated in the model (not the guide) get marginals.
        assert set(elbo.compute_marginals(model, guide).keys()) == {"x", "z"}
@pytest.mark.parametrize('size', [1, 2, 3, 4, 10, 20, _skip_cuda(30)])
def test_compute_marginals_hmm(size):
    """Marginals of a 2-state HMM conditioned on a future hidden state.

    All y observations are 0 while the final hidden state is observed as 1,
    so the marginal P(x_i = 1) must increase monotonically with i.
    """
    @config_enumerate(default="parallel")
    def model(data):
        transition_probs = torch.tensor([[0.75, 0.25], [0.25, 0.75]])
        emission_probs = torch.tensor([[0.75, 0.25], [0.25, 0.75]])
        x = torch.tensor(0)
        for i in pyro.markov(range(len(data) + 1)):
            if i < len(data):
                x = pyro.sample("x_{}".format(i), dist.Categorical(transition_probs[x]))
                pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=data[i])
            else:
                # One extra step: observe the final hidden state to be 1.
                pyro.sample("x_{}".format(i), dist.Categorical(transition_probs[x]),
                            obs=torch.tensor(1))

    def guide(data):
        pass

    data = torch.zeros(size, dtype=torch.long)
    elbo = TraceEnum_ELBO(max_plate_nesting=0)
    marginals = elbo.compute_marginals(model, guide, data)
    assert set(marginals.keys()) == {"x_{}".format(i) for i in range(size)}
    for i in range(size):
        d = marginals["x_{}".format(i)]
        assert d.batch_shape == ()

    # The x's should be monotonically increasing, since we've observed x[-1]==0
    # and x[size]==1, and since the y's are constant.
    for i in range(size - 1):
        d1 = marginals["x_{}".format(i)]
        d2 = marginals["x_{}".format(i + 1)]
        assert d1.probs[0] > d2.probs[0]
        assert d1.probs[1] < d2.probs[1]
@pytest.mark.parametrize("data", [
    [None, None],
    [torch.tensor(0.), None],
    [None, torch.tensor(0.)],
    [torch.tensor(0.), torch.tensor(0)],
])
def test_backwardsample_posterior_smoke(data):
    """Smoke test: sample_posterior must return observed values unchanged and
    scalar samples for unobserved sites, for every observation pattern."""
    @config_enumerate(default="parallel")
    def model(data):
        xs = list(data)
        zs = []
        for i in range(2):
            K = i + 2  # number of mixture components
            zs.append(pyro.sample("z_{}".format(i),
                                  dist.Categorical(torch.ones(K))))
            if i == 0:
                # Continuous observation site.
                loc = pyro.param("loc", torch.randn(K))[zs[i]]
                xs[i] = pyro.sample("x_{}".format(i),
                                    dist.Normal(loc, 1.), obs=data[i])
            elif i == 1:
                # Discrete observation site.
                logits = pyro.param("logits", torch.randn(K, 2))[zs[i]]
                xs[i] = pyro.sample("x_{}".format(i),
                                    dist.Categorical(logits=logits),
                                    obs=data[i])
        # Couple the two latents through a deterministic observed site.
        z12 = zs[0] + 2 * zs[1]
        pyro.sample("z_12", dist.Categorical(torch.arange(6.)), obs=z12)
        return xs, zs

    def guide(data):
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    xs, zs = elbo.sample_posterior(model, guide, data)
    for x, datum in zip(xs, data):
        # Observed values must be passed through by identity.
        assert datum is None or datum is x
    for z in zs:
        assert z.shape == ()
def test_backwardsample_posterior_2():
    """Statistical check of sample_posterior: observing z = 0 with
    P(z=1|x) = [0.1, 0.9] means about 90% of posterior x samples equal 0."""
    n_particles = 10000

    @config_enumerate(default="parallel")
    def model(data):
        with pyro.plate("particles", n_particles):
            z_probs = torch.tensor([0.1, 0.9])
            x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5])))
            z = pyro.sample("z", dist.Bernoulli(z_probs[x]), obs=data)
        return x, z

    def guide(data):
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    x, z = elbo.sample_posterior(model, guide, data=torch.zeros(n_particles))
    agreement = (x.type_as(z) == z).float().mean().item()
    assert abs(0.9 - agreement) < 0.05
def test_backwardsample_posterior_3():
    """Statistical check of sample_posterior with two latents: the p_z table
    makes z=1 favor x == y (prob 0.9) and z=0 favor x != y (prob 0.1)."""
    n_particles = 10000

    @config_enumerate(default="parallel")
    def model(data):
        with pyro.plate("particles", n_particles):
            z_probs = torch.tensor([[0.9, 0.1], [0.1, 0.9]])
            x = pyro.sample("x", dist.Categorical(torch.tensor([0.5, 0.5])))
            y = pyro.sample("y", dist.Categorical(torch.tensor([0.5, 0.5])))
            z = pyro.sample("z", dist.Bernoulli(z_probs[x, y]), obs=data)
        return x, y, z

    def guide(data):
        pass

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    # Observe z = 1 (agreement likely), then z = 0 (agreement unlikely).
    for observed, expected in [(torch.ones(n_particles), 0.9),
                               (torch.zeros(n_particles), 0.1)]:
        x, y, z = elbo.sample_posterior(model, guide, data=observed)
        agreement = (x == y).float().mean().item()
        assert abs(expected - agreement) < 0.05
@pytest.mark.parametrize('ok,enumerate_guide,num_particles,vectorize_particles', [
    (True, None, 1, False),
    (False, "sequential", 1, False),
    (False, "parallel", 1, False),
    (False, None, 2, False),
    (False, None, 2, True),
])
def test_backwardsample_posterior_restrictions(ok, enumerate_guide, num_particles, vectorize_particles):
    """sample_posterior() is only supported for a non-enumerating guide with a
    single particle; every other configuration must raise NotImplementedError."""

    @config_enumerate(default="parallel")
    def model():
        values = {site: pyro.sample(site, dist.Bernoulli(p))
                  for site, p in [("w", 0.1), ("x", 0.2), ("y", 0.3), ("z", 0.4)]}
        pyro.sample("obs", dist.Normal(0., 1.),
                    obs=values["w"] + values["x"] + values["y"] + values["z"])
        return values["w"], values["x"], values["y"], values["z"]

    @config_enumerate(default=enumerate_guide)
    def guide():
        pyro.sample("w", dist.Bernoulli(0.4))
        pyro.sample("y", dist.Bernoulli(0.7))

    # The ELBO itself must be computable and finite in every configuration.
    elbo = TraceEnum_ELBO(max_plate_nesting=0,
                          num_particles=num_particles,
                          vectorize_particles=vectorize_particles)
    assert not torch_isnan(elbo.loss(model, guide))

    if not ok:
        with pytest.raises(NotImplementedError, match="sample_posterior"):
            elbo.sample_posterior(model, guide)
    else:
        # All four returned posterior samples must be scalars.
        for value in elbo.sample_posterior(model, guide):
            assert value.shape == ()
|
en
| 0.754966
|
# python 3 # python 2 # The usual dist.Bernoulli avoids NANs by clamping log prob. This unsafe version # allows us to test additional NAN avoidance in _compute_dice_elbo(). # A simple Gaussian mixture model, with no vectorization. # This non-vectorized version is exponential in data_size: # A Gaussian mixture model, with vectorized batching. # This vectorized version is independent of data_size: # newton step # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5 # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5 # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5 # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5 # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5 # for which kl(Bernoulli(q), Bernoulli(p)) = 0.5 # These golden values simply test agreement between parallel and sequential. # +---------+ # x ----> y ----> z | # | N | # +---------+ # +-----------------+ # x ----> y ----> z | # | N | # +-----------------+ # +-----------------------+ # | x ----> y ----> z | # | N | # +-----------------------+ # This plate should remain unreduced since all enumeration is in a single plate. # a ---> outer_obs # \ # +-----\------------------+ # | \ | # | b ---> inner_obs N=2 | # +------------------------+ # This tests two different observations, one outside and one inside an plate. # Guide Model # a # +---------------|--+ # | M=2 V | # | b ----> c | # +------------------+ # This never gets run because we don't support this yet. # Guide Model # +-------+ # b ----> c <---- a # | M=2 | # +-------+ # This tests that sequential enumeration over b works, even though # model-side enumeration moves c into b's plate via contraction. # Guide Model # a -----> b # | | # +-|--------|----------------+ # | V V | # | c -----> d -----> e N=2 | # +---------------------------+ # This tests a mixture of model and guide enumeration. # deterministic # +-----------------+ # | a ----> b M=2 | # +-----------------+ # +-----------------+ # | c ----> d N=3 | # +-----------------+ # This tests two unrelated plates. 
# Each should remain uncontracted. # +---------+ +---------+ # | b <---- a ----> c | # | M=2 | | N=3 | # +---------+ +---------+ # This tests two different plates with recycled dimension. # +--------------------+ # | +----------+ | # a -------> b | | # | | N=2 | | # | +----------+ M=2 | # +--------------------+ # This is tests the case of multiple plate contractions in # a single step. # +--------------------+ # | +----------+ | # a ----> b ----> c | | # | | N=2 | | # | M=2 +----------+ | # +--------------------+ # a # | \ # +--|---\------------+ # | V +-\--------+ | # | b ----> c | | # | | N=2 | | # | M=2 +----------+ | # +-------------------+ # +----------+ # | M=2 | # a ----> b | # | | | | # +--|-------|--+ | # | V | V | | # | c ----> d | | # | | | | # | N=2 +------|---+ # +-------------+ # This tests different ways of mixing two independence contexts, # where each can be either sequential or vectorized plate. # Check that either one of the sequential plates can be promoted to be vectorized. # But promoting both to plates should result in an error. # +-------------+ # | N=2 | # a -------> c | # | | | | # +--|----------|--+ | # | | | V | | # | V | e | | # | b ----> d | | # | | | | # | M=2 +---------|---+ # +----------------+ # This tests tree-structured dependencies among variables but # non-tree dependencies among plate nestings. # Check that any combination of sequential plates can be promoted to be vectorized. # Guide Model # a # +-----------|--------+ # | M=2 +---|------+ | # | | V N=2 | | # | b ----> c | | # | +----------+ | # +--------------------+ # Consider a mixture model with two components, toggled by `which`. # We can implement this in two ways. # First consider automatic enumeration in the guide. # Second consider explicit enumeration in the model, where we # marginalize out the `which` variable by hand. 
# compiles paths # reuses compiled path # compiles paths # reuses compiled path # test for ZIP distribution # K = 3, D = 2 # First compute marginals using an empty guide. # Next insert the computed marginals in an enumerating guide # and ensure that they are exact, or at least locally optimal. # Check that the ELBO works fine. # The x's should be monotonically increasing, since we've observed x[-1]==0 # and x[size]==1, and since the y's are constant. # number of mixture components # Check that the ELBO works fine.
| 1.900388
| 2
|
dataRecovery1.py
|
ShrohanMohapatra/softMatterAlgos
| 1
|
6626606
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Inspect and solve a COMSOL model via the MPh wrapper.

Created on Sun Jun 27 12:23:12 2021
@author: shrohanmohapatra

Loads '2DGeometryExample.mph', prints its parameters, materials, physics,
studies and datasets, attempts to solve it, evaluates an integral of spf.U
over the [0, 2a] x [0, 2a] domain, and saves the model back to disk.
"""
import mph

client = mph.start(cores=1)
model = client.load('2DGeometryExample.mph')

# Dump the model's named parameters with their descriptions.
print(model.parameters())
for (name, value) in model.parameters().items():
    description = model.description(name)
    print(f'{description:20}{name} = {value}')

# Summarize the model's structure.
print(model.materials())
print(model.physics())
print(model.studies())
print(model.parameter('r0'))
print(model.datasets())
print(mph.tree(model))

# Best-effort solve: report a failure but continue, so the evaluation below
# still runs against whatever solution data the model already has.
try:
    model.solve()
except Exception as e:
    print(e)

print(model.evaluate('integrate(spf.U,x,0,2*a,y,0,2*a)'))
model.save()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 27 12:23:12 2021
@author: shrohanmohapatra
"""
# Script: load a COMSOL model through the MPh wrapper, dump its structure,
# solve it (best effort), evaluate an integral, and save the model.
import mph
client = mph.start(cores=1)
model = client.load('2DGeometryExample.mph')
#print(client.names())
#print(client.models())
# Dump the model's named parameters with their descriptions.
print(model.parameters())
for (name, value) in model.parameters().items():
    description = model.description(name)
    print(f'{description:20}{name} = {value}')
# Summaries of the model's structure.
print(model.materials())
print(model.physics())
print(model.studies())
print(model.parameter('r0'))
print(model.datasets())
print(mph.tree(model))
# Best-effort solve: print the error and continue.
try:
    model.solve()
except Exception as e:
    print(e)
    pass
print(model.evaluate('integrate(spf.U,x,0,2*a,y,0,2*a)'))
model.save()
|
en
| 0.457991
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Sun Jun 27 12:23:12 2021 @author: shrohanmohapatra #print(client.names()) #print(client.models())
| 2.979054
| 3
|
amqp_worker/serializer.py
|
cheese-drawer/lib-python-amqp-worker
| 0
|
6626607
|
<reponame>cheese-drawer/lib-python-amqp-worker<filename>amqp_worker/serializer.py
"""Shared behavior for serializing messages sent & received by Workers."""
import gzip
from json import JSONEncoder
from logging import getLogger
from typing import (
cast,
Any,
Protocol,
Optional,
Union,
List,
Tuple,
Dict,
)
from .response import Response, OkResponse, ErrResponse
LOGGER = getLogger(__name__)
# Union of the value types the stock json.JSONEncoder can emit natively;
# Optional adds None (JSON null).  Tuple[Any, ...] means "tuple of any
# length" -- the previous Tuple[Any] only described 1-tuples.
JSONEncoderTypes = Optional[Union[
    int,
    float,
    str,
    bool,
    Dict[str, Any],
    List[Any],
    Tuple[Any, ...]
]]
class JSONEncoderProtocol(Protocol):
    """Define expected behavior for an object capable of encoding JSON."""

    def encode(self, obj: Any) -> str:
        """Encode a given object as JSON."""
        # Protocol member: intentionally no implementation (docstring-only body).

    def default(self, obj: Any) -> JSONEncoderTypes:
        """Convert a given object to a type that is encodable to JSON."""
        # Protocol member: intentionally no implementation (docstring-only body).
class ResponseEncoder(JSONEncoder):
"""Extend JSONEncoder to parse Response objects."""
def default(self, o: Any) -> JSONEncoderTypes:
"""Convert OkResponse & ErrResponse to dictionaries."""
if isinstance(o, OkResponse):
return o.__dict__
if isinstance(o, ErrResponse):
return o.__dict__
# NOTE: Casting as built-in JSONEncoder's default method
# incorrectly has signature of Any when the documentation
# for the method states it must return either a type that is
# serializable to JSON by the default rules, or raises a
# TypeError.
return cast(JSONEncoderTypes, JSONEncoder.default(self, o))
def serialize(
serializer: JSONEncoderProtocol,
data: Response,
) -> bytes:
"""
Serialize the given data with the given serialization module.
After serializing the data to json, it is then encoded as UTF8 &
compressed using gzip.
"""
as_json = serializer.encode(data)
LOGGER.info(f'RESPONSE JSON: {as_json}')
return gzip.compress(as_json.encode('UTF8'))
def deserialize(data: bytes) -> str:
"""Decompresses the given data using gzip.
Assumes user will be deferring the rest of the deserialization to the
parent class via `super().deserialize()`
"""
return gzip.decompress(data).decode('UTF8')
|
"""Shared behavior for serializing messages sent & received by Workers."""
import gzip
from json import JSONEncoder
from logging import getLogger
from typing import (
cast,
Any,
Protocol,
Optional,
Union,
List,
Tuple,
Dict,
)
from .response import Response, OkResponse, ErrResponse
# Module-level logger; handlers/level are configured by the application.
LOGGER = getLogger(__name__)

# Union of every type the stock JSONEncoder can serialize directly;
# used as the return type for custom `default` hooks.
JSONEncoderTypes = Optional[Union[
    int,
    float,
    str,
    bool,
    Dict[str, Any],
    List[Any],
    Tuple[Any]
]]
class JSONEncoderProtocol(Protocol):
    """Define expected behavior for an object capable of encoding JSON.

    Structural type: any object with JSONEncoder-compatible ``encode``
    and ``default`` methods satisfies it (e.g. ``json.JSONEncoder`` or
    ``ResponseEncoder`` below).
    """
    def encode(self, obj: Any) -> str:
        """Encode a given object as JSON."""
    def default(self, obj: Any) -> JSONEncoderTypes:
        """Convert a given object to a type that is encodable to JSON."""
class ResponseEncoder(JSONEncoder):
    """JSONEncoder subclass that knows how to encode Response objects."""

    def default(self, o: Any) -> JSONEncoderTypes:
        """Serialize Ok/Err responses via their instance __dict__."""
        if isinstance(o, (OkResponse, ErrResponse)):
            return o.__dict__
        # NOTE: Casting as built-in JSONEncoder's default method
        # incorrectly has signature of Any when the documentation
        # for the method states it must return either a type that is
        # serializable to JSON by the default rules, or raises a
        # TypeError.
        return cast(JSONEncoderTypes, JSONEncoder.default(self, o))
def serialize(
    serializer: JSONEncoderProtocol,
    data: Response,
) -> bytes:
    """
    Serialize the given data with the given serialization module.

    After serializing the data to json, it is then encoded as UTF8 &
    compressed using gzip.

    :param serializer: any object with a JSONEncoder-style ``encode``
    :param data: the response payload to serialize
    :return: gzip-compressed UTF-8 bytes of the JSON document
    """
    as_json = serializer.encode(data)
    # Lazy %-formatting: the message string is only built when the INFO
    # level is actually enabled (the f-string was formatted eagerly).
    LOGGER.info('RESPONSE JSON: %s', as_json)
    return gzip.compress(as_json.encode('UTF8'))
def deserialize(data: bytes) -> str:
    """Gunzip *data* and decode the result as UTF-8 text.

    Only the decompression/decoding step happens here; JSON parsing is
    left to the caller (typically via ``super().deserialize()``).
    """
    decompressed = gzip.decompress(data)
    return decompressed.decode('UTF8')
|
en
| 0.828988
|
Shared behavior for serializing messages sent & received by Workers. Define expected behavior for an object capable of encoding JSON. Encode a given object as JSON. Convert a given object to a type that is encodable to JSON. Extend JSONEncoder to parse Response objects. Convert OkResponse & ErrResponse to dictionaries. # NOTE: Casting as built-in JSONEncoder's default method # incorrectly has signature of Any when the documentation # for the method states it must return either a type that is # serializable to JSON by the default rules, or raises a # TypeError. Serialize the given data with the given serialization module. After serializing the data to json, it is then encoded as UTF8 & compressed using gzip. Decompresses the given data using gzip. Assumes user will be deferring the rest of the deserialization to the parent class via `super().deserialize()`
| 2.935566
| 3
|
demisto_sdk/commands/common/tests/dashboard_test.py
|
yalonso7/demisto-sdk
| 0
|
6626608
|
<reponame>yalonso7/demisto-sdk<filename>demisto_sdk/commands/common/tests/dashboard_test.py
from typing import Optional
import pytest
from demisto_sdk.commands.common.hook_validations.dashboard import \
DashboardValidator
from demisto_sdk.commands.common.hook_validations.structure import \
StructureValidator
from mock import patch
def mock_structure(file_path=None, current_file=None, old_file=None):
# type: (Optional[str], Optional[dict], Optional[dict]) -> StructureValidator
with patch.object(StructureValidator, '__init__', lambda a, b: None):
structure = StructureValidator(file_path)
structure.is_valid = True
structure.scheme_name = 'dashboard'
structure.file_path = file_path
structure.current_file = current_file
structure.old_file = old_file
return structure
data_is_valid_version = [
(-1, True),
(0, False),
(1, False),
]
@pytest.mark.parametrize('version, is_valid', data_is_valid_version)
def test_is_valid_version(version, is_valid):
structure = StructureValidator("")
structure.current_file = {"version": version}
validator = DashboardValidator(structure)
assert validator.is_valid_version() == is_valid, f'is_valid_version({version}) returns {not is_valid}.'
data_is_id_equal_name = [
('aa', 'aa', True),
('aa', 'ab', False),
('my-home-dashboard', 'My Dashboard', False)
]
@pytest.mark.parametrize('id_, name, is_valid', data_is_id_equal_name)
def test_is_id_equal_name(id_, name, is_valid):
structure = StructureValidator("")
structure.current_file = {"id": id_, "name": name}
validator = DashboardValidator(structure)
assert validator.is_id_equals_name() == is_valid, f'is_id_equal_name returns {not is_valid}.'
data_contains_forbidden_fields = [
({"system": False}, False),
({"isCommon": False}, False),
({"shared": False}, False),
({"owner": "Admin"}, False),
({"layout": [{"widget": {"owner": "Admin"}}]}, False),
({"layout": [{"widget": {"shared": "False"}}]}, False),
({"layout": [{"widget": {"shared4": "False"}}]}, True)
]
@pytest.mark.parametrize('current_file, is_valid', data_contains_forbidden_fields)
def test_contains_forbidden_fields(current_file, is_valid):
structure = mock_structure("", current_file)
validator = DashboardValidator(structure)
assert validator.contains_forbidden_fields() == is_valid, f'is_excluding_fields returns {not is_valid}.'
data_is_including_fields = [
({"fromDate": "1", "toDate": "2", "fromDateLicense": "3"}, True),
({"fromDate": "1", "toDate": "2"}, False),
({"fromDate": "1", "toDate": "2", "fromDateLicense": "3",
"layout": [{"widget": {"fromDate": "1", "toDate": "2", "fromDateLicense": "3"}}]}, True),
({"fromDate": "1", "toDate": "2", "fromDateLicense": "3",
"layout": [{"widget": {"name": "bla", "fromDate": "1", "fromDateLicense": "3"}}]}, False)
]
@pytest.mark.parametrize('current_file, is_valid', data_is_including_fields)
def test_is_including_fields(current_file, is_valid):
structure = mock_structure("", current_file)
validator = DashboardValidator(structure)
assert validator.is_including_fields() == is_valid, f'is_including_fields returns {not is_valid}.'
|
from typing import Optional
import pytest
from demisto_sdk.commands.common.hook_validations.dashboard import \
DashboardValidator
from demisto_sdk.commands.common.hook_validations.structure import \
StructureValidator
from mock import patch
def mock_structure(file_path=None, current_file=None, old_file=None):
    # type: (Optional[str], Optional[dict], Optional[dict]) -> StructureValidator
    """Build a StructureValidator without running its real __init__."""
    with patch.object(StructureValidator, '__init__', lambda a, b: None):
        validator = StructureValidator(file_path)
    validator.is_valid = True
    validator.scheme_name = 'dashboard'
    validator.file_path = file_path
    validator.current_file = current_file
    validator.old_file = old_file
    return validator
data_is_valid_version = [
    (-1, True),
    (0, False),
    (1, False),
]


@pytest.mark.parametrize('version, is_valid', data_is_valid_version)
def test_is_valid_version(version, is_valid):
    """Only version -1 is accepted for dashboard content."""
    structure = StructureValidator("")
    structure.current_file = {"version": version}
    result = DashboardValidator(structure).is_valid_version()
    assert result == is_valid, f'is_valid_version({version}) returns {not is_valid}.'
data_is_id_equal_name = [
    ('aa', 'aa', True),
    ('aa', 'ab', False),
    ('my-home-dashboard', 'My Dashboard', False)
]


@pytest.mark.parametrize('id_, name, is_valid', data_is_id_equal_name)
def test_is_id_equal_name(id_, name, is_valid):
    """A dashboard's id must exactly equal its name."""
    structure = StructureValidator("")
    structure.current_file = {"id": id_, "name": name}
    result = DashboardValidator(structure).is_id_equals_name()
    assert result == is_valid, f'is_id_equal_name returns {not is_valid}.'
data_contains_forbidden_fields = [
    ({"system": False}, False),
    ({"isCommon": False}, False),
    ({"shared": False}, False),
    ({"owner": "Admin"}, False),
    ({"layout": [{"widget": {"owner": "Admin"}}]}, False),
    ({"layout": [{"widget": {"shared": "False"}}]}, False),
    ({"layout": [{"widget": {"shared4": "False"}}]}, True)
]


@pytest.mark.parametrize('current_file, is_valid', data_contains_forbidden_fields)
def test_contains_forbidden_fields(current_file, is_valid):
    """system/isCommon/shared/owner are rejected, even nested inside widgets."""
    validator = DashboardValidator(mock_structure("", current_file))
    assert validator.contains_forbidden_fields() == is_valid, f'is_excluding_fields returns {not is_valid}.'
data_is_including_fields = [
    ({"fromDate": "1", "toDate": "2", "fromDateLicense": "3"}, True),
    ({"fromDate": "1", "toDate": "2"}, False),
    ({"fromDate": "1", "toDate": "2", "fromDateLicense": "3",
      "layout": [{"widget": {"fromDate": "1", "toDate": "2", "fromDateLicense": "3"}}]}, True),
    ({"fromDate": "1", "toDate": "2", "fromDateLicense": "3",
      "layout": [{"widget": {"name": "bla", "fromDate": "1", "fromDateLicense": "3"}}]}, False)
]


@pytest.mark.parametrize('current_file, is_valid', data_is_including_fields)
def test_is_including_fields(current_file, is_valid):
    """fromDate/toDate/fromDateLicense are required at top level and per widget."""
    validator = DashboardValidator(mock_structure("", current_file))
    assert validator.is_including_fields() == is_valid, f'is_including_fields returns {not is_valid}.'
|
en
| 0.236905
|
# type: (Optional[str], Optional[dict], Optional[dict]) -> StructureValidator
| 2.325068
| 2
|
src/py_tldr/core.py
|
iamgodot/pytldr
| 5
|
6626609
|
<gh_stars>1-10
import platform as platform_
import sys
from functools import partial
from os import environ
from pathlib import Path as LibPath
from typing import List
import toml
from click import Choice, Path, argument
from click import command as command_
from click import get_app_dir, option, pass_context, secho
from yaspin import yaspin
from yaspin.spinners import Spinners
from .page import DownloadError, PageFinder, PageFormatter
try:
from importlib.metadata import version
VERSION_CLI = version("py_tldr")
except ModuleNotFoundError:
from pkg_resources import get_distribution
VERSION_CLI = get_distribution("py_tldr").version
VERSION_CLIENT_SPEC = "1.5"
DEFAULT_CACHE_HOURS = 24
DEFAULT_CONFIG = {
"page_source": "https://raw.githubusercontent.com/tldr-pages/tldr/master/pages",
"language": "",
"cache": {
"enabled": True,
"timeout": DEFAULT_CACHE_HOURS,
"download_url": "https://tldr-pages.github.io/assets/tldr.zip",
},
"proxy_url": "",
}
DEFAULT_CONFIG_DIR = LibPath(get_app_dir("tldr"))
DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR / "config.toml"
DEFAULT_CACHE_DIR = LibPath.home() / ".cache" / "tldr"
info = partial(secho, bold=True, fg="green")
warn = partial(secho, bold=True, fg="yellow")
def print_version(ctx, param, value): # pylint: disable=unused-argument
if not value or ctx.resilient_parsing:
return
info(f"tldr version {VERSION_CLI}")
info(f"client specification version {VERSION_CLIENT_SPEC}")
ctx.exit()
def setup_config(ctx, param, value): # pylint: disable=unused-argument
"""Build a config dict from either default or custom path.
Currently custom config file is used without validation, so
misconfiguration may cause errors. Also note `toml` should
used as file format.
"""
config = {}
if not value or ctx.resilient_parsing:
config_dir = DEFAULT_CONFIG_DIR
config_file = DEFAULT_CONFIG_FILE
if not config_file.exists():
warn("No config file found, setting it up...")
config_dir.mkdir(parents=True, exist_ok=True)
with open(config_file, "w", encoding="utf8") as f:
toml.dump(DEFAULT_CONFIG, f)
warn(f"Config file created: {config_file}")
config = DEFAULT_CONFIG
else:
config_file = value
warn(f"Using config file from {config_file}")
if not config:
with open(config_file, encoding="utf8") as f:
config = toml.load(f)
cache = config.get("cache")
if not config.get("page_source") or not cache or not cache.get("download_url"):
warn(f"Page source and cache are required in config file: {config_file}")
sys.exit(1)
return config
@command_(context_settings={"help_option_names": ["-h", "--help"]})
@option(
"-v",
"--version",
is_flag=True,
callback=print_version,
is_eager=True,
expose_value=False,
help="Show version info and exit.",
)
@option(
"--config",
type=Path(exists=True, dir_okay=False, path_type=LibPath),
callback=setup_config,
help="Specify a config file to use.",
)
@option(
"-p",
"--platform",
type=Choice(["android", "common", "linux", "osx", "sunos", "windows"]),
help="Override current operating system.",
)
@option(
"-L",
"--language",
help="Specify language of the page(with no fallbacks), e.g. `en`.",
)
@option("-u", "--update", is_flag=True, help="Update local cache with all pages.")
@argument("command", nargs=-1)
@pass_context
def cli(ctx, config, command, platform, language, update):
"""Collaborative cheatsheets for console commands.
For subcommands such as `git commit`, just keep as it is:
tldr git commit
"""
page_finder = make_page_finder(config)
if update:
with yaspin(Spinners.arc, text="Downloading pages...") as sp:
try:
page_finder.sync()
except DownloadError:
sp.write("> Sync failed, check your network and try again.")
sys.exit(1)
sp.write("> Download complete.")
info("All caches updated.")
if not command:
if not update:
secho(ctx.get_help())
return
else:
command = "-".join(command)
content = None
languages = get_languages(language)
with yaspin(Spinners.arc, text="Searching pages...") as sp:
try:
content = page_finder.find(
command, platform or guess_os(), languages=languages
)
except DownloadError:
sp.write("> Search failed, check your network and try again.")
sys.exit(1)
if content:
sp.write("> Page found.")
else:
sp.write("> No result.")
if content:
print(PageFormatter(indent_spaces=4, start_with_new_line=True).format(content))
else:
warn("There is no available pages right now.")
warn("You can create an issue via https://github.com/tldr-pages/tldr/issues.")
sys.exit(1)
def make_page_finder(config=None) -> PageFinder:
if not config:
config = DEFAULT_CONFIG
source_url = config["page_source"]
cache_config = config["cache"]
cache_timeout = cache_config.get("timeout", DEFAULT_CACHE_HOURS)
cache_location = DEFAULT_CACHE_DIR
cache_download_url = cache_config["download_url"]
cache_enabled = cache_config.get("enabled", True)
proxy_url = config["proxy_url"]
return PageFinder(
source_url,
cache_timeout,
cache_location,
cache_download_url,
cache_enabled,
proxy_url,
)
def get_languages(language: str) -> List[str]:
"""Return language list for page matching.
If language specified, use it with no fallbacks.
Otherwise make the list based on env `LANG` and
`LANGUAGE`.
# pylint: disable=line-too-long
For detailed logic, see https://github.com/tldr-pages/tldr/blob/master/CLIENT-SPECIFICATION.md#language # noqa
"""
if language:
return [language]
def extractor(x):
return x.split("_", maxsplit=1)[0]
lang = extractor(environ.get("LANG", ""))
if not lang:
return ["en"]
languages = [
extractor(item) for item in environ.get("LANGUAGE", "").split(":") if item
]
if lang not in languages:
languages.append(lang)
if "en" not in languages:
languages.append("en")
return languages
def guess_os():
system_to_platform = {
"Linux": "linux",
"Darwin": "osx",
"Java": "sunos",
"Windows": "windows",
}
return system_to_platform.get(platform_.system(), "linux")
|
import platform as platform_
import sys
from functools import partial
from os import environ
from pathlib import Path as LibPath
from typing import List
import toml
from click import Choice, Path, argument
from click import command as command_
from click import get_app_dir, option, pass_context, secho
from yaspin import yaspin
from yaspin.spinners import Spinners
from .page import DownloadError, PageFinder, PageFormatter
try:
from importlib.metadata import version
VERSION_CLI = version("py_tldr")
except ModuleNotFoundError:
from pkg_resources import get_distribution
VERSION_CLI = get_distribution("py_tldr").version
VERSION_CLIENT_SPEC = "1.5"
DEFAULT_CACHE_HOURS = 24
DEFAULT_CONFIG = {
"page_source": "https://raw.githubusercontent.com/tldr-pages/tldr/master/pages",
"language": "",
"cache": {
"enabled": True,
"timeout": DEFAULT_CACHE_HOURS,
"download_url": "https://tldr-pages.github.io/assets/tldr.zip",
},
"proxy_url": "",
}
DEFAULT_CONFIG_DIR = LibPath(get_app_dir("tldr"))
DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR / "config.toml"
DEFAULT_CACHE_DIR = LibPath.home() / ".cache" / "tldr"
info = partial(secho, bold=True, fg="green")
warn = partial(secho, bold=True, fg="yellow")
def print_version(ctx, param, value):  # pylint: disable=unused-argument
    """Eager click callback: report version info, then exit the CLI."""
    if value and not ctx.resilient_parsing:
        info(f"tldr version {VERSION_CLI}")
        info(f"client specification version {VERSION_CLIENT_SPEC}")
        ctx.exit()
def setup_config(ctx, param, value):  # pylint: disable=unused-argument
    """Build a config dict from either default or custom path.
    Currently custom config file is used without validation, so
    misconfiguration may cause errors. Also note `toml` should
    be used as file format.
    """
    config = {}
    if not value or ctx.resilient_parsing:
        # No --config given: fall back to the app-dir default, creating
        # it from DEFAULT_CONFIG on first run.
        config_dir = DEFAULT_CONFIG_DIR
        config_file = DEFAULT_CONFIG_FILE
        if not config_file.exists():
            warn("No config file found, setting it up...")
            config_dir.mkdir(parents=True, exist_ok=True)
            with open(config_file, "w", encoding="utf8") as f:
                toml.dump(DEFAULT_CONFIG, f)
            warn(f"Config file created: {config_file}")
            # Freshly written file == DEFAULT_CONFIG, so skip re-reading it.
            config = DEFAULT_CONFIG
    else:
        config_file = value
        warn(f"Using config file from {config_file}")
    if not config:
        # Custom file or pre-existing default file: load it from disk.
        with open(config_file, encoding="utf8") as f:
            config = toml.load(f)
    cache = config.get("cache")
    # Minimal sanity check: page source and cache download URL are mandatory.
    if not config.get("page_source") or not cache or not cache.get("download_url"):
        warn(f"Page source and cache are required in config file: {config_file}")
        sys.exit(1)
    return config
@command_(context_settings={"help_option_names": ["-h", "--help"]})
@option(
    "-v",
    "--version",
    is_flag=True,
    callback=print_version,
    is_eager=True,
    expose_value=False,
    help="Show version info and exit.",
)
@option(
    "--config",
    type=Path(exists=True, dir_okay=False, path_type=LibPath),
    callback=setup_config,
    help="Specify a config file to use.",
)
@option(
    "-p",
    "--platform",
    type=Choice(["android", "common", "linux", "osx", "sunos", "windows"]),
    help="Override current operating system.",
)
@option(
    "-L",
    "--language",
    help="Specify language of the page(with no fallbacks), e.g. `en`.",
)
@option("-u", "--update", is_flag=True, help="Update local cache with all pages.")
@argument("command", nargs=-1)
@pass_context
def cli(ctx, config, command, platform, language, update):
    """Collaborative cheatsheets for console commands.
    For subcommands such as `git commit`, just keep as it is:
    tldr git commit
    """
    # `config` arrives already parsed/validated by the setup_config callback.
    page_finder = make_page_finder(config)
    if update:
        # Refresh the whole local page cache before (optionally) searching.
        with yaspin(Spinners.arc, text="Downloading pages...") as sp:
            try:
                page_finder.sync()
            except DownloadError:
                sp.write("> Sync failed, check your network and try again.")
                sys.exit(1)
            sp.write("> Download complete.")
        info("All caches updated.")
    if not command:
        # Bare `tldr` prints help; bare `tldr -u` just updates and exits.
        if not update:
            secho(ctx.get_help())
        return
    else:
        # Multi-word commands map to dash-joined page names
        # (e.g. "git commit" -> "git-commit").
        command = "-".join(command)
    content = None
    languages = get_languages(language)
    with yaspin(Spinners.arc, text="Searching pages...") as sp:
        try:
            content = page_finder.find(
                command, platform or guess_os(), languages=languages
            )
        except DownloadError:
            sp.write("> Search failed, check your network and try again.")
            sys.exit(1)
        if content:
            sp.write("> Page found.")
        else:
            sp.write("> No result.")
    if content:
        print(PageFormatter(indent_spaces=4, start_with_new_line=True).format(content))
    else:
        warn("There is no available pages right now.")
        warn("You can create an issue via https://github.com/tldr-pages/tldr/issues.")
        sys.exit(1)
def make_page_finder(config=None) -> PageFinder:
    """Construct a PageFinder from a config dict (defaults to DEFAULT_CONFIG).

    Optional keys (`timeout`, `enabled`, `proxy_url`) fall back to
    defaults so older or hand-edited config files still work.
    """
    if not config:
        config = DEFAULT_CONFIG
    source_url = config["page_source"]
    cache_config = config["cache"]
    cache_timeout = cache_config.get("timeout", DEFAULT_CACHE_HOURS)
    cache_location = DEFAULT_CACHE_DIR
    cache_download_url = cache_config["download_url"]
    cache_enabled = cache_config.get("enabled", True)
    # .get with default: setup_config only validates page_source/cache,
    # so proxy_url may legitimately be absent from a custom config file
    # (direct indexing raised KeyError in that case).
    proxy_url = config.get("proxy_url", "")
    return PageFinder(
        source_url,
        cache_timeout,
        cache_location,
        cache_download_url,
        cache_enabled,
        proxy_url,
    )
def get_languages(language: str) -> List[str]:
    """Build the ordered language list used for page matching.

    An explicitly requested *language* is used alone with no fallback.
    Otherwise the list is derived from the colon-separated `LANGUAGE`
    env var, followed by `LANG`, with "en" as the final fallback
    (per the tldr client specification).
    """
    if language:
        return [language]

    def base_lang(code):
        # "de_DE.UTF-8" -> "de"
        return code.split("_", maxsplit=1)[0]

    primary = base_lang(environ.get("LANG", ""))
    if not primary:
        return ["en"]
    result = [
        base_lang(part) for part in environ.get("LANGUAGE", "").split(":") if part
    ]
    if primary not in result:
        result.append(primary)
    if "en" not in result:
        result.append("en")
    return result
def guess_os():
    """Map platform.system() to a tldr platform name ("linux" fallback)."""
    mapping = {
        "Linux": "linux",
        "Darwin": "osx",
        "Java": "sunos",
        "Windows": "windows",
    }
    return mapping.get(platform_.system(), "linux")
|
en
| 0.741894
|
# pylint: disable=unused-argument # pylint: disable=unused-argument Build a config dict from either default or custom path. Currently custom config file is used without validation, so misconfiguration may cause errors. Also note `toml` should used as file format. Collaborative cheatsheets for console commands. For subcommands such as `git commit`, just keep as it is: tldr git commit Return language list for page matching. If language specified, use it with no fallbacks. Otherwise make the list based on env `LANG` and `LANGUAGE`. # pylint: disable=line-too-long For detailed logic, see https://github.com/tldr-pages/tldr/blob/master/CLIENT-SPECIFICATION.md#language # noqa
| 1.95175
| 2
|
Chapter 07/stock/search.py
|
bpbpublications/Time-Series-Forecasting-using-Deep-Learning
| 7
|
6626610
|
import time
from pathlib import Path
from nni.experiment import Experiment
# Search Space
fast_choices = {"_type": "choice", "_value": [3, 5, 7, 9]}
slow_choices = {"_type": "choice", "_value": [14, 20, 40]}
length_choices = {"_type": "choice", "_value": [5, 10, 20]}
ind_choices = [
{"_name": "ao", "fast": fast_choices, "slow": slow_choices},
{"_name": "apo", "fast": fast_choices, "slow": slow_choices},
{"_name": "cci", "length": length_choices},
{"_name": "cmo", "length": length_choices},
{"_name": "mom", "length": length_choices},
{"_name": "rsi", "length": length_choices},
{"_name": "tsi", "fast": fast_choices, "slow": slow_choices},
]
search_space = {
"lr": {"_type": "choice", "_value": [.01, .005, .001, .0005]},
"rnn_type": {"_type": "choice", "_value": ['rnn', 'gru']},
"rnn_hidden_size": {"_type": "choice", "_value": [8, 16, 24]},
"ind_hidden_size": {"_type": "choice", "_value": [1, 2, 4]},
"des_size": {"_type": "choice", "_value": [2, 4, 8, 16]},
"ind1": {"_type": "choice", "_value": ind_choices},
"ind2": {"_type": "choice", "_value": ind_choices},
}
max_trials = 1_000
# Search Configuration
search = Experiment('local')
# Search Name
search.config.experiment_name = 'Alg Trader'
search.config.trial_concurrency = 2
search.config.max_trial_number = max_trials
search.config.search_space = search_space
search.config.trial_command = 'python3 trial.py'
search.config.trial_code_directory = Path(__file__).parent
# Search Tuner Settings
search.config.tuner.name = 'Evolution'
search.config.tuner.class_args['optimize_mode'] = 'minimize'
search.config.tuner.class_args['population_size'] = 32
# Running Search
search.start(8080)
# Awaiting Results
executed_trials = 0
while True:
trials = search.export_data()
if executed_trials != len(trials):
executed_trials = len(trials)
print(f'\nTrials: {executed_trials} / {max_trials}', end = "")
if search.get_status() == 'DONE':
best_trial = min(trials, key = lambda t: t.value)
print(f'\nBest trial params: {best_trial.parameter}')
input("Experiment is finished. Press any key to exit...")
break
print('.', end = ""),
time.sleep(10)
|
import time
from pathlib import Path

from nni.experiment import Experiment

# Search Space: the two indicator slots share common period options.
fast_choices = {"_type": "choice", "_value": [3, 5, 7, 9]}
slow_choices = {"_type": "choice", "_value": [14, 20, 40]}
length_choices = {"_type": "choice", "_value": [5, 10, 20]}
ind_choices = [
    {"_name": "ao", "fast": fast_choices, "slow": slow_choices},
    {"_name": "apo", "fast": fast_choices, "slow": slow_choices},
    {"_name": "cci", "length": length_choices},
    {"_name": "cmo", "length": length_choices},
    {"_name": "mom", "length": length_choices},
    {"_name": "rsi", "length": length_choices},
    {"_name": "tsi", "fast": fast_choices, "slow": slow_choices},
]
search_space = {
    "lr": {"_type": "choice", "_value": [.01, .005, .001, .0005]},
    "rnn_type": {"_type": "choice", "_value": ['rnn', 'gru']},
    "rnn_hidden_size": {"_type": "choice", "_value": [8, 16, 24]},
    "ind_hidden_size": {"_type": "choice", "_value": [1, 2, 4]},
    "des_size": {"_type": "choice", "_value": [2, 4, 8, 16]},
    "ind1": {"_type": "choice", "_value": ind_choices},
    "ind2": {"_type": "choice", "_value": ind_choices},
}
max_trials = 1_000

# Search Configuration: each trial runs trial.py from this directory.
search = Experiment('local')
search.config.experiment_name = 'Alg Trader'
search.config.trial_concurrency = 2
search.config.max_trial_number = max_trials
search.config.search_space = search_space
search.config.trial_command = 'python3 trial.py'
search.config.trial_code_directory = Path(__file__).parent

# Search Tuner Settings: evolutionary tuner minimizing the trial metric.
search.config.tuner.name = 'Evolution'
search.config.tuner.class_args['optimize_mode'] = 'minimize'
search.config.tuner.class_args['population_size'] = 32

# Running Search: serves the NNI web UI on port 8080.
search.start(8080)

# Awaiting Results: poll every 10s until the experiment reports DONE.
executed_trials = 0
while True:
    trials = search.export_data()
    if executed_trials != len(trials):
        executed_trials = len(trials)
        # flush so progress is visible immediately despite end="".
        print(f'\nTrials: {executed_trials} / {max_trials}', end="", flush=True)
    if search.get_status() == 'DONE':
        best_trial = min(trials, key=lambda t: t.value)
        print(f'\nBest trial params: {best_trial.parameter}')
        input("Experiment is finished. Press any key to exit...")
        break
    # Fixed: a stray trailing comma made this line a useless tuple
    # expression, and without flush the dots stayed buffered.
    print('.', end="", flush=True)
    time.sleep(10)
|
en
| 0.391095
|
# Search Space # Search Configuration # Search Name # Search Tuner Settings # Running Search # Awaiting Results
| 2.107445
| 2
|
pipsqueak/pip/freeze.py
|
svrana/pipsqueak
| 0
|
6626611
|
import os
import re
import logging
from pipsqueak.pip.vcs import vcs, get_src_requirement
from pipsqueak.pip.util import dist_is_editable
logger = logging.getLogger(__file__)
class FrozenRequirement(object):
def __init__(self, name, req, editable, location, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
self.location = location
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except Exception as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req,
)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
logger.warn(
"SVN editable detection based on dependency links "
"will be dropped in the future.",
)
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
logger.info("found: %s", dist.project_name)
return cls(dist.project_name, req, editable, location, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
|
import os
import re
import logging
from pipsqueak.pip.vcs import vcs, get_src_requirement
from pipsqueak.pip.util import dist_is_editable
logger = logging.getLogger(__file__)
class FrozenRequirement(object):
def __init__(self, name, req, editable, location, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
self.location = location
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except Exception as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req,
)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
logger.warn(
"SVN editable detection based on dependency links "
"will be dropped in the future.",
)
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
logger.info("found: %s", dist.project_name)
return cls(dist.project_name, req, editable, location, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
    """Render the requirement as it would appear in a requirements file:
    any accumulated comment lines, then the (possibly ``-e``-prefixed)
    requirement, terminated by a newline."""
    spec = '-e %s' % self.req if self.editable else self.req
    lines = list(self.comments)
    lines.append(str(spec))
    return '\n'.join(lines) + '\n'
|
en
| 0.576925
|
# !! Could not determine repository location' # FIXME: could not find svn URL in dependency_links ' #egg=%s' % (
| 2.337919
| 2
|
core/evaluate_gan.py
|
brian220/sketch_part_rec
| 1
|
6626612
|
<reponame>brian220/sketch_part_rec<gh_stars>1-10
# 176 39f5eecbfb2470846666a748bda83f67
# 41753 a58f8f1bd61094b3ff2c92c2a4f65876
# 2603 27c00ec2b6ec279958e80128fd34c2b1
# 37247 484f0070df7d5375492d9da2668ec34c
# 36881 4231883e92a3c1a21c62d11641ffbd35
import json
import numpy as np
import os, sys
import torch
import torch.backends.cudnn
import torch.utils.data
import cv2
from datetime import datetime as dt
from models.networks_graphx_gan import GRAPHX_GAN_MODEL
import utils.point_cloud_visualization_old
import utils.data_loaders
import utils.data_transforms
import utils.network_utils
from pyntcloud import PyntCloud
def init_pointcloud_loader(num_points):
    """Sample a random initial point cloud of shape (num_points, 3), float32.

    Depths Z are uniform in [1, 2); pixel coordinates (h, w) are uniform in
    [10, 214) and back-projected using the constants 111.5 / 248 —
    presumably the camera principal point and focal length (TODO confirm).
    """
    depth = np.random.rand(num_points) + 1.
    rows = np.random.uniform(10., 214., size=(num_points,))
    cols = np.random.uniform(10., 214., size=(num_points,))
    x = (cols - 111.5) / 248. * -depth
    y = (rows - 111.5) / 248. * depth
    # Equivalent to reshaping each component to (-1, 1) and concatenating.
    points = np.stack((x, y, depth), axis=1)
    return points.astype('float32')
def evaluate_gan_net(cfg):
    """Evaluate a trained GRAPHX-GAN point-cloud reconstruction network.

    Loads the test split, restores weights from ``cfg.EVALUATE.WEIGHT_PATH``,
    and renders predicted vs. ground-truth point clouds for the first 200
    test samples into ``cfg.EVALUATE.OUTPUT_FOLDER``.
    """
    # BUGFIX: MultiStepLR was referenced below but never imported anywhere
    # in this module (NameError at model construction time).
    from torch.optim.lr_scheduler import MultiStepLR

    # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use.
    torch.backends.cudnn.benchmark = True

    eval_transforms = utils.data_transforms.Compose([
        utils.data_transforms.ToTensor(),
    ])

    dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
    eval_data_loader = torch.utils.data.DataLoader(dataset=dataset_loader.get_dataset(
                                                   utils.data_loaders.DatasetType.TEST, eval_transforms),
                                                   batch_size=cfg.EVALUATE.BATCH_SIZE,
                                                   num_workers=1,
                                                   shuffle=False)

    # Set up the network. The optimizer/scheduler factories are required by
    # the model constructor even though evaluation never steps them.
    # NOTE(review): `DISCRIMINATOR_LEARNINF_RATE` looks like a typo for
    # "LEARNING" — kept as-is because it must match the cfg key; confirm.
    net = GRAPHX_GAN_MODEL(
        cfg=cfg,
        optimizer_G=lambda x: torch.optim.Adam(x,
                                               lr=cfg.TRAIN.GENERATOR_LEARNING_RATE,
                                               weight_decay=cfg.TRAIN.GENERATOR_WEIGHT_DECAY,
                                               betas=cfg.TRAIN.BETAS),
        scheduler_G=lambda x: MultiStepLR(x, milestones=cfg.TRAIN.MILESTONES, gamma=cfg.TRAIN.GAMMA),
        optimizer_D=lambda x: torch.optim.Adam(x,
                                               lr=cfg.TRAIN.DISCRIMINATOR_LEARNINF_RATE,
                                               weight_decay=cfg.TRAIN.DISCRIMINATOR_WEIGHT_DECAY,
                                               betas=cfg.TRAIN.BETAS),
        scheduler_D=lambda x: MultiStepLR(x, milestones=cfg.TRAIN.MILESTONES, gamma=cfg.TRAIN.GAMMA)
    )

    # NOTE(review): the loop below calls net.module.valid_step, which only
    # exists when the DataParallel wrapper is applied — i.e. this function
    # currently requires CUDA; confirm if CPU evaluation is needed.
    if torch.cuda.is_available():
        net = torch.nn.DataParallel(net).cuda()

    # Restore reconstruction weights (encoder + decoder).
    print('[INFO] %s Loading reconstruction weights from %s ...' % (dt.now(), cfg.EVALUATE.WEIGHT_PATH))
    rec_checkpoint = torch.load(cfg.EVALUATE.WEIGHT_PATH)
    net.load_state_dict(rec_checkpoint['net'])
    print('[INFO] Best reconstruction result at epoch %d ...' % rec_checkpoint['epoch_idx'])

    net.eval()

    # Evaluation loop: render prediction and ground truth for each sample.
    for sample_idx, (taxonomy_names, sample_names, rendering_images,
                     model_azi, model_ele,
                     init_point_clouds, ground_truth_point_clouds) in enumerate(eval_data_loader):
        print("evaluate sample: ", sample_idx)
        with torch.no_grad():
            # Only one image per sample: drop the per-view dimension.
            rendering_images = torch.squeeze(rendering_images, 1)

            # Move data onto the GPU (no-op fallbacks on CPU).
            rendering_images = utils.network_utils.var_or_cuda(rendering_images)
            model_azi = utils.network_utils.var_or_cuda(model_azi)
            model_ele = utils.network_utils.var_or_cuda(model_ele)
            init_point_clouds = utils.network_utils.var_or_cuda(init_point_clouds)
            ground_truth_point_clouds = utils.network_utils.var_or_cuda(ground_truth_point_clouds)

            loss, pred_pc = net.module.valid_step(rendering_images, init_point_clouds, ground_truth_point_clouds)

            img_dir = cfg.EVALUATE.OUTPUT_FOLDER
            # Convert model azimuth/elevation (radians) to the renderer's
            # degree-based viewpoint; elevation is offset by 90 degrees.
            azi = model_azi[0].detach().cpu().numpy() * 180. / np.pi
            ele = model_ele[0].detach().cpu().numpy() * 180. / np.pi + 90.

            # Render the predicted point cloud.
            p_pc = pred_pc[0].detach().cpu().numpy()
            rendering_views = utils.point_cloud_visualization_old.get_point_cloud_image(
                p_pc,
                os.path.join(img_dir, str(sample_idx), 'rec results'),
                sample_idx,
                cfg.EVALUATE.VERSION_ID,
                "",
                view=[azi, ele])

            # Render the ground-truth point cloud.
            gt_pc = ground_truth_point_clouds[0].detach().cpu().numpy()
            rendering_views = utils.point_cloud_visualization_old.get_point_cloud_image(
                gt_pc,
                os.path.join(img_dir, str(sample_idx), 'gt'),
                sample_idx,
                cfg.EVALUATE.VERSION_ID,
                "",
                view=[azi, ele])

        # Only evaluate the first 200 samples.
        if sample_idx == 200:
            break
|
# 176 39f5eecbfb2470846666a748bda83f67
# 41753 a58f8f1bd61094b3ff2c92c2a4f65876
# 2603 27c00ec2b6ec279958e80128fd34c2b1
# 37247 484f0070df7d5375492d9da2668ec34c
# 36881 4231883e92a3c1a21c62d11641ffbd35
import json
import numpy as np
import os, sys
import torch
import torch.backends.cudnn
import torch.utils.data
import cv2
from datetime import datetime as dt
from models.networks_graphx_gan import GRAPHX_GAN_MODEL
import utils.point_cloud_visualization_old
import utils.data_loaders
import utils.data_transforms
import utils.network_utils
from pyntcloud import PyntCloud
def init_pointcloud_loader(num_points):
Z = np.random.rand(num_points) + 1.
h = np.random.uniform(10., 214., size=(num_points,))
w = np.random.uniform(10., 214., size=(num_points,))
X = (w - 111.5) / 248. * -Z
Y = (h - 111.5) / 248. * Z
X = np.reshape(X, (-1, 1))
Y = np.reshape(Y, (-1, 1))
Z = np.reshape(Z, (-1, 1))
XYZ = np.concatenate((X, Y, Z), 1)
return XYZ.astype('float32')
def evaluate_gan_net(cfg):
# Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
torch.backends.cudnn.benchmark = True
eval_transforms = utils.data_transforms.Compose([
utils.data_transforms.ToTensor(),
])
dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
eval_data_loader = torch.utils.data.DataLoader(dataset=dataset_loader.get_dataset(
utils.data_loaders.DatasetType.TEST, eval_transforms),
batch_size=cfg.EVALUATE.BATCH_SIZE,
num_workers=1,
shuffle=False)
# Set up networks
# The parameters here need to be set in cfg
net = GRAPHX_GAN_MODEL(
cfg=cfg,
optimizer_G=lambda x: torch.optim.Adam(x,
lr=cfg.TRAIN.GENERATOR_LEARNING_RATE,
weight_decay=cfg.TRAIN.GENERATOR_WEIGHT_DECAY,
betas=cfg.TRAIN.BETAS),
scheduler_G=lambda x: MultiStepLR(x, milestones=cfg.TRAIN.MILESTONES, gamma=cfg.TRAIN.GAMMA),
optimizer_D=lambda x: torch.optim.Adam(x,
lr=cfg.TRAIN.DISCRIMINATOR_LEARNINF_RATE,
weight_decay=cfg.TRAIN.DISCRIMINATOR_WEIGHT_DECAY,
betas=cfg.TRAIN.BETAS),
scheduler_D=lambda x: MultiStepLR(x, milestones=cfg.TRAIN.MILESTONES, gamma=cfg.TRAIN.GAMMA)
)
if torch.cuda.is_available():
net = torch.nn.DataParallel(net).cuda()
# Load weight
# Load weight for encoder, decoder
print('[INFO] %s Loading reconstruction weights from %s ...' % (dt.now(), cfg.EVALUATE.WEIGHT_PATH))
rec_checkpoint = torch.load(cfg.EVALUATE.WEIGHT_PATH)
net.load_state_dict(rec_checkpoint['net'])
print('[INFO] Best reconstruction result at epoch %d ...' % rec_checkpoint['epoch_idx'])
epoch_id = int(rec_checkpoint['epoch_idx'])
net.eval()
# Testing loop
for sample_idx, (taxonomy_names, sample_names, rendering_images,
model_azi, model_ele,
init_point_clouds, ground_truth_point_clouds) in enumerate(eval_data_loader):
print("evaluate sample: ", sample_idx)
with torch.no_grad():
# Only one image per sample
rendering_images = torch.squeeze(rendering_images, 1)
# Get data from data loader
rendering_images = utils.network_utils.var_or_cuda(rendering_images)
model_azi = utils.network_utils.var_or_cuda(model_azi)
model_ele = utils.network_utils.var_or_cuda(model_ele)
init_point_clouds = utils.network_utils.var_or_cuda(init_point_clouds)
ground_truth_point_clouds = utils.network_utils.var_or_cuda(ground_truth_point_clouds)
loss, pred_pc = net.module.valid_step(rendering_images, init_point_clouds, ground_truth_point_clouds)
img_dir = cfg.EVALUATE.OUTPUT_FOLDER
azi = model_azi[0].detach().cpu().numpy()*180./np.pi
ele = model_ele[0].detach().cpu().numpy()*180./np.pi + 90.
sample_name = sample_names[0]
# Predict Pointcloud
p_pc = pred_pc[0].detach().cpu().numpy()
rendering_views = utils.point_cloud_visualization_old.get_point_cloud_image(p_pc,
os.path.join(img_dir, str(sample_idx), 'rec results'),
sample_idx,
cfg.EVALUATE.VERSION_ID,
"",
view=[azi, ele])
# Groundtruth Pointcloud
gt_pc = ground_truth_point_clouds[0].detach().cpu().numpy()
rendering_views = utils.point_cloud_visualization_old.get_point_cloud_image(gt_pc,
os.path.join(img_dir, str(sample_idx), 'gt'),
sample_idx,
cfg.EVALUATE.VERSION_ID,
"",
view=[azi, ele])
if sample_idx == 200:
break
|
en
| 0.59148
|
# 176 39f5eecbfb2470846666a748bda83f67 # 41753 a58f8f1bd61094b3ff2c92c2a4f65876 # 2603 27c00ec2b6ec279958e80128fd34c2b1 # 37247 484f0070df7d5375492d9da2668ec34c # 36881 4231883e92a3c1a21c62d11641ffbd35 # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use # Set up networks # The parameters here need to be set in cfg # Load weight # Load weight for encoder, decoder # Testing loop # Only one image per sample # Get data from data loader # Predict Pointcloud # Groundtruth Pointcloud
| 1.313259
| 1
|
api.py
|
KirillDmit/xmljson
| 0
|
6626613
|
import datetime
from itertools import groupby
from urllib.request import urlopen
from json import loads


def get_date(revision):
    """Parse the revision's UTC timestamp string and return its calendar date."""
    return datetime.datetime.strptime(revision['timestamp'], '%Y-%m-%dT%H:%M:%SZ').date()


# MediaWiki API query: the last 500 revisions of a Russian Wikipedia article
# (title percent-encoded), returned as JSON.
url = 'https://ru.wikipedia.org/w/api.php?action=query&format=json&prop=revisions&rvlimit=500&titles=%D0%93%D1%80%D0' \
      '%B0%D0%B4%D1%81%D0%BA%D0%B8%D0%B9,_%D0%90%D0%BB%D0%B5%D0%BA%D1%81%D0%B0%D0%BD%D0%B4%D1%80_%D0%91%D0%BE%D1%80' \
      '%D0%B8%D1%81%D0%BE%D0%B2%D0%B8%D1%87 '

data = loads(urlopen(url).read().decode('utf8'))
revisions = data['query']['pages']['183903']['revisions']
# Count edits per day; groupby only merges adjacent items, so this relies on
# the API returning revisions ordered by timestamp.
stat = {day: len(list(batch)) for day, batch in groupby(revisions, get_date)}

with open('api.txt', 'w', encoding='utf8') as file:
    for day, count in stat.items():
        print(day, count, file=file)
# 2021-11-28 — the day Alexander Gradsky passed away
|
import datetime
from itertools import groupby
from urllib.request import urlopen
from json import loads
def get_date(x):
return datetime.datetime.strptime(x['timestamp'], '%Y-%m-%dT%H:%M:%SZ').date()
url = 'https://ru.wikipedia.org/w/api.php?action=query&format=json&prop=revisions&rvlimit=500&titles=%D0%93%D1%80%D0' \
'%B0%D0%B4%D1%81%D0%BA%D0%B8%D0%B9,_%D0%90%D0%BB%D0%B5%D0%BA%D1%81%D0%B0%D0%BD%D0%B4%D1%80_%D0%91%D0%BE%D1%80' \
'%D0%B8%D1%81%D0%BE%D0%B2%D0%B8%D1%87 '
stat = {}
data = loads(urlopen(url).read().decode('utf8'))
group = groupby(data['query']['pages']['183903']['revisions'], get_date)
for date, changes in group:
stat.update({date: len(list(changes))})
with open('api.txt', 'w', encoding='utf8') as file:
for date in stat:
print(date, stat[date], file=file)
#2021-11-28 - день ухода Градского из жизни
|
ru
| 0.9397
|
#2021-11-28 - день ухода Градского из жизни
| 3.047541
| 3
|
applications/FluidDynamicsApplication/python_scripts/stokes_solver.py
|
HubertBalcerzak/Kratos
| 0
|
6626614
|
# importing the Kratos Library
import KratosMultiphysics as kratoscore
import KratosMultiphysics.FluidDynamicsApplication as cfd
import KratosMultiphysics.python_linear_solver_factory as linear_solver_factory
def AddVariables(model_part, settings=None):
    """Register the nodal solution-step variables required by this solver.

    Registration order matches the original hand-written sequence.  Several
    entries carry open TODOs about being removable or movable to properties.
    """
    nodal_variables = (
        kratoscore.VELOCITY,
        kratoscore.PRESSURE,
        kratoscore.DENSITY,
        kratoscore.BODY_FORCE,          # TODO: could live in properties if constant
        kratoscore.REACTION,            # removable if no reactions must be computed
        kratoscore.EXTERNAL_PRESSURE,
        kratoscore.NORMAL,              # TODO: not strictly needed by the solver itself
    )
    for variable in nodal_variables:
        model_part.AddNodalSolutionStepVariable(variable)
    print("variables for the monolithic solver symbolic added correctly")
def AddDofs(model_part, settings=None):
    """Add velocity and pressure degrees of freedom (with their reaction
    variables) to every node of *model_part*."""
    dof_pairs = (
        (kratoscore.VELOCITY_X, kratoscore.REACTION_X),
        (kratoscore.VELOCITY_Y, kratoscore.REACTION_Y),
        (kratoscore.VELOCITY_Z, kratoscore.REACTION_Z),
        (kratoscore.PRESSURE, kratoscore.REACTION_WATER_PRESSURE),
    )
    for node in model_part.Nodes:
        for dof, reaction in dof_pairs:
            node.AddDof(dof, reaction)
    print("dofs for the vms monolithic solver added correctly")
def CreateSolver(model_part, settings):
    """Factory entry point: build a StokesSolver for *model_part*."""
    return StokesSolver(model_part, settings)
class StokesSolver:
    """Monolithic Stokes solver wrapping a Newton-Raphson strategy with a
    BDF2 time discretization and a velocity/pressure convergence check."""

    def __init__(self, model_part, settings):
        self.model_part = model_part
        # NOTE: all configuration parameters MUST be present in *settings*.
        self.domain_size = settings.domain_size
        self.rel_vel_tol = settings.vel_tolerance
        self.abs_vel_tol = settings.abs_vel_tolerance
        self.rel_pres_tol = settings.press_tolerance
        self.abs_pres_tol = settings.abs_press_tolerance
        self.dynamic_tau = settings.dynamic_tau
        self.max_iter = settings.max_iteration
        self.echo_level = settings.echo_level
        self.compute_reactions = settings.compute_reactions
        self.reform_dofs_at_each_step = settings.reform_dofs_at_each_step
        self.linear_solver = linear_solver_factory.ConstructSolver(settings.linear_solver_settings)
        time_order = 2
        # BUGFIX: the Kratos module is imported as `kratoscore`; the original
        # referenced the undefined name `KratosMultiphysics` here, which
        # raised NameError on construction.
        self.time_discretization = kratoscore.TimeDiscretization.BDF(time_order)
        self.conv_criteria = cfd.VelPrCriteria(self.rel_vel_tol, self.abs_vel_tol,
                                               self.rel_pres_tol, self.abs_pres_tol)
        (self.conv_criteria).SetEchoLevel(self.echo_level)
        self.time_scheme = kratoscore.ResidualBasedIncrementalUpdateStaticScheme()
        builder_and_solver = kratoscore.ResidualBasedBlockBuilderAndSolver(self.linear_solver)
        move_mesh_flag = False  # user should NOT configure this
        self.fluid_solver = kratoscore.ResidualBasedNewtonRaphsonStrategy(
            self.model_part, self.time_scheme, self.linear_solver, self.conv_criteria,
            builder_and_solver, self.max_iter, self.compute_reactions, self.reform_dofs_at_each_step, move_mesh_flag)
        (self.fluid_solver).SetEchoLevel(self.echo_level)
        self.fluid_solver.Check()
        self.model_part.ProcessInfo.SetValue(kratoscore.DYNAMIC_TAU, self.dynamic_tau)
        print("Construction stokes solver finished")

    def Initialize(self):
        """No-op; kept for interface compatibility with other solvers."""
        print ("Initialization stokes solver finished")

    def Solve(self):
        """Update BDF coefficients for the current step, then solve."""
        (self.time_discretization).ComputeAndSaveBDFCoefficients(self.model_part.ProcessInfo)
        self.fluid_solver.Solve()

    def SetEchoLevel(self, level):
        self.fluid_solver.SetEchoLevel(level)

    def Clear(self):
        self.fluid_solver.Clear()

    def Check(self):
        self.fluid_solver.Check()
|
# importing the Kratos Library
import KratosMultiphysics as kratoscore
import KratosMultiphysics.FluidDynamicsApplication as cfd
import KratosMultiphysics.python_linear_solver_factory as linear_solver_factory
def AddVariables(model_part, settings=None):
model_part.AddNodalSolutionStepVariable(kratoscore.VELOCITY)
model_part.AddNodalSolutionStepVariable(kratoscore.PRESSURE)
#model_part.AddNodalSolutionStepVariable(kratoscore.VISCOSITY)
model_part.AddNodalSolutionStepVariable(kratoscore.DENSITY)
model_part.AddNodalSolutionStepVariable(kratoscore.BODY_FORCE) #TODO: decide if it is needed. if constant it could be passed in properties
model_part.AddNodalSolutionStepVariable(kratoscore.REACTION) #in case this variable could be removed if no reactions must be computed
#model_part.AddNodalSolutionStepVariable(kratoscore.REACTION_WATER_PRESSURE) #in case this variable could be removed if no reactions must be computed
model_part.AddNodalSolutionStepVariable(kratoscore.EXTERNAL_PRESSURE)
model_part.AddNodalSolutionStepVariable(kratoscore.NORMAL) #TODO: this variable is not strictly needed by the solver - may be needed by other utilities
#model_part.AddNodalSolutionStepVariable(kratoscore.IS_STRUCTURE) #TODO: remove as deprecated!!
#model_part.AddNodalSolutionStepVariable(kratoscore.MESH_VELOCITY) #TODO: remove. needed because of the Condition used
#model_part.AddNodalSolutionStepVariable(kratoscore.ACCELERATION) #TODO: remove! needed because of the Condition used
print("variables for the monolithic solver symbolic added correctly")
def AddDofs(model_part, settings=None):
for node in model_part.Nodes:
# adding dofs
node.AddDof(kratoscore.VELOCITY_X, kratoscore.REACTION_X)
node.AddDof(kratoscore.VELOCITY_Y, kratoscore.REACTION_Y)
node.AddDof(kratoscore.VELOCITY_Z, kratoscore.REACTION_Z)
node.AddDof(kratoscore.PRESSURE, kratoscore.REACTION_WATER_PRESSURE)
print("dofs for the vms monolithic solver added correctly")
def CreateSolver(model_part, settings):
fluid_solver = StokesSolver(model_part, settings)
return fluid_solver
class StokesSolver:
def __init__(self, model_part, settings):
self.model_part = model_part
#note that all settingsuration parameters MUST be passed.
self.domain_size = settings.domain_size
self.rel_vel_tol = settings.vel_tolerance
self.abs_vel_tol = settings.abs_vel_tolerance
self.rel_pres_tol = settings.press_tolerance
self.abs_pres_tol = settings.abs_press_tolerance
self.dynamic_tau = settings.dynamic_tau
self.max_iter = settings.max_iteration
self.echo_level = settings.echo_level
self.compute_reactions = settings.compute_reactions
self.reform_dofs_at_each_step = settings.reform_dofs_at_each_step
self.linear_solver = linear_solver_factory.ConstructSolver(settings.linear_solver_settings)
time_order = 2
self.time_discretization = KratosMultiphysics.TimeDiscretization.BDF(time_order)
self.conv_criteria = cfd.VelPrCriteria(self.rel_vel_tol, self.abs_vel_tol,
self.rel_pres_tol, self.abs_pres_tol)
(self.conv_criteria).SetEchoLevel(self.echo_level)
self.time_scheme = kratoscore.ResidualBasedIncrementalUpdateStaticScheme()
builder_and_solver = kratoscore.ResidualBasedBlockBuilderAndSolver(self.linear_solver)
move_mesh_flag = False #user should NOT configure this
self.fluid_solver = kratoscore.ResidualBasedNewtonRaphsonStrategy(
self.model_part, self.time_scheme, self.linear_solver, self.conv_criteria,
builder_and_solver, self.max_iter, self.compute_reactions, self.reform_dofs_at_each_step, move_mesh_flag)
(self.fluid_solver).SetEchoLevel(self.echo_level)
self.fluid_solver.Check()
self.model_part.ProcessInfo.SetValue(kratoscore.DYNAMIC_TAU, self.dynamic_tau)
print("Construction stokes solver finished")
#
def Initialize(self):
print ("Initialization stokes solver finished")
def Solve(self):
(self.time_discretization).ComputeAndSaveBDFCoefficients(self.model_part.ProcessInfo)
self.fluid_solver.Solve()
def SetEchoLevel(self, level):
self.fluid_solver.SetEchoLevel(level)
def Clear(self):
self.fluid_solver.Clear()
def Check(self):
self.fluid_solver.Check()
|
en
| 0.581823
|
# importing the Kratos Library #model_part.AddNodalSolutionStepVariable(kratoscore.VISCOSITY) #TODO: decide if it is needed. if constant it could be passed in properties #in case this variable could be removed if no reactions must be computed #model_part.AddNodalSolutionStepVariable(kratoscore.REACTION_WATER_PRESSURE) #in case this variable could be removed if no reactions must be computed #TODO: this variable is not strictly needed by the solver - may be needed by other utilities #model_part.AddNodalSolutionStepVariable(kratoscore.IS_STRUCTURE) #TODO: remove as deprecated!! #model_part.AddNodalSolutionStepVariable(kratoscore.MESH_VELOCITY) #TODO: remove. needed because of the Condition used #model_part.AddNodalSolutionStepVariable(kratoscore.ACCELERATION) #TODO: remove! needed because of the Condition used # adding dofs #note that all settingsuration parameters MUST be passed. #user should NOT configure this #
| 2.288943
| 2
|
scrapy_poi/utils/preset_items.py
|
ygo-prometheus/bilibili_danmaku_sensor
| 0
|
6626615
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ErrorItem(scrapy.Item):
    """Scrapy item recording a failed request for later inspection.

    Field semantics below are inferred from the names — confirm against the
    pipeline that populates them.
    """
    created_time = scrapy.Field()     # human-readable failure time
    created_time_ts = scrapy.Field()  # failure time as a numeric timestamp
    reason = scrapy.Field()           # short description of the failure
    request = scrapy.Field()          # the originating request
    traceback = scrapy.Field()        # formatted exception traceback, if any
    response_text = scrapy.Field()    # response body, if a response arrived
    failure = scrapy.Field()          # raw failure object from the downloader
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ErrorItem(scrapy.Item):
created_time = scrapy.Field()
created_time_ts = scrapy.Field()
reason = scrapy.Field()
request = scrapy.Field()
traceback = scrapy.Field()
response_text = scrapy.Field()
failure = scrapy.Field()
|
en
| 0.638537
|
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # https://doc.scrapy.org/en/latest/topics/items.html
| 2.109244
| 2
|
models/speech_silence_tpotclassifier.py
|
jim-schwoebel/pauses
| 18
|
6626616
|
# Train a TPOT-exported LinearSVC on the speech/silence feature set and
# persist the fitted pipeline to disk.
import numpy as np
import json, pickle
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

# NOTE: Make sure that the class is labeled 'target' in the data file
# IMPROVED: both files are now opened via context managers; the original
# leaked the JSON handle and never closed the pickle file explicitly.
with open('speech_silence_tpotclassifier_.json') as json_file:
    g = json.load(json_file)
tpot_data = g['labels']
features = g['data']
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data, random_state=None)

# Average CV score on the training set was:0.9527629233511586
exported_pipeline = LinearSVC(C=15.0, dual=False, loss="squared_hinge", penalty="l2", tol=0.001)
exported_pipeline.fit(training_features, training_target)

print('saving classifier to disk')
with open('speech_silence_tpotclassifier.pickle', 'wb') as pickle_file:
    pickle.dump(exported_pipeline, pickle_file)
|
import numpy as np
import json, pickle
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
# NOTE: Make sure that the class is labeled 'target' in the data file
g=json.load(open('speech_silence_tpotclassifier_.json'))
tpot_data=g['labels']
features=g['data']
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data, random_state=None)
# Average CV score on the training set was:0.9527629233511586
exported_pipeline = LinearSVC(C=15.0, dual=False, loss="squared_hinge", penalty="l2", tol=0.001)
exported_pipeline.fit(training_features, training_target)
print('saving classifier to disk')
f=open('speech_silence_tpotclassifier.pickle','wb')
pickle.dump(exported_pipeline,f)
f.close()
|
en
| 0.894309
|
# NOTE: Make sure that the class is labeled 'target' in the data file # Average CV score on the training set was:0.9527629233511586
| 2.936551
| 3
|
.editorconfig*.py
|
bastula/regressors
| 1
|
6626617
|
<gh_stars>1-10
# http://editorconfig.org
root = true
[*]
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
end_of_line = lf
[*.bat]
indent_style = tab
end_of_line = crlf
[LICENSE]
insert_final_newline = false
[Makefile]
indent_style = tab
|
# http://editorconfig.org
root = true
[*]
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
end_of_line = lf
[*.bat]
indent_style = tab
end_of_line = crlf
[LICENSE]
insert_final_newline = false
[Makefile]
indent_style = tab
|
es
| 0.371743
|
# http://editorconfig.org
| 1.091883
| 1
|
100 Python Exercises/Day 3/11.py
|
elipopovadev/Tournament-Tracker
| 0
|
6626618
|
<filename>100 Python Exercises/Day 3/11.py<gh_stars>0
''' Question 11
Write a program which accepts a sequence of comma separated 4 digit binary numbers as its input and then check whether they are divisible by 5 or not.
The numbers that are divisible by 5 are to be printed in a comma separated sequence.
Example:
0100,0011,1010,1001,1010
Then the output should be:
1010 '''

# Keep only the tokens whose value (parsed as base-2) is divisible by 5.
divisible = [
    token
    for token in input().split(",")
    if int(token, 2) % 5 == 0
]
print(", ".join(divisible))
|
<filename>100 Python Exercises/Day 3/11.py<gh_stars>0
''' Question 11
Write a program which accepts a sequence of comma separated 4 digit binary numbers as its input and then check whether they are divisible by 5 or not.
The numbers that are divisible by 5 are to be printed in a comma separated sequence.
Example:
0100,0011,1010,1001,1010
Then the output should be:
1010 '''
value = []
inputStr = input().split(",")
for var in inputStr:
variable_int = int(var, 2)
if variable_int % 5 == 0:
value.append(var)
print(", ".join(value))
|
en
| 0.918408
|
Question 11 Write a program which accepts a sequence of comma separated 4 digit binary numbers as its input and then check whether they are divisible by 5 or not. The numbers that are divisible by 5 are to be printed in a comma separated sequence. Example: 0100,0011,1010,1001,1010 Then the output should be: 1010
| 3.911133
| 4
|
samples/pattern_model.py
|
mode89/esn
| 3
|
6626619
|
import esn
import imp
import signals
import random
SEED = 0                        # > 0 seeds every noise source for reproducible runs
PATTERN_LENGTH = 1              # duration of one pattern burst (time units)
PATTERN_PAUSE = 0.5             # minimum gap between bursts
OUTPUT_PULSE_AMPLITUDE = 0.9    # peak of the training-target pulse
OUTPUT_PULSE_LENGTH = 0.1       # width of the training-target pulse
WASHOUT_TIME = 10.0             # no training before this time
TRAIN_TIME = 100.0              # no training after this time
VARIABLE_MAGNITUDE = True       # randomize burst amplitude in [0.3, 1.0]
FALSE_PATTERN = True            # inject a second, distractor pattern
CONNECTIVITY = 0.5              # reservoir connection density
TEACHER_FORCING = False         # passed through to esn train_online
USE_ORTHONORMAL_MATRIX = True   # use an orthonormal reservoir matrix
TRAINING_STRATEGY = "discontinuous"  # name of a Model.TrainingStrategy method
class Signal :
    """Noise-driven pattern generator.

    Emits bursts of Perlin-noise "pattern" separated by pauses; a second,
    slow Perlin-noise pulse train gates when a new burst may start (its
    upward zero-crossings trigger bursts).
    """

    def __init__( self ) :
        self.magnitude = 1.0    # amplitude scale of the current burst
        self.value = 0          # current output sample
        self.time = 0           # local simulation clock
        self.front_edge = 0     # start time of the current burst
        self.back_edge = -OUTPUT_PULSE_LENGTH  # end time of the current burst
        # Fine-grained noise shaping the pattern itself.
        self.pattern_noise = \
            signals.PerlinNoise( persistence=1, octave_count=7 )
        if SEED > 0 :
            self.pattern_noise.seed( SEED )
        # Slow noise whose upward zero-crossings trigger new bursts.
        self.pulse_noise = \
            signals.PerlinNoise( persistence=0.5, octave_count=1 )
        if SEED > 0 :
            self.pulse_noise.seed( SEED + 1 )
        self.prev_pulse_noise = self.pulse_noise( 0 )
        # NOTE(review): assigned here but never read anywhere in the class.
        self.cur_pulse_noise = self.pulse_noise( 0 )

    def step( self, step ) :
        # Start a new burst once the previous burst plus pause has elapsed
        # and the pulse noise crosses zero upwards.
        if self.time > ( self.front_edge + PATTERN_LENGTH + \
            PATTERN_PAUSE ) and self.is_front_edge() :
            self.front_edge = self.time
            self.back_edge = self.front_edge + PATTERN_LENGTH
            if VARIABLE_MAGNITUDE :
                self.magnitude = random.uniform( 0.3, 1.0 )
        if self.front_edge <= self.time and \
            self.time <= self.back_edge :
            # Inside a burst: sample the pattern noise relative to the
            # burst's start, scaled to a small amplitude.
            self.value = self.pattern_noise( self.time - \
                self.front_edge ) * 0.15 * self.magnitude
        else :
            self.value = 0
        # Remember the current pulse-noise sample so is_front_edge() can
        # detect the next upward zero-crossing; this must be recorded
        # BEFORE the clock advances — the ordering is load-bearing.
        self.prev_pulse_noise = \
            self.pulse_noise( self.time )
        self.time += step

    def is_front_edge( self ) :
        # True on an upward zero-crossing of the pulse noise between the
        # previous and the current time step.
        if self.prev_pulse_noise <= 0 and \
            self.pulse_noise( self.time ) > 0 :
            return True
        else :
            return False
class Model :
    """Echo-state network trained online to emit a pulse right after the
    target pattern ends, while ignoring background noise and (optionally)
    a distractor pattern."""

    def __init__( self, neuron_count ) :
        # Single-input, single-output reservoir of *neuron_count* neurons.
        self.network = esn.Network(
            ins=1,
            neurons=neuron_count,
            outs=1,
            cnctvty=CONNECTIVITY,
            use_orth_mat=USE_ORTHONORMAL_MATRIX
        )
        # Background noise mixed into the input channel.
        self.noise = signals.PerlinNoise( persistence=0.5, octave_count=8 )
        if SEED > 0 :
            self.noise.seed( SEED + 2 )
        self.pattern = Signal()             # the pattern to be detected
        if FALSE_PATTERN :
            self.false_pattern = Signal()   # distractor the net must ignore
        # Desired output: Gaussian pulse anchored at the pattern's back edge.
        self.train_pulse = signals.GaussianPulse(
            amplitude=OUTPUT_PULSE_AMPLITUDE,
            width=OUTPUT_PULSE_LENGTH )
        self.time = 0

    def step( self, step ) :
        """Advance by *step*: build the input, run the network, train
        (inside the training window), and log one status line."""
        self.pattern.step( step )
        self.noise_value = self.noise( self.time ) * 0.1
        self.input = self.noise_value + self.pattern.value
        if FALSE_PATTERN :
            self.false_pattern.step( step )
            self.input += self.false_pattern.value
        self.network.set_inputs( [ self.input ] )
        self.network.step( step )
        self.output = self.network.capture_output( 1 )[ 0 ]
        # Training target: pulse positioned relative to the pattern's end.
        self.train_output = self.train_pulse( self.time - \
            self.pattern.back_edge )
        if self.time > WASHOUT_TIME and self.time < TRAIN_TIME :
            # Dispatch by name to one of the TrainingStrategy methods.
            getattr( Model.TrainingStrategy, TRAINING_STRATEGY )( self )
        # Columns: time, input, pattern value, training target, output.
        print( "%10s %10s %10s %10s %10s" %
            (
                str( "%0.3f" % self.time ),
                str( "%0.5f" % self.input ),
                str( "%0.5f" % self.pattern.value ),
                str( "%0.5f" % self.train_output ),
                str( "%0.5f" % self.output )
            )
        )
        self.time += step

    class TrainingStrategy :
        """Online-training policies, selected by name via TRAINING_STRATEGY."""

        @staticmethod
        def continuous( model ) :
            # Train at every step inside the training window.
            model.network.train_online( [ model.train_output ],
                TEACHER_FORCING )

        @staticmethod
        def discontinuous( model ) :
            # Train only while the target pulse is active, or whenever the
            # output strays beyond +/-0.3 (to suppress false positives).
            if model.time > model.pattern.back_edge and \
                model.time < ( model.pattern.back_edge + \
                    OUTPUT_PULSE_LENGTH ) :
                model.network.train_online( [ model.train_output ],
                    TEACHER_FORCING )
            elif model.output > 0.3 or model.output < -0.3:
                model.network.train_online( [ model.train_output ],
                    TEACHER_FORCING )
|
import esn
import imp
import signals
import random
SEED = 0
PATTERN_LENGTH = 1
PATTERN_PAUSE = 0.5
OUTPUT_PULSE_AMPLITUDE = 0.9
OUTPUT_PULSE_LENGTH = 0.1
WASHOUT_TIME = 10.0
TRAIN_TIME = 100.0
VARIABLE_MAGNITUDE = True
FALSE_PATTERN = True
CONNECTIVITY = 0.5
TEACHER_FORCING = False
USE_ORTHONORMAL_MATRIX = True
TRAINING_STRATEGY = "discontinuous"
class Signal :
def __init__( self ) :
self.magnitude = 1.0
self.value = 0
self.time = 0
self.front_edge = 0
self.back_edge = -OUTPUT_PULSE_LENGTH
self.pattern_noise = \
signals.PerlinNoise( persistence=1, octave_count=7 )
if SEED > 0 :
self.pattern_noise.seed( SEED )
self.pulse_noise = \
signals.PerlinNoise( persistence=0.5, octave_count=1 )
if SEED > 0 :
self.pulse_noise.seed( SEED + 1 )
self.prev_pulse_noise = self.pulse_noise( 0 )
self.cur_pulse_noise = self.pulse_noise( 0 )
def step( self, step ) :
if self.time > ( self.front_edge + PATTERN_LENGTH + \
PATTERN_PAUSE ) and self.is_front_edge() :
self.front_edge = self.time
self.back_edge = self.front_edge + PATTERN_LENGTH
if VARIABLE_MAGNITUDE :
self.magnitude = random.uniform( 0.3, 1.0 )
if self.front_edge <= self.time and \
self.time <= self.back_edge :
self.value = self.pattern_noise( self.time - \
self.front_edge ) * 0.15 * self.magnitude
else :
self.value = 0
self.prev_pulse_noise = \
self.pulse_noise( self.time )
self.time += step
def is_front_edge( self ) :
if self.prev_pulse_noise <= 0 and \
self.pulse_noise( self.time ) > 0 :
return True
else :
return False
class Model :
def __init__( self, neuron_count ) :
self.network = esn.Network(
ins=1,
neurons=neuron_count,
outs=1,
cnctvty=CONNECTIVITY,
use_orth_mat=USE_ORTHONORMAL_MATRIX
)
self.noise = signals.PerlinNoise( persistence=0.5, octave_count=8 )
if SEED > 0 :
self.noise.seed( SEED + 2 )
self.pattern = Signal()
if FALSE_PATTERN :
self.false_pattern = Signal()
self.train_pulse = signals.GaussianPulse(
amplitude=OUTPUT_PULSE_AMPLITUDE,
width=OUTPUT_PULSE_LENGTH )
self.time = 0
def step( self, step ) :
self.pattern.step( step )
self.noise_value = self.noise( self.time ) * 0.1
self.input = self.noise_value + self.pattern.value
if FALSE_PATTERN :
self.false_pattern.step( step )
self.input += self.false_pattern.value
self.network.set_inputs( [ self.input ] )
self.network.step( step )
self.output = self.network.capture_output( 1 )[ 0 ]
self.train_output = self.train_pulse( self.time - \
self.pattern.back_edge )
if self.time > WASHOUT_TIME and self.time < TRAIN_TIME :
getattr( Model.TrainingStrategy, TRAINING_STRATEGY )( self )
print( "%10s %10s %10s %10s %10s" %
(
str( "%0.3f" % self.time ),
str( "%0.5f" % self.input ),
str( "%0.5f" % self.pattern.value ),
str( "%0.5f" % self.train_output ),
str( "%0.5f" % self.output )
)
)
self.time += step
class TrainingStrategy :
@staticmethod
def continuous( model ) :
model.network.train_online( [ model.train_output ],
TEACHER_FORCING )
@staticmethod
def discontinuous( model ) :
if model.time > model.pattern.back_edge and \
model.time < ( model.pattern.back_edge + \
OUTPUT_PULSE_LENGTH ) :
model.network.train_online( [ model.train_output ],
TEACHER_FORCING )
elif model.output > 0.3 or model.output < -0.3:
model.network.train_online( [ model.train_output ],
TEACHER_FORCING )
|
none
| 1
| 2.678037
| 3
|
|
tests_python/tests_alpha/protocol.py
|
Sudha247/tezos
| 0
|
6626620
|
import datetime
from enum import Enum, auto
from typing import Optional
from copy import deepcopy
from tools import constants, utils
# Protocol under test (Alpha).
HASH = constants.ALPHA
DAEMON = constants.ALPHA_DAEMON
PARAMETERS = constants.ALPHA_PARAMETERS
# Tenderbake variant: same parameters with a concrete consensus committee.
TENDERBAKE_PARAMETERS = deepcopy(PARAMETERS)
TENDERBAKE_PARAMETERS['consensus_threshold'] = 45
TENDERBAKE_PARAMETERS['consensus_committee_size'] = 67
FOLDER = constants.ALPHA_FOLDER
# Predecessor protocol (Hangzhou), exposed for tests via Protocol.PREV.
PREV_HASH = constants.HANGZHOU
PREV_DAEMON = constants.HANGZHOU_DAEMON
PREV_PARAMETERS = constants.HANGZHOU_PARAMETERS
def activate(
    client,
    parameters=PARAMETERS,
    proto=HASH,
    timestamp=None,
    activate_in_the_past=False,
):
    """Activate *proto* on *client* with *parameters*.

    Thin wrapper around utils.activate_protocol; defaults to the Alpha
    protocol hash and its standard parameters.
    """
    utils.activate_protocol(
        client, proto, parameters, timestamp, activate_in_the_past
    )
class Protocol(Enum):
    """Selector for a parameter set: the protocol under test or its predecessor."""

    CURRENT = auto()  # the Alpha protocol (PARAMETERS)
    PREV = auto()     # the previous protocol, Hangzhou (PREV_PARAMETERS)
def get_parameters(protocol: Optional[Protocol] = Protocol.CURRENT):
    """
    Args:
        protocol (Protocol): protocol id (either CURRENT or PREV).
            Defaults to CURRENT

    Returns:
        A fresh copy of the protocol parameters w.r.t to protocol

    NOTE(review): any value other than Protocol.CURRENT -- including an
    explicit None, which the Optional annotation allows -- silently falls
    through to PREV_PARAMETERS; confirm that is intended.
    """
    # deepcopy call prevents any unforeseen and unwanted side effects
    # on the array parameters
    # e.g., bootstrap_accounts, commitments, endorsement_reward
    return deepcopy(
        dict((PARAMETERS if protocol is Protocol.CURRENT else PREV_PARAMETERS))
    )
def get_now(client) -> str:
    """Returns the timestamp of next-to-last block,
    offset by the minimum time between blocks"""
    timestamp_date = client.get_block_timestamp(block='head~1')
    constants = client.rpc('get', '/chains/main/blocks/head/context/constants')
    # First round duration = minimal time between consecutive blocks.
    delta = datetime.timedelta(seconds=int(constants['round_durations'][0]))
    now_date = timestamp_date + delta
    # Despite the variable name, this is the RFC 3339 timestamp layout.
    rfc3399_format = "%Y-%m-%dT%H:%M:%SZ"
    return now_date.strftime(rfc3399_format)
|
import datetime
from enum import Enum, auto
from typing import Optional
from copy import deepcopy
from tools import constants, utils
HASH = constants.ALPHA
DAEMON = constants.ALPHA_DAEMON
PARAMETERS = constants.ALPHA_PARAMETERS
TENDERBAKE_PARAMETERS = deepcopy(PARAMETERS)
TENDERBAKE_PARAMETERS['consensus_threshold'] = 45
TENDERBAKE_PARAMETERS['consensus_committee_size'] = 67
FOLDER = constants.ALPHA_FOLDER
PREV_HASH = constants.HANGZHOU
PREV_DAEMON = constants.HANGZHOU_DAEMON
PREV_PARAMETERS = constants.HANGZHOU_PARAMETERS
def activate(
    client,
    parameters=PARAMETERS,
    proto=HASH,
    timestamp=None,
    activate_in_the_past=False,
):
    """Activate *proto* on *client* with *parameters*.

    Thin wrapper around utils.activate_protocol; defaults to the Alpha
    protocol hash and its standard parameters.
    """
    utils.activate_protocol(
        client, proto, parameters, timestamp, activate_in_the_past
    )
class Protocol(Enum):
    """Selector for a parameter set: the protocol under test or its predecessor."""

    CURRENT = auto()  # the Alpha protocol (PARAMETERS)
    PREV = auto()     # the previous protocol, Hangzhou (PREV_PARAMETERS)
def get_parameters(protocol: Optional[Protocol] = Protocol.CURRENT):
    """
    Args:
        protocol (Protocol): protocol id (either CURRENT or PREV).
            Defaults to CURRENT

    Returns:
        A fresh copy of the protocol parameters w.r.t to protocol

    NOTE(review): any value other than Protocol.CURRENT -- including an
    explicit None, which the Optional annotation allows -- silently falls
    through to PREV_PARAMETERS; confirm that is intended.
    """
    # deepcopy call prevents any unforeseen and unwanted side effects
    # on the array parameters
    # e.g., bootstrap_accounts, commitments, endorsement_reward
    return deepcopy(
        dict((PARAMETERS if protocol is Protocol.CURRENT else PREV_PARAMETERS))
    )
def get_now(client) -> str:
    """Returns the timestamp of next-to-last block,
    offset by the minimum time between blocks"""
    timestamp_date = client.get_block_timestamp(block='head~1')
    constants = client.rpc('get', '/chains/main/blocks/head/context/constants')
    # First round duration = minimal time between consecutive blocks.
    delta = datetime.timedelta(seconds=int(constants['round_durations'][0]))
    now_date = timestamp_date + delta
    # Despite the variable name, this is the RFC 3339 timestamp layout.
    rfc3399_format = "%Y-%m-%dT%H:%M:%SZ"
    return now_date.strftime(rfc3399_format)
|
en
| 0.59812
|
Args: protocol (Protocol): protocol id (either CURRENT or PREV). Defaults to CURRENT Returns: A fresh copy of the protocol parameters w.r.t to protocol # deepcopy call prevents any unforeseen and unwanted side effects # on the array parameters # e.g., bootstrap_accounts, commitments, endorsement_reward Returns the timestamp of next-to-last block, offset by the minimum time between blocks
| 2.561329
| 3
|
bin/count_boundary_reads_star_python2.py
|
ablifedev/SUVA
| 1
|
6626621
|
<filename>bin/count_boundary_reads_star_python2.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################################
# Copyright (C) 2015-2019 by ABLIFE
####################################################################################
# 名称:expression_quantity_calculation.py
# 描述:计算表达量
# 作者:程超
# 创建时间:2015-4-10
# 联系方式:<EMAIL>
####################################################################################
# 修改记录
####################################################################################
# Date Version Author ChangeLog
# 2015-4-10 v0.1 ChengChao 创建测试版本
# 2015-7-24 v1.0 ChengChao 加入对bed文件支持;对于bed格式先扫描所有
# 所有基因,记录区域信息,再扫描bed iv
#
#
#####################################################################################
"""
程序功能说明:
1.计算gene表达量
2.randCheck_gene
3.randCheck_mRNA
程序设计思路:
利用gffutils和HTSeq包进行统计
"""
# 导入必要的包
import re
import os
import sys
import logging
import time
import datetime
from optparse import OptionParser, OptionGroup
# reload(sys)
# sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import HTSeq
import numpy
import multiprocessing
import signal
from matplotlib import pyplot
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../")
# print(sys.path)
from ablib.utils.tools import *
# 检查python的版本,我们需要使用python2.7
# TODO: HTSeq升级到python3版本后升级程序到python3
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
# 程序版本号
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# --- S 参数设置模块
# -----------------------------------------------------------------------------------
def configOpt():
    """Build the command-line option parser and parse sys.argv.

    Returns:
        (parser, options, args): the optparse parser plus the parsed
        values.  Prints help and exits with status 1 when the program is
        invoked without any arguments.
    """
    usage = 'Usage: %prog [-f] [other option] [-h]'
    p = OptionParser(usage)
    # basic options
    p.add_option('-t', '--totalsj', dest='totalsj',
                 action='store', type='string', help='totalsj file')
    p.add_option('-b', '--bed', dest='bed', action='store',
                 type='string', help='junction file')
    p.add_option('-l', '--bam', dest='bam', action='store',
                 type='string', help='bam file')
    p.add_option('-s', '--span', dest='span', action='store',
                 type='int', default=4, help='boundary span,default is 4')
    p.add_option('-j', '--sjreads', dest='sjreads', action='store',
                 type='int', default=10, help='min sjreads,default is 10')
    p.add_option('-o', '--outfile', dest='outfile', default='Mapping_distribution.txt',
                 action='store', type='string', help='gene expression file')
    p.add_option('-u', '--unstrand', dest='unstrand', default=False, action='store_true',
                 help='unstrand library,antisense will not be considered.')
    group = OptionGroup(p, "Preset options")
    # preset options
    group.add_option('-O', '--outDir', dest='outDir', default='./',
                     action='store', type='string', help='output directory', metavar="DIR")
    group.add_option('-L', '--logDir', dest='logDir', default='', action='store',
                     type='string', help='log dir ,default is same as outDir')
    group.add_option('-P', '--logPrefix', dest='logPrefix', default='',
                     action='store', type='string', help='log file prefix')
    group.add_option('-E', '--email', dest='email', default='none', action='store', type='string',
                     help='email address, if you want get a email when this job is finished,default is no email', metavar="EMAIL")
    group.add_option('-Q', '--quiet', dest='quiet', default=False,
                     action='store_true', help='do not print messages to stdout')
    group.add_option('-K', '--keepTemp', dest='keepTemp',
                     default=False, action='store_true', help='keep temp dir')
    group.add_option('-T', '--test', dest='isTest', default=False,
                     action='store_true', help='run this program for test')
    p.add_option_group(group)
    # Invoked with no arguments at all: show usage and bail out.
    if len(sys.argv) == 1:
        p.print_help()
        sys.exit(1)
    opt, args = p.parse_args()
    return (p, opt, args)
def listToString(x):
    """Join the strings in *x* back into one command line.

    Every element is followed by a single space, so the result carries a
    trailing space whenever *x* is non-empty (and is '' when *x* is empty).
    """
    return ''.join(a + ' ' for a in x)
# pool watcher for keybord interrupt
def init_worker():
    # Pool workers ignore SIGINT so that Ctrl-C is delivered only to the
    # parent process, which can then terminate the pool cleanly.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
# 解析参数
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
sjnum = {}
# 对参数进行有效性验证和初步处理
# -----------------------------------------------------------------------------------
# --- E 参数设置模块
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S (全局)变量定义及初始化设置模块
# -----------------------------------------------------------------------------------
# 获取路径信息
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = scriptPath + '/bin' # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/' # absolute bin path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# E (全局)变量定义及初始化设置模块
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 日志模块logging初始化
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
    """Configure root logging: DEBUG and above to *logFilename*; unless
    opt.quiet is set, INFO and above are also echoed to the console.
    """
    logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s : %(levelname)s] %(message)s',
                        datefmt='%y-%m-%d %H:%M', filename=logFilename, filemode='w')
    if not opt.quiet:  # non-quiet mode: mirror INFO messages to the screen
        # define a Handler which writes INFO messages or higher to the sys.stderr
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        # set a format which is simpler for console use
        formatter = logging.Formatter(
            '[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
        # tell the handler to use this format
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + \
str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__) # 打印出程序的说明文档
# -----------------------------------------------------------------------------------
# E 日志模块logging初始化
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 计时器模块->Getting Start Time
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# E 计时器模块->Getting Start Time
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 类定义(若有)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# E 类定义(若有)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 功能函数定义
# -----------------------------------------------------------------------------------
# def invert_strand(iv):
# """
# :param iv: HTSeq.GenomicInterval object
# :return: HTSeq.GenomicInterval - strand is reversed
# """
# iv2 = iv.copy()
# if iv2.strand == "+":
# iv2.strand = "-"
# elif iv2.strand == "-":
# iv2.strand = "+"
# else:
# raise ValueError, "Illegal strand"
# return iv2
def getTotalBase(iv, coverage):
    """Total covered bases in *iv*: sum of depth * segment-length over the
    coverage steps, counting only segments with positive depth."""
    return sum(depth * segment.length
               for segment, depth in coverage[iv].steps()
               if depth > 0)
# @profile
def readChr(chr, reads):
    """Count strand-aware boundary-spanning reads for each junction on *chr*.

    For every splice junction on this chromosome listed in opt.totalsj,
    counts the unique, non-spliced reads (no 'N' CIGAR op, aligned length
    <= 150) that completely span an opt.span-wide window at the left and
    at the right junction boundary.  The strand filter keeps single-end
    reads and first mates whose strand matches the junction, and second
    mates on the opposite strand -- presumably a stranded-library
    convention; TODO confirm against the sequencing protocol.  Stores
    reads[chr] = {"left": {"chrom:pos:strand": count},
                  "right": {"chrom:pos:strand": count}}.
    Runs as a multiprocessing worker; *reads* is a Manager dict.
    """
    print(chr)
    reads_dict = {}
    reads_dict["left"] = {}
    reads_dict["right"] = {}
    totalsjfile = opt.totalsj
    bamfile = opt.bam
    bam = HTSeq.BAM_Reader(bamfile)
    reads_dict["left"] = {}
    reads_dict["right"] = {}
    i = 0
    j = 0
    for eachLine in open(totalsjfile):
        line = eachLine.strip().split("\t")
        # Input row layout: chrom<TAB>start<TAB>end<TAB>strand, e.g. chr7 34247275 34664347 +
        if line[0] != chr:
            continue
        j += 1
        if j > 0 and j % 1000 == 0:
            sys.stderr.write("%s : %d sj processed.\n" % (chr, j))
        i+=1
        key = str(i)
        reads_left = 0
        reads_right = 0
        # Per-boundary cache keys: "chrom:pos:strand" (shared across junctions).
        lss = line[0]+":"+line[1]+":"+line[3]
        rss = line[0]+":"+line[2]+":"+line[3]
        s = int(line[1])
        e = int(line[2])
        # opt.span-wide windows hugging the left/right junction boundaries.
        iv1 = HTSeq.GenomicInterval(line[0], s - 1, s + opt.span, line[3])
        iv2 = HTSeq.GenomicInterval(line[0], e - 1 - opt.span, e, line[3])
        name = line[0] + "\t" + line[1] + "\t" + line[2]
        if lss in reads_dict["left"]:
            reads_left = reads_dict["left"][lss]
        else:
            iv = iv1
            usedreads = {}
            for r in bam[iv]:
                if r.iv.length>150:
                    continue
                # Skip spliced alignments (any 'N' CIGAR operation).
                flag = 0
                for co in r.cigar:
                    if co.type == "N":
                        flag = 1
                        break
                if flag == 1:
                    continue
                # Strand filter: keep single-end/first-mate reads on iv's
                # strand and second mates on the opposite strand.
                if ((r.iv.strand != iv.strand and (not r.paired_end)) or (r.paired_end and r.iv.strand != iv.strand and r.pe_which == "first") or (r.paired_end and r.iv.strand == iv.strand and r.pe_which == "second")):
                    continue
                # Read must fully bridge the window; count each read name once.
                if r.iv.start < iv.start and r.iv.end >= iv.end:
                    r_name = r.read.name
                    if r_name in usedreads:
                        continue
                    else:
                        usedreads[r.read.name] = ""
                        reads_left += 1
            reads_dict["left"][lss] = reads_left
        if rss in reads_dict["right"]:
            reads_right = reads_dict["right"][rss]
        else:
            iv = iv2
            usedreads = {}
            for r in bam[iv]:
                if r.iv.length>150:
                    continue
                flag = 0
                for co in r.cigar:
                    if co.type == "N":
                        flag = 1
                        break
                if flag == 1:
                    continue
                if ((r.iv.strand != iv.strand and (not r.paired_end)) or (r.paired_end and r.iv.strand != iv.strand and r.pe_which == "first") or (r.paired_end and r.iv.strand == iv.strand and r.pe_which == "second")):
                    continue
                if r.iv.start <= iv.start and r.iv.end > iv.end:
                    r_name = r.read.name
                    if r_name in usedreads:
                        continue
                    else:
                        usedreads[r.read.name] = ""
                        reads_right += 1
            reads_dict["right"][rss] = reads_right
    # Publish a plain-dict copy into the shared Manager dict.
    reads[chr] = reads_dict.copy()
    del reads_dict
    logging.info("done %s" % chr)
def readChr_unstrand(chr, reads):
    """Unstranded variant of readChr: count boundary-spanning reads on *chr*.

    Identical to readChr except that read strand is ignored: the boundary
    windows are built with strand '.' and no strand filter is applied.
    Stores reads[chr] = {"left": {...}, "right": {...}} keyed by
    "chrom:pos:strand".  Runs as a multiprocessing worker; *reads* is a
    Manager dict.
    """
    print(chr)
    reads_dict = {}
    reads_dict["left"] = {}
    reads_dict["right"] = {}
    totalsjfile = opt.totalsj
    bamfile = opt.bam
    bam = HTSeq.BAM_Reader(bamfile)
    reads_dict["left"] = {}
    reads_dict["right"] = {}
    i = 0
    j = 0
    for eachLine in open(totalsjfile):
        line = eachLine.strip().split("\t")
        # Input row layout: chrom<TAB>start<TAB>end<TAB>strand
        if line[0] != chr:
            continue
        j += 1
        if j > 0 and j % 1000 == 0:
            sys.stderr.write("%s : %d sj processed.\n" % (chr, j))
        i+=1
        key = str(i)
        reads_left = 0
        reads_right = 0
        lss = line[0]+":"+line[1]+":"+line[3]
        rss = line[0]+":"+line[2]+":"+line[3]
        s = int(line[1])
        e = int(line[2])
        # Strand '.' makes the interval strand-agnostic.
        iv1 = HTSeq.GenomicInterval(line[0], s - 1, s + opt.span, ".")
        iv2 = HTSeq.GenomicInterval(line[0], e - 1 - opt.span, e, ".")
        name = line[0] + "\t" + line[1] + "\t" + line[2]
        if lss in reads_dict["left"]:
            reads_left = reads_dict["left"][lss]
        else:
            iv = iv1
            usedreads = {}
            for r in bam[iv]:
                if r.iv.length>150:
                    continue
                # Skip spliced alignments (any 'N' CIGAR operation).
                flag = 0
                for co in r.cigar:
                    if co.type == "N":
                        flag = 1
                        break
                if flag == 1:
                    continue
                # Read must fully bridge the window; count each read name once.
                if r.iv.start < iv.start and r.iv.end >= iv.end:
                    r_name = r.read.name
                    if r_name in usedreads:
                        continue
                    else:
                        usedreads[r.read.name] = ""
                        reads_left += 1
            reads_dict["left"][lss] = reads_left
        if rss in reads_dict["right"]:
            reads_right = reads_dict["right"][rss]
        else:
            iv = iv2
            usedreads = {}
            for r in bam[iv]:
                if r.iv.length>150:
                    continue
                flag = 0
                for co in r.cigar:
                    if co.type == "N":
                        flag = 1
                        break
                if flag == 1:
                    continue
                if r.iv.start <= iv.start and r.iv.end > iv.end:
                    r_name = r.read.name
                    if r_name in usedreads:
                        continue
                    else:
                        usedreads[r.read.name] = ""
                        reads_right += 1
            reads_dict["right"][rss] = reads_right
    # Publish a plain-dict copy into the shared Manager dict.
    reads[chr] = reads_dict.copy()
    del reads_dict
    logging.info("done %s" % chr)
# -----------------------------------------------------------------------------------
# E 功能函数定义
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 主函数
# -----------------------------------------------------------------------------------
def main():
    """Driver: collect junction boundary read counts and write the report.

    Reads the chromosome list from opt.totalsj and known junction read
    counts (bed column 5) from opt.bed, fans out one readChr /
    readChr_unstrand worker per chromosome over a process pool, then
    writes one output row per junction: the original line, its bed count,
    and the two boundary counts (left/right swapped on '-' strand).
    """
    print("Main procedure start...")
    bedfile = opt.bed
    totalsjfile = opt.totalsj
    chrs = {}
    # Unique chromosome names present in the junction file (chrM excluded).
    for chr in os.popen("cut -f 1 " + totalsjfile + " | sort |uniq").readlines():
        chr = chr.strip()
        if chr == "chrM":
            continue
        chrs[chr] = 1
    # Remember the bed file's read count (column 5) for each junction.
    for eachLine in open(bedfile):
        line = eachLine.strip().split("\t")
        name = line[0] + "\t" + line[1] + "\t" + line[2]
        c = line[0]
        if c == "chrM":
            continue
        sjnum[name] = line[4]
    # One worker task per chromosome; results land in a Manager dict.
    pool = multiprocessing.Pool(processes=22,initializer=init_worker)
    server = multiprocessing.Manager()
    reads = server.dict()
    chr_dict = readBamHeader(opt.bam)
    for chr in chrs:
        if chr == "chrM":
            continue
        reads[chr] = ""
        if opt.unstrand:
            pool.apply_async(readChr_unstrand, args=(chr, reads))
        else:
            pool.apply_async(readChr, args=(chr, reads))
    try:
        print("Waiting 10 seconds")
        time.sleep(10)
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
        pool.join()
    else:
        print("Quitting normally")
        pool.close()
        pool.join()
    # Manager-dict iteration is slow; copy into a plain dict first.
    d = dict(reads).copy()
    server.shutdown()
    w = open(opt.outfile, 'w')
    for eachLine in open(totalsjfile):
        line = eachLine.strip().split("\t")
        chrome = line[0]
        if chrome == "chrM":
            continue
        lss = line[0]+":"+line[1]+":"+line[3]
        rss = line[0]+":"+line[2]+":"+line[3]
        reads_left = d[chrome]["left"][lss]
        reads_right = d[chrome]["right"][rss]
        name = line[0] + "\t" + line[1] + "\t" + line[2]
        if name not in sjnum:
            sjnum[name] = "0"
        tmp=eachLine.strip() + "\t" + sjnum[name] + "\t"
        # Emit boundary counts in transcript orientation: swap on '-' strand.
        if line[3] == "+":
            tmp+=str(reads_left) + "\t" + str(reads_right) + "\n"
        else:
            tmp+=str(reads_right) + "\t" + str(reads_left) + "\n"
        w.writelines(tmp)
    w.close()
if __name__ == '__main__':
# 执行主函数
main()
# -----------------------------------------------------------------------------------
# E 主函数
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 清理临时文件夹,放在最后
# -----------------------------------------------------------------------------------
if not opt.keepTemp:
os.system('rm -rf ' + tempPath)
logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# E 清理临时文件夹,放在最后
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 计时器模块->Getting Total Run Time
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" %
(runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# S 计时器模块->Getting Total Run Time
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 发送邮件模块
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# S 发送邮件模块
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 程序运行计数器
# -----------------------------------------------------------------------------------
def countProgram(programName, startT, runT, isTest):
    """Append one usage-accounting row (program, user, start time, runtime
    in seconds, T/P test flag) to the site-wide counter file."""
    countProgramFile = open('/users/ablife/ablifepy/countProgram.txt', 'a')
    countProgramFile.write(programName + '\t' + str(os.getlogin()) +
                           '\t' + str(startT) + '\t' + str(runT) + 's\t' + isTest + '\n')
    countProgramFile.close()
testStr = 'P'
if opt.isTest:
testStr = 'T'
countProgram(sys.argv[0], startTime, runningTime, testStr)
# -----------------------------------------------------------------------------------
# E 程序运行计数器
# -----------------------------------------------------------------------------------
|
<filename>bin/count_boundary_reads_star_python2.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################################
# Copyright (C) 2015-2019 by ABLIFE
####################################################################################
# 名称:expression_quantity_calculation.py
# 描述:计算表达量
# 作者:程超
# 创建时间:2015-4-10
# 联系方式:<EMAIL>
####################################################################################
# 修改记录
####################################################################################
# Date Version Author ChangeLog
# 2015-4-10 v0.1 ChengChao 创建测试版本
# 2015-7-24 v1.0 ChengChao 加入对bed文件支持;对于bed格式先扫描所有
# 所有基因,记录区域信息,再扫描bed iv
#
#
#####################################################################################
"""
程序功能说明:
1.计算gene表达量
2.randCheck_gene
3.randCheck_mRNA
程序设计思路:
利用gffutils和HTSeq包进行统计
"""
# 导入必要的包
import re
import os
import sys
import logging
import time
import datetime
from optparse import OptionParser, OptionGroup
# reload(sys)
# sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import HTSeq
import numpy
import multiprocessing
import signal
from matplotlib import pyplot
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../")
# print(sys.path)
from ablib.utils.tools import *
# 检查python的版本,我们需要使用python2.7
# TODO: HTSeq升级到python3版本后升级程序到python3
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
# 程序版本号
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# --- S 参数设置模块
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
# basic options
p.add_option('-t', '--totalsj', dest='totalsj',
action='store', type='string', help='totalsj file')
p.add_option('-b', '--bed', dest='bed', action='store',
type='string', help='junction file')
p.add_option('-l', '--bam', dest='bam', action='store',
type='string', help='bam file')
p.add_option('-s', '--span', dest='span', action='store',
type='int', default=4, help='boundary span,default is 4')
p.add_option('-j', '--sjreads', dest='sjreads', action='store',
type='int', default=10, help='min sjreads,default is 10')
p.add_option('-o', '--outfile', dest='outfile', default='Mapping_distribution.txt',
action='store', type='string', help='gene expression file')
p.add_option('-u', '--unstrand', dest='unstrand', default=False, action='store_true',
help='unstrand library,antisense will not be considered.')
group = OptionGroup(p, "Preset options")
# preset options
group.add_option('-O', '--outDir', dest='outDir', default='./',
action='store', type='string', help='output directory', metavar="DIR")
group.add_option('-L', '--logDir', dest='logDir', default='', action='store',
type='string', help='log dir ,default is same as outDir')
group.add_option('-P', '--logPrefix', dest='logPrefix', default='',
action='store', type='string', help='log file prefix')
group.add_option('-E', '--email', dest='email', default='none', action='store', type='string',
help='email address, if you want get a email when this job is finished,default is no email', metavar="EMAIL")
group.add_option('-Q', '--quiet', dest='quiet', default=False,
action='store_true', help='do not print messages to stdout')
group.add_option('-K', '--keepTemp', dest='keepTemp',
default=False, action='store_true', help='keep temp dir')
group.add_option('-T', '--test', dest='isTest', default=False,
action='store_true', help='run this program for test')
p.add_option_group(group)
if len(sys.argv) == 1:
p.print_help()
sys.exit(1)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
    """Join the strings in *x* back into one command line.

    Every element is followed by a single space, so the result carries a
    trailing space whenever *x* is non-empty (and is '' when *x* is empty).
    """
    return ''.join(a + ' ' for a in x)
# pool watcher for keybord interrupt
def init_worker():
    # Pool workers ignore SIGINT so that Ctrl-C is delivered only to the
    # parent process, which can then terminate the pool cleanly.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
# 解析参数
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
sjnum = {}
# 对参数进行有效性验证和初步处理
# -----------------------------------------------------------------------------------
# --- E 参数设置模块
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S (全局)变量定义及初始化设置模块
# -----------------------------------------------------------------------------------
# 获取路径信息
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = scriptPath + '/bin' # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/' # absolute bin path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# E (全局)变量定义及初始化设置模块
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 日志模块logging初始化
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M', filename=logFilename, filemode='w')
if not opt.quiet: # 非quiet模式在屏幕打印出程序执行INFO
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter(
'[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + \
str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__) # 打印出程序的说明文档
# -----------------------------------------------------------------------------------
# E 日志模块logging初始化
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 计时器模块->Getting Start Time
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# E 计时器模块->Getting Start Time
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 类定义(若有)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# E 类定义(若有)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 功能函数定义
# -----------------------------------------------------------------------------------
# def invert_strand(iv):
# """
# :param iv: HTSeq.GenomicInterval object
# :return: HTSeq.GenomicInterval - strand is reversed
# """
# iv2 = iv.copy()
# if iv2.strand == "+":
# iv2.strand = "-"
# elif iv2.strand == "-":
# iv2.strand = "+"
# else:
# raise ValueError, "Illegal strand"
# return iv2
def getTotalBase(iv, coverage):
    """Total covered bases in *iv*: sum of depth * segment-length over the
    coverage steps, counting only segments with positive depth."""
    return sum(depth * segment.length
               for segment, depth in coverage[iv].steps()
               if depth > 0)
# @profile
def readChr(chr, reads):
print(chr)
reads_dict = {}
reads_dict["left"] = {}
reads_dict["right"] = {}
totalsjfile = opt.totalsj
bamfile = opt.bam
bam = HTSeq.BAM_Reader(bamfile)
reads_dict["left"] = {}
reads_dict["right"] = {}
i = 0
j = 0
for eachLine in open(totalsjfile):
line = eachLine.strip().split("\t")
# chr7 34247275 34664347 +
if line[0] != chr:
continue
# print(eachLine)
j += 1
if j > 0 and j % 1000 == 0:
sys.stderr.write("%s : %d sj processed.\n" % (chr, j))
i+=1
key = str(i)
# if line[0] == "chrM":
# continue
# if not line[0].startswith("chr"):
# continue
reads_left = 0
reads_right = 0
lss = line[0]+":"+line[1]+":"+line[3]
rss = line[0]+":"+line[2]+":"+line[3]
# if int(line[4])<opt.sjreads:
# continue
s = int(line[1])
e = int(line[2])
iv1 = HTSeq.GenomicInterval(line[0], s - 1, s + opt.span, line[3])
iv2 = HTSeq.GenomicInterval(line[0], e - 1 - opt.span, e, line[3])
name = line[0] + "\t" + line[1] + "\t" + line[2]
# chr = name.split("\t")[0]
if lss in reads_dict["left"]:
reads_left = reads_dict["left"][lss]
else:
iv = iv1
usedreads = {}
# print(">sj iv:")
# print(iv)
for r in bam[iv]:
if r.iv.length>150:
continue
# print(r.iv)
flag = 0
for co in r.cigar:
if co.type == "N":
flag = 1
break
if flag == 1:
continue
# if r.iv.strand != iv.strand:
# continue
if ((r.iv.strand != iv.strand and (not r.paired_end)) or (r.paired_end and r.iv.strand != iv.strand and r.pe_which == "first") or (r.paired_end and r.iv.strand == iv.strand and r.pe_which == "second")):
continue
if r.iv.start < iv.start and r.iv.end >= iv.end:
r_name = r.read.name
if r_name in usedreads:
continue
else:
usedreads[r.read.name] = ""
reads_left += 1
reads_dict["left"][lss] = reads_left
# print(reads_left)
if rss in reads_dict["right"]:
reads_right = reads_dict["right"][rss]
else:
iv = iv2
usedreads = {}
for r in bam[iv]:
if r.iv.length>150:
continue
flag = 0
for co in r.cigar:
if co.type == "N":
flag = 1
break
if flag == 1:
continue
# if r.iv.strand != iv.strand:
# continue
if ((r.iv.strand != iv.strand and (not r.paired_end)) or (r.paired_end and r.iv.strand != iv.strand and r.pe_which == "first") or (r.paired_end and r.iv.strand == iv.strand and r.pe_which == "second")):
continue
if r.iv.start <= iv.start and r.iv.end > iv.end:
r_name = r.read.name
if r_name in usedreads:
continue
else:
usedreads[r.read.name] = ""
reads_right += 1
reads_dict["right"][rss] = reads_right
# print(reads_right)
# if name not in sjnum:
# sjnum[name] = "0"
# # print(d[c]["left"])
# tmp=eachLine.strip() + "\t" + sjnum[name] + "\t"
# if line[3] == "+":
# tmp+=str(reads_left) + "\t" + str(reads_right) + "\n"
# else:
# tmp+=str(reads_right) + "\t" + str(reads_left) + "\n"
# reads_dict[key] = tmp
# print(reads_dict)
reads[chr] = reads_dict.copy()
del reads_dict
logging.info("done %s" % chr)
def readChr_unstrand(chr, reads):
    """Count junction-flanking reads on one chromosome, ignoring strand.

    Strand-agnostic variant of readChr(): the flanking intervals are built
    with strand "." and the read-strand filter is disabled, so alignments
    from either strand are counted.

    Parameters
    ----------
    chr : str
        Chromosome name; only lines of ``opt.totalsj`` on this chromosome
        are processed.
    reads : dict (typically a multiprocessing.Manager dict)
        Output map; on completion ``reads[chr]`` is set to
        ``{"left": {site_key: count}, "right": {site_key: count}}`` where
        site_key is "chrom:pos:strand".

    Side effects: reads the BAM file named by the global ``opt.bam`` and the
    junction list ``opt.totalsj``; writes progress to stderr and a completion
    message via logging.
    """
    print(chr)
    reads_dict = {}
    reads_dict["left"] = {}
    reads_dict["right"] = {}
    totalsjfile = opt.totalsj
    bamfile = opt.bam
    bam = HTSeq.BAM_Reader(bamfile)
    # NOTE(review): the next two lines are redundant — "left"/"right" were
    # already initialised just above.
    reads_dict["left"] = {}
    reads_dict["right"] = {}
    i = 0
    j = 0
    for eachLine in open(totalsjfile):
        # Each junction line looks like: chr7<TAB>34247275<TAB>34664347<TAB>+
        line = eachLine.strip().split("\t")
        if line[0] != chr:
            continue
        j += 1
        if j > 0 and j % 1000 == 0:
            sys.stderr.write("%s : %d sj processed.\n" % (chr, j))
        i+=1
        key = str(i)
        reads_left = 0
        reads_right = 0
        # Cache keys identifying the left/right splice sites of this junction.
        lss = line[0]+":"+line[1]+":"+line[3]
        rss = line[0]+":"+line[2]+":"+line[3]
        s = int(line[1])
        e = int(line[2])
        # Flanking windows of opt.span bp around each splice site; strand "."
        # makes the BAM fetch strand-agnostic (the key difference vs readChr).
        iv1 = HTSeq.GenomicInterval(line[0], s - 1, s + opt.span, ".")
        iv2 = HTSeq.GenomicInterval(line[0], e - 1 - opt.span, e, ".")
        name = line[0] + "\t" + line[1] + "\t" + line[2]
        if lss in reads_dict["left"]:
            # This splice site was already counted for an earlier junction.
            reads_left = reads_dict["left"][lss]
        else:
            iv = iv1
            usedreads = {}
            for r in bam[iv]:
                # Skip long alignments (cutoff 150 bp).
                if r.iv.length>150:
                    continue
                # Skip spliced alignments (any "N" CIGAR operation).
                flag = 0
                for co in r.cigar:
                    if co.type == "N":
                        flag = 1
                        break
                if flag == 1:
                    continue
                # Unlike readChr(), no strand filtering here.
                # Count each read name once if it fully spans the window.
                if r.iv.start < iv.start and r.iv.end >= iv.end:
                    r_name = r.read.name
                    if r_name in usedreads:
                        continue
                    else:
                        usedreads[r.read.name] = ""
                    reads_left += 1
            reads_dict["left"][lss] = reads_left
        if rss in reads_dict["right"]:
            reads_right = reads_dict["right"][rss]
        else:
            iv = iv2
            usedreads = {}
            for r in bam[iv]:
                if r.iv.length>150:
                    continue
                flag = 0
                for co in r.cigar:
                    if co.type == "N":
                        flag = 1
                        break
                if flag == 1:
                    continue
                # Boundary conditions mirror the left side (<= / > instead
                # of < / >=), presumably to avoid double counting — confirm.
                if r.iv.start <= iv.start and r.iv.end > iv.end:
                    r_name = r.read.name
                    if r_name in usedreads:
                        continue
                    else:
                        usedreads[r.read.name] = ""
                    reads_right += 1
            reads_dict["right"][rss] = reads_right
    # Publish the per-chromosome result; .copy() snapshots the local dict
    # into the shared Manager dict.
    reads[chr] = reads_dict.copy()
    del reads_dict
    logging.info("done %s" % chr)
# -----------------------------------------------------------------------------------
# E 功能函数定义
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# S 主函数
# -----------------------------------------------------------------------------------
def main():
    """Driver: count junction-flanking reads per chromosome in parallel.

    Reads the junction list (``opt.totalsj``) and the BED junction counts
    (``opt.bed``), farms one readChr()/readChr_unstrand() task per
    chromosome out to a process pool, then writes one output line per
    junction: the input line, the known junction count, and the left/right
    flanking read counts (swapped for "-" strand junctions).
    """
    print("Main procedure start...")
    bedfile = opt.bed
    totalsjfile = opt.totalsj
    # Collect chromosome names present in the junction file (chrM excluded).
    chrs = {}
    for chr in os.popen("cut -f 1 " + totalsjfile + " | sort |uniq").readlines():
        chr = chr.strip()
        if chr == "chrM":
            continue
        chrs[chr] = 1
    # Record the known junction-read count (column 5) for each junction.
    for eachLine in open(bedfile):
        line = eachLine.strip().split("\t")
        name = line[0] + "\t" + line[1] + "\t" + line[2]
        c = line[0]
        if c == "chrM":
            continue
        sjnum[name] = line[4]
    # One worker pool for all chromosomes; init_worker lets the parent handle
    # Ctrl-C (see the KeyboardInterrupt branch below).
    pool = multiprocessing.Pool(processes=22,initializer=init_worker)
    server = multiprocessing.Manager()
    reads = server.dict()
    # NOTE(review): chr_dict is never used after this call.
    chr_dict = readBamHeader(opt.bam)
    for chr in chrs:
        if chr == "chrM":
            continue
        reads[chr] = ""
        if opt.unstrand:
            pool.apply_async(readChr_unstrand, args=(chr, reads))
        else:
            pool.apply_async(readChr, args=(chr, reads))
    try:
        print("Waiting 10 seconds")
        time.sleep(10)
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
        pool.join()
    else:
        print("Quitting normally")
        pool.close()
        pool.join()
    # Snapshot the Manager dict: iterating the proxy object is too slow.
    d = dict(reads).copy()
    server.shutdown()
    w = open(opt.outfile, 'w')
    for eachLine in open(totalsjfile):
        line = eachLine.strip().split("\t")
        chrome = line[0]
        if chrome == "chrM":
            continue
        lss = line[0]+":"+line[1]+":"+line[3]
        rss = line[0]+":"+line[2]+":"+line[3]
        reads_left = d[chrome]["left"][lss]
        reads_right = d[chrome]["right"][rss]
        name = line[0] + "\t" + line[1] + "\t" + line[2]
        if name not in sjnum:
            sjnum[name] = "0"
        tmp=eachLine.strip() + "\t" + sjnum[name] + "\t"
        # For "-" strand junctions the left/right counts are swapped —
        # presumably to keep the columns transcript-oriented; confirm.
        if line[3] == "+":
            tmp+=str(reads_left) + "\t" + str(reads_right) + "\n"
        else:
            tmp+=str(reads_right) + "\t" + str(reads_left) + "\n"
        # NOTE(review): writelines() on a single string works (iterates its
        # characters) but write() would be clearer.
        w.writelines(tmp)
    w.close()
if __name__ == '__main__':
    # Run the main procedure.
    main()

# ---- Clean up the temp folder (must stay at the end) ----
if not opt.keepTemp:
    os.system('rm -rf ' + tempPath)
    logging.debug("Temp folder is deleted..")

# ---- Timer: total run time ----
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds  # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" %
              (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))

# ---- Email notification (only when an address was given) ----
if opt.email != "none":
    run_cmd = listToString(sys.argv)
    sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
    logging.info("发送邮件通知到 %s" % opt.email)

# ---- Program-run counter ----
def countProgram(programName, startT, runT, isTest):
    """Append one usage record (program, user, start time, runtime, P/T flag)
    to the shared counter file.

    NOTE(review): the path is hard-coded and will fail for users without
    write access to /users/ablife/ablifepy/.
    """
    countProgramFile = open('/users/ablife/ablifepy/countProgram.txt', 'a')
    countProgramFile.write(programName + '\t' + str(os.getlogin()) +
                           '\t' + str(startT) + '\t' + str(runT) + 's\t' + isTest + '\n')
    countProgramFile.close()

# 'T' marks a test run, 'P' a production run.
testStr = 'P'
if opt.isTest:
    testStr = 'T'
countProgram(sys.argv[0], startTime, runningTime, testStr)
|
en
| 0.215688
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- #################################################################################### # Copyright (C) 2015-2019 by ABLIFE #################################################################################### # 名称:expression_quantity_calculation.py # 描述:计算表达量 # 作者:程超 # 创建时间:2015-4-10 # 联系方式:<EMAIL> #################################################################################### # 修改记录 #################################################################################### # Date Version Author ChangeLog # 2015-4-10 v0.1 ChengChao 创建测试版本 # 2015-7-24 v1.0 ChengChao 加入对bed文件支持;对于bed格式先扫描所有 # 所有基因,记录区域信息,再扫描bed iv # # ##################################################################################### 程序功能说明:
1.计算gene表达量
2.randCheck_gene
3.randCheck_mRNA
程序设计思路:
利用gffutils和HTSeq包进行统计 # 导入必要的包 # reload(sys) # sys.setdefaultencoding('utf-8') # print(sys.path) # 检查python的版本,我们需要使用python2.7 # TODO: HTSeq升级到python3版本后升级程序到python3 # 程序版本号 # ----------------------------------------------------------------------------------- # --- S 参数设置模块 # ----------------------------------------------------------------------------------- Init for option # basic options # preset options 获得完整的命令 # pool watcher for keybord interrupt # 解析参数 # 对参数进行有效性验证和初步处理 # ----------------------------------------------------------------------------------- # --- E 参数设置模块 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S (全局)变量定义及初始化设置模块 # ----------------------------------------------------------------------------------- # 获取路径信息 # absolute script path # absolute bin path # absolute output path # absolute bin path # os.mkdir(tempPath) if not os.path.isdir(tempPath) else None # os.mkdir(resultPath) if not os.path.isdir(resultPath) else None # ----------------------------------------------------------------------------------- # E (全局)变量定义及初始化设置模块 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 日志模块logging初始化 # ----------------------------------------------------------------------------------- Init for logging # 非quiet模式在屏幕打印出程序执行INFO # define a Handler which writes INFO messages or higher to the sys.stderr # set a format which is simpler for console use # tell the handler to use this format # 打印出程序的说明文档 # ----------------------------------------------------------------------------------- # E 日志模块logging初始化 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 计时器模块->Getting Start Time # 
----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # E 计时器模块->Getting Start Time # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 类定义(若有) # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # E 类定义(若有) # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 功能函数定义 # ----------------------------------------------------------------------------------- # def invert_strand(iv): # """ # :param iv: HTSeq.GenomicInterval object # :return: HTSeq.GenomicInterval - strand is reversed # """ # iv2 = iv.copy() # if iv2.strand == "+": # iv2.strand = "-" # elif iv2.strand == "-": # iv2.strand = "+" # else: # raise ValueError, "Illegal strand" # return iv2 # @profile # chr7 34247275 34664347 + # print(eachLine) # if line[0] == "chrM": # continue # if not line[0].startswith("chr"): # continue # if int(line[4])<opt.sjreads: # continue # chr = name.split("\t")[0] # print(">sj iv:") # print(iv) # print(r.iv) # if r.iv.strand != iv.strand: # continue # print(reads_left) # if r.iv.strand != iv.strand: # continue # print(reads_right) # if name not in sjnum: # sjnum[name] = "0" # # print(d[c]["left"]) # tmp=eachLine.strip() + "\t" + sjnum[name] + "\t" # if line[3] == "+": # tmp+=str(reads_left) + "\t" + str(reads_right) + "\n" # else: # tmp+=str(reads_right) + "\t" + str(reads_left) + "\n" # reads_dict[key] = tmp # print(reads_dict) # chr7 34247275 34664347 + # print(eachLine) # if line[0] == "chrM": # continue # if not line[0].startswith("chr"): # continue # if int(line[4])<opt.sjreads: # continue # chr = 
name.split("\t")[0] # print(">sj iv:") # print(iv) # print(r.iv) # if r.iv.strand != iv.strand: # continue # if ((r.iv.strand != iv.strand and (not r.paired_end)) or (r.paired_end and r.iv.strand != iv.strand and r.pe_which == "first") or (r.paired_end and r.iv.strand == iv.strand and r.pe_which == "second")): # continue # print(reads_left) # if r.iv.strand != iv.strand: # continue # if ((r.iv.strand != iv.strand and (not r.paired_end)) or (r.paired_end and r.iv.strand != iv.strand and r.pe_which == "first") or (r.paired_end and r.iv.strand == iv.strand and r.pe_which == "second")): # continue # print(reads_right) # if name not in sjnum: # sjnum[name] = "0" # # print(d[c]["left"]) # tmp=eachLine.strip() + "\t" + sjnum[name] + "\t" # if line[3] == "+": # tmp+=str(reads_left) + "\t" + str(reads_right) + "\n" # else: # tmp+=str(reads_right) + "\t" + str(reads_left) + "\n" # reads_dict[key] = tmp # print(reads_dict) # ----------------------------------------------------------------------------------- # E 功能函数定义 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 主函数 # ----------------------------------------------------------------------------------- # 1.读入gff文件/gtf文件/annotationDB # 读取gff,建立database,以info中的ID信息作为gene的标识,如果db文件已经存在则不需要再提供gff选项,否则db文件会被覆盖 # print(chr) # 2.对每个染色体多线程处理,遍历每个gene,读取gene内的reads,进行计算 # reads = {} # for chr in chrs: # if chr == "chrM": # continue # if not chr.startswith("chr"): # continue # reads[chr] = {} # readChr(chr, reads) # reads = readChr() # print(reads) # Watcher() # pool = multiprocessing.Pool(processes=25) # server = multiprocessing.Manager() # reads = server.dict() # for chr in chrs: # # print(chr) # reads[chr] = {} # pool.apply_async(readChr, args=(chr, reads)) # pool.close() # pool.join() # d = dict(reads).copy() ## multiprocessing.Manager的遍历效率太低 # server.shutdown() # readChr(chr, reads, check, TMR) # 
readChr(chr, reads) # readChr() ## multiprocessing.Manager的遍历效率太低 # chr7 34247275 34664347 + # 执行主函数 # ----------------------------------------------------------------------------------- # E 主函数 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 清理临时文件夹,放在最后 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # E 清理临时文件夹,放在最后 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 计时器模块->Getting Total Run Time # ----------------------------------------------------------------------------------- # in seconds # ----------------------------------------------------------------------------------- # S 计时器模块->Getting Total Run Time # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 发送邮件模块 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 发送邮件模块 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # S 程序运行计数器 # ----------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------- # E 程序运行计数器 # -----------------------------------------------------------------------------------
| 1.870337
| 2
|
test/augmenter/spectrogram/test_time_masking.py
|
techthiyanes/nlpaug
| 3,121
|
6626622
|
<gh_stars>1000+
import unittest
import os
from dotenv import load_dotenv
import numpy as np
from nlpaug.util import AudioLoader
import nlpaug.augmenter.spectrogram as nas
class TestTimeMasking(unittest.TestCase):
    """Tests for the time-masking spectrogram augmenter."""

    @classmethod
    def setUpClass(cls):
        # Load environment variables from the repository-level .env file.
        repo_root = os.path.join(os.path.dirname(__file__), '..', '..', '..')
        load_dotenv(os.path.abspath(os.path.join(repo_root, '.env')))
        # Sample beat: https://freewavesamples.com/yamaha-v50-rock-beat-120-bpm
        cls.sample_wav_file = os.path.join(
            os.environ.get("TEST_DIR"), 'res', 'audio', 'Yamaha-V50-Rock-Beat-120bpm.wav'
        )
        cls.num_of_freq_channel = 128

    def test_no_change_source(self):
        # The augmented output must differ from the source spectrogram
        # in at least one cell.
        spectrogram = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=128)
        augmented = nas.TimeMaskingAug().augment(spectrogram)
        self.assertFalse((spectrogram == augmented).all())

    def test_substitute(self):
        # stateless=False keeps the sampled mask position (aug.t0) on the
        # augmenter so the masked column can be inspected.
        spectrogram = AudioLoader.load_mel_spectrogram(
            self.sample_wav_file, n_mels=self.num_of_freq_channel)
        augmenter = nas.TimeMaskingAug(stateless=False)
        augmented = augmenter.augment(spectrogram)
        # The source column at t0 is fully non-zero ...
        self.assertEqual(len(spectrogram[:, augmenter.t0]),
                         np.count_nonzero(spectrogram[:, augmenter.t0]))
        # ... and the augmented copy has it fully zeroed out.
        self.assertEqual(0, np.count_nonzero(augmented[:, augmenter.t0]))
|
import unittest
import os
from dotenv import load_dotenv
import numpy as np
from nlpaug.util import AudioLoader
import nlpaug.augmenter.spectrogram as nas
class TestTimeMasking(unittest.TestCase):
    """Tests for nlpaug's TimeMaskingAug spectrogram augmenter."""

    @classmethod
    def setUpClass(cls):
        # Resolve and load the repository-level .env file so TEST_DIR is
        # available via the environment below.
        env_config_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), '..', '..', '..', '.env'))
        load_dotenv(env_config_path)
        # Sample beat: https://freewavesamples.com/yamaha-v50-rock-beat-120-bpm
        cls.sample_wav_file = os.path.join(
            os.environ.get("TEST_DIR"), 'res', 'audio', 'Yamaha-V50-Rock-Beat-120bpm.wav'
        )
        cls.num_of_freq_channel = 128

    def test_no_change_source(self):
        # The augmented output must differ from the source somewhere.
        data = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=128)
        aug = nas.TimeMaskingAug()
        aug_data = aug.augment(data)
        comparison = data == aug_data
        self.assertFalse(comparison.all())

    def test_substitute(self):
        # stateless=False keeps the sampled mask position on the augmenter
        # (aug.t0) so the masked column can be inspected.
        data = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=self.num_of_freq_channel)
        aug = nas.TimeMaskingAug(stateless=False)
        aug_data = aug.augment(data)
        # Source column at t0 is fully non-zero; augmented column is zeroed.
        self.assertEqual(len(data[:, aug.t0]), np.count_nonzero(data[:, aug.t0]))
        self.assertEqual(0, np.count_nonzero(aug_data[:, aug.t0]))
|
en
| 0.447543
|
# https://freewavesamples.com/yamaha-v50-rock-beat-120-bpm
| 2.533675
| 3
|
sdk/python/flet/progress.py
|
flet-dev/flet
| 0
|
6626623
|
<gh_stars>0
from typing import Optional
from beartype import beartype
from flet.control import Control
class Progress(Control):
    """Progress-bar control (wraps the Flet "progress" control).

    ``value`` is the numeric progress (int or None, enforced by beartype);
    ``bar_height`` sets the bar thickness; ``label`` and ``description``
    are text attributes rendered by the Flet client.
    """

    def __init__(
        self,
        label=None,
        id=None,
        ref=None,
        description=None,
        value=None,
        bar_height=None,
        width=None,
        height=None,
        padding=None,
        margin=None,
        visible=None,
        disabled=None,
    ):
        # Generic layout/visibility options are handled by the Control base.
        Control.__init__(
            self,
            id=id,
            ref=ref,
            width=width,
            height=height,
            padding=padding,
            margin=margin,
            visible=visible,
            disabled=disabled,
        )
        self.value = value
        self.description = description
        self.label = label
        self.bar_height = bar_height

    def _get_control_name(self):
        # Control-type name used by the Flet client protocol.
        return "progress"

    # value: current progress (int or None; type-checked by beartype).
    @property
    def value(self):
        return self._get_attr("value")

    @value.setter
    @beartype
    def value(self, value: Optional[int]):
        self._set_attr("value", value)

    # description: free-text attribute (semantics defined by the client).
    @property
    def description(self):
        return self._get_attr("description")

    @description.setter
    def description(self, value):
        self._set_attr("description", value)

    # bar_height: stored under the "barheight" attribute key.
    @property
    def bar_height(self):
        return self._get_attr("barheight")

    @bar_height.setter
    def bar_height(self, value):
        self._set_attr("barheight", value)

    # label: caption text attribute.
    @property
    def label(self):
        return self._get_attr("label")

    @label.setter
    def label(self, value):
        self._set_attr("label", value)
|
from typing import Optional
from beartype import beartype
from flet.control import Control
class Progress(Control):
    """Progress-bar control; attribute access is delegated to the Control
    base via _get_attr/_set_attr."""

    def __init__(
        self,
        label=None,
        id=None,
        ref=None,
        description=None,
        value=None,
        bar_height=None,
        width=None,
        height=None,
        padding=None,
        margin=None,
        visible=None,
        disabled=None,
    ):
        # Layout/visibility options are forwarded to the Control base class.
        Control.__init__(
            self,
            id=id,
            ref=ref,
            width=width,
            height=height,
            padding=padding,
            margin=margin,
            visible=visible,
            disabled=disabled,
        )
        self.value = value
        self.description = description
        self.label = label
        self.bar_height = bar_height

    def _get_control_name(self):
        # Identifies this control type to the Flet protocol.
        return "progress"

    # value — int or None (validated by beartype)
    @property
    def value(self):
        return self._get_attr("value")

    @value.setter
    @beartype
    def value(self, value: Optional[int]):
        self._set_attr("value", value)

    # description
    @property
    def description(self):
        return self._get_attr("description")

    @description.setter
    def description(self, value):
        self._set_attr("description", value)

    # bar_height — note the attribute key is "barheight"
    @property
    def bar_height(self):
        return self._get_attr("barheight")

    @bar_height.setter
    def bar_height(self, value):
        self._set_attr("barheight", value)

    # label
    @property
    def label(self):
        return self._get_attr("label")

    @label.setter
    def label(self, value):
        self._set_attr("label", value)
|
en
| 0.329529
|
# value # description # bar_height # label
| 2.513537
| 3
|
test_case_generator.py
|
jeticg/CMPT411-HW2
| 0
|
6626624
|
<reponame>jeticg/CMPT411-HW2
import random
import sys
class KBTestCaseGenerator():
    """Generate a random propositional knowledge base and write it to disk.

    Each sentence is a triple [head, body1, body2] of atom indices and is
    rendered as ``[aH [aP ...] [aN ...]]`` — one sentence per line.
    """

    def __init__(self, length=10, atoms=10):
        """Create a KB with *length* sentences over *atoms* atoms."""
        self.generate(length, atoms)
        return

    def generate(self, length, atoms):
        """Populate self.atomList / self.sentenceList with random sentences.

        The head atom never appears in either body list, and each body list
        contains no duplicates.
        """
        self.atomList = []
        self.sentenceList = []
        for i in range(atoms):
            self.atomList.append('a' + str(i))
        for i in range(length):
            # sentence = [head atom index, body list 1, body list 2]
            sentence = [random.randint(0, atoms - 1), [], []]
            # Bug fix: ``atoms / 2`` is a float under Python 3 and makes
            # random.randint() raise; use floor division instead.
            for j in range(random.randint(0, atoms // 2)):
                tmp = random.randint(0, atoms - 1)
                if tmp != sentence[0] and tmp not in sentence[1]:
                    sentence[1].append(tmp)
            for j in range(random.randint(0, atoms // 2)):
                tmp = random.randint(0, atoms - 1)
                if tmp != sentence[0] and tmp not in sentence[2]:
                    sentence[2].append(tmp)
            self.sentenceList.append(sentence)
        return

    def writeToFile(self, fileName):
        """Write the generated KB to *fileName*, one sentence per line."""
        # Context manager guarantees the handle is closed on errors too;
        # " ".join reproduces the original space-separated formatting.
        with open(fileName, "w") as f:
            for sentence in self.sentenceList:
                body1 = " ".join(self.atomList[idx] for idx in sentence[1])
                body2 = " ".join(self.atomList[idx] for idx in sentence[2])
                f.write("[" + self.atomList[sentence[0]] + " [" + body1 + "] [" + body2 + "]]\n")
        return
if __name__ == '__main__':
    # Usage: test_case_generator.py OUTFILE [LENGTH ATOMS]
    # Bug fix: the old guard checked ``len(sys.argv) > 2`` but indexed
    # argv[3], and passed the arguments as strings where ints are required.
    if len(sys.argv) > 3:
        g = KBTestCaseGenerator(int(sys.argv[2]), int(sys.argv[3]))
    else:
        g = KBTestCaseGenerator()
    g.writeToFile(sys.argv[1])
|
import random
import sys
class KBTestCaseGenerator():
    """Generate a random propositional knowledge base and write it to disk.

    Each sentence is a triple [head, body1, body2] of atom indices,
    rendered as ``[aH [aP ...] [aN ...]]`` — one sentence per line.
    """

    def __init__(self, length=10, atoms=10):
        # Generate immediately on construction.
        self.generate(length, atoms)
        return

    def generate(self, length, atoms):
        """Populate self.atomList / self.sentenceList with random sentences.

        NOTE(review): ``atoms / 2`` is a float under Python 3, which makes
        random.randint() raise; ``atoms // 2`` is needed for Python 3.
        """
        self.atomList = []
        self.sentenceList = []
        for i in range(atoms):
            self.atomList.append('a' + str(i))
        for i in range(length):
            # sentence = [head atom index, body list 1, body list 2];
            # the head never appears in either body and bodies hold no dups.
            sentence = [random.randint(0, atoms - 1), [], []]
            for j in range(random.randint(0, atoms / 2)):
                tmp = random.randint(0, atoms - 1)
                if tmp != sentence[0] and tmp not in sentence[1]:
                    sentence[1].append(tmp)
            for j in range(random.randint(0, atoms / 2)):
                tmp = random.randint(0, atoms - 1)
                if tmp != sentence[0] and tmp not in sentence[2]:
                    sentence[2].append(tmp)
            self.sentenceList.append(sentence)
        return

    def writeToFile(self, fileName):
        """Write the generated KB to *fileName*, one sentence per line,
        with body atoms separated by single spaces."""
        f = open(fileName, "w")
        for sentence in self.sentenceList:
            f.write("[" + self.atomList[sentence[0]] + " [")
            for i in range(len(sentence[1])):
                if i != len(sentence[1]) - 1:
                    f.write(self.atomList[sentence[1][i]] + " ")
                else:
                    f.write(self.atomList[sentence[1][i]])
            f.write("] [")
            for i in range(len(sentence[2])):
                if i != len(sentence[2]) - 1:
                    f.write(self.atomList[sentence[2][i]] + " ")
                else:
                    f.write(self.atomList[sentence[2][i]])
            f.write("]]\n")
        f.close()
        return
if __name__ == '__main__':
    # Usage: test_case_generator.py OUTFILE [LENGTH ATOMS]
    # NOTE(review): the guard checks len(sys.argv) > 2 but indexes argv[3],
    # and passes the arguments as strings where ints are expected.
    if len(sys.argv) > 2:
        g = KBTestCaseGenerator(sys.argv[2], sys.argv[3])
    else:
        g = KBTestCaseGenerator()
    g.writeToFile(sys.argv[1])
|
none
| 1
| 3.124117
| 3
|
|
kbc_pul/popularity/entity_counting/count_to_normalized_popularity.py
|
ML-KULeuven/KBC-as-PU-Learning
| 4
|
6626625
|
from abc import abstractmethod
from functools import partial
from typing import Callable
from kbc_pul.popularity.logistic_functions import logistic_popularity_function
class AbstractCountToNormalizedPopularityMapper:
    """Interface for mappers that squash a real-valued count into [0, 1]."""

    @classmethod
    def is_value_normalized(cls, value) -> bool:
        """Return True iff *value* lies in the closed unit interval."""
        lower_ok = value >= 0
        upper_ok = value <= 1
        return lower_ok and upper_ok

    @abstractmethod
    def map_count_to_normalized_popularity(self, count: int) -> float:
        """Map a raw count to a normalized popularity value."""
        pass
class LogisticCountToNormalizedPopularityMapper(AbstractCountToNormalizedPopularityMapper):
    """Maps a count through a logistic function and floors the result.

    NOTE(review): "POPENSITY" looks like a typo for "PROPENSITY"; the name
    is part of the public interface, so it is kept as-is.
    """
    # Lower bound applied to the logistic output.
    MINIMUM_POPENSITY_SCORE_VALUE = 0.01

    def __init__(self, log_growth_rate: float):
        # Pre-bind the growth rate so the mapper is a one-argument callable.
        self.function: Callable = partial(
            logistic_popularity_function, log_growth_rate=log_growth_rate
        )

    def map_count_to_normalized_popularity(self, count: int) -> float:
        """Apply the logistic function to *count*, clamped below at 0.01."""
        value = self.function(count)
        if value < LogisticCountToNormalizedPopularityMapper.MINIMUM_POPENSITY_SCORE_VALUE:
            value = LogisticCountToNormalizedPopularityMapper.MINIMUM_POPENSITY_SCORE_VALUE
        return value
|
from abc import abstractmethod
from functools import partial
from typing import Callable
from kbc_pul.popularity.logistic_functions import logistic_popularity_function
class AbstractCountToNormalizedPopularityMapper:
    """
    Maps a value in R to [0,1].
    """
    @classmethod
    def is_value_normalized(cls, value) -> bool:
        # True iff the value already lies in the closed unit interval.
        return 0 <= value <= 1

    @abstractmethod
    def map_count_to_normalized_popularity(self, count: int) -> float:
        """Map a raw count to a normalized popularity value."""
        pass


class LogisticCountToNormalizedPopularityMapper(AbstractCountToNormalizedPopularityMapper):
    """Logistic mapping of a count, floored at MINIMUM_POPENSITY_SCORE_VALUE.

    NOTE(review): "POPENSITY" looks like a typo for "PROPENSITY"; kept as-is
    because the constant name is part of the public interface.
    """
    MINIMUM_POPENSITY_SCORE_VALUE = 0.01

    def __init__(self, log_growth_rate: float):
        # Pre-bind the growth rate so the mapper is a one-argument callable.
        self.function: Callable = partial(
            logistic_popularity_function, log_growth_rate=log_growth_rate
        )

    def map_count_to_normalized_popularity(self, count: int) -> float:
        """Apply the logistic function to *count*, clamped below at 0.01."""
        value = self.function(count)
        if value < LogisticCountToNormalizedPopularityMapper.MINIMUM_POPENSITY_SCORE_VALUE:
            value = LogisticCountToNormalizedPopularityMapper.MINIMUM_POPENSITY_SCORE_VALUE
        return value
|
en
| 0.800385
|
Maps a value in R to [0,1]
| 3.21108
| 3
|
traffic.py
|
jakeflo88/pythonClass
| 0
|
6626626
|
market_2nd = {'ns': 'green', 'ew': 'red'}


def switchLights(intersection):
    """Advance every light in *intersection* one step of the cycle
    green -> yellow -> red -> green, mutating the dict in place.

    Afterwards asserts that at least one direction shows red; the
    assertion message includes the resulting state. Unknown colour
    values are left unchanged.
    """
    next_color = {'green': 'yellow', 'yellow': 'red', 'red': 'green'}
    for direction, color in intersection.items():
        intersection[direction] = next_color.get(color, color)
    assert 'red' in intersection.values(), 'Neither light is red!' + str(intersection)
print(market_2nd)
# This transition yields {'ns': 'yellow', 'ew': 'green'} — no red light —
# so the assert inside switchLights fires (AssertionError) and the final
# print below is never reached. This is the demo's point.
switchLights(market_2nd)
print(market_2nd)
|
market_2nd = {'ns': 'green', 'ew': 'red'}


def switchLights(intersection):
    """Advance each light one step (green -> yellow -> red -> green) in
    place, then assert that at least one direction shows red."""
    for key in intersection.keys():
        if intersection[key] == 'green':
            intersection[key] = 'yellow'
        elif intersection[key] == 'yellow':
            intersection[key] = 'red'
        elif intersection[key] == 'red':
            intersection[key] = 'green'
    assert 'red' in intersection.values(), 'Neither light is red!' + str(intersection)


print(market_2nd)
# This call leaves no red light, so the assert fires (AssertionError)
# and the final print is never reached.
switchLights(market_2nd)
print(market_2nd)
|
none
| 1
| 3.759002
| 4
|
|
my_vim_files/python27/Lib/test/test_difflib.py
|
satsaeid/dotfiles
| 0
|
6626627
|
<filename>my_vim_files/python27/Lib/test/test_difflib.py
import difflib
from test.test_support import run_unittest, findfile
import unittest
import doctest
import sys
class TestSFbugs(unittest.TestCase):
    """Regression tests for difflib SourceForge bug reports.

    NOTE(review): this is Python 2 code — ``generator.next`` below is the
    Python 2 iterator protocol (``__next__`` in Python 3).
    """

    def test_ratio_for_null_seqn(self):
        # Check clearing of SF bug 763023: every ratio of two empty
        # sequences is defined to be 1.
        s = difflib.SequenceMatcher(None, [], [])
        self.assertEqual(s.ratio(), 1)
        self.assertEqual(s.quick_ratio(), 1)
        self.assertEqual(s.real_quick_ratio(), 1)

    def test_comparing_empty_lists(self):
        # Check fix for bug #979794: diffing two empty lists yields
        # immediately-exhausted generators.
        group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes()
        self.assertRaises(StopIteration, group_gen.next)
        diff_gen = difflib.unified_diff([], [])
        self.assertRaises(StopIteration, diff_gen.next)

    def test_added_tab_hint(self):
        # Check fix for bug #1488943: the "?" hint lines must align with
        # tab-indented content.
        diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"]))
        self.assertEqual("- \tI am a buggy", diff[0])
        self.assertEqual("? --\n", diff[1])
        self.assertEqual("+ \t\tI am a bug", diff[2])
        self.assertEqual("? +\n", diff[3])
patch914575_from1 = """
1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
"""
patch914575_to1 = """
1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
"""
patch914575_from2 = """
\t\tLine 1: preceeded by from:[tt] to:[ssss]
\t\tLine 2: preceeded by from:[sstt] to:[sssst]
\t \tLine 3: preceeded by from:[sstst] to:[ssssss]
Line 4: \thas from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end\t
"""
patch914575_to2 = """
Line 1: preceeded by from:[tt] to:[ssss]
\tLine 2: preceeded by from:[sstt] to:[sssst]
Line 3: preceeded by from:[sstst] to:[ssssss]
Line 4: has from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end
"""
patch914575_from3 = """line 0
1234567890123456789012345689012345
line 1
line 2
line 3
line 4 changed
line 5 changed
line 6 changed
line 7
line 8 subtracted
line 9
1234567890123456789012345689012345
short line
just fits in!!
just fits in two lines yup!!
the end"""
patch914575_to3 = """line 0
1234567890123456789012345689012345
line 1
line 2 added
line 3
line 4 chanGEd
line 5a chanGed
line 6a changEd
line 7
line 8
line 9
1234567890
another long line that needs to be wrapped
just fitS in!!
just fits in two lineS yup!!
the end"""
class TestSFpatches(unittest.TestCase):
    """Tests for difflib SourceForge patches (HTML diff, recursion)."""

    def test_html_diff(self):
        # Check SF patch 914575 for generating HTML differences.
        # Build several from/to fixtures of varying size from the module
        # level patch914575_* strings.
        f1a = ((patch914575_from1 + '123\n'*10)*3)
        t1a = (patch914575_to1 + '123\n'*10)*3
        f1b = '456\n'*10 + f1a
        t1b = '456\n'*10 + t1a
        f1a = f1a.splitlines()
        t1a = t1a.splitlines()
        f1b = f1b.splitlines()
        t1b = t1b.splitlines()
        f2 = patch914575_from2.splitlines()
        t2 = patch914575_to2.splitlines()
        f3 = patch914575_from3
        t3 = patch914575_to3
        # Three HtmlDiff configurations: default, small tabs, narrow wrap.
        i = difflib.HtmlDiff()
        j = difflib.HtmlDiff(tabsize=2)
        k = difflib.HtmlDiff(wrapcolumn=14)
        full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5)
        tables = '\n'.join(
            [
             '<h2>Context (first diff within numlines=5(default))</h2>',
             i.make_table(f1a,t1a,'from','to',context=True),
             '<h2>Context (first diff after numlines=5(default))</h2>',
             i.make_table(f1b,t1b,'from','to',context=True),
             '<h2>Context (numlines=6)</h2>',
             i.make_table(f1a,t1a,'from','to',context=True,numlines=6),
             '<h2>Context (numlines=0)</h2>',
             i.make_table(f1a,t1a,'from','to',context=True,numlines=0),
             '<h2>Same Context</h2>',
             i.make_table(f1a,f1a,'from','to',context=True),
             '<h2>Same Full</h2>',
             i.make_table(f1a,f1a,'from','to',context=False),
             '<h2>Empty Context</h2>',
             i.make_table([],[],'from','to',context=True),
             '<h2>Empty Full</h2>',
             i.make_table([],[],'from','to',context=False),
             '<h2>tabsize=2</h2>',
             j.make_table(f2,t2),
             '<h2>tabsize=default</h2>',
             i.make_table(f2,t2),
             '<h2>Context (wrapcolumn=14,numlines=0)</h2>',
             k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0),
             '<h2>wrapcolumn=14,splitlines()</h2>',
             k.make_table(f3.splitlines(),t3.splitlines()),
             '<h2>wrapcolumn=14,splitlines(True)</h2>',
             k.make_table(f3.splitlines(True),t3.splitlines(True)),
             ])
        actual = full.replace('</body>','\n%s\n</body>' % tables)
        # temporarily uncomment next two lines to baseline this test
        #with open('test_difflib_expect.html','w') as fp:
        #    fp.write(actual)
        # Compare the generated HTML against the checked-in golden file.
        with open(findfile('test_difflib_expect.html')) as fp:
            self.assertEqual(actual, fp.read())

    def test_recursion_limit(self):
        # Check if the problem described in patch #1413711 exists: diffing
        # twice the recursion limit's worth of lines must not overflow.
        limit = sys.getrecursionlimit()
        old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)]
        new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)]
        difflib.SequenceMatcher(None, old, new).get_opcodes()
class TestOutputFormat(unittest.TestCase):
def test_tab_delimiter(self):
args = ['one', 'two', 'Original', 'Current',
'2005-01-26 23:30:50', '2010-04-02 10:20:52']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], [
"--- Original\t2005-01-26 23:30:50",
"+++ Current\t2010-04-02 10:20:52"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], [
"*** Original\t2005-01-26 23:30:50",
"--- Current\t2010-04-02 10:20:52"])
def test_no_trailing_tab_on_empty_filedate(self):
args = ['one', 'two', 'Original', 'Current']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"])
def test_main():
difflib.HtmlDiff._default_prefix = 0
Doctests = doctest.DocTestSuite(difflib)
run_unittest(TestSFpatches, TestSFbugs, TestOutputFormat, Doctests)
if __name__ == '__main__':
test_main()
|
<filename>my_vim_files/python27/Lib/test/test_difflib.py
import difflib
from test.test_support import run_unittest, findfile
import unittest
import doctest
import sys
class TestSFbugs(unittest.TestCase):
def test_ratio_for_null_seqn(self):
# Check clearing of SF bug 763023
s = difflib.SequenceMatcher(None, [], [])
self.assertEqual(s.ratio(), 1)
self.assertEqual(s.quick_ratio(), 1)
self.assertEqual(s.real_quick_ratio(), 1)
def test_comparing_empty_lists(self):
# Check fix for bug #979794
group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes()
self.assertRaises(StopIteration, group_gen.next)
diff_gen = difflib.unified_diff([], [])
self.assertRaises(StopIteration, diff_gen.next)
def test_added_tab_hint(self):
# Check fix for bug #1488943
diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"]))
self.assertEqual("- \tI am a buggy", diff[0])
self.assertEqual("? --\n", diff[1])
self.assertEqual("+ \t\tI am a bug", diff[2])
self.assertEqual("? +\n", diff[3])
patch914575_from1 = """
1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
"""
patch914575_to1 = """
1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
"""
patch914575_from2 = """
\t\tLine 1: preceeded by from:[tt] to:[ssss]
\t\tLine 2: preceeded by from:[sstt] to:[sssst]
\t \tLine 3: preceeded by from:[sstst] to:[ssssss]
Line 4: \thas from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end\t
"""
patch914575_to2 = """
Line 1: preceeded by from:[tt] to:[ssss]
\tLine 2: preceeded by from:[sstt] to:[sssst]
Line 3: preceeded by from:[sstst] to:[ssssss]
Line 4: has from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end
"""
patch914575_from3 = """line 0
1234567890123456789012345689012345
line 1
line 2
line 3
line 4 changed
line 5 changed
line 6 changed
line 7
line 8 subtracted
line 9
1234567890123456789012345689012345
short line
just fits in!!
just fits in two lines yup!!
the end"""
patch914575_to3 = """line 0
1234567890123456789012345689012345
line 1
line 2 added
line 3
line 4 chanGEd
line 5a chanGed
line 6a changEd
line 7
line 8
line 9
1234567890
another long line that needs to be wrapped
just fitS in!!
just fits in two lineS yup!!
the end"""
class TestSFpatches(unittest.TestCase):
def test_html_diff(self):
# Check SF patch 914575 for generating HTML differences
f1a = ((patch914575_from1 + '123\n'*10)*3)
t1a = (patch914575_to1 + '123\n'*10)*3
f1b = '456\n'*10 + f1a
t1b = '456\n'*10 + t1a
f1a = f1a.splitlines()
t1a = t1a.splitlines()
f1b = f1b.splitlines()
t1b = t1b.splitlines()
f2 = patch914575_from2.splitlines()
t2 = patch914575_to2.splitlines()
f3 = patch914575_from3
t3 = patch914575_to3
i = difflib.HtmlDiff()
j = difflib.HtmlDiff(tabsize=2)
k = difflib.HtmlDiff(wrapcolumn=14)
full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5)
tables = '\n'.join(
[
'<h2>Context (first diff within numlines=5(default))</h2>',
i.make_table(f1a,t1a,'from','to',context=True),
'<h2>Context (first diff after numlines=5(default))</h2>',
i.make_table(f1b,t1b,'from','to',context=True),
'<h2>Context (numlines=6)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=6),
'<h2>Context (numlines=0)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=0),
'<h2>Same Context</h2>',
i.make_table(f1a,f1a,'from','to',context=True),
'<h2>Same Full</h2>',
i.make_table(f1a,f1a,'from','to',context=False),
'<h2>Empty Context</h2>',
i.make_table([],[],'from','to',context=True),
'<h2>Empty Full</h2>',
i.make_table([],[],'from','to',context=False),
'<h2>tabsize=2</h2>',
j.make_table(f2,t2),
'<h2>tabsize=default</h2>',
i.make_table(f2,t2),
'<h2>Context (wrapcolumn=14,numlines=0)</h2>',
k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0),
'<h2>wrapcolumn=14,splitlines()</h2>',
k.make_table(f3.splitlines(),t3.splitlines()),
'<h2>wrapcolumn=14,splitlines(True)</h2>',
k.make_table(f3.splitlines(True),t3.splitlines(True)),
])
actual = full.replace('</body>','\n%s\n</body>' % tables)
# temporarily uncomment next two lines to baseline this test
#with open('test_difflib_expect.html','w') as fp:
# fp.write(actual)
with open(findfile('test_difflib_expect.html')) as fp:
self.assertEqual(actual, fp.read())
def test_recursion_limit(self):
# Check if the problem described in patch #1413711 exists.
limit = sys.getrecursionlimit()
old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)]
new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)]
difflib.SequenceMatcher(None, old, new).get_opcodes()
class TestOutputFormat(unittest.TestCase):
def test_tab_delimiter(self):
args = ['one', 'two', 'Original', 'Current',
'2005-01-26 23:30:50', '2010-04-02 10:20:52']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], [
"--- Original\t2005-01-26 23:30:50",
"+++ Current\t2010-04-02 10:20:52"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], [
"*** Original\t2005-01-26 23:30:50",
"--- Current\t2010-04-02 10:20:52"])
def test_no_trailing_tab_on_empty_filedate(self):
args = ['one', 'two', 'Original', 'Current']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"])
def test_main():
difflib.HtmlDiff._default_prefix = 0
Doctests = doctest.DocTestSuite(difflib)
run_unittest(TestSFpatches, TestSFbugs, TestOutputFormat, Doctests)
if __name__ == '__main__':
test_main()
|
en
| 0.879752
|
# Check clearing of SF bug 763023 # Check fix for bug #979794 # Check fix for bug #1488943 1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated. 1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested. \t\tLine 1: preceeded by from:[tt] to:[ssss]
\t\tLine 2: preceeded by from:[sstt] to:[sssst]
\t \tLine 3: preceeded by from:[sstst] to:[ssssss]
Line 4: \thas from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end\t Line 1: preceeded by from:[tt] to:[ssss]
\tLine 2: preceeded by from:[sstt] to:[sssst]
Line 3: preceeded by from:[sstst] to:[ssssss]
Line 4: has from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end line 0
1234567890123456789012345689012345
line 1
line 2
line 3
line 4 changed
line 5 changed
line 6 changed
line 7
line 8 subtracted
line 9
1234567890123456789012345689012345
short line
just fits in!!
just fits in two lines yup!!
the end line 0
1234567890123456789012345689012345
line 1
line 2 added
line 3
line 4 chanGEd
line 5a chanGed
line 6a changEd
line 7
line 8
line 9
1234567890
another long line that needs to be wrapped
just fitS in!!
just fits in two lineS yup!!
the end # Check SF patch 914575 for generating HTML differences # temporarily uncomment next two lines to baseline this test #with open('test_difflib_expect.html','w') as fp: # fp.write(actual) # Check if the problem described in patch #1413711 exists.
| 2.495894
| 2
|
tests/sources/python/2_advanced/src/modules/auxiliar.py
|
ramonamela/compss
| 31
|
6626628
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench Tasks
========================
"""
# Imports
from pycompss.api.task import task
@task(returns=list)
def function_B(v):
import platform
return list(platform.uname())
def app2(*args):
from pycompss.api.api import compss_wait_on
result = function_B(1)
result = compss_wait_on(result)
return result
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench Tasks
========================
"""
# Imports
from pycompss.api.task import task
@task(returns=list)
def function_B(v):
import platform
return list(platform.uname())
def app2(*args):
from pycompss.api.api import compss_wait_on
result = function_B(1)
result = compss_wait_on(result)
return result
|
en
| 0.54547
|
#!/usr/bin/python # -*- coding: utf-8 -*- PyCOMPSs Testbench Tasks ======================== # Imports
| 1.904604
| 2
|
src/environments/slp_sml.py
|
grockious/lcrl
| 18
|
6626629
|
from src.environments.slippery_grid import SlipperyGrid
import numpy as np
# an example slippery grid
# only the labelling function needs to be specified
# create a SlipperyGrid object
slp_sml = SlipperyGrid(shape=[12, 10], initial_state=[2, 0], slip_probability=0.05)
# define the labellings
labels = np.empty([slp_sml.shape[0], slp_sml.shape[1]], dtype=object)
labels[0:12, 0:10] = 'safe'
labels[5][8] = labels[5][9] = labels[6][8] = labels[6][9] = 'goal1'
labels[8][8] = labels[8][9] = labels[9][8] = labels[9][9] = 'goal2'
labels[7][4] = labels[7][5] = labels[8][4] = labels[8][5] = 'goal3'
labels[8][0] = labels[8][1] = labels[9][0] = labels[9][1] = 'goal4'
# override the labels
slp_sml.labels = labels
|
from src.environments.slippery_grid import SlipperyGrid
import numpy as np
# an example slippery grid
# only the labelling function needs to be specified
# create a SlipperyGrid object
slp_sml = SlipperyGrid(shape=[12, 10], initial_state=[2, 0], slip_probability=0.05)
# define the labellings
labels = np.empty([slp_sml.shape[0], slp_sml.shape[1]], dtype=object)
labels[0:12, 0:10] = 'safe'
labels[5][8] = labels[5][9] = labels[6][8] = labels[6][9] = 'goal1'
labels[8][8] = labels[8][9] = labels[9][8] = labels[9][9] = 'goal2'
labels[7][4] = labels[7][5] = labels[8][4] = labels[8][5] = 'goal3'
labels[8][0] = labels[8][1] = labels[9][0] = labels[9][1] = 'goal4'
# override the labels
slp_sml.labels = labels
|
en
| 0.371844
|
# an example slippery grid # only the labelling function needs to be specified # create a SlipperyGrid object # define the labellings # override the labels
| 2.611243
| 3
|
tfx/orchestration/experimental/core/task_schedulers/manual_task_scheduler.py
|
avelez93/tfx
| 1
|
6626630
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A task scheduler for Manual system node."""
import threading
from typing import Optional
import attr
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
NODE_STATE_PROPERTY_KEY = '__manual_node_state__'
_POLLING_INTERVAL_SECS = 30
@attr.s(auto_attribs=True, kw_only=True)
class ManualNodeState(json_utils.Jsonable):
"""Manual node's internal state.
Attributes:
state: Current state of the manual node.
"""
# This state indicates that the manual node is waiting for the manual step to
# be completed.
WAITING = 'waiting'
# This state indicates that the manual step has been completed.
COMPLETED = 'completed'
state: str = attr.ib(
default=WAITING, validator=attr.validators.in_([WAITING, COMPLETED]))
@classmethod
def from_mlmd_value(
cls,
value: Optional[metadata_store_pb2.Value] = None) -> 'ManualNodeState':
if not value:
return ManualNodeState()
node_state_json = data_types_utils.get_metadata_value(value)
if not node_state_json:
return ManualNodeState()
return json_utils.loads(node_state_json)
def set_mlmd_value(
self, value: metadata_store_pb2.Value) -> metadata_store_pb2.Value:
data_types_utils.set_metadata_value(value, json_utils.dumps(self))
return value
class ManualTaskScheduler(task_scheduler.TaskScheduler[task_lib.ExecNodeTask]):
"""A task scheduler for Manual system node."""
def __init__(self, mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline, task: task_lib.ExecNodeTask):
super().__init__(mlmd_handle, pipeline, task)
self._cancel = threading.Event()
if task.is_cancelled:
self._cancel.set()
def schedule(self) -> task_scheduler.TaskSchedulerResult:
while not self._cancel.wait(_POLLING_INTERVAL_SECS):
with mlmd_state.mlmd_execution_atomic_op(
mlmd_handle=self.mlmd_handle,
execution_id=self.task.execution_id) as execution:
node_state_mlmd_value = execution.custom_properties.get(
NODE_STATE_PROPERTY_KEY)
node_state = ManualNodeState.from_mlmd_value(node_state_mlmd_value)
if node_state.state == ManualNodeState.COMPLETED:
return task_scheduler.TaskSchedulerResult(
status=status_lib.Status(code=status_lib.Code.OK),
output=task_scheduler.ExecutorNodeOutput())
return task_scheduler.TaskSchedulerResult(
status=status_lib.Status(code=status_lib.Code.CANCELLED),
output=task_scheduler.ExecutorNodeOutput())
def cancel(self) -> None:
self._cancel.set()
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A task scheduler for Manual system node."""
import threading
from typing import Optional
import attr
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import json_utils
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
NODE_STATE_PROPERTY_KEY = '__manual_node_state__'
_POLLING_INTERVAL_SECS = 30
@attr.s(auto_attribs=True, kw_only=True)
class ManualNodeState(json_utils.Jsonable):
"""Manual node's internal state.
Attributes:
state: Current state of the manual node.
"""
# This state indicates that the manual node is waiting for the manual step to
# be completed.
WAITING = 'waiting'
# This state indicates that the manual step has been completed.
COMPLETED = 'completed'
state: str = attr.ib(
default=WAITING, validator=attr.validators.in_([WAITING, COMPLETED]))
@classmethod
def from_mlmd_value(
cls,
value: Optional[metadata_store_pb2.Value] = None) -> 'ManualNodeState':
if not value:
return ManualNodeState()
node_state_json = data_types_utils.get_metadata_value(value)
if not node_state_json:
return ManualNodeState()
return json_utils.loads(node_state_json)
def set_mlmd_value(
self, value: metadata_store_pb2.Value) -> metadata_store_pb2.Value:
data_types_utils.set_metadata_value(value, json_utils.dumps(self))
return value
class ManualTaskScheduler(task_scheduler.TaskScheduler[task_lib.ExecNodeTask]):
"""A task scheduler for Manual system node."""
def __init__(self, mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline, task: task_lib.ExecNodeTask):
super().__init__(mlmd_handle, pipeline, task)
self._cancel = threading.Event()
if task.is_cancelled:
self._cancel.set()
def schedule(self) -> task_scheduler.TaskSchedulerResult:
while not self._cancel.wait(_POLLING_INTERVAL_SECS):
with mlmd_state.mlmd_execution_atomic_op(
mlmd_handle=self.mlmd_handle,
execution_id=self.task.execution_id) as execution:
node_state_mlmd_value = execution.custom_properties.get(
NODE_STATE_PROPERTY_KEY)
node_state = ManualNodeState.from_mlmd_value(node_state_mlmd_value)
if node_state.state == ManualNodeState.COMPLETED:
return task_scheduler.TaskSchedulerResult(
status=status_lib.Status(code=status_lib.Code.OK),
output=task_scheduler.ExecutorNodeOutput())
return task_scheduler.TaskSchedulerResult(
status=status_lib.Status(code=status_lib.Code.CANCELLED),
output=task_scheduler.ExecutorNodeOutput())
def cancel(self) -> None:
self._cancel.set()
|
en
| 0.852087
|
# Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A task scheduler for Manual system node. Manual node's internal state. Attributes: state: Current state of the manual node. # This state indicates that the manual node is waiting for the manual step to # be completed. # This state indicates that the manual step has been completed. A task scheduler for Manual system node.
| 1.928795
| 2
|
legion/jupyterlab-plugin/legion/jupyterlab/handlers/helper.py
|
legion-platform/legion
| 19
|
6626631
|
#
# Copyright 2019 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Declaration of cloud handlers
"""
import functools
from tornado.web import HTTPError
from legion.sdk.clients.edi import EDIConnectionException, IncorrectAuthorizationToken
LEGION_X_JWT_TOKEN = 'X-<PASSWORD>'
DEFAULT_EDI_ENDPOINT = 'DEFAULT_EDI_ENDPOINT'
LEGION_OAUTH_TOKEN_COOKIE_NAME = '_legion_oauth_token'
LEGION_OAUTH_STATE_COOKIE_NAME = '_legion_oauth_state'
def decorate_handler_for_exception(function):
"""
Wrap API handler to properly handle EDI client exceptions
:param function: function to wrap
:return: wrapped function
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except IncorrectAuthorizationToken as base_exception:
raise HTTPError(log_message=str(base_exception), status_code=403) from base_exception
except EDIConnectionException as base_exception:
raise HTTPError(log_message=str(base_exception)) from base_exception
return wrapper
def decorate_async_handler_for_exception(function):
"""
Wrap async API handler to properly handle EDI client exceptions
:param function: function to wrap
:return: wrapped function
"""
@functools.wraps(function)
async def wrapper(*args, **kwargs):
try:
return await function(*args, **kwargs)
except IncorrectAuthorizationToken as base_exception:
raise HTTPError(log_message=str(base_exception), status_code=403) from base_exception
except EDIConnectionException as base_exception:
raise HTTPError(log_message=str(base_exception)) from base_exception
return wrapper
def url_join(*pieces: str) -> str:
"""
Join url parts, avoid slash duplicates or lacks
:param pieces: any number of url parts
:return: url
"""
return '/'.join(s.strip('/') for s in pieces)
|
#
# Copyright 2019 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Declaration of cloud handlers
"""
import functools
from tornado.web import HTTPError
from legion.sdk.clients.edi import EDIConnectionException, IncorrectAuthorizationToken
LEGION_X_JWT_TOKEN = 'X-<PASSWORD>'
DEFAULT_EDI_ENDPOINT = 'DEFAULT_EDI_ENDPOINT'
LEGION_OAUTH_TOKEN_COOKIE_NAME = '_legion_oauth_token'
LEGION_OAUTH_STATE_COOKIE_NAME = '_legion_oauth_state'
def decorate_handler_for_exception(function):
"""
Wrap API handler to properly handle EDI client exceptions
:param function: function to wrap
:return: wrapped function
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except IncorrectAuthorizationToken as base_exception:
raise HTTPError(log_message=str(base_exception), status_code=403) from base_exception
except EDIConnectionException as base_exception:
raise HTTPError(log_message=str(base_exception)) from base_exception
return wrapper
def decorate_async_handler_for_exception(function):
"""
Wrap async API handler to properly handle EDI client exceptions
:param function: function to wrap
:return: wrapped function
"""
@functools.wraps(function)
async def wrapper(*args, **kwargs):
try:
return await function(*args, **kwargs)
except IncorrectAuthorizationToken as base_exception:
raise HTTPError(log_message=str(base_exception), status_code=403) from base_exception
except EDIConnectionException as base_exception:
raise HTTPError(log_message=str(base_exception)) from base_exception
return wrapper
def url_join(*pieces: str) -> str:
"""
Join url parts, avoid slash duplicates or lacks
:param pieces: any number of url parts
:return: url
"""
return '/'.join(s.strip('/') for s in pieces)
|
en
| 0.738911
|
# # Copyright 2019 EPAM Systems # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Declaration of cloud handlers Wrap API handler to properly handle EDI client exceptions :param function: function to wrap :return: wrapped function Wrap async API handler to properly handle EDI client exceptions :param function: function to wrap :return: wrapped function Join url parts, avoid slash duplicates or lacks :param pieces: any number of url parts :return: url
| 1.893384
| 2
|
ColorDropperShapedFrame.py
|
Metallicow/ColorDropper
| 1
|
6626632
|
<filename>ColorDropperShapedFrame.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Copyright (c) 2017 <NAME>
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
"""
Color dropper that follows the mouse pointer.
OnClick functionality will have to be implemented by the user.
License: MIT
"""
import wx
if 'phoenix' in wx.version():
wx.RegionFromBitmap = wx.Region
wx.EmptyBitmap = wx.Bitmap
def GetComplementaryColor(hexStr):
"""Returns complementary RGB color
Example Usage:
>>> GetComplementaryColor('#FFFFFF')
'#000000'
"""
if hexStr[0] == '#':
hexStr = hexStr[1:]
rgb = (hexStr[0:2], hexStr[2:4], hexStr[4:6])
compColor = '#'
for a in rgb:
compColor += '%02x' % (255 - int(a, 16))
## print('complementaryColor = ', compColor)
if hexStr.isupper():
return compColor.upper() # Retain case.
return compColor
class ColorDotShapedFrame(wx.Frame):
def __init__(self, parent=None,
frameColor='#000000', maskColor='#FFFFFF', dotColor='#FFFFFF', id=wx.ID_ANY, title='',
pos=wx.DefaultPosition, size=(96, 96),
style=wx.FRAME_SHAPED | wx.NO_BORDER | wx.FRAME_NO_TASKBAR | wx.STAY_ON_TOP,
name='frame'):
wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
# self.SetDoubleBuffered(True)
self.frameColor = frameColor
self.maskColor = maskColor
self.dotColor = dotColor
self.font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
# Create the bitmap and set the frame shape.
self.MakeColorDropperBitmap()
wx.CallAfter(self.SetWindowShape)
# Set up the timer which will move the text and paint the screen.
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnTimer, source=self.timer)
self.timer.Start(10)
# Make sure we are using our custom paint handler.
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
def MakeColorDropperBitmap(self):
dc = wx.MemoryDC(wx.EmptyBitmap(*self.GetClientSize()))
dc_SetBrush = dc.SetBrush
# dc_SetPen = dc.SetPen
maskColor = self.maskColor
dc_SetBrush(wx.Brush(maskColor))
dc.Clear()
# dc.DrawRectangle(x=0, y=0, width=self.Size[0], height=self.Size[1])
w, h = self.GetClientSize()
frameColor = self.frameColor
dc_SetBrush(wx.Brush(frameColor))
# compFrameColor = GetComplementaryColor(frameColor)
# dc.SetPen(wx.Pen(compFrameColor))
dc.SetPen(wx.Pen(frameColor))
minWH = min(w, h)
maxWH = max(w, h)
minWH2 = min(w, h)//2
maxWH2 = max(w, h)//2
dc.DrawCircle(x=w//2, y=h//2, radius=minWH2)
if w > h:
dc.DrawRectangle(x=maxWH2-minWH2, y=minWH2, width=minWH2, height=minWH2)
elif w < h:
dc.DrawRectangle(x=0, y=maxWH2, width=minWH2, height=minWH2+1)
else:
dc.DrawRectangle(x=0, y=maxWH2, width=minWH2, height=minWH2)
bmp = dc.GetAsBitmap((0, 0, w, h))
bmp.SetMaskColour(maskColor)
mask = wx.Mask(bmp, maskColor)
bmp.SetMask(mask)
self.bmp = bmp
# wx.CallAfter(self.SetWindowShape)
def DrawColorDot(self, dc):
dc_SetBrush = dc.SetBrush
dc_SetPen = dc.SetPen
w, h = self.GetClientSize()
# minWH = min(w, h)
# maxWH = max(w, h)
minWH2 = min(w, h)//2
# maxWH2 = max(w, h)//2
# Draw the color dot
dotcolor = self.dotColor
framecolor = self.frameColor
compDotColor = GetComplementaryColor(dotcolor)
compFrameColor = GetComplementaryColor(framecolor)
dc_SetBrush(wx.Brush(compFrameColor))
dc_SetPen(wx.Pen(compFrameColor))
dc.DrawCircle(x=w//2+1, y=h//2+1, radius=minWH2//4*3)
# dc_SetBrush(wx.Brush('#000000'))
# dc_SetPen(wx.Pen('#000000'))
# dc.DrawCircle(x=w//2+1, y=h//2+1, radius=minWH2//4*3)
dc_SetBrush(wx.Brush(dotcolor))
dc_SetPen(wx.Pen(dotcolor))
# dc.SetPen(wx.Pen(framecolor))
dc.DrawCircle(x=w//2, y=h//2, radius=minWH2//4*3)
# Draw the color string on the colordot
fnt = self.font
fnt.SetPixelSize((minWH2//6, minWH2//4))
dc.SetFont(fnt)
tw, th, td, te = dc.GetFullTextExtent(dotcolor, fnt)
dc.SetTextForeground(framecolor)
dc.DrawText(dotcolor, x=w//2-tw//2, y=h//2-th//2)
dc.SetTextForeground(compDotColor)
dc.DrawText(dotcolor, x=w//2-tw//2-1, y=h//2-th//2-1)
def SetWindowShape(self):
# Use the bitmap's mask to determine the region.
r = wx.RegionFromBitmap(self.bmp)
self.SetShape(r)
def OnEraseBackground(self, event):
pass # Reduce flicker with BufferedPaintDC.
def OnPaint(self, event):
# Draw the bitmap on the screen.
## dc = wx.PaintDC(self)
# dc = wx.BufferedPaintDC(self)
dc = wx.GCDC(wx.BufferedPaintDC(self))
gc_obj = dc.GetGraphicsContext()
gc_obj.SetAntialiasMode(1)
dc.DrawBitmap(self.bmp, 0, 0, useMask=True)
self.DrawColorDot(dc)
def OnTimer(self, event):
x, y = wx.GetMousePosition()
self.SetPosition((x + 4, y - self.Size[1] - 4))
sdc = wx.ScreenDC()
color = sdc.GetPixel(x, y)
# print(color)
hexcolor = color.GetAsString(wx.C2S_HTML_SYNTAX)
# print(hexcolor)
self.dotColor = hexcolor
self.Refresh()
del sdc
def __del__(self):
self.timer.Stop()
del self.timer
if __name__ == "__main__":
app = wx.App()
frame = ColorDotShapedFrame()
frame.Centre()
frame.Show(True)
# frame.SetTransparent(200)
app.MainLoop()
|
<filename>ColorDropperShapedFrame.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Copyright (c) 2017 <NAME>
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
"""
Color dropper that follows the mouse pointer.
OnClick functionality will have to be implemented by the user.
License: MIT
"""
import wx
if 'phoenix' in wx.version():
wx.RegionFromBitmap = wx.Region
wx.EmptyBitmap = wx.Bitmap
def GetComplementaryColor(hexStr):
    """Return the complementary RGB color of a ``#RRGGBB`` hex string.

    Each channel is replaced by ``255 - channel``.  The letter case of
    the input is retained in the result.

    Example Usage:
    >>> GetComplementaryColor('#FFFFFF')
    '#000000'
    """
    digits = hexStr[1:] if hexStr[0] == '#' else hexStr
    channels = (digits[0:2], digits[2:4], digits[4:6])
    comp = '#' + ''.join('%02x' % (255 - int(ch, 16)) for ch in channels)
    # Preserve the case convention of the caller's string.
    return comp.upper() if digits.isupper() else comp
class ColorDotShapedFrame(wx.Frame):
    """Borderless, shaped, always-on-top frame showing a color-dropper dot.

    The frame paints a circular "dropper" bitmap whose mask defines the
    non-rectangular window shape.  A 10 ms timer keeps the window glued
    next to the mouse pointer and samples the screen pixel under the
    pointer into ``self.dotColor`` (see ``OnTimer``).
    """

    def __init__(self, parent=None,
                 frameColor='#000000', maskColor='#FFFFFF', dotColor='#FFFFFF', id=wx.ID_ANY, title='',
                 pos=wx.DefaultPosition, size=(96, 96),
                 style=wx.FRAME_SHAPED | wx.NO_BORDER | wx.FRAME_NO_TASKBAR | wx.STAY_ON_TOP,
                 name='frame'):
        """Create the shaped frame.

        frameColor -- outline/text color of the dropper silhouette
        maskColor  -- color treated as transparent when shaping the window
        dotColor   -- initial color of the central dot (updated live)
        """
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
        # self.SetDoubleBuffered(True)
        self.frameColor = frameColor
        self.maskColor = maskColor
        self.dotColor = dotColor
        self.font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        # Create the bitmap and set the frame shape.
        self.MakeColorDropperBitmap()
        # SetShape needs the native window to exist, hence CallAfter.
        wx.CallAfter(self.SetWindowShape)
        # Set up the timer which will move the text and paint the screen.
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.OnTimer, source=self.timer)
        self.timer.Start(10)
        # Make sure we are using our custom paint handler.
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)

    def MakeColorDropperBitmap(self):
        """Draw the dropper silhouette into ``self.bmp`` and attach a mask.

        Everything painted in ``self.maskColor`` becomes transparent once
        the mask is applied; the circle (plus a small rectangular "tail")
        painted in ``self.frameColor`` is the visible window shape.
        """
        dc = wx.MemoryDC(wx.EmptyBitmap(*self.GetClientSize()))
        dc_SetBrush = dc.SetBrush
        # dc_SetPen = dc.SetPen
        maskColor = self.maskColor
        dc_SetBrush(wx.Brush(maskColor))
        dc.Clear()
        # dc.DrawRectangle(x=0, y=0, width=self.Size[0], height=self.Size[1])
        w, h = self.GetClientSize()
        frameColor = self.frameColor
        dc_SetBrush(wx.Brush(frameColor))
        # compFrameColor = GetComplementaryColor(frameColor)
        # dc.SetPen(wx.Pen(compFrameColor))
        dc.SetPen(wx.Pen(frameColor))
        minWH = min(w, h)
        maxWH = max(w, h)
        minWH2 = min(w, h)//2
        maxWH2 = max(w, h)//2
        dc.DrawCircle(x=w//2, y=h//2, radius=minWH2)
        # Add the rectangular "tail"; which variant is drawn depends on
        # the frame's aspect ratio (wide / tall / square).
        if w > h:
            dc.DrawRectangle(x=maxWH2-minWH2, y=minWH2, width=minWH2, height=minWH2)
        elif w < h:
            dc.DrawRectangle(x=0, y=maxWH2, width=minWH2, height=minWH2+1)
        else:
            dc.DrawRectangle(x=0, y=maxWH2, width=minWH2, height=minWH2)
        bmp = dc.GetAsBitmap((0, 0, w, h))
        bmp.SetMaskColour(maskColor)
        mask = wx.Mask(bmp, maskColor)
        bmp.SetMask(mask)
        self.bmp = bmp
        # wx.CallAfter(self.SetWindowShape)

    def DrawColorDot(self, dc):
        """Paint the central color dot, its shadow ring, and the hex label.

        Draw order matters: first an offset circle in the frame color's
        complement (acts as a drop shadow), then the dot in the sampled
        color, then the hex string twice -- once in the frame color and
        once, offset by one pixel, in the dot color's complement so the
        label stays legible on any sampled background.
        """
        dc_SetBrush = dc.SetBrush
        dc_SetPen = dc.SetPen
        w, h = self.GetClientSize()
        # minWH = min(w, h)
        # maxWH = max(w, h)
        minWH2 = min(w, h)//2
        # maxWH2 = max(w, h)//2
        # Draw the color dot
        dotcolor = self.dotColor
        framecolor = self.frameColor
        compDotColor = GetComplementaryColor(dotcolor)
        compFrameColor = GetComplementaryColor(framecolor)
        dc_SetBrush(wx.Brush(compFrameColor))
        dc_SetPen(wx.Pen(compFrameColor))
        dc.DrawCircle(x=w//2+1, y=h//2+1, radius=minWH2//4*3)
        # dc_SetBrush(wx.Brush('#000000'))
        # dc_SetPen(wx.Pen('#000000'))
        # dc.DrawCircle(x=w//2+1, y=h//2+1, radius=minWH2//4*3)
        dc_SetBrush(wx.Brush(dotcolor))
        dc_SetPen(wx.Pen(dotcolor))
        # dc.SetPen(wx.Pen(framecolor))
        dc.DrawCircle(x=w//2, y=h//2, radius=minWH2//4*3)
        # Draw the color string on the colordot
        fnt = self.font
        fnt.SetPixelSize((minWH2//6, minWH2//4))
        dc.SetFont(fnt)
        tw, th, td, te = dc.GetFullTextExtent(dotcolor, fnt)
        dc.SetTextForeground(framecolor)
        dc.DrawText(dotcolor, x=w//2-tw//2, y=h//2-th//2)
        dc.SetTextForeground(compDotColor)
        dc.DrawText(dotcolor, x=w//2-tw//2-1, y=h//2-th//2-1)

    def SetWindowShape(self):
        """Clip the window to the non-masked region of the dropper bitmap."""
        # Use the bitmap's mask to determine the region.
        r = wx.RegionFromBitmap(self.bmp)
        self.SetShape(r)

    def OnEraseBackground(self, event):
        """Swallow background-erase events; OnPaint repaints everything."""
        pass # Reduce flicker with BufferedPaintDC.

    def OnPaint(self, event):
        """Repaint the dropper bitmap and the live color dot on top of it."""
        # Draw the bitmap on the screen.
        ## dc = wx.PaintDC(self)
        # dc = wx.BufferedPaintDC(self)
        dc = wx.GCDC(wx.BufferedPaintDC(self))
        gc_obj = dc.GetGraphicsContext()
        gc_obj.SetAntialiasMode(1)
        dc.DrawBitmap(self.bmp, 0, 0, useMask=True)
        self.DrawColorDot(dc)

    def OnTimer(self, event):
        """Follow the mouse and sample the pixel color under the pointer."""
        x, y = wx.GetMousePosition()
        # Offset so the frame floats above/right of the pointer.
        self.SetPosition((x + 4, y - self.Size[1] - 4))
        sdc = wx.ScreenDC()
        color = sdc.GetPixel(x, y)
        # print(color)
        hexcolor = color.GetAsString(wx.C2S_HTML_SYNTAX)
        # print(hexcolor)
        self.dotColor = hexcolor
        self.Refresh()
        del sdc

    def __del__(self):
        # Stop the polling timer before the frame goes away.
        self.timer.Stop()
        del self.timer
if __name__ == "__main__":
    # Demo entry point: show the color-dropper frame centred on screen.
    app = wx.App()
    dropper = ColorDotShapedFrame()
    dropper.Centre()
    dropper.Show(True)
    app.MainLoop()
|
en
| 0.631763
|
#!/usr/bin/env python # -*- coding: utf-8 -*- ## Copyright (c) 2017 <NAME> ## ## Permission is hereby granted, free of charge, to any person obtaining a copy ## of this software and associated documentation files (the "Software"), to deal ## in the Software without restriction, including without limitation the rights ## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ## copies of the Software, and to permit persons to whom the Software is ## furnished to do so, subject to the following conditions: ## ## The above copyright notice and this permission notice shall be included in all ## copies or substantial portions of the Software. ## ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ## SOFTWARE. Color dropper that follows the mouse pointer. OnClick functionality will have to be implemented by the user. License: MIT Returns complementary RGB color Example Usage: >>> GetComplementaryColor('#FFFFFF') '#000000' ## print('complementaryColor = ', compColor) # Retain case. # self.SetDoubleBuffered(True) # Create the bitmap and set the frame shape. # Set up the timer which will move the text and paint the screen. # Make sure we are using our custom paint handler. 
# dc_SetPen = dc.SetPen # dc.DrawRectangle(x=0, y=0, width=self.Size[0], height=self.Size[1]) # compFrameColor = GetComplementaryColor(frameColor) # dc.SetPen(wx.Pen(compFrameColor)) # wx.CallAfter(self.SetWindowShape) # minWH = min(w, h) # maxWH = max(w, h) # maxWH2 = max(w, h)//2 # Draw the color dot # dc_SetBrush(wx.Brush('#000000')) # dc_SetPen(wx.Pen('#000000')) # dc.DrawCircle(x=w//2+1, y=h//2+1, radius=minWH2//4*3) # dc.SetPen(wx.Pen(framecolor)) # Draw the color string on the colordot # Use the bitmap's mask to determine the region. # Reduce flicker with BufferedPaintDC. # Draw the bitmap on the screen. ## dc = wx.PaintDC(self) # dc = wx.BufferedPaintDC(self) # print(color) # print(hexcolor) # frame.SetTransparent(200)
| 2.211595
| 2
|
saga/namespace/entry.py
|
nikmagini/pilot
| 13
|
6626633
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import radical.utils.signatures as rus
import saga.adaptors.base as sab
import saga.exceptions as se
import saga.session as ss
import saga.task as st
import saga.url as surl
import saga.base as sb
import saga.async as sasync
from saga.namespace.constants import *
from saga.constants import SYNC, ASYNC, TASK
# ------------------------------------------------------------------------------
#
class Entry (sb.Base, sasync.Async) :
    '''
    Represents a SAGA namespace entry as defined in GFD.90

    The saga.namespace.Entry class represents, as the name indicates,
    an entry in some (local or remote) namespace.  That class offers
    a number of operations on that entry, such as copy, move and remove::

        # get an entry handle
        entry = saga.namespace.Entry ("sftp://localhost/tmp/data/data.bin")

        # copy the entry
        entry.copy ("sftp://localhost/tmp/data/data.bak")

        # move the entry
        entry.move ("sftp://localhost/tmp/data/data.new")
    '''

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional ((surl.Url, basestring)),
                  rus.optional (int, rus.nothing),
                  rus.optional (ss.Session),
                  rus.optional (sab.Base),
                  rus.optional (dict),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns (rus.nothing)
    def __init__ (self, url=None, flags=None, session=None,
                  _adaptor=None, _adaptor_state={}, _ttype=None) :
        '''
        :param url: Url of the (remote) entry
        :type  url: :class:`saga.Url`

        flags:     flags enum
        session:   saga.Session
        ret:       obj

        Construct a new entry object

        The specified entry is expected to exist -- otherwise a DoesNotExist
        exception is raised.  Also, the URL must point to an entry (not to
        a directory), otherwise a BadParameter exception is raised.

        Example::

            # get an entry handle
            entry = saga.namespace.Entry("sftp://localhost/tmp/data/data.bin")

            # print the entry's url
            print entry.get_url ()
        '''

        self._is_recursive = 0  # recursion guard (FIXME: NOT THREAD SAFE)

        # param checks
        if not session :
            session = ss.Session (default=True)
        if not flags : flags = 0

        # Keep the (possibly defaulted) session.  Assigning the raw
        # parameter before defaulting would leave self._session as None
        # when no session was given, even though a default session is in
        # use -- and self._session is passed on in the copy() fallback.
        self._session = session

        url    = surl.Url (url)
        scheme = url.scheme.lower ()

        self._base = super (Entry, self)
        self._base.__init__ (scheme, _adaptor, _adaptor_state,
                             url, flags, session, ttype=_ttype)

    # --------------------------------------------------------------------------
    #
    @classmethod
    @rus.takes   ('Entry',
                  rus.optional ((surl.Url, basestring)),
                  rus.optional (int, rus.nothing),
                  rus.optional (ss.Session),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns (st.Task)
    def create (cls, url=None, flags=None, session=None, ttype=None) :
        '''
        url:       saga.Url
        flags:     saga.namespace.flags enum
        session:   saga.Session
        ttype:     saga.task.type enum
        ret:       saga.Task
        '''

        # param checks
        if not flags : flags = 0
        if not session :
            session = ss.Session (default=True)

        return cls (url, flags, session, _ttype=ttype)._init_task

    # ----------------------------------------------------------------
    #
    # namespace entry methods
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((surl.Url, st.Task))
    def get_url (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       saga.Url / saga.Task

        Return the complete url pointing to the entry.

        The call will return the complete url pointing to
        this entry as a saga.Url object::

            # print URL of an entry
            entry = saga.namespace.Entry("sftp://localhost/etc/passwd")
            print entry.get_url()
        '''
        return self._adaptor.get_url (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((basestring, st.Task))
    def get_cwd (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       string / saga.Task

        Return the entry's working directory component.
        '''
        return self._adaptor.get_cwd (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((basestring, st.Task))
    def get_name (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       string / saga.Task

        Return the entry's name component.
        '''
        return self._adaptor.get_name (ttype=ttype)

    # ----------------------------------------------------------------
    #
    # namespace entry / directory methods
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((bool, st.Task))
    def is_dir (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       bool / saga.Task

        Returns True if path is a directory, False otherwise.

        Example::

            # inspect an entry
            dir  = saga.namespace.Directory("sftp://localhost/tmp/")
            if dir.is_dir ('data'):
                # do something
        '''
        return self._adaptor.is_dir_self (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((bool, st.Task))
    def is_entry (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       bool / saga.Task

        Returns True if the object is a (non-directory) entry.
        '''
        return self._adaptor.is_entry_self (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((bool, st.Task))
    def is_link (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       bool / saga.Task

        Returns True if the entry is a symbolic link.
        '''
        return self._adaptor.is_link_self (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((surl.Url, st.Task))
    def read_link (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       saga.Url / saga.Task

        Return the link's target Url (for link entries).
        '''
        return self._adaptor.read_link_self (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  (surl.Url, basestring),
                  rus.optional (int, rus.nothing),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def copy (self, tgt, flags=0, ttype=None) :
        '''
        tgt:       saga.Url
        flags:     enum flags
        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Copy the entry to another location

        :param target: Url of the copy target.
        :param flags: Flags to use for the operation.

        The entry is copied to the given target location.  The target URL must
        be an absolute path, and can be a target entry name or target
        directory name.  If the target entry exists, it is overwritten::

            # copy an entry
            entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin")
            entry.copy ("sftp://localhost/tmp/data/data.bak")
        '''

        # parameter checks
        if not flags : flags = 0
        tgt_url = surl.Url (tgt)  # ensure valid and typed Url

        # async ops don't deserve a fallback (yet)
        if ttype != None :
            return self._adaptor.copy_self (tgt_url, flags, ttype=ttype)

        # we have only sync calls here - attempt a normal call to the bound
        # adaptor first (doh!)
        try :
            # BUGFIX: the adaptor call must run *inside* the try block.
            # Previously it was issued before a `try: True`, so a failing
            # copy raised immediately and the cross-adaptor fallback below
            # was dead code.
            self._adaptor.copy_self (tgt_url, flags, ttype=ttype)

        except se.SagaException as e :
            # if we don't have a scheme for tgt, all is in vain (adaptor
            # should have handled a relative path...)
            if not tgt_url.scheme :
                raise e

            # So, the adaptor bound to the src URL did not manage to copy the
            # entry.  If the tgt has a scheme set, we try again with other
            # matching entry adaptors, by setting (a copy of) the *src* URL
            # to the same scheme, in the hope that other adaptors can copy
            # from localhost.
            #
            # In principle that mechanism can also be used for remote copies,
            # but URL translation is way more fragile in those cases...

            # check recursion guard
            if self._is_recursive :
                self._logger.debug ("fallback recursion detected - abort")

            else :
                # activate recursion guard
                self._is_recursive += 1

                import saga.engine
                engine = saga.engine.Engine ()

                # find applicable adaptors we could fall back to, i.e. which
                # support the tgt schema
                adaptor_names = engine.find_adaptors ('saga.namespace.Entry', tgt_url.scheme)

                self._logger.debug ("try fallback copy to these adaptors: %s" % adaptor_names)

                # build a new src url, by switching to the target schema
                tmp_url        = self.get_url ()
                tmp_url.scheme = tgt_url.scheme

                for adaptor_name in adaptor_names :

                    try :
                        self._logger.info ("try fallback copy to %s" % adaptor_name)

                        adaptor_instance = engine.get_adaptor (adaptor_name)

                        # get a tgt-scheme'd adaptor for the new src url,
                        # and try the copy again
                        adaptor = engine.bind_adaptor (self, 'saga.namespace.Entry', tgt_url.scheme,
                                                       adaptor_instance)
                        adaptor.init_instance ({}, tmp_url, None, self._session)
                        tmp = Entry (tmp_url, None, self._session, _adaptor=adaptor_instance)

                        tmp.copy (tgt_url, flags)

                        # release recursion guard
                        self._is_recursive -= 1

                        # if nothing raised an exception so far, we are done.
                        return

                    except se.SagaException as e :
                        self._logger.info ("fallback failed: %s" % e)
                        # didn't work, ignore this adaptor
                        pass

            # if all was in vain, we rethrow the original exception
            self._is_recursive -= 1
            raise e

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  (surl.Url, basestring),
                  rus.optional (int, rus.nothing),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def link (self, tgt, flags=0, ttype=None) :
        '''
        tgt:       saga.Url
        flags:     enum flags
        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Create a symbolic link to this entry at the given target location.
        '''
        if not flags : flags = 0
        return self._adaptor.link_self (tgt, flags, ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  (surl.Url, basestring),
                  rus.optional (int, rus.nothing),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def move (self, tgt, flags=0, ttype=None) :
        '''
        :param target: Url of the move target.
        :param flags:  Flags to use for the operation.

        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Move the entry to another location

        The entry is moved to the given target location.  The target URL must
        be an absolute path, and can be a target entry name or target
        directory name.  If the target entry exists, it is overwritten::

            # move an entry
            entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin")
            entry.move ("sftp://localhost/tmp/data/data.bak")
        '''
        if not flags : flags = 0
        return self._adaptor.move_self (tgt, flags, ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (int, rus.nothing),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def remove (self, flags=0, ttype=None) :
        '''
        :param flags: Flags to use for the operation.

        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Remove the entry.

        The entry is removed, and this object instance is then invalid for
        further operations.

            # remove an entry
            entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin")
            entry.remove ()
        '''
        if not flags : flags = 0
        return self._adaptor.remove_self (flags, ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (float),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def close (self, timeout=None, ttype=None) :
        '''
        timeout:   float
        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Close the entry; further operations on this instance are invalid.
        '''
        return self._adaptor.close (timeout, ttype=ttype)

    # --------------------------------------------------------------------------
    #
    url  = property (get_url)   # saga.Url
    cwd  = property (get_cwd)   # string
    name = property (get_name)  # string
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import radical.utils.signatures as rus
import saga.adaptors.base as sab
import saga.exceptions as se
import saga.session as ss
import saga.task as st
import saga.url as surl
import saga.base as sb
import saga.async as sasync
from saga.namespace.constants import *
from saga.constants import SYNC, ASYNC, TASK
# ------------------------------------------------------------------------------
#
class Entry (sb.Base, sasync.Async) :
    '''
    Represents a SAGA namespace entry as defined in GFD.90

    The saga.namespace.Entry class represents, as the name indicates,
    an entry in some (local or remote) namespace.  That class offers
    a number of operations on that entry, such as copy, move and remove::

        # get an entry handle
        entry = saga.namespace.Entry ("sftp://localhost/tmp/data/data.bin")

        # copy the entry
        entry.copy ("sftp://localhost/tmp/data/data.bak")

        # move the entry
        entry.move ("sftp://localhost/tmp/data/data.new")
    '''

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional ((surl.Url, basestring)),
                  rus.optional (int, rus.nothing),
                  rus.optional (ss.Session),
                  rus.optional (sab.Base),
                  rus.optional (dict),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns (rus.nothing)
    def __init__ (self, url=None, flags=None, session=None,
                  _adaptor=None, _adaptor_state={}, _ttype=None) :
        '''
        :param url: Url of the (remote) entry
        :type  url: :class:`saga.Url`

        flags:     flags enum
        session:   saga.Session
        ret:       obj

        Construct a new entry object

        The specified entry is expected to exist -- otherwise a DoesNotExist
        exception is raised.  Also, the URL must point to an entry (not to
        a directory), otherwise a BadParameter exception is raised.

        Example::

            # get an entry handle
            entry = saga.namespace.Entry("sftp://localhost/tmp/data/data.bin")

            # print the entry's url
            print entry.get_url ()
        '''

        self._is_recursive = 0  # recursion guard (FIXME: NOT THREAD SAFE)

        # param checks
        if not session :
            session = ss.Session (default=True)
        if not flags : flags = 0

        # Keep the (possibly defaulted) session.  Assigning the raw
        # parameter before defaulting would leave self._session as None
        # when no session was given, even though a default session is in
        # use -- and self._session is passed on in the copy() fallback.
        self._session = session

        url    = surl.Url (url)
        scheme = url.scheme.lower ()

        self._base = super (Entry, self)
        self._base.__init__ (scheme, _adaptor, _adaptor_state,
                             url, flags, session, ttype=_ttype)

    # --------------------------------------------------------------------------
    #
    @classmethod
    @rus.takes   ('Entry',
                  rus.optional ((surl.Url, basestring)),
                  rus.optional (int, rus.nothing),
                  rus.optional (ss.Session),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns (st.Task)
    def create (cls, url=None, flags=None, session=None, ttype=None) :
        '''
        url:       saga.Url
        flags:     saga.namespace.flags enum
        session:   saga.Session
        ttype:     saga.task.type enum
        ret:       saga.Task
        '''

        # param checks
        if not flags : flags = 0
        if not session :
            session = ss.Session (default=True)

        return cls (url, flags, session, _ttype=ttype)._init_task

    # ----------------------------------------------------------------
    #
    # namespace entry methods
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((surl.Url, st.Task))
    def get_url (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       saga.Url / saga.Task

        Return the complete url pointing to the entry.

        The call will return the complete url pointing to
        this entry as a saga.Url object::

            # print URL of an entry
            entry = saga.namespace.Entry("sftp://localhost/etc/passwd")
            print entry.get_url()
        '''
        return self._adaptor.get_url (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((basestring, st.Task))
    def get_cwd (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       string / saga.Task

        Return the entry's working directory component.
        '''
        return self._adaptor.get_cwd (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((basestring, st.Task))
    def get_name (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       string / saga.Task

        Return the entry's name component.
        '''
        return self._adaptor.get_name (ttype=ttype)

    # ----------------------------------------------------------------
    #
    # namespace entry / directory methods
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((bool, st.Task))
    def is_dir (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       bool / saga.Task

        Returns True if path is a directory, False otherwise.

        Example::

            # inspect an entry
            dir  = saga.namespace.Directory("sftp://localhost/tmp/")
            if dir.is_dir ('data'):
                # do something
        '''
        return self._adaptor.is_dir_self (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((bool, st.Task))
    def is_entry (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       bool / saga.Task

        Returns True if the object is a (non-directory) entry.
        '''
        return self._adaptor.is_entry_self (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((bool, st.Task))
    def is_link (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       bool / saga.Task

        Returns True if the entry is a symbolic link.
        '''
        return self._adaptor.is_link_self (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((surl.Url, st.Task))
    def read_link (self, ttype=None) :
        '''
        ttype:     saga.task.type enum
        ret:       saga.Url / saga.Task

        Return the link's target Url (for link entries).
        '''
        return self._adaptor.read_link_self (ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  (surl.Url, basestring),
                  rus.optional (int, rus.nothing),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def copy (self, tgt, flags=0, ttype=None) :
        '''
        tgt:       saga.Url
        flags:     enum flags
        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Copy the entry to another location

        :param target: Url of the copy target.
        :param flags: Flags to use for the operation.

        The entry is copied to the given target location.  The target URL must
        be an absolute path, and can be a target entry name or target
        directory name.  If the target entry exists, it is overwritten::

            # copy an entry
            entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin")
            entry.copy ("sftp://localhost/tmp/data/data.bak")
        '''

        # parameter checks
        if not flags : flags = 0
        tgt_url = surl.Url (tgt)  # ensure valid and typed Url

        # async ops don't deserve a fallback (yet)
        if ttype != None :
            return self._adaptor.copy_self (tgt_url, flags, ttype=ttype)

        # we have only sync calls here - attempt a normal call to the bound
        # adaptor first (doh!)
        try :
            # BUGFIX: the adaptor call must run *inside* the try block.
            # Previously it was issued before a `try: True`, so a failing
            # copy raised immediately and the cross-adaptor fallback below
            # was dead code.
            self._adaptor.copy_self (tgt_url, flags, ttype=ttype)

        except se.SagaException as e :
            # if we don't have a scheme for tgt, all is in vain (adaptor
            # should have handled a relative path...)
            if not tgt_url.scheme :
                raise e

            # So, the adaptor bound to the src URL did not manage to copy the
            # entry.  If the tgt has a scheme set, we try again with other
            # matching entry adaptors, by setting (a copy of) the *src* URL
            # to the same scheme, in the hope that other adaptors can copy
            # from localhost.
            #
            # In principle that mechanism can also be used for remote copies,
            # but URL translation is way more fragile in those cases...

            # check recursion guard
            if self._is_recursive :
                self._logger.debug ("fallback recursion detected - abort")

            else :
                # activate recursion guard
                self._is_recursive += 1

                import saga.engine
                engine = saga.engine.Engine ()

                # find applicable adaptors we could fall back to, i.e. which
                # support the tgt schema
                adaptor_names = engine.find_adaptors ('saga.namespace.Entry', tgt_url.scheme)

                self._logger.debug ("try fallback copy to these adaptors: %s" % adaptor_names)

                # build a new src url, by switching to the target schema
                tmp_url        = self.get_url ()
                tmp_url.scheme = tgt_url.scheme

                for adaptor_name in adaptor_names :

                    try :
                        self._logger.info ("try fallback copy to %s" % adaptor_name)

                        adaptor_instance = engine.get_adaptor (adaptor_name)

                        # get a tgt-scheme'd adaptor for the new src url,
                        # and try the copy again
                        adaptor = engine.bind_adaptor (self, 'saga.namespace.Entry', tgt_url.scheme,
                                                       adaptor_instance)
                        adaptor.init_instance ({}, tmp_url, None, self._session)
                        tmp = Entry (tmp_url, None, self._session, _adaptor=adaptor_instance)

                        tmp.copy (tgt_url, flags)

                        # release recursion guard
                        self._is_recursive -= 1

                        # if nothing raised an exception so far, we are done.
                        return

                    except se.SagaException as e :
                        self._logger.info ("fallback failed: %s" % e)
                        # didn't work, ignore this adaptor
                        pass

            # if all was in vain, we rethrow the original exception
            self._is_recursive -= 1
            raise e

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  (surl.Url, basestring),
                  rus.optional (int, rus.nothing),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def link (self, tgt, flags=0, ttype=None) :
        '''
        tgt:       saga.Url
        flags:     enum flags
        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Create a symbolic link to this entry at the given target location.
        '''
        if not flags : flags = 0
        return self._adaptor.link_self (tgt, flags, ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  (surl.Url, basestring),
                  rus.optional (int, rus.nothing),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def move (self, tgt, flags=0, ttype=None) :
        '''
        :param target: Url of the move target.
        :param flags:  Flags to use for the operation.

        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Move the entry to another location

        The entry is moved to the given target location.  The target URL must
        be an absolute path, and can be a target entry name or target
        directory name.  If the target entry exists, it is overwritten::

            # move an entry
            entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin")
            entry.move ("sftp://localhost/tmp/data/data.bak")
        '''
        if not flags : flags = 0
        return self._adaptor.move_self (tgt, flags, ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (int, rus.nothing),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def remove (self, flags=0, ttype=None) :
        '''
        :param flags: Flags to use for the operation.

        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Remove the entry.

        The entry is removed, and this object instance is then invalid for
        further operations.

            # remove an entry
            entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin")
            entry.remove ()
        '''
        if not flags : flags = 0
        return self._adaptor.remove_self (flags, ttype=ttype)

    # --------------------------------------------------------------------------
    #
    @rus.takes   ('Entry',
                  rus.optional (float),
                  rus.optional (rus.one_of (SYNC, ASYNC, TASK)))
    @rus.returns ((rus.nothing, st.Task))
    def close (self, timeout=None, ttype=None) :
        '''
        timeout:   float
        ttype:     saga.task.type enum
        ret:       None / saga.Task

        Close the entry; further operations on this instance are invalid.
        '''
        return self._adaptor.close (timeout, ttype=ttype)

    # --------------------------------------------------------------------------
    #
    url  = property (get_url)   # saga.Url
    cwd  = property (get_cwd)   # string
    name = property (get_name)  # string
|
en
| 0.502825
|
# ------------------------------------------------------------------------------ # Represents a SAGA namespace entry as defined in GFD.90 The saga.namespace.Entry class represents, as the name indicates, an entry in some (local or remote) namespace. That class offers a number of operations on that entry, such as copy, move and remove:: # get an entry handle entry = saga.namespace.Entry ("sftp://localhost/tmp/data/data.bin") # copy the entry entry.copy ("sftp://localhost/tmp/data/data.bak") # move the entry entry.move ("sftp://localhost/tmp/data/data.new") # -------------------------------------------------------------------------- # :param url: Url of the (remote) entry :type url: :class:`saga.Url` flags: flags enum session: saga.Session ret: obj Construct a new entry object The specified entry is expected to exist -- otherwise a DoesNotExist exception is raised. Also, the URL must point to an entry (not to a directory), otherwise a BadParameter exception is raised. Example:: # get an entry handle entry = saga.namespace.Entry("sftp://localhost/tmp/data/data.bin") # print the entry's url print entry.get_url () # recursion guard (FIXME: NOT THREAD SAFE) # param checks # -------------------------------------------------------------------------- # url: saga.Url flags: saga.namespace.flags enum session: saga.Session ttype: saga.task.type enum ret: saga.Task # param checks # ---------------------------------------------------------------- # # namespace entry methods # ttype: saga.task.type enum ret: saga.Url / saga.Task Return the complete url pointing to the entry. 
The call will return the complete url pointing to this entry as a saga.Url object:: # print URL of an entry entry = saga.namespace.Entry("sftp://localhost/etc/passwd") print entry.get_url() # -------------------------------------------------------------------------- # ttype: saga.task.type enum ret: string / saga.Task # -------------------------------------------------------------------------- # ttype: saga.task.type enum ret: string / saga.Task # ---------------------------------------------------------------- # # namespace entry / directory methods # ttype: saga.task.type enum ret: bool / saga.Task Returns True if path is a directory, False otherwise. Example:: # inspect an entry dir = saga.namespace.Directory("sftp://localhost/tmp/") if dir.is_dir ('data'): # do something # -------------------------------------------------------------------------- # ttype: saga.task.type enum ret: bool / saga.Task # -------------------------------------------------------------------------- # tgt: saga.Url / None ttype: saga.task.type enum ret: bool / saga.Task # -------------------------------------------------------------------------- # tgt: saga.Url / None ttype: saga.task.type enum ret: saga.Url / saga.Task # -------------------------------------------------------------------------- # tgt: saga.Url flags: enum flags ttype: saga.task.type enum ret: None / saga.Task Copy the entry to another location :param target: Url of the copy target. :param flags: Flags to use for the operation. The entry is copied to the given target location. The target URL must be an absolute path, and can be a target entry name or target directory name. 
If the target entry exists, it is overwritten:: # copy an entry entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin") entry.copy ("sftp://localhost/tmp/data/data.bak") # parameter checks # ensure valid and typed Url # async ops don't deserve a fallback (yet) # we have only sync calls here - attempt a normal call to the bound # adaptor first (doh!) # if we don't have a scheme for tgt, all is in vain (adaptor # should have handled a relative path...) # So, the adaptor bound to the src URL did not manage to copy the # entry. # If the tgt has a scheme set, we try again with other matching # entry # adaptors, by setting (a copy of) the *src* URL to the same scheme, # in the hope that other adaptors can copy from localhost. # # In principle that mechanism can also be used for remote copies, but # URL translation is way more fragile in those cases... # check recursion guard # activate recursion guard # find applicable adaptors we could fall back to, i.e. which # support the tgt schema # build a new src url, by switching to the target schema # get an tgt-scheme'd adaptor for the new src url, and try copy again # release recursion guard # if nothing raised an exception so far, we are done. # didn't work, ignore this adaptor # if all was in vain, we rethrow the original exception # -------------------------------------------------------------------------- # tgt: saga.Url flags: enum flags ttype: saga.task.type enum ret: None / saga.Task # -------------------------------------------------------------------------- # :param target: Url of the move target. :param flags: Flags to use for the operation. ttype: saga.task.type enum ret: None / saga.Task Move the entry to another location The entry is copied to the given target location. The target URL must be an absolute path, and can be a target entry name or target directory name. 
If the target entry exists, it is overwritten:: # copy an entry entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin") entry.move ("sftp://localhost/tmp/data/data.bak") # -------------------------------------------------------------------------- # :param flags: Flags to use for the operation. ttype: saga.task.type enum ret: None / saga.Task Reove the entry. The entry is removed, and this object instance is then invalid for further operations. # remove an entry entry = saga.namespace.Directory("sftp://localhost/tmp/data/data.bin") entry.remove () # -------------------------------------------------------------------------- # timeout: float ttype: saga.task.type enum ret: None / saga.Task # -------------------------------------------------------------------------- # # saga.Url # string # string
| 1.935638
| 2
|
ecl/tests/functional/baremetal/test_server.py
|
keiichi-hikita/eclsdk
| 0
|
6626634
|
<reponame>keiichi-hikita/eclsdk
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from ecl.tests.functional import base
class TestServer(base.BaseFunctionalTest):
@classmethod
def test_01_create(cls):
server = cls.conn.baremetal.create_server(
server = {
"name":"SDK-TEST-BARE-02",
"flavorRef":"462f4f62-0b0a-4171-acfc-a3840fd50b4b",
"imageRef":"4fc19fa6-4643-44bd-a52d-5388d4d011b1",
"networks":[{"uuid":"4fdac559-914e-41b2-9e58-e5a36ba437dd"}]
}
)
cls.server_id = server.id
print server
assert isinstance(server.id, six.string_types)
def test_02_list(self):
servers = list(self.conn.baremetal.servers(
limit="10",
))
server = servers[0]
print server
self.assertIsInstance(server.id, six.string_types)
def test_03_list_detail(self):
servers = list(self.conn.baremetal.servers(details=True))
server = servers[0]
print server
self.assertIsInstance(server.id, six.string_types)
def test_04_show_server(self):
server = self.conn.baremetal.get_server("752aac2e-4b82-4d47-a7c7-fcbd0cbc86e2")
print server
self.assertIsInstance(server.OS_EXT_STS_power_state, six.string_types)
self.assertIsInstance(server.OS_EXT_STS_task_state, six.string_types)
self.assertIsInstance(server.OS_EXT_STS_vm_state, six.string_types)
self.assertIsInstance(server.OS_EXT_AZ_availability_zone, six.string_types)
self.assertIsInstance(server.created, six.string_types)
self.assertIsInstance(server.flavor, dict)
#self.assertIsInstance(server.hostId, six.string_types)
self.assertIsInstance(server.image, dict)
self.assertIsInstance(server.metadata, dict)
self.assertIsInstance(server.links, list)
self.assertIsInstance(server.progress, int)
self.assertIsInstance(server.status, six.string_types)
self.assertIsInstance(server.tenant_id, six.string_types)
self.assertIsInstance(server.updated, six.string_types)
self.assertIsInstance(server.user_id, six.string_types)
self.assertIsInstance(server.raid_arrays, list)
#self.assertIsInstance(server.lvm_volume_groups, list)
#self.assertIsInstance(server.filesystems, list)
self.assertIsInstance(server.nic_physical_ports, list)
self.assertIsInstance(server.chassis_status, dict)
def test_05_delete_server(self):
server = self.conn.baremetal.delete_server("752aac2e-4b82-4d47-a7c7-xx")
assert False
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from ecl.tests.functional import base
class TestServer(base.BaseFunctionalTest):
@classmethod
def test_01_create(cls):
server = cls.conn.baremetal.create_server(
server = {
"name":"SDK-TEST-BARE-02",
"flavorRef":"462f4f62-0b0a-4171-acfc-a3840fd50b4b",
"imageRef":"4fc19fa6-4643-44bd-a52d-5388d4d011b1",
"networks":[{"uuid":"4fdac559-914e-41b2-9e58-e5a36ba437dd"}]
}
)
cls.server_id = server.id
print server
assert isinstance(server.id, six.string_types)
def test_02_list(self):
servers = list(self.conn.baremetal.servers(
limit="10",
))
server = servers[0]
print server
self.assertIsInstance(server.id, six.string_types)
def test_03_list_detail(self):
servers = list(self.conn.baremetal.servers(details=True))
server = servers[0]
print server
self.assertIsInstance(server.id, six.string_types)
def test_04_show_server(self):
server = self.conn.baremetal.get_server("752aac2e-4b82-4d47-a7c7-fcbd0cbc86e2")
print server
self.assertIsInstance(server.OS_EXT_STS_power_state, six.string_types)
self.assertIsInstance(server.OS_EXT_STS_task_state, six.string_types)
self.assertIsInstance(server.OS_EXT_STS_vm_state, six.string_types)
self.assertIsInstance(server.OS_EXT_AZ_availability_zone, six.string_types)
self.assertIsInstance(server.created, six.string_types)
self.assertIsInstance(server.flavor, dict)
#self.assertIsInstance(server.hostId, six.string_types)
self.assertIsInstance(server.image, dict)
self.assertIsInstance(server.metadata, dict)
self.assertIsInstance(server.links, list)
self.assertIsInstance(server.progress, int)
self.assertIsInstance(server.status, six.string_types)
self.assertIsInstance(server.tenant_id, six.string_types)
self.assertIsInstance(server.updated, six.string_types)
self.assertIsInstance(server.user_id, six.string_types)
self.assertIsInstance(server.raid_arrays, list)
#self.assertIsInstance(server.lvm_volume_groups, list)
#self.assertIsInstance(server.filesystems, list)
self.assertIsInstance(server.nic_physical_ports, list)
self.assertIsInstance(server.chassis_status, dict)
def test_05_delete_server(self):
server = self.conn.baremetal.delete_server("752aac2e-4b82-4d47-a7c7-xx")
assert False
|
en
| 0.791043
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #self.assertIsInstance(server.hostId, six.string_types) #self.assertIsInstance(server.lvm_volume_groups, list) #self.assertIsInstance(server.filesystems, list)
| 1.938767
| 2
|
setup.py
|
dota2tools/dtrspnsy
| 0
|
6626635
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="dtrspnsy",
version="0.0.2",
author="upgradehq",
author_email="<EMAIL>",
description="search dota 2 responses english/russian/chinese",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dota2tools/dtrspnsy",
packages=["dtrspnsy"],
install_requires=['prompt_toolkit'],
package_data={
"dtrspnsy": ["en_replics/*.json", "ru_replics/*.json", "zh_replics/*.json"]
},
entry_points={"console_scripts": ["dtrspnsy=dtrspnsy.dtrspnsy:main"]},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
# for pathlib and f strings
python_requires=">=3.6",
)
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="dtrspnsy",
version="0.0.2",
author="upgradehq",
author_email="<EMAIL>",
description="search dota 2 responses english/russian/chinese",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dota2tools/dtrspnsy",
packages=["dtrspnsy"],
install_requires=['prompt_toolkit'],
package_data={
"dtrspnsy": ["en_replics/*.json", "ru_replics/*.json", "zh_replics/*.json"]
},
entry_points={"console_scripts": ["dtrspnsy=dtrspnsy.dtrspnsy:main"]},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
# for pathlib and f strings
python_requires=">=3.6",
)
|
en
| 0.782346
|
# for pathlib and f strings
| 1.589823
| 2
|
ir_datasets/datasets/dpr_w100.py
|
seanmacavaney/ir_datasets
| 0
|
6626636
|
<gh_stars>0
from typing import NamedTuple, Tuple
import ijson
import contextlib
import itertools
import ir_datasets
from ir_datasets.util import GzipExtract
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.formats import TsvDocs, BaseQueries, TrecQrels
_logger = ir_datasets.log.easy()
NAME = 'dpr-w100'
QREL_DEFS = {
2: 'marked by human annotator as containing the answer',
1: 'contains the answer text and retrieved in the top BM25 results',
0: '"hard" negative samples',
-1: 'negative samples'
}
class DprW100Doc(NamedTuple):
doc_id: str
text: str
title: str
class DprW100Query(NamedTuple):
query_id: str
text: str
answers: Tuple[str, ]
class DprW100Manager:
def __init__(self, dlc, base_path):
self._dlc = dlc
self._base_path = base_path
self._base_path.mkdir(parents=True, exist_ok=True)
def build(self):
if (self._base_path/'queries.tsv').exists():
return # already built
with contextlib.ExitStack() as stack:
f_queries = stack.enter_context(ir_datasets.util.finialized_file(self._base_path/'queries.tsv', 'wt'))
f_qrels = stack.enter_context(ir_datasets.util.finialized_file(self._base_path/'qrels', 'wt'))
stream = stack.enter_context(self._dlc.stream())
qid_counter = itertools.count()
for record in _logger.pbar(ijson.items(stream, 'item'), 'building dpr-w100'):
qid = str(next(qid_counter))
f_queries.write('\t'.join([
qid,
record['question'].replace('\t', ' ')
] + [
a.replace('\t', ' ') for a in record['answers']
]) + '\n')
seen = set()
for ctxt in record['positive_ctxs']:
if ctxt["passage_id"] not in seen:
seen.add(ctxt["passage_id"])
rel = 2 if ctxt['score'] == 1000 else 1
f_qrels.write(f'{qid} 0 {ctxt["passage_id"]} {rel}\n')
for ctxt in record['hard_negative_ctxs']:
if ctxt["passage_id"] not in seen:
seen.add(ctxt["passage_id"])
f_qrels.write(f'{qid} 0 {ctxt["passage_id"]} 0\n')
for ctxt in record['negative_ctxs']:
if ctxt["passage_id"] not in seen:
seen.add(ctxt["passage_id"])
f_qrels.write(f'{qid} 0 {ctxt["passage_id"]} -1\n')
def file_ref(self, path):
return _ManagedDlc(self, self._base_path/path)
class _ManagedDlc:
def __init__(self, manager, path):
self._manager = manager
self._path = path
@contextlib.contextmanager
def stream(self):
self._manager.build()
with open(self._path, 'rb') as f:
yield f
def path(self):
self._manager.build()
return self._path
class DprW100Queries(BaseQueries):
def __init__(self, dlc):
self._dlc = dlc
def queries_iter(self):
with self._dlc.stream() as stream:
for line in stream:
cols = line.decode().strip().split('\t')
yield DprW100Query(cols[0], cols[1], tuple(cols[2:]))
def queries_cls(self):
return DprW100Query
def queries_namespace(self):
return NAME
def queries_lang(self):
return 'en'
def _init():
base_path = ir_datasets.util.home_path()/NAME
dlc = ir_datasets.util.DownloadConfig.context(NAME, base_path)
documentation = YamlDocumentation(f'docs/{NAME}.yaml')
collection = TsvDocs(GzipExtract(dlc['docs']), doc_cls=DprW100Doc, namespace=NAME, lang='en', skip_first_line=True)
base = Dataset(
collection,
documentation('_'))
subsets = {}
nq_dev_manager = DprW100Manager(GzipExtract(dlc['nq-dev']), base_path/'nq-dev')
subsets['natural-questions/dev'] = Dataset(
collection,
DprW100Queries(nq_dev_manager.file_ref('queries.tsv')),
TrecQrels(nq_dev_manager.file_ref('qrels'), QREL_DEFS),
documentation('natural-questions/dev'))
nq_train_manager = DprW100Manager(GzipExtract(dlc['nq-train']), base_path/'nq-train')
subsets['natural-questions/train'] = Dataset(
collection,
DprW100Queries(nq_train_manager.file_ref('queries.tsv')),
TrecQrels(nq_train_manager.file_ref('qrels'), QREL_DEFS),
documentation('natural-questions/train'))
ir_datasets.registry.register(NAME, base)
for s in sorted(subsets):
ir_datasets.registry.register(f'{NAME}/{s}', subsets[s])
return base, subsets
base, subsets = _init()
|
from typing import NamedTuple, Tuple
import ijson
import contextlib
import itertools
import ir_datasets
from ir_datasets.util import GzipExtract
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.formats import TsvDocs, BaseQueries, TrecQrels
_logger = ir_datasets.log.easy()
NAME = 'dpr-w100'
QREL_DEFS = {
2: 'marked by human annotator as containing the answer',
1: 'contains the answer text and retrieved in the top BM25 results',
0: '"hard" negative samples',
-1: 'negative samples'
}
class DprW100Doc(NamedTuple):
doc_id: str
text: str
title: str
class DprW100Query(NamedTuple):
query_id: str
text: str
answers: Tuple[str, ]
class DprW100Manager:
def __init__(self, dlc, base_path):
self._dlc = dlc
self._base_path = base_path
self._base_path.mkdir(parents=True, exist_ok=True)
def build(self):
if (self._base_path/'queries.tsv').exists():
return # already built
with contextlib.ExitStack() as stack:
f_queries = stack.enter_context(ir_datasets.util.finialized_file(self._base_path/'queries.tsv', 'wt'))
f_qrels = stack.enter_context(ir_datasets.util.finialized_file(self._base_path/'qrels', 'wt'))
stream = stack.enter_context(self._dlc.stream())
qid_counter = itertools.count()
for record in _logger.pbar(ijson.items(stream, 'item'), 'building dpr-w100'):
qid = str(next(qid_counter))
f_queries.write('\t'.join([
qid,
record['question'].replace('\t', ' ')
] + [
a.replace('\t', ' ') for a in record['answers']
]) + '\n')
seen = set()
for ctxt in record['positive_ctxs']:
if ctxt["passage_id"] not in seen:
seen.add(ctxt["passage_id"])
rel = 2 if ctxt['score'] == 1000 else 1
f_qrels.write(f'{qid} 0 {ctxt["passage_id"]} {rel}\n')
for ctxt in record['hard_negative_ctxs']:
if ctxt["passage_id"] not in seen:
seen.add(ctxt["passage_id"])
f_qrels.write(f'{qid} 0 {ctxt["passage_id"]} 0\n')
for ctxt in record['negative_ctxs']:
if ctxt["passage_id"] not in seen:
seen.add(ctxt["passage_id"])
f_qrels.write(f'{qid} 0 {ctxt["passage_id"]} -1\n')
def file_ref(self, path):
return _ManagedDlc(self, self._base_path/path)
class _ManagedDlc:
def __init__(self, manager, path):
self._manager = manager
self._path = path
@contextlib.contextmanager
def stream(self):
self._manager.build()
with open(self._path, 'rb') as f:
yield f
def path(self):
self._manager.build()
return self._path
class DprW100Queries(BaseQueries):
def __init__(self, dlc):
self._dlc = dlc
def queries_iter(self):
with self._dlc.stream() as stream:
for line in stream:
cols = line.decode().strip().split('\t')
yield DprW100Query(cols[0], cols[1], tuple(cols[2:]))
def queries_cls(self):
return DprW100Query
def queries_namespace(self):
return NAME
def queries_lang(self):
return 'en'
def _init():
base_path = ir_datasets.util.home_path()/NAME
dlc = ir_datasets.util.DownloadConfig.context(NAME, base_path)
documentation = YamlDocumentation(f'docs/{NAME}.yaml')
collection = TsvDocs(GzipExtract(dlc['docs']), doc_cls=DprW100Doc, namespace=NAME, lang='en', skip_first_line=True)
base = Dataset(
collection,
documentation('_'))
subsets = {}
nq_dev_manager = DprW100Manager(GzipExtract(dlc['nq-dev']), base_path/'nq-dev')
subsets['natural-questions/dev'] = Dataset(
collection,
DprW100Queries(nq_dev_manager.file_ref('queries.tsv')),
TrecQrels(nq_dev_manager.file_ref('qrels'), QREL_DEFS),
documentation('natural-questions/dev'))
nq_train_manager = DprW100Manager(GzipExtract(dlc['nq-train']), base_path/'nq-train')
subsets['natural-questions/train'] = Dataset(
collection,
DprW100Queries(nq_train_manager.file_ref('queries.tsv')),
TrecQrels(nq_train_manager.file_ref('qrels'), QREL_DEFS),
documentation('natural-questions/train'))
ir_datasets.registry.register(NAME, base)
for s in sorted(subsets):
ir_datasets.registry.register(f'{NAME}/{s}', subsets[s])
return base, subsets
base, subsets = _init()
|
en
| 0.992582
|
# already built
| 2.085419
| 2
|
ytcc/storage.py
|
alexkohler/ytgrep
| 22
|
6626637
|
# -*- coding: UTF-8 -*-
import re
import os
import hashlib
class Storage():
def __init__(self, video_url: str) -> None:
self.video_url = video_url
def get_file_path(self) -> str:
return 'subtitle_{0}.en.vtt'.format(re.sub(
r'[^\w-]', '', hashlib.md5(str(self.video_url).encode('utf-8')).hexdigest()))
def remove_file(self) -> None:
os.remove(self.get_file_path())
|
# -*- coding: UTF-8 -*-
import re
import os
import hashlib
class Storage():
def __init__(self, video_url: str) -> None:
self.video_url = video_url
def get_file_path(self) -> str:
return 'subtitle_{0}.en.vtt'.format(re.sub(
r'[^\w-]', '', hashlib.md5(str(self.video_url).encode('utf-8')).hexdigest()))
def remove_file(self) -> None:
os.remove(self.get_file_path())
|
en
| 0.222803
|
# -*- coding: UTF-8 -*-
| 2.611652
| 3
|
PacoteDownload/Mundo 3 do curso/desafio 082.py
|
Gabriel-ER/CursoEmVideodoYoutube-Python-
| 0
|
6626638
|
lista = []
pares = []
impares = []
while True:
r = input("Enter para continuar: ").upper().strip()
if r == '':
lista.append(int(input("Número: ")))
else:
for i in lista:
if i % 2 == 0:
pares.append(i)
else:
impares.append(i)
print(f'Pares: {pares}')
print(f'Impares: {impares}')
break
|
lista = []
pares = []
impares = []
while True:
r = input("Enter para continuar: ").upper().strip()
if r == '':
lista.append(int(input("Número: ")))
else:
for i in lista:
if i % 2 == 0:
pares.append(i)
else:
impares.append(i)
print(f'Pares: {pares}')
print(f'Impares: {impares}')
break
|
none
| 1
| 3.640152
| 4
|
|
marginTrading/tests/test_marginTradingCalc_GUI/test_marginTradingCalc_GUI.py
|
sambiase/pycrypto
| 3
|
6626639
|
<reponame>sambiase/pycrypto
from marginTrading import marginTradingCalculation_GUI_v1 as mtc
import tkinter as tk
def test_init():
mtc.MarginTradingCalcGui.__init__(tk)
def test_mt_calculation():
mtc.MarginTradingCalcGui.mt_calculation(None,50,3,5,10)
def test_input_data():
mtc.MarginTradingCalcGui.input_data(tk)
def test_res_window():
mtc.MarginTradingCalcGui.res_window(None, 100, 3, 5, 10)
|
from marginTrading import marginTradingCalculation_GUI_v1 as mtc
import tkinter as tk
def test_init():
mtc.MarginTradingCalcGui.__init__(tk)
def test_mt_calculation():
mtc.MarginTradingCalcGui.mt_calculation(None,50,3,5,10)
def test_input_data():
mtc.MarginTradingCalcGui.input_data(tk)
def test_res_window():
mtc.MarginTradingCalcGui.res_window(None, 100, 3, 5, 10)
|
none
| 1
| 2.205512
| 2
|
|
tftime/layers/transformer.py
|
nagikomo/time-series-model
| 7
|
6626640
|
from tensorflow.keras import layers, Sequential
import tensorflow as tf
class PositionAdd(layers.Layer):
def build(self, input_shape):
self.pe = self.add_weight("pe", [input_shape[1], input_shape[2]],
initializer=tf.keras.initializers.zeros())
def call(self, inputs, **kwargs):
return inputs + self.pe
class MHSA(layers.Layer):
def __init__(self, dim, num_heads, use_bias=True, noise=layers.Dropout, noise_r=0, **kwargs):
super(MHSA, self).__init__(**kwargs)
self.num_heads = num_heads
self.dim = dim
self.noise = noise
self.noise_r = noise_r
self.use_bias = use_bias
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = layers.Dense(dim * 3, use_bias=use_bias)
self.qkv_reshape = layers.Reshape((-1, num_heads, head_dim))
self.qv_permute = layers.Permute((2, 1, 3))
self.k_permute = layers.Permute((2, 3, 1))
self.attn_reshape = Sequential([
layers.Permute((2, 1, 3)),
layers.Reshape((-1, dim))
])
self.proj = layers.Dense(dim)
self.drop_out = noise(noise_r)
def call(self, inputs: tf.Tensor, training=False, *args, **kwargs):
qkv = self.qkv(inputs)
q, k, v = tf.split(qkv, 3, -1)
q = self.qkv_reshape(q)
k = self.qkv_reshape(k)
v = self.qkv_reshape(v)
q = self.qv_permute(q)
k = self.k_permute(k)
v = self.qv_permute(v)
attn = tf.matmul(q, k) * self.scale
attn = tf.nn.softmax(attn, axis=-1)
x = tf.matmul(attn, v)
x = self.attn_reshape(x)
x = self.proj(x)
x = self.drop_out(x)
return x
def get_config(self):
config = {
"num_heads": self.num_heads,
"dim": self.dim,
"use_bias": self.use_bias,
"noise": self.noise,
"noise_r": self.noise_r
}
return config
class TransformerMlp(layers.Layer):
def __init__(self, dim, mlp_dim, noise=layers.Dropout, noise_r=0):
super(TransformerMlp, self).__init__()
self.dim = dim
self.mlp_dim = mlp_dim
self.noise = noise
self.noise_r = noise_r
self.dense = Sequential([
layers.Dense(mlp_dim, "gelu"),
noise(noise_r),
layers.Dense(dim),
noise(noise_r)
])
def call(self, inputs, *args, **kwargs):
return self.dense(inputs)
def get_config(self):
config = {
"dim": self.dim,
"mlp_dim": self.mlp_dim,
"noise": self.noise,
"noise_r": self.noise_r
}
return config
class Transformer(layers.Layer):
def __init__(self, dim, mlp_dim, heads, use_bias=False, noise=layers.Dropout, noise_r=0):
super(Transformer, self).__init__()
self.dim = dim
self.mlp_dim = mlp_dim
self.heads = heads
self.use_bias = use_bias
self.attn = Sequential([layers.LayerNormalization(), MHSA(dim, heads, use_bias, noise, noise_r)])
self.mlp = Sequential([layers.LayerNormalization(), TransformerMlp(dim, mlp_dim, noise, noise_r)])
def call(self, inputs, *args, **kwargs):
x = self.attn(inputs) + inputs
x = self.mlp(x) + x
return x
def get_config(self):
config = {
"dim": self.dim,
"mlp_dim": self.mlp_dim,
"heads": self.heads,
"use_bias": self.use_bias
}
return config
__all__ = [
"PositionAdd",
"MHSA",
"TransformerMlp",
"Transformer"
]
|
from tensorflow.keras import layers, Sequential
import tensorflow as tf
class PositionAdd(layers.Layer):
def build(self, input_shape):
self.pe = self.add_weight("pe", [input_shape[1], input_shape[2]],
initializer=tf.keras.initializers.zeros())
def call(self, inputs, **kwargs):
return inputs + self.pe
class MHSA(layers.Layer):
def __init__(self, dim, num_heads, use_bias=True, noise=layers.Dropout, noise_r=0, **kwargs):
super(MHSA, self).__init__(**kwargs)
self.num_heads = num_heads
self.dim = dim
self.noise = noise
self.noise_r = noise_r
self.use_bias = use_bias
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = layers.Dense(dim * 3, use_bias=use_bias)
self.qkv_reshape = layers.Reshape((-1, num_heads, head_dim))
self.qv_permute = layers.Permute((2, 1, 3))
self.k_permute = layers.Permute((2, 3, 1))
self.attn_reshape = Sequential([
layers.Permute((2, 1, 3)),
layers.Reshape((-1, dim))
])
self.proj = layers.Dense(dim)
self.drop_out = noise(noise_r)
def call(self, inputs: tf.Tensor, training=False, *args, **kwargs):
qkv = self.qkv(inputs)
q, k, v = tf.split(qkv, 3, -1)
q = self.qkv_reshape(q)
k = self.qkv_reshape(k)
v = self.qkv_reshape(v)
q = self.qv_permute(q)
k = self.k_permute(k)
v = self.qv_permute(v)
attn = tf.matmul(q, k) * self.scale
attn = tf.nn.softmax(attn, axis=-1)
x = tf.matmul(attn, v)
x = self.attn_reshape(x)
x = self.proj(x)
x = self.drop_out(x)
return x
def get_config(self):
config = {
"num_heads": self.num_heads,
"dim": self.dim,
"use_bias": self.use_bias,
"noise": self.noise,
"noise_r": self.noise_r
}
return config
class TransformerMlp(layers.Layer):
def __init__(self, dim, mlp_dim, noise=layers.Dropout, noise_r=0):
super(TransformerMlp, self).__init__()
self.dim = dim
self.mlp_dim = mlp_dim
self.noise = noise
self.noise_r = noise_r
self.dense = Sequential([
layers.Dense(mlp_dim, "gelu"),
noise(noise_r),
layers.Dense(dim),
noise(noise_r)
])
def call(self, inputs, *args, **kwargs):
return self.dense(inputs)
def get_config(self):
config = {
"dim": self.dim,
"mlp_dim": self.mlp_dim,
"noise": self.noise,
"noise_r": self.noise_r
}
return config
class Transformer(layers.Layer):
def __init__(self, dim, mlp_dim, heads, use_bias=False, noise=layers.Dropout, noise_r=0):
super(Transformer, self).__init__()
self.dim = dim
self.mlp_dim = mlp_dim
self.heads = heads
self.use_bias = use_bias
self.attn = Sequential([layers.LayerNormalization(), MHSA(dim, heads, use_bias, noise, noise_r)])
self.mlp = Sequential([layers.LayerNormalization(), TransformerMlp(dim, mlp_dim, noise, noise_r)])
def call(self, inputs, *args, **kwargs):
x = self.attn(inputs) + inputs
x = self.mlp(x) + x
return x
def get_config(self):
config = {
"dim": self.dim,
"mlp_dim": self.mlp_dim,
"heads": self.heads,
"use_bias": self.use_bias
}
return config
__all__ = [
"PositionAdd",
"MHSA",
"TransformerMlp",
"Transformer"
]
|
none
| 1
| 2.646209
| 3
|
|
remote/util.py
|
zadjii/nebula
| 2
|
6626641
|
<filename>remote/util.py
from models.Session import Session
from common_util import *
from remote.models.Cloud import Cloud
from remote.models.User import User
def get_user_from_session(db, session_id):
# rd = Error()
# sess_obj = db.session.query(Session).filter_by(uuid=session_id).first()
# if sess_obj is None:
# rd = Error('No session exists on remote for sid:{}'.format(session_id))
# else:
# user = sess_obj.user
# if user is None:
# rd = Error('No user exists on remote\'s session, sid:{}'.format(session_id))
# else:
# rd = ResultAndData(True, user)
# return rd
rd = validate_session_id(db, session_id)
if rd.success:
sess_obj = rd.data
rd = sess_obj.get_user()
return rd
def get_user_by_name(db, username):
# type: (SimpleDB, str) -> User
# _log = get_mylog()
query = db.session.query(User).filter(User.username.ilike(username))
# _log.debug('{}'.format(query.all()))
return query.first()
def get_cloud_by_name(db, uname, cname):
# type: (SimpleDB, str, str) -> Cloud
# return [cloud for cloud in db.session.query(Cloud).filter_by(name=cname)
# if cloud.owner_name() == uname]
# Hosts don't know about owner names yet, todo:15
# return db.session.query(Cloud).filter_by(name=cname).first()
clouds = [cloud
for cloud in db.session.query(Cloud).filter_by(name=cname).all()
if cloud.uname().lower() == uname.lower()]
if len(clouds) > 1:
mylog('get_cloud_by_name error '
'- There should be AT MOST one result'
'\n\t Found {}'.format([cloud.full_name() for cloud in clouds]))
return None if len(clouds) < 1 else clouds[0]
def validate_session_id(db, session_id):
    # type: (SimpleDB, Any) -> ResultAndData
    """Look up a session by uuid; reject missing or expired sessions.

    On success the session's timeout is refreshed, the refresh is
    committed, and the session object is returned wrapped in Success.
    """
    session = db.session.query(Session).filter_by(uuid=session_id).first()
    if session is None:
        return Error('There is no session of uuid={}'.format(session_id))
    if session.has_timed_out():
        return Error('Session timed out uuid={}'.format(session_id))
    # Keep the session alive and persist the refreshed timestamp.
    session.refresh()
    db.session.commit()
    return Success(session)
|
<filename>remote/util.py
from models.Session import Session
from common_util import *
from remote.models.Cloud import Cloud
from remote.models.User import User
def get_user_from_session(db, session_id):
# rd = Error()
# sess_obj = db.session.query(Session).filter_by(uuid=session_id).first()
# if sess_obj is None:
# rd = Error('No session exists on remote for sid:{}'.format(session_id))
# else:
# user = sess_obj.user
# if user is None:
# rd = Error('No user exists on remote\'s session, sid:{}'.format(session_id))
# else:
# rd = ResultAndData(True, user)
# return rd
rd = validate_session_id(db, session_id)
if rd.success:
sess_obj = rd.data
rd = sess_obj.get_user()
return rd
def get_user_by_name(db, username):
# type: (SimpleDB, str) -> User
# _log = get_mylog()
query = db.session.query(User).filter(User.username.ilike(username))
# _log.debug('{}'.format(query.all()))
return query.first()
def get_cloud_by_name(db, uname, cname):
# type: (SimpleDB, str, str) -> Cloud
# return [cloud for cloud in db.session.query(Cloud).filter_by(name=cname)
# if cloud.owner_name() == uname]
# Hosts don't know about owner names yet, todo:15
# return db.session.query(Cloud).filter_by(name=cname).first()
clouds = [cloud
for cloud in db.session.query(Cloud).filter_by(name=cname).all()
if cloud.uname().lower() == uname.lower()]
if len(clouds) > 1:
mylog('get_cloud_by_name error '
'- There should be AT MOST one result'
'\n\t Found {}'.format([cloud.full_name() for cloud in clouds]))
return None if len(clouds) < 1 else clouds[0]
def validate_session_id(db, session_id):
# type: (SimpleDB, Any) -> ResultAndData
sess_obj = db.session.query(Session).filter_by(uuid=session_id).first()
if sess_obj is None:
msg = 'There is no session of uuid={}'.format(session_id)
return Error(msg)
if sess_obj.has_timed_out():
msg = 'Session timed out uuid={}'.format(session_id)
return Error(msg)
sess_obj.refresh()
db.session.commit()
return Success(sess_obj)
|
en
| 0.353584
|
# rd = Error() # sess_obj = db.session.query(Session).filter_by(uuid=session_id).first() # if sess_obj is None: # rd = Error('No session exists on remote for sid:{}'.format(session_id)) # else: # user = sess_obj.user # if user is None: # rd = Error('No user exists on remote\'s session, sid:{}'.format(session_id)) # else: # rd = ResultAndData(True, user) # return rd # type: (SimpleDB, str) -> User # _log = get_mylog() # _log.debug('{}'.format(query.all())) # type: (SimpleDB, str, str) -> Cloud # return [cloud for cloud in db.session.query(Cloud).filter_by(name=cname) # if cloud.owner_name() == uname] # Hosts don't know about owner names yet, todo:15 # return db.session.query(Cloud).filter_by(name=cname).first() # type: (SimpleDB, Any) -> ResultAndData
| 2.390145
| 2
|
scripts/update-compdb.py
|
xiaohongchen1991/clang-xform
| 2
|
6626642
|
#!/usr/bin/env python
"""
update compile_commands.json file used in unit test framework
"""
import argparse
import os
import sys
import re
import json
def main(argv):
    """Rewrite the 'directory' field of every entry in a
    compile_commands.json file to the current working directory.

    argv: argument list (excluding the program name); accepts an
    optional positional path defaulting to 'compile_commands.json'.
    Exits with an error message if the file does not exist.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'json_file',
        type=str,
        nargs='?',
        default='compile_commands.json',
        help='compile_commands.json file to update')
    # Bug fix: parse the argv that was passed in, not the global sys.argv.
    args = parser.parse_args(argv)
    json_file = os.path.abspath(args.json_file)
    if not os.path.exists(json_file):
        sys.exit('compile_commands.json file does not exist!')
    with open(json_file, "r") as file:
        compdbs = json.load(file)
    cwd = os.getcwd()
    for compdb in compdbs:
        # Point every compilation entry at the current checkout location.
        compdb['directory'] = cwd
    # Write the updated database back in place.
    with open(json_file, "w") as file:
        file.write(json.dumps(compdbs, indent=4))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
#!/usr/bin/env python
"""
update compile_commands.json file used in unit test framework
"""
import argparse
import os
import sys
import re
import json
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'json_file',
type=str,
nargs='?',
default='compile_commands.json',
help='compile_commands.json file to udpate')
args = parser.parse_args()
json_file = args.json_file
json_file = os.path.abspath(json_file)
if not os.path.exists(json_file):
sys.exit('compile_commands.json file does not exist!')
compdbs = None
with open(json_file, "r") as file:
compdbs = json.load(file)
cwd = os.getcwd()
for compdb in compdbs:
# update directory
compdb['directory'] = cwd;
# write compdbs back to compile_commands.json
with open(json_file, "w") as file:
file.write(json.dumps(compdbs, indent=4))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
en
| 0.512531
|
#!/usr/bin/env python update compile_commands.json file used in unit test framework # update directory # write compdbs back to compile_commands.json
| 2.916483
| 3
|
vnpy/app/portfolio_strategy/engine.py
|
longliveh/vnpy
| 1
|
6626643
|
""""""
import importlib
import glob
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Set, Tuple, Type, Any, Callable
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor
from tzlocal import get_localzone
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
OrderRequest,
SubscribeRequest,
HistoryRequest,
LogData,
TickData,
OrderData,
TradeData,
PositionData,
BarData,
ContractData
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION
)
from vnpy.trader.constant import (
Direction,
OrderType,
Interval,
Exchange,
Offset
)
from vnpy.trader.utility import load_json, save_json, extract_vt_symbol, round_to
from vnpy.trader.rqdata import rqdata_client
from vnpy.trader.converter import OffsetConverter
from vnpy.trader.database import database_manager
from .base import (
APP_NAME,
EVENT_PORTFOLIO_LOG,
EVENT_PORTFOLIO_STRATEGY
)
from .template import StrategyTemplate
class StrategyEngine(BaseEngine):
""""""
setting_filename = "portfolio_strategy_setting.json"
data_filename = "portfolio_strategy_data.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
    """Initialize the portfolio strategy engine and its bookkeeping maps."""
    super().__init__(main_engine, event_engine, APP_NAME)
    # Persisted strategy variables keyed by strategy name (restored from json).
    self.strategy_data: Dict[str, Dict] = {}
    # Loaded strategy classes keyed by class name.
    self.classes: Dict[str, Type[StrategyTemplate]] = {}
    # Live strategy instances keyed by strategy name.
    self.strategies: Dict[str, StrategyTemplate] = {}
    # vt_symbol -> strategies subscribed to that symbol's market data.
    self.symbol_strategy_map: Dict[str, List[StrategyTemplate]] = defaultdict(list)
    # vt_orderid -> strategy that sent the order (for routing fills).
    self.orderid_strategy_map: Dict[str, StrategyTemplate] = {}
    # Single worker so strategy initializations run strictly one at a time.
    self.init_executor: ThreadPoolExecutor = ThreadPoolExecutor(max_workers=1)
    # Trade ids already processed; used to drop duplicate trade pushes.
    self.vt_tradeids: Set[str] = set()
    self.offset_converter: OffsetConverter = OffsetConverter(self.main_engine)
def init_engine(self):
"""
"""
self.init_rqdata()
self.load_strategy_class()
self.load_strategy_setting()
self.load_strategy_data()
self.register_event()
self.write_log("组合策略引擎初始化成功")
def close(self):
""""""
self.stop_all_strategies()
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
def init_rqdata(self):
"""
Init RQData client.
"""
result = rqdata_client.init()
if result:
self.write_log("RQData数据接口初始化成功")
def query_bar_from_rq(
self, symbol: str, exchange: Exchange, interval: Interval, start: datetime, end: datetime
):
"""
Query bar data from RQData.
"""
req = HistoryRequest(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end
)
data = rqdata_client.query_history(req)
return data
def process_tick_event(self, event: Event):
""""""
tick: TickData = event.data
strategies = self.symbol_strategy_map[tick.vt_symbol]
if not strategies:
return
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_tick, tick)
def process_order_event(self, event: Event):
""""""
order: OrderData = event.data
self.offset_converter.update_order(order)
strategy = self.orderid_strategy_map.get(order.vt_orderid, None)
if not strategy:
return
self.call_strategy_func(strategy, strategy.update_order, order)
def process_trade_event(self, event: Event):
    """Route a trade push to the owning strategy, dropping duplicates."""
    trade: TradeData = event.data

    # Ignore trades we have already seen (gateways may push the same fill twice).
    if trade.vt_tradeid not in self.vt_tradeids:
        self.vt_tradeids.add(trade.vt_tradeid)
        self.offset_converter.update_trade(trade)

        owner = self.orderid_strategy_map.get(trade.vt_orderid)
        if owner:
            self.call_strategy_func(owner, owner.update_trade, trade)
def process_position_event(self, event: Event):
""""""
position: PositionData = event.data
self.offset_converter.update_position(position)
def send_order(
    self,
    strategy: StrategyTemplate,
    vt_symbol: str,
    direction: Direction,
    offset: Offset,
    price: float,
    volume: float,
    lock: bool,
    net: bool,
):
    """
    Send a new order to server.

    Rounds price/volume to the contract's increments, converts the
    request through the offset converter (lock/net modes may split it
    into several requests), sends each one, and records the
    vt_orderid -> strategy mapping so fills can be routed back.
    Returns the list of vt_orderids, or "" when the contract is unknown.
    """
    contract: ContractData = self.main_engine.get_contract(vt_symbol)
    if not contract:
        # Logged message (Chinese): "order failed, contract not found".
        self.write_log(f"委托失败,找不到合约:{vt_symbol}", strategy)
        return ""

    # Round order price and volume to nearest incremental value
    price = round_to(price, contract.pricetick)
    volume = round_to(volume, contract.min_volume)

    # Create request and send order.
    original_req = OrderRequest(
        symbol=contract.symbol,
        exchange=contract.exchange,
        direction=direction,
        offset=offset,
        type=OrderType.LIMIT,
        price=price,
        volume=volume,
        reference=f"{APP_NAME}_{strategy.strategy_name}"
    )

    # Convert with offset converter
    req_list = self.offset_converter.convert_order_request(original_req, lock, net)

    # Send Orders
    vt_orderids = []

    for req in req_list:
        # NOTE(review): this overrides the reference set on original_req above.
        req.reference = strategy.strategy_name  # Add strategy name as order reference
        vt_orderid = self.main_engine.send_order(
            req, contract.gateway_name)

        # Check if sending order successful
        if not vt_orderid:
            continue

        vt_orderids.append(vt_orderid)

        self.offset_converter.update_order_request(req, vt_orderid)

        # Save relationship between orderid and strategy.
        self.orderid_strategy_map[vt_orderid] = strategy

    return vt_orderids
def cancel_order(self, strategy: StrategyTemplate, vt_orderid: str):
"""
"""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_log(f"撤单失败,找不到委托{vt_orderid}", strategy)
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def get_pricetick(self, strategy: StrategyTemplate, vt_symbol: str):
"""
Return contract pricetick data.
"""
contract = self.main_engine.get_contract(vt_symbol)
if contract:
return contract.pricetick
else:
return None
def load_bars(self, strategy: StrategyTemplate, days: int, interval: Interval):
    """Load historical bars for every symbol of the strategy and replay them.

    Bars for all symbols are merged on their datetimes; when a symbol has
    no bar at a given datetime its previous close is used to synthesize a
    flat backfill bar, so on_bars always receives a bar per known symbol.
    """
    vt_symbols = strategy.vt_symbols
    dts: Set[datetime] = set()
    history_data: Dict[Tuple, BarData] = {}

    # Load data from rqdata/gateway/database
    for vt_symbol in vt_symbols:
        data = self.load_bar(vt_symbol, days, interval)
        for bar in data:
            dts.add(bar.datetime)
            history_data[(bar.datetime, vt_symbol)] = bar

    # Convert data structure and push to strategy
    dts = list(dts)
    dts.sort()

    bars = {}
    for dt in dts:
        for vt_symbol in vt_symbols:
            bar = history_data.get((dt, vt_symbol), None)

            # If bar data of vt_symbol at dt exists
            if bar:
                bars[vt_symbol] = bar
            # Otherwise, use previous data to backfill
            elif vt_symbol in bars:
                old_bar = bars[vt_symbol]

                # Synthesize a flat bar at the previous close price.
                bar = BarData(
                    symbol=old_bar.symbol,
                    exchange=old_bar.exchange,
                    datetime=dt,
                    open_price=old_bar.close_price,
                    high_price=old_bar.close_price,
                    low_price=old_bar.close_price,
                    close_price=old_bar.close_price,
                    gateway_name=old_bar.gateway_name
                )
                bars[vt_symbol] = bar

        self.call_strategy_func(strategy, strategy.on_bars, bars)
def load_bar(self, vt_symbol: str, days: int, interval: Interval) -> List[BarData]:
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now(get_localzone())
start = end - timedelta(days)
contract: ContractData = self.main_engine.get_contract(vt_symbol)
data = []
# Query bars from gateway if available
if contract and contract.history_data:
req = HistoryRequest(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end
)
data = self.main_engine.query_history(req, contract.gateway_name)
# Try to query bars from RQData, if not found, load from database.
else:
data = self.query_bar_from_rq(symbol, exchange, interval, start, end)
if not data:
data = database_manager.load_bar_data(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end,
)
return data
def call_strategy_func(
    self, strategy: StrategyTemplate, func: Callable, params: Any = None
):
    """
    Call function of a strategy and catch any exception raised.

    On failure the strategy is stopped (trading/inited cleared) and the
    traceback is logged instead of propagating.
    """
    try:
        # Preserve the original truthiness check: falsy params -> no-arg call.
        args = (params,) if params else ()
        func(*args)
    except Exception:
        strategy.trading = False
        strategy.inited = False

        msg = f"触发异常已停止\n{traceback.format_exc()}"
        self.write_log(msg, strategy)
def add_strategy(
self, class_name: str, strategy_name: str, vt_symbols: list, setting: dict
):
"""
Add a new strategy.
"""
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
strategy_class = self.classes.get(class_name, None)
if not strategy_class:
self.write_log(f"创建策略失败,找不到策略类{class_name}")
return
strategy = strategy_class(self, strategy_name, vt_symbols, setting)
self.strategies[strategy_name] = strategy
# Add vt_symbol to strategy map.
for vt_symbol in vt_symbols:
strategies = self.symbol_strategy_map[vt_symbol]
strategies.append(strategy)
self.save_strategy_setting()
self.put_strategy_event(strategy)
def init_strategy(self, strategy_name: str):
"""
Init a strategy.
"""
self.init_executor.submit(self._init_strategy, strategy_name)
def _init_strategy(self, strategy_name: str):
"""
Init strategies in queue.
"""
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
return
self.write_log(f"{strategy_name}开始执行初始化")
# Call on_init function of strategy
self.call_strategy_func(strategy, strategy.on_init)
# Restore strategy data(variables)
data = self.strategy_data.get(strategy_name, None)
if data:
for name in strategy.variables:
value = data.get(name, None)
if name == "pos":
pos = getattr(strategy, name)
pos.update(value)
elif value:
setattr(strategy, name, value)
# Subscribe market data
for vt_symbol in strategy.vt_symbols:
contract: ContractData = self.main_engine.get_contract(vt_symbol)
if contract:
req = SubscribeRequest(
symbol=contract.symbol, exchange=contract.exchange)
self.main_engine.subscribe(req, contract.gateway_name)
else:
self.write_log(f"行情订阅失败,找不到合约{vt_symbol}", strategy)
# Put event to update init completed status.
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
def start_strategy(self, strategy_name: str):
"""
Start a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
"""
Stop a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
# Call on_stop function of the strategy
self.call_strategy_func(strategy, strategy.on_stop)
# Change trading status of strategy to False
strategy.trading = False
# Cancel all orders of the strategy
strategy.cancel_all()
# Sync strategy variables to data file
self.sync_strategy_data(strategy)
# Update GUI
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.save_strategy_setting()
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
    """
    Remove a strategy.

    Refuses removal (returns False) while the strategy is still trading;
    otherwise detaches it from all lookup maps, persists the settings
    file, and returns True.  (Previously the failure path returned None
    implicitly; False is explicit and equally falsy for callers.)
    """
    strategy = self.strategies[strategy_name]
    if strategy.trading:
        self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
        return False

    # Remove from symbol strategy map
    for vt_symbol in strategy.vt_symbols:
        strategies = self.symbol_strategy_map[vt_symbol]
        strategies.remove(strategy)

    # Remove from vt_orderid strategy map; pop with default avoids a
    # separate membership check.
    for vt_orderid in strategy.active_orderids:
        self.orderid_strategy_map.pop(vt_orderid, None)

    # Remove from strategies
    self.strategies.pop(strategy_name)
    self.save_strategy_setting()

    return True
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(path1, "vnpy.app.portfolio_strategy.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for suffix in ["py", "pyd"]:
pathname = f"{path}/*.{suffix}"
for filepath in glob.glob(pathname):
stem = Path(filepath).stem
strategy_module_name = f"{module_name}.{stem}"
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
    """
    Load strategy class from module file.

    Imports the module and registers every concrete StrategyTemplate
    subclass found in it under its class name.  Import/scan failures
    are logged, not raised.
    """
    try:
        module = importlib.import_module(module_name)

        for name in dir(module):
            value = getattr(module, name)
            # Register concrete subclasses only; skip the base template itself.
            if (isinstance(value, type)
                    and issubclass(value, StrategyTemplate)
                    and value is not StrategyTemplate):
                self.classes[value.__name__] = value
    except Exception:
        # Was a bare "except:", which would also swallow KeyboardInterrupt
        # and SystemExit; catching Exception lets interrupts propagate.
        msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
        self.write_log(msg)
def load_strategy_data(self):
"""
Load strategy data from json file.
"""
self.strategy_data = load_json(self.data_filename)
def sync_strategy_data(self, strategy: StrategyTemplate):
"""
Sync strategy data into json file.
"""
data = strategy.get_variables()
data.pop("inited") # Strategy status (inited, trading) should not be synced.
data.pop("trading")
self.strategy_data[strategy.strategy_name] = data
save_json(self.data_filename, self.strategy_data)
def get_all_strategy_class_names(self):
"""
Return names of strategy classes loaded.
"""
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
    """Return the default parameter values declared on a strategy class."""
    strategy_class = self.classes[class_name]
    return {name: getattr(strategy_class, name) for name in strategy_class.parameters}
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def stop_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
def load_strategy_setting(self):
"""
Load setting file.
"""
strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["vt_symbols"],
strategy_config["setting"]
)
def save_strategy_setting(self):
"""
Save setting file.
"""
strategy_setting = {}
for name, strategy in self.strategies.items():
strategy_setting[name] = {
"class_name": strategy.__class__.__name__,
"vt_symbols": strategy.vt_symbols,
"setting": strategy.get_parameters()
}
save_json(self.setting_filename, strategy_setting)
def put_strategy_event(self, strategy: StrategyTemplate):
"""
Put an event to update strategy status.
"""
data = strategy.get_data()
event = Event(EVENT_PORTFOLIO_STRATEGY, data)
self.event_engine.put(event)
def write_log(self, msg: str, strategy: StrategyTemplate = None):
    """
    Create portfolio engine log event.

    When a strategy is given, its name is prefixed to the message.
    """
    text = f"{strategy.strategy_name}: {msg}" if strategy else msg
    log = LogData(msg=text, gateway_name=APP_NAME)
    self.event_engine.put(Event(type=EVENT_PORTFOLIO_LOG, data=log))
def send_email(self, msg: str, strategy: StrategyTemplate = None):
"""
Send email to default receiver.
"""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "组合策略引擎"
self.main_engine.send_email(subject, msg)
|
""""""
import importlib
import glob
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Set, Tuple, Type, Any, Callable
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor
from tzlocal import get_localzone
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
OrderRequest,
SubscribeRequest,
HistoryRequest,
LogData,
TickData,
OrderData,
TradeData,
PositionData,
BarData,
ContractData
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION
)
from vnpy.trader.constant import (
Direction,
OrderType,
Interval,
Exchange,
Offset
)
from vnpy.trader.utility import load_json, save_json, extract_vt_symbol, round_to
from vnpy.trader.rqdata import rqdata_client
from vnpy.trader.converter import OffsetConverter
from vnpy.trader.database import database_manager
from .base import (
APP_NAME,
EVENT_PORTFOLIO_LOG,
EVENT_PORTFOLIO_STRATEGY
)
from .template import StrategyTemplate
class StrategyEngine(BaseEngine):
""""""
setting_filename = "portfolio_strategy_setting.json"
data_filename = "portfolio_strategy_data.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
self.strategy_data: Dict[str, Dict] = {}
self.classes: Dict[str, Type[StrategyTemplate]] = {}
self.strategies: Dict[str, StrategyTemplate] = {}
self.symbol_strategy_map: Dict[str, List[StrategyTemplate]] = defaultdict(list)
self.orderid_strategy_map: Dict[str, StrategyTemplate] = {}
self.init_executor: ThreadPoolExecutor = ThreadPoolExecutor(max_workers=1)
self.vt_tradeids: Set[str] = set()
self.offset_converter: OffsetConverter = OffsetConverter(self.main_engine)
def init_engine(self):
"""
"""
self.init_rqdata()
self.load_strategy_class()
self.load_strategy_setting()
self.load_strategy_data()
self.register_event()
self.write_log("组合策略引擎初始化成功")
def close(self):
""""""
self.stop_all_strategies()
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
def init_rqdata(self):
"""
Init RQData client.
"""
result = rqdata_client.init()
if result:
self.write_log("RQData数据接口初始化成功")
def query_bar_from_rq(
self, symbol: str, exchange: Exchange, interval: Interval, start: datetime, end: datetime
):
"""
Query bar data from RQData.
"""
req = HistoryRequest(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end
)
data = rqdata_client.query_history(req)
return data
def process_tick_event(self, event: Event):
""""""
tick: TickData = event.data
strategies = self.symbol_strategy_map[tick.vt_symbol]
if not strategies:
return
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_tick, tick)
def process_order_event(self, event: Event):
""""""
order: OrderData = event.data
self.offset_converter.update_order(order)
strategy = self.orderid_strategy_map.get(order.vt_orderid, None)
if not strategy:
return
self.call_strategy_func(strategy, strategy.update_order, order)
def process_trade_event(self, event: Event):
""""""
trade: TradeData = event.data
# Filter duplicate trade push
if trade.vt_tradeid in self.vt_tradeids:
return
self.vt_tradeids.add(trade.vt_tradeid)
self.offset_converter.update_trade(trade)
strategy = self.orderid_strategy_map.get(trade.vt_orderid, None)
if not strategy:
return
self.call_strategy_func(strategy, strategy.update_trade, trade)
def process_position_event(self, event: Event):
""""""
position: PositionData = event.data
self.offset_converter.update_position(position)
def send_order(
self,
strategy: StrategyTemplate,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool,
net: bool,
):
"""
Send a new order to server.
"""
contract: ContractData = self.main_engine.get_contract(vt_symbol)
if not contract:
self.write_log(f"委托失败,找不到合约:{vt_symbol}", strategy)
return ""
# Round order price and volume to nearest incremental value
price = round_to(price, contract.pricetick)
volume = round_to(volume, contract.min_volume)
# Create request and send order.
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=OrderType.LIMIT,
price=price,
volume=volume,
reference=f"{APP_NAME}_{strategy.strategy_name}"
)
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(original_req, lock, net)
# Send Orders
vt_orderids = []
for req in req_list:
req.reference = strategy.strategy_name # Add strategy name as order reference
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
# Check if sending order successful
if not vt_orderid:
continue
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and strategy.
self.orderid_strategy_map[vt_orderid] = strategy
return vt_orderids
def cancel_order(self, strategy: StrategyTemplate, vt_orderid: str):
"""
"""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_log(f"撤单失败,找不到委托{vt_orderid}", strategy)
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def get_pricetick(self, strategy: StrategyTemplate, vt_symbol: str):
"""
Return contract pricetick data.
"""
contract = self.main_engine.get_contract(vt_symbol)
if contract:
return contract.pricetick
else:
return None
def load_bars(self, strategy: StrategyTemplate, days: int, interval: Interval):
""""""
vt_symbols = strategy.vt_symbols
dts: Set[datetime] = set()
history_data: Dict[Tuple, BarData] = {}
# Load data from rqdata/gateway/database
for vt_symbol in vt_symbols:
data = self.load_bar(vt_symbol, days, interval)
for bar in data:
dts.add(bar.datetime)
history_data[(bar.datetime, vt_symbol)] = bar
# Convert data structure and push to strategy
dts = list(dts)
dts.sort()
bars = {}
for dt in dts:
for vt_symbol in vt_symbols:
bar = history_data.get((dt, vt_symbol), None)
# If bar data of vt_symbol at dt exists
if bar:
bars[vt_symbol] = bar
# Otherwise, use previous data to backfill
elif vt_symbol in bars:
old_bar = bars[vt_symbol]
bar = BarData(
symbol=old_bar.symbol,
exchange=old_bar.exchange,
datetime=dt,
open_price=old_bar.close_price,
high_price=old_bar.close_price,
low_price=old_bar.close_price,
close_price=old_bar.close_price,
gateway_name=old_bar.gateway_name
)
bars[vt_symbol] = bar
self.call_strategy_func(strategy, strategy.on_bars, bars)
def load_bar(self, vt_symbol: str, days: int, interval: Interval) -> List[BarData]:
""""""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now(get_localzone())
start = end - timedelta(days)
contract: ContractData = self.main_engine.get_contract(vt_symbol)
data = []
# Query bars from gateway if available
if contract and contract.history_data:
req = HistoryRequest(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end
)
data = self.main_engine.query_history(req, contract.gateway_name)
# Try to query bars from RQData, if not found, load from database.
else:
data = self.query_bar_from_rq(symbol, exchange, interval, start, end)
if not data:
data = database_manager.load_bar_data(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end,
)
return data
def call_strategy_func(
self, strategy: StrategyTemplate, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg, strategy)
def add_strategy(
self, class_name: str, strategy_name: str, vt_symbols: list, setting: dict
):
"""
Add a new strategy.
"""
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
strategy_class = self.classes.get(class_name, None)
if not strategy_class:
self.write_log(f"创建策略失败,找不到策略类{class_name}")
return
strategy = strategy_class(self, strategy_name, vt_symbols, setting)
self.strategies[strategy_name] = strategy
# Add vt_symbol to strategy map.
for vt_symbol in vt_symbols:
strategies = self.symbol_strategy_map[vt_symbol]
strategies.append(strategy)
self.save_strategy_setting()
self.put_strategy_event(strategy)
def init_strategy(self, strategy_name: str):
"""
Init a strategy.
"""
self.init_executor.submit(self._init_strategy, strategy_name)
def _init_strategy(self, strategy_name: str):
"""
Init strategies in queue.
"""
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
return
self.write_log(f"{strategy_name}开始执行初始化")
# Call on_init function of strategy
self.call_strategy_func(strategy, strategy.on_init)
# Restore strategy data(variables)
data = self.strategy_data.get(strategy_name, None)
if data:
for name in strategy.variables:
value = data.get(name, None)
if name == "pos":
pos = getattr(strategy, name)
pos.update(value)
elif value:
setattr(strategy, name, value)
# Subscribe market data
for vt_symbol in strategy.vt_symbols:
contract: ContractData = self.main_engine.get_contract(vt_symbol)
if contract:
req = SubscribeRequest(
symbol=contract.symbol, exchange=contract.exchange)
self.main_engine.subscribe(req, contract.gateway_name)
else:
self.write_log(f"行情订阅失败,找不到合约{vt_symbol}", strategy)
# Put event to update init completed status.
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
def start_strategy(self, strategy_name: str):
"""
Start a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
"""
Stop a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
# Call on_stop function of the strategy
self.call_strategy_func(strategy, strategy.on_stop)
# Change trading status of strategy to False
strategy.trading = False
# Cancel all orders of the strategy
strategy.cancel_all()
# Sync strategy variables to data file
self.sync_strategy_data(strategy)
# Update GUI
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.save_strategy_setting()
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
Remove a strategy.
"""
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove from symbol strategy map
for vt_symbol in strategy.vt_symbols:
strategies = self.symbol_strategy_map[vt_symbol]
strategies.remove(strategy)
# Remove from vt_orderid strategy map
for vt_orderid in strategy.active_orderids:
if vt_orderid in self.orderid_strategy_map:
self.orderid_strategy_map.pop(vt_orderid)
# Remove from strategies
self.strategies.pop(strategy_name)
self.save_strategy_setting()
return True
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(path1, "vnpy.app.portfolio_strategy.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for suffix in ["py", "pyd"]:
pathname = f"{path}/*.{suffix}"
for filepath in glob.glob(pathname):
stem = Path(filepath).stem
strategy_module_name = f"{module_name}.{stem}"
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, StrategyTemplate) and value is not StrategyTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def load_strategy_data(self):
"""
Load strategy data from json file.
"""
self.strategy_data = load_json(self.data_filename)
def sync_strategy_data(self, strategy: StrategyTemplate):
"""
Sync strategy data into json file.
"""
data = strategy.get_variables()
data.pop("inited") # Strategy status (inited, trading) should not be synced.
data.pop("trading")
self.strategy_data[strategy.strategy_name] = data
save_json(self.data_filename, self.strategy_data)
def get_all_strategy_class_names(self):
"""
Return names of strategy classes loaded.
"""
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def stop_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
def load_strategy_setting(self):
"""
Load setting file.
"""
strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["vt_symbols"],
strategy_config["setting"]
)
def save_strategy_setting(self):
"""
Save setting file.
"""
strategy_setting = {}
for name, strategy in self.strategies.items():
strategy_setting[name] = {
"class_name": strategy.__class__.__name__,
"vt_symbols": strategy.vt_symbols,
"setting": strategy.get_parameters()
}
save_json(self.setting_filename, strategy_setting)
def put_strategy_event(self, strategy: StrategyTemplate):
"""
Put an event to update strategy status.
"""
data = strategy.get_data()
event = Event(EVENT_PORTFOLIO_STRATEGY, data)
self.event_engine.put(event)
def write_log(self, msg: str, strategy: StrategyTemplate = None):
"""
Create portfolio engine log event.
"""
if strategy:
msg = f"{strategy.strategy_name}: {msg}"
log = LogData(msg=msg, gateway_name=APP_NAME)
event = Event(type=EVENT_PORTFOLIO_LOG, data=log)
self.event_engine.put(event)
def send_email(self, msg: str, strategy: StrategyTemplate = None):
"""
Send email to default receiver.
"""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "组合策略引擎"
self.main_engine.send_email(subject, msg)
|
en
| 0.692761
|
Init RQData client. Query bar data from RQData. # Filter duplicate trade push Send a new order to server. # Round order price and volume to nearest incremental value # Create request and send order. # Convert with offset converter # Send Orders # Add strategy name as order reference # Check if sending order successful # Save relationship between orderid and strategy. Return contract pricetick data. # Load data from rqdata/gateway/database # Convert data structure and push to strategy # If bar data of vt_symbol at dt exists # Otherwise, use previous data to backfill # Query bars from gateway if available # Try to query bars from RQData, if not found, load from database. Call function of a strategy and catch any exception raised. Add a new strategy. # Add vt_symbol to strategy map. Init a strategy. Init strategies in queue. # Call on_init function of strategy # Restore strategy data(variables) # Subscribe market data # Put event to update init completed status. Start a strategy. Stop a strategy. # Call on_stop function of the strategy # Change trading status of strategy to False # Cancel all orders of the strategy # Sync strategy variables to data file # Update GUI Edit parameters of a strategy. Remove a strategy. # Remove from symbol strategy map # Remove from vt_orderid strategy map # Remove from strategies Load strategy class from source code. Load strategy class from certain folder. Load strategy class from module file. # noqa Load strategy data from json file. Sync strategy data into json file. # Strategy status (inited, trading) should not be synced. Return names of strategy classes loaded. Get default parameters of a strategy class. Get parameters of a strategy. Load setting file. Save setting file. Put an event to update strategy status. Create portfolio engine log event. Send email to default receiver.
| 1.874012
| 2
|
release/stubs.min/System/Net/__init___parts/OpenWriteCompletedEventArgs.py
|
htlcnn/ironpython-stubs
| 182
|
6626644
|
<filename>release/stubs.min/System/Net/__init___parts/OpenWriteCompletedEventArgs.py<gh_stars>100-1000
class OpenWriteCompletedEventArgs(AsyncCompletedEventArgs):
""" Provides data for the System.Net.WebClient.OpenWriteCompleted event. """
Result=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a writable stream that is used to send data to a server.
Get: Result(self: OpenWriteCompletedEventArgs) -> Stream
"""
|
<filename>release/stubs.min/System/Net/__init___parts/OpenWriteCompletedEventArgs.py<gh_stars>100-1000
class OpenWriteCompletedEventArgs(AsyncCompletedEventArgs):
""" Provides data for the System.Net.WebClient.OpenWriteCompleted event. """
Result=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a writable stream that is used to send data to a server.
Get: Result(self: OpenWriteCompletedEventArgs) -> Stream
"""
|
en
| 0.710321
|
Provides data for the System.Net.WebClient.OpenWriteCompleted event. Gets a writable stream that is used to send data to a server.
Get: Result(self: OpenWriteCompletedEventArgs) -> Stream
| 1.608729
| 2
|
Lego-Collector-Dilemma/code.py
|
ashwin2401/ga-learner-dsmp-repo
| 1
|
6626645
|
# --------------
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
# code starts here
df = pd.read_csv(path)
df.head(5)
X = df.drop(['list_price'],axis=1)
y = df['list_price']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state=6)
# code ends here
# --------------
import matplotlib.pyplot as plt
# code starts here
cols = X_train.columns
print (cols)
fig, axes = plt.subplots(nrows = 3 , ncols = 3, sharex='col', sharey='row')
for i in range(3):
for j in range (3):
col=cols[ i * 3 + j]
axes[i,j].scatter(X_train[col],y_train)
# code ends here
# --------------
import seaborn as sns
# Code starts here
corr = X_train.corr()
sns.heatmap(corr,annot=True,cmap = plt.cm.Reds)
plt.show()
X_train = X_train.drop(['play_star_rating','val_star_rating'], axis = 1)
X_test = X_test.drop(['play_star_rating','val_star_rating'], axis = 1)
# Code ends here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# Code starts here
regressor = LinearRegression()
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_test)
mse = mean_squared_error(y_test,y_pred)
print('MSE =',mse)
r2 = r2_score(y_test,y_pred)
print('R^2 SCORE =',r2)
# Code ends here
# --------------
# Code starts here
residual = y_test - y_pred
print(residual)
plt.hist(residual)
plt.ylabel('Frequency')
plt.xlabel('Residual')
# Code ends here
|
# --------------
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
# code starts here
df = pd.read_csv(path)
df.head(5)
X = df.drop(['list_price'],axis=1)
y = df['list_price']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state=6)
# code ends here
# --------------
import matplotlib.pyplot as plt
# code starts here
cols = X_train.columns
print (cols)
fig, axes = plt.subplots(nrows = 3 , ncols = 3, sharex='col', sharey='row')
for i in range(3):
for j in range (3):
col=cols[ i * 3 + j]
axes[i,j].scatter(X_train[col],y_train)
# code ends here
# --------------
import seaborn as sns
# Code starts here
corr = X_train.corr()
sns.heatmap(corr,annot=True,cmap = plt.cm.Reds)
plt.show()
X_train = X_train.drop(['play_star_rating','val_star_rating'], axis = 1)
X_test = X_test.drop(['play_star_rating','val_star_rating'], axis = 1)
# Code ends here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# Code starts here
regressor = LinearRegression()
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_test)
mse = mean_squared_error(y_test,y_pred)
print('MSE =',mse)
r2 = r2_score(y_test,y_pred)
print('R^2 SCORE =',r2)
# Code ends here
# --------------
# Code starts here
residual = y_test - y_pred
print(residual)
plt.hist(residual)
plt.ylabel('Frequency')
plt.xlabel('Residual')
# Code ends here
|
en
| 0.480485
|
# -------------- # code starts here # code ends here # -------------- # code starts here # code ends here # -------------- # Code starts here # Code ends here # -------------- # Code starts here # Code ends here # -------------- # Code starts here # Code ends here
| 3.089717
| 3
|
buddy/types/structured.py
|
ucbrise/buddy
| 1
|
6626646
|
class Collection:
def __init__(self, key, value):
self.key = key,
self.value = value
class QueryString:
def __init__(self, query):
assert isinstance(query, str)
# Check that it's syntactically valid SQL
self.query = query
class Table(Collection):
def __init__(self, key, value):
super().__init__(key, value)
class Channel(Collection):
def __init__(self, key, value):
assert '@' in key or any(['@' in k for k in key])
super().__init__(key, value)
class View(QueryString):
def __init__(self, query):
super().__init__(query)
|
class Collection:
def __init__(self, key, value):
self.key = key,
self.value = value
class QueryString:
def __init__(self, query):
assert isinstance(query, str)
# Check that it's syntactically valid SQL
self.query = query
class Table(Collection):
def __init__(self, key, value):
super().__init__(key, value)
class Channel(Collection):
def __init__(self, key, value):
assert '@' in key or any(['@' in k for k in key])
super().__init__(key, value)
class View(QueryString):
def __init__(self, query):
super().__init__(query)
|
en
| 0.899595
|
# Check that it's syntactically valid SQL
| 2.916702
| 3
|
python/fmcc.py
|
wittrup/crap
| 1
|
6626647
|
from faulhaber_const import commands as FMCC
fautmel=['GTYP', 'GSER', 'VER', 'GN', 'GCL', 'GRM', 'GKN', 'RM', 'KN', 'ANSW', 'NET', 'CST', 'CO', 'SO', 'TO', 'SAVE', 'BAUD', 'NODEADR', 'GNODEADR', 'GADV', 'EN', 'DI', 'GTIMEOUT', 'TIMEOUT', 'UPTIME', 'SADV']
from crcmod.predefined import mkCrcFun as mkCrcFunPre
from crcmod import mkCrcFun
crcfuncs = {}
crcnames = ['crc-8', 'crc-8-darc', 'crc-8-i-code', 'crc-8-itu', 'crc-8-maxim', 'crc-8-rohc', 'crc-8-wcdma']
for crc_name in crcnames:
crcfuncs[crc_name] = mkCrcFunPre(crc_name)
crcfuncs['CRC-13-BBC'] = mkCrcFun(0x1CF5)
cmds = {}
for cmd in FMCC:
cmds[cmd] = {}
for crc_name, crc_func in crcfuncs.items():
cmds[cmd][crc_name] = crc_func(bytes(cmd, 'ascii'))
crccount = {}
for crc_name in crcnames:
crccount[crc_name] = [0] * 256
for cmd,value in cmds.items():
crccount[crc_name][value[crc_name]] += 1
from collections import Counter
import operator
stats = {}
for crc_name in crcnames:
stats[crc_name] = len([item for item, count in Counter(crccount[crc_name]).items() if count > 1])
crcleast = min(stats.items(), key=operator.itemgetter(1))[0]
print(crcleast, stats[crcleast])
collisions = {}
for checksum,count in enumerate(crccount[crcleast]):
if count > 1:
collisions[checksum] = []
for cmd, value in cmds.items():
if value[crcleast] == checksum:
collisions[checksum].append(cmd)
fautmelcollisions = []
for chksum, cms in collisions.items():
cnt = 0
hit = []
for c in cms:
if c in fautmel:
cnt += 1
hit.append(c)
if cnt > 0:
fautmelcollisions.append([chksum, cnt, cms, hit])
print("\n".join(map(str, fautmelcollisions)))
|
from faulhaber_const import commands as FMCC
fautmel=['GTYP', 'GSER', 'VER', 'GN', 'GCL', 'GRM', 'GKN', 'RM', 'KN', 'ANSW', 'NET', 'CST', 'CO', 'SO', 'TO', 'SAVE', 'BAUD', 'NODEADR', 'GNODEADR', 'GADV', 'EN', 'DI', 'GTIMEOUT', 'TIMEOUT', 'UPTIME', 'SADV']
from crcmod.predefined import mkCrcFun as mkCrcFunPre
from crcmod import mkCrcFun
crcfuncs = {}
crcnames = ['crc-8', 'crc-8-darc', 'crc-8-i-code', 'crc-8-itu', 'crc-8-maxim', 'crc-8-rohc', 'crc-8-wcdma']
for crc_name in crcnames:
crcfuncs[crc_name] = mkCrcFunPre(crc_name)
crcfuncs['CRC-13-BBC'] = mkCrcFun(0x1CF5)
cmds = {}
for cmd in FMCC:
cmds[cmd] = {}
for crc_name, crc_func in crcfuncs.items():
cmds[cmd][crc_name] = crc_func(bytes(cmd, 'ascii'))
crccount = {}
for crc_name in crcnames:
crccount[crc_name] = [0] * 256
for cmd,value in cmds.items():
crccount[crc_name][value[crc_name]] += 1
from collections import Counter
import operator
stats = {}
for crc_name in crcnames:
stats[crc_name] = len([item for item, count in Counter(crccount[crc_name]).items() if count > 1])
crcleast = min(stats.items(), key=operator.itemgetter(1))[0]
print(crcleast, stats[crcleast])
collisions = {}
for checksum,count in enumerate(crccount[crcleast]):
if count > 1:
collisions[checksum] = []
for cmd, value in cmds.items():
if value[crcleast] == checksum:
collisions[checksum].append(cmd)
fautmelcollisions = []
for chksum, cms in collisions.items():
cnt = 0
hit = []
for c in cms:
if c in fautmel:
cnt += 1
hit.append(c)
if cnt > 0:
fautmelcollisions.append([chksum, cnt, cms, hit])
print("\n".join(map(str, fautmelcollisions)))
|
none
| 1
| 1.940733
| 2
|
|
tests/unit/utils/test_net_thread.py
|
pyl1b/p2p0mq
| 0
|
6626648
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import threading
from unittest import TestCase
from unittest.mock import MagicMock, patch
from p2p0mq.utils.thread.netthread import KoNetThread, ThreadAuthenticator
logger = logging.getLogger('tests.p2p0mq.thread')
class TestKoNetThread(TestCase):
def setUp(self):
self.testee = KoNetThread(
app=MagicMock(),
bind_address=MagicMock(),
bind_port=MagicMock(),
context=MagicMock(),
no_encryption=MagicMock()
)
def tearDown(self):
# self.testee.context.destroy()
pass
def test_init(self):
with self.assertRaises(ValueError):
KoNetThread(
app=None,
bind_address="",
bind_port="bind_port")
self.testee = KoNetThread(
app="app",
bind_address="bind_address",
bind_port=10,
context="context",
no_encryption="no_encryption"
)
self.assertEqual(self.testee.app, "app")
self.assertEqual(self.testee.bind_address, "bind_address")
self.assertEqual(self.testee.bind_port, 10)
self.assertEqual(self.testee.context, "context")
self.assertEqual(self.testee.no_encryption, "no_encryption")
self.assertIsInstance(self.testee.stop, threading.Event)
self.assertIsInstance(self.testee.sleep, threading.Event)
self.assertIsNone(self.testee.tick)
self.assertIsInstance(self.testee, threading.Thread)
def test_create(self):
pass
def test_terminate(self):
self.assertIsNone(self.testee.socket)
self.testee.terminate()
self.testee.socket = MagicMock()
sk = self.testee.socket
self.testee.terminate()
sk.close.assert_called_once()
self.assertIsNone(self.testee.socket)
def test_address(self):
self.testee.bind_address = "1"
self.testee.bind_port = 1
self.assertEqual(self.testee.address, "tcp://1:1")
self.testee.bind_address = "1"
self.testee.bind_port = None
self.assertEqual(self.testee.address, "1")
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import threading
from unittest import TestCase
from unittest.mock import MagicMock, patch
from p2p0mq.utils.thread.netthread import KoNetThread, ThreadAuthenticator
logger = logging.getLogger('tests.p2p0mq.thread')
class TestKoNetThread(TestCase):
def setUp(self):
self.testee = KoNetThread(
app=MagicMock(),
bind_address=MagicMock(),
bind_port=MagicMock(),
context=MagicMock(),
no_encryption=MagicMock()
)
def tearDown(self):
# self.testee.context.destroy()
pass
def test_init(self):
with self.assertRaises(ValueError):
KoNetThread(
app=None,
bind_address="",
bind_port="bind_port")
self.testee = KoNetThread(
app="app",
bind_address="bind_address",
bind_port=10,
context="context",
no_encryption="no_encryption"
)
self.assertEqual(self.testee.app, "app")
self.assertEqual(self.testee.bind_address, "bind_address")
self.assertEqual(self.testee.bind_port, 10)
self.assertEqual(self.testee.context, "context")
self.assertEqual(self.testee.no_encryption, "no_encryption")
self.assertIsInstance(self.testee.stop, threading.Event)
self.assertIsInstance(self.testee.sleep, threading.Event)
self.assertIsNone(self.testee.tick)
self.assertIsInstance(self.testee, threading.Thread)
def test_create(self):
pass
def test_terminate(self):
self.assertIsNone(self.testee.socket)
self.testee.terminate()
self.testee.socket = MagicMock()
sk = self.testee.socket
self.testee.terminate()
sk.close.assert_called_once()
self.assertIsNone(self.testee.socket)
def test_address(self):
self.testee.bind_address = "1"
self.testee.bind_port = 1
self.assertEqual(self.testee.address, "tcp://1:1")
self.testee.bind_address = "1"
self.testee.bind_port = None
self.assertEqual(self.testee.address, "1")
|
en
| 0.306537
|
# -*- coding: utf-8 -*- # self.testee.context.destroy()
| 2.375974
| 2
|
jwstobsim/__init__.py
|
roberthammer/JWST-observation-simulator
| 1
|
6626649
|
<reponame>roberthammer/JWST-observation-simulator<filename>jwstobsim/__init__.py
__all__ = ['utils']
#__version__ = "0.0.1"
from .utils import *
|
__all__ = ['utils']
#__version__ = "0.0.1"
from .utils import *
|
en
| 0.465929
|
#__version__ = "0.0.1"
| 1.040872
| 1
|
aio/aio-proxy/aio_proxy/parsers/section_activite_principale.py
|
etalab/api-search-annuaire-entreprises
| 3
|
6626650
|
<gh_stars>1-10
from typing import Optional
from aio_proxy.labels.helpers import sections_codes_naf
def validate_section_activite_principale(
section_activite_principale_clean: str,
) -> Optional[str]:
"""Check the validity of section_activite_principale.
Args:
section_activite_principale_clean(str, optional):
section_activite_principale extracted and cleaned.
Returns:
None if section_activite_principale_clean is None.
section_activite_principale_clean if valid.
Raises:
ValueError: if section_activite_principale_clean not valid.
"""
if section_activite_principale_clean is None:
return None
if section_activite_principale_clean not in sections_codes_naf:
raise ValueError("Section d'activité principale non valide.")
return section_activite_principale_clean
|
from typing import Optional
from aio_proxy.labels.helpers import sections_codes_naf
def validate_section_activite_principale(
section_activite_principale_clean: str,
) -> Optional[str]:
"""Check the validity of section_activite_principale.
Args:
section_activite_principale_clean(str, optional):
section_activite_principale extracted and cleaned.
Returns:
None if section_activite_principale_clean is None.
section_activite_principale_clean if valid.
Raises:
ValueError: if section_activite_principale_clean not valid.
"""
if section_activite_principale_clean is None:
return None
if section_activite_principale_clean not in sections_codes_naf:
raise ValueError("Section d'activité principale non valide.")
return section_activite_principale_clean
|
en
| 0.590445
|
Check the validity of section_activite_principale. Args: section_activite_principale_clean(str, optional): section_activite_principale extracted and cleaned. Returns: None if section_activite_principale_clean is None. section_activite_principale_clean if valid. Raises: ValueError: if section_activite_principale_clean not valid.
| 2.583366
| 3
|