hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2df59b3624e83309ba6fde19949ccfe728cf89 | 689 | py | Python | Scripts/django-admin.py | narsimrao/django_project | 8bd6b3db69505bfc7c78de9e58058efe76505485 | [
"bzip2-1.0.6"
] | null | null | null | Scripts/django-admin.py | narsimrao/django_project | 8bd6b3db69505bfc7c78de9e58058efe76505485 | [
"bzip2-1.0.6"
] | null | null | null | Scripts/django-admin.py | narsimrao/django_project | 8bd6b3db69505bfc7c78de9e58058efe76505485 | [
"bzip2-1.0.6"
] | null | null | null | #!e:\django_projects\django_project\scripts\python.exe
# Legacy entry point; delete this script once the django-admin.py
# deprecation window closes.
import warnings

from django.core import management

_REMOVAL_MESSAGE = (
    'django-admin.py was deprecated in Django 3.1 and removed in Django '
    '4.0. Please manually remove this script from your virtual environment '
    'and use django-admin instead.'
)

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(_REMOVAL_MESSAGE)

if __name__ == "__main__":
    # Nag about the rename, then defer to the normal management entry point.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| 31.318182 | 80 | 0.730044 |
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| true | true |
1c2df61a7417955bda87d8d71d299b279e8a2f26 | 1,103 | py | Python | utils/sputa.py | piger/dulbecco | 8d0c1a62d64214f1962077385216f09866767720 | [
"BSD-2-Clause"
] | null | null | null | utils/sputa.py | piger/dulbecco | 8d0c1a62d64214f1962077385216f09866767720 | [
"BSD-2-Clause"
] | null | null | null | utils/sputa.py | piger/dulbecco | 8d0c1a62d64214f1962077385216f09866767720 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cPickle as pickle
import shutil
import os
import json
import sys
class PersistentDict(dict):
    """A dict that can be saved to and reloaded from a pickle file on disk."""

    def __init__(self, filename, *args, **kwargs):
        self.filename = filename
        dict.__init__(self, *args, **kwargs)

    def save(self):
        """Atomically pickle the current contents to ``self.filename``."""
        scratch = self.filename + ".tmp"
        try:
            with open(scratch, "wb") as handle:
                pickle.dump(dict(self), handle, 2)
        except (OSError, pickle.PickleError):
            # Drop the half-written temp file before propagating the error.
            os.remove(scratch)
            raise
        # Rename into place only after a complete, successful write.
        shutil.move(scratch, self.filename)

    def load(self):
        """Merge the pickled contents of ``self.filename`` into this dict.

        A missing file is silently ignored.
        """
        if not os.path.exists(self.filename):
            return
        with open(self.filename, "rb") as handle:
            self.update(pickle.load(handle))
if __name__ == '__main__':
    filename = "markov.pickle"
    pd = PersistentDict(filename)
    pd.load()
    # Emit alternating lines -- the JSON-encoded key followed by each of its
    # sub-keys -- UTF-8 encoded for this (Python 2) script's byte stdout.
    # (The previously unused counter `i = 0` was dead code and is removed.)
    for key in pd:
        jkey = json.dumps(key, separators=(',', ':'))
        for subkey in pd[key]:
            line = u"%s\n%s\n" % (jkey, subkey)
            sys.stdout.write(line.encode('utf-8'))
| 23.978261 | 53 | 0.55757 |
import cPickle as pickle
import shutil
import os
import json
import sys
class PersistentDict(dict):
def __init__(self, filename, *args, **kwargs):
self.filename = filename
dict.__init__(self, *args, **kwargs)
def save(self):
tmpfile = self.filename + ".tmp"
try:
with open(tmpfile, "wb") as fd:
pickle.dump(dict(self), fd, 2)
except (OSError, pickle.PickleError):
os.remove(tmpfile)
raise
shutil.move(tmpfile, self.filename)
def load(self):
if not os.path.exists(self.filename):
return
with open(self.filename, "rb") as fd:
data = pickle.load(fd)
self.update(data)
if __name__ == '__main__':
filename = "markov.pickle"
pd = PersistentDict(filename)
pd.load()
i = 0
for key in pd:
jkey = json.dumps(key, separators=(',', ':'))
for subkey in pd[key]:
line = u"%s\n%s\n" % (jkey, subkey)
sys.stdout.write(line.encode('utf-8'))
| true | true |
1c2df64db076a8bba366965c59793e8dbaeb6a13 | 882 | py | Python | esmvalcore/cmor/_fixes/cmip5/fgoals_g2.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | null | null | null | esmvalcore/cmor/_fixes/cmip5/fgoals_g2.py | jvegreg/ESMValCore | 03eb1c942bf1dc3be98cb30c3592b42e82a94f16 | [
"Apache-2.0"
] | 2 | 2022-03-02T16:16:06.000Z | 2022-03-10T12:58:49.000Z | esmvalcore/cmor/_fixes/cmip5/fgoals_g2.py | valeriupredoi/ESMValCore | b46b948c47d8579d997b28501f8588f5531aa354 | [
"Apache-2.0"
] | null | null | null | """Fixes for FGOALS-g2 model."""
import iris
from cf_units import Unit
from ..fix import Fix
from ..shared import round_coordinates
class AllVars(Fix):
    """Fixes for all variables."""

    def fix_metadata(self, cubes):
        """Fix metadata.

        Rebuild the time units and round the longitude coordinate to work
        around a modulus issue in the source data.

        Parameters
        ----------
        cubes : iris.cube.CubeList
            Input cubes.

        Returns
        -------
        iris.cube.CubeList

        """
        for cube in cubes:
            try:
                time_coord = cube.coord('time')
            except iris.exceptions.CoordinateNotFoundError:
                # Cube has no time coordinate; nothing to fix here.
                continue
            time_coord.units = Unit(time_coord.units.name,
                                    time_coord.units.calendar)
        round_coordinates(cubes, 4, coord_names=['longitude'])
        return cubes
| 22.615385 | 73 | 0.568027 | import iris
from cf_units import Unit
from ..fix import Fix
from ..shared import round_coordinates
class AllVars(Fix):
def fix_metadata(self, cubes):
for cube in cubes:
try:
time = cube.coord('time')
except iris.exceptions.CoordinateNotFoundError:
pass
else:
time.units = Unit(time.units.name, time.units.calendar)
round_coordinates(cubes, 4, coord_names=['longitude'])
return cubes
| true | true |
1c2df64e53bd14b34a66a8f182d65708eb56769f | 195 | py | Python | BAEKJOON/Python/10773.py | cmsong111/NJ_code | 2df6176d179e168a2789a825ddeb977a82eb8d97 | [
"MIT"
] | null | null | null | BAEKJOON/Python/10773.py | cmsong111/NJ_code | 2df6176d179e168a2789a825ddeb977a82eb8d97 | [
"MIT"
] | null | null | null | BAEKJOON/Python/10773.py | cmsong111/NJ_code | 2df6176d179e168a2789a825ddeb977a82eb8d97 | [
"MIT"
] | null | null | null | result = []
# Each input number is pushed; a 0 cancels (pops) the most recent survivor.
for _ in range(int(input())):
    number = int(input())
    if number == 0:
        if result:
            result.pop()
    else:
        result.append(number)
print(sum(result)) | 19.5 | 29 | 0.507692 | result = []
for i in range(int(input())):
temp = int(input())
if temp == 0:
if len(result) != 0:
result.pop()
else:
result.append(temp)
print(sum(result)) | true | true |
1c2df69976a1483f6eb5b5dc7775f32a86fd296a | 313 | py | Python | packages/pycopy/v2.11.0.1/esp8266/stubs/uselect.py | TheVinhLuong102/micropy-stubs | 55ff1773008f7c4dfc3d70a403986486226eb6b3 | [
"MIT"
] | 18 | 2019-07-11T13:31:09.000Z | 2022-01-27T06:38:40.000Z | packages/pycopy/v2.11.0.1/esp8266/stubs/uselect.py | TheVinhLuong102/micropy-stubs | 55ff1773008f7c4dfc3d70a403986486226eb6b3 | [
"MIT"
] | 9 | 2019-09-01T21:44:49.000Z | 2022-02-04T20:55:08.000Z | packages/pycopy/v2.11.0.1/esp8266/stubs/uselect.py | TheVinhLuong102/micropy-stubs | 55ff1773008f7c4dfc3d70a403986486226eb6b3 | [
"MIT"
] | 6 | 2019-10-08T05:31:21.000Z | 2021-04-22T10:21:01.000Z | """
Module: 'uselect' on esp8266 v2.11.0.1 on 2019
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.2.0-dev(9422289)', version='v2.11.0.1 on 2019-07-26', machine='ESP module with ESP8266')
# Stubber: 1.2.0
# Poll event bitmask constants; values match the usual POSIX poll(2) flags.
POLLERR = 8
POLLHUP = 16
POLLIN = 1
POLLOUT = 4
def poll():
    """Auto-generated stub: signature only, no implementation."""
    pass
def select():
    """Auto-generated stub: signature only, no implementation."""
    pass
| 19.5625 | 146 | 0.645367 |
POLLERR = 8
POLLHUP = 16
POLLIN = 1
POLLOUT = 4
def poll():
pass
def select():
pass
| true | true |
1c2df8007af113fc464b9a79dc28207ab10a761d | 227 | py | Python | image-processing-package/image-processing-my-package/utils/io.py | isabellazramos/criacao-de-pacotes-em-python | 6ee97f1365813832bd530f0df6e2159c5b2cb06d | [
"MIT"
] | null | null | null | image-processing-package/image-processing-my-package/utils/io.py | isabellazramos/criacao-de-pacotes-em-python | 6ee97f1365813832bd530f0df6e2159c5b2cb06d | [
"MIT"
] | null | null | null | image-processing-package/image-processing-my-package/utils/io.py | isabellazramos/criacao-de-pacotes-em-python | 6ee97f1365813832bd530f0df6e2159c5b2cb06d | [
"MIT"
] | 2 | 2022-03-21T20:15:46.000Z | 2022-03-31T14:50:43.000Z | #Author: Karina Tiemi Kato
from skimage.io import imread, imsave

# Backwards-compatible aliases: the module previously imported the
# nonexistent names ``inread``/``insave`` (typos for the real skimage.io
# functions), which made importing this module fail with ImportError.
inread, insave = imread, imsave

def read_image(path, is_gray = False):
    """Read and return the image stored at *path*.

    Parameters
    ----------
    path : str
        Location of the image file on disk.
    is_gray : bool
        When True the image is loaded as grayscale.
    """
    image = imread(path, as_gray = is_gray)
    return image
def save_image(image, path):
insave(path, image) | 25.222222 | 44 | 0.696035 |
from skimage.io import inread, insave
def read_image(path, is_gray = False):
image = inread(path, as_gray = is_gray)
return image
def save_image(image, path):
insave(path, image) | true | true |
1c2df8a4ae9fd16c06a3de5c0f9723cea7c360e8 | 7,052 | py | Python | flloat/flloat.py | marcofavorito/flloat | 75e8ec9219763eba5feb362438604693b6cc7346 | [
"Apache-2.0"
] | 3 | 2019-07-14T21:15:26.000Z | 2019-12-12T21:51:35.000Z | flloat/flloat.py | MarcoFavorito/flloat | 75e8ec9219763eba5feb362438604693b6cc7346 | [
"MIT"
] | 1 | 2019-09-03T16:35:59.000Z | 2019-09-03T16:35:59.000Z | flloat/flloat.py | MarcoFavorito/flloat | 75e8ec9219763eba5feb362438604693b6cc7346 | [
"MIT"
] | 1 | 2019-08-30T18:15:02.000Z | 2019-08-30T18:15:02.000Z | # -*- coding: utf-8 -*-
"""Main module of the pakage."""
from typing import Set, FrozenSet, Dict, cast, List
import sympy
from pythomata import SymbolicAutomaton, PropositionalInterpretation
from pythomata.impl.symbolic import SymbolicDFA
from sympy.logic.boolalg import BooleanFalse
from flloat.base import Formula
from flloat.delta import Delta
from flloat.helpers import powerset
from flloat.pl import (
PLFormula,
PLAtomic,
PLNot,
PLAnd,
PLOr,
PLImplies,
PLEquivalence,
PLTrue,
PLFalse,
to_sympy,
)
def find_atomics(formula: Formula) -> Set[PLAtomic]:
    """Return the set of atomic formulas occurring in *formula*."""
    if isinstance(formula, PLFormula):
        return formula.find_atomics()
    if isinstance(formula, PLAtomic):
        return {formula}
    raise TypeError("Logic error: unexpected type.")
def _transform_delta(f: Formula, formula2AtomicFormula):
    """
    Transform delta.

    Rewrite a propositional formula, replacing every non-propositional
    subformula with its "freezed" atomic placeholder taken from
    *formula2AtomicFormula*.
    """
    if isinstance(f, PLNot):
        # Recurse on the negated subformula (``f.f``); the previous code
        # recursed on ``f`` itself, which loops forever.
        return PLNot(_transform_delta(f.f, formula2AtomicFormula))
    elif isinstance(f, (PLAnd, PLOr, PLImplies, PLEquivalence)):
        return type(f)(
            [_transform_delta(subf, formula2AtomicFormula) for subf in f.formulas]
        )
    elif type(f) == PLTrue or type(f) == PLFalse:
        return f
    else:
        return formula2AtomicFormula[f]
def _is_true(Q: FrozenSet[FrozenSet]):
    """Check whether *Q* is accepting, i.e. the epsilon-delta of some state
    in *Q* evaluates to true."""
    if frozenset() in Q:
        return True
    branches = []
    for q in Q:
        deltas = [subf.s.delta(None, epsilon=True) for subf in q]
        if len(deltas) >= 2:
            branches.append(PLAnd(deltas))
        elif len(deltas) == 1:
            branches.append(deltas[0])
        else:
            # An empty state contributes an unsatisfiable branch.
            branches.append(PLFalse())
    if not branches:
        return False
    formula = branches[0] if len(branches) == 1 else PLOr(branches)
    return formula.truth({})
def _make_transition(
    marco_q: FrozenSet[FrozenSet[PLAtomic]], i: PropositionalInterpretation
):
    """
    Compute the successor macrostate of *marco_q* under interpretation *i*.

    Each inner frozenset of *marco_q* is one state: a set of wrapped atomic
    formulas whose ``.s`` attribute supports ``delta``.  For every state the
    delta of each formula is taken, the results are "freezed" into fresh
    propositional atoms, and sympy enumerates the satisfying models of their
    conjunction; each model maps back to one successor state.
    """
    new_macrostate = set()
    for q in marco_q:
        # delta function applied to every formula in the macro state Q
        delta_formulas = [cast(Delta, f.s).delta(i) for f in q]
        # find the list of atoms, which are "true" atoms
        # (i.e. propositional atoms) or LDLf formulas
        atomics = [s for subf in delta_formulas for s in find_atomics(subf)]
        # bidirectional maps between each atom and a synthetic string id
        atom2id = {
            v: str(k) for k, v in enumerate(atomics)
        }  # type: Dict[PLAtomic, str]
        id2atom = {v: k for k, v in atom2id.items()}  # type: Dict[str, PLAtomic]
        # build a map from formula to a "freezed" propositional Atomic Formula
        formula2atomic_formulas = {
            f: PLAtomic(atom2id[f])
            if f != PLTrue() and f != PLFalse()  # and not isinstance(f, PLAtomic)
            else f
            for f in atomics
        }
        # the final list of Propositional Atomic Formulas,
        # one for each formula in the original macro state Q
        transformed_delta_formulas = [
            _transform_delta(f, formula2atomic_formulas) for f in delta_formulas
        ]
        # the empty conjunction stands for true
        if len(transformed_delta_formulas) == 0:
            conjunctions = PLTrue()
        elif len(transformed_delta_formulas) == 1:
            conjunctions = transformed_delta_formulas[0]
        else:
            conjunctions = PLAnd(transformed_delta_formulas)  # type: ignore
        # the model in this case is the smallest set of symbols
        # s.t. the conjunction of "freezed" atomic formula is true.
        # alphabet = frozenset(symbol2formula)
        # models = frozenset(conjunctions.minimal_models(alphabet))
        formula = to_sympy(conjunctions, replace=atom2id)  # type: ignore
        all_models = list(sympy.satisfiable(formula, all_models=True))
        # sympy returns [False] when unsatisfiable and [{True: True}] when
        # trivially true; normalize both special cases.
        if len(all_models) == 1 and all_models[0] == BooleanFalse():
            models = []  # type: List[Set[str]]
        elif len(all_models) == 1 and all_models[0] == {True: True}:
            models = [set()]
        else:
            models = list(
                map(lambda x: {k for k, v in x.items() if v is True}, all_models)
            )
        # translate each model (set of synthetic ids) back into a state
        for min_model in models:
            q_prime = frozenset({id2atom[s] for s in map(str, min_model)})
            new_macrostate.add(q_prime)
    return frozenset(new_macrostate)
def get_labels_from_macrostate(macrostate):
    """Collect the labels of every formula appearing in *macrostate*."""
    labels = set()
    for states in macrostate:
        for state in states:
            labels.update(state.s.find_labels())
    return labels
def to_automaton(f) -> SymbolicDFA:  # noqa: C901
    """Translate the formula *f* into a minimized symbolic DFA.

    Macrostates (frozensets of frozensets of PLAtomic) reachable from the
    initial state are explored on the fly via ``_make_transition``; accepting
    macrostates are detected with ``_is_true``.  The result is materialized
    as a :class:`SymbolicAutomaton`, then determinized and minimized.
    """
    f = f.to_nnf()
    initial_state = frozenset({frozenset({PLAtomic(f)})})
    states = {initial_state}
    final_states = set()
    transition_function = {}  # type: Dict

    # the explicit alphabet: every subset of the formula's labels
    all_labels = f.find_labels()
    alphabet = powerset(all_labels)

    # epsilon check: the initial state may already be accepting
    if f.delta({}, epsilon=True) == PLTrue():
        final_states.add(initial_state)

    visited = set()  # type: Set
    to_be_visited = {initial_state}

    # breadth-style exploration of the reachable macrostate space
    while len(to_be_visited) != 0:
        for q in list(to_be_visited):
            to_be_visited.remove(q)
            for actions_set in alphabet:
                new_state = _make_transition(q, {label: True for label in actions_set})
                if new_state not in states:
                    states.add(new_state)
                    to_be_visited.add(new_state)
                transition_function.setdefault(q, {})[actions_set] = new_state

                if new_state not in visited:
                    visited.add(new_state)
                    if _is_true(new_state):
                        final_states.add(new_state)

    # materialize the explored graph as a symbolic automaton
    automaton = SymbolicAutomaton()
    state2idx = {}
    for state in states:
        state_idx = automaton.create_state()
        state2idx[state] = state_idx
        if state == initial_state:
            automaton.set_initial_state(state_idx)
        if state in final_states:
            automaton.set_accepting_state(state_idx, True)

    for source in transition_function:
        for symbol, destination in transition_function[source].items():
            source_idx = state2idx[source]
            dest_idx = state2idx[destination]
            # guard: symbols in the set appear positively, all other labels
            # appear negated, so each guard matches exactly one subset
            pos_expr = sympy.And(*map(sympy.Symbol, symbol))
            neg_expr = sympy.And(
                *map(
                    lambda x: sympy.Not(sympy.Symbol(x)), all_labels.difference(symbol)
                )
            )
            automaton.add_transition(
                (source_idx, sympy.And(pos_expr, neg_expr), dest_idx)
            )

    determinized = automaton.determinize()
    minimized = determinized.minimize()
    return minimized
| 32.497696 | 87 | 0.617413 |
from typing import Set, FrozenSet, Dict, cast, List
import sympy
from pythomata import SymbolicAutomaton, PropositionalInterpretation
from pythomata.impl.symbolic import SymbolicDFA
from sympy.logic.boolalg import BooleanFalse
from flloat.base import Formula
from flloat.delta import Delta
from flloat.helpers import powerset
from flloat.pl import (
PLFormula,
PLAtomic,
PLNot,
PLAnd,
PLOr,
PLImplies,
PLEquivalence,
PLTrue,
PLFalse,
to_sympy,
)
def find_atomics(formula: Formula) -> Set[PLAtomic]:
res = set()
if isinstance(formula, PLFormula):
res = formula.find_atomics()
elif isinstance(formula, PLAtomic):
res.add(formula)
else:
raise TypeError("Logic error: unexpected type.")
return res
def _transform_delta(f: Formula, formula2AtomicFormula):
if isinstance(f, PLNot):
return PLNot(_transform_delta(f, formula2AtomicFormula))
PLImplies, PLEquivalence)):
return type(f)(
[_transform_delta(subf, formula2AtomicFormula) for subf in f.formulas]
)
elif type(f) == PLTrue or type(f) == PLFalse:
return f
else:
return formula2AtomicFormula[f]
def _is_true(Q: FrozenSet[FrozenSet]):
if frozenset() in Q:
return True
conj = [
PLAnd([subf.s.delta(None, epsilon=True) for subf in q])
if len(q) >= 2
else next(iter(q)).s.delta(None, epsilon=True)
if len(q) == 1
else PLFalse()
for q in Q
]
if len(conj) == 0:
return False
else:
pl_conj = PLOr(conj) if len(conj) >= 2 else conj[0]
result = pl_conj.truth({})
return result
def _make_transition(
marco_q: FrozenSet[FrozenSet[PLAtomic]], i: PropositionalInterpretation
):
new_macrostate = set()
for q in marco_q:
delta_formulas = [cast(Delta, f.s).delta(i) for f in q]
atomics = [s for subf in delta_formulas for s in find_atomics(subf)]
atom2id = {
v: str(k) for k, v in enumerate(atomics)
}
id2atom = {v: k for k, v in atom2id.items()}
formula2atomic_formulas = {
f: PLAtomic(atom2id[f])
if f != PLTrue() and f != PLFalse()
else f
for f in atomics
}
transformed_delta_formulas = [
_transform_delta(f, formula2atomic_formulas) for f in delta_formulas
]
if len(transformed_delta_formulas) == 0:
conjunctions = PLTrue()
elif len(transformed_delta_formulas) == 1:
conjunctions = transformed_delta_formulas[0]
else:
conjunctions = PLAnd(transformed_delta_formulas)
formula = to_sympy(conjunctions, replace=atom2id)
all_models = list(sympy.satisfiable(formula, all_models=True))
if len(all_models) == 1 and all_models[0] == BooleanFalse():
models = []
elif len(all_models) == 1 and all_models[0] == {True: True}:
models = [set()]
else:
models = list(
map(lambda x: {k for k, v in x.items() if v is True}, all_models)
)
for min_model in models:
q_prime = frozenset({id2atom[s] for s in map(str, min_model)})
new_macrostate.add(q_prime)
return frozenset(new_macrostate)
def get_labels_from_macrostate(macrostate):
labels = set()
for states in macrostate:
for state in states:
labels = labels.union(state.s.find_labels())
return labels
def to_automaton(f) -> SymbolicDFA:
f = f.to_nnf()
initial_state = frozenset({frozenset({PLAtomic(f)})})
states = {initial_state}
final_states = set()
transition_function = {}
all_labels = f.find_labels()
alphabet = powerset(all_labels)
if f.delta({}, epsilon=True) == PLTrue():
final_states.add(initial_state)
visited = set()
to_be_visited = {initial_state}
while len(to_be_visited) != 0:
for q in list(to_be_visited):
to_be_visited.remove(q)
for actions_set in alphabet:
new_state = _make_transition(q, {label: True for label in actions_set})
if new_state not in states:
states.add(new_state)
to_be_visited.add(new_state)
transition_function.setdefault(q, {})[actions_set] = new_state
if new_state not in visited:
visited.add(new_state)
if _is_true(new_state):
final_states.add(new_state)
automaton = SymbolicAutomaton()
state2idx = {}
for state in states:
state_idx = automaton.create_state()
state2idx[state] = state_idx
if state == initial_state:
automaton.set_initial_state(state_idx)
if state in final_states:
automaton.set_accepting_state(state_idx, True)
for source in transition_function:
for symbol, destination in transition_function[source].items():
source_idx = state2idx[source]
dest_idx = state2idx[destination]
pos_expr = sympy.And(*map(sympy.Symbol, symbol))
neg_expr = sympy.And(
*map(
lambda x: sympy.Not(sympy.Symbol(x)), all_labels.difference(symbol)
)
)
automaton.add_transition(
(source_idx, sympy.And(pos_expr, neg_expr), dest_idx)
)
determinized = automaton.determinize()
minimized = determinized.minimize()
return minimized
| true | true |
1c2df8d4702fe1cbbe53da69f75b40db9ac2ed4c | 2,100 | py | Python | lib/bes/fs/file_metadata.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | lib/bes/fs/file_metadata.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | lib/bes/fs/file_metadata.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | #-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os
import os.path as path
from bes.common.check import check
from bes.sqlite.sqlite import sqlite
from bes.fs.file_util import file_util
from .detail.file_metadata_db import file_metadata_db
class file_metadata(object):
  'Per-file key/value metadata stored in a sqlite db under a root directory.'

  DEFAULT_DB_FILENAME = '.bes_file_metadata.db'

  def __init__(self, root_dir, db_filename = None):
    'Create a metadata store rooted at root_dir, backed by db_filename.'
    check.check_string(root_dir)
    check.check_string(db_filename, allow_none = True)
    self._root_dir = root_dir
    basename = db_filename or self.DEFAULT_DB_FILENAME
    if os.sep in basename:
      raise ValueError('db_filename should be just a filename not path: {}'.format(basename))
    self._db_filename = path.join(self._root_dir, basename)
    self._db = file_metadata_db(sqlite(self._db_filename))

  @property
  def db_filename(self):
    'Full path to the backing sqlite database file.'
    return self._db_filename

  @staticmethod
  def _clean_filename(filename):
    'Validate filename and strip any leading path separators.'
    check.check_string(filename)
    return file_util.lstrip_sep(filename)

  def get_values(self, what, filename):
    'Return every key/value stored for filename under what.'
    check.check_string(what)
    return self._db.get_values(what, self._clean_filename(filename))

  def replace_values(self, what, filename, values):
    'Replace all values stored for filename under what.'
    check.check_string(what)
    self._db.replace_values(what, self._clean_filename(filename), values)

  def set_value(self, what, filename, key, value):
    'Set a single key/value for filename under what.'
    check.check_string(what)
    self._db.set_value(what, self._clean_filename(filename), key, value)

  def get_value(self, what, filename, key):
    'Return the value of key for filename under what.'
    check.check_string(what)
    return self._db.get_value(what, self._clean_filename(filename), key)

  def clear(self, what, filename):
    'Remove all metadata for filename under what.'
    check.check_string(what)
    self._db.clear(what, self._clean_filename(filename))

  def _table_name(self, what, filename):
    'Delegate table-name computation to the underlying db.'
    return self._db._table_name(what, file_util.lstrip_sep(filename))
| 33.333333 | 96 | 0.741429 |
import os
import os.path as path
from bes.common.check import check
from bes.sqlite.sqlite import sqlite
from bes.fs.file_util import file_util
from .detail.file_metadata_db import file_metadata_db
class file_metadata(object):
DEFAULT_DB_FILENAME = '.bes_file_metadata.db'
def __init__(self, root_dir, db_filename = None):
check.check_string(root_dir)
check.check_string(db_filename, allow_none = True)
self._root_dir = root_dir
db_filename = db_filename or self.DEFAULT_DB_FILENAME
if os.sep in db_filename:
raise ValueError('db_filename should be just a filename not path: {}'.format(db_filename))
self._db_filename = path.join(self._root_dir, db_filename)
self._db = file_metadata_db(sqlite(self._db_filename))
@property
def db_filename(self):
return self._db_filename
def get_values(self, what, filename):
check.check_string(what)
check.check_string(filename)
filename = file_util.lstrip_sep(filename)
return self._db.get_values(what, filename)
def replace_values(self, what, filename, values):
check.check_string(what)
check.check_string(filename)
filename = file_util.lstrip_sep(filename)
self._db.replace_values(what, filename, values)
def set_value(self, what, filename, key, value):
check.check_string(what)
check.check_string(filename)
filename = file_util.lstrip_sep(filename)
self._db.set_value(what, filename, key, value)
def get_value(self, what, filename, key):
check.check_string(what)
check.check_string(filename)
filename = file_util.lstrip_sep(filename)
return self._db.get_value(what, filename, key)
def clear(self, what, filename):
check.check_string(what)
check.check_string(filename)
filename = file_util.lstrip_sep(filename)
self._db.clear(what, filename)
def _table_name(self, what, filename):
filename = file_util.lstrip_sep(filename)
return self._db._table_name(what, filename)
| true | true |
1c2df8d6876849a9a0dc4d55bdff271a4397f835 | 4,006 | py | Python | app_data2.py | rongqingpin/iOS_app_data | c9beecfb3878f64568b1d9626412ba6b346934cd | [
"MIT"
] | null | null | null | app_data2.py | rongqingpin/iOS_app_data | c9beecfb3878f64568b1d9626412ba6b346934cd | [
"MIT"
] | null | null | null | app_data2.py | rongqingpin/iOS_app_data | c9beecfb3878f64568b1d9626412ba6b346934cd | [
"MIT"
] | null | null | null | import pandas as pd
import csv
import json
import re
# load the category IDs
flc = '/Users/pinqingkan/Desktop/Codes/Project_iTunes/'
#flc = '/Users/Melanie/Library/Mobile Documents/com~apple~CloudDocs/Desktop/Codes/Project_iTunes/'
fname = flc + 'IDs/iosapp_categories.csv'
X0 = pd.read_csv(fname)
# remove repetitive ones: 'games', 'magazines & newspapers', 'stickers'
X0 = X0.drop(labels = [7, 28, 67], axis = 0)
Ncatg, N = X0.shape
# creat a list of desired data: the iTunes lookup fields kept for each app
app_keys = ['trackId',
            'artistId',
            'artistViewUrl', 'sellerUrl',
            'contentAdvisoryRating', 'trackContentRating', 'averageUserRating', 'averageUserRatingForCurrentVersion',
            'userRatingCount', 'userRatingCountForCurrentVersion',
            'currency', 'formattedPrice', 'price',
            'currentVersionReleaseDate', 'releaseDate', 'version',
            'genreIds', 'primaryGenreId',
            'fileSizeBytes',
            'screenshotUrls', 'ipadScreenshotUrls',
            'supportedDevices']
Ndict = len(app_keys)
# free-text fields saved separately (JSON) from the tabular data (CSV)
app_keys2 = ['trackId', 'description', 'features']
Nfeat = len(app_keys2)
# iTunes lookup endpoint; an app id is appended to form the query URL
url0 = 'https://itunes.apple.com/lookup?id='
# loop through the categories
# NOTE(review): only categories 31-32 are processed here; the commented
# range(33, Ncatg) suggests the remaining ones were run in other passes.
for icat in range(31, 33):#range(33, Ncatg)
    icategory = X0.Category.iloc[icat]
    icatid = X0.ID.iloc[icat]
    # record the data one file per category; sub-categories of games,
    # magazines & newspapers, and stickers share their parent's file
    if (icatid >= 7000) & (icatid < 8000): fname0 = 'games'
    elif (icatid >= 13000) & (icatid < 14000): fname0 = 'magazines-newspapers'
    elif icatid >= 16000: fname0 = 'stickers'
    else: fname0 = icategory
    print(icategory)
    # load the new links; a category with no links file is skipped entirely
    # (the except FileNotFoundError below covers this whole block)
    try:
        fname = flc + 'isoapp_links/iosapp_' + icategory + '_links_072017.txt'
        with open(fname, 'r') as file:
            links = file.readlines()
        napp = len(links)
        for iapp in range(napp):
            # extract the numeric app id from the store URL
            match = re.search('id([\d]+)\?mt', links[iapp])
            if match:
                iurl = url0 + match.group(1)
                # load data from website
                Y = pd.read_json(iurl)
                # initialize the data: every key defaults to None
                app_dict = dict.fromkeys(app_keys)
                app_feat = dict.fromkeys(app_keys2)
                if len(Y) > 0:
                    Y = Y['results'][0]
                    # format & record the data
                    for ikey in app_keys:
                        if ikey in Y.keys():
                            # list-valued fields are reduced to their length
                            if ikey in ['screenshotUrls', 'ipadScreenshotUrls',
                                        'supportedDevices',
                                        'artistViewUrl', 'sellerUrl',
                                        'genreIds']:
                                app_dict[ikey] = len(Y[ikey])
                            elif ikey in ['version']:
                                # NOTE(review): versions with non-ASCII chars
                                # are silently dropped (stay None) -- confirm
                                # this is intended
                                if len(Y[ikey].encode()) == len(Y[ikey]):
                                    app_dict[ikey] = Y[ikey]
                            else:
                                app_dict[ikey] = Y[ikey]
                        else:
                            app_dict[ikey] = 0
                    # record the description info
                    for ikey in app_keys2:
                        if ikey in Y.keys():
                            app_feat[ikey] = Y[ikey]
                        else:
                            app_feat[ikey] = 0
                # convert into dataframe
                y = pd.DataFrame(app_dict, index = [0])
                # record the app data (appended as one tab-separated row)
                fname = flc + 'iosapp_data/app_data_' + fname0 + '.csv'
                with open(fname, 'a') as file:
                    csvwriter = csv.writer(file, delimiter = '\t')
                    csvwriter.writerow(y.iloc[0,:].values)
                # record the description (one JSON object per line)
                fname = flc + 'iosapp_data/app_descp_' + fname0 + '.json'
                with open(fname, 'a') as file:
                    json.dump(app_feat, file)
                    file.write('\n')
except FileNotFoundError: continue | 38.152381 | 117 | 0.507239 | import pandas as pd
import csv
import json
import re
flc = '/Users/pinqingkan/Desktop/Codes/Project_iTunes/'
fname = flc + 'IDs/iosapp_categories.csv'
X0 = pd.read_csv(fname)
X0 = X0.drop(labels = [7, 28, 67], axis = 0)
Ncatg, N = X0.shape
app_keys = ['trackId',
'artistId',
'artistViewUrl', 'sellerUrl',
'contentAdvisoryRating', 'trackContentRating', 'averageUserRating', 'averageUserRatingForCurrentVersion',
'userRatingCount', 'userRatingCountForCurrentVersion',
'currency', 'formattedPrice', 'price',
'currentVersionReleaseDate', 'releaseDate', 'version',
'genreIds', 'primaryGenreId',
'fileSizeBytes',
'screenshotUrls', 'ipadScreenshotUrls',
'supportedDevices']
Ndict = len(app_keys)
app_keys2 = ['trackId', 'description', 'features']
Nfeat = len(app_keys2)
url0 = 'https://itunes.apple.com/lookup?id='
for icat in range(31, 33):
icategory = X0.Category.iloc[icat]
icatid = X0.ID.iloc[icat]
if (icatid >= 7000) & (icatid < 8000): fname0 = 'games'
elif (icatid >= 13000) & (icatid < 14000): fname0 = 'magazines-newspapers'
elif icatid >= 16000: fname0 = 'stickers'
else: fname0 = icategory
print(icategory)
try:
fname = flc + 'isoapp_links/iosapp_' + icategory + '_links_072017.txt'
with open(fname, 'r') as file:
links = file.readlines()
napp = len(links)
for iapp in range(napp):
match = re.search('id([\d]+)\?mt', links[iapp])
if match:
iurl = url0 + match.group(1)
Y = pd.read_json(iurl)
app_dict = dict.fromkeys(app_keys)
app_feat = dict.fromkeys(app_keys2)
if len(Y) > 0:
Y = Y['results'][0]
for ikey in app_keys:
if ikey in Y.keys():
if ikey in ['screenshotUrls', 'ipadScreenshotUrls',
'supportedDevices',
'artistViewUrl', 'sellerUrl',
'genreIds']:
app_dict[ikey] = len(Y[ikey])
elif ikey in ['version']:
if len(Y[ikey].encode()) == len(Y[ikey]):
app_dict[ikey] = Y[ikey]
else:
app_dict[ikey] = Y[ikey]
else:
app_dict[ikey] = 0
for ikey in app_keys2:
if ikey in Y.keys():
app_feat[ikey] = Y[ikey]
else:
app_feat[ikey] = 0
y = pd.DataFrame(app_dict, index = [0])
fname = flc + 'iosapp_data/app_data_' + fname0 + '.csv'
with open(fname, 'a') as file:
csvwriter = csv.writer(file, delimiter = '\t')
csvwriter.writerow(y.iloc[0,:].values)
fname = flc + 'iosapp_data/app_descp_' + fname0 + '.json'
with open(fname, 'a') as file:
json.dump(app_feat, file)
file.write('\n')
except FileNotFoundError: continue | true | true |
1c2df98873fe0c0b722513464ffa00ef6d1ec3c8 | 9,758 | py | Python | userbot/plugins/pmpermit_menu.py | midhunkm1294-bit/TeleBot | b4309fb662e834d9d3826172b69fd07d42ef83a2 | [
"MIT"
] | null | null | null | userbot/plugins/pmpermit_menu.py | midhunkm1294-bit/TeleBot | b4309fb662e834d9d3826172b69fd07d42ef83a2 | [
"MIT"
] | null | null | null | userbot/plugins/pmpermit_menu.py | midhunkm1294-bit/TeleBot | b4309fb662e834d9d3826172b69fd07d42ef83a2 | [
"MIT"
] | null | null | null | # if you change credits, you get anal cancer and get murdered by russians in 3 days.
"""
Support chatbox for pmpermit.
Used by incoming messages with trigger as /start
Will not work for already approved people.
Credits: written by TONY STARK {@MARIODEVS}
"""
import asyncio
import io
import telethon.sync
from telethon.tl.functions.users import GetFullUserRequest
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME, LESS_SPAMMY
from userbot.utils import admin_cmd
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "HEY BITCH! IT'S ME FRIDAY : @FRIDAYSUPPORTOFFICIAL"
PREV_REPLY_MESSAGE = {}
@command(pattern=r"\/start", incoming=True)
async def _(event):
chat_id = event.from_id
userid = event.sender_id
if not pmpermit_sql.is_approved(chat_id):
chat = await event.get_chat()
if event.fwd_from:
return
if event.is_private:
Nudas = ("__Please state your gender.__\n"
"`1`. FEMALE\n"
"`2`. MALE\n"
"`3`. UNKNOWN\n")
PM = ("`Hello. You are accessing the availabe menu of my pro master,`"
f"{DEFAULTUSER}.\n"
"__Let's make this smooth and let me know why you are here.__\n"
"**Choose one of the following reasons why you are here:**\n\n"
"`1`. To chat with my master\n"
"`2`. To spam my master's inbox.\n"
"`3`. To send nudes.\n"
"`4`. To enquire something\n"
"`5`. To request something\n")
ONE = ("__Okay. Your request has been registered. Do not spam my master's inbox.You can expect a reply within 24 light years. He is a busy man, unlike you probably .__\n\n"
"**⚠️ You will be blocked and reported if you spam nibba. ⚠️**\n\n"
"__Use__ `/start` __to go back to the main menu.__")
TWO = (" `███████▄▄███████████▄ \n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓███░░░░░░░░░░░░█\n██████▀▀▀█░░░░██████▀ \n░░░░░░░░░█░░░░█ \n░░░░░░░░░░█░░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░░▀▀ `\n\n**So uncool man , this is not your father home. Go bother someone else. You have been blocked and reported until further notice.**")
FOUR = ("__Okay. My master has not seen your message yet.He usually responds to people,though idk about retarted ones.__\n __He'll respond when he comes back, if he wants to.There's already a lot of pending messages😶__\n **Please do not spam unless you wish to be blocked and reported.**")
FIVE = ("`Okay. please have the basic manners as to not bother my master too much. If he wishes to help you, he will respond to you soon.`\n**Do not ask repeatdly else you will be blocked and reported.**")
LWARN = ("**This is your last warning. DO NOT send another message else you will be blocked and reported. Keep patience. My master will respond you ASAP.**\n__Use__ `/start` __to go back to the main menu.__")
async with borg.conversation(chat) as conv:
await borg.send_message(chat, PM)
chat_id = event.from_id
response = await conv.get_response(chat)
y = response.text
if y == "1":
await borg.send_message(chat, ONE)
response = await conv.get_response(chat)
await event.delete()
if not response.text == "/start":
await response.delete()
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "2":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "3":
await borg.send_message(chat, Nudas)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
x = response.text
if x == "1":
await borg.send_message(chat, "`Oh my, you're very much welcome here ;).\nPlease drop your offerings and let my master judge if you have good heart <3.`\n\n **Please don't flood my inbox, we'll have a nice convo once i come back ;D**")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif x == "2":
await borg.send_message(chat, "**You nigga gay af to send a guy like my your male nudes. \nLeave immediately else you become the ultimate gayest gay the gay world has ever seen. I will reply you when i get online.**")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif x == "3":
await borg.send_message(chat, "`Please decide a gender for yourself before sending your nudes here,\n not that i'm judging if you're a helicopter or a banana but yeah, If you are anything else than a female Homo-Sapien,\n Do not send more messages and let my master see for himself if he wants to talk with you.`")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
else:
await borg.send_message(chat, "__You have entered an invalid command. Please send__ `/start` __again or do not send another message if you do not wish to be blocked and reported.__")
response = await conv.get_response(chat)
if not response.text.startswith("/start"):
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "4":
await borg.send_message(chat, FOUR)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
if not response.text == "/start":
await borg.send_message(chat, LWARN)
await event.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "5":
await borg.send_message(chat,FIVE)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
else:
await borg.send_message(chat, "`You have entered an invalid command. Please send /start again or do not send another message if you do not wish to be blocked and reported.`")
response = await conv.get_response(chat)
z = response.text
if not z == "/start":
await borg.send_message(chat, LWARN)
await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
| 58.431138 | 441 | 0.563435 |
import asyncio
import io
import telethon.sync
from telethon.tl.functions.users import GetFullUserRequest
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME, LESS_SPAMMY
from userbot.utils import admin_cmd
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "HEY BITCH! IT'S ME FRIDAY : @FRIDAYSUPPORTOFFICIAL"
PREV_REPLY_MESSAGE = {}
@command(pattern=r"\/start", incoming=True)
async def _(event):
chat_id = event.from_id
userid = event.sender_id
if not pmpermit_sql.is_approved(chat_id):
chat = await event.get_chat()
if event.fwd_from:
return
if event.is_private:
Nudas = ("__Please state your gender.__\n"
"`1`. FEMALE\n"
"`2`. MALE\n"
"`3`. UNKNOWN\n")
PM = ("`Hello. You are accessing the availabe menu of my pro master,`"
f"{DEFAULTUSER}.\n"
"__Let's make this smooth and let me know why you are here.__\n"
"**Choose one of the following reasons why you are here:**\n\n"
"`1`. To chat with my master\n"
"`2`. To spam my master's inbox.\n"
"`3`. To send nudes.\n"
"`4`. To enquire something\n"
"`5`. To request something\n")
ONE = ("__Okay. Your request has been registered. Do not spam my master's inbox.You can expect a reply within 24 light years. He is a busy man, unlike you probably .__\n\n"
"**⚠️ You will be blocked and reported if you spam nibba. ⚠️**\n\n"
"__Use__ `/start` __to go back to the main menu.__")
TWO = (" `███████▄▄███████████▄ \n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓███░░░░░░░░░░░░█\n██████▀▀▀█░░░░██████▀ \n░░░░░░░░░█░░░░█ \n░░░░░░░░░░█░░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░░▀▀ `\n\n**So uncool man , this is not your father home. Go bother someone else. You have been blocked and reported until further notice.**")
FOUR = ("__Okay. My master has not seen your message yet.He usually responds to people,though idk about retarted ones.__\n __He'll respond when he comes back, if he wants to.There's already a lot of pending messages😶__\n **Please do not spam unless you wish to be blocked and reported.**")
FIVE = ("`Okay. please have the basic manners as to not bother my master too much. If he wishes to help you, he will respond to you soon.`\n**Do not ask repeatdly else you will be blocked and reported.**")
LWARN = ("**This is your last warning. DO NOT send another message else you will be blocked and reported. Keep patience. My master will respond you ASAP.**\n__Use__ `/start` __to go back to the main menu.__")
async with borg.conversation(chat) as conv:
await borg.send_message(chat, PM)
chat_id = event.from_id
response = await conv.get_response(chat)
y = response.text
if y == "1":
await borg.send_message(chat, ONE)
response = await conv.get_response(chat)
await event.delete()
if not response.text == "/start":
await response.delete()
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "2":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "3":
await borg.send_message(chat, Nudas)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
x = response.text
if x == "1":
await borg.send_message(chat, "`Oh my, you're very much welcome here ;).\nPlease drop your offerings and let my master judge if you have good heart <3.`\n\n **Please don't flood my inbox, we'll have a nice convo once i come back ;D**")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif x == "2":
await borg.send_message(chat, "**You nigga gay af to send a guy like my your male nudes. \nLeave immediately else you become the ultimate gayest gay the gay world has ever seen. I will reply you when i get online.**")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif x == "3":
await borg.send_message(chat, "`Please decide a gender for yourself before sending your nudes here,\n not that i'm judging if you're a helicopter or a banana but yeah, If you are anything else than a female Homo-Sapien,\n Do not send more messages and let my master see for himself if he wants to talk with you.`")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
else:
await borg.send_message(chat, "__You have entered an invalid command. Please send__ `/start` __again or do not send another message if you do not wish to be blocked and reported.__")
response = await conv.get_response(chat)
if not response.text.startswith("/start"):
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "4":
await borg.send_message(chat, FOUR)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
if not response.text == "/start":
await borg.send_message(chat, LWARN)
await event.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "5":
await borg.send_message(chat,FIVE)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
else:
await borg.send_message(chat, "`You have entered an invalid command. Please send /start again or do not send another message if you do not wish to be blocked and reported.`")
response = await conv.get_response(chat)
z = response.text
if not z == "/start":
await borg.send_message(chat, LWARN)
await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
| true | true |
1c2df992be4a199d35b73f2d802812983d8cdbe0 | 332 | py | Python | nltk/test/inference_fixt.py | smoitra87/nltk | ca357e5cdcdb137f40c45346bb8bfea618dd863f | [
"Apache-2.0"
] | 1 | 2020-07-08T11:26:30.000Z | 2020-07-08T11:26:30.000Z | nltk/test/inference_fixt.py | smoitra87/nltk | ca357e5cdcdb137f40c45346bb8bfea618dd863f | [
"Apache-2.0"
] | null | null | null | nltk/test/inference_fixt.py | smoitra87/nltk | ca357e5cdcdb137f40c45346bb8bfea618dd863f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
def setup_module(module):
from nose import SkipTest
from nltk.inference.mace import Mace
try:
m = Mace()
m._find_binary("mace4")
except LookupError as e:
raise SkipTest(
"Mace4/Prover9 is not available so inference.doctest was skipped"
) from e
| 22.133333 | 77 | 0.608434 |
def setup_module(module):
from nose import SkipTest
from nltk.inference.mace import Mace
try:
m = Mace()
m._find_binary("mace4")
except LookupError as e:
raise SkipTest(
"Mace4/Prover9 is not available so inference.doctest was skipped"
) from e
| true | true |
1c2dfb973cecc953979c917b147c635120afc5ab | 6,710 | py | Python | pyEX/stocks/batch.py | cjwang/pyEX | 1b5f40f80110afaa4809ea48fac067033c7bdf89 | [
"Apache-2.0"
] | 1 | 2020-10-11T07:05:49.000Z | 2020-10-11T07:05:49.000Z | pyEX/stocks/batch.py | cjwang/pyEX | 1b5f40f80110afaa4809ea48fac067033c7bdf89 | [
"Apache-2.0"
] | null | null | null | pyEX/stocks/batch.py | cjwang/pyEX | 1b5f40f80110afaa4809ea48fac067033c7bdf89 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import itertools
import pandas as pd
from multiprocessing.pool import ThreadPool
from ..common import _TIMEFRAME_CHART, _getJson, _raiseIfNotStr, PyEXception, _strOrDate, _toDatetime, _BATCH_TYPES
from .fundamentals import _dividendsToDF, _earningsToDF, _financialsToDF, _splitsToDF
from .news import _newsToDF
from .prices import chart, _bookToDF, _chartToDF
from .profiles import _companyToDF, _peersToDF
from .research import _statsToDF
_MAPPING = {
'book': _bookToDF,
'chart': _chartToDF,
'company': _companyToDF,
'dividends': _dividendsToDF,
'earnings': _earningsToDF,
'financials': _financialsToDF,
'stats': _statsToDF,
'news': _newsToDF,
'peers': _peersToDF,
'splits': _splitsToDF
}
def batch(symbols, fields=None, range_='1m', last=10, token='', version='', filter=''):
'''Batch several data requests into one invocation
https://iexcloud.io/docs/api/#batch-requests
Args:
symbols (list); List of tickers to request
fields (list); List of fields to request
range_ (string); Date range for chart
last (int);
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
dict: results in json
'''
fields = fields or _BATCH_TYPES[:10] # limit 10
if not isinstance(symbols, [].__class__):
if not isinstance(symbols, str):
raise PyEXception('batch expects string or list of strings for symbols argument')
if isinstance(fields, str):
fields = [fields]
if range_ not in _TIMEFRAME_CHART:
raise PyEXception('Range must be in %s' % str(_TIMEFRAME_CHART))
if isinstance(symbols, str):
route = 'stock/{}/batch?types={}&range={}&last={}'.format(symbols, ','.join(fields), range_, last)
return _getJson(route, token, version, filter)
if len(symbols) > 100:
raise PyEXception('IEX will only handle up to 100 symbols at a time!')
route = 'stock/market/batch?symbols={}&types={}&range={}&last={}'.format(','.join(symbols), ','.join(fields), range_, last)
return _getJson(route, token, version, filter)
def batchDF(symbols, fields=None, range_='1m', last=10, token='', version='', filter=''):
'''Batch several data requests into one invocation
https://iexcloud.io/docs/api/#batch-requests
Args:
symbols (list); List of tickers to request
fields (list); List of fields to request
range_ (string); Date range for chart
last (int);
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
DataFrame: results in json
'''
x = batch(symbols, fields, range_, last, token, version, filter)
ret = {}
if isinstance(symbols, str):
for field in x.keys():
ret[field] = _MAPPING.get(field, pd.io.json.json_normalize)(x[field])
else:
for symbol in x.keys():
for field in x[symbol].keys():
if field not in ret:
ret[field] = pd.DataFrame()
dat = x[symbol][field]
dat = _MAPPING.get(field, pd.io.json.json_normalize)(dat)
dat['symbol'] = symbol
ret[field] = pd.concat([ret[field], dat], sort=True)
return ret
def bulkBatch(symbols, fields=None, range_='1m', last=10, token='', version='', filter=''):
'''Optimized batch to fetch as much as possible at once
https://iexcloud.io/docs/api/#batch-requests
Args:
symbols (list); List of tickers to request
fields (list); List of fields to request
range_ (string); Date range for chart
last (int);
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
dict: results in json
'''
fields = fields or _BATCH_TYPES
args = []
empty_data = []
list_orig = empty_data.__class__
if not isinstance(symbols, list_orig):
raise PyEXception('Symbols must be of type list')
for i in range(0, len(symbols), 99):
args.append((symbols[i:i + 99], fields, range_, last, token, version, filter))
pool = ThreadPool(20)
rets = pool.starmap(batch, args)
pool.close()
ret = {}
for i, d in enumerate(rets):
symbols_subset = args[i][0]
if len(d) != len(symbols_subset):
empty_data.extend(list_orig(set(symbols_subset) - set(d.keys())))
ret.update(d)
for k in empty_data:
if k not in ret:
if isinstance(fields, str):
ret[k] = {}
else:
ret[k] = {x: {} for x in fields}
return ret
def bulkBatchDF(symbols, fields=None, range_='1m', last=10, token='', version='', filter=''):
'''Optimized batch to fetch as much as possible at once
https://iexcloud.io/docs/api/#batch-requests
Args:
symbols (list); List of tickers to request
fields (list); List of fields to request
range_ (string); Date range for chart
last (int);
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
DataFrame: results in json
'''
dat = bulkBatch(symbols, fields, range_, last, token, version, filter)
ret = {}
for symbol in dat:
for field in dat[symbol]:
if field not in ret:
ret[field] = pd.DataFrame()
d = dat[symbol][field]
d = _MAPPING[field](d)
d['symbol'] = symbol
ret[field] = pd.concat([ret[field], d], sort=True)
return ret
def bulkMinuteBars(symbol, dates, token='', version='', filter=''):
'''fetch many dates worth of minute-bars for a given symbol'''
_raiseIfNotStr(symbol)
dates = [_strOrDate(date) for date in dates]
list_orig = dates.__class__
args = []
for date in dates:
args.append((symbol, '1d', date, token, version, filter))
pool = ThreadPool(20)
rets = pool.starmap(chart, args)
pool.close()
return list_orig(itertools.chain(*rets))
def bulkMinuteBarsDF(symbol, dates, token='', version='', filter=''):
'''fetch many dates worth of minute-bars for a given symbol'''
data = bulkMinuteBars(symbol, dates, token, version, filter)
df = pd.DataFrame(data)
if df.empty:
return df
_toDatetime(df)
df.set_index(['date', 'minute'], inplace=True)
return df
| 31.209302 | 127 | 0.619821 |
import itertools
import pandas as pd
from multiprocessing.pool import ThreadPool
from ..common import _TIMEFRAME_CHART, _getJson, _raiseIfNotStr, PyEXception, _strOrDate, _toDatetime, _BATCH_TYPES
from .fundamentals import _dividendsToDF, _earningsToDF, _financialsToDF, _splitsToDF
from .news import _newsToDF
from .prices import chart, _bookToDF, _chartToDF
from .profiles import _companyToDF, _peersToDF
from .research import _statsToDF
_MAPPING = {
'book': _bookToDF,
'chart': _chartToDF,
'company': _companyToDF,
'dividends': _dividendsToDF,
'earnings': _earningsToDF,
'financials': _financialsToDF,
'stats': _statsToDF,
'news': _newsToDF,
'peers': _peersToDF,
'splits': _splitsToDF
}
def batch(symbols, fields=None, range_='1m', last=10, token='', version='', filter=''):
fields = fields or _BATCH_TYPES[:10]
if not isinstance(symbols, [].__class__):
if not isinstance(symbols, str):
raise PyEXception('batch expects string or list of strings for symbols argument')
if isinstance(fields, str):
fields = [fields]
if range_ not in _TIMEFRAME_CHART:
raise PyEXception('Range must be in %s' % str(_TIMEFRAME_CHART))
if isinstance(symbols, str):
route = 'stock/{}/batch?types={}&range={}&last={}'.format(symbols, ','.join(fields), range_, last)
return _getJson(route, token, version, filter)
if len(symbols) > 100:
raise PyEXception('IEX will only handle up to 100 symbols at a time!')
route = 'stock/market/batch?symbols={}&types={}&range={}&last={}'.format(','.join(symbols), ','.join(fields), range_, last)
return _getJson(route, token, version, filter)
def batchDF(symbols, fields=None, range_='1m', last=10, token='', version='', filter=''):
x = batch(symbols, fields, range_, last, token, version, filter)
ret = {}
if isinstance(symbols, str):
for field in x.keys():
ret[field] = _MAPPING.get(field, pd.io.json.json_normalize)(x[field])
else:
for symbol in x.keys():
for field in x[symbol].keys():
if field not in ret:
ret[field] = pd.DataFrame()
dat = x[symbol][field]
dat = _MAPPING.get(field, pd.io.json.json_normalize)(dat)
dat['symbol'] = symbol
ret[field] = pd.concat([ret[field], dat], sort=True)
return ret
def bulkBatch(symbols, fields=None, range_='1m', last=10, token='', version='', filter=''):
fields = fields or _BATCH_TYPES
args = []
empty_data = []
list_orig = empty_data.__class__
if not isinstance(symbols, list_orig):
raise PyEXception('Symbols must be of type list')
for i in range(0, len(symbols), 99):
args.append((symbols[i:i + 99], fields, range_, last, token, version, filter))
pool = ThreadPool(20)
rets = pool.starmap(batch, args)
pool.close()
ret = {}
for i, d in enumerate(rets):
symbols_subset = args[i][0]
if len(d) != len(symbols_subset):
empty_data.extend(list_orig(set(symbols_subset) - set(d.keys())))
ret.update(d)
for k in empty_data:
if k not in ret:
if isinstance(fields, str):
ret[k] = {}
else:
ret[k] = {x: {} for x in fields}
return ret
def bulkBatchDF(symbols, fields=None, range_='1m', last=10, token='', version='', filter=''):
dat = bulkBatch(symbols, fields, range_, last, token, version, filter)
ret = {}
for symbol in dat:
for field in dat[symbol]:
if field not in ret:
ret[field] = pd.DataFrame()
d = dat[symbol][field]
d = _MAPPING[field](d)
d['symbol'] = symbol
ret[field] = pd.concat([ret[field], d], sort=True)
return ret
def bulkMinuteBars(symbol, dates, token='', version='', filter=''):
_raiseIfNotStr(symbol)
dates = [_strOrDate(date) for date in dates]
list_orig = dates.__class__
args = []
for date in dates:
args.append((symbol, '1d', date, token, version, filter))
pool = ThreadPool(20)
rets = pool.starmap(chart, args)
pool.close()
return list_orig(itertools.chain(*rets))
def bulkMinuteBarsDF(symbol, dates, token='', version='', filter=''):
data = bulkMinuteBars(symbol, dates, token, version, filter)
df = pd.DataFrame(data)
if df.empty:
return df
_toDatetime(df)
df.set_index(['date', 'minute'], inplace=True)
return df
| true | true |
1c2dfce3b790c8a524ac769021eb8e657cc7add7 | 3,816 | py | Python | UnityPy/math/Vector3.py | hydrargyrum/UnityPy | d119f5a27fa56270630ff40d7762cdf9b4abbac3 | [
"MIT"
] | null | null | null | UnityPy/math/Vector3.py | hydrargyrum/UnityPy | d119f5a27fa56270630ff40d7762cdf9b4abbac3 | [
"MIT"
] | null | null | null | UnityPy/math/Vector3.py | hydrargyrum/UnityPy | d119f5a27fa56270630ff40d7762cdf9b4abbac3 | [
"MIT"
] | null | null | null | class Vector3:
def __init__(self, x : float, y : float, z : float):
self.X = x
self.Y = y
self.Z = z
"""
using System;
using System.Runtime.InteropServices;
namespace AssetStudio
{
[StructLayout(LayoutKind.Sequential, Pack = 4)]
public struct Vector3 : IEquatable<Vector3>
{
public float X;
public float Y;
public float Z;
public Vector3(float x, float y, float z)
{
X = x;
Y = y;
Z = z;
}
public float this[int index]
{
get
{
switch (index)
{
case 0: return X;
case 1: return Y;
case 2: return Z;
default: throw new ArgumentOutOfRangeException(nameof(index), "Invalid Vector3 index!");
}
}
set
{
switch (index)
{
case 0: X = value; break;
case 1: Y = value; break;
case 2: Z = value; break;
default: throw new ArgumentOutOfRangeException(nameof(index), "Invalid Vector3 index!");
}
}
}
public override int GetHashCode()
{
return X.GetHashCode() ^ (Y.GetHashCode() << 2) ^ (Z.GetHashCode() >> 2);
}
public override bool Equals(object other)
{
if (!(other is Vector3))
return false;
return Equals((Vector3)other);
}
public bool Equals(Vector3 other)
{
return X.Equals(other.X) && Y.Equals(other.Y) && Z.Equals(other.Z);
}
public void Normalize()
{
var length = Length();
if (length > kEpsilon)
{
var invNorm = 1.0f / length;
X *= invNorm;
Y *= invNorm;
Z *= invNorm;
}
else
{
X = 0;
Y = 0;
Z = 0;
}
}
public float Length()
{
return (float)Math.Sqrt(LengthSquared());
}
public float LengthSquared()
{
return X * X + Y * Y + Z * Z;
}
public static Vector3 Zero => new Vector3();
public static Vector3 operator +(Vector3 a, Vector3 b)
{
return new Vector3(a.X + b.X, a.Y + b.Y, a.Z + b.Z);
}
public static Vector3 operator -(Vector3 a, Vector3 b)
{
return new Vector3(a.X - b.X, a.Y - b.Y, a.Z - b.Z);
}
public static Vector3 operator -(Vector3 a)
{
return new Vector3(-a.X, -a.Y, -a.Z);
}
public static Vector3 operator *(Vector3 a, float d)
{
return new Vector3(a.X * d, a.Y * d, a.Z * d);
}
public static Vector3 operator *(float d, Vector3 a)
{
return new Vector3(a.X * d, a.Y * d, a.Z * d);
}
public static Vector3 operator /(Vector3 a, float d)
{
return new Vector3(a.X / d, a.Y / d, a.Z / d);
}
public static bool operator ==(Vector3 lhs, Vector3 rhs)
{
return (lhs - rhs).LengthSquared() < kEpsilon * kEpsilon;
}
public static bool operator !=(Vector3 lhs, Vector3 rhs)
{
return !(lhs == rhs);
}
public static implicit operator Vector2(Vector3 v)
{
return new Vector2(v.X, v.Y);
}
public static implicit operator Vector4(Vector3 v)
{
return new Vector4(v.X, v.Y, v.Z, 0.0F);
}
private const float kEpsilon = 0.00001F;
}
}
""" | 24.941176 | 108 | 0.446803 | class Vector3:
def __init__(self, x : float, y : float, z : float):
self.X = x
self.Y = y
self.Z = z
| true | true |
1c2dfd57a1efc1a0c1debe4d4a7acaa87383ef5d | 61 | py | Python | auth_main/__init__.py | ajskrilla/PAS_pw_check | 056b09e2975b7e1d00c81180d4bdd71bfac91b4d | [
"Apache-2.0"
] | null | null | null | auth_main/__init__.py | ajskrilla/PAS_pw_check | 056b09e2975b7e1d00c81180d4bdd71bfac91b4d | [
"Apache-2.0"
] | null | null | null | auth_main/__init__.py | ajskrilla/PAS_pw_check | 056b09e2975b7e1d00c81180d4bdd71bfac91b4d | [
"Apache-2.0"
] | null | null | null | #from auth import saveConfig
#from auth_check import sec_test | 30.5 | 32 | 0.852459 | true | true | |
1c2dfe22798ba84d8d56669e7d65e0ff7e5d5fff | 38,583 | py | Python | graph_ZSL_w_argmin.py | kfirsalo/New-Graph-ZSL | 76ccd15e65e915858dca9d9097ddf9252e4250d3 | [
"MIT"
] | null | null | null | graph_ZSL_w_argmin.py | kfirsalo/New-Graph-ZSL | 76ccd15e65e915858dca9d9097ddf9252e4250d3 | [
"MIT"
] | null | null | null | graph_ZSL_w_argmin.py | kfirsalo/New-Graph-ZSL | 76ccd15e65e915858dca9d9097ddf9252e4250d3 | [
"MIT"
] | null | null | null | import json
import multiprocessing
from datetime import datetime
from node2vec import Node2Vec
import pandas as pd
import numpy as np
import networkx as nx
import pickle
import os
import argparse
from numpy import linalg as la
from sklearn.metrics.pairwise import cosine_similarity
from sklearn import model_selection as sk_ms
from sklearn.metrics import confusion_matrix
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
import random
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
from itertools import chain
from utils import set_gpu
from utlis_graph_zsl import hist_plot, plot_confusion_matrix, plots_2measures_vs_parameter, grid
from IMDb_data_preparation_E2V import MoviesGraph
random.seed(0)
np.random.seed(0)
HEADER = ['movie_weights',
'labels_weights',
'embedding_type',
'embedding_dimension',
'norma_type',
'class_edges_threshold',
'seen_percentage',
'data_name',
'awa2_attributes_weight',
'acc',
'seen_acc',
'unseen_acc']
class GraphImporter:
    """
    Class responsible to import or create the relevant graph:
    the IMDb movie graphs or the AWA2 image + knowledge graph.
    """
    def __init__(self, args):
        self.data_name = args.data_name
        self.graph_percentage = args.graph_percentage
        self.threshold = args.threshold
        self.args = args

    def import_imdb_multi_graph(self, weights):
        """
        Make the our_imdb multi graph.
        :param weights: [movie-movie edge weight, movie-class edge weight]
        :return: the weighted multi graph
        """
        weights_dict = {'movies_edges': weights[0], 'labels_edges': weights[1]}
        dict_paths = {'cast': 'data_set/IMDb title_principals.csv', 'genre': 'data_set/IMDb movies.csv'}
        imdb = MoviesGraph(dict_paths, self.args.graph_percentage)
        gnx = imdb.create_graph()
        labels = imdb.labels2int(gnx)
        # cast to float for consistency with import_imdb_weighted_graph, which
        # already converts the (possibly string-typed) threshold explicitly
        knowledge_gnx, knowledge_data = imdb.create_knowledge_graph(labels, float(self.threshold))
        multi_gnx = imdb.weighted_multi_graph(gnx, knowledge_gnx, labels, weights_dict)
        return multi_gnx

    def import_imdb_weighted_graph(self, weights):
        """
        Make the our_imdb simple weighted graph.
        :param weights: [movie-movie edge weight, movie-class edge weight]
        :return: the weighted graph
        """
        weights_dict = {'movies_edges': weights[0], 'labels_edges': weights[1]}
        dict_paths = {'cast': 'data_set/IMDb title_principals.csv', 'genre': 'data_set/IMDb movies.csv'}
        imdb = MoviesGraph(dict_paths, self.args.graph_percentage)
        gnx = imdb.create_graph()
        labels = imdb.labels2int(gnx)
        knowledge_gnx, knowledge_data = imdb.create_knowledge_graph(labels, float(self.threshold))
        weighted_graph = imdb.weighted_graph(gnx, knowledge_gnx, labels, weights_dict)
        return weighted_graph

    def import_graph(self):
        """
        Read '<data_name>/<data_name>.txt' (one edge per line) into a multi
        graph. Nodes are keyed by the first character of their name (the node
        type) and each edge is keyed by the sorted pair of endpoint types.
        """
        graph = nx.MultiGraph()
        data_path = self.data_name + '.txt'
        path = os.path.join(self.data_name, data_path)
        with open(path, 'r') as f:
            for line in f:
                items = line.strip().split()
                att1 = str(items[0][0])
                att2 = str(items[1][0])
                graph.add_node(items[0], key=att1)
                graph.add_node(items[1], key=att2)
                sort_att = np.array([att1, att2])
                sort_att = sorted(sort_att)
                graph.add_edge(items[0], items[1], key=str(sort_att[0]) + str(sort_att[1]))
        return graph

    def import_awa2_graph(self, awa2_weights, specific_split, att_weight):
        """
        Build the full AWA2 graph: image KNN graph + attributed knowledge graph
        + image-label edges, combined with the given edge-type weights.
        :param awa2_weights: [classes_edges weight, labels_edges weight]
        :param specific_split: if True, also return the dataset's seen/unseen split
        :param att_weight: weight of the attribute edges in the knowledge graph
        :return: (graph, split) where split is None unless specific_split
        """
        from images_graph_creator import Awa2GraphCreator, ImagesEmbeddings
        weights_dict = {'classes_edges': awa2_weights[0], 'labels_edges': awa2_weights[1]}
        set_gpu(self.args.gpu)
        graph_preparation = ImagesEmbeddings(self.args)
        dict_name_class, dict_class_name = graph_preparation.dict_name_class, graph_preparation.dict_class_name
        seen_classes, unseen_classes = graph_preparation.seen_classes, graph_preparation.unseen_classes
        embeds_matrix, dict_image_embed, dict_image_class = graph_preparation.images_embed_calculator()
        dict_idx_image_class = {i: dict_name_class[dict_image_class[image]]
                                for i, image in enumerate(list(dict_image_class.keys()))}
        awa2_graph_creator = Awa2GraphCreator(embeds_matrix, dict_image_embed, dict_name_class, dict_idx_image_class,
                                              self.args.graph_percentage, self.args)
        image_graph = awa2_graph_creator.create_image_graph()
        kg, dict_class_nodes_translation = awa2_graph_creator.imagenet_knowledge_graph()
        kg = awa2_graph_creator.attributed_graph(kg, att_weight)
        seen_classes = [dict_class_nodes_translation[c] for c in seen_classes]
        unseen_classes = [dict_class_nodes_translation[c] for c in unseen_classes]
        split = {'seen': seen_classes, 'unseen': unseen_classes}
        labels_graph = awa2_graph_creator.create_labels_graph(dict_class_nodes_translation)
        awa2_graph = awa2_graph_creator.weighted_graph(image_graph, kg, labels_graph, weights_dict)
        nx.write_gpickle(awa2_graph, 'awa2/train/awa2_graph')
        if specific_split:
            return awa2_graph, split
        # without a specific split the caller derives its own split later
        return awa2_graph, None
class EmbeddingCreator(object):
    """
    Compute node embeddings for a given graph with one of:
    Node2Vec, Event2Vec (precomputed file) or OGRE.
    """
    def __init__(self, graph=None, dimension=None, args=None):
        self.data_name = args.data_name
        self.dim = dimension
        self.graph = graph

    def create_node2vec_embeddings(self):
        """Fit Node2Vec on the graph and return {node: embedding vector}."""
        node2vec = Node2Vec(self.graph, dimensions=self.dim, walk_length=80, num_walks=16, workers=2)
        model = node2vec.fit()
        nodes = list(self.graph.nodes())
        dict_embeddings = {}
        for i in range(len(nodes)):
            # gensim keys vectors by string, so node names are stringified
            dict_embeddings.update({nodes[i]: np.asarray(model.wv.get_vector(str(nodes[i])))})
        return dict_embeddings

    def create_event2vec_embeddings(self):
        """
        Load precomputed Event2Vec embeddings from
        '<data_name>/<data_name>_e2v_embeddings.txt'.
        The first line of the file is a header and is skipped.
        :return: {node: list of embedding components (kept as strings)}
        """
        data_path = self.data_name + '_e2v_embeddings.txt'
        path = os.path.join(self.data_name, data_path)
        cond = 0
        dict_embeddings = {}
        with open(path, 'r') as f:
            for line in f:
                if cond == 1:
                    items = line.strip().split()
                    dict_embeddings[items[0]] = items[1:]
                cond = 1  # flips after the first (header) line only
        return dict_embeddings

    def create_ogre_embeddings(self, user_initial_nodes_choice=None):
        """
        Embed the graph with OGRE.
        :param user_initial_nodes_choice: optional initial subgraph (H) to start
               the embedding from; when None, OGRE chooses by itself.
        :return: {node: embedding vector}
        """
        from StaticGraphEmbeddings.our_embeddings_methods.static_embeddings import StaticEmbeddings
        if user_initial_nodes_choice is not None:
            static_embeddings = StaticEmbeddings(self.data_name, self.graph, initial_size=100, initial_method="node2vec", method="OGRE", H=user_initial_nodes_choice,
                                                 dim=self.dim, choose="degrees", regu_val=0, weighted_reg=False, epsilon=0.1, file_tags=None)
        else:
            static_embeddings = StaticEmbeddings(self.data_name, self.graph, dim=self.dim)
        dict_embeddings = static_embeddings.dict_embedding
        return dict_embeddings
class EdgesPreparation:
    """
    Split the graph's movie-class ('labels_edges') edges into train / test /
    unseen sets and expose them grouped by class.
    """
    def __init__(self, graph, args, split=None):
        self.args = args
        self.split = split
        self.graph = graph
        self.label_edges = self.make_label_edges()
        # BUG FIX: the attribute order now matches the tuple returned by
        # train_test_unseen_split(); previously dict_train_edges and
        # dict_test_edges were swapped at unpacking time.
        self.unseen_edges, self.test_edges, self.dict_train_edges, self.dict_test_edges, self.dict_unseen_edges \
            = self.train_test_unseen_split()

    def make_label_edges(self):
        """
        Make a list with all the edges of type "labels_edges", i.e. edges between
        a movie and its class, and best-effort cache them to disk.
        :return: list with labels_edges
        """
        data_path = self.args.data_name + '_true_edges.pickle'
        nodes = list(self.graph.nodes)
        label_edges = []
        for node in nodes:
            # class nodes are identified by a leading 'c' in their name
            if str(node)[0] == 'c':
                info = self.graph._adj[node]
                neighs = list(info.keys())
                for neigh in neighs:
                    if info[neigh]['key'] == 'labels_edges':
                        label_edges.append([node, neigh])
        try:
            with open(os.path.join(self.args.data_name, data_path), 'wb') as handle:
                pickle.dump(label_edges, handle, protocol=3)
        except OSError:
            # caching is best-effort only; the edge list is still returned
            pass
        return label_edges

    @staticmethod
    def label_edges_classes_ordered(edge_data):
        """
        Group label edges by the class ('c...') endpoint. Every label edge
        belongs to exactly one class.
        :return: {class_node: list of its label edges}
        """
        dict_class_label_edge = {}
        for edge in edge_data:
            if edge[0][0] == 'c':
                label = edge[0]
            else:
                label = edge[1]
            if dict_class_label_edge.get(label) is not None:
                edges = dict_class_label_edge[label]
                edges.append(edge)
                dict_class_label_edge[label] = edges
            else:
                dict_class_label_edge.update({label: [edge]})
        return dict_class_label_edge

    def train_test_unseen_split(self):
        """
        Split classes into seen/unseen by args.seen_percentage (or use the
        provided split), then split each seen class's edges into train/test by
        args.ratio[0].
        :return: (unseen_edges, test_edges, dict_train_edges, dict_test_edges,
                  dict_unseen_edges)
        """
        ratio = self.args.ratio[0]
        dict_true_edges = self.label_edges_classes_ordered(self.label_edges)
        classes = list(dict_true_edges.keys())
        # reorder classes by descending number of label edges
        for i, k in enumerate(sorted(dict_true_edges, key=lambda x: len(dict_true_edges[x]), reverse=True)):
            classes[i] = k
        seen_classes = classes[:int(self.args.seen_percentage * len(classes))]
        unseen_classes = classes[int(self.args.seen_percentage * len(classes)):]
        if self.split is not None:
            # an explicit split (e.g. AWA2's canonical one) overrides the percentage
            seen_classes = self.split['seen']
            unseen_classes = self.split['unseen']
        unseen_edges, seen_edges, train_edges, test_edges = [], [], [], []
        for c in unseen_classes:
            for edge in dict_true_edges[c]:
                unseen_edges.append(edge)
        for c in seen_classes:
            seen_edges_c = []
            for edge in dict_true_edges[c]:
                seen_edges.append(edge)
                seen_edges_c.append(edge)
            random.Random(4).shuffle(seen_edges_c)  # deterministic shuffle
            train_edges_c = seen_edges_c[:int(ratio * len(seen_edges_c))]
            test_edges_c = seen_edges_c[int(ratio * len(seen_edges_c)):]
            for edge in train_edges_c:
                train_edges.append(edge)
            if len(test_edges_c) > 0:
                for edge in test_edges_c:
                    test_edges.append(edge)
        dict_train_edges = self.label_edges_classes_ordered(train_edges)
        dict_test_edges = self.label_edges_classes_ordered(test_edges)
        dict_unseen_edges = self.label_edges_classes_ordered(unseen_edges)
        return unseen_edges, test_edges, dict_train_edges, dict_test_edges, dict_unseen_edges

    def seen_graph(self):
        """Remove the unseen and test label edges from the graph (in place)."""
        graph = self.graph
        for edge in self.unseen_edges:
            graph.remove_edge(edge[0], edge[1])
        for edge in self.test_edges:
            graph.remove_edge(edge[0], edge[1])
        return graph

    def ogre_initial_nodes(self, gnx):
        """
        Build the initial subgraph for OGRE: every train class plus one of its
        train movies.
        """
        train_classes = list(self.dict_train_edges.keys())
        train_nodes = train_classes.copy()
        for c in train_classes:
            train_nodes.append(self.dict_train_edges[c][0][1])
        initial_graph = gnx.subgraph(train_nodes)
        return initial_graph
class Classifier:
    """
    Link-prediction based zero-shot classifier: scores movie-class edges by the
    distance between their endpoint embeddings and assigns each test movie to
    the best-scoring class.
    """
    def __init__(self, dict_train_true, dict_test_true, dict_unseen_edges,
                 dict_projections, embedding, args):
        self.args = args
        self.embedding = embedding
        self.dict_true_edges = dict_train_true
        self.dict_test_true = dict_test_true
        self.dict_unseen_edges = dict_unseen_edges
        # args.norm is a string ('cosine' / 'L1 Norm' / 'L2 Norm'); it is stored
        # as a set of characters and compared with set(...) below.
        self.norm = set(args.norm)
        self.dict_projections = dict_projections

    def edges_distance(self, edges):
        """
        Calculate the distance of each edge: the distance between the
        embeddings of its two vertices, using L1, L2 or cosine (angular)
        distance.
        :param edges: iterable of (node, node) pairs.
        :return: column vector of shape (len(edges), 1) with the distances.
        """
        embed_edges_0 = [self.dict_projections[edge[0]] for edge in edges]
        embed_edges_1 = [self.dict_projections[edge[1]] for edge in edges]
        if self.norm == set('L1 Norm'):
            norms = la.norm(np.subtract(embed_edges_0, embed_edges_1), 1, axis=1)
        elif self.norm == set('L2 Norm'):
            norms = la.norm(np.subtract(embed_edges_0, embed_edges_1), 2, axis=1)
        elif self.norm == set('cosine'):
            all_norms = cosine_similarity(embed_edges_0, embed_edges_1)
            # clamp to [-1, 1] before acos to absorb floating-point drift
            # (replaces a debug try/except that could leave `norms` unbound)
            norms = [math.acos(min(1.0, max(-1.0, all_norms[i, i]))) for i in range(len(all_norms))]
        else:
            raise ValueError(f"Wrong name of norm, {self.norm}")
        final_norms = np.array(norms).reshape(-1, 1)
        return final_norms

    def edge_distance(self, edge):
        """
        Distance between the embeddings of a single edge's endpoints.
        Falls back to a maximal-distance pair (ones vs zeros) when a projection
        is missing or malformed.
        :param edge: (node, node) pair.
        :return: scalar distance.
        """
        try:
            embd1 = np.array(self.dict_projections[edge[0]]).astype(float)
            embd2 = np.array(self.dict_projections[edge[1]]).astype(float)
        except (KeyError, ValueError):
            embd1 = np.ones(self.args.embedding_dimension).astype(float)
            embd2 = np.zeros(self.args.embedding_dimension).astype(float)
        if self.norm == set('L1 Norm'):
            norm = la.norm(np.subtract(embd1, embd2), 1)
        elif self.norm == set('L2 Norm'):
            # BUG FIX: this branch previously computed an L1 norm (ord=1)
            norm = la.norm(np.subtract(embd1, embd2), 2)
        elif self.norm == set('cosine'):
            norm = math.acos(cosine_similarity(embd1.reshape(1, -1), embd2.reshape(1, -1))[0])
        else:
            raise ValueError(f"Wrong name of norm, {self.norm}")
        return norm

    def calculate_classifier_value(self, true_edges, false_edges):
        """
        Create x and y for a Logistic Regression classifier.
        :param true_edges: list of true edges.
        :param false_edges: list of false edges.
        :return: x_true/x_false - feature matrices (one distance per edge);
                 y_true_edge/y_false_edge - one-hot labels, [1,0] true / [0,1] false.
        """
        x_true = self.edges_distance(true_edges)
        x_false = self.edges_distance(false_edges)
        y_true_edge = np.column_stack((np.ones(shape=(len(true_edges), 1)),
                                       np.zeros(shape=(len(true_edges), 1)))).astype(int)
        y_false_edge = np.column_stack((np.zeros(shape=(len(false_edges), 1)),
                                        np.ones(shape=(len(false_edges), 1)))).astype(int)
        return x_true, x_false, y_true_edge, y_false_edge

    def calculate_by_single_norm(self, true_edges, false_edges):
        """
        Score every edge one-by-one with edge_distance and build one-hot labels:
        column 0 marks true edges, column 1 marks false edges.
        """
        x_true, x_false = np.zeros(shape=(len(true_edges), 1)), np.zeros(shape=(len(false_edges), 1))
        y_true_edge, y_false_edge = np.zeros(shape=(len(true_edges), 4)).astype(int), \
                                    np.zeros(shape=(len(false_edges), 4)).astype(int)
        for i, edge in enumerate(true_edges):
            norm = self.edge_distance(edge)
            x_true[i, 0] = norm
            y_true_edge[i, 0] = 1
        for i, edge in enumerate(false_edges):
            norm = self.edge_distance(edge)
            x_false[i, 0] = norm
            y_false_edge[i, 1] = 1
        return x_true, x_false, y_true_edge, y_false_edge

    @staticmethod
    def concat_data(x_true, x_false, y_true_edge, y_false_edge):
        """
        Stack the true and false samples into a single feature matrix and a
        single label matrix.
        :return: (x_train, y_train)
        """
        x_train, y_train = np.concatenate((x_true, x_false), axis=0), \
                           np.concatenate((y_true_edge, y_false_edge), axis=0)
        return x_train, y_train

    def train(self):
        """
        Build the evaluation structure: for every test and unseen class, map
        each of its movies to its true label edge, and cache it to disk.
        :return: dict_class_movie_test - {class: {movie: true edge}}
        """
        path2 = os.path.join(self.args.data_name, f'train/dict_{self.embedding}_{self.args.norm}.pkl')
        dict_class_movie_test = {}
        test_classes = list(self.dict_test_true.keys())
        unseen_classes = list(self.dict_unseen_edges.keys())
        for c in test_classes:
            dict_movie_edge = {}
            for edge in self.dict_test_true[c]:
                # the non-class endpoint of the edge is the movie
                if edge[0][0] == 'c':
                    movie = edge[1]
                else:
                    movie = edge[0]
                dict_movie_edge[movie] = edge
            dict_class_movie_test[c] = dict_movie_edge.copy()
        for c in unseen_classes:
            dict_movie_edge = {}
            for edge in self.dict_unseen_edges[c]:
                if edge[0][0] == 'c':
                    movie = edge[1]
                else:
                    movie = edge[0]
                dict_movie_edge[movie] = edge
            dict_class_movie_test[c] = dict_movie_edge.copy()
        with open(path2, 'wb') as fid:
            pickle.dump(dict_class_movie_test, fid)
        return dict_class_movie_test

    def evaluate(self, dict_class_movie_test):
        """
        For every test/unseen movie, rank all classes by edge distance and take
        one as the prediction; accumulate per-class accuracy.
        :return: (dict_class_measures, pred, pred_true)
        """
        classes = list(dict_class_movie_test.keys())
        pred_true = []
        pred = []
        num_classes = len(classes)
        dict_measures = {'acc': {}, 'precision': {}}
        dict_class_measures = {}
        for c in classes:
            class_movies = list(dict_class_movie_test[c].keys())
            count = 0
            for m in class_movies:
                edges = np.array([np.repeat(m, num_classes), classes]).T
                class_test = self.edges_distance(edges)
                # NOTE(review): argmax picks the *largest* distance; the sibling
                # evaluate_for_hist picks the smallest -- confirm intended.
                pred_index = np.argmax(class_test)
                prediction = edges[pred_index]
                real_edge = list(dict_class_movie_test[c][m])
                pred_true.append(c)
                if prediction[0][0] == 'c':
                    pred.append(prediction[0])
                else:
                    pred.append(prediction[1])
                if prediction[0] == real_edge[0]:
                    if prediction[1] == real_edge[1]:
                        count += 1
                elif prediction[1] == real_edge[0]:
                    if prediction[0] == real_edge[1]:
                        count += 1
            accuracy = count / len(class_movies)
            dict_measures['acc'] = accuracy
            dict_class_measures[c] = dict_measures.copy()
        with open(os.path.join(self.args.data_name, f'dict_class_measures_{self.embedding}_{self.args.norm}.pkl'),
                  'wb') as handle:
            pickle.dump(dict_class_measures, handle, protocol=3)
        # TODO dict class measures for every ratio
        return dict_class_measures, pred, pred_true

    def evaluate_for_hist(self, dict_class_movie_test):
        """
        Like evaluate(), but applies the unseen-advantage rule when predicting
        and records, for movies of unseen classes, the rank of the true class
        (for a histogram of real-unseen vs. predicted distances).
        :return: (dict_class_measures, pred, pred_true, hist_real_unseen_pred)
        """
        classes = list(dict_class_movie_test.keys())
        hist_real_unseen_pred = np.zeros(len(classes))
        pred_true = []
        pred = []
        num_classes = len(classes)
        # flag vector: 0 for seen classes, 1 for unseen ones.
        # NOTE(review): assumes classes are ordered seen-first, which holds for
        # the percentage split -- confirm for a custom split.
        seen_flag = np.zeros(int(self.args.seen_percentage*len(classes)))
        unseen_flag = np.ones(len(classes)-int(self.args.seen_percentage*len(classes)))
        classes_flag = np.concatenate((seen_flag, unseen_flag))
        dict_measures = {'acc': {}, 'precision': {}}
        dict_class_measures = {}
        for i, c in enumerate(classes):
            class_movies = list(dict_class_movie_test[c].keys())
            count = 0
            for m in class_movies:
                edges = np.array([np.repeat(m, num_classes), classes]).T
                class_test = self.edges_distance(edges)
                class_norm_test = np.column_stack((np.column_stack((class_test, classes)), classes_flag))
                # BUG FIX: column_stack yields a string array, so sorting on the
                # raw column compared distances lexicographically; cast to float
                # to sort numerically (ascending distance).
                sorted_class_norm = class_norm_test[np.argsort(class_norm_test[:, 0].astype(float))]
                sort_classes = sorted_class_norm.T[1]
                sort_norm = sorted_class_norm.T[0].astype(float)
                sort_classes_flag = sorted_class_norm.T[2].astype(float)
                prediction = np.array([m, sort_classes[0]])
                real_edge = list(dict_class_movie_test[c][m])
                pred_true.append(c)
                if i > int(self.args.seen_percentage*len(classes)):
                    # rank of the true (unseen) class among all candidates
                    place = np.where(sort_classes == c)[0][0]
                    hist_real_unseen_pred[place] += 1
                # give the best unseen class an advantage over the overall best
                place = np.where(sort_classes_flag == 1)[0][0]
                if self.args.unseen_weight_advantage*sort_norm[place] < sort_norm[0]:
                    pred.append(sort_classes[place])
                else:
                    pred.append(sort_classes[0])
                if prediction[0] == real_edge[0]:
                    if prediction[1] == real_edge[1]:
                        count += 1
                elif prediction[1] == real_edge[0]:
                    if prediction[0] == real_edge[1]:
                        count += 1
            accuracy = count / len(class_movies)
            dict_measures['acc'] = accuracy
            dict_class_measures[c] = dict_measures.copy()
        with open(os.path.join(self.args.data_name, f'dict_class_measures_{self.embedding}_{self.args.norm}.pkl'),
                  'wb') as handle:
            pickle.dump(dict_class_measures, handle, protocol=3)
        # TODO dict class measures for every ratio
        return dict_class_measures, pred, pred_true, hist_real_unseen_pred

    def hist_plot_for_unseen_dist_eval(self, distances):
        """Plot and save the histogram of real-unseen class ranks."""
        title = 'Histogram Of The Distance Between \n Unseen Label Norm And Predicted Norm'
        x_label = f'Distance, limit:{len(distances)}'
        y_label = 'Count'
        hist_plot(distances, title, x_label, y_label)
        plt.savefig(f'{self.args.data_name}/plots/hist_distance_real_unseen-prediction_'
                    f'{self.embedding}_{self.args.norm}_{int(100*self.args.seen_percentage)}_seen_percent')

    def confusion_matrix_maker(self, dict_class_measures, pred, pred_true):
        """
        Build the confusion matrix over all classes and report overall, seen
        and unseen accuracy.
        :return: (accuracy, seen_accuracy, unseen_accuracy, conf_matrix)
        """
        conf_matrix = confusion_matrix(pred_true, pred, labels=list(dict_class_measures.keys()))
        seen_true_count = 0
        seen_count = 0
        unseen_true_count = 0
        unseen_count = 0
        classes = list(dict_class_measures.keys())
        seen_idx = []
        unseen_idx = []
        # partition class indices by membership in the unseen set
        for i, c in enumerate(classes):
            if len(set([c]).intersection(set(self.dict_unseen_edges.keys()))) > 0:
                unseen_idx.append(i)
            else:
                seen_idx.append(i)
        for i in seen_idx:
            seen_true_count += conf_matrix[i][i]
            for j in range(len(classes)):
                seen_count += conf_matrix[i][j]
        for i in unseen_idx:
            unseen_true_count += conf_matrix[i][i]
            for j in range(len(conf_matrix)):
                unseen_count += conf_matrix[i][j]
        accuracy = (seen_true_count + unseen_true_count) / (seen_count + unseen_count)
        seen_accuracy = seen_true_count / seen_count
        unseen_accuracy = unseen_true_count / unseen_count
        print(f'accuracy all: {accuracy}')
        print(f'accuracy all seen: {seen_accuracy}')
        print(f'accuracy all unseen: {unseen_accuracy}')
        return accuracy, seen_accuracy, unseen_accuracy, conf_matrix

    def plot_confusion_matrix_all_classes(self, conf_matrix):
        """Plot and save the full confusion matrix."""
        plt.figure(0)
        title = f'Confusion Matrix, ZSL {self.args.data_name} \n' \
                f'{self.embedding} {self.args.norm} {int(100 * self.args.seen_percentage)} Percent Seen'
        x_title = f"True Labels {int(100 * self.args.seen_percentage)}/{100 - int(100 * self.args.seen_percentage)}" \
                  f" (seen/unseen)"
        y_title = f"Predicted Labels"
        plot_confusion_matrix(conf_matrix, title, x_title, y_title)
        plt.savefig(f'{self.args.data_name}/plots/confusion_matrix_{self.embedding}_{self.args.norm}'
                    f'_{int(100 * self.args.seen_percentage)}_seen_percent')
from dataclasses import dataclass
@dataclass
class InventoryItem:
    """One grid-search configuration record (hyper-parameters of a run)."""
    data_name: str
    threshold: float
    norm: str
    embedding: str
    false_per_true: str
    # removed: a duplicate `norm: str` annotation that re-declared the field
def define_args(params):
    """
    Translate one grid-search configuration into an argparse Namespace.

    :param params: dict with a single value per hyper-parameter.
    :return: (args, weights) where weights is a float array of
             [movie-movie edge weight, movie-class edge weight].
    """
    print(params)
    weights = np.array([params['weights_movie_movie'], params['weights_movie_class']]).astype(float)
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_name', default=params['data_name'])  # our_imdb, awa2
    parser.add_argument('--threshold', default=params['threshold'])
    parser.add_argument('--norm', default=params['norma_types'])  # cosine / L2 Norm / L1 Norm
    parser.add_argument('--embedding', default=params['embedding_type'])  # Node2Vec / Event2Vec / OGRE
    # embedding = params[2]
    parser.add_argument('--false_per_true', default=10)
    parser.add_argument('--ratio', default=[0.8])
    parser.add_argument('--seen_percentage', default=float(params['seen_percentage']))
    parser.add_argument('--embedding_dimension', default=int(params['embedding_dimensions']))
    parser.add_argument('--unseen_weight_advantage', default=0.9)
    parser.add_argument('--graph_percentage', default=1)
    if params['data_name'] == 'awa2':
        # AWA2 needs image-pipeline options; torch is imported lazily so the
        # IMDb path does not require it to be installed
        parser.add_argument('--awa2_attributes_weight', default=params['awa2_attributes_weight'])
        import torch
        cuda = torch.cuda.is_available()
        parser.add_argument('--cnn', default='materials/resnet50-base.pth')
        if cuda:
            parser.add_argument('--gpu', default='0')
        else:
            parser.add_argument('--gpu', default='-1')
        parser.add_argument('--consider-trains', action='store_false')
        parser.add_argument('--output', default=None)
        parser.add_argument('--images_threshold', default=0.10)
    # embedding_dimension = params[3].astype(int)
    # NOTE(review): parse_args() also consumes sys.argv, so command-line flags
    # can override the grid defaults -- confirm this is intended
    args = parser.parse_args()
    return args, weights
def obj_func_grid(params, specific_split=True, split=None):  # split False or True
    """
    Main function for the link-prediction / zero-shot task on one grid point:
    build the graph, split the label edges, embed the nodes, then train and
    evaluate the classifier.

    :param params: dict with a single value per hyper-parameter.
    :param specific_split: for awa2, use the dataset's predefined seen/unseen split.
    :param split: optional {'seen': [...], 'unseen': [...]} class split.
    :return: (accuracy, seen_accuracy, unseen_accuracy)
    """
    args, weights = define_args(params)
    np.random.seed(0)
    # ratio_arr = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    graph_maker = GraphImporter(args)
    # multi_graph = graph_maker.import_imdb_multi_graph(weights)
    if args.data_name == 'our_imdb':
        weighted_graph = graph_maker.import_imdb_weighted_graph(weights)
    elif args.data_name == 'awa2':
        awa2_att_weight = params['awa2_attributes_weight']
        # NOTE: for awa2 the split returned here replaces the `split` argument
        weighted_graph, split = graph_maker.import_awa2_graph(weights, specific_split, awa2_att_weight)
    else:
        raise ValueError(f"Wrong name of DataSet, {args.data_name}")
    edges_preparation = EdgesPreparation(weighted_graph, args, split)
    # dict_true_edges = edges_preparation.label_edges_classes_ordered(edges_preparation.label_edges)
    # dict_false_edges = edges_preparation.make_false_label_edges(dict_true_edges)
    dict_train_true = edges_preparation.dict_train_edges
    dict_test_true = edges_preparation.dict_test_edges
    dict_unseen_edges = edges_preparation.dict_unseen_edges
    # the embedding is learned on the graph WITHOUT test/unseen label edges
    graph = edges_preparation.seen_graph()
    embeddings_maker = EmbeddingCreator(graph, args.embedding_dimension, args)
    if args.embedding == 'Node2Vec':
        dict_embeddings = embeddings_maker.create_node2vec_embeddings()
    elif args.embedding == 'Event2Vec':
        dict_embeddings = embeddings_maker.create_event2vec_embeddings()
    elif args.embedding == 'OGRE':
        initial_nodes = edges_preparation.ogre_initial_nodes(graph)
        dict_embeddings = embeddings_maker.create_ogre_embeddings(user_initial_nodes_choice=initial_nodes)
    else:
        raise ValueError(f"Wrong name of embedding, {args.embedding}")
    classifier = Classifier(dict_train_true, dict_test_true, dict_unseen_edges,
                            dict_embeddings, args.embedding, args)
    dict_class_movie_test = classifier.train()
    dict_class_measures_node2vec, pred, pred_true, hist_real_unseen_pred = classifier.evaluate_for_hist(dict_class_movie_test)
    # classifier.hist_plot_for_unseen_dist_eval(hist_real_unseen_pred)
    accuracy, seen_accuracy, unseen_accuracy, conf_matrix = classifier.confusion_matrix_maker(
        dict_class_measures_node2vec, pred, pred_true)
    # classifier.plot_confusion_matrix_all_classes(conf_matrix)
    return accuracy, seen_accuracy, unseen_accuracy
def flatten_dict(d):
    """Flatten a nested dict into one level, joining nested keys with '.'."""
    flat = {}
    for key, value in d.items():
        if isinstance(value, dict):
            for sub_key, sub_value in flatten_dict(value).items():
                flat[key + "." + sub_key] = sub_value
        else:
            flat[key] = value
    return flat
def config_to_str(config):
    """Serialize a (possibly nested) config dict into a CSV row ordered by HEADER."""
    flat = flatten_dict(config)
    row = []
    for column in HEADER:
        row.append(str(flat.get(column, "--")))
    return row
def run_grid(grid_params, res_dir, now):
    """
    Run the full hyper-parameter grid and write one CSV result row per
    configuration.

    :param grid_params: dict of parameter lists, or a path to a JSON file
           containing such a dict.
    :param res_dir: directory for the result CSV.
    :param now: timestamp string embedded in the result file name.
    """
    if not isinstance(grid_params, dict):
        # previously json.load(open(...)) leaked the file handle
        with open(grid_params, "rt") as fp:
            grid_params = json.load(fp)
    res_filename = os.path.join(res_dir, f"{grid_params['data_name'][0]}_grid_{now}.csv")
    # the context manager guarantees the CSV is closed even if a run raises
    with open(res_filename, "wt") as out:
        out.write(f"{','.join(HEADER)}\n")
        for config in grid(grid_params):
            param = {p: config[i] for i, p in enumerate(list(grid_params.keys()))}
            acc, seen_acc, unseen_acc = obj_func_grid(param)
            table_row = config_to_str(param)
            table_row[HEADER.index('acc')] = str(acc)
            table_row[HEADER.index('seen_acc')] = str(seen_acc)
            table_row[HEADER.index('unseen_acc')] = str(unseen_acc)
            out.write(f"{','.join(table_row)}\n")
def main():
    """
    Sequentially run obj_func_grid over the full hyper-parameter grid and plot
    seen/unseen accuracy.
    """
    seen_accuracies, unseen_accuracies = [], []
    parameters = {
        "data_name": ['our_imdb'],  # 'awa2', 'our_imdb'
        "embedding_type": ["Node2Vec"],
        "embedding_dimensions": [32, 64, 128, 256],
        # "weights_movie_class": [1],
        # "weights_movie_movie": [1],
        "weights_movie_class": np.logspace(-2, 3, 6),
        "weights_movie_movie": np.logspace(-2, 3, 6),
        "norma_types": ['cosine'],
        "threshold": [0.3, 0.6, 0.9],
        "seen_percentage": [0.8],
        # "seen_percentage": np.linspace(0.1, 0.9, 9)
        "awa2_attributes_weight": [100]  # 100 is the best for now
    }
    num = 0
    for param in grid(parameters):
        # grid() yields value tuples in key order; rebuild the dict per point
        dict_param = {p: param[i] for i, p in enumerate(list(parameters.keys()))}
        # param = np.array([w_m_m, w_m_c, e_type, dim, norma_type, threshold, per, data, w_att])
        print(f'iteration number {num}')
        num += 1
        acc, seen_acc, unseen_acc = obj_func_grid(dict_param)
        seen_accuracies.append(seen_acc*100)
        unseen_accuracies.append(unseen_acc*100)
        # print("all accuracy: ", acc)
    dict_measures = {"unseen_accuracy": unseen_accuracies, "seen_accuracy": seen_accuracies}
    # NOTE(review): the x-axis is parameters["seen_percentage"] (length 1) while
    # the accuracy lists hold one entry per grid point -- the plot is only
    # meaningful when the grid varies seen_percentage alone; confirm.
    plots_2measures_vs_parameter(dict_measures, parameters["seen_percentage"], 'seen Percentage', 'our_imdb',
                                 'Zero Shot Learning', "Accuracy", parameters["norma_types"][0],
                                 parameters["embedding_type"][0])
if __name__ == '__main__':
    # results directory is machine-specific (Windows absolute path)
    res_dir = "C:\\Users\\kfirs\\lab\\Zero Shot Learning\\New-Graph-ZSL\\grid_results"
    # now = datetime.now().strftime("%d%m%y_%H%M%S")
    # fixed timestamp so re-runs write to the same per-process result files
    now = "01_03_21"
    parameters = {
        "data_name": ['our_imdb'],  # 'awa2', 'our_imdb'
        "embedding_type": ["Node2Vec"],
        "embedding_dimensions": [32, 64, 128, 256],
        # "weights_movie_class": [1],
        # "weights_movie_movie": [1],
        "weights_movie_class": np.logspace(-2, 3, 6),
        "weights_movie_movie": np.logspace(-2, 3, 6),
        "norma_types": ['cosine'],
        "threshold": [0.3, 0.6, 0.9],
        "seen_percentage": [0.8],
        # "seen_percentage": np.linspace(0.1, 0.9, 9)
        "awa2_attributes_weight": [100]  # 100 is the best for now
    }
    processes = []
    parameters_by_procesess = []
    # fan out one worker process per (movie-movie, movie-class) weight pair;
    # each worker sweeps the remaining sub-grid via run_grid
    for w_m_m in parameters["weights_movie_movie"]:
        for w_m_c in parameters["weights_movie_class"]:
            param_by_parameters = parameters.copy()
            param_by_parameters["weights_movie_movie"] = [w_m_m]
            param_by_parameters["weights_movie_class"] = [w_m_c]
            parameters_by_procesess.append(param_by_parameters)
    for i in range(len(parameters_by_procesess)):
        proc = multiprocessing.Process(target=run_grid, args=(parameters_by_procesess[i], res_dir, now, ))
        processes.append(proc)
        proc.start()
    # wait for every worker before exiting the parent
    for p in processes:
        p.join()
| 46.995128 | 165 | 0.61509 | import json
import multiprocessing
from datetime import datetime
from node2vec import Node2Vec
import pandas as pd
import numpy as np
import networkx as nx
import pickle
import os
import argparse
from numpy import linalg as la
from sklearn.metrics.pairwise import cosine_similarity
from sklearn import model_selection as sk_ms
from sklearn.metrics import confusion_matrix
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
import random
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
from itertools import chain
from utils import set_gpu
from utlis_graph_zsl import hist_plot, plot_confusion_matrix, plots_2measures_vs_parameter, grid
from IMDb_data_preparation_E2V import MoviesGraph
random.seed(0)
np.random.seed(0)
HEADER = ['movie_weights',
'labels_weights',
'embedding_type',
'embedding_dimension',
'norma_type',
'class_edges_threshold',
'seen_percentage',
'data_name',
'awa2_attributes_weight',
'acc',
'seen_acc',
'unseen_acc']
class GraphImporter:
def __init__(self, args):
self.data_name = args.data_name
self.graph_percentage = args.graph_percentage
self.threshold = args.threshold
self.args = args
def import_imdb_multi_graph(self, weights):
weights_dict = {'movies_edges': weights[0], 'labels_edges': weights[1]}
dict_paths = {'cast': 'data_set/IMDb title_principals.csv', 'genre': 'data_set/IMDb movies.csv'}
imdb = MoviesGraph(dict_paths, self.args.graph_percentage)
gnx = imdb.create_graph()
labels = imdb.labels2int(gnx)
knowledge_gnx, knowledge_data = imdb.create_knowledge_graph(labels, self.threshold)
multi_gnx = imdb.weighted_multi_graph(gnx, knowledge_gnx, labels, weights_dict)
return multi_gnx
def import_imdb_weighted_graph(self, weights):
weights_dict = {'movies_edges': weights[0], 'labels_edges': weights[1]}
dict_paths = {'cast': 'data_set/IMDb title_principals.csv', 'genre': 'data_set/IMDb movies.csv'}
imdb = MoviesGraph(dict_paths, self.args.graph_percentage)
gnx = imdb.create_graph()
labels = imdb.labels2int(gnx)
knowledge_gnx, knowledge_data = imdb.create_knowledge_graph(labels, float(self.threshold))
weighted_graph = imdb.weighted_graph(gnx, knowledge_gnx, labels, weights_dict)
return weighted_graph
def import_graph(self):
graph = nx.MultiGraph()
data_path = self.data_name + '.txt'
path = os.path.join(self.data_name, data_path)
with open(path, 'r') as f:
for line in f:
items = line.strip().split()
att1 = str(items[0][0])
att2 = str(items[1][0])
graph.add_node(items[0], key=att1)
graph.add_node(items[1], key=att2)
sort_att = np.array([att1, att2])
sort_att = sorted(sort_att)
graph.add_edge(items[0], items[1], key=str(sort_att[0]) + str(sort_att[1]))
return graph
def import_awa2_graph(self, awa2_weights, specific_split, att_weight):
from images_graph_creator import Awa2GraphCreator, ImagesEmbeddings
weights_dict = {'classes_edges': awa2_weights[0], 'labels_edges': awa2_weights[1]}
set_gpu(self.args.gpu)
graph_preparation = ImagesEmbeddings(self.args)
dict_name_class, dict_class_name = graph_preparation.dict_name_class, graph_preparation.dict_class_name
seen_classes, unseen_classes = graph_preparation.seen_classes, graph_preparation.unseen_classes
embeds_matrix, dict_image_embed, dict_image_class = graph_preparation.images_embed_calculator()
dict_idx_image_class = {i: dict_name_class[dict_image_class[image]]
for i, image in enumerate(list(dict_image_class.keys()))}
awa2_graph_creator = Awa2GraphCreator(embeds_matrix, dict_image_embed, dict_name_class, dict_idx_image_class,
self.args.graph_percentage, self.args)
image_graph = awa2_graph_creator.create_image_graph()
kg, dict_class_nodes_translation = awa2_graph_creator.imagenet_knowledge_graph()
kg = awa2_graph_creator.attributed_graph(kg, att_weight)
seen_classes = [dict_class_nodes_translation[c] for c in seen_classes]
unseen_classes = [dict_class_nodes_translation[c] for c in unseen_classes]
split = {'seen': seen_classes, 'unseen': unseen_classes}
labels_graph = awa2_graph_creator.create_labels_graph(dict_class_nodes_translation)
awa2_graph = awa2_graph_creator.weighted_graph(image_graph, kg, labels_graph, weights_dict)
nx.write_gpickle(awa2_graph, 'awa2/train/awa2_graph')
if specific_split:
return awa2_graph, split
else:
split = None
return awa2_graph, split
class EmbeddingCreator(object):
    """Produces node -> embedding-vector dictionaries for a graph.

    Three backends are supported: Node2Vec (computed here), Event2Vec
    (read from a precomputed text file), and OGRE (StaticGraphEmbeddings).
    """
    def __init__(self, graph=None, dimension=None, args=None):
        # args must at least expose .data_name (used to locate data files).
        self.data_name = args.data_name
        self.dim = dimension
        self.graph = graph
    def create_node2vec_embeddings(self):
        """Fit Node2Vec on self.graph and return {node: np.ndarray of shape (dim,)}."""
        node2vec = Node2Vec(self.graph, dimensions=self.dim, walk_length=80, num_walks=16, workers=2)
        model = node2vec.fit()
        nodes = list(self.graph.nodes())
        dict_embeddings = {}
        for i in range(len(nodes)):
            # Node2Vec's word2vec vocabulary is keyed by the node's string form.
            dict_embeddings.update({nodes[i]: np.asarray(model.wv.get_vector(str(nodes[i])))})
        return dict_embeddings
    def create_event2vec_embeddings(self):
        """Load precomputed Event2Vec embeddings from '<data_name>/<data_name>_e2v_embeddings.txt'.

        The first line of the file is a header and is skipped (the `cond` flag).
        Values are returned as lists of strings, not floats.
        """
        data_path = self.data_name + '_e2v_embeddings.txt'
        path = os.path.join(self.data_name, data_path)
        cond = 0
        dict_embeddings = {}
        with open(path, 'r') as f:
            for line in f:
                if cond == 1:
                    items = line.strip().split()
                    dict_embeddings[items[0]] = items[1:]
                cond = 1
        return dict_embeddings
    def create_ogre_embeddings(self, user_initial_nodes_choice=None):
        """Compute OGRE embeddings; optionally seed with a user-chosen initial subgraph."""
        from StaticGraphEmbeddings.our_embeddings_methods.static_embeddings import StaticEmbeddings
        if user_initial_nodes_choice is not None:
            static_embeddings = StaticEmbeddings(self.data_name, self.graph, initial_size=100, initial_method="node2vec", method="OGRE", H=user_initial_nodes_choice,
                                                 dim=self.dim, choose="degrees", regu_val=0, weighted_reg=False, epsilon=0.1, file_tags=None)
        else:
            static_embeddings = StaticEmbeddings(self.data_name, self.graph, dim=self.dim)
        dict_embeddings = static_embeddings.dict_embedding
        return dict_embeddings
class EdgesPreparation:
    """Extracts the class<->item label edges of a weighted graph and splits them
    into train / test (seen classes) and unseen sets.

    Class nodes are recognized by their name starting with 'c'.  Label edges are
    those carrying key == 'labels_edges'.
    """
    def __init__(self, graph, args, split=None):
        # args must expose .data_name, .ratio (list), .seen_percentage.
        # split, when given, is {'seen': [...], 'unseen': [...]} and overrides
        # the percentage-based class split.
        self.args = args
        self.split = split
        self.graph = graph
        self.label_edges = self.make_label_edges()
        # BUGFIX: the unpack order now matches the return order of
        # train_test_unseen_split(); the original code swapped the train and
        # test dictionaries, so "train" actually held the (1 - ratio) split.
        self.unseen_edges, self.test_edges, self.dict_train_edges, self.dict_test_edges, self.dict_unseen_edges \
            = self.train_test_unseen_split()
    def make_label_edges(self):
        """Collect every (class_node, neighbor) edge whose key is 'labels_edges'.

        Best-effort: the list is also pickled to '<data_name>/<data_name>_true_edges.pickle'.
        """
        data_path = self.args.data_name + '_true_edges.pickle'
        nodes = list(self.graph.nodes)
        label_edges = []
        for node in nodes:
            if str(node)[0] == 'c':
                # NOTE: uses networkx's private adjacency dict (graph._adj).
                info = self.graph._adj[node]
                neighs = list(info.keys())
                for neigh in neighs:
                    if info[neigh]['key'] == 'labels_edges':
                        label_edges.append([node, neigh])
        try:
            with open(os.path.join(self.args.data_name, data_path), 'wb') as handle:
                pickle.dump(label_edges, handle, protocol=3)
        except Exception:
            # Caching is best-effort only (e.g. directory may not exist);
            # narrowed from a bare except so KeyboardInterrupt still propagates.
            pass
        return label_edges
    @staticmethod
    def label_edges_classes_ordered(edge_data):
        """Group edges by their class endpoint: {class_node: [edge, ...]}."""
        dict_class_label_edge = {}
        for edge in edge_data:
            # The class endpoint is whichever side starts with 'c'.
            if edge[0][0] == 'c':
                label = edge[0]
            else:
                label = edge[1]
            if dict_class_label_edge.get(label) is not None:
                edges = dict_class_label_edge[label]
                edges.append(edge)
                dict_class_label_edge[label] = edges
            else:
                dict_class_label_edge.update({label: [edge]})
        return dict_class_label_edge
    def train_test_unseen_split(self):
        """Split label edges into train/test (per seen class, by args.ratio[0])
        plus all edges of unseen classes.

        Returns (unseen_edges, test_edges, dict_train_edges, dict_test_edges,
        dict_unseen_edges) — note the dict order: train before test.
        """
        ratio = self.args.ratio[0]
        dict_true_edges = self.label_edges_classes_ordered(self.label_edges)
        # Classes ordered by descending number of label edges.
        classes = sorted(dict_true_edges, key=lambda x: len(dict_true_edges[x]), reverse=True)
        seen_classes = classes[:int(self.args.seen_percentage * len(classes))]
        unseen_classes = classes[int(self.args.seen_percentage * len(classes)):]
        if self.split is not None:
            # A caller-provided split (e.g. the canonical AwA2 split) wins.
            seen_classes = self.split['seen']
            unseen_classes = self.split['unseen']
        unseen_edges, train_edges, test_edges = [], [], []
        for c in unseen_classes:
            for edge in dict_true_edges[c]:
                unseen_edges.append(edge)
        for c in seen_classes:
            seen_edges_c = list(dict_true_edges[c])
            # Fixed seed so the per-class shuffle (and thus the split) is reproducible.
            random.Random(4).shuffle(seen_edges_c)
            cut = int(ratio * len(seen_edges_c))
            for edge in seen_edges_c[:cut]:
                train_edges.append(edge)
            for edge in seen_edges_c[cut:]:
                test_edges.append(edge)
        dict_train_edges = self.label_edges_classes_ordered(train_edges)
        dict_test_edges = self.label_edges_classes_ordered(test_edges)
        dict_unseen_edges = self.label_edges_classes_ordered(unseen_edges)
        return unseen_edges, test_edges, dict_train_edges, dict_test_edges, dict_unseen_edges
    def seen_graph(self):
        """Remove unseen and test label edges from the graph (mutates self.graph)."""
        graph = self.graph
        for edge in self.unseen_edges:
            graph.remove_edge(edge[0], edge[1])
        for edge in self.test_edges:
            graph.remove_edge(edge[0], edge[1])
        return graph
    def ogre_initial_nodes(self, gnx):
        """Subgraph of train classes plus one labeled item per class, used to seed OGRE."""
        train_classes = list(self.dict_train_edges.keys())
        train_nodes = train_classes.copy()
        for c in train_classes:
            # First edge's non-class endpoint for each class.
            train_nodes.append(self.dict_train_edges[c][0][1])
        initial_graph = gnx.subgraph(train_nodes)
        return initial_graph
class Classifier:
    """Zero-shot link classifier: scores (item, class) edges by distance between
    their node embeddings, then predicts a class per item.

    Distances: 'L1 Norm', 'L2 Norm', or 'cosine' (arc-cos of cosine similarity).
    NOTE: args.norm is stored as a *set of characters* (set('cosine') etc.) and
    compared against character sets of the option strings below — fragile but
    works because the three option strings have distinct character sets.
    """
    def __init__(self, dict_train_true, dict_test_true, dict_unseen_edges,
                 dict_projections, embedding, args):
        self.args = args
        self.embedding = embedding
        self.dict_true_edges = dict_train_true
        self.dict_test_true = dict_test_true
        self.dict_unseen_edges = dict_unseen_edges
        self.norm = set(args.norm)
        # node -> embedding vector.
        self.dict_projections = dict_projections
    def edges_distance(self, edges):
        """Vectorized distance for a batch of edges; returns shape (n, 1) array."""
        embed_edges_0 = [self.dict_projections[edge[0]] for edge in edges]
        embed_edges_1 = [self.dict_projections[edge[1]] for edge in edges]
        if self.norm == set('L1 Norm'):
            norms = la.norm(np.subtract(embed_edges_0, embed_edges_1), 1, axis=1)
        elif self.norm == set('L2 Norm'):
            norms = la.norm(np.subtract(embed_edges_0, embed_edges_1), 2, axis=1)
        elif self.norm == set('cosine'):
            try:
                # Only the diagonal (pairwise i-th vs i-th) is used; clamp the
                # similarity into [-1, 1] before acos to avoid domain errors.
                all_norms = cosine_similarity(embed_edges_0, embed_edges_1)
                norms = []
                for i in range(len(all_norms)):
                    if np.abs(all_norms[i, i]) <= 1:
                        norms.append(math.acos(all_norms[i, i]))
                    elif all_norms[i, i] > 1:
                        norms.append(math.acos(1))
                    elif all_norms[i, i] < -1:
                        norms.append(math.acos(-1))
            except:
                # NOTE(review): debug leftover — swallows any error and then
                # `norms` may be undefined, causing a NameError below.
                print('a')
        else:
            raise ValueError(f"Wrong name of norm, {self.norm}")
        final_norms = np.array(norms).reshape(-1, 1)
        return final_norms
    def edge_distance(self, edge):
        """Distance for a single edge; missing embeddings fall back to ones/zeros."""
        try:
            embd1 = np.array(self.dict_projections[edge[0]]).astype(float)
            embd2 = np.array(self.dict_projections[edge[1]]).astype(float)
        except:
            # Node missing from the embedding dict: use a fixed placeholder pair.
            embd1 = np.ones(self.args.embedding_dimension).astype(float)
            embd2 = np.zeros(self.args.embedding_dimension).astype(float)
            pass
        if self.norm == set('L1 Norm'):
            norm = la.norm(np.subtract(embd1, embd2), 1)
        elif self.norm == set('L2 Norm'):
            # NOTE(review): ord=1 here looks like a copy/paste bug — the L2
            # branch computes an L1 norm (edges_distance above uses ord=2).
            norm = la.norm(np.subtract(embd1, embd2), 1)
        elif self.norm == set('cosine'):
            norm = math.acos(cosine_similarity(embd1.reshape(1, -1), embd2.reshape(1, -1))[0])
        else:
            raise ValueError(f"Wrong name of norm, {self.norm}")
        return norm
    def calculate_classifier_value(self, true_edges, false_edges):
        """Distances plus one-hot labels ([1,0] for true edges, [0,1] for false)."""
        x_true = self.edges_distance(true_edges)
        x_false = self.edges_distance(false_edges)
        y_true_edge = np.column_stack((np.ones(shape=(len(true_edges), 1)),
                                       np.zeros(shape=(len(true_edges), 1)))).astype(int)
        y_false_edge = np.column_stack((np.zeros(shape=(len(false_edges), 1)),
                                        np.ones(shape=(len(false_edges), 1)))).astype(int)
        return x_true, x_false, y_true_edge, y_false_edge
    def calculate_by_single_norm(self, true_edges, false_edges):
        """Like calculate_classifier_value but edge-by-edge, with 4-wide label rows."""
        x_true, x_false = np.zeros(shape=(len(true_edges), 1)), np.zeros(shape=(len(false_edges), 1))
        y_true_edge, y_false_edge = np.zeros(shape=(len(true_edges), 4)).astype(int), \
            np.zeros(shape=(len(false_edges), 4)).astype(int)
        for i, edge in enumerate(true_edges):
            norm = self.edge_distance(edge)
            x_true[i, 0] = norm
            y_true_edge[i, 0] = str(1)
        for i, edge in enumerate(false_edges):
            norm = self.edge_distance(edge)
            x_false[i, 0] = norm
            y_false_edge[i, 1] = str(1)
        return x_true, x_false, y_true_edge, y_false_edge
    @staticmethod
    def concat_data(x_true, x_false, y_true_edge, y_false_edge):
        """Stack true and false samples into one (x, y) training pair."""
        x_train, y_train = np.concatenate((x_true, x_false), axis=0), \
            np.concatenate((y_true_edge, y_false_edge), axis=0)
        return x_train, y_train
    def train(self):
        """Build {class: {item: edge}} for test + unseen classes and pickle it.

        Despite the name, no model is fitted here — evaluation is pure
        nearest-class by embedding distance.
        """
        path2 = os.path.join(self.args.data_name, f'train/dict_{self.embedding}_{self.args.norm}.pkl')
        classes = list(self.dict_true_edges.keys())
        dict_class_movie_test = {}
        test_classes = list(self.dict_test_true.keys())
        unseen_classes = list(self.dict_unseen_edges.keys())
        for c in test_classes:
            dict_movie_edge = {}
            for edge in self.dict_test_true[c]:
                # The item ("movie") endpoint is whichever side is not a class node.
                if edge[0][0] == 'c':
                    movie = edge[1]
                else:
                    movie = edge[0]
                dict_movie_edge[movie] = edge
            dict_class_movie_test[c] = dict_movie_edge.copy()
        for c in unseen_classes:
            dict_movie_edge = {}
            for edge in self.dict_unseen_edges[c]:
                if edge[0][0] == 'c':
                    movie = edge[1]
                else:
                    movie = edge[0]
                dict_movie_edge[movie] = edge
            dict_class_movie_test[c] = dict_movie_edge.copy()
        with open(path2, 'wb') as fid:
            pickle.dump(dict_class_movie_test, fid)
        return dict_class_movie_test
    def evaluate(self, dict_class_movie_test):
        """Predict a class per item by comparing its distance to every class node.

        Returns (dict_class_measures, pred, pred_true): per-class accuracy dict
        and flat prediction / ground-truth lists for a confusion matrix.
        """
        classes = list(dict_class_movie_test.keys())
        pred_true = []
        pred = []
        num_classes = len(classes)
        dict_measures = {'acc': {}, 'precision': {}}
        dict_class_measures = {}
        for c in classes:
            class_movies = list(dict_class_movie_test[c].keys())
            count = 0
            for m in class_movies:
                # Candidate edges: the item paired with every class.
                edges = np.array([np.repeat(m, num_classes), classes]).T
                class_test = np.zeros(shape=(len(edges), 1))
                class_test = self.edges_distance(edges)
                # NOTE(review): argmax picks the *largest* distance; the sibling
                # evaluate_for_hist() picks the smallest — confirm which is intended.
                pred_index = np.argmax(class_test)
                prediction = edges[pred_index]
                real_edge = list(dict_class_movie_test[c][m])
                pred_true.append(c)
                if prediction[0][0] == 'c':
                    pred.append(prediction[0])
                else:
                    pred.append(prediction[1])
                # Count a hit when the predicted edge matches the real one in
                # either orientation.
                if prediction[0] == real_edge[0]:
                    if prediction[1] == real_edge[1]:
                        count += 1
                elif prediction[1] == real_edge[0]:
                    if prediction[0] == real_edge[1]:
                        count += 1
            accuracy = count / len(class_movies)
            dict_measures['acc'] = accuracy
            dict_class_measures[c] = dict_measures.copy()
        with open(os.path.join(self.args.data_name, f'dict_class_measures_{self.embedding}_{self.args.norm}.pkl'),
                  'wb') as handle:
            pickle.dump(dict_class_measures, handle, protocol=3)
        return dict_class_measures, pred, pred_true
    def evaluate_for_hist(self, dict_class_movie_test):
        """Like evaluate(), but gives unseen classes a distance advantage
        (args.unseen_weight_advantage) and records where the true unseen class
        ranks among all candidates (for a histogram).

        Assumes the first seen_percentage fraction of `classes` are the seen
        ones (see classes_flag below) — TODO confirm ordering matches.
        """
        classes = list(dict_class_movie_test.keys())
        hist_real_unseen_pred = np.zeros(len(classes))
        hist_real_unseen_first_unseen = np.zeros(len(classes))
        pred_true = []
        pred = []
        num_classes = len(classes)
        # 0 marks a seen class, 1 an unseen class, by position in `classes`.
        seen_flag = np.zeros(int(self.args.seen_percentage*len(classes)))
        unseen_flag = np.ones(len(classes)-int(self.args.seen_percentage*len(classes)))
        classes_flag = np.concatenate((seen_flag, unseen_flag))
        dict_measures = {'acc': {}, 'precision': {}}
        dict_class_measures = {}
        for i, c in enumerate(classes):
            class_movies = list(dict_class_movie_test[c].keys())
            count = 0
            for m in class_movies:
                edges = np.array([np.repeat(m, num_classes), classes]).T
                class_test = np.zeros(shape=(len(edges), 1))
                class_test = self.edges_distance(edges)
                try:
                    # Columns: distance, class name, seen/unseen flag.
                    class_norm_test = np.column_stack((np.column_stack((class_test, classes)), classes_flag))
                except:
                    # NOTE(review): debug leftover; on failure class_norm_test is undefined.
                    print('a')
                # Sort candidates by ascending distance (closest first).
                sorted_class_norm = class_norm_test[np.argsort(class_norm_test[:, 0])]
                sort_classes = sorted_class_norm.T[1]
                sort_norm = sorted_class_norm.T[0].astype(float)
                sort_classes_flag = sorted_class_norm.T[2].astype(float)
                prediction = np.array([m, sort_classes[0]])
                real_edge = list(dict_class_movie_test[c][m])
                pred_true.append(c)
                if i > int(self.args.seen_percentage*len(classes)):
                    # True class is unseen: record its rank in the sorted candidates.
                    place = np.where(sort_classes == c)[0][0]
                    hist_real_unseen_pred[place] += 1
                place = np.where(sort_classes_flag == 1)[0][0]
                # Prefer the best unseen class if its (discounted) distance beats
                # the overall best candidate.
                if self.args.unseen_weight_advantage*sort_norm[place] < sort_norm[0]:
                    pred.append(sort_classes[place])
                else:
                    pred.append(sort_classes[0])
                if prediction[0] == real_edge[0]:
                    if prediction[1] == real_edge[1]:
                        count += 1
                elif prediction[1] == real_edge[0]:
                    if prediction[0] == real_edge[1]:
                        count += 1
            accuracy = count / len(class_movies)
            dict_measures['acc'] = accuracy
            dict_class_measures[c] = dict_measures.copy()
        with open(os.path.join(self.args.data_name, f'dict_class_measures_{self.embedding}_{self.args.norm}.pkl'),
                  'wb') as handle:
            pickle.dump(dict_class_measures, handle, protocol=3)
        return dict_class_measures, pred, pred_true, hist_real_unseen_pred
    def hist_plot_for_unseen_dist_eval(self, distances):
        """Save a histogram of where the true unseen class ranked vs the prediction."""
        title = 'Histogram Of The Distance Between \n Unseen Label Norm And Predicted Norm'
        x_label = f'Distance, limit:{len(distances)}'
        y_label = 'Count'
        hist_plot(distances, title, x_label, y_label)
        plt.savefig(f'{self.args.data_name}/plots/hist_distance_real_unseen-prediction_'
                    f'{self.embedding}_{self.args.norm}_{int(100*self.args.seen_percentage)}_seen_percent')
    def confusion_matrix_maker(self, dict_class_measures, pred, pred_true):
        """Build the confusion matrix and overall / seen / unseen accuracies."""
        conf_matrix = confusion_matrix(pred_true, pred, labels=list(dict_class_measures.keys()))
        seen_true_count = 0
        seen_count = 0
        unseen_true_count = 0
        unseen_count = 0
        seen_number = int(self.args.seen_percentage * len(conf_matrix))
        classes = list(dict_class_measures.keys())
        seen_idx = []
        unseen_idx = []
        # Partition matrix rows by membership in the unseen-class dict.
        for i, c in enumerate(classes):
            if len(set([c]).intersection(set(self.dict_unseen_edges.keys()))) > 0:
                unseen_idx.append(i)
            else:
                seen_idx.append(i)
        for i in seen_idx:
            seen_true_count += conf_matrix[i][i]
            for j in range(len(classes)):
                seen_count += conf_matrix[i][j]
        for i in unseen_idx:
            unseen_true_count += conf_matrix[i][i]
            for j in range(len(conf_matrix)):
                unseen_count += conf_matrix[i][j]
        accuracy = (seen_true_count + unseen_true_count) / (seen_count + unseen_count)
        seen_accuracy = seen_true_count / seen_count
        unseen_accuracy = unseen_true_count / unseen_count
        print(f'accuracy all: {accuracy}')
        print(f'accuracy all seen: {seen_accuracy}')
        print(f'accuracy all unseen: {unseen_accuracy}')
        return accuracy, seen_accuracy, unseen_accuracy, conf_matrix
    def plot_confusion_matrix_all_classes(self, conf_matrix):
        """Save a heatmap of the full confusion matrix under <data_name>/plots."""
        plt.figure(0)
        title = f'Confusion Matrix, ZSL {self.args.data_name} \n' \
                f'{self.embedding} {self.args.norm} {int(100 * self.args.seen_percentage)} Percent Seen'
        x_title = f"True Labels {int(100 * self.args.seen_percentage)}/{100 - int(100 * self.args.seen_percentage)}" \
                  f" (seen/unseen)"
        y_title = f"Predicted Labels"
        plot_confusion_matrix(conf_matrix, title, x_title, y_title)
        plt.savefig(f'{self.args.data_name}/plots/confusion_matrix_{self.embedding}_{self.args.norm}'
                    f'_{int(100 * self.args.seen_percentage)}_seen_percent')
from dataclasses import dataclass
@dataclass
class InventoryItem:
    """Bundle of run-configuration values.

    The original declaration annotated `norm` twice; the duplicate is removed.
    (Re-annotation keeps the field's first position, so the constructor
    signature is unchanged.)
    """
    data_name: str
    threshold: float
    norm: str
    embedding: str
    false_per_true: str
def define_args(params):
    """Turn a grid-search parameter dict into an argparse Namespace plus edge weights.

    Returns (args, weights) where weights is
    np.array([weights_movie_movie, weights_movie_class], dtype=float).

    NOTE: parse_args() reads sys.argv, so unknown command-line arguments passed
    to the surrounding program will make this fail — TODO confirm this is only
    ever called from the grid scripts.
    """
    print(params)
    weights = np.array([params['weights_movie_movie'], params['weights_movie_class']]).astype(float)
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_name', default=params['data_name'])
    parser.add_argument('--threshold', default=params['threshold'])
    parser.add_argument('--norm', default=params['norma_types'])
    parser.add_argument('--embedding', default=params['embedding_type'])
    parser.add_argument('--false_per_true', default=10)
    parser.add_argument('--ratio', default=[0.8])
    parser.add_argument('--seen_percentage', default=float(params['seen_percentage']))
    parser.add_argument('--embedding_dimension', default=int(params['embedding_dimensions']))
    parser.add_argument('--unseen_weight_advantage', default=0.9)
    parser.add_argument('--graph_percentage', default=1)
    if params['data_name'] == 'awa2':
        # AwA2 additionally needs CNN/image-pipeline settings.
        parser.add_argument('--awa2_attributes_weight', default=params['awa2_attributes_weight'])
        # Local import keeps torch out of the non-AwA2 code path.
        import torch
        cuda = torch.cuda.is_available()
        parser.add_argument('--cnn', default='materials/resnet50-base.pth')
        if cuda:
            parser.add_argument('--gpu', default='0')
        else:
            # '-1' conventionally selects CPU-only execution downstream.
            parser.add_argument('--gpu', default='-1')
        parser.add_argument('--consider-trains', action='store_false')
        parser.add_argument('--output', default=None)
        parser.add_argument('--images_threshold', default=0.10)
    args = parser.parse_args()
    return args, weights
def obj_func_grid(params, specific_split=True, split=None):
    """Run one full ZSL experiment for a single grid configuration.

    Pipeline: build args -> import weighted graph -> split label edges ->
    embed the seen graph -> classify test/unseen items -> confusion matrix.

    Returns (accuracy, seen_accuracy, unseen_accuracy).
    """
    args, weights = define_args(params)
    # Fixed seed for reproducibility across grid cells.
    np.random.seed(0)
    graph_maker = GraphImporter(args)
    if args.data_name == 'our_imdb':
        weighted_graph = graph_maker.import_imdb_weighted_graph(weights)
    elif args.data_name == 'awa2':
        awa2_att_weight = params['awa2_attributes_weight']
        # AwA2 also yields its own seen/unseen split, overriding `split`.
        weighted_graph, split = graph_maker.import_awa2_graph(weights, specific_split, awa2_att_weight)
    else:
        raise ValueError(f"Wrong name of DataSet, {args.data_name}")
    edges_preparation = EdgesPreparation(weighted_graph, args, split)
    dict_train_true = edges_preparation.dict_train_edges
    dict_test_true = edges_preparation.dict_test_edges
    dict_unseen_edges = edges_preparation.dict_unseen_edges
    # Graph with test/unseen label edges removed (what the embedding may see).
    graph = edges_preparation.seen_graph()
    embeddings_maker = EmbeddingCreator(graph, args.embedding_dimension, args)
    if args.embedding == 'Node2Vec':
        dict_embeddings = embeddings_maker.create_node2vec_embeddings()
    elif args.embedding == 'Event2Vec':
        dict_embeddings = embeddings_maker.create_event2vec_embeddings()
    elif args.embedding == 'OGRE':
        initial_nodes = edges_preparation.ogre_initial_nodes(graph)
        dict_embeddings = embeddings_maker.create_ogre_embeddings(user_initial_nodes_choice=initial_nodes)
    else:
        raise ValueError(f"Wrong name of embedding, {args.embedding}")
    classifier = Classifier(dict_train_true, dict_test_true, dict_unseen_edges,
                            dict_embeddings, args.embedding, args)
    dict_class_movie_test = classifier.train()
    dict_class_measures_node2vec, pred, pred_true, hist_real_unseen_pred = classifier.evaluate_for_hist(dict_class_movie_test)
    accuracy, seen_accuracy, unseen_accuracy, conf_matrix = classifier.confusion_matrix_maker(
        dict_class_measures_node2vec, pred, pred_true)
    return accuracy, seen_accuracy, unseen_accuracy
def flatten_dict(d):
    """Flatten a nested dict into a single level, joining key paths with '.'.

    Example: {"a": {"b": 1}} -> {"a.b": 1}.
    """
    flat = {}
    for key, value in d.items():
        if isinstance(value, dict):
            for sub_key, sub_value in flatten_dict(value).items():
                flat[key + "." + sub_key] = sub_value
        else:
            flat[key] = value
    return flat
def config_to_str(config):
    """Render a (possibly nested) config dict as a row of strings ordered by HEADER.

    Missing columns are filled with "--".
    """
    flat = flatten_dict(config)
    row = []
    for column in HEADER:
        row.append(str(flat.get(column, "--")))
    return row
def run_grid(grid_params, res_dir, now):
    """Run obj_func_grid over every configuration in the grid and stream results to CSV.

    Parameters
    ----------
    grid_params : dict or str
        Either a dict of parameter lists, or a path to a JSON file with the same shape.
    res_dir : str
        Directory for the output CSV.
    now : str
        Timestamp-like suffix used in the CSV filename.

    One row per configuration is written, columns given by the module-level HEADER.
    """
    if not isinstance(grid_params, dict):
        # A path was given: load the parameter grid from JSON.  The context
        # manager fixes the original's leaked file handle (json.load(open(...))).
        with open(grid_params, "rt") as params_file:
            grid_params = json.load(params_file)
    res_filename = os.path.join(res_dir, f"{grid_params['data_name'][0]}_grid_{now}.csv")
    # 'with' guarantees the CSV is closed (and flushed) even if a run raises.
    with open(res_filename, "wt") as out:
        out.write(f"{','.join(HEADER)}\n")
        for config in grid(grid_params):
            param = {p: config[i] for i, p in enumerate(list(grid_params.keys()))}
            acc, seen_acc, unseen_acc = obj_func_grid(param)
            table_row = config_to_str(param)
            table_row[HEADER.index('acc')] = str(acc)
            table_row[HEADER.index('seen_acc')] = str(seen_acc)
            table_row[HEADER.index('unseen_acc')] = str(unseen_acc)
            out.write(f"{','.join(table_row)}\n")
def main():
    """Sequentially sweep the our_imdb parameter grid and plot seen/unseen accuracy."""
    seen_accuracies, unseen_accuracies = [], []
    # Each value is a list of candidate settings; grid() enumerates the product.
    parameters = {
        "data_name": ['our_imdb'],
        "embedding_type": ["Node2Vec"],
        "embedding_dimensions": [32, 64, 128, 256],
        "weights_movie_class": np.logspace(-2, 3, 6),
        "weights_movie_movie": np.logspace(-2, 3, 6),
        "norma_types": ['cosine'],
        "threshold": [0.3, 0.6, 0.9],
        "seen_percentage": [0.8],
        "awa2_attributes_weight": [100]
    }
    num = 0
    for param in grid(parameters):
        # grid() yields value tuples in the same order as the dict keys.
        dict_param = {p: param[i] for i, p in enumerate(list(parameters.keys()))}
        print(f'iteration number {num}')
        num += 1
        acc, seen_acc, unseen_acc = obj_func_grid(dict_param)
        seen_accuracies.append(seen_acc*100)
        unseen_accuracies.append(unseen_acc*100)
    dict_measures = {"unseen_accuracy": unseen_accuracies, "seen_accuracy": seen_accuracies}
    # NOTE(review): the x-axis here is parameters["seen_percentage"] (length 1)
    # while the accuracy lists have one entry per grid cell — confirm the plot
    # helper tolerates this mismatch.
    plots_2measures_vs_parameter(dict_measures, parameters["seen_percentage"], 'seen Percentage', 'our_imdb',
                                 'Zero Shot Learning', "Accuracy", parameters["norma_types"][0],
                                 parameters["embedding_type"][0])
if __name__ == '__main__':
    # Parallel grid sweep: one process per (movie-movie, movie-class) weight pair.
    res_dir = "C:\\Users\\kfirs\\lab\\Zero Shot Learning\\New-Graph-ZSL\\grid_results"
    now = "01_03_21"
    parameters = {
        "data_name": ['our_imdb'],
        "embedding_type": ["Node2Vec"],
        "embedding_dimensions": [32, 64, 128, 256],
        "weights_movie_class": np.logspace(-2, 3, 6),
        "weights_movie_movie": np.logspace(-2, 3, 6),
        "norma_types": ['cosine'],
        "threshold": [0.3, 0.6, 0.9],
        "seen_percentage": [0.8],
        "awa2_attributes_weight": [100]
    }
    processes = []
    parameters_by_procesess = []
    # Split the full grid into one sub-grid per weight pair (6 x 6 = 36 jobs).
    for w_m_m in parameters["weights_movie_movie"]:
        for w_m_c in parameters["weights_movie_class"]:
            param_by_parameters = parameters.copy()
            param_by_parameters["weights_movie_movie"] = [w_m_m]
            param_by_parameters["weights_movie_class"] = [w_m_c]
            parameters_by_procesess.append(param_by_parameters)
    # NOTE(review): run_grid derives its CSV name only from data_name and `now`,
    # so all 36 processes write to the *same* file concurrently — confirm this
    # is intended (last writer wins / interleaved rows are possible).
    for i in range(len(parameters_by_procesess)):
        proc = multiprocessing.Process(target=run_grid, args=(parameters_by_procesess[i], res_dir, now, ))
        processes.append(proc)
        proc.start()
    for p in processes:
        p.join()
| true | true |
1c2dfe42c5f11130ad1de80135a4af445e0dabd3 | 927 | py | Python | overwatch/database/zodbDatabaseFactory.py | ostr00000/OVERWATCH | ebf69402b9b1b9e3b92cb96f013692072c2c69f2 | [
"BSD-3-Clause"
] | null | null | null | overwatch/database/zodbDatabaseFactory.py | ostr00000/OVERWATCH | ebf69402b9b1b9e3b92cb96f013692072c2c69f2 | [
"BSD-3-Clause"
] | null | null | null | overwatch/database/zodbDatabaseFactory.py | ostr00000/OVERWATCH | ebf69402b9b1b9e3b92cb96f013692072c2c69f2 | [
"BSD-3-Clause"
] | null | null | null | """
.. code-author: Mateusz Piwowarczyk <>, AGH University of Science and Technology
"""
import zodburi
import ZODB
from overwatch.database.databaseFactory import DatabaseFactory
from overwatch.database.zodbDatabase import ZodbDatabase
class ZodbDatabaseFactory(DatabaseFactory):
    """Factory that opens a ZODB database resolved from a zodburi connection string."""
    def __init__(self, databaseLocation):
        # databaseLocation: a zodburi URI (e.g. "file://...", "zeo://...").
        DatabaseFactory.__init__(self)
        self.databaseLocation = databaseLocation
        self.instance = None
    def initializeDB(self):
        """Resolve the URI, open a connection, and wrap its root in a ZodbDatabase."""
        # Get the database
        # See: http://docs.pylonsproject.org/projects/zodburi/en/latest/
        # storage = ZODB.FileStorage.FileStorage(os.path.join(dirPrefix,"overwatch.fs"))
        storage_factory, dbArgs = zodburi.resolve_uri(self.databaseLocation)
        storage = storage_factory()
        db = ZODB.DB(storage, **dbArgs)
        connection = db.open()
        dbRoot = connection.root()
        # NOTE(review): neither db nor connection is ever closed here — the
        # returned ZodbDatabase owns the connection's lifetime; confirm it closes it.
        return ZodbDatabase(dbRoot, connection)
| 34.333333 | 88 | 0.713053 | import zodburi
import ZODB
from overwatch.database.databaseFactory import DatabaseFactory
from overwatch.database.zodbDatabase import ZodbDatabase
class ZodbDatabaseFactory(DatabaseFactory):
def __init__(self, databaseLocation):
DatabaseFactory.__init__(self)
self.databaseLocation = databaseLocation
self.instance = None
def initializeDB(self):
storage_factory, dbArgs = zodburi.resolve_uri(self.databaseLocation)
storage = storage_factory()
db = ZODB.DB(storage, **dbArgs)
connection = db.open()
dbRoot = connection.root()
return ZodbDatabase(dbRoot, connection)
| true | true |
1c2dfee0ea5f54665c78d07cdd3c70525819729b | 512 | py | Python | src/appointments_has_payment/models.py | TheCleverlaure/sicco-web | 8e734c9bfa9c99056b6abd5276b65b1e4bf21e23 | [
"bzip2-1.0.6"
] | null | null | null | src/appointments_has_payment/models.py | TheCleverlaure/sicco-web | 8e734c9bfa9c99056b6abd5276b65b1e4bf21e23 | [
"bzip2-1.0.6"
] | null | null | null | src/appointments_has_payment/models.py | TheCleverlaure/sicco-web | 8e734c9bfa9c99056b6abd5276b65b1e4bf21e23 | [
"bzip2-1.0.6"
] | null | null | null | from django.db import models
from appointments.models import Citas
from payment.models import Pago
class CitaTienePago(models.Model):
cod_cita = models.ForeignKey(Citas, models.CASCADE, db_column='Cod_Cita', primary_key=True) # Field name made lowercase.
cod_pago = models.ForeignKey(Pago, models.CASCADE, db_column='Cod_Pago', unique=True) # Field name made lowercase.
class Meta:
managed = True
db_table = 'cita_tiene_pago'
unique_together = (('cod_cita', 'cod_pago'),)
| 39.384615 | 125 | 0.728516 | from django.db import models
from appointments.models import Citas
from payment.models import Pago
class CitaTienePago(models.Model):
cod_cita = models.ForeignKey(Citas, models.CASCADE, db_column='Cod_Cita', primary_key=True)
cod_pago = models.ForeignKey(Pago, models.CASCADE, db_column='Cod_Pago', unique=True)
class Meta:
managed = True
db_table = 'cita_tiene_pago'
unique_together = (('cod_cita', 'cod_pago'),)
| true | true |
1c2dfef350f1f5adf949de096b36c9f0f279a120 | 2,696 | py | Python | addons/io_scene_swbf_msh/msh_model_triangle_strips.py | WHSnyder/SWBF-msh-Blender-Export | b56fa79a1967cdfc8c9b7928a2e5c2f7e940b289 | [
"Apache-2.0"
] | 7 | 2019-12-27T04:07:56.000Z | 2021-11-14T22:04:32.000Z | addons/io_scene_swbf_msh/msh_model_triangle_strips.py | WHSnyder/SWBF-msh-Blender-Export | b56fa79a1967cdfc8c9b7928a2e5c2f7e940b289 | [
"Apache-2.0"
] | null | null | null | addons/io_scene_swbf_msh/msh_model_triangle_strips.py | WHSnyder/SWBF-msh-Blender-Export | b56fa79a1967cdfc8c9b7928a2e5c2f7e940b289 | [
"Apache-2.0"
] | 3 | 2019-11-23T09:07:21.000Z | 2020-10-06T16:22:49.000Z | """ Contains triangle strip generation functions for GeometrySegment. """
from typing import List, Tuple
from copy import deepcopy
from .msh_model import *
def create_models_triangle_strips(models: List[Model]) -> List[Model]:
    """ Fill in the triangle strips for every geometry segment of each model.

    Mutates the segments in place and returns the same list. """
    for current_model in models:
        if current_model.geometry is None:
            continue
        for geometry_segment in current_model.geometry:
            geometry_segment.triangle_strips = create_triangle_strips(geometry_segment.triangles)
    return models
def create_triangle_strips(segment_triangles: List[List[int]]) -> List[List[int]]:
    """ Create the triangle strips for a list of triangles.

    Greedy: each strip starts at the first remaining triangle and is extended
    while some remaining triangle shares the strip's current head edge.
    The input list is not modified (a deep copy is consumed instead). """
    triangles = deepcopy(segment_triangles)
    strips: List[List[int]] = []
    # The general idea here is we loop based off if 'triangles' is empty or not.
    #
    # For each iteration of the loop we create a new strip starting from the first
    # triangle still in 'triangles'.
    #
    # Then we loop, attempting to find a triangle to add the strip each time. If we
    # find one then we continue the loop, else we break out of it and append the
    # created strip.
    def create_strip() -> List[int]:
        # Seed the strip with the three vertices of the first remaining triangle,
        # removing it from the pool.
        strip: List[int] = [triangles[0][0],
                            triangles[0][1],
                            triangles[0][2]]
        # The "head" is the most recent edge; the next triangle must share it.
        strip_head: Tuple[int, int] = (strip[1], strip[2])
        triangles.remove(triangles[0])
        while True:
            def find_next_vertex():
                nonlocal triangles
                # Edge winding alternates with strip parity so face orientation
                # is preserved across the strip.
                even: bool = len(strip) % 2 == 0
                for tri, edge, last_vertex in iterate_triangle_edges_last_vertex(triangles, even):
                    if edge == strip_head:
                        triangles.remove(tri)
                        return last_vertex
                return None
            # None when no remaining triangle shares the head edge.
            next_vertex: int = find_next_vertex()
            if next_vertex is None:
                break
            strip.append(next_vertex)
            strip_head = (strip_head[1], next_vertex)
        return strip
    while triangles:
        strips.append(create_strip())
    return strips
def iterate_triangle_edges_last_vertex(triangles: List[List[int]], even: bool):
    """ Generator over the edges of each triangle in a list.

    Yields (triangle, edge, last_vertex) triples; the edge direction is
    flipped when `even` is False so strip winding order is preserved. """
    for triangle in triangles:
        v0, v1, v2 = triangle[0], triangle[1], triangle[2]
        if even:
            yield triangle, (v0, v1), v2
            yield triangle, (v0, v2), v1
            yield triangle, (v1, v2), v0
        else:
            yield triangle, (v1, v0), v2
            yield triangle, (v2, v0), v1
            yield triangle, (v2, v1), v0
| 32.878049 | 98 | 0.592359 |
from typing import List, Tuple
from copy import deepcopy
from .msh_model import *
def create_models_triangle_strips(models: List[Model]) -> List[Model]:
for model in models:
if model.geometry is not None:
for segment in model.geometry:
segment.triangle_strips = create_triangle_strips(segment.triangles)
return models
def create_triangle_strips(segment_triangles: List[List[int]]) -> List[List[int]]:
triangles = deepcopy(segment_triangles)
strips: List[List[int]] = []
def create_strip() -> List[int]:
strip: List[int] = [triangles[0][0],
triangles[0][1],
triangles[0][2]]
strip_head: Tuple[int, int] = (strip[1], strip[2])
triangles.remove(triangles[0])
while True:
def find_next_vertex():
nonlocal triangles
even: bool = len(strip) % 2 == 0
for tri, edge, last_vertex in iterate_triangle_edges_last_vertex(triangles, even):
if edge == strip_head:
triangles.remove(tri)
return last_vertex
return None
next_vertex: int = find_next_vertex()
if next_vertex is None:
break
strip.append(next_vertex)
strip_head = (strip_head[1], next_vertex)
return strip
while triangles:
strips.append(create_strip())
return strips
def iterate_triangle_edges_last_vertex(triangles: List[List[int]], even: bool):
if even:
for tri in triangles:
yield tri, (tri[0], tri[1]), tri[2]
yield tri, (tri[0], tri[2]), tri[1]
yield tri, (tri[1], tri[2]), tri[0]
else:
for tri in triangles:
yield tri, (tri[1], tri[0]), tri[2]
yield tri, (tri[2], tri[0]), tri[1]
yield tri, (tri[2], tri[1]), tri[0]
| true | true |
1c2e006454f1729f6cbb17222e43a1991f558056 | 852 | py | Python | ore_combinators/combinators/string.py | kraglik/ore | fca49bb8cd46bcdf3a6c8cde65cf6aed9a0bd741 | [
"MIT"
] | 1 | 2021-06-09T13:45:47.000Z | 2021-06-09T13:45:47.000Z | ore_combinators/combinators/string.py | kraglik/ore | fca49bb8cd46bcdf3a6c8cde65cf6aed9a0bd741 | [
"MIT"
] | null | null | null | ore_combinators/combinators/string.py | kraglik/ore | fca49bb8cd46bcdf3a6c8cde65cf6aed9a0bd741 | [
"MIT"
] | null | null | null | from typing import Tuple, Any
from ore_combinators.combinator import combinator
from ore_combinators.parser_state import ParserState
from ore_combinators.result import Result
from ore_combinators.error import ParserError, EndOfFileError
class string(combinator):  # noqa
    """Combinator that matches an exact literal string.

    On success the matched string and the advanced parser state are returned;
    on mismatch a ParserError (or EndOfFileError at end of input) is raised,
    always reporting the position where the match started.
    """
    def __init__(self, s: str):
        # The literal to match, character by character.
        self._string = s
    def __call__(self, state: ParserState) -> Tuple[Any, ParserState]:
        # Remember where matching began so errors point at the start of the literal.
        initial_state = state
        for char in self._string:
            if state.is_at_end():
                raise EndOfFileError(position=initial_state.position)
            if char != state.symbol:
                raise ParserError(
                    message="String mismatch",
                    position=initial_state.position
                )
            state = state.next()
        return Result.make_value(self._string, state)
from ore_combinators.combinator import combinator
from ore_combinators.parser_state import ParserState
from ore_combinators.result import Result
from ore_combinators.error import ParserError, EndOfFileError
class string(combinator):
def __init__(self, s: str):
self._string = s
def __call__(self, state: ParserState) -> Tuple[Any, ParserState]:
initial_state = state
for char in self._string:
if state.is_at_end():
raise EndOfFileError(position=initial_state.position)
if char != state.symbol:
raise ParserError(
message="String mismatch",
position=initial_state.position
)
state = state.next()
return Result.make_value(self._string, state)
| true | true |
1c2e015a72b7b341f0a82c3c4414419aa6b738bc | 9,328 | py | Python | healthyForce/MTLModels.py | HypnosPy/HypnosPy | 28b17d07ee78f7714bbbbd66f6253764addf9d94 | [
"MIT"
] | 4 | 2022-01-02T18:40:57.000Z | 2022-02-17T12:59:57.000Z | healthyForce/MTLModels.py | ippozuelo/HypnosPy | 28b17d07ee78f7714bbbbd66f6253764addf9d94 | [
"MIT"
] | 2 | 2020-11-11T07:13:56.000Z | 2020-11-11T07:38:54.000Z | healthyForce/MTLModels.py | ippozuelo/HypnosPy | 28b17d07ee78f7714bbbbd66f6253764addf9d94 | [
"MIT"
] | 2 | 2020-11-24T22:46:31.000Z | 2021-02-05T16:43:12.000Z | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
class MTL:
def __init__(self):
pass
def aggregate_losses(self, losses):
pass
def adjust_after_validation(self, losses, epoch):
pass
class MTLRandom(MTL):
def __init__(self, ntasks, verbose=1):
self.ntasks = ntasks
def aggregate_losses(self, losses):
return losses[np.random.randint(self.ntasks)]
def adjust_after_validation(self, losses, epoch):
return
class MTLUncertanty(MTL):
def __init__(self, ntasks):
super(MTLUncertanty, self).__init__()
self.ntasks = ntasks
# We have to be set in the Lightning Module
#self.logsigma = nn.Parameter(torch.zeros(self.ntasks))
self.logsigma = None
def aggregate_losses(self, losses):
"""
Input: a list/set/dict of losses
Output: a single value
"""
total_loss = 0
for i, l in enumerate(losses):
total_loss = total_loss + (l / (2. * torch.exp(self.logsigma[i])) + (self.logsigma[i]/2.))
return total_loss
def adjust_after_validation(self, losses, epoch):
return
class MTLEqual(MTL):
def __init__(self, ntasks):
super(MTLEqual, self).__init__()
self.ntasks = ntasks
def aggregate_losses(self, losses):
return sum(losses) / self.ntasks
def adjust_after_validation(self, losses, epoch):
return
class MTLDWA(MTL):
def __init__(self, ntasks, algorithm, temperature=2, min_epochs_to_start=2, verbose=1):
super(MTLDWA, self).__init__()
self.ntasks = ntasks
self.lambda_weight = torch.ones(self.ntasks)
self.loss_t_1 = torch.ones(self.ntasks)
self.loss_t_2 = torch.ones(self.ntasks)
self.temperature = torch.ones(1) * temperature
self.min_epochs_to_start = min_epochs_to_start
self.algorithm = algorithm
# Variables for ewa and trend version of DWA
self.verbose = verbose
self.max_epochs = 100
self.history = torch.zeros(self.ntasks, self.max_epochs)
self.winsize = 3
#data = np.array([0,1,200,300,-10,20,10,-20,10,-20,1000])
#ewma(data, 5, 0.9), trend(data[5:10])
def aggregate_losses(self, losses):
total_loss = 0
#self.lambda_weight = self.lambda_weight.type_as(losses[0])
for i, l in enumerate(losses):
total_loss += (self.lambda_weight[i] * l)
return total_loss / self.ntasks
def adjust_after_validation(self, losses, epoch):
for i in range(self.ntasks):
self.loss_t_2[i] = self.loss_t_1[i]
self.loss_t_1[i] = losses[i].item()
if epoch >= self.min_epochs_to_start:
if self.algorithm != "default":
saved_from_epoch = epoch - self.min_epochs_to_start
w = {}
denominator = 0
for i in range(self.ntasks):
if self.algorithm == "default":
w[i] = min(80., self.loss_t_1[i] / self.loss_t_2[i])
else:
self.history[i][saved_from_epoch] = min(80., self.loss_t_1[i] / self.loss_t_2[i])
if self.algorithm == "trend":
w[i] = trend(self.history[i][max(0, saved_from_epoch-self.winsize):saved_from_epoch])
print("values:", self.history[i][max(0, saved_from_epoch-self.winsize):saved_from_epoch])
elif self.algorithm == "ewma":
# Todo: need to implement a torch version of it
w[i] = trend(self.history[i][max(0, saved_from_epoch-self.winsize):saved_from_epoch])
if self.verbose > 0:
print("w(%d) = %.4f" % (i, w[i]))
denominator += torch.exp(w[i]/self.temperature)
for i in range(self.ntasks):
numerator = self.ntasks * torch.exp(w[i]/self.temperature)
self.lambda_weight[i] = numerator / denominator
if self.verbose > 0:
for i in range(self.ntasks):
print("Lambda (%d) = %.4f" % (i, self.lambda_weight[i]))
class MTLBandit(MTL):
    """
    Multi-armed-bandit loss weighting: each task is an arm whose reward is
    derived from its last two validation losses; UCB or discounted UCB selects
    which task(s) to emphasize in the next epochs.

    NOTE(review): the "ducb" branch calls get_t_minus_s(), which is not defined
    in this module - confirm it is provided elsewhere in the project.
    """

    def __init__(self, ntasks,
                 # "bandit_alg_weight_assignment"
                 # algorithm: [ucb, ducb]
                 # reward method: [l1/l2, l2/l1, l2-l1]
                 # loss_assignment: ["one", "priority", "all"]
                 strategy="bandit_ucb_l1l2_one",
                 min_epochs_to_start=2, verbose=1):
        super(MTLBandit, self).__init__()
        self.ntasks = ntasks
        # Strategy string encodes: bandit algorithm, reward method, loss assignment.
        self.bandit_alg = strategy.split("_")[1]
        self.bandit_reward_method = strategy.split("_")[2]
        self.bandit_loss_assignment = strategy.split("_")[3]
        self.lambda_weight = torch.ones(self.ntasks)
        self.loss_t_1 = torch.ones(self.ntasks)
        self.loss_t_2 = torch.ones(self.ntasks)
        self.max_epochs = 100
        self.current_weight = torch.zeros(self.ntasks)
        # Per-epoch reward and selection history for each arm/task.
        self.reward = torch.zeros(self.max_epochs, self.ntasks)
        self.counts = torch.zeros(self.ntasks)
        self.chosen = torch.zeros(self.max_epochs, self.ntasks)
        # Discount factors used by discounted UCB.
        self.gammas = torch.zeros(self.max_epochs) + 0.99
        self.min_epochs_to_start = min_epochs_to_start
        self.verbose = verbose

    def aggregate_losses(self, losses):
        """Weighted mean of the task losses using the current lambda weights."""
        total_loss = 0
        for i, l in enumerate(losses):
            total_loss += ((self.lambda_weight[i] * l) / self.ntasks)
        return total_loss

    def adjust_after_validation(self, losses, epoch):
        """Update arm rewards, run the bandit selection and reassign lambdas."""
        print("Current epoch:", epoch)
        selected_task_i = -1
        for i in range(self.ntasks):
            self.loss_t_2[i] = self.loss_t_1[i]
            self.loss_t_1[i] = losses[i].item()
            # Reward derived from the last two validation losses, clipped at 80.
            if self.bandit_reward_method == "l1l2":
                self.reward[epoch][i] = min(80., self.loss_t_1[i] / self.loss_t_2[i])
            elif self.bandit_reward_method == "l2l1":
                self.reward[epoch][i] = min(80., self.loss_t_2[i] / self.loss_t_1[i])
            elif self.bandit_reward_method == "l2-l1":
                self.reward[epoch][i] = min(80., self.loss_t_2[i] - self.loss_t_1[i])

        if epoch >= self.min_epochs_to_start:
            if self.bandit_alg == "ducb":
                t_minus_s = get_t_minus_s(self.max_epochs, epoch)
                discount = self.gammas ** t_minus_s
                n_t_gamma = 0
                for i in range(self.ntasks):
                    n_t_gamma += (discount * self.chosen[:, i]).sum()

            # TODO: I could replace this 'for' by a vectorized operation.
            for i in range(self.ntasks):
                # UBC1
                if self.bandit_alg == "ucb":
                    avg_reward = (self.chosen[:, i] * self.reward[:, i]).sum() / self.chosen[:, i].sum()
                    padding = np.sqrt(2.0 * np.log(epoch+1) / (1 + self.counts[i]))
                    self.current_weight[i] = avg_reward + padding
                # discounted UBC -- very inefficient. Needs improvement
                elif self.bandit_alg == "ducb":
                    N_t_gamma = (discount * self.chosen[:, i]).sum()
                    avg_reward = (discount * self.reward[:, i]).sum() / N_t_gamma
                    padding = 2.0 * np.sqrt(np.log(n_t_gamma)/N_t_gamma)
                    self.current_weight[i] = avg_reward + padding
                else:
                    print("Unkonwn bandit algorithm %s. Options are 'ubc' and 'ducb'" % (self.bandit_alg))
                if self.verbose > 0:
                    print("Current Reward(%d): %.3f (%.3f + %.3f)" % (i,
                                                                      self.current_weight[i],
                                                                      avg_reward,
                                                                      padding
                                                                      )
                          )
            # Pick the arm with the highest upper-confidence score.
            selected_task_i = torch.argmax(self.current_weight).item()
            self.counts[selected_task_i] += 1
            self.chosen[epoch][selected_task_i] = 1

            if self.bandit_loss_assignment == "all":
                # Every task keeps its confidence score as weight.
                for x in range(self.ntasks):
                    self.lambda_weight[x] = self.current_weight[x]
            elif self.bandit_loss_assignment in ["one", "priority"]:
                # "one": only the winner counts; "priority": others at half weight.
                self.lambda_weight[selected_task_i] = 1
                for task_j in range(self.ntasks):
                    if task_j != selected_task_i:
                        if self.bandit_loss_assignment == "priority":
                            self.lambda_weight[task_j] = 0.5
                        else:
                            self.lambda_weight[task_j] = 0.0
        else:
            # In case the algorithm has not started yet, we are "choosing" all arms
            for x in range(self.ntasks):
                self.chosen[epoch][x] = 1

        if self.verbose > 0:
            print("Selected Task:", selected_task_i)
            for i in range(self.ntasks):
                print("W(%d): %.3f, Counts(%d): %d" % (i, self.current_weight[i], i, self.counts[i]))
            for i in range(self.ntasks):
                print("Lambdas (%d) = %.4f" % (i, self.lambda_weight[i]))
| 35.603053 | 113 | 0.547491 | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
class MTL:
def __init__(self):
pass
def aggregate_losses(self, losses):
pass
def adjust_after_validation(self, losses, epoch):
pass
class MTLRandom(MTL):
def __init__(self, ntasks, verbose=1):
self.ntasks = ntasks
def aggregate_losses(self, losses):
return losses[np.random.randint(self.ntasks)]
def adjust_after_validation(self, losses, epoch):
return
class MTLUncertanty(MTL):
def __init__(self, ntasks):
super(MTLUncertanty, self).__init__()
self.ntasks = ntasks
self.logsigma = None
def aggregate_losses(self, losses):
total_loss = 0
for i, l in enumerate(losses):
total_loss = total_loss + (l / (2. * torch.exp(self.logsigma[i])) + (self.logsigma[i]/2.))
return total_loss
def adjust_after_validation(self, losses, epoch):
return
class MTLEqual(MTL):
def __init__(self, ntasks):
super(MTLEqual, self).__init__()
self.ntasks = ntasks
def aggregate_losses(self, losses):
return sum(losses) / self.ntasks
def adjust_after_validation(self, losses, epoch):
return
class MTLDWA(MTL):
def __init__(self, ntasks, algorithm, temperature=2, min_epochs_to_start=2, verbose=1):
super(MTLDWA, self).__init__()
self.ntasks = ntasks
self.lambda_weight = torch.ones(self.ntasks)
self.loss_t_1 = torch.ones(self.ntasks)
self.loss_t_2 = torch.ones(self.ntasks)
self.temperature = torch.ones(1) * temperature
self.min_epochs_to_start = min_epochs_to_start
self.algorithm = algorithm
self.verbose = verbose
self.max_epochs = 100
self.history = torch.zeros(self.ntasks, self.max_epochs)
self.winsize = 3
def aggregate_losses(self, losses):
total_loss = 0
for i, l in enumerate(losses):
total_loss += (self.lambda_weight[i] * l)
return total_loss / self.ntasks
def adjust_after_validation(self, losses, epoch):
for i in range(self.ntasks):
self.loss_t_2[i] = self.loss_t_1[i]
self.loss_t_1[i] = losses[i].item()
if epoch >= self.min_epochs_to_start:
if self.algorithm != "default":
saved_from_epoch = epoch - self.min_epochs_to_start
w = {}
denominator = 0
for i in range(self.ntasks):
if self.algorithm == "default":
w[i] = min(80., self.loss_t_1[i] / self.loss_t_2[i])
else:
self.history[i][saved_from_epoch] = min(80., self.loss_t_1[i] / self.loss_t_2[i])
if self.algorithm == "trend":
w[i] = trend(self.history[i][max(0, saved_from_epoch-self.winsize):saved_from_epoch])
print("values:", self.history[i][max(0, saved_from_epoch-self.winsize):saved_from_epoch])
elif self.algorithm == "ewma":
w[i] = trend(self.history[i][max(0, saved_from_epoch-self.winsize):saved_from_epoch])
if self.verbose > 0:
print("w(%d) = %.4f" % (i, w[i]))
denominator += torch.exp(w[i]/self.temperature)
for i in range(self.ntasks):
numerator = self.ntasks * torch.exp(w[i]/self.temperature)
self.lambda_weight[i] = numerator / denominator
if self.verbose > 0:
for i in range(self.ntasks):
print("Lambda (%d) = %.4f" % (i, self.lambda_weight[i]))
class MTLBandit(MTL):
def __init__(self, ntasks,
strategy="bandit_ucb_l1l2_one",
min_epochs_to_start=2, verbose=1):
super(MTLBandit, self).__init__()
self.ntasks = ntasks
self.bandit_alg = strategy.split("_")[1]
self.bandit_reward_method = strategy.split("_")[2]
self.bandit_loss_assignment = strategy.split("_")[3]
self.lambda_weight = torch.ones(self.ntasks)
self.loss_t_1 = torch.ones(self.ntasks)
self.loss_t_2 = torch.ones(self.ntasks)
self.max_epochs = 100
self.current_weight = torch.zeros(self.ntasks)
self.reward = torch.zeros(self.max_epochs, self.ntasks)
self.counts = torch.zeros(self.ntasks)
self.chosen = torch.zeros(self.max_epochs, self.ntasks)
self.gammas = torch.zeros(self.max_epochs) + 0.99
self.min_epochs_to_start = min_epochs_to_start
self.verbose = verbose
def aggregate_losses(self, losses):
total_loss = 0
for i, l in enumerate(losses):
total_loss += ((self.lambda_weight[i] * l) / self.ntasks)
return total_loss
def adjust_after_validation(self, losses, epoch):
print("Current epoch:", epoch)
selected_task_i = -1
for i in range(self.ntasks):
self.loss_t_2[i] = self.loss_t_1[i]
self.loss_t_1[i] = losses[i].item()
if self.bandit_reward_method == "l1l2":
self.reward[epoch][i] = min(80., self.loss_t_1[i] / self.loss_t_2[i])
elif self.bandit_reward_method == "l2l1":
self.reward[epoch][i] = min(80., self.loss_t_2[i] / self.loss_t_1[i])
elif self.bandit_reward_method == "l2-l1":
self.reward[epoch][i] = min(80., self.loss_t_2[i] - self.loss_t_1[i])
if epoch >= self.min_epochs_to_start:
if self.bandit_alg == "ducb":
t_minus_s = get_t_minus_s(self.max_epochs, epoch)
discount = self.gammas ** t_minus_s
n_t_gamma = 0
for i in range(self.ntasks):
n_t_gamma += (discount * self.chosen[:, i]).sum()
for i in range(self.ntasks):
if self.bandit_alg == "ucb":
avg_reward = (self.chosen[:, i] * self.reward[:, i]).sum() / self.chosen[:, i].sum()
padding = np.sqrt(2.0 * np.log(epoch+1) / (1 + self.counts[i]))
self.current_weight[i] = avg_reward + padding
elif self.bandit_alg == "ducb":
N_t_gamma = (discount * self.chosen[:, i]).sum()
avg_reward = (discount * self.reward[:, i]).sum() / N_t_gamma
padding = 2.0 * np.sqrt(np.log(n_t_gamma)/N_t_gamma)
self.current_weight[i] = avg_reward + padding
else:
print("Unkonwn bandit algorithm %s. Options are 'ubc' and 'ducb'" % (self.bandit_alg))
if self.verbose > 0:
print("Current Reward(%d): %.3f (%.3f + %.3f)" % (i,
self.current_weight[i],
avg_reward,
padding
)
)
selected_task_i = torch.argmax(self.current_weight).item()
self.counts[selected_task_i] += 1
self.chosen[epoch][selected_task_i] = 1
if self.bandit_loss_assignment == "all":
for x in range(self.ntasks):
self.lambda_weight[x] = self.current_weight[x]
elif self.bandit_loss_assignment in ["one", "priority"]:
self.lambda_weight[selected_task_i] = 1
for task_j in range(self.ntasks):
if task_j != selected_task_i:
if self.bandit_loss_assignment == "priority":
self.lambda_weight[task_j] = 0.5
else:
self.lambda_weight[task_j] = 0.0
else:
for x in range(self.ntasks):
self.chosen[epoch][x] = 1
if self.verbose > 0:
print("Selected Task:", selected_task_i)
for i in range(self.ntasks):
print("W(%d): %.3f, Counts(%d): %d" % (i, self.current_weight[i], i, self.counts[i]))
for i in range(self.ntasks):
print("Lambdas (%d) = %.4f" % (i, self.lambda_weight[i]))
| true | true |
1c2e015f2562f36b943e25339f4dc74362fc5ebc | 25,303 | py | Python | source/code/handlers/execution_handler.py | awslabs/aws-ops-automator | 362abd0717b48ecca7f20d8985ae7d76f045daf3 | [
"Apache-2.0"
] | 94 | 2017-08-01T05:28:45.000Z | 2021-09-10T07:18:46.000Z | source/code/handlers/execution_handler.py | aws-solutions/aws-ops-automator | 362abd0717b48ecca7f20d8985ae7d76f045daf3 | [
"Apache-2.0"
] | 27 | 2018-02-15T17:14:09.000Z | 2021-04-27T11:28:42.000Z | source/code/handlers/execution_handler.py | awslabs/aws-ops-automator | 362abd0717b48ecca7f20d8985ae7d76f045daf3 | [
"Apache-2.0"
] | 50 | 2017-08-01T05:29:04.000Z | 2021-08-11T20:09:07.000Z | ######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import json
import os
import threading
import time
from datetime import datetime, timedelta
import actions
import handlers
import services
from boto_retry import get_client_with_retries
from handlers.task_tracking_table import TaskTrackingTable
from helpers import safe_dict, safe_json, full_stack
from metrics.anonymous_metrics import send_metrics_data, allow_send_metrics
from outputs import raise_exception
from outputs.queued_logger import QueuedLogger
# Log/error message templates and timing constants used by ExecutionHandler below.
# NOTE(review): this template has one placeholder but its caller passes two
# arguments (time used and task name) - verify the intended message text.
WARN_ADJUST_LAMBDA_MEMORY_SETTINGS_COMPLETION = "Adjust completion memory settings for task {}"
WARN_COMPLETION_CHECK_TIMEOUT = "Completion checking not completed after {} seconds"

# Seconds of remaining Lambda time reserved before the guard timers fire.
REMAINING_COMPLETION_CHECK = 15
EXECUTE_TIME_REMAINING = 20

ERR_EXECUTION_NOT_COMPLETED = "Execution not completed after {} seconds"
ERR_BUILDING_SUBJECT_FOR_LOG_STREAM = "Error building log subject for action class {}, {}"
ERR_EXECUTING_ACTION = "Error running executing logic for action: {}"
ERR_EXECUTING_COMPLETION_CHECK = "Error running task completion check method : {}"
ERR_EXECUTION_TASK = "Error execution of {} for task {}\n {}{}"
ERR_INVALID_ACTION = "Action {} is not a valid action for the execution handler"
WARN_METRICS_DATA = "Error processing or sending metrics data ({})"
ERR_READING_S3_RESOURCES = "Error reading action resources from bucket {}, key {} for task {}, {}"
ERR_TASK_TIMEOUT = "Timeout waiting for completion of task after {}."
ERR_TIMEOUT = "Adjust execution memory settings for task {} or check boto retries"

INF_ACTION = "Executing action {} ({}) for task {} with parameters\n{}"
INF_ACTION_NOT_COMPLETED = "Action not completed after {}, waiting for next completion check"
INF_ACTION_RESULT = "Action completed in {} seconds, result is {}"
INF_FINISH_EXEC = "=== Finished execution of step {} for task with id {} ==="
INF_LAMBDA_MEMORY = "Memory limit for lambda {} executing the action is {}MB"
INF_RULE_ENABLED = "Enabling CloudWatch Events Rule \"{}\""
INF_SIMULATION_MODE_NO_RULE_ENABLED = "Completion handling not enabled as handler is running in simulation mode"
INF_START_EXEC = "=== Start step {} for task with id {} ==="
INF_STARTED_AND_WAITING_FOR_COMPLETION = "Action started with result \n{}\n Task is waiting for completion"
INF_TASK_COMPLETED = "Action completion check result is {}\n Task completed after {}"
INF_SENDING_METRICS_DATA = "Sending metrics data is {}"

# Log stream name: task-subject-datetime-id
LOG_STREAM = "{}-{}-{}-{}"
class ExecutionHandler(object):
"""
Class to handle event to execute an action on a resource.
"""
    def __init__(self, event, context):
        """
        Initializes handler.
        :param event: Event to handle (a task tracking table item)
        :param context: Context if run within Lambda environment
        """
        self._context = context
        self._event = event
        # Core task identification pulled from the tracking-table item.
        self.action_id = self._event[handlers.TASK_TR_ID]
        self.task = self._event[handlers.TASK_TR_NAME]
        self.task_timezone = self._event.get(handlers.TASK_TR_TIMEZONE, None)
        self.has_completion = self._event[handlers.TASK_TR_HAS_COMPLETION]
        self.action_parameters = self._event.get(handlers.TASK_TR_PARAMETERS, {})
        self.dryrun = self._event.get(handlers.TASK_TR_DRYRUN)
        self.interval = self._event.get(handlers.TASK_TR_INTERVAL,None)
        self.metrics = self._event.get(handlers.TASK_TR_METRICS, False)
        self.debug = self._event.get(handlers.TASK_TR_DEBUG)
        self.started_at = int(self._event.get(handlers.TASK_TR_STARTED_TS, 0))
        self.start_result = self._event.get(handlers.TASK_TR_START_RESULT, None)
        # Boto session, assuming the cross-account role when one is set on the task.
        self.session = services.get_session(self._event.get(handlers.TASK_TR_ASSUMED_ROLE))
        self.stack_name = os.getenv(handlers.ENV_STACK_NAME)
        self.stack_id = os.getenv(handlers.ENV_STACK_ID)
        self.action = event[handlers.TASK_TR_ACTION]
        self.tagfilter = event.get(handlers.TASK_TR_TAGFILTER, "")
        self.action_properties = actions.get_action_properties(self.action)
        self.action_class = actions.get_action_class(self.action)
        self._stack_resources = None
        # Timeout in seconds; the stored value is in minutes, absent/"None" means no timeout (0).
        self.timeout = int(self._event[handlers.TASK_TR_TIMEOUT]) * 60 if self._event.get(handlers.TASK_TR_TIMEOUT, None) not in [
            None, "None"] else 0
        self.execution_log_stream = self._event.get(handlers.TASK_TR_EXECUTION_LOGSTREAM)
        self.assumed_role = self._event.get(handlers.TASK_TR_ASSUMED_ROLE, None)
        self.events = self._event.get(handlers.TASK_TR_EVENTS, {})
        # Events may be stored as a python-repr style string; normalize quotes and parse to a dict.
        if isinstance(self.events, str):
            self.events = json.loads(self._event.get(handlers.TASK_TR_EVENTS, "{}").replace("u'", '"').replace("'", '"'))
        # Lazily initialized collaborators; see the corresponding properties below.
        self._action_resources = None
        self._s3_client = None
        self._action_instance = None
        self._action_class = None
        self._action_arguments = None
        self._timer = None
        self._timeout_event = None
        self.__logger = None
        self.__action_tracking = None
@classmethod
def is_handling_request(cls, event, _):
"""
Tests if event is handled by this handler.
:param _:
:param event: Tested event
:return: True if the event is handled by this handler
"""
return event.get(handlers.HANDLER_EVENT_ACTION, "") in [handlers.HANDLER_ACTION_EXECUTE,
handlers.HANDLER_ACTION_TEST_COMPLETION]
    @property
    def _logger(self):
        """
        Lazily builds the queued logger. The log stream name is derived from the
        action's logging subject when the action class provides one; otherwise
        the stream name from the tracking item is used.
        """
        if self.__logger is None:
            # setup logging
            if self.execution_log_stream is None:
                if callable(getattr(self._action_class, "action_logging_subject", None)):
                    # noinspection PyBroadException
                    try:
                        action_subject = self._action_class.action_logging_subject(self._action_arguments,
                                                                                   self.action_parameters)
                        self.execution_log_stream = "{}-{}".format(self._event[handlers.TASK_TR_NAME], action_subject)
                    except Exception as ex:
                        # NOTE(review): this prints the raw tuple instead of a formatted
                        # message (missing .format(...)) - verify intended output.
                        print((ERR_BUILDING_SUBJECT_FOR_LOG_STREAM, str(self._action_class), ex))
                        action_subject = "unknown-"
                        self.execution_log_stream = LOG_STREAM.format(self._event[handlers.TASK_TR_NAME], action_subject,
                                                                      actions.log_stream_datetime(),
                                                                      self._action_arguments.get(handlers.TASK_TR_ID,"None"))
                else:
                    # No subject available; keep the stream name as-is (no-op assignment).
                    self.execution_log_stream = self.execution_log_stream
            self.__logger = QueuedLogger(logstream=self.execution_log_stream,
                                         buffersize=50 if self.debug else 20,
                                         context=self._context,
                                         debug=self.debug)
        return self.__logger
@property
def _action_tracking(self):
if self.__action_tracking is None:
self.__action_tracking = TaskTrackingTable(self._context, logger=self._logger)
return self.__action_tracking
@property
def s3_client(self):
if self._s3_client is None:
self._s3_client = get_client_with_retries("s3", ["get_object"])
return self._s3_client
    @property
    def action_resources(self):
        """
        Resources the action is executed on. Small resource sets are stored in
        the tracking item itself; larger sets are offloaded to the resource S3
        bucket (key "<action_id>.json") and fetched here on first access.
        :raises Exception: via raise_exception when the S3 object cannot be read
        """
        if self._action_resources is None:
            if not self._event.get(handlers.TASK_TR_S3_RESOURCES, False):
                # Resource data is stored inline in the tracking item.
                self._action_resources = handlers.get_item_resource_data(self._event, self._context)
            else:
                # Resource data was too large for the item and lives in the resource bucket.
                bucket = os.getenv(handlers.ENV_RESOURCE_BUCKET)
                key = self.action_id + ".json"
                try:
                    resp = self.s3_client.get_object_with_retries(Bucket=bucket, Key=key)
                    # Rehydrate the tracking item with the downloaded resource data.
                    self._event[handlers.TASK_TR_RESOURCES] = resp["Body"].read().decode('utf-8')
                    self._action_resources = handlers.get_item_resource_data(self._event, self._context)
                except Exception as ex:
                    raise_exception(ERR_READING_S3_RESOURCES, bucket, key, self.action_id, ex)
        return self._action_resources
    @property
    def stack_resources(self):
        """
        Reads the action stack resources
        :return: Stack resources for the action, keyed by the resource name from
            the action properties (class-name prefix stripped), each value holding
            LogicalResourceId, PhysicalResourceId and ResourceType
        """
        if self._stack_resources is None:
            self._stack_resources = {}

            # test if this action has additional stack resources
            resources = self.action_properties.get(actions.ACTION_STACK_RESOURCES, {})
            if resources:
                # name of the class
                class_name = self.action_properties[actions.ACTION_CLASS_NAME][0:-len("Action")]
                # actual resource names is name of class + name from class properties
                logical_resource_names = [class_name + resource_name for resource_name in resources]

                cfn = get_client_with_retries("cloudformation", ["list_stack_resources"], context=self._context)

                args = {"StackName": self.stack_id}
                while True:
                    # get the stack resources
                    cfn_resp = cfn.list_stack_resources_with_retries(**args)
                    for res in cfn_resp.get("StackResourceSummaries", []):

                        # actual name
                        logical_resource_id = res["LogicalResourceId"]

                        # test if this resource is an resource from the action properties
                        if logical_resource_id in logical_resource_names:
                            self._stack_resources[logical_resource_id[len(class_name):]] = {
                                i: res[i] for i in ["LogicalResourceId",
                                                    "PhysicalResourceId",
                                                    "ResourceType"]
                            }

                        # test if we've found the number of resources that we declared, in that case no need to read more
                        if len(list(self._stack_resources.keys())) == len(resources):
                            return self._stack_resources

                    # continuation if > 100 resources in stack
                    if "NextToken" in cfn_resp:
                        args["NextToken"] = cfn_resp["NextToken"]
                    else:
                        break
        return self._stack_resources
    def _handle_task_execution(self):
        """
        Executes the task's action, guarded by a timer that fires shortly before
        the Lambda deadline, and records the outcome in the tracking table.
        :return: safe_dict with task id, result status and timing information
        """

        def execute_timed_out():
            """
            Function is called when the handling of the request times out
            :return:
            """
            time_used = int(int(os.getenv(handlers.ENV_LAMBDA_TIMEOUT)) - self._context.get_remaining_time_in_millis() / 1000)
            self._logger.error(ERR_EXECUTION_NOT_COMPLETED, time_used)

            if self.action_properties.get(actions.ACTION_EXECUTE_SIZE, None) is not None:
                self._logger.error(ERR_TIMEOUT, self.task)
            self._timeout_event.set()
            self._logger.flush()
            self._timer.cancel()

        def handle_metrics(result):
            # Forwards anonymous metrics data from the action result when sending is enabled.
            self._logger.info(INF_SENDING_METRICS_DATA, "enabled" if allow_send_metrics() else "disabled")
            if allow_send_metrics():
                try:
                    result_data = result if isinstance(result, dict) else json.loads(result)
                    if actions.METRICS_DATA in result_data:
                        send_metrics_data(metrics_data=result_data[actions.METRICS_DATA], logger=self._logger)
                except Exception as ex:
                    # Metrics are best-effort; never fail the task because of them.
                    self._logger.warning(WARN_METRICS_DATA, str(ex))

        self._logger.info(INF_ACTION, self.action, self.action_id, self.task, safe_json(self.action_parameters, indent=3))
        if not handlers.running_local(self._context):
            self._logger.info(INF_LAMBDA_MEMORY, self._context.function_name, self._context.memory_limit_in_mb)

        self._logger.debug("Setting task state to {}", handlers.STATUS_STARTED)
        self._action_tracking.update_task(self.action_id, self.task, task_metrics=self.metrics, status=handlers.STATUS_STARTED)

        start = time.time()

        return_data = {
            "task": self.task,
            "action": self.action,
            "id": self.action_id,
            "dryrun": self.dryrun,
        }

        if self._context is not None:
            # Reserve EXECUTE_TIME_REMAINING seconds before the Lambda hard deadline.
            execution_time_left = (self._context.get_remaining_time_in_millis() / 1000.00) - EXECUTE_TIME_REMAINING
            self._timer = threading.Timer(execution_time_left, execute_timed_out)
            self._timer.start()

        try:
            self._logger.debug("Start executing task")
            action_result = self._action_instance.execute()
            if isinstance(action_result, str):
                action_result = json.loads(action_result)
        finally:
            if self._timer is not None:
                self._timer.cancel()
                if self._timeout_event.is_set():
                    raise Exception("Timeout execution action")

        if not self._action_instance.properties.get(actions.ACTION_INTERNAL, False):
            handle_metrics(action_result)

        execution_time = int(time.time() - start)

        self._logger.debug("Task needs{}completion", " no" if not self.has_completion else " ")
        if not self.has_completion or self.dryrun:
            # No completion check needed (or dryrun): the task is done now.
            self._logger.debug("Setting state of task to {} ", handlers.STATUS_COMPLETED)
            self._action_tracking.update_task(action_id=self.action_id, task=self.task, task_metrics=self.metrics,
                                              status=handlers.STATUS_COMPLETED,
                                              status_data={
                                                  handlers.TASK_TR_STARTED_TS: int(start),
                                                  handlers.TASK_TR_RESULT: action_result,
                                                  handlers.TASK_TR_EXECUTION_TIME: str(execution_time),
                                                  handlers.TASK_TR_EXECUTION_LOGSTREAM: self.execution_log_stream
                                              })
            # noinspection PyBroadException
            try:
                self._logger.info(INF_ACTION_RESULT, execution_time, safe_json(action_result, indent=3))
            except Exception:
                # Fall back to str() if the result cannot be JSON serialized.
                self._logger.info(INF_ACTION_RESULT, execution_time, str(action_result))
        else:
            # the action has a method for testing completion of the task, set the status to waiting and store the result
            # of the execution that started the action as start result that will be passed to the completion method together
            self._logger.debug("Setting state of task to {} ", handlers.STATUS_WAIT_FOR_COMPLETION)
            self._action_tracking.update_task(action_id=self.action_id,
                                              task=self.task,
                                              task_metrics=self.metrics,
                                              status=handlers.STATUS_WAIT_FOR_COMPLETION,
                                              status_data={
                                                  handlers.TASK_TR_LAST_WAIT_COMPLETION: datetime.now().isoformat(),
                                                  handlers.TASK_TR_STARTED_TS: int(start),
                                                  handlers.TASK_TR_START_RESULT: action_result,
                                                  handlers.TASK_TR_START_EXECUTION_TIME: str(execution_time),
                                                  handlers.TASK_TR_EXECUTION_LOGSTREAM: self.execution_log_stream
                                              })

            self._logger.info(INF_STARTED_AND_WAITING_FOR_COMPLETION, safe_json(action_result, indent=3))
            if not handlers.running_local(self._context):
                # Completion checks are driven by a CloudWatch Events rule.
                rule = handlers.enable_completion_cloudwatch_rule(self._context)
                self._logger.info(INF_RULE_ENABLED, rule)
            else:
                self._logger.info(INF_SIMULATION_MODE_NO_RULE_ENABLED)

        # no exception from action
        return_data.update({
            "result": handlers.STATUS_WAIT_FOR_COMPLETION if self.has_completion else handlers.STATUS_COMPLETED,
            "action-result": str(action_result),
            "datetime": datetime.now().isoformat(),
            "running-time": str(execution_time),
            "task-group": self._event[handlers.TASK_TR_GROUP],
            "task-id": self._event[handlers.TASK_TR_ID]
        })

        return safe_dict(return_data)
    def _handle_test_task_completion(self):
        """
        Runs the action's completion check; marks the task completed or timed
        out, or leaves it waiting for the next scheduled check.
        :return: dict with task id, result status and running time
        """

        def completion_timed_out():
            """
            Function is called when the handling of the request times out
            :return:
            """
            time_used = int(os.getenv(handlers.ENV_LAMBDA_TIMEOUT) - self._context.get_remaining_time_in_millis() / 1000)
            self._logger.warning(WARN_COMPLETION_CHECK_TIMEOUT, time_used)

            if self.action_properties.get(actions.ACTION_COMPLETION_SIZE, None) is not None:
                self._logger.warning(WARN_ADJUST_LAMBDA_MEMORY_SETTINGS_COMPLETION, time_used, self.task)

            self._timeout_event.set()
            self._logger.flush()
            if self._timer is not None:
                self._timer.cancel()

        # Time elapsed since the action was originally started.
        execution_time = int(time.time()) - self.started_at
        execution_time_str = str(timedelta(seconds=execution_time))

        result_data = {
            "task": self.task,
            "action": self.action,
            "id": self.action_id,
            "datetime": datetime.now().isoformat(),
            "running-time": execution_time
        }

        if self._context is not None:
            # Reserve REMAINING_COMPLETION_CHECK seconds before the Lambda deadline.
            execution_time_left = (self._context.get_remaining_time_in_millis() / 1000.00) - REMAINING_COMPLETION_CHECK
            self._timer = threading.Timer(execution_time_left, completion_timed_out)
            self._timer.start()

        try:
            # make one more check for completion before testing for timeout
            check_result = self._action_instance.is_completed(self.start_result)
        finally:
            if self._timer is not None:
                self._timer.cancel()
                if self._timeout_event.is_set():
                    raise Exception("Task completion check timed out")

        if check_result is not None:
            # Completion method returned a result: the task finished successfully.
            self._action_tracking.update_task(action_id=self.action_id,
                                              task=self.task,
                                              task_metrics=self.metrics,
                                              status=handlers.STATUS_COMPLETED,
                                              status_data={
                                                  handlers.TASK_TR_RESULT: check_result,
                                                  handlers.TASK_TR_EXECUTION_TIME: str(execution_time)
                                              })

            self._logger.info(INF_TASK_COMPLETED, safe_json(check_result, indent=3), execution_time_str)

            result_data.update({
                "result": handlers.STATUS_COMPLETED,
                "action-result": str(check_result)
            })

        elif execution_time > self.timeout:
            # Waited longer than the task's configured timeout.
            self._action_tracking.update_task(action_id=self.action_id,
                                              task=self.task,
                                              task_metrics=self.metrics,
                                              status=handlers.STATUS_TIMED_OUT,
                                              status_data={handlers.TASK_TR_EXECUTION_TIME: str(execution_time)
                                                           })

            self._logger.error(ERR_TASK_TIMEOUT, execution_time_str)

            result_data.update({
                "result": handlers.STATUS_TIMED_OUT
            })

            # NOTE(review): this path returns the raw dict while the other paths
            # return safe_dict(...) - verify this difference is intentional.
            return result_data

        else:
            # Not completed yet; leave the task waiting for the next check.
            self._logger.info(INF_ACTION_NOT_COMPLETED, execution_time_str)
            result_data.update({
                "result": handlers.STATUS_WAIT_FOR_COMPLETION
            })

        return safe_dict(result_data)
    # noinspection PyDictCreation
    def handle_request(self):
        """
        Handles action execute requests, creates an instance of the required action class and executes the action on the
        resources passed in the event.
        :return: result dict from _handle_task_execution/_handle_test_task_completion,
            or None when the action raised (the failure is recorded on the tracking item)
        """

        # get class of the action, this class is needed by the _logger property
        self._action_class = actions.get_action_class(self.action)

        try:
            # Arguments handed to the action's constructor.
            self._action_arguments = {
                actions.ACTION_PARAM_CONTEXT: self._context,
                actions.ACTION_PARAM_EVENT: self._event,
                actions.ACTION_PARAM_SESSION: self.session,
                actions.ACTION_PARAM_RESOURCES: self.action_resources,
                actions.ACTION_PARAM_INTERVAL: self.interval,
                actions.ACTION_PARAM_DEBUG: self.debug,
                actions.ACTION_PARAM_DRYRUN: self.dryrun,
                actions.ACTION_PARAM_TASK_ID: self.action_id,
                actions.ACTION_PARAM_TASK: self.task,
                actions.ACTION_PARAM_TASK_TIMEZONE: self.task_timezone,
                actions.ACTION_PARAM_STACK: self.stack_name,
                actions.ACTION_PARAM_STACK_ID: self.stack_id,
                actions.ACTION_PARAM_STACK_RESOURCES: self.stack_resources,
                actions.ACTION_PARAM_ASSUMED_ROLE: self.assumed_role,
                actions.ACTION_PARAM_STARTED_AT: self.started_at,
                actions.ACTION_PARAM_TAGFILTER: self.tagfilter,
                actions.ACTION_PARAM_TIMEOUT: self.timeout,
                actions.ACTION_PARAM_TAG_FILTER: self.tagfilter,
                actions.ACTION_PARAM_EVENTS: self.events}

            # called after initialization other arguments as it is using these to construct the logger
            self._action_arguments[actions.ACTION_PARAM_LOGGER] = self._logger

            if self._context is not None:
                # Event used by the guard timers to signal a near-deadline timeout.
                self._timeout_event = threading.Event()
                self._action_arguments[actions.ACTION_PARAM_TIMEOUT_EVENT] = self._timeout_event

            # create the instance of the action class
            self._action_instance = self._action_class(self._action_arguments, self.action_parameters)

            self._logger.info(INF_START_EXEC, self._event[handlers.HANDLER_EVENT_ACTION], self.action_id)

            # Dispatch on the requested handler action.
            if self._event[handlers.HANDLER_EVENT_ACTION] == handlers.HANDLER_ACTION_EXECUTE:
                return self._handle_task_execution()
            elif self._event[handlers.HANDLER_EVENT_ACTION] == handlers.HANDLER_ACTION_TEST_COMPLETION:
                return self._handle_test_task_completion()

            raise Exception(
                ERR_INVALID_ACTION.format(self._event[handlers.HANDLER_EVENT_ACTION]))

        except Exception as ex:
            # Record any failure on the tracking item as a failed task.
            self._logger.error(ERR_EXECUTION_TASK, self._event[handlers.HANDLER_EVENT_ACTION], self.task, str(ex),
                               ("\n" + full_stack()) if self.debug else "")
            self._action_tracking.update_task(action_id=self.action_id,
                                              task=self.task,
                                              task_metrics=self.metrics,
                                              status=handlers.STATUS_FAILED,
                                              status_data={handlers.TASK_TR_ERROR: str(ex)})
        finally:
            self._logger.info(INF_FINISH_EXEC, self._event[handlers.HANDLER_EVENT_ACTION], self.action_id)
            self._logger.flush()
| 50.50499 | 130 | 0.588152 | on_id=self.action_id,
task=self.task,
task_metrics=self.metrics,
status=handlers.STATUS_WAIT_FOR_COMPLETION,
status_data={
handlers.TASK_TR_LAST_WAIT_COMPLETION: datetime.now().isoformat(),
handlers.TASK_TR_STARTED_TS: int(start),
handlers.TASK_TR_START_RESULT: action_result,
handlers.TASK_TR_START_EXECUTION_TIME: str(execution_time),
handlers.TASK_TR_EXECUTION_LOGSTREAM: self.execution_log_stream
})
self._logger.info(INF_STARTED_AND_WAITING_FOR_COMPLETION, safe_json(action_result, indent=3))
if not handlers.running_local(self._context):
rule = handlers.enable_completion_cloudwatch_rule(self._context)
self._logger.info(INF_RULE_ENABLED, rule)
else:
self._logger.info(INF_SIMULATION_MODE_NO_RULE_ENABLED)
# no exception from action
return_data.update({
"result": handlers.STATUS_WAIT_FOR_COMPLETION if self.has_completion else handlers.STATUS_COMPLETED,
"action-result": str(action_result),
"datetime": datetime.now().isoformat(),
"running-time": str(execution_time),
"task-group": self._event[handlers.TASK_TR_GROUP],
"task-id": self._event[handlers.TASK_TR_ID]
})
return safe_dict(return_data)
def _handle_test_task_completion(self):
def completion_timed_out():
time_used = int(os.getenv(handlers.ENV_LAMBDA_TIMEOUT) - self._context.get_remaining_time_in_millis() / 1000)
self._logger.warning(WARN_COMPLETION_CHECK_TIMEOUT, time_used)
if self.action_properties.get(actions.ACTION_COMPLETION_SIZE, None) is not None:
self._logger.warning(WARN_ADJUST_LAMBDA_MEMORY_SETTINGS_COMPLETION, time_used, self.task)
self._timeout_event.set()
self._logger.flush()
if self._timer is not None:
self._timer.cancel()
execution_time = int(time.time()) - self.started_at
execution_time_str = str(timedelta(seconds=execution_time))
result_data = {
"task": self.task,
"action": self.action,
"id": self.action_id,
"datetime": datetime.now().isoformat(),
"running-time": execution_time
}
if self._context is not None:
execution_time_left = (self._context.get_remaining_time_in_millis() / 1000.00) - REMAINING_COMPLETION_CHECK
self._timer = threading.Timer(execution_time_left, completion_timed_out)
self._timer.start()
try:
# make one more check for completion before testing for timeout
check_result = self._action_instance.is_completed(self.start_result)
finally:
if self._timer is not None:
self._timer.cancel()
if self._timeout_event.is_set():
raise Exception("Task completion check timed out")
if check_result is not None:
self._action_tracking.update_task(action_id=self.action_id,
task=self.task,
task_metrics=self.metrics,
status=handlers.STATUS_COMPLETED,
status_data={
handlers.TASK_TR_RESULT: check_result,
handlers.TASK_TR_EXECUTION_TIME: str(execution_time)
})
self._logger.info(INF_TASK_COMPLETED, safe_json(check_result, indent=3), execution_time_str)
result_data.update({
"result": handlers.STATUS_COMPLETED,
"action-result": str(check_result)
})
elif execution_time > self.timeout:
self._action_tracking.update_task(action_id=self.action_id,
task=self.task,
task_metrics=self.metrics,
status=handlers.STATUS_TIMED_OUT,
status_data={handlers.TASK_TR_EXECUTION_TIME: str(execution_time)
})
self._logger.error(ERR_TASK_TIMEOUT, execution_time_str)
result_data.update({
"result": handlers.STATUS_TIMED_OUT
})
return result_data
else:
self._logger.info(INF_ACTION_NOT_COMPLETED, execution_time_str)
result_data.update({
"result": handlers.STATUS_WAIT_FOR_COMPLETION
})
return safe_dict(result_data)
# noinspection PyDictCreation
def handle_request(self):
# get class of the action, this class is needed by the _logger property
self._action_class = actions.get_action_class(self.action)
try:
self._action_arguments = {
actions.ACTION_PARAM_CONTEXT: self._context,
actions.ACTION_PARAM_EVENT: self._event,
actions.ACTION_PARAM_SESSION: self.session,
actions.ACTION_PARAM_RESOURCES: self.action_resources,
actions.ACTION_PARAM_INTERVAL: self.interval,
actions.ACTION_PARAM_DEBUG: self.debug,
actions.ACTION_PARAM_DRYRUN: self.dryrun,
actions.ACTION_PARAM_TASK_ID: self.action_id,
actions.ACTION_PARAM_TASK: self.task,
actions.ACTION_PARAM_TASK_TIMEZONE: self.task_timezone,
actions.ACTION_PARAM_STACK: self.stack_name,
actions.ACTION_PARAM_STACK_ID: self.stack_id,
actions.ACTION_PARAM_STACK_RESOURCES: self.stack_resources,
actions.ACTION_PARAM_ASSUMED_ROLE: self.assumed_role,
actions.ACTION_PARAM_STARTED_AT: self.started_at,
actions.ACTION_PARAM_TAGFILTER: self.tagfilter,
actions.ACTION_PARAM_TIMEOUT: self.timeout,
actions.ACTION_PARAM_TAG_FILTER: self.tagfilter,
actions.ACTION_PARAM_EVENTS: self.events}
# called after initialization other arguments as it is using these to construct the logger
self._action_arguments[actions.ACTION_PARAM_LOGGER] = self._logger
if self._context is not None:
self._timeout_event = threading.Event()
self._action_arguments[actions.ACTION_PARAM_TIMEOUT_EVENT] = self._timeout_event
# create the instance of the action class
self._action_instance = self._action_class(self._action_arguments, self.action_parameters)
self._logger.info(INF_START_EXEC, self._event[handlers.HANDLER_EVENT_ACTION], self.action_id)
if self._event[handlers.HANDLER_EVENT_ACTION] == handlers.HANDLER_ACTION_EXECUTE:
return self._handle_task_execution()
elif self._event[handlers.HANDLER_EVENT_ACTION] == handlers.HANDLER_ACTION_TEST_COMPLETION:
return self._handle_test_task_completion()
raise Exception(
ERR_INVALID_ACTION.format(self._event[handlers.HANDLER_EVENT_ACTION]))
except Exception as ex:
self._logger.error(ERR_EXECUTION_TASK, self._event[handlers.HANDLER_EVENT_ACTION], self.task, str(ex),
("\n" + full_stack()) if self.debug else "")
self._action_tracking.update_task(action_id=self.action_id,
task=self.task,
task_metrics=self.metrics,
status=handlers.STATUS_FAILED,
status_data={handlers.TASK_TR_ERROR: str(ex)})
finally:
self._logger.info(INF_FINISH_EXEC, self._event[handlers.HANDLER_EVENT_ACTION], self.action_id)
self._logger.flush()
| true | true |
1c2e01b39bd7c410b1525225e5bc812b3db81274 | 64,988 | py | Python | ansible_runner/interface.py | AlanCoding/ansible-runner | 4c6b7d0c15c62159f971522a23e4491487703472 | [
"Apache-2.0"
] | 1 | 2022-02-19T05:07:09.000Z | 2022-02-19T05:07:09.000Z | ansible_runner/interface.py | aknochow/ansible-runner | 996a00dd0cd449e129a693e53b73770a6de34e36 | [
"Apache-2.0"
] | null | null | null | ansible_runner/interface.py | aknochow/ansible-runner | 996a00dd0cd449e129a693e53b73770a6de34e36 | [
"Apache-2.0"
] | 1 | 2021-11-22T16:03:11.000Z | 2021-11-22T16:03:11.000Z | # Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import json
import sys
import threading
import logging
from ansible_runner import output
from ansible_runner.config.runner import RunnerConfig
from ansible_runner.config.command import CommandConfig
from ansible_runner.config.inventory import InventoryConfig
from ansible_runner.config.ansible_cfg import AnsibleCfgConfig
from ansible_runner.config.doc import DocConfig
from ansible_runner.runner import Runner
from ansible_runner.streaming import Transmitter, Worker, Processor
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
sanitize_json_response,
signal_handler,
)
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
    '''
    Initialize the Runner() instance

    This function will properly initialize both run() and run_async()
    functions in the same way and return a value instance of Runner.

    See parameters given to :py:func:`ansible_runner.interface.run`

    Note: this function mutates ``kwargs`` (several keys are popped) and may
    return a Transmitter/Worker/Processor instead of a Runner when the
    ``streamer`` argument is given.
    '''
    # If running via the transmit-worker-process method, we must only extract things as read-only
    # inside of one of these commands. That could be either transmit or worker.
    if kwargs.get('streamer') not in ('worker', 'process'):
        dump_artifacts(kwargs)
    if kwargs.get('streamer'):
        # undo any full paths that were dumped by dump_artifacts above in the streamer case
        # (the receiving end reconstructs its own private_data_dir, so paths
        # must travel as relative)
        private_data_dir = kwargs['private_data_dir']
        project_dir = os.path.join(private_data_dir, 'project')
        playbook_path = kwargs.get('playbook') or ''
        if os.path.isabs(playbook_path) and playbook_path.startswith(project_dir):
            kwargs['playbook'] = os.path.relpath(playbook_path, project_dir)
        inventory_path = kwargs.get('inventory') or ''
        if os.path.isabs(inventory_path) and inventory_path.startswith(private_data_dir):
            kwargs['inventory'] = os.path.relpath(inventory_path, private_data_dir)
        roles_path = kwargs.get('envvars', {}).get('ANSIBLE_ROLES_PATH') or ''
        if os.path.isabs(roles_path) and roles_path.startswith(private_data_dir):
            kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.relpath(roles_path, private_data_dir)
    # Logging arguments are consumed here; they are not forwarded to RunnerConfig.
    debug = kwargs.pop('debug', None)
    logfile = kwargs.pop('logfile', None)
    if not kwargs.pop("ignore_logging", True):
        output.configure()
        if debug in (True, False):
            output.set_debug('enable' if debug is True else 'disable')
        if logfile:
            output.set_logfile(logfile)
    # Fail fast if process isolation was requested but the isolation
    # executable (container engine) is not installed on this host.
    if kwargs.get("process_isolation", False):
        pi_executable = kwargs.get("process_isolation_executable", "podman")
        if not check_isolation_executable_installed(pi_executable):
            print(f'Unable to find process isolation executable: {pi_executable}')
            sys.exit(1)
    # Pop the callbacks so the remaining kwargs can be forwarded verbatim to
    # the streaming classes / RunnerConfig below.
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    if cancel_callback is None:
        # attempt to load signal handler.
        # will return None if we are not in the main thread
        cancel_callback = signal_handler()
    finished_callback = kwargs.pop('finished_callback', None)
    streamer = kwargs.pop('streamer', None)
    if streamer:
        if streamer == 'transmit':
            stream_transmitter = Transmitter(**kwargs)
            return stream_transmitter
        if streamer == 'worker':
            stream_worker = Worker(**kwargs)
            return stream_worker
        if streamer == 'process':
            stream_processor = Processor(event_handler=event_callback_handler,
                                         status_handler=status_callback_handler,
                                         artifacts_handler=artifacts_handler,
                                         cancel_callback=cancel_callback,
                                         finished_callback=finished_callback,
                                         **kwargs)
            return stream_processor
    # _input/_output only apply to the streaming pipeline; drop them before
    # constructing RunnerConfig, which does not accept them.
    kwargs.pop('_input', None)
    kwargs.pop('_output', None)
    rc = RunnerConfig(**kwargs)
    rc.prepare()
    return Runner(rc,
                  event_handler=event_callback_handler,
                  status_handler=status_callback_handler,
                  artifacts_handler=artifacts_handler,
                  cancel_callback=cancel_callback,
                  finished_callback=finished_callback)
def run(**kwargs):
    '''
    Run an Ansible Runner task in the foreground and return a Runner object when complete.

    :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
                                 module. Output artifacts will also be stored here for later consumption.
    :param str ident: The run identifier for this invocation of Runner. Will be used to create and name
                      the artifact directory holding the results of the invocation.
    :param bool json_mode: Store event data in place of stdout on the console and in the stdout file
    :param str or list playbook: The playbook (either a list or dictionary of plays, or as a path relative to
                                 ``private_data_dir/project``) that will be invoked by runner when executing Ansible.
    :param str module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
    :param str module_args: The module arguments that will be supplied to ad-hoc mode.
    :param str host_pattern: The host pattern to match when running in ad-hoc mode.
    :param str or dict or list inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
        a specific host or list of hosts. This can take the form of:

            - Path to the inventory file in the ``private_data_dir``
            - Native python dict supporting the YAML/json inventory structure
            - A text INI formatted string
            - A list of inventory sources, or an empty list to disable passing inventory

    :param str role: Name of the role to execute.
    :param dict or list roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
    :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
                         read from ``env/envvars`` in ``private_data_dir``
    :param dict extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
                           read from ``env/extravars`` in ``private_data_dir``.
    :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
                           Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                          be read from ``env/settings`` in ``private_data_dir``.
    :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param str cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
    :param str limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
    :param int forks: Control Ansible parallel concurrency
    :param int verbosity: Control how verbose the output of ansible-playbook is
    :param bool quiet: Disable all output
    :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                        (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                        execution.
    :param str streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
    :param io.FileIO _input: An optional file or file-like object for use as input in a streaming pipeline
    :param io.FileIO _output: An optional file or file-like object for use as output in a streaming pipeline
    :param Callable event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param Callable cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param Callable finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param Callable status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param Callable artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param bool process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
    :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param str process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
    :param str or list process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
    :param str or list process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
    :param str or list process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
    :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir'. (default: None)
    :param list container_options: List of container options to pass to execution engine.
    :param bool resource_profiling: Enable collection of resource utilization data during playbook execution.
    :param str resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure
                                               resource utilization (default: ansible-runner)
    :param float resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
    :param float resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
    :param float resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
    :param str resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
    :param str directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
                                              copied to this location which will then be used as the working directory during playbook execution.
    :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                           This is only used for 'jsonfile' type fact caches.
    :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param bool omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
    :param bool only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                      value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                      if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing ``rc`` if run remotely
    '''
    # Build the Runner (or streaming object), execute it synchronously,
    # and hand it back so callers can inspect status/rc/events/artifacts.
    r = init_runner(**kwargs)
    r.run()
    return r
def run_async(**kwargs):
    '''
    Start an Ansible Runner task in the background; execution begins immediately.

    Accepts the same parameters as :py:func:`ansible_runner.interface.run`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    runner = init_runner(**kwargs)
    worker = threading.Thread(target=runner.run)
    worker.start()
    return worker, runner
def init_command_config(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Build the Runner() instance shared by run_command() and run_command_async().

    Both entry points delegate here so the CommandConfig preparation and the
    callback wiring happen in exactly one place.

    See parameters given to :py:func:`ansible_runner.interface.run_command`
    '''
    # Pull the runner callbacks out of kwargs before they reach CommandConfig.
    callbacks = {
        name: kwargs.pop(name, None)
        for name in ('event_handler', 'status_handler', 'artifacts_handler',
                     'cancel_callback', 'finished_callback')
    }
    command_config = CommandConfig(**kwargs)
    command_config.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)
    return Runner(command_config,
                  event_handler=callbacks['event_handler'],
                  status_handler=callbacks['status_handler'],
                  artifacts_handler=callbacks['artifacts_handler'],
                  cancel_callback=callbacks['cancel_callback'],
                  finished_callback=callbacks['finished_callback'])
def run_command(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Run an (Ansible) command in the foreground and return a Runner object when complete.

    :param str executable_cmd: The command to be executed.
    :param list cmdline_args: A list of arguments to be passed to the executable command.
    :param int input_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the
                         input file descriptor to interact with the sub-process running the command.
    :param int output_fd: The output file descriptor to stream the output of command execution.
    :param int error_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the
                         error file descriptor to read the error received while executing the command.
    :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. If the value of ``input_fd`` parameter
                            is set or the executable command is one of ``ansible-config``, ``ansible-doc`` or ``ansible-galaxy``
                            the default value is set to ``subprocess`` else in other cases it is set to ``pexpect``.
    :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
                         the work directory within container.
    :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
                         read from ``env/envvars`` in ``private_data_dir``
    :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
                           Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                          be read from ``env/settings`` in ``private_data_dir``.
    :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param bool quiet: Disable all output
    :param bool json_mode: Store event data in place of stdout on the console and in the stdout file
    :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                        (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                        execution.
    :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
    :param list container_options: List of container options to pass to execution engine.
    :param str container_workdir: The working directory within the container.
    :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                           This is only used for 'jsonfile' type fact caches.
    :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
                                 module. Output artifacts will also be stored here for later consumption.
    :param str ident: The run identifier for this invocation of Runner. Will be used to create and name
                      the artifact directory holding the results of the invocation.
    :param Callable event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param Callable cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param Callable finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param Callable status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param Callable artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                      value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                      if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :returns: Returns a tuple of response, error string and return code.
              In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as
              ``pexpect`` uses same output descriptor for stdout and stderr.
    '''
    # Run synchronously, then drain the captured stdout/stderr artifacts.
    r = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
    r.run()
    response = r.stdout.read()
    error = r.stderr.read()
    return response, error, r.rc
def run_command_async(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Launch an (Ansible) command in a background thread; execution starts immediately.

    Accepts the same parameters as :py:func:`ansible_runner.interface.run_command`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    runner = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
    worker = threading.Thread(target=runner.run)
    worker.start()
    return worker, runner
def init_plugin_docs_config(plugin_names, plugin_type=None, response_format=None,
                            snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Build the Runner() instance shared by get_plugin_docs() and get_plugin_docs_async().

    Both entry points delegate here so the ansible-doc command preparation and
    the callback wiring happen in exactly one place.

    See parameters given to :py:func:`ansible_runner.interface.get_plugin_docs`
    '''
    # Extract the runner callbacks before the remaining kwargs reach DocConfig.
    on_event = kwargs.pop('event_handler', None)
    on_status = kwargs.pop('status_handler', None)
    on_artifacts = kwargs.pop('artifacts_handler', None)
    on_cancel = kwargs.pop('cancel_callback', None)
    on_finished = kwargs.pop('finished_callback', None)
    doc_config = DocConfig(**kwargs)
    doc_config.prepare_plugin_docs_command(plugin_names,
                                           plugin_type=plugin_type,
                                           response_format=response_format,
                                           snippet=snippet,
                                           playbook_dir=playbook_dir,
                                           module_path=module_path)
    return Runner(doc_config,
                  event_handler=on_event,
                  status_handler=on_status,
                  artifacts_handler=on_artifacts,
                  cancel_callback=on_cancel,
                  finished_callback=on_finished)
def get_plugin_docs(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Run an ansible-doc command to get plugin docs in the foreground and return a Runner object when complete.

    :param plugin_names: The name of the plugins to get docs.
    :param plugin_type: The type of the plugin mentioned in plugins_names. Valid values are ``become``, ``cache``, ``callback``,
                        ``cliconf``, ``connection``, ``httpapi``, ``inventory``, ``lookup``, ``netconf``, ``shell``, ``vars``,
                        ``module``, ``strategy``. If the value is not provided it defaults to ``module``.
    :param response_format: The output format for response. Valid values can be one of ``json`` or ``human`` and the response
                            is either json string or plain text in human readable format. Default value is ``json``.
    :param snippet: Show playbook snippet for specified plugin(s).
    :param playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed plugins.
    :param module_path: This parameter prepends colon-separated path(s) to module library
                        (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules).
    :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
                     the work directory within container.
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
                    read from ``env/envvars`` in ``private_data_dir``
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
                      Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                     be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param quiet: Disable all output
    :param json_mode: Store event data in place of stdout on the console and in the stdout file
    :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                    (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                    execution.
    :param process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
    :param container_options: List of container options to pass to execution engine.
    :param container_workdir: The working directory within the container.
    :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                       This is only used for 'jsonfile' type fact caches.
    :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
                             module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
                  the artifact directory holding the results of the invocation.
    :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                 value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                 if set to 'False' it logs a debug message and continues execution. Default value is 'False'
    :type plugin_names: list
    :type plugin_type: str
    :type response_format: str
    :type snippet: bool
    :type playbook_dir: str
    :type module_path: str
    :type runner_mode: str
    :type host_cwd: str
    :type envvars: dict
    :type passwords: dict
    :type settings: dict
    :type private_data_dir: str
    :type project_dir: str
    :type artifact_dir: str
    :type fact_cache_type: str
    :type fact_cache: str
    :type process_isolation: bool
    :type process_isolation_executable: str
    :type container_image: str
    :type container_volume_mounts: list
    :type container_options: list
    :type container_workdir: str
    :type ident: str
    :type rotate_artifacts: int
    :type timeout: int
    :type ssh_key: str
    :type quiet: bool
    :type json_mode: bool
    :type event_handler: Callable
    :type cancel_callback: Callable
    :type finished_callback: Callable
    :type status_handler: Callable
    :type artifacts_handler: Callable
    :type check_job_event_data: bool

    :returns: Returns a tuple of response and error string. In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as
              ``pexpect`` uses same output descriptor for stdout and stderr. If the value of ``response_format`` is ``json``
              it returns a python dictionary object.
    '''
    r = init_plugin_docs_config(plugin_names, plugin_type=plugin_type, response_format=response_format,
                                snippet=snippet, playbook_dir=playbook_dir, module_path=module_path, **kwargs)
    r.run()
    response = r.stdout.read()
    error = r.stderr.read()
    # NOTE(review): parsing only happens when the caller explicitly passes
    # response_format='json'; with the default (None) the raw ansible-doc
    # output is returned unchanged even though the docstring says json is the
    # default format — confirm whether DocConfig applies the json default.
    if response and response_format == 'json':
        response = json.loads(sanitize_json_response(response))
    return response, error
def get_plugin_docs_async(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Launch an ansible-doc command in a background thread and return immediately.

    Accepts the same parameters as :py:func:`ansible_runner.interface.get_plugin_docs`.

    :returns: A tuple of the started :py:class:`threading.Thread` and the
              :py:class:`ansible_runner.runner.Runner` object driving the command.
    '''
    runner = init_plugin_docs_config(
        plugin_names,
        plugin_type=plugin_type,
        response_format=response_format,
        snippet=snippet,
        playbook_dir=playbook_dir,
        module_path=module_path,
        **kwargs)
    # Run the doc command off-thread so the caller can poll/join at its leisure.
    worker = threading.Thread(target=runner.run)
    worker.start()
    return worker, runner
def get_plugin_list(list_files=None, response_format=None, plugin_type=None, playbook_dir=None, module_path=None, **kwargs):
    '''
    Run an ansible-doc command to list the installed Ansible plugins.

    :param list_files: When ``True``, include the file path of each plugin alongside
                       its name in the output.
    :param response_format: Output format for the response; one of ``json`` (default)
                            or ``human`` (plain, human-readable text).
    :param plugin_type: The plugin type to list. Valid values are ``become``, ``cache``,
                        ``callback``, ``cliconf``, ``connection``, ``httpapi``,
                        ``inventory``, ``lookup``, ``netconf``, ``shell``, ``vars``,
                        ``module``, ``strategy``. Defaults to ``module``.
    :param playbook_dir: Relative path used to locate playbook-adjacent installed plugins.
    :param module_path: Colon-separated path(s) prepended to the module library
                        (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules).

    All remaining keyword arguments are the shared execution options accepted by the
    other helpers in this module — e.g. ``runner_mode``, ``host_cwd``, ``envvars``,
    ``passwords``, ``settings``, ``private_data_dir``, ``artifact_dir``,
    ``rotate_artifacts``, ``timeout``, the ``process_isolation``/container settings,
    ``ssh_key``, ``quiet``, ``json_mode``, ``check_job_event_data`` and the callbacks
    ``event_handler``, ``status_handler``, ``artifacts_handler``, ``cancel_callback``
    and ``finished_callback``.

    :type list_files: bool
    :type response_format: str
    :type plugin_type: str
    :type playbook_dir: str
    :type module_path: str

    :returns: A tuple of response and error string. With ``runner_mode`` set to
              ``pexpect`` the error value is empty, because ``pexpect`` uses the same
              output descriptor for stdout and stderr. When ``response_format`` is
              ``json`` the response is a Python dictionary object.
    '''
    # Peel the Runner callbacks off before the rest of kwargs goes to DocConfig.
    handlers = {name: kwargs.pop(name, None)
                for name in ('event_handler', 'status_handler', 'artifacts_handler',
                             'cancel_callback', 'finished_callback')}

    doc_config = DocConfig(**kwargs)
    doc_config.prepare_plugin_list_command(
        list_files=list_files,
        response_format=response_format,
        plugin_type=plugin_type,
        playbook_dir=playbook_dir,
        module_path=module_path)

    runner = Runner(doc_config,
                    event_handler=handlers['event_handler'],
                    status_handler=handlers['status_handler'],
                    artifacts_handler=handlers['artifacts_handler'],
                    cancel_callback=handlers['cancel_callback'],
                    finished_callback=handlers['finished_callback'])
    runner.run()

    response = runner.stdout.read()
    error = runner.stderr.read()
    if response and response_format == 'json':
        response = json.loads(sanitize_json_response(response))
    return response, error
def get_inventory(action, inventories, response_format=None, host=None, playbook_dir=None,
                  vault_ids=None, vault_password_file=None, output_file=None, export=None, **kwargs):
    '''
    Run an ansible-inventory command to get inventory related details.

    :param action: One of ``graph`` (create the inventory graph), ``host`` (return
                   specific host info, inventory-script compatible) or ``list``
                   (output all hosts info, inventory-script compatible).
    :param inventories: List of inventory host paths.
    :param response_format: Output format for the response; one of ``json`` (default),
                            ``yaml`` or ``toml``. When ``action`` is ``graph`` the only
                            allowed value is ``json``.
    :param host: Host to query when ``action`` is ``host``.
    :param playbook_dir: Relative path used as the base directory for the inventory.
    :param vault_ids: The vault identity to use.
    :param vault_password_file: The vault password file to use.
    :param output_file: File path the inventory details should be written to.
    :param export: When set, represent the inventory in a way optimized for export
                   rather than as an accurate reflection of how Ansible processed it.

    All remaining keyword arguments are the shared execution options accepted by the
    other helpers in this module — e.g. ``runner_mode``, ``host_cwd``, ``envvars``,
    ``passwords``, ``settings``, ``private_data_dir``, ``artifact_dir``,
    ``rotate_artifacts``, ``timeout``, the ``process_isolation``/container settings,
    ``ssh_key``, ``quiet``, ``json_mode``, ``check_job_event_data`` and the callbacks
    ``event_handler``, ``status_handler``, ``artifacts_handler``, ``cancel_callback``
    and ``finished_callback``.

    :type action: str
    :type inventories: list
    :type response_format: str
    :type host: str
    :type playbook_dir: str
    :type vault_ids: str
    :type vault_password_file: str
    :type output_file: str
    :type export: bool

    :returns: A tuple of response and error string. With ``runner_mode`` set to
              ``pexpect`` the error value is empty, because ``pexpect`` uses the same
              output descriptor for stdout and stderr. When ``response_format`` is
              ``json`` the response is a Python dictionary object.
    '''
    # Peel the Runner callbacks off before the rest of kwargs goes to InventoryConfig.
    handlers = {name: kwargs.pop(name, None)
                for name in ('event_handler', 'status_handler', 'artifacts_handler',
                             'cancel_callback', 'finished_callback')}

    inv_config = InventoryConfig(**kwargs)
    inv_config.prepare_inventory_command(
        action=action,
        inventories=inventories,
        response_format=response_format,
        host=host,
        playbook_dir=playbook_dir,
        vault_ids=vault_ids,
        vault_password_file=vault_password_file,
        output_file=output_file,
        export=export)

    runner = Runner(inv_config,
                    event_handler=handlers['event_handler'],
                    status_handler=handlers['status_handler'],
                    artifacts_handler=handlers['artifacts_handler'],
                    cancel_callback=handlers['cancel_callback'],
                    finished_callback=handlers['finished_callback'])
    runner.run()

    response = runner.stdout.read()
    error = runner.stderr.read()
    if response and response_format == 'json':
        response = json.loads(sanitize_json_response(response))
    return response, error
def get_ansible_config(action, config_file=None, only_changed=None, **kwargs):
    '''
    Run an ansible-config command to get Ansible configuration related details.

    :param action: One of ``list`` (return all config options), ``dump`` (return the
                   active configuration) or ``view`` (return the view of the
                   configuration file).
    :param config_file: Path to a configuration file; defaults to the first file found
                        in precedence.
    :param only_changed: When ``True``, return only the configurations that have
                         changed from the default. Applicable only when ``action``
                         is ``dump``.

    All remaining keyword arguments are the shared execution options accepted by the
    other helpers in this module — e.g. ``runner_mode``, ``host_cwd``, ``envvars``,
    ``passwords``, ``settings``, ``private_data_dir``, ``artifact_dir``,
    ``rotate_artifacts``, ``timeout``, the ``process_isolation``/container settings,
    ``ssh_key``, ``quiet``, ``json_mode``, ``check_job_event_data`` and the callbacks
    ``event_handler``, ``status_handler``, ``artifacts_handler``, ``cancel_callback``
    and ``finished_callback``.

    :type action: str
    :type config_file: str
    :type only_changed: bool

    :returns: A tuple of response and error string. With ``runner_mode`` set to
              ``pexpect`` the error value is empty, because ``pexpect`` uses the same
              output descriptor for stdout and stderr.
    '''
    # Peel the Runner callbacks off before the rest of kwargs goes to AnsibleCfgConfig.
    handlers = {name: kwargs.pop(name, None)
                for name in ('event_handler', 'status_handler', 'artifacts_handler',
                             'cancel_callback', 'finished_callback')}

    cfg_config = AnsibleCfgConfig(**kwargs)
    cfg_config.prepare_ansible_config_command(
        action=action, config_file=config_file, only_changed=only_changed)

    runner = Runner(cfg_config,
                    event_handler=handlers['event_handler'],
                    status_handler=handlers['status_handler'],
                    artifacts_handler=handlers['artifacts_handler'],
                    cancel_callback=handlers['cancel_callback'],
                    finished_callback=handlers['finished_callback'])
    runner.run()

    # Unlike the JSON-capable helpers, the raw text output is returned untouched.
    response = runner.stdout.read()
    error = runner.stderr.read()
    return response, error
def get_role_list(collection=None, playbook_dir=None, **kwargs):
    '''
    Run an ``ansible-doc`` command to get a list of installed collection roles.

    Only roles that have an argument specification defined are returned.

    .. note:: Version added: 2.2

    :param str collection: A fully qualified collection name used to filter the results.
    :param str playbook_dir: Relative path used to locate playbook-adjacent installed roles.

    All remaining keyword arguments are the shared execution options accepted by the
    other helpers in this module — e.g. ``runner_mode``, ``host_cwd``, ``envvars``,
    ``passwords``, ``settings``, ``private_data_dir``, ``artifact_dir``,
    ``rotate_artifacts``, ``timeout``, the ``process_isolation``/container settings,
    ``ssh_key``, ``quiet``, ``json_mode``, ``check_job_event_data`` and the callbacks
    ``event_handler``, ``status_handler``, ``artifacts_handler``, ``cancel_callback``
    and ``finished_callback``.

    :returns: A tuple of response and error string. The response is a dictionary object
              (as returned by ansible-doc JSON output) containing each role found, or an
              empty dict if none are found.
    '''
    # Peel the Runner callbacks off before the rest of kwargs goes to DocConfig.
    handlers = {name: kwargs.pop(name, None)
                for name in ('event_handler', 'status_handler', 'artifacts_handler',
                             'cancel_callback', 'finished_callback')}

    doc_config = DocConfig(**kwargs)
    doc_config.prepare_role_list_command(collection, playbook_dir)

    runner = Runner(doc_config,
                    event_handler=handlers['event_handler'],
                    status_handler=handlers['status_handler'],
                    artifacts_handler=handlers['artifacts_handler'],
                    cancel_callback=handlers['cancel_callback'],
                    finished_callback=handlers['finished_callback'])
    runner.run()

    response = runner.stdout.read()
    error = runner.stderr.read()
    # ansible-doc emits JSON here regardless of format flags; decode when non-empty.
    if response:
        response = json.loads(sanitize_json_response(response))
    return response, error
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
    '''
    Run an ``ansible-doc`` command to get a role argument specification.

    .. note:: Version added: 2.2

    :param str role: Simple role name, or fully qualified collection role name, to query.
    :param str collection: If specified, will be combined with the role name to form a
                           fully qualified collection role name. If this is supplied,
                           the ``role`` param should not be fully qualified.
    :param str playbook_dir: Relative path used to locate playbook-adjacent installed roles.

    All remaining keyword arguments are the shared execution options accepted by the
    other helpers in this module — e.g. ``runner_mode``, ``host_cwd``, ``envvars``,
    ``passwords``, ``settings``, ``private_data_dir``, ``artifact_dir``,
    ``rotate_artifacts``, ``timeout``, the ``process_isolation``/container settings,
    ``ssh_key``, ``quiet``, ``json_mode``, ``check_job_event_data`` and the callbacks
    ``event_handler``, ``status_handler``, ``artifacts_handler``, ``cancel_callback``
    and ``finished_callback``.

    :returns: A tuple of response and error string. The response is a dictionary object
              (as returned by ansible-doc JSON output) containing each role found, or an
              empty dict if none are found.
    '''
    # Peel the Runner callbacks off before the rest of kwargs goes to DocConfig.
    handlers = {name: kwargs.pop(name, None)
                for name in ('event_handler', 'status_handler', 'artifacts_handler',
                             'cancel_callback', 'finished_callback')}

    doc_config = DocConfig(**kwargs)
    doc_config.prepare_role_argspec_command(role, collection, playbook_dir)

    runner = Runner(doc_config,
                    event_handler=handlers['event_handler'],
                    status_handler=handlers['status_handler'],
                    artifacts_handler=handlers['artifacts_handler'],
                    cancel_callback=handlers['cancel_callback'],
                    finished_callback=handlers['finished_callback'])
    runner.run()

    response = runner.stdout.read()
    error = runner.stderr.read()
    # ansible-doc emits JSON here regardless of format flags; decode when non-empty.
    if response:
        response = json.loads(sanitize_json_response(response))
    return response, error
| 66.928939 | 159 | 0.719471 |
import os
import json
import sys
import threading
import logging
from ansible_runner import output
from ansible_runner.config.runner import RunnerConfig
from ansible_runner.config.command import CommandConfig
from ansible_runner.config.inventory import InventoryConfig
from ansible_runner.config.ansible_cfg import AnsibleCfgConfig
from ansible_runner.config.doc import DocConfig
from ansible_runner.runner import Runner
from ansible_runner.streaming import Transmitter, Worker, Processor
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
sanitize_json_response,
signal_handler,
)
# Library-friendly logging: attach a NullHandler so applications importing
# ansible-runner don't see "No handler found" warnings unless they configure
# logging themselves.
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
    """Build the object that will execute (or stream) an Ansible Runner task.

    Depending on ``kwargs['streamer']`` this returns a ``Transmitter``,
    ``Worker`` or ``Processor`` (stages of the remote-execution pipeline) or,
    in the default local case, a fully prepared ``Runner``.

    Note: ``kwargs`` is mutated in place — callback and logging entries are
    popped off before the remainder is handed to the config class.
    """
    # Persist raw request data (playbook, inventory, envvars, ...) into the
    # private data dir — except for the pipeline stages that receive their
    # inputs over the stream instead.
    if kwargs.get('streamer') not in ('worker', 'process'):
        dump_artifacts(kwargs)
    if kwargs.get('streamer'):
        # Streamed execution runs on another machine, so absolute local paths
        # must be rewritten relative to the private data dir before transmit.
        private_data_dir = kwargs['private_data_dir']
        project_dir = os.path.join(private_data_dir, 'project')
        playbook_path = kwargs.get('playbook') or ''
        if os.path.isabs(playbook_path) and playbook_path.startswith(project_dir):
            kwargs['playbook'] = os.path.relpath(playbook_path, project_dir)
        inventory_path = kwargs.get('inventory') or ''
        if os.path.isabs(inventory_path) and inventory_path.startswith(private_data_dir):
            kwargs['inventory'] = os.path.relpath(inventory_path, private_data_dir)
        roles_path = kwargs.get('envvars', {}).get('ANSIBLE_ROLES_PATH') or ''
        if os.path.isabs(roles_path) and roles_path.startswith(private_data_dir):
            kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.relpath(roles_path, private_data_dir)
    # Logging setup: only configured when the caller opts in by passing
    # ignore_logging=False; debug/logfile are consumed either way.
    debug = kwargs.pop('debug', None)
    logfile = kwargs.pop('logfile', None)
    if not kwargs.pop("ignore_logging", True):
        output.configure()
        if debug in (True, False):
            output.set_debug('enable' if debug is True else 'disable')
        if logfile:
            output.set_logfile(logfile)
    # Fail fast if process isolation was requested but the container runtime
    # (default "podman") is not available on PATH.
    if kwargs.get("process_isolation", False):
        pi_executable = kwargs.get("process_isolation_executable", "podman")
        if not check_isolation_executable_installed(pi_executable):
            print(f'Unable to find process isolation executable: {pi_executable}')
            sys.exit(1)
    # Pop all callbacks before the config object consumes **kwargs.
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    if cancel_callback is None:
        # Bare minimum: cancel the job on SIGTERM / SIGINT.
        cancel_callback = signal_handler()
    finished_callback = kwargs.pop('finished_callback', None)
    streamer = kwargs.pop('streamer', None)
    if streamer:
        # Remote-execution pipeline: return the requested stage instead of a
        # local Runner.  Only 'process' consumes the event/status callbacks.
        if streamer == 'transmit':
            stream_transmitter = Transmitter(**kwargs)
            return stream_transmitter
        if streamer == 'worker':
            stream_worker = Worker(**kwargs)
            return stream_worker
        if streamer == 'process':
            stream_processor = Processor(event_handler=event_callback_handler,
                                         status_handler=status_callback_handler,
                                         artifacts_handler=artifacts_handler,
                                         cancel_callback=cancel_callback,
                                         finished_callback=finished_callback,
                                         **kwargs)
            return stream_processor
    # Local execution: drop stream-only plumbing args RunnerConfig won't accept.
    kwargs.pop('_input', None)
    kwargs.pop('_output', None)
    rc = RunnerConfig(**kwargs)
    rc.prepare()
    return Runner(rc,
                  event_handler=event_callback_handler,
                  status_handler=status_callback_handler,
                  artifacts_handler=artifacts_handler,
                  cancel_callback=cancel_callback,
                  finished_callback=finished_callback)
def run(**kwargs):
    """Execute an Ansible Runner task to completion.

    :param kwargs: keyword arguments accepted by :func:`init_runner`.
    :returns: the finished runner object (or pipeline stage).
    """
    runner = init_runner(**kwargs)
    runner.run()
    return runner
def run_async(**kwargs):
    """Start an Ansible Runner task in a background thread.

    :param kwargs: keyword arguments accepted by :func:`init_runner`.
    :returns: a ``(thread, runner)`` tuple; join the thread to wait.
    """
    runner = init_runner(**kwargs)
    worker_thread = threading.Thread(target=runner.run)
    worker_thread.start()
    return worker_thread, runner
def init_command_config(executable_cmd, cmdline_args=None, **kwargs):
    """Prepare a Runner that executes a raw command.

    :param executable_cmd: the executable to invoke.
    :param cmdline_args: optional list of command-line arguments.
    :param kwargs: remaining options forwarded to ``CommandConfig``; the
        five callback entries are popped out first and handed to ``Runner``.
    :returns: a ready-to-run ``Runner``.
    """
    # Pop callbacks before CommandConfig consumes the remaining kwargs; the
    # keys match Runner's keyword parameters exactly.
    callbacks = {name: kwargs.pop(name, None)
                 for name in ('event_handler', 'status_handler',
                              'artifacts_handler', 'cancel_callback',
                              'finished_callback')}
    config = CommandConfig(**kwargs)
    config.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)
    return Runner(config, **callbacks)
def run_command(executable_cmd, cmdline_args=None, **kwargs):
    """Run a command synchronously and collect its output.

    :returns: ``(stdout_text, stderr_text, return_code)``.
    """
    runner = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
    runner.run()
    return runner.stdout.read(), runner.stderr.read(), runner.rc
def run_command_async(executable_cmd, cmdline_args=None, **kwargs):
    """Run a command in a background thread.

    :returns: ``(thread, runner)``; inspect ``runner`` after joining.
    """
    runner = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
    worker_thread = threading.Thread(target=runner.run)
    worker_thread.start()
    return worker_thread, runner
def init_plugin_docs_config(plugin_names, plugin_type=None, response_format=None,
                            snippet=False, playbook_dir=None, module_path=None, **kwargs):
    """Prepare a Runner wrapping ``ansible-doc`` for the given plugins."""
    # Callback kwargs are popped before DocConfig consumes the remainder;
    # the keys match Runner's keyword parameters exactly.
    callbacks = {name: kwargs.pop(name, None)
                 for name in ('event_handler', 'status_handler',
                              'artifacts_handler', 'cancel_callback',
                              'finished_callback')}
    config = DocConfig(**kwargs)
    config.prepare_plugin_docs_command(plugin_names, plugin_type=plugin_type,
                                       response_format=response_format,
                                       snippet=snippet, playbook_dir=playbook_dir,
                                       module_path=module_path)
    return Runner(config, **callbacks)
def get_plugin_docs(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
    """Fetch documentation for the named plugins.

    :returns: ``(response, error)``; ``response`` is parsed into a dict when
        JSON output was requested, otherwise left as raw text.
    """
    runner = init_plugin_docs_config(plugin_names, plugin_type=plugin_type,
                                     response_format=response_format, snippet=snippet,
                                     playbook_dir=playbook_dir, module_path=module_path,
                                     **kwargs)
    runner.run()
    output_text = runner.stdout.read()
    error_text = runner.stderr.read()
    if output_text and response_format == 'json':
        return json.loads(sanitize_json_response(output_text)), error_text
    return output_text, error_text
def get_plugin_docs_async(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
    """Fetch plugin documentation in a background thread.

    :returns: ``(thread, runner)``; read the runner's output after joining.
    """
    runner = init_plugin_docs_config(plugin_names, plugin_type=plugin_type,
                                     response_format=response_format, snippet=snippet,
                                     playbook_dir=playbook_dir, module_path=module_path,
                                     **kwargs)
    docs_thread = threading.Thread(target=runner.run)
    docs_thread.start()
    return docs_thread, runner
def get_plugin_list(list_files=None, response_format=None, plugin_type=None, playbook_dir=None, module_path=None, **kwargs):
    """List available Ansible plugins via ``ansible-doc``.

    :returns: ``(response, error)``; ``response`` is parsed into a dict when
        JSON output was requested, otherwise left as raw text.
    """
    callbacks = {name: kwargs.pop(name, None)
                 for name in ('event_handler', 'status_handler',
                              'artifacts_handler', 'cancel_callback',
                              'finished_callback')}
    config = DocConfig(**kwargs)
    config.prepare_plugin_list_command(list_files=list_files, response_format=response_format,
                                       plugin_type=plugin_type, playbook_dir=playbook_dir,
                                       module_path=module_path)
    runner = Runner(config, **callbacks)
    runner.run()
    output_text = runner.stdout.read()
    error_text = runner.stderr.read()
    if output_text and response_format == 'json':
        return json.loads(sanitize_json_response(output_text)), error_text
    return output_text, error_text
def get_inventory(action, inventories, response_format=None, host=None, playbook_dir=None,
                  vault_ids=None, vault_password_file=None, output_file=None, export=None, **kwargs):
    """Run ``ansible-inventory`` with the given action against the inventories.

    :returns: ``(response, error)``; ``response`` is parsed into a dict when
        JSON output was requested, otherwise left as raw text.
    """
    callbacks = {name: kwargs.pop(name, None)
                 for name in ('event_handler', 'status_handler',
                              'artifacts_handler', 'cancel_callback',
                              'finished_callback')}
    config = InventoryConfig(**kwargs)
    config.prepare_inventory_command(action=action, inventories=inventories,
                                     response_format=response_format, host=host,
                                     playbook_dir=playbook_dir, vault_ids=vault_ids,
                                     vault_password_file=vault_password_file,
                                     output_file=output_file, export=export)
    runner = Runner(config, **callbacks)
    runner.run()
    output_text = runner.stdout.read()
    error_text = runner.stderr.read()
    if output_text and response_format == 'json':
        return json.loads(sanitize_json_response(output_text)), error_text
    return output_text, error_text
def get_ansible_config(action, config_file=None, only_changed=None, **kwargs):
    """Run ``ansible-config`` with the given action.

    :returns: ``(response, error)`` as raw text.
    """
    callbacks = {name: kwargs.pop(name, None)
                 for name in ('event_handler', 'status_handler',
                              'artifacts_handler', 'cancel_callback',
                              'finished_callback')}
    config = AnsibleCfgConfig(**kwargs)
    config.prepare_ansible_config_command(action=action, config_file=config_file,
                                          only_changed=only_changed)
    runner = Runner(config, **callbacks)
    runner.run()
    return runner.stdout.read(), runner.stderr.read()
def get_role_list(collection=None, playbook_dir=None, **kwargs):
    """List installed roles (optionally restricted to one collection).

    :returns: ``(response, error)``; the JSON output of ``ansible-doc`` is
        parsed into a dict when non-empty.
    """
    callbacks = {name: kwargs.pop(name, None)
                 for name in ('event_handler', 'status_handler',
                              'artifacts_handler', 'cancel_callback',
                              'finished_callback')}
    config = DocConfig(**kwargs)
    config.prepare_role_list_command(collection, playbook_dir)
    runner = Runner(config, **callbacks)
    runner.run()
    output_text = runner.stdout.read()
    error_text = runner.stderr.read()
    if output_text:
        return json.loads(sanitize_json_response(output_text)), error_text
    return output_text, error_text
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
    """Fetch the argument spec for a role via ``ansible-doc``.

    :returns: ``(response, error)``; the JSON output is parsed into a dict
        when non-empty.
    """
    callbacks = {name: kwargs.pop(name, None)
                 for name in ('event_handler', 'status_handler',
                              'artifacts_handler', 'cancel_callback',
                              'finished_callback')}
    config = DocConfig(**kwargs)
    config.prepare_role_argspec_command(role, collection, playbook_dir)
    runner = Runner(config, **callbacks)
    runner.run()
    output_text = runner.stdout.read()
    error_text = runner.stderr.read()
    if output_text:
        return json.loads(sanitize_json_response(output_text)), error_text
    return output_text, error_text
| true | true |
1c2e0224eddb941725f123b7e5a73c2869f807cd | 1,911 | py | Python | var/spack/repos/builtin/packages/libnotify/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/libnotify/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/libnotify/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Libnotify(MesonPackage):
    """libnotify is a library for sending desktop notifications"""

    homepage = "https://github.com/GNOME/libnotify"
    url = "https://github.com/GNOME/libnotify/archive/0.7.9.tar.gz"

    version('0.7.9', sha256='9bd4f5fa911d27567e7cc2d2d09d69356c16703c4e8d22c0b49a5c45651f3af0')

    # NOTE: docbook/gtkdoc build variants are intentionally absent — libnotify
    # has trouble locating the docbook DTD/XSLT, so docs and gtk-doc are
    # forced off in meson_args() below.
    depends_on('pkgconfig', type='build')
    depends_on('glib@2.26.0:')
    depends_on('gtkplus@2.90:')
    depends_on('gobject-introspection')
    depends_on('libxslt', type='build')
    depends_on('docbook-xsl', type='build')

    # Teach the build where docbook lives.
    patch('docbook-location.patch')

    def meson_args(self):
        """Meson options: docs, tests and gtk-doc are all disabled."""
        return [
            '-Ddocbook_docs=disabled',
            '-Dtests=false',
            '-Dgtk_doc=false',
        ]
| 32.948276 | 95 | 0.632653 |
from spack.package import *
class Libnotify(MesonPackage):
    """Spack package for libnotify, the desktop-notification library."""

    homepage = "https://github.com/GNOME/libnotify"
    url = "https://github.com/GNOME/libnotify/archive/0.7.9.tar.gz"

    version('0.7.9', sha256='9bd4f5fa911d27567e7cc2d2d09d69356c16703c4e8d22c0b49a5c45651f3af0')

    depends_on('pkgconfig', type='build')
    depends_on('glib@2.26.0:')
    depends_on('gtkplus@2.90:')
    depends_on('gobject-introspection')
    depends_on('libxslt', type='build')
    depends_on('docbook-xsl', type='build')

    # Teach the build where docbook lives.
    patch('docbook-location.patch')

    def meson_args(self):
        """Return the Meson options: docbook docs, tests and gtk-doc off."""
        options = ['-Ddocbook_docs=disabled']
        options += ['-Dtests=false', '-Dgtk_doc=false']
        return options
| true | true |
1c2e0332d2980ce4a16bec4961c875df673783a2 | 1,624 | py | Python | HackerRank/Interview Preparation Kit/Dictionaries and Hashmaps/Hash Tables: Ransom Note/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | HackerRank/Interview Preparation Kit/Dictionaries and Hashmaps/Hash Tables: Ransom Note/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | HackerRank/Interview Preparation Kit/Dictionaries and Hashmaps/Hash Tables: Ransom Note/solution.py | ltdangkhoa/Computer-Science-Fundamental | b70ba714e1dd13fcb377125e047c5fc08d3a82b3 | [
"MIT"
] | null | null | null | """solution.py"""
import math
import os
import random
import re
import sys
import timeit
# from collections import Counter
def checkMagazine(magazine, note):
    """Print 'Yes' when every word of `note` (with multiplicity) is available
    in `magazine`, otherwise print 'No'.

    Runs in O(m + n): one counting pass over the magazine words, then one
    consuming pass over the note words.
    """
    counts = {}
    for word in magazine:
        counts[word] = counts.get(word, 0) + 1
    for word in note:
        remaining = counts.get(word, 0)
        if not remaining:
            print('No')
            return
        counts[word] = remaining - 1
    print('Yes')
def run_time_it():
    """Trigger timeit: time one checkMagazine call on the module-level inputs."""
    # `magazine` and `note` are globals assigned in the __main__ block below.
    checkMagazine(magazine, note)
if __name__ == '__main__':
    # Run the solution against every fixture file in input/ and time it.
    INPUT_PATH = 'input/'
    for filename in os.listdir(INPUT_PATH):
        print('📂 %s' % (filename))
        # Context manager fixes the original's leak of one open file handle
        # per iteration.
        with open(INPUT_PATH + filename, 'r') as f:
            inputs = f.readlines()
        input_line = 0
        mn = inputs[input_line].split()
        input_line += 1
        m = int(mn[0])  # declared sizes from the fixture header (unused:
        n = int(mn[1])  # the word lists are split directly below)
        magazine = inputs[input_line].rstrip().split()
        input_line += 1
        note = inputs[input_line].rstrip().split()
        print("⏰ %.12f seconds ⏰" % timeit.timeit(run_time_it, number=1))
| 25.375 | 73 | 0.595443 |
import math
import os
import random
import re
import sys
import timeit
def checkMagazine(magazine, note):
    """Print 'Yes' if the note can be assembled from the magazine's words
    (respecting multiplicity), else print 'No'."""
    pool = {}
    for w in magazine:
        pool[w] = pool.get(w, 0) + 1
    for w in note:
        left = pool.get(w, 0)
        if left == 0:
            print('No')
            return
        pool[w] = left - 1
    print('Yes')
def run_time_it():
    """Adapter for timeit: run checkMagazine on the module-level inputs."""
    # `magazine` and `note` are globals assigned in the __main__ block below.
    checkMagazine(magazine, note)
if __name__ == '__main__':
    # Run the solution against every fixture file in input/ and time it.
    INPUT_PATH = 'input/'
    for filename in os.listdir(INPUT_PATH):
        print('📂 %s' % (filename))
        # Context manager fixes the original's leak of one open file handle
        # per iteration.
        with open(INPUT_PATH + filename, 'r') as f:
            inputs = f.readlines()
        input_line = 0
        mn = inputs[input_line].split()
        input_line += 1
        m = int(mn[0])  # declared sizes from the fixture header (unused:
        n = int(mn[1])  # the word lists are split directly below)
        magazine = inputs[input_line].rstrip().split()
        input_line += 1
        note = inputs[input_line].rstrip().split()
        print("⏰ %.12f seconds ⏰" % timeit.timeit(run_time_it, number=1))
| true | true |
1c2e054ce3389cb1f00d39e8b788777118c75d52 | 793 | py | Python | test_data/files/paramatrized_test.py | aleksul/pytest-motor | 20dd246fce777f0e21d1d03244e494e818a3dd52 | [
"MIT"
] | 4 | 2021-07-10T15:21:01.000Z | 2021-07-17T12:11:06.000Z | test_data/files/paramatrized_test.py | aleksul/pytest-motor | 20dd246fce777f0e21d1d03244e494e818a3dd52 | [
"MIT"
] | 47 | 2021-07-12T13:59:19.000Z | 2022-01-31T20:49:03.000Z | test_data/files/paramatrized_test.py | aleksul/pytest-motor | 20dd246fce777f0e21d1d03244e494e818a3dd52 | [
"MIT"
] | 4 | 2021-07-13T19:38:47.000Z | 2021-07-17T13:14:46.000Z | """A test file with a paramatrized test."""
from typing import Any, Dict
import pytest
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorCollection, AsyncIOMotorDatabase
@pytest.mark.asyncio
@pytest.mark.parametrize('document', [{}, {'foo': 'bar'}, {'wibble': 'wobble'}])
async def test_with_parametrization(motor_client: AsyncIOMotorClient,
                                    document: Dict[str, Any]) -> None:
    """Insert each parametrized document and verify it is the sole record."""
    collection: AsyncIOMotorCollection = motor_client['database']['collection']
    await collection.insert_one(document)
    assert (await collection.count_documents({})) == 1
| 31.72 | 99 | 0.64691 | from typing import Any, Dict
import pytest
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorCollection, AsyncIOMotorDatabase
@pytest.mark.asyncio
@pytest.mark.parametrize('document', [{}, {'foo': 'bar'}, {'wibble': 'wobble'}])
async def test_with_parametrization(motor_client: AsyncIOMotorClient,
                                    document: Dict[str, Any]) -> None:
    """Each parametrized document is inserted and ends up as the only one."""
    database: AsyncIOMotorDatabase = motor_client['database']
    target: AsyncIOMotorCollection = database['collection']
    await target.insert_one(document)
    assert (await target.count_documents({})) == 1
| true | true |
1c2e0552fb92d2d6b2fe226d4ae42a3dcd9204c9 | 2,563 | py | Python | Chap 3/ElifWorkSheet.py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | [
"MIT"
] | null | null | null | Chap 3/ElifWorkSheet.py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | [
"MIT"
] | null | null | null | Chap 3/ElifWorkSheet.py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | [
"MIT"
] | null | null | null | #David Hickox
# Question 1
car = input("What kind of car do you drive?")
if car.lower() == "toyota" or car.lower() == "honda":
msg = "you drive a fuel efficient car!"
else:
msg = "you ruin the enviroment"
print(msg)
# Question 2
name = input("What is your name?")
savings = float(input("What is your savings account balance?"))
checking = float(input("What is your checking account balance?"))
if savings >= 1500 or checking >= 3000:
msg = name.title()+" is on the list"
else:
msg = "this customer is not on the list"
print (msg)
# Question 3
name = input("What is your name?")
idnum = float(input("What is your id number?"))
gender = input("What is your gender?")
if (gender.lower() == "m" or gender.lower() == "male") and (idnum >= 4389 and idnum <= 5588):
msg = name.title()+" is an employee on the exterminate list"
else:
msg = "this employee is not on the list"
print(msg)
# Question 4
name = input("What is your name?")
age = float(input("What is your age?"))
gender = input("What is your gender?")
if (gender.lower() == "f" or gender.lower() == "female") and age > 22:
msg = name.title()+" is the droid you are looking for"
else:
msg = "this person does not qualify for the search criteria"
print (msg)
# Question 5
amount = float(input("How much did you spend? "))
if amount >= 50:
msg = "You get a 20% discount for a final price of "+str(amount-amount*.2)
elif amount >= 25:
msg = "You get a 10% discount for a final price of "+str(amount-amount*.1)
elif amount > 0:
msg = "You get a 5% discount for a final price of "+str(amount-amount*.05)
else:
msg = "You broke something"
print (msg)
#6
print("""
For Los Angeles Press 1
For Chicago Press 2
For Louisville Press 3
For New Orleans Press 4
For St. Louis Press 5
""")
city = input("What city would you like to visit the 6 Flags in?\nPlease enter a number or type the name as seen above ")
if city.lower() == "1" or city.lower() == "los angeles":
msg = "The ticket price for Los Angeles is $60"
elif city.lower() == "2" or city.lower() == "chicago":
msg = "The ticket price for Chicago is $70"
elif city.lower() == "3" or city.lower() == "louisville":
msg = "The ticket price for Louisville is $45"
elif city.lower() == "4" or city.lower() == "new orleans":
msg = "The ticket price for New Orleans is $50"
elif city.lower() == "5" or city.lower() == "st. louis":
msg = "The ticket price for St. Louis is $65"
else:
msg = "that was not an option please try again"
print(msg)
input("Press enter to exit")
| 29.45977 | 120 | 0.65119 |
# Interactive if/elif worksheet: six independent exercises.  Each question
# reads from input(), chooses a message with an if/elif chain, and prints it.
# Question 1 — branch on car make (case-insensitive).
car = input("What kind of car do you drive?")
if car.lower() == "toyota" or car.lower() == "honda":
    msg = "you drive a fuel efficient car!"
else:
    msg = "you ruin the enviroment"
print(msg)
# Question 2 — either balance threshold qualifies the customer.
name = input("What is your name?")
savings = float(input("What is your savings account balance?"))
checking = float(input("What is your checking account balance?"))
if savings >= 1500 or checking >= 3000:
    msg = name.title()+" is on the list"
else:
    msg = "this customer is not on the list"
print (msg)
# Question 3 — male AND id in [4389, 5588].
# NOTE(review): the id is parsed as float; presumably ids are integers —
# int() would be stricter, confirm against the worksheet spec.
name = input("What is your name?")
idnum = float(input("What is your id number?"))
gender = input("What is your gender?")
if (gender.lower() == "m" or gender.lower() == "male") and (idnum >= 4389 and idnum <= 5588):
    msg = name.title()+" is an employee on the exterminate list"
else:
    msg = "this employee is not on the list"
print(msg)
# Question 4 — female AND strictly older than 22.
name = input("What is your name?")
age = float(input("What is your age?"))
gender = input("What is your gender?")
if (gender.lower() == "f" or gender.lower() == "female") and age > 22:
    msg = name.title()+" is the droid you are looking for"
else:
    msg = "this person does not qualify for the search criteria"
print (msg)
# Question 5 — tiered discount; the first matching tier wins, so the
# thresholds must stay ordered high-to-low.
amount = float(input("How much did you spend? "))
if amount >= 50:
    msg = "You get a 20% discount for a final price of "+str(amount-amount*.2)
elif amount >= 25:
    msg = "You get a 10% discount for a final price of "+str(amount-amount*.1)
elif amount > 0:
    msg = "You get a 5% discount for a final price of "+str(amount-amount*.05)
else:
    msg = "You broke something"
print (msg)
# Question 6 — menu lookup; accepts either the number or the city name.
print("""
For Los Angeles Press 1
For Chicago Press 2
For Louisville Press 3
For New Orleans Press 4
For St. Louis Press 5
""")
city = input("What city would you like to visit the 6 Flags in?\nPlease enter a number or type the name as seen above ")
if city.lower() == "1" or city.lower() == "los angeles":
    msg = "The ticket price for Los Angeles is $60"
elif city.lower() == "2" or city.lower() == "chicago":
    msg = "The ticket price for Chicago is $70"
elif city.lower() == "3" or city.lower() == "louisville":
    msg = "The ticket price for Louisville is $45"
elif city.lower() == "4" or city.lower() == "new orleans":
    msg = "The ticket price for New Orleans is $50"
elif city.lower() == "5" or city.lower() == "st. louis":
    msg = "The ticket price for St. Louis is $65"
else:
    msg = "that was not an option please try again"
print(msg)
# Keep the console window open until the user dismisses it.
input("Press enter to exit")
| true | true |
1c2e059a45fc0fd56329ecec30a7e9b124c7c602 | 4,735 | py | Python | awward/settings.py | ruthjomo/Awwardsapp | 8e2a517e569f788f803219a143f2ae9e5dedab13 | [
"Unlicense",
"MIT"
] | null | null | null | awward/settings.py | ruthjomo/Awwardsapp | 8e2a517e569f788f803219a143f2ae9e5dedab13 | [
"Unlicense",
"MIT"
] | null | null | null | awward/settings.py | ruthjomo/Awwardsapp | 8e2a517e569f788f803219a143f2ae9e5dedab13 | [
"Unlicense",
"MIT"
] | null | null | null | """
Django settings for awward project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from pathlib import Path
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
# Email configurations (all values come from the environment via python-decouple)
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')

MODE = config("MODE", default="dev")
# SECURITY: secret key and debug flag are environment-driven.  The hardcoded
# SECRET_KEY / DEBUG = True / ALLOWED_HOSTS = [] / DATABASES re-assignments
# that used to appear further down silently clobbered these settings (and
# committed a secret with DEBUG enabled); they have been removed.
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)

# development: local Postgres configured entirely from the environment
if config('MODE')=="dev":
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': config('DB_NAME'),
            'USER': config('DB_USER'),
            'PASSWORD': config('DB_PASSWORD'),
            'HOST': config('DB_HOST'),
            'PORT': '',
        }
    }
# production: DATABASE_URL (e.g. Heroku) with persistent connections
else:
    DATABASES = {
        'default': dj_database_url.config(
            default=config('DATABASE_URL')
        )
    }
    db_from_env = dj_database_url.config(conn_max_age=500)
    DATABASES['default'].update(db_from_env)

ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Application definition
INSTALLED_APPS = [
    'awwardapp',
    'bootstrap3',
    'rest_framework',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'awward.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'awward.wsgi.application'

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = '/'

# Configure Django App for Heroku.
django_heroku.settings(locals())
from pathlib import Path
import os
import django_heroku
import dj_database_url
from decouple import config,Csv
# Email configuration, sourced from the environment via python-decouple.
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# dev: local Postgres from env vars; otherwise: DATABASE_URL via dj-database-url.
if config('MODE')=="dev":
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': config('DB_NAME'),
            'USER': config('DB_USER'),
            'PASSWORD': config('DB_PASSWORD'),
            'HOST': config('DB_HOST'),
            'PORT': '',
        }
    }
else:
    DATABASES = {
        'default': dj_database_url.config(
            default=config('DATABASE_URL')
        )
    }
    db_from_env = dj_database_url.config(conn_max_age=500)
    DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): the three assignments below clobber the environment-driven
# SECRET_KEY / DEBUG / ALLOWED_HOSTS configured above, committing a secret
# key to source control and forcing DEBUG=True.  Presumably dev-time
# leftovers — confirm and remove before deploying.
SECRET_KEY = 'v4oo2o1a-f-0=zbiz7voj6)2&stf4+jx@^m8(pfv5p#l%=4j5z'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'awwardapp',
    'bootstrap3',
    'rest_framework',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'awward.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'awward.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): this hardcoded DATABASES dict overrides the MODE-dependent
# configuration built near the top of the file — verify which one is meant
# to win before changing deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'awwards',
        'USER': 'moringa',
        'PASSWORD':'Access',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = '/'
# Configure Django App for Heroku.
django_heroku.settings(locals())
django_heroku.settings(locals()) | true | true |
1c2e063141aeb825b131ec6f67df244c6f55158a | 878 | py | Python | openpyxl/chart/tests/test_updown_bars.py | nickpell/openpyxl | 160c730c419f3796d2208b05c3b26a2b2fc10eb1 | [
"MIT"
] | 6 | 2018-05-15T05:08:52.000Z | 2021-12-23T12:31:28.000Z | openpyxl/chart/tests/test_updown_bars.py | nickpell/openpyxl | 160c730c419f3796d2208b05c3b26a2b2fc10eb1 | [
"MIT"
] | 1 | 2019-08-27T15:27:48.000Z | 2019-08-27T15:27:48.000Z | openpyxl/chart/tests/test_updown_bars.py | nickpell/openpyxl | 160c730c419f3796d2208b05c3b26a2b2fc10eb1 | [
"MIT"
] | 6 | 2020-03-23T15:59:14.000Z | 2021-09-18T09:54:57.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def UpDownBars():
from ..updown_bars import UpDownBars
return UpDownBars
class TestUpDownBars:
def test_ctor(self, UpDownBars):
bars = UpDownBars(gapWidth=150)
xml = tostring(bars.to_tree())
expected = """
<upbars>
<gapWidth val="150"/>
</upbars>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, UpDownBars):
src = """
<upDownBars>
<gapWidth val="156"/>
</upDownBars>
"""
node = fromstring(src)
bars = UpDownBars.from_tree(node)
assert bars == UpDownBars(gapWidth=156)
| 23.105263 | 55 | 0.624146 | from __future__ import absolute_import
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def UpDownBars():
from ..updown_bars import UpDownBars
return UpDownBars
class TestUpDownBars:
def test_ctor(self, UpDownBars):
bars = UpDownBars(gapWidth=150)
xml = tostring(bars.to_tree())
expected = """
<upbars>
<gapWidth val="150"/>
</upbars>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, UpDownBars):
src = """
<upDownBars>
<gapWidth val="156"/>
</upDownBars>
"""
node = fromstring(src)
bars = UpDownBars.from_tree(node)
assert bars == UpDownBars(gapWidth=156)
| true | true |
1c2e06e968e9443337111c075182e95447ba19e6 | 937 | py | Python | BB/bbObjects/items/modules/bbJumpDriveModule.py | mwaitzman/GOF2BountyBot | b66026228b752b07ac4734ca74b60730dbd74995 | [
"MIT"
] | null | null | null | BB/bbObjects/items/modules/bbJumpDriveModule.py | mwaitzman/GOF2BountyBot | b66026228b752b07ac4734ca74b60730dbd74995 | [
"MIT"
] | null | null | null | BB/bbObjects/items/modules/bbJumpDriveModule.py | mwaitzman/GOF2BountyBot | b66026228b752b07ac4734ca74b60730dbd74995 | [
"MIT"
] | null | null | null | from . import bbModule
from ....bbConfig import bbData
class bbJumpDriveModule(bbModule.bbModule):
def __init__(self, name, aliases, value=0, wiki="", manufacturer="", icon="", emoji=""):
super(bbJumpDriveModule, self).__init__(name, aliases, value=value, wiki=wiki, manufacturer=manufacturer, icon=icon, emoji=emoji)
def getType(self):
return bbJumpDriveModule
def fromDict(moduleDict):
return bbJumpDriveModule(moduleDict["name"], moduleDict["aliases"] if "aliases" in moduleDict else [],
value=moduleDict["value"] if "value" in moduleDict else 0, wiki=moduleDict["wiki"] if "wiki" in moduleDict else "",
manufacturer=moduleDict["manufacturer"] if "manufacturer" in moduleDict else "", icon=moduleDict["icon"] if "icon" in moduleDict else bbData.rocketIcon,
emoji=moduleDict["emoji"] if "emoji" in moduleDict else "")
| 52.055556 | 180 | 0.66809 | from . import bbModule
from ....bbConfig import bbData
class bbJumpDriveModule(bbModule.bbModule):
def __init__(self, name, aliases, value=0, wiki="", manufacturer="", icon="", emoji=""):
super(bbJumpDriveModule, self).__init__(name, aliases, value=value, wiki=wiki, manufacturer=manufacturer, icon=icon, emoji=emoji)
def getType(self):
return bbJumpDriveModule
def fromDict(moduleDict):
return bbJumpDriveModule(moduleDict["name"], moduleDict["aliases"] if "aliases" in moduleDict else [],
value=moduleDict["value"] if "value" in moduleDict else 0, wiki=moduleDict["wiki"] if "wiki" in moduleDict else "",
manufacturer=moduleDict["manufacturer"] if "manufacturer" in moduleDict else "", icon=moduleDict["icon"] if "icon" in moduleDict else bbData.rocketIcon,
emoji=moduleDict["emoji"] if "emoji" in moduleDict else "")
| true | true |
1c2e06eb67387083caca4750c1afe4c37eb06687 | 3,053 | py | Python | python/classes-dealing-with-complex-numbers.py | blog-a1/hackeRRank | 72923ee08c8759bd5a10ba6c390b6755fe2bd2e2 | [
"MIT"
] | 1 | 2021-01-13T11:52:27.000Z | 2021-01-13T11:52:27.000Z | python/classes-dealing-with-complex-numbers.py | blog-a1/hackeRRank | 72923ee08c8759bd5a10ba6c390b6755fe2bd2e2 | [
"MIT"
] | null | null | null | python/classes-dealing-with-complex-numbers.py | blog-a1/hackeRRank | 72923ee08c8759bd5a10ba6c390b6755fe2bd2e2 | [
"MIT"
] | null | null | null |
from math import pow
class Complex(object):
def __init__(self, real, imaginary):
self.real=real
self.imaginary=imaginary
def __add__(self, no):
a=self.real+no.real
b=self.imaginary+no.imaginary
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def __sub__(self, no):
a=self.real-no.real
b=self.imaginary-no.imaginary
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def __mul__(self, no):
a=self.real*no.real-self.imaginary*no.imaginary
b=no.imaginary*self.real+self.imaginary*no.real
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def __truediv__(self, no):
x=no.real**2+no.imaginary**2
a=(self.real*no.real+self.imaginary*no.imaginary)/x
b=(-no.imaginary*self.real+self.imaginary*no.real)/x
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def mod(self):
a=pow(self.real**2+self.imaginary**2, 0.5)
b=0
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def __str__(self):
if self.imaginary == 0:
result = "%.2f+0.00i" % (self.real)
elif self.real == 0:
if self.imaginary >= 0:
result = "0.00+%.2fi" % (self.imaginary)
else:
result = "0.00-%.2fi" % (abs(self.imaginary))
elif self.imaginary > 0:
result = "%.2f+%.2fi" % (self.real, self.imaginary)
else:
result = "%.2f-%.2fi" % (self.real, abs(self.imaginary))
return result
if __name__ == '__main__':
c = map(float, input().split())
d = map(float, input().split())
x = Complex(*c)
y = Complex(*d)
print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n')
| 34.303371 | 70 | 0.457255 |
from math import pow
class Complex(object):
def __init__(self, real, imaginary):
self.real=real
self.imaginary=imaginary
def __add__(self, no):
a=self.real+no.real
b=self.imaginary+no.imaginary
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def __sub__(self, no):
a=self.real-no.real
b=self.imaginary-no.imaginary
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def __mul__(self, no):
a=self.real*no.real-self.imaginary*no.imaginary
b=no.imaginary*self.real+self.imaginary*no.real
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def __truediv__(self, no):
x=no.real**2+no.imaginary**2
a=(self.real*no.real+self.imaginary*no.imaginary)/x
b=(-no.imaginary*self.real+self.imaginary*no.real)/x
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def mod(self):
a=pow(self.real**2+self.imaginary**2, 0.5)
b=0
if a<0 and b<0:
return ("-%.2f-%.2fi"%(abs(a),abs(b)))
elif a>=0 and b<0:
return ("%.2f-%.2fi"%(abs(a),abs(b)))
elif a<0and b>=0:
return ("-%.2f+%.2fi"%(abs(a),abs(b)))
elif a>=0and b>=0:
return ("%.2f+%.2fi"%(abs(a),abs(b)))
def __str__(self):
if self.imaginary == 0:
result = "%.2f+0.00i" % (self.real)
elif self.real == 0:
if self.imaginary >= 0:
result = "0.00+%.2fi" % (self.imaginary)
else:
result = "0.00-%.2fi" % (abs(self.imaginary))
elif self.imaginary > 0:
result = "%.2f+%.2fi" % (self.real, self.imaginary)
else:
result = "%.2f-%.2fi" % (self.real, abs(self.imaginary))
return result
if __name__ == '__main__':
c = map(float, input().split())
d = map(float, input().split())
x = Complex(*c)
y = Complex(*d)
print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n')
| true | true |
1c2e089afd686ced49da2f85d95f318c20c156ee | 2,283 | py | Python | setup.py | powellc/hacklabs | c39f05cb9ea37e98260369c09a618e7870c61f3d | [
"BSD-3-Clause"
] | null | null | null | setup.py | powellc/hacklabs | c39f05cb9ea37e98260369c09a618e7870c61f3d | [
"BSD-3-Clause"
] | 9 | 2018-02-23T13:32:33.000Z | 2018-02-23T13:32:34.000Z | setup.py | powellc/hacklabs | c39f05cb9ea37e98260369c09a618e7870c61f3d | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
version = __import__('hacklabs').__version__
install_requires = [
'setuptools',
'Django==1.6.5',
'django-configurations==0.8',
'dj-database-url==0.3.0',
'pylibmc==1.3.0',
'boto==2.9.5',
'South==1.0.0',
'django-storages==1.1.8',
'Pillow==2.5.1',
'django-cache-url==0.8.0',
'werkzeug==0.9.4',
'gunicorn==0.17.4',
'easy-thumbnails==1.2',
'django-debug-toolbar==1.1',
'django-extensions==1.3.4',
'django-braces==1.4.0',
'django-allauth==0.16.1',
'django-floppyforms==1.1.1',
'django-custom-user==0.4',
'raven==5.0.0',
'boto==2.9.5',
'django-storages==1.1.8',
'psycopg2==2.5',
'Markdown>2.2.0',
'django-sekizai>=0.7',
'django-mptt==0.6.0',
'django-bootstrap-form==3.1',
]
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
setup(
name="hacklabs",
version=version,
url='http://github.com/powellc/hacklabs',
license='BSD',
platforms=['OS Independent'],
description="An hacklabs for django applications.",
author="Colin Powell",
author_email='colin.powell@gmail.com',
packages=find_packages(),
install_requires=install_requires,
include_package_data=True,
zip_safe=False,
tests_require=['tox'],
cmdclass={'test': Tox},
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
package_dir={
'hacklabs': 'hacklabs',
'hacklabs/templates': 'hacklabs/templates',
},
entry_points={
'console_scripts': [
'hacklabs = hacklabs.manage_hacklabs:main',
],
},
)
| 26.858824 | 59 | 0.599212 | from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
version = __import__('hacklabs').__version__
install_requires = [
'setuptools',
'Django==1.6.5',
'django-configurations==0.8',
'dj-database-url==0.3.0',
'pylibmc==1.3.0',
'boto==2.9.5',
'South==1.0.0',
'django-storages==1.1.8',
'Pillow==2.5.1',
'django-cache-url==0.8.0',
'werkzeug==0.9.4',
'gunicorn==0.17.4',
'easy-thumbnails==1.2',
'django-debug-toolbar==1.1',
'django-extensions==1.3.4',
'django-braces==1.4.0',
'django-allauth==0.16.1',
'django-floppyforms==1.1.1',
'django-custom-user==0.4',
'raven==5.0.0',
'boto==2.9.5',
'django-storages==1.1.8',
'psycopg2==2.5',
'Markdown>2.2.0',
'django-sekizai>=0.7',
'django-mptt==0.6.0',
'django-bootstrap-form==3.1',
]
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
setup(
name="hacklabs",
version=version,
url='http://github.com/powellc/hacklabs',
license='BSD',
platforms=['OS Independent'],
description="An hacklabs for django applications.",
author="Colin Powell",
author_email='colin.powell@gmail.com',
packages=find_packages(),
install_requires=install_requires,
include_package_data=True,
zip_safe=False,
tests_require=['tox'],
cmdclass={'test': Tox},
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
package_dir={
'hacklabs': 'hacklabs',
'hacklabs/templates': 'hacklabs/templates',
},
entry_points={
'console_scripts': [
'hacklabs = hacklabs.manage_hacklabs:main',
],
},
)
| true | true |
1c2e09fc76e4f17fe7097352772c5c54fd08d6fd | 5,351 | py | Python | deepspeech/frontend/augmentor/spec_augment.py | iclementine/DeepSpeech | d0635c6592a2e787ca296e15241e7371a83ca55f | [
"Apache-2.0"
] | 1 | 2021-05-14T23:27:13.000Z | 2021-05-14T23:27:13.000Z | deepspeech/frontend/augmentor/spec_augment.py | xihuanafeng/DeepSpeech | 2bdf4c946af66cc173d638c072ba6435cd18a286 | [
"Apache-2.0"
] | null | null | null | deepspeech/frontend/augmentor/spec_augment.py | xihuanafeng/DeepSpeech | 2bdf4c946af66cc173d638c072ba6435cd18a286 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the volume perturb augmentation model."""
import numpy as np
from deepspeech.frontend.augmentor.base import AugmentorBase
from deepspeech.utils.log import Log
logger = Log(__name__).getlog()
class SpecAugmentor(AugmentorBase):
"""Augmentation model for Time warping, Frequency masking, Time masking.
SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition
https://arxiv.org/abs/1904.08779
SpecAugment on Large Scale Datasets
https://arxiv.org/abs/1912.05533
"""
def __init__(self,
rng,
F,
T,
n_freq_masks,
n_time_masks,
p=1.0,
W=40,
adaptive_number_ratio=0,
adaptive_size_ratio=0,
max_n_time_masks=20):
"""SpecAugment class.
Args:
rng (random.Random): random generator object.
F (int): parameter for frequency masking
T (int): parameter for time masking
n_freq_masks (int): number of frequency masks
n_time_masks (int): number of time masks
p (float): parameter for upperbound of the time mask
W (int): parameter for time warping
adaptive_number_ratio (float): adaptive multiplicity ratio for time masking
adaptive_size_ratio (float): adaptive size ratio for time masking
max_n_time_masks (int): maximum number of time masking
"""
super().__init__()
self._rng = rng
self.W = W
self.F = F
self.T = T
self.n_freq_masks = n_freq_masks
self.n_time_masks = n_time_masks
self.p = p
#logger.info(f"specaug: F-{F}, T-{T}, F-n-{n_freq_masks}, T-n-{n_time_masks}")
# adaptive SpecAugment
self.adaptive_number_ratio = adaptive_number_ratio
self.adaptive_size_ratio = adaptive_size_ratio
self.max_n_time_masks = max_n_time_masks
if adaptive_number_ratio > 0:
self.n_time_masks = 0
logger.info('n_time_masks is set ot zero for adaptive SpecAugment.')
if adaptive_size_ratio > 0:
self.T = 0
logger.info('T is set to zero for adaptive SpecAugment.')
self._freq_mask = None
self._time_mask = None
def librispeech_basic(self):
self.W = 80
self.F = 27
self.T = 100
self.n_freq_masks = 1
self.n_time_masks = 1
self.p = 1.0
def librispeech_double(self):
self.W = 80
self.F = 27
self.T = 100
self.n_freq_masks = 2
self.n_time_masks = 2
self.p = 1.0
def switchboard_mild(self):
self.W = 40
self.F = 15
self.T = 70
self.n_freq_masks = 2
self.n_time_masks = 2
self.p = 0.2
def switchboard_strong(self):
self.W = 40
self.F = 27
self.T = 70
self.n_freq_masks = 2
self.n_time_masks = 2
self.p = 0.2
@property
def freq_mask(self):
return self._freq_mask
@property
def time_mask(self):
return self._time_mask
def time_warp(xs, W=40):
raise NotImplementedError
def mask_freq(self, xs, replace_with_zero=False):
n_bins = xs.shape[0]
for i in range(0, self.n_freq_masks):
f = int(self._rng.uniform(low=0, high=self.F))
f_0 = int(self._rng.uniform(low=0, high=n_bins - f))
xs[f_0:f_0 + f, :] = 0
assert f_0 <= f_0 + f
self._freq_mask = (f_0, f_0 + f)
return xs
def mask_time(self, xs, replace_with_zero=False):
n_frames = xs.shape[1]
if self.adaptive_number_ratio > 0:
n_masks = int(n_frames * self.adaptive_number_ratio)
n_masks = min(n_masks, self.max_n_time_masks)
else:
n_masks = self.n_time_masks
if self.adaptive_size_ratio > 0:
T = self.adaptive_size_ratio * n_frames
else:
T = self.T
for i in range(n_masks):
t = int(self._rng.uniform(low=0, high=T))
t = min(t, int(n_frames * self.p))
t_0 = int(self._rng.uniform(low=0, high=n_frames - t))
xs[:, t_0:t_0 + t] = 0
assert t_0 <= t_0 + t
self._time_mask = (t_0, t_0 + t)
return xs
def transform_feature(self, xs: np.ndarray):
"""
Args:
xs (FloatTensor): `[F, T]`
Returns:
xs (FloatTensor): `[F, T]`
"""
# xs = self.time_warp(xs)
xs = self.mask_freq(xs)
xs = self.mask_time(xs)
return xs
| 31.292398 | 87 | 0.58419 |
import numpy as np
from deepspeech.frontend.augmentor.base import AugmentorBase
from deepspeech.utils.log import Log
logger = Log(__name__).getlog()
class SpecAugmentor(AugmentorBase):
def __init__(self,
rng,
F,
T,
n_freq_masks,
n_time_masks,
p=1.0,
W=40,
adaptive_number_ratio=0,
adaptive_size_ratio=0,
max_n_time_masks=20):
super().__init__()
self._rng = rng
self.W = W
self.F = F
self.T = T
self.n_freq_masks = n_freq_masks
self.n_time_masks = n_time_masks
self.p = p
self.adaptive_number_ratio = adaptive_number_ratio
self.adaptive_size_ratio = adaptive_size_ratio
self.max_n_time_masks = max_n_time_masks
if adaptive_number_ratio > 0:
self.n_time_masks = 0
logger.info('n_time_masks is set ot zero for adaptive SpecAugment.')
if adaptive_size_ratio > 0:
self.T = 0
logger.info('T is set to zero for adaptive SpecAugment.')
self._freq_mask = None
self._time_mask = None
def librispeech_basic(self):
self.W = 80
self.F = 27
self.T = 100
self.n_freq_masks = 1
self.n_time_masks = 1
self.p = 1.0
def librispeech_double(self):
self.W = 80
self.F = 27
self.T = 100
self.n_freq_masks = 2
self.n_time_masks = 2
self.p = 1.0
def switchboard_mild(self):
self.W = 40
self.F = 15
self.T = 70
self.n_freq_masks = 2
self.n_time_masks = 2
self.p = 0.2
def switchboard_strong(self):
self.W = 40
self.F = 27
self.T = 70
self.n_freq_masks = 2
self.n_time_masks = 2
self.p = 0.2
@property
def freq_mask(self):
return self._freq_mask
@property
def time_mask(self):
return self._time_mask
def time_warp(xs, W=40):
raise NotImplementedError
def mask_freq(self, xs, replace_with_zero=False):
n_bins = xs.shape[0]
for i in range(0, self.n_freq_masks):
f = int(self._rng.uniform(low=0, high=self.F))
f_0 = int(self._rng.uniform(low=0, high=n_bins - f))
xs[f_0:f_0 + f, :] = 0
assert f_0 <= f_0 + f
self._freq_mask = (f_0, f_0 + f)
return xs
def mask_time(self, xs, replace_with_zero=False):
n_frames = xs.shape[1]
if self.adaptive_number_ratio > 0:
n_masks = int(n_frames * self.adaptive_number_ratio)
n_masks = min(n_masks, self.max_n_time_masks)
else:
n_masks = self.n_time_masks
if self.adaptive_size_ratio > 0:
T = self.adaptive_size_ratio * n_frames
else:
T = self.T
for i in range(n_masks):
t = int(self._rng.uniform(low=0, high=T))
t = min(t, int(n_frames * self.p))
t_0 = int(self._rng.uniform(low=0, high=n_frames - t))
xs[:, t_0:t_0 + t] = 0
assert t_0 <= t_0 + t
self._time_mask = (t_0, t_0 + t)
return xs
def transform_feature(self, xs: np.ndarray):
xs = self.mask_freq(xs)
xs = self.mask_time(xs)
return xs
| true | true |
1c2e0a9cb74826110c9ce4eea4b5787e91935848 | 6,608 | py | Python | test/functional/wallet_txn_clone.py | HZapperz/JahCoin | 94f0e3f60a0846bc331f334ccab0642913b9b0bd | [
"MIT"
] | 13 | 2019-01-23T04:36:05.000Z | 2022-02-21T11:20:25.000Z | test/functional/wallet_txn_clone.py | songMW/bitcoin | 5eb32d23841bbcd8eaf7ba49dc4ddfd822bd4773 | [
"MIT"
] | null | null | null | test/functional/wallet_txn_clone.py | songMW/bitcoin | 5eb32d23841bbcd8eaf7ba49dc4ddfd822bd4773 | [
"MIT"
] | 3 | 2019-01-24T07:48:15.000Z | 2021-06-11T13:34:44.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
sync_blocks,
)
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
parser.add_argument("--segwit", dest="segwit", default=False, action="store_true",
help="Test behaviour with SegWit txn (which should fail")
def setup_network(self):
# Start with split network:
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
if self.options.segwit:
output_type = "p2sh-segwit"
else:
output_type = "legacy"
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress() # bug workaround, coins generated assigned to first getnewaddress!
self.nodes[0].settxfee(.001)
node0_address1 = self.nodes[0].getnewaddress(address_type=output_type)
node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 1219)
node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
node0_address2 = self.nodes[0].getnewaddress(address_type=output_type)
node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 29)
node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
assert_equal(self.nodes[0].getbalance(),
starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress()
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendtoaddress(node1_address, 40)
txid2 = self.nodes[0].sendtoaddress(node1_address, 20)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1, 1)
clone_inputs = [{"txid": rawtx1["vin"][0]["txid"], "vout": rawtx1["vin"][0]["vout"], "sequence": rawtx1["vin"][0]["sequence"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 BTC serialized is 00286bee00000000
pos0 = 2 * (4 + 1 + 36 + 1 + 4 + 1)
hex40 = "00286bee00000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16:pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0:pos0 + 16] != hex40 or rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0:pos0 + 16] == hex40):
output0 = clone_raw[pos0:pos0 + output_len]
output1 = clone_raw[pos0 + output_len:pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_raw, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
if self.options.mine_block:
expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(node0_tx1["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
if self.options.segwit:
assert_equal(txid1, txid1_clone)
return
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(node0_tx2["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured,
# less possible orphaned matured subsidy
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
TxnMallTest().main()
| 44.053333 | 160 | 0.641344 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
sync_blocks,
)
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
parser.add_argument("--segwit", dest="segwit", default=False, action="store_true",
help="Test behaviour with SegWit txn (which should fail")
def setup_network(self):
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
if self.options.segwit:
output_type = "p2sh-segwit"
else:
output_type = "legacy"
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress()
self.nodes[0].settxfee(.001)
node0_address1 = self.nodes[0].getnewaddress(address_type=output_type)
node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 1219)
node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
node0_address2 = self.nodes[0].getnewaddress(address_type=output_type)
node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 29)
node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
assert_equal(self.nodes[0].getbalance(),
starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
node1_address = self.nodes[1].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_address, 40)
txid2 = self.nodes[0].sendtoaddress(node1_address, 20)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1, 1)
clone_inputs = [{"txid": rawtx1["vin"][0]["txid"], "vout": rawtx1["vin"][0]["vout"], "sequence": rawtx1["vin"][0]["sequence"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 BTC serialized is 00286bee00000000
pos0 = 2 * (4 + 1 + 36 + 1 + 4 + 1)
hex40 = "00286bee00000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16:pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0:pos0 + 16] != hex40 or rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0:pos0 + 16] == hex40):
output0 = clone_raw[pos0:pos0 + output_len]
output1 = clone_raw[pos0 + output_len:pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_raw, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
if self.options.mine_block:
expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(node0_tx1["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
if self.options.segwit:
assert_equal(txid1, txid1_clone)
return
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(node0_tx2["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured,
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
TxnMallTest().main()
| true | true |
1c2e0ad3f936ed49b51ecbdc1a3f4c027568e304 | 34 | py | Python | services/users/app/api/utils/__init__.py | yuuta1999/microservice-with-flask | 6ad64341edb42c7f145aabc1e38e2619df75d444 | [
"MIT"
] | 1 | 2019-07-12T07:38:16.000Z | 2019-07-12T07:38:16.000Z | services/users/app/api/utils/__init__.py | yuuta1999/microservice-with-flask | 6ad64341edb42c7f145aabc1e38e2619df75d444 | [
"MIT"
] | 4 | 2021-03-09T09:19:49.000Z | 2022-02-26T12:14:12.000Z | services/users/app/api/utils/__init__.py | yuuta1999/microservice-with-flask | 6ad64341edb42c7f145aabc1e38e2619df75d444 | [
"MIT"
] | 1 | 2020-03-31T17:36:11.000Z | 2020-03-31T17:36:11.000Z | # users/app/api/utils/__init__.py
| 17 | 33 | 0.764706 | true | true | |
1c2e0b07893a87a3f953d0af53e08c131465a2dd | 25,200 | py | Python | AICamera/app/src/main/cpp/caffe2/python/layer_model_helper.py | blackxer/AICamera | 4f0a6a09a2288da2ec7140744b5c2862df114c78 | [
"MIT"
] | 1 | 2020-01-10T02:56:03.000Z | 2020-01-10T02:56:03.000Z | AICamera/app/src/main/cpp/caffe2/python/layer_model_helper.py | blackxer/AICamera | 4f0a6a09a2288da2ec7140744b5c2862df114c78 | [
"MIT"
] | null | null | null | AICamera/app/src/main/cpp/caffe2/python/layer_model_helper.py | blackxer/AICamera | 4f0a6a09a2288da2ec7140744b5c2862df114c78 | [
"MIT"
] | null | null | null | # @package layer_model_helper
# Module caffe2.python.layer_model_helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, model_helper, schema, scope, utils, muji
from caffe2.python.modeling.parameter_info import (
ParameterInfo,
)
from caffe2.python.modeling.parameter_sharing import (
parameter_sharing_context,
)
from caffe2.python.modeling.net_modifier import NetModifier
from caffe2.python.optimizer import get_param_device
from caffe2.python.regularizer import Regularizer, RegularizationBy
from caffe2.python.layers import layers
from caffe2.proto import caffe2_pb2
from future.utils import viewitems, viewvalues
import logging
import numpy as np
import six
import copy
logger = logging.getLogger(__name__)
class LayerModelHelper(model_helper.ModelHelper):
    """
    Model helper for building models on top of layers abstractions.

    Each layer is the abstraction that is higher level than Operator. Layer
    is responsible for ownership of its own parameters and can easily be
    instantiated in multiple nets possible with different sets of ops.
    As an example: one can easily instantiate predict and train nets from
    the same set of layers, where predict net will have subset of the
    operators from train net.
    """

    def __init__(self, name, input_feature_schema, trainer_extra_schema,
                 keep_blobs=False):
        ''' TODO(amalevich): more documentation on input args

        name: model name, forwarded to ModelHelper.
        input_feature_schema: schema describing the model's input features.
        trainer_extra_schema: schema for trainer-only extra inputs
            (e.g. labels, weights).
        keep_blobs: if True, reuse the blobs already referenced by the given
            schemas (clone them) instead of materializing new blobs on
            self.net.
        '''

        super(LayerModelHelper, self).__init__(name=name)
        self._layer_names = set()
        self._layers = []
        self._param_to_shape = {}

        # seed default
        self._seed = None
        self._sequence_seed = True

        # optimizer bookkeeping
        self.param_to_optim = {}
        self.param_to_reg = {}

        self._default_optimizer = None
        self._loss = None
        self._prediction = []
        self._output_schema = None

        self._post_grad_net_modifiers = []
        self._final_net_modifiers = []

        # breakdown map; breakdown features are categorical (like dense) but not
        # necessarily used to represent data for training
        self._breakdown_map = None

        # Connect Schema to self.net. That particular instance of schema will
        # be used for generation of the Layers across the network and would be
        # used for connection with Readers.
        self._input_feature_schema = schema.NewRecord(
            self.net,
            input_feature_schema
        ) if not keep_blobs else input_feature_schema.clone()
        self._trainer_extra_schema = schema.NewRecord(
            self.net,
            trainer_extra_schema
        ) if not keep_blobs else trainer_extra_schema.clone()
        self._metrics_schema = schema.Struct()

        self._preproc_output_schema = None

        self._init_global_constants()
        self.param_init_net = self.create_init_net('param_init_net')
        self._initialize_params = True

        # additional (hard-coded) diagnose_options to report based on the model
        # TODO(xlwang): it's hack!
        self.ad_hoc_diagnose_blobs_and_operations = []
        self.ad_hoc_plot_blobs = []

    def clear_output_schema(self):
        """Reset the output schema so a new one can be assigned."""
        self._output_schema = None

    def set_initialize_params(self, initialize_params):
        """Toggle creation of initializer ops in create_param."""
        self._initialize_params = initialize_params

    def add_metric_field(self, name, value):
        """Append a named field to the metrics schema; name must be unique."""
        assert name not in self._metrics_schema.fields, (
            "Try to add metric field twice: {}".format(name))
        self._metrics_schema = self._metrics_schema + schema.Struct(
            (name, value)
        )

    def add_ad_hoc_plot_blob(self, blob, dtype=None):
        """Register a blob for ad-hoc plotting and expose it as a metric."""
        assert isinstance(
            blob, (six.string_types, core.BlobReference)
        ), "expect type str or BlobReference, but got {}".format(type(blob))
        # NOTE: builtin `float` replaces the removed `np.float` alias
        # (deprecated in NumPy 1.20, removed in 1.24); `np.float` was itself
        # just an alias of the builtin, so behavior is unchanged.
        dtype = dtype or (float, (1, ))
        self.add_metric_field(str(blob), schema.Scalar(dtype, blob))
        self.ad_hoc_plot_blobs.append(blob)

    @staticmethod
    def _get_global_constant_initializer_op(
        blob_name, array=None, dtype=None, initializer=None
    ):
        """Build the initializer operator for a global constant blob.

        Exactly one of `array` (a value convertible to np.ndarray, with
        optional `dtype`) or `initializer` (a callable taking the blob name
        and returning an operator) must be provided.
        """
        # to add a global constant to model, one first need to get the
        # initializer
        if array is not None:
            assert initializer is None,\
                "Only one from array and initializer should be specified"
            if dtype is None:
                array = np.array(array)
            else:
                array = np.array(array, dtype=dtype)

            # TODO: make GivenTensor generic
            op_name = None
            if array.dtype == np.int32:
                op_name = 'GivenTensorIntFill'
            elif array.dtype == np.int64:
                op_name = 'GivenTensorInt64Fill'
            elif array.dtype == np.str_:
                # np.str_ / np.bool_ replace the np.str / np.bool aliases
                # removed in NumPy 1.24; the dtype comparison semantics are
                # identical (matches any unicode / bool dtype respectively).
                op_name = 'GivenTensorStringFill'
            elif array.dtype == np.bool_:
                op_name = 'GivenTensorBoolFill'
            else:
                op_name = 'GivenTensorFill'

            def initializer(blob_name):
                return core.CreateOperator(
                    op_name, [],
                    blob_name,
                    shape=array.shape,
                    values=array.flatten().tolist()
                )
        else:
            assert initializer is not None
        initializer_op = initializer(blob_name)
        return initializer_op

    def add_global_constant(
        self, name, array=None, dtype=None, initializer=None
    ):
        """Register a new global constant and return its blob name.

        Raises AssertionError if `name` was already registered.
        """
        assert isinstance(name, six.string_types), (
            'name should be a string as we are using it as map key')
        # This is global namescope for constants. They will be created in all
        # init_nets and there should be very few of them.
        assert name not in self.global_constants, \
            "%s already added in global_constants" % name
        blob_name = self.net.NextBlob(name)
        self.global_constants[name] = blob_name
        initializer_op = LayerModelHelper._get_global_constant_initializer_op(
            blob_name, array, dtype, initializer
        )
        assert blob_name not in self.global_constant_initializers, \
            "there is already a initializer op associated with blob %s" % \
            blob_name
        self.global_constant_initializers[blob_name] = initializer_op
        return blob_name

    def maybe_add_global_constant(self, name, *args, **kwargs):
        # To ad hoc add new global constants without duplication
        # if the name was already registered in global_constants, it will not be
        # added even if the intended value is different from its original value
        if name in self.global_constants:
            blob_name = self.global_constants[name]
            initializer_op = \
                LayerModelHelper._get_global_constant_initializer_op(
                    blob_name, *args, **kwargs
                )
            # check if the original initializer is the same as the one intended
            # now
            assert utils.OpAlmostEqual(
                initializer_op,
                self.global_constant_initializers[blob_name],
                'debug_info'
            ), \
                "conflict initializers for global constant %s, " \
                "previous %s, now %s" % (
                    blob_name, str(initializer_op),
                    str(self.global_constant_initializers[blob_name]))
            return blob_name
        return self.add_global_constant(name, *args, **kwargs)

    def _init_global_constants(self):
        """Create the bookkeeping maps and register the default constants."""
        self.global_constants = {}
        self.global_constant_initializers = {}
        self.add_global_constant('ONE', 1.0)
        self.add_global_constant('ZERO', 0.0)
        self.add_global_constant('ZERO_RANGE', [0, 0], dtype='int32')

    def _add_global_constants(self, init_net):
        """Append every registered constant initializer op to `init_net`."""
        for initializer_op in viewvalues(self.global_constant_initializers):
            init_net._net.op.extend([initializer_op])

    def create_init_net(self, name):
        """Create a new init net pre-populated with the global constants."""
        init_net = core.Net(name)
        self._add_global_constants(init_net)
        return init_net

    def _validate_param_shape(self, param_name, shape):
        """Ensure a shared parameter is not re-created with a new shape."""
        if param_name not in self._param_to_shape:
            return

        ref_shape = self._param_to_shape[param_name]

        if shape != ref_shape:
            raise ValueError(
                "Got inconsistent shapes between shared parameters "
                "when trying to map a blob in scope {0} to {1}. ref_shape : "
                " {2}, shape : {3}".format(
                    scope.CurrentNameScope(), param_name, ref_shape, shape)
            )

    def create_param(self, param_name, shape, initializer, optimizer=None,
                     ps_param=None, regularizer=None):
        """Create a LayerParameter for `param_name`.

        `initializer` is a 1- or 2-tuple of (op type[, op kwargs]); an
        initializer op is only materialized when initialize_params is on.
        """
        if isinstance(param_name, core.BlobReference):
            param_name = str(param_name)
        elif isinstance(param_name, six.string_types):
            # Parameter name will be equal to current Namescope that got
            # resolved with the respect of parameter sharing of the scopes.
            param_name = parameter_sharing_context.get_parameter_name(
                param_name)
        else:
            raise ValueError("Unsupported type for param_name")

        param_blob = core.BlobReference(param_name)

        if len(initializer) == 1:
            init_op_args = {}
        else:
            assert len(initializer) == 2
            init_op_args = copy.deepcopy(initializer[1])
        if shape is not None:
            assert 'shape' not in init_op_args
            init_op_args.update({'shape': shape})

        initializer_op = None
        if self._initialize_params:
            initializer_op = core.CreateOperator(
                initializer[0],
                [],
                param_blob,
                **init_op_args
            )

        param = layers.LayerParameter(
            parameter=param_blob,
            initializer=initializer_op,
            optimizer=optimizer,
            ps_param=ps_param,
            regularizer=regularizer
        )

        self._validate_param_shape(param_name, shape)

        self._param_to_shape[param_name] = shape

        return param

    def next_layer_name(self, prefix):
        """Return a unique, scope-qualified layer name based on `prefix`."""
        base_name = core.ScopedName(prefix)
        name = base_name
        index = 0
        while name in self._layer_names:
            name = base_name + '_auto_' + str(index)
            index += 1

        self._layer_names.add(name)
        return name

    def add_layer(self, layer):
        """Register a layer, bookkeep its params, and emit its operators."""
        self._layers.append(layer)
        for param in layer.get_parameters():
            assert isinstance(param.parameter, core.BlobReference)

            self.param_to_optim[str(param.parameter)] = \
                param.optimizer or self.default_optimizer

            self.params.append(param.parameter)
            if isinstance(param, layers.LayerParameter):
                self.param_to_reg[param.parameter] = param.regularizer
            elif isinstance(param, ParameterInfo):
                # TODO:
                # Currently, LSTM and RNNcells, which use ModelHelper instead of
                # LayerModelHelper as super class, are called in pooling_methods
                # In ModelHelper, regularization is not supported in create_param
                # We will unify the way of create_param of ModelHelper and
                # LayerModelHelper in the future.
                logger.info('regularization is unsupported for ParameterInfo object')
            else:
                raise ValueError(
                    'unknown object type besides ParameterInfo and LayerParameter: {}'
                    .format(param)
                )

        # The primary value of adding everything to self.net - generation of the
        # operators right away, i.e. if error happens it'll be detected
        # immediately. Other than this - create_x_net should be called.
        layer.add_operators(self.net, self.param_init_net)
        return layer.output_schema

    def get_parameter_blobs(self):
        """Return the parameter blobs of all registered layers."""
        param_blobs = []
        for layer in self._layers:
            for param in layer.get_parameters():
                param_blobs.append(param.parameter)

        return param_blobs

    def add_post_grad_net_modifiers(self, modifier):
        """Register a NetModifier to run after gradients are computed."""
        assert modifier not in self._post_grad_net_modifiers,\
            "{0} is already in {1}".format(modifier, self._post_grad_net_modifiers)
        assert isinstance(modifier, NetModifier),\
            "{} has to be a NetModifier instance".format(modifier)
        self._post_grad_net_modifiers.append(modifier)

    def add_final_net_modifiers(self, modifier):
        """Register a NetModifier to run as the final net transformation."""
        assert modifier not in self._final_net_modifiers,\
            "{0} is already in {1}".format(modifier, self._final_net_modifiers)
        assert isinstance(modifier, NetModifier),\
            "{} has to be a NetModifier instance".format(modifier)
        self._final_net_modifiers.append(modifier)

    @property
    def seed(self):
        return self._seed

    @property
    def sequence_seed(self):
        return self._sequence_seed

    def store_seed(self, seed, sequence_seed=True):
        # Store seed config that will be applied to each op in the net.
        self._seed = seed
        # If sequence_seed is True, the i-th op has rand_seed=`seed + i`
        self._sequence_seed = sequence_seed

    def apply_seed(self, net):
        """Apply the stored seed configuration to every op in `net`."""
        if self._seed:
            net.set_rand_seed(self._seed, self._sequence_seed)

    @property
    def default_optimizer(self):
        return self._default_optimizer

    @default_optimizer.setter
    def default_optimizer(self, optimizer):
        self._default_optimizer = optimizer

    @property
    def input_feature_schema(self):
        return self._input_feature_schema

    @property
    def trainer_extra_schema(self):
        return self._trainer_extra_schema

    @property
    def metrics_schema(self):
        """
        Returns the schema that represents model output that should be used for
        metric reporting.

        During the training/evaluation this schema will be appended to the
        schema that represents model output.
        """
        return self._metrics_schema

    @property
    def output_schema(self):
        assert self._output_schema is not None
        return self._output_schema

    @output_schema.setter
    def output_schema(self, schema):
        # Write-once: the schema may only be set while it is still unset.
        assert self._output_schema is None
        self._output_schema = schema

    @property
    def preproc_output_schema(self):
        assert self._preproc_output_schema is not None
        return self._preproc_output_schema

    @preproc_output_schema.setter
    def preproc_output_schema(self, schema):
        # Write-once: the schema may only be set while it is still unset.
        assert self._preproc_output_schema is None
        self._preproc_output_schema = schema

    @property
    def prediction(self):
        assert self._prediction, "model prediction is empty"
        return self._prediction

    def add_prediction(self, prediction, weight=1.0):
        """Append a (prediction, weight) pair to the model predictions."""
        assert prediction is not None, "Added prediction should not be None"
        self._prediction.append((prediction, weight))

    @property
    def loss(self):
        assert self._loss is not None
        return self._loss

    @loss.setter
    def loss(self, loss):
        # Write-once via the setter; use add_loss() to accumulate losses.
        assert self._loss is None
        self._loss = loss

    def has_loss(self):
        return self._loss is not None

    def add_loss(self, loss, name='unnamed'):
        """Accumulate `loss` (Scalar or Struct) under a unique field name."""
        assert loss is not None, "Added loss should not be None"
        assert isinstance(loss, schema.Scalar) or isinstance(
            loss, schema.Struct
        ), "Added loss should be a scalar or a struct"
        if self._loss is None:
            self._loss = schema.Struct((name, loss))
        else:
            # loss could've been set through model.loss directly which could be
            # a scalar
            if isinstance(self._loss, schema.Scalar):
                self._loss = schema.Struct(('unnamed', self._loss))

            prefix_base = name + '_auto_'
            index = 0
            prefix = name
            while prefix in self._loss:
                prefix = prefix_base + str(index)
                index += 1
            loss_struct = schema.Struct((prefix, loss))
            self._loss = self._loss + loss_struct

    def add_output_schema(self, name, value):
        """Add a uniquely named field (Scalar or Struct) to the output schema."""
        assert value is not None, \
            'Added output schema {} should not be None'.format(name)
        assert isinstance(value, schema.Scalar) or \
            isinstance(value, schema.Struct), \
            'Added output schema {} should be a scalar or a struct.\n\
            Now it is {}.'.format(name, type(value))

        if self._output_schema is None:  # be the first field
            self._output_schema = schema.Struct((name, value))
        else:  # merge with other fields
            assert name not in self._output_schema.fields, \
                'Output Schema Field {} already exists'.format(name)
            self._output_schema = \
                self._output_schema + schema.Struct((name, value))

    def add_trainer_extra_schema(self, trainer_extra_schema):
        """Materialize and append extra trainer-only schema fields."""
        trainer_extra_record = schema.NewRecord(self.net, trainer_extra_schema)
        self._trainer_extra_schema += trainer_extra_record

    def __getattr__(self, layer):
        """Resolve `model.SomeLayer(...)` into layer construction.

        Registered layers are instantiated directly; registered operators
        (or names prefixed with 'FunctionalLayer') are wrapped in a
        Functional layer. Raises AttributeError for unknown names.
        """
        def is_functional_layer(layer):
            if core.IsOperator(layer):
                return True
            elif layer.startswith('FunctionalLayer'):
                return True
            else:
                return False

        def resolve_functional_layer(layer):
            if core.IsOperator(layer):
                return layer
            elif layer.startswith('FunctionalLayer'):
                return layer[len('FunctionalLayer'):]
            else:
                raise ValueError(
                    '%s cannot be resolved as functional layer' % layer
                )

        if layer.startswith('__'):
            raise AttributeError(layer)

        # TODO(amalevich): Add add support for ifbpy inline documentation
        if layers.layer_exists(layer):
            def wrapper(*args, **kwargs):
                new_layer = layers.create_layer(layer, self, *args, **kwargs)
                if kwargs.get("output_to_metrics", False):
                    new_layer.export_output_for_metrics()
                if kwargs.get("params_to_metrics", False):
                    new_layer.export_params_for_metrics()
                return self.add_layer(new_layer)
            return wrapper
        elif is_functional_layer(layer):
            # TODO(xlwang): Desginated layer shadows the usage of an op as a
            # single layer. To enforce using an op (e.g. Split) as functional
            # layer, one can call 'model.FunctionalLayerSplit'
            layer = resolve_functional_layer(layer)

            def wrapper(*args, **kwargs):
                def apply_operator(net, in_record, out_record, **kwargs):
                    # TODO(amalevich): Switch to net.operator as soon as it gets
                    # landed
                    net.__getattr__(layer)(in_record.field_blobs(),
                                           out_record.field_blobs(),
                                           **kwargs)

                if 'name' not in kwargs:
                    kwargs['name'] = layer

                new_layer = layers.create_layer(
                    'Functional',
                    self, *args, function=apply_operator,
                    **kwargs
                )

                if kwargs.get("output_to_metrics", False):
                    new_layer.export_output_for_metrics()
                if kwargs.get("params_to_metrics", False):
                    new_layer.export_params_for_metrics()

                return self.add_layer(new_layer)
            return wrapper
        else:
            # this needs to be an AttributeError to fit hasattr semantics
            raise AttributeError(
                "Trying to create non-registered layer: {}".format(layer))

    @property
    def layers(self):
        return self._layers

    def apply_regularizers_on_loss(
        self,
        train_net,
        train_init_net,
        blob_to_device=None,
    ):
        """Attach on-loss regularization terms as additional losses."""
        for param, regularizer in viewitems(self.param_to_reg):
            if regularizer is None:
                continue
            assert isinstance(regularizer, Regularizer)
            added_loss_blob = regularizer(train_net, train_init_net, param, grad=None,
                                          by=RegularizationBy.ON_LOSS)
            if added_loss_blob is not None:
                self.add_loss(
                    schema.Scalar(blob=added_loss_blob),
                    str(added_loss_blob)
                )

    def apply_regularizers_after_optimizer(
        self,
        train_net,
        train_init_net,
        grad_map,
        blob_to_device=None,
    ):
        """Run after-optimizer regularizers on each param's resolved device."""
        CPU = muji.OnCPU()
        # if given, blob_to_device is a map from blob to device_option
        blob_to_device = blob_to_device or {}
        for param, regularizer in viewitems(self.param_to_reg):
            if regularizer is None:
                continue
            assert isinstance(regularizer, Regularizer)
            device = get_param_device(
                param,
                grad_map.get(str(param)),
                param_to_device=blob_to_device,
                default_device=CPU,
            )
            with core.DeviceScope(device):
                regularizer(
                    train_net, train_init_net, param, grad=grad_map.get(str(param)),
                    by=RegularizationBy.AFTER_OPTIMIZER
                )

    def apply_post_grad_net_modifiers(
        self,
        trainer_net,
        trainer_init_net,
        grad_map,
        blob_to_device=None,
        modify_output_record=False,
    ):
        """Run the registered post-gradient NetModifiers.

        Only params that actually have an optimizer and a gradient are
        exposed to the modifiers.
        """
        param_grad_map = {param: grad_map[param]
                          for param in self.param_to_optim.keys() if param in grad_map}

        for modifier in self._post_grad_net_modifiers:
            modifier(trainer_net, trainer_init_net, param_grad_map,
                     blob_to_device=blob_to_device,
                     modify_output_record=modify_output_record)

    def apply_final_net_modifiers(
        self,
        trainer_net,
        trainer_init_net,
        grad_map,
        blob_to_device=None,
        modify_output_record=False,
    ):
        """Run the registered final NetModifiers over the full grad map."""
        for modifier in self._final_net_modifiers:
            modifier(trainer_net, trainer_init_net, grad_map,
                     blob_to_device=blob_to_device,
                     modify_output_record=modify_output_record)

    def apply_optimizers(
        self,
        train_net,
        train_init_net,
        grad_map,
        blob_to_device=None,
    ):
        """Apply each param's optimizer on the param's resolved device."""
        CPU = muji.OnCPU()
        # if given, blob_to_device is a map from blob to device_option
        blob_to_device = blob_to_device or {}
        for param, optimizer in viewitems(self.param_to_optim):
            assert optimizer is not None, \
                "default optimizer must have been set in add_layer"
            # note that not all params has gradient and thus we sent None if
            # gradient does not exists
            device = get_param_device(
                param,
                grad_map.get(str(param)),
                param_to_device=blob_to_device,
                default_device=CPU,
            )
            if device is not None:
                # extra info is not applicable for optimizers
                del device.extra_info[:]

            with core.DeviceScope(device):
                optimizer(
                    train_net, train_init_net, param, grad_map.get(str(param)))

    def _GetOne(self):
        """Return the blob name of the global 'ONE' constant."""
        return self.global_constants['ONE']

    # An optimizer which allows us to do NO optimization
    def NoOptim(self, *args, **kwargs):
        pass

    @property
    def breakdown_map(self):
        return self._breakdown_map

    @breakdown_map.setter
    def breakdown_map(self, breakdown_map):
        # TODO(xlwang): provide more rich feature information in breakdown_map;
        # and change the assertion accordingly
        assert isinstance(breakdown_map, dict)
        assert all(isinstance(k, six.string_types) for k in breakdown_map)
        assert sorted(breakdown_map.values()) == list(range(len(breakdown_map)))
        self._breakdown_map = breakdown_map
| 38.124054 | 88 | 0.598849 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, model_helper, schema, scope, utils, muji
from caffe2.python.modeling.parameter_info import (
ParameterInfo,
)
from caffe2.python.modeling.parameter_sharing import (
parameter_sharing_context,
)
from caffe2.python.modeling.net_modifier import NetModifier
from caffe2.python.optimizer import get_param_device
from caffe2.python.regularizer import Regularizer, RegularizationBy
from caffe2.python.layers import layers
from caffe2.proto import caffe2_pb2
from future.utils import viewitems, viewvalues
import logging
import numpy as np
import six
import copy
logger = logging.getLogger(__name__)
class LayerModelHelper(model_helper.ModelHelper):
def __init__(self, name, input_feature_schema, trainer_extra_schema,
keep_blobs=False):
super(LayerModelHelper, self).__init__(name=name)
self._layer_names = set()
self._layers = []
self._param_to_shape = {}
self._seed = None
self._sequence_seed = True
self.param_to_optim = {}
self.param_to_reg = {}
self._default_optimizer = None
self._loss = None
self._prediction = []
self._output_schema = None
self._post_grad_net_modifiers = []
self._final_net_modifiers = []
self._breakdown_map = None
self._input_feature_schema = schema.NewRecord(
self.net,
input_feature_schema
) if not keep_blobs else input_feature_schema.clone()
self._trainer_extra_schema = schema.NewRecord(
self.net,
trainer_extra_schema
) if not keep_blobs else trainer_extra_schema.clone()
self._metrics_schema = schema.Struct()
self._preproc_output_schema = None
self._init_global_constants()
self.param_init_net = self.create_init_net('param_init_net')
self._initialize_params = True
self.ad_hoc_diagnose_blobs_and_operations = []
self.ad_hoc_plot_blobs = []
def clear_output_schema(self):
self._output_schema = None
def set_initialize_params(self, initialize_params):
self._initialize_params = initialize_params
def add_metric_field(self, name, value):
assert name not in self._metrics_schema.fields, (
"Try to add metric field twice: {}".format(name))
self._metrics_schema = self._metrics_schema + schema.Struct(
(name, value)
)
def add_ad_hoc_plot_blob(self, blob, dtype=None):
assert isinstance(
blob, (six.string_types, core.BlobReference)
), "expect type str or BlobReference, but got {}".format(type(blob))
dtype = dtype or (np.float, (1, ))
self.add_metric_field(str(blob), schema.Scalar(dtype, blob))
self.ad_hoc_plot_blobs.append(blob)
@staticmethod
def _get_global_constant_initializer_op(
blob_name, array=None, dtype=None, initializer=None
):
# to add a global constant to model, one first need to get the
# initializer
if array is not None:
assert initializer is None,\
"Only one from array and initializer should be specified"
if dtype is None:
array = np.array(array)
else:
array = np.array(array, dtype=dtype)
# TODO: make GivenTensor generic
op_name = None
if array.dtype == np.int32:
op_name = 'GivenTensorIntFill'
elif array.dtype == np.int64:
op_name = 'GivenTensorInt64Fill'
elif array.dtype == np.str:
op_name = 'GivenTensorStringFill'
elif array.dtype == np.bool:
op_name = 'GivenTensorBoolFill'
else:
op_name = 'GivenTensorFill'
def initializer(blob_name):
return core.CreateOperator(
op_name, [],
blob_name,
shape=array.shape,
values=array.flatten().tolist()
)
else:
assert initializer is not None
initializer_op = initializer(blob_name)
return initializer_op
def add_global_constant(
self, name, array=None, dtype=None, initializer=None
):
assert isinstance(name, six.string_types), (
'name should be a string as we are using it as map key')
# This is global namescope for constants. They will be created in all
# init_nets and there should be very few of them.
assert name not in self.global_constants, \
"%s already added in global_constants" % name
blob_name = self.net.NextBlob(name)
self.global_constants[name] = blob_name
initializer_op = LayerModelHelper._get_global_constant_initializer_op(
blob_name, array, dtype, initializer
)
assert blob_name not in self.global_constant_initializers, \
"there is already a initializer op associated with blob %s" % \
blob_name
self.global_constant_initializers[blob_name] = initializer_op
return blob_name
def maybe_add_global_constant(self, name, *args, **kwargs):
# To ad hoc add new global constants without duplication
# if the name was already registered in global_constants, it will not be
# added even if the intended value is different from its original value
if name in self.global_constants:
blob_name = self.global_constants[name]
initializer_op = \
LayerModelHelper._get_global_constant_initializer_op(
blob_name, *args, **kwargs
)
# check if the original initializer is the same as the one intended
# now
assert utils.OpAlmostEqual(
initializer_op,
self.global_constant_initializers[blob_name],
'debug_info'
), \
"conflict initializers for global constant %s, " \
"previous %s, now %s" % (
blob_name, str(initializer_op),
str(self.global_constant_initializers[blob_name]))
return blob_name
return self.add_global_constant(name, *args, **kwargs)
def _init_global_constants(self):
self.global_constants = {}
self.global_constant_initializers = {}
self.add_global_constant('ONE', 1.0)
self.add_global_constant('ZERO', 0.0)
self.add_global_constant('ZERO_RANGE', [0, 0], dtype='int32')
def _add_global_constants(self, init_net):
for initializer_op in viewvalues(self.global_constant_initializers):
init_net._net.op.extend([initializer_op])
def create_init_net(self, name):
init_net = core.Net(name)
self._add_global_constants(init_net)
return init_net
def _validate_param_shape(self, param_name, shape):
if param_name not in self._param_to_shape:
return
ref_shape = self._param_to_shape[param_name]
if shape != ref_shape:
raise ValueError(
"Got inconsistent shapes between shared parameters "
"when trying to map a blob in scope {0} to {1}. ref_shape : "
" {2}, shape : {3}".format(
scope.CurrentNameScope(), param_name, ref_shape, shape)
)
def create_param(self, param_name, shape, initializer, optimizer=None,
ps_param=None, regularizer=None):
if isinstance(param_name, core.BlobReference):
param_name = str(param_name)
elif isinstance(param_name, six.string_types):
# Parameter name will be equal to current Namescope that got
# resolved with the respect of parameter sharing of the scopes.
param_name = parameter_sharing_context.get_parameter_name(
param_name)
else:
raise ValueError("Unsupported type for param_name")
param_blob = core.BlobReference(param_name)
if len(initializer) == 1:
init_op_args = {}
else:
assert len(initializer) == 2
init_op_args = copy.deepcopy(initializer[1])
if shape is not None:
assert 'shape' not in init_op_args
init_op_args.update({'shape': shape})
initializer_op = None
if self._initialize_params:
initializer_op = core.CreateOperator(
initializer[0],
[],
param_blob,
**init_op_args
)
param = layers.LayerParameter(
parameter=param_blob,
initializer=initializer_op,
optimizer=optimizer,
ps_param=ps_param,
regularizer=regularizer
)
self._validate_param_shape(param_name, shape)
self._param_to_shape[param_name] = shape
return param
def next_layer_name(self, prefix):
base_name = core.ScopedName(prefix)
name = base_name
index = 0
while name in self._layer_names:
name = base_name + '_auto_' + str(index)
index += 1
self._layer_names.add(name)
return name
def add_layer(self, layer):
self._layers.append(layer)
for param in layer.get_parameters():
assert isinstance(param.parameter, core.BlobReference)
self.param_to_optim[str(param.parameter)] = \
param.optimizer or self.default_optimizer
self.params.append(param.parameter)
if isinstance(param, layers.LayerParameter):
self.param_to_reg[param.parameter] = param.regularizer
elif isinstance(param, ParameterInfo):
# TODO:
# Currently, LSTM and RNNcells, which use ModelHelper instead of
# LayerModelHelper as super class, are called in pooling_methods
# In ModelHelper, regularization is not supported in create_param
# We will unify the way of create_param of ModelHelper and
# LayerModelHelper in the future.
logger.info('regularization is unsupported for ParameterInfo object')
else:
raise ValueError(
'unknown object type besides ParameterInfo and LayerParameter: {}'
.format(param)
)
# The primary value of adding everything to self.net - generation of the
# operators right away, i.e. if error happens it'll be detected
layer.add_operators(self.net, self.param_init_net)
return layer.output_schema
def get_parameter_blobs(self):
param_blobs = []
for layer in self._layers:
for param in layer.get_parameters():
param_blobs.append(param.parameter)
return param_blobs
def add_post_grad_net_modifiers(self, modifier):
assert modifier not in self._post_grad_net_modifiers,\
"{0} is already in {1}".format(modifier, self._post_grad_net_modifiers)
assert isinstance(modifier, NetModifier),\
"{} has to be a NetModifier instance".format(modifier)
self._post_grad_net_modifiers.append(modifier)
def add_final_net_modifiers(self, modifier):
assert modifier not in self._final_net_modifiers,\
"{0} is already in {1}".format(modifier, self._final_net_modifiers)
assert isinstance(modifier, NetModifier),\
"{} has to be a NetModifier instance".format(modifier)
self._final_net_modifiers.append(modifier)
@property
def seed(self):
return self._seed
@property
def sequence_seed(self):
return self._sequence_seed
def store_seed(self, seed, sequence_seed=True):
self._seed = seed
self._sequence_seed = sequence_seed
def apply_seed(self, net):
if self._seed:
net.set_rand_seed(self._seed, self._sequence_seed)
@property
def default_optimizer(self):
return self._default_optimizer
@default_optimizer.setter
def default_optimizer(self, optimizer):
self._default_optimizer = optimizer
@property
def input_feature_schema(self):
return self._input_feature_schema
@property
def trainer_extra_schema(self):
return self._trainer_extra_schema
@property
def metrics_schema(self):
return self._metrics_schema
@property
def output_schema(self):
assert self._output_schema is not None
return self._output_schema
@output_schema.setter
def output_schema(self, schema):
assert self._output_schema is None
self._output_schema = schema
@property
def preproc_output_schema(self):
assert self._preproc_output_schema is not None
return self._preproc_output_schema
@preproc_output_schema.setter
def preproc_output_schema(self, schema):
assert self._preproc_output_schema is None
self._preproc_output_schema = schema
@property
def prediction(self):
assert self._prediction, "model prediction is empty"
return self._prediction
def add_prediction(self, prediction, weight=1.0):
assert prediction is not None, "Added prediction should not be None"
self._prediction.append((prediction, weight))
@property
def loss(self):
assert self._loss is not None
return self._loss
@loss.setter
def loss(self, loss):
assert self._loss is None
self._loss = loss
def has_loss(self):
return self._loss is not None
def add_loss(self, loss, name='unnamed'):
assert loss is not None, "Added loss should not be None"
assert isinstance(loss, schema.Scalar) or isinstance(
loss, schema.Struct
), "Added loss should be a scalar or a struct"
if self._loss is None:
self._loss = schema.Struct((name, loss))
else:
# a scalar
if isinstance(self._loss, schema.Scalar):
self._loss = schema.Struct(('unnamed', self._loss))
prefix_base = name + '_auto_'
index = 0
prefix = name
while prefix in self._loss:
prefix = prefix_base + str(index)
index += 1
loss_struct = schema.Struct((prefix, loss))
self._loss = self._loss + loss_struct
def add_output_schema(self, name, value):
assert value is not None, \
'Added output schema {} should not be None'.format(name)
assert isinstance(value, schema.Scalar) or \
isinstance(value, schema.Struct), \
'Added output schema {} should be a scalar or a struct.\n\
Now it is {}.'.format(name, type(value))
if self._output_schema is None: # be the first field
self._output_schema = schema.Struct((name, value))
else: # merge with other fields
assert name not in self._output_schema.fields, \
'Output Schema Field {} already exists'.format(name)
self._output_schema = \
self._output_schema + schema.Struct((name, value))
def add_trainer_extra_schema(self, trainer_extra_schema):
trainer_extra_record = schema.NewRecord(self.net, trainer_extra_schema)
self._trainer_extra_schema += trainer_extra_record
def __getattr__(self, layer):
    """Dynamic layer factory.

    Looking up an unknown attribute on the model returns a ``wrapper``
    that, when called, creates the corresponding layer and adds it to the
    model:

    * a registered layer name -> that layer (via ``layers.create_layer``);
    * an operator name, or the ``'FunctionalLayer<Op>'`` spelling -> a
      ``'Functional'`` layer that applies the operator to the input
      record's blobs;
    * anything else raises ``AttributeError`` so ``hasattr`` still works.
    """
    def is_functional_layer(layer):
        # True for plain operator names and for the explicit
        # 'FunctionalLayer...' spelling.
        if core.IsOperator(layer):
            return True
        elif layer.startswith('FunctionalLayer'):
            return True
        else:
            return False

    def resolve_functional_layer(layer):
        # Reduce either accepted spelling to the raw operator name.
        if core.IsOperator(layer):
            return layer
        elif layer.startswith('FunctionalLayer'):
            return layer[len('FunctionalLayer'):]
        else:
            raise ValueError(
                '%s cannot be resolved as functional layer' % layer
            )

    # Dunder lookups are never treated as layers.
    if layer.startswith('__'):
        raise AttributeError(layer)

    # TODO(amalevich): Add add support for ifbpy inline documentation
    if layers.layer_exists(layer):
        def wrapper(*args, **kwargs):
            new_layer = layers.create_layer(layer, self, *args, **kwargs)
            # Optional kwargs let callers export this layer's outputs or
            # parameters to the metrics schema.
            if kwargs.get("output_to_metrics", False):
                new_layer.export_output_for_metrics()
            if kwargs.get("params_to_metrics", False):
                new_layer.export_params_for_metrics()
            return self.add_layer(new_layer)
        return wrapper
    elif is_functional_layer(layer):
        # TODO(xlwang): Desginated layer shadows the usage of an op as a
        # single layer. To enforce using an op (e.g. Split) as functional
        # layer, one can call 'model.FunctionalLayerSplit'
        layer = resolve_functional_layer(layer)

        def wrapper(*args, **kwargs):
            def apply_operator(net, in_record, out_record, **kwargs):
                # TODO(amalevich): Switch to net.operator as soon as it gets
                # landed
                net.__getattr__(layer)(in_record.field_blobs(),
                                       out_record.field_blobs(),
                                       **kwargs)
            # Default the layer name to the operator name.
            if 'name' not in kwargs:
                kwargs['name'] = layer
            new_layer = layers.create_layer(
                'Functional',
                self, *args, function=apply_operator,
                **kwargs
            )
            if kwargs.get("output_to_metrics", False):
                new_layer.export_output_for_metrics()
            if kwargs.get("params_to_metrics", False):
                new_layer.export_params_for_metrics()
            return self.add_layer(new_layer)
        return wrapper
    else:
        # this needs to be an AttributeError to fit hasattr semantics
        raise AttributeError(
            "Trying to create non-registered layer: {}".format(layer))
@property
def layers(self):
    """Read-only accessor for the list of layers added to this model."""
    return self._layers
def apply_regularizers_on_loss(
    self,
    train_net,
    train_init_net,
    blob_to_device=None,
):
    """Run every ON_LOSS regularizer and fold its penalty blob into the
    model loss.

    ``blob_to_device`` is accepted for signature symmetry with the other
    apply_* methods; it is not used here.
    """
    for param, reg in viewitems(self.param_to_reg):
        if reg is None:
            continue
        assert isinstance(reg, Regularizer)
        penalty_blob = reg(
            train_net, train_init_net, param, grad=None,
            by=RegularizationBy.ON_LOSS,
        )
        if penalty_blob is None:
            continue
        self.add_loss(schema.Scalar(blob=penalty_blob), str(penalty_blob))
def apply_regularizers_after_optimizer(
    self,
    train_net,
    train_init_net,
    grad_map,
    blob_to_device=None,
):
    """Run every AFTER_OPTIMIZER regularizer inside the device scope of the
    parameter it regularizes.

    ``blob_to_device``, if given, maps blob name -> device_option; CPU is
    the fallback device.
    """
    cpu_device = muji.OnCPU()
    device_map = blob_to_device if blob_to_device else {}
    for param, reg in viewitems(self.param_to_reg):
        if reg is None:
            continue
        assert isinstance(reg, Regularizer)
        grad = grad_map.get(str(param))
        target_device = get_param_device(
            param,
            grad,
            param_to_device=device_map,
            default_device=cpu_device,
        )
        with core.DeviceScope(target_device):
            reg(
                train_net, train_init_net, param, grad=grad,
                by=RegularizationBy.AFTER_OPTIMIZER,
            )
def apply_post_grad_net_modifiers(
    self,
    trainer_net,
    trainer_init_net,
    grad_map,
    blob_to_device=None,
    modify_output_record=False,
):
    """Run each registered post-gradient net modifier, restricted to the
    gradients of parameters this model actually optimizes."""
    tracked_grads = {}
    for param in self.param_to_optim.keys():
        if param in grad_map:
            tracked_grads[param] = grad_map[param]
    for net_modifier in self._post_grad_net_modifiers:
        net_modifier(
            trainer_net,
            trainer_init_net,
            tracked_grads,
            blob_to_device=blob_to_device,
            modify_output_record=modify_output_record,
        )
def apply_final_net_modifiers(
    self,
    trainer_net,
    trainer_init_net,
    grad_map,
    blob_to_device=None,
    modify_output_record=False,
):
    """Run each registered final net modifier over the full gradient map."""
    for net_modifier in self._final_net_modifiers:
        net_modifier(
            trainer_net,
            trainer_init_net,
            grad_map,
            blob_to_device=blob_to_device,
            modify_output_record=modify_output_record,
        )
def apply_optimizers(
    self,
    train_net,
    train_init_net,
    grad_map,
    blob_to_device=None,
):
    """Apply each parameter's optimizer inside that parameter's device scope.

    Parameters without a gradient are passed ``grad=None``; the device is
    resolved from ``blob_to_device`` (blob name -> device_option), falling
    back to CPU.
    """
    cpu_device = muji.OnCPU()
    device_map = blob_to_device if blob_to_device else {}
    for param, optimizer in viewitems(self.param_to_optim):
        assert optimizer is not None, \
            "default optimizer must have been set in add_layer"
        # Not every param has a gradient; grad stays None in that case.
        grad = grad_map.get(str(param))
        device = get_param_device(
            param,
            grad,
            param_to_device=device_map,
            default_device=cpu_device,
        )
        if device is not None:
            # extra info is not applicable for optimizers
            del device.extra_info[:]
        with core.DeviceScope(device):
            optimizer(train_net, train_init_net, param, grad)
def _GetOne(self):
    """Return the global constant blob registered under 'ONE'."""
    return self.global_constants['ONE']

# An optimizer which allows us to do NO optimization
def NoOptim(self, *args, **kwargs):
    """No-op optimizer; accepts and ignores any arguments."""
    pass

@property
def breakdown_map(self):
    """Mapping from breakdown feature name to its integer index."""
    return self._breakdown_map
@breakdown_map.setter
def breakdown_map(self, breakdown_map):
    """Validate and store the breakdown map.

    Keys must be strings; the values must be exactly the integers
    ``0 .. len(breakdown_map)-1``, each used once.
    """
    # TODO(xlwang): provide more rich feature information in breakdown_map;
    # and change the assertion accordingly
    assert isinstance(breakdown_map, dict)
    assert all(isinstance(k, six.string_types) for k in breakdown_map)
    assert sorted(breakdown_map.values()) == list(range(len(breakdown_map)))
    self._breakdown_map = breakdown_map
| true | true |
1c2e0b78f96a8e24dcf04517c311fe46e9e442c9 | 486 | py | Python | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/_version.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | [
"MIT"
] | 1 | 2022-02-18T01:17:27.000Z | 2022-02-18T01:17:27.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/_version.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | [
"MIT"
] | null | null | null | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/_version.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# Package version string; this file is AutoRest-generated (see header), so
# bump the version by regenerating rather than editing by hand.
VERSION = "2.2.1"
| 48.6 | 94 | 0.526749 |
VERSION = "2.2.1"
| true | true |
1c2e0bb59452ec4bfcecb5fd5e9c03320a4084ba | 1,267 | py | Python | anytask/groups/migrations/0001_initial.py | antselevich/anytask | b00ea8ad929f267ac4a37d1a0eaabce28c5b02cf | [
"MIT"
] | 31 | 2015-03-24T21:11:44.000Z | 2022-03-28T22:55:12.000Z | anytask/groups/migrations/0001_initial.py | antselevich/anytask | b00ea8ad929f267ac4a37d1a0eaabce28c5b02cf | [
"MIT"
] | 286 | 2015-06-11T10:32:16.000Z | 2022-03-28T12:01:04.000Z | anytask/groups/migrations/0001_initial.py | bcskda/anytask | 5a359dcb669b689fc5a4f1705f2c88cd031ab37d | [
"MIT"
] | 44 | 2015-05-23T21:30:51.000Z | 2021-11-07T12:56:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ``Group`` model.
    # NOTE(review): migrations are historical records -- do not edit the
    # operations below; create a follow-up migration for schema changes.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('years', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(db_index=True, max_length=191, blank=True)),
                ('added_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
                ('update_time', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
                ('students', models.ManyToManyField(to=settings.AUTH_USER_MODEL, null=True, blank=True)),
                ('year', models.ForeignKey(to='years.Year', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A group name must be unique within its year.
        migrations.AlterUniqueTogether(
            name='group',
            unique_together=set([('year', 'name')]),
        ),
    ]
| 35.194444 | 114 | 0.604578 |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('years', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(db_index=True, max_length=191, blank=True)),
('added_time', models.DateTimeField(default=django.utils.timezone.now, auto_now_add=True)),
('update_time', models.DateTimeField(default=django.utils.timezone.now, auto_now=True)),
('students', models.ManyToManyField(to=settings.AUTH_USER_MODEL, null=True, blank=True)),
('year', models.ForeignKey(to='years.Year', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='group',
unique_together=set([('year', 'name')]),
),
]
| true | true |
1c2e0e61dd9d9febbf9044acc472737cf3b71b63 | 2,004 | py | Python | combine_alignments_all3codons_distinguish_exons_nexuspartition.py | SethMusker/HybPiper_phasing_phyloscripts_modified | bfa9e38d48c6105d660383fa1ea60fcb3e1998a4 | [
"CC0-1.0"
] | null | null | null | combine_alignments_all3codons_distinguish_exons_nexuspartition.py | SethMusker/HybPiper_phasing_phyloscripts_modified | bfa9e38d48c6105d660383fa1ea60fcb3e1998a4 | [
"CC0-1.0"
] | null | null | null | combine_alignments_all3codons_distinguish_exons_nexuspartition.py | SethMusker/HybPiper_phasing_phyloscripts_modified | bfa9e38d48c6105d660383fa1ea60fcb3e1998a4 | [
"CC0-1.0"
] | null | null | null |
#Script to combine exon and intron alignments for a gene and generate a NEXUS formatted partition file.
# Seth's edit: specify all three codon positions as separate partitions (as recommended by PartitionFinder)
# also: write to 'exons_only.fasta/partition' if no intron file exists
# NB at present this script does not work with python 3
import sys,os
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
if len(sys.argv) < 4:
print("Usage: python combine_alignments.py exon.fasta intron.fasta[or any value if no intron] geneName")
sys.exit(1)
exon_fn = sys.argv[1]
intron_fn = sys.argv[2]
geneName = sys.argv[3]
exon_dict = SeqIO.to_dict(SeqIO.parse(exon_fn,'fasta'))
exonLength = len(next(exon_dict.itervalues()))
if os.path.isfile(intron_fn):
with open("{}.combined.fasta".format(geneName),'w') as outfile:
for seq in SeqIO.parse(intron_fn,'fasta'):
intronLength = len(seq)
sampleID = seq.id.split("-")[0]
newseq = exon_dict[sampleID].seq + seq.seq
outfile.write(">{}\n{}\n".format(sampleID,newseq))
partition = """
begin sets;
charset codon1 = 1-{}\\3;
charset codon2 = 2-{}\\3;
charset codon3 = 3-{}\\3;
charset intron = {}-{};
end;
""".format(exonLength, exonLength, exonLength, exonLength+1,exonLength+intronLength)
with open("{}.combined.partition.nex".format(geneName),'w') as partitionfile:
partitionfile.write(partition)
else:
with open("{}.exon_only.fasta".format(geneName),'w') as outfile:
for sampleID in exon_dict:
newseq = exon_dict[sampleID].seq
outfile.write(">{}\n{}\n".format(sampleID,newseq))
partition = """
begin sets;
charset codon1 = 1-{}\\3;
charset codon2 = 2-{}\\3;
charset codon3 = 3-{}\\3;
end;
""".format(exonLength, exonLength, exonLength)
with open("{}.exon_only.partition.nex".format(geneName),'w') as partitionfile:
partitionfile.write(partition)
| 35.785714 | 108 | 0.667665 |
# also: write to 'exons_only.fasta/partition' if no intron file exists
# NB at present this script does not work with python 3
import sys,os
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
if len(sys.argv) < 4:
print("Usage: python combine_alignments.py exon.fasta intron.fasta[or any value if no intron] geneName")
sys.exit(1)
exon_fn = sys.argv[1]
intron_fn = sys.argv[2]
geneName = sys.argv[3]
exon_dict = SeqIO.to_dict(SeqIO.parse(exon_fn,'fasta'))
exonLength = len(next(exon_dict.itervalues()))
if os.path.isfile(intron_fn):
with open("{}.combined.fasta".format(geneName),'w') as outfile:
for seq in SeqIO.parse(intron_fn,'fasta'):
intronLength = len(seq)
sampleID = seq.id.split("-")[0]
newseq = exon_dict[sampleID].seq + seq.seq
outfile.write(">{}\n{}\n".format(sampleID,newseq))
partition = """
begin sets;
charset codon1 = 1-{}\\3;
charset codon2 = 2-{}\\3;
charset codon3 = 3-{}\\3;
charset intron = {}-{};
end;
""".format(exonLength, exonLength, exonLength, exonLength+1,exonLength+intronLength)
with open("{}.combined.partition.nex".format(geneName),'w') as partitionfile:
partitionfile.write(partition)
else:
with open("{}.exon_only.fasta".format(geneName),'w') as outfile:
for sampleID in exon_dict:
newseq = exon_dict[sampleID].seq
outfile.write(">{}\n{}\n".format(sampleID,newseq))
partition = """
begin sets;
charset codon1 = 1-{}\\3;
charset codon2 = 2-{}\\3;
charset codon3 = 3-{}\\3;
end;
""".format(exonLength, exonLength, exonLength)
with open("{}.exon_only.partition.nex".format(geneName),'w') as partitionfile:
partitionfile.write(partition)
| true | true |
1c2e0ec76ac723f9616df1e26e9f8568738a1846 | 3,049 | py | Python | docs/conf.py | ClaraCDouglas/CarbonUptakeInWG | b435f51ab64e472cced0277ad3b932e2a9c9414b | [
"MIT"
] | 2 | 2021-10-05T14:56:53.000Z | 2022-01-30T18:27:53.000Z | docs/conf.py | ClaraCDouglas/CarbonUptakeInWG | b435f51ab64e472cced0277ad3b932e2a9c9414b | [
"MIT"
] | 1 | 2021-07-14T10:34:29.000Z | 2021-07-14T10:34:29.000Z | docs/conf.py | ClaraCDouglas/CarbonUptakeInWG | b435f51ab64e472cced0277ad3b932e2a9c9414b | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import pathlib
import sys

# Log the interpreter and import path so Sphinx build environments are easy
# to debug from the build output.
print("python exec:", sys.executable)
print("sys.path:", sys.path)

# Make the repository root importable so autodoc can import the package
# without it being installed.
root = pathlib.Path(__file__).parent.parent.absolute()
os.environ["PYTHONPATH"] = str(root)
sys.path.insert(0, str(root))

import carbonuptakeinwg  # isort:skip
# -- Project information -----------------------------------------------------

project = "carbonuptakeinwg"
copyright = "2021, Clara Douglas"
author = "Clara Douglas"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# see https://pypi.org/project/setuptools-scm/ for details
# NOTE(review): pkg_resources is deprecated in recent setuptools releases;
# importlib.metadata.version is the stdlib successor -- consider migrating.
from pkg_resources import get_distribution
release = get_distribution('carbonuptakeinwg').version
# for example take major/minor (e.g. "1.2" from "1.2.3")
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"nbsphinx",
"recommonmark",
"sphinx.ext.mathjax",
"sphinx.ext.autosummary",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"numpydoc",
"nbsphinx",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"sphinxcontrib.srclinks",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "**.ipynb_checkpoints", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pangeo"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- nbsphinx specific options ----------------------------------------------
# this allows notebooks to be run even if they produce errors.
nbsphinx_allow_errors = True | 35.045977 | 79 | 0.679895 |
import os
import pathlib
import sys
print("python exec:", sys.executable)
print("sys.path:", sys.path)
root = pathlib.Path(__file__).parent.parent.absolute()
os.environ["PYTHONPATH"] = str(root)
sys.path.insert(0, str(root))
import carbonuptakeinwg
project = "carbonuptakeinwg"
copyright = "2021, Clara Douglas"
author = "Clara Douglas"
# |version| and |release|, also used in various other places throughout the
# built documents.
# see https://pypi.org/project/setuptools-scm/ for details
from pkg_resources import get_distribution
release = get_distribution('carbonuptakeinwg').version
# for example take major/minor
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"nbsphinx",
"recommonmark",
"sphinx.ext.mathjax",
"sphinx.ext.autosummary",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"numpydoc",
"nbsphinx",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"sphinxcontrib.srclinks",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "**.ipynb_checkpoints", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pangeo"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- nbsphinx specific options ----------------------------------------------
# this allows notebooks to be run even if they produce errors.
nbsphinx_allow_errors = True | true | true |
1c2e0ed4c75c604321d2b67d71992aa45ce7be39 | 4,480 | py | Python | examples/python/matrix_from_duration_data.py | raphaelchaves/transitionMatrix | 6ac54c8c6ce15dc81aa5e894cfcfabb127634b33 | [
"Apache-2.0"
] | null | null | null | examples/python/matrix_from_duration_data.py | raphaelchaves/transitionMatrix | 6ac54c8c6ce15dc81aa5e894cfcfabb127634b33 | [
"Apache-2.0"
] | 2 | 2021-01-13T21:58:06.000Z | 2021-02-07T12:20:00.000Z | examples/python/matrix_from_duration_data.py | raphaelchaves/transitionMatrix | 6ac54c8c6ce15dc81aa5e894cfcfabb127634b33 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
# (c) 2017-2020 Open Risk, all rights reserved
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
"""
Example workflows using transitionMatrix to estimate a matrix from duration type data
The datasets are produced in examples/generate_synthetic_data.py
"""
import pandas as pd
import transitionMatrix as tm
from transitionMatrix import source_path
from transitionMatrix.estimators import cohort_estimator as es
# All synthetic input files live under the package's datasets/ directory.
dataset_path = source_path + "datasets/"

# Select the example to run
# 1-> An example with limited data (dataset contains only one entity)
# 2-> A full example with a 2x2 matrix
# 3-> A full example with a 8x8 matrix
#     NOTE(review): the state space defined for example 3 below lists seven
#     states (A-G) -- confirm whether "8x8" above is accurate.
example = 3
if example == 1:
# An example with limited data (dataset contains only one entity)
data = pd.read_csv(dataset_path + 'synthetic_data1.csv', dtype={'State': str})
sorted_data = data.sort_values(['ID', 'Time'], ascending=[True, True])
myState = tm.StateSpace([('0', "A"), ('1', "B"), ('2', "C"), ('3', "D")])
print("> Validate data set")
print(myState.validate_dataset(dataset=sorted_data))
# Bin the data into 5 intervals
cohort_data, cohort_intervals = tm.utils.bin_timestamps(data, cohorts=5)
myEstimator = es.CohortEstimator(states=myState, ci={'method': 'goodman', 'alpha': 0.05})
labels = {'Timestamp': 'Cohort', 'State': 'State', 'ID': 'ID'}
result = myEstimator.fit(cohort_data, labels=labels)
# Check significance of some estimates
# First period
myEstimator.summary(k=0)
# Last period
myEstimator.summary(k=4)
elif example == 2:
# Step 1
# Load the data set into a pandas frame
# Make sure state is read as a string and not as integer
# Second synthetic data example:
# n entities with ~10 observations each, [0,1] state, 50%/50% transition matrix
print("> Step 1: Load the data")
data = pd.read_csv(dataset_path + 'synthetic_data2.csv', dtype={'State': str})
sorted_data = data.sort_values(['ID', 'Time'], ascending=[True, True])
print(sorted_data.describe())
# Step 2
# Describe and validate the State Space against the data
print("> Step 2: Validate against state space")
myState = tm.StateSpace([('0', "Basic"), ('1', "Default")])
myState.describe()
print(myState.validate_dataset(dataset=sorted_data))
# Step 3
# Arrange the data in period cohorts
print("> Step 3: Arrange the data in period cohorts")
cohort_data, cohort_intervals = tm.utils.bin_timestamps(data, cohorts=5)
# Step 4
# Estimate matrices using method of choice
# compute confidence interval using goodman method at 95% confidence level
print("> Step 4: Estimate matrices")
myEstimator = es.CohortEstimator(states=myState, ci={'method': 'goodman', 'alpha': 0.05})
labels = {'Timestamp': 'Cohort', 'State': 'State', 'ID': 'ID'}
result = myEstimator.fit(cohort_data, labels=labels)
# Step 5
# Print out the set of estimated matrices
print("> Step 5: Display results")
myMatrixSet = tm.TransitionMatrixSet(values=result, temporal_type='Incremental')
print(myMatrixSet.temporal_type)
myMatrixSet.print_matrix()
elif example == 3:
data = pd.read_csv(dataset_path + 'synthetic_data3.csv', dtype={'State': str})
sorted_data = data.sort_values(['ID', 'Time'], ascending=[True, True])
myState = tm.StateSpace([('0', "A"), ('1', "B"), ('2', "C"), ('3', "D"), ('4', "E"), ('5', "F"), ('6', "G")])
print(myState.validate_dataset(dataset=sorted_data))
cohort_data, cohort_intervals = tm.utils.bin_timestamps(data, cohorts=5)
myEstimator = es.CohortEstimator(states=myState, ci={'method': 'goodman', 'alpha': 0.05})
labels = {'Timestamp': 'Cohort', 'State': 'State', 'ID': 'ID'}
result = myEstimator.fit(cohort_data, labels=labels)
myMatrixSet = tm.TransitionMatrixSet(values=result, temporal_type='Incremental')
myMatrixSet.print_matrix()
| 41.481481 | 113 | 0.69933 |
import pandas as pd
import transitionMatrix as tm
from transitionMatrix import source_path
from transitionMatrix.estimators import cohort_estimator as es
dataset_path = source_path + "datasets/"
example = 3
if example == 1:
data = pd.read_csv(dataset_path + 'synthetic_data1.csv', dtype={'State': str})
sorted_data = data.sort_values(['ID', 'Time'], ascending=[True, True])
myState = tm.StateSpace([('0', "A"), ('1', "B"), ('2', "C"), ('3', "D")])
print("> Validate data set")
print(myState.validate_dataset(dataset=sorted_data))
cohort_data, cohort_intervals = tm.utils.bin_timestamps(data, cohorts=5)
myEstimator = es.CohortEstimator(states=myState, ci={'method': 'goodman', 'alpha': 0.05})
labels = {'Timestamp': 'Cohort', 'State': 'State', 'ID': 'ID'}
result = myEstimator.fit(cohort_data, labels=labels)
myEstimator.summary(k=0)
myEstimator.summary(k=4)
elif example == 2:
print("> Step 1: Load the data")
data = pd.read_csv(dataset_path + 'synthetic_data2.csv', dtype={'State': str})
sorted_data = data.sort_values(['ID', 'Time'], ascending=[True, True])
print(sorted_data.describe())
print("> Step 2: Validate against state space")
myState = tm.StateSpace([('0', "Basic"), ('1', "Default")])
myState.describe()
print(myState.validate_dataset(dataset=sorted_data))
print("> Step 3: Arrange the data in period cohorts")
cohort_data, cohort_intervals = tm.utils.bin_timestamps(data, cohorts=5)
print("> Step 4: Estimate matrices")
myEstimator = es.CohortEstimator(states=myState, ci={'method': 'goodman', 'alpha': 0.05})
labels = {'Timestamp': 'Cohort', 'State': 'State', 'ID': 'ID'}
result = myEstimator.fit(cohort_data, labels=labels)
print("> Step 5: Display results")
myMatrixSet = tm.TransitionMatrixSet(values=result, temporal_type='Incremental')
print(myMatrixSet.temporal_type)
myMatrixSet.print_matrix()
elif example == 3:
data = pd.read_csv(dataset_path + 'synthetic_data3.csv', dtype={'State': str})
sorted_data = data.sort_values(['ID', 'Time'], ascending=[True, True])
myState = tm.StateSpace([('0', "A"), ('1', "B"), ('2', "C"), ('3', "D"), ('4', "E"), ('5', "F"), ('6', "G")])
print(myState.validate_dataset(dataset=sorted_data))
cohort_data, cohort_intervals = tm.utils.bin_timestamps(data, cohorts=5)
myEstimator = es.CohortEstimator(states=myState, ci={'method': 'goodman', 'alpha': 0.05})
labels = {'Timestamp': 'Cohort', 'State': 'State', 'ID': 'ID'}
result = myEstimator.fit(cohort_data, labels=labels)
myMatrixSet = tm.TransitionMatrixSet(values=result, temporal_type='Incremental')
myMatrixSet.print_matrix()
| true | true |
1c2e0f739c71c9793927448ad4aeba67410ef221 | 13,713 | py | Python | Train.py | karino2/Pytorch-Handwritten-Mathematical-Expression-Recognition | 6c6139624c71fa68a0a386a94346cfab39d0f087 | [
"MIT"
] | null | null | null | Train.py | karino2/Pytorch-Handwritten-Mathematical-Expression-Recognition | 6c6139624c71fa68a0a386a94346cfab39d0f087 | [
"MIT"
] | null | null | null | Train.py | karino2/Pytorch-Handwritten-Mathematical-Expression-Recognition | 6c6139624c71fa68a0a386a94346cfab39d0f087 | [
"MIT"
] | null | null | null | '''
Python 3.6
Pytorch 0.4
Written by Hongyu Wang in Beihang university
'''
import torch
import math
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy
import torch.utils.data as data
from data_iterator import dataIterator
from Densenet_torchvision import densenet121
from Attention_RNN import AttnDecoderRNN
import random
# compute the wer loss
def cmp_result(label, rec):
    """Levenshtein (edit) distance between a reference symbol sequence and
    a recognized one; used to accumulate WER statistics.

    Args:
        label: reference (ground-truth) symbol sequence.
        rec: recognized (hypothesis) symbol sequence.

    Returns:
        ``(distance, len(label))`` so callers can accumulate total edit
        operations and total reference length separately.
    """
    # Rolling single-row DP: O(len(rec)) memory instead of materializing
    # the full (len(label)+1) x (len(rec)+1) numpy matrix.
    prev_row = list(range(len(rec) + 1))
    for i, ref_sym in enumerate(label, 1):
        curr_row = [i] + [0] * len(rec)
        for j, hyp_sym in enumerate(rec, 1):
            substitution = prev_row[j - 1] + (ref_sym != hyp_sym)
            insertion = curr_row[j - 1] + 1
            deletion = prev_row[j] + 1
            curr_row[j] = min(substitution, insertion, deletion)
        prev_row = curr_row
    return prev_row[-1], len(label)
def load_dict(dictFile):
    """Load a symbol dictionary file into a dict.

    Each line of ``dictFile`` is "<symbol> <integer-id>".

    Returns:
        dict mapping symbol string -> integer id.  Also prints the
        vocabulary size, matching the original console output.
    """
    lexicon = {}
    # 'with' guarantees the file is closed even if a line is malformed.
    with open(dictFile) as fp:
        for line in fp:
            parts = line.strip().split()
            lexicon[parts[0]] = int(parts[1])
    print('total words/phones', len(lexicon))
    return lexicon
# ---------------------------------------------------------------------------
# Configuration and dataset loading
# ---------------------------------------------------------------------------
# [pickled image archive, caption file] per split.
datasets=['./offline-train.pkl','./train_caption.txt']
valid_datasets=['./offline-test.pkl', './test_caption.txt']
dictionaries=['./dictionary.txt']
# batch_Imagesize / maxlen / maxImagesize are forwarded to dataIterator;
# presumably they cap per-batch area, caption length and per-sample image
# area respectively -- confirm against data_iterator.py.
batch_Imagesize=500000
valid_batch_Imagesize=500000
batch_size=1
maxlen=48
maxImagesize= 100000
# Decoder hidden size and the probability of feeding ground truth back
# into the decoder during training (used by my_train below).
hidden_size = 256
teacher_forcing_ratio = 0.5

# symbol -> id mapping, plus the reverse id -> symbol list.
worddicts = load_dict(dictionaries[0])
worddicts_r = [None] * len(worddicts)
for kk, vv in worddicts.items():
    worddicts_r[vv] = kk

#load train data and test data
train,train_label = dataIterator(
    datasets[0], datasets[1],worddicts,batch_size=batch_size,
    batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize
)
len_train = len(train)

test,test_label = dataIterator(
    valid_datasets[0],valid_datasets[1],worddicts,batch_size=batch_size,
    batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize
)
class custom_dset(data.Dataset):
    """Wraps pre-bucketed image/caption lists as a torch Dataset.

    Each element of ``train`` is array-like with a (1, 1, H, W)-style shape
    (the first two axes must multiply to 1); each element of ``train_label``
    is a symbol-id sequence.
    """

    def __init__(self, train, train_label):
        self.train = train
        self.train_label = train_label

    def __getitem__(self, index):
        image = torch.from_numpy(numpy.array(self.train[index]))
        caption = torch.from_numpy(
            numpy.array(self.train_label[index])).type(torch.LongTensor)
        # Collapse the leading singleton axes: (1, 1, H, W) -> (1, H, W),
        # and flatten the caption to 1-D.
        shape = image.size()
        image = image.view(1, shape[2], shape[3])
        caption = caption.view(-1)
        return image, caption

    def __len__(self):
        return len(self.train)
off_image_train = custom_dset(train,train_label)
off_image_test = custom_dset(test,test_label)
# collate_fn is writting for padding imgs in batch. But now, I used batch_size=1, so this function has no effect.
def collate_fn(batch):
    """Pad a list of (image, label) pairs into batch tensors.

    Images of shape (1, H, W) are zero-padded to the largest H and W in the
    batch and scaled from [0, 255] into [0, 1]; labels are zero-padded to
    (longest label + 1) so the trailing 0 serves as the end-of-sequence
    target.  The batch is sorted by caption length, descending.

    Returns:
        (FloatTensor of shape (B, 1, maxH, maxW),
         LongTensor of shape (B, max_len + 1))
    """
    batch.sort(key=lambda x: len(x[1]), reverse=True)
    img, label = zip(*batch)

    max_h = max(im.size(1) for im in img)
    max_w = max(im.size(2) for im in img)
    max_len = len(label[0])  # longest caption after the sort above

    # Tensor-slice copies replace the original per-pixel Python loops
    # (identical result, vastly faster).  The unused mask tensor the
    # original also built is dropped.
    img_padding = torch.zeros(len(img), 1, max_h, max_w).type(torch.FloatTensor)
    for i, im in enumerate(img):
        img_padding[i, 0, :im.size(1), :im.size(2)] = im[0]
    img_padding = img_padding / 255

    label_padding = torch.zeros(len(label), max_len + 1).type(torch.LongTensor)
    for i, lab in enumerate(label):
        label_padding[i, :len(lab)] = lab
    return img_padding, label_padding
# DataLoaders over the bucketed datasets; collate_fn pads each batch to a
# common image size and label length.
# NOTE(review): shuffle=True on the *test* loader is unusual for
# evaluation -- confirm it is intentional.
train_loader = torch.utils.data.DataLoader(
    dataset = off_image_train,
    batch_size = batch_size,
    shuffle = True,
    collate_fn = collate_fn,
    num_workers=8,
)
test_loader = torch.utils.data.DataLoader(
    dataset = off_image_test,
    batch_size = batch_size,
    shuffle = True,
    collate_fn = collate_fn,
    num_workers=8,
)
def my_train(target_length, attn_decoder1,
             output_highfeature, output_area, y, criterion, encoder_optimizer1,
             decoder_optimizer1, x_mean, dense_input):
    """One optimization step over a single target sequence.

    Runs the attention decoder for up to ``target_length`` symbols, sums
    the cross-entropy loss over the emitted steps, backpropagates once and
    steps both optimizers.  With probability ``teacher_forcing_ratio``
    (module global) the ground-truth symbol is fed back as the next decoder
    input, stopping early after the 0 target (end of sequence); otherwise
    the decoder's own argmax prediction is fed back and all
    ``target_length`` steps are run, exactly as in the original two-branch
    version (the branches only differed in the feedback rule, so they are
    merged here).

    Returns the summed loss as a Python float.
    """
    use_teacher_forcing = random.random() < teacher_forcing_ratio

    # Initial decoder input token (id 111 -- presumably the start-of-
    # sequence symbol; confirm against dictionary.txt).
    decoder_input = Variable(torch.LongTensor([[111]])).cuda()
    # Hidden state seeded from the mean encoder activation.
    decoder_hidden = torch.tanh(attn_decoder1.initHidden() * x_mean)
    attention_sum = Variable(torch.zeros(1, dense_input, output_area).cuda())
    decoder_attention = Variable(torch.zeros(1, dense_input, output_area).cuda())

    encoder_optimizer1.zero_grad()
    decoder_optimizer1.zero_grad()
    loss = 0
    for di in range(target_length):
        decoder_output, decoder_hidden, decoder_attention, attention_sum = \
            attn_decoder1(decoder_input, decoder_hidden, output_highfeature,
                          output_area, attention_sum, decoder_attention,
                          dense_input)
        loss += criterion(decoder_output[0], y[:, di])
        if use_teacher_forcing:
            # The loss for the 0/<eol> step is counted before stopping.
            if int(y[0][di]) == 0:
                break
            decoder_input = y[:, di]
        else:
            # Feed back the decoder's own greedy prediction.
            topv, topi = decoder_output[0][0].topk(1)
            decoder_input = topi

    loss.backward()
    encoder_optimizer1.step()
    decoder_optimizer1.step()
    return loss.item()
# ---------------------------------------------------------------------------
# Model construction
# ---------------------------------------------------------------------------
#encoder = DenseNet121().cuda()
encoder = densenet121().cuda()

# Warm-start the encoder from torchvision's ImageNet DenseNet-121 weights:
# keep only the checkpoint keys that exist in our encoder, merge, reload.
pthfile = r'densenet121-a639ec97.pth'
pretrained_dict = torch.load(pthfile)
encoder_dict = encoder.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in encoder_dict}
encoder_dict.update(pretrained_dict)
encoder.load_state_dict(encoder_dict)

# NOTE(review): 112 is presumably the output vocabulary size -- confirm
# against AttnDecoderRNN and dictionary.txt.
attn_decoder1 = AttnDecoderRNN(hidden_size,112,dropout_p=0.2).cuda()

# attn_pre = torch.load('model/attn_decoder_lr0.00009_nopadding_baseline.pkl')
# attn_dict = attn_decoder1.state_dict()
# attn_pre = {k: v for k, v in attn_pre.items() if k in attn_dict}
# attn_dict.update(attn_pre)
# attn_decoder1.load_state_dict(attn_dict)

# encoder.load_state_dict(torch.load('model/encoder_lr0.00009_nopadding.pkl'))
# attn_decoder1.load_state_dict(torch.load('model/attn_decoder_lr0.00009_nopadding.pkl'))

lr_rate = 0.00009
encoder_optimizer1 = torch.optim.Adam(encoder.parameters(), lr=lr_rate)
decoder_optimizer1 = torch.optim.Adam(attn_decoder1.parameters(), lr=lr_rate)
criterion = nn.CrossEntropyLoss()
# Presumably tracks the best expression-recognition rate across epochs --
# confirm in the (truncated) training loop below.
exprate = 0

#encoder.load_state_dict(torch.load('model/encoder_lr0.00009_nopadding_pre_GN_te05_d02.pkl'))
#attn_decoder1.load_state_dict(torch.load('model/attn_decoder_lr0.00009_nopadding_pre_GN_te05_d02.pkl'))
for epoch in range(1000):
# if using SGD optimizer
# if epoch%8 == 0:
# lr_rate = lr_rate/10
# encoder_optimizer1 = torch.optim.SGD(encoder.parameters(), lr=lr_rate,momentum=0.9)
# decoder_optimizer1 = torch.optim.SGD(attn_decoder1.parameters(), lr=lr_rate,momentum=0.9)
running_loss=0
whole_loss = 0
encoder.train(mode=True)
attn_decoder1.train(mode=True)
# this is the train
for step,(x,y) in enumerate(train_loader):
x = Variable(x.cuda())
y = Variable(y.cuda())
# out is CNN featuremaps
out = encoder(x)
output_highfeature = out.squeeze(0)
x_mean = torch.mean(output_highfeature)
x_mean = float(x_mean)
# dense_input is height and output_area is width which is bb
output_area1 = output_highfeature.size()
output_area = output_area1[2]
dense_input = output_area1[1]
target_length = y.size()[1]
running_loss += my_train(target_length,attn_decoder1,output_highfeature,
output_area,y,criterion,encoder_optimizer1,decoder_optimizer1,x_mean,dense_input)
if step % 100 == 99:
pre = ((step+1)/len_train)*100
whole_loss += running_loss
running_loss = running_loss/100
print('epoch is %d, loading for %.3f%%, running_loss is %f' %(epoch,pre,running_loss))
with open("training_data/running_loss_%.5f_pre_GN_te05_d02.txt" %(lr_rate),"a") as f:
f.write("%s\n"%(str(running_loss)))
running_loss = 0
loss_all_out = whole_loss / len_train
print("epoch is %d, the whole loss is %f" % (epoch, loss_all_out))
with open("training_data/whole_loss_%.5f_pre_GN_te05_d02.txt" % (lr_rate), "a") as f:
f.write("%s\n" % (str(loss_all_out)))
# this is the prediction and compute wer loss
total_dist = 0
total_label = 0
total_line = 0
total_line_rec = 0
encoder.eval()
attn_decoder1.eval()
for step_t, (x_t, y_t) in enumerate(test_loader):
x_t = Variable(x_t.cuda())
y_t = Variable(y_t.cuda())
out_t = encoder(x_t)
output_highfeature_t = out_t.squeeze(0)
x_mean_t = torch.mean(output_highfeature_t)
x_mean_t = float(x_mean_t)
output_area_t1 = output_highfeature_t.size()
output_area_t = output_area_t1[2]
dense_input = output_area_t1[1]
target_length_t = y_t.size()[1]
decoder_input_t = Variable(torch.LongTensor([[111]]))
decoder_input_t = decoder_input_t.cuda()
decoder_hidden_t = attn_decoder1.initHidden()
decoder_hidden_t = decoder_hidden_t * x_mean_t
decoder_hidden_t = torch.tanh(decoder_hidden_t)
prediction = []
label = []
decoder_attention_t = Variable(torch.zeros(1,dense_input,output_area_t).cuda())
attention_sum_t = Variable(torch.zeros(1,dense_input,output_area_t).cuda())
for i in range(48):
decoder_output, decoder_hidden_t, decoder_attention_t, attention_sum_t = attn_decoder1(decoder_input_t,
decoder_hidden_t,
output_highfeature_t,
output_area_t,
attention_sum_t,
decoder_attention_t,dense_input)
topv, topi = decoder_output[0].topk(1)
decoder_input_t = topi
# prediction
prediction.append(int(topi[0]))
if int(topi[0]) == 0:
break
# label
for i_label in range(target_length_t):
label.append(int(y_t[0][i_label]))
#label.append(0)
dist, llen = cmp_result(label, prediction)
total_dist += dist
total_label += llen
total_line += 1
if dist == 0:
total_line_rec = total_line_rec+ 1
print('total_line_rec is',total_line_rec)
wer = float(total_dist) / total_label
sacc = float(total_line_rec) / total_line
print('wer is %.5f' % (wer))
print('sacc is %.5f ' % (sacc))
with open("training_data/wer_%.5f_pre_GN_te05_d02.txt" % (lr_rate), "a") as f:
f.write("%s\n" % (str(wer)))
if (sacc > exprate):
exprate = sacc
print(exprate)
print("saving the model....")
print('encoder_lr%.5f_nopadding_pre_GN_te05_d02_f.pkl' %(lr_rate))
torch.save(encoder.state_dict(), 'model/encoder_lr%.5f_nopadding_pre_GN_te05_d02_f.pkl'%(lr_rate))
torch.save(attn_decoder1.state_dict(), 'model/attn_decoder_lr%.5f_nopadding_pre_GN_te05_d02_f.pkl'%(lr_rate))
print("done")
else:
print('the best is %f' % (exprate))
print('the loss is bigger than before,so do not save the model')
| 37.263587 | 125 | 0.600379 | import torch
import math
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy
import torch.utils.data as data
from data_iterator import dataIterator
from Densenet_torchvision import densenet121
from Attention_RNN import AttnDecoderRNN
import random
def cmp_result(label,rec):
    """Levenshtein (edit) distance between ``label`` and ``rec``.

    Returns ``(edit_distance, len(label))`` so the caller can accumulate a
    word-error-rate as total_distance / total_label_length.
    """
    # Rolling single-row dynamic programme instead of a full (m+1)x(n+1) matrix.
    previous = list(range(len(rec) + 1))
    for row, truth in enumerate(label, start=1):
        current = [row]
        for col, guess in enumerate(rec, start=1):
            substitute = previous[col - 1] + (truth != guess)
            insert = current[col - 1] + 1
            delete = previous[col] + 1
            current.append(min(substitute, insert, delete))
        previous = current
    return previous[len(rec)], len(label)
def load_dict(dictFile):
    """Load a "<token> <index>" vocabulary file into a dict.

    Each non-empty line of ``dictFile`` must contain a token followed by
    its integer id, separated by whitespace.

    Args:
        dictFile: path of the dictionary text file.

    Returns:
        dict mapping token (str) -> id (int).
    """
    lexicon = {}
    # ``with`` guarantees the file is closed even if parsing raises.
    with open(dictFile) as fp:
        for line in fp:
            parts = line.strip().split()
            if not parts:  # tolerate blank / whitespace-only lines
                continue
            lexicon[parts[0]] = int(parts[1])
    print('total words/phones', len(lexicon))
    return lexicon
datasets=['./offline-train.pkl','./train_caption.txt']
valid_datasets=['./offline-test.pkl', './test_caption.txt']
dictionaries=['./dictionary.txt']
batch_Imagesize=500000
valid_batch_Imagesize=500000
batch_size=1
maxlen=48
maxImagesize= 100000
hidden_size = 256
teacher_forcing_ratio = 0.5
worddicts = load_dict(dictionaries[0])
worddicts_r = [None] * len(worddicts)
for kk, vv in worddicts.items():
worddicts_r[vv] = kk
train,train_label = dataIterator(
datasets[0], datasets[1],worddicts,batch_size=batch_size,
batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize
)
len_train = len(train)
test,test_label = dataIterator(
valid_datasets[0],valid_datasets[1],worddicts,batch_size=batch_size,
batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize
)
class custom_dset(data.Dataset):
    """Thin Dataset wrapper pairing raw image arrays with label sequences."""

    def __init__(self, train, train_label):
        self.train = train
        self.train_label = train_label

    def __getitem__(self, index):
        image = torch.from_numpy(numpy.array(self.train[index]))
        target = torch.from_numpy(numpy.array(self.train_label[index])).type(torch.LongTensor)
        # Collapse the leading singleton dims so the sample is (1, H, W),
        # and flatten the label into a 1-D sequence.
        _, _, height, width = image.size()
        return image.view(1, height, width), target.view(-1)

    def __len__(self):
        return len(self.train)
off_image_train = custom_dset(train,train_label)
off_image_test = custom_dset(test,test_label)
def collate_fn(batch):
    """Pad a batch of (image, label) samples to a common size.

    Images are zero-padded to the largest height/width in the batch and
    scaled from [0, 255] down to [0, 1]; labels are zero-padded to the
    longest label length plus one trailing 0 (the end-of-sequence symbol).

    Args:
        batch: list of (image tensor (1, H, W), 1-D LongTensor label) pairs.

    Returns:
        (images (B, 1, maxH, maxW) FloatTensor,
         labels (B, maxLen + 1) LongTensor)
    """
    # Sort longest-label first; the first label then defines max_len.
    batch.sort(key=lambda x: len(x[1]), reverse=True)
    img, label = zip(*batch)
    max_h = max(im.size(1) for im in img)
    max_w = max(im.size(2) for im in img)
    max_len = len(label[0])
    img_padding = torch.zeros(len(img), 1, max_h, max_w).type(torch.FloatTensor)
    for idx, im in enumerate(img):
        # Vectorised block copy instead of a per-pixel Python loop.
        img_padding[idx, 0, :im.size(1), :im.size(2)] = im[0]
    img_padding = img_padding / 255
    label_padding = torch.zeros(len(label), max_len + 1).type(torch.LongTensor)
    for idx, lab in enumerate(label):
        label_padding[idx, :len(lab)] = lab
    return img_padding, label_padding
train_loader = torch.utils.data.DataLoader(
dataset = off_image_train,
batch_size = batch_size,
shuffle = True,
collate_fn = collate_fn,
num_workers=8,
)
test_loader = torch.utils.data.DataLoader(
dataset = off_image_test,
batch_size = batch_size,
shuffle = True,
collate_fn = collate_fn,
num_workers=8,
)
def my_train(target_length,attn_decoder1,
             output_highfeature, output_area,y,criterion,encoder_optimizer1,decoder_optimizer1,x_mean,dense_input):
    """One optimisation step of the attention decoder over a single batch.

    Args:
        target_length: number of symbols in the label sequence ``y``.
        attn_decoder1: the attention RNN decoder module.
        output_highfeature: CNN feature map produced by the encoder.
        output_area: width of the feature map.
        y: (batch, target_length) LongTensor of gold symbol ids.
        criterion: per-step loss (CrossEntropyLoss).
        encoder_optimizer1 / decoder_optimizer1: optimisers stepped here.
        x_mean: scalar mean of the feature map; scales the initial hidden state.
        dense_input: height of the feature map.

    Returns:
        float: summed (un-normalised) loss of this batch.
    """
    loss = 0
    # Randomly alternate between teacher forcing (feeding the gold symbol)
    # and free running (feeding the decoder's own prediction).
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    # 111 appears to be the start-of-sequence token id and 0 the end token
    # -- TODO confirm against dictionary.txt.
    decoder_input = Variable(torch.LongTensor([[111]]))
    decoder_input = decoder_input.cuda()
    # Initial hidden state scaled by the feature-map mean, then squashed.
    decoder_hidden = attn_decoder1.initHidden()
    decoder_hidden = decoder_hidden*x_mean
    decoder_hidden = torch.tanh(decoder_hidden)
    # Coverage-style accumulators over the (height, width) attention grid.
    attention_sum = Variable(torch.zeros(1,dense_input,output_area).cuda())
    decoder_attention = Variable(torch.zeros(1,dense_input,output_area).cuda())
    if use_teacher_forcing:
        encoder_optimizer1.zero_grad()
        decoder_optimizer1.zero_grad()
        my_num = 0  # decoded-step counter (bookkeeping only, otherwise unused)
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention, attention_sum = attn_decoder1(decoder_input,
                                                                                             decoder_hidden,
                                                                                             output_highfeature,
                                                                                             output_area,
                                                                                             attention_sum,
                                                                                             decoder_attention,
                                                                                             dense_input)
            loss += criterion(decoder_output[0], y[:,di])
            my_num = my_num + 1
            if int(y[0][di]) == 0:
                # Gold end-of-sequence reached: stop decoding this sample.
                break
            # Teacher forcing: next decoder input is the gold symbol.
            decoder_input = y[:,di]
        loss.backward()
        encoder_optimizer1.step()
        decoder_optimizer1.step()
        return loss.item()
    else:
        encoder_optimizer1.zero_grad()
        decoder_optimizer1.zero_grad()
        my_num = 0
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention,attention_sum= attn_decoder1(decoder_input, decoder_hidden,
                                                                                           output_highfeature, output_area,
                                                                                           attention_sum,decoder_attention,dense_input)
            # Free running: feed back the decoder's own argmax prediction.
            topv, topi = decoder_output[0][0].topk(1)
            decoder_input = topi
            loss += criterion(decoder_output[0], y[:,di])
            my_num = my_num + 1
        loss.backward()
        encoder_optimizer1.step()
        decoder_optimizer1.step()
        return loss.item()
encoder = densenet121().cuda()
pthfile = r'densenet121-a639ec97.pth'
pretrained_dict = torch.load(pthfile)
encoder_dict = encoder.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in encoder_dict}
encoder_dict.update(pretrained_dict)
encoder.load_state_dict(encoder_dict)
attn_decoder1 = AttnDecoderRNN(hidden_size,112,dropout_p=0.2).cuda()
lr_rate = 0.00009
encoder_optimizer1 = torch.optim.Adam(encoder.parameters(), lr=lr_rate)
decoder_optimizer1 = torch.optim.Adam(attn_decoder1.parameters(), lr=lr_rate)
criterion = nn.CrossEntropyLoss()
exprate = 0
for epoch in range(1000):
running_loss=0
whole_loss = 0
encoder.train(mode=True)
attn_decoder1.train(mode=True)
for step,(x,y) in enumerate(train_loader):
x = Variable(x.cuda())
y = Variable(y.cuda())
out = encoder(x)
output_highfeature = out.squeeze(0)
x_mean = torch.mean(output_highfeature)
x_mean = float(x_mean)
output_area1 = output_highfeature.size()
output_area = output_area1[2]
dense_input = output_area1[1]
target_length = y.size()[1]
running_loss += my_train(target_length,attn_decoder1,output_highfeature,
output_area,y,criterion,encoder_optimizer1,decoder_optimizer1,x_mean,dense_input)
if step % 100 == 99:
pre = ((step+1)/len_train)*100
whole_loss += running_loss
running_loss = running_loss/100
print('epoch is %d, loading for %.3f%%, running_loss is %f' %(epoch,pre,running_loss))
with open("training_data/running_loss_%.5f_pre_GN_te05_d02.txt" %(lr_rate),"a") as f:
f.write("%s\n"%(str(running_loss)))
running_loss = 0
loss_all_out = whole_loss / len_train
print("epoch is %d, the whole loss is %f" % (epoch, loss_all_out))
with open("training_data/whole_loss_%.5f_pre_GN_te05_d02.txt" % (lr_rate), "a") as f:
f.write("%s\n" % (str(loss_all_out)))
total_dist = 0
total_label = 0
total_line = 0
total_line_rec = 0
encoder.eval()
attn_decoder1.eval()
for step_t, (x_t, y_t) in enumerate(test_loader):
x_t = Variable(x_t.cuda())
y_t = Variable(y_t.cuda())
out_t = encoder(x_t)
output_highfeature_t = out_t.squeeze(0)
x_mean_t = torch.mean(output_highfeature_t)
x_mean_t = float(x_mean_t)
output_area_t1 = output_highfeature_t.size()
output_area_t = output_area_t1[2]
dense_input = output_area_t1[1]
target_length_t = y_t.size()[1]
decoder_input_t = Variable(torch.LongTensor([[111]]))
decoder_input_t = decoder_input_t.cuda()
decoder_hidden_t = attn_decoder1.initHidden()
decoder_hidden_t = decoder_hidden_t * x_mean_t
decoder_hidden_t = torch.tanh(decoder_hidden_t)
prediction = []
label = []
decoder_attention_t = Variable(torch.zeros(1,dense_input,output_area_t).cuda())
attention_sum_t = Variable(torch.zeros(1,dense_input,output_area_t).cuda())
for i in range(48):
decoder_output, decoder_hidden_t, decoder_attention_t, attention_sum_t = attn_decoder1(decoder_input_t,
decoder_hidden_t,
output_highfeature_t,
output_area_t,
attention_sum_t,
decoder_attention_t,dense_input)
topv, topi = decoder_output[0].topk(1)
decoder_input_t = topi
prediction.append(int(topi[0]))
if int(topi[0]) == 0:
break
for i_label in range(target_length_t):
label.append(int(y_t[0][i_label]))
dist, llen = cmp_result(label, prediction)
total_dist += dist
total_label += llen
total_line += 1
if dist == 0:
total_line_rec = total_line_rec+ 1
print('total_line_rec is',total_line_rec)
wer = float(total_dist) / total_label
sacc = float(total_line_rec) / total_line
print('wer is %.5f' % (wer))
print('sacc is %.5f ' % (sacc))
with open("training_data/wer_%.5f_pre_GN_te05_d02.txt" % (lr_rate), "a") as f:
f.write("%s\n" % (str(wer)))
if (sacc > exprate):
exprate = sacc
print(exprate)
print("saving the model....")
print('encoder_lr%.5f_nopadding_pre_GN_te05_d02_f.pkl' %(lr_rate))
torch.save(encoder.state_dict(), 'model/encoder_lr%.5f_nopadding_pre_GN_te05_d02_f.pkl'%(lr_rate))
torch.save(attn_decoder1.state_dict(), 'model/attn_decoder_lr%.5f_nopadding_pre_GN_te05_d02_f.pkl'%(lr_rate))
print("done")
else:
print('the best is %f' % (exprate))
print('the loss is bigger than before,so do not save the model')
| true | true |
1c2e1066fb3d5b44d2588a5b81741d29c458ba76 | 2,496 | py | Python | kratos_salome_plugin/gui/project_path_handler.py | armingeiser/KratosSalomePlugin | d402ca9edef8dff071ceabf0ebac0d858a6fbfcc | [
"BSD-3-Clause"
] | 6 | 2020-01-23T20:54:17.000Z | 2021-02-19T09:52:29.000Z | kratos_salome_plugin/gui/project_path_handler.py | armingeiser/KratosSalomePlugin | d402ca9edef8dff071ceabf0ebac0d858a6fbfcc | [
"BSD-3-Clause"
] | 20 | 2020-01-25T16:05:43.000Z | 2020-12-18T20:36:46.000Z | kratos_salome_plugin/gui/project_path_handler.py | armingeiser/KratosSalomePlugin | d402ca9edef8dff071ceabf0ebac0d858a6fbfcc | [
"BSD-3-Clause"
] | 3 | 2020-05-27T13:31:08.000Z | 2020-12-18T19:50:43.000Z | # _ __ _ ___ _ ___ _ _
# | |/ /_ _ __ _| |_ ___ __/ __| __ _| |___ _ __ ___| _ \ |_ _ __ _(_)_ _
# | ' <| '_/ _` | _/ _ (_-<__ \/ _` | / _ \ ' \/ -_) _/ | || / _` | | ' \
# |_|\_\_| \__,_|\__\___/__/___/\__,_|_\___/_|_|_\___|_| |_|\_,_\__, |_|_||_|
# |___/
# License: BSD License ; see LICENSE
#
# Main authors: Philipp Bucher (https://github.com/philbucher)
#
"""
The ProjectPathHandler is used for interacting with the user for
getting paths for opening and saving projects
"""
# python imports
from pathlib import Path
import logging
logger = logging.getLogger(__name__)
# qt imports
from PyQt5.QtWidgets import QFileDialog
# plugin imports
from kratos_salome_plugin.exceptions import UserInputError
class ProjectPathHandler:
    """Dialog helper that asks the user where to open / save projects.

    Remembers the directory of the last chosen path and offers it as the
    starting location of the next dialog.
    """

    def __init__(self):
        self.last_path = Path.home()  # start browsing from the user's home

    def GetOpenPath(self, parent_window=None) -> Path:
        """Ask for an existing ".ksp" project folder and return its path.

        Returns Path(".") when the dialog is aborted; raises UserInputError
        for a selected folder that does not end in ".ksp".
        """
        chosen = QFileDialog.getExistingDirectory(
            parent_window,
            'Select a KSP project folder (*.ksp)',
            str(self.last_path),
            QFileDialog.ShowDirsOnly)
        selected = Path(chosen)
        if selected == Path("."):  # dialog was aborted
            return Path(".")
        if selected.suffix != ".ksp":
            raise UserInputError('Invalid project folder selected, must end with ".ksp"!')
        self.last_path = selected.parent
        logger.debug('Opening project path: "%s"', selected)
        return selected

    def GetSavePath(self, parent_window=None) -> Path:
        """Ask for a save location and return it with a ".ksp" suffix.

        Returns Path(".") when the dialog is aborted.
        """
        file_name, _ = QFileDialog.getSaveFileName(
            parent_window, "Save KSP project", str(self.last_path))
        selected = Path(file_name)
        if selected == Path("."):  # dialog was aborted
            return Path(".")
        selected = selected.with_suffix(".ksp")
        self.last_path = selected.parent
        logger.debug('Saving project path: "%s"', selected)
        return selected
# for testing / debugging
if __name__ == '__main__':
    # Manual smoke test: pops up the save and open dialogs in turn and
    # prints the chosen paths.  Requires a display and PyQt5.
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    handler = ProjectPathHandler()
    sp = handler.GetSavePath()
    op = handler.GetOpenPath()
    print(sp)
    print(op)
    sys.exit(app.exec_())
| 29.364706 | 107 | 0.584936 |
from pathlib import Path
import logging
logger = logging.getLogger(__name__)
from PyQt5.QtWidgets import QFileDialog
from kratos_salome_plugin.exceptions import UserInputError
class ProjectPathHandler:
def __init__(self):
self.last_path = Path.home()
def GetOpenPath(self, parent_window=None) -> Path:
path = Path(QFileDialog.getExistingDirectory(
parent_window,
'Select a KSP project folder (*.ksp)',
str(self.last_path),
QFileDialog.ShowDirsOnly))
if path == Path("."):
return Path(".")
if path.suffix != ".ksp":
raise UserInputError('Invalid project folder selected, must end with ".ksp"!')
self.last_path = path.parent
logger.debug('Opening project path: "%s"', path)
return path
def GetSavePath(self, parent_window=None) -> Path:
path = Path(QFileDialog.getSaveFileName(parent_window, "Save KSP project", str(self.last_path))[0])
if path == Path("."):
return Path(".")
path = path.with_suffix(".ksp")
self.last_path = path.parent
logger.debug('Saving project path: "%s"', path)
return path
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
handler = ProjectPathHandler()
sp = handler.GetSavePath()
op = handler.GetOpenPath()
print(sp)
print(op)
sys.exit(app.exec_())
| true | true |
1c2e107c782982208a14f5963f086828266d73c3 | 1,277 | py | Python | src/pre_process/ReadData.py | joaorura/k-NN_Iris_Classificator | e7d6eab400911587e4ced89fe4bb1b194e60527b | [
"MIT"
] | null | null | null | src/pre_process/ReadData.py | joaorura/k-NN_Iris_Classificator | e7d6eab400911587e4ced89fe4bb1b194e60527b | [
"MIT"
] | null | null | null | src/pre_process/ReadData.py | joaorura/k-NN_Iris_Classificator | e7d6eab400911587e4ced89fe4bb1b194e60527b | [
"MIT"
] | null | null | null | import csv
from copy import deepcopy
from utils.check_functions import check_type
class ReadData:
    """Reads a CSV of samples: a header row, then rows of id, floats, class.

    Exposes deep copies of the parsed data and of the distinct class labels
    so callers cannot mutate the internal state.
    """

    def __init__(self, path):
        self._path = path
        self._check_values()
        self._classifications = []
        self._data = {
            "identifiers": None,
            "list": []
        }
        self._execute()

    def _check_values(self):
        # Validation message is a runtime string and intentionally kept
        # verbatim (project language).
        check_type(self._path, str, "O campo path deve ser uma string que contem o caminho para o csv com os dados.")

    def _execute(self):
        # Load every CSV row verbatim first.
        with open(self._path) as file:
            for row in csv.reader(file, delimiter=","):
                self._data["list"].append(list(row))
        # The first row is the header; the remaining rows are samples.
        self._data["identifiers"] = self._data["list"].pop(0)
        for row in self._data["list"]:
            row[0] = int(row[0])            # sample id
            for col in range(1, len(row) - 1):
                row[col] = float(row[col])  # feature columns
            label = row[-1]                 # last column: class label (str)
            if label not in self._classifications:
                self._classifications.append(label)

    def get_data(self):
        return deepcopy(self._data)

    def get_classifications(self):
        return deepcopy(self._classifications)
| 27.76087 | 117 | 0.552858 | import csv
from copy import deepcopy
from utils.check_functions import check_type
class ReadData:
def _check_values(self):
check_type(self._path, str, "O campo path deve ser uma string que contem o caminho para o csv com os dados.")
def _execute(self):
with open(self._path) as file:
reader = csv.reader(file, delimiter=",")
for line in reader:
aux = list(line)
self._data["list"].append(aux)
self._data["identifiers"] = self._data["list"][0]
del self._data["list"][0]
for line in self._data["list"]:
line[0] = int(line[0])
for i in range(1, len(line) - 1):
line[i] = float(line[i])
if line[len(line) - 1] not in self._classifications:
self._classifications.append(line[len(line) - 1])
def __init__(self, path):
self._path = path
self._check_values()
self._classifications = []
self._data = {
"identifiers": None,
"list": []
}
self._execute()
def get_data(self):
return deepcopy(self._data)
def get_classifications(self):
return deepcopy(self._classifications)
| true | true |
1c2e107c86e64abef11fe9830e61fc754660ecb1 | 16,139 | py | Python | django_mfa/views.py | juwaini/django-mfa | 910b0f544ae5bebc02434cc6b176f74b5040f3b7 | [
"MIT"
] | null | null | null | django_mfa/views.py | juwaini/django-mfa | 910b0f544ae5bebc02434cc6b176f74b5040f3b7 | [
"MIT"
] | null | null | null | django_mfa/views.py | juwaini/django-mfa | 910b0f544ae5bebc02434cc6b176f74b5040f3b7 | [
"MIT"
] | null | null | null | import base64
import codecs
import random
import hashlib
import re
import string
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, resolve_url, get_object_or_404
from django_mfa.models import *
from . import totp
from django.views.generic import FormView, ListView, TemplateView
from django.contrib.auth import load_backend
from django.contrib import auth, messages
from django.urls import reverse, reverse_lazy
from django.utils.http import is_safe_url
from django.utils.translation import ugettext as _
from u2flib_server import u2f
from .forms import *
class OriginMixin(object):
    """Mixin exposing the request origin ("<scheme>://<host>"), e.g. for U2F app ids."""

    def get_origin(self):
        scheme = self.request.scheme
        host = self.request.get_host()
        return scheme + '://' + host
@login_required
def security_settings(request):
    """Render the security overview page.

    Shows whether TOTP and U2F are enabled for the logged-in user and lists
    the remaining recovery codes (if any).
    """
    twofactor_enabled = is_mfa_enabled(request.user)
    u2f_enabled = is_u2f_enabled(request.user)
    # Recovery codes hang off the user's UserOTP record.  ``first()`` is
    # None when MFA is not configured -- presumably that filter then
    # matches nothing; TODO confirm against the model's null handling.
    backup_codes = UserRecoveryCodes.objects.filter(
        user=UserOTP.objects.filter(user=request.user).first()).all()
    return render(request, 'django_mfa/security.html', {"prev_url": settings.LOGIN_REDIRECT_URL, "backup_codes": backup_codes, "u2f_enabled": u2f_enabled, "twofactor_enabled": twofactor_enabled})
@login_required
def configure_mfa(request):
    """Show the TOTP enrolment page.

    GET renders the empty configuration page.  POST generates a fresh
    80-bit base32 secret and the matching ``otpauth://`` provisioning URI
    (rendered as a QR code by the template).
    """
    import secrets  # local import: cryptographically strong randomness

    qr_code = None
    base_32_secret_utf8 = None
    if request.method == "POST":
        # 10 random bytes == 80 bits, base32-encoded for authenticator apps.
        # ``secrets`` replaces the previous ``random.getrandbits`` because
        # the TOTP secret is security sensitive.
        base_32_secret = base64.b32encode(secrets.token_bytes(10))
        base_32_secret_utf8 = base_32_secret.decode("utf-8")
        totp_obj = totp.TOTP(base_32_secret_utf8)
        # Optional setting; default to no issuer instead of swallowing
        # arbitrary errors with a bare ``except``.
        issuer_name = getattr(settings, 'MFA_ISSUER_NAME', None)
        # Trailing '=' base32 padding is stripped from the URI -- presumably
        # because some authenticator apps mishandle it.
        qr_code = re.sub(
            r'=+$', '', totp_obj.provisioning_uri(request.user.username, issuer_name=issuer_name))
    return render(request, 'django_mfa/configure.html', {"qr_code": qr_code, "secret_key": base_32_secret_utf8})
@login_required
def enable_mfa(request):
    """Verify the user's first TOTP code and switch MFA on.

    GET renders the configure page; POST checks the submitted verification
    code against the submitted secret and, on success, stores a ``UserOTP``
    record and redirects to the recovery-code page.
    """
    user = request.user
    if is_mfa_enabled(user):
        # Already enabled -- send the user to the disable page instead.
        return HttpResponseRedirect(reverse("mfa:disable_mfa"))
    qr_code = None
    base_32_secret = None
    is_verified = False
    if request.method == "POST":
        base_32_secret = request.POST['secret_key']
        totp_obj = totp.TOTP(request.POST['secret_key'])
        is_verified = totp_obj.verify(request.POST["verification_code"])
        if is_verified:
            # NOTE: 'verfied_otp' (sic) is the session key used consistently
            # throughout this module -- do not "fix" the spelling here alone.
            request.session['verfied_otp'] = True
            UserOTP.objects.get_or_create(otp_type=request.POST["otp_type"],
                                          user=request.user,
                                          secret_key=request.POST['secret_key'])
            messages.success(
                request, "You have successfully enabled multi-factor authentication on your account.")
            response = redirect(reverse("mfa:recovery_codes"))
            return response
        else:
            # Wrong code: regenerate the provisioning URI so the template
            # can re-display the QR code for another attempt.
            totp_obj = totp.TOTP(base_32_secret)
            qr_code = totp_obj.provisioning_uri(request.user.email)
    return render(request, 'django_mfa/configure.html', {"is_verified": is_verified, "qr_code": qr_code, "secret_key": base_32_secret})
def _generate_cookie_salt(user):
    """Derive a per-user signing salt from (half of) the user's OTP secret.

    Returns None when the user has no OTP record.
    """
    try:
        user_otp = UserOTP.objects.get(user=user)
    except UserOTP.DoesNotExist:
        return None
    # Out of paranoia, only the first half of the secret feeds the salt.
    secret = user_otp.secret_key
    half_secret = secret[:len(secret) // 2]
    return hashlib.sha256(half_secret.encode("utf-8")).hexdigest()
MFA_COOKIE_PREFIX = "RMB_"
# update Remember-My-Browser cookie
def update_rmb_cookie(request, response):
    """Attach/refresh the signed "remember my browser" cookie on *response*.

    Only acts when both ``MFA_REMEMBER_MY_BROWSER`` and ``MFA_REMEMBER_DAYS``
    settings are configured; otherwise the response is returned untouched.
    """
    # ``getattr`` instead of a bare ``except:`` -- only the two optional
    # settings are tolerated to be absent; other errors must surface.
    remember_my_browser = getattr(settings, 'MFA_REMEMBER_MY_BROWSER', False)
    remember_days = getattr(settings, 'MFA_REMEMBER_DAYS', None)
    if remember_my_browser and remember_days is not None:
        # better not to reveal the username. Revealing the number seems harmless
        cookie_name = MFA_COOKIE_PREFIX + str(request.user.pk)
        # The salt comes from the user's OTP secret, so a re-configured
        # secret produces a different salt and old cookies stop verifying.
        cookie_salt = _generate_cookie_salt(request.user)
        response.set_signed_cookie(cookie_name, True, salt=cookie_salt, max_age=remember_days * 24 * 3600,
                                   secure=(not settings.DEBUG), httponly=True)
    return response
# verify Remember-My-Browser cookie
# returns True if browser is trusted and no code verification needed
def verify_rmb_cookie(request):
    """Return a truthy value when this browser holds a valid RMB cookie.

    True means the second factor can be skipped for this login.
    """
    # Optional settings; missing ones simply disable the feature (replaces
    # the previous bare ``except:`` which hid unrelated errors).
    remember_my_browser = getattr(settings, 'MFA_REMEMBER_MY_BROWSER', False)
    remember_days = getattr(settings, 'MFA_REMEMBER_DAYS', None)
    if not remember_my_browser or remember_days is None:
        return False
    max_cookie_age = remember_days * 24 * 3600
    cookie_name = MFA_COOKIE_PREFIX + str(request.user.pk)
    cookie_salt = _generate_cookie_salt(request.user)
    # get_signed_cookie returns the default (False) for a missing, expired
    # or tampered cookie, so a bad signature can never pass verification.
    return request.get_signed_cookie(
        cookie_name, False, max_age=max_cookie_age, salt=cookie_salt)
def delete_rmb_cookie(request, response):
    """Expire this user's remember-my-browser cookie on *response*."""
    response.delete_cookie(MFA_COOKIE_PREFIX + str(request.user.pk))
    return response
@login_required
def disable_mfa(request):
    """Turn MFA off for the current user.

    GET renders a confirmation page; POST deletes the ``UserOTP`` record
    (presumably cascading away its recovery codes -- confirm the FK
    on_delete) and expires the remember-my-browser cookie.
    """
    user = request.user
    if not is_mfa_enabled(user):
        # Nothing to disable -- go to the configuration page instead.
        return HttpResponseRedirect(reverse("mfa:configure_mfa"))
    if request.method == "POST":
        user_mfa = user.userotp
        user_mfa.delete()
        messages.success(
            request, "You have successfully disabled multi-factor authentication on your account.")
        response = redirect(reverse('mfa:configure_mfa'))
        # The RMB cookie was signed with a salt derived from the now-deleted
        # secret, but expire it explicitly as well.
        return delete_rmb_cookie(request, response)
    return render(request, 'django_mfa/disable_mfa.html')
@login_required
def verify_second_factor_totp(request):
    """Second-factor verification via TOTP code or recovery code.

    GET renders the code-entry form.  POST accepts either a one-time
    recovery code (consumed on use) or a current TOTP value; on success the
    session is flagged as verified and the user is redirected to ``next``.
    Failed attempts re-render the form with HTTP 400.
    """
    ctx = {}
    if request.method == 'GET':
        ctx['next'] = request.GET.get('next', settings.LOGIN_REDIRECT_URL)
        return render(request, 'django_mfa/verify_second_factor_mfa.html', ctx)
    if request.method == "POST":
        verification_code = request.POST.get('verification_code')
        ctx['next'] = request.POST.get("next", settings.LOGIN_REDIRECT_URL)
        if verification_code is None:
            ctx['error_message'] = "Missing verification code."
        else:
            # Recovery codes take precedence; a matching one is single-use
            # and is deleted immediately.
            user_recovery_codes = UserRecoveryCodes.objects.values_list('secret_code', flat=True).filter(
                user=UserOTP.objects.get(user=request.user.id))
            if verification_code in user_recovery_codes:
                UserRecoveryCodes.objects.filter(user=UserOTP.objects.get(
                    user=request.user.id), secret_code=verification_code).delete()
                is_verified = True
            else:
                # Fall back to time-based verification against the secret.
                otp_ = UserOTP.objects.get(user=request.user)
                totp_ = totp.TOTP(otp_.secret_key)
                is_verified = totp_.verify(verification_code)
            if is_verified:
                request.session['verfied_otp'] = True
                request.session['verfied_u2f'] = True
                response = redirect(request.POST.get(
                    "next", settings.LOGIN_REDIRECT_URL))
                # Optionally mark this browser as trusted (signed cookie).
                return update_rmb_cookie(request, response)
            ctx['error_message'] = "Your code is expired or invalid."
    else:
        # Any other HTTP method falls through to the 400 re-render below.
        ctx['next'] = request.GET.get('next', settings.LOGIN_REDIRECT_URL)
    return render(request, 'django_mfa/verify_second_factor_mfa.html', ctx, status=400)
def generate_user_recovery_codes(user_id):
    """Create and persist 10 fresh 16-character recovery codes for *user_id*.

    Codes are alphanumeric and unique per user, generated with ``secrets``
    (the previous ``random``-based generation was not cryptographically
    strong).  Returns the new codes as a list of strings.
    """
    import secrets  # local import: CSPRNG for security-sensitive codes

    codes_needed = 10
    code_length = 16
    alphabet = string.ascii_uppercase + string.digits + string.ascii_lowercase
    user_otp = UserOTP.objects.get(user=user_id)
    # Fetch the already-issued codes once instead of re-querying per draw.
    existing = set(UserRecoveryCodes.objects.values_list(
        'secret_code', flat=True).filter(user=user_otp))
    recovery_codes_list = []
    while len(recovery_codes_list) < codes_needed:
        code = ''.join(secrets.choice(alphabet) for _ in range(code_length))
        if code in existing:
            continue  # collision with an existing code: draw again
        existing.add(code)
        UserRecoveryCodes.objects.create(user=user_otp, secret_code=code)
        recovery_codes_list.append(code)
    return recovery_codes_list
@login_required
def recovery_codes(request):
    """Show (and lazily create) the user's recovery codes.

    Codes are generated on first visit and re-displayed afterwards.
    NOTE(review): only GET is handled; any other method implicitly returns
    None -- confirm whether that is intended.
    """
    if request.method == "GET":
        if is_mfa_enabled(request.user):
            if UserRecoveryCodes.objects.filter(user=UserOTP.objects.get(user=request.user.id)).exists():
                codes = UserRecoveryCodes.objects.values_list('secret_code', flat=True).filter(
                    user=UserOTP.objects.get(user=request.user.id))
            else:
                # First visit after enabling MFA: mint the codes now.
                codes = generate_user_recovery_codes(request.user.id)
            next_url = settings.LOGIN_REDIRECT_URL
            return render(request, "django_mfa/recovery_codes.html", {"codes": codes, "next_url": next_url})
        else:
            return HttpResponse("please enable twofactor_authentication!")
@login_required
def verify_second_factor(request):
    """Landing page that routes to whichever second-factor method is enabled.

    NOTE(review): implicitly returns None for non-GET requests or when
    neither factor is enabled -- confirm callers always arrive via GET
    with MFA configured.
    """
    if request.method == "GET":
        twofactor_enabled = is_mfa_enabled(request.user)
        u2f_enabled = is_u2f_enabled(request.user)
        if twofactor_enabled or u2f_enabled:
            return render(request, 'django_mfa/verify_second_factor.html', {"u2f_enabled": u2f_enabled, "twofactor_enabled": twofactor_enabled})
@login_required
def recovery_codes_download(request):
    """Serve the user's recovery codes as a plain-text attachment."""
    codes = UserRecoveryCodes.objects.values_list('secret_code', flat=True).filter(
        user=UserOTP.objects.get(user=request.user.id))
    # One code per line, each line newline-terminated.
    body = ''.join(code + "\n" for code in codes)
    response = HttpResponse(body, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename=%s' % 'recovery_codes.txt'
    return response
class AddKeyView(OriginMixin, FormView):
    """Register a new U2F security key for the logged-in user.

    GET starts a U2F registration challenge (stored in the session and
    handed to the template for the browser API); POST completes it and
    persists the resulting key.
    """
    template_name = 'u2f/add_key.html'
    form_class = KeyRegistrationForm
    success_url = reverse_lazy('mfa:u2f_keys')

    def dispatch(self, request, *args, **kwargs):
        return super(AddKeyView, self).dispatch(request, *args, **kwargs)

    def get_form_kwargs(self):
        # The form needs the user, the request and the U2F appId (origin).
        kwargs = super(AddKeyView, self).get_form_kwargs()
        kwargs.update(
            user=self.request.user,
            request=self.request,
            appId=self.get_origin(),
        )
        return kwargs

    def get_context_data(self, **kwargs):
        kwargs = super(AddKeyView, self).get_context_data(**kwargs)
        # Begin registration over all keys already enrolled so the browser
        # can recognise (and refuse to re-register) a known device.
        request = u2f.begin_registration(self.get_origin(), [
            key.to_json() for key in self.request.user.u2f_keys.all()
        ])
        # Keep the challenge server-side; the POST must match exactly.
        self.request.session['u2f_registration_request'] = request
        kwargs['registration_request'] = request
        return kwargs

    def form_valid(self, form):
        request = self.request.session['u2f_registration_request']
        response = form.cleaned_data['response']
        # One-shot challenge: drop it before attempting completion.
        del self.request.session['u2f_registration_request']
        device, attestation_cert = u2f.complete_registration(request, response)
        self.request.user.u2f_keys.create(
            public_key=device['publicKey'],
            key_handle=device['keyHandle'],
            app_id=device['appId'],
        )
        self.request.session['verfied_u2f'] = True
        messages.success(self.request, _("Key added."))
        return super(AddKeyView, self).form_valid(form)

    def get_success_url(self):
        # Honour a safe ?next= redirect, otherwise the class default.
        if 'next' in self.request.GET and is_safe_url(self.request.GET['next']):
            return self.request.GET['next']
        else:
            return super(AddKeyView, self).get_success_url()
class VerifySecondFactorView(OriginMixin, TemplateView):
    """Second login step: verify a U2F key for a pre-authenticated user.

    The first (password) step stores the user's pk and auth backend in the
    session; this view re-loads that user, challenges the security key and
    only then calls ``auth.login``.
    """
    template_name = 'u2f/verify_second_factor_u2f.html'

    @property
    def form_classes(self):
        # Offer only the second-factor methods the user has configured.
        ret = {}
        if self.user.u2f_keys.exists():
            ret['u2f'] = KeyResponseForm
        return ret

    def get_user(self):
        """Re-load the partially-authenticated user from the session.

        Returns None when the session data is missing or names a backend
        that is not in ``AUTHENTICATION_BACKENDS``.
        """
        try:
            user_id = self.request.session['u2f_pre_verify_user_pk']
            backend_path = self.request.session['u2f_pre_verify_user_backend']
            self.request.session['verfied_u2f'] = False
            assert backend_path in settings.AUTHENTICATION_BACKENDS
            backend = load_backend(backend_path)
            user = backend.get_user(user_id)
            if user is not None:
                user.backend = backend_path
            return user
        except (KeyError, AssertionError):
            return None

    def dispatch(self, request, *args, **kwargs):
        self.user = self.get_user()
        if self.user is None:
            # No pending first-factor login: restart at the login page.
            return HttpResponseRedirect(settings.LOGIN_URL)
        return super(VerifySecondFactorView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # POST field 'type' selects which second-factor form was submitted.
        forms = self.get_forms()
        form = forms[request.POST['type']]
        if form.is_valid():
            return self.form_valid(form, forms)
        else:
            return self.form_invalid(forms)

    def form_invalid(self, forms):
        return self.render_to_response(self.get_context_data(
            forms=forms,
        ))

    def get_form_kwargs(self):
        return {
            'user': self.user,
            'request': self.request,
            'appId': self.get_origin(),
        }

    def get_forms(self):
        """Instantiate every available form, binding POST data only to the
        one that was actually submitted."""
        kwargs = self.get_form_kwargs()
        if self.request.method == 'GET':
            forms = {key: form(**kwargs)
                     for key, form in self.form_classes.items()}
        else:
            method = self.request.POST['type']
            forms = {
                key: form(**kwargs)
                for key, form in self.form_classes.items()
                if key != method
            }
            forms[method] = self.form_classes[method](
                self.request.POST, **kwargs)
        return forms

    def get_context_data(self, **kwargs):
        if 'forms' not in kwargs:
            kwargs['forms'] = self.get_forms()
        kwargs = super(VerifySecondFactorView, self).get_context_data(**kwargs)
        # Reuse the admin chrome when reached from the admin login.
        if self.request.GET.get('admin'):
            kwargs['base_template'] = 'admin/base_site.html'
        else:
            kwargs['base_template'] = 'u2f_base.html'
        kwargs['user'] = self.user
        return kwargs

    def form_valid(self, form, forms):
        if not form.validate_second_factor():
            return self.form_invalid(forms)
        # Both factors passed: promote to a fully logged-in session.
        del self.request.session['u2f_pre_verify_user_pk']
        del self.request.session['u2f_pre_verify_user_backend']
        self.request.session['verfied_otp'] = True
        self.request.session['verfied_u2f'] = True
        auth.login(self.request, self.user)
        redirect_to = self.request.POST.get(auth.REDIRECT_FIELD_NAME,
                                            self.request.GET.get(auth.REDIRECT_FIELD_NAME, ''))
        # Open-redirect guard on the ?next= target.
        if not is_safe_url(url=redirect_to, allowed_hosts=self.request.get_host()):
            redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
        return HttpResponseRedirect(redirect_to)
class KeyManagementView(ListView):
    """List the current user's registered U2F keys and handle key deletion."""
    template_name = 'u2f/key_list.html'

    def get_queryset(self):
        # Scope every operation to keys owned by the requesting user.
        return self.request.user.u2f_keys.all()

    def post(self, request):
        # The key-list template only ever posts deletions.
        assert 'delete' in self.request.POST
        key = get_object_or_404(self.get_queryset(), pk=self.request.POST['key_id'])
        key.delete()
        # Re-query: if that was the last key, U2F is effectively disabled.
        if self.get_queryset().exists():
            notice = _("Key removed.")
        else:
            notice = _("Key removed. Two-factor auth disabled.")
        messages.success(request, notice)
        return HttpResponseRedirect(reverse('mfa:u2f_keys'))
# URL-routable callables. Key management requires an authenticated user;
# second-factor verification runs before auth.login completes, so it is not wrapped.
add_key = login_required(AddKeyView.as_view())
verify_second_factor_u2f = VerifySecondFactorView.as_view()
keys = login_required(KeyManagementView.as_view())
| 37.707944 | 195 | 0.656546 | import base64
import codecs
import random
import hashlib
import re
import secrets
import string
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, resolve_url, get_object_or_404
from django_mfa.models import *
from . import totp
from django.views.generic import FormView, ListView, TemplateView
from django.contrib.auth import load_backend
from django.contrib import auth, messages
from django.urls import reverse, reverse_lazy
from django.utils.http import is_safe_url
from django.utils.translation import ugettext as _
from u2flib_server import u2f
from .forms import *
class OriginMixin(object):
    """Mixin for views that need the request origin (used as the U2F appId)."""

    def get_origin(self):
        # Origin is "<scheme>://<host>", e.g. "https://example.com:8443".
        request = self.request
        return '{scheme}://{host}'.format(scheme=request.scheme,
                                          host=request.get_host())
@login_required
def security_settings(request):
    """Render the security overview page: MFA/U2F status plus backup codes."""
    twofactor_enabled = is_mfa_enabled(request.user)
    u2f_enabled = is_u2f_enabled(request.user)
    # .first() returns None when the user has no UserOTP row; the filter then
    # matches nothing, yielding an empty backup-code list.
    backup_codes = UserRecoveryCodes.objects.filter(
        user=UserOTP.objects.filter(user=request.user).first()).all()
    return render(request, 'django_mfa/security.html', {"prev_url": settings.LOGIN_REDIRECT_URL, "backup_codes": backup_codes, "u2f_enabled": u2f_enabled, "twofactor_enabled": twofactor_enabled})
@login_required
def configure_mfa(request):
    """Show the TOTP provisioning page.

    On POST, generate a fresh base32 shared secret and the matching
    otpauth:// provisioning URI (rendered as a QR code by the template).
    On GET, render the page without a secret.
    """
    qr_code = None
    base_32_secret_utf8 = None
    if request.method == "POST":
        # 80 bits from the OS CSPRNG. The previous implementation used
        # random.getrandbits(), which is predictable and unsuitable for
        # generating an authentication secret.
        base_32_secret = base64.b32encode(secrets.token_bytes(10))
        base_32_secret_utf8 = base_32_secret.decode("utf-8")
        totp_obj = totp.TOTP(base_32_secret_utf8)
        try:
            issuer_name = settings.MFA_ISSUER_NAME
        except AttributeError:
            # Optional setting: omit the issuer when it is not configured.
            issuer_name = None
        # Strip base32 '=' padding, which some authenticator apps reject.
        qr_code = re.sub(
            r'=+$', '',
            totp_obj.provisioning_uri(request.user.username, issuer_name=issuer_name))
    return render(request, 'django_mfa/configure.html',
                  {"qr_code": qr_code, "secret_key": base_32_secret_utf8})
@login_required
def enable_mfa(request):
    """Verify the user's first TOTP code and persist the shared secret.

    POST: check the submitted verification code against the submitted secret;
    on success create the UserOTP row and redirect to the recovery codes page.
    GET: fall through and render the configure page.
    """
    user = request.user
    if is_mfa_enabled(user):
        # Already enabled — send to the disable flow instead.
        return HttpResponseRedirect(reverse("mfa:disable_mfa"))
    qr_code = None
    base_32_secret = None
    is_verified = False
    if request.method == "POST":
        base_32_secret = request.POST['secret_key']
        totp_obj = totp.TOTP(request.POST['secret_key'])
        is_verified = totp_obj.verify(request.POST["verification_code"])
        if is_verified:
            # NOTE(review): 'verfied_otp' is a misspelled session key used
            # consistently across this package — keep as-is.
            request.session['verfied_otp'] = True
            UserOTP.objects.get_or_create(otp_type=request.POST["otp_type"],
                                          user=request.user,
                                          secret_key=request.POST['secret_key'])
            messages.success(
                request, "You have successfully enabled multi-factor authentication on your account.")
            response = redirect(reverse("mfa:recovery_codes"))
            return response
    else:
        # NOTE(review): on GET, base_32_secret is still None here, so this
        # builds totp.TOTP(None) — presumably tolerated by the local totp
        # module; confirm before relying on the GET branch.
        totp_obj = totp.TOTP(base_32_secret)
        qr_code = totp_obj.provisioning_uri(request.user.email)
    return render(request, 'django_mfa/configure.html', {"is_verified": is_verified, "qr_code": qr_code, "secret_key": base_32_secret})
def _generate_cookie_salt(user):
    """Derive a per-user cookie-signing salt from half of the TOTP secret.

    Returns the SHA-256 hex digest of the first half of the user's secret
    key, or None when the user has no UserOTP row.
    """
    try:
        user_otp = UserOTP.objects.get(user=user)
    except UserOTP.DoesNotExist:
        return None
    secret = user_otp.secret_key
    # Only half the secret feeds the salt, so the cookie signature never
    # depends on the full TOTP secret.
    half_secret = secret[:len(secret) // 2]
    return hashlib.sha256(half_secret.encode("utf-8")).hexdigest()
# Name prefix for the per-user "remember my browser" cookie; the user pk is appended.
MFA_COOKIE_PREFIX = "RMB_"
def update_rmb_cookie(request, response):
    """Attach the signed "remember my browser" cookie to *response*.

    No-op unless both MFA_REMEMBER_MY_BROWSER and MFA_REMEMBER_DAYS are
    configured. Returns the (possibly modified) response.
    """
    try:
        remember_my_browser = settings.MFA_REMEMBER_MY_BROWSER
        remember_days = settings.MFA_REMEMBER_DAYS
    except AttributeError:
        # Previously a bare `except:` — only "setting not defined" should
        # disable the feature; anything else must surface.
        remember_my_browser = False
    if remember_my_browser:
        cookie_name = MFA_COOKIE_PREFIX + str(request.user.pk)
        # NOTE(review): _generate_cookie_salt can return None for users
        # without a UserOTP row — confirm callers only reach here post-MFA.
        cookie_salt = _generate_cookie_salt(request.user)
        response.set_signed_cookie(cookie_name, True, salt=cookie_salt, max_age=remember_days * 24 * 3600,
                                   secure=(not settings.DEBUG), httponly=True)
    return response
def verify_rmb_cookie(request):
    """Check the signed "remember my browser" cookie for the current user.

    Returns the cookie value (True) when present, unexpired, and correctly
    signed with the per-user salt; False otherwise or when the feature is
    not configured.
    """
    try:
        remember_my_browser = settings.MFA_REMEMBER_MY_BROWSER
        max_cookie_age = settings.MFA_REMEMBER_DAYS * 24 * 3600
    except AttributeError:
        # Previously a bare `except:` — only missing settings mean "feature off".
        return False
    if not remember_my_browser:
        return False
    cookie_name = MFA_COOKIE_PREFIX + str(request.user.pk)
    cookie_salt = _generate_cookie_salt(request.user)
    # get_signed_cookie returns the default (False) on a missing, tampered,
    # or expired cookie because a default is supplied.
    cookie_value = request.get_signed_cookie(
        cookie_name, False, max_age=max_cookie_age, salt=cookie_salt)
    return cookie_value
def delete_rmb_cookie(request, response):
    """Drop this user's "remember my browser" cookie from *response*."""
    response.delete_cookie(MFA_COOKIE_PREFIX + str(request.user.pk))
    return response
@login_required
def disable_mfa(request):
    """Turn off TOTP MFA: delete the UserOTP row and the remember-me cookie."""
    user = request.user
    if not is_mfa_enabled(user):
        # Nothing to disable — send to the setup flow.
        return HttpResponseRedirect(reverse("mfa:configure_mfa"))
    if request.method == "POST":
        user_mfa = user.userotp
        user_mfa.delete()
        messages.success(
            request, "You have successfully disabled multi-factor authentication on your account.")
        response = redirect(reverse('mfa:configure_mfa'))
        # The remember-me cookie is salted with the (now deleted) secret.
        return delete_rmb_cookie(request, response)
    # GET: show the confirmation page.
    return render(request, 'django_mfa/disable_mfa.html')
@login_required
def verify_second_factor_totp(request):
    """Second login step for TOTP users.

    GET renders the code-entry form. POST accepts either a current TOTP code
    or a one-shot recovery code; on success it marks the session verified,
    optionally sets the remember-me cookie, and redirects to "next".
    Failed attempts re-render the form with HTTP 400.
    """
    ctx = {}
    if request.method == 'GET':
        ctx['next'] = request.GET.get('next', settings.LOGIN_REDIRECT_URL)
        return render(request, 'django_mfa/verify_second_factor_mfa.html', ctx)
    if request.method == "POST":
        verification_code = request.POST.get('verification_code')
        ctx['next'] = request.POST.get("next", settings.LOGIN_REDIRECT_URL)
        if verification_code is None:
            ctx['error_message'] = "Missing verification code."
        else:
            user_recovery_codes = UserRecoveryCodes.objects.values_list('secret_code', flat=True).filter(
                user=UserOTP.objects.get(user=request.user.id))
            if verification_code in user_recovery_codes:
                # Recovery codes are single use: consume on success.
                UserRecoveryCodes.objects.filter(user=UserOTP.objects.get(
                    user=request.user.id), secret_code=verification_code).delete()
                is_verified = True
            else:
                # Fall back to validating as a time-based OTP code.
                otp_ = UserOTP.objects.get(user=request.user)
                totp_ = totp.TOTP(otp_.secret_key)
                is_verified = totp_.verify(verification_code)
            if is_verified:
                # NOTE(review): 'verfied_*' is the misspelled-but-canonical
                # session key spelling used throughout this package.
                request.session['verfied_otp'] = True
                request.session['verfied_u2f'] = True
                response = redirect(request.POST.get(
                    "next", settings.LOGIN_REDIRECT_URL))
                return update_rmb_cookie(request, response)
            ctx['error_message'] = "Your code is expired or invalid."
    else:
        ctx['next'] = request.GET.get('next', settings.LOGIN_REDIRECT_URL)
    # Any fall-through is a failed verification.
    return render(request, 'django_mfa/verify_second_factor_mfa.html', ctx, status=400)
def generate_user_recovery_codes(user_id):
    """Create, persist, and return 10 unique 16-character recovery codes.

    Codes are alphanumeric (upper, lower, digits). Each accepted code is
    stored in UserRecoveryCodes for the user's UserOTP row.
    """
    no_of_recovery_codes = 10
    size_of_recovery_code = 16
    recovery_codes_list = []
    chars = string.ascii_uppercase + string.digits + string.ascii_lowercase
    # Resolve the owning UserOTP row once, not twice per loop iteration.
    user_otp = UserOTP.objects.get(user=user_id)
    while no_of_recovery_codes > 0:
        # Recovery codes are credentials: use the secrets CSPRNG rather
        # than the predictable random module.
        code = ''.join(secrets.choice(chars)
                       for _ in range(size_of_recovery_code))
        existing_codes = UserRecoveryCodes.objects.values_list(
            'secret_code', flat=True).filter(user=user_otp)
        if code not in existing_codes:
            no_of_recovery_codes -= 1
            UserRecoveryCodes.objects.create(user=user_otp, secret_code=code)
            recovery_codes_list.append(code)
    return recovery_codes_list
@login_required
def recovery_codes(request):
    """Show the user's recovery codes, generating them on first visit.

    Only handles GET; MFA must already be enabled. NOTE(review): non-GET
    requests (and, with MFA enabled, nothing else) fall through and return
    None — confirm the URLconf only routes GET here.
    """
    if request.method == "GET":
        if is_mfa_enabled(request.user):
            if UserRecoveryCodes.objects.filter(user=UserOTP.objects.get(user=request.user.id)).exists():
                # Codes already issued: re-display the existing set.
                codes = UserRecoveryCodes.objects.values_list('secret_code', flat=True).filter(
                    user=UserOTP.objects.get(user=request.user.id))
            else:
                codes = generate_user_recovery_codes(request.user.id)
            next_url = settings.LOGIN_REDIRECT_URL
            return render(request, "django_mfa/recovery_codes.html", {"codes": codes, "next_url": next_url})
        else:
            return HttpResponse("please enable twofactor_authentication!")
@login_required
def verify_second_factor(request):
    """Render the second-factor chooser (TOTP and/or U2F).

    NOTE(review): when neither factor is enabled, or on non-GET requests,
    this falls through and returns None — confirm routing guarantees GET
    with at least one factor enabled.
    """
    if request.method == "GET":
        twofactor_enabled = is_mfa_enabled(request.user)
        u2f_enabled = is_u2f_enabled(request.user)
        if twofactor_enabled or u2f_enabled:
            return render(request, 'django_mfa/verify_second_factor.html', {"u2f_enabled": u2f_enabled, "twofactor_enabled": twofactor_enabled})
@login_required
def recovery_codes_download(request):
    """Send the user's recovery codes as a plain-text attachment, one per line."""
    codes = UserRecoveryCodes.objects.values_list('secret_code', flat=True).filter(
        user=UserOTP.objects.get(user=request.user.id))
    # Same byte stream as before: each code followed by a newline.
    body = ''.join('%s\n' % code for code in codes)
    response = HttpResponse(body, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename=%s' % 'recovery_codes.txt'
    return response
class AddKeyView(OriginMixin, FormView):
    """Register a new U2F key for the logged-in user."""
    template_name = 'u2f/add_key.html'
    form_class = KeyRegistrationForm
    success_url = reverse_lazy('mfa:u2f_keys')

    def dispatch(self, request, *args, **kwargs):
        return super(AddKeyView, self).dispatch(request, *args, **kwargs)

    def get_form_kwargs(self):
        # The registration form needs the user, the request, and the U2F
        # appId (this site's origin).
        kwargs = super(AddKeyView, self).get_form_kwargs()
        kwargs.update(
            user=self.request.user,
            request=self.request,
            appId=self.get_origin(),
        )
        return kwargs

    def get_context_data(self, **kwargs):
        kwargs = super(AddKeyView, self).get_context_data(**kwargs)
        # Begin a registration challenge that excludes already-registered
        # keys; stash it so form_valid can complete the same challenge.
        request = u2f.begin_registration(self.get_origin(), [
            key.to_json() for key in self.request.user.u2f_keys.all()
        ])
        self.request.session['u2f_registration_request'] = request
        kwargs['registration_request'] = request
        return kwargs

    def form_valid(self, form):
        """Complete registration against the stored challenge and save the key."""
        request = self.request.session['u2f_registration_request']
        response = form.cleaned_data['response']
        # The challenge is single-use: remove it before completing.
        del self.request.session['u2f_registration_request']
        device, attestation_cert = u2f.complete_registration(request, response)
        self.request.user.u2f_keys.create(
            public_key=device['publicKey'],
            key_handle=device['keyHandle'],
            app_id=device['appId'],
        )
        # NOTE(review): misspelled-but-canonical session key in this package.
        self.request.session['verfied_u2f'] = True
        messages.success(self.request, _("Key added."))
        return super(AddKeyView, self).form_valid(form)

    def get_success_url(self):
        # NOTE(review): is_safe_url is called without allowed_hosts here;
        # on newer Django versions that argument is required — confirm the
        # supported Django range.
        if 'next' in self.request.GET and is_safe_url(self.request.GET['next']):
            return self.request.GET['next']
        else:
            return super(AddKeyView, self).get_success_url()
class VerifySecondFactorView(OriginMixin, TemplateView):
    """Second step of login: verify a U2F response for a pre-authenticated user.

    The first login step stores the candidate user's pk and auth backend in
    the session; ``auth.login`` only runs after :meth:`form_valid` succeeds.
    """
    template_name = 'u2f/verify_second_factor_u2f.html'

    @property
    def form_classes(self):
        # One form per available second-factor method; U2F only, and only
        # if the user has registered keys.
        ret = {}
        if self.user.u2f_keys.exists():
            ret['u2f'] = KeyResponseForm
        return ret

    def get_user(self):
        """Resolve the user stashed by the first login step, or None."""
        try:
            user_id = self.request.session['u2f_pre_verify_user_pk']
            backend_path = self.request.session['u2f_pre_verify_user_backend']
            # Reset the verified flag for this attempt.
            self.request.session['verfied_u2f'] = False
            assert backend_path in settings.AUTHENTICATION_BACKENDS
            backend = load_backend(backend_path)
            user = backend.get_user(user_id)
            if user is not None:
                # auth.login needs user.backend set when called later.
                user.backend = backend_path
            return user
        except (KeyError, AssertionError):
            return None

    def dispatch(self, request, *args, **kwargs):
        # Without a pre-verified user there is nothing to verify: restart login.
        self.user = self.get_user()
        if self.user is None:
            return HttpResponseRedirect(settings.LOGIN_URL)
        return super(VerifySecondFactorView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # 'type' selects which second-factor form is being submitted.
        forms = self.get_forms()
        form = forms[request.POST['type']]
        if form.is_valid():
            return self.form_valid(form, forms)
        else:
            return self.form_invalid(forms)

    def form_invalid(self, forms):
        return self.render_to_response(self.get_context_data(
            forms=forms,
        ))

    def get_form_kwargs(self):
        # Common kwargs for all second-factor forms; appId is the U2F origin.
        return {
            'user': self.user,
            'request': self.request,
            'appId': self.get_origin(),
        }

    def get_forms(self):
        """Build one form per method; on POST only the submitted one is bound."""
        kwargs = self.get_form_kwargs()
        if self.request.method == 'GET':
            forms = {key: form(**kwargs)
                     for key, form in self.form_classes.items()}
        else:
            method = self.request.POST['type']
            forms = {
                key: form(**kwargs)
                for key, form in self.form_classes.items()
                if key != method
            }
            forms[method] = self.form_classes[method](
                self.request.POST, **kwargs)
        return forms

    def get_context_data(self, **kwargs):
        if 'forms' not in kwargs:
            kwargs['forms'] = self.get_forms()
        kwargs = super(VerifySecondFactorView, self).get_context_data(**kwargs)
        # ?admin=1 renders inside the Django admin chrome.
        if self.request.GET.get('admin'):
            kwargs['base_template'] = 'admin/base_site.html'
        else:
            kwargs['base_template'] = 'u2f_base.html'
        kwargs['user'] = self.user
        return kwargs

    def form_valid(self, form, forms):
        """Complete login once the second factor validates."""
        if not form.validate_second_factor():
            return self.form_invalid(forms)
        # One-shot session state from step one is consumed here.
        del self.request.session['u2f_pre_verify_user_pk']
        del self.request.session['u2f_pre_verify_user_backend']
        # NOTE(review): 'verfied_*' is the misspelled-but-canonical session
        # key spelling used throughout this package — do not rename here.
        self.request.session['verfied_otp'] = True
        self.request.session['verfied_u2f'] = True
        auth.login(self.request, self.user)
        redirect_to = self.request.POST.get(auth.REDIRECT_FIELD_NAME,
            self.request.GET.get(auth.REDIRECT_FIELD_NAME, ''))
        # Guard against open redirects via the "next" parameter.
        if not is_safe_url(url=redirect_to, allowed_hosts=self.request.get_host()):
            redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
        return HttpResponseRedirect(redirect_to)
class KeyManagementView(ListView):
    """List the current user's registered U2F keys and handle key deletion."""
    template_name = 'u2f/key_list.html'

    def get_queryset(self):
        # Scope every operation to keys owned by the requesting user.
        return self.request.user.u2f_keys.all()

    def post(self, request):
        # The key-list template only ever posts deletions.
        assert 'delete' in self.request.POST
        key = get_object_or_404(self.get_queryset(),
                                pk=self.request.POST['key_id'])
        key.delete()
        # Re-query: if that was the last key, U2F is effectively disabled.
        if self.get_queryset().exists():
            messages.success(request, _("Key removed."))
        else:
            messages.success(request, _(
                "Key removed. Two-factor auth disabled."))
        return HttpResponseRedirect(reverse('mfa:u2f_keys'))
# URL-routable callables. Key management requires an authenticated user;
# second-factor verification runs before auth.login completes, so it is not wrapped.
add_key = login_required(AddKeyView.as_view())
verify_second_factor_u2f = VerifySecondFactorView.as_view()
keys = login_required(KeyManagementView.as_view())
| true | true |
1c2e117b00b2db71fc25635adafe1b717fcbdc66 | 664 | py | Python | mod/data_process/__init__.py | Ulti-Dreisteine/data-information-measurement | 9ef777c28534867d07d9ab1a1b95d69a385043f1 | [
"MIT"
] | 1 | 2021-12-17T13:51:11.000Z | 2021-12-17T13:51:11.000Z | mod/data_process/__init__.py | Ulti-Dreisteine/data-information-measurement | 9ef777c28534867d07d9ab1a1b95d69a385043f1 | [
"MIT"
] | null | null | null | mod/data_process/__init__.py | Ulti-Dreisteine/data-information-measurement | 9ef777c28534867d07d9ab1a1b95d69a385043f1 | [
"MIT"
] | 1 | 2021-12-12T12:38:36.000Z | 2021-12-12T12:38:36.000Z | # -*- coding: utf-8 -*-
"""
Created on 2021/02/27 17:03:09
@File -> __init__.py
@Author: luolei
@Email: dreisteine262@163.com
@Describe: 初始化
"""
__all__ = ['search_nearest_neighbors_in_list']
import bisect
def search_nearest_neighbors_in_list(lst, x):
    """Find the neighboring value(s) of ``x`` in a sorted list.

    :param lst: list, must be sorted in ascending order
    :param x: float
    :return: list -- ``[x]`` if ``x`` is present, a single boundary value
        when ``x`` falls outside ``lst``, otherwise
        ``[left_neighbor, right_neighbor]``
    """
    # A single O(log n) bisection replaces the previous O(n) `x in lst`
    # membership scan; bisect_left returns the insertion point, and an
    # equal element (if any) sits exactly at that index.
    idx = bisect.bisect_left(lst, x)
    if idx < len(lst) and lst[idx] == x:
        return [x]
    if idx == 0:
        return [lst[0]]
    if idx == len(lst):
        return [lst[-1]]
    return [lst[idx - 1], lst[idx]]
return neighbors | 17.945946 | 58 | 0.661145 |
__all__ = ['search_nearest_neighbors_in_list']
import bisect
def search_nearest_neighbors_in_list(lst, x):
    """Return x's neighboring value(s) in the sorted list lst.

    [x] if x is present; a single boundary element when x is outside the
    list's range; otherwise [left_neighbor, right_neighbor].
    """
    if x in lst:
        return [x]
    if x <= lst[0]:
        return [lst[0]]
    if x >= lst[-1]:
        return [lst[-1]]
    # x lies strictly between two elements; since x is not in lst,
    # bisect_left yields the index of the right-hand neighbor.
    right_idx = bisect.bisect_left(lst, x)
    return [lst[right_idx - 1], lst[right_idx]]
1c2e1363d0cbbeedc29abaa18b06b0b3d68bc7b4 | 2,231 | py | Python | carla_utils/agents/vehicle_model.py | IamWangYunKai/DG-TrajGen | 0a8aab7e1c05111a5afe43d53801c55942e9ff56 | [
"MIT"
] | 31 | 2021-09-15T00:43:43.000Z | 2022-03-27T22:57:21.000Z | carla_utils/agents/vehicle_model.py | zhangdongkun98/carla-utils | a370db53589841c8cffe95c8df43dfc036176431 | [
"MIT"
] | 1 | 2021-12-09T03:08:13.000Z | 2021-12-15T07:08:31.000Z | carla_utils/agents/vehicle_model.py | zhangdongkun98/carla-utils | a370db53589841c8cffe95c8df43dfc036176431 | [
"MIT"
] | 2 | 2021-11-26T05:45:18.000Z | 2022-01-19T12:46:41.000Z |
import numpy as np
import torch
import torch.nn as nn
from ..basic import pi2pi_numpy, pi2pi_tensor
from ..augment import State
class RealModel(object):
    """Base vehicle model: instances are callable, mirroring nn.Module.

    ``forward`` hands the control command straight to the simulator vehicle.
    """

    def __call__(self, *args, **kwargs):
        # Calling the instance is a pure pass-through to forward().
        return self.forward(*args, **kwargs)

    def forward(self, vehicle, control):
        # Apply the control to the vehicle; returns None.
        vehicle.apply_control(control)
class BicycleModel2D(RealModel):
    """Kinematic bicycle model advanced with explicit Euler integration."""

    def __init__(self, dt, wheelbase):
        self.dt, self.wheelbase = dt, wheelbase

    def forward(self, state: State, action):
        """Advance ``state`` one step under ``action = (accel, steer)``."""
        accel, steer = action[0], action[1]
        dt = self.dt
        # All derivatives are evaluated at the current state (explicit Euler).
        heading = state.theta + dt * state.v * np.tan(steer) / self.wheelbase
        return State(
            x=state.x + dt * state.v * np.cos(state.theta),
            y=state.y + dt * state.v * np.sin(state.theta),
            theta=pi2pi_numpy(heading),
            v=state.v + dt * accel,
        )
class BicycleModel2DParallel(nn.Module):
    """Batched kinematic bicycle model: one explicit Euler step per call."""

    def __init__(self, dt, wheelbase):
        super(BicycleModel2DParallel, self).__init__()
        self.dt, self.wheelbase = dt, wheelbase

    def forward(self, state: torch.Tensor, action: torch.Tensor):
        """
        Args:
            state: (x, y, theta, v), torch.Size([batch_size, dim_state]
            action: (a, steer), torch.Size([batch_size, dim_action])
        """
        accel = action[:, 0]
        steer = action[:, 1]
        x, y, theta, v = state[:, 0], state[:, 1], state[:, 2], state[:, 3]
        dt = self.dt
        new_x = x + dt * v * torch.cos(theta)
        new_y = y + dt * v * torch.sin(theta)
        # Heading is wrapped back into (-pi, pi] after the update.
        new_theta = pi2pi_tensor(theta + dt * v * torch.tan(steer) / self.wheelbase)
        new_v = v + dt * accel
        return torch.stack([new_x, new_y, new_theta, new_v], dim=1)
class SteerModel(RealModel):
    """First-order lag (exponential smoothing) on the steering command,
    with a one-step output delay: each call returns the filter state from
    the previous call.

    alpha = 0.0 reproduces the input one step late; alpha closer to 1.0
    gives a slower, more heavily filtered response.
    """

    def __init__(self, dt, alpha=0.0):
        self.dt = dt
        self.xk, self.y = 0.0, 0.0  # filter state and delayed output
        self.alpha = alpha

    def forward(self, u):
        """
        u: normalized control
        """
        # Emit the previous filter state, then low-pass update toward u.
        # (The original had an unreachable `return self.xk` after this
        # return plus a commented-out noise experiment; both removed.)
        self.y = self.xk
        alpha = self.alpha
        self.xk = alpha * self.xk + (1 - alpha) * u
        return self.y
| 28.602564 | 93 | 0.549978 |
import numpy as np
import torch
import torch.nn as nn
from ..basic import pi2pi_numpy, pi2pi_tensor
from ..augment import State
class RealModel(object):
    # Base vehicle model. __call__ mirrors nn.Module so instances can be
    # invoked like functions; forward applies the control to the vehicle.
    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)
    def forward(self, vehicle, control):
        # Hands the control command to the simulator vehicle; returns None.
        vehicle.apply_control(control)
class BicycleModel2D(RealModel):
    """Kinematic bicycle model advanced with one explicit Euler step per call."""
    def __init__(self, dt, wheelbase):
        self.dt, self.wheelbase = dt, wheelbase
    def forward(self, state: State, action):
        # action = (acceleration, steering angle); derivatives use the
        # current state (explicit Euler); heading wrapped into (-pi, pi].
        a, steer = action[0], action[1]
        x, y, theta, v = state.x, state.y, state.theta, state.v
        next_state = State(
            x=x + self.dt *v * np.cos(theta),
            y=y + self.dt *v * np.sin(theta),
            theta=pi2pi_numpy(theta + self.dt * v * np.tan(steer) / self.wheelbase),
            v=v + self.dt *a,
        )
        return next_state
class BicycleModel2DParallel(nn.Module):
    """Batched kinematic bicycle model (one explicit Euler step per call).

    forward expects state columns (x, y, theta, v) and action columns
    (a, steer), each of shape [batch_size, dim].
    """
    def __init__(self, dt, wheelbase):
        super(BicycleModel2DParallel, self).__init__()
        self.dt, self.wheelbase = dt, wheelbase
    def forward(self, state: torch.Tensor, action: torch.Tensor):
        a, steer = action[:,0], action[:,1]
        x, y, theta, v = state[:,0], state[:,1], state[:,2], state[:,3]
        # Heading is wrapped back into (-pi, pi] after the update.
        next_state = torch.stack([
            x + self.dt *v * torch.cos(theta),
            y + self.dt *v * torch.sin(theta),
            pi2pi_tensor(theta + self.dt * v * torch.tan(steer) / self.wheelbase),
            v + self.dt *a,
        ], dim=1)
        return next_state
class SteerModel(RealModel):
    """First-order lag filter on the steering command with a one-step
    output delay: each call returns the state from the previous call."""
    def __init__(self, dt, alpha=0.0):
        self.dt = dt
        self.xk, self.y = 0.0, 0.0  # filter state and delayed output
        self.alpha = alpha
    def forward(self, u):
        # Emit the previous filter state, then low-pass update toward u.
        # (Removed the unreachable `return self.xk` after the return.)
        self.y = self.xk
        alpha = self.alpha
        self.xk = alpha * self.xk + (1 - alpha) * u
        return self.y
| true | true |
1c2e139b9074398a0d52513986701c7d155a28ec | 10 | py | Python | test/orm/__init__.py | vollov/py-lab | 0a1a3c93c5decaa5246fab981bcc2563cc42c6d0 | [
"MIT"
] | null | null | null | test/orm/__init__.py | vollov/py-lab | 0a1a3c93c5decaa5246fab981bcc2563cc42c6d0 | [
"MIT"
] | null | null | null | test/orm/__init__.py | vollov/py-lab | 0a1a3c93c5decaa5246fab981bcc2563cc42c6d0 | [
"MIT"
] | null | null | null | import orm | 10 | 10 | 0.9 | import orm | true | true |
1c2e14776c2596c0ba1de38b0ccc53066ea2ca9a | 431 | py | Python | python/testData/hierarchy/call/Static/Constructor/main.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/hierarchy/call/Static/Constructor/main.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/hierarchy/call/Static/Constructor/main.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | class A():
def __init__(self):
invoke1(self)
invoke2(self)
def method1(self):
pass
def method2(self):
pass
def invoke1(p):
p.method1()
def invoke2(p):
p.method2()
def invokeA():
a = A()
a.method1()
a.method2()
def new_class_func():
class C():
def bar(self):
invokeA(A())
return C()
a = A()
A.__init_<caret>_(a) | 13.46875 | 28 | 0.487239 | class A():
def __init__(self):
invoke1(self)
invoke2(self)
def method1(self):
pass
def method2(self):
pass
def invoke1(p):
p.method1()
def invoke2(p):
p.method2()
def invokeA():
a = A()
a.method1()
a.method2()
def new_class_func():
class C():
def bar(self):
invokeA(A())
return C()
a = A()
A.__init_<caret>_(a) | true | true |
1c2e157292b3ec5293e01f313fef45c0f7400d12 | 5,989 | py | Python | tensorflow/python/distribute/cross_device_utils_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/distribute/cross_device_utils_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/distribute/cross_device_utils_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cross_device_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
def _assert_values_equal(self, left, right):
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
@test_util.run_in_graph_and_eager_modes
def testAggregateTensors(self):
t0 = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]])
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])
self._assert_values_equal(total, result)
@test_util.run_in_graph_and_eager_modes
def testAggregateIndexedSlices(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])
self.assertIsInstance(result, ops.IndexedSlices)
self._assert_values_equal(total, result)
@test_util.run_in_graph_and_eager_modes
def testDivideTensor(self):
t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
n = 2
expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])
result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)
self._assert_values_equal(expected, result)
@test_util.run_in_graph_and_eager_modes
def testDivideIndexedSlices(self):
t = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
n = 2
expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])
result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)
self.assertIsInstance(result, ops.IndexedSlices)
self._assert_values_equal(expected, result)
@test_util.run_in_graph_and_eager_modes
def testIsIndexedSlices(self):
t = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
self.assertTrue(cross_device_utils.contains_indexed_slices(t))
@test_util.run_in_graph_and_eager_modes
def testContainsIndexedSlices_List(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
self.assertTrue(cross_device_utils.contains_indexed_slices([t0, t1]))
@test_util.run_in_graph_and_eager_modes
def testContainsIndexedSlices_Tuple(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
self.assertTrue(cross_device_utils.contains_indexed_slices((t0, t1)))
@test_util.run_in_graph_and_eager_modes
def testContainsIndexedSlices_PerReplica(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
device_map = value_lib.ReplicaDeviceMap(("/gpu:0", "/cpu:0"))
per_replica = value_lib.PerReplica(device_map, (t0, t1))
self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica))
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
required_gpus=1))
def testCopyTensor(self):
with ops.device("/cpu:0"):
t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
destination = "/gpu:0"
result = cross_device_utils.copy_tensor_or_indexed_slices_to_device(
t, destination)
self._assert_values_equal(t, result)
self.assertEqual(device_util.resolve(destination),
device_util.resolve(result.device))
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
required_gpus=1))
def testCopyIndexedSlices(self):
with ops.device("/cpu:0"):
t = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
destination = "/gpu:0"
result = cross_device_utils.copy_tensor_or_indexed_slices_to_device(
t, destination)
self.assertIsInstance(result, ops.IndexedSlices)
self._assert_values_equal(t, result)
self.assertEqual(device_util.resolve(destination),
device_util.resolve(result.device))
if __name__ == "__main__":
test.main()
| 41.881119 | 81 | 0.68008 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase):
  """Tests for the IndexedSlices helpers in cross_device_utils."""

  def _assert_values_equal(self, left, right):
    # Densify both sides first so Tensors and IndexedSlices compare alike.
    self.assertAllEqual(
        self.evaluate(ops.convert_to_tensor(left)),
        self.evaluate(ops.convert_to_tensor(right)))

  @test_util.run_in_graph_and_eager_modes
  def testAggregateTensors(self):
    # Dense inputs: aggregation is an elementwise sum (t0 + t1 == total).
    t0 = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
    t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]])
    total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
    result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])
    self._assert_values_equal(total, result)

  @test_util.run_in_graph_and_eager_modes
  def testAggregateIndexedSlices(self):
    # Sparse inputs: the result must stay an IndexedSlices and sum correctly.
    t0 = math_ops._as_indexed_slices(
        constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    t1 = math_ops._as_indexed_slices(
        constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
    total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
    result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])
    self.assertIsInstance(result, ops.IndexedSlices)
    self._assert_values_equal(total, result)

  @test_util.run_in_graph_and_eager_modes
  def testDivideTensor(self):
    t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
    n = 2
    expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])
    result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)
    self._assert_values_equal(expected, result)

  @test_util.run_in_graph_and_eager_modes
  def testDivideIndexedSlices(self):
    # Division of sparse input must preserve the IndexedSlices type.
    t = math_ops._as_indexed_slices(
        constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    n = 2
    expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]])
    result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n)
    self.assertIsInstance(result, ops.IndexedSlices)
    self._assert_values_equal(expected, result)

  @test_util.run_in_graph_and_eager_modes
  def testIsIndexedSlices(self):
    t = math_ops._as_indexed_slices(
        constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    self.assertTrue(cross_device_utils.contains_indexed_slices(t))

  @test_util.run_in_graph_and_eager_modes
  def testContainsIndexedSlices_List(self):
    # Detection must recurse into list containers.
    t0 = math_ops._as_indexed_slices(
        constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    t1 = math_ops._as_indexed_slices(
        constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
    self.assertTrue(cross_device_utils.contains_indexed_slices([t0, t1]))

  @test_util.run_in_graph_and_eager_modes
  def testContainsIndexedSlices_Tuple(self):
    # ... and into tuple containers.
    t0 = math_ops._as_indexed_slices(
        constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    t1 = math_ops._as_indexed_slices(
        constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
    self.assertTrue(cross_device_utils.contains_indexed_slices((t0, t1)))

  @test_util.run_in_graph_and_eager_modes
  def testContainsIndexedSlices_PerReplica(self):
    # ... and into distribution-strategy PerReplica values.
    t0 = math_ops._as_indexed_slices(
        constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    t1 = math_ops._as_indexed_slices(
        constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
    device_map = value_lib.ReplicaDeviceMap(("/gpu:0", "/cpu:0"))
    per_replica = value_lib.PerReplica(device_map, (t0, t1))
    self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica))

  @combinations.generate(combinations.combine(
      mode=["graph", "eager"],
      required_gpus=1))
  def testCopyTensor(self):
    # Copy a dense CPU tensor to GPU; value unchanged, device updated.
    with ops.device("/cpu:0"):
      t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]])
    destination = "/gpu:0"
    result = cross_device_utils.copy_tensor_or_indexed_slices_to_device(
        t, destination)
    self._assert_values_equal(t, result)
    self.assertEqual(device_util.resolve(destination),
                     device_util.resolve(result.device))

  @combinations.generate(combinations.combine(
      mode=["graph", "eager"],
      required_gpus=1))
  def testCopyIndexedSlices(self):
    # Copy sparse input to GPU; type, value, and device all checked.
    with ops.device("/cpu:0"):
      t = math_ops._as_indexed_slices(
          constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
    destination = "/gpu:0"
    result = cross_device_utils.copy_tensor_or_indexed_slices_to_device(
        t, destination)
    self.assertIsInstance(result, ops.IndexedSlices)
    self._assert_values_equal(t, result)
    self.assertEqual(device_util.resolve(destination),
                     device_util.resolve(result.device))
if __name__ == "__main__":
test.main()
| true | true |
1c2e15f2b3b1565a30f03c85e3501e602cd8a15e | 113 | py | Python | core/__init__.py | luckylwk/neural-network-theano | 420c89e7028fcd9671866918c22a837d04387012 | [
"MIT"
] | null | null | null | core/__init__.py | luckylwk/neural-network-theano | 420c89e7028fcd9671866918c22a837d04387012 | [
"MIT"
] | null | null | null | core/__init__.py | luckylwk/neural-network-theano | 420c89e7028fcd9671866918c22a837d04387012 | [
"MIT"
] | null | null | null | from .datasets import *
from .layers import *
from .models import *
#from .trainers import *
from .utils import * | 22.6 | 24 | 0.734513 | from .datasets import *
from .layers import *
from .models import *
from .utils import * | true | true |
1c2e1629fe3fa776921a9c5d7b007b9b903c32cf | 761 | py | Python | test/log/test_LogLevelConverter.py | pip-services-python/pip-services-components-python | 428ec7a9f0f0bdfa4d39cecb2541e87b1e5d33e0 | [
"MIT"
] | null | null | null | test/log/test_LogLevelConverter.py | pip-services-python/pip-services-components-python | 428ec7a9f0f0bdfa4d39cecb2541e87b1e5d33e0 | [
"MIT"
] | null | null | null | test/log/test_LogLevelConverter.py | pip-services-python/pip-services-components-python | 428ec7a9f0f0bdfa4d39cecb2541e87b1e5d33e0 | [
"MIT"
] | 1 | 2020-03-11T21:46:42.000Z | 2020-03-11T21:46:42.000Z | # -*- coding: utf-8 -*-
"""
tests.log.test_LogLevelConverter
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pip_services3_components.log import LogLevel
from pip_services3_components.log import LogLevelConverter
class TestLogLevel:
def test_to_log_level(self):
assert LogLevelConverter.to_log_level("1") == LogLevel.Fatal
assert LogLevelConverter.to_log_level("fatal") == LogLevel.Fatal
def test_to_string(self):
assert LogLevelConverter.to_string(LogLevel.Fatal) == "FATAL"
def test_to_integer(self):
assert LogLevelConverter.to_integer(LogLevel.Fatal) == 1
| 30.44 | 93 | 0.69251 |
from pip_services3_components.log import LogLevel
from pip_services3_components.log import LogLevelConverter
class TestLogLevel:
def test_to_log_level(self):
assert LogLevelConverter.to_log_level("1") == LogLevel.Fatal
assert LogLevelConverter.to_log_level("fatal") == LogLevel.Fatal
def test_to_string(self):
assert LogLevelConverter.to_string(LogLevel.Fatal) == "FATAL"
def test_to_integer(self):
assert LogLevelConverter.to_integer(LogLevel.Fatal) == 1
| true | true |
1c2e1675329e828da8d7efd98263b261a253de83 | 11,417 | py | Python | one/util.py | OpenNebula/addon-linstor | 71cc6d5d625929f0350cec866ff07e953fcebe12 | [
"Apache-2.0"
] | 11 | 2018-10-18T19:53:52.000Z | 2021-11-08T11:42:56.000Z | one/util.py | OpenNebula/addon-linstor | 71cc6d5d625929f0350cec866ff07e953fcebe12 | [
"Apache-2.0"
] | 13 | 2018-11-26T16:15:35.000Z | 2021-08-02T18:24:14.000Z | one/util.py | OpenNebula/addon-linstor | 71cc6d5d625929f0350cec866ff07e953fcebe12 | [
"Apache-2.0"
] | 7 | 2018-11-08T03:44:59.000Z | 2021-05-16T20:47:19.000Z | # -*- coding: utf-8 -*-
"""
OpenNebula Driver for Linstor
Copyright 2018 LINBIT USA LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import os
import subprocess
import syslog
import traceback
import json
import re
REMOTES_DIR = "/var/lib/one/remotes/"
ONE_LOCATION = os.getenv("ONE_LOCATION")
if ONE_LOCATION:
REMOTES_DIR = os.path.join(ONE_LOCATION, "var/remotes")
SCRIPTS_COMMON = REMOTES_DIR + "/scripts_common.sh"
UTILS_DIR = REMOTES_DIR + "/datastore/"
LIBFS = UTILS_DIR + "libfs.sh"
DOWNLOADER = UTILS_DIR + "downloader.sh"
TM_COMMON = REMOTES_DIR + "/tm/tm_common.sh"
def _source(file, command, string_args=None):
sourced_cmd = "source {} && {}".format(file, command)
if string_args:
sourced_cmd = sourced_cmd + " {}".format(string_args)
exec_string = ["bash", "-c", sourced_cmd]
return exec_string
def error_message(msg):
syslog.syslog(syslog.LOG_ERR, "ERROR {}".format(msg))
def log_info(msg):
syslog.syslog(syslog.LOG_INFO, "INFO {}".format(msg))
def _wait_for_subp(cmd, log=True):
"""
Executes the given command and waits until finished.
:param list[str] cmd: command to execute
:param bool log: command should be logged to opennebula
:return: process return code
:rtype: int
"""
if log:
log_info("running shell command: {}".format(" ".join(cmd)))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = proc.communicate()
if proc.returncode != 0:
error_message("command {} failed: {}".format(cmd, err))
return proc.returncode
def _get_subp_out_base(cmd, log=True):
"""
Runs cmd and logs into syslog and returns output
:param list[str] cmd: shell command to run
:param bool log: if cmdn should be logged as INFO
:return: Tuple of [returncode, stdout, stderr]
:rtype: (int, str, str)
"""
if log:
log_info("running shell command: {}".format(" ".join(cmd)))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
return proc.returncode, out.decode(), err.decode()
def _get_subp_out(cmd):
rc, out, err = _get_subp_out_base(cmd)
if rc != 0:
error_message("command {} failed: {}".format(cmd, err))
raise subprocess.CalledProcessError(returncode=rc, cmd=cmd, output=out, stderr=err)
return out
def exec_local_with_out(cmd):
"""
:param str cmd:
:return:
"""
return _get_subp_out(_source(SCRIPTS_COMMON, cmd))
def ssh_direct(host, cmd):
"""
Executes the given cmd on the host and returns the output of the command.
:param str host: host to execute the command
:param str cmd: Command to execute
:return: stdout of the executed command
:rtype: str
"""
return _get_subp_out(_source(SCRIPTS_COMMON, "$SSH", '"{h}" "{c}"'.format(h=host, c=cmd)))
def ssh_exec_and_log(host, cmd, error_msg):
"""
:param str host: hostname to ssh to
:param str cmd: cmd to execute
:param str error_msg: error message if cmd fails
:return:
"""
log_info("ssh '{h}' cmd: {c}".format(h=host, c=cmd))
ssh_cmd = [
'"{}"'.format(host),
'"{}"'.format(cmd),
'"{}"'.format(error_msg)
]
return _wait_for_subp(_source(SCRIPTS_COMMON, "ssh_exec_and_log", " ".join(ssh_cmd)), log=False)
def ssh_exec_and_log_with_err(host, cmd, error_msg):
"""
Runs cmd and logs into syslog and returns return code and stderr
:param str host: Where ssh should connect to
:param str cmd: command to run on host
:param str error_msg: log message if error occurs
:return: Tuple of [returncode, stderr]
:rtype: (int, str)
"""
log_info("ssh '{h}' cmd: {c}".format(h=host, c=cmd))
ssh_cmd = [
'"{}"'.format(host),
'"{}"'.format(cmd),
'"{}"'.format(error_msg)
]
# ssh_exec_and_log doesn't return stdout
rc, _, err = _get_subp_out_base(_source(SCRIPTS_COMMON, "ssh_exec_and_log", " ".join(ssh_cmd)), log=False)
return rc, err
def ssh_monitor_and_log(host, cmd, error_msg):
"""
Runs cmd and logs into syslog and returns return code, output and stderr
:param str host: Where ssh should connect to
:param str cmd: command to run on host
:param str error_msg: log message if error occurs
:return: Tuple of [returncode, stdout, stderr]
:rtype: (int, str, str)
"""
log_info("ssh '{h}' cmd: {c}".format(h=host, c=cmd))
ssh_cmd = [
'"{}"'.format(host),
'"{}"'.format(cmd),
'"{}"'.format(error_msg)
]
return _get_subp_out_base(_source(SCRIPTS_COMMON, "ssh_monitor_and_log", " ".join(ssh_cmd)), log=False)
def exec_and_log(cmd, message):
rc = _wait_for_subp(["bash", "-c", cmd])
if int(rc) != 0:
error_message(message)
return rc
def link_file(dst_host, dst_dir, dst_path, device_path, resource_name):
"""
Calls the ln command on the dst_host
:param str dst_host:
:param str dst_dir:
:param str dst_path:
:param str device_path:
:param str resource_name: Resource name for error output
:return: True if run, else throws exception
"""
link_command = 'mkdir -p {dstdir} && ln -fs {devp} {dstp}'.format(
dstdir=dst_dir, devp=device_path, dstp=dst_path)
rc = ssh_exec_and_log(
host=dst_host,
cmd=link_command,
error_msg='Error: Unable to link {} to {} on {}'.format(resource_name, dst_path, dst_host))
if rc != 0:
raise RuntimeError("Error: Unable to link {} to {} on {}".format(resource_name, dst_path, dst_host))
return True
def unlink_file(host, path):
"""
Deletes a file or path.
:param str host: host computer
:param str path: path on the host to delete
:return: True, or raises RuntimeError()
"""
unlink_command = 'set -e;if [ -d "{dst}" ]; then rm -rf "{dst}"; else rm -f "{dst}"; fi'.format(dst=path)
rc = ssh_exec_and_log(
host=host,
cmd=unlink_command,
error_msg="Error: Unable to remove symbolic link {} on {}".format(path, host))
if rc != 0:
raise RuntimeError("Error: Unable to remove symbolic link {} on {}".format(path, host))
return True
def rm_shared_safe(host, path):
"""
Deletes a file or path if it isn't on a network filesystem.
:param str host: host computer
:param str path: path on the host to delete
:return: True, or raises RuntimeError()
"""
fstype = ssh_direct(host, 'stat --file-system --format=%T "{dst}"'.format(dst=path)).strip()
if fstype and fstype not in ['nfs', 'fuseblk']:
unlink_file(host, path)
else:
log_info("filesystem is shared('{fs}'), not deleting: {p}".format(fs=fstype, p=path))
return True
def mkfs_command(string_args):
return _wait_for_subp(_source(SCRIPTS_COMMON, "mkfs_command", string_args))
def mkiso_command(string_args):
return _wait_for_subp(_source(SCRIPTS_COMMON, "$MKISOFS", string_args))
def ssh_make_path(string_args):
return _wait_for_subp(_source(SCRIPTS_COMMON, "ssh_make_path", string_args))
def set_up_datastore(string_args):
return _wait_for_subp(_source(LIBFS, "set_up_datastore", string_args))
def set_downloader_args(string_args):
return _get_subp_out(_source(LIBFS, "set_downloader_args", string_args))
def check_restricted(string_args):
return _get_subp_out(_source(LIBFS, "check_restricted", string_args))
def arg_host(string_args):
"""
Returns the host part of string_args e.g.:
example.com:/tmp/file -> example.com
:param str string_args: opennebula string args
:return: the host path of string_args
"""
split = string_args.split(":", 1)
return split[0]
def arg_path(string_args):
"""
Returns the path part of an opennebula path arg and also normalizes the path.
:param str string_args: opennebula string args
:return: the normalized path arg
"""
split = string_args.split(":", 1)
path = split[1] if len(split) > 1 else split[0]
return os.path.normpath(path)
def migrate_other(string_args):
# We're turning off logging here because this gets called with a huge
# base64 image dump and it's too noisy.
return _wait_for_subp(_source(TM_COMMON, "migrate_other", string_args), log=False)
def show_vm(vm_id):
"""
Executes the onevm show command and returns the xml output.
:param int vm_id: vm id number
:return: XML output from onevm show command
"""
return _get_subp_out(["onevm", "show", "-x", str(vm_id)])
def show_image(image_id):
return _get_subp_out(["oneimage", "show", "--xml", str(image_id)])
def show_ds(ds_id):
return _get_subp_out(["onedatastore", "show", "--xml", str(ds_id)])
def fs_size(string_args):
return _get_subp_out(
_source(LIBFS, 'UTILS_PATH="{}" fs_size'.format(UTILS_DIR), string_args)
)
def detect_image_format(host, path):
cmd = "$QEMU_IMG info --output json {p}".format(p=path)
rc, stdout, stderr = ssh_monitor_and_log(host, cmd, "qemu-img info failed for " + path)
if rc != 0:
raise RuntimeError("Error: qemu-img info failed for {}; Message {}".format(path, stdout + stderr))
img_data = json.loads(stdout)
return img_data["format"]
def _get_one_version_str():
return subprocess.check_output(["onecluster", "show", "-V"]).decode()
def _one_version_parse(version_info_str=None):
"""
Returns the opennebula version as tuple.
:param str version_info_str: string with OpenNebula version info
:return: Tuple with major, minor, patch version
:rtype: (int, int, int)
"""
output = _get_one_version_str() if version_info_str is None else version_info_str
m = re.search(r"OpenNebula (\d+)\.(\d+)\.(\d+)", output)
if m:
return int(m.group(1)), int(m.group(2)), int(m.group(3))
return 0, 0, 0
def one_version_larger(major=5, minor=0, patch=0, version_info_str=None):
inst_major, inst_minor, inst_patch = _one_version_parse(version_info_str)
if inst_major > major:
return True
elif major == inst_major:
if inst_minor > minor:
return True
if inst_minor == minor and inst_patch > patch:
return True
return False
def get_copy_command(string_args):
return DOWNLOADER + " " + string_args
def run_main(main_func):
try:
main_func()
except subprocess.CalledProcessError as cpe:
error_message(traceback.format_exc())
traceback.print_exc(file=sys.stderr)
print("ERROR: Command {c} returned error: {o}".format(c=cpe.cmd, o=cpe.stdout + cpe.stderr), file=sys.stderr)
sys.exit(2)
except Exception as err:
error_message(traceback.format_exc())
traceback.print_exc(file=sys.stderr)
print("ERROR: " + str(err), file=sys.stderr)
sys.exit(1)
| 29.57772 | 117 | 0.666287 |
from __future__ import print_function
import sys
import os
import subprocess
import syslog
import traceback
import json
import re
REMOTES_DIR = "/var/lib/one/remotes/"
ONE_LOCATION = os.getenv("ONE_LOCATION")
if ONE_LOCATION:
REMOTES_DIR = os.path.join(ONE_LOCATION, "var/remotes")
SCRIPTS_COMMON = REMOTES_DIR + "/scripts_common.sh"
UTILS_DIR = REMOTES_DIR + "/datastore/"
LIBFS = UTILS_DIR + "libfs.sh"
DOWNLOADER = UTILS_DIR + "downloader.sh"
TM_COMMON = REMOTES_DIR + "/tm/tm_common.sh"
def _source(file, command, string_args=None):
sourced_cmd = "source {} && {}".format(file, command)
if string_args:
sourced_cmd = sourced_cmd + " {}".format(string_args)
exec_string = ["bash", "-c", sourced_cmd]
return exec_string
def error_message(msg):
syslog.syslog(syslog.LOG_ERR, "ERROR {}".format(msg))
def log_info(msg):
syslog.syslog(syslog.LOG_INFO, "INFO {}".format(msg))
def _wait_for_subp(cmd, log=True):
if log:
log_info("running shell command: {}".format(" ".join(cmd)))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = proc.communicate()
if proc.returncode != 0:
error_message("command {} failed: {}".format(cmd, err))
return proc.returncode
def _get_subp_out_base(cmd, log=True):
if log:
log_info("running shell command: {}".format(" ".join(cmd)))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
return proc.returncode, out.decode(), err.decode()
def _get_subp_out(cmd):
rc, out, err = _get_subp_out_base(cmd)
if rc != 0:
error_message("command {} failed: {}".format(cmd, err))
raise subprocess.CalledProcessError(returncode=rc, cmd=cmd, output=out, stderr=err)
return out
def exec_local_with_out(cmd):
return _get_subp_out(_source(SCRIPTS_COMMON, cmd))
def ssh_direct(host, cmd):
return _get_subp_out(_source(SCRIPTS_COMMON, "$SSH", '"{h}" "{c}"'.format(h=host, c=cmd)))
def ssh_exec_and_log(host, cmd, error_msg):
log_info("ssh '{h}' cmd: {c}".format(h=host, c=cmd))
ssh_cmd = [
'"{}"'.format(host),
'"{}"'.format(cmd),
'"{}"'.format(error_msg)
]
return _wait_for_subp(_source(SCRIPTS_COMMON, "ssh_exec_and_log", " ".join(ssh_cmd)), log=False)
def ssh_exec_and_log_with_err(host, cmd, error_msg):
log_info("ssh '{h}' cmd: {c}".format(h=host, c=cmd))
ssh_cmd = [
'"{}"'.format(host),
'"{}"'.format(cmd),
'"{}"'.format(error_msg)
]
rc, _, err = _get_subp_out_base(_source(SCRIPTS_COMMON, "ssh_exec_and_log", " ".join(ssh_cmd)), log=False)
return rc, err
def ssh_monitor_and_log(host, cmd, error_msg):
log_info("ssh '{h}' cmd: {c}".format(h=host, c=cmd))
ssh_cmd = [
'"{}"'.format(host),
'"{}"'.format(cmd),
'"{}"'.format(error_msg)
]
return _get_subp_out_base(_source(SCRIPTS_COMMON, "ssh_monitor_and_log", " ".join(ssh_cmd)), log=False)
def exec_and_log(cmd, message):
rc = _wait_for_subp(["bash", "-c", cmd])
if int(rc) != 0:
error_message(message)
return rc
def link_file(dst_host, dst_dir, dst_path, device_path, resource_name):
link_command = 'mkdir -p {dstdir} && ln -fs {devp} {dstp}'.format(
dstdir=dst_dir, devp=device_path, dstp=dst_path)
rc = ssh_exec_and_log(
host=dst_host,
cmd=link_command,
error_msg='Error: Unable to link {} to {} on {}'.format(resource_name, dst_path, dst_host))
if rc != 0:
raise RuntimeError("Error: Unable to link {} to {} on {}".format(resource_name, dst_path, dst_host))
return True
def unlink_file(host, path):
unlink_command = 'set -e;if [ -d "{dst}" ]; then rm -rf "{dst}"; else rm -f "{dst}"; fi'.format(dst=path)
rc = ssh_exec_and_log(
host=host,
cmd=unlink_command,
error_msg="Error: Unable to remove symbolic link {} on {}".format(path, host))
if rc != 0:
raise RuntimeError("Error: Unable to remove symbolic link {} on {}".format(path, host))
return True
def rm_shared_safe(host, path):
fstype = ssh_direct(host, 'stat --file-system --format=%T "{dst}"'.format(dst=path)).strip()
if fstype and fstype not in ['nfs', 'fuseblk']:
unlink_file(host, path)
else:
log_info("filesystem is shared('{fs}'), not deleting: {p}".format(fs=fstype, p=path))
return True
def mkfs_command(string_args):
return _wait_for_subp(_source(SCRIPTS_COMMON, "mkfs_command", string_args))
def mkiso_command(string_args):
return _wait_for_subp(_source(SCRIPTS_COMMON, "$MKISOFS", string_args))
def ssh_make_path(string_args):
return _wait_for_subp(_source(SCRIPTS_COMMON, "ssh_make_path", string_args))
def set_up_datastore(string_args):
return _wait_for_subp(_source(LIBFS, "set_up_datastore", string_args))
def set_downloader_args(string_args):
return _get_subp_out(_source(LIBFS, "set_downloader_args", string_args))
def check_restricted(string_args):
return _get_subp_out(_source(LIBFS, "check_restricted", string_args))
def arg_host(string_args):
split = string_args.split(":", 1)
return split[0]
def arg_path(string_args):
split = string_args.split(":", 1)
path = split[1] if len(split) > 1 else split[0]
return os.path.normpath(path)
def migrate_other(string_args):
# We're turning off logging here because this gets called with a huge
return _wait_for_subp(_source(TM_COMMON, "migrate_other", string_args), log=False)
def show_vm(vm_id):
return _get_subp_out(["onevm", "show", "-x", str(vm_id)])
def show_image(image_id):
return _get_subp_out(["oneimage", "show", "--xml", str(image_id)])
def show_ds(ds_id):
return _get_subp_out(["onedatastore", "show", "--xml", str(ds_id)])
def fs_size(string_args):
return _get_subp_out(
_source(LIBFS, 'UTILS_PATH="{}" fs_size'.format(UTILS_DIR), string_args)
)
def detect_image_format(host, path):
cmd = "$QEMU_IMG info --output json {p}".format(p=path)
rc, stdout, stderr = ssh_monitor_and_log(host, cmd, "qemu-img info failed for " + path)
if rc != 0:
raise RuntimeError("Error: qemu-img info failed for {}; Message {}".format(path, stdout + stderr))
img_data = json.loads(stdout)
return img_data["format"]
def _get_one_version_str():
return subprocess.check_output(["onecluster", "show", "-V"]).decode()
def _one_version_parse(version_info_str=None):
output = _get_one_version_str() if version_info_str is None else version_info_str
m = re.search(r"OpenNebula (\d+)\.(\d+)\.(\d+)", output)
if m:
return int(m.group(1)), int(m.group(2)), int(m.group(3))
return 0, 0, 0
def one_version_larger(major=5, minor=0, patch=0, version_info_str=None):
inst_major, inst_minor, inst_patch = _one_version_parse(version_info_str)
if inst_major > major:
return True
elif major == inst_major:
if inst_minor > minor:
return True
if inst_minor == minor and inst_patch > patch:
return True
return False
def get_copy_command(string_args):
return DOWNLOADER + " " + string_args
def run_main(main_func):
try:
main_func()
except subprocess.CalledProcessError as cpe:
error_message(traceback.format_exc())
traceback.print_exc(file=sys.stderr)
print("ERROR: Command {c} returned error: {o}".format(c=cpe.cmd, o=cpe.stdout + cpe.stderr), file=sys.stderr)
sys.exit(2)
except Exception as err:
error_message(traceback.format_exc())
traceback.print_exc(file=sys.stderr)
print("ERROR: " + str(err), file=sys.stderr)
sys.exit(1)
| true | true |
1c2e16a41b14402840f19096fafcac8f8c1402b8 | 6,926 | py | Python | src/sardana/sardanabase.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | src/sardana/sardanabase.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | src/sardana/sardanabase.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""This module is part of the Python Sardana library. It defines the base
classes for Sardana object"""
from __future__ import absolute_import
__all__ = ["SardanaBaseObject", "SardanaObjectID"]
__docformat__ = 'restructuredtext'
import weakref
from taurus.core.util.log import Logger
from sardana.sardanadefs import ElementType, Interface, InterfacesExpanded, InvalidId
from sardana.sardanaevent import EventGenerator, EventReceiver
class SardanaBaseObject(EventGenerator, EventReceiver, Logger):
"""The Sardana most abstract object. It contains only two members:
- _manager : a weak reference to the manager (pool or ms) where it
belongs
- _name : the name
- _full_name : the name (usually a tango device name, but can be
anything else.)"""
def __init__(self, **kwargs):
EventGenerator.__init__(self)
EventReceiver.__init__(self)
self._type = kwargs.pop('elem_type')
self._name = intern(kwargs.pop('name'))
self._full_name = intern(kwargs.pop('full_name'))
self._frontend = None
Logger.__init__(self, self._name)
self._manager = weakref.ref(kwargs.pop('manager'))
self._parent = weakref.ref(kwargs.pop('parent', self.manager))
def get_manager(self):
"""Return the :class:`sardana.Manager` which *owns* this sardana
object.
:return: the manager which *owns* this pool object.
:rtype: :class:`sardana.Manager`"""
return self._manager()
def get_name(self):
"""Returns this sardana object name
:return: this sardana object name
:rtype: :obj:`str`"""
return self._name
def set_name(self, name):
"""Sets sardana object name
:param: sardana object name
:type: :obj:`str`"""
self._name = name
def get_full_name(self):
"""Returns this sardana object full name
:return: this sardana object full name
:rtype: :obj:`str`"""
return self._full_name
def get_type(self):
"""Returns this sardana object type.
:return: this sardana object type
:rtype: :obj:`~sardana.sardanadefs.ElementType`"""
return self._type
def get_parent(self):
"""Returns this pool object parent.
:return: this objects parent
:rtype: :class:`~sardana.sardanabase.SardanaBaseObject`"""
return self._parent()
def get_parent_name(self):
"""Returns this sardana object parent's name.
:return: this objects parent
:rtype: :obj:`str`"""
parent = self.get_parent()
if parent and hasattr(parent, 'name'):
return parent.name
def get_frontend(self):
"""Returns this sardana frontend object or None if no frontend is
registered
:return: this objects frontend
:rtype: :obj:`object`"""
f = self._frontend
if f is None:
return None
return f()
def fire_event(self, event_type, event_value, listeners=None, protected=True):
if protected:
try:
return EventGenerator.fire_event(self, event_type, event_value,
listeners=listeners)
except:
self.warning("Error firing event <%r, %r>",
event_type, event_value)
self.debug("Details", exc_info=1)
else:
return EventGenerator.fire_event(self, event_type, event_value,
listeners=listeners)
def get_interfaces(self):
"""Returns the set of interfaces this object implements.
:return:
The set of interfaces this object implements.
:rtype:
class:`set` <:class:`sardana.sardanadefs.Interface`>"""
return InterfacesExpanded[self.get_interface()]
def get_interface(self):
"""Returns the interface this object implements.
:return:
The interface this object implements.
:rtype:
:class:`sardana.sardanadefs.Interface`"""
return Interface[ElementType[self.get_type()]]
def get_interface_names(self):
"""Returns a sequence of interface names this object implements.
:return:
The sequence of interfaces this object implements.
:rtype:
sequence<:obj:`str`>"""
return map(Interface.get, self.get_interfaces())
def serialize(self, *args, **kwargs):
kwargs['name'] = self.name
kwargs['full_name'] = self.full_name
kwargs['type'] = ElementType.whatis(self.get_type())
kwargs['manager'] = self.manager.name
kwargs['parent'] = self.get_parent_name()
kwargs['interfaces'] = self.get_interface_names()
return kwargs
def serialized(self, *args, **kwargs):
return self.manager.serialize_element(self, *args, **kwargs)
def str(self, *args, **kwargs):
return self.manager.str_element(self, *args, **kwargs)
def __str__(self):
return self._name
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self._name)
manager = property(get_manager,
doc="reference to the :class:`sardana.Manager`")
name = property(get_name, set_name, doc="object name")
full_name = property(get_full_name, doc="object full name")
frontend = property(get_frontend, doc="the object frontend")
class SardanaObjectID(object):
"""To be used by sardana objects which have an ID associated to them."""
def __init__(self, id=InvalidId):
self._id = id
def get_id(self):
"""Returns this sardana object ID
:return: this sardana object ID
:rtype: int"""
return self._id
def serialize(self, *args, **kwargs):
kwargs['id'] = self.id
return kwargs
id = property(get_id, doc="object ID")
| 32.669811 | 85 | 0.618539 | true | true | |
1c2e16a8640c5d8ca1be6a0c510284f122c75808 | 1,549 | py | Python | local-tests/run_nodes.py | kumandra/kumandra-node | eceacafde002f8a14dedfdc2ab953b213b8fb699 | [
"Apache-2.0"
] | null | null | null | local-tests/run_nodes.py | kumandra/kumandra-node | eceacafde002f8a14dedfdc2ab953b213b8fb699 | [
"Apache-2.0"
] | 8 | 2022-03-21T04:41:05.000Z | 2022-03-21T06:36:19.000Z | local-tests/run_nodes.py | kumandra/kumandra-node | eceacafde002f8a14dedfdc2ab953b213b8fb699 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
# Short script demonstrating the basic usage of `chainrunner` package.
# Reproduces (more or less) the behavior of `run_nodes.sh`.
# For running local experiments it's much more convenient to manage the chain
# using an interactive environment (Python console, Jupyter notebook etc.)
from time import sleep
from chainrunner import Chain, Seq, generate_keys, check_finalized
nodes = 4
workdir = '.'
binary = '../target/release/kumandra-node'
port = 30334
ws_port = 9944
rpc_port = 9933
phrases = ['//Alice', '//Bob', '//Charlie', '//Dave', '//Ezekiel', '//Fanny', '//George', '//Hugo']
keys_dict = generate_keys(binary, phrases)
keys = list(keys_dict.values())
nodes = min(nodes, len(phrases))
chain = Chain(workdir)
print(f'Bootstrapping chain for {nodes} nodes')
chain.bootstrap(binary,
keys[:nodes],
chain_type='local')
chain.set_flags('validator',
'unsafe-ws-external',
'unsafe-rpc-external',
'no-mdns',
port=Seq(port),
ws_port=Seq(ws_port),
rpc_port=Seq(rpc_port),
unit_creation_delay=500,
execution='Native',
rpc_cors='all',
rpc_methods='Unsafe')
addresses = [n.address() for n in chain]
chain.set_flags(bootnodes=addresses[0], public_addr=addresses)
print('Starting the chain')
chain.start('node')
print('Waiting a minute')
sleep(60)
check_finalized(chain)
print('Exiting script, leaving nodes running in the background')
| 29.226415 | 99 | 0.647515 |
# using an interactive environment (Python console, Jupyter notebook etc.)
from time import sleep
from chainrunner import Chain, Seq, generate_keys, check_finalized
nodes = 4
workdir = '.'
binary = '../target/release/kumandra-node'
port = 30334
ws_port = 9944
rpc_port = 9933
phrases = ['//Alice', '//Bob', '//Charlie', '//Dave', '//Ezekiel', '//Fanny', '//George', '//Hugo']
keys_dict = generate_keys(binary, phrases)
keys = list(keys_dict.values())
nodes = min(nodes, len(phrases))
chain = Chain(workdir)
print(f'Bootstrapping chain for {nodes} nodes')
chain.bootstrap(binary,
keys[:nodes],
chain_type='local')
chain.set_flags('validator',
'unsafe-ws-external',
'unsafe-rpc-external',
'no-mdns',
port=Seq(port),
ws_port=Seq(ws_port),
rpc_port=Seq(rpc_port),
unit_creation_delay=500,
execution='Native',
rpc_cors='all',
rpc_methods='Unsafe')
addresses = [n.address() for n in chain]
chain.set_flags(bootnodes=addresses[0], public_addr=addresses)
print('Starting the chain')
chain.start('node')
print('Waiting a minute')
sleep(60)
check_finalized(chain)
print('Exiting script, leaving nodes running in the background')
| true | true |
1c2e17c4bf587013e11f885311464167a66ecff8 | 23,032 | py | Python | Server/Python/tests/dbsserver_t/utils/DBSDataProvider.py | vkuznet/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 8 | 2015-08-14T04:01:32.000Z | 2021-06-03T00:56:42.000Z | Server/Python/tests/dbsserver_t/utils/DBSDataProvider.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 162 | 2015-01-07T21:34:47.000Z | 2021-10-13T09:42:41.000Z | Server/Python/tests/dbsserver_t/utils/DBSDataProvider.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 16 | 2015-01-22T15:27:29.000Z | 2021-04-28T09:23:28.000Z | """
Class to provide data for unit- and integration tests
"""
from itertools import izip
from collections import defaultdict
import cPickle as pickle
import getpass
import os.path
import uuid
import time
import os
import random
def create_dbs_data_provider(data_type='persistent', data_location=None):
    """Factory for DBS test data providers.

    Builds only the requested provider instead of eagerly instantiating every
    known one: DBSTransientData keeps class-level instance/unique-id
    bookkeeping, so creating (and discarding) an unused instance as a plain
    dict value has observable side effects.

    :param data_type: either 'persistent' or 'transient'
    :param data_location: path to the pickled test data; only used by the
        transient provider
    :return: a DBSDataProvider wrapping the requested backend, or None for an
        unknown data_type (same contract as the original dict ``.get``)
    """
    if data_type == 'persistent':
        return DBSDataProvider(DBSPersistentData())
    if data_type == 'transient':
        return DBSDataProvider(DBSTransientData(data_location=data_location))
    return None
def sort_data(data, sort_key):
    """Return the entries of *data* ordered (ascending) by ``entry[sort_key]``."""
    def _by_key(entry):
        return entry[sort_key]
    return sorted(data, key=_by_key)
def strip_volatile_fields(data):
    """Remove database-generated id fields from query results (in place).

    These fields are auto-assigned by the database and differ between runs,
    so they must be ignored when comparing query results against expected
    test data.

    :param data: a single result dict or a list of result dicts
    :return: the same dict (mutated in place), or a new list containing the
        mutated dicts, mirroring the input type
    """
    # frozenset gives O(1) membership tests instead of scanning a list
    # for every key of every entry.
    volatile_fields = frozenset(['block_id', 'parent_block_id', 'branch_hash_id',
                                 'dataset_id', 'parent_dataset_id', 'data_tier_id',
                                 'file_id', 'parent_file_id', 'file_type_id',
                                 'primary_ds_id', 'primary_ds_type_id', 'description'])
    if isinstance(data, list):
        return [strip_volatile_fields(entry) for entry in data]
    # Iterate over a snapshot of the keys: we delete from the dict while
    # iterating, and dict.keys() is a live view in Python 3.
    for key in list(data.keys()):
        if key in volatile_fields:
            del data[key]
    return data
class DBSTransientData(object):
    """
    All TestCases in a TestSuite using this class share the same unixtime and unique_hash.
    The unixtime and unique_hash are reset once no instance of this class exists anymore.
    Therefore, it is necessary to delete TestSuites if one would like to use a different
    unixtime and different unique_ids.
    """
    # Class-level (shared) state. reset_unique_ids() -- invoked from __init__
    # for the first live instance -- presumably rebinds unixtime/unique_hash to
    # fresh string values (generate_data substitutes them into string
    # templates); the integer 0 here looks like a "not yet initialised"
    # placeholder -- TODO confirm against reset_unique_ids().
    unixtime = 0
    unique_hash = 0
    # Number of live instances; maintained by __init__/__del__ to decide when
    # the shared ids have to be regenerated.
    instance_count = 0
def __init__(self, data_location):
if self.instance_count == 0:
self.reset_unique_ids()
self.__class__.instance_count += 1
self.username = getpass.getuser()
self.data = {}
self.data_location = data_location
self.template_data_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../data/template_transient_test_data.pkl')
def __del__(self):
self.__class__.instance_count -= 1
def get_data(self, key):
if key not in self.data:
self.load_data(key)
return self.data.get(key)
def load_data(self, key):
test_data_file = file(self.data_location, "r")
pkl_test_data = pickle.load(test_data_file)
test_data_file.close()
if isinstance(pkl_test_data, dict) and key in pkl_test_data:
self.data.update(pkl_test_data)
else:
raise TypeError("Input file %s does not contain the right format!" % (self.data_location))
def save_data(self):
test_data_file = file(self.data_location, "w")
pkl_test_data = pickle.dump(self.data, test_data_file)
test_data_file.close()
def generate_data(self, key):
template_data_file = file(self.template_data_location, 'r')
template_test_data = pickle.load(template_data_file)
if not (isinstance(template_test_data, dict) and key in template_test_data):
raise TypeError("Template file %s does not contain the right format!" % (self.template_data_location))
template_data = template_test_data.get(key)
generated_data = []
for list_entry in template_data:
for entry, value in list_entry.iteritems():
if isinstance(value, str):
if value.find("@unique_id@") != -1:
list_entry[entry] = list_entry[entry].replace("@unique_id@", self.unixtime)
if value.find("@date@") != -1:
list_entry[entry] = list_entry[entry].replace("@date@", self.unixtime)
if value.find("@user@") != -1:
list_entry[entry] = list_entry[entry].replace("@user@", self.username)
if value.find("@unique_hash@") != -1:
list_entry[entry] = list_entry[entry].replace("@unique_hash@", self.unique_hash)
if value.find("@unique_id_9999@") != -1:
list_entry[entry] = list_entry[entry].replace("@unique_id_9999@", str(int(self.unixtime)%9999))
#check if string contains only digits, since DBS3 returns int's in that case,
#except for md5, adler32 and checksum
if list_entry[entry].isdigit() and entry not in ['md5', 'adler32', 'check_sum']:
list_entry[entry] = int(list_entry[entry])
generated_data.append(list_entry)
generated_data = {key : generated_data}
self.data.update(generated_data)
self.save_data()
@classmethod
def reset_unique_ids(cls):
cls.unixtime = str(int(time.time()))
cls.unique_hash = str(uuid.uuid1()).replace('-', '')
class DBSPersistentData(object):
    """Read-only store of pre-generated (persistent) test data.

    Data is loaded lazily from a pickle file.  Overwriting or regenerating
    the persistent data set is deliberately forbidden.
    """
    def __init__(self, data_location=None):
        """data_location: path to the pickle file; defaults to
        ../data/persistent_test_data.pkl relative to this module."""
        self.data = {}
        if data_location:
            self.data_location = data_location
        else:
            self.data_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../data/persistent_test_data.pkl')
    def get_data(self, key):
        """Return the data stored under *key*, loading it from disk on first use."""
        if key not in self.data:
            self.load_data(key)
        return self.data.get(key)
    def load_data(self, key):
        """Load the pickle file and merge its contents into self.data.

        Raises TypeError when the file does not hold a dict containing *key*.
        """
        # open() in binary mode instead of the removed-in-Python-3 file()
        # builtin; pickle requires a binary stream
        with open(self.data_location, "rb") as test_data_file:
            pkl_test_data = pickle.load(test_data_file)
        if isinstance(pkl_test_data, dict) and key in pkl_test_data:
            self.data.update(pkl_test_data)
        else:
            raise TypeError("Input file %s does not have the right format or does not contain key %s!" % (self.data_location, key))
    def save_data(self):
        # bug fix: NotImplemented is a comparison sentinel, not an exception
        # class -- 'raise NotImplemented(...)' fails with a TypeError instead
        # of the intended error
        raise NotImplementedError("You cannot overwrite persistent data!")
    def generate_data(self, key):
        # same fix as save_data: raise a real exception type
        raise NotImplementedError("You cannot re-generate persistent data!")
class DBSDataProvider(object):
    """Facade over a test-data store (persistent or transient) exposing one
    typed accessor per DBS entity.  Every accessor accepts regenerate=True
    to force the store to rebuild the data before returning it."""
    def __init__(self, data_store):
        self.data_store = data_store
    def get_acquisition_era_data(self, regenerate=False):
        """Return acquisition era records."""
        return self.get_data('acquisition_era', regenerate)
    def get_block_data(self, regenerate=False):
        """Return block records."""
        return self.get_data('block', regenerate)
    def get_block_parentage_data(self, regenerate=False):
        """Return block parentage records."""
        return self.get_data('block_parentage', regenerate)
    def get_child_block_data(self, regenerate=False):
        """Return child block records."""
        return self.get_data('child_block', regenerate)
    def get_child_dataset_data(self, regenerate=False):
        """Return child dataset records."""
        return self.get_data('child_dataset', regenerate)
    def get_child_file_data(self, regenerate=False):
        """Return child file records."""
        return self.get_data('child_file', regenerate)
    def get_dataset_data(self, regenerate=False):
        """Return dataset records."""
        return self.get_data('dataset', regenerate)
    def get_dataset_parentage_data(self, regenerate=False):
        """Return dataset parentage records."""
        return self.get_data('dataset_parentage', regenerate)
    def get_data_tier_data(self, regenerate=False):
        """Return data tier records."""
        return self.get_data('data_tier', regenerate)
    def get_file_data(self, regenerate=False):
        """Return file records."""
        return self.get_data('file', regenerate)
    def get_file_lumi_data(self, regenerate=False):
        """Return file lumi records."""
        return self.get_data('file_lumi', regenerate)
    def get_file_parentage_data(self, regenerate=False):
        """Return file parentage records."""
        return self.get_data('file_parentage', regenerate)
    def get_output_module_config_data(self, regenerate=False):
        """Return output module configuration records."""
        return self.get_data('output_module_config', regenerate)
    def get_physics_group_data(self, regenerate=False):
        """Return physics group records, sorted by physics_group_name."""
        groups = self.get_data('physics_group', regenerate)
        return sort_data(groups, 'physics_group_name')
    def get_primary_dataset_data(self, regenerate=False):
        """Return primary dataset records."""
        return self.get_data('primary_dataset', regenerate)
    def get_primary_ds_type_data(self, regenerate=False):
        """Return primary dataset type records, sorted by data_type."""
        ds_types = self.get_data('primary_ds_type', regenerate)
        return sort_data(ds_types, 'data_type')
    def get_processing_era_data(self, regenerate=False):
        """Return processing era records."""
        return self.get_data('processing_era', regenerate)
    def get_data(self, key, regenerate):
        """Fetch data for *key* from the store, regenerating it first if asked."""
        if regenerate:
            self.data_store.generate_data(key=key)
        return self.data_store.get_data(key=key)
def create_child_data_provider(parent_data_provider):
    """Create a DBSBlockDataProvider whose files are registered as children
    of *parent_data_provider*'s files.

    The child provider is built with the same block/file/run/lumi counts as
    the parent; files are then paired positionally, block by block, and the
    resulting parentage is stored on the child via file_parent_list().
    """
    child_data_provider = DBSBlockDataProvider(
        parent_data_provider._num_of_blocks,
        parent_data_provider._num_of_files,
        parent_data_provider._num_of_runs,
        parent_data_provider._num_of_lumis)
    parent_blocks = parent_data_provider.block_dump()
    child_blocks = child_data_provider.block_dump()
    for parent_block, child_block in zip(parent_blocks, child_blocks):
        parent_lfns = [entry['logical_file_name'] for entry in parent_block['files']]
        child_lfns = [entry['logical_file_name'] for entry in child_block['files']]
        pairs = [{'parent_logical_file_name': parent_lfn,
                  'logical_file_name': child_lfn}
                 for parent_lfn, child_lfn in zip(parent_lfns, child_lfns)]
        child_data_provider.file_parent_list(child_block['block']['block_name'],
                                             pairs)
    return child_data_provider
class DBSBlockDataProvider(object):
    """Generator of randomized DBS block-dump test data.

    Most attributes (dataset name, tiers, eras, ...) are produced lazily via
    cached properties: the value is created on first access with a random or
    uid-based component and then reused, so one instance always describes a
    single consistent dataset.  block_dump() assembles the full insert
    payload for each generated block.
    """
    def __init__(self, num_of_blocks=1, num_of_files=10, num_of_runs=10, num_of_lumis=10):
        # shape of the generated data: blocks, files per block, and
        # runs/lumis per file
        self._num_of_blocks = num_of_blocks
        self._num_of_files = num_of_files
        self._num_of_runs = num_of_runs
        self._num_of_lumis = num_of_lumis
        # short uid mixed into generated names to keep them unique
        self._uid = uuid.uuid4().time_mid
        self._tiers = ('RAW', 'GEN', 'SIM', 'RECO', 'AOD')
        #set starting values for the run number and lumi section to avoid duplicated entries in a block
        self._run_num = random.randint(1, 100)
        self._lumi_sec = random.randint(1, 100)
        self._files = {}
        self._file_parents = defaultdict(list)
    def load(self, filename):
        """Deserialize object from persistent data storage"""
        # NOTE(review): text mode works for protocol-0 pickles on Python 2;
        # Python 3 would require 'rb' -- confirm before porting
        with open(filename, 'r') as f:
            self.__dict__ = pickle.load(f)
    def save(self, filename):
        """Serialize object for persistent data storage"""
        with open(filename, 'w') as f:
            pickle.dump(self.__dict__, f)
    def reset(self):
        # Discard all cached/generated state and re-run __init__ with the
        # original shape parameters (a new uid and new random seeds result).
        init_parameters = (self._num_of_blocks, self._num_of_files,
                           self._num_of_runs, self._num_of_lumis)
        self.__dict__ = {}
        #re-initialise values
        self.__init__(*init_parameters)
    def block_dump(self):
        # Assemble the complete DBS block-dump payload (one dict per block),
        # pulling every piece from the lazily cached attributes so repeated
        # calls describe the same dataset.
        ret_val = []
        for block_name in self.blocks:
            files = self.files(block_name)
            logical_file_names = (this_file['logical_file_name'] for this_file in files)
            file_conf_list = [self._generate_file_conf(lfn) for lfn in logical_file_names]
            ret_val.append( \
                {'dataset_conf_list': [{'release_version' : self.release_version,
                                        'pset_hash' : self.pset_hash,
                                        'app_name' : self.app_name,
                                        'output_module_label' : self.output_module_label,
                                        'global_tag' : self.global_tag}],
                 'file_conf_list' : file_conf_list,
                 'files' : files,
                 'processing_era' : self.processing_era,
                 'primds' : self.primds,
                 'dataset':{'physics_group_name': self.physics_group_name,
                            'dataset_access_type': self.dataset_access_type,
                            'data_tier_name': self.tier,
                            'processed_ds_name': self.processed_dataset,
                            'xtcrosssection': self.xtc_cross_section,
                            'dataset': self.dataset_name},
                 'acquisition_era': self.acquisition_era,
                 'block': {'open_for_writing': self.block_is_open(block_name),
                           'block_name': block_name,
                           'file_count': len(files),
                           'origin_site_name': self.origin_site_name,
                           'block_size': sum((f['file_size'] for f in files))},
                 'file_parent_list': self.file_parent_list(block_name)
                })
        return ret_val
    def files(self, block_name):
        # Return (creating and caching on first call) the list of file dicts
        # for *block_name*.  File counters continue across blocks so logical
        # file names stay unique within the instance.
        if not (hasattr(self, '_files') and block_name in self._files):
            self._files[block_name] = []
            num_of_created_blocks = len(self._files)
            for i in xrange((num_of_created_blocks-1) * self._num_of_files,
                            num_of_created_blocks * self._num_of_files):
                logical_file_name = self._generate_file_name(i)
                self._files[block_name].append({'check_sum' : self._generate_cksum(),
                                                'file_size' : self._generate_file_size(),
                                                'file_lumi_list' : self._generate_file_lumi_list(),
                                                'adler32' : self._generate_adler32(),
                                                'event_count' : self._generate_event_count(),
                                                'file_type' : 'EDM',
                                                'logical_file_name' : logical_file_name,
                                                'md5' : None,
                                                'auto_cross_section' : self._generate_auto_cross_section()
                                                })
        return self._files[block_name]
    def file_parent_list(self, block_name, file_parent_list=None):
        """
        Combined Setter and getter function for self._file_parent
        self._file_parents is a defaultdict returning [] if the key is not present.
        Once the value is set to file_parent_list, it will return the value instead.
        """
        if file_parent_list:
            self._file_parents[block_name] = list(file_parent_list)
        return self._file_parents[block_name]
    def _generate_adler32(self):
        "generates adler32 checksum"
        # NOTE(review): a random 4-digit number, not a real adler32 of content
        return random.randint(1000, 9999)
    def _generate_auto_cross_section(self):
        "generate auto cross section for a given file, if not already available"
        return random.uniform(0.0, 100.0)
    def _generate_block_name(self):
        "generates new block name"
        # /<primary>/<processed>/<tier>#<uuid> -- the uuid makes each block unique
        return '/%s/%s/%s#%s' % (self.primary_ds_name,
                                 self.processed_dataset,
                                 self.tier,
                                 uuid.uuid4())
    def _generate_block_is_open(self):
        "generates block is open status"
        return random.randint(0, 1)
    def _generate_cksum(self):
        "generates checksum"
        return random.randint(1000, 9999)
    def _generate_event_count(self):
        "generate event count for a given file, if not already available"
        return random.randint(10, 10000)
    def _generate_file_conf(self, logical_file_name):
        # file configuration record tying one lfn to the (shared) app config
        return {'release_version': self.release_version,
                'pset_hash': self.pset_hash,
                'lfn': logical_file_name,
                'app_name': self.app_name,
                'output_module_label': self.output_module_label,
                'global_tag': self.global_tag}
    def _generate_file_name(self, file_counter):
        "generates new file name"
        # counter is the constant path segment '000000000'; uniqueness comes
        # from the instance uid and the running file_counter suffix
        counter = str(0).zfill(9)
        return '/store/data/%s/%s/%s/%s/%s/%s_%s.root' % \
            (self.acquisition_era_name,
             self.primary_ds_name,
             self.tier,
             self.processing_version,
             counter,
             self._uid,
             file_counter)
    def _generate_file_size(self, func='gauss', params=(1000000000, 90000000)):
        "generates new file size"
        # draw from the named random distribution (default: gauss with mean
        # 1 GB, sigma 90 MB) and clamp to a non-negative integer
        return int(abs(getattr(random, func)(*params)))
    def _generate_file_lumi_list(self):
        "generate file lumi list for a given file, if not already available"
        # run/lumi counters are instance-wide and strictly increasing, so no
        # (run, lumi) pair repeats within one provider instance
        output = []
        for _ in xrange(0, self._num_of_runs):
            self._run_num += 1
            for _ in range(0, self._num_of_lumis):
                self._lumi_sec += 1
                row = dict(run_num=self._run_num, lumi_section_num=self._lumi_sec)
                output.append(row)
        return output
    @property
    def acquisition_era_name(self):
        "return acquisition era name"
        if not hasattr(self, '_acquisition_era_name'):
            self._acquisition_era_name = "acq_era_%s" % self._uid
        return self._acquisition_era_name
    @property
    def acquisition_era(self):
        "return acquisition era object"
        if not hasattr(self, '_acquisition_era'):
            self._acquisition_era = {"acquisition_era_name": self.acquisition_era_name,
                                     'start_date': 1234567890,
                                     "description": "Test_acquisition_era"}
        return self._acquisition_era
    @property
    def app_name(self):
        "return application name"
        if not hasattr(self, '_app_name'):
            self._app_name = 'cmsRun%s' % self._uid
        return self._app_name
    @property
    def blocks(self):
        "return list of blocks"
        if not hasattr(self, '_blocks'):
            self._blocks = []
            for i in xrange(self._num_of_blocks):
                self._blocks.append(self._generate_block_name())
        return self._blocks
    def block_is_open(self, block_name):
        "return block is open"
        # per-block cached open/closed flag (0 or 1), created on first query
        if not hasattr(self, '_block_is_open'):
            self._block_is_open = {block_name : self._generate_block_is_open()}
        elif block_name not in self._block_is_open:
            self._block_is_open.update({block_name : self._generate_block_is_open()})
        return self._block_is_open[block_name]
    @property
    def dataset_access_type(self):
        "return dataset access type"
        if not hasattr(self, '_dataset_access_type'):
            self._dataset_access_type = "VALID"
        return self._dataset_access_type
    @property
    def dataset_name(self):
        "return dataset name"
        if not hasattr(self, "_dataset_name"):
            self._dataset_name = '/%s/%s/%s' % \
                (self.primary_ds_name,
                 self.processed_dataset,
                 self.tier)
        return self._dataset_name
    @property
    def global_tag(self):
        "return global tag"
        if not hasattr(self, '_global_tag'):
            self._global_tag = 'dbs-unit-test-%s' % self._uid
        return self._global_tag
    @property
    def origin_site_name(self):
        "return origin site name"
        if not hasattr(self, '_origin_site_name'):
            self._origin_site_name = 'grid-srm.physik.rwth-aachen.de'
        return self._origin_site_name
    @property
    def output_config(self):
        "Generate DBS output config meta-data"
        rec = dict(configs=\
                   dict(release_version=self.release_version, pset_hash=self.pset_hash, app_name=self.app_name,
                        output_module_label=self.output_module_label, global_tag=self.global_tag))
        return rec
    @property
    def output_module_label(self):
        "return output module label"
        if not hasattr(self, '_output_module_label'):
            self._output_module_label = 'Merged'
        return self._output_module_label
    @property
    def physics_group_name(self):
        "return physics group name"
        if not hasattr(self, "_physics_group_name"):
            self._physics_group_name = "Tracker"
        return self._physics_group_name
    @property
    def primary_ds_name(self):
        "return primary dataset name"
        if not hasattr(self, '_primary_ds_name'):
            self._primary_ds_name = 'unittest_web_primary_ds_name_%s' % self._uid
        return self._primary_ds_name
    @property
    def primary_ds_type(self):
        "return primary dataset type"
        if not hasattr(self, '_primary_ds_type'):
            primary_ds_types = ['mc', 'data']
            self._primary_ds_type = primary_ds_types[random.randint(0, 1)]
        return self._primary_ds_type
    @property
    def primds(self):
        "return primary dataset object"
        if not hasattr(self, '_primds'):
            self._primds = {"primary_ds_type": self.primary_ds_type,
                            "primary_ds_name": self.primary_ds_name}
        return self._primds
    @property
    def processed_dataset_name(self):
        "return processed dataset name"
        if not hasattr(self, '_processed_dataset_name'):
            self._processed_dataset_name = 'unittest_web_dataset'
        return self._processed_dataset_name
    @property
    def processed_dataset(self):
        "return processed dataset path"
        if not hasattr(self, '_processed_dataset'):
            self._processed_dataset = '%s-%s-v%s' % \
                (self.acquisition_era_name,
                 self.processed_dataset_name,
                 self.processing_version)
        return self._processed_dataset
    @property
    def processing_era(self):
        "return processing era object"
        if not hasattr(self, '_processing_era'):
            self._processing_era = {"processing_version": self.processing_version,
                                    "description": "Test_proc_era"}
        return self._processing_era
    @property
    def pset_hash(self):
        "return parameter set hash"
        if not hasattr(self, '_pset_hash'):
            self._pset_hash = '76e303993a1c2f842159dbfeeed9a0dd%s' % self._uid
        return self._pset_hash
    @property
    def processing_version(self):
        "return processing version"
        if not hasattr(self, '_processing_version'):
            self._processing_version = random.randint(1, 100)
        return self._processing_version
    @property
    def release_version(self):
        "return release version"
        if not hasattr(self, '_release_version'):
            self._release_version = 'CMSSW_1_2_%s' % self._uid
        return self._release_version
    @property
    def tier(self):
        "return tier name"
        if not hasattr(self, '_tier'):
            self._tier = self._tiers[random.randint(0, len(self._tiers)-1)]
        return self._tier
    @property
    def xtc_cross_section(self):
        "return cross section value"
        if not hasattr(self, '_xtc_cross_section'):
            self._xtc_cross_section = random.uniform(0.0, 1000.0)
        return self._xtc_cross_section
| 39.986111 | 138 | 0.622308 | from itertools import izip
from collections import defaultdict
import cPickle as pickle
import getpass
import os.path
import uuid
import time
import os
import random
def create_dbs_data_provider(data_type='persistent',data_location=None):
data_types = {'persistent' : DBSDataProvider(DBSPersistentData()),
'transient' : DBSDataProvider(DBSTransientData(data_location=data_location))}
return data_types.get(data_type, None)
def sort_data(data, sort_key):
return sorted(data, key=lambda entry: entry[sort_key])
def strip_volatile_fields(data):
volatile_fields = ['block_id', 'parent_block_id', 'branch_hash_id',
'dataset_id', 'parent_dataset_id', 'data_tier_id',
'file_id', 'parent_file_id', 'file_type_id',
'primary_ds_id', 'primary_ds_type_id', 'description']
if isinstance(data, list):
return [strip_volatile_fields(entry) for entry in data]
for key in data.keys():
if key in volatile_fields:
del data[key]
return data
class DBSTransientData(object):
unixtime = 0
unique_hash = 0
instance_count = 0
def __init__(self, data_location):
if self.instance_count == 0:
self.reset_unique_ids()
self.__class__.instance_count += 1
self.username = getpass.getuser()
self.data = {}
self.data_location = data_location
self.template_data_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../data/template_transient_test_data.pkl')
def __del__(self):
self.__class__.instance_count -= 1
def get_data(self, key):
if key not in self.data:
self.load_data(key)
return self.data.get(key)
def load_data(self, key):
test_data_file = file(self.data_location, "r")
pkl_test_data = pickle.load(test_data_file)
test_data_file.close()
if isinstance(pkl_test_data, dict) and key in pkl_test_data:
self.data.update(pkl_test_data)
else:
raise TypeError("Input file %s does not contain the right format!" % (self.data_location))
def save_data(self):
test_data_file = file(self.data_location, "w")
pkl_test_data = pickle.dump(self.data, test_data_file)
test_data_file.close()
def generate_data(self, key):
template_data_file = file(self.template_data_location, 'r')
template_test_data = pickle.load(template_data_file)
if not (isinstance(template_test_data, dict) and key in template_test_data):
raise TypeError("Template file %s does not contain the right format!" % (self.template_data_location))
template_data = template_test_data.get(key)
generated_data = []
for list_entry in template_data:
for entry, value in list_entry.iteritems():
if isinstance(value, str):
if value.find("@unique_id@") != -1:
list_entry[entry] = list_entry[entry].replace("@unique_id@", self.unixtime)
if value.find("@date@") != -1:
list_entry[entry] = list_entry[entry].replace("@date@", self.unixtime)
if value.find("@user@") != -1:
list_entry[entry] = list_entry[entry].replace("@user@", self.username)
if value.find("@unique_hash@") != -1:
list_entry[entry] = list_entry[entry].replace("@unique_hash@", self.unique_hash)
if value.find("@unique_id_9999@") != -1:
list_entry[entry] = list_entry[entry].replace("@unique_id_9999@", str(int(self.unixtime)%9999))
#except for md5, adler32 and checksum
if list_entry[entry].isdigit() and entry not in ['md5', 'adler32', 'check_sum']:
list_entry[entry] = int(list_entry[entry])
generated_data.append(list_entry)
generated_data = {key : generated_data}
self.data.update(generated_data)
self.save_data()
@classmethod
def reset_unique_ids(cls):
cls.unixtime = str(int(time.time()))
cls.unique_hash = str(uuid.uuid1()).replace('-', '')
class DBSPersistentData(object):
def __init__(self, data_location=None):
self.data = {}
if data_location:
self.data_location = data_location
else:
self.data_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../data/persistent_test_data.pkl')
def get_data(self, key):
if key not in self.data:
self.load_data(key)
return self.data.get(key)
def load_data(self, key):
test_data_file = file(self.data_location, "r")
pkl_test_data = pickle.load(test_data_file)
test_data_file.close()
if isinstance(pkl_test_data, dict) and key in pkl_test_data:
self.data.update(pkl_test_data)
else:
raise TypeError("Input file %s does not have the right format or does not contain key %s!" % (self.data_location, key))
def save_data(self):
raise NotImplemented("You cannot overwrite persistent data!")
def generate_data(self, key):
raise NotImplemented("You cannot re-generate persistent data!")
class DBSDataProvider(object):
def __init__(self, data_store):
self.data_store = data_store
def get_acquisition_era_data(self, regenerate=False):
return self.get_data(key="acquisition_era", regenerate=regenerate)
def get_block_data(self, regenerate=False):
return self.get_data(key="block", regenerate=regenerate)
def get_block_parentage_data(self, regenerate=False):
return self.get_data(key="block_parentage", regenerate=regenerate)
def get_child_block_data(self, regenerate=False):
return self.get_data(key="child_block", regenerate=regenerate)
def get_child_dataset_data(self, regenerate=False):
return self.get_data(key="child_dataset", regenerate=regenerate)
def get_child_file_data(self, regenerate=False):
return self.get_data(key="child_file", regenerate=regenerate)
def get_dataset_data(self, regenerate=False):
return self.get_data(key="dataset", regenerate=regenerate)
def get_dataset_parentage_data(self, regenerate=False):
return self.get_data(key="dataset_parentage", regenerate=regenerate)
def get_data_tier_data(self, regenerate=False):
return self.get_data(key="data_tier", regenerate=regenerate)
def get_file_data(self, regenerate=False):
return self.get_data(key="file", regenerate=regenerate)
def get_file_lumi_data(self, regenerate=False):
return self.get_data(key="file_lumi", regenerate=regenerate)
def get_file_parentage_data(self, regenerate=False):
return self.get_data(key="file_parentage", regenerate=regenerate)
def get_output_module_config_data(self, regenerate=False):
return self.get_data(key="output_module_config", regenerate=regenerate)
def get_physics_group_data(self, regenerate=False):
return sort_data(self.get_data(key="physics_group", regenerate=regenerate), 'physics_group_name')
def get_primary_dataset_data(self, regenerate=False):
return self.get_data(key="primary_dataset", regenerate=regenerate)
def get_primary_ds_type_data(self,regenerate=False):
return sort_data(self.get_data(key="primary_ds_type", regenerate=regenerate), 'data_type')
def get_processing_era_data(self, regenerate=False):
return self.get_data(key="processing_era", regenerate=regenerate)
def get_data(self, key, regenerate):
if regenerate:
self.data_store.generate_data(key=key)
return self.data_store.get_data(key=key)
def create_child_data_provider(parent_data_provider):
parameters = (parent_data_provider._num_of_blocks,
parent_data_provider._num_of_files,
parent_data_provider._num_of_runs,
parent_data_provider._num_of_lumis)
child_data_provider = DBSBlockDataProvider(*parameters)
parent_block_dump = parent_data_provider.block_dump()
child_block_dump = child_data_provider.block_dump()
for parent_block, child_block in izip(parent_block_dump, child_block_dump):
parent_logical_file_names = (this_file['logical_file_name'] for this_file in parent_block['files'])
child_logical_file_names = (this_file['logical_file_name'] for this_file in child_block['files'])
file_parent_list = []
for parent_logical_file_name, child_logical_file_name in izip(parent_logical_file_names,
child_logical_file_names):
file_parent_list.append(dict(parent_logical_file_name=parent_logical_file_name,
logical_file_name=child_logical_file_name))
child_data_provider.file_parent_list(child_block['block']['block_name'],
file_parent_list)
return child_data_provider
class DBSBlockDataProvider(object):
def __init__(self, num_of_blocks=1, num_of_files=10, num_of_runs=10, num_of_lumis=10):
self._num_of_blocks = num_of_blocks
self._num_of_files = num_of_files
self._num_of_runs = num_of_runs
self._num_of_lumis = num_of_lumis
self._uid = uuid.uuid4().time_mid
self._tiers = ('RAW', 'GEN', 'SIM', 'RECO', 'AOD')
#set starting values for the run number and lumi section to avoid duplicated entries in a block
self._run_num = random.randint(1, 100)
self._lumi_sec = random.randint(1, 100)
self._files = {}
self._file_parents = defaultdict(list)
def load(self, filename):
with open(filename, 'r') as f:
self.__dict__ = pickle.load(f)
def save(self, filename):
with open(filename, 'w') as f:
pickle.dump(self.__dict__, f)
def reset(self):
init_parameters = (self._num_of_blocks, self._num_of_files,
self._num_of_runs, self._num_of_lumis)
self.__dict__ = {}
#re-initialise values
self.__init__(*init_parameters)
def block_dump(self):
ret_val = []
for block_name in self.blocks:
files = self.files(block_name)
logical_file_names = (this_file['logical_file_name'] for this_file in files)
file_conf_list = [self._generate_file_conf(lfn) for lfn in logical_file_names]
ret_val.append( \
{'dataset_conf_list': [{'release_version' : self.release_version,
'pset_hash' : self.pset_hash,
'app_name' : self.app_name,
'output_module_label' : self.output_module_label,
'global_tag' : self.global_tag}],
'file_conf_list' : file_conf_list,
'files' : files,
'processing_era' : self.processing_era,
'primds' : self.primds,
'dataset':{'physics_group_name': self.physics_group_name,
'dataset_access_type': self.dataset_access_type,
'data_tier_name': self.tier,
'processed_ds_name': self.processed_dataset,
'xtcrosssection': self.xtc_cross_section,
'dataset': self.dataset_name},
'acquisition_era': self.acquisition_era,
'block': {'open_for_writing': self.block_is_open(block_name),
'block_name': block_name,
'file_count': len(files),
'origin_site_name': self.origin_site_name,
'block_size': sum((f['file_size'] for f in files))},
'file_parent_list': self.file_parent_list(block_name)
})
return ret_val
def files(self, block_name):
if not (hasattr(self, '_files') and block_name in self._files):
self._files[block_name] = []
num_of_created_blocks = len(self._files)
for i in xrange((num_of_created_blocks-1) * self._num_of_files,
num_of_created_blocks * self._num_of_files):
logical_file_name = self._generate_file_name(i)
self._files[block_name].append({'check_sum' : self._generate_cksum(),
'file_size' : self._generate_file_size(),
'file_lumi_list' : self._generate_file_lumi_list(),
'adler32' : self._generate_adler32(),
'event_count' : self._generate_event_count(),
'file_type' : 'EDM',
'logical_file_name' : logical_file_name,
'md5' : None,
'auto_cross_section' : self._generate_auto_cross_section()
})
return self._files[block_name]
def file_parent_list(self, block_name, file_parent_list=None):
if file_parent_list:
self._file_parents[block_name] = list(file_parent_list)
return self._file_parents[block_name]
def _generate_adler32(self):
return random.randint(1000, 9999)
def _generate_auto_cross_section(self):
return random.uniform(0.0, 100.0)
def _generate_block_name(self):
return '/%s/%s/%s
self.processed_dataset,
self.tier,
uuid.uuid4())
def _generate_block_is_open(self):
return random.randint(0, 1)
def _generate_cksum(self):
return random.randint(1000, 9999)
def _generate_event_count(self):
return random.randint(10, 10000)
def _generate_file_conf(self, logical_file_name):
return {'release_version': self.release_version,
'pset_hash': self.pset_hash,
'lfn': logical_file_name,
'app_name': self.app_name,
'output_module_label': self.output_module_label,
'global_tag': self.global_tag}
def _generate_file_name(self, file_counter):
counter = str(0).zfill(9)
return '/store/data/%s/%s/%s/%s/%s/%s_%s.root' % \
(self.acquisition_era_name,
self.primary_ds_name,
self.tier,
self.processing_version,
counter,
self._uid,
file_counter)
def _generate_file_size(self, func='gauss', params=(1000000000, 90000000)):
return int(abs(getattr(random, func)(*params)))
def _generate_file_lumi_list(self):
output = []
for _ in xrange(0, self._num_of_runs):
self._run_num += 1
for _ in range(0, self._num_of_lumis):
self._lumi_sec += 1
row = dict(run_num=self._run_num, lumi_section_num=self._lumi_sec)
output.append(row)
return output
@property
def acquisition_era_name(self):
if not hasattr(self, '_acquisition_era_name'):
self._acquisition_era_name = "acq_era_%s" % self._uid
return self._acquisition_era_name
@property
def acquisition_era(self):
if not hasattr(self, '_acquisition_era'):
self._acquisition_era = {"acquisition_era_name": self.acquisition_era_name,
'start_date': 1234567890,
"description": "Test_acquisition_era"}
return self._acquisition_era
@property
def app_name(self):
if not hasattr(self, '_app_name'):
self._app_name = 'cmsRun%s' % self._uid
return self._app_name
@property
def blocks(self):
if not hasattr(self, '_blocks'):
self._blocks = []
for i in xrange(self._num_of_blocks):
self._blocks.append(self._generate_block_name())
return self._blocks
def block_is_open(self, block_name):
if not hasattr(self, '_block_is_open'):
self._block_is_open = {block_name : self._generate_block_is_open()}
elif block_name not in self._block_is_open:
self._block_is_open.update({block_name : self._generate_block_is_open()})
return self._block_is_open[block_name]
@property
def dataset_access_type(self):
if not hasattr(self, '_dataset_access_type'):
self._dataset_access_type = "VALID"
return self._dataset_access_type
@property
def dataset_name(self):
if not hasattr(self, "_dataset_name"):
self._dataset_name = '/%s/%s/%s' % \
(self.primary_ds_name,
self.processed_dataset,
self.tier)
return self._dataset_name
@property
def global_tag(self):
if not hasattr(self, '_global_tag'):
self._global_tag = 'dbs-unit-test-%s' % self._uid
return self._global_tag
@property
def origin_site_name(self):
if not hasattr(self, '_origin_site_name'):
self._origin_site_name = 'grid-srm.physik.rwth-aachen.de'
return self._origin_site_name
@property
def output_config(self):
rec = dict(configs=\
dict(release_version=self.release_version, pset_hash=self.pset_hash, app_name=self.app_name,
output_module_label=self.output_module_label, global_tag=self.global_tag))
return rec
@property
def output_module_label(self):
if not hasattr(self, '_output_module_label'):
self._output_module_label = 'Merged'
return self._output_module_label
@property
def physics_group_name(self):
if not hasattr(self, "_physics_group_name"):
self._physics_group_name = "Tracker"
return self._physics_group_name
@property
def primary_ds_name(self):
if not hasattr(self, '_primary_ds_name'):
self._primary_ds_name = 'unittest_web_primary_ds_name_%s' % self._uid
return self._primary_ds_name
@property
def primary_ds_type(self):
if not hasattr(self, '_primary_ds_type'):
primary_ds_types = ['mc', 'data']
self._primary_ds_type = primary_ds_types[random.randint(0, 1)]
return self._primary_ds_type
@property
def primds(self):
if not hasattr(self, '_primds'):
self._primds = {"primary_ds_type": self.primary_ds_type,
"primary_ds_name": self.primary_ds_name}
return self._primds
@property
def processed_dataset_name(self):
if not hasattr(self, '_processed_dataset_name'):
self._processed_dataset_name = 'unittest_web_dataset'
return self._processed_dataset_name
@property
def processed_dataset(self):
if not hasattr(self, '_processed_dataset'):
self._processed_dataset = '%s-%s-v%s' % \
(self.acquisition_era_name,
self.processed_dataset_name,
self.processing_version)
return self._processed_dataset
@property
def processing_era(self):
if not hasattr(self, '_processing_era'):
self._processing_era = {"processing_version": self.processing_version,
"description": "Test_proc_era"}
return self._processing_era
@property
def pset_hash(self):
if not hasattr(self, '_pset_hash'):
self._pset_hash = '76e303993a1c2f842159dbfeeed9a0dd%s' % self._uid
return self._pset_hash
@property
def processing_version(self):
if not hasattr(self, '_processing_version'):
self._processing_version = random.randint(1, 100)
return self._processing_version
@property
def release_version(self):
if not hasattr(self, '_release_version'):
self._release_version = 'CMSSW_1_2_%s' % self._uid
return self._release_version
@property
def tier(self):
if not hasattr(self, '_tier'):
self._tier = self._tiers[random.randint(0, len(self._tiers)-1)]
return self._tier
@property
def xtc_cross_section(self):
if not hasattr(self, '_xtc_cross_section'):
self._xtc_cross_section = random.uniform(0.0, 1000.0)
return self._xtc_cross_section
| true | true |
1c2e18717f79affbbd5759b5c38a77e944e4f5e5 | 16,165 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20200401/subnet.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/network/v20200401/subnet.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/network/v20200401/subnet.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Subnet']
class Subnet(pulumi.CustomResource):
    """Subnet in a virtual network resource (Azure API version 2020-04-01).

    Machine-generated Pulumi resource wrapper — regenerate rather than edit
    by hand (see the generator warning at the top of this file).
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 address_prefix: Optional[pulumi.Input[str]] = None,
                 address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 delegations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DelegationArgs']]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_allocations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 nat_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 network_security_group: Optional[pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']]] = None,
                 private_endpoint_network_policies: Optional[pulumi.Input[str]] = None,
                 private_link_service_network_policies: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_table: Optional[pulumi.Input[pulumi.InputType['RouteTableArgs']]] = None,
                 service_endpoint_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyArgs']]]]] = None,
                 service_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPropertiesFormatArgs']]]]] = None,
                 subnet_name: Optional[pulumi.Input[str]] = None,
                 virtual_network_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Subnet in a virtual network resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] address_prefix: The address prefix for the subnet.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] address_prefixes: List of address prefixes for the subnet.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DelegationArgs']]]] delegations: An array of references to the delegations on the subnet.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] ip_allocations: Array of IpAllocation which reference this subnet.
        :param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] nat_gateway: Nat gateway associated with this subnet.
        :param pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']] network_security_group: The reference to the NetworkSecurityGroup resource.
        :param pulumi.Input[str] private_endpoint_network_policies: Enable or Disable apply network policies on private end point in the subnet.
        :param pulumi.Input[str] private_link_service_network_policies: Enable or Disable apply network policies on private link service in the subnet.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[pulumi.InputType['RouteTableArgs']] route_table: The reference to the RouteTable resource.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyArgs']]]] service_endpoint_policies: An array of service endpoint policies.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPropertiesFormatArgs']]]] service_endpoints: An array of service endpoints.
        :param pulumi.Input[str] subnet_name: The name of the subnet.
        :param pulumi.Input[str] virtual_network_name: The name of the virtual network.
        """
        # Backwards-compatibility shims for the deprecated positional
        # __name__/__opts__ arguments.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource" (see get() below);
        # inputs are only collected when creating a new resource.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['address_prefix'] = address_prefix
            __props__['address_prefixes'] = address_prefixes
            __props__['delegations'] = delegations
            __props__['id'] = id
            __props__['ip_allocations'] = ip_allocations
            __props__['name'] = name
            __props__['nat_gateway'] = nat_gateway
            __props__['network_security_group'] = network_security_group
            __props__['private_endpoint_network_policies'] = private_endpoint_network_policies
            __props__['private_link_service_network_policies'] = private_link_service_network_policies
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['route_table'] = route_table
            __props__['service_endpoint_policies'] = service_endpoint_policies
            __props__['service_endpoints'] = service_endpoints
            __props__['subnet_name'] = subnet_name
            if virtual_network_name is None and not opts.urn:
                raise TypeError("Missing required property 'virtual_network_name'")
            __props__['virtual_network_name'] = virtual_network_name
            # Output-only properties start as None and are filled in by the
            # provider after creation.
            __props__['etag'] = None
            __props__['ip_configuration_profiles'] = None
            __props__['ip_configurations'] = None
            __props__['private_endpoints'] = None
            __props__['provisioning_state'] = None
            __props__['purpose'] = None
            __props__['resource_navigation_links'] = None
            __props__['service_association_links'] = None
        # Register aliases for every other API version of this resource so
        # that switching versions does not cause a replace.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:Subnet"), pulumi.Alias(type_="azure-nextgen:network/latest:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20150615:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160330:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20161201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170301:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20171101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180401:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181001:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190401:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20191101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20191201:Subnet"), 
pulumi.Alias(type_="azure-nextgen:network/v20200301:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200501:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200801:Subnet")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Subnet, __self__).__init__(
            'azure-nextgen:network/v20200401:Subnet',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Subnet':
        """
        Get an existing Subnet resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # NOTE(review): no input properties are set here; presumably the
        # engine hydrates outputs from the live resource state — confirm
        # against the Pulumi SDK generator conventions.
        __props__ = dict()
        return Subnet(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[Optional[str]]:
        """
        The address prefix for the subnet.
        """
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter(name="addressPrefixes")
    def address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        List of address prefixes for the subnet.
        """
        return pulumi.get(self, "address_prefixes")
    @property
    @pulumi.getter
    def delegations(self) -> pulumi.Output[Optional[Sequence['outputs.DelegationResponse']]]:
        """
        An array of references to the delegations on the subnet.
        """
        return pulumi.get(self, "delegations")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="ipAllocations")
    def ip_allocations(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """
        Array of IpAllocation which reference this subnet.
        """
        return pulumi.get(self, "ip_allocations")
    @property
    @pulumi.getter(name="ipConfigurationProfiles")
    def ip_configuration_profiles(self) -> pulumi.Output[Sequence['outputs.IPConfigurationProfileResponse']]:
        """
        Array of IP configuration profiles which reference this subnet.
        """
        return pulumi.get(self, "ip_configuration_profiles")
    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> pulumi.Output[Sequence['outputs.IPConfigurationResponse']]:
        """
        An array of references to the network interface IP configurations using subnet.
        """
        return pulumi.get(self, "ip_configurations")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="natGateway")
    def nat_gateway(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        Nat gateway associated with this subnet.
        """
        return pulumi.get(self, "nat_gateway")
    @property
    @pulumi.getter(name="networkSecurityGroup")
    def network_security_group(self) -> pulumi.Output[Optional['outputs.NetworkSecurityGroupResponse']]:
        """
        The reference to the NetworkSecurityGroup resource.
        """
        return pulumi.get(self, "network_security_group")
    @property
    @pulumi.getter(name="privateEndpointNetworkPolicies")
    def private_endpoint_network_policies(self) -> pulumi.Output[Optional[str]]:
        """
        Enable or Disable apply network policies on private end point in the subnet.
        """
        return pulumi.get(self, "private_endpoint_network_policies")
    @property
    @pulumi.getter(name="privateEndpoints")
    def private_endpoints(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointResponse']]:
        """
        An array of references to private endpoints.
        """
        return pulumi.get(self, "private_endpoints")
    @property
    @pulumi.getter(name="privateLinkServiceNetworkPolicies")
    def private_link_service_network_policies(self) -> pulumi.Output[Optional[str]]:
        """
        Enable or Disable apply network policies on private link service in the subnet.
        """
        return pulumi.get(self, "private_link_service_network_policies")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the subnet resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def purpose(self) -> pulumi.Output[str]:
        """
        A read-only string identifying the intention of use for this subnet based on delegations and other user-defined properties.
        """
        return pulumi.get(self, "purpose")
    @property
    @pulumi.getter(name="resourceNavigationLinks")
    def resource_navigation_links(self) -> pulumi.Output[Sequence['outputs.ResourceNavigationLinkResponse']]:
        """
        An array of references to the external resources using subnet.
        """
        return pulumi.get(self, "resource_navigation_links")
    @property
    @pulumi.getter(name="routeTable")
    def route_table(self) -> pulumi.Output[Optional['outputs.RouteTableResponse']]:
        """
        The reference to the RouteTable resource.
        """
        return pulumi.get(self, "route_table")
    @property
    @pulumi.getter(name="serviceAssociationLinks")
    def service_association_links(self) -> pulumi.Output[Sequence['outputs.ServiceAssociationLinkResponse']]:
        """
        An array of references to services injecting into this subnet.
        """
        return pulumi.get(self, "service_association_links")
    @property
    @pulumi.getter(name="serviceEndpointPolicies")
    def service_endpoint_policies(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceEndpointPolicyResponse']]]:
        """
        An array of service endpoint policies.
        """
        return pulumi.get(self, "service_endpoint_policies")
    @property
    @pulumi.getter(name="serviceEndpoints")
    def service_endpoints(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceEndpointPropertiesFormatResponse']]]:
        """
        An array of service endpoints.
        """
        return pulumi.get(self, "service_endpoints")
    def translate_output_property(self, prop):
        """Map a camelCase provider property name to snake_case."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        """Map a snake_case SDK property name to the provider's camelCase."""
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 54.063545 | 2,279 | 0.691432 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Subnet']
class Subnet(pulumi.CustomResource):
    """Subnet in a virtual network resource (Azure API version 2020-04-01).

    NOTE(review): looks machine-generated (Pulumi SDK generator pattern) —
    prefer regenerating over editing by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 address_prefix: Optional[pulumi.Input[str]] = None,
                 address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 delegations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DelegationArgs']]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_allocations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 nat_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 network_security_group: Optional[pulumi.Input[pulumi.InputType['NetworkSecurityGroupArgs']]] = None,
                 private_endpoint_network_policies: Optional[pulumi.Input[str]] = None,
                 private_link_service_network_policies: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_table: Optional[pulumi.Input[pulumi.InputType['RouteTableArgs']]] = None,
                 service_endpoint_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPolicyArgs']]]]] = None,
                 service_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceEndpointPropertiesFormatArgs']]]]] = None,
                 subnet_name: Optional[pulumi.Input[str]] = None,
                 virtual_network_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """Create a Subnet resource.

        ``resource_group_name`` and ``virtual_network_name`` are required
        when creating (enforced below); all other inputs are optional and
        mirror the Azure REST model for a subnet.
        """
        # Backwards-compatibility shims for the deprecated positional
        # __name__/__opts__ arguments.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource" (see get() below);
        # inputs are only collected when creating a new resource.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['address_prefix'] = address_prefix
            __props__['address_prefixes'] = address_prefixes
            __props__['delegations'] = delegations
            __props__['id'] = id
            __props__['ip_allocations'] = ip_allocations
            __props__['name'] = name
            __props__['nat_gateway'] = nat_gateway
            __props__['network_security_group'] = network_security_group
            __props__['private_endpoint_network_policies'] = private_endpoint_network_policies
            __props__['private_link_service_network_policies'] = private_link_service_network_policies
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['route_table'] = route_table
            __props__['service_endpoint_policies'] = service_endpoint_policies
            __props__['service_endpoints'] = service_endpoints
            __props__['subnet_name'] = subnet_name
            if virtual_network_name is None and not opts.urn:
                raise TypeError("Missing required property 'virtual_network_name'")
            __props__['virtual_network_name'] = virtual_network_name
            # Output-only properties start as None and are filled in by the
            # provider after creation.
            __props__['etag'] = None
            __props__['ip_configuration_profiles'] = None
            __props__['ip_configurations'] = None
            __props__['private_endpoints'] = None
            __props__['provisioning_state'] = None
            __props__['purpose'] = None
            __props__['resource_navigation_links'] = None
            __props__['service_association_links'] = None
        # Aliases for every other API version of this resource type, so that
        # switching versions does not cause a replace.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:Subnet"), pulumi.Alias(type_="azure-nextgen:network/latest:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20150615:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160330:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20160901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20161201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170301:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20170901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20171101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180401:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20180801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181001:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20181201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190201:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190401:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190801:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20190901:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20191101:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20191201:Subnet"), 
pulumi.Alias(type_="azure-nextgen:network/v20200301:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200501:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200601:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200701:Subnet"), pulumi.Alias(type_="azure-nextgen:network/v20200801:Subnet")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Subnet, __self__).__init__(
            'azure-nextgen:network/v20200401:Subnet',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Subnet':
        """Look up an existing Subnet resource by its provider ``id``."""
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # NOTE(review): no input properties are set here; presumably the
        # engine hydrates outputs from the live resource state — confirm
        # against the Pulumi SDK generator conventions.
        __props__ = dict()
        return Subnet(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[Optional[str]]:
        """The address prefix for the subnet."""
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter(name="addressPrefixes")
    def address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """List of address prefixes for the subnet."""
        return pulumi.get(self, "address_prefixes")
    @property
    @pulumi.getter
    def delegations(self) -> pulumi.Output[Optional[Sequence['outputs.DelegationResponse']]]:
        """References to the delegations on the subnet."""
        return pulumi.get(self, "delegations")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """Read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="ipAllocations")
    def ip_allocations(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """IpAllocations which reference this subnet."""
        return pulumi.get(self, "ip_allocations")
    @property
    @pulumi.getter(name="ipConfigurationProfiles")
    def ip_configuration_profiles(self) -> pulumi.Output[Sequence['outputs.IPConfigurationProfileResponse']]:
        """IP configuration profiles which reference this subnet."""
        return pulumi.get(self, "ip_configuration_profiles")
    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> pulumi.Output[Sequence['outputs.IPConfigurationResponse']]:
        """Network interface IP configurations using this subnet."""
        return pulumi.get(self, "ip_configurations")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """Resource name, unique within the resource group."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="natGateway")
    def nat_gateway(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """Nat gateway associated with this subnet."""
        return pulumi.get(self, "nat_gateway")
    @property
    @pulumi.getter(name="networkSecurityGroup")
    def network_security_group(self) -> pulumi.Output[Optional['outputs.NetworkSecurityGroupResponse']]:
        """Reference to the NetworkSecurityGroup resource."""
        return pulumi.get(self, "network_security_group")
    @property
    @pulumi.getter(name="privateEndpointNetworkPolicies")
    def private_endpoint_network_policies(self) -> pulumi.Output[Optional[str]]:
        """Enable or disable network policies on private endpoints in the subnet."""
        return pulumi.get(self, "private_endpoint_network_policies")
    @property
    @pulumi.getter(name="privateEndpoints")
    def private_endpoints(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointResponse']]:
        """References to private endpoints."""
        return pulumi.get(self, "private_endpoints")
    @property
    @pulumi.getter(name="privateLinkServiceNetworkPolicies")
    def private_link_service_network_policies(self) -> pulumi.Output[Optional[str]]:
        """Enable or disable network policies on private link services in the subnet."""
        return pulumi.get(self, "private_link_service_network_policies")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """Provisioning state of the subnet resource."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def purpose(self) -> pulumi.Output[str]:
        """Read-only string describing the intended use of this subnet."""
        return pulumi.get(self, "purpose")
    @property
    @pulumi.getter(name="resourceNavigationLinks")
    def resource_navigation_links(self) -> pulumi.Output[Sequence['outputs.ResourceNavigationLinkResponse']]:
        """References to external resources using this subnet."""
        return pulumi.get(self, "resource_navigation_links")
    @property
    @pulumi.getter(name="routeTable")
    def route_table(self) -> pulumi.Output[Optional['outputs.RouteTableResponse']]:
        """Reference to the RouteTable resource."""
        return pulumi.get(self, "route_table")
    @property
    @pulumi.getter(name="serviceAssociationLinks")
    def service_association_links(self) -> pulumi.Output[Sequence['outputs.ServiceAssociationLinkResponse']]:
        """References to services injecting into this subnet."""
        return pulumi.get(self, "service_association_links")
    @property
    @pulumi.getter(name="serviceEndpointPolicies")
    def service_endpoint_policies(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceEndpointPolicyResponse']]]:
        """Service endpoint policies."""
        return pulumi.get(self, "service_endpoint_policies")
    @property
    @pulumi.getter(name="serviceEndpoints")
    def service_endpoints(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceEndpointPropertiesFormatResponse']]]:
        """Service endpoints."""
        return pulumi.get(self, "service_endpoints")
    def translate_output_property(self, prop):
        """Map a camelCase provider property name to snake_case."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        """Map a snake_case SDK property name to the provider's camelCase."""
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
1c2e18e9cbd368c6827997206237fb7931ec5395 | 12,886 | py | Python | nemo/collections/asr/parts/submodules/multi_head_attention.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 1 | 2022-03-08T02:48:44.000Z | 2022-03-08T02:48:44.000Z | nemo/collections/asr/parts/submodules/multi_head_attention.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | 1 | 2022-03-06T14:09:02.000Z | 2022-03-06T14:09:02.000Z | nemo/collections/asr/parts/submodules/multi_head_attention.py | hamjam/NeMo | b3484d32e1317666151f931bfa39867d88ed8658 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Part of this code is adopted from https://github.com/espnet/espnet
"""
import math
import torch
import torch.nn as nn
__all__ = [
'RelPositionMultiHeadAttention',
'RelPositionalEncoding',
'PositionalEncoding',
]
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention (Transformer).

    Args:
        n_head (int): number of attention heads; must evenly divide n_feat.
        n_feat (int): model feature size.
        dropout_rate (float): dropout probability applied to the attention weights.
    """

    def __init__(self, n_head, n_feat, dropout_rate):
        """Create the query/key/value/output projections and the dropout."""
        super(MultiHeadAttention, self).__init__()
        assert n_feat % n_head == 0
        # Every head works on an equal d_k-sized slice; d_v == d_k throughout.
        self.d_k = n_feat // n_head
        self.s_d_k = math.sqrt(self.d_k)
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.dropout = nn.Dropout(p=dropout_rate)

    def forward_qkv(self, query, key, value):
        """Project the inputs and reshape them into per-head tensors.

        Args:
            query (torch.Tensor): (batch, time1, n_feat)
            key (torch.Tensor): (batch, time2, n_feat)
            value (torch.Tensor): (batch, time2, n_feat)
        Returns:
            Tuple of q, k, v tensors shaped (batch, head, time, d_k).
        """
        batch = query.size(0)

        def split_heads(proj, x):
            # (batch, time, n_feat) -> (batch, head, time, d_k)
            return proj(x).view(batch, -1, self.h, self.d_k).transpose(1, 2)

        return (
            split_heads(self.linear_q, query),
            split_heads(self.linear_k, key),
            split_heads(self.linear_v, value),
        )

    def forward_attention(self, value, scores, mask):
        """Turn attention scores into a projected context vector.

        Args:
            value (torch.Tensor): per-head values, (batch, head, time2, d_k)
            scores (torch.Tensor): (batch, head, time1, time2)
            mask (torch.Tensor): optional (batch, time1, time2); True marks
                positions to exclude from attention.
        Returns:
            torch.Tensor: (batch, time1, n_feat) after the output projection.
        """
        batch = value.size(0)
        if mask is None:
            attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
        else:
            mask = mask.unsqueeze(1)  # broadcast over the head dimension
            # Large negative fill drives masked weights to ~0 in the softmax;
            # the second fill makes fully-masked rows contribute exactly 0.
            attn = torch.softmax(scores.masked_fill(mask, -10000.0), dim=-1)
            attn = attn.masked_fill(mask, 0.0)
        context = torch.matmul(self.dropout(attn), value)  # (batch, head, time1, d_k)
        context = context.transpose(1, 2).reshape(batch, -1, self.h * self.d_k)
        return self.linear_out(context)  # (batch, time1, n_feat)

    def forward(self, query, key, value, mask, pos_emb=None):
        """Compute scaled dot-product attention.

        Args:
            query (torch.Tensor): (batch, time1, n_feat)
            key (torch.Tensor): (batch, time2, n_feat)
            value (torch.Tensor): (batch, time2, n_feat)
            mask (torch.Tensor): optional (batch, time1, time2)
            pos_emb: unused here; accepted for interface parity with the
                relative-position subclass.
        Returns:
            torch.Tensor: (batch, time1, n_feat)
        """
        q, k, v = self.forward_qkv(query, key, value)
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.s_d_k
        return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadAttention(MultiHeadAttention):
    """Multi-head attention with Transformer-XL relative positional encoding.

    Paper: https://arxiv.org/abs/1901.02860

    Args:
        n_head (int): number of attention heads.
        n_feat (int): feature size.
        dropout_rate (float): dropout probability for the attention weights.
        pos_bias_u / pos_bias_v: optional shared bias parameters; fresh
            zero-initialized parameters are created when either is None.
    """

    def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v):
        """Add the positional projection and the u/v bias terms."""
        super().__init__(n_head, n_feat, dropout_rate)
        # Projection applied to the relative positional embeddings (no bias).
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # Learnable biases for the content (u) and position (v) score terms,
        # per Transformer-XL Section 3.3; zero-initialized when not shared.
        if pos_bias_u is None or pos_bias_v is None:
            self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))
            self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))
            nn.init.zeros_(self.pos_bias_u)
            nn.init.zeros_(self.pos_bias_v)
        else:
            self.pos_bias_u = pos_bias_u
            self.pos_bias_v = pos_bias_v

    def rel_shift(self, x):
        """Left-shift each row of the relative-position score matrix.

        Args:
            x (torch.Tensor): (batch, head, time, 2*time-1)
        """
        batch, heads, qlen, pos_len = x.size()  # (b, h, t1, t2)
        # Pad one zero column on the left, fold, and drop the first row:
        # this realigns relative scores so column j means offset (j - t1 + 1).
        padded = torch.nn.functional.pad(x, pad=(1, 0))  # (b, h, t1, t2+1)
        folded = padded.view(batch, heads, -1, qlen)  # (b, h, t2+1, t1)
        return folded[:, :, 1:].view(batch, heads, qlen, pos_len)  # (b, h, t1, t2)

    def forward(self, query, key, value, mask, pos_emb):
        """Scaled dot-product attention with relative positional scores.

        Args:
            query (torch.Tensor): (batch, time1, size)
            key (torch.Tensor): (batch, time2, size)
            value (torch.Tensor): (batch, time2, size)
            mask (torch.Tensor): (batch, time1, time2)
            pos_emb (torch.Tensor): (batch, time1, size)
        Returns:
            torch.Tensor: (batch, time1, d_model)
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)

        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, time1, d_k)

        # Biased queries for the content term (a+c) and position term (b+d),
        # each back in (batch, head, time1, d_k) layout.
        q_content = (q + self.pos_bias_u).transpose(1, 2)
        q_position = (q + self.pos_bias_v).transpose(1, 2)

        # Content score: matrices a and c of Transformer-XL Section 3.3.
        matrix_ac = torch.matmul(q_content, k.transpose(-2, -1))
        # Position score: matrices b and d, realigned by rel_shift and then
        # trimmed to matrix_ac's width.
        matrix_bd = self.rel_shift(torch.matmul(q_position, p.transpose(-2, -1)))
        matrix_bd = matrix_bd[:, :, :, : matrix_ac.size(-1)]

        scores = (matrix_ac + matrix_bd) / self.s_d_k  # (batch, head, time1, time2)
        return self.forward_attention(v, scores, mask)
class PositionalEncoding(torch.nn.Module):
    """Fixed sinusoidal positional encoding.

    Args:
        d_model (int): embedding dim
        dropout_rate (float): dropout rate applied to the summed output
        max_len (int): maximum input length
        xscale (float or None): if set, inputs are multiplied by this scale
            (conventionally sqrt(d_model)) before the encoding is added
        dropout_rate_emb (float): dropout rate for the positional embeddings
    """

    def __init__(self, d_model, dropout_rate, max_len=5000, xscale=None, dropout_rate_emb=0.0):
        """Construct a PositionalEncoding object."""
        super().__init__()
        self.d_model = d_model
        self.xscale = xscale
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.max_len = max_len
        # A separate dropout on the embeddings is optional.
        self.dropout_emb = nn.Dropout(dropout_rate_emb) if dropout_rate_emb > 0 else None

    def create_pe(self, positions):
        """Build the sinusoid table for ``positions`` and cache it as ``self.pe``."""
        num_positions = positions.size(0)
        # Inverse frequencies: 10000^(-2i/d_model) for even dims i.
        inv_freq = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32, device=positions.device)
            * -(math.log(10000.0) / self.d_model)
        )
        table = torch.zeros(num_positions, self.d_model, device=positions.device)
        table[:, 0::2] = torch.sin(positions * inv_freq)
        table[:, 1::2] = torch.cos(positions * inv_freq)
        table = table.unsqueeze(0)
        if hasattr(self, 'pe'):
            # Buffer already registered; just swap in the new table.
            self.pe = table
        else:
            # Non-persistent: the table is recomputed, not checkpointed.
            self.register_buffer('pe', table, persistent=False)

    def extend_pe(self, length, device):
        """Reset and extend the positional encodings if the cache is too short."""
        if hasattr(self, 'pe') and self.pe.size(1) >= length:
            return
        positions = torch.arange(0, length, dtype=torch.float32, device=device).unsqueeze(1)
        self.create_pe(positions=positions)

    def forward(self, x: torch.Tensor):
        """Add positional encoding to ``x``.

        Args:
            x (torch.Tensor): Input of shape (batch, time, feature_size)
        Returns:
            tuple: dropout(x + pos_emb) of shape (batch, time, feature_size),
            and pos_emb of shape (1, time, feature_size)
        """
        if self.xscale:
            x = x * self.xscale
        pos_emb = self.pe[:, : x.size(1)]
        if self.dropout_emb:
            pos_emb = self.dropout_emb(pos_emb)
        return self.dropout(x + pos_emb), pos_emb
class RelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding for Transformer-XL style layers.

    See Appendix B in https://arxiv.org/abs/1901.02860.

    Args:
        d_model (int): embedding dim
        dropout_rate (float): dropout rate
        max_len (int): maximum input length
        xscale (float or None): optional input scale applied before encoding
        dropout_rate_emb (float): dropout rate for the positional embeddings
    """

    def extend_pe(self, length, device):
        """Reset and extend the positional encodings if needed."""
        required = 2 * length - 1
        if hasattr(self, 'pe') and self.pe.size(1) >= required:
            return
        # Offsets run from (length-1) down to -(length-1): positive offsets
        # encode left context, negative offsets encode right context.
        offsets = torch.arange(length - 1, -length, -1, dtype=torch.float32, device=device).unsqueeze(1)
        self.create_pe(positions=offsets)
        # Index of relative position 0 inside the cached table.
        self.center_pos = torch.tensor(self.pe.size(1) // 2 + 1, dtype=torch.int32, device=device)

    def forward(self, x):
        """Compute positional encoding.

        Args:
            x (torch.Tensor): Input of shape (batch, time, feature_size)
        Returns:
            tuple: dropout(x) of shape (batch, time, feature_size), and the
            relative pos_emb of shape (1, 2*time-1, feature_size)
        """
        if self.xscale:
            x = x * self.xscale
        # For an input of length T, slice the 2*T-1 encodings centered on
        # relative position 0 (offsets T-1 ... -(T-1)).
        seq_len = x.size(1)
        start_pos = self.center_pos - seq_len
        end_pos = self.center_pos + seq_len - 1
        pos_emb = self.pe[:, start_pos:end_pos]
        if self.dropout_emb:
            pos_emb = self.dropout_emb(pos_emb)
        return self.dropout(x), pos_emb
| 41.169329 | 118 | 0.621527 |
import math
import torch
import torch.nn as nn
__all__ = [
'RelPositionMultiHeadAttention',
'RelPositionalEncoding',
'PositionalEncoding',
]
class MultiHeadAttention(nn.Module):
    """Multi-Head Attention layer of Transformer.
    Args:
        n_head (int): number of heads
        n_feat (int): size of the features (must be divisible by n_head)
        dropout_rate (float): dropout rate applied to the attention weights
    """
    def __init__(self, n_head, n_feat, dropout_rate):
        """Construct a MultiHeadAttention object."""
        super(MultiHeadAttention, self).__init__()
        assert n_feat % n_head == 0
        # per-head dimension; d_v is assumed equal to d_k
        self.d_k = n_feat // n_head
        self.s_d_k = math.sqrt(self.d_k)
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.dropout = nn.Dropout(p=dropout_rate)
    def forward_qkv(self, query, key, value):
        """Project query/key/value and split them into heads.
        Args:
            query (torch.Tensor): (batch, time1, size)
            key (torch.Tensor): (batch, time2, size)
            value (torch.Tensor): (batch, time2, size)
        Returns:
            tuple of torch.Tensor: q (batch, head, time1, d_k),
            k (batch, head, time2, d_k), v (batch, head, time2, d_k)
        """
        n_batch = query.size(0)
        q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        return q, k, v
    def forward_attention(self, value, scores, mask):
        """Apply softmax attention weights to `value`.
        Args:
            value (torch.Tensor): (batch, head, time2, d_k)
            scores (torch.Tensor): (batch, head, time1, time2)
            mask (torch.Tensor or None): (batch, time1, time2); nonzero entries
                mark positions to be masked out
        Returns:
            torch.Tensor: (batch, time1, d_model)
        """
        n_batch = value.size(0)
        if mask is not None:
            mask = mask.unsqueeze(1)  # broadcast the mask over heads
            # push masked scores far negative so softmax gives them ~0 weight
            scores = scores.masked_fill(mask, -10000.0)
            attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
        else:
            attn = torch.softmax(scores, dim=-1)
        p_attn = self.dropout(attn)
        x = torch.matmul(p_attn, value)
        # merge heads back into the feature dimension
        x = x.transpose(1, 2).reshape(n_batch, -1, self.h * self.d_k)
        return self.linear_out(x)
    def forward(self, query, key, value, mask, pos_emb=None):
        """Compute scaled dot-product attention.
        Args:
            query (torch.Tensor): (batch, time1, size)
            key (torch.Tensor): (batch, time2, size)
            value (torch.Tensor): (batch, time2, size)
            mask (torch.Tensor): (batch, time1, time2)
            pos_emb: unused here; kept for interface parity with the
                relative-position subclass
        Returns:
            torch.Tensor: transformed `value` of shape (batch, time1, d_model)
        """
        q, k, v = self.forward_qkv(query, key, value)
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.s_d_k
        return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadAttention(MultiHeadAttention):
    """Multi-Head Attention layer of Transformer-XL with relative positional encoding.
    Paper: https://arxiv.org/abs/1901.02860
    Args:
        n_head (int): number of heads
        n_feat (int): size of the features
        dropout_rate (float): dropout rate
        pos_bias_u (torch.Tensor or None): learnable content bias `u`; a fresh
            zero-initialized parameter is created when None
        pos_bias_v (torch.Tensor or None): learnable positional bias `v`; a fresh
            zero-initialized parameter is created when None
    """
    def __init__(self, n_head, n_feat, dropout_rate, pos_bias_u, pos_bias_v):
        """Construct a RelPositionMultiHeadAttention object."""
        super().__init__(n_head, n_feat, dropout_rate)
        # linear transformation for the positional encodings
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # learnable biases used in matrices c and d of
        # https://arxiv.org/abs/1901.02860 Section 3.3
        if pos_bias_u is None or pos_bias_v is None:
            self.pos_bias_u = nn.Parameter(torch.FloatTensor(self.h, self.d_k))
            self.pos_bias_v = nn.Parameter(torch.FloatTensor(self.h, self.d_k))
            nn.init.zeros_(self.pos_bias_u)
            nn.init.zeros_(self.pos_bias_v)
        else:
            # share biases handed in by the caller
            self.pos_bias_u = pos_bias_u
            self.pos_bias_v = pos_bias_v
    def rel_shift(self, x):
        """Shift scores along the last dim to align relative positions.
        Args:
            x (torch.Tensor): (batch, nheads, time, 2*time-1)
        Returns:
            torch.Tensor: same shape, shifted via the standard pad/view/slice trick.
        """
        b, h, qlen, pos_len = x.size()  # (b, h, t1, t2)
        x = torch.nn.functional.pad(x, pad=(1, 0))  # (b, h, t1, t2+1)
        x = x.view(b, h, -1, qlen)  # (b, h, t2+1, t1)
        x = x[:, :, 1:].view(b, h, qlen, pos_len)  # drop first row -> (b, h, t1, t2)
        return x
    def forward(self, query, key, value, mask, pos_emb):
        """Compute scaled dot-product attention with relative positional encoding.
        Args:
            query (torch.Tensor): (batch, time1, size)
            key (torch.Tensor): (batch, time2, size)
            value (torch.Tensor): (batch, time2, size)
            mask (torch.Tensor): (batch, time1, time2)
            pos_emb (torch.Tensor): (batch, time1, size)
        Returns:
            torch.Tensor: transformed `value` of shape (batch, time1, d_model)
        """
        q, k, v = self.forward_qkv(query, key, value)
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)
        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)  # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)  # (batch, head, time1, d_k)
        # matrices a and c of Section 3.3: content-based scores
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
        # matrices b and d: position-based scores, then relative shift
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        matrix_bd = self.rel_shift(matrix_bd)
        # drop extra columns so matrix_bd matches matrix_ac's size
        matrix_bd = matrix_bd[:, :, :, : matrix_ac.size(-1)]
        scores = (matrix_ac + matrix_bd) / self.s_d_k  # (batch, head, time1, time2)
        return self.forward_attention(v, scores, mask)
class PositionalEncoding(torch.nn.Module):
    """Fixed sinusoidal positional encoding.
    Args:
        d_model (int): embedding dim
        dropout_rate (float): dropout rate applied to the summed output
        max_len (int): maximum input length
        xscale (float or None): if set, input is multiplied by this scale before
            the encoding is added
        dropout_rate_emb (float): dropout rate for the positional embeddings
    """
    def __init__(self, d_model, dropout_rate, max_len=5000, xscale=None, dropout_rate_emb=0.0):
        """Construct a PositionalEncoding object."""
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.xscale = xscale
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.max_len = max_len
        # optional separate dropout on the embeddings themselves
        if dropout_rate_emb > 0:
            self.dropout_emb = nn.Dropout(dropout_rate_emb)
        else:
            self.dropout_emb = None
    def create_pe(self, positions):
        """Build the sinusoid table for `positions` and cache it as `self.pe`."""
        pos_length = positions.size(0)
        pe = torch.zeros(pos_length, self.d_model, device=positions.device)
        # inverse frequencies 10000^(-2i/d_model) for even dims i
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32, device=positions.device)
            * -(math.log(10000.0) / self.d_model)
        )
        pe[:, 0::2] = torch.sin(positions * div_term)
        pe[:, 1::2] = torch.cos(positions * div_term)
        pe = pe.unsqueeze(0)
        if hasattr(self, 'pe'):
            self.pe = pe
        else:
            # non-persistent: recomputed on load, not checkpointed
            self.register_buffer('pe', pe, persistent=False)
    def extend_pe(self, length, device):
        """Reset and extend the positional encodings if the cache is too short."""
        if hasattr(self, 'pe') and self.pe.size(1) >= length:
            return
        positions = torch.arange(0, length, dtype=torch.float32, device=device).unsqueeze(1)
        self.create_pe(positions=positions)
    def forward(self, x: torch.Tensor):
        """Add positional encoding.
        Args:
            x (torch.Tensor): (batch, time, feature_size)
        Returns:
            tuple: dropout(x + pos_emb) of shape (batch, time, feature_size),
            and pos_emb of shape (1, time, feature_size)
        """
        if self.xscale:
            x = x * self.xscale
        pos_emb = self.pe[:, : x.size(1)]
        if self.dropout_emb:
            pos_emb = self.dropout_emb(pos_emb)
        x = x + pos_emb
        return self.dropout(x), pos_emb
class RelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding for Transformer-XL style layers.
    See: Appendix B in https://arxiv.org/abs/1901.02860
    Args:
        d_model (int): embedding dim
        dropout_rate (float): dropout rate
        max_len (int): maximum input length
        xscale (float or None): optional input scale
        dropout_rate_emb (float): dropout rate for the positional embeddings
    """
    def extend_pe(self, length, device):
        """Reset and extend the positional encodings if needed (2*length-1 entries)."""
        needed_size = 2 * length - 1
        if hasattr(self, 'pe') and self.pe.size(1) >= needed_size:
            return
        # positions would be from negative numbers to positive
        # positive positions would be used for left positions and negative for right positions
        positions = torch.arange(length - 1, -length, -1, dtype=torch.float32, device=device).unsqueeze(1)
        self.create_pe(positions=positions)
        # index of relative position 0 inside the cached table
        self.center_pos = torch.tensor(self.pe.size(1) // 2 + 1, dtype=torch.int32, device=device)
    def forward(self, x):
        """Compute relative positional encoding.
        Args:
            x (torch.Tensor): (batch, time, feature_size)
        Returns:
            tuple: dropout(x) of shape (batch, time, feature_size), and
            pos_emb of shape (1, 2*time-1, feature_size)
        """
        if self.xscale:
            x = x * self.xscale
        # center_pos would be the index of position 0
        # negative positions would be used for right and positive for left tokens
        # for input of length L, 2*L-1 positions are needed, positions from (L-1) to -(L-1)
        start_pos = self.center_pos - x.size(1)
        end_pos = self.center_pos + x.size(1) - 1
        pos_emb = self.pe[:, start_pos:end_pos]
        if self.dropout_emb:
            pos_emb = self.dropout_emb(pos_emb)
        return self.dropout(x), pos_emb
| true | true |
1c2e1cd8e3fac74e232aa6bb8af903e8e59a0397 | 7,768 | py | Python | examples/mujoco/train_ppo_batch_gym.py | cnheider/chainerrl | 018a29132d77e5af0f92161250c72aba10c6ce29 | [
"MIT"
] | 923 | 2017-06-01T08:27:42.000Z | 2022-03-24T02:17:04.000Z | examples/mujoco/train_ppo_batch_gym.py | hardmaru/chainerrl | 018a29132d77e5af0f92161250c72aba10c6ce29 | [
"MIT"
] | 374 | 2017-06-02T02:07:50.000Z | 2021-06-29T22:05:38.000Z | examples/mujoco/train_ppo_batch_gym.py | hardmaru/chainerrl | 018a29132d77e5af0f92161250c72aba10c6ce29 | [
"MIT"
] | 253 | 2017-06-04T10:31:50.000Z | 2022-03-19T15:20:51.000Z | """An example of training PPO against OpenAI Gym Envs.
This script is an example of training a PPO agent against OpenAI Gym envs.
Both discrete and continuous action spaces are supported.
To solve CartPole-v0, run:
python train_ppo_batch_gym.py --env CartPole-v0
"""
import argparse
import functools
import chainer
from chainer import functions as F
from chainer import links as L
import gym
import gym.spaces
import numpy as np
import chainerrl
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import misc
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
def main():
    """Train (or demo-evaluate) a batch PPO agent on an OpenAI Gym env.

    Parses command-line options, builds vectorized train/eval environments,
    constructs a policy/value-function model matching the env's action space
    (discrete or continuous), and either runs a demo evaluation (--demo) or
    trains with periodic evaluation and a linearly decayed learning rate.
    """
    import logging
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--env', type=str, default='Hopper-v2')
    parser.add_argument('--num-envs', type=int, default=1)
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--steps', type=int, default=10 ** 6)
    parser.add_argument('--eval-interval', type=int, default=10000)
    parser.add_argument('--eval-n-runs', type=int, default=10)
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    parser.add_argument('--standardize-advantages', action='store_true')
    parser.add_argument('--render', action='store_true', default=False)
    parser.add_argument('--lr', type=float, default=3e-4)
    parser.add_argument('--weight-decay', type=float, default=0.0)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--window-size', type=int, default=100)
    parser.add_argument('--update-interval', type=int, default=2048)
    parser.add_argument('--log-interval', type=int, default=1000)
    parser.add_argument('--batchsize', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--entropy-coef', type=float, default=0.0)
    args = parser.parse_args()
    logging.basicConfig(level=args.logger_level)
    # Set a random seed used in ChainerRL
    misc.set_random_seed(args.seed, gpus=(args.gpu,))
    # Set different random seeds for different subprocesses.
    # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
    # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32
    args.outdir = experiments.prepare_output_dir(args, args.outdir)

    def make_env(process_idx, test):
        env = gym.make(args.env)
        # Use different random seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = chainerrl.wrappers.Monitor(env, args.outdir)
        if not test:
            # Scale rewards (and thus returns) to a reasonable range so that
            # training is easier
            env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if args.render:
            env = chainerrl.wrappers.Render(env)
        return env

    def make_batch_env(test):
        # NOTE: `range` already yields the indices; the previous
        # `enumerate(range(...))` produced an unused second variable.
        return chainerrl.envs.MultiprocessVectorEnv(
            [functools.partial(make_env, idx, test)
             for idx in range(args.num_envs)])

    # Only for getting timesteps, and obs-action spaces
    sample_env = gym.make(args.env)
    timestep_limit = sample_env.spec.max_episode_steps
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    # Normalize observations based on their empirical mean and variance
    obs_normalizer = chainerrl.links.EmpiricalNormalization(
        obs_space.low.size, clip_threshold=5)
    winit_last = chainer.initializers.LeCunNormal(1e-2)
    # Switch policy types accordingly to action space types
    if isinstance(action_space, gym.spaces.Discrete):
        n_actions = action_space.n
        policy = chainer.Sequential(
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, n_actions, initialW=winit_last),
            chainerrl.distribution.SoftmaxDistribution,
        )
    elif isinstance(action_space, gym.spaces.Box):
        action_size = action_space.low.size
        policy = chainer.Sequential(
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, action_size, initialW=winit_last),
            chainerrl.policies.GaussianHeadWithStateIndependentCovariance(
                action_size=action_size,
                var_type='diagonal',
                var_func=lambda x: F.exp(2 * x),  # Parameterize log std
                var_param_init=0,  # log std = 0 => std = 1
            ),
        )
    else:
        print("""\
This example only supports gym.spaces.Box or gym.spaces.Discrete action spaces.""")  # NOQA
        return
    vf = chainer.Sequential(
        L.Linear(None, 64),
        F.tanh,
        L.Linear(None, 64),
        F.tanh,
        L.Linear(None, 1),
    )
    # Combine a policy and a value function into a single model
    model = chainerrl.links.Branched(policy, vf)
    opt = chainer.optimizers.Adam(alpha=args.lr, eps=1e-5)
    opt.setup(model)
    if args.weight_decay > 0:
        opt.add_hook(NonbiasWeightDecay(args.weight_decay))
    agent = PPO(model, opt,
                obs_normalizer=obs_normalizer,
                gpu=args.gpu,
                update_interval=args.update_interval,
                minibatch_size=args.batchsize, epochs=args.epochs,
                clip_eps_vf=None, entropy_coef=args.entropy_coef,
                standardize_advantages=args.standardize_advantages,
                )
    if args.load:
        agent.load(args.load)
    if args.demo:
        env = make_batch_env(True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        # Linearly decay the learning rate to zero
        def lr_setter(env, agent, value):
            agent.optimizer.alpha = value
        lr_decay_hook = experiments.LinearInterpolationHook(
            args.steps, args.lr, 0, lr_setter)
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            return_window_size=args.window_size,
            max_episode_len=timestep_limit,
            save_best_so_far_agent=False,
            step_hooks=[
                lr_decay_hook,
            ],
        )


if __name__ == '__main__':
    main()
| 37.892683 | 91 | 0.644954 | import argparse
import functools
import chainer
from chainer import functions as F
from chainer import links as L
import gym
import gym.spaces
import numpy as np
import chainerrl
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import misc
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
def main():
    """Train (or demo-evaluate) a batch PPO agent on an OpenAI Gym env."""
    import logging
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--env', type=str, default='Hopper-v2')
    parser.add_argument('--num-envs', type=int, default=1)
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 32)')
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--steps', type=int, default=10 ** 6)
    parser.add_argument('--eval-interval', type=int, default=10000)
    parser.add_argument('--eval-n-runs', type=int, default=10)
    parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
    parser.add_argument('--standardize-advantages', action='store_true')
    parser.add_argument('--render', action='store_true', default=False)
    parser.add_argument('--lr', type=float, default=3e-4)
    parser.add_argument('--weight-decay', type=float, default=0.0)
    parser.add_argument('--demo', action='store_true', default=False)
    parser.add_argument('--load', type=str, default='')
    parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
    parser.add_argument('--monitor', action='store_true')
    parser.add_argument('--window-size', type=int, default=100)
    parser.add_argument('--update-interval', type=int, default=2048)
    parser.add_argument('--log-interval', type=int, default=1000)
    parser.add_argument('--batchsize', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--entropy-coef', type=float, default=0.0)
    args = parser.parse_args()
    logging.basicConfig(level=args.logger_level)
    # Seed ChainerRL's RNGs (CPU and the selected GPU)
    misc.set_random_seed(args.seed, gpus=(args.gpu,))
    # Give each env subprocess a distinct seed derived from --seed
    process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
    assert process_seeds.max() < 2 ** 32
    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    def make_env(process_idx, test):
        # Build one (optionally wrapped) env for the given subprocess index
        env = gym.make(args.env)
        # Use different seeds for train and test envs
        process_seed = int(process_seeds[process_idx])
        env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
        env.seed(env_seed)
        # Cast observations to float32 because the model uses float32
        env = chainerrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = chainerrl.wrappers.Monitor(env, args.outdir)
        if not test:
            # Scale rewards to a reasonable range to ease training
            env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
        if args.render:
            env = chainerrl.wrappers.Render(env)
        return env
    def make_batch_env(test):
        # Vectorized env running num_envs subprocesses
        return chainerrl.envs.MultiprocessVectorEnv(
            [functools.partial(make_env, idx, test)
             for idx, env in enumerate(range(args.num_envs))])
    # Sample env only used to read timestep limit and obs/action spaces
    sample_env = gym.make(args.env)
    timestep_limit = sample_env.spec.max_episode_steps
    obs_space = sample_env.observation_space
    action_space = sample_env.action_space
    # Normalize observations by their empirical mean and variance
    obs_normalizer = chainerrl.links.EmpiricalNormalization(
        obs_space.low.size, clip_threshold=5)
    winit_last = chainer.initializers.LeCunNormal(1e-2)
    # Choose the policy head to match the action space type
    if isinstance(action_space, gym.spaces.Discrete):
        n_actions = action_space.n
        policy = chainer.Sequential(
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, n_actions, initialW=winit_last),
            chainerrl.distribution.SoftmaxDistribution,
        )
    elif isinstance(action_space, gym.spaces.Box):
        action_size = action_space.low.size
        policy = chainer.Sequential(
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, 64),
            F.tanh,
            L.Linear(None, action_size, initialW=winit_last),
            chainerrl.policies.GaussianHeadWithStateIndependentCovariance(
                action_size=action_size,
                var_type='diagonal',
                var_func=lambda x: F.exp(2 * x),  # parameterize log std
                var_param_init=0,  # log std = 0 => std = 1
            ),
        )
    else:
        print("""\
This example only supports gym.spaces.Box or gym.spaces.Discrete action spaces.""")
        return
    vf = chainer.Sequential(
        L.Linear(None, 64),
        F.tanh,
        L.Linear(None, 64),
        F.tanh,
        L.Linear(None, 1),
    )
    # Combine the policy and the value function into a single model
    model = chainerrl.links.Branched(policy, vf)
    opt = chainer.optimizers.Adam(alpha=args.lr, eps=1e-5)
    opt.setup(model)
    if args.weight_decay > 0:
        opt.add_hook(NonbiasWeightDecay(args.weight_decay))
    agent = PPO(model, opt,
                obs_normalizer=obs_normalizer,
                gpu=args.gpu,
                update_interval=args.update_interval,
                minibatch_size=args.batchsize, epochs=args.epochs,
                clip_eps_vf=None, entropy_coef=args.entropy_coef,
                standardize_advantages=args.standardize_advantages,
                )
    if args.load:
        agent.load(args.load)
    if args.demo:
        # Evaluation-only mode
        env = make_batch_env(True)
        eval_stats = experiments.eval_performance(
            env=env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit)
        print('n_runs: {} mean: {} median: {} stdev {}'.format(
            args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
            eval_stats['stdev']))
    else:
        # Linearly decay the learning rate to zero over training
        def lr_setter(env, agent, value):
            agent.optimizer.alpha = value
        lr_decay_hook = experiments.LinearInterpolationHook(
            args.steps, args.lr, 0, lr_setter)
        experiments.train_agent_batch_with_evaluation(
            agent=agent,
            env=make_batch_env(False),
            eval_env=make_batch_env(True),
            outdir=args.outdir,
            steps=args.steps,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            log_interval=args.log_interval,
            return_window_size=args.window_size,
            max_episode_len=timestep_limit,
            save_best_so_far_agent=False,
            step_hooks=[
                lr_decay_hook,
            ],
        )
if __name__ == '__main__':
    main()
| true | true |
1c2e1d04399dd2ab41dcc73c61b9cb123cf6a55d | 11,815 | py | Python | hw0_release/.env/lib/python3.7/site-packages/skimage/data/__init__.py | Renhy/CS131_release | 23b92d04c4cbb122da18dc929199d3d06fb0251f | [
"MIT"
] | 1 | 2019-01-12T13:17:32.000Z | 2019-01-12T13:17:32.000Z | rcnn/lib/python3.6/site-packages/skimage/data/__init__.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | null | null | null | rcnn/lib/python3.6/site-packages/skimage/data/__init__.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | null | null | null | # coding: utf-8
"""Standard test images.
For more images, see
- http://sipi.usc.edu/database/database.php
"""
import os as _os
import numpy as _np
from .. import data_dir
from ..io import imread, use_plugin
from .._shared._warnings import expected_warnings, warn
from .. import img_as_bool
from ._binary_blobs import binary_blobs
__all__ = ['load',
'astronaut',
'binary_blobs',
'camera',
'checkerboard',
'chelsea',
'clock',
'coffee',
'coins',
'horse',
'hubble_deep_field',
'immunohistochemistry',
'lfw_subset',
'logo',
'moon',
'page',
'text',
'rocket',
'stereo_motorcycle']
def load(f, as_gray=False, as_grey=None):
    """Load an image file located in ``skimage.data_dir``.

    Parameters
    ----------
    f : string
        File name.
    as_gray : bool, optional
        Convert to grayscale.
    as_grey : bool or None, optional
        Deprecated alias for `as_gray`; overrides it when not None.

    Returns
    -------
    img : ndarray
        Image loaded from ``skimage.data_dir``.
    """
    if as_grey is not None:
        warn('`as_grey` has been deprecated in favor of `as_gray`'
             ' and will be removed in v0.16.')
        as_gray = as_grey
    use_plugin('pil')
    return imread(_os.path.join(data_dir, f), as_gray=as_gray)
def camera():
    """Gray-level "camera" image, (512, 512) uint8.

    Often used for segmentation and denoising examples.
    """
    fname = "camera.png"
    return load(fname)
def astronaut():
    """Color image of the astronaut Eileen Collins, (512, 512, 3) uint8.

    Downloaded from the NASA Great Images database
    (https://flic.kr/p/r9qvLn); public domain.
    """
    fname = "astronaut.png"
    return load(fname)
def text():
    """Gray-level "text" image used for corner detection, (172, 448) uint8.

    Downloaded from Wikipedia (File:Corner.png); public domain.
    """
    fname = "text.png"
    return load(fname)
def checkerboard():
    """Checkerboard image, (200, 200) uint8.

    Useful for calibration (easy-to-locate corners) and for visualising
    distortions along the many parallel edges.
    """
    fname = "chessboard_GRAY.png"
    return load(fname)
def coins():
    """Greek coins from Pompeii, (303, 384) uint8.

    Several coins against a gray background sharing similar grey levels,
    making it a useful segmentation test image. Downloaded from the
    Brooklyn Museum Collection; no known copyright restrictions.
    """
    fname = "coins.png"
    return load(fname)
def logo():
    """Scikit-image logo, an RGBA image of shape (500, 500, 4) uint8."""
    fname = "logo.png"
    return load(fname)
def moon():
    """Low-contrast image of the lunar surface, (512, 512) uint8.

    Useful for illustrating histogram equalization and contrast stretching.
    """
    fname = "moon.png"
    return load(fname)
def page():
    """Scanned page of printed text, (191, 384) uint8.

    Useful for demonstrations requiring uneven background illumination.
    """
    fname = "page.png"
    return load(fname)
def horse():
    """Black and white silhouette of a horse, (328, 400) bool ndarray.

    Drawn and uploaded by Andreas Preuss (marauder) to openclipart;
    released into the public domain.
    """
    conversion_warnings = ['Possible precision loss', 'Possible sign loss']
    with expected_warnings(conversion_warnings):
        return img_as_bool(load("horse.png", as_gray=True))
def clock():
    """Motion-blurred wall clock, (300, 400) uint8.

    Shot while moving the camera roughly horizontally; useful for
    illustrating inverse filters and deconvolution. Public domain
    (photographer: Stefan van der Walt).
    """
    fname = "clock_motion.png"
    return load(fname)
def immunohistochemistry():
    """IHC staining with hematoxylin counterstaining, (512, 512, 3) uint8.

    Colonic glands with DAB-revealed FHL2 expression; acquired at the
    Center for Microscopy And Molecular Imaging (CMMI). No known
    copyright restrictions.
    """
    fname = "ihc.png"
    return load(fname)
def chelsea():
    """Chelsea the cat, (300, 451, 3) uint8.

    Texture, prominent horizontal/diagonal edges, and multi-scale
    features. CC0 by the photographer (Stefan van der Walt).
    """
    fname = "chelsea.png"
    return load(fname)
def coffee():
    """Coffee cup, (400, 600, 3) uint8.

    Courtesy of Pikolo Espresso Bar; contains elliptical shapes and
    varied texture (porcelain to wood grain). CC0 by the photographer
    (Rachel Michetti).
    """
    fname = "coffee.png"
    return load(fname)
def hubble_deep_field():
    """Hubble eXtreme Deep Field, (872, 1000, 3) uint8.

    The Hubble Telescope's farthest view of the universe; useful for
    multi-scale detection examples. Downloaded from HubbleSite; captured
    by NASA and freely usable in the public domain.
    """
    fname = "hubble_deep_field.jpg"
    return load(fname)
def rocket():
    """Launch photo of DSCOVR on Falcon 9 by SpaceX, (427, 640, 3) uint8.

    Falcon 9 lifting off from Launch Complex 40 at Cape Canaveral Air
    Force Station, FL. Downloaded from SpaceX Photos; released into the
    public domain by SpaceX.
    """
    fname = "rocket.jpg"
    return load(fname)
def stereo_motorcycle():
    """Rectified stereo image pair with ground-truth disparities.

    Down-sampled (factor 4, via `skimage.transform.downscale_local_mean`)
    default-exposure images from the Middlebury 2014 stereo benchmark
    (Scharstein et al., GCPR 2014). Calibration for the down-sampled
    images: focal length 994.978px, principal point (311.193px, 254.877px),
    principal point dx 31.086px, baseline 193.001mm.

    Returns
    -------
    img_left : (500, 741, 3) uint8 ndarray
        Left stereo image.
    img_right : (500, 741, 3) uint8 ndarray
        Right stereo image.
    disp : (500, 741, 3) float ndarray
        Ground-truth disparity map: column offset between corresponding
        pixels, i.e. the pixel matching ``img_right[10, 10]`` is
        ``img_left[10, 10 + disp[10, 10]]``. NaNs mark left-image pixels
        without ground truth.

    References
    ----------
    .. [1] D. Scharstein, H. Hirschmueller, Y. Kitajima, G. Krathwohl, N.
           Nesic, X. Wang, and P. Westling. High-resolution stereo datasets
           with subpixel-accurate ground truth. GCPR 2014.
    .. [2] http://vision.middlebury.edu/stereo/data/scenes2014/
    """
    disp = _np.load(_os.path.join(data_dir, "motorcycle_disp.npz"))["arr_0"]
    return load("motorcycle_left.png"), load("motorcycle_right.png"), disp
def lfw_subset():
    """Subset of data from the LFW dataset.

    The array holds 200 grayscale 25 x 25 images: the first 100 are faces
    and the remaining 100 are non-face patches extracted from the
    background of the same dataset. The full dataset is available at [2]_.

    Returns
    -------
    images : (200, 25, 25) uint8 ndarray
        100 first images are faces and subsequent 100 are non-faces.

    References
    ----------
    .. [1] Huang, G., Mattar, M., Lee, H., & Learned-Miller, E. G. (2012).
           Learning to align from scratch. In Advances in Neural Information
           Processing Systems (pp. 764-772).
    .. [2] http://vis-www.cs.umass.edu/lfw/
    """
    subset_path = _os.path.join(data_dir, 'lfw_subset.npy')
    return _np.load(subset_path)
| 27.412993 | 104 | 0.65019 |
import os as _os
import numpy as _np
from .. import data_dir
from ..io import imread, use_plugin
from .._shared._warnings import expected_warnings, warn
from .. import img_as_bool
from ._binary_blobs import binary_blobs
__all__ = ['load',
'astronaut',
'binary_blobs',
'camera',
'checkerboard',
'chelsea',
'clock',
'coffee',
'coins',
'horse',
'hubble_deep_field',
'immunohistochemistry',
'lfw_subset',
'logo',
'moon',
'page',
'text',
'rocket',
'stereo_motorcycle']
def load(f, as_gray=False, as_grey=None):
    """Load an image file from the module data directory.

    Parameters
    ----------
    f : str
        File name relative to ``data_dir``.
    as_gray : bool, optional
        Whether to convert the image to grayscale.
    as_grey : bool or None, optional
        Deprecated alias for ``as_gray``; when given it overrides
        ``as_gray`` and a deprecation warning is emitted.

    Returns
    -------
    ndarray
        Image read through the ``pil`` plugin.
    """
    if as_grey is not None:
        warn('`as_grey` has been deprecated in favor of `as_gray`'
             ' and will be removed in v0.16.')
        as_gray = as_grey
    use_plugin('pil')
    path = _os.path.join(data_dir, f)
    return imread(path, as_gray=as_gray)
def camera():
    """Load the "camera.png" sample image."""
    return load("camera.png")
def astronaut():
    """Load the "astronaut.png" sample image."""
    return load("astronaut.png")
def text():
    """Load the "text.png" sample image."""
    return load("text.png")
def checkerboard():
    """Load the "chessboard_GRAY.png" sample image."""
    return load("chessboard_GRAY.png")
def coins():
    """Load the "coins.png" sample image."""
    return load("coins.png")
def logo():
    """Load the "logo.png" sample image."""
    return load("logo.png")
def moon():
    """Load the "moon.png" sample image."""
    return load("moon.png")
def page():
    """Load the "page.png" sample image."""
    return load("page.png")
def horse():
    """Load "horse.png" in grayscale and convert it to a boolean image.

    The precision/sign-loss warnings raised by ``img_as_bool`` are expected
    for this conversion and are suppressed via ``expected_warnings``.
    """
    with expected_warnings(['Possible precision loss', 'Possible sign loss']):
        return img_as_bool(load("horse.png", as_gray=True))
def clock():
    """Load the "clock_motion.png" sample image."""
    return load("clock_motion.png")
def immunohistochemistry():
    """Load the "ihc.png" sample image."""
    return load("ihc.png")
def chelsea():
    """Load the "chelsea.png" sample image."""
    return load("chelsea.png")
def coffee():
    """Load the "coffee.png" sample image."""
    return load("coffee.png")
def hubble_deep_field():
    """Load the "hubble_deep_field.jpg" sample image."""
    return load("hubble_deep_field.jpg")
def rocket():
    """Load the "rocket.jpg" sample image."""
    return load("rocket.jpg")
def stereo_motorcycle():
    """Return the motorcycle stereo image pair and its disparity map.

    Returns
    -------
    tuple
        ``(img_left, img_right, disp)``: the images loaded from
        "motorcycle_left.png" / "motorcycle_right.png" and the "arr_0"
        array of "motorcycle_disp.npz".
    """
    return (load("motorcycle_left.png"),
            load("motorcycle_right.png"),
            _np.load(_os.path.join(data_dir, "motorcycle_disp.npz"))["arr_0"])
def lfw_subset():
    """Load the "lfw_subset.npy" array (LFW faces/non-faces subset)."""
    return _np.load(_os.path.join(data_dir, 'lfw_subset.npy'))
| true | true |
1c2e1deaf306e0ed002b9293002fa360230bcd40 | 418 | py | Python | exercise8.py | iansantana00/Python-Course | 43852aa64c93099342ab4765b0fe8729a959449e | [
"MIT"
] | 2 | 2022-01-13T15:55:58.000Z | 2022-02-11T23:18:34.000Z | exercise8.py | iansantana00/Python-Course | 43852aa64c93099342ab4765b0fe8729a959449e | [
"MIT"
] | null | null | null | exercise8.py | iansantana00/Python-Course | 43852aa64c93099342ab4765b0fe8729a959449e | [
"MIT"
] | null | null | null | # -*- Coding: utf-8 -*-
def classificar_consumo(c):
    """Return the verdict message for a fuel consumption of *c* km/l.

    Below 8 km/l the car is a gas guzzler; from 8 (inclusive) up to
    14 km/l it is economical; 14 km/l and above is super economical.
    The original ``if c < 8 / elif c > 8 and c < 14`` chain let
    ``c == 8`` fall through to the *best* verdict; the boundary is now
    handled explicitly.
    """
    if c < 8:
        return f"Consumo de {c}km/l, VENDA O CARRO!"
    elif c < 14:  # 8 <= c < 14
        return f"Consumo de {c}km/l, ECONÔMICO!"
    else:  # c >= 14
        return f"Consumo de {c}km/l, SUPER ECONÔMICO!"


def main():
    """Prompt for distance and fuel used, then print the verdict."""
    print("VERIFICADOR DE CONSUMO")
    print("-----------------------")
    d = float(input("Distância percorrida pelo carro em km: "))
    v = float(input("Quantidade de gasolina consumida em l: "))
    print(classificar_consumo(d / v))


if __name__ == "__main__":  # guard so importing the module performs no I/O
    main()
print("VERIFICADOR DE CONSUMO")
print("-----------------------")
d = float(input("Distância percorrida pelo carro em km: "))
v = float(input("Quantidade de gasolina consumida em l: "))
c = d/v
if c < 8:
print(f"Consumo de {c}km/l, VENDA O CARRO!")
elif c > 8 and c < 14:
print(f"Consumo de {c}km/l, ECONÔMICO!")
else:
print(f"Consumo de {c}km/l, SUPER ECONÔMICO!") | true | true |
1c2e1e065a6bb1b61bcba4f5de7902153878b816 | 502 | py | Python | app/auth/__init__.py | muli3203/blogting | 9fc048b7cf42fa7751de34317dc186909a9fa8c9 | [
"MIT"
] | null | null | null | app/auth/__init__.py | muli3203/blogting | 9fc048b7cf42fa7751de34317dc186909a9fa8c9 | [
"MIT"
] | null | null | null | app/auth/__init__.py | muli3203/blogting | 9fc048b7cf42fa7751de34317dc186909a9fa8c9 | [
"MIT"
] | 1 | 2020-02-21T13:22:36.000Z | 2020-02-21T13:22:36.000Z | # from flask_mail import Message
# NOTE(review): the commented-out block below is dead mail-sending code kept
# for reference only; it is a candidate for deletion.
# from flask import render_template
# from . import mail
# def mail_message(subject,template,to,**kwargs):
# sender_email = "moringademo@gmail.com"
# email = Message(subject, sender= sender_email, recipients=[to])
# email.body = render_template(template + ".txt",**kwargs)
# email.html = render_template(template + ".html",**kwargs)
# mail.send(email)
from flask import Blueprint
# Blueprint for the authentication section of the application.
auth = Blueprint('auth', __name__)
# Imported after ``auth`` is defined — presumably so views/forms can import
# this blueprint without a circular-import error; verify against those modules.
from . import views, forms
from flask import Blueprint
auth = Blueprint('auth', __name__)
from . import views, forms | true | true |
1c2e1ea426c69378e7820b30572031dd68c0043c | 3,285 | py | Python | iteration2/ZoomPollViewer/Poll.py | a-haruntokyer/Zoom-Poll-Data-Match | ac46cfadbb743f34411b530ce4f5bd464362e622 | [
"MIT"
] | null | null | null | iteration2/ZoomPollViewer/Poll.py | a-haruntokyer/Zoom-Poll-Data-Match | ac46cfadbb743f34411b530ce4f5bd464362e622 | [
"MIT"
] | null | null | null | iteration2/ZoomPollViewer/Poll.py | a-haruntokyer/Zoom-Poll-Data-Match | ac46cfadbb743f34411b530ce4f5bd464362e622 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
ZOOM POLL VIEWER v0.1
POLL CLASS
11 Function
3 Object
"""
from .Question import Question
class Poll:
    """A poll (quiz/survey) holding its questions and per-session statistics.

    Fixes over the previous revision:
    * ``get_number_of_students`` no longer prints a debug dump.
    * ``calculate_session_average_grade`` returns 0 instead of raising
      ``NameError`` when no session grades have been stored.
    * ``add_question`` invalidates the cached question count.
    """

    def __init__(self, zpv, poll_name, poll_type="QUIZ"):
        self.zpv = zpv                    # owning ZoomPollViewer instance
        self._name = poll_name
        self._type = poll_type
        self._questions = []              # Question objects, insertion order
        self._number_of_questions = None  # cached len(self._questions)
        self._session_grades = {}         # session -> list of grades
        self._number_of_students = {}     # session -> attendance count

    def get_name(self):
        """Return the poll name."""
        return self._name

    def get_type(self):
        """Return the poll type (e.g. "QUIZ")."""
        return self._type

    def get_questions(self):
        """Return the list of question objects."""
        return self._questions

    def get_question(self, question_text):
        """Return the question matching *question_text*, or ``False``.

        Tries an exact text match first, then a whitespace-insensitive
        comparison via ``question_formatter``.
        """
        for question in self._questions:
            if question.get_text() == question_text:
                return question
        normalized = self.question_formatter(question_text)
        for question in self._questions:
            if self.question_formatter(question.get_text()) == normalized:
                return question
        return False

    def question_formatter(self, question_text):
        """Return *question_text* with all spaces, tabs and newlines removed."""
        for ws in (" ", "\t", "\n"):
            question_text = question_text.replace(ws, "")
        return question_text

    def get_number_of_questions(self):
        """Return the number of questions (cached until a question is added)."""
        if self._number_of_questions is None:
            self._number_of_questions = len(self._questions)
        return self._number_of_questions

    def get_number_of_students(self):
        """Return the highest attendance recorded over all sessions (0 if none)."""
        if not self._number_of_students:
            return 0
        return max(self._number_of_students.values())

    def add_question(self, question_text):
        """Return the existing question for *question_text*, creating it if needed."""
        question = self.get_question(question_text)
        if not question:
            question = Question(self, question_text)
            self._questions.append(question)
            self._number_of_questions = None  # invalidate cached count
        return question

    def set_session_grades(self, session, grades):
        """Store the list of grades for *session*."""
        self._session_grades[session] = grades

    def get_grades_of_seesion(self, session):
        """Return the grades of *session* (misspelled name kept for API compat)."""
        return self._session_grades[session]

    def set_session_number_of_students(self, session, number_of_students):
        """Store the attendance count for *session*."""
        self._number_of_students[session] = number_of_students

    def calculate_session_average_grade(self):
        """Return the average of the first stored session's grades, or 0.

        NOTE(review): only the first session's grades are averaged, matching
        the original ``break``-after-first behavior — confirm this is intended.
        """
        if not self._session_grades:
            return 0  # previously raised NameError: `grades` stayed unbound
        grades = next(iter(self._session_grades.values()))
        if not grades:
            return 0
        return sum(grades) / len(grades)

    def get_number_of_max_choices(self):
        """Return the largest choice count over all questions (0 if none)."""
        new_max = 0
        for question in self.get_questions():
            new_max = max(new_max, len(question._choices))
        return new_max
| 29.863636 | 102 | 0.625266 |
from .Question import Question
class Poll:
def __init__(self, zpv, poll_name, poll_type="QUIZ"):
self.zpv = zpv
self._name = poll_name
self._type = poll_type
self._questions = []
self._number_of_questions = None
self._session_grades = {}
self._number_of_students = {}
def get_name(self):
return self._name
def get_type(self):
return self._type
def get_questions(self):
return self._questions
def get_question(self, question_text):
for question in self._questions:
if question.get_text() == question_text:
return question
question_text = self.question_formatter(question_text)
for question in self._questions:
if self.question_formatter(question.get_text()) == self.question_formatter(question_text):
return question
return False
def question_formatter(self, question_text):
result = question_text.replace(" ", "")
result = result.replace("\t", "")
result = result.replace("\n", "")
return result
def get_number_of_questions(self):
if self._number_of_questions is None:
self._number_of_questions = len(self._questions)
return self._number_of_questions
def get_number_of_students(self):
total = 0
print(self._number_of_students)
for session in self._number_of_students:
if self._number_of_students[session] > total:
total = self._number_of_students[session]
return total
def add_question(self, question_text):
question = self.get_question(question_text)
if question:
return question
else:
question = Question(self, question_text)
self._questions.append(question)
return question
def set_session_grades(self, session, grades):
self._session_grades[session] = grades
def get_grades_of_seesion(self, session):
return self._session_grades[session]
def set_session_number_of_students(self, session, number_of_students):
self._number_of_students[session] = number_of_students
def calculate_session_average_grade(self):
if len(self._session_grades) > 0:
for i in self._session_grades:
grades = self._session_grades[i]
break
if len(grades) > 0:
return sum(grades) / len(grades)
else:
return 0
def get_number_of_max_choices(self):
new_max = 0
for question in self.get_questions():
if len(question._choices) > new_max:
new_max = len(question._choices)
return new_max
| true | true |
1c2e1ebe11f2d9f090f081f98954d52c29c6346a | 13,914 | py | Python | awacs/ec2.py | chizou/awacs | 335c545d13ea22488b318245af891eb427c139db | [
"BSD-2-Clause"
] | null | null | null | awacs/ec2.py | chizou/awacs | 335c545d13ea22488b318245af891eb427c139db | [
"BSD-2-Clause"
] | null | null | null | awacs/ec2.py | chizou/awacs | 335c545d13ea22488b318245af891eb427c139db | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon EC2 Spot Fleet'
prefix = 'ec2'
class Action(BaseAction):
    """An IAM action within the ``ec2`` service prefix."""

    def __init__(self, action=None):
        super(Action, self).__init__(prefix, action)
class ARN(BaseARN):
    """An ARN scoped to the ``ec2`` service."""

    def __init__(self, resource='', region='', account=''):
        super(ARN, self).__init__(service=prefix, resource=resource,
                                  region=region, account=account)
CancelSpotFleetRequests = Action('CancelSpotFleetRequests')
DescribeSpotFleetInstances = Action('DescribeSpotFleetInstances')
DescribeSpotFleetRequestHistory = \
Action('DescribeSpotFleetRequestHistory')
DescribeSpotFleetRequests = Action('DescribeSpotFleetRequests')
ModifySpotFleetRequest = Action('ModifySpotFleetRequest')
RequestSpotFleet = Action('RequestSpotFleet')
AcceptReservedInstancesExchangeQuote = \
Action('AcceptReservedInstancesExchangeQuote')
AcceptVpcPeeringConnection = Action('AcceptVpcPeeringConnection')
AllocateAddress = Action('AllocateAddress')
AllocateHosts = Action('AllocateHosts')
AssignIpv6Addresses = Action('AssignIpv6Addresses')
AssignPrivateIpAddresses = Action('AssignPrivateIpAddresses')
AssociateAddress = Action('AssociateAddress')
AssociateDhcpOptions = Action('AssociateDhcpOptions')
AssociateIamInstanceProfile = Action('AssociateIamInstanceProfile')
AssociateRouteTable = Action('AssociateRouteTable')
AssociateSubnetCidrBlock = Action('AssociateSubnetCidrBlock')
AssociateVpcCidrBlock = Action('AssociateVpcCidrBlock')
AttachClassicLinkVpc = Action('AttachClassicLinkVpc')
AttachInternetGateway = Action('AttachInternetGateway')
AttachNetworkInterface = Action('AttachNetworkInterface')
AttachVolume = Action('AttachVolume')
AttachVpnGateway = Action('AttachVpnGateway')
AuthorizeSecurityGroupEgress = Action('AuthorizeSecurityGroupEgress')
AuthorizeSecurityGroupIngress = Action('AuthorizeSecurityGroupIngress')
BundleInstance = Action('BundleInstance')
CancelBundleTask = Action('CancelBundleTask')
CancelConversionTask = Action('CancelConversionTask')
CancelExportTask = Action('CancelExportTask')
CancelImportTask = Action('CancelImportTask')
CancelReservedInstancesListing = Action('CancelReservedInstancesListing')
CancelSpotFleetRequests = Action('CancelSpotFleetRequests')
CancelSpotInstanceRequests = Action('CancelSpotInstanceRequests')
ConfirmProductInstance = Action('ConfirmProductInstance')
CopyImage = Action('CopyImage')
CopySnapshot = Action('CopySnapshot')
CreateCustomerGateway = Action('CreateCustomerGateway')
CreateDhcpOptions = Action('CreateDhcpOptions')
CreateEgressOnlyInternetGateway = \
Action('CreateEgressOnlyInternetGateway')
CreateFlowLogs = Action('CreateFlowLogs')
CreateFpgaImage = Action('CreateFpgaImage')
CreateImage = Action('CreateImage')
CreateInstanceExportTask = Action('CreateInstanceExportTask')
CreateInternetGateway = Action('CreateInternetGateway')
CreateKeyPair = Action('CreateKeyPair')
CreateNatGateway = Action('CreateNatGateway')
CreateNetworkAcl = Action('CreateNetworkAcl')
CreateNetworkAclEntry = Action('CreateNetworkAclEntry')
CreateNetworkInterface = Action('CreateNetworkInterface')
CreateNetworkInterfacePermission = \
Action('CreateNetworkInterfacePermission')
CreatePlacementGroup = Action('CreatePlacementGroup')
CreateReservedInstancesListing = Action('CreateReservedInstancesListing')
CreateRoute = Action('CreateRoute')
CreateRouteTable = Action('CreateRouteTable')
CreateSecurityGroup = Action('CreateSecurityGroup')
CreateSnapshot = Action('CreateSnapshot')
CreateSpotDatafeedSubscription = Action('CreateSpotDatafeedSubscription')
CreateSubnet = Action('CreateSubnet')
CreateTags = Action('CreateTags')
CreateVolume = Action('CreateVolume')
CreateVpc = Action('CreateVpc')
CreateVpcEndpoint = Action('CreateVpcEndpoint')
CreateVpcPeeringConnection = Action('CreateVpcPeeringConnection')
CreateVpnConnection = Action('CreateVpnConnection')
CreateVpnConnectionRoute = Action('CreateVpnConnectionRoute')
CreateVpnGateway = Action('CreateVpnGateway')
DeleteCustomerGateway = Action('DeleteCustomerGateway')
DeleteDhcpOptions = Action('DeleteDhcpOptions')
DeleteEgressOnlyInternetGateway = \
Action('DeleteEgressOnlyInternetGateway')
DeleteFlowLogs = Action('DeleteFlowLogs')
DeleteInternetGateway = Action('DeleteInternetGateway')
DeleteKeyPair = Action('DeleteKeyPair')
DeleteNatGateway = Action('DeleteNatGateway')
DeleteNetworkAcl = Action('DeleteNetworkAcl')
DeleteNetworkAclEntry = Action('DeleteNetworkAclEntry')
DeleteNetworkInterface = Action('DeleteNetworkInterface')
DeletePlacementGroup = Action('DeletePlacementGroup')
DeleteRoute = Action('DeleteRoute')
DeleteRouteTable = Action('DeleteRouteTable')
DeleteSecurityGroup = Action('DeleteSecurityGroup')
DeleteSnapshot = Action('DeleteSnapshot')
DeleteSpotDatafeedSubscription = Action('DeleteSpotDatafeedSubscription')
DeleteSubnet = Action('DeleteSubnet')
DeleteTags = Action('DeleteTags')
DeleteVolume = Action('DeleteVolume')
DeleteVpc = Action('DeleteVpc')
DeleteVpcEndpoints = Action('DeleteVpcEndpoints')
DeleteVpcPeeringConnection = Action('DeleteVpcPeeringConnection')
DeleteVpnConnection = Action('DeleteVpnConnection')
DeleteVpnConnectionRoute = Action('DeleteVpnConnectionRoute')
DeleteVpnGateway = Action('DeleteVpnGateway')
DeregisterImage = Action('DeregisterImage')
DescribeAccountAttributes = Action('DescribeAccountAttributes')
DescribeAddresses = Action('DescribeAddresses')
DescribeAvailabilityZones = Action('DescribeAvailabilityZones')
DescribeBundleTasks = Action('DescribeBundleTasks')
DescribeClassicLinkInstances = Action('DescribeClassicLinkInstances')
DescribeConversionTasks = Action('DescribeConversionTasks')
DescribeCustomerGateways = Action('DescribeCustomerGateways')
DescribeDhcpOptions = Action('DescribeDhcpOptions')
DescribeEgressOnlyInternetGateways = \
Action('DescribeEgressOnlyInternetGateways')
DescribeExportTasks = Action('DescribeExportTasks')
DescribeFlowLogs = Action('DescribeFlowLogs')
DescribeFpgaImages = Action('DescribeFpgaImages')
DescribeHostReservationOfferings = \
Action('DescribeHostReservationOfferings')
DescribeHostReservations = Action('DescribeHostReservations')
DescribeHosts = Action('DescribeHosts')
DescribeIamInstanceProfileAssociations = \
Action('DescribeIamInstanceProfileAssociations')
DescribeIdFormat = Action('DescribeIdFormat')
DescribeIdentityIdFormat = Action('DescribeIdentityIdFormat')
DescribeImageAttribute = Action('DescribeImageAttribute')
DescribeImages = Action('DescribeImages')
DescribeImportImageTasks = Action('DescribeImportImageTasks')
DescribeImportSnapshotTasks = Action('DescribeImportSnapshotTasks')
DescribeInstanceAttribute = Action('DescribeInstanceAttribute')
DescribeInstanceStatus = Action('DescribeInstanceStatus')
DescribeInstances = Action('DescribeInstances')
DescribeInternetGateways = Action('DescribeInternetGateways')
DescribeKeyPairs = Action('DescribeKeyPairs')
DescribeMovingAddresses = Action('DescribeMovingAddresses')
DescribeNatGateways = Action('DescribeNatGateways')
DescribeNetworkAcls = Action('DescribeNetworkAcls')
DescribeNetworkInterfaceAttribute = \
Action('DescribeNetworkInterfaceAttribute')
DescribeNetworkInterfaces = Action('DescribeNetworkInterfaces')
DescribePlacementGroups = Action('DescribePlacementGroups')
DescribePrefixLists = Action('DescribePrefixLists')
DescribeRegions = Action('DescribeRegions')
DescribeReservedInstances = Action('DescribeReservedInstances')
DescribeReservedInstancesListings = \
Action('DescribeReservedInstancesListings')
DescribeReservedInstancesModifications = \
Action('DescribeReservedInstancesModifications')
DescribeReservedInstancesOfferings = \
Action('DescribeReservedInstancesOfferings')
DescribeRouteTables = Action('DescribeRouteTables')
DescribeSecurityGroups = Action('DescribeSecurityGroups')
DescribeSnapshotAttribute = Action('DescribeSnapshotAttribute')
DescribeSnapshots = Action('DescribeSnapshots')
DescribeSpotDatafeedSubscription = \
Action('DescribeSpotDatafeedSubscription')
DescribeSpotFleetInstances = Action('DescribeSpotFleetInstances')
DescribeSpotFleetRequestHistory = \
Action('DescribeSpotFleetRequestHistory')
DescribeSpotFleetRequests = Action('DescribeSpotFleetRequests')
DescribeSpotInstanceRequests = Action('DescribeSpotInstanceRequests')
DescribeSpotPriceHistory = Action('DescribeSpotPriceHistory')
DescribeStaleSecurityGroups = Action('DescribeStaleSecurityGroups')
DescribeSubnets = Action('DescribeSubnets')
DescribeTags = Action('DescribeTags')
DescribeVolumeAttribute = Action('DescribeVolumeAttribute')
DescribeVolumeStatus = Action('DescribeVolumeStatus')
DescribeVolumes = Action('DescribeVolumes')
DescribeVolumesModifications = Action('DescribeVolumesModifications')
DescribeVpcAttribute = Action('DescribeVpcAttribute')
DescribeVpcClassicLink = Action('DescribeVpcClassicLink')
DescribeVpcClassicLinkDnsSupport = \
Action('DescribeVpcClassicLinkDnsSupport')
DescribeVpcEndpointServices = Action('DescribeVpcEndpointServices')
DescribeVpcEndpoints = Action('DescribeVpcEndpoints')
DescribeVpcPeeringConnections = Action('DescribeVpcPeeringConnections')
DescribeVpcs = Action('DescribeVpcs')
DescribeVpnConnections = Action('DescribeVpnConnections')
DescribeVpnGateways = Action('DescribeVpnGateways')
DetachClassicLinkVpc = Action('DetachClassicLinkVpc')
DetachInternetGateway = Action('DetachInternetGateway')
DetachNetworkInterface = Action('DetachNetworkInterface')
DetachVolume = Action('DetachVolume')
DetachVpnGateway = Action('DetachVpnGateway')
DisableVgwRoutePropagation = Action('DisableVgwRoutePropagation')
DisableVpcClassicLink = Action('DisableVpcClassicLink')
DisableVpcClassicLinkDnsSupport = \
Action('DisableVpcClassicLinkDnsSupport')
DisassociateAddress = Action('DisassociateAddress')
DisassociateIamInstanceProfile = Action('DisassociateIamInstanceProfile')
DisassociateRouteTable = Action('DisassociateRouteTable')
DisassociateSubnetCidrBlock = Action('DisassociateSubnetCidrBlock')
DisassociateVpcCidrBlock = Action('DisassociateVpcCidrBlock')
EnableVgwRoutePropagation = Action('EnableVgwRoutePropagation')
EnableVolumeIO = Action('EnableVolumeIO')
EnableVpcClassicLink = Action('EnableVpcClassicLink')
EnableVpcClassicLinkDnsSupport = Action('EnableVpcClassicLinkDnsSupport')
GetConsoleOutput = Action('GetConsoleOutput')
GetConsoleScreenshot = Action('GetConsoleScreenshot')
GetHostReservationPurchasePreview = \
Action('GetHostReservationPurchasePreview')
GetPasswordData = Action('GetPasswordData')
GetReservedInstancesExchangeQuote = \
Action('GetReservedInstancesExchangeQuote')
ImportImage = Action('ImportImage')
ImportInstance = Action('ImportInstance')
ImportKeyPair = Action('ImportKeyPair')
ImportSnapshot = Action('ImportSnapshot')
ImportVolume = Action('ImportVolume')
ModifyHosts = Action('ModifyHosts')
ModifyIdFormat = Action('ModifyIdFormat')
ModifyIdentityIdFormat = Action('ModifyIdentityIdFormat')
ModifyImageAttribute = Action('ModifyImageAttribute')
ModifyInstanceAttribute = Action('ModifyInstanceAttribute')
ModifyInstancePlacement = Action('ModifyInstancePlacement')
ModifyNetworkInterfaceAttribute = \
Action('ModifyNetworkInterfaceAttribute')
ModifyReservedInstances = Action('ModifyReservedInstances')
ModifySnapshotAttribute = Action('ModifySnapshotAttribute')
ModifySpotFleetRequest = Action('ModifySpotFleetRequest')
ModifySubnetAttribute = Action('ModifySubnetAttribute')
ModifyVolume = Action('ModifyVolume')
ModifyVolumeAttribute = Action('ModifyVolumeAttribute')
ModifyVpcAttribute = Action('ModifyVpcAttribute')
ModifyVpcEndpoint = Action('ModifyVpcEndpoint')
ModifyVpcPeeringConnectionOptions = \
Action('ModifyVpcPeeringConnectionOptions')
MonitorInstances = Action('MonitorInstances')
MoveAddressToVpc = Action('MoveAddressToVpc')
PurchaseHostReservation = Action('PurchaseHostReservation')
PurchaseReservedInstancesOffering = \
Action('PurchaseReservedInstancesOffering')
PurchaseScheduledInstances = Action('PurchaseScheduledInstances')
RebootInstances = Action('RebootInstances')
RegisterImage = Action('RegisterImage')
RejectVpcPeeringConnection = Action('RejectVpcPeeringConnection')
ReleaseAddress = Action('ReleaseAddress')
ReleaseHosts = Action('ReleaseHosts')
ReplaceIamInstanceProfileAssociation = \
Action('ReplaceIamInstanceProfileAssociation')
ReplaceNetworkAclAssociation = Action('ReplaceNetworkAclAssociation')
ReplaceNetworkAclEntry = Action('ReplaceNetworkAclEntry')
ReplaceRoute = Action('ReplaceRoute')
ReplaceRouteTableAssociation = Action('ReplaceRouteTableAssociation')
ReportInstanceStatus = Action('ReportInstanceStatus')
RequestSpotFleet = Action('RequestSpotFleet')
RequestSpotInstances = Action('RequestSpotInstances')
ResetImageAttribute = Action('ResetImageAttribute')
ResetInstanceAttribute = Action('ResetInstanceAttribute')
ResetNetworkInterfaceAttribute = Action('ResetNetworkInterfaceAttribute')
ResetSnapshotAttribute = Action('ResetSnapshotAttribute')
RestoreAddressToClassic = Action('RestoreAddressToClassic')
RevokeSecurityGroupEgress = Action('RevokeSecurityGroupEgress')
RevokeSecurityGroupIngress = Action('RevokeSecurityGroupIngress')
RunInstances = Action('RunInstances')
RunScheduledInstances = Action('RunScheduledInstances')
StartInstances = Action('StartInstances')
StopInstances = Action('StopInstances')
TerminateInstances = Action('TerminateInstances')
UnassignIpv6Addresses = Action('UnassignIpv6Addresses')
UnassignPrivateIpAddresses = Action('UnassignPrivateIpAddresses')
UnmonitorInstances = Action('UnmonitorInstances')
UpdateSecurityGroupRuleDescriptionsEgress = \
Action('UpdateSecurityGroupRuleDescriptionsEgress')
UpdateSecurityGroupRuleDescriptionsIngress = \
Action('UpdateSecurityGroupRuleDescriptionsIngress')
| 48.821053 | 73 | 0.840089 |
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon EC2 Spot Fleet'
prefix = 'ec2'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
CancelSpotFleetRequests = Action('CancelSpotFleetRequests')
DescribeSpotFleetInstances = Action('DescribeSpotFleetInstances')
DescribeSpotFleetRequestHistory = \
Action('DescribeSpotFleetRequestHistory')
DescribeSpotFleetRequests = Action('DescribeSpotFleetRequests')
ModifySpotFleetRequest = Action('ModifySpotFleetRequest')
RequestSpotFleet = Action('RequestSpotFleet')
AcceptReservedInstancesExchangeQuote = \
Action('AcceptReservedInstancesExchangeQuote')
AcceptVpcPeeringConnection = Action('AcceptVpcPeeringConnection')
AllocateAddress = Action('AllocateAddress')
AllocateHosts = Action('AllocateHosts')
AssignIpv6Addresses = Action('AssignIpv6Addresses')
AssignPrivateIpAddresses = Action('AssignPrivateIpAddresses')
AssociateAddress = Action('AssociateAddress')
AssociateDhcpOptions = Action('AssociateDhcpOptions')
AssociateIamInstanceProfile = Action('AssociateIamInstanceProfile')
AssociateRouteTable = Action('AssociateRouteTable')
AssociateSubnetCidrBlock = Action('AssociateSubnetCidrBlock')
AssociateVpcCidrBlock = Action('AssociateVpcCidrBlock')
AttachClassicLinkVpc = Action('AttachClassicLinkVpc')
AttachInternetGateway = Action('AttachInternetGateway')
AttachNetworkInterface = Action('AttachNetworkInterface')
AttachVolume = Action('AttachVolume')
AttachVpnGateway = Action('AttachVpnGateway')
AuthorizeSecurityGroupEgress = Action('AuthorizeSecurityGroupEgress')
AuthorizeSecurityGroupIngress = Action('AuthorizeSecurityGroupIngress')
BundleInstance = Action('BundleInstance')
CancelBundleTask = Action('CancelBundleTask')
CancelConversionTask = Action('CancelConversionTask')
CancelExportTask = Action('CancelExportTask')
CancelImportTask = Action('CancelImportTask')
CancelReservedInstancesListing = Action('CancelReservedInstancesListing')
CancelSpotFleetRequests = Action('CancelSpotFleetRequests')
CancelSpotInstanceRequests = Action('CancelSpotInstanceRequests')
ConfirmProductInstance = Action('ConfirmProductInstance')
CopyImage = Action('CopyImage')
CopySnapshot = Action('CopySnapshot')
CreateCustomerGateway = Action('CreateCustomerGateway')
CreateDhcpOptions = Action('CreateDhcpOptions')
CreateEgressOnlyInternetGateway = \
Action('CreateEgressOnlyInternetGateway')
CreateFlowLogs = Action('CreateFlowLogs')
CreateFpgaImage = Action('CreateFpgaImage')
CreateImage = Action('CreateImage')
CreateInstanceExportTask = Action('CreateInstanceExportTask')
CreateInternetGateway = Action('CreateInternetGateway')
CreateKeyPair = Action('CreateKeyPair')
CreateNatGateway = Action('CreateNatGateway')
CreateNetworkAcl = Action('CreateNetworkAcl')
CreateNetworkAclEntry = Action('CreateNetworkAclEntry')
CreateNetworkInterface = Action('CreateNetworkInterface')
CreateNetworkInterfacePermission = \
Action('CreateNetworkInterfacePermission')
CreatePlacementGroup = Action('CreatePlacementGroup')
CreateReservedInstancesListing = Action('CreateReservedInstancesListing')
CreateRoute = Action('CreateRoute')
CreateRouteTable = Action('CreateRouteTable')
CreateSecurityGroup = Action('CreateSecurityGroup')
CreateSnapshot = Action('CreateSnapshot')
CreateSpotDatafeedSubscription = Action('CreateSpotDatafeedSubscription')
CreateSubnet = Action('CreateSubnet')
CreateTags = Action('CreateTags')
CreateVolume = Action('CreateVolume')
CreateVpc = Action('CreateVpc')
CreateVpcEndpoint = Action('CreateVpcEndpoint')
CreateVpcPeeringConnection = Action('CreateVpcPeeringConnection')
CreateVpnConnection = Action('CreateVpnConnection')
CreateVpnConnectionRoute = Action('CreateVpnConnectionRoute')
CreateVpnGateway = Action('CreateVpnGateway')
DeleteCustomerGateway = Action('DeleteCustomerGateway')
DeleteDhcpOptions = Action('DeleteDhcpOptions')
DeleteEgressOnlyInternetGateway = \
Action('DeleteEgressOnlyInternetGateway')
DeleteFlowLogs = Action('DeleteFlowLogs')
DeleteInternetGateway = Action('DeleteInternetGateway')
DeleteKeyPair = Action('DeleteKeyPair')
DeleteNatGateway = Action('DeleteNatGateway')
DeleteNetworkAcl = Action('DeleteNetworkAcl')
DeleteNetworkAclEntry = Action('DeleteNetworkAclEntry')
DeleteNetworkInterface = Action('DeleteNetworkInterface')
DeletePlacementGroup = Action('DeletePlacementGroup')
DeleteRoute = Action('DeleteRoute')
DeleteRouteTable = Action('DeleteRouteTable')
DeleteSecurityGroup = Action('DeleteSecurityGroup')
DeleteSnapshot = Action('DeleteSnapshot')
DeleteSpotDatafeedSubscription = Action('DeleteSpotDatafeedSubscription')
DeleteSubnet = Action('DeleteSubnet')
DeleteTags = Action('DeleteTags')
DeleteVolume = Action('DeleteVolume')
DeleteVpc = Action('DeleteVpc')
DeleteVpcEndpoints = Action('DeleteVpcEndpoints')
DeleteVpcPeeringConnection = Action('DeleteVpcPeeringConnection')
DeleteVpnConnection = Action('DeleteVpnConnection')
DeleteVpnConnectionRoute = Action('DeleteVpnConnectionRoute')
DeleteVpnGateway = Action('DeleteVpnGateway')
DeregisterImage = Action('DeregisterImage')
DescribeAccountAttributes = Action('DescribeAccountAttributes')
DescribeAddresses = Action('DescribeAddresses')
DescribeAvailabilityZones = Action('DescribeAvailabilityZones')
DescribeBundleTasks = Action('DescribeBundleTasks')
DescribeClassicLinkInstances = Action('DescribeClassicLinkInstances')
DescribeConversionTasks = Action('DescribeConversionTasks')
DescribeCustomerGateways = Action('DescribeCustomerGateways')
DescribeDhcpOptions = Action('DescribeDhcpOptions')
DescribeEgressOnlyInternetGateways = \
Action('DescribeEgressOnlyInternetGateways')
DescribeExportTasks = Action('DescribeExportTasks')
DescribeFlowLogs = Action('DescribeFlowLogs')
DescribeFpgaImages = Action('DescribeFpgaImages')
DescribeHostReservationOfferings = \
Action('DescribeHostReservationOfferings')
DescribeHostReservations = Action('DescribeHostReservations')
DescribeHosts = Action('DescribeHosts')
DescribeIamInstanceProfileAssociations = \
Action('DescribeIamInstanceProfileAssociations')
DescribeIdFormat = Action('DescribeIdFormat')
DescribeIdentityIdFormat = Action('DescribeIdentityIdFormat')
DescribeImageAttribute = Action('DescribeImageAttribute')
DescribeImages = Action('DescribeImages')
DescribeImportImageTasks = Action('DescribeImportImageTasks')
DescribeImportSnapshotTasks = Action('DescribeImportSnapshotTasks')
DescribeInstanceAttribute = Action('DescribeInstanceAttribute')
DescribeInstanceStatus = Action('DescribeInstanceStatus')
DescribeInstances = Action('DescribeInstances')
DescribeInternetGateways = Action('DescribeInternetGateways')
DescribeKeyPairs = Action('DescribeKeyPairs')
DescribeMovingAddresses = Action('DescribeMovingAddresses')
DescribeNatGateways = Action('DescribeNatGateways')
DescribeNetworkAcls = Action('DescribeNetworkAcls')
DescribeNetworkInterfaceAttribute = \
Action('DescribeNetworkInterfaceAttribute')
DescribeNetworkInterfaces = Action('DescribeNetworkInterfaces')
DescribePlacementGroups = Action('DescribePlacementGroups')
DescribePrefixLists = Action('DescribePrefixLists')
DescribeRegions = Action('DescribeRegions')
DescribeReservedInstances = Action('DescribeReservedInstances')
DescribeReservedInstancesListings = \
Action('DescribeReservedInstancesListings')
DescribeReservedInstancesModifications = \
Action('DescribeReservedInstancesModifications')
DescribeReservedInstancesOfferings = \
Action('DescribeReservedInstancesOfferings')
DescribeRouteTables = Action('DescribeRouteTables')
DescribeSecurityGroups = Action('DescribeSecurityGroups')
DescribeSnapshotAttribute = Action('DescribeSnapshotAttribute')
DescribeSnapshots = Action('DescribeSnapshots')
DescribeSpotDatafeedSubscription = \
Action('DescribeSpotDatafeedSubscription')
DescribeSpotFleetInstances = Action('DescribeSpotFleetInstances')
DescribeSpotFleetRequestHistory = \
Action('DescribeSpotFleetRequestHistory')
DescribeSpotFleetRequests = Action('DescribeSpotFleetRequests')
DescribeSpotInstanceRequests = Action('DescribeSpotInstanceRequests')
DescribeSpotPriceHistory = Action('DescribeSpotPriceHistory')
DescribeStaleSecurityGroups = Action('DescribeStaleSecurityGroups')
DescribeSubnets = Action('DescribeSubnets')
DescribeTags = Action('DescribeTags')
DescribeVolumeAttribute = Action('DescribeVolumeAttribute')
DescribeVolumeStatus = Action('DescribeVolumeStatus')
DescribeVolumes = Action('DescribeVolumes')
DescribeVolumesModifications = Action('DescribeVolumesModifications')
DescribeVpcAttribute = Action('DescribeVpcAttribute')
DescribeVpcClassicLink = Action('DescribeVpcClassicLink')
DescribeVpcClassicLinkDnsSupport = \
Action('DescribeVpcClassicLinkDnsSupport')
DescribeVpcEndpointServices = Action('DescribeVpcEndpointServices')
DescribeVpcEndpoints = Action('DescribeVpcEndpoints')
DescribeVpcPeeringConnections = Action('DescribeVpcPeeringConnections')
DescribeVpcs = Action('DescribeVpcs')
DescribeVpnConnections = Action('DescribeVpnConnections')
DescribeVpnGateways = Action('DescribeVpnGateways')
DetachClassicLinkVpc = Action('DetachClassicLinkVpc')
DetachInternetGateway = Action('DetachInternetGateway')
DetachNetworkInterface = Action('DetachNetworkInterface')
DetachVolume = Action('DetachVolume')
DetachVpnGateway = Action('DetachVpnGateway')
DisableVgwRoutePropagation = Action('DisableVgwRoutePropagation')
DisableVpcClassicLink = Action('DisableVpcClassicLink')
DisableVpcClassicLinkDnsSupport = \
Action('DisableVpcClassicLinkDnsSupport')
DisassociateAddress = Action('DisassociateAddress')
DisassociateIamInstanceProfile = Action('DisassociateIamInstanceProfile')
DisassociateRouteTable = Action('DisassociateRouteTable')
DisassociateSubnetCidrBlock = Action('DisassociateSubnetCidrBlock')
DisassociateVpcCidrBlock = Action('DisassociateVpcCidrBlock')
EnableVgwRoutePropagation = Action('EnableVgwRoutePropagation')
EnableVolumeIO = Action('EnableVolumeIO')
EnableVpcClassicLink = Action('EnableVpcClassicLink')
EnableVpcClassicLinkDnsSupport = Action('EnableVpcClassicLinkDnsSupport')
GetConsoleOutput = Action('GetConsoleOutput')
GetConsoleScreenshot = Action('GetConsoleScreenshot')
GetHostReservationPurchasePreview = \
Action('GetHostReservationPurchasePreview')
GetPasswordData = Action('GetPasswordData')
GetReservedInstancesExchangeQuote = \
Action('GetReservedInstancesExchangeQuote')
ImportImage = Action('ImportImage')
ImportInstance = Action('ImportInstance')
ImportKeyPair = Action('ImportKeyPair')
ImportSnapshot = Action('ImportSnapshot')
ImportVolume = Action('ImportVolume')
ModifyHosts = Action('ModifyHosts')
ModifyIdFormat = Action('ModifyIdFormat')
ModifyIdentityIdFormat = Action('ModifyIdentityIdFormat')
ModifyImageAttribute = Action('ModifyImageAttribute')
ModifyInstanceAttribute = Action('ModifyInstanceAttribute')
ModifyInstancePlacement = Action('ModifyInstancePlacement')
ModifyNetworkInterfaceAttribute = \
Action('ModifyNetworkInterfaceAttribute')
ModifyReservedInstances = Action('ModifyReservedInstances')
ModifySnapshotAttribute = Action('ModifySnapshotAttribute')
ModifySpotFleetRequest = Action('ModifySpotFleetRequest')
ModifySubnetAttribute = Action('ModifySubnetAttribute')
ModifyVolume = Action('ModifyVolume')
ModifyVolumeAttribute = Action('ModifyVolumeAttribute')
ModifyVpcAttribute = Action('ModifyVpcAttribute')
ModifyVpcEndpoint = Action('ModifyVpcEndpoint')
ModifyVpcPeeringConnectionOptions = \
Action('ModifyVpcPeeringConnectionOptions')
MonitorInstances = Action('MonitorInstances')
MoveAddressToVpc = Action('MoveAddressToVpc')
PurchaseHostReservation = Action('PurchaseHostReservation')
PurchaseReservedInstancesOffering = \
Action('PurchaseReservedInstancesOffering')
PurchaseScheduledInstances = Action('PurchaseScheduledInstances')
RebootInstances = Action('RebootInstances')
RegisterImage = Action('RegisterImage')
RejectVpcPeeringConnection = Action('RejectVpcPeeringConnection')
ReleaseAddress = Action('ReleaseAddress')
ReleaseHosts = Action('ReleaseHosts')
ReplaceIamInstanceProfileAssociation = \
Action('ReplaceIamInstanceProfileAssociation')
ReplaceNetworkAclAssociation = Action('ReplaceNetworkAclAssociation')
ReplaceNetworkAclEntry = Action('ReplaceNetworkAclEntry')
ReplaceRoute = Action('ReplaceRoute')
ReplaceRouteTableAssociation = Action('ReplaceRouteTableAssociation')
ReportInstanceStatus = Action('ReportInstanceStatus')
RequestSpotFleet = Action('RequestSpotFleet')
RequestSpotInstances = Action('RequestSpotInstances')
ResetImageAttribute = Action('ResetImageAttribute')
ResetInstanceAttribute = Action('ResetInstanceAttribute')
ResetNetworkInterfaceAttribute = Action('ResetNetworkInterfaceAttribute')
ResetSnapshotAttribute = Action('ResetSnapshotAttribute')
RestoreAddressToClassic = Action('RestoreAddressToClassic')
RevokeSecurityGroupEgress = Action('RevokeSecurityGroupEgress')
RevokeSecurityGroupIngress = Action('RevokeSecurityGroupIngress')
RunInstances = Action('RunInstances')
RunScheduledInstances = Action('RunScheduledInstances')
StartInstances = Action('StartInstances')
StopInstances = Action('StopInstances')
TerminateInstances = Action('TerminateInstances')
UnassignIpv6Addresses = Action('UnassignIpv6Addresses')
UnassignPrivateIpAddresses = Action('UnassignPrivateIpAddresses')
UnmonitorInstances = Action('UnmonitorInstances')
UpdateSecurityGroupRuleDescriptionsEgress = \
Action('UpdateSecurityGroupRuleDescriptionsEgress')
UpdateSecurityGroupRuleDescriptionsIngress = \
Action('UpdateSecurityGroupRuleDescriptionsIngress')
| true | true |
1c2e217d058dd1b7ec6b25e09d68c49a4bf5dc41 | 24,975 | py | Python | tests/__init__.py | korkeatw/pythainlp | 6fc7c3434d5e58c8e8e2bf13470445cbab0866bd | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | korkeatw/pythainlp | 6fc7c3434d5e58c8e8e2bf13470445cbab0866bd | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | korkeatw/pythainlp | 6fc7c3434d5e58c8e8e2bf13470445cbab0866bd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Unit test
"""
import datetime
import os
import unittest
from collections import Counter
from nltk.corpus import wordnet as wn
from pythainlp import word_vector
from pythainlp.corpus import (
_CORPUS_PATH,
conceptnet,
countries,
download,
provinces,
remove,
thai_negations,
thai_stopwords,
thai_syllables,
thai_words,
tnc,
ttc,
wordnet,
)
from pythainlp.corpus.common import _THAI_WORDS_FILENAME
from pythainlp.soundex import lk82, metasound, soundex, udom83
from pythainlp.spell import NorvigSpellChecker, correct, spell
from pythainlp.summarize import summarize
from pythainlp.tag import perceptron, pos_tag, pos_tag_sents, unigram
from pythainlp.tag.locations import tag_provinces
from pythainlp.tag.named_entity import ThaiNameTagger
from pythainlp.tokenize import DEFAULT_DICT_TRIE, FROZEN_DICT_TRIE, Tokenizer
from pythainlp.tokenize import deepcut as tokenize_deepcut
from pythainlp.tokenize import (
dict_trie,
dict_word_tokenize,
etcc,
longest,
multi_cut,
newmm,
)
from pythainlp.tokenize import pyicu as tokenize_pyicu
from pythainlp.tokenize import (
sent_tokenize,
subword_tokenize,
syllable_tokenize,
tcc,
word_tokenize,
)
from pythainlp.transliterate import romanize, transliterate
from pythainlp.transliterate.ipa import trans_list, xsampa_list
from pythainlp.transliterate.royin import romanize as romanize_royin
from pythainlp.util import (
arabic_digit_to_thai_digit,
bahttext,
collate,
countthai,
deletetone,
digit_to_text,
eng_to_thai,
find_keyword,
isthai,
isthaichar,
normalize,
now_reign_year,
num_to_thaiword,
rank,
reign_year_to_ad,
text_to_arabic_digit,
text_to_thai_digit,
thai_digit_to_arabic_digit,
thai_strftime,
thai_to_eng,
thaicheck,
thaiword_to_num,
)
class TestUM(unittest.TestCase):
"""
Unit test cases
ทดสอบการทำงาน
"""
# ### pythainlp.corpus
def test_conceptnet(self):
self.assertIsNotNone(conceptnet.edges("รัก"))
def test_corpus(self):
self.assertIsNotNone(countries())
self.assertIsNotNone(provinces())
self.assertIsNotNone(thai_negations())
self.assertIsNotNone(thai_stopwords())
self.assertIsNotNone(thai_syllables())
self.assertIsNotNone(thai_words())
download("test")
self.assertIsNotNone(remove("test"))
self.assertIsNotNone(remove("tnc_freq"))
def test_tnc(self):
self.assertIsNotNone(tnc.word_freqs())
self.assertIsNotNone(tnc.word_freq("นก"))
def test_ttc(self):
self.assertIsNotNone(ttc.word_freqs())
def test_wordnet(self):
self.assertIsNotNone(wordnet.langs())
self.assertEqual(
wordnet.synset("spy.n.01").lemma_names("tha"), ["สปาย", "สายลับ"]
)
self.assertIsNotNone(wordnet.synsets("นก"))
self.assertIsNotNone(wordnet.all_synsets(pos=wn.ADJ))
self.assertIsNotNone(wordnet.lemmas("นก"))
self.assertIsNotNone(wordnet.all_lemma_names(pos=wn.ADV))
self.assertIsNotNone(wordnet.lemma("cat.n.01.cat"))
self.assertEqual(wordnet.morphy("dogs"), "dog")
bird = wordnet.synset("bird.n.01")
mouse = wordnet.synset("mouse.n.01")
self.assertEqual(
wordnet.path_similarity(bird, mouse), bird.path_similarity(mouse)
)
self.assertEqual(
wordnet.wup_similarity(bird, mouse), bird.wup_similarity(mouse)
)
cat_key = wordnet.synsets("แมว")[0].lemmas()[0].key()
self.assertIsNotNone(wordnet.lemma_from_key(cat_key))
# ### pythainlp.soundex
def test_soundex(self):
self.assertIsNotNone(soundex("a", engine="lk82"))
self.assertIsNotNone(soundex("a", engine="udom83"))
self.assertIsNotNone(soundex("a", engine="metasound"))
self.assertIsNotNone(soundex("a", engine="XXX"))
self.assertEqual(lk82(None), "")
self.assertEqual(lk82(""), "")
self.assertEqual(lk82("เหตุ"), lk82("เหด"))
self.assertEqual(lk82("รถ"), "ร3000")
self.assertIsNotNone(lk82("เกาะ"))
self.assertIsNotNone(lk82("อุยกูร์"))
self.assertIsNotNone(lk82("หยากไย่"))
self.assertIsNotNone(lk82("หอ"))
self.assertEqual(lk82("น์"), "")
self.assertEqual(udom83(None), "")
self.assertEqual(udom83(""), "")
self.assertEqual(udom83("เหตุ"), udom83("เหด"))
self.assertEqual(udom83("รถ"), "ร800000")
self.assertEqual(metasound(None), "")
self.assertEqual(metasound(""), "")
self.assertEqual(metasound("เหตุ"), metasound("เหด"))
self.assertEqual(metasound("รักษ์"), metasound("รัก"))
self.assertEqual(metasound("บูรณะ"), "บ550")
self.assertEqual(metasound("คน"), "ค500")
self.assertEqual(metasound("คนA"), "ค500")
self.assertEqual(metasound("ดา"), "ด000")
self.assertIsNotNone(metasound("จะ"))
self.assertIsNotNone(metasound("ปา"))
self.assertIsNotNone(metasound("งง"))
self.assertIsNotNone(metasound("ลา"))
self.assertIsNotNone(metasound("มา"))
self.assertIsNotNone(metasound("ยา"))
self.assertIsNotNone(metasound("วา"))
self.assertIsNotNone(metasound("บูชา"))
self.assertIsNotNone(metasound("กมลา"))
self.assertIsNotNone(metasound("กาโวกาโว"))
self.assertIsNotNone(metasound("สุวรรณา"))
self.assertIsNotNone(metasound("ดอยบอย"))
# ### pythainlp.spell
def test_spell(self):
self.assertEqual(spell(None), "")
self.assertEqual(spell(""), "")
self.assertIsNotNone(spell("เน้ร"))
self.assertIsNotNone(spell("เกสมร์"))
self.assertEqual(correct(None), "")
self.assertEqual(correct(""), "")
self.assertIsNotNone(correct("ทดสอง"))
checker = NorvigSpellChecker(dict_filter="")
self.assertIsNotNone(checker.dictionary())
self.assertGreaterEqual(checker.prob("มี"), 0)
# ### pythainlp.summarize
def test_summarize(self):
text = "อาหาร หมายถึง ของแข็งหรือของเหลว "
text += "ที่กินหรือดื่มเข้าสู่ร่างกายแล้ว "
text += "จะทำให้เกิดพลังงานและความร้อนแก่ร่างกาย "
text += "ทำให้ร่างกายเจริญเติบโต "
text += "ซ่อมแซมส่วนที่สึกหรอ ควบคุมการเปลี่ยนแปลงต่างๆ ในร่างกาย "
text += "ช่วยทำให้อวัยวะต่างๆ ทำงานได้อย่างปกติ "
text += "อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย"
self.assertEqual(
summarize(text=text, n=1, engine="frequency"),
["อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย"],
)
self.assertIsNotNone(summarize(text, 1, engine="XX"))
# ### pythainlp.tag
def test_pos_tag(self):
tokens = ["ผม", "รัก", "คุณ"]
self.assertEqual(pos_tag(None), [])
self.assertEqual(pos_tag([]), [])
self.assertEqual(unigram.tag(None, corpus="pud"), [])
self.assertEqual(unigram.tag([], corpus="pud"), [])
self.assertEqual(unigram.tag(None, corpus="orchid"), [])
self.assertEqual(unigram.tag([], corpus="orchid"), [])
self.assertIsNotNone(pos_tag(tokens, engine="unigram", corpus="orchid"))
self.assertIsNotNone(pos_tag(tokens, engine="unigram", corpus="pud"))
self.assertIsNotNone(pos_tag([""], engine="unigram", corpus="pud"))
self.assertEqual(
pos_tag(word_tokenize("คุณกำลังประชุม"), engine="unigram"),
[("คุณ", "PPRS"), ("กำลัง", "XVBM"), ("ประชุม", "VACT")],
)
self.assertIsNotNone(pos_tag(tokens, engine="perceptron", corpus="orchid"))
self.assertIsNotNone(pos_tag(tokens, engine="perceptron", corpus="pud"))
self.assertEqual(perceptron.tag(None, corpus="pud"), [])
self.assertEqual(perceptron.tag([], corpus="pud"), [])
self.assertEqual(perceptron.tag(None, corpus="orchid"), [])
self.assertEqual(perceptron.tag([], corpus="orchid"), [])
self.assertIsNotNone(pos_tag(None, engine="artagger"))
self.assertIsNotNone(pos_tag([], engine="artagger"))
self.assertIsNotNone(pos_tag(tokens, engine="artagger"))
self.assertEqual(
pos_tag(word_tokenize("คุณกำลังประชุม"), engine="artagger"),
[("คุณ", "PPRS"), ("กำลัง", "XVBM"), ("ประชุม", "VACT")],
)
self.assertEqual(pos_tag_sents(None), [])
self.assertEqual(pos_tag_sents([]), [])
self.assertEqual(
pos_tag_sents([["ผม", "กิน", "ข้าว"], ["แมว", "วิ่ง"]]),
[
[("ผม", "PPRS"), ("กิน", "VACT"), ("ข้าว", "NCMN")],
[("แมว", "NCMN"), ("วิ่ง", "VACT")],
],
)
# ### pythainlp.tag.locations
def test_ner_locations(self):
self.assertEqual(
tag_provinces(["หนองคาย", "น่าอยู่"]),
[("หนองคาย", "B-LOCATION"), ("น่าอยู่", "O")],
)
# ### pythainlp.tag.named_entity
def test_ner(self):
ner = ThaiNameTagger()
self.assertEqual(ner.get_ner(""), [])
self.assertIsNotNone(ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(
ner.get_ner(
"""คณะวิทยาศาสตร์ประยุกต์และวิศวกรรมศาสตร์ มหาวิทยาลัยขอนแก่น
วิทยาเขตหนองคาย 112 หมู่ 7 บ้านหนองเดิ่น ตำบลหนองกอมเกาะ อำเภอเมือง
จังหวัดหนองคาย 43000"""
)
)
# self.assertEqual(
# ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า"),
# [
# ("แมว", "NCMN", "O"),
# ("ทำ", "VACT", "O"),
# ("อะไร", "PNTR", "O"),
# ("ตอน", "NCMN", "O"),
# ("ห้า", "VSTA", "B-TIME"),
# ("โมง", "NCMN", "I-TIME"),
# ("เช้า", "ADVN", "I-TIME"),
# ],
# )
# self.assertEqual(
# ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า", pos=False),
# [
# ("แมว", "O"),
# ("ทำ", "O"),
# ("อะไร", "O"),
# ("ตอน", "O"),
# ("ห้า", "B-TIME"),
# ("โมง", "I-TIME"),
# ("เช้า", "I-TIME"),
# ],
# )
# ### pythainlp.tokenize
def test_dict_word_tokenize(self):
self.assertEqual(dict_word_tokenize(""), [])
def test_etcc(self):
self.assertEqual(etcc.segment(""), "")
self.assertIsInstance(etcc.segment("คืนความสุข"), list)
self.assertIsNotNone(
etcc.segment(
"หมูแมวเหล่านี้ด้วยเหตุผลเชื่อมโยงทางกรรมพันธุ์"
+ "สัตว์มีแขนขาหน้าหัวเราะเพราะแข็งขืน"
)
)
def test_word_tokenize(self):
self.assertEqual(word_tokenize(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="newmm"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="mm"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="longest"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="ulmfit"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="icu"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="deepcut"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="XX"))
self.assertIsNotNone(dict_trie(()))
self.assertIsNotNone(dict_trie(("ทดสอบ", "สร้าง", "Trie")))
self.assertIsNotNone(dict_trie(["ทดสอบ", "สร้าง", "Trie"]))
self.assertIsNotNone(dict_trie(thai_words()))
self.assertIsNotNone(dict_trie(FROZEN_DICT_TRIE))
self.assertIsNotNone(
dict_trie(os.path.join(_CORPUS_PATH, _THAI_WORDS_FILENAME))
)
self.assertIsNotNone(word_tokenize("รถไฟฟ้าBTS", custom_dict=DEFAULT_DICT_TRIE))
self.assertIsNotNone(
word_tokenize("ทดสอบ", engine="deepcut", custom_dict=FROZEN_DICT_TRIE)
)
self.assertIsNotNone(
word_tokenize("ทดสอบ", engine="XX", custom_dict=FROZEN_DICT_TRIE)
)
def test_Tokenizer(self):
t_test = Tokenizer(FROZEN_DICT_TRIE)
self.assertEqual(t_test.word_tokenize(""), [])
t_test.set_tokenize_engine("longest")
self.assertEqual(t_test.word_tokenize(None), [])
t_test = Tokenizer()
self.assertEqual(t_test.word_tokenize("ก"), ["ก"])
def test_word_tokenize_icu(self):
self.assertEqual(tokenize_pyicu.segment(None), [])
self.assertEqual(tokenize_pyicu.segment(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="icu"),
["ฉัน", "รัก", "ภาษา", "ไทย", "เพราะ", "ฉัน", "เป็น", "คน", "ไทย"],
)
def test_word_tokenize_deepcut(self):
self.assertEqual(tokenize_deepcut.segment(None), [])
self.assertEqual(tokenize_deepcut.segment(""), [])
self.assertIsNotNone(tokenize_deepcut.segment("ทดสอบ", DEFAULT_DICT_TRIE))
self.assertIsNotNone(tokenize_deepcut.segment("ทดสอบ", ["ทด", "สอบ"]))
self.assertIsNotNone(word_tokenize("ทดสอบ", engine="deepcut"))
def test_word_tokenize_longest(self):
self.assertEqual(longest.segment(None), [])
self.assertEqual(longest.segment(""), [])
self.assertIsNotNone(longest.segment("กรุงเทพฯมากๆเพราโพาง BKKฯ"))
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="longest"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
def test_word_tokenize_mm(self):
self.assertEqual(multi_cut.segment(None), [])
self.assertEqual(multi_cut.segment(""), [])
self.assertEqual(word_tokenize("", engine="mm"), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="mm"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertIsNotNone(multi_cut.mmcut("ทดสอบ"))
self.assertIsNotNone(multi_cut.find_all_segment("รถไฟฟ้ากรุงเทพมหานครBTS"))
self.assertEqual(multi_cut.find_all_segment(None), [])
def test_word_tokenize_newmm(self):
self.assertEqual(newmm.segment(None), [])
self.assertEqual(newmm.segment(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="newmm"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertEqual(
word_tokenize(
"สวัสดีครับ สบายดีไหมครับ", engine="newmm", keep_whitespace=True
),
["สวัสดี", "ครับ", " ", "สบายดี", "ไหม", "ครับ"],
)
self.assertEqual(
word_tokenize("จุ๋มง่วงนอนยัง", engine="newmm"), ["จุ๋ม", "ง่วงนอน", "ยัง"]
)
self.assertEqual(word_tokenize("จุ๋มง่วง", engine="newmm"), ["จุ๋ม", "ง่วง"])
self.assertEqual(
word_tokenize("จุ๋ม ง่วง", engine="newmm", keep_whitespace=False),
["จุ๋ม", "ง่วง"],
)
def test_sent_tokenize(self):
self.assertEqual(sent_tokenize(None), [])
self.assertEqual(sent_tokenize(""), [])
self.assertEqual(
sent_tokenize("รักน้ำ รักปลา ", engine="whitespace"),
["รักน้ำ", "รักปลา", ""],
)
self.assertEqual(sent_tokenize("รักน้ำ รักปลา "), ["รักน้ำ", "รักปลา"])
def test_subword_tokenize(self):
self.assertEqual(subword_tokenize(None), [])
self.assertEqual(subword_tokenize(""), [])
self.assertIsNotNone(subword_tokenize("สวัสดีดาวอังคาร", engine="tcc"))
self.assertIsNotNone(subword_tokenize("สวัสดีดาวอังคาร", engine="etcc"))
def test_syllable_tokenize(self):
self.assertEqual(syllable_tokenize(None), [])
self.assertEqual(syllable_tokenize(""), [])
self.assertEqual(
syllable_tokenize("สวัสดีชาวโลก"), ["สวัส", "ดี", "ชาว", "โลก"]
)
def test_tcc(self):
self.assertEqual(tcc.segment(None), [])
self.assertEqual(tcc.segment(""), [])
self.assertEqual(tcc.segment("ประเทศไทย"), ["ป", "ระ", "เท", "ศ", "ไท", "ย"])
self.assertEqual(list(tcc.tcc("")), [])
self.assertEqual(tcc.tcc_pos(""), set())
# ### pythainlp.transliterate
def test_romanize(self):
self.assertEqual(romanize(None), "")
self.assertEqual(romanize(""), "")
self.assertEqual(romanize("แมว"), "maeo")
self.assertEqual(romanize_royin(None), "")
self.assertEqual(romanize_royin(""), "")
self.assertEqual(romanize_royin("หาย"), "hai")
self.assertEqual(romanize_royin("หมอก"), "mok")
# self.assertEqual(romanize_royin("มหา"), "maha") # not pass
# self.assertEqual(romanize_royin("หยาก"), "yak") # not pass
# self.assertEqual(romanize_royin("อยาก"), "yak") # not pass
# self.assertEqual(romanize_royin("ยมก"), "yamok") # not pass
# self.assertEqual(romanize_royin("กลัว"), "klua") # not pass
# self.assertEqual(romanize_royin("กลัว"), "klua") # not pass
self.assertEqual(romanize("แมว", engine="royin"), "maeo")
self.assertEqual(romanize("เดือน", engine="royin"), "duean")
self.assertEqual(romanize("ดู", engine="royin"), "du")
self.assertEqual(romanize("ดำ", engine="royin"), "dam")
self.assertEqual(romanize("บัว", engine="royin"), "bua")
self.assertEqual(romanize("กร", engine="royin"), "kon")
self.assertEqual(romanize("กรร", engine="royin"), "kan")
self.assertEqual(romanize("กรรม", engine="royin"), "kam")
self.assertIsNotNone(romanize("กก", engine="royin"))
self.assertIsNotNone(romanize("ฝ้าย", engine="royin"))
self.assertIsNotNone(romanize("ทีปกร", engine="royin"))
self.assertIsNotNone(romanize("กรม", engine="royin"))
self.assertIsNotNone(romanize("ธรรพ์", engine="royin"))
self.assertIsNotNone(romanize("กฏa์1์ ์", engine="royin"))
self.assertEqual(romanize("แมว", engine="thai2rom"), "maeo")
def test_transliterate(self):
self.assertEqual(transliterate(""), "")
self.assertEqual(transliterate("แมว", "pyicu"), "mæw")
self.assertEqual(transliterate("คน", engine="ipa"), "kʰon")
self.assertIsNotNone(trans_list("คน"))
self.assertIsNotNone(xsampa_list("คน"))
# ### pythainlp.util
def test_collate(self):
self.assertEqual(collate(["ไก่", "กก"]), ["กก", "ไก่"])
self.assertEqual(
collate(["ไก่", "เป็ด", "หมู", "วัว"]), ["ไก่", "เป็ด", "วัว", "หมู"]
)
def test_number(self):
self.assertEqual(
bahttext(5611116.50),
"ห้าล้านหกแสนหนึ่งหมื่นหนึ่งพันหนึ่งร้อยสิบหกบาทห้าสิบสตางค์",
)
self.assertEqual(bahttext(116), "หนึ่งร้อยสิบหกบาทถ้วน")
self.assertEqual(bahttext(0), "ศูนย์บาทถ้วน")
self.assertEqual(bahttext(None), "")
self.assertEqual(num_to_thaiword(112), "หนึ่งร้อยสิบสอง")
self.assertEqual(num_to_thaiword(0), "ศูนย์")
self.assertEqual(num_to_thaiword(None), "")
self.assertEqual(thaiword_to_num("ร้อยสิบสอง"), 112)
self.assertEqual(
thaiword_to_num(
["หก", "ล้าน", "หก", "แสน", "หกหมื่น", "หกพัน", "หกร้อย", "หกสิบ", "หก"]
),
6666666,
)
self.assertEqual(thaiword_to_num("ยี่สิบ"), 20)
self.assertEqual(thaiword_to_num("ศูนย์"), 0)
self.assertEqual(thaiword_to_num("ศูนย์อะไรนะ"), 0)
self.assertEqual(thaiword_to_num(""), None)
self.assertEqual(thaiword_to_num(None), None)
self.assertEqual(arabic_digit_to_thai_digit("ไทยแลนด์ 4.0"), "ไทยแลนด์ ๔.๐")
self.assertEqual(arabic_digit_to_thai_digit(""), "")
self.assertEqual(arabic_digit_to_thai_digit(None), "")
self.assertEqual(thai_digit_to_arabic_digit("๔๐๔ Not Found"), "404 Not Found")
self.assertEqual(thai_digit_to_arabic_digit(""), "")
self.assertEqual(thai_digit_to_arabic_digit(None), "")
self.assertEqual(digit_to_text("RFC 7258"), "RFC เจ็ดสองห้าแปด")
self.assertEqual(digit_to_text(""), "")
self.assertEqual(digit_to_text(None), "")
self.assertEqual(text_to_arabic_digit("เจ็ด"), "7")
self.assertEqual(text_to_arabic_digit(""), "")
self.assertEqual(text_to_arabic_digit(None), "")
self.assertEqual(text_to_thai_digit("เก้า"), "๙")
self.assertEqual(text_to_thai_digit(""), "")
self.assertEqual(text_to_thai_digit(None), "")
def test_keyboard(self):
self.assertEqual(eng_to_thai("l;ylfu8iy["), "สวัสดีครับ")
self.assertEqual(thai_to_eng("สวัสดีครับ"), "l;ylfu8iy[")
def test_keywords(self):
word_list = word_tokenize(
"แมวกินปลาอร่อยรู้ไหมว่าแมวเป็นแมวรู้ไหมนะแมว", engine="newmm"
)
self.assertEqual(find_keyword(word_list), {"แมว": 4})
def test_rank(self):
self.assertEqual(rank([]), None)
self.assertEqual(rank(["แมว", "คน", "แมว"]), Counter({"แมว": 2, "คน": 1}))
self.assertIsNotNone(rank(["แมว", "คน", "แมว"], exclude_stopwords=True))
# ### pythainlp.util.date
def test_date(self):
self.assertIsNotNone(now_reign_year())
self.assertEqual(reign_year_to_ad(2, 10), 2017)
self.assertIsNotNone(reign_year_to_ad(2, 9))
self.assertIsNotNone(reign_year_to_ad(2, 8))
self.assertIsNotNone(reign_year_to_ad(2, 7))
def test_thai_strftime(self):
date = datetime.datetime(1976, 10, 6, 1, 40)
self.assertEqual(thai_strftime(date, "%c"), "พ 6 ต.ค. 01:40:00 2519")
self.assertEqual(thai_strftime(date, "%c", True), "พ ๖ ต.ค. ๐๑:๔๐:๐๐ ๒๕๑๙")
self.assertEqual(
thai_strftime(date, "%Aที่ %d %B พ.ศ. %Y เวลา %H:%Mน. (%a %d-%b-%y) %% %"),
"วันพุธที่ 06 ตุลาคม พ.ศ. 2519 เวลา 01:40น. (พ 06-ต.ค.-19) % %",
)
self.assertIsNotNone(thai_strftime(date, "%A%a%B%b%C%c%D%F%G%g%v%X%x%Y%y%+"))
# ### pythainlp.util.normalize
def test_deletetone(self):
self.assertEqual(deletetone("จิ้น"), "จิน")
self.assertEqual(deletetone("เก๋า"), "เกา")
def test_normalize(self):
self.assertEqual(normalize("เเปลก"), "แปลก")
self.assertIsNotNone(normalize("พรรค์จันทร์ab์"))
# ### pythainlp.util.thai
def test_countthai(self):
self.assertEqual(countthai(""), 0)
self.assertEqual(countthai("ประเทศไทย"), 100.0)
self.assertEqual(countthai("(กกต.)", ".()"), 100.0)
self.assertEqual(countthai("(กกต.)", None), 50.0)
def test_isthaichar(self):
self.assertEqual(isthaichar("ก"), True)
self.assertEqual(isthaichar("a"), False)
self.assertEqual(isthaichar("0"), False)
def test_isthai(self):
self.assertEqual(isthai("ไทย"), True)
self.assertEqual(isthai("ไทย0"), False)
self.assertEqual(isthai("ต.ค."), True)
self.assertEqual(isthai("(ต.ค.)"), False)
self.assertEqual(isthai("ต.ค.", ignore_chars=None), False)
self.assertEqual(isthai("(ต.ค.)", ignore_chars=".()"), True)
def test_is_thaicheck(self):
self.assertEqual(thaicheck("ตา"), True)
self.assertEqual(thaicheck("ยา"), True)
self.assertEqual(thaicheck("ฆ่า"), True)
self.assertEqual(thaicheck("คน"), True)
self.assertEqual(thaicheck("กะ"), True)
self.assertEqual(thaicheck("มอ"), True)
self.assertEqual(thaicheck("มาร์ค"), False)
self.assertEqual(thaicheck("เลข"), False)
self.assertEqual(thaicheck("กะ"), True)
self.assertEqual(thaicheck("ศา"), False)
self.assertEqual(thaicheck("abc"), False)
self.assertEqual(thaicheck("ลักษ์"), False)
# ### pythainlp.word_vector
def test_thai2vec(self):
self.assertGreaterEqual(word_vector.similarity("แบคทีเรีย", "คน"), 0)
self.assertIsNotNone(word_vector.sentence_vectorizer(""))
self.assertIsNotNone(word_vector.sentence_vectorizer("เสรีภาพในการชุมนุม"))
self.assertIsNotNone(
word_vector.sentence_vectorizer("เสรีภาพในการรวมตัว\nสมาคม", use_mean=True)
)
self.assertIsNotNone(
word_vector.sentence_vectorizer("I คิด therefore I am ผ็ฎ์")
)
self.assertIsNotNone(
word_vector.most_similar_cosmul(
["สหรัฐอเมริกา", "ประธานาธิบดี"], ["ประเทศไทย"]
)[0][0]
)
self.assertEqual(
word_vector.doesnt_match(["ญี่ปุ่น", "พม่า", "ไอติม"]), "ไอติม"
)
if __name__ == "__main__":
unittest.main()
| 38.364055 | 88 | 0.603524 |
import datetime
import os
import unittest
from collections import Counter
from nltk.corpus import wordnet as wn
from pythainlp import word_vector
from pythainlp.corpus import (
_CORPUS_PATH,
conceptnet,
countries,
download,
provinces,
remove,
thai_negations,
thai_stopwords,
thai_syllables,
thai_words,
tnc,
ttc,
wordnet,
)
from pythainlp.corpus.common import _THAI_WORDS_FILENAME
from pythainlp.soundex import lk82, metasound, soundex, udom83
from pythainlp.spell import NorvigSpellChecker, correct, spell
from pythainlp.summarize import summarize
from pythainlp.tag import perceptron, pos_tag, pos_tag_sents, unigram
from pythainlp.tag.locations import tag_provinces
from pythainlp.tag.named_entity import ThaiNameTagger
from pythainlp.tokenize import DEFAULT_DICT_TRIE, FROZEN_DICT_TRIE, Tokenizer
from pythainlp.tokenize import deepcut as tokenize_deepcut
from pythainlp.tokenize import (
dict_trie,
dict_word_tokenize,
etcc,
longest,
multi_cut,
newmm,
)
from pythainlp.tokenize import pyicu as tokenize_pyicu
from pythainlp.tokenize import (
sent_tokenize,
subword_tokenize,
syllable_tokenize,
tcc,
word_tokenize,
)
from pythainlp.transliterate import romanize, transliterate
from pythainlp.transliterate.ipa import trans_list, xsampa_list
from pythainlp.transliterate.royin import romanize as romanize_royin
from pythainlp.util import (
arabic_digit_to_thai_digit,
bahttext,
collate,
countthai,
deletetone,
digit_to_text,
eng_to_thai,
find_keyword,
isthai,
isthaichar,
normalize,
now_reign_year,
num_to_thaiword,
rank,
reign_year_to_ad,
text_to_arabic_digit,
text_to_thai_digit,
thai_digit_to_arabic_digit,
thai_strftime,
thai_to_eng,
thaicheck,
thaiword_to_num,
)
class TestUM(unittest.TestCase):
None(conceptnet.edges("รัก"))
def test_corpus(self):
self.assertIsNotNone(countries())
self.assertIsNotNone(provinces())
self.assertIsNotNone(thai_negations())
self.assertIsNotNone(thai_stopwords())
self.assertIsNotNone(thai_syllables())
self.assertIsNotNone(thai_words())
download("test")
self.assertIsNotNone(remove("test"))
self.assertIsNotNone(remove("tnc_freq"))
def test_tnc(self):
self.assertIsNotNone(tnc.word_freqs())
self.assertIsNotNone(tnc.word_freq("นก"))
def test_ttc(self):
self.assertIsNotNone(ttc.word_freqs())
def test_wordnet(self):
self.assertIsNotNone(wordnet.langs())
self.assertEqual(
wordnet.synset("spy.n.01").lemma_names("tha"), ["สปาย", "สายลับ"]
)
self.assertIsNotNone(wordnet.synsets("นก"))
self.assertIsNotNone(wordnet.all_synsets(pos=wn.ADJ))
self.assertIsNotNone(wordnet.lemmas("นก"))
self.assertIsNotNone(wordnet.all_lemma_names(pos=wn.ADV))
self.assertIsNotNone(wordnet.lemma("cat.n.01.cat"))
self.assertEqual(wordnet.morphy("dogs"), "dog")
bird = wordnet.synset("bird.n.01")
mouse = wordnet.synset("mouse.n.01")
self.assertEqual(
wordnet.path_similarity(bird, mouse), bird.path_similarity(mouse)
)
self.assertEqual(
wordnet.wup_similarity(bird, mouse), bird.wup_similarity(mouse)
)
cat_key = wordnet.synsets("แมว")[0].lemmas()[0].key()
self.assertIsNotNone(wordnet.lemma_from_key(cat_key))
oundex("a", engine="lk82"))
self.assertIsNotNone(soundex("a", engine="udom83"))
self.assertIsNotNone(soundex("a", engine="metasound"))
self.assertIsNotNone(soundex("a", engine="XXX"))
self.assertEqual(lk82(None), "")
self.assertEqual(lk82(""), "")
self.assertEqual(lk82("เหตุ"), lk82("เหด"))
self.assertEqual(lk82("รถ"), "ร3000")
self.assertIsNotNone(lk82("เกาะ"))
self.assertIsNotNone(lk82("อุยกูร์"))
self.assertIsNotNone(lk82("หยากไย่"))
self.assertIsNotNone(lk82("หอ"))
self.assertEqual(lk82("น์"), "")
self.assertEqual(udom83(None), "")
self.assertEqual(udom83(""), "")
self.assertEqual(udom83("เหตุ"), udom83("เหด"))
self.assertEqual(udom83("รถ"), "ร800000")
self.assertEqual(metasound(None), "")
self.assertEqual(metasound(""), "")
self.assertEqual(metasound("เหตุ"), metasound("เหด"))
self.assertEqual(metasound("รักษ์"), metasound("รัก"))
self.assertEqual(metasound("บูรณะ"), "บ550")
self.assertEqual(metasound("คน"), "ค500")
self.assertEqual(metasound("คนA"), "ค500")
self.assertEqual(metasound("ดา"), "ด000")
self.assertIsNotNone(metasound("จะ"))
self.assertIsNotNone(metasound("ปา"))
self.assertIsNotNone(metasound("งง"))
self.assertIsNotNone(metasound("ลา"))
self.assertIsNotNone(metasound("มา"))
self.assertIsNotNone(metasound("ยา"))
self.assertIsNotNone(metasound("วา"))
self.assertIsNotNone(metasound("บูชา"))
self.assertIsNotNone(metasound("กมลา"))
self.assertIsNotNone(metasound("กาโวกาโว"))
self.assertIsNotNone(metasound("สุวรรณา"))
self.assertIsNotNone(metasound("ดอยบอย"))
pell(None), "")
self.assertEqual(spell(""), "")
self.assertIsNotNone(spell("เน้ร"))
self.assertIsNotNone(spell("เกสมร์"))
self.assertEqual(correct(None), "")
self.assertEqual(correct(""), "")
self.assertIsNotNone(correct("ทดสอง"))
checker = NorvigSpellChecker(dict_filter="")
self.assertIsNotNone(checker.dictionary())
self.assertGreaterEqual(checker.prob("มี"), 0)
ข็งหรือของเหลว "
text += "ที่กินหรือดื่มเข้าสู่ร่างกายแล้ว "
text += "จะทำให้เกิดพลังงานและความร้อนแก่ร่างกาย "
text += "ทำให้ร่างกายเจริญเติบโต "
text += "ซ่อมแซมส่วนที่สึกหรอ ควบคุมการเปลี่ยนแปลงต่างๆ ในร่างกาย "
text += "ช่วยทำให้อวัยวะต่างๆ ทำงานได้อย่างปกติ "
text += "อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย"
self.assertEqual(
summarize(text=text, n=1, engine="frequency"),
["อาหารจะต้องไม่มีพิษและไม่เกิดโทษต่อร่างกาย"],
)
self.assertIsNotNone(summarize(text, 1, engine="XX"))
"ผม", "รัก", "คุณ"]
self.assertEqual(pos_tag(None), [])
self.assertEqual(pos_tag([]), [])
self.assertEqual(unigram.tag(None, corpus="pud"), [])
self.assertEqual(unigram.tag([], corpus="pud"), [])
self.assertEqual(unigram.tag(None, corpus="orchid"), [])
self.assertEqual(unigram.tag([], corpus="orchid"), [])
self.assertIsNotNone(pos_tag(tokens, engine="unigram", corpus="orchid"))
self.assertIsNotNone(pos_tag(tokens, engine="unigram", corpus="pud"))
self.assertIsNotNone(pos_tag([""], engine="unigram", corpus="pud"))
self.assertEqual(
pos_tag(word_tokenize("คุณกำลังประชุม"), engine="unigram"),
[("คุณ", "PPRS"), ("กำลัง", "XVBM"), ("ประชุม", "VACT")],
)
self.assertIsNotNone(pos_tag(tokens, engine="perceptron", corpus="orchid"))
self.assertIsNotNone(pos_tag(tokens, engine="perceptron", corpus="pud"))
self.assertEqual(perceptron.tag(None, corpus="pud"), [])
self.assertEqual(perceptron.tag([], corpus="pud"), [])
self.assertEqual(perceptron.tag(None, corpus="orchid"), [])
self.assertEqual(perceptron.tag([], corpus="orchid"), [])
self.assertIsNotNone(pos_tag(None, engine="artagger"))
self.assertIsNotNone(pos_tag([], engine="artagger"))
self.assertIsNotNone(pos_tag(tokens, engine="artagger"))
self.assertEqual(
pos_tag(word_tokenize("คุณกำลังประชุม"), engine="artagger"),
[("คุณ", "PPRS"), ("กำลัง", "XVBM"), ("ประชุม", "VACT")],
)
self.assertEqual(pos_tag_sents(None), [])
self.assertEqual(pos_tag_sents([]), [])
self.assertEqual(
pos_tag_sents([["ผม", "กิน", "ข้าว"], ["แมว", "วิ่ง"]]),
[
[("ผม", "PPRS"), ("กิน", "VACT"), ("ข้าว", "NCMN")],
[("แมว", "NCMN"), ("วิ่ง", "VACT")],
],
)
provinces(["หนองคาย", "น่าอยู่"]),
[("หนองคาย", "B-LOCATION"), ("น่าอยู่", "O")],
)
et_ner(""), [])
self.assertIsNotNone(ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า"))
self.assertIsNotNone(ner.get_ner("แมวทำอะไรตอนห้าโมงเช้า", pos=False))
self.assertIsNotNone(
ner.get_ner(
"""คณะวิทยาศาสตร์ประยุกต์และวิศวกรรมศาสตร์ มหาวิทยาลัยขอนแก่น
วิทยาเขตหนองคาย 112 หมู่ 7 บ้านหนองเดิ่น ตำบลหนองกอมเกาะ อำเภอเมือง
จังหวัดหนองคาย 43000"""
)
)
al(dict_word_tokenize(""), [])
def test_etcc(self):
self.assertEqual(etcc.segment(""), "")
self.assertIsInstance(etcc.segment("คืนความสุข"), list)
self.assertIsNotNone(
etcc.segment(
"หมูแมวเหล่านี้ด้วยเหตุผลเชื่อมโยงทางกรรมพันธุ์"
+ "สัตว์มีแขนขาหน้าหัวเราะเพราะแข็งขืน"
)
)
def test_word_tokenize(self):
self.assertEqual(word_tokenize(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="newmm"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="mm"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="longest"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="ulmfit"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="icu"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="deepcut"))
self.assertIsNotNone(word_tokenize("หมอนทองตากลมหูว์MBK39", engine="XX"))
self.assertIsNotNone(dict_trie(()))
self.assertIsNotNone(dict_trie(("ทดสอบ", "สร้าง", "Trie")))
self.assertIsNotNone(dict_trie(["ทดสอบ", "สร้าง", "Trie"]))
self.assertIsNotNone(dict_trie(thai_words()))
self.assertIsNotNone(dict_trie(FROZEN_DICT_TRIE))
self.assertIsNotNone(
dict_trie(os.path.join(_CORPUS_PATH, _THAI_WORDS_FILENAME))
)
self.assertIsNotNone(word_tokenize("รถไฟฟ้าBTS", custom_dict=DEFAULT_DICT_TRIE))
self.assertIsNotNone(
word_tokenize("ทดสอบ", engine="deepcut", custom_dict=FROZEN_DICT_TRIE)
)
self.assertIsNotNone(
word_tokenize("ทดสอบ", engine="XX", custom_dict=FROZEN_DICT_TRIE)
)
def test_Tokenizer(self):
t_test = Tokenizer(FROZEN_DICT_TRIE)
self.assertEqual(t_test.word_tokenize(""), [])
t_test.set_tokenize_engine("longest")
self.assertEqual(t_test.word_tokenize(None), [])
t_test = Tokenizer()
self.assertEqual(t_test.word_tokenize("ก"), ["ก"])
def test_word_tokenize_icu(self):
self.assertEqual(tokenize_pyicu.segment(None), [])
self.assertEqual(tokenize_pyicu.segment(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="icu"),
["ฉัน", "รัก", "ภาษา", "ไทย", "เพราะ", "ฉัน", "เป็น", "คน", "ไทย"],
)
def test_word_tokenize_deepcut(self):
self.assertEqual(tokenize_deepcut.segment(None), [])
self.assertEqual(tokenize_deepcut.segment(""), [])
self.assertIsNotNone(tokenize_deepcut.segment("ทดสอบ", DEFAULT_DICT_TRIE))
self.assertIsNotNone(tokenize_deepcut.segment("ทดสอบ", ["ทด", "สอบ"]))
self.assertIsNotNone(word_tokenize("ทดสอบ", engine="deepcut"))
def test_word_tokenize_longest(self):
self.assertEqual(longest.segment(None), [])
self.assertEqual(longest.segment(""), [])
self.assertIsNotNone(longest.segment("กรุงเทพฯมากๆเพราโพาง BKKฯ"))
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="longest"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
def test_word_tokenize_mm(self):
self.assertEqual(multi_cut.segment(None), [])
self.assertEqual(multi_cut.segment(""), [])
self.assertEqual(word_tokenize("", engine="mm"), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="mm"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertIsNotNone(multi_cut.mmcut("ทดสอบ"))
self.assertIsNotNone(multi_cut.find_all_segment("รถไฟฟ้ากรุงเทพมหานครBTS"))
self.assertEqual(multi_cut.find_all_segment(None), [])
def test_word_tokenize_newmm(self):
self.assertEqual(newmm.segment(None), [])
self.assertEqual(newmm.segment(""), [])
self.assertEqual(
word_tokenize("ฉันรักภาษาไทยเพราะฉันเป็นคนไทย", engine="newmm"),
["ฉัน", "รัก", "ภาษาไทย", "เพราะ", "ฉัน", "เป็น", "คนไทย"],
)
self.assertEqual(
word_tokenize(
"สวัสดีครับ สบายดีไหมครับ", engine="newmm", keep_whitespace=True
),
["สวัสดี", "ครับ", " ", "สบายดี", "ไหม", "ครับ"],
)
self.assertEqual(
word_tokenize("จุ๋มง่วงนอนยัง", engine="newmm"), ["จุ๋ม", "ง่วงนอน", "ยัง"]
)
self.assertEqual(word_tokenize("จุ๋มง่วง", engine="newmm"), ["จุ๋ม", "ง่วง"])
self.assertEqual(
word_tokenize("จุ๋ม ง่วง", engine="newmm", keep_whitespace=False),
["จุ๋ม", "ง่วง"],
)
def test_sent_tokenize(self):
self.assertEqual(sent_tokenize(None), [])
self.assertEqual(sent_tokenize(""), [])
self.assertEqual(
sent_tokenize("รักน้ำ รักปลา ", engine="whitespace"),
["รักน้ำ", "รักปลา", ""],
)
self.assertEqual(sent_tokenize("รักน้ำ รักปลา "), ["รักน้ำ", "รักปลา"])
def test_subword_tokenize(self):
self.assertEqual(subword_tokenize(None), [])
self.assertEqual(subword_tokenize(""), [])
self.assertIsNotNone(subword_tokenize("สวัสดีดาวอังคาร", engine="tcc"))
self.assertIsNotNone(subword_tokenize("สวัสดีดาวอังคาร", engine="etcc"))
def test_syllable_tokenize(self):
self.assertEqual(syllable_tokenize(None), [])
self.assertEqual(syllable_tokenize(""), [])
self.assertEqual(
syllable_tokenize("สวัสดีชาวโลก"), ["สวัส", "ดี", "ชาว", "โลก"]
)
def test_tcc(self):
self.assertEqual(tcc.segment(None), [])
self.assertEqual(tcc.segment(""), [])
self.assertEqual(tcc.segment("ประเทศไทย"), ["ป", "ระ", "เท", "ศ", "ไท", "ย"])
self.assertEqual(list(tcc.tcc("")), [])
self.assertEqual(tcc.tcc_pos(""), set())
self.assertEqual(romanize(""), "")
self.assertEqual(romanize("แมว"), "maeo")
self.assertEqual(romanize_royin(None), "")
self.assertEqual(romanize_royin(""), "")
self.assertEqual(romanize_royin("หาย"), "hai")
self.assertEqual(romanize_royin("หมอก"), "mok")
assertEqual(romanize("แมว", engine="royin"), "maeo")
self.assertEqual(romanize("เดือน", engine="royin"), "duean")
self.assertEqual(romanize("ดู", engine="royin"), "du")
self.assertEqual(romanize("ดำ", engine="royin"), "dam")
self.assertEqual(romanize("บัว", engine="royin"), "bua")
self.assertEqual(romanize("กร", engine="royin"), "kon")
self.assertEqual(romanize("กรร", engine="royin"), "kan")
self.assertEqual(romanize("กรรม", engine="royin"), "kam")
self.assertIsNotNone(romanize("กก", engine="royin"))
self.assertIsNotNone(romanize("ฝ้าย", engine="royin"))
self.assertIsNotNone(romanize("ทีปกร", engine="royin"))
self.assertIsNotNone(romanize("กรม", engine="royin"))
self.assertIsNotNone(romanize("ธรรพ์", engine="royin"))
self.assertIsNotNone(romanize("กฏa์1์ ์", engine="royin"))
self.assertEqual(romanize("แมว", engine="thai2rom"), "maeo")
def test_transliterate(self):
self.assertEqual(transliterate(""), "")
self.assertEqual(transliterate("แมว", "pyicu"), "mæw")
self.assertEqual(transliterate("คน", engine="ipa"), "kʰon")
self.assertIsNotNone(trans_list("คน"))
self.assertIsNotNone(xsampa_list("คน"))
ual(collate(["ไก่", "กก"]), ["กก", "ไก่"])
self.assertEqual(
collate(["ไก่", "เป็ด", "หมู", "วัว"]), ["ไก่", "เป็ด", "วัว", "หมู"]
)
def test_number(self):
self.assertEqual(
bahttext(5611116.50),
"ห้าล้านหกแสนหนึ่งหมื่นหนึ่งพันหนึ่งร้อยสิบหกบาทห้าสิบสตางค์",
)
self.assertEqual(bahttext(116), "หนึ่งร้อยสิบหกบาทถ้วน")
self.assertEqual(bahttext(0), "ศูนย์บาทถ้วน")
self.assertEqual(bahttext(None), "")
self.assertEqual(num_to_thaiword(112), "หนึ่งร้อยสิบสอง")
self.assertEqual(num_to_thaiword(0), "ศูนย์")
self.assertEqual(num_to_thaiword(None), "")
self.assertEqual(thaiword_to_num("ร้อยสิบสอง"), 112)
self.assertEqual(
thaiword_to_num(
["หก", "ล้าน", "หก", "แสน", "หกหมื่น", "หกพัน", "หกร้อย", "หกสิบ", "หก"]
),
6666666,
)
self.assertEqual(thaiword_to_num("ยี่สิบ"), 20)
self.assertEqual(thaiword_to_num("ศูนย์"), 0)
self.assertEqual(thaiword_to_num("ศูนย์อะไรนะ"), 0)
self.assertEqual(thaiword_to_num(""), None)
self.assertEqual(thaiword_to_num(None), None)
self.assertEqual(arabic_digit_to_thai_digit("ไทยแลนด์ 4.0"), "ไทยแลนด์ ๔.๐")
self.assertEqual(arabic_digit_to_thai_digit(""), "")
self.assertEqual(arabic_digit_to_thai_digit(None), "")
self.assertEqual(thai_digit_to_arabic_digit("๔๐๔ Not Found"), "404 Not Found")
self.assertEqual(thai_digit_to_arabic_digit(""), "")
self.assertEqual(thai_digit_to_arabic_digit(None), "")
self.assertEqual(digit_to_text("RFC 7258"), "RFC เจ็ดสองห้าแปด")
self.assertEqual(digit_to_text(""), "")
self.assertEqual(digit_to_text(None), "")
self.assertEqual(text_to_arabic_digit("เจ็ด"), "7")
self.assertEqual(text_to_arabic_digit(""), "")
self.assertEqual(text_to_arabic_digit(None), "")
self.assertEqual(text_to_thai_digit("เก้า"), "๙")
self.assertEqual(text_to_thai_digit(""), "")
self.assertEqual(text_to_thai_digit(None), "")
def test_keyboard(self):
self.assertEqual(eng_to_thai("l;ylfu8iy["), "สวัสดีครับ")
self.assertEqual(thai_to_eng("สวัสดีครับ"), "l;ylfu8iy[")
def test_keywords(self):
word_list = word_tokenize(
"แมวกินปลาอร่อยรู้ไหมว่าแมวเป็นแมวรู้ไหมนะแมว", engine="newmm"
)
self.assertEqual(find_keyword(word_list), {"แมว": 4})
def test_rank(self):
self.assertEqual(rank([]), None)
self.assertEqual(rank(["แมว", "คน", "แมว"]), Counter({"แมว": 2, "คน": 1}))
self.assertIsNotNone(rank(["แมว", "คน", "แมว"], exclude_stopwords=True))
year())
self.assertEqual(reign_year_to_ad(2, 10), 2017)
self.assertIsNotNone(reign_year_to_ad(2, 9))
self.assertIsNotNone(reign_year_to_ad(2, 8))
self.assertIsNotNone(reign_year_to_ad(2, 7))
def test_thai_strftime(self):
date = datetime.datetime(1976, 10, 6, 1, 40)
self.assertEqual(thai_strftime(date, "%c"), "พ 6 ต.ค. 01:40:00 2519")
self.assertEqual(thai_strftime(date, "%c", True), "พ ๖ ต.ค. ๐๑:๔๐:๐๐ ๒๕๑๙")
self.assertEqual(
thai_strftime(date, "%Aที่ %d %B พ.ศ. %Y เวลา %H:%Mน. (%a %d-%b-%y) %% %"),
"วันพุธที่ 06 ตุลาคม พ.ศ. 2519 เวลา 01:40น. (พ 06-ต.ค.-19) % %",
)
self.assertIsNotNone(thai_strftime(date, "%A%a%B%b%C%c%D%F%G%g%v%X%x%Y%y%+"))
น")
self.assertEqual(deletetone("เก๋า"), "เกา")
def test_normalize(self):
self.assertEqual(normalize("เเปลก"), "แปลก")
self.assertIsNotNone(normalize("พรรค์จันทร์ab์"))
(""), 0)
self.assertEqual(countthai("ประเทศไทย"), 100.0)
self.assertEqual(countthai("(กกต.)", ".()"), 100.0)
self.assertEqual(countthai("(กกต.)", None), 50.0)
def test_isthaichar(self):
self.assertEqual(isthaichar("ก"), True)
self.assertEqual(isthaichar("a"), False)
self.assertEqual(isthaichar("0"), False)
def test_isthai(self):
self.assertEqual(isthai("ไทย"), True)
self.assertEqual(isthai("ไทย0"), False)
self.assertEqual(isthai("ต.ค."), True)
self.assertEqual(isthai("(ต.ค.)"), False)
self.assertEqual(isthai("ต.ค.", ignore_chars=None), False)
self.assertEqual(isthai("(ต.ค.)", ignore_chars=".()"), True)
def test_is_thaicheck(self):
self.assertEqual(thaicheck("ตา"), True)
self.assertEqual(thaicheck("ยา"), True)
self.assertEqual(thaicheck("ฆ่า"), True)
self.assertEqual(thaicheck("คน"), True)
self.assertEqual(thaicheck("กะ"), True)
self.assertEqual(thaicheck("มอ"), True)
self.assertEqual(thaicheck("มาร์ค"), False)
self.assertEqual(thaicheck("เลข"), False)
self.assertEqual(thaicheck("กะ"), True)
self.assertEqual(thaicheck("ศา"), False)
self.assertEqual(thaicheck("abc"), False)
self.assertEqual(thaicheck("ลักษ์"), False)
or.similarity("แบคทีเรีย", "คน"), 0)
self.assertIsNotNone(word_vector.sentence_vectorizer(""))
self.assertIsNotNone(word_vector.sentence_vectorizer("เสรีภาพในการชุมนุม"))
self.assertIsNotNone(
word_vector.sentence_vectorizer("เสรีภาพในการรวมตัว\nสมาคม", use_mean=True)
)
self.assertIsNotNone(
word_vector.sentence_vectorizer("I คิด therefore I am ผ็ฎ์")
)
self.assertIsNotNone(
word_vector.most_similar_cosmul(
["สหรัฐอเมริกา", "ประธานาธิบดี"], ["ประเทศไทย"]
)[0][0]
)
self.assertEqual(
word_vector.doesnt_match(["ญี่ปุ่น", "พม่า", "ไอติม"]), "ไอติม"
)
if __name__ == "__main__":
unittest.main()
| true | true |
1c2e22a90ad2aaad5a60b61d2ee131476440569c | 24,455 | py | Python | pytorch_src/engagement_classifier.py | PlusLabNLP/PredictiveEngagement | 214d3eb20901982d192b05b4d496420dfb273f8e | [
"MIT"
] | 13 | 2020-08-16T10:19:35.000Z | 2021-11-19T07:35:57.000Z | pytorch_src/engagement_classifier.py | PlusLabNLP/PredictiveEngagement | 214d3eb20901982d192b05b4d496420dfb273f8e | [
"MIT"
] | null | null | null | pytorch_src/engagement_classifier.py | PlusLabNLP/PredictiveEngagement | 214d3eb20901982d192b05b4d496420dfb273f8e | [
"MIT"
] | 2 | 2020-01-04T02:35:07.000Z | 2020-01-23T20:18:32.000Z | import random
import numpy as np
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, roc_auc_score
import pickle
import torch.nn as nn
import os
import csv
random.seed(1000)
np.random.seed(1000)
torch.manual_seed(1000)
# torch.backends.cudnn.benchmark = False
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.enabled = False
class Engagement_cls():
'''This class classifies each query and response pairs as 0(not engaging) or 1 (engaging)
'''
def __init__(self, train_dir, batch_size, mlp_hidden_dim, num_epochs,\
regularizer = 0.01, lr=1e-4, dropout = 0.1, optimizer="Adam",\
ftrain_queries_embed=None, ftrain_replies_embed=None, fvalid_queries_embed=None, fvalid_replies_embed=None, ftest_queries_embed=None ,ftest_replies_embed=None):
print('***************model parameters********************')
print('mlp layers {}'.format(mlp_hidden_dim))
print('learning rate {}'.format(lr))
print('drop out rate {}'.format(dropout))
print('batch size {}'.format(batch_size))
print('optimizer {}'.format(optimizer))
print('regularizer {}'.format(regularizer))
print('***************************************************')
print(ftrain_queries_embed)
print(ftrain_replies_embed)
print(fvalid_queries_embed)
print(fvalid_replies_embed)
print(ftest_queries_embed)
print(ftest_replies_embed)
self.train_dir = train_dir
self.batch_size = batch_size
self.mlp_hidden_dim = mlp_hidden_dim
self.lr = lr
self.dropout = dropout
self.num_epochs = num_epochs
self.optim = optimizer
self.reg= regularizer
self.ftrain_queries_embed = ftrain_queries_embed
self.ftrain_replies_embed =ftrain_replies_embed
self.fvalid_queries_embed= fvalid_queries_embed
self.fvalid_replies_embed = fvalid_replies_embed
self.ftest_queries_embed = ftest_queries_embed
self.ftest_replies_embed = ftest_replies_embed
def load_Bert_embeddings(self, data_dir, f_queries_embed, f_replies_embed):
'''Load sentences Bert embeddings into dictionary
'''
print('Loading Bert embeddings of sentences')
queries_vectors = {}
replies_vectors = {}
fwq = open(data_dir+f_queries_embed, 'rb')
dict_queries = pickle.load(fwq)
for query, embeds in dict_queries.items():
queries_vectors[query] = embeds[0]
fwr = open(data_dir + f_replies_embed, 'rb')
dict_replies = pickle.load(fwr)
for reply, embeds in dict_replies.items():
replies_vectors[reply] = embeds[0]
print('number of loaded embeddings is {} {}'.format(len(queries_vectors), len(replies_vectors)))
return queries_vectors, replies_vectors
    def prepare_data(self, data_dir, ftrain=None, fvalid=None, ftest=None):
        '''Load train/valid/test utterance pairs and get their embeddings

        Params:
            data_dir: directory holding the csv files and the pickled embeddings
            ftrain/fvalid/ftest: csv file names; each file has a header row
                followed by rows of (index, query, reply, label).  A split is
                loaded only when its file name is given.
        Side effects:
            Sets self.<split>_queries/_replies/_labels/_size and the two
            embedding dicts for each requested split, and opens self.fw, an
            append-mode training log file under self.train_dir.
        '''
        self.data_dir = data_dir
        if ftrain != None:
            # NOTE(review): the csv file handle opened here is never closed.
            csv_file = open(data_dir + ftrain)
            csv_reader_train = csv.reader(csv_file, delimiter=',')
            self.train_queries,self.train_replies,self.train_labels = [],[],[]
            # Skip the header row.
            next(csv_reader_train)
            for row in csv_reader_train:
                # split('\n')[0] drops anything after an embedded newline.
                self.train_queries.append(row[1].split('\n')[0])
                self.train_replies.append(row[2].split('\n')[0])
                self.train_labels.append(int(row[3]))
            print('size of train_queries {}'.format(len(self.train_queries)))
            self.train_size = len(self.train_queries)
            self.train_queries_embeds, self.train_replies_embeds= self.load_Bert_embeddings(data_dir, self.ftrain_queries_embed, self.ftrain_replies_embed)
        if fvalid != None:
            csv_file = open(data_dir + fvalid)
            csv_reader_valid = csv.reader(csv_file, delimiter=',')
            self.valid_queries,self.valid_replies,self.valid_labels= [],[],[]
            next(csv_reader_valid)
            for row in csv_reader_valid:
                self.valid_queries.append(row[1].split('\n')[0])
                self.valid_replies.append(row[2].split('\n')[0])
                self.valid_labels.append(int(row[3]))
            print('size of valid_queries {}'.format(len(self.valid_queries)))
            self.valid_size = len(self.valid_queries)
            self.valid_queries_embeds, self.valid_replies_embeds= self.load_Bert_embeddings(data_dir, self.fvalid_queries_embed, self.fvalid_replies_embed)
        if ftest != None:
            print(self.ftest_queries_embed)
            print(self.ftest_replies_embed)
            csv_file = open(data_dir + ftest)
            csv_reader_test = csv.reader(csv_file, delimiter=',')
            self.test_queries,self.test_replies,self.test_labels = [],[],[]
            next(csv_reader_test)
            for row in csv_reader_test:
                self.test_queries.append(row[1].split('\n')[0])
                self.test_replies.append(row[2].split('\n')[0])
                self.test_labels.append(int(row[3]))
            self.test_size = len(self.test_queries)
            self.test_queries_embeds, self.test_replies_embeds= self.load_Bert_embeddings(data_dir, self.ftest_queries_embed, self.ftest_replies_embed)
        # Open the training log and record the hyper-parameters, mirroring the
        # stdout banner printed by __init__.
        filename = self.train_dir + "log_train.txt"
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        self.fw =open(filename, "a")
        self.fw.write('***************model parameters******************** \n')
        self.fw.write('mlp layers {} \n'.format(self.mlp_hidden_dim))
        self.fw.write('learning rate {}\n'.format(self.lr))
        self.fw.write('drop out rate {}\n'.format(self.dropout))
        self.fw.write('batch size {}\n'.format(self.batch_size))
        self.fw.write('optimizer {}\n'.format(self.optim))
        self.fw.write('regularizer {}'.format(self.reg))
        self.fw.write('***************************************************\n')
def shuffle_data(self, type='train'):
'''Shuffle queries/replies/engagement scores for train/valid/test sets
'''
if type=='train':
train_indexes = [i for i in range(self.train_size)]
random.shuffle(train_indexes)
shuffled_queries = []
shuffled_replies = []
shuffled_labels = []
shuffled_replies_len = []
shuffled_replies_num_diverse= []
for i in train_indexes:
shuffled_queries.append(self.train_queries[i])
shuffled_replies.append(self.train_replies[i])
shuffled_labels.append(self.train_labels[i])
self.train_queries = shuffled_queries
self.train_replies = shuffled_replies
self.train_labels = shuffled_labels
elif type=='valid':
valid_indexes = [i for i in range(self.valid_size)]
random.shuffle(valid_indexes)
shuffled_queries = []
shuffled_replies = []
shuffled_labels = []
for i in valid_indexes:
shuffled_queries.append(self.valid_queries[i])
shuffled_replies.append(self.valid_replies[i])
shuffled_labels.append(self.valid_labels[i])
self.valid_queries = shuffled_queries
self.valid_replies = shuffled_replies
self.valid_labels = shuffled_labels
elif type=='test':
test_indexes = [i for i in range(self.test_size)]
random.shuffle(test_indexes)
shuffled_queries = []
shuffled_replies = []
shuffled_labels = []
for i in test_indexes:
shuffled_queries.append(self.test_queries[i])
shuffled_replies.append(self.test_replies[i])
shuffled_labels.append(self.test_labels[i])
self.test_queries = shuffled_queries
self.test_replies = shuffled_replies
self.test_labels = shuffled_labels
    def train(self, early_stop=50, finetune=False):
        '''Train the MLP engagement classifier with early stopping on valid AUC.

        Params:
            early_stop: number of consecutive epochs without a validation-AUC
                improvement allowed before training stops
            finetune: when True, initialize from the saved 'best_model.pt' and
                checkpoint under 'best_model_finetuned'; otherwise train from
                scratch and checkpoint under 'best_model'
        Side effects:
            Writes <model_name>.pt / <model_name>.info checkpoints and the
            model_auc.jpg / model_loss.jpg plots into self.train_dir, and
            appends one line per epoch to self.fw (opened by prepare_data).
        '''
        model = BiLSTM(mlp_hidden_dim=self.mlp_hidden_dim, dropout=self.dropout)
        if torch.cuda.is_available():
            model.cuda()
        max_auc = 0
        no_improve_epoch = 0
        no_improve_in_previous_epoch = False
        if finetune==False:
            model_name ='best_model'
        if finetune==True:
            model_name ='best_model_finetuned'
            #load pretrained model
            model.load_state_dict(torch.load(self.train_dir + 'best_model.pt'))
            # NOTE(review): `info` is loaded but never used in this branch.
            info = torch.load(self.train_dir + 'best_model.info')
            print('the parameters of the best trained model is ')
            for name, param in model.named_parameters():
                if param.requires_grad:
                    print (name, param.data, param.shape)
        print(self.lr)
        # Select the optimizer named in the constructor; weight_decay carries
        # the L2 regularization strength.
        if self.optim=='SGD':
            optimizer = optim.SGD(model.parameters(), lr=self.lr, weight_decay=self.reg)
        if self.optim=='Adam':
            optimizer = optim.Adam(model.parameters(), lr=self.lr, weight_decay=self.reg)
        if self.optim=='RMSprop':
            optimizer = optim.RMSprop(model.parameters(), lr=self.lr, weight_decay=self.reg)
        # Per-epoch histories for the AUC/loss plots written at the end.
        plot_train_auc = []
        plot_valid_auc = []
        plot_valid_loss = []
        plot_train_loss = []
        plot_ep = []
        step=0
        #Shuffle valid data once since original file first has all the utterances with engagement score=0 and then all the utterances with engagement score=1
        self.shuffle_data('valid')
        for e in range(self.num_epochs):
            print('***********************************************')
            print(e)
            # Early stopping: count consecutive epochs with no valid-AUC gain.
            if no_improve_in_previous_epoch:
                no_improve_epoch += 1
                if no_improve_epoch >= early_stop:
                    break
            else:
                no_improve_epoch = 0
            no_improve_in_previous_epoch = True
            train_loss = []
            train_auc = []
            nonzero_total= 0
            # Accumulators seeded with a dummy first element (sliced off with
            # [1:] before computing AUC below).
            list_preds = torch.tensor([self.train_size])
            list_grtuth = torch.tensor([self.train_size])
            if torch.cuda.is_available():
                list_preds = list_preds.cuda()
                list_grtuth = list_grtuth.cuda()
            self.shuffle_data('train')
            for stidx in range(0, self.train_size, self.batch_size):
                step+=1
                model.train()
                model.zero_grad()
                x_q = self.train_queries[stidx:stidx + self.batch_size]
                x_r = self.train_replies[stidx:stidx + self.batch_size]
                y = torch.tensor(self.train_labels[stidx:stidx + self.batch_size]).long()
                if torch.cuda.is_available():
                    y = y.cuda()
                # Count of positive (engaging) labels in this batch.
                nonzero = torch.nonzero(y).size(0)
                nonzero_total +=nonzero
                model_output = model(x_q, x_r, self.train_queries_embeds, self.train_replies_embeds)
                pred_eval = torch.argmax(model_output, 1)
                list_preds = torch.cat((list_preds, pred_eval), dim=0)
                list_grtuth = torch.cat((list_grtuth, y), dim=0)
                #calculate weights for each class
                weight = torch.tensor([y.shape[0]/(2*(y.shape[0]- nonzero)), y.shape[0]/(2*nonzero)])
                if torch.cuda.is_available():
                    weight = weight.cuda()
                #weighted loss function due bacuase of imbalanced data
                loss_function = nn.CrossEntropyLoss(weight)
                loss = loss_function(model_output, y)
                train_loss.append(loss.data)
                loss.backward()
                optimizer.step()
            print('number of nonzero in train is {}'.format(nonzero_total))
            #calculate the evaluation metric and loss value for train data
            train_auc = roc_auc_score(list_grtuth[1:].detach().cpu().numpy(), list_preds[1:].detach().cpu().numpy())
            train_loss = torch.mean(torch.stack(train_loss))
            # train_loss = np.mean(train_loss)
            #evaluate trained model on valid data
            val_loss = []
            val_auc = []
            nonzero_total = 0
            list_preds_v = torch.tensor([self.valid_size])
            list_grtuth_v = torch.tensor([self.valid_size])
            if torch.cuda.is_available():
                list_preds_v = list_preds_v.cuda()
                list_grtuth_v = list_grtuth_v.cuda()
            for stidx in range(0, self.valid_size, self.batch_size):
                model.eval()
                val_x_q = self.valid_queries[stidx:stidx + self.batch_size]
                val_x_r = self.valid_replies[stidx:stidx + self.batch_size]
                val_y = torch.tensor(self.valid_labels[stidx:stidx + self.batch_size]).long()
                if torch.cuda.is_available():
                    val_y = val_y.cuda()
                nonzero = torch.nonzero(val_y).size(0)
                nonzero_total +=nonzero
                model_output = model(val_x_q, val_x_r, self.valid_queries_embeds, self.valid_replies_embeds)
                val_pred = torch.argmax(model_output, 1)
                list_preds_v = torch.cat((list_preds_v, val_pred), dim=0)
                list_grtuth_v = torch.cat((list_grtuth_v, val_y), dim=0)
                weight = torch.tensor([val_y.shape[0]/(2*(val_y.shape[0]- nonzero)), val_y.shape[0]/(2*nonzero)])
                if torch.cuda.is_available():
                    weight = weight.cuda()
                loss_function = nn.CrossEntropyLoss(weight)
                v_loss = loss_function(model_output, val_y)
                val_loss.append(v_loss.data)
            val_auc = roc_auc_score(list_grtuth_v[1:].detach().cpu().numpy(), list_preds_v[1:].detach().cpu().numpy())
            # val_loss = np.mean(val_loss)
            val_loss = torch.mean(torch.stack(val_loss))
            print('number of nonzero in valid is {}'.format(nonzero_total))
            st_improv = ''
            # Checkpoint whenever the validation AUC improves.
            if val_auc > max_auc:
                st_improv = '*'
                torch.save({'step': step, 'epoch': e, 'train_loss': train_loss, 'train_auc': train_auc, 'val_loss': val_loss, 'val_auc': val_auc }, self.train_dir+model_name+'.info')
                torch.save(model.state_dict(), self.train_dir+model_name+'.pt')
                max_auc = val_auc
                no_improve_in_previous_epoch = False
            print('epcoh {:02} - train_loss {:.4f} - train_auc {:.4f} val_loss {:.4f} - val_auc {:.4f}{}'.format(
                e, train_loss, train_auc, val_loss, val_auc, st_improv))
            self.fw.write('epcoh {:02} - train_loss {:.4f} - train_auc {:.4f} val_loss {:.4f} - val_auc {:.4f}{} \n'.format(
                e, train_loss, train_auc, val_loss, val_auc, st_improv))
            plot_train_auc.append(train_auc)
            plot_valid_auc.append(val_auc)
            plot_train_loss.append(train_loss)
            plot_valid_loss.append(val_loss)
            plot_ep.append(e)
        print('#############################################')
        # Reload the best checkpoint and dump its parameters for inspection.
        model.load_state_dict(torch.load(self.train_dir + model_name+'.pt'))
        info = torch.load(self.train_dir + model_name+'.info')
        print('the parameters of the best trained model is ')
        for name, param in model.named_parameters():
            if param.requires_grad:
                print (name, param.data, param.shape)
        print('Done!')
        # Persist the training curves as images in the training directory.
        plt.figure(0)
        l1 = plt.plot(plot_ep,plot_train_auc,'-r', label='Train auc')
        l2 = plt.plot(plot_ep,plot_valid_auc,'-b', label='Valid auc')
        plt.legend(loc='upper left')
        plt.xlabel("train and valid acc for model")
        plt.savefig(self.train_dir + 'model_auc.jpg')
        plt.figure(1)
        l1 = plt.plot(plot_ep,plot_train_loss,'-r', label='Train loss')
        l2 = plt.plot(plot_ep,plot_valid_loss,'-b', label='Valid loss')
        plt.legend(loc='upper left')
        plt.xlabel("train and valid loss for model")
        plt.savefig(self.train_dir + 'model_loss.jpg')
    def test(self, fname):
        '''Test the trained model on test set

        Params:
            fname: NOTE(review) this parameter is never used in the body —
                confirm whether it was meant to name an output/report file.
        Side effects:
            Prints per-batch statistics, a classification report and the
            overall test loss/AUC; requires 'best_model.pt' in self.train_dir.
        '''
        if not os.path.isfile(self.train_dir+'best_model.pt'):
            print('There is not any trained model to be tested!\nPlease first try to train the model.')
            return
        model = BiLSTM(mlp_hidden_dim=self.mlp_hidden_dim, dropout=self.dropout)
        if torch.cuda.is_available():
            model.cuda()
        model.load_state_dict(torch.load(self.train_dir+'best_model.pt'))
        # NOTE(review): `info` is loaded but never used.
        info = torch.load(self.train_dir + 'best_model.info')
        model.eval()
        print('begining of test')
        for name, param in model.named_parameters():
            if param.requires_grad:
                print (name, param.data, param.shape)
        self.shuffle_data('test')
        test_loss = []
        test_auc = []
        nonzero_total= 0
        step = 0
        # Accumulators seeded with a dummy first element (sliced off with [1:]
        # before computing the metrics below).
        list_preds_t = torch.tensor([self.test_size])
        list_grtuth_t = torch.tensor([self.test_size])
        if torch.cuda.is_available():
            list_preds_t = list_preds_t.cuda()
            list_grtuth_t = list_grtuth_t.cuda()
        for stidx in range(0, self.test_size, self.batch_size):
            step+=1
            x_q = self.test_queries[stidx:stidx + self.batch_size]
            x_r = self.test_replies[stidx:stidx + self.batch_size]
            y = torch.tensor(self.test_labels[stidx:stidx + self.batch_size]).long()
            if torch.cuda.is_available():
                y = y.cuda()
            # Count of positive (engaging) labels in this batch.
            nonzero = torch.nonzero(y).size(0)
            nonzero_total +=nonzero
            model_output = model(x_q, x_r, self.test_queries_embeds, self.test_replies_embeds)
            pred_eval = torch.argmax(model_output, 1)
            list_preds_t = torch.cat((list_preds_t, pred_eval), dim=0)
            list_grtuth_t = torch.cat((list_grtuth_t, y), dim=0)
            print('batch {} has {} nonzero points and {} zero points overall {} points '.format(step, nonzero, y.shape[0]- nonzero, y.shape[0]))
            # Class weights to counter label imbalance, as in train().
            weight = torch.tensor([y.shape[0]/(2*(y.shape[0]- nonzero)), y.shape[0]/(2*nonzero)])
            if torch.cuda.is_available():
                weight = weight.cuda()
            loss_function = nn.CrossEntropyLoss(weight)
            loss = loss_function(model_output, y)
            test_loss.append(loss.data)
        print('number of nonzero in test is {}'.format(nonzero_total))
        test_auc = roc_auc_score(list_grtuth_t[1:].detach().cpu().numpy(), list_preds_t[1:].detach().cpu().numpy())
        print(classification_report(list_grtuth_t[1:].detach().cpu().numpy(), list_preds_t[1:].detach().cpu().numpy()))
        # test_loss = np.mean(test_loss)
        test_loss = torch.mean(torch.stack(test_loss))
        print('Test set: test_loss: {} -- test_auc: {}'.format(test_loss, test_auc))
    def generate_eng_score(self, fname_ground_truth, ofile):
        '''for all pairs of queries and replies predicts engagement scores

        Params:
            fname_ground_truth: file includes the queries and their ground-truth replies
            ofile: output file receiving one line per pair in the format
                query===groundtruth_reply===generated_reply===engagement_score of query and generated_reply
        Side effects:
            Requires 'best_model_finetuned.pt' in self.train_dir and the test
            split loaded by prepare_data; writes predictions to
            self.data_dir + ofile.
        '''
        if not os.path.isfile(self.train_dir+'best_model_finetuned.pt'):
            print('There is not any finetuned model on DD dataset to be used!\nPlease first try to finetune trained model.')
            return
        model = BiLSTM(mlp_hidden_dim=self.mlp_hidden_dim, dropout=self.dropout)
        if torch.cuda.is_available():
            model.cuda()
        model.load_state_dict(torch.load(self.train_dir + 'best_model_finetuned.pt'))
        # NOTE(review): `info` is loaded but never used.
        info = torch.load(self.train_dir + 'best_model_finetuned.info')
        model.eval()
        # NOTE(review): these two file handles are never closed/flushed explicitly.
        fw_pred_labels = open(self.data_dir + ofile, 'w')
        fr_groundtruth_replies = open(self.data_dir + fname_ground_truth, 'r')
        groundtruth_replies =fr_groundtruth_replies.readlines()
        print('begining of prediction')
        for name, param in model.named_parameters():
            if param.requires_grad:
                print (name, param.data, param.shape)
        for stidx in range(0, self.test_size, self.batch_size):
            x_q = self.test_queries[stidx:stidx + self.batch_size]
            x_r = self.test_replies[stidx:stidx + self.batch_size]
            x_groundtruth_r = groundtruth_replies[stidx:stidx + self.batch_size]
            model_output = model(x_q, x_r, self.test_queries_embeds, self.test_replies_embeds)
            # Softmax over the two logits; column 1 is the "engaging" probability.
            pred_eng = torch.nn.functional.softmax(model_output, dim=1)
            for ind in range(len(x_q)):
                fw_pred_labels.write(x_q[ind]+'==='+x_groundtruth_r[ind].split('\n')[0]+'==='+x_r[ind]+'==='+str(pred_eng[ind][1].item())+'\n')
        print('The engagingness score for specified replies has been predicted!')
    def get_eng_score(self, query, q_embed, reply, r_embed, model):
        '''for a pair of query and reply predicts engagement scores

        Params:
            query: input query
            q_embed: embeddings of query
            reply: input reply
            r_embed: embeddings of reply
            model: NOTE(review) this argument is ignored — it is immediately
                shadowed by a freshly constructed model loaded from
                'best_model_finetuned.pt' below; confirm whether callers
                expect their model to be used instead.
        Returns:
            Softmax probabilities over the two classes, or None when no
            finetuned checkpoint exists.
        '''
        if not os.path.isfile(self.train_dir+'best_model_finetuned.pt'):
            print('There is not any finetuned model on DD dataset to be used!\nPlease first try to finetune trained model.')
            return
        model = BiLSTM(mlp_hidden_dim=self.mlp_hidden_dim, dropout=self.dropout)
        if torch.cuda.is_available():
            model.cuda()
        model.load_state_dict(torch.load(self.train_dir + 'best_model_finetuned.pt'))
        # NOTE(review): `info` is loaded but never used.
        info = torch.load(self.train_dir + 'best_model_finetuned.info')
        model.eval()
        model_output = model(query, reply, q_embed, r_embed)
        pred_eng = torch.nn.functional.softmax(model_output, dim=1)
        return pred_eng
class BiLSTM(nn.Module):
    '''Engagement classifier head: a three-layer MLP with tanh activations.

    The Bert embeddings of the query and the reply are averaged and passed
    through three hidden layers (tanh + dropout after each), producing two
    logits (not-engaging / engaging).
    '''
    def __init__(self, mlp_hidden_dim=[128], dropout=0.2):
        super(BiLSTM, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        num_classes=2
        # Attribute names are kept stable: they are the keys of saved
        # state dicts loaded elsewhere with load_state_dict.
        self.mlp_hidden_0 = nn.Linear(768, mlp_hidden_dim[0], bias=True)
        self.mlp_hidden_1 = nn.Linear(mlp_hidden_dim[0], mlp_hidden_dim[1], bias=True)
        self.mlp_hidden_2 = nn.Linear(mlp_hidden_dim[1], mlp_hidden_dim[2], bias=True)
        self.mlp_out = nn.Linear(mlp_hidden_dim[2], num_classes, bias=True)
    def forward(self, queries_input, replies_input, queries_embeds, replies_embeds):
        # Report any query whose embedding is missing before the lookup below
        # raises a KeyError.
        for q in queries_input:
            if q not in queries_embeds.keys():
                print('the query {} embedding has not been found in the embedding file'.format(q))
        X_q = torch.tensor([queries_embeds[q] for q in queries_input])
        for r in replies_input:
            if r not in replies_embeds.keys():
                print('the reply {} embedding has not been found in the embedding file'.format(r))
        X_r = torch.tensor([replies_embeds[r] for r in replies_input])
        if torch.cuda.is_available():
            X_q, X_r = X_q.cuda(), X_r.cuda()
        # Average the two sentence embeddings, then run the MLP stack.
        hidden = torch.div(X_q.add(X_r), 2)
        for layer in (self.mlp_hidden_0, self.mlp_hidden_1, self.mlp_hidden_2):
            hidden = self.dropout(torch.tanh(layer(hidden)))
        return self.mlp_out(hidden)
| 45.881801 | 217 | 0.604457 | import random
import numpy as np
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, roc_auc_score
import pickle
import torch.nn as nn
import os
import csv
random.seed(1000)
np.random.seed(1000)
torch.manual_seed(1000)
class Engagement_cls():
def __init__(self, train_dir, batch_size, mlp_hidden_dim, num_epochs,\
regularizer = 0.01, lr=1e-4, dropout = 0.1, optimizer="Adam",\
ftrain_queries_embed=None, ftrain_replies_embed=None, fvalid_queries_embed=None, fvalid_replies_embed=None, ftest_queries_embed=None ,ftest_replies_embed=None):
print('***************model parameters********************')
print('mlp layers {}'.format(mlp_hidden_dim))
print('learning rate {}'.format(lr))
print('drop out rate {}'.format(dropout))
print('batch size {}'.format(batch_size))
print('optimizer {}'.format(optimizer))
print('regularizer {}'.format(regularizer))
print('***************************************************')
print(ftrain_queries_embed)
print(ftrain_replies_embed)
print(fvalid_queries_embed)
print(fvalid_replies_embed)
print(ftest_queries_embed)
print(ftest_replies_embed)
self.train_dir = train_dir
self.batch_size = batch_size
self.mlp_hidden_dim = mlp_hidden_dim
self.lr = lr
self.dropout = dropout
self.num_epochs = num_epochs
self.optim = optimizer
self.reg= regularizer
self.ftrain_queries_embed = ftrain_queries_embed
self.ftrain_replies_embed =ftrain_replies_embed
self.fvalid_queries_embed= fvalid_queries_embed
self.fvalid_replies_embed = fvalid_replies_embed
self.ftest_queries_embed = ftest_queries_embed
self.ftest_replies_embed = ftest_replies_embed
def load_Bert_embeddings(self, data_dir, f_queries_embed, f_replies_embed):
print('Loading Bert embeddings of sentences')
queries_vectors = {}
replies_vectors = {}
fwq = open(data_dir+f_queries_embed, 'rb')
dict_queries = pickle.load(fwq)
for query, embeds in dict_queries.items():
queries_vectors[query] = embeds[0]
fwr = open(data_dir + f_replies_embed, 'rb')
dict_replies = pickle.load(fwr)
for reply, embeds in dict_replies.items():
replies_vectors[reply] = embeds[0]
print('number of loaded embeddings is {} {}'.format(len(queries_vectors), len(replies_vectors)))
return queries_vectors, replies_vectors
def prepare_data(self, data_dir, ftrain=None, fvalid=None, ftest=None):
self.data_dir = data_dir
if ftrain != None:
csv_file = open(data_dir + ftrain)
csv_reader_train = csv.reader(csv_file, delimiter=',')
self.train_queries,self.train_replies,self.train_labels = [],[],[]
next(csv_reader_train)
for row in csv_reader_train:
self.train_queries.append(row[1].split('\n')[0])
self.train_replies.append(row[2].split('\n')[0])
self.train_labels.append(int(row[3]))
print('size of train_queries {}'.format(len(self.train_queries)))
self.train_size = len(self.train_queries)
self.train_queries_embeds, self.train_replies_embeds= self.load_Bert_embeddings(data_dir, self.ftrain_queries_embed, self.ftrain_replies_embed)
if fvalid != None:
csv_file = open(data_dir + fvalid)
csv_reader_valid = csv.reader(csv_file, delimiter=',')
self.valid_queries,self.valid_replies,self.valid_labels= [],[],[]
next(csv_reader_valid)
for row in csv_reader_valid:
self.valid_queries.append(row[1].split('\n')[0])
self.valid_replies.append(row[2].split('\n')[0])
self.valid_labels.append(int(row[3]))
print('size of valid_queries {}'.format(len(self.valid_queries)))
self.valid_size = len(self.valid_queries)
self.valid_queries_embeds, self.valid_replies_embeds= self.load_Bert_embeddings(data_dir, self.fvalid_queries_embed, self.fvalid_replies_embed)
if ftest != None:
print(self.ftest_queries_embed)
print(self.ftest_replies_embed)
csv_file = open(data_dir + ftest)
csv_reader_test = csv.reader(csv_file, delimiter=',')
self.test_queries,self.test_replies,self.test_labels = [],[],[]
next(csv_reader_test)
for row in csv_reader_test:
self.test_queries.append(row[1].split('\n')[0])
self.test_replies.append(row[2].split('\n')[0])
self.test_labels.append(int(row[3]))
self.test_size = len(self.test_queries)
self.test_queries_embeds, self.test_replies_embeds= self.load_Bert_embeddings(data_dir, self.ftest_queries_embed, self.ftest_replies_embed)
filename = self.train_dir + "log_train.txt"
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.fw =open(filename, "a")
self.fw.write('***************model parameters******************** \n')
self.fw.write('mlp layers {} \n'.format(self.mlp_hidden_dim))
self.fw.write('learning rate {}\n'.format(self.lr))
self.fw.write('drop out rate {}\n'.format(self.dropout))
self.fw.write('batch size {}\n'.format(self.batch_size))
self.fw.write('optimizer {}\n'.format(self.optim))
self.fw.write('regularizer {}'.format(self.reg))
self.fw.write('***************************************************\n')
def shuffle_data(self, type='train'):
if type=='train':
train_indexes = [i for i in range(self.train_size)]
random.shuffle(train_indexes)
shuffled_queries = []
shuffled_replies = []
shuffled_labels = []
shuffled_replies_len = []
shuffled_replies_num_diverse= []
for i in train_indexes:
shuffled_queries.append(self.train_queries[i])
shuffled_replies.append(self.train_replies[i])
shuffled_labels.append(self.train_labels[i])
self.train_queries = shuffled_queries
self.train_replies = shuffled_replies
self.train_labels = shuffled_labels
elif type=='valid':
valid_indexes = [i for i in range(self.valid_size)]
random.shuffle(valid_indexes)
shuffled_queries = []
shuffled_replies = []
shuffled_labels = []
for i in valid_indexes:
shuffled_queries.append(self.valid_queries[i])
shuffled_replies.append(self.valid_replies[i])
shuffled_labels.append(self.valid_labels[i])
self.valid_queries = shuffled_queries
self.valid_replies = shuffled_replies
self.valid_labels = shuffled_labels
elif type=='test':
test_indexes = [i for i in range(self.test_size)]
random.shuffle(test_indexes)
shuffled_queries = []
shuffled_replies = []
shuffled_labels = []
for i in test_indexes:
shuffled_queries.append(self.test_queries[i])
shuffled_replies.append(self.test_replies[i])
shuffled_labels.append(self.test_labels[i])
self.test_queries = shuffled_queries
self.test_replies = shuffled_replies
self.test_labels = shuffled_labels
def train(self, early_stop=50, finetune=False):
model = BiLSTM(mlp_hidden_dim=self.mlp_hidden_dim, dropout=self.dropout)
if torch.cuda.is_available():
model.cuda()
max_auc = 0
no_improve_epoch = 0
no_improve_in_previous_epoch = False
if finetune==False:
model_name ='best_model'
if finetune==True:
model_name ='best_model_finetuned'
model.load_state_dict(torch.load(self.train_dir + 'best_model.pt'))
info = torch.load(self.train_dir + 'best_model.info')
print('the parameters of the best trained model is ')
for name, param in model.named_parameters():
if param.requires_grad:
print (name, param.data, param.shape)
print(self.lr)
if self.optim=='SGD':
optimizer = optim.SGD(model.parameters(), lr=self.lr, weight_decay=self.reg)
if self.optim=='Adam':
optimizer = optim.Adam(model.parameters(), lr=self.lr, weight_decay=self.reg)
if self.optim=='RMSprop':
optimizer = optim.RMSprop(model.parameters(), lr=self.lr, weight_decay=self.reg)
plot_train_auc = []
plot_valid_auc = []
plot_valid_loss = []
plot_train_loss = []
plot_ep = []
step=0
self.shuffle_data('valid')
for e in range(self.num_epochs):
print('***********************************************')
print(e)
if no_improve_in_previous_epoch:
no_improve_epoch += 1
if no_improve_epoch >= early_stop:
break
else:
no_improve_epoch = 0
no_improve_in_previous_epoch = True
train_loss = []
train_auc = []
nonzero_total= 0
list_preds = torch.tensor([self.train_size])
list_grtuth = torch.tensor([self.train_size])
if torch.cuda.is_available():
list_preds = list_preds.cuda()
list_grtuth = list_grtuth.cuda()
self.shuffle_data('train')
for stidx in range(0, self.train_size, self.batch_size):
step+=1
model.train()
model.zero_grad()
x_q = self.train_queries[stidx:stidx + self.batch_size]
x_r = self.train_replies[stidx:stidx + self.batch_size]
y = torch.tensor(self.train_labels[stidx:stidx + self.batch_size]).long()
if torch.cuda.is_available():
y = y.cuda()
nonzero = torch.nonzero(y).size(0)
nonzero_total +=nonzero
model_output = model(x_q, x_r, self.train_queries_embeds, self.train_replies_embeds)
pred_eval = torch.argmax(model_output, 1)
list_preds = torch.cat((list_preds, pred_eval), dim=0)
list_grtuth = torch.cat((list_grtuth, y), dim=0)
weight = torch.tensor([y.shape[0]/(2*(y.shape[0]- nonzero)), y.shape[0]/(2*nonzero)])
if torch.cuda.is_available():
weight = weight.cuda()
loss_function = nn.CrossEntropyLoss(weight)
loss = loss_function(model_output, y)
train_loss.append(loss.data)
loss.backward()
optimizer.step()
print('number of nonzero in train is {}'.format(nonzero_total))
train_auc = roc_auc_score(list_grtuth[1:].detach().cpu().numpy(), list_preds[1:].detach().cpu().numpy())
train_loss = torch.mean(torch.stack(train_loss))
val_loss = []
val_auc = []
nonzero_total = 0
list_preds_v = torch.tensor([self.valid_size])
list_grtuth_v = torch.tensor([self.valid_size])
if torch.cuda.is_available():
list_preds_v = list_preds_v.cuda()
list_grtuth_v = list_grtuth_v.cuda()
for stidx in range(0, self.valid_size, self.batch_size):
model.eval()
val_x_q = self.valid_queries[stidx:stidx + self.batch_size]
val_x_r = self.valid_replies[stidx:stidx + self.batch_size]
val_y = torch.tensor(self.valid_labels[stidx:stidx + self.batch_size]).long()
if torch.cuda.is_available():
val_y = val_y.cuda()
nonzero = torch.nonzero(val_y).size(0)
nonzero_total +=nonzero
model_output = model(val_x_q, val_x_r, self.valid_queries_embeds, self.valid_replies_embeds)
val_pred = torch.argmax(model_output, 1)
list_preds_v = torch.cat((list_preds_v, val_pred), dim=0)
list_grtuth_v = torch.cat((list_grtuth_v, val_y), dim=0)
weight = torch.tensor([val_y.shape[0]/(2*(val_y.shape[0]- nonzero)), val_y.shape[0]/(2*nonzero)])
if torch.cuda.is_available():
weight = weight.cuda()
loss_function = nn.CrossEntropyLoss(weight)
v_loss = loss_function(model_output, val_y)
val_loss.append(v_loss.data)
val_auc = roc_auc_score(list_grtuth_v[1:].detach().cpu().numpy(), list_preds_v[1:].detach().cpu().numpy())
val_loss = torch.mean(torch.stack(val_loss))
print('number of nonzero in valid is {}'.format(nonzero_total))
st_improv = ''
if val_auc > max_auc:
st_improv = '*'
torch.save({'step': step, 'epoch': e, 'train_loss': train_loss, 'train_auc': train_auc, 'val_loss': val_loss, 'val_auc': val_auc }, self.train_dir+model_name+'.info')
torch.save(model.state_dict(), self.train_dir+model_name+'.pt')
max_auc = val_auc
no_improve_in_previous_epoch = False
print('epcoh {:02} - train_loss {:.4f} - train_auc {:.4f} val_loss {:.4f} - val_auc {:.4f}{}'.format(
e, train_loss, train_auc, val_loss, val_auc, st_improv))
self.fw.write('epcoh {:02} - train_loss {:.4f} - train_auc {:.4f} val_loss {:.4f} - val_auc {:.4f}{} \n'.format(
e, train_loss, train_auc, val_loss, val_auc, st_improv))
plot_train_auc.append(train_auc)
plot_valid_auc.append(val_auc)
plot_train_loss.append(train_loss)
plot_valid_loss.append(val_loss)
plot_ep.append(e)
print('#############################################')
model.load_state_dict(torch.load(self.train_dir + model_name+'.pt'))
info = torch.load(self.train_dir + model_name+'.info')
print('the parameters of the best trained model is ')
for name, param in model.named_parameters():
if param.requires_grad:
print (name, param.data, param.shape)
print('Done!')
plt.figure(0)
l1 = plt.plot(plot_ep,plot_train_auc,'-r', label='Train auc')
l2 = plt.plot(plot_ep,plot_valid_auc,'-b', label='Valid auc')
plt.legend(loc='upper left')
plt.xlabel("train and valid acc for model")
plt.savefig(self.train_dir + 'model_auc.jpg')
plt.figure(1)
l1 = plt.plot(plot_ep,plot_train_loss,'-r', label='Train loss')
l2 = plt.plot(plot_ep,plot_valid_loss,'-b', label='Valid loss')
plt.legend(loc='upper left')
plt.xlabel("train and valid loss for model")
plt.savefig(self.train_dir + 'model_loss.jpg')
def test(self, fname):
if not os.path.isfile(self.train_dir+'best_model.pt'):
print('There is not any trained model to be tested!\nPlease first try to train the model.')
return
model = BiLSTM(mlp_hidden_dim=self.mlp_hidden_dim, dropout=self.dropout)
if torch.cuda.is_available():
model.cuda()
model.load_state_dict(torch.load(self.train_dir+'best_model.pt'))
info = torch.load(self.train_dir + 'best_model.info')
model.eval()
print('begining of test')
for name, param in model.named_parameters():
if param.requires_grad:
print (name, param.data, param.shape)
self.shuffle_data('test')
test_loss = []
test_auc = []
nonzero_total= 0
step = 0
list_preds_t = torch.tensor([self.test_size])
list_grtuth_t = torch.tensor([self.test_size])
if torch.cuda.is_available():
list_preds_t = list_preds_t.cuda()
list_grtuth_t = list_grtuth_t.cuda()
for stidx in range(0, self.test_size, self.batch_size):
step+=1
x_q = self.test_queries[stidx:stidx + self.batch_size]
x_r = self.test_replies[stidx:stidx + self.batch_size]
y = torch.tensor(self.test_labels[stidx:stidx + self.batch_size]).long()
if torch.cuda.is_available():
y = y.cuda()
nonzero = torch.nonzero(y).size(0)
nonzero_total +=nonzero
model_output = model(x_q, x_r, self.test_queries_embeds, self.test_replies_embeds)
pred_eval = torch.argmax(model_output, 1)
list_preds_t = torch.cat((list_preds_t, pred_eval), dim=0)
list_grtuth_t = torch.cat((list_grtuth_t, y), dim=0)
print('batch {} has {} nonzero points and {} zero points overall {} points '.format(step, nonzero, y.shape[0]- nonzero, y.shape[0]))
weight = torch.tensor([y.shape[0]/(2*(y.shape[0]- nonzero)), y.shape[0]/(2*nonzero)])
if torch.cuda.is_available():
weight = weight.cuda()
loss_function = nn.CrossEntropyLoss(weight)
loss = loss_function(model_output, y)
test_loss.append(loss.data)
print('number of nonzero in test is {}'.format(nonzero_total))
test_auc = roc_auc_score(list_grtuth_t[1:].detach().cpu().numpy(), list_preds_t[1:].detach().cpu().numpy())
print(classification_report(list_grtuth_t[1:].detach().cpu().numpy(), list_preds_t[1:].detach().cpu().numpy()))
test_loss = torch.mean(torch.stack(test_loss))
print('Test set: test_loss: {} -- test_auc: {}'.format(test_loss, test_auc))
def generate_eng_score(self, fname_ground_truth, ofile):
if not os.path.isfile(self.train_dir+'best_model_finetuned.pt'):
print('There is not any finetuned model on DD dataset to be used!\nPlease first try to finetune trained model.')
return
model = BiLSTM(mlp_hidden_dim=self.mlp_hidden_dim, dropout=self.dropout)
if torch.cuda.is_available():
model.cuda()
model.load_state_dict(torch.load(self.train_dir + 'best_model_finetuned.pt'))
info = torch.load(self.train_dir + 'best_model_finetuned.info')
model.eval()
fw_pred_labels = open(self.data_dir + ofile, 'w')
fr_groundtruth_replies = open(self.data_dir + fname_ground_truth, 'r')
groundtruth_replies =fr_groundtruth_replies.readlines()
print('begining of prediction')
for name, param in model.named_parameters():
if param.requires_grad:
print (name, param.data, param.shape)
for stidx in range(0, self.test_size, self.batch_size):
x_q = self.test_queries[stidx:stidx + self.batch_size]
x_r = self.test_replies[stidx:stidx + self.batch_size]
x_groundtruth_r = groundtruth_replies[stidx:stidx + self.batch_size]
model_output = model(x_q, x_r, self.test_queries_embeds, self.test_replies_embeds)
pred_eng = torch.nn.functional.softmax(model_output, dim=1)
for ind in range(len(x_q)):
fw_pred_labels.write(x_q[ind]+'==='+x_groundtruth_r[ind].split('\n')[0]+'==='+x_r[ind]+'==='+str(pred_eng[ind][1].item())+'\n')
print('The engagingness score for specified replies has been predicted!')
def get_eng_score(self, query, q_embed, reply, r_embed, model):
if not os.path.isfile(self.train_dir+'best_model_finetuned.pt'):
print('There is not any finetuned model on DD dataset to be used!\nPlease first try to finetune trained model.')
return
model = BiLSTM(mlp_hidden_dim=self.mlp_hidden_dim, dropout=self.dropout)
if torch.cuda.is_available():
model.cuda()
model.load_state_dict(torch.load(self.train_dir + 'best_model_finetuned.pt'))
info = torch.load(self.train_dir + 'best_model_finetuned.info')
model.eval()
model_output = model(query, reply, q_embed, r_embed)
pred_eng = torch.nn.functional.softmax(model_output, dim=1)
return pred_eng
class BiLSTM(nn.Module):
def __init__(self, mlp_hidden_dim=[128], dropout=0.2):
super(BiLSTM, self).__init__()
self.dropout = nn.Dropout(p=dropout)
num_classes=2
self.mlp_hidden_0 = nn.Linear(768, mlp_hidden_dim[0], bias=True)
self.mlp_hidden_1 = nn.Linear(mlp_hidden_dim[0], mlp_hidden_dim[1], bias=True)
self.mlp_hidden_2 = nn.Linear(mlp_hidden_dim[1], mlp_hidden_dim[2], bias=True)
self.mlp_out = nn.Linear(mlp_hidden_dim[2], num_classes, bias=True)
def forward(self, queries_input, replies_input, queries_embeds, replies_embeds):
for ind, q in enumerate(queries_input):
if q not in queries_embeds.keys():
print('the query {} embedding has not been found in the embedding file'.format(q))
X_q = torch.tensor([queries_embeds[q] for q in queries_input])
for ind, r in enumerate(replies_input):
if r not in replies_embeds.keys():
print('the reply {} embedding has not been found in the embedding file'.format(r))
X_r = torch.tensor([replies_embeds[r] for r in replies_input])
if torch.cuda.is_available():
X_q, X_r = X_q.cuda(), X_r.cuda()
mlp_input=X_q.add(X_r)
mlp_input = torch.div(mlp_input,2)
mlp_h_0 = torch.tanh(self.mlp_hidden_0(mlp_input))
mlp_h_0= self.dropout(mlp_h_0)
mlp_h_1 = torch.tanh(self.mlp_hidden_1(mlp_h_0))
mlp_h_1= self.dropout(mlp_h_1)
mlp_h_2 = torch.tanh(self.mlp_hidden_2(mlp_h_1))
mlp_h_2= self.dropout(mlp_h_2)
mlp_out= self.mlp_out(mlp_h_2)
return mlp_out
| true | true |
1c2e22d242454048924dcdcb258ef19ca93433b3 | 7,544 | py | Python | tests/rados/test_9929.py | hmaheswa/cephci | b75c1e58e1222865c81c0558ff98b3708dc4236a | [
"MIT"
] | null | null | null | tests/rados/test_9929.py | hmaheswa/cephci | b75c1e58e1222865c81c0558ff98b3708dc4236a | [
"MIT"
] | null | null | null | tests/rados/test_9929.py | hmaheswa/cephci | b75c1e58e1222865c81c0558ff98b3708dc4236a | [
"MIT"
] | null | null | null | import json
import random
import time
import traceback
from ceph.rados_utils import RadosHelper
from utility.log import Log
log = Log(__name__)
def run(ceph_cluster, **kw):
"""
CEPH-9929-RADOS:
Corrupt an object in ec pool followed by
list-inconsistent-* commands
1. create a jerasure ec pool with k=4,m=2
2. create an object in the pool
3. chose any of the osd from the acting set and go to the backend
4. corrupt object attrib from the backend
5. run deep-scrub on the pool
6. rados list-inconsistent-pg <pool>
7. rados list-inconsistent-obj <pg>
Args:
ceph_cluster (ceph.ceph.Ceph): ceph cluster
"""
log.info("Running CEPH-9929")
log.info(run.__doc__)
ceph_nodes = kw.get("ceph_nodes")
config = kw.get("config")
build = config.get("build", config.get("rhbuild"))
mons = []
role = "client"
for mnode in ceph_nodes:
if mnode.role == role:
mons.append(mnode)
ctrlr = mons[0]
log.info("chosing mon {cmon} as ctrlrmon".format(cmon=ctrlr.hostname))
helper = RadosHelper(ctrlr, config, log)
"""create ec pool with k=4, m=2"""
k = 4
m = 2
pname = "eccorrupt_{rand}_{k}_{m}".format(rand=random.randint(0, 10000), k=k, m=m)
profile = pname
if build.startswith("4"):
prof_cmd = "osd erasure-code-profile set {profile} k={k} m={m} \
crush-failure-domain=osd".format(
profile=profile, k=k, m=m
)
else:
prof_cmd = "osd erasure-code-profile set {profile} k={k} m={m} \
ruleset-failure-domain=osd crush-failure-domain=osd".format(
profile=profile, k=k, m=m
)
try:
(out, err) = helper.raw_cluster_cmd(prof_cmd)
outbuf = out.read().decode()
log.info(outbuf)
log.info("created profile {ec}".format(ec=profile))
except Exception:
log.error("ec profile creation failed")
log.error(traceback.format_exc())
return 1
"""create ec pool"""
try:
helper.create_pool(pname, 1, profile)
log.info("Pool {pname} is create".format(pname=pname))
except Exception:
log.error("failed to create pool")
log.error(traceback.format_exc())
return 1
"""check whether pool exists"""
try:
helper.get_pool_num(pname)
except Exception:
log.error("Unable to find pool")
log.error(traceback.format_exc())
return 1
time.sleep(10)
oname = "OBJ_{pname}".format(pname=pname)
cmd = "osd map {pname} {obj} --format json".format(pname=pname, obj=oname)
(out, err) = helper.raw_cluster_cmd(cmd)
outbuf = out.read().decode()
log.info(outbuf)
cmdout = json.loads(outbuf)
targt_pg = cmdout["pgid"]
"""considering primary only as of now because of bug
1544680
"""
targt_osd_id = cmdout["up"][0]
"""write data and take snaps"""
putobj = "sudo rados -p {pool} put {obj} {path}".format(
pool=pname, obj=oname, path="/etc/hosts"
)
for i in range(10):
(out, err) = ctrlr.exec_command(cmd=putobj)
snapcmd = "sudo rados mksnap -p {pool} {sname}".format(
pool=pname, sname="snap" + str(i)
)
(out, err) = ctrlr.exec_command(cmd=snapcmd)
log.info("put {obj}, snap {snap}".format(obj=oname, snap="snap" + str(i)))
"""
Goto destination osd, stop the osd
use ceph-objectstore-tool to corrupt
snap info
"""
# target_osd = ceph_cluster.get_osd_by_id(targt_osd_id)
# target_osd_node = target_osd.node
target_osd_hostname = ceph_cluster.get_osd_metadata(targt_osd_id).get("hostname")
log.info(target_osd_hostname)
target_osd_node = ceph_cluster.get_node_by_hostname(target_osd_hostname)
cot_environment = target_osd_node
osd_service = ceph_cluster.get_osd_service_name(targt_osd_id)
partition_path = ceph_cluster.get_osd_metadata(targt_osd_id).get("osd_data")
helper.kill_osd(target_osd_node, osd_service)
time.sleep(10)
osd_metadata = ceph_cluster.get_osd_metadata(targt_osd_id)
osd_data = osd_metadata.get("osd_data")
osd_journal = osd_metadata.get("osd_journal")
if ceph_cluster.containerized:
docker_image_string = "{docker_registry}/{docker_image}:{docker_tag}".format(
docker_registry=ceph_cluster.ansible_config.get("ceph_docker_registry"),
docker_image=ceph_cluster.ansible_config.get("ceph_docker_image"),
docker_tag=ceph_cluster.ansible_config.get("ceph_docker_image_tag"),
)
cot_environment = helper.get_mgr_proxy_container(
target_osd_node, docker_image_string
)
out, err = cot_environment.exec_command(
cmd='mount | grep "{partition_path} "'.format(
partition_path=partition_path
),
check_ec=False,
)
device_mount_data = out.read().decode() # type: str
if not device_mount_data:
cot_environment.exec_command(
cmd="sudo mount {partition_path} {directory}".format(
partition_path=partition_path, directory=osd_data
)
)
slist_cmd = "sudo ceph-objectstore-tool --data-path \
{osd_data} --journal-path \
{osd_journal} \
--head --op list {obj}".format(
osd_data=osd_data, osd_journal=osd_journal, obj=oname
)
(out, err) = cot_environment.exec_command(cmd=slist_cmd)
outbuf = out.read().decode()
log.info(outbuf)
corrupt_cmd = "sudo ceph-objectstore-tool --data-path \
{osd_data} --journal-path \
{osd_journal} \
{outbuf} rm-attr \
snapset".format(
osd_data=osd_data, osd_journal=osd_journal, outbuf="'" + (outbuf) + "'"
)
(out, err) = cot_environment.exec_command(cmd=corrupt_cmd)
outbuf = out.read().decode()
log.info(outbuf)
helper.revive_osd(target_osd_node, osd_service)
time.sleep(10)
run_scrub = "pg deep-scrub {pgid}".format(pgid=targt_pg)
(out, err) = helper.raw_cluster_cmd(run_scrub)
outbuf = out.read().decode()
log.info(outbuf)
while "HEALTH_ERR" and "active+clean+inconsistent" not in outbuf:
status = "-s --format json"
(out, err) = helper.raw_cluster_cmd(status)
outbuf = out.read().decode()
log.info("HEALTH_ERR found as expected")
log.info("inconsistent foud as expected")
timeout = 300
found = 0
while timeout:
incon_pg = "sudo rados list-inconsistent-pg \
{pname}".format(
pname=pname
)
(out, err) = ctrlr.exec_command(cmd=incon_pg)
outbuf = out.read().decode()
log.info(outbuf)
if targt_pg not in outbuf:
time.sleep(1)
timeout = timeout - 1
else:
found = 1
break
if timeout == 0 and found == 0:
log.error("pg not listed as inconsistent")
return 1
timeout = 300
found = 0
while timeout:
incon_obj = "sudo rados list-inconsistent-obj {pg}".format(pg=targt_pg)
(out, err) = ctrlr.exec_command(cmd=incon_obj)
outbuf = out.read().decode()
log.info(outbuf)
if oname not in outbuf:
time.sleep(1)
timeout = timeout - 1
else:
found = 1
break
if timeout == 0 and found == 0:
log.error("object is not listed in inconsistent obj")
return 1
return 0
| 34.290909 | 86 | 0.615986 | import json
import random
import time
import traceback
from ceph.rados_utils import RadosHelper
from utility.log import Log
log = Log(__name__)
def run(ceph_cluster, **kw):
log.info("Running CEPH-9929")
log.info(run.__doc__)
ceph_nodes = kw.get("ceph_nodes")
config = kw.get("config")
build = config.get("build", config.get("rhbuild"))
mons = []
role = "client"
for mnode in ceph_nodes:
if mnode.role == role:
mons.append(mnode)
ctrlr = mons[0]
log.info("chosing mon {cmon} as ctrlrmon".format(cmon=ctrlr.hostname))
helper = RadosHelper(ctrlr, config, log)
k = 4
m = 2
pname = "eccorrupt_{rand}_{k}_{m}".format(rand=random.randint(0, 10000), k=k, m=m)
profile = pname
if build.startswith("4"):
prof_cmd = "osd erasure-code-profile set {profile} k={k} m={m} \
crush-failure-domain=osd".format(
profile=profile, k=k, m=m
)
else:
prof_cmd = "osd erasure-code-profile set {profile} k={k} m={m} \
ruleset-failure-domain=osd crush-failure-domain=osd".format(
profile=profile, k=k, m=m
)
try:
(out, err) = helper.raw_cluster_cmd(prof_cmd)
outbuf = out.read().decode()
log.info(outbuf)
log.info("created profile {ec}".format(ec=profile))
except Exception:
log.error("ec profile creation failed")
log.error(traceback.format_exc())
return 1
try:
helper.create_pool(pname, 1, profile)
log.info("Pool {pname} is create".format(pname=pname))
except Exception:
log.error("failed to create pool")
log.error(traceback.format_exc())
return 1
try:
helper.get_pool_num(pname)
except Exception:
log.error("Unable to find pool")
log.error(traceback.format_exc())
return 1
time.sleep(10)
oname = "OBJ_{pname}".format(pname=pname)
cmd = "osd map {pname} {obj} --format json".format(pname=pname, obj=oname)
(out, err) = helper.raw_cluster_cmd(cmd)
outbuf = out.read().decode()
log.info(outbuf)
cmdout = json.loads(outbuf)
targt_pg = cmdout["pgid"]
targt_osd_id = cmdout["up"][0]
putobj = "sudo rados -p {pool} put {obj} {path}".format(
pool=pname, obj=oname, path="/etc/hosts"
)
for i in range(10):
(out, err) = ctrlr.exec_command(cmd=putobj)
snapcmd = "sudo rados mksnap -p {pool} {sname}".format(
pool=pname, sname="snap" + str(i)
)
(out, err) = ctrlr.exec_command(cmd=snapcmd)
log.info("put {obj}, snap {snap}".format(obj=oname, snap="snap" + str(i)))
target_osd_hostname = ceph_cluster.get_osd_metadata(targt_osd_id).get("hostname")
log.info(target_osd_hostname)
target_osd_node = ceph_cluster.get_node_by_hostname(target_osd_hostname)
cot_environment = target_osd_node
osd_service = ceph_cluster.get_osd_service_name(targt_osd_id)
partition_path = ceph_cluster.get_osd_metadata(targt_osd_id).get("osd_data")
helper.kill_osd(target_osd_node, osd_service)
time.sleep(10)
osd_metadata = ceph_cluster.get_osd_metadata(targt_osd_id)
osd_data = osd_metadata.get("osd_data")
osd_journal = osd_metadata.get("osd_journal")
if ceph_cluster.containerized:
docker_image_string = "{docker_registry}/{docker_image}:{docker_tag}".format(
docker_registry=ceph_cluster.ansible_config.get("ceph_docker_registry"),
docker_image=ceph_cluster.ansible_config.get("ceph_docker_image"),
docker_tag=ceph_cluster.ansible_config.get("ceph_docker_image_tag"),
)
cot_environment = helper.get_mgr_proxy_container(
target_osd_node, docker_image_string
)
out, err = cot_environment.exec_command(
cmd='mount | grep "{partition_path} "'.format(
partition_path=partition_path
),
check_ec=False,
)
device_mount_data = out.read().decode()
if not device_mount_data:
cot_environment.exec_command(
cmd="sudo mount {partition_path} {directory}".format(
partition_path=partition_path, directory=osd_data
)
)
slist_cmd = "sudo ceph-objectstore-tool --data-path \
{osd_data} --journal-path \
{osd_journal} \
--head --op list {obj}".format(
osd_data=osd_data, osd_journal=osd_journal, obj=oname
)
(out, err) = cot_environment.exec_command(cmd=slist_cmd)
outbuf = out.read().decode()
log.info(outbuf)
corrupt_cmd = "sudo ceph-objectstore-tool --data-path \
{osd_data} --journal-path \
{osd_journal} \
{outbuf} rm-attr \
snapset".format(
osd_data=osd_data, osd_journal=osd_journal, outbuf="'" + (outbuf) + "'"
)
(out, err) = cot_environment.exec_command(cmd=corrupt_cmd)
outbuf = out.read().decode()
log.info(outbuf)
helper.revive_osd(target_osd_node, osd_service)
time.sleep(10)
run_scrub = "pg deep-scrub {pgid}".format(pgid=targt_pg)
(out, err) = helper.raw_cluster_cmd(run_scrub)
outbuf = out.read().decode()
log.info(outbuf)
while "HEALTH_ERR" and "active+clean+inconsistent" not in outbuf:
status = "-s --format json"
(out, err) = helper.raw_cluster_cmd(status)
outbuf = out.read().decode()
log.info("HEALTH_ERR found as expected")
log.info("inconsistent foud as expected")
timeout = 300
found = 0
while timeout:
incon_pg = "sudo rados list-inconsistent-pg \
{pname}".format(
pname=pname
)
(out, err) = ctrlr.exec_command(cmd=incon_pg)
outbuf = out.read().decode()
log.info(outbuf)
if targt_pg not in outbuf:
time.sleep(1)
timeout = timeout - 1
else:
found = 1
break
if timeout == 0 and found == 0:
log.error("pg not listed as inconsistent")
return 1
timeout = 300
found = 0
while timeout:
incon_obj = "sudo rados list-inconsistent-obj {pg}".format(pg=targt_pg)
(out, err) = ctrlr.exec_command(cmd=incon_obj)
outbuf = out.read().decode()
log.info(outbuf)
if oname not in outbuf:
time.sleep(1)
timeout = timeout - 1
else:
found = 1
break
if timeout == 0 and found == 0:
log.error("object is not listed in inconsistent obj")
return 1
return 0
| true | true |
1c2e22d8ae316b4a6d4048c96dab0849ac091011 | 182 | py | Python | setup.py | Hekstra-Lab/napari-labeller | 74913dce72c773df2ec94e1cb3798dd40fedf219 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Hekstra-Lab/napari-labeller | 74913dce72c773df2ec94e1cb3798dd40fedf219 | [
"BSD-3-Clause"
] | 1 | 2021-12-03T21:26:27.000Z | 2021-12-03T21:26:27.000Z | setup.py | Hekstra-Lab/napari-labeller | 74913dce72c773df2ec94e1cb3798dd40fedf219 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
# https://github.com/pypa/setuptools_scm
use_scm = {"write_to": "src/napari_labeller/_version.py"}
setup(use_scm_version=use_scm)
| 26 | 57 | 0.78022 |
from setuptools import setup
use_scm = {"write_to": "src/napari_labeller/_version.py"}
setup(use_scm_version=use_scm)
| true | true |
1c2e2463dffc444c3ccdc2e39b33c19f6d840870 | 1,604 | py | Python | official/nlp/mass/mindspore_hub_conf.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/nlp/mass/mindspore_hub_conf.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/nlp/mass/mindspore_hub_conf.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""hub config."""
import os
import mindspore.common.dtype as mstype
from config import TransformerConfig
from src.transformer import TransformerNetworkWithLoss, TransformerInferModel
def get_config(config):
config = TransformerConfig.from_json_file(config)
config.compute_type = mstype.float16
config.dtype = mstype.float32
return config
def create_network(name, *args, **kwargs):
    """create mass network."""
    # Only the MASS model is served from this hub config.
    if name != "mass":
        raise NotImplementedError(f"{name} is not implemented in the repo")
    # Resolve the config file relative to this script's own directory.
    configDir = os.path.split(os.path.realpath(__file__))[0] + "/config/config.json"
    config = get_config(configDir)
    is_training = kwargs.get("is_training", False)
    if is_training:
        return TransformerNetworkWithLoss(config, is_training=is_training, *args)
    return TransformerInferModel(config, *args)
| 41.128205 | 88 | 0.698254 |
import os
import mindspore.common.dtype as mstype
from config import TransformerConfig
from src.transformer import TransformerNetworkWithLoss, TransformerInferModel
def get_config(config):
config = TransformerConfig.from_json_file(config)
config.compute_type = mstype.float16
config.dtype = mstype.float32
return config
def create_network(name, *args, **kwargs):
if name == "mass":
configDir = os.path.split(os.path.realpath(__file__))[0] + "/config/config.json"
config = get_config(configDir)
is_training = kwargs.get("is_training", False)
if is_training:
return TransformerNetworkWithLoss(config, is_training=is_training, *args)
return TransformerInferModel(config, *args)
raise NotImplementedError(f"{name} is not implemented in the repo")
| true | true |
1c2e251fe2c93f024f77f51ca40430d2ad2eb207 | 3,121 | py | Python | spacebase/user/models.py | hugoantunes/spacebase | b33c53ec093ed1ff3c0fcb6161bffeda98cc8ba6 | [
"MIT"
] | null | null | null | spacebase/user/models.py | hugoantunes/spacebase | b33c53ec093ed1ff3c0fcb6161bffeda98cc8ba6 | [
"MIT"
] | 6 | 2020-06-05T23:28:15.000Z | 2022-02-10T12:45:02.000Z | spacebase/user/models.py | hugoantunes/spacebase | b33c53ec093ed1ff3c0fcb6161bffeda98cc8ba6 | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models.signals import post_save
class User(models.Model):
    # Minimal account holder; FK target for UserAddress.
    first_name = models.CharField(max_length=30, blank=False)
    last_name = models.CharField(max_length=30, blank=False)
    # 34 characters is the maximum IBAN length defined by ISO 13616.
    iban = models.CharField(max_length=34, blank=False)
class UserAddress(models.Model):
    """A postal address belonging to a :class:`User`.

    The classmethods implement the pairwise attribute comparison used by
    the ``address_save`` signal handler to detect duplicate addresses.
    Each returns a ``(pk_to_remove, statement, equal)`` triple:

    * ``pk_to_remove`` -- pk of the redundant row, or ``None``;
    * ``statement`` -- 1 tells the caller to keep scanning further
      attributes, 0 that the verdict is final;
    * ``equal`` -- True only when this attribute gives no reason to
      treat the two rows as different.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    street_address = models.CharField(max_length=255)
    street_address_line2 = models.CharField(max_length=255, blank=True, null=True)
    zipcode = models.CharField(max_length=12, blank=True, null=True)
    city = models.CharField(max_length=64)
    state = models.CharField(max_length=64, blank=True, null=True)
    # presumably an ISO 3166-1 alpha-2 code -- confirm against callers
    country = models.CharField(max_length=2)
    # Denormalised printable form, rebuilt on every save().
    full_address = models.TextField(blank=True)

    @classmethod
    def dedupublicate_address(cls, old, new, attr, subset=False):
        """Compare attribute *attr* of two addresses for de-duplication."""
        new_address_attr = getattr(new, attr)
        old_address_attr = getattr(old, attr)
        if old_address_attr and new_address_attr:
            if old_address_attr.lower() != new_address_attr.lower():
                if subset:
                    # One value may textually contain the other.
                    return cls.find_subset(
                        new_address_attr, old_address_attr, new, old
                    )
                # BUG FIX: the values differ, so the rows are NOT equal.
                # The original fell through to the final return and
                # reported equal=True here, which let address_save delete
                # addresses that differed in the last compared field.
                return None, None, False
        elif old_address_attr:
            # Only the old row has a value: the new row adds nothing.
            return new.pk, 0, False
        elif new_address_attr:
            # Only the new row has a value: the old row is redundant.
            return old.pk, 1, False
        # Both empty, or both present and case-insensitively identical.
        return None, None, True

    @classmethod
    def find_subset(cls, new_address_attr, old_address_attr, new, old):
        """Resolve two differing values where one may contain the other."""
        if new_address_attr.lower() in old_address_attr.lower():
            return new.pk, 0, False
        if old_address_attr.lower() in new_address_attr.lower():
            return old.pk, 1, False
        # Neither contains the other: genuinely different, keep scanning.
        return None, 1, False

    def save(self, *args, **kwargs):
        """Rebuild ``full_address`` before persisting.

        BUG FIX: a missing second street line used to be rendered as the
        literal string "None".  NOTE(review): zipcode/state may also be
        None and would still print as "None" -- confirm desired format.
        """
        line2 = self.street_address_line2 or ""
        streetdata = f"{self.street_address}\n{line2}"
        self.full_address = f"{streetdata}\n{self.zipcode} {self.city} {self.state} {self.country}"
        super().save(*args, **kwargs)
def address_save(sender, instance, **kwargs):
    """post_save handler that removes duplicate addresses for a user.

    Compares the just-saved ``instance`` against the user's other
    addresses attribute-by-attribute via
    ``UserAddress.dedupublicate_address`` and deletes whichever side of
    each pair was judged redundant.
    """
    other_address = UserAddress.objects.filter(user=instance.user).exclude(pk=instance.pk)
    to_remove = set()
    equal = True
    if other_address:
        for address in other_address:
            # Street lines use subset matching so that e.g. "5 Main St"
            # counts as a duplicate of "5 Main St Apt 2".
            for attr in ['street_address_line2', 'street_address']:
                pk, statment, equal = UserAddress.dedupublicate_address(
                    instance, address, attr, subset=True
                )
                if pk:
                    to_remove.add(pk)
                if not equal:
                    # statment == 1 means "keep comparing the next street
                    # field"; 0 means the street verdict is final.
                    if statment:
                        continue
                    else:
                        break
            # Exact (case-insensitive) comparison for the remaining fields.
            for attr in ['zipcode', 'city', 'state', 'country']:
                pk, _, equal = UserAddress.dedupublicate_address(instance, address, attr)
                if pk:
                    to_remove.add(pk)
            # NOTE(review): `equal` only reflects the LAST attribute
            # compared, not all of them -- confirm this is intended
            # before relying on it.
            if equal:
                to_remove.add(instance.pk)
    UserAddress.objects.filter(pk__in=to_remove).delete()
# De-duplicate after every UserAddress save.
post_save.connect(address_save, sender=UserAddress)
| 37.60241 | 99 | 0.622877 | from django.db import models
from django.db.models.signals import post_save
class User(models.Model):
first_name = models.CharField(max_length=30, blank=False)
last_name = models.CharField(max_length=30, blank=False)
iban = models.CharField(max_length=34, blank=False)
class UserAddress(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
street_address = models.CharField(max_length=255)
street_address_line2 = models.CharField(max_length=255, blank=True, null=True)
zipcode = models.CharField(max_length=12, blank=True, null=True)
city = models.CharField(max_length=64)
state = models.CharField(max_length=64, blank=True, null=True)
country = models.CharField(max_length=2)
full_address = models.TextField(blank=True)
@classmethod
def dedupublicate_address(cls, old, new, attr, subset=False):
new_address_attr = getattr(new, attr)
old_address_attr = getattr(old, attr)
if old_address_attr and new_address_attr:
if old_address_attr.lower() != new_address_attr.lower():
equal = False
if subset:
return cls.find_subset(
new_address_attr, old_address_attr, new, old
)
elif old_address_attr:
return new.pk, 0, False
elif new_address_attr:
return old.pk, 1, False
return None, None, True
@classmethod
def find_subset(clas, new_address_attr, old_address_attr, new, old):
if new_address_attr.lower() in old_address_attr.lower():
return new.pk, 0, False
elif old_address_attr.lower() in new_address_attr.lower():
return old.pk, 1, False
else:
return None, 1, False
def save(self, *args, **kwargs):
streetdata = f"{self.street_address}\n{self.street_address_line2}"
self.full_address = f"{streetdata}\n{self.zipcode} {self.city} {self.state} {self.country}"
super().save(*args, **kwargs)
def address_save(sender, instance, **kwargs):
other_address = UserAddress.objects.filter(user=instance.user).exclude(pk=instance.pk)
to_remove = set()
equal = True
if other_address:
for address in other_address:
for attr in ['street_address_line2', 'street_address']:
pk, statment, equal = UserAddress.dedupublicate_address(
instance, address, attr, subset=True
)
if pk:
to_remove.add(pk)
if not equal:
if statment:
continue
else:
break
for attr in ['zipcode', 'city', 'state', 'country']:
pk, _, equal = UserAddress.dedupublicate_address(instance, address, attr)
if pk:
to_remove.add(pk)
if equal:
to_remove.add(instance.pk)
UserAddress.objects.filter(pk__in=to_remove).delete()
post_save.connect(address_save, sender=UserAddress)
| true | true |
1c2e2699c29161f428e7a7edc801cf5bc77c1afb | 3,008 | py | Python | www/src/Lib/browser/indexed_db.py | raspberrypieman/brython | 2cc23d1da6acda604d4a56b4c9d464eb7e374eda | [
"BSD-3-Clause"
] | 5,926 | 2015-01-01T07:45:08.000Z | 2022-03-31T12:34:38.000Z | www/src/Lib/browser/indexed_db.py | raspberrypieman/brython | 2cc23d1da6acda604d4a56b4c9d464eb7e374eda | [
"BSD-3-Clause"
] | 1,728 | 2015-01-01T01:09:12.000Z | 2022-03-30T23:25:22.000Z | www/src/Lib/browser/indexed_db.py | raspberrypieman/brython | 2cc23d1da6acda604d4a56b4c9d464eb7e374eda | [
"BSD-3-Clause"
] | 574 | 2015-01-02T01:36:10.000Z | 2022-03-26T10:18:48.000Z | class EventListener:
def __init__(self, events=[]):
self._events=events
def append(self, event):
self._events.append(event)
def fire(self, e):
for _event in self._events:
_event(e)
class IndexedDB:
    """Pythonic wrapper over the browser's IndexedDB entry point.

    Relies on the Brython runtime's global ``__BRYTHON__`` object, so it
    is only usable inside a browser.
    """
    def __init__(self):
        if not __BRYTHON__.has_indexedDB:
            # BUG FIX: the original had an unreachable `return` after this raise.
            raise NotImplementedError("Your browser doesn't support indexedDB")
        self._indexedDB = __BRYTHON__.indexedDB()
        self._db = None
        self._version = None

    def _onsuccess(self, event):
        # Cache the opened database handle for later transaction() calls.
        self._db = event.target.result

    def open(self, name, onsuccess, version=1.0, onerror=None,
            onupgradeneeded=None):
        """Open database *name*, chaining our success handler before the
        caller's.

        BUG FIX: a caller-supplied ``onerror`` is now honoured; the
        original unconditionally overwrote it with the printing default
        (the intended ``if onerror is None:`` guard was commented out).
        """
        self._version = version
        _result = self._indexedDB.open(name, version)
        _success = EventListener([self._onsuccess, onsuccess])
        _result.onsuccess = _success.fire
        _result.onupgradeneeded = onupgradeneeded
        if onerror is None:
            def onerror(e):
                print("onerror: %s:%s" % (e.type, e.target.result))
        def onblocked(e):
            print("blocked: %s:%s" % (e.type, e.result))
        _result.onerror = onerror
        _result.onblocked = onblocked

    def transaction(self, entities, mode='read'):
        """Start a transaction over *entities* and wrap it in a Transaction."""
        return Transaction(self._db.transaction(entities, mode))
class Transaction:
    """Thin wrapper around a native IDBTransaction object."""
    def __init__(self, transaction):
        self._transaction = transaction

    def objectStore(self, name):
        """Return the named store, wrapped in the ObjectStore helper."""
        native_store = self._transaction.objectStore(name)
        return ObjectStore(native_store)
class ObjectStore:
    """Wrapper around a native IDBObjectStore (Brython / browser only)."""
    def __init__(self, objectStore):
        self._objectStore=objectStore
        self._data=[]  # filled asynchronously by query(); read via fetchall()
    def clear(self, onsuccess=None, onerror=None):
        """Delete every record in the store, forwarding optional callbacks."""
        _result=self._objectStore.clear()
        if onsuccess is not None:
            _result.onsuccess=onsuccess
        if onerror is not None:
            _result.onerror=onerror
    def _helper(self, func, object, onsuccess=None, onerror=None):
        # Shared pattern: run a one-argument store request and attach the
        # optional success/error callbacks to the returned IDBRequest.
        _result=func(object)
        if onsuccess is not None:
            _result.onsuccess=onsuccess
        if onerror is not None:
            _result.onerror=onerror
    def put(self, obj, key=None, onsuccess=None, onerror=None):
        # Insert or update a record. NOTE(review): unlike _helper, the
        # callbacks are assigned even when None -- confirm the JS side
        # tolerates a None handler.
        _r = self._objectStore.put(obj, key)
        _r.onsuccess = onsuccess
        _r.onerror = onerror
    def add(self, obj, key, onsuccess=None, onerror=None):
        # Insert a new record under *key* (the native call fails if the
        # key already exists).
        _r = self._objectStore.add(obj, key)
        _r.onsuccess = onsuccess
        _r.onerror = onerror
        #self._helper(self._objectStore.add, object, onsuccess, onerror)
    def delete(self, index, onsuccess=None, onerror=None):
        # Remove the record stored under *index*.
        self._helper(self._objectStore.delete, index, onsuccess, onerror)
    def query(self, *args):
        # Asynchronously walk a cursor over the store, accumulating each
        # record's value into self._data.
        self._data=[]
        def onsuccess(event):
            cursor=event.target.result
            if cursor is not None:
                self._data.append(cursor.value)
                getattr(cursor,"continue")() # cursor.continue() is illegal Python syntax ("continue" is a keyword)
        self._objectStore.openCursor(args).onsuccess=onsuccess
    def fetchall(self):
        # Generator yielding the (single) result list collected by query().
        yield self._data
    def get(self, key, onsuccess=None, onerror=None):
        # Fetch one record by key, forwarding the optional callbacks.
        self._helper(self._objectStore.get, key, onsuccess, onerror)
| 28.11215 | 76 | 0.665891 | class EventListener:
def __init__(self, events=[]):
self._events=events
def append(self, event):
self._events.append(event)
def fire(self, e):
for _event in self._events:
_event(e)
class IndexedDB:
def __init__(self):
if not __BRYTHON__.has_indexedDB:
raise NotImplementedError("Your browser doesn't support indexedDB")
return
self._indexedDB=__BRYTHON__.indexedDB()
self._db=None
self._version=None
def _onsuccess(self, event):
self._db=event.target.result
def open(self, name, onsuccess, version=1.0, onerror=None,
onupgradeneeded=None):
self._version=version
_result=self._indexedDB.open(name, version)
_success=EventListener([self._onsuccess, onsuccess])
_result.onsuccess=_success.fire
_result.onupgradeneeded=onupgradeneeded
#if onerror is None:
def onerror(e):
print("onerror: %s:%s" % (e.type, e.target.result))
def onblocked(e):
print("blocked: %s:%s" % (e.type, e.result))
_result.onerror=onerror
_result.onblocked=onblocked
def transaction(self, entities, mode='read'):
return Transaction(self._db.transaction(entities, mode))
class Transaction:
def __init__(self, transaction):
self._transaction=transaction
def objectStore(self, name):
return ObjectStore(self._transaction.objectStore(name))
class ObjectStore:
def __init__(self, objectStore):
self._objectStore=objectStore
self._data=[]
def clear(self, onsuccess=None, onerror=None):
_result=self._objectStore.clear()
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def _helper(self, func, object, onsuccess=None, onerror=None):
_result=func(object)
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def put(self, obj, key=None, onsuccess=None, onerror=None):
_r = self._objectStore.put(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
def add(self, obj, key, onsuccess=None, onerror=None):
_r = self._objectStore.add(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
#self._helper(self._objectStore.add, object, onsuccess, onerror)
def delete(self, index, onsuccess=None, onerror=None):
self._helper(self._objectStore.delete, index, onsuccess, onerror)
def query(self, *args):
self._data=[]
def onsuccess(event):
cursor=event.target.result
if cursor is not None:
self._data.append(cursor.value)
getattr(cursor,"continue")() # cursor.continue() is illegal
self._objectStore.openCursor(args).onsuccess=onsuccess
def fetchall(self):
yield self._data
def get(self, key, onsuccess=None, onerror=None):
self._helper(self._objectStore.get, key, onsuccess, onerror)
| true | true |
1c2e26f6a8e0d4b0d26ae185c527fffe209c3a78 | 7,353 | py | Python | examples/run_value_movielens.py | Ulian7/DeepCTR | d8f519a722a4d6a4f1fe18e04af54cfd1369c9a5 | [
"Apache-2.0"
] | null | null | null | examples/run_value_movielens.py | Ulian7/DeepCTR | d8f519a722a4d6a4f1fe18e04af54cfd1369c9a5 | [
"Apache-2.0"
] | null | null | null | examples/run_value_movielens.py | Ulian7/DeepCTR | d8f519a722a4d6a4f1fe18e04af54cfd1369c9a5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import torch
import pdb
import sys
sys.path.append('../')
from sklearn.preprocessing import LabelEncoder
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from deepctr_torch.inputs import SparseFeat, VarLenSparseFeat, get_feature_names
from deepctr_torch.models import DCNMix,DeepFM,DCN,DIN
import argparse
# Command-line interface: a single positional argument selects which model
# to train (DCNMix / DeepFM / DIN / DCN).
parser = argparse.ArgumentParser(description = 'input the model name ')
parser.add_argument('model_name',type = str,help = 'The Model We Use')
# Parsed at import time so a missing model name fails fast.
args = parser.parse_args()
import warnings
# Silence library deprecation chatter during training runs.
warnings.filterwarnings('ignore')
def split(x):
    """Encode a '|'-separated genre string as a list of integer ids.

    Ids are issued on first sight starting at 1, stored in the
    module-level ``key2index`` dict; 0 is reserved as the padding value
    used later by pad_sequences.
    """
    tokens = x.split('|')
    for token in tokens:
        if token not in key2index:
            # 0 is a special "padding" value, so valid ids start at 1.
            key2index[token] = len(key2index) + 1
    return [key2index[token] for token in tokens]
if __name__ == "__main__":
    data = pd.read_csv("./modified_sample.txt")
    sparse_features = ["movie_id", "user_id",
                       "gender", "age", "occupation", "zip", ]
    target = ['rating']
    # 1.Label Encoding for sparse features,and process sequence features
    for feat in sparse_features:
        lbe = LabelEncoder()
        # pdb.set_trace()
        data[feat] = lbe.fit_transform(data[feat]) # map each raw column value to a dense integer id
    # preprocess the sequence feature
    key2index = {}
    genres_list = list(map(split, data['genres'].values))
    genres_length = np.array(list(map(len, genres_list)))
    max_len = max(genres_length)
    # Notice : padding=`post`
    genres_list = pad_sequences(genres_list, maxlen=max_len, padding='post', )
    # resulting shape: (num_samples, max_len), e.g. (146, 5)
    # 2.count #unique features for each sparse field and generate feature config for sequence feature
    fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique(), embedding_dim=4)
                              for feat in sparse_features]
    # fixlen_feature_columns holds one SparseFeat per sparse field, each storing that feature's metadata
    varlen_feature_columns = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=len(
        key2index) + 1, embedding_dim=4), maxlen=max_len, combiner='mean')] # Notice : value 0 is for padding for sequence input feature
    linear_feature_columns = fixlen_feature_columns #+ varlen_feature_columns
    dnn_feature_columns = fixlen_feature_columns #+ varlen_feature_columns
    feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
    # 3.generate input data for model
    model_input = {name: data[name] for name in sparse_features} # one input column per sparse feature
    model_input["genres"] = genres_list
    #----------------------------------------------------------------
    #version:0.1-------gender_mask
    #-------------------------------------------------------
    # Membership masks over the label-encoded demographic columns, for
    # slicing the data by age bucket / gender.
    age_1to18_mask = [1 if (i==0) | (i==1) else 0 for i in model_input['age'] ]
    age_25to35_mask = [1 if (i==2) | (i==3) else 0 for i in model_input['age'] ]
    age_45to56_mask = [1 if (i==4) | (i==5) | (i==6) else 0 for i in model_input['age'] ]
    age_mask = np.array([age_1to18_mask,age_25to35_mask,age_45to56_mask])
    female_mask = [1 if (i==0) else 0 for i in model_input['gender'] ]
    male_mask = [1 if (i==1) else 0 for i in model_input['gender'] ]
    gender_mask = np.array([female_mask,male_mask])
    # pdb.set_trace()
    # 4.Define Model,compile and train
    device = 'cpu'
    use_cuda = True
    if use_cuda and torch.cuda.is_available():
        print('cuda ready...')
        device = 'cuda:0'
    if args.model_name == 'DCNMix':
        print('This Training is on The DCNMix Model....')
        model = DCNMix(linear_feature_columns, dnn_feature_columns, task='binary', device=device)
    elif args.model_name == 'DeepFM':
        print('This Training is on the DeepFM Model...')
        model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary', device=device)
    elif args.model_name == 'DIN':
        # Sort so each user's watched movies appear in chronological order.
        data = data.sort_values(by = ['user_id','timestamp'],ascending=[True,True])
        import ipdb
        ipdb.set_trace()
        hist_num = np.array(data.value_counts('user_id')) # number of movies watched per user
        user_max_len = hist_num[0]
        user_index_list =[]
        uid = []
        ugender = []
        uoccupation = []
        uzip = []
        uage = []
        hist_iid = []
        this_user = []
        behavior_length = []
        end = False
        for i in range(len(data)): # iterate over every row
            if data.iloc[i]['user_id'] not in uid: # first row of a not-yet-seen user
                this_user.append(data.iloc[i]['movie_id'])
                end = True
                #print(data.iloc[i]['user_id'])
                user_index_list.append(i)
                uid.append(data.iloc[i]['user_id'])
                uoccupation.append(data.iloc[i]['occupation'])
                #uzip.append(data.iloc[i]['zip'])
                uage.append(data.iloc[i]['age'])
                ugender.append(data.iloc[i]['gender'])
            else:
                this_user.append(data.iloc[i]['movie_id'])
                end = False
            if(end):
                # NOTE(review): the history is flushed when a NEW user
                # starts, so each flushed list appears shifted by one user
                # relative to uid -- confirm the intended grouping.
                hist_iid.append(this_user)
                behavior_length.append(len(this_user))
                this_user = []
        hist_iid = pad_sequences(hist_iid,maxlen = user_max_len,padding = 'post')
        #-------------------construct X and Y as Input-----------------------------------------------
        # NOTE(review): `iid` and `igender` are never defined and
        # `uocupation` looks like a typo for `uoccupation`; as written this
        # branch raises NameError. `uzip` also stays empty because its
        # append above is commented out.
        feature_dict = {'user': uid, 'gender': ugender, 'movie_id': iid, '': igender,
                        'hist_movie_id': hist_iid, 'age':uage,'occupation':uocupation,
                        'zip':uzip,
                        "seq_length": behavior_length}
        DIN_feature_columns = fixlen_feature_columns + varlen_feature_columns
        # NOTE(review): `data` has no 'hist_movie_id' column, so nunique()
        # here would raise KeyError -- confirm the intended vocabulary size.
        DIN_feature_columns += [VarLenSparseFeat(SparseFeat('hist_movie_id', data['hist_movie_id'].nunique(), embedding_dim=4), 4, length_name="seq_length")]
        DIN_feature_columns += varlen_feature_columns
        DIN_behavior_feature_list = ['movie_id']
        print('This Training is on the DIN Model...')
        model = DIN(DIN_feature_columns, DIN_behavior_feature_list, device=device, att_weight_normalization=True)
    elif args.model_name == 'DCN':
        print('This Training is on the DCN Model...')
        model = DCN(linear_feature_columns, dnn_feature_columns, task='binary', device=device)
    model.compile("adam", "binary_crossentropy", metrics=['accuracy'], )
    # import pdb
    # pdb.set_trace()
    # The ages are represented by 0,...6:
    # id   real age
    # 0     1
    # 1     18
    # 2     25
    # 3     35
    # 4     45
    # 5     50
    # 6     56
    # x[3] is the age column
    if args.model_name == 'DIN':
        x = {name: feature_dict[name] for name in get_feature_names(DIN_feature_columns)}
        # NOTE(review): `x` is built but model.fit is still fed model_input
        # in both branches -- confirm whether DIN should train on `x`.
        history = model.fit(model_input, data[target].values, batch_size=256, epochs=10, verbose=2, validation_split=0.2)
    else:
        history = model.fit(model_input, data[target].values, batch_size=256, epochs=10, verbose=2, validation_split=0.2)
| 41.542373 | 188 | 0.604107 | import numpy as np
import pandas as pd
import torch
import pdb
import sys
sys.path.append('../')
from sklearn.preprocessing import LabelEncoder
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from deepctr_torch.inputs import SparseFeat, VarLenSparseFeat, get_feature_names
from deepctr_torch.models import DCNMix,DeepFM,DCN,DIN
import argparse
parser = argparse.ArgumentParser(description = 'input the model name ')
parser.add_argument('model_name',type = str,help = 'The Model We Use')
args = parser.parse_args()
import warnings
warnings.filterwarnings('ignore')
def split(x):
key_ans = x.split('|')
for key in key_ans:
if key not in key2index:
key2index[key] = len(key2index) + 1
return list(map(lambda x: key2index[x], key_ans))
if __name__ == "__main__":
data = pd.read_csv("./modified_sample.txt")
sparse_features = ["movie_id", "user_id",
"gender", "age", "occupation", "zip", ]
target = ['rating']
for feat in sparse_features:
lbe = LabelEncoder()
data[feat] = lbe.fit_transform(data[feat])
key2index = {}
genres_list = list(map(split, data['genres'].values))
genres_length = np.array(list(map(len, genres_list)))
max_len = max(genres_length)
genres_list = pad_sequences(genres_list, maxlen=max_len, padding='post', )
for feat in sparse_features]
varlen_feature_columns = [VarLenSparseFeat(SparseFeat('genres', vocabulary_size=len(
key2index) + 1, embedding_dim=4), maxlen=max_len, combiner='mean')]
linear_feature_columns = fixlen_feature_columns
dnn_feature_columns = fixlen_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
model_input = {name: data[name] for name in sparse_features}
model_input["genres"] = genres_list
age_1to18_mask = [1 if (i==0) | (i==1) else 0 for i in model_input['age'] ]
age_25to35_mask = [1 if (i==2) | (i==3) else 0 for i in model_input['age'] ]
age_45to56_mask = [1 if (i==4) | (i==5) | (i==6) else 0 for i in model_input['age'] ]
age_mask = np.array([age_1to18_mask,age_25to35_mask,age_45to56_mask])
female_mask = [1 if (i==0) else 0 for i in model_input['gender'] ]
male_mask = [1 if (i==1) else 0 for i in model_input['gender'] ]
gender_mask = np.array([female_mask,male_mask])
device = 'cpu'
use_cuda = True
if use_cuda and torch.cuda.is_available():
print('cuda ready...')
device = 'cuda:0'
if args.model_name == 'DCNMix':
print('This Training is on The DCNMix Model....')
model = DCNMix(linear_feature_columns, dnn_feature_columns, task='binary', device=device)
elif args.model_name == 'DeepFM':
print('This Training is on the DeepFM Model...')
model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary', device=device)
elif args.model_name == 'DIN':
data = data.sort_values(by = ['user_id','timestamp'],ascending=[True,True])
import ipdb
ipdb.set_trace()
hist_num = np.array(data.value_counts('user_id'))
user_max_len = hist_num[0]
user_index_list =[]
uid = []
ugender = []
uoccupation = []
uzip = []
uage = []
hist_iid = []
this_user = []
behavior_length = []
end = False
for i in range(len(data)):
if data.iloc[i]['user_id'] not in uid:
this_user.append(data.iloc[i]['movie_id'])
end = True
user_index_list.append(i)
uid.append(data.iloc[i]['user_id'])
uoccupation.append(data.iloc[i]['occupation'])
uage.append(data.iloc[i]['age'])
ugender.append(data.iloc[i]['gender'])
else:
this_user.append(data.iloc[i]['movie_id'])
end = False
if(end):
hist_iid.append(this_user)
behavior_length.append(len(this_user))
this_user = []
hist_iid = pad_sequences(hist_iid,maxlen = user_max_len,padding = 'post')
feature_dict = {'user': uid, 'gender': ugender, 'movie_id': iid, '': igender,
'hist_movie_id': hist_iid, 'age':uage,'occupation':uocupation,
'zip':uzip,
"seq_length": behavior_length}
DIN_feature_columns = fixlen_feature_columns + varlen_feature_columns
DIN_feature_columns += [VarLenSparseFeat(SparseFeat('hist_movie_id', data['hist_movie_id'].nunique(), embedding_dim=4), 4, length_name="seq_length")]
DIN_feature_columns += varlen_feature_columns
DIN_behavior_feature_list = ['movie_id']
print('This Training is on the DIN Model...')
model = DIN(DIN_feature_columns, DIN_behavior_feature_list, device=device, att_weight_normalization=True)
elif args.model_name == 'DCN':
print('This Training is on the DCN Model...')
model = DCN(linear_feature_columns, dnn_feature_columns, task='binary', device=device)
model.compile("adam", "binary_crossentropy", metrics=['accuracy'], )
if args.model_name == 'DIN':
x = {name: feature_dict[name] for name in get_feature_names(DIN_feature_columns)}
history = model.fit(model_input, data[target].values, batch_size=256, epochs=10, verbose=2, validation_split=0.2)
else:
history = model.fit(model_input, data[target].values, batch_size=256, epochs=10, verbose=2, validation_split=0.2)
| true | true |
1c2e27b4c45588a8f4eb9e6c487a361c90839bab | 1,597 | py | Python | tatk/policy/rule/camrest/rule.py | keshuichonglx/tatk | 7e8ad18ca98b105cb0168192bddf80b747067c1b | [
"Apache-2.0"
] | 2 | 2020-09-05T13:12:44.000Z | 2020-10-12T16:51:16.000Z | tatk/policy/rule/camrest/rule.py | keshuichonglx/tatk | 7e8ad18ca98b105cb0168192bddf80b747067c1b | [
"Apache-2.0"
] | null | null | null | tatk/policy/rule/camrest/rule.py | keshuichonglx/tatk | 7e8ad18ca98b105cb0168192bddf80b747067c1b | [
"Apache-2.0"
] | 1 | 2019-11-25T15:34:33.000Z | 2019-11-25T15:34:33.000Z | # -*- coding: utf-8 -*-
import torch
from tatk.policy.policy import Policy
from tatk.policy.rule.camrest.rule_based_camrest_bot import RuleBasedCamrestBot
from tatk.policy.rule.camrest.policy_agenda_camrest import UserPolicyAgendaCamrest
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Rule(Policy):
    """Rule-based Camrest dialog policy, acting as either system or user side."""

    def __init__(self, is_train=False, character='sys'):
        self.is_train = is_train
        self.character = character
        if character == 'sys':
            self.policy = RuleBasedCamrestBot()
        elif character == 'usr':
            self.policy = UserPolicyAgendaCamrest()
        else:
            raise NotImplementedError('unknown character {}'.format(character))

    def predict(self, state):
        """Predict a system action for the given dialog state.

        Args:
            state (dict): Dialog state. Please refer to util/state.py

        Returns:
            action: act of the form (act_type, {slot_name_1: value_1, ...})
        """
        return self.policy.predict(state)

    def init_session(self):
        """Reset the wrapped policy between dialog sessions."""
        self.policy.init_session()

    def is_terminated(self):
        # Only the user-side policy tracks dialog termination.
        return None if self.character == 'sys' else self.policy.is_terminated()

    def get_reward(self):
        # Only the user-side policy provides a reward signal.
        return None if self.character == 'sys' else self.policy.get_reward()

    def get_goal(self):
        # Not every underlying policy implementation exposes a goal.
        return self.policy.get_goal() if hasattr(self.policy, 'get_goal') else None
| 30.711538 | 111 | 0.620539 |
import torch
from tatk.policy.policy import Policy
from tatk.policy.rule.camrest.rule_based_camrest_bot import RuleBasedCamrestBot
from tatk.policy.rule.camrest.policy_agenda_camrest import UserPolicyAgendaCamrest
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Rule(Policy):
def __init__(self, is_train=False, character='sys'):
self.is_train = is_train
self.character = character
if character == 'sys':
self.policy = RuleBasedCamrestBot()
elif character == 'usr':
self.policy = UserPolicyAgendaCamrest()
else:
raise NotImplementedError('unknown character {}'.format(character))
def predict(self, state):
return self.policy.predict(state)
def init_session(self):
self.policy.init_session()
def is_terminated(self):
if self.character == 'sys':
return None
return self.policy.is_terminated()
def get_reward(self):
if self.character == 'sys':
return None
return self.policy.get_reward()
def get_goal(self):
if hasattr(self.policy, 'get_goal'):
return self.policy.get_goal()
return None
| true | true |
1c2e2827127ca09db1965ad1e596f372ea8f2ad7 | 2,056 | py | Python | tests/infra/test_utils.py | Keendata/impala | b25e250d321f329b98e017c648df75d052497963 | [
"Apache-2.0"
] | 1,523 | 2015-01-01T03:42:24.000Z | 2022-02-06T22:24:04.000Z | tests/infra/test_utils.py | xwzbupt/impala | 97dda2b27da99367f4d07699aa046b16cda16dd4 | [
"Apache-2.0"
] | 10 | 2015-01-09T06:46:05.000Z | 2022-03-29T21:57:57.000Z | tests/infra/test_utils.py | xwzbupt/impala | 97dda2b27da99367f4d07699aa046b16cda16dd4 | [
"Apache-2.0"
] | 647 | 2015-01-02T04:01:40.000Z | 2022-03-30T15:57:35.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This module contains tests for some of the tests/util code.
from tests.util.filesystem_utils import prepend_with_fs
from tests.util.parse_util import get_bytes_summary_stats_counter
def test_filesystem_utils():
  """Sanity-check prepend_with_fs(): an empty prefix is a no-op and the
  operation is idempotent."""
  path = "/fake-warehouse"
  # An empty filesystem prefix must return the path unchanged.
  assert prepend_with_fs("", path) == path
  fs = "fakeFs://bucket"
  # Prefixing once and prefixing an already-prefixed path agree.
  assert prepend_with_fs(fs, path) == fs + path
  assert prepend_with_fs(fs, prepend_with_fs(fs, path)) == fs + path
def test_get_bytes_summary_stats_counter():
  """Parse the summary-stats counter from a dummy runtime profile string."""
  runtime_profile = ("- ExampleCounter: (Avg: 8.00 KB (8192) ; "
                     "Min: 6.00 KB (6144) ; "
                     "Max: 10.00 KB (10240) ; "
                     "Number of samples: 4)")
  stats = get_bytes_summary_stats_counter("ExampleCounter", runtime_profile)
  assert len(stats) == 1
  counter = stats[0]
  # sum = avg * number of samples = 8192 * 4
  assert counter.sum == 32768
  assert counter.min_value == 6144
  assert counter.max_value == 10240
  assert counter.total_num_values == 4
| 41.959184 | 87 | 0.708658 |
from tests.util.filesystem_utils import prepend_with_fs
from tests.util.parse_util import get_bytes_summary_stats_counter
def test_filesystem_utils():
path = "/fake-warehouse"
assert prepend_with_fs("", path) == path
fs = "fakeFs://bucket"
path = "/fake-warehouse"
assert prepend_with_fs(fs, path) == fs + path
assert prepend_with_fs(fs, prepend_with_fs(fs, path)) == fs + path
def test_get_bytes_summary_stats_counter():
runtime_profile = "- ExampleCounter: (Avg: 8.00 KB (8192) ; " \
"Min: 6.00 KB (6144) ; " \
"Max: 10.00 KB (10240) ; " \
"Number of samples: 4)"
summary_stats = get_bytes_summary_stats_counter("ExampleCounter",
runtime_profile)
assert len(summary_stats) == 1
assert summary_stats[0].sum == 32768 and summary_stats[0].min_value == 6144 and \
summary_stats[0].max_value == 10240 and summary_stats[0].total_num_values == 4
| true | true |
1c2e2904a3aedd99529257cced819199ac3608c7 | 2,985 | py | Python | linalg/lu_decomp.py | lpierezan/cp_playground | 818d116391b762c1ca03be772a50bb997b7958a4 | [
"Apache-2.0"
] | null | null | null | linalg/lu_decomp.py | lpierezan/cp_playground | 818d116391b762c1ca03be772a50bb997b7958a4 | [
"Apache-2.0"
] | null | null | null | linalg/lu_decomp.py | lpierezan/cp_playground | 818d116391b762c1ca03be772a50bb997b7958a4 | [
"Apache-2.0"
] | null | null | null | import numpy as np
def lu_decomp(A):
    """LU decomposition with partial (row) pivoting.

    Args:
        A: square (n x n) array-like.

    Returns:
        (L, U, row_idx): after applying the row permutation ``row_idx``,
        ``L[row_idx]`` is unit lower triangular, ``U[row_idx]`` is upper
        triangular, and ``L[row_idx].dot(U[row_idx]) == A[row_idx]``.

    Columns whose best pivot is exactly 0 (rank-deficient input) are
    skipped instead of raising, so singular matrices still factorize.
    """
    n = len(A)
    # BUG FIX: work in floating point.  The original `A.copy()` kept an
    # integer dtype for integer input, so the in-place elimination update
    # below silently truncated fractions and produced a wrong result.
    U = np.array(A, dtype=float)
    L = np.eye(n)
    row_idx = list(range(n))
    for col in range(n):
        # Partial pivoting: pick the row (at or below `col`, in virtual
        # order) with the largest absolute entry in this column.
        best_row, best_elem = None, None
        for row in range(col, n):
            row_i = row_idx[row]
            elem = U[row_i][col]
            if best_row is None or abs(elem) > best_elem:
                best_row = row
                best_elem = abs(elem)
        if best_elem == 0:
            # Singular column: nothing to eliminate, move on.
            continue
        # Record the row swap inside L's permutation bookkeeping, then
        # swap the virtual row order itself.
        L[row_idx[col]][col] = 0
        L[row_idx[col]][best_row] = 1
        L[row_idx[best_row]][best_row] = 0
        L[row_idx[best_row]][col] = 1
        row_idx[col], row_idx[best_row] = row_idx[best_row], row_idx[col]
        best_row_i = row_idx[col]
        # Eliminate the entries below the pivot, storing multipliers in L.
        for row in range(col + 1, n):
            row_i = row_idx[row]
            coef = U[row_i][col] / U[best_row_i][col]
            L[row_i][col] = coef
            U[row_i] = U[row_i] - U[best_row_i] * coef
    return L, U, row_idx
def solve_lu(L_,U_,p,b_):
    """Solve A x = b given the factorization produced by ``lu_decomp``.

    Args:
        L_, U_: factors with rows still in original order; ``p`` is the
            row permutation that makes ``L_[p]`` unit lower triangular
            and ``U_[p]`` upper triangular.
        b_: right-hand side (original row order).

    Returns:
        x such that ``A @ x == b`` (checked via the internal asserts).

    Raises:
        Exception: 'no solution' when a zero pivot meets a non-zero
            residual (inconsistent system).
    """
    # Apply the pivoting permutation so both factors are triangular.
    L = L_[p]
    U = U_[p]
    b = b_[p]
    n = len(L)
    z = np.zeros(n)
    x = np.zeros(n)
    # Forward substitution for Lz = b.  No division is needed because
    # L[p] has a unit diagonal.
    # L[i,:i] * sum(z[:i,0] + z[i]) = b[i]
    for i in range(n):
        z[i] = b[i] - (L[i,:i] * z[:i]).sum()
    assert np.allclose(L.dot(z),b)
    # Back substitution for Ux = z.
    # U[i,i] * x[i] + sum(U[i,i+1:] * x[i+1:]) = z[i]
    for i in range(n-1,-1,-1):
        acc = (z[i] - (U[i,i+1:] * x[i+1:]).sum())
        if(np.isclose(U[i,i], 0)):
            if(np.isclose(acc,0)):
                # multiple solutions: pick the particular one with x[i] = 0
                x[i] = 0.0
            else:
                # no solution: a zero pivot cannot match a non-zero residual
                raise(Exception('no solution'))
        else:
            x[i] = acc / U[i,i]
    assert np.allclose(U.dot(x),z)
    return x
def test_lu():
    """Randomized self-check: permuted L.dot(U) must reproduce permuted A.

    Half of the trials deliberately make A rank-deficient by overwriting one
    row with a multiple of another, exercising the zero-pivot skip path in
    ``lu_decomp``.  (The old message 'Not singular.' was backwards: this
    branch *creates* a singular matrix whenever the two row indices differ.)
    """
    n_teste = 500
    np.random.seed(8)
    while(n_teste > 0):
        n_teste -= 1
        n = np.random.randint(1,30)
        A = np.random.random((n,n))
        if(np.random.random() > 0.5):
            # Replace a random row with a scaled copy of another random row;
            # a no-op only when both indices happen to coincide.
            print('Making singular.')
            A[np.random.randint(0,n)] = np.random.uniform(0,100) * A[np.random.randint(0,n)]
        L, U, row_idx = lu_decomp(A)
        # Apply the pivot permutation before comparing the product with A.
        L = L[row_idx]
        U = U[row_idx]
        A = A[row_idx]
        assert np.allclose(L.dot(U).ravel(), A.ravel())
    print('LU decomposition Ok!')
def test_solve():
    """Cross-check solve_lu against numpy.linalg.solve on random systems."""
    np.random.seed(8)
    for _trial in range(10):
        size = np.random.randint(1, 100)
        A = np.random.random((size, size))
        b = np.random.random(size)
        expected = np.linalg.solve(A, b)
        L, U, p = lu_decomp(A)
        got = solve_lu(L, U, p, b)
        # The solution must both satisfy the system and match numpy's answer.
        assert np.allclose(A.dot(got), b)
        assert np.allclose(got, expected)
    print('Solve with LU Ok!')
if __name__ == "__main__":
    # Run both randomized self-checks when invoked as a script.
    for check in (test_lu, test_solve):
        check()
| 23.139535 | 92 | 0.473032 | import numpy as np
def lu_decomp(A):
n = len(A)
U = A.copy()
L = np.eye(n)
row_idx = list(range(n))
for col in range(n):
best_row, best_elem = None, None
for row in range(col,n):
row_i = row_idx[row]
elem = U[row_i][col]
if best_row is None or abs(elem) > best_elem:
best_row = row
best_elem = abs(elem)
if best_elem == 0:
continue
L[row_idx[col]][col] = 0
L[row_idx[col]][best_row] = 1
L[row_idx[best_row]][best_row] = 0
L[row_idx[best_row]][col] = 1
row_idx[col], row_idx[best_row] = row_idx[best_row] , row_idx[col]
best_row_i = row_idx[col]
for row in range(col+1,n):
row_i = row_idx[row]
coef = U[row_i][col] / U[best_row_i][col]
L[row_i][col] = coef
U[row_i] = U[row_i] - U[best_row_i] * coef
return L, U, row_idx
def solve_lu(L_,U_,p,b_):
L = L_[p]
U = U_[p]
b = b_[p]
n = len(L)
z = np.zeros(n)
x = np.zeros(n)
for i in range(n):
z[i] = b[i] - (L[i,:i] * z[:i]).sum()
assert np.allclose(L.dot(z),b)
for i in range(n-1,-1,-1):
acc = (z[i] - (U[i,i+1:] * x[i+1:]).sum())
if(np.isclose(U[i,i], 0)):
if(np.isclose(acc,0)):
x[i] = 0.0
else:
raise(Exception('no solution'))
else:
x[i] = acc / U[i,i]
assert np.allclose(U.dot(x),z)
return x
def test_lu():
n_teste = 500
np.random.seed(8)
while(n_teste > 0):
n_teste -= 1
n = np.random.randint(1,30)
A = np.random.random((n,n))
if(np.random.random() > 0.5):
print('Not singular.')
A[np.random.randint(0,n)] = np.random.uniform(0,100) * A[np.random.randint(0,n)]
L, U, row_idx = lu_decomp(A)
L = L[row_idx]
U = U[row_idx]
A = A[row_idx]
assert np.allclose(L.dot(U).ravel(), A.ravel())
print('LU decomposition Ok!')
def test_solve():
np.random.seed(8)
n_teste = 10
while n_teste > 0:
n_teste -= 1
n = np.random.randint(1,100)
A = np.random.random((n,n))
b = np.random.random(n)
ans_correct = np.linalg.solve(A,b)
L,U,p = lu_decomp(A)
ans = solve_lu(L,U,p,b)
assert np.allclose(A.dot(ans), b)
assert np.allclose(ans, ans_correct)
print('Solve with LU Ok!')
if __name__ == "__main__":
test_lu()
test_solve()
| true | true |
1c2e29327dc4182eec73106e98a86215fcaad2e2 | 15,873 | py | Python | pdm/pep517/metadata.py | danieleades/pdm-pep517 | 129697f841c0f635465caf83332c75f5e30b0c6f | [
"MIT"
] | null | null | null | pdm/pep517/metadata.py | danieleades/pdm-pep517 | 129697f841c0f635465caf83332c75f5e30b0c6f | [
"MIT"
] | null | null | null | pdm/pep517/metadata.py | danieleades/pdm-pep517 | 129697f841c0f635465caf83332c75f5e30b0c6f | [
"MIT"
] | null | null | null | import glob
import os
import re
import warnings
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Type,
TypeVar,
Union,
)
from pdm.pep517._vendor import toml
from pdm.pep517._vendor.packaging.requirements import Requirement
from pdm.pep517._vendor.packaging.version import Version
from pdm.pep517.license import license_lookup
from pdm.pep517.scm import get_version_from_scm
from pdm.pep517.utils import (
cd,
ensure_pep440_req,
find_packages_iter,
merge_marker,
safe_name,
to_filename,
)
from pdm.pep517.validator import validate_pep621
T = TypeVar("T")
class ProjectError(ValueError):
    """Raised when pyproject.toml is missing, malformed, or inconsistent."""
class PDMDeprecatedWarning(Warning):
    """Warning category for deprecated PDM metadata usages."""
class MetaField(Generic[T]):
    """Descriptor that reads key ``name`` from the owner's ``_metadata`` dict.

    When ``fget`` is supplied it post-processes the raw value; it receives
    the ``Metadata`` instance and the raw value.  A missing key yields
    ``None``; class-level access yields the descriptor itself.
    """

    def __init__(
        self, name: str, fget: Optional[Callable[["Metadata", Any], T]] = None
    ) -> None:
        self.name = name
        self.fget = fget

    def __get__(self, instance: "Metadata", owner: Type["Metadata"]) -> Optional[T]:
        if instance is None:
            # Accessed on the class, not an instance: expose the descriptor.
            return self
        try:
            raw = instance._metadata[self.name]
        except KeyError:
            return None
        return raw if self.fget is None else self.fget(instance, raw)
def _make_version_collections(python_versions: List[str]) -> Dict[str, List[Version]]:
    """Group parsed versions by ``"major"`` (when minor is 0) or ``"major.minor"``."""
    grouped: Dict[str, List[Version]] = {}
    for spec in python_versions:
        parsed = Version(spec)
        if parsed.minor == 0:
            bucket = str(parsed.major)
        else:
            bucket = f"{parsed.major}.{parsed.minor}"
        grouped.setdefault(bucket, []).append(parsed)
    return grouped
class Metadata:
    """A class that holds all metadata that Python packaging requires.

    Parses a PEP 621 ``[project]`` table (plus ``[tool.pdm]`` settings)
    from a pyproject.toml and exposes the fields through ``MetaField``
    descriptors and properties.
    """
    # Encoding used when reading readme/license files unless overridden.
    DEFAULT_ENCODING = "utf-8"
    # Readme content types accepted for the long-description metadata.
    SUPPORTED_CONTENT_TYPES = ("text/markdown", "text/x-rst", "text/plain")
    def __init__(self, filepath: Union[str, Path], parse: bool = True) -> None:
        """Load metadata from *filepath* (a pyproject.toml); skip I/O if not *parse*."""
        self.filepath = Path(filepath).absolute()
        self._tool_settings: Dict[str, Any] = {}
        self._metadata: Dict[str, Any] = {}
        if parse:
            self._read_pyproject()
    def _read_pyproject(self) -> None:
        """Populate ``_metadata`` / ``_tool_settings`` from the TOML file.

        Raises ProjectError if the file is absent, unparsable, or lacks
        a [project] table.
        """
        try:
            data = toml.loads(self.filepath.read_text(encoding="utf-8"))
        except FileNotFoundError:
            raise ProjectError("pyproject.toml does not exist.")
        except toml.TomlDecodeError:
            raise ProjectError("The project's pyproject.toml is not valid.")
        else:
            if "tool" in data and "pdm" in data["tool"]:
                self._tool_settings = data["tool"]["pdm"]
            if "project" in data:
                self._metadata = data["project"]
            else:
                raise ProjectError("No [project] config in pyproject.toml")
    def validate(self, raising: bool = False) -> bool:
        """Validate the [project] table against PEP 621; raise on failure if *raising*."""
        return validate_pep621(self._metadata, raising)
    name: MetaField[str] = MetaField("name")
    @property
    def version(self) -> Optional[str]:
        """Resolve the project version, static or dynamically filled.

        Resolution order: a static string in [project] wins; otherwise the
        dynamic table ([tool.pdm].version, or the deprecated dict form in
        [project]) is consulted, requiring 'version' to be listed in
        [project].dynamic.  Dynamic sources: a ``from`` file containing a
        ``__version__ = "..."`` assignment, or ``use_scm`` (SCM tags).
        """
        static_version = self._metadata.get("version")
        if isinstance(static_version, str):
            return static_version
        dynamic_version = self._tool_settings.get("version")
        if isinstance(static_version, dict):
            # Legacy: dynamic version table placed directly in [project].
            warnings.warn(
                "`version` in [project] no longer supports dynamic filling. "
                "Move it to [tool.pdm] or change it to static string.\n"
                "It will raise an error in the next minor release.",
                PDMDeprecatedWarning,
                stacklevel=2,
            )
            if not dynamic_version:
                dynamic_version = static_version
        if not dynamic_version:
            return None
        if not self.dynamic or "version" not in self.dynamic:
            raise ProjectError(
                "'version' missing from 'dynamic' fields (to let pdm-pep517 fill it)"
            )
        version_source = dynamic_version.get("from")
        if version_source:
            # Read __version__ straight out of the referenced source file.
            with self.filepath.parent.joinpath(version_source).open(
                encoding="utf-8"
            ) as fp:
                return re.findall(
                    r"^__version__\s*=\s*[\"'](.+?)[\"']\s*$", fp.read(), re.M
                )[0]
        elif dynamic_version.get("use_scm", False):
            return get_version_from_scm(self.filepath.parent)
        else:
            return None
    description: MetaField[str] = MetaField("description")
    def _get_readme_file(self, value: Union[Mapping[str, str], str]) -> str:
        """Return the readme file path ("" when only inline text is given)."""
        if isinstance(value, str):
            return value
        return value.get("file", "")
    def _get_readme_content(self, value: Union[Mapping[str, str], str]) -> str:
        """Return the readme text, reading from disk when a file is referenced."""
        if isinstance(value, str):
            return Path(value).read_text(encoding=self.DEFAULT_ENCODING)
        if "file" in value and "text" in value:
            raise ProjectError(
                "readme table shouldn't specify both 'file' "
                "and 'text' at the same time"
            )
        if "text" in value:
            return value["text"]
        file_path = value.get("file", "")
        encoding = value.get("charset", self.DEFAULT_ENCODING)
        return Path(file_path).read_text(encoding=encoding)
    def _get_content_type(self, value: Union[Mapping[str, str], str]) -> str:
        """Derive the readme content type from a suffix or explicit table value."""
        if isinstance(value, str):
            if value.lower().endswith(".md"):
                return "text/markdown"
            elif value.lower().endswith(".rst"):
                return "text/x-rst"
            raise ProjectError(f"Unsupported readme suffix: {value}")
        content_type = value.get("content-type")
        if not content_type:
            raise ProjectError("'content-type' is missing in the readme table")
        if content_type not in self.SUPPORTED_CONTENT_TYPES:
            raise ProjectError(f"Unsupported readme content-type: {content_type}")
        return content_type
    readme: MetaField[str] = MetaField("readme", _get_readme_file)
    long_description: MetaField[str] = MetaField("readme", _get_readme_content)
    long_description_content_type: MetaField[str] = MetaField(
        "readme", _get_content_type
    )
    def _get_license(self, value: Union[Mapping[str, str], str]) -> str:
        """Return the license text ("" for a bare identifier string)."""
        if isinstance(value, str):
            return ""
        if "file" in value and "text" in value:
            raise ProjectError(
                "license table shouldn't specify both 'file' "
                "and 'text' at the same time"
            )
        return (
            Path(value["file"]).read_text(encoding=self.DEFAULT_ENCODING)
            if "file" in value
            else value.get("text", "")
        )
    def _get_license_type(self, value: Union[Mapping[str, str], str]) -> str:
        """Return the license identifier, or "UNKNOWN" for unrecognized text."""
        if isinstance(value, str):
            return value
        if value.get("text", "") in license_lookup:
            return value["text"]
        return "UNKNOWN"
    license: MetaField[str] = MetaField("license", _get_license)
    license_type: MetaField[str] = MetaField("license", _get_license_type)
    def _get_name(self, value: Iterable[Mapping[str, str]]) -> str:
        """Join the names of entries that have a name but no email address."""
        result = []
        for item in value:
            if "email" not in item and "name" in item:
                result.append(item["name"])
        return ",".join(result)
    def _get_email(self, value: Iterable[Mapping[str, str]]) -> str:
        """Join emails, formatted as "Name <email>" when a name accompanies one."""
        result = []
        for item in value:
            if "email" not in item:
                continue
            email = (
                item["email"]
                if "name" not in item
                else "{name} <{email}>".format(**item)
            )
            result.append(email)
        return ",".join(result)
    author: MetaField[str] = MetaField("authors", _get_name)
    author_email: MetaField[str] = MetaField("authors", _get_email)
    maintainer: MetaField[str] = MetaField("maintainers", _get_name)
    maintainer_email: MetaField[str] = MetaField("maintainers", _get_email)
    @property
    def classifiers(self) -> List[str]:
        """Return the sorted, de-duplicated trove classifiers."""
        classifers = set(self._metadata.get("classifiers", []))
        if self.dynamic and "classifiers" in self.dynamic:
            # Dynamic classifiers are no longer filled in; only warn here.
            warnings.warn(
                "`classifiers` no longer supports dynamic filling, "
                "please remove it from `dynamic` fields and manually "
                "supply all the classifiers",
                PDMDeprecatedWarning,
                stacklevel=2,
            )
        return sorted(classifers)
    keywords: MetaField[str] = MetaField("keywords")
    project_urls: MetaField[Dict[str, str]] = MetaField("urls")
    # Deprecate legacy metadata location: the properties below still accept
    # values from [project] for backward compatibility, preferring them over
    # the [tool.pdm] location.
    @property
    def includes(self) -> List[str]:
        """File/package include patterns, from [project] or [tool.pdm]."""
        if "includes" in self._metadata:
            return self._metadata["includes"]
        elif "includes" in self._tool_settings:
            return self._tool_settings["includes"]
        return []
    @property
    def source_includes(self) -> List[str]:
        """Extra patterns included only in sdists ([tool.pdm] only)."""
        return self._tool_settings.get("source-includes", [])
    @property
    def excludes(self) -> List[str]:
        """File/package exclude patterns, from [project] or [tool.pdm]."""
        if "excludes" in self._metadata:
            return self._metadata["excludes"]
        elif "excludes" in self._tool_settings:
            return self._tool_settings["excludes"]
        return []
    @property
    def build(self) -> Optional[str]:
        """Optional build script path, from [project] or [tool.pdm]."""
        if "build" in self._metadata:
            return self._metadata["build"]
        elif "build" in self._tool_settings:
            return self._tool_settings["build"]
        return None
    @property
    def package_dir(self) -> str:
        """The directory that will be searched for packages.

        Falls back to "src" when a src/ directory exists next to
        pyproject.toml, else "" (the project root).
        """
        if "package-dir" in self._metadata:
            return self._metadata["package-dir"]
        elif "package-dir" in self._tool_settings:
            return self._tool_settings["package-dir"]
        elif self.filepath.parent.joinpath("src").is_dir():
            return "src"
        return ""
    @property
    def editable_backend(self) -> str:
        """Currently only two backends are supported:
        - editables: Proxy modules via editables(default)
        - path: the legacy .pth file method
        """
        return self._tool_settings.get("editable-backend", "editables")
    def _convert_dependencies(self, deps: List[str]) -> List[str]:
        """Normalize requirement strings to PEP 440 form, dropping empties."""
        return list(filter(None, map(ensure_pep440_req, deps)))
    def _convert_optional_dependencies(
        self, deps: Mapping[str, List[str]]
    ) -> Dict[str, List[str]]:
        """Normalize every extras group with ``_convert_dependencies``."""
        return {k: self._convert_dependencies(deps[k]) for k in deps}
    dependencies: MetaField[List[str]] = MetaField(
        "dependencies", _convert_dependencies
    )
    optional_dependencies: MetaField[Dict[str, List[str]]] = MetaField(
        "optional-dependencies", _convert_optional_dependencies
    )
    dynamic: MetaField[List[str]] = MetaField("dynamic")
    @property
    def project_name(self) -> Optional[str]:
        """The PEP 503 normalized-safe project name, or None when unset."""
        if self.name is None:
            return None
        return safe_name(self.name)
    @property
    def project_filename(self) -> str:
        """Filename-safe project name ("UNKNOWN" when the name is unset)."""
        if self.name is None:
            return "UNKNOWN"
        return to_filename(self.project_name)
    @property
    def requires_extra(self) -> Dict[str, List[str]]:
        """For PKG-INFO metadata: extras requirements with 'extra == ...' markers merged in."""
        if not self.optional_dependencies:
            return {}
        result: Dict[str, List[str]] = {}
        for name, reqs in self.optional_dependencies.items():
            current = result[name] = []
            for r in reqs:
                parsed = Requirement(r)
                merge_marker(parsed, f"extra == {name!r}")
                current.append(str(parsed))
        return result
    @property
    def requires_python(self) -> str:
        """requires-python specifier; "*" (any) is normalized to ""."""
        result = self._metadata.get("requires-python", "")
        return "" if result == "*" else result
    @property
    def entry_points(self) -> Dict[str, List[str]]:
        """Collect console/gui scripts and plugin entry points as setuptools lines."""
        result = {}
        settings = self._metadata
        if "scripts" in settings:
            result["console_scripts"] = [
                f"{key} = {value}" for key, value in settings["scripts"].items()
            ]
        if "gui-scripts" in settings:
            result["gui_scripts"] = [
                f"{key} = {value}" for key, value in settings["gui-scripts"].items()
            ]
        if "entry-points" in settings:
            for plugin, value in settings["entry-points"].items():
                if plugin in ("console_scripts", "gui_scripts"):
                    # NOTE(review): the doubled quote after {plugin} below
                    # looks like a typo in the error message — confirm.
                    raise ProjectError(
                        f"'project.entry-points.{plugin}'' should be defined "
                        f"in 'project.{plugin.replace('_', '-')}'"
                    )
                result[plugin] = [f"{k} = {v}" for k, v in value.items()]
        return result
    def convert_package_paths(self) -> Dict[str, Union[List, Dict]]:
        """Return a {package_dir, packages, package_data, exclude_package_data} dict."""
        packages = []
        py_modules = []
        package_data = {"": ["*"]}
        exclude_package_data: Dict[str, List[str]] = {}
        with cd(self.filepath.parent.as_posix()):
            src_dir = Path(self.package_dir or ".")
            if not self.includes:
                # No explicit includes: auto-discover packages (minus tests),
                # falling back to top-level modules when none are found.
                packages = list(
                    find_packages_iter(
                        self.package_dir or ".",
                        exclude=["tests", "tests.*"],
                        src=src_dir,
                    )
                )
                if not packages:
                    py_modules = [path.name[:-3] for path in src_dir.glob("*.py")]
            else:
                packages_set = set()
                includes = self.includes[:]
                for include in includes[:]:
                    if include.replace("\\", "/").endswith("/*"):
                        include = include[:-2]
                    # Plain directory includes become package roots.
                    if "*" not in include and os.path.isdir(include):
                        dir_name = include.rstrip("/\\")
                        temp = list(
                            find_packages_iter(dir_name, src=self.package_dir or ".")
                        )
                        if os.path.isfile(os.path.join(dir_name, "__init__.py")):
                            temp.insert(0, dir_name)
                        packages_set.update(temp)
                        includes.remove(include)
                packages[:] = list(packages_set)
                # Remaining (glob) includes contribute modules or package data.
                for include in includes:
                    for path in glob.glob(include, recursive=True):
                        if "/" not in path.lstrip("./") and path.endswith(".py"):
                            # Only include top level py modules
                            py_modules.append(path.lstrip("./")[:-3])
                    if include.endswith(".py"):
                        continue
                    for package in packages:
                        relpath = os.path.relpath(include, package)
                        if not relpath.startswith(".."):
                            package_data.setdefault(package, []).append(relpath)
            # Map excludes to the packages they fall under.
            for exclude in self.excludes or []:
                for package in packages:
                    relpath = os.path.relpath(exclude, package)
                    if not relpath.startswith(".."):
                        exclude_package_data.setdefault(package, []).append(relpath)
        if packages and py_modules:
            raise ProjectError(
                "Can't specify packages and py_modules at the same time."
            )
        return {
            "package_dir": {"": self.package_dir} if self.package_dir else {},
            "packages": packages,
            "py_modules": py_modules,
            "package_data": package_data,
            "exclude_package_data": exclude_package_data,
        }
| 36.489655 | 88 | 0.564544 | import glob
import os
import re
import warnings
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Type,
TypeVar,
Union,
)
from pdm.pep517._vendor import toml
from pdm.pep517._vendor.packaging.requirements import Requirement
from pdm.pep517._vendor.packaging.version import Version
from pdm.pep517.license import license_lookup
from pdm.pep517.scm import get_version_from_scm
from pdm.pep517.utils import (
cd,
ensure_pep440_req,
find_packages_iter,
merge_marker,
safe_name,
to_filename,
)
from pdm.pep517.validator import validate_pep621
T = TypeVar("T")
class ProjectError(ValueError):
pass
class PDMDeprecatedWarning(Warning):
pass
class MetaField(Generic[T]):
def __init__(
self, name: str, fget: Optional[Callable[["Metadata", Any], T]] = None
) -> None:
self.name = name
self.fget = fget
def __get__(self, instance: "Metadata", owner: Type["Metadata"]) -> Optional[T]:
if instance is None:
return self
try:
rv = instance._metadata[self.name]
except KeyError:
return None
if self.fget is not None:
rv = self.fget(instance, rv)
return rv
def _make_version_collections(python_versions: List[str]) -> Dict[str, List[Version]]:
rv: Dict[str, List[Version]] = {}
for raw in python_versions:
version = Version(raw)
if version.minor == 0:
key = str(version.major)
else:
key = "{0.major}.{0.minor}".format(version)
rv.setdefault(key, []).append(version)
return rv
class Metadata:
DEFAULT_ENCODING = "utf-8"
SUPPORTED_CONTENT_TYPES = ("text/markdown", "text/x-rst", "text/plain")
def __init__(self, filepath: Union[str, Path], parse: bool = True) -> None:
self.filepath = Path(filepath).absolute()
self._tool_settings: Dict[str, Any] = {}
self._metadata: Dict[str, Any] = {}
if parse:
self._read_pyproject()
def _read_pyproject(self) -> None:
try:
data = toml.loads(self.filepath.read_text(encoding="utf-8"))
except FileNotFoundError:
raise ProjectError("pyproject.toml does not exist.")
except toml.TomlDecodeError:
raise ProjectError("The project's pyproject.toml is not valid.")
else:
if "tool" in data and "pdm" in data["tool"]:
self._tool_settings = data["tool"]["pdm"]
if "project" in data:
self._metadata = data["project"]
else:
raise ProjectError("No [project] config in pyproject.toml")
def validate(self, raising: bool = False) -> bool:
return validate_pep621(self._metadata, raising)
name: MetaField[str] = MetaField("name")
@property
def version(self) -> Optional[str]:
static_version = self._metadata.get("version")
if isinstance(static_version, str):
return static_version
dynamic_version = self._tool_settings.get("version")
if isinstance(static_version, dict):
warnings.warn(
"`version` in [project] no longer supports dynamic filling. "
"Move it to [tool.pdm] or change it to static string.\n"
"It will raise an error in the next minor release.",
PDMDeprecatedWarning,
stacklevel=2,
)
if not dynamic_version:
dynamic_version = static_version
if not dynamic_version:
return None
if not self.dynamic or "version" not in self.dynamic:
raise ProjectError(
"'version' missing from 'dynamic' fields (to let pdm-pep517 fill it)"
)
version_source = dynamic_version.get("from")
if version_source:
with self.filepath.parent.joinpath(version_source).open(
encoding="utf-8"
) as fp:
return re.findall(
r"^__version__\s*=\s*[\"'](.+?)[\"']\s*$", fp.read(), re.M
)[0]
elif dynamic_version.get("use_scm", False):
return get_version_from_scm(self.filepath.parent)
else:
return None
description: MetaField[str] = MetaField("description")
def _get_readme_file(self, value: Union[Mapping[str, str], str]) -> str:
if isinstance(value, str):
return value
return value.get("file", "")
def _get_readme_content(self, value: Union[Mapping[str, str], str]) -> str:
if isinstance(value, str):
return Path(value).read_text(encoding=self.DEFAULT_ENCODING)
if "file" in value and "text" in value:
raise ProjectError(
"readme table shouldn't specify both 'file' "
"and 'text' at the same time"
)
if "text" in value:
return value["text"]
file_path = value.get("file", "")
encoding = value.get("charset", self.DEFAULT_ENCODING)
return Path(file_path).read_text(encoding=encoding)
def _get_content_type(self, value: Union[Mapping[str, str], str]) -> str:
if isinstance(value, str):
if value.lower().endswith(".md"):
return "text/markdown"
elif value.lower().endswith(".rst"):
return "text/x-rst"
raise ProjectError(f"Unsupported readme suffix: {value}")
content_type = value.get("content-type")
if not content_type:
raise ProjectError("'content-type' is missing in the readme table")
if content_type not in self.SUPPORTED_CONTENT_TYPES:
raise ProjectError(f"Unsupported readme content-type: {content_type}")
return content_type
readme: MetaField[str] = MetaField("readme", _get_readme_file)
long_description: MetaField[str] = MetaField("readme", _get_readme_content)
long_description_content_type: MetaField[str] = MetaField(
"readme", _get_content_type
)
def _get_license(self, value: Union[Mapping[str, str], str]) -> str:
if isinstance(value, str):
return ""
if "file" in value and "text" in value:
raise ProjectError(
"license table shouldn't specify both 'file' "
"and 'text' at the same time"
)
return (
Path(value["file"]).read_text(encoding=self.DEFAULT_ENCODING)
if "file" in value
else value.get("text", "")
)
def _get_license_type(self, value: Union[Mapping[str, str], str]) -> str:
if isinstance(value, str):
return value
if value.get("text", "") in license_lookup:
return value["text"]
return "UNKNOWN"
license: MetaField[str] = MetaField("license", _get_license)
license_type: MetaField[str] = MetaField("license", _get_license_type)
def _get_name(self, value: Iterable[Mapping[str, str]]) -> str:
result = []
for item in value:
if "email" not in item and "name" in item:
result.append(item["name"])
return ",".join(result)
def _get_email(self, value: Iterable[Mapping[str, str]]) -> str:
result = []
for item in value:
if "email" not in item:
continue
email = (
item["email"]
if "name" not in item
else "{name} <{email}>".format(**item)
)
result.append(email)
return ",".join(result)
author: MetaField[str] = MetaField("authors", _get_name)
author_email: MetaField[str] = MetaField("authors", _get_email)
maintainer: MetaField[str] = MetaField("maintainers", _get_name)
maintainer_email: MetaField[str] = MetaField("maintainers", _get_email)
@property
def classifiers(self) -> List[str]:
classifers = set(self._metadata.get("classifiers", []))
if self.dynamic and "classifiers" in self.dynamic:
warnings.warn(
"`classifiers` no longer supports dynamic filling, "
"please remove it from `dynamic` fields and manually "
"supply all the classifiers",
PDMDeprecatedWarning,
stacklevel=2,
)
return sorted(classifers)
keywords: MetaField[str] = MetaField("keywords")
project_urls: MetaField[Dict[str, str]] = MetaField("urls")
# Deprecate legacy metadata location
@property
def includes(self) -> List[str]:
if "includes" in self._metadata:
return self._metadata["includes"]
elif "includes" in self._tool_settings:
return self._tool_settings["includes"]
return []
@property
def source_includes(self) -> List[str]:
return self._tool_settings.get("source-includes", [])
@property
def excludes(self) -> List[str]:
if "excludes" in self._metadata:
return self._metadata["excludes"]
elif "excludes" in self._tool_settings:
return self._tool_settings["excludes"]
return []
@property
def build(self) -> Optional[str]:
if "build" in self._metadata:
return self._metadata["build"]
elif "build" in self._tool_settings:
return self._tool_settings["build"]
return None
@property
def package_dir(self) -> str:
if "package-dir" in self._metadata:
return self._metadata["package-dir"]
elif "package-dir" in self._tool_settings:
return self._tool_settings["package-dir"]
elif self.filepath.parent.joinpath("src").is_dir():
return "src"
return ""
@property
def editable_backend(self) -> str:
return self._tool_settings.get("editable-backend", "editables")
def _convert_dependencies(self, deps: List[str]) -> List[str]:
return list(filter(None, map(ensure_pep440_req, deps)))
def _convert_optional_dependencies(
self, deps: Mapping[str, List[str]]
) -> Dict[str, List[str]]:
return {k: self._convert_dependencies(deps[k]) for k in deps}
dependencies: MetaField[List[str]] = MetaField(
"dependencies", _convert_dependencies
)
optional_dependencies: MetaField[Dict[str, List[str]]] = MetaField(
"optional-dependencies", _convert_optional_dependencies
)
dynamic: MetaField[List[str]] = MetaField("dynamic")
@property
def project_name(self) -> Optional[str]:
if self.name is None:
return None
return safe_name(self.name)
@property
def project_filename(self) -> str:
if self.name is None:
return "UNKNOWN"
return to_filename(self.project_name)
@property
def requires_extra(self) -> Dict[str, List[str]]:
if not self.optional_dependencies:
return {}
result: Dict[str, List[str]] = {}
for name, reqs in self.optional_dependencies.items():
current = result[name] = []
for r in reqs:
parsed = Requirement(r)
merge_marker(parsed, f"extra == {name!r}")
current.append(str(parsed))
return result
@property
def requires_python(self) -> str:
result = self._metadata.get("requires-python", "")
return "" if result == "*" else result
@property
def entry_points(self) -> Dict[str, List[str]]:
result = {}
settings = self._metadata
if "scripts" in settings:
result["console_scripts"] = [
f"{key} = {value}" for key, value in settings["scripts"].items()
]
if "gui-scripts" in settings:
result["gui_scripts"] = [
f"{key} = {value}" for key, value in settings["gui-scripts"].items()
]
if "entry-points" in settings:
for plugin, value in settings["entry-points"].items():
if plugin in ("console_scripts", "gui_scripts"):
raise ProjectError(
f"'project.entry-points.{plugin}'' should be defined "
f"in 'project.{plugin.replace('_', '-')}'"
)
result[plugin] = [f"{k} = {v}" for k, v in value.items()]
return result
def convert_package_paths(self) -> Dict[str, Union[List, Dict]]:
packages = []
py_modules = []
package_data = {"": ["*"]}
exclude_package_data: Dict[str, List[str]] = {}
with cd(self.filepath.parent.as_posix()):
src_dir = Path(self.package_dir or ".")
if not self.includes:
packages = list(
find_packages_iter(
self.package_dir or ".",
exclude=["tests", "tests.*"],
src=src_dir,
)
)
if not packages:
py_modules = [path.name[:-3] for path in src_dir.glob("*.py")]
else:
packages_set = set()
includes = self.includes[:]
for include in includes[:]:
if include.replace("\\", "/").endswith("/*"):
include = include[:-2]
if "*" not in include and os.path.isdir(include):
dir_name = include.rstrip("/\\")
temp = list(
find_packages_iter(dir_name, src=self.package_dir or ".")
)
if os.path.isfile(os.path.join(dir_name, "__init__.py")):
temp.insert(0, dir_name)
packages_set.update(temp)
includes.remove(include)
packages[:] = list(packages_set)
for include in includes:
for path in glob.glob(include, recursive=True):
if "/" not in path.lstrip("./") and path.endswith(".py"):
py_modules.append(path.lstrip("./")[:-3])
if include.endswith(".py"):
continue
for package in packages:
relpath = os.path.relpath(include, package)
if not relpath.startswith(".."):
package_data.setdefault(package, []).append(relpath)
for exclude in self.excludes or []:
for package in packages:
relpath = os.path.relpath(exclude, package)
if not relpath.startswith(".."):
exclude_package_data.setdefault(package, []).append(relpath)
if packages and py_modules:
raise ProjectError(
"Can't specify packages and py_modules at the same time."
)
return {
"package_dir": {"": self.package_dir} if self.package_dir else {},
"packages": packages,
"py_modules": py_modules,
"package_data": package_data,
"exclude_package_data": exclude_package_data,
}
| true | true |
1c2e296046eb40e47029a156980229e65ae057e3 | 23,662 | py | Python | featuretools/computational_backends/pandas_backend.py | kunalvats/featuretools | 25d8a36b7d636546161122095f5d6ca793a0b974 | [
"BSD-3-Clause"
] | 1 | 2019-06-06T15:16:26.000Z | 2019-06-06T15:16:26.000Z | featuretools/computational_backends/pandas_backend.py | kunalvats/featuretools | 25d8a36b7d636546161122095f5d6ca793a0b974 | [
"BSD-3-Clause"
] | null | null | null | featuretools/computational_backends/pandas_backend.py | kunalvats/featuretools | 25d8a36b7d636546161122095f5d6ca793a0b974 | [
"BSD-3-Clause"
] | null | null | null | import cProfile
import logging
import os
import pstats
import sys
import warnings
from datetime import datetime
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from future import standard_library
from .base_backend import ComputationalBackend
from .feature_tree import FeatureTree
from featuretools import variable_types
from featuretools.entityset.relationship import Relationship
from featuretools.exceptions import UnknownFeature
from featuretools.primitives import (
AggregationPrimitive,
DirectFeature,
IdentityFeature,
TransformPrimitive
)
from featuretools.utils.gen_utils import make_tqdm_iterator
# Python 2/3 compatibility shim (six/future era codebase).
standard_library.install_aliases()
# NOTE(review): warnings are silenced module-wide here — RankWarning is
# numpy's polynomial-fit warning; confirm RuntimeWarning suppression is
# still intentional rather than masking real issues.
warnings.simplefilter('ignore', np.RankWarning)
warnings.simplefilter("ignore", category=RuntimeWarning)
logger = logging.getLogger('featuretools.computational_backend')
# Home directory, used as the default root for on-disk artifacts.
ROOT_DIR = os.path.expanduser("~")
class PandasBackend(ComputationalBackend):
    def __init__(self, entityset, features):
        """Build a pandas computation backend for *features* over *entityset*.

        All features must be defined on a single target entity so results
        share one index; a FeatureTree orders their computation.
        """
        assert len(set(f.entity.id for f in features)) == 1, \
            "Features must all be defined on the same entity"
        self.entityset = entityset
        # Entity id every requested feature is defined on.
        self.target_eid = features[0].entity.id
        self.features = features
        self.feature_tree = FeatureTree(entityset, features)
    def __sizeof__(self):
        # The backend's memory footprint is reported as that of the
        # entityset it wraps (its dominant component).
        return self.entityset.__sizeof__()
def calculate_all_features(self, instance_ids, time_last,
training_window=None, profile=False,
precalculated_features=None, ignored=None,
verbose=False):
"""
Given a list of instance ids and features with a shared time window,
generate and return a mapping of instance -> feature values.
Args:
instance_ids (list): List of instance id for which to build features.
time_last (pd.Timestamp): Last allowed time. Data from exactly this
time not allowed.
training_window (Timedelta, optional): Data older than
time_last by more than this will be ignored.
profile (bool): Enable profiler if True.
verbose (bool): Print output progress if True.
Returns:
pd.DataFrame : Pandas DataFrame of calculated feature values.
Indexed by instance_ids. Columns in same order as features
passed in.
"""
assert len(instance_ids) > 0, "0 instance ids provided"
self.instance_ids = instance_ids
self.time_last = time_last
if self.time_last is None:
self.time_last = datetime.now()
# For debugging
if profile:
pr = cProfile.Profile()
pr.enable()
if precalculated_features is None:
precalculated_features = {}
# Access the index to get the filtered data we need
target_entity = self.entityset[self.target_eid]
if ignored:
# TODO: Just want to remove entities if don't have any (sub)features defined
# on them anymore, rather than recreating
ordered_entities = FeatureTree(self.entityset, self.features, ignored=ignored).ordered_entities
else:
ordered_entities = self.feature_tree.ordered_entities
necessary_columns = self.feature_tree.necessary_columns
eframes_by_filter = \
self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
index_eid=self.target_eid,
instances=instance_ids,
entity_columns=necessary_columns,
time_last=time_last,
training_window=training_window,
verbose=verbose)
large_eframes_by_filter = None
if any([f.uses_full_entity for f in self.feature_tree.all_features]):
large_necessary_columns = self.feature_tree.necessary_columns_for_all_values_features
large_eframes_by_filter = \
self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
index_eid=self.target_eid,
instances=None,
entity_columns=large_necessary_columns,
time_last=time_last,
training_window=training_window,
verbose=verbose)
# Handle an empty time slice by returning a dataframe with defaults
if eframes_by_filter is None:
return self.generate_default_df(instance_ids=instance_ids)
finished_entity_ids = []
# Populate entity_frames with precalculated features
if len(precalculated_features) > 0:
for entity_id, precalc_feature_values in precalculated_features.items():
if entity_id in eframes_by_filter:
frame = eframes_by_filter[entity_id][entity_id]
eframes_by_filter[entity_id][entity_id] = pd.merge(frame,
precalc_feature_values,
left_index=True,
right_index=True)
else:
# Only features we're taking from this entity
# are precomputed
# Make sure the id variable is a column as well as an index
entity_id_var = self.entityset[entity_id].index
precalc_feature_values[entity_id_var] = precalc_feature_values.index.values
eframes_by_filter[entity_id] = {entity_id: precalc_feature_values}
finished_entity_ids.append(entity_id)
# Iterate over the top-level entities (filter entities) in sorted order
# and calculate all relevant features under each one.
if verbose:
total_groups_to_compute = sum(len(group)
for group in self.feature_tree.ordered_feature_groups.values())
pbar = make_tqdm_iterator(total=total_groups_to_compute,
desc="Computing features",
unit="feature group")
if verbose:
pbar.update(0)
for filter_eid in ordered_entities:
entity_frames = eframes_by_filter[filter_eid]
large_entity_frames = None
if large_eframes_by_filter is not None:
large_entity_frames = large_eframes_by_filter[filter_eid]
# update the current set of entity frames with the computed features
# from previously finished entities
for eid in finished_entity_ids:
# only include this frame if it's not from a descendent entity:
# descendent entity frames will have to be re-calculated.
# TODO: this check might not be necessary, depending on our
# constraints
if not self.entityset.find_backward_path(start_entity_id=filter_eid,
goal_entity_id=eid):
entity_frames[eid] = eframes_by_filter[eid][eid]
# TODO: look this over again
# precalculated features will only be placed in entity_frames,
# and it's possible that that they are the only features computed
# for an entity. In this case, the entity won't be present in
# large_eframes_by_filter. The relevant lines that this case passes
# through are 136-143
if (large_eframes_by_filter is not None and
eid in large_eframes_by_filter and eid in large_eframes_by_filter[eid]):
large_entity_frames[eid] = large_eframes_by_filter[eid][eid]
if filter_eid in self.feature_tree.ordered_feature_groups:
for group in self.feature_tree.ordered_feature_groups[filter_eid]:
if verbose:
pbar.set_postfix({'running': 0})
test_feature = group[0]
entity_id = test_feature.entity.id
input_frames_type = self.feature_tree.input_frames_type(test_feature)
input_frames = large_entity_frames
if input_frames_type == "subset_entity_frames":
input_frames = entity_frames
handler = self._feature_type_handler(test_feature)
result_frame = handler(group, input_frames)
output_frames_type = self.feature_tree.output_frames_type(test_feature)
if output_frames_type in ['full_and_subset_entity_frames', 'subset_entity_frames']:
index = entity_frames[entity_id].index
# If result_frame came from a uses_full_entity feature,
# and the input was large_entity_frames,
# then it's possible it doesn't contain some of the features
# in the output entity_frames
# We thus need to concatenate the existing frame with the result frame,
# making sure not to duplicate any columns
_result_frame = result_frame.reindex(index)
cols_to_keep = [c for c in _result_frame.columns
if c not in entity_frames[entity_id].columns]
entity_frames[entity_id] = pd.concat([entity_frames[entity_id],
_result_frame[cols_to_keep]],
axis=1)
if output_frames_type in ['full_and_subset_entity_frames', 'full_entity_frames']:
index = large_entity_frames[entity_id].index
_result_frame = result_frame.reindex(index)
cols_to_keep = [c for c in _result_frame.columns
if c not in large_entity_frames[entity_id].columns]
large_entity_frames[entity_id] = pd.concat([large_entity_frames[entity_id],
_result_frame[cols_to_keep]],
axis=1)
if verbose:
pbar.update(1)
finished_entity_ids.append(filter_eid)
if verbose:
pbar.set_postfix({'running': 0})
pbar.refresh()
sys.stdout.flush()
pbar.close()
# debugging
if profile:
pr.disable()
prof_folder_path = os.path.join(ROOT_DIR, 'prof')
if not os.path.exists(prof_folder_path):
os.mkdir(prof_folder_path)
with open(os.path.join(prof_folder_path, 'inst-%s.log' %
list(instance_ids)[0]), 'w') as f:
pstats.Stats(pr, stream=f).strip_dirs().sort_stats("cumulative", "tottime").print_stats()
df = eframes_by_filter[self.target_eid][self.target_eid]
# fill in empty rows with default values
missing_ids = [i for i in instance_ids if i not in
df[target_entity.index]]
if missing_ids:
default_df = self.generate_default_df(instance_ids=missing_ids,
extra_columns=df.columns)
df = df.append(default_df, sort=True)
df.index.name = self.entityset[self.target_eid].index
return df[[feat.get_name() for feat in self.features]]
def generate_default_df(self, instance_ids, extra_columns=None):
index_name = self.features[0].entity.index
default_row = [f.default_value for f in self.features]
default_cols = [f.get_name() for f in self.features]
default_matrix = [default_row] * len(instance_ids)
default_df = pd.DataFrame(default_matrix,
columns=default_cols,
index=instance_ids)
default_df.index.name = index_name
if extra_columns is not None:
for c in extra_columns:
if c not in default_df.columns:
default_df[c] = [np.nan] * len(instance_ids)
return default_df
def _feature_type_handler(self, f):
if isinstance(f, TransformPrimitive):
return self._calculate_transform_features
elif isinstance(f, DirectFeature):
return self._calculate_direct_features
elif isinstance(f, AggregationPrimitive):
return self._calculate_agg_features
elif isinstance(f, IdentityFeature):
return self._calculate_identity_features
else:
raise UnknownFeature(u"{} feature unknown".format(f.__class__))
def _calculate_identity_features(self, features, entity_frames):
entity_id = features[0].entity.id
assert (entity_id in entity_frames and
features[0].get_name() in entity_frames[entity_id].columns)
return entity_frames[entity_id]
def _calculate_transform_features(self, features, entity_frames):
entity_id = features[0].entity.id
assert len(set([f.entity.id for f in features])) == 1, \
"features must share base entity"
assert entity_id in entity_frames
frame = entity_frames[entity_id]
for f in features:
# handle when no data
if frame.shape[0] == 0:
set_default_column(frame, f)
continue
# collect only the variables we need for this transformation
variable_data = [frame[bf.get_name()].values
for bf in f.base_features]
feature_func = f.get_function()
# apply the function to the relevant dataframe slice and add the
# feature row to the results dataframe.
if f.uses_calc_time:
values = feature_func(*variable_data, time=self.time_last)
else:
values = feature_func(*variable_data)
if isinstance(values, pd.Series):
values = values.values
frame[f.get_name()] = list(values)
return frame
def _calculate_direct_features(self, features, entity_frames):
entity_id = features[0].entity.id
parent_entity_id = features[0].parent_entity.id
assert entity_id in entity_frames and parent_entity_id in entity_frames
path = self.entityset.find_forward_path(entity_id, parent_entity_id)
assert len(path) == 1, \
"Error calculating DirectFeatures, len(path) > 1"
parent_df = entity_frames[parent_entity_id]
child_df = entity_frames[entity_id]
merge_var = path[0].child_variable.id
# generate a mapping of old column names (in the parent entity) to
# new column names (in the child entity) for the merge
col_map = {path[0].parent_variable.id: merge_var}
index_as_feature = None
for f in features:
if f.base_features[0].get_name() == path[0].parent_variable.id:
index_as_feature = f
# Sometimes entityset._add_multigenerational_links adds link variables
# that would ordinarily get calculated as direct features,
# so we make sure not to attempt to calculate again
if f.get_name() in child_df.columns:
continue
col_map[f.base_features[0].get_name()] = f.get_name()
# merge the identity feature from the parent entity into the child
merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)
if index_as_feature is not None:
merge_df.set_index(index_as_feature.get_name(), inplace=True,
drop=False)
else:
merge_df.set_index(merge_var, inplace=True)
new_df = pd.merge(left=child_df, right=merge_df,
left_on=merge_var, right_index=True,
how='left')
return new_df
def _calculate_agg_features(self, features, entity_frames):
test_feature = features[0]
entity = test_feature.entity
child_entity = test_feature.base_features[0].entity
assert entity.id in entity_frames and child_entity.id in entity_frames
frame = entity_frames[entity.id]
base_frame = entity_frames[child_entity.id]
# Sometimes approximate features get computed in a previous filter frame
# and put in the current one dynamically,
# so there may be existing features here
features = [f for f in features if f.get_name()
not in frame.columns]
if not len(features):
return frame
# handle where clause for all functions below
where = test_feature.where
if where is not None:
base_frame = base_frame[base_frame[where.get_name()]]
relationship_path = self.entityset.find_backward_path(entity.id,
child_entity.id)
groupby_var = Relationship._get_link_variable_name(relationship_path)
# if the use_previous property exists on this feature, include only the
# instances from the child entity included in that Timedelta
use_previous = test_feature.use_previous
if use_previous and not base_frame.empty:
# Filter by use_previous values
time_last = self.time_last
if use_previous.is_absolute():
time_first = time_last - use_previous
ti = child_entity.time_index
if ti is not None:
base_frame = base_frame[base_frame[ti] >= time_first]
else:
n = use_previous.value
def last_n(df):
return df.iloc[-n:]
base_frame = base_frame.groupby(groupby_var, observed=True, sort=False).apply(last_n)
to_agg = {}
agg_rename = {}
to_apply = set()
# apply multivariable and time-dependent features as we find them, and
# save aggregable features for later
for f in features:
if _can_agg(f):
variable_id = f.base_features[0].get_name()
if variable_id not in to_agg:
to_agg[variable_id] = []
func = f.get_function()
funcname = func
if callable(func):
funcname = func.__name__
to_agg[variable_id].append(func)
# this is used below to rename columns that pandas names for us
agg_rename[u"{}-{}".format(variable_id, funcname)] = f.get_name()
continue
to_apply.add(f)
# Apply the non-aggregable functions generate a new dataframe, and merge
# it with the existing one
if len(to_apply):
wrap = agg_wrapper(to_apply, self.time_last)
# groupby_var can be both the name of the index and a column,
# to silence pandas warning about ambiguity we explicitly pass
# the column (in actuality grouping by both index and group would
# work)
to_merge = base_frame.groupby(base_frame[groupby_var], observed=True, sort=False).apply(wrap)
to_merge.reset_index(1, drop=True, inplace=True)
frame = pd.merge(left=frame, right=to_merge,
left_index=True,
right_index=True, how='left')
# Apply the aggregate functions to generate a new dataframe, and merge
# it with the existing one
if len(to_agg):
# groupby_var can be both the name of the index and a column,
# to silence pandas warning about ambiguity we explicitly pass
# the column (in actuality grouping by both index and group would
# work)
to_merge = base_frame.groupby(base_frame[groupby_var],
observed=True, sort=False).agg(to_agg)
# rename columns to the correct feature names
to_merge.columns = [agg_rename["-".join(x)] for x in to_merge.columns.ravel()]
to_merge = to_merge[list(agg_rename.values())]
# workaround for pandas bug where categories are in the wrong order
# see: https://github.com/pandas-dev/pandas/issues/22501
if pdtypes.is_categorical_dtype(frame.index):
categories = pdtypes.CategoricalDtype(categories=frame.index.categories)
to_merge.index = to_merge.index.astype(object).astype(categories)
frame = pd.merge(left=frame, right=to_merge,
left_index=True, right_index=True, how='left')
# Handle default values
# 1. handle non scalar default values
iterfeats = [f for f in features
if hasattr(f.default_value, '__iter__')]
for f in iterfeats:
nulls = pd.isnull(frame[f.get_name()])
for ni in nulls[nulls].index:
frame.at[ni, f.get_name()] = f.default_value
# 2. handle scalars default values
fillna_dict = {f.get_name(): f.default_value for f in features
if f not in iterfeats}
frame.fillna(fillna_dict, inplace=True)
# convert boolean dtypes to floats as appropriate
# pandas behavior: https://github.com/pydata/pandas/issues/3752
for f in features:
if (not f.expanding and
f.variable_type == variable_types.Numeric and
frame[f.get_name()].dtype.name in ['object', 'bool']):
frame[f.get_name()] = frame[f.get_name()].astype(float)
return frame
def _can_agg(feature):
assert isinstance(feature, AggregationPrimitive)
base_features = feature.base_features
if feature.where is not None:
base_features = [bf.get_name() for bf in base_features
if bf.get_name() != feature.where.get_name()]
if feature.uses_calc_time:
return False
return len(base_features) == 1 and not feature.expanding
def agg_wrapper(feats, time_last):
def wrap(df):
d = {}
for f in feats:
func = f.get_function()
variable_ids = [bf.get_name() for bf in f.base_features]
args = [df[v] for v in variable_ids]
if f.uses_calc_time:
d[f.get_name()] = [func(*args, time=time_last)]
else:
d[f.get_name()] = [func(*args)]
return pd.DataFrame(d)
return wrap
def set_default_column(frame, f):
default = f.default_value
if hasattr(default, '__iter__'):
length = frame.shape[0]
default = [f.default_value] * length
frame[f.get_name()] = default
| 44.645283 | 107 | 0.584946 | import cProfile
import logging
import os
import pstats
import sys
import warnings
from datetime import datetime
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from future import standard_library
from .base_backend import ComputationalBackend
from .feature_tree import FeatureTree
from featuretools import variable_types
from featuretools.entityset.relationship import Relationship
from featuretools.exceptions import UnknownFeature
from featuretools.primitives import (
AggregationPrimitive,
DirectFeature,
IdentityFeature,
TransformPrimitive
)
from featuretools.utils.gen_utils import make_tqdm_iterator
standard_library.install_aliases()
warnings.simplefilter('ignore', np.RankWarning)
warnings.simplefilter("ignore", category=RuntimeWarning)
logger = logging.getLogger('featuretools.computational_backend')
ROOT_DIR = os.path.expanduser("~")
class PandasBackend(ComputationalBackend):
def __init__(self, entityset, features):
assert len(set(f.entity.id for f in features)) == 1, \
"Features must all be defined on the same entity"
self.entityset = entityset
self.target_eid = features[0].entity.id
self.features = features
self.feature_tree = FeatureTree(entityset, features)
def __sizeof__(self):
return self.entityset.__sizeof__()
def calculate_all_features(self, instance_ids, time_last,
training_window=None, profile=False,
precalculated_features=None, ignored=None,
verbose=False):
assert len(instance_ids) > 0, "0 instance ids provided"
self.instance_ids = instance_ids
self.time_last = time_last
if self.time_last is None:
self.time_last = datetime.now()
if profile:
pr = cProfile.Profile()
pr.enable()
if precalculated_features is None:
precalculated_features = {}
target_entity = self.entityset[self.target_eid]
if ignored:
# on them anymore, rather than recreating
ordered_entities = FeatureTree(self.entityset, self.features, ignored=ignored).ordered_entities
else:
ordered_entities = self.feature_tree.ordered_entities
necessary_columns = self.feature_tree.necessary_columns
eframes_by_filter = \
self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
index_eid=self.target_eid,
instances=instance_ids,
entity_columns=necessary_columns,
time_last=time_last,
training_window=training_window,
verbose=verbose)
large_eframes_by_filter = None
if any([f.uses_full_entity for f in self.feature_tree.all_features]):
large_necessary_columns = self.feature_tree.necessary_columns_for_all_values_features
large_eframes_by_filter = \
self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
index_eid=self.target_eid,
instances=None,
entity_columns=large_necessary_columns,
time_last=time_last,
training_window=training_window,
verbose=verbose)
# Handle an empty time slice by returning a dataframe with defaults
if eframes_by_filter is None:
return self.generate_default_df(instance_ids=instance_ids)
finished_entity_ids = []
# Populate entity_frames with precalculated features
if len(precalculated_features) > 0:
for entity_id, precalc_feature_values in precalculated_features.items():
if entity_id in eframes_by_filter:
frame = eframes_by_filter[entity_id][entity_id]
eframes_by_filter[entity_id][entity_id] = pd.merge(frame,
precalc_feature_values,
left_index=True,
right_index=True)
else:
# Only features we're taking from this entity
entity_id_var = self.entityset[entity_id].index
precalc_feature_values[entity_id_var] = precalc_feature_values.index.values
eframes_by_filter[entity_id] = {entity_id: precalc_feature_values}
finished_entity_ids.append(entity_id)
if verbose:
total_groups_to_compute = sum(len(group)
for group in self.feature_tree.ordered_feature_groups.values())
pbar = make_tqdm_iterator(total=total_groups_to_compute,
desc="Computing features",
unit="feature group")
if verbose:
pbar.update(0)
for filter_eid in ordered_entities:
entity_frames = eframes_by_filter[filter_eid]
large_entity_frames = None
if large_eframes_by_filter is not None:
large_entity_frames = large_eframes_by_filter[filter_eid]
for eid in finished_entity_ids:
# descendent entity frames will have to be re-calculated.
# TODO: this check might not be necessary, depending on our
# constraints
if not self.entityset.find_backward_path(start_entity_id=filter_eid,
goal_entity_id=eid):
entity_frames[eid] = eframes_by_filter[eid][eid]
# TODO: look this over again
# precalculated features will only be placed in entity_frames,
# and it's possible that that they are the only features computed
# large_eframes_by_filter. The relevant lines that this case passes
# through are 136-143
if (large_eframes_by_filter is not None and
eid in large_eframes_by_filter and eid in large_eframes_by_filter[eid]):
large_entity_frames[eid] = large_eframes_by_filter[eid][eid]
if filter_eid in self.feature_tree.ordered_feature_groups:
for group in self.feature_tree.ordered_feature_groups[filter_eid]:
if verbose:
pbar.set_postfix({'running': 0})
test_feature = group[0]
entity_id = test_feature.entity.id
input_frames_type = self.feature_tree.input_frames_type(test_feature)
input_frames = large_entity_frames
if input_frames_type == "subset_entity_frames":
input_frames = entity_frames
handler = self._feature_type_handler(test_feature)
result_frame = handler(group, input_frames)
output_frames_type = self.feature_tree.output_frames_type(test_feature)
if output_frames_type in ['full_and_subset_entity_frames', 'subset_entity_frames']:
index = entity_frames[entity_id].index
# If result_frame came from a uses_full_entity feature,
# and the input was large_entity_frames,
# then it's possible it doesn't contain some of the features
# in the output entity_frames
# We thus need to concatenate the existing frame with the result frame,
# making sure not to duplicate any columns
_result_frame = result_frame.reindex(index)
cols_to_keep = [c for c in _result_frame.columns
if c not in entity_frames[entity_id].columns]
entity_frames[entity_id] = pd.concat([entity_frames[entity_id],
_result_frame[cols_to_keep]],
axis=1)
if output_frames_type in ['full_and_subset_entity_frames', 'full_entity_frames']:
index = large_entity_frames[entity_id].index
_result_frame = result_frame.reindex(index)
cols_to_keep = [c for c in _result_frame.columns
if c not in large_entity_frames[entity_id].columns]
large_entity_frames[entity_id] = pd.concat([large_entity_frames[entity_id],
_result_frame[cols_to_keep]],
axis=1)
if verbose:
pbar.update(1)
finished_entity_ids.append(filter_eid)
if verbose:
pbar.set_postfix({'running': 0})
pbar.refresh()
sys.stdout.flush()
pbar.close()
# debugging
if profile:
pr.disable()
prof_folder_path = os.path.join(ROOT_DIR, 'prof')
if not os.path.exists(prof_folder_path):
os.mkdir(prof_folder_path)
with open(os.path.join(prof_folder_path, 'inst-%s.log' %
list(instance_ids)[0]), 'w') as f:
pstats.Stats(pr, stream=f).strip_dirs().sort_stats("cumulative", "tottime").print_stats()
df = eframes_by_filter[self.target_eid][self.target_eid]
# fill in empty rows with default values
missing_ids = [i for i in instance_ids if i not in
df[target_entity.index]]
if missing_ids:
default_df = self.generate_default_df(instance_ids=missing_ids,
extra_columns=df.columns)
df = df.append(default_df, sort=True)
df.index.name = self.entityset[self.target_eid].index
return df[[feat.get_name() for feat in self.features]]
def generate_default_df(self, instance_ids, extra_columns=None):
index_name = self.features[0].entity.index
default_row = [f.default_value for f in self.features]
default_cols = [f.get_name() for f in self.features]
default_matrix = [default_row] * len(instance_ids)
default_df = pd.DataFrame(default_matrix,
columns=default_cols,
index=instance_ids)
default_df.index.name = index_name
if extra_columns is not None:
for c in extra_columns:
if c not in default_df.columns:
default_df[c] = [np.nan] * len(instance_ids)
return default_df
def _feature_type_handler(self, f):
if isinstance(f, TransformPrimitive):
return self._calculate_transform_features
elif isinstance(f, DirectFeature):
return self._calculate_direct_features
elif isinstance(f, AggregationPrimitive):
return self._calculate_agg_features
elif isinstance(f, IdentityFeature):
return self._calculate_identity_features
else:
raise UnknownFeature(u"{} feature unknown".format(f.__class__))
def _calculate_identity_features(self, features, entity_frames):
entity_id = features[0].entity.id
assert (entity_id in entity_frames and
features[0].get_name() in entity_frames[entity_id].columns)
return entity_frames[entity_id]
def _calculate_transform_features(self, features, entity_frames):
entity_id = features[0].entity.id
assert len(set([f.entity.id for f in features])) == 1, \
"features must share base entity"
assert entity_id in entity_frames
frame = entity_frames[entity_id]
for f in features:
# handle when no data
if frame.shape[0] == 0:
set_default_column(frame, f)
continue
# collect only the variables we need for this transformation
variable_data = [frame[bf.get_name()].values
for bf in f.base_features]
feature_func = f.get_function()
# apply the function to the relevant dataframe slice and add the
# feature row to the results dataframe.
if f.uses_calc_time:
values = feature_func(*variable_data, time=self.time_last)
else:
values = feature_func(*variable_data)
if isinstance(values, pd.Series):
values = values.values
frame[f.get_name()] = list(values)
return frame
def _calculate_direct_features(self, features, entity_frames):
entity_id = features[0].entity.id
parent_entity_id = features[0].parent_entity.id
assert entity_id in entity_frames and parent_entity_id in entity_frames
path = self.entityset.find_forward_path(entity_id, parent_entity_id)
assert len(path) == 1, \
"Error calculating DirectFeatures, len(path) > 1"
parent_df = entity_frames[parent_entity_id]
child_df = entity_frames[entity_id]
merge_var = path[0].child_variable.id
# generate a mapping of old column names (in the parent entity) to
# new column names (in the child entity) for the merge
col_map = {path[0].parent_variable.id: merge_var}
index_as_feature = None
for f in features:
if f.base_features[0].get_name() == path[0].parent_variable.id:
index_as_feature = f
# Sometimes entityset._add_multigenerational_links adds link variables
# that would ordinarily get calculated as direct features,
# so we make sure not to attempt to calculate again
if f.get_name() in child_df.columns:
continue
col_map[f.base_features[0].get_name()] = f.get_name()
# merge the identity feature from the parent entity into the child
merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)
if index_as_feature is not None:
merge_df.set_index(index_as_feature.get_name(), inplace=True,
drop=False)
else:
merge_df.set_index(merge_var, inplace=True)
new_df = pd.merge(left=child_df, right=merge_df,
left_on=merge_var, right_index=True,
how='left')
return new_df
def _calculate_agg_features(self, features, entity_frames):
test_feature = features[0]
entity = test_feature.entity
child_entity = test_feature.base_features[0].entity
assert entity.id in entity_frames and child_entity.id in entity_frames
frame = entity_frames[entity.id]
base_frame = entity_frames[child_entity.id]
# Sometimes approximate features get computed in a previous filter frame
# and put in the current one dynamically,
# so there may be existing features here
features = [f for f in features if f.get_name()
not in frame.columns]
if not len(features):
return frame
# handle where clause for all functions below
where = test_feature.where
if where is not None:
base_frame = base_frame[base_frame[where.get_name()]]
relationship_path = self.entityset.find_backward_path(entity.id,
child_entity.id)
groupby_var = Relationship._get_link_variable_name(relationship_path)
# if the use_previous property exists on this feature, include only the
# instances from the child entity included in that Timedelta
use_previous = test_feature.use_previous
if use_previous and not base_frame.empty:
# Filter by use_previous values
time_last = self.time_last
if use_previous.is_absolute():
time_first = time_last - use_previous
ti = child_entity.time_index
if ti is not None:
base_frame = base_frame[base_frame[ti] >= time_first]
else:
n = use_previous.value
def last_n(df):
return df.iloc[-n:]
base_frame = base_frame.groupby(groupby_var, observed=True, sort=False).apply(last_n)
to_agg = {}
agg_rename = {}
to_apply = set()
# apply multivariable and time-dependent features as we find them, and
# save aggregable features for later
for f in features:
if _can_agg(f):
variable_id = f.base_features[0].get_name()
if variable_id not in to_agg:
to_agg[variable_id] = []
func = f.get_function()
funcname = func
if callable(func):
funcname = func.__name__
to_agg[variable_id].append(func)
# this is used below to rename columns that pandas names for us
agg_rename[u"{}-{}".format(variable_id, funcname)] = f.get_name()
continue
to_apply.add(f)
# Apply the non-aggregable functions generate a new dataframe, and merge
# it with the existing one
if len(to_apply):
wrap = agg_wrapper(to_apply, self.time_last)
# groupby_var can be both the name of the index and a column,
# to silence pandas warning about ambiguity we explicitly pass
# the column (in actuality grouping by both index and group would
# work)
to_merge = base_frame.groupby(base_frame[groupby_var], observed=True, sort=False).apply(wrap)
to_merge.reset_index(1, drop=True, inplace=True)
frame = pd.merge(left=frame, right=to_merge,
left_index=True,
right_index=True, how='left')
# Apply the aggregate functions to generate a new dataframe, and merge
# it with the existing one
if len(to_agg):
# groupby_var can be both the name of the index and a column,
# to silence pandas warning about ambiguity we explicitly pass
# the column (in actuality grouping by both index and group would
# work)
to_merge = base_frame.groupby(base_frame[groupby_var],
observed=True, sort=False).agg(to_agg)
# rename columns to the correct feature names
to_merge.columns = [agg_rename["-".join(x)] for x in to_merge.columns.ravel()]
to_merge = to_merge[list(agg_rename.values())]
# workaround for pandas bug where categories are in the wrong order
# see: https://github.com/pandas-dev/pandas/issues/22501
if pdtypes.is_categorical_dtype(frame.index):
categories = pdtypes.CategoricalDtype(categories=frame.index.categories)
to_merge.index = to_merge.index.astype(object).astype(categories)
frame = pd.merge(left=frame, right=to_merge,
left_index=True, right_index=True, how='left')
# Handle default values
# 1. handle non scalar default values
iterfeats = [f for f in features
if hasattr(f.default_value, '__iter__')]
for f in iterfeats:
nulls = pd.isnull(frame[f.get_name()])
for ni in nulls[nulls].index:
frame.at[ni, f.get_name()] = f.default_value
# 2. handle scalars default values
fillna_dict = {f.get_name(): f.default_value for f in features
if f not in iterfeats}
frame.fillna(fillna_dict, inplace=True)
# convert boolean dtypes to floats as appropriate
# pandas behavior: https://github.com/pydata/pandas/issues/3752
for f in features:
if (not f.expanding and
f.variable_type == variable_types.Numeric and
frame[f.get_name()].dtype.name in ['object', 'bool']):
frame[f.get_name()] = frame[f.get_name()].astype(float)
return frame
def _can_agg(feature):
assert isinstance(feature, AggregationPrimitive)
base_features = feature.base_features
if feature.where is not None:
base_features = [bf.get_name() for bf in base_features
if bf.get_name() != feature.where.get_name()]
if feature.uses_calc_time:
return False
return len(base_features) == 1 and not feature.expanding
def agg_wrapper(feats, time_last):
def wrap(df):
d = {}
for f in feats:
func = f.get_function()
variable_ids = [bf.get_name() for bf in f.base_features]
args = [df[v] for v in variable_ids]
if f.uses_calc_time:
d[f.get_name()] = [func(*args, time=time_last)]
else:
d[f.get_name()] = [func(*args)]
return pd.DataFrame(d)
return wrap
def set_default_column(frame, f):
    """Add a column for feature ``f`` to ``frame`` filled with its default.

    Scalar defaults are broadcast by pandas itself; iterable defaults are
    replicated once per row so pandas stores the whole iterable in each
    cell instead of trying to align it against the index.
    """
    fill_value = f.default_value
    if hasattr(fill_value, '__iter__'):
        fill_value = [f.default_value for _ in range(frame.shape[0])]
    frame[f.get_name()] = fill_value
| true | true |
1c2e2aaab2e6d66ef8843c2a09921db66922ee45 | 6,758 | py | Python | tests/cli/test_cli_parser.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 7 | 2018-11-19T12:05:13.000Z | 2020-01-17T08:30:38.000Z | tests/cli/test_cli_parser.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2021-06-28T20:57:42.000Z | 2022-02-26T02:11:11.000Z | tests/cli/test_cli_parser.py | wegamekinglc/incubator-airflow | fc174635b0729253a86e8c877d6d8551a815a2cb | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-03-03T01:44:08.000Z | 2021-03-03T01:44:08.000Z | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import contextlib
import io
import re
from collections import Counter
from unittest import TestCase
import pytest
from airflow.cli import cli_parser
# Long options may not be `--snake_case` and may not contain an uppercase letter.
ILLEGAL_LONG_OPTION_PATTERN = re.compile("^--[a-z]+_[a-z]+|^--.*[A-Z].*")
# A short option must be exactly one ASCII letter: `-a`..`-z` or `-A`..`-Z`.
# Bug fix: the class used to be `[a-zA-z]`, whose `A-z` span also matches the
# ASCII punctuation between 'Z' and 'a' (`[ \ ] ^ _` and backtick).
LEGAL_SHORT_OPTION_PATTERN = re.compile("^-[a-zA-Z]$")
cli_args = {k: v for k, v in cli_parser.__dict__.items() if k.startswith("ARG_")}
class TestCli(TestCase):
    """Structural sanity checks for the CLI argument definitions in ``cli_parser``."""

    @staticmethod
    def _command_groups():
        """Return every ``COMMANDS*`` group constant defined in ``cli_parser``.

        Several tests below iterate the same module-level command-group
        constants, so the lookup is shared here instead of repeated inline.
        """
        return {
            name: value
            for name, value in cli_parser.__dict__.items()
            if name.isupper() and name.startswith("COMMANDS")
        }

    def test_arg_option_long_only(self):
        """Long-only options must be --kebab-case: no underscores, no uppercase."""
        optional_long = [
            arg for arg in cli_args.values() if len(arg.flags) == 1 and arg.flags[0].startswith("-")
        ]
        for arg in optional_long:
            assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]) is None, f"{arg.flags[0]} is not match"

    def test_arg_option_mix_short_long(self):
        """Mixed options must pair a single-letter short flag with a valid long flag."""
        optional_mix = [
            arg for arg in cli_args.values() if len(arg.flags) == 2 and arg.flags[0].startswith("-")
        ]
        for arg in optional_mix:
            assert LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]) is not None, f"{arg.flags[0]} is not match"
            assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]) is None, f"{arg.flags[1]} is not match"

    def test_subcommand_conflict(self):
        """Subcommand names must be unique (case-insensitively) within each group."""
        for group_name, sub in self._command_groups().items():
            name = [command.name.lower() for command in sub]
            assert len(name) == len(set(name)), f"Command group {group_name} have conflict subcommand"

    def test_subcommand_arg_name_conflict(self):
        """No subcommand may declare the same Arg object twice."""
        for group, command in self._command_groups().items():
            for com in command:
                conflict_arg = [arg for arg, count in Counter(com.args).items() if count > 1]
                assert [] == conflict_arg, (
                    f"Command group {group} function {com.name} have " f"conflict args name {conflict_arg}"
                )

    def test_subcommand_arg_flag_conflict(self):
        """Positional, long and short flags must each be unique per subcommand."""
        for group, command in self._command_groups().items():
            for com in command:
                # Positional args: a single flag that does not start with '-'.
                position = [
                    a.flags[0] for a in com.args if (len(a.flags) == 1 and not a.flags[0].startswith("-"))
                ]
                conflict_position = [arg for arg, count in Counter(position).items() if count > 1]
                assert [] == conflict_position, (
                    f"Command group {group} function {com.name} have conflict "
                    f"position flags {conflict_position}"
                )
                # Long flags: either the sole flag of a long-only option or
                # the second flag of a short/long pair.
                long_option = [
                    a.flags[0] for a in com.args if (len(a.flags) == 1 and a.flags[0].startswith("-"))
                ] + [a.flags[1] for a in com.args if len(a.flags) == 2]
                conflict_long_option = [arg for arg, count in Counter(long_option).items() if count > 1]
                assert [] == conflict_long_option, (
                    f"Command group {group} function {com.name} have conflict "
                    f"long option flags {conflict_long_option}"
                )
                # Short flags: the first flag of a short/long pair.
                short_option = [a.flags[0] for a in com.args if len(a.flags) == 2]
                conflict_short_option = [arg for arg, count in Counter(short_option).items() if count > 1]
                assert [] == conflict_short_option, (
                    f"Command group {group} function {com.name} have conflict "
                    f"short option flags {conflict_short_option}"
                )

    def test_falsy_default_value(self):
        """A falsy default (0) must survive parsing instead of being dropped."""
        arg = cli_parser.Arg(("--test",), default=0, type=int)
        parser = argparse.ArgumentParser()
        arg.add_to_parser(parser)
        args = parser.parse_args(['--test', '10'])
        assert args.test == 10
        args = parser.parse_args([])
        assert args.test == 0

    def test_commands_and_command_group_sections(self):
        """``--help`` output must contain both a Commands and a Groups section."""
        parser = cli_parser.get_parser()
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
            with pytest.raises(SystemExit):
                parser.parse_args(['--help'])
            stdout = stdout.getvalue()
        assert "Commands" in stdout
        assert "Groups" in stdout

    def test_should_display_help(self):
        """Every command and nested subcommand must support ``--help``."""
        parser = cli_parser.get_parser()
        all_command_as_args = [
            command_as_args
            for top_command in cli_parser.airflow_commands
            for command_as_args in (
                [[top_command.name]]
                if isinstance(top_command, cli_parser.ActionCommand)
                else [[top_command.name, nested_command.name] for nested_command in top_command.subcommands]
            )
        ]
        for cmd_args in all_command_as_args:
            with pytest.raises(SystemExit):
                parser.parse_args([*cmd_args, '--help'])

    def test_positive_int(self):
        """positive_int accepts strictly positive integers and rejects the rest."""
        assert 1 == cli_parser.positive_int('1')
        # Bug fix: both rejection cases used to share one ``pytest.raises``
        # block, so the '-1' call was unreachable — the '0' call raised and
        # exited the context manager before '-1' was ever evaluated.
        with pytest.raises(argparse.ArgumentTypeError):
            cli_parser.positive_int('0')
        with pytest.raises(argparse.ArgumentTypeError):
            cli_parser.positive_int('-1')
| 39.988166 | 109 | 0.606244 |
import argparse
import contextlib
import io
import re
from collections import Counter
from unittest import TestCase
import pytest
from airflow.cli import cli_parser
ILLEGAL_LONG_OPTION_PATTERN = re.compile("^--[a-z]+_[a-z]+|^--.*[A-Z].*")
# Bug fix: `[a-zA-z]` also matched the ASCII punctuation between 'Z' and 'a'
# (`[ \ ] ^ _` and backtick); a short flag must be exactly one letter.
LEGAL_SHORT_OPTION_PATTERN = re.compile("^-[a-zA-Z]$")
cli_args = {k: v for k, v in cli_parser.__dict__.items() if k.startswith("ARG_")}
class TestCli(TestCase):
def test_arg_option_long_only(self):
optional_long = [
arg for arg in cli_args.values() if len(arg.flags) == 1 and arg.flags[0].startswith("-")
]
for arg in optional_long:
assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]) is None, f"{arg.flags[0]} is not match"
def test_arg_option_mix_short_long(self):
optional_mix = [
arg for arg in cli_args.values() if len(arg.flags) == 2 and arg.flags[0].startswith("-")
]
for arg in optional_mix:
assert LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]) is not None, f"{arg.flags[0]} is not match"
assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]) is None, f"{arg.flags[1]} is not match"
def test_subcommand_conflict(self):
subcommand = {
var: cli_parser.__dict__.get(var)
for var in cli_parser.__dict__
if var.isupper() and var.startswith("COMMANDS")
}
for group_name, sub in subcommand.items():
name = [command.name.lower() for command in sub]
assert len(name) == len(set(name)), f"Command group {group_name} have conflict subcommand"
def test_subcommand_arg_name_conflict(self):
subcommand = {
var: cli_parser.__dict__.get(var)
for var in cli_parser.__dict__
if var.isupper() and var.startswith("COMMANDS")
}
for group, command in subcommand.items():
for com in command:
conflict_arg = [arg for arg, count in Counter(com.args).items() if count > 1]
assert [] == conflict_arg, (
f"Command group {group} function {com.name} have " f"conflict args name {conflict_arg}"
)
def test_subcommand_arg_flag_conflict(self):
subcommand = {
key: val
for key, val in cli_parser.__dict__.items()
if key.isupper() and key.startswith("COMMANDS")
}
for group, command in subcommand.items():
for com in command:
position = [
a.flags[0] for a in com.args if (len(a.flags) == 1 and not a.flags[0].startswith("-"))
]
conflict_position = [arg for arg, count in Counter(position).items() if count > 1]
assert [] == conflict_position, (
f"Command group {group} function {com.name} have conflict "
f"position flags {conflict_position}"
)
long_option = [
a.flags[0] for a in com.args if (len(a.flags) == 1 and a.flags[0].startswith("-"))
] + [a.flags[1] for a in com.args if len(a.flags) == 2]
conflict_long_option = [arg for arg, count in Counter(long_option).items() if count > 1]
assert [] == conflict_long_option, (
f"Command group {group} function {com.name} have conflict "
f"long option flags {conflict_long_option}"
)
short_option = [a.flags[0] for a in com.args if len(a.flags) == 2]
conflict_short_option = [arg for arg, count in Counter(short_option).items() if count > 1]
assert [] == conflict_short_option, (
f"Command group {group} function {com.name} have conflict "
f"short option flags {conflict_short_option}"
)
def test_falsy_default_value(self):
arg = cli_parser.Arg(("--test",), default=0, type=int)
parser = argparse.ArgumentParser()
arg.add_to_parser(parser)
args = parser.parse_args(['--test', '10'])
assert args.test == 10
args = parser.parse_args([])
assert args.test == 0
def test_commands_and_command_group_sections(self):
parser = cli_parser.get_parser()
with contextlib.redirect_stdout(io.StringIO()) as stdout:
with pytest.raises(SystemExit):
parser.parse_args(['--help'])
stdout = stdout.getvalue()
assert "Commands" in stdout
assert "Groups" in stdout
def test_should_display_help(self):
parser = cli_parser.get_parser()
all_command_as_args = [
command_as_args
for top_command in cli_parser.airflow_commands
for command_as_args in (
[[top_command.name]]
if isinstance(top_command, cli_parser.ActionCommand)
else [[top_command.name, nested_command.name] for nested_command in top_command.subcommands]
)
]
for cmd_args in all_command_as_args:
with pytest.raises(SystemExit):
parser.parse_args([*cmd_args, '--help'])
def test_positive_int(self):
assert 1 == cli_parser.positive_int('1')
with pytest.raises(argparse.ArgumentTypeError):
cli_parser.positive_int('0')
cli_parser.positive_int('-1')
| true | true |
1c2e2bf8cd12b512636be05fa4bca246f5094a61 | 325 | py | Python | custom_components/react/enums.py | gertjanstulp/ha-mapper | 9cc84a4856e5f3e45077fd7d2586188b199f83d8 | [
"Apache-2.0"
] | null | null | null | custom_components/react/enums.py | gertjanstulp/ha-mapper | 9cc84a4856e5f3e45077fd7d2586188b199f83d8 | [
"Apache-2.0"
] | null | null | null | custom_components/react/enums.py | gertjanstulp/ha-mapper | 9cc84a4856e5f3e45077fd7d2586188b199f83d8 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
class ReactStage(str, Enum):
    """Lifecycle stage of the React integration.

    Inherits from ``str`` so members compare and serialize as their
    plain string values.
    """
    SETUP = "setup"
    STARTUP = "startup"
    WAITING = "waiting"
    RUNNING = "running"
    BACKGROUND = "background"
class ReactDisabledReason(str, Enum):
    """Why a reaction was disabled.

    Inherits from ``str`` so members compare and serialize as their
    plain string values.
    """
    REMOVED = "removed"
    LOAD_REACT = "load_react"
    RESTORE = "restore"
    CONSTRAINTS = "constraints"
| 19.117647 | 37 | 0.655385 | from enum import Enum
class ReactStage(str, Enum):
SETUP = "setup"
STARTUP = "startup"
WAITING = "waiting"
RUNNING = "running"
BACKGROUND = "background"
class ReactDisabledReason(str, Enum):
REMOVED = "removed"
LOAD_REACT = "load_react"
RESTORE = "restore"
CONSTRAINTS = "constraints"
| true | true |
1c2e2c3f297014f04fdf8b9a69ec8380ed2aa5c6 | 1,866 | py | Python | airflow_concert/motif/bigquery_query_job.py | NiltonDuarte/AirflowConcert | a2503a981f24be32d1478a7bb07620568b5e277a | [
"MIT"
] | null | null | null | airflow_concert/motif/bigquery_query_job.py | NiltonDuarte/AirflowConcert | a2503a981f24be32d1478a7bb07620568b5e277a | [
"MIT"
] | null | null | null | airflow_concert/motif/bigquery_query_job.py | NiltonDuarte/AirflowConcert | a2503a981f24be32d1478a7bb07620568b5e277a | [
"MIT"
] | null | null | null | from typing import Optional
from airflow_concert.motif.motif_base import MotifBase
from airflow_concert.motif.mixins.bigquery_job import BigQueryJobMixin, BigQueryTimePartitioning
from airflow_concert.phrase.protocols import PExecuteQueryMotif
class BigQueryQueryJobMotif(MotifBase, BigQueryJobMixin, PExecuteQueryMotif):
    """Motif that runs a SQL query as a BigQuery job inside an Airflow DAG.

    Configuration can be supplied either at construction time or later via
    :meth:`setup`; :meth:`build` turns the stored configuration into an
    insert-job operator.
    """
    def __init__(self, sql_query=None, config=None, name=None,
                 destination_table=None,
                 create_disposition=None,
                 write_disposition=None,
                 time_partitioning: Optional[BigQueryTimePartitioning] = None):
        # name/config are handled by MotifBase; the remaining fields mirror
        # the BigQuery query-job configuration options.
        super().__init__(name=name, config=config)
        self.sql_query = sql_query
        self.destination_table = destination_table
        self.create_disposition = create_disposition
        self.write_disposition = write_disposition
        self.time_partitioning = time_partitioning
    def setup(self, sql_query,
              destination_table=None,
              create_disposition="CREATE_IF_NEEDED",
              write_disposition=None,
              time_partitioning: Optional[BigQueryTimePartitioning] = None):
        """Re-configure the motif after construction.

        Note the asymmetry with ``__init__``: here ``create_disposition``
        defaults to "CREATE_IF_NEEDED" rather than None.
        """
        self.sql_query = sql_query
        self.destination_table = destination_table
        self.create_disposition = create_disposition
        self.write_disposition = write_disposition
        self.time_partitioning = time_partitioning
    def build(self, dag, phrase_group):
        """Create and return the BigQuery insert-job operator for ``dag``.

        ``query_configuration`` / ``insert_job_operator`` come from
        BigQueryJobMixin (defined elsewhere in the package).
        """
        bigquery_job_operator = self.insert_job_operator(
            dag, phrase_group,
            self.query_configuration(
                sql_query=self.sql_query,
                destination_table=self.destination_table,
                create_disposition=self.create_disposition,
                write_disposition=self.write_disposition,
                time_partitioning=self.time_partitioning
            )
        )
        return bigquery_job_operator
| 43.395349 | 96 | 0.695606 | from typing import Optional
from airflow_concert.motif.motif_base import MotifBase
from airflow_concert.motif.mixins.bigquery_job import BigQueryJobMixin, BigQueryTimePartitioning
from airflow_concert.phrase.protocols import PExecuteQueryMotif
class BigQueryQueryJobMotif(MotifBase, BigQueryJobMixin, PExecuteQueryMotif):
def __init__(self, sql_query=None, config=None, name=None,
destination_table=None,
create_disposition=None,
write_disposition=None,
time_partitioning: Optional[BigQueryTimePartitioning] = None):
super().__init__(name=name, config=config)
self.sql_query = sql_query
self.destination_table = destination_table
self.create_disposition = create_disposition
self.write_disposition = write_disposition
self.time_partitioning = time_partitioning
def setup(self, sql_query,
destination_table=None,
create_disposition="CREATE_IF_NEEDED",
write_disposition=None,
time_partitioning: Optional[BigQueryTimePartitioning] = None):
self.sql_query = sql_query
self.destination_table = destination_table
self.create_disposition = create_disposition
self.write_disposition = write_disposition
self.time_partitioning = time_partitioning
def build(self, dag, phrase_group):
bigquery_job_operator = self.insert_job_operator(
dag, phrase_group,
self.query_configuration(
sql_query=self.sql_query,
destination_table=self.destination_table,
create_disposition=self.create_disposition,
write_disposition=self.write_disposition,
time_partitioning=self.time_partitioning
)
)
return bigquery_job_operator
| true | true |
1c2e2cd1ed9af9f97446de32934f76fb590241e6 | 441 | py | Python | githistorydata/filechanges.py | webedx-spark/git-history-data | 14b60ab229b840c14be37d48aade28e462f29cb0 | [
"BSD-2-Clause"
] | 15 | 2015-12-30T03:01:59.000Z | 2021-04-18T12:32:36.000Z | githistorydata/filechanges.py | webedx-spark/git-history-data | 14b60ab229b840c14be37d48aade28e462f29cb0 | [
"BSD-2-Clause"
] | 2 | 2016-02-08T18:37:26.000Z | 2020-01-28T09:51:58.000Z | githistorydata/filechanges.py | webedx-spark/git-history-data | 14b60ab229b840c14be37d48aade28e462f29cb0 | [
"BSD-2-Clause"
] | 6 | 2016-01-09T02:57:33.000Z | 2021-05-27T00:55:01.000Z |
class FileChanges( object ):
    """Line-change statistics for a single file in a commit.

    Attributes:
        added: number of lines added.
        removed: number of lines removed.
        name: path of the file the changes apply to.
    """
    def __init__( self, added, removed, name ):
        self.added = added
        self.removed = removed
        self.name = name
    def __str__( self ):
        return "+%d -%d %s" % ( self.added, self.removed, self.name )
    def __repr__( self ):
        # Unambiguous form for debugging and test-failure output.
        return "FileChanges( %r, %r, %r )" % ( self.added, self.removed, self.name )
    def __eq__( self, other ):
        # Bug fix: comparing against a non-FileChanges object used to raise
        # AttributeError; returning NotImplemented lets Python fall back to
        # its default handling (usually False for ==).
        if not isinstance( other, FileChanges ):
            return NotImplemented
        return (
            self.added == other.added
            and self.removed == other.removed
            and self.name == other.name
        )
| 24.5 | 69 | 0.537415 |
class FileChanges( object ):
def __init__( self, added, removed, name ):
self.added = added
self.removed = removed
self.name = name
def __str__( self ):
return "+%d -%d %s" % ( self.added, self.removed, self.name )
def __eq__( self, other ):
return (
self.added == other.added
and self.removed == other.removed
and self.name == other.name
)
| true | true |
1c2e2f46711fc360cb68a5335f3b09e7bd551c6b | 3,763 | py | Python | server/fullstackChallenge/settings.py | thiagobrez/newsWebsite | 130f01d29dd776eaa096080982274bb27d19ad8f | [
"MIT"
] | null | null | null | server/fullstackChallenge/settings.py | thiagobrez/newsWebsite | 130f01d29dd776eaa096080982274bb27d19ad8f | [
"MIT"
] | 7 | 2020-09-07T18:44:00.000Z | 2022-02-10T19:05:41.000Z | server/fullstackChallenge/settings.py | thiagobrez/newsWebsite | 130f01d29dd776eaa096080982274bb27d19ad8f | [
"MIT"
] | null | null | null | """
Django settings for fullstackChallenge project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Secrets and environment-specific flags come from the environment / .env
# file via python-decouple; nothing sensitive is hard-coded here.
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', cast=bool)
# PostgreSQL connection, fully parameterized through the environment.
# PORT is left empty so the driver uses its default.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': config('DB_NAME'),
        'USER': config('DB_USER'),
        'PASSWORD': config('DB_PASSWORD'),
        'HOST': config('DB_HOST'),
        'PORT': '',
    }
}
# NOTE(review): an empty ALLOWED_HOSTS only works while DEBUG is True; add
# the production hostnames here before deploying with DEBUG=False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'newsWebsite',
    'rest_framework',
    'corsheaders',
    'rest_framework.authtoken',
]
# CorsMiddleware is listed first so CORS headers are applied before any
# other middleware can produce a response.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'fullstackChallenge.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'fullstackChallenge.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'America/Sao_Paulo'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# DRF defaults: header-based API versioning, limit/offset pagination, and
# token authentication with authenticated-only access.
REST_FRAMEWORK = {
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',
    ],
}
# NOTE(review): allowing every CORS origin is convenient for development but
# should be narrowed for production.
CORS_ORIGIN_ALLOW_ALL = True
CELERY_BROKER_URL = 'amqp://localhost'
# 86400 seconds = 24 hours: fetch fresh news once a day via Celery beat.
CELERY_BEAT_SCHEDULE = {
    'get-news-every-24-hours': {
        'task': 'newsWebsite.tasks.get_news',
        'schedule': 86400
    },
}
| 25.598639 | 91 | 0.688546 |
import os
from decouple import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', cast=bool)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'newsWebsite',
'rest_framework',
'corsheaders',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'fullstackChallenge.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fullstackChallenge.wsgi.application'
S = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
REST_FRAMEWORK = {
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
CORS_ORIGIN_ALLOW_ALL = True
CELERY_BROKER_URL = 'amqp://localhost'
CELERY_BEAT_SCHEDULE = {
'get-news-every-24-hours': {
'task': 'newsWebsite.tasks.get_news',
'schedule': 86400
},
}
| true | true |
1c2e310f57502b89e03733838eb5436fa364367d | 3,051 | py | Python | tests/test_tutorial/test_handling_errors/test_tutorial002.py | 0x20bf-org/fastapi | 46a1d68387b2bfb6513bfe956e84fc99767d737a | [
"MIT"
] | null | null | null | tests/test_tutorial/test_handling_errors/test_tutorial002.py | 0x20bf-org/fastapi | 46a1d68387b2bfb6513bfe956e84fc99767d737a | [
"MIT"
] | null | null | null | tests/test_tutorial/test_handling_errors/test_tutorial002.py | 0x20bf-org/fastapi | 46a1d68387b2bfb6513bfe956e84fc99767d737a | [
"MIT"
] | null | null | null | from docs_src.handling_errors.tutorial002 import app
from fastapi.testclient import TestClient
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items-header/{item_id}": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Item Header",
"operationId": "read_item_header_items_header__item_id__get",
"parameters": [
{
"required": True,
"schema": {"title": "Item Id", "type": "string"},
"name": "item_id",
"in": "path",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
    """The generated OpenAPI document must equal the expected schema verbatim."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
def test_get_item_header():
    """A known item id returns 200 with the item payload."""
    resp = client.get("/items-header/foo")
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"item": "The Foo Wrestlers"}
def test_get_item_not_found_header():
    """An unknown item id yields 404 with the custom error header and detail."""
    resp = client.get("/items-header/bar")
    assert resp.status_code == 404, resp.text
    assert resp.headers.get("x-error") == "There goes my error"
    assert resp.json() == {"detail": "Item not found"}
| 33.9 | 86 | 0.405113 | from docs_src.handling_errors.tutorial002 import app
from fastapi.testclient import TestClient
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items-header/{item_id}": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Item Header",
"operationId": "read_item_header_items_header__item_id__get",
"parameters": [
{
"required": True,
"schema": {"title": "Item Id", "type": "string"},
"name": "item_id",
"in": "path",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_get_item_header():
response = client.get("/items-header/foo")
assert response.status_code == 200, response.text
assert response.json() == {"item": "The Foo Wrestlers"}
def test_get_item_not_found_header():
response = client.get("/items-header/bar")
assert response.status_code == 404, response.text
assert response.headers.get("x-error") == "There goes my error"
assert response.json() == {"detail": "Item not found"}
| true | true |
1c2e318001a9ef9165216d8e65a780d5afcf3532 | 3,618 | py | Python | pycritty/commands/run.py | binRick/pycritty | ae27e61fe597c22e6830d62533e11d64bf06a3ae | [
"MIT"
] | null | null | null | pycritty/commands/run.py | binRick/pycritty | ae27e61fe597c22e6830d62533e11d64bf06a3ae | [
"MIT"
] | null | null | null | pycritty/commands/run.py | binRick/pycritty | ae27e61fe597c22e6830d62533e11d64bf06a3ae | [
"MIT"
] | null | null | null | from typing import Dict, Any, Union
import sys
from pathlib import Path
from .. import PycrittyError
from .command import Command
from ..io import log, yio
from ..resources import config_file, saves_dir
from ..resources.resource import ConfigFile
from rich import print, pretty, inspect
from rich.console import Console
from .pycritty import Pycritty
console = Console()
class RunConfig(Command):
    """Save a named copy of the current config and launch it over SSH."""

    def run_config(
        self,
        config_name: str,
        read_from: Union[str, Path, ConfigFile] = config_file,
        dest_parent=saves_dir,
        override=False
    ):
        """Copy the YAML config at ``read_from`` into the saves directory.

        Args:
            config_name: name the saved config is stored under.
            read_from: source config file (defaults to the active config).
            dest_parent: directory resource the save is written into.
            override: overwrite an existing save of the same name.

        Raises:
            PycrittyError: the target already exists and ``override`` is False.
        """
        dest_file = ConfigFile(dest_parent.get_or_create(), config_name, ConfigFile.YAML)
        if dest_file.exists() and not override:
            raise PycrittyError(
                f'Config "{config_name}" already exists, use -o to override'
            )
        conf = yio.read_yaml(read_from)
        if conf is None or len(conf) < 1:
            # Nothing to save — warn instead of writing an empty file.
            log.warn(f'"{read_from}" has no content')
        else:
            dest_file.create()
            yio.write_yaml(conf, dest_file)
            log.ok('Config saved =>', log.Color.BLUE, dest_file)

    def execute(self, args: Dict[str, Any]):
        """Apply the requested ``change_*`` options and print the SSH command.

        Builds a ``Pycritty`` config from the CLI options, rewrites its shell
        section so the terminal runs the configured command over SSH through
        ``env bash``, and prints the resulting command line.
        """
        new_conf = Pycritty()

        # Apply each option the caller supplied; absent keys are ignored.
        # (Dead `if False:`/`if True:` scaffolding from debugging removed.)
        if 'change_base_config' in args:
            new_conf.base_config = args['change_base_config']
        if 'change_host' in args:
            new_conf.change_host(args['change_host'])
        if 'change_font' in args:
            new_conf.font = args['change_font']
        if 'change_shell' in args:
            new_conf.shell = args['change_shell']
        if 'change_user' in args:
            new_conf.change_user(args['change_user'])
        if 'change_font_size' in args:
            new_conf.font_size = args['change_font_size']
        if 'change_theme' in args:
            new_conf.theme = args['change_theme']
        if 'change_args' in args:
            new_conf.change_args(args['change_args'])
        if 'change_position' in args:
            new_conf.change_position(args['change_position'])

        # Wrap the configured shell command in an SSH invocation, then make
        # the terminal run that wrapped command through ``env bash``.
        exec_cmd = new_conf.get_ssh_cmd()
        exec_cmd = f"{exec_cmd} \"{new_conf.config['shell']['program']}"
        for shell_arg in new_conf.config['shell']['args']:
            exec_cmd = f"{exec_cmd} {shell_arg}"
        exec_cmd = f"{exec_cmd}\""
        print(exec_cmd)
        new_conf.config['shell']['args'] = ['bash', '--noprofile', '--norc', exec_cmd]
        new_conf.config['shell']['program'] = 'env'

        print(new_conf.config['shell'])
        print('args>', dict(args))
        console.print(f"\nRUNNING > :smiley: \n#{exec_cmd}\n", style="bold yellow")
| 38.084211 | 89 | 0.571034 | from typing import Dict, Any, Union
import sys
from pathlib import Path
from .. import PycrittyError
from .command import Command
from ..io import log, yio
from ..resources import config_file, saves_dir
from ..resources.resource import ConfigFile
from rich import print, pretty, inspect
from rich.console import Console
from .pycritty import Pycritty
console = Console()
class RunConfig(Command):
    """Save the active alacritty config under a name and run it over SSH.

    ``run_config`` snapshots a YAML config into the saves directory;
    ``execute`` applies CLI overrides to a fresh :class:`Pycritty` config
    and rewrites its shell entry so the session is launched through ssh.
    """

    def run_config(
        self,
        config_name: str,
        read_from: Union[str, Path, ConfigFile] = config_file,
        dest_parent=saves_dir,
        override=False
    ):
        """Copy *read_from* into *dest_parent* as ``<config_name>.yaml``.

        Raises:
            PycrittyError: if the destination already exists and
                *override* is False.
        """
        dest_file = ConfigFile(dest_parent.get_or_create(), config_name, ConfigFile.YAML)
        if dest_file.exists() and not override:
            raise PycrittyError(
                f'Config "{config_name}" already exists, use -o to override'
            )
        conf = yio.read_yaml(read_from)
        if conf is None or len(conf) < 1:
            # Nothing worth saving: warn instead of writing an empty file.
            log.warn(f'"{read_from}" has no content')
        else:
            dest_file.create()
            yio.write_yaml(conf, dest_file)
            log.ok('Config saved =>', log.Color.BLUE, dest_file)
        # NOTE(review): leftover rich demo output; kept so CLI output is
        # unchanged — confirm before removing.
        console.print("Hello", "World!", style="bold red")
        console.print(":smiley: :vampire: :pile_of_poo: :thumbs_up: :raccoon:")

    def execute(self, args: Dict[str, Any]):
        """Apply the recognised overrides in *args* (all optional) to a
        fresh Pycritty config, then rewrite its shell command to run the
        session through ssh.
        """
        new_conf = Pycritty()
        opts = dict(args)
        if 'change_base_config' in opts:
            new_conf.base_config = opts['change_base_config']
        if 'change_host' in opts:
            new_conf.change_host(opts['change_host'])
        if 'change_font' in opts:
            new_conf.font = opts['change_font']
        if 'change_shell' in opts:
            new_conf.shell = opts['change_shell']
        if 'change_user' in opts:
            new_conf.change_user(opts['change_user'])
        if 'change_font_size' in opts:
            new_conf.font_size = opts['change_font_size']
        if 'change_theme' in opts:
            new_conf.theme = opts['change_theme']
        if 'change_args' in opts:
            new_conf.change_args(opts['change_args'])
        if 'change_position' in opts:
            new_conf.change_position(opts['change_position'])
        # Quote the remote program and its args so the whole remote command
        # travels as a single ssh argument:  <ssh cmd> "<program> <a1> <a2>..."
        exec_cmd = new_conf.get_ssh_cmd()
        exec_cmd = f"{exec_cmd} \"{new_conf.config['shell']['program']}"
        for a in new_conf.config['shell']['args']:
            exec_cmd = f"{exec_cmd} {a}"
        exec_cmd = f"{exec_cmd}\""
        print(exec_cmd)
        # Re-point the local shell at the assembled ssh command line.
        shell_args = ['bash', '--noprofile', '--norc', exec_cmd]
        new_conf.config['shell']['args'] = shell_args
        new_conf.config['shell']['program'] = 'env'
        print(new_conf.config['shell'])
        print('args>', dict(args))
        console.print(f"\nRUNNING > :smiley: \n#{exec_cmd}\n", style="bold yellow")
| true | true |
1c2e31e4d91019f9a9d7ed49e73b96ac5eae1835 | 1,715 | py | Python | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716225144.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716225144.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716225144.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ Decorator Parametors
In the previous videos we saw some built-in decorators that can handle some arguments:
@wraps(fn) @lru_cache(maxsize=256) <\
def inner(): def factorial(n): \
... ... \>function call
This should look quite different from the decorators we have been creating and using:
@timed <----------- no function call
def Fibonacci(n):
...
"""
from symbol import parameters
from time import perf_counter
from unittest import result
def timed(fn):
    """Decorator: run *fn* 10 times per call, print the average elapsed
    time in seconds, and return the result of the last run.

    Fixes two NameErrors in the original: the inner signature declared
    ``*arhs, **kwarrgs`` but used ``args``/``kwargs``, and the accumulator
    was initialised as ``total_elapse`` but updated as ``total_elapsed``.
    """
    from time import perf_counter

    def inner(*args, **kwargs):
        total_elapsed = 0
        # The repetition count is hard-coded; the lesson text below shows
        # how to turn it into a decorator parameter.
        for i in range(10):
            start = perf_counter()
            result = fn(*args, **kwargs)
            total_elapsed += (perf_counter() - start)
        avg_elapsed = total_elapsed / 10
        print(avg_elapsed)
        return result
    return inner
"""
@timed
def my_func(): or my_func = timed(my_func)
...
On e Approach to passing (line 24) as a parameter
/ < extra parameter
def timed(fn, reps):
from time import perf_counter
def inner(*args, **kwargs):
total_elapsed = 0 / free variable
for i in range(reps): <
start = perf_counter()
result = fn(*ars, **kwargs)
total_elapsed += (perf_counter() - start)
avg_elapsed = total_elapsed / reps
print(avg_elapsed)
return result
return inner
my_func = timed(my_func, 10) # @timed
# def my_func():
...
"""
| 27.66129 | 89 | 0.542857 | from symbol import parameters
from time import perf_counter
from unittest import result
def timed(fn):
from time import perf_counter
def inner(*arhs, **kwarrgs):
total_elapse = 0
for i in range(10): nter()
result = fn(*args, **kwargs)
total_elapsed += (perf_counter() - start)
avg_elapsed = total_elapsed / 10
print(avg_elapsed)
return result
return inner
| true | true |
1c2e3231607b33d5619c4351426be2731fa7fda7 | 1,272 | py | Python | setup.py | aiven/aiven-db-migrate | e3751aadfc8ad9a011d64ba5dec96118e52c68b2 | [
"Apache-2.0"
] | 10 | 2020-08-03T17:45:18.000Z | 2022-02-18T00:12:17.000Z | setup.py | aiven/aiven-db-migrate | e3751aadfc8ad9a011d64ba5dec96118e52c68b2 | [
"Apache-2.0"
] | 12 | 2020-08-27T15:15:54.000Z | 2022-02-24T16:11:18.000Z | setup.py | aiven/aiven-db-migrate | e3751aadfc8ad9a011d64ba5dec96118e52c68b2 | [
"Apache-2.0"
] | 1 | 2022-01-07T12:00:17.000Z | 2022-01-07T12:00:17.000Z | # Copyright (c) 2020 Aiven, Helsinki, Finland. https://aiven.io/
from importlib.machinery import SourceFileLoader
from setuptools import find_packages, setup
import sys
def get_version():
    """Return ``__version__`` as defined in aiven_db_migrate/migrate/version.py.

    The module is loaded straight from its file path so the version can be
    read without importing (and thus installing) the package itself.
    """
    loader = SourceFileLoader("version", "aiven_db_migrate/migrate/version.py")
    version_module = loader.load_module()
    return version_module.__version__
# Package metadata and build configuration for aiven-db-migrate.
setup(
    author="Aiven",
    author_email="support@aiven.io",
    # Installs the `pg_migrate` console command, wired to pgmigrate:main.
    entry_points={
        "console_scripts": [
            "pg_migrate = aiven_db_migrate.migrate.pgmigrate:main",
        ],
    },
    install_requires=[
        "psycopg2"
    ],
    license="Apache 2.0",
    name="aiven-db-migrate",
    packages=find_packages(exclude=["test"]),
    platforms=["POSIX", "MacOS", "Windows"],
    description="Aiven database migration tool",
    # NOTE(review): the README handle is never closed; harmless in a
    # short-lived setup.py process, but worth confirming intent.
    long_description=open("README.md").read(),
    url="https://aiven.io/",
    # Version is read from aiven_db_migrate/migrate/version.py (see above).
    version=get_version(),
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
| 29.581395 | 103 | 0.636006 |
from importlib.machinery import SourceFileLoader
from setuptools import find_packages, setup
import sys
def get_version():
return SourceFileLoader("version", "aiven_db_migrate/migrate/version.py").load_module().__version__
setup(
author="Aiven",
author_email="support@aiven.io",
entry_points={
"console_scripts": [
"pg_migrate = aiven_db_migrate.migrate.pgmigrate:main",
],
},
install_requires=[
"psycopg2"
],
license="Apache 2.0",
name="aiven-db-migrate",
packages=find_packages(exclude=["test"]),
platforms=["POSIX", "MacOS", "Windows"],
description="Aiven database migration tool",
long_description=open("README.md").read(),
url="https://aiven.io/",
version=get_version(),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| true | true |
1c2e3323d8a8bfa2b1d3b5d79d47be08567a9379 | 16,309 | py | Python | heat/engine/parser.py | devcamcar/heat | 0f1bd5d29102318e62b5a10281d809807bd3b163 | [
"Apache-2.0"
] | 1 | 2015-05-11T04:54:30.000Z | 2015-05-11T04:54:30.000Z | heat/engine/parser.py | devcamcar/heat | 0f1bd5d29102318e62b5a10281d809807bd3b163 | [
"Apache-2.0"
] | null | null | null | heat/engine/parser.py | devcamcar/heat | 0f1bd5d29102318e62b5a10281d809807bd3b163 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import json
import logging
import sys
from heat.common import exception
from heat.engine import resources
from heat.engine import instance
from heat.engine import volume
from heat.engine import eip
from heat.engine import security_group
from heat.engine import wait_condition
from heat.db import api as db_api
logger = logging.getLogger(__file__)
class Stack(object):
    """In-memory representation of a parsed CloudFormation-style template.

    Builds one resource object per entry in the template's ``Resources``
    section, validates/creates/deletes them in dependency order, and
    resolves the template's intrinsic functions (Ref, Fn::GetAtt,
    Fn::FindInMap, Fn::Join, Fn::Base64).

    Python 2 code: relies on ``basestring``/``unicode``.
    """

    # Stack status constants; status_set() mirrors the current one into
    # the parsed template as 'stack_status'.
    IN_PROGRESS = 'IN_PROGRESS'
    CREATE_FAILED = 'CREATE_FAILED'
    CREATE_COMPLETE = 'CREATE_COMPLETE'
    DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS'
    DELETE_FAILED = 'DELETE_FAILED'
    DELETE_COMPLETE = 'DELETE_COMPLETE'

    def __init__(self, stack_name, template, stack_id=0, parms=None,
            metadata_server=None):
        '''
        Parse *template* (an already-deserialized dict) and instantiate a
        resource object for every entry in its Resources section.

        stack_name      -- also exposed to templates as AWS::StackName.
        stack_id        -- database id (0 when not yet persisted).
        parms           -- user parameters; must contain 'KeyStoneCreds'
                           (a dict or its string repr).
        metadata_server -- metadata server URL handed to resources.
        '''
        self.id = stack_id
        self.t = template
        self.parms = self.t.get('Parameters', {})
        self.maps = self.t.get('Mappings', {})
        self.outputs = self.t.get('Outputs', {})
        self.res = {}
        self.doc = None
        self.name = stack_name
        self.parsed_template_id = 0
        self.metadata_server = metadata_server

        # Pseudo parameters made available to every template.
        self.parms['AWS::StackName'] = {"Description": "AWS StackName",
            "Type": "String",
            "Value": stack_name}

        self.parms['AWS::Region'] = {"Description": "AWS Regions",
            "Type": "String",
            "Default": "ap-southeast-1",
            "AllowedValues": ["us-east-1", "us-west-1", "us-west-2",
                              "sa-east-1", "eu-west-1", "ap-southeast-1",
                              "ap-northeast-1"],
            "ConstraintDescription": "must be a valid EC2 instance type."}

        if parms != None:
            self._apply_user_parameters(parms)
        # NOTE(review): the lines below dereference parms unconditionally,
        # so parms=None (the declared default) would raise here — confirm
        # callers always pass parms.
        if isinstance(parms['KeyStoneCreds'], (basestring, unicode)):
            # HACK: credentials may arrive as the string repr of a dict.
            self.creds = eval(parms['KeyStoneCreds'])
        else:
            self.creds = parms['KeyStoneCreds']

        self.resources = {}
        for r in self.t['Resources']:
            # Dispatch on the template resource Type; anything unknown
            # becomes a GenericResource. (Shadows the builtin 'type'.)
            type = self.t['Resources'][r]['Type']
            if type == 'AWS::EC2::Instance':
                self.resources[r] = instance.Instance(r,
                    self.t['Resources'][r], self)
            elif type == 'AWS::EC2::Volume':
                self.resources[r] = volume.Volume(r,
                    self.t['Resources'][r], self)
            elif type == 'AWS::EC2::VolumeAttachment':
                self.resources[r] = volume.VolumeAttachment(r,
                    self.t['Resources'][r], self)
            elif type == 'AWS::EC2::EIP':
                self.resources[r] = eip.ElasticIp(r,
                    self.t['Resources'][r], self)
            elif type == 'AWS::EC2::EIPAssociation':
                self.resources[r] = eip.ElasticIpAssociation(r,
                    self.t['Resources'][r], self)
            elif type == 'AWS::EC2::SecurityGroup':
                self.resources[r] = security_group.SecurityGroup(r,
                    self.t['Resources'][r], self)
            elif type == 'AWS::CloudFormation::WaitConditionHandle':
                self.resources[r] = wait_condition.WaitConditionHandle(r,
                    self.t['Resources'][r], self)
            elif type == 'AWS::CloudFormation::WaitCondition':
                self.resources[r] = wait_condition.WaitCondition(r,
                    self.t['Resources'][r], self)
            else:
                self.resources[r] = resources.GenericResource(r,
                    self.t['Resources'][r], self)

            self.calulate_dependencies(self.t['Resources'][r],
                self.resources[r])

    def validate(self):
        '''
        http://docs.amazonwebservices.com/AWSCloudFormation/latest/ \
        APIReference/API_ValidateTemplate.html

        Returns a ValidateTemplateResult dict: an error description when
        the template is malformed, otherwise a success message plus one
        member entry per template parameter.
        '''
        # TODO(sdake) Should return line number of invalid reference
        response = None
        try:
            order = self.get_create_order()
        except KeyError:
            # A Ref pointed at a key that exists nowhere in the template.
            res = 'A Ref operation referenced a non-existent key '\
                  '[%s]' % sys.exc_value

            response = {'ValidateTemplateResult': {
                        'Description': 'Malformed Query Response [%s]' % (res),
                        'Parameters': []}}
            return response

        for r in order:
            try:
                res = self.resources[r].validate()
                if res:
                    err_str = 'Malformed Query Response [%s]' % (res)
                    response = {'ValidateTemplateResult': {
                                'Description': err_str,
                                'Parameters': []}}
                    return response
            except Exception as ex:
                # NOTE(review): 'failed' is set but never read; exceptions
                # during per-resource validation are only logged.
                logger.exception('validate')
                failed = True

        if response == None:
            response = {'ValidateTemplateResult': {
                        'Description': 'Successfully validated',
                        'Parameters': []}}
        # Echo every parameter back in the AWS "member" list format.
        for p in self.parms:
            jp = {'member': {}}
            res = jp['member']
            res['NoEcho'] = 'false'
            res['ParameterKey'] = p
            res['Description'] = self.parms[p].get('Description', '')
            res['DefaultValue'] = self.parms[p].get('Default', '')
            response['ValidateTemplateResult']['Parameters'].append(res)
        return response

    def resource_append_deps(self, resource, order_list):
        '''
        For the given resource first append it's dependancies then
        it's self to order_list (depth-first, duplicates skipped).
        '''
        for r in resource.depends_on:
            self.resource_append_deps(self.resources[r], order_list)
        if not resource.name in order_list:
            order_list.append(resource.name)

    def get_create_order(self):
        '''
        return a list of Resource names in the correct order
        for startup. Dependency-free Volumes and EIPs are seeded first so
        they exist before anything that attaches to them.
        '''
        order = []
        for r in self.t['Resources']:
            if self.t['Resources'][r]['Type'] == 'AWS::EC2::Volume' or \
               self.t['Resources'][r]['Type'] == 'AWS::EC2::EIP':
                if len(self.resources[r].depends_on) == 0:
                    order.append(r)

        for r in self.t['Resources']:
            self.resource_append_deps(self.resources[r], order)

        return order

    def update_parsed_template(self):
        '''
        Update the parsed template after each resource has been
        created, so commands like describe will work.
        '''
        # Lazily look up the parsed-template row id on first use.
        if self.parsed_template_id == 0:
            stack = db_api.stack_get(None, self.name)
            if stack:
                self.parsed_template_id = stack.raw_template.parsed_template.id
            else:
                return

        pt = db_api.parsed_template_get(None, self.parsed_template_id)
        if pt:
            pt.template = self.t
            pt.save()
        else:
            logger.warn('Cant find parsed template to update %d' % \
                        self.parsed_template_id)

    def status_set(self, new_status, reason='change in resource state'):
        '''Record the stack status in the template and persist it.'''
        self.t['stack_status'] = new_status
        self.update_parsed_template()

    def create_blocking(self):
        '''
        create all the resources in the order specified by get_create_order.
        After the first failure, remaining resources are only marked
        CREATE_FAILED rather than created.
        '''
        order = self.get_create_order()
        failed = False
        self.status_set(self.IN_PROGRESS)

        for r in order:
            failed_str = self.resources[r].CREATE_FAILED
            if not failed:
                try:
                    self.resources[r].create()
                except Exception as ex:
                    logger.exception('create')
                    failed = True
                    self.resources[r].state_set(failed_str, str(ex))
                try:
                    # Keep the persisted template in sync after each step.
                    self.update_parsed_template()
                except Exception as ex:
                    logger.exception('update_parsed_template')
            else:
                self.resources[r].state_set(failed_str)

        if failed:
            self.status_set(self.CREATE_FAILED)
        else:
            self.status_set(self.CREATE_COMPLETE)
            self.update_parsed_template()

    def create(self):
        '''Run create_blocking() asynchronously on an eventlet green thread.'''
        pool = eventlet.GreenPool()
        pool.spawn_n(self.create_blocking)

    def delete_blocking(self):
        '''
        delete all the resources in the reverse order specified by
        get_create_order(). Per-resource failures are logged and skipped;
        the stack row is removed regardless.
        '''
        self.status_set(self.DELETE_IN_PROGRESS)
        order = self.get_create_order()
        order.reverse()
        for r in order:
            try:
                self.resources[r].delete()
                db_api.resource_get(None, self.resources[r].id).delete()
            except Exception as ex:
                logger.error('delete: %s' % str(ex))
        db_api.stack_delete(None, self.name)
        self.status_set(self.DELETE_COMPLETE)

    def delete(self):
        '''Run delete_blocking() asynchronously on an eventlet green thread.'''
        pool = eventlet.GreenPool()
        pool.spawn_n(self.delete_blocking)

    def get_outputs(self):
        '''
        Reload every resource, resolve the Outputs section in place, and
        return it as a list of {Description, OutputKey, OutputValue} dicts.
        '''
        for r in self.resources:
            self.resources[r].reload()

        self.resolve_attributes(self.outputs)
        self.resolve_joins(self.outputs)

        outs = []
        for o in self.outputs:
            out = {}
            out['Description'] = self.outputs[o].get('Description',
                                                     'No description given')
            out['OutputKey'] = o
            out['OutputValue'] = self.outputs[o].get('Value', '')
            outs.append(out)

        return outs

    def calulate_dependencies(self, s, r):
        '''
        Walk template snippet *s* recording Ref targets as dependencies of
        resource *r*. (Name is misspelled but kept: it is called externally.)
        '''
        if isinstance(s, dict):
            for i in s:
                if i == 'Fn::GetAtt':
                    # GetAtt deliberately not treated as a dependency.
                    #print '%s seems to depend on %s' % (r.name, s[i][0])
                    #r.depends_on.append(s[i][0])
                    pass
                elif i == 'Ref':
                    #print '%s Refences %s' % (r.name, s[i])
                    r.depends_on.append(s[i])
                # NOTE(review): the 'Ref' half of this condition is
                # unreachable (caught by the branch above); only
                # 'DependsOn' can match here.
                elif i == 'DependsOn' or i == 'Ref':
                    #print '%s DependsOn on %s' % (r.name, s[i])
                    r.depends_on.append(s[i])
                else:
                    self.calulate_dependencies(s[i], r)
        elif isinstance(s, list):
            for index, item in enumerate(s):
                self.calulate_dependencies(item, r)

    def _apply_user_parameter(self, key, value):
        '''Set (creating if needed) the Value of one template parameter.'''
        logger.debug('appling user parameter %s=%s ' % (key, value))
        if not key in self.parms:
            self.parms[key] = {}
        self.parms[key]['Value'] = value

    def _apply_user_parameters(self, parms):
        '''
        Apply parameters given in the AWS query form
        Parameters.member.<n>.ParameterKey / .ParameterValue.
        '''
        for p in parms:
            if 'Parameters.member.' in p and 'ParameterKey' in p:
                s = p.split('.')
                try:
                    key_name = 'Parameters.member.%s.ParameterKey' % s[2]
                    value_name = 'Parameters.member.%s.ParameterValue' % s[2]
                    self._apply_user_parameter(parms[key_name],
                                               parms[value_name])
                except Exception:
                    logger.error('Could not apply parameter %s' % p)

    def parameter_get(self, key):
        '''Return the user-supplied Value for *key*, else its Default.

        Raises UserParameterMissing when neither is available.
        '''
        if self.parms[key] == None:
            raise exception.UserParameterMissing(key=key)
        elif 'Value' in self.parms[key]:
            return self.parms[key]['Value']
        elif 'Default' in self.parms[key]:
            return self.parms[key]['Default']
        else:
            raise exception.UserParameterMissing(key=key)

    def resolve_static_refs(self, s):
        '''
        looking for { "Ref": "str" } where "str" names a template
        parameter; replaced in place with the parameter's value.
        '''
        if isinstance(s, dict):
            for i in s:
                if i == 'Ref' and \
                   isinstance(s[i], (basestring, unicode)) and \
                   s[i] in self.parms:
                    return self.parameter_get(s[i])
                else:
                    s[i] = self.resolve_static_refs(s[i])
        elif isinstance(s, list):
            for index, item in enumerate(s):
                #print 'resolve_static_refs %d %s' % (index, item)
                s[index] = self.resolve_static_refs(item)
        return s

    def resolve_find_in_map(self, s):
        '''
        looking for { "Fn::FindInMap": ["str", "str"] } and replacing it
        with the value reached by walking self.maps with those keys.
        '''
        if isinstance(s, dict):
            for i in s:
                if i == 'Fn::FindInMap':
                    obj = self.maps
                    if isinstance(s[i], list):
                        #print 'map list: %s' % s[i]
                        for index, item in enumerate(s[i]):
                            if isinstance(item, dict):
                                # Keys may themselves be FindInMap results.
                                item = self.resolve_find_in_map(item)
                                #print 'map item dict: %s' % (item)
                            else:
                                pass
                                #print 'map item str: %s' % (item)
                            obj = obj[item]
                    else:
                        obj = obj[s[i]]
                    return obj
                else:
                    s[i] = self.resolve_find_in_map(s[i])
        elif isinstance(s, list):
            for index, item in enumerate(s):
                s[index] = self.resolve_find_in_map(item)
        return s

    def resolve_attributes(self, s):
        '''
        looking for something like:
        {"Fn::GetAtt" : ["DBInstance", "Endpoint.Address"]}
        and resource-level {"Ref": "<resource>"}.
        '''
        if isinstance(s, dict):
            for i in s:
                if i == 'Ref' and s[i] in self.resources:
                    return self.resources[s[i]].FnGetRefId()
                elif i == 'Fn::GetAtt':
                    resource_name = s[i][0]
                    key_name = s[i][1]
                    res = self.resources.get(resource_name)
                    rc = None
                    if res:
                        return res.FnGetAtt(key_name)
                    else:
                        raise exception.InvalidTemplateAttribute(
                                        resource=resource_name, key=key_name)
                    # NOTE(review): unreachable — both branches above exit.
                    return rc
                else:
                    s[i] = self.resolve_attributes(s[i])
        elif isinstance(s, list):
            for index, item in enumerate(s):
                s[index] = self.resolve_attributes(item)
        return s

    def resolve_joins(self, s):
        '''
        looking for { "Fn::Join": [delimiter, [parts...]] }; returns None
        (after logging) when the join itself fails.
        '''
        if isinstance(s, dict):
            for i in s:
                if i == 'Fn::Join':
                    j = None
                    try:
                        j = s[i][0].join(s[i][1])
                    except Exception:
                        logger.error('Could not join %s' % str(s[i]))
                    return j
                else:
                    s[i] = self.resolve_joins(s[i])
        elif isinstance(s, list):
            for index, item in enumerate(s):
                s[index] = self.resolve_joins(item)
        return s

    def resolve_base64(self, s):
        '''
        looking for { "Fn::Base64": "..." } — the payload is returned
        as-is, not actually base64-encoded.
        '''
        if isinstance(s, dict):
            for i in s:
                if i == 'Fn::Base64':
                    return s[i]
                else:
                    s[i] = self.resolve_base64(s[i])
        elif isinstance(s, list):
            for index, item in enumerate(s):
                s[index] = self.resolve_base64(item)
        return s
| 36.981859 | 79 | 0.499785 |
import eventlet
import json
import logging
import sys
from heat.common import exception
from heat.engine import resources
from heat.engine import instance
from heat.engine import volume
from heat.engine import eip
from heat.engine import security_group
from heat.engine import wait_condition
from heat.db import api as db_api
logger = logging.getLogger(__file__)
class Stack(object):
IN_PROGRESS = 'IN_PROGRESS'
CREATE_FAILED = 'CREATE_FAILED'
CREATE_COMPLETE = 'CREATE_COMPLETE'
DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS'
DELETE_FAILED = 'DELETE_FAILED'
DELETE_COMPLETE = 'DELETE_COMPLETE'
def __init__(self, stack_name, template, stack_id=0, parms=None,
metadata_server=None):
self.id = stack_id
self.t = template
self.parms = self.t.get('Parameters', {})
self.maps = self.t.get('Mappings', {})
self.outputs = self.t.get('Outputs', {})
self.res = {}
self.doc = None
self.name = stack_name
self.parsed_template_id = 0
self.metadata_server = metadata_server
self.parms['AWS::StackName'] = {"Description": "AWS StackName",
"Type": "String",
"Value": stack_name}
self.parms['AWS::Region'] = {"Description": "AWS Regions",
"Type": "String",
"Default": "ap-southeast-1",
"AllowedValues": ["us-east-1", "us-west-1", "us-west-2",
"sa-east-1", "eu-west-1", "ap-southeast-1",
"ap-northeast-1"],
"ConstraintDescription": "must be a valid EC2 instance type."}
if parms != None:
self._apply_user_parameters(parms)
if isinstance(parms['KeyStoneCreds'], (basestring, unicode)):
self.creds = eval(parms['KeyStoneCreds'])
else:
self.creds = parms['KeyStoneCreds']
self.resources = {}
for r in self.t['Resources']:
type = self.t['Resources'][r]['Type']
if type == 'AWS::EC2::Instance':
self.resources[r] = instance.Instance(r,
self.t['Resources'][r], self)
elif type == 'AWS::EC2::Volume':
self.resources[r] = volume.Volume(r,
self.t['Resources'][r], self)
elif type == 'AWS::EC2::VolumeAttachment':
self.resources[r] = volume.VolumeAttachment(r,
self.t['Resources'][r], self)
elif type == 'AWS::EC2::EIP':
self.resources[r] = eip.ElasticIp(r,
self.t['Resources'][r], self)
elif type == 'AWS::EC2::EIPAssociation':
self.resources[r] = eip.ElasticIpAssociation(r,
self.t['Resources'][r], self)
elif type == 'AWS::EC2::SecurityGroup':
self.resources[r] = security_group.SecurityGroup(r,
self.t['Resources'][r], self)
elif type == 'AWS::CloudFormation::WaitConditionHandle':
self.resources[r] = wait_condition.WaitConditionHandle(r,
self.t['Resources'][r], self)
elif type == 'AWS::CloudFormation::WaitCondition':
self.resources[r] = wait_condition.WaitCondition(r,
self.t['Resources'][r], self)
else:
self.resources[r] = resources.GenericResource(r,
self.t['Resources'][r], self)
self.calulate_dependencies(self.t['Resources'][r],
self.resources[r])
def validate(self):
response = None
try:
order = self.get_create_order()
except KeyError:
res = 'A Ref operation referenced a non-existent key '\
'[%s]' % sys.exc_value
response = {'ValidateTemplateResult': {
'Description': 'Malformed Query Response [%s]' % (res),
'Parameters': []}}
return response
for r in order:
try:
res = self.resources[r].validate()
if res:
err_str = 'Malformed Query Response [%s]' % (res)
response = {'ValidateTemplateResult': {
'Description': err_str,
'Parameters': []}}
return response
except Exception as ex:
logger.exception('validate')
failed = True
if response == None:
response = {'ValidateTemplateResult': {
'Description': 'Successfully validated',
'Parameters': []}}
for p in self.parms:
jp = {'member': {}}
res = jp['member']
res['NoEcho'] = 'false'
res['ParameterKey'] = p
res['Description'] = self.parms[p].get('Description', '')
res['DefaultValue'] = self.parms[p].get('Default', '')
response['ValidateTemplateResult']['Parameters'].append(res)
return response
def resource_append_deps(self, resource, order_list):
for r in resource.depends_on:
self.resource_append_deps(self.resources[r], order_list)
if not resource.name in order_list:
order_list.append(resource.name)
def get_create_order(self):
order = []
for r in self.t['Resources']:
if self.t['Resources'][r]['Type'] == 'AWS::EC2::Volume' or \
self.t['Resources'][r]['Type'] == 'AWS::EC2::EIP':
if len(self.resources[r].depends_on) == 0:
order.append(r)
for r in self.t['Resources']:
self.resource_append_deps(self.resources[r], order)
return order
def update_parsed_template(self):
if self.parsed_template_id == 0:
stack = db_api.stack_get(None, self.name)
if stack:
self.parsed_template_id = stack.raw_template.parsed_template.id
else:
return
pt = db_api.parsed_template_get(None, self.parsed_template_id)
if pt:
pt.template = self.t
pt.save()
else:
logger.warn('Cant find parsed template to update %d' % \
self.parsed_template_id)
def status_set(self, new_status, reason='change in resource state'):
self.t['stack_status'] = new_status
self.update_parsed_template()
def create_blocking(self):
order = self.get_create_order()
failed = False
self.status_set(self.IN_PROGRESS)
for r in order:
failed_str = self.resources[r].CREATE_FAILED
if not failed:
try:
self.resources[r].create()
except Exception as ex:
logger.exception('create')
failed = True
self.resources[r].state_set(failed_str, str(ex))
try:
self.update_parsed_template()
except Exception as ex:
logger.exception('update_parsed_template')
else:
self.resources[r].state_set(failed_str)
if failed:
self.status_set(self.CREATE_FAILED)
else:
self.status_set(self.CREATE_COMPLETE)
self.update_parsed_template()
def create(self):
pool = eventlet.GreenPool()
pool.spawn_n(self.create_blocking)
def delete_blocking(self):
self.status_set(self.DELETE_IN_PROGRESS)
order = self.get_create_order()
order.reverse()
for r in order:
try:
self.resources[r].delete()
db_api.resource_get(None, self.resources[r].id).delete()
except Exception as ex:
logger.error('delete: %s' % str(ex))
db_api.stack_delete(None, self.name)
self.status_set(self.DELETE_COMPLETE)
def delete(self):
pool = eventlet.GreenPool()
pool.spawn_n(self.delete_blocking)
def get_outputs(self):
for r in self.resources:
self.resources[r].reload()
self.resolve_attributes(self.outputs)
self.resolve_joins(self.outputs)
outs = []
for o in self.outputs:
out = {}
out['Description'] = self.outputs[o].get('Description',
'No description given')
out['OutputKey'] = o
out['OutputValue'] = self.outputs[o].get('Value', '')
outs.append(out)
return outs
def calulate_dependencies(self, s, r):
if isinstance(s, dict):
for i in s:
if i == 'Fn::GetAtt':
pass
elif i == 'Ref':
r.depends_on.append(s[i])
elif i == 'DependsOn' or i == 'Ref':
r.depends_on.append(s[i])
else:
self.calulate_dependencies(s[i], r)
elif isinstance(s, list):
for index, item in enumerate(s):
self.calulate_dependencies(item, r)
def _apply_user_parameter(self, key, value):
logger.debug('appling user parameter %s=%s ' % (key, value))
if not key in self.parms:
self.parms[key] = {}
self.parms[key]['Value'] = value
def _apply_user_parameters(self, parms):
for p in parms:
if 'Parameters.member.' in p and 'ParameterKey' in p:
s = p.split('.')
try:
key_name = 'Parameters.member.%s.ParameterKey' % s[2]
value_name = 'Parameters.member.%s.ParameterValue' % s[2]
self._apply_user_parameter(parms[key_name],
parms[value_name])
except Exception:
logger.error('Could not apply parameter %s' % p)
def parameter_get(self, key):
if self.parms[key] == None:
raise exception.UserParameterMissing(key=key)
elif 'Value' in self.parms[key]:
return self.parms[key]['Value']
elif 'Default' in self.parms[key]:
return self.parms[key]['Default']
else:
raise exception.UserParameterMissing(key=key)
def resolve_static_refs(self, s):
if isinstance(s, dict):
for i in s:
if i == 'Ref' and \
isinstance(s[i], (basestring, unicode)) and \
s[i] in self.parms:
return self.parameter_get(s[i])
else:
s[i] = self.resolve_static_refs(s[i])
elif isinstance(s, list):
for index, item in enumerate(s):
s[index] = self.resolve_static_refs(item)
return s
def resolve_find_in_map(self, s):
if isinstance(s, dict):
for i in s:
if i == 'Fn::FindInMap':
obj = self.maps
if isinstance(s[i], list):
for index, item in enumerate(s[i]):
if isinstance(item, dict):
item = self.resolve_find_in_map(item)
else:
pass
obj = obj[item]
else:
obj = obj[s[i]]
return obj
else:
s[i] = self.resolve_find_in_map(s[i])
elif isinstance(s, list):
for index, item in enumerate(s):
s[index] = self.resolve_find_in_map(item)
return s
def resolve_attributes(self, s):
if isinstance(s, dict):
for i in s:
if i == 'Ref' and s[i] in self.resources:
return self.resources[s[i]].FnGetRefId()
elif i == 'Fn::GetAtt':
resource_name = s[i][0]
key_name = s[i][1]
res = self.resources.get(resource_name)
rc = None
if res:
return res.FnGetAtt(key_name)
else:
raise exception.InvalidTemplateAttribute(
resource=resource_name, key=key_name)
return rc
else:
s[i] = self.resolve_attributes(s[i])
elif isinstance(s, list):
for index, item in enumerate(s):
s[index] = self.resolve_attributes(item)
return s
def resolve_joins(self, s):
if isinstance(s, dict):
for i in s:
if i == 'Fn::Join':
j = None
try:
j = s[i][0].join(s[i][1])
except Exception:
logger.error('Could not join %s' % str(s[i]))
return j
else:
s[i] = self.resolve_joins(s[i])
elif isinstance(s, list):
for index, item in enumerate(s):
s[index] = self.resolve_joins(item)
return s
def resolve_base64(self, s):
if isinstance(s, dict):
for i in s:
if i == 'Fn::Base64':
return s[i]
else:
s[i] = self.resolve_base64(s[i])
elif isinstance(s, list):
for index, item in enumerate(s):
s[index] = self.resolve_base64(item)
return s
| true | true |
1c2e33aae33d7676078b930aae7d76313c38fcbb | 2,971 | py | Python | main.py | YuhangSong/alley | c20111189d3e83b4a902140361089a7b1d11702a | [
"MIT"
] | null | null | null | main.py | YuhangSong/alley | c20111189d3e83b4a902140361089a7b1d11702a | [
"MIT"
] | null | null | null | main.py | YuhangSong/alley | c20111189d3e83b4a902140361089a7b1d11702a | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Simple example of using Multi-Agent and Hierarchical
(https://ray.readthedocs.io/en/latest/rllib-env.html#multi-agent-and-hierarchical)
from rllib to train an arena environment in ArenaRllibEnv.
"""
import argparse
import random
import time
import numpy as np
import ray
from ray import tune
from ray.rllib.utils import try_import_tf
from envs_layer import ArenaRllibEnv
# RLlib's guarded TensorFlow import — presumably None/placeholder when TF
# is missing; confirm against the installed RLlib version.
tf = try_import_tf()

# CLI: which Arena env to run, how to map agents to policies, and how many
# training iterations to perform.
parser = argparse.ArgumentParser()
parser.add_argument("--env-id", type=str, default="Test-Discrete")
parser.add_argument("--policy-assignment", type=str, default="independent")
parser.add_argument("--num-iters", type=int, default=20)

# Prefix for generated policy ids: "policy_0", "policy_1", ...
policy_id_prefix = "policy"
if __name__ == "__main__":
    args = parser.parse_args()

    ray.init()

    env_config = {
        "env_id": args.env_id,
    }
    # Instantiate one throwaway env just to read agent count and spaces.
    dummy_env = ArenaRllibEnv(env_config)
    number_agents = dummy_env.number_agents

    # For now, we do not support using different spaces across agents
    # (i.e., all agents have to share the same brain in Arena-BuildingToolkit)
    # This is because we want to consider the transfer/sharing weight between agents.
    # If you do have completely different agents in game, one harmless work around is
    # to use the same brain, but define different meaning of the action in Arena-BuildingToolkit
    obs_space = dummy_env.observation_space
    act_space = dummy_env.action_space

    def get_policy_id(policy_i):
        # "policy_<i>": shared naming between the policy dict and the map.
        return "{}_{}".format(policy_id_prefix, policy_i)

    # Create the config of policies: one (class, obs, act, config) tuple
    # per agent; None lets RLlib pick the trainer's default policy class.
    policies = {}
    for agent_i in range(number_agents):
        policy_id = get_policy_id(agent_i)
        policies[policy_id] = (None, obs_space, act_space, {})

    # Create a map from agent_id to policy_id.
    agent_id_to_policy_id = {}
    if args.policy_assignment in ["independent"]:

        # Independent learners: each agent is assigned an independent policy.
        for agent_i in range(number_agents):
            agent_id = dummy_env.get_agent_id(agent_i)
            policy_id = get_policy_id(agent_i)
            agent_id_to_policy_id[agent_id] = policy_id

    else:
        raise NotImplementedError

    # Check that every agent_id is covered in agent_id_to_policy_id.
    for agent_id in dummy_env.get_agent_ids():
        if agent_id not in agent_id_to_policy_id.keys():
            raise Exception("All agent_id has to be mentioned in agent_id_to_policy_id.keys(). \
                agent_id of {} is not mentioned".format(agent_id))

    tune.run(
        "PPO",
        stop={"training_iteration": args.num_iters},
        config={
            "env": "arena_env",
            "env_config": env_config,
            "multiagent": {
                "policies": policies,
                "policy_mapping_fn": (
                    lambda agent_id: agent_id_to_policy_id[agent_id]
                ),
            },
        },
    )
| 32.648352 | 96 | 0.685291 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import random
import time
import numpy as np
import ray
from ray import tune
from ray.rllib.utils import try_import_tf
from envs_layer import ArenaRllibEnv
tf = try_import_tf()
parser = argparse.ArgumentParser()
parser.add_argument("--env-id", type=str, default="Test-Discrete")
parser.add_argument("--policy-assignment", type=str, default="independent")
parser.add_argument("--num-iters", type=int, default=20)
policy_id_prefix = "policy"
if __name__ == "__main__":
args = parser.parse_args()
ray.init()
env_config = {
"env_id": args.env_id,
}
dummy_env = ArenaRllibEnv(env_config)
number_agents = dummy_env.number_agents
obs_space = dummy_env.observation_space
act_space = dummy_env.action_space
def get_policy_id(policy_i):
return "{}_{}".format(policy_id_prefix, policy_i)
policies = {}
for agent_i in range(number_agents):
policy_id = get_policy_id(agent_i)
policies[policy_id] = (None, obs_space, act_space, {})
agent_id_to_policy_id = {}
if args.policy_assignment in ["independent"]:
for agent_i in range(number_agents):
agent_id = dummy_env.get_agent_id(agent_i)
policy_id = get_policy_id(agent_i)
agent_id_to_policy_id[agent_id] = policy_id
else:
raise NotImplementedError
for agent_id in dummy_env.get_agent_ids():
if agent_id not in agent_id_to_policy_id.keys():
raise Exception("All agent_id has to be mentioned in agent_id_to_policy_id.keys(). \
agent_id of {} is not mentioned".format(agent_id))
tune.run(
"PPO",
stop={"training_iteration": args.num_iters},
config={
"env": "arena_env",
"env_config": env_config,
"multiagent": {
"policies": policies,
"policy_mapping_fn": (
lambda agent_id: agent_id_to_policy_id[agent_id]
),
},
},
)
| true | true |
1c2e33dd468219d592ebd684a35db138aaa243e7 | 1,078 | py | Python | slow_processing_times/blueprints/processing.py | GeoffreyDlvl/SlowwwwwProcessingTimes | f0e37d11fef6d4922e6c9c8fc68766f29bd21bb4 | [
"MIT"
] | null | null | null | slow_processing_times/blueprints/processing.py | GeoffreyDlvl/SlowwwwwProcessingTimes | f0e37d11fef6d4922e6c9c8fc68766f29bd21bb4 | [
"MIT"
] | null | null | null | slow_processing_times/blueprints/processing.py | GeoffreyDlvl/SlowwwwwProcessingTimes | f0e37d11fef6d4922e6c9c8fc68766f29bd21bb4 | [
"MIT"
] | null | null | null | import time
from flask import (
Blueprint, request, current_app
)
from .. import utils
from ..enums.state_enum import State
from ..blueprints.crack import archives
bp = Blueprint('processing', __name__, url_prefix='/processing')
def some_processing():
    """Placeholder job simulating slow work: log start, block ~10s, log done."""
    print('PROCESSING...')
    time.sleep(10)
    print('DONE.')
@bp.route('some_processing', methods=['POST'])
def append_some_processing():
    """POST endpoint: queue ``some_processing`` on the archive named in the request.

    The request is first validated (via ``utils.check_filename_in``) and the
    named archive must already exist in ``archives``; the job is then appended
    to that archive's queue and processing is started if the archive is idle.
    """
    validation_error = utils.check_filename_in(request)
    if not utils.is_response_empty(validation_error):
        return validation_error

    filename = utils.get_filename_from(request)
    if not utils.archive_exists(filename):
        return utils.create_response({'file': filename, 'message': 'File not found'}, 400)

    archive = archives[filename]
    archive.append_processing(some_processing)
    if archive.state is State.PROCESSING:
        # A run is already in flight: the job waits in the queue.
        return utils.create_response({'message': 'Processing has been appended to the queue'}, 201)
    archive.start_processing()
    return utils.create_response({'message': 'All processing completed'}, 201)
| 28.368421 | 99 | 0.702226 | import time
from flask import (
Blueprint, request, current_app
)
from .. import utils
from ..enums.state_enum import State
from ..blueprints.crack import archives
bp = Blueprint('processing', __name__, url_prefix='/processing')
def some_processing():
print('PROCESSING...')
time.sleep(10)
print('DONE.')
@bp.route('some_processing', methods=['POST'])
def append_some_processing():
response = utils.check_filename_in(request)
if not utils.is_response_empty(response):
return response
filename = utils.get_filename_from(request)
if not utils.archive_exists(filename):
return utils.create_response({'file': filename, 'message': 'File not found'}, 400)
archive = archives[filename]
archive.append_processing(some_processing)
if archive.state is not State.PROCESSING:
archive.start_processing()
return utils.create_response({'message': 'All processing completed'}, 201)
else:
return utils.create_response({'message': 'Processing has been appended to the queue'}, 201)
| true | true |
1c2e34a3a089e4d5a4555dea0f99551b3b7517cc | 942 | py | Python | apiclient/request_formatters.py | Phonebooth/api-client | c2820fa4c4997aad8a07e408b80a52df4d6c9978 | [
"MIT"
] | 112 | 2019-02-18T15:07:50.000Z | 2022-03-31T07:05:23.000Z | apiclient/request_formatters.py | Phonebooth/api-client | c2820fa4c4997aad8a07e408b80a52df4d6c9978 | [
"MIT"
] | 34 | 2019-02-20T13:32:47.000Z | 2022-01-22T23:09:50.000Z | apiclient/request_formatters.py | Phonebooth/api-client | c2820fa4c4997aad8a07e408b80a52df4d6c9978 | [
"MIT"
] | 23 | 2019-03-15T10:50:03.000Z | 2022-03-17T09:49:21.000Z | import json
from apiclient.utils.typing import OptionalJsonType, OptionalStr
class BaseRequestFormatter:
    """Format the outgoing data accordingly and set the content-type headers."""

    # Subclasses override this with the MIME type they emit (e.g. "application/json").
    content_type = None

    @classmethod
    def get_headers(cls) -> dict:
        """Return the ``Content-type`` header dict, or ``{}`` when no type is set."""
        return {"Content-type": cls.content_type} if cls.content_type else {}

    @classmethod
    def format(cls, data: OptionalJsonType):
        """Serialize *data* for the request body; subclasses must implement."""
        raise NotImplementedError
class NoOpRequestFormatter(BaseRequestFormatter):
    """Pass-through formatter: data is sent untouched and no header is set."""

    @classmethod
    def format(cls, data: OptionalJsonType) -> OptionalJsonType:
        """Return *data* unchanged."""
        return data
class JsonRequestFormatter(BaseRequestFormatter):
    """Serialize outgoing data as JSON with an ``application/json`` header."""

    content_type = "application/json"

    @classmethod
    def format(cls, data: OptionalJsonType) -> OptionalStr:
        """Return *data* serialized as a JSON string, or ``None`` for falsy input.

        NOTE(review): falsy payloads (not just ``None`` but also ``{}``/``[]``)
        fall through and yield ``None`` rather than ``"{}"``/``"[]"`` — confirm
        this is the intended contract.
        """
        if data:
            return json.dumps(data)
        return None
| 23.55 | 80 | 0.680467 | import json
from apiclient.utils.typing import OptionalJsonType, OptionalStr
class BaseRequestFormatter:
content_type = None
@classmethod
def get_headers(cls) -> dict:
if cls.content_type:
return {"Content-type": cls.content_type}
else:
return {}
@classmethod
def format(cls, data: OptionalJsonType):
raise NotImplementedError
class NoOpRequestFormatter(BaseRequestFormatter):
@classmethod
def format(cls, data: OptionalJsonType) -> OptionalJsonType:
return data
class JsonRequestFormatter(BaseRequestFormatter):
content_type = "application/json"
@classmethod
def format(cls, data: OptionalJsonType) -> OptionalStr:
if data:
return json.dumps(data)
| true | true |
1c2e34b2c0fc0aca1bbc78728b67afbf95f4a6cd | 6,714 | py | Python | src/python/pants/backend/python/goals/run_pex_binary_integration_test.py | chebbyChefNEQ/pants | a53b9d29a160f36f9af1d1a2c43a693b6a55fa55 | [
"Apache-2.0"
] | 1 | 2016-04-27T15:35:42.000Z | 2016-04-27T15:35:42.000Z | src/python/pants/backend/python/goals/run_pex_binary_integration_test.py | chebbyChefNEQ/pants | a53b9d29a160f36f9af1d1a2c43a693b6a55fa55 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/python/goals/run_pex_binary_integration_test.py | chebbyChefNEQ/pants | a53b9d29a160f36f9af1d1a2c43a693b6a55fa55 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import os
from textwrap import dedent
from typing import Optional
import pytest
from pants.backend.python.target_types import PexExecutionMode
from pants.testutil.pants_integration_test import PantsResult, run_pants, setup_tmpdir
@pytest.mark.parametrize(
    ("entry_point", "execution_mode", "include_tools"),
    [
        ("app.py", PexExecutionMode.UNZIP, True),
        ("app.py", PexExecutionMode.VENV, True),
        ("app.py:main", PexExecutionMode.ZIPAPP, False),
        ("app.py:main", None, False),
    ],
)
def test_run_sample_script(
    entry_point: str, execution_mode: Optional[PexExecutionMode], include_tools: bool
) -> None:
    """Test that we properly run a `pex_binary` target.

    This checks a few things:
    - We can handle source roots.
    - We properly load third party requirements.
    - We propagate the error code.
    """
    # Two source roots so source-root stripping is exercised.
    sources = {
        "src_root1/project/app.py": dedent(
            """\
            import sys
            from utils.strutil import upper_case
            def main():
                print(upper_case("Hello world."))
                print("Hola, mundo.", file=sys.stderr)
                sys.exit(23)
            if __name__ == "__main__":
                main()
            """
        ),
        "src_root1/project/BUILD": dedent(
            f"""\
            python_library(name='lib')
            pex_binary(
                entry_point={entry_point!r},
                execution_mode={execution_mode.value if execution_mode is not None else None!r},
                include_tools={include_tools!r},
            )
            """
        ),
        "src_root2/utils/strutil.py": dedent(
            """\
            def upper_case(s):
                return s.upper()
            """
        ),
        "src_root2/utils/BUILD": "python_library()",
    }

    def run(*extra_args: str, **extra_env: str) -> PantsResult:
        # Helper: materialize the sources and `pants run` the app with any
        # extra CLI args / env vars the caller needs.
        with setup_tmpdir(sources) as tmpdir:
            argv = [
                "--backend-packages=pants.backend.python",
                f"--source-root-patterns=['/{tmpdir}/src_root1', '/{tmpdir}/src_root2']",
                "--pants-ignore=__pycache__",
                "--pants-ignore=/src/python",
                "run",
                f"{tmpdir}/src_root1/project/app.py",
                *extra_args,
            ]
            return run_pants(argv, extra_env=extra_env)

    result = run()
    # stdout from the library code, stderr from the app, exit code propagated.
    assert "Hola, mundo.\n" in result.stderr
    assert result.stdout == "HELLO WORLD.\n"
    assert result.exit_code == 23

    if include_tools:
        # With PEX_TOOLS=1, `info` dumps the binary's PEX-INFO as JSON.
        result = run("--", "info", PEX_TOOLS="1")
        assert result.exit_code == 0
        pex_info = json.loads(result.stdout)
        assert (execution_mode is PexExecutionMode.VENV) == pex_info["venv"]
        assert ("prepend" if execution_mode is PexExecutionMode.VENV else "false") == pex_info[
            "venv_bin_path"
        ]
        assert pex_info["strip_pex_env"] is False
def test_no_strip_pex_env_issues_12057() -> None:
    """The built PEX must not strip PANTS_* env vars (issue #12057).

    The app re-execs itself once after setting an env var; on the second pass
    it exits with that var's value, which only works if the environment
    survives the PEX bootstrap.
    """
    sources = {
        "src/app.py": dedent(
            """\
            import os
            import sys
            if __name__ == "__main__":
                exit_code = os.environ.get("PANTS_ISSUES_12057")
                if exit_code is None:
                    os.environ["PANTS_ISSUES_12057"] = "42"
                    os.execv(sys.executable, [sys.executable, *sys.argv])
                sys.exit(int(exit_code))
            """
        ),
        "src/BUILD": dedent(
            """\
            python_library(name="lib")
            pex_binary(entry_point="app.py")
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        argv = [
            "--backend-packages=pants.backend.python",
            f"--source-root-patterns=['/{tmpdir}/src']",
            "run",
            f"{tmpdir}/src/app.py",
        ]
        result = run_pants(argv)
        assert result.exit_code == 42, result.stderr
def test_no_leak_pex_root_issues_12055() -> None:
    """`run` must use the configured named-caches pex_root, not leak another one
    (issue #12055)."""
    read_config_result = run_pants(["help-all"])
    read_config_result.assert_success()
    config_data = json.loads(read_config_result.stdout)
    # The effective value of each advanced global option is the highest-ranked
    # (last) entry in its ranked_values history.
    global_advanced_options = {
        option["config_key"]: option["value_history"]["ranked_values"][-1]["value"]
        for option in config_data["scope_to_help_info"][""]["advanced"]
    }
    named_caches_dir = global_advanced_options["named_caches_dir"]

    sources = {
        "src/app.py": "import os; print(os.environ['PEX_ROOT'])",
        "src/BUILD": dedent(
            """\
            python_library(name="lib")
            pex_binary(entry_point="app.py")
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        argv = [
            "--backend-packages=pants.backend.python",
            f"--source-root-patterns=['/{tmpdir}/src']",
            "run",
            f"{tmpdir}/src/app.py",
        ]
        result = run_pants(argv)
        result.assert_success()
        assert os.path.join(named_caches_dir, "pex_root") == result.stdout.strip()
def test_local_dist() -> None:
    """A `pex_binary` can consume code solely through a locally built
    `python_distribution` wheel (direct dep on the library is force-excluded)."""
    sources = {
        "foo/bar.py": "BAR = 'LOCAL DIST'",
        "foo/setup.py": dedent(
            """\
            from setuptools import setup
            # Double-brace the package_dir to avoid setup_tmpdir treating it as a format.
            setup(name="foo", version="9.8.7", packages=["foo"], package_dir={{"foo": "."}},)
            """
        ),
        "foo/main.py": "from foo.bar import BAR; print(BAR)",
        "foo/BUILD": dedent(
            """\
            python_library(name="lib", sources=["bar.py", "setup.py"])
            python_library(name="main_lib", sources=["main.py"])
            python_distribution(
                name="dist",
                dependencies=[":lib"],
                provides=python_artifact(name="foo", version="9.8.7", setup_script="setup.py"),
                setup_py_commands=["bdist_wheel",]
            )
            pex_binary(
                name="bin",
                entry_point="main.py",
                # Force-exclude any dep on bar.py, so the only way to consume it is via the dist.
                dependencies=[":main_lib", ":dist", "!!:lib"])
            """
        ),
    }
    with setup_tmpdir(sources) as tmpdir:
        argv = [
            "--backend-packages=pants.backend.python",
            f"--source-root-patterns=['/{tmpdir}']",
            "run",
            f"{tmpdir}/foo/main.py",
        ]
        result = run_pants(argv)
        assert result.stdout == "LOCAL DIST\n"
| 32.434783 | 97 | 0.545576 |
import json
import os
from textwrap import dedent
from typing import Optional
import pytest
from pants.backend.python.target_types import PexExecutionMode
from pants.testutil.pants_integration_test import PantsResult, run_pants, setup_tmpdir
@pytest.mark.parametrize(
("entry_point", "execution_mode", "include_tools"),
[
("app.py", PexExecutionMode.UNZIP, True),
("app.py", PexExecutionMode.VENV, True),
("app.py:main", PexExecutionMode.ZIPAPP, False),
("app.py:main", None, False),
],
)
def test_run_sample_script(
entry_point: str, execution_mode: Optional[PexExecutionMode], include_tools: bool
) -> None:
sources = {
"src_root1/project/app.py": dedent(
"""\
import sys
from utils.strutil import upper_case
def main():
print(upper_case("Hello world."))
print("Hola, mundo.", file=sys.stderr)
sys.exit(23)
if __name__ == "__main__":
main()
"""
),
"src_root1/project/BUILD": dedent(
f"""\
python_library(name='lib')
pex_binary(
entry_point={entry_point!r},
execution_mode={execution_mode.value if execution_mode is not None else None!r},
include_tools={include_tools!r},
)
"""
),
"src_root2/utils/strutil.py": dedent(
"""\
def upper_case(s):
return s.upper()
"""
),
"src_root2/utils/BUILD": "python_library()",
}
def run(*extra_args: str, **extra_env: str) -> PantsResult:
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns=['/{tmpdir}/src_root1', '/{tmpdir}/src_root2']",
"--pants-ignore=__pycache__",
"--pants-ignore=/src/python",
"run",
f"{tmpdir}/src_root1/project/app.py",
*extra_args,
]
return run_pants(args, extra_env=extra_env)
result = run()
assert "Hola, mundo.\n" in result.stderr
assert result.stdout == "HELLO WORLD.\n"
assert result.exit_code == 23
if include_tools:
result = run("--", "info", PEX_TOOLS="1")
assert result.exit_code == 0
pex_info = json.loads(result.stdout)
assert (execution_mode is PexExecutionMode.VENV) == pex_info["venv"]
assert ("prepend" if execution_mode is PexExecutionMode.VENV else "false") == pex_info[
"venv_bin_path"
]
assert pex_info["strip_pex_env"] is False
def test_no_strip_pex_env_issues_12057() -> None:
sources = {
"src/app.py": dedent(
"""\
import os
import sys
if __name__ == "__main__":
exit_code = os.environ.get("PANTS_ISSUES_12057")
if exit_code is None:
os.environ["PANTS_ISSUES_12057"] = "42"
os.execv(sys.executable, [sys.executable, *sys.argv])
sys.exit(int(exit_code))
"""
),
"src/BUILD": dedent(
"""\
python_library(name="lib")
pex_binary(entry_point="app.py")
"""
),
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns=['/{tmpdir}/src']",
"run",
f"{tmpdir}/src/app.py",
]
result = run_pants(args)
assert result.exit_code == 42, result.stderr
def test_no_leak_pex_root_issues_12055() -> None:
read_config_result = run_pants(["help-all"])
read_config_result.assert_success()
config_data = json.loads(read_config_result.stdout)
global_advanced_options = {
option["config_key"]: [
ranked_value["value"] for ranked_value in option["value_history"]["ranked_values"]
][-1]
for option in config_data["scope_to_help_info"][""]["advanced"]
}
named_caches_dir = global_advanced_options["named_caches_dir"]
sources = {
"src/app.py": "import os; print(os.environ['PEX_ROOT'])",
"src/BUILD": dedent(
"""\
python_library(name="lib")
pex_binary(entry_point="app.py")
"""
),
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns=['/{tmpdir}/src']",
"run",
f"{tmpdir}/src/app.py",
]
result = run_pants(args)
result.assert_success()
assert os.path.join(named_caches_dir, "pex_root") == result.stdout.strip()
def test_local_dist() -> None:
sources = {
"foo/bar.py": "BAR = 'LOCAL DIST'",
"foo/setup.py": dedent(
"""\
from setuptools import setup
# Double-brace the package_dir to avoid setup_tmpdir treating it as a format.
setup(name="foo", version="9.8.7", packages=["foo"], package_dir={{"foo": "."}},)
"""
),
"foo/main.py": "from foo.bar import BAR; print(BAR)",
"foo/BUILD": dedent(
"""\
python_library(name="lib", sources=["bar.py", "setup.py"])
python_library(name="main_lib", sources=["main.py"])
python_distribution(
name="dist",
dependencies=[":lib"],
provides=python_artifact(name="foo", version="9.8.7", setup_script="setup.py"),
setup_py_commands=["bdist_wheel",]
)
pex_binary(
name="bin",
entry_point="main.py",
# Force-exclude any dep on bar.py, so the only way to consume it is via the dist.
dependencies=[":main_lib", ":dist", "!!:lib"])
"""
),
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns=['/{tmpdir}']",
"run",
f"{tmpdir}/foo/main.py",
]
result = run_pants(args)
assert result.stdout == "LOCAL DIST\n"
| true | true |
1c2e350653f5047348cfa08e90c9d50892d88626 | 318 | py | Python | Practice Problems/13-Decorators/Beginner/test_decorators_beginner.py | vishnu-rvn/PyPractice | 521cf6582b49aabd9a4c1c0aef0dd3608c9ee63b | [
"MIT"
] | 9 | 2018-07-13T16:29:41.000Z | 2018-07-14T14:40:38.000Z | Practice Problems/13-Decorators/Beginner/test_decorators_beginner.py | vishnu-rvn/PyPractice | 521cf6582b49aabd9a4c1c0aef0dd3608c9ee63b | [
"MIT"
] | 11 | 2018-07-15T07:56:57.000Z | 2018-07-21T17:41:13.000Z | Practice Problems/13-Decorators/Beginner/test_decorators_beginner.py | vishnu-rvn/PyPractice | 521cf6582b49aabd9a4c1c0aef0dd3608c9ee63b | [
"MIT"
] | 8 | 2018-07-13T02:37:53.000Z | 2018-07-14T20:36:44.000Z | from unittest import TestCase, TestSuite, TextTestRunner, main
class DecoratorsBeginnerTestCase(TestCase):
    """Placeholder TestCase; beginner decorator-exercise tests are added here."""
def test_one(test_name):
    """Run the single test method *test_name* from DecoratorsBeginnerTestCase."""
    single = TestSuite()
    single.addTest(DecoratorsBeginnerTestCase(test_name))
    TextTestRunner().run(single)
# Allow running this module directly: hand off to unittest's CLI entry point.
if __name__ == "__main__":
    main()
| 18.705882 | 62 | 0.732704 | from unittest import TestCase, TestSuite, TextTestRunner, main
class DecoratorsBeginnerTestCase(TestCase):
pass
def test_one(test_name):
suite = TestSuite()
suite.addTest(DecoratorsBeginnerTestCase(test_name))
runner = TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
main()
| true | true |
1c2e35b8dc64792855acbd02758e9d8d72ce1652 | 1,971 | py | Python | test_project/test_project_py3/settings.py | yprez/django-useful | 288aa46df6f40fb0323c3d0c0efcded887472538 | [
"0BSD"
] | 3 | 2015-09-30T09:26:31.000Z | 2019-03-19T05:44:24.000Z | test_project/test_project_py3/settings.py | yprez/django-useful | 288aa46df6f40fb0323c3d0c0efcded887472538 | [
"0BSD"
] | 3 | 2020-02-11T22:13:27.000Z | 2021-06-10T17:40:52.000Z | test_project/test_project_py3/settings.py | yprez/django-useful | 288aa46df6f40fb0323c3d0c0efcded887472538 | [
"0BSD"
] | 1 | 2016-08-08T14:35:02.000Z | 2016-08-08T14:35:02.000Z | # Django settings for test_project project.
# -- Debug / error reporting ------------------------------------------------
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS

# -- Database: throwaway SQLite file for the test run -----------------------
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    }
}

# -- Locale / site ----------------------------------------------------------
TIME_ZONE = 'Etc/UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
STATIC_URL = '/static/'

# Test-only key; never reuse in production.
SECRET_KEY = 't^4dt#fkxftpborp@%lg*#h2wj%vizl)#pkkt$&0f7b87rbu6y'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 'django.contrib.admin',
    'django_nose',
    'kombu.transport.django',
    'useful',  # Import the app to run tests
)

TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEMPLATE_CONTEXT_PROCESSORS = (
    'useful.context_processors.settings',
)

# -- Celery: eager, in-process execution over the Django/kombu broker -------
BROKER_URL = 'django://'
CELERY_RESULT_BACKEND = CELERY_CACHE_BACKEND = BROKER_BACKEND = 'sqlite://'
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = False

from datetime import timedelta

# Periodic task exercising useful.tasks.call_management_command every 10s.
CELERYBEAT_SCHEDULE = {
    'cleanup': {
        'task': 'useful.tasks.call_management_command',
        'schedule': timedelta(seconds=10),
        'args': ('validate', ),
    },
}
| 24.333333 | 75 | 0.710299 |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
}
}
TIME_ZONE = 'Etc/UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
STATIC_URL = '/static/'
SECRET_KEY = 't^4dt#fkxftpborp@%lg*#h2wj%vizl)#pkkt$&0f7b87rbu6y'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_nose',
'kombu.transport.django',
'useful',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEMPLATE_CONTEXT_PROCESSORS = (
'useful.context_processors.settings',
)
BROKER_URL = 'django://'
CELERY_RESULT_BACKEND = CELERY_CACHE_BACKEND = BROKER_BACKEND = 'sqlite://'
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = False
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
'cleanup': {
'task': 'useful.tasks.call_management_command',
'schedule': timedelta(seconds=10),
'args': ('validate', ),
},
}
| true | true |
1c2e35d4a10895371f644ea4da834848880fabab | 13,265 | py | Python | trove/tests/unittests/common/test_common_extensions.py | zh-f/trove | 4998becb4da14547798cece21858282761409052 | [
"Apache-2.0"
] | 1 | 2017-11-24T10:28:48.000Z | 2017-11-24T10:28:48.000Z | trove/tests/unittests/common/test_common_extensions.py | 2020human/trove-new | 012da9a334bc4e9c7711dc918eea3f011463ec82 | [
"Apache-2.0"
] | null | null | null | trove/tests/unittests/common/test_common_extensions.py | 2020human/trove-new | 012da9a334bc4e9c7711dc918eea3f011463ec82 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from mock import Mock
from mock import patch
from oslo_config.cfg import NoSuchOptError
from trove.common import exception
from trove.common import utils
from trove.extensions.common import models
from trove.extensions.common.service import ClusterRootController
from trove.extensions.common.service import DefaultRootController
from trove.extensions.common.service import RootController
from trove.instance import models as instance_models
from trove.instance.models import DBInstance
from trove.tests.unittests import trove_testtools
class TestDefaultRootController(trove_testtools.TestCase):
    """Unit tests for DefaultRootController, the single-instance root handler."""

    def setUp(self):
        super(TestDefaultRootController, self).setUp()
        self.controller = DefaultRootController()

    @patch.object(models.Root, "load")
    def test_root_index(self, root_load):
        """Showing root on a non-cluster instance delegates to models.Root.load."""
        context = Mock()
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        tenant_id = Mock()
        instance_id = utils.generate_uuid()
        self.controller.root_index(req, tenant_id, instance_id, False)
        root_load.assert_called_with(context, instance_id)

    def test_root_index_with_cluster(self):
        """Showing root must be rejected when the target is a cluster."""
        req = Mock()
        tenant_id = Mock()
        instance_id = utils.generate_uuid()
        self.assertRaises(
            exception.ClusterOperationNotSupported,
            self.controller.root_index,
            req, tenant_id, instance_id, True)

    @patch.object(models.Root, "create")
    def test_root_create(self, root_create):
        """Enabling root on a non-cluster instance delegates to models.Root.create."""
        user = Mock()
        context = Mock()
        context.user = Mock()
        context.user.__getitem__ = Mock(return_value=user)
        req = Mock()
        req.environ = Mock()
        req.environ.__getitem__ = Mock(return_value=context)
        tenant_id = Mock()
        instance_id = utils.generate_uuid()
        password = Mock()
        body = {'password': password}
        self.controller.root_create(req, body, tenant_id, instance_id, False)
        root_create.assert_called_with(context, instance_id, context.user, password)

    def test_root_create_with_cluster(self):
        """Enabling root must be rejected when the target is a cluster."""
        req = Mock()
        tenant_id = Mock()
        instance_id = utils.generate_uuid()
        password = Mock()
        body = {'password': password}
        self.assertRaises(
            exception.ClusterOperationNotSupported,
            self.controller.root_create,
            req, body, tenant_id, instance_id, True)
class TestRootController(trove_testtools.TestCase):
    """Unit tests for RootController, which dispatches root requests to the
    datastore-specific root controller."""

    def setUp(self):
        super(TestRootController, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)
        self.controller = RootController()

    @patch.object(instance_models.Instance, "load")
    @patch.object(RootController, "load_root_controller")
    @patch.object(RootController, "_get_datastore")
    def test_index(self, service_get_datastore, service_load_root_controller,
                   service_load_instance):
        """index() resolves the datastore and returns its root_index result."""
        req = Mock()
        req.environ = {'trove.context': self.context}
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        ds_manager = Mock()
        is_cluster = False
        service_get_datastore.return_value = (ds_manager, is_cluster)
        root_controller = Mock()
        ret = Mock()
        root_controller.root_index = Mock(return_value=ret)
        service_load_root_controller.return_value = root_controller
        # BUG FIX: was assertTrue(ret, <call>), whose second argument is only
        # the failure *message* -- the assertion was vacuous. assertEqual
        # actually verifies that index() returns the delegated result.
        self.assertEqual(ret, self.controller.index(req, tenant_id, uuid))
        service_get_datastore.assert_called_with(tenant_id, uuid)
        service_load_root_controller.assert_called_with(ds_manager)
        root_controller.root_index.assert_called_with(
            req, tenant_id, uuid, is_cluster)

    @patch.object(instance_models.Instance, "load")
    @patch.object(RootController, "load_root_controller")
    @patch.object(RootController, "_get_datastore")
    def test_create(self, service_get_datastore, service_load_root_controller,
                    service_load_instance):
        """create() resolves the datastore and returns its root_create result."""
        req = Mock()
        req.environ = {'trove.context': self.context}
        body = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        ds_manager = Mock()
        is_cluster = False
        service_get_datastore.return_value = (ds_manager, is_cluster)
        root_controller = Mock()
        ret = Mock()
        root_controller.root_create = Mock(return_value=ret)
        service_load_root_controller.return_value = root_controller
        # BUG FIX: same vacuous assertTrue(ret, <call>) misuse as test_index.
        self.assertEqual(
            ret, self.controller.create(req, tenant_id, uuid, body=body))
        service_get_datastore.assert_called_with(tenant_id, uuid)
        service_load_root_controller.assert_called_with(ds_manager)
        root_controller.root_create.assert_called_with(
            req, body, tenant_id, uuid, is_cluster)

    @patch.object(instance_models.Instance, "load")
    @patch.object(RootController, "load_root_controller")
    @patch.object(RootController, "_get_datastore")
    def test_create_with_no_root_controller(self,
                                            service_get_datastore,
                                            service_load_root_controller,
                                            service_load_instance):
        """create() surfaces NoSuchOptError when no root controller is registered."""
        req = Mock()
        req.environ = {'trove.context': self.context}
        body = Mock()
        tenant_id = Mock()
        uuid = utils.generate_uuid()
        ds_manager = Mock()
        is_cluster = False
        service_get_datastore.return_value = (ds_manager, is_cluster)
        service_load_root_controller.return_value = None
        self.assertRaises(
            NoSuchOptError,
            self.controller.create,
            req, tenant_id, uuid, body=body)
        service_get_datastore.assert_called_with(tenant_id, uuid)
        service_load_root_controller.assert_called_with(ds_manager)
class TestClusterRootController(trove_testtools.TestCase):
def setUp(self):
super(TestClusterRootController, self).setUp()
self.context = trove_testtools.TroveTestContext(self)
self.controller = ClusterRootController()
@patch.object(ClusterRootController, "cluster_root_index")
def test_root_index_cluster(self, mock_cluster_root_index):
req = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = True
self.controller.root_index(req, tenant_id, uuid, is_cluster)
mock_cluster_root_index.assert_called_with(req, tenant_id, uuid)
@patch.object(ClusterRootController, "instance_root_index")
def test_root_index_instance(self, mock_instance_root_index):
req = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = False
self.controller.root_index(req, tenant_id, uuid, is_cluster)
mock_instance_root_index.assert_called_with(req, tenant_id, uuid)
@patch.object(ClusterRootController, "cluster_root_create")
def test_root_create_cluster(self, mock_cluster_root_create):
req = Mock()
body = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = True
self.controller.root_create(req, body, tenant_id, uuid, is_cluster)
mock_cluster_root_create.assert_called_with(req, body, tenant_id, uuid)
@patch.object(ClusterRootController, "check_cluster_instance_actions")
@patch.object(ClusterRootController, "instance_root_create")
def test_root_create_instance(self, mock_instance_root_create, mock_check):
req = Mock()
body = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = False
self.controller.root_create(req, body, tenant_id, uuid, is_cluster)
mock_check.assert_called_with(uuid)
mock_instance_root_create.assert_called_with(req, body, uuid)
@patch.object(models.ClusterRoot, "load")
def test_instance_root_index(self, mock_cluster_root_load):
req = Mock()
req.environ = {'trove.context': self.context}
tenant_id = Mock()
instance_id = utils.generate_uuid()
self.controller.instance_root_index(req, tenant_id, instance_id)
mock_cluster_root_load.assert_called_with(self.context, instance_id)
@patch.object(models.ClusterRoot, "load",
side_effect=exception.UnprocessableEntity())
def test_instance_root_index_exception(self, mock_cluster_root_load):
req = Mock()
req.environ = {'trove.context': self.context}
tenant_id = Mock()
instance_id = utils.generate_uuid()
self.assertRaises(
exception.UnprocessableEntity,
self.controller.instance_root_index,
req, tenant_id, instance_id
)
mock_cluster_root_load.assert_called_with(self.context, instance_id)
@patch.object(ClusterRootController, "instance_root_index")
@patch.object(ClusterRootController, "_get_cluster_instance_id")
def test_cluster_root_index(self, mock_get_cluster_instance,
mock_instance_root_index):
req = Mock()
tenant_id = Mock()
cluster_id = utils.generate_uuid()
single_instance_id = Mock()
mock_get_cluster_instance.return_value = (single_instance_id, Mock())
self.controller.cluster_root_index(req, tenant_id, cluster_id)
mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id)
mock_instance_root_index.assert_called_with(req, tenant_id,
single_instance_id)
@patch.object(ClusterRootController, "instance_root_create")
@patch.object(ClusterRootController, "_get_cluster_instance_id")
def test_cluster_root_create(self, mock_get_cluster_instance,
mock_instance_root_create):
req = Mock()
body = Mock()
tenant_id = Mock()
cluster_id = utils.generate_uuid()
single_instance_id = Mock()
cluster_instances = Mock()
mock_get_cluster_instance.return_value = (single_instance_id,
cluster_instances)
self.controller.cluster_root_create(req, body, tenant_id, cluster_id)
mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id)
mock_instance_root_create.assert_called_with(req, body,
single_instance_id,
cluster_instances)
@patch.object(DBInstance, "find_all")
def test_get_cluster_instance_id(self, mock_find_all):
tenant_id = Mock()
cluster_id = Mock()
db_inst_1 = Mock()
db_inst_1.id.return_value = utils.generate_uuid()
db_inst_2 = Mock()
db_inst_2.id.return_value = utils.generate_uuid()
cluster_instances = [db_inst_1, db_inst_2]
mock_find_all.return_value.all.return_value = cluster_instances
ret = self.controller._get_cluster_instance_id(tenant_id, cluster_id)
self.assertTrue(db_inst_1.id, ret[0])
self.assertTrue(cluster_instances, ret[1])
@patch.object(models.ClusterRoot, "create")
def test_instance_root_create(self, mock_cluster_root_create):
user = Mock()
self.context.user = Mock()
self.context.user.__getitem__ = Mock(return_value=user)
req = Mock()
req.environ = {'trove.context': self.context}
password = Mock()
body = {'password': password}
instance_id = utils.generate_uuid()
cluster_instances = Mock()
self.controller.instance_root_create(
req, body, instance_id, cluster_instances)
mock_cluster_root_create.assert_called_with(
self.context, instance_id, self.context.user, password,
cluster_instances)
    @patch.object(models.ClusterRoot, "create")
    def test_instance_root_create_no_body(self, mock_cluster_root_create):
        """With no request body, root create must pass password=None through
        to ClusterRoot.create unchanged."""
        user = Mock()
        self.context.user = Mock()
        # context.user is subscripted inside the controller; stub __getitem__.
        self.context.user.__getitem__ = Mock(return_value=user)
        req = Mock()
        req.environ = {'trove.context': self.context}
        password = None
        body = None
        instance_id = utils.generate_uuid()
        cluster_instances = Mock()
        self.controller.instance_root_create(
            req, body, instance_id, cluster_instances)
        mock_cluster_root_create.assert_called_with(
            self.context, instance_id, self.context.user, password,
cluster_instances)
| 41.583072 | 79 | 0.671014 |
from mock import Mock
from mock import patch
from oslo_config.cfg import NoSuchOptError
from trove.common import exception
from trove.common import utils
from trove.extensions.common import models
from trove.extensions.common.service import ClusterRootController
from trove.extensions.common.service import DefaultRootController
from trove.extensions.common.service import RootController
from trove.instance import models as instance_models
from trove.instance.models import DBInstance
from trove.tests.unittests import trove_testtools
class TestDefaultRootController(trove_testtools.TestCase):
def setUp(self):
super(TestDefaultRootController, self).setUp()
self.controller = DefaultRootController()
@patch.object(models.Root, "load")
def test_root_index(self, root_load):
context = Mock()
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = False
self.controller.root_index(req, tenant_id, uuid, is_cluster)
root_load.assert_called_with(context, uuid)
def test_root_index_with_cluster(self):
req = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = True
self.assertRaises(
exception.ClusterOperationNotSupported,
self.controller.root_index,
req, tenant_id, uuid, is_cluster)
@patch.object(models.Root, "create")
def test_root_create(self, root_create):
user = Mock()
context = Mock()
context.user = Mock()
context.user.__getitem__ = Mock(return_value=user)
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = False
password = Mock()
body = {'password': password}
self.controller.root_create(req, body, tenant_id, uuid, is_cluster)
root_create.assert_called_with(context, uuid, context.user, password)
def test_root_create_with_cluster(self):
req = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = True
password = Mock()
body = {'password': password}
self.assertRaises(
exception.ClusterOperationNotSupported,
self.controller.root_create,
req, body, tenant_id, uuid, is_cluster)
class TestRootController(trove_testtools.TestCase):
def setUp(self):
super(TestRootController, self).setUp()
self.context = trove_testtools.TroveTestContext(self)
self.controller = RootController()
@patch.object(instance_models.Instance, "load")
@patch.object(RootController, "load_root_controller")
@patch.object(RootController, "_get_datastore")
def test_index(self, service_get_datastore, service_load_root_controller,
service_load_instance):
req = Mock()
req.environ = {'trove.context': self.context}
tenant_id = Mock()
uuid = utils.generate_uuid()
ds_manager = Mock()
is_cluster = False
service_get_datastore.return_value = (ds_manager, is_cluster)
root_controller = Mock()
ret = Mock()
root_controller.root_index = Mock(return_value=ret)
service_load_root_controller.return_value = root_controller
self.assertTrue(ret, self.controller.index(req, tenant_id, uuid))
service_get_datastore.assert_called_with(tenant_id, uuid)
service_load_root_controller.assert_called_with(ds_manager)
root_controller.root_index.assert_called_with(
req, tenant_id, uuid, is_cluster)
@patch.object(instance_models.Instance, "load")
@patch.object(RootController, "load_root_controller")
@patch.object(RootController, "_get_datastore")
def test_create(self, service_get_datastore, service_load_root_controller,
service_load_instance):
req = Mock()
req.environ = {'trove.context': self.context}
body = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
ds_manager = Mock()
is_cluster = False
service_get_datastore.return_value = (ds_manager, is_cluster)
root_controller = Mock()
ret = Mock()
root_controller.root_create = Mock(return_value=ret)
service_load_root_controller.return_value = root_controller
self.assertTrue(
ret, self.controller.create(req, tenant_id, uuid, body=body))
service_get_datastore.assert_called_with(tenant_id, uuid)
service_load_root_controller.assert_called_with(ds_manager)
root_controller.root_create.assert_called_with(
req, body, tenant_id, uuid, is_cluster)
@patch.object(instance_models.Instance, "load")
@patch.object(RootController, "load_root_controller")
@patch.object(RootController, "_get_datastore")
def test_create_with_no_root_controller(self,
service_get_datastore,
service_load_root_controller,
service_load_instance):
req = Mock()
req.environ = {'trove.context': self.context}
body = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
ds_manager = Mock()
is_cluster = False
service_get_datastore.return_value = (ds_manager, is_cluster)
service_load_root_controller.return_value = None
self.assertRaises(
NoSuchOptError,
self.controller.create,
req, tenant_id, uuid, body=body)
service_get_datastore.assert_called_with(tenant_id, uuid)
service_load_root_controller.assert_called_with(ds_manager)
class TestClusterRootController(trove_testtools.TestCase):
def setUp(self):
super(TestClusterRootController, self).setUp()
self.context = trove_testtools.TroveTestContext(self)
self.controller = ClusterRootController()
@patch.object(ClusterRootController, "cluster_root_index")
def test_root_index_cluster(self, mock_cluster_root_index):
req = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = True
self.controller.root_index(req, tenant_id, uuid, is_cluster)
mock_cluster_root_index.assert_called_with(req, tenant_id, uuid)
@patch.object(ClusterRootController, "instance_root_index")
def test_root_index_instance(self, mock_instance_root_index):
req = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = False
self.controller.root_index(req, tenant_id, uuid, is_cluster)
mock_instance_root_index.assert_called_with(req, tenant_id, uuid)
@patch.object(ClusterRootController, "cluster_root_create")
def test_root_create_cluster(self, mock_cluster_root_create):
req = Mock()
body = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = True
self.controller.root_create(req, body, tenant_id, uuid, is_cluster)
mock_cluster_root_create.assert_called_with(req, body, tenant_id, uuid)
@patch.object(ClusterRootController, "check_cluster_instance_actions")
@patch.object(ClusterRootController, "instance_root_create")
def test_root_create_instance(self, mock_instance_root_create, mock_check):
req = Mock()
body = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
is_cluster = False
self.controller.root_create(req, body, tenant_id, uuid, is_cluster)
mock_check.assert_called_with(uuid)
mock_instance_root_create.assert_called_with(req, body, uuid)
@patch.object(models.ClusterRoot, "load")
def test_instance_root_index(self, mock_cluster_root_load):
req = Mock()
req.environ = {'trove.context': self.context}
tenant_id = Mock()
instance_id = utils.generate_uuid()
self.controller.instance_root_index(req, tenant_id, instance_id)
mock_cluster_root_load.assert_called_with(self.context, instance_id)
@patch.object(models.ClusterRoot, "load",
side_effect=exception.UnprocessableEntity())
def test_instance_root_index_exception(self, mock_cluster_root_load):
req = Mock()
req.environ = {'trove.context': self.context}
tenant_id = Mock()
instance_id = utils.generate_uuid()
self.assertRaises(
exception.UnprocessableEntity,
self.controller.instance_root_index,
req, tenant_id, instance_id
)
mock_cluster_root_load.assert_called_with(self.context, instance_id)
@patch.object(ClusterRootController, "instance_root_index")
@patch.object(ClusterRootController, "_get_cluster_instance_id")
def test_cluster_root_index(self, mock_get_cluster_instance,
mock_instance_root_index):
req = Mock()
tenant_id = Mock()
cluster_id = utils.generate_uuid()
single_instance_id = Mock()
mock_get_cluster_instance.return_value = (single_instance_id, Mock())
self.controller.cluster_root_index(req, tenant_id, cluster_id)
mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id)
mock_instance_root_index.assert_called_with(req, tenant_id,
single_instance_id)
@patch.object(ClusterRootController, "instance_root_create")
@patch.object(ClusterRootController, "_get_cluster_instance_id")
def test_cluster_root_create(self, mock_get_cluster_instance,
mock_instance_root_create):
req = Mock()
body = Mock()
tenant_id = Mock()
cluster_id = utils.generate_uuid()
single_instance_id = Mock()
cluster_instances = Mock()
mock_get_cluster_instance.return_value = (single_instance_id,
cluster_instances)
self.controller.cluster_root_create(req, body, tenant_id, cluster_id)
mock_get_cluster_instance.assert_called_with(tenant_id, cluster_id)
mock_instance_root_create.assert_called_with(req, body,
single_instance_id,
cluster_instances)
@patch.object(DBInstance, "find_all")
def test_get_cluster_instance_id(self, mock_find_all):
tenant_id = Mock()
cluster_id = Mock()
db_inst_1 = Mock()
db_inst_1.id.return_value = utils.generate_uuid()
db_inst_2 = Mock()
db_inst_2.id.return_value = utils.generate_uuid()
cluster_instances = [db_inst_1, db_inst_2]
mock_find_all.return_value.all.return_value = cluster_instances
ret = self.controller._get_cluster_instance_id(tenant_id, cluster_id)
self.assertTrue(db_inst_1.id, ret[0])
self.assertTrue(cluster_instances, ret[1])
@patch.object(models.ClusterRoot, "create")
def test_instance_root_create(self, mock_cluster_root_create):
user = Mock()
self.context.user = Mock()
self.context.user.__getitem__ = Mock(return_value=user)
req = Mock()
req.environ = {'trove.context': self.context}
password = Mock()
body = {'password': password}
instance_id = utils.generate_uuid()
cluster_instances = Mock()
self.controller.instance_root_create(
req, body, instance_id, cluster_instances)
mock_cluster_root_create.assert_called_with(
self.context, instance_id, self.context.user, password,
cluster_instances)
@patch.object(models.ClusterRoot, "create")
def test_instance_root_create_no_body(self, mock_cluster_root_create):
user = Mock()
self.context.user = Mock()
self.context.user.__getitem__ = Mock(return_value=user)
req = Mock()
req.environ = {'trove.context': self.context}
password = None
body = None
instance_id = utils.generate_uuid()
cluster_instances = Mock()
self.controller.instance_root_create(
req, body, instance_id, cluster_instances)
mock_cluster_root_create.assert_called_with(
self.context, instance_id, self.context.user, password,
cluster_instances)
| true | true |
1c2e370fce6aba1a51bb9131766055f18fc82eea | 5,979 | py | Python | config/defaults.py | Ehsan-Yaghoubi/my_reid_Pytorch_template | 8b346ad8010084536a7c998107979fab2bff2ca3 | [
"MIT"
] | null | null | null | config/defaults.py | Ehsan-Yaghoubi/my_reid_Pytorch_template | 8b346ad8010084536a7c998107979fab2bff2ca3 | [
"MIT"
] | null | null | null | config/defaults.py | Ehsan-Yaghoubi/my_reid_Pytorch_template | 8b346ad8010084536a7c998107979fab2bff2ca3 | [
"MIT"
] | null | null | null | from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# The following configurations will be modified when they are set in the .yml files.
# Therefore, if you want to your configurations, please create your_costumed.yml file, instead of changing this script.
# -----------------------------------------------------------------------------
_C = CN()
_C.MODEL = CN()
# Using 'cuda' or 'cpu' for training
_C.MODEL.DEVICE = "cuda"
# ID number of GPU
_C.MODEL.DEVICE_ID = '0'
# Name of backbone
_C.MODEL.NAME = 'resnet50'
# Last stride of backbone
_C.MODEL.LAST_STRIDE = 1
# Path to pretrained model of backbone
_C.MODEL.PRETRAIN_PATH = "set/the/path/in/.yml/file"
# Use ImageNet pretrained model to initialize backbone or use self trained model to initialize the whole model
# Options: 'imagenet' or 'self'
_C.MODEL.PRETRAIN_CHOICE = 'imagenet'
# If train with BNNeck, options: 'bnneck' or 'no'
_C.MODEL.NECK = 'no'
# If train loss include center loss, options: 'yes' or 'no'. Loss with center loss has different optimizer configuration
_C.MODEL.IF_WITH_CENTER = 'no'
# The loss type of metric loss
# options:['triplet'](without center loss) or ['center','triplet_center'](with center loss)
_C.MODEL.METRIC_LOSS_TYPE = 'triplet'
# For example, if loss type is cross entropy loss + triplet loss + center loss
# the setting should be: _C.MODEL.METRIC_LOSS_TYPE = 'triplet_center' and _C.MODEL.IF_WITH_CENTER = 'yes'
# If train with label smooth, options: 'on', 'off'
_C.MODEL.IF_LABELSMOOTH = 'on'
# evaluation settings
# options: "ClothChangingSetting" or "StandardSetting" or "both"
_C.MODEL.Evaluate = "both"
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the image during training
#_C.INPUT.SIZE_TRAIN = [384, 128]
_C.INPUT.SIZE_TRAIN = [128, 128]
# Size of the image during test
#_C.INPUT.SIZE_TEST = [384, 128]
_C.INPUT.SIZE_TEST = [128, 128]
# Random probability for image horizontal flip
_C.INPUT.PROB = 0.5
# Random probability for random erasing
_C.INPUT.RE_PROB = 0.5
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]
# Value of padding size
_C.INPUT.PADDING = 0
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
_C.DATASETS.multiple = False
# List of the dataset names for training, as present in paths_catalog.py
_C.DATASETS.NAMES = ('set it in the .yml file') # shapes_varcolor, shapes_fixcolor, ltcc_noneID, ltcc_orig
# Root directory where datasets should be used (and downloaded if not found)
_C.DATASETS.ROOT_DIR = ('set it in the .yml file')
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 8
# Sampler for data loading
_C.DATALOADER.IDsampler = False
_C.DATALOADER.SAMPLER = "softmax" # set it in the .yml file. Options are softmax | triplet | softmax_triplet | softmax_CosineSim | triplet_CosineSim | softmax_triplet_CosineSim
# Number of instance for one batch
_C.DATALOADER.NUM_INSTANCE = 16
# Data augmentation
_C.DATALOADER.Aug = False
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
# Name of optimizer
_C.SOLVER.OPTIMIZER_NAME = "Adam"
# Number of max epoches
_C.SOLVER.MAX_EPOCHS = 50
# Base learning rate
_C.SOLVER.BASE_LR = 0.00035
# Factor of learning bias
_C.SOLVER.BIAS_LR_FACTOR = 1
# Momentum
_C.SOLVER.MOMENTUM = 0.9
# Margin of triplet loss
_C.SOLVER.MARGIN = 0.3
# Margin of cluster ;pss
_C.SOLVER.CLUSTER_MARGIN = 0.3
# Learning rate of SGD to learn the centers of center loss
_C.SOLVER.CENTER_LR = 0.5
# Balanced weight of center loss
_C.SOLVER.CENTER_LOSS_WEIGHT = 0.0005
# Settings of range loss
_C.SOLVER.RANGE_K = 2
_C.SOLVER.RANGE_MARGIN = 0.3
_C.SOLVER.RANGE_ALPHA = 0
_C.SOLVER.RANGE_BETA = 1
_C.SOLVER.RANGE_LOSS_WEIGHT = 1
# Settings of weight decay
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.
# decay rate of learning rate
_C.SOLVER.GAMMA = 0.1
# decay step of learning rate
_C.SOLVER.STEPS = (30, 55)
# warm up factor
_C.SOLVER.WARMUP_FACTOR = 0.01
# iterations of warm up
_C.SOLVER.WARMUP_ITERS = 5
# method of warm up, option: 'constant','linear'
_C.SOLVER.WARMUP_METHOD = "linear"
# epoch number of saving checkpoints
_C.SOLVER.CHECKPOINT_PERIOD = 10
# iteration of display training log
_C.SOLVER.LOG_PERIOD = 10
# epoch number of validation
_C.SOLVER.EVAL_PERIOD = 10
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 64
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.TEST = CN()
# Number of images per batch during test
_C.TEST.IMS_PER_BATCH = 128
# If test with re-ranking, options: 'yes','no'
_C.TEST.RE_RANKING = 'no'
# Path to trained model
_C.TEST.WEIGHT = "set/the/path/in/.yml/file"
# Which feature of BNNeck to be used for test, before or after BNNneck, options: 'before' or 'after'
_C.TEST.NECK_FEAT = 'before'
# Whether feature is nomalized before test, if yes, it is equivalent to cosine distance
_C.TEST.FEAT_NORM = 'no'
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Path to checkpoint and saved log of trained model
_C.OUTPUT_DIR = "set/the/path/in/.yml/file"
| 36.907407 | 176 | 0.628366 | from yacs.config import CfgNode as CN
_C = CN()
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"
_C.MODEL.DEVICE_ID = '0'
_C.MODEL.NAME = 'resnet50'
_C.MODEL.LAST_STRIDE = 1
_C.MODEL.PRETRAIN_PATH = "set/the/path/in/.yml/file"
_C.MODEL.PRETRAIN_CHOICE = 'imagenet'
_C.MODEL.NECK = 'no'
_C.MODEL.IF_WITH_CENTER = 'no'
_C.MODEL.METRIC_LOSS_TYPE = 'triplet'
_C.MODEL.IF_LABELSMOOTH = 'on'
_C.MODEL.Evaluate = "both"
_C.INPUT = CN()
_C.INPUT.SIZE_TRAIN = [128, 128]
_C.INPUT.SIZE_TEST = [128, 128]
_C.INPUT.PROB = 0.5
_C.INPUT.RE_PROB = 0.5
_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]
_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]
_C.INPUT.PADDING = 0
_C.DATASETS = CN()
_C.DATASETS.multiple = False
_C.DATASETS.NAMES = ('set it in the .yml file')
_C.DATASETS.ROOT_DIR = ('set it in the .yml file')
_C.DATALOADER = CN()
_C.DATALOADER.NUM_WORKERS = 8
_C.DATALOADER.IDsampler = False
_C.DATALOADER.SAMPLER = "softmax"
_C.DATALOADER.NUM_INSTANCE = 16
_C.DATALOADER.Aug = False
_C.SOLVER = CN()
_C.SOLVER.OPTIMIZER_NAME = "Adam"
_C.SOLVER.MAX_EPOCHS = 50
_C.SOLVER.BASE_LR = 0.00035
_C.SOLVER.BIAS_LR_FACTOR = 1
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.MARGIN = 0.3
_C.SOLVER.CLUSTER_MARGIN = 0.3
_C.SOLVER.CENTER_LR = 0.5
_C.SOLVER.CENTER_LOSS_WEIGHT = 0.0005
_C.SOLVER.RANGE_K = 2
_C.SOLVER.RANGE_MARGIN = 0.3
_C.SOLVER.RANGE_ALPHA = 0
_C.SOLVER.RANGE_BETA = 1
_C.SOLVER.RANGE_LOSS_WEIGHT = 1
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30, 55)
_C.SOLVER.WARMUP_FACTOR = 0.01
_C.SOLVER.WARMUP_ITERS = 5
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 10
_C.SOLVER.LOG_PERIOD = 10
_C.SOLVER.EVAL_PERIOD = 10
_C.SOLVER.IMS_PER_BATCH = 64
_C.TEST = CN()
_C.TEST.IMS_PER_BATCH = 128
_C.TEST.RE_RANKING = 'no'
_C.TEST.WEIGHT = "set/the/path/in/.yml/file"
_C.TEST.NECK_FEAT = 'before'
_C.TEST.FEAT_NORM = 'no'
_C.OUTPUT_DIR = "set/the/path/in/.yml/file"
| true | true |
1c2e399c351804410c4ec2623c9930aad9d9a7f0 | 15,627 | py | Python | tests/test_refpixel.py | uniomni/PyRate | f77ad6e7fd90f3c0eb255bd553d4666b5db40bcf | [
"Apache-2.0"
] | 1 | 2020-09-12T00:01:33.000Z | 2020-09-12T00:01:33.000Z | tests/test_refpixel.py | uniomni/PyRate | f77ad6e7fd90f3c0eb255bd553d4666b5db40bcf | [
"Apache-2.0"
] | null | null | null | tests/test_refpixel.py | uniomni/PyRate | f77ad6e7fd90f3c0eb255bd553d4666b5db40bcf | [
"Apache-2.0"
] | null | null | null | # This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the refpixel.py PyRate module.
"""
import copy
import shutil
from subprocess import check_call, run
from pathlib import Path
import pytest
from numpy import nan, mean, std, isnan
import pyrate.core.refpixel
from pyrate.core import config as cf
from pyrate.core.refpixel import ref_pixel, _step, RefPixelError
from pyrate.core import shared, ifgconstants as ifc
from pyrate import process
from pyrate.configuration import Configuration
from tests.common import TEST_CONF_ROIPAC
from tests.common import small_data_setup, MockIfg, copy_small_ifg_file_list, \
copy_and_setup_small_data, manipulate_test_conf, assert_two_dirs_equal, PYTHON3P6
# TODO: figure out how editing resource.setrlimit fixes the error
# to fix the open to many files error
# https://stackoverflow.com/questions/18280612/ioerror-errno-24-too-many-open-files
# default testing values
REFNX = 5
REFNY = 7
MIN_FRAC = 0.7
CHIPSIZE = 3
PARALLEL = False
class TestReferencePixelInputTests:
'''
Verifies error checking capabilities of the reference pixel function
'''
@classmethod
def setup_method(cls):
cls.ifgs = small_data_setup()
cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
cls.params[cf.REFNX] = REFNX
cls.params[cf.REFNY] = REFNY
cls.params[cf.REF_CHIP_SIZE] = CHIPSIZE
cls.params[cf.REF_MIN_FRAC] = MIN_FRAC
cls.params[cf.PARALLEL] = PARALLEL
def test_missing_chipsize(self):
self.params[cf.REF_CHIP_SIZE] = None
with pytest.raises(cf.ConfigException):
ref_pixel(self.ifgs, self.params)
def test_chipsize_valid(self):
for illegal in [0, -1, -15, 1, 2, self.ifgs[0].ncols+1, 4, 6, 10, 20]:
self.params[cf.REF_CHIP_SIZE] = illegal
with pytest.raises(RefPixelError):
ref_pixel(self.ifgs, self.params)
def test_minimum_fraction_missing(self):
self.params[cf.REF_MIN_FRAC] = None
with pytest.raises(cf.ConfigException):
ref_pixel(self.ifgs, self.params)
def test_minimum_fraction_threshold(self):
for illegal in [-0.1, 1.1, 1.000001, -0.0000001]:
self.params[cf.REF_MIN_FRAC] = illegal
with pytest.raises(RefPixelError):
ref_pixel(self.ifgs, self.params)
def test_search_windows(self):
# 45 is max # cells a width 3 sliding window can iterate over
for illegal in [-5, -1, 0, 46, 50, 100]:
self.params[cf.REFNX] = illegal
with pytest.raises(RefPixelError):
ref_pixel(self.ifgs, self.params)
# 40 is max # cells a width 3 sliding window can iterate over
for illegal in [-5, -1, 0, 71, 85, 100]:
self.params[cf.REFNY] = illegal
with pytest.raises(RefPixelError):
ref_pixel(self.ifgs, self.params)
def test_missing_search_windows(self):
self.params[cf.REFNX] = None
with pytest.raises(cf.ConfigException):
ref_pixel(self.ifgs, self.params)
self.params[cf.REFNX] = REFNX
self.params[cf.REFNY] = None
with pytest.raises(cf.ConfigException):
ref_pixel(self.ifgs, self.params)
class TestReferencePixelTests:
"""
Tests reference pixel search
"""
@classmethod
def setup_method(cls):
cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
cls.params[cf.OUT_DIR], cls.ifgs = copy_and_setup_small_data()
cls.params[cf.REFNX] = REFNX
cls.params[cf.REFNY] = REFNY
cls.params[cf.REF_CHIP_SIZE] = CHIPSIZE
cls.params[cf.REF_MIN_FRAC] = MIN_FRAC
cls.params[cf.PARALLEL] = PARALLEL
def test_all_below_threshold_exception(self):
# test failure when no valid stacks in dataset
# rig mock data to be below threshold
mock_ifgs = [MockIfg(i, 6, 7) for i in self.ifgs]
for m in mock_ifgs:
m.phase_data[:1] = nan
m.phase_data[1:5] = 0.1
m.phase_data[5:] = nan
self.params[cf.REFNX] = 2
self.params[cf.REFNY] = 2
self.params[cf.REF_CHIP_SIZE] = CHIPSIZE
self.params[cf.REF_MIN_FRAC] = MIN_FRAC
self.params[cf.PARALLEL] = PARALLEL
with pytest.raises(ValueError):
ref_pixel(mock_ifgs, self.params)
def test_refnxy_step_1(self):
# test step of 1 for refnx|y gets the reference pixel for axis centre
mock_ifgs = [MockIfg(i, 47, 72) for i in self.ifgs]
for m in mock_ifgs:
m.phase_data[:1] = 0.2
m.phase_data[1:5] = 0.1
m.phase_data[5:] = 0.3
exp_refpx = (1, 1)
self.params[cf.REFNX] = 1
self.params[cf.REFNY] = 1
self.params[cf.REF_CHIP_SIZE] = CHIPSIZE
self.params[cf.REF_MIN_FRAC] = MIN_FRAC
self.params[cf.PARALLEL] = PARALLEL
res = ref_pixel(mock_ifgs, self.params)
assert exp_refpx == res
def test_large_window(self):
# 5x5 view over a 5x5 ifg with 1 window/ref pix search
chps = 5
mockifgs = [MockIfg(i, chps, chps) for i in self.ifgs]
self.params[cf.REFNX] = 1
self.params[cf.REFNY] = 1
self.params[cf.REF_CHIP_SIZE] = chps
self.params[cf.REF_MIN_FRAC] = MIN_FRAC
self.params[cf.PARALLEL] = PARALLEL
res = ref_pixel(mockifgs, self.params)
assert (2, 2) == res
def test_step(self):
# test different search windows to verify x/y step calculation
# convenience testing function
def assert_equal(actual, expected):
for a, e in zip(actual, expected):
assert a == e
# start with simple corner only test
width = 47
radius = 2
refnx = 2
exp = [2, 25, 44]
act = _step(width, refnx, radius)
assert_equal(act, exp)
# test with 3 windows
refnx = 3
exp = [2, 17, 32]
act = _step(width, refnx, radius)
assert_equal(act, exp)
# test 4 search windows
refnx = 4
exp = [2, 13, 24, 35]
act = _step(width, refnx, radius)
assert_equal(act, exp)
def test_ref_pixel(self):
exp_refpx = (2, 25)
self.params[cf.REFNX] = 2
self.params[cf.REFNY] = 2
self.params[cf.REF_CHIP_SIZE] = 5
self.params[cf.REF_MIN_FRAC] = MIN_FRAC
self.params[cf.PARALLEL] = PARALLEL
res = ref_pixel(self.ifgs, self.params)
assert res == exp_refpx
# Invalidate first data stack, get new refpix coods & retest
for i in self.ifgs:
i.phase_data[:30, :50] = nan
exp_refpx = (38, 2)
res = ref_pixel(self.ifgs, self.params)
assert res == exp_refpx
def _expected_ref_pixel(ifgs, cs):
"""Helper function for finding reference pixel when refnx/y=2"""
# calculate expected data
data = [i.phase_data for i in ifgs] # len 17 list of arrays
ul = [i[:cs, :cs] for i in data] # upper left corner stack
ur = [i[:cs, -cs:] for i in data]
ll = [i[-cs:, :cs] for i in data]
lr = [i[-cs:, -cs:] for i in data]
ulm = mean([std(i[~isnan(i)]) for i in ul]) # mean std of all the layers
urm = mean([std(i[~isnan(i)]) for i in ur])
llm = mean([std(i[~isnan(i)]) for i in ll])
lrm = mean([std(i[~isnan(i)]) for i in lr])
assert isnan([ulm, urm, llm, lrm]).any() is False
# coords of the smallest mean is the result
mn = [ulm, urm, llm, lrm]
class TestLegacyEqualityTest:
@classmethod
def setup_method(cls):
cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
cls.params[cf.PARALLEL] = 0
cls.params[cf.OUT_DIR], cls.ifg_paths = copy_small_ifg_file_list()
conf_file = Path(cls.params[cf.OUT_DIR], 'conf_file.conf')
cf.write_config_file(params=cls.params, output_conf_file=conf_file)
cls.params = Configuration(conf_file).__dict__
cls.params_alt_ref_frac = copy.copy(cls.params)
cls.params_alt_ref_frac[cf.REF_MIN_FRAC] = 0.5
cls.params_all_2s = copy.copy(cls.params)
cls.params_all_2s[cf.REFNX] = 2
cls.params_all_2s[cf.REFNY] = 2
cls.params_chipsize_15 = copy.copy(cls.params_all_2s)
cls.params_chipsize_15[cf.REF_CHIP_SIZE] = 15
cls.params_all_1s = copy.copy(cls.params)
cls.params_all_1s[cf.REFNX] = 1
cls.params_all_1s[cf.REFNY] = 1
cls.params_all_1s[cf.REF_MIN_FRAC] = 0.7
for p, q in zip(cls.params[cf.INTERFEROGRAM_FILES], cls.ifg_paths): # hack
p.sampled_path = q
p.tmp_sampled_path = q
@classmethod
def teardown_method(cls):
shutil.rmtree(cls.params[cf.OUT_DIR])
def test_small_test_data_ref_pixel_lat_lon_provided(self):
self.params[cf.REFX], self.params[cf.REFY] = 150.941666654, -34.218333314
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
assert refx == 38
assert refy == 58
assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
def test_small_test_data_ref_pixel(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
assert refx == 38
assert refy == 58
assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
def test_small_test_data_ref_chipsize_15(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
assert refx == 7
assert refy == 7
assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
def test_metadata(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
for i in self.ifg_paths:
ifg = shared.Ifg(i)
ifg.open(readonly=True)
md = ifg.meta_data
for k, v in zip([ifc.PYRATE_REFPIX_X, ifc.PYRATE_REFPIX_Y, ifc.PYRATE_REFPIX_LAT,
ifc.PYRATE_REFPIX_LON, ifc.PYRATE_MEAN_REF_AREA, ifc.PYRATE_STDDEV_REF_AREA],
[str(refx), str(refy), 0, 0, 0, 0]):
assert k in md # metadata present
# assert values
ifg.close()
def test_small_test_data_ref_all_1(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_1s)
assert 0.7 == pytest.approx(self.params_all_1s[cf.REF_MIN_FRAC])
assert 1 == self.params_all_1s[cf.REFNX]
assert 1 == self.params_all_1s[cf.REFNY]
assert refx == 2
assert refy == 2
class TestLegacyEqualityTestMultiprocessParallel:
@classmethod
def setup_method(cls):
cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
cls.params[cf.PARALLEL] = 1
cls.params[cf.OUT_DIR], cls.ifg_paths = copy_small_ifg_file_list()
conf_file = Path(cls.params[cf.OUT_DIR], 'conf_file.conf')
cf.write_config_file(params=cls.params, output_conf_file=conf_file)
cls.params = Configuration(conf_file).__dict__
cls.params_alt_ref_frac = copy.copy(cls.params)
cls.params_alt_ref_frac[cf.REF_MIN_FRAC] = 0.5
cls.params_all_2s = copy.copy(cls.params)
cls.params_all_2s[cf.REFNX] = 2
cls.params_all_2s[cf.REFNY] = 2
cls.params_chipsize_15 = copy.copy(cls.params_all_2s)
cls.params_chipsize_15[cf.REF_CHIP_SIZE] = 15
cls.params_all_1s = copy.copy(cls.params)
cls.params_all_1s[cf.REFNX] = 1
cls.params_all_1s[cf.REFNY] = 1
cls.params_all_1s[cf.REF_MIN_FRAC] = 0.7
for p, q in zip(cls.params[cf.INTERFEROGRAM_FILES], cls.ifg_paths): # hack
p.sampled_path = q
p.tmp_sampled_path = q
@classmethod
def teardown_method(cls):
shutil.rmtree(cls.params[cf.OUT_DIR])
def test_small_test_data_ref_pixel(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
assert refx == 38
assert refy == 58
assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
def test_more_small_test_data_ref_pixel(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_alt_ref_frac)
assert refx == 38
assert refy == 58
assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
def test_small_test_data_ref_pixel_all_2(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_2s)
assert refx == 25
assert refy == 2
assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
def test_small_test_data_ref_chipsize_15(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
assert refx == 7
assert refy == 7
assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
def test_small_test_data_ref_all_1(self):
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_1s)
assert 0.7 == pytest.approx(self.params_all_1s[cf.REF_MIN_FRAC])
assert 1 == self.params_all_1s[cf.REFNX]
assert 1 == self.params_all_1s[cf.REFNY]
assert refx == 2
assert refy == 2
@pytest.mark.slow
@pytest.mark.skip(PYTHON3P6, reason='Skipped in python3p6')
def test_error_msg_refpixel_out_out_bounds(tempdir, gamma_conf):
"check correct latitude/longitude refpixel error is raised when specified refpixel is out of bounds"
for x, (refx, refy) in zip(['longitude', 'latitude', 'longitude and latitude'],
[(150., -34.218333314), (150.941666654, -34.), (150, -34)]):
_, out = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=refx, refy=refy)
msg = "Supplied {} value is outside the bounds of the interferogram data"
assert msg.format(x) in out.stderr
@pytest.mark.slow
# BUG FIX: skip() -> skipif() so PYTHON3P6 acts as the skip *condition*.
@pytest.mark.skipif(PYTHON3P6, reason='Skipped in python3p6')
def test_gamma_ref_pixel_search_vs_lat_lon(tempdir, gamma_conf):
    """A full search (-1/-1) and an explicit lat/lon must produce identical outputs."""
    params_1, _ = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=-1, refy=-1)
    params_2, _ = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=150.941666654, refy=-34.218333314)
    assert_two_dirs_equal(params_1[cf.OUT_DIR], params_2[cf.OUT_DIR], f"*{params_1[cf.IFG_CROP_OPT]}cr.tif", 18)
def _get_mlooked_files(gamma_conf, tdir, refx, refy):
    """Run the pyrate conv2tif/prepifg/process stages for *gamma_conf* with the
    given reference-pixel coordinates written into the config.

    Returns (params, completed_process) where the latter is the captured
    result of the final ``pyrate process`` invocation.
    """
    params = manipulate_test_conf(gamma_conf, tdir)
    params[cf.REFX] = refx
    params[cf.REFY] = refy
    conf_path = tdir.joinpath('config.conf')
    cf.write_config_file(params=params, output_conf_file=conf_path)
    # The first two stages must succeed outright; the last stage's output is
    # captured so callers can inspect stderr.
    for stage in ('conv2tif', 'prepifg'):
        check_call(f"pyrate {stage} -f {conf_path}", shell=True)
    proc_result = run(f"pyrate process -f {conf_path}", shell=True, capture_output=True, text=True)
    print("============================================", proc_result)
    return params, proc_result
| 38.021898 | 112 | 0.6531 |
import copy
import shutil
from subprocess import check_call, run
from pathlib import Path
import pytest
from numpy import nan, mean, std, isnan
import pyrate.core.refpixel
from pyrate.core import config as cf
from pyrate.core.refpixel import ref_pixel, _step, RefPixelError
from pyrate.core import shared, ifgconstants as ifc
from pyrate import process
from pyrate.configuration import Configuration
from tests.common import TEST_CONF_ROIPAC
from tests.common import small_data_setup, MockIfg, copy_small_ifg_file_list, \
copy_and_setup_small_data, manipulate_test_conf, assert_two_dirs_equal, PYTHON3P6
REFNX = 5
REFNY = 7
MIN_FRAC = 0.7
CHIPSIZE = 3
PARALLEL = False
class TestReferencePixelInputTests:
    """Validation of ref_pixel() parameter checking on the small dataset."""
    @classmethod
    def setup_method(cls):
        """Load the ROIPAC test config and a fresh set of small ifgs."""
        cls.ifgs = small_data_setup()
        cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
        cls.params[cf.REFNX] = REFNX
        cls.params[cf.REFNY] = REFNY
        cls.params[cf.REF_CHIP_SIZE] = CHIPSIZE
        cls.params[cf.REF_MIN_FRAC] = MIN_FRAC
        cls.params[cf.PARALLEL] = PARALLEL
    def test_missing_chipsize(self):
        """A None chip size must raise ConfigException."""
        self.params[cf.REF_CHIP_SIZE] = None
        with pytest.raises(cf.ConfigException):
            ref_pixel(self.ifgs, self.params)
    def test_chipsize_valid(self):
        """Non-positive, even, or oversized chip sizes must raise."""
        for illegal in [0, -1, -15, 1, 2, self.ifgs[0].ncols+1, 4, 6, 10, 20]:
            self.params[cf.REF_CHIP_SIZE] = illegal
            with pytest.raises(RefPixelError):
                ref_pixel(self.ifgs, self.params)
    def test_minimum_fraction_missing(self):
        """A None REF_MIN_FRAC must raise ConfigException."""
        self.params[cf.REF_MIN_FRAC] = None
        with pytest.raises(cf.ConfigException):
            ref_pixel(self.ifgs, self.params)
    def test_minimum_fraction_threshold(self):
        """REF_MIN_FRAC outside [0, 1] must raise RefPixelError."""
        for illegal in [-0.1, 1.1, 1.000001, -0.0000001]:
            self.params[cf.REF_MIN_FRAC] = illegal
            with pytest.raises(RefPixelError):
                ref_pixel(self.ifgs, self.params)
    def test_search_windows(self):
        """Out-of-range REFNX/REFNY window counts must raise RefPixelError."""
        # BUG FIX: `illegal` was referenced without ever being defined (the
        # loops over illegal values were lost), giving a NameError instead of
        # exercising the validation.  Restore the loops.  The upper bounds are
        # assumed from the small-ifg dimensions -- TODO confirm against the
        # maximum number of CHIPSIZE windows that fit the test data.
        for illegal in [-5, -1, 0, 46, 50, 100]:
            self.params[cf.REFNX] = illegal
            with pytest.raises(RefPixelError):
                ref_pixel(self.ifgs, self.params)
        self.params[cf.REFNX] = REFNX
        for illegal in [-5, -1, 0, 71, 85, 100]:
            self.params[cf.REFNY] = illegal
            with pytest.raises(RefPixelError):
                ref_pixel(self.ifgs, self.params)
        self.params[cf.REFNY] = REFNY
    def test_missing_search_windows(self):
        """None REFNX/REFNY must raise ConfigException."""
        self.params[cf.REFNX] = None
        with pytest.raises(cf.ConfigException):
            ref_pixel(self.ifgs, self.params)
        self.params[cf.REFNX] = REFNX
        self.params[cf.REFNY] = None
        with pytest.raises(cf.ConfigException):
            ref_pixel(self.ifgs, self.params)
class TestReferencePixelTests:
    """Tests of the ref_pixel() search itself against the small test dataset."""
    @classmethod
    def setup_method(cls):
        # Fresh config plus a working copy of the small dataset per test.
        cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
        cls.params[cf.OUT_DIR], cls.ifgs = copy_and_setup_small_data()
        cls.params[cf.REFNX] = REFNX
        cls.params[cf.REFNY] = REFNY
        cls.params[cf.REF_CHIP_SIZE] = CHIPSIZE
        cls.params[cf.REF_MIN_FRAC] = MIN_FRAC
        cls.params[cf.PARALLEL] = PARALLEL
    def test_all_below_threshold_exception(self):
        """Every candidate window below REF_MIN_FRAC -> ValueError."""
        # Mostly-NaN phase data leaves all windows under the valid-fraction
        # threshold.
        mock_ifgs = [MockIfg(i, 6, 7) for i in self.ifgs]
        for m in mock_ifgs:
            m.phase_data[:1] = nan
            m.phase_data[1:5] = 0.1
            m.phase_data[5:] = nan
        self.params[cf.REFNX] = 2
        self.params[cf.REFNY] = 2
        self.params[cf.REF_CHIP_SIZE] = CHIPSIZE
        self.params[cf.REF_MIN_FRAC] = MIN_FRAC
        self.params[cf.PARALLEL] = PARALLEL
        with pytest.raises(ValueError):
            ref_pixel(mock_ifgs, self.params)
    def test_refnxy_step_1(self):
        """A 1x1 search grid on NaN-free data returns the sole candidate (1, 1)."""
        mock_ifgs = [MockIfg(i, 47, 72) for i in self.ifgs]
        for m in mock_ifgs:
            m.phase_data[:1] = 0.2
            m.phase_data[1:5] = 0.1
            m.phase_data[5:] = 0.3
        exp_refpx = (1, 1)
        self.params[cf.REFNX] = 1
        self.params[cf.REFNY] = 1
        self.params[cf.REF_CHIP_SIZE] = CHIPSIZE
        self.params[cf.REF_MIN_FRAC] = MIN_FRAC
        self.params[cf.PARALLEL] = PARALLEL
        res = ref_pixel(mock_ifgs, self.params)
        assert exp_refpx == res
    def test_large_window(self):
        """A chip as large as the mock ifg still yields its centre pixel (2, 2)."""
        chps = 5
        mockifgs = [MockIfg(i, chps, chps) for i in self.ifgs]
        self.params[cf.REFNX] = 1
        self.params[cf.REFNY] = 1
        self.params[cf.REF_CHIP_SIZE] = chps
        self.params[cf.REF_MIN_FRAC] = MIN_FRAC
        self.params[cf.PARALLEL] = PARALLEL
        res = ref_pixel(mockifgs, self.params)
        assert (2, 2) == res
    def test_step(self):
        """_step() should yield evenly spaced window-centre coordinates."""
        def assert_equal(actual, expected):
            # Element-wise comparison helper local to this test.
            for a, e in zip(actual, expected):
                assert a == e
        width = 47
        radius = 2
        refnx = 2
        exp = [2, 25, 44]
        act = _step(width, refnx, radius)
        assert_equal(act, exp)
        refnx = 3
        exp = [2, 17, 32]
        act = _step(width, refnx, radius)
        assert_equal(act, exp)
        refnx = 4
        exp = [2, 13, 24, 35]
        act = _step(width, refnx, radius)
        assert_equal(act, exp)
    def test_ref_pixel(self):
        """End-to-end search on real data, before and after masking a region."""
        exp_refpx = (2, 25)
        self.params[cf.REFNX] = 2
        self.params[cf.REFNY] = 2
        self.params[cf.REF_CHIP_SIZE] = 5
        self.params[cf.REF_MIN_FRAC] = MIN_FRAC
        self.params[cf.PARALLEL] = PARALLEL
        res = ref_pixel(self.ifgs, self.params)
        assert res == exp_refpx
        # Masking the previously best region must move the chosen pixel.
        for i in self.ifgs:
            i.phase_data[:30, :50] = nan
        exp_refpx = (38, 2)
        res = ref_pixel(self.ifgs, self.params)
        assert res == exp_refpx
def _expected_ref_pixel(ifgs, cs):
data = [i.phase_data for i in ifgs]
ul = [i[:cs, :cs] for i in data]
ur = [i[:cs, -cs:] for i in data]
ll = [i[-cs:, :cs] for i in data]
lr = [i[-cs:, -cs:] for i in data]
ulm = mean([std(i[~isnan(i)]) for i in ul])
urm = mean([std(i[~isnan(i)]) for i in ur])
llm = mean([std(i[~isnan(i)]) for i in ll])
lrm = mean([std(i[~isnan(i)]) for i in lr])
assert isnan([ulm, urm, llm, lrm]).any() is False
mn = [ulm, urm, llm, lrm]
class TestLegacyEqualityTest:
    """Regression tests pinning ref-pixel results for the small dataset
    (serial path, PARALLEL = 0)."""
    @classmethod
    def setup_method(cls):
        # Build the base config, write it to disk and re-read it through
        # Configuration so params match production use.
        cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
        cls.params[cf.PARALLEL] = 0
        cls.params[cf.OUT_DIR], cls.ifg_paths = copy_small_ifg_file_list()
        conf_file = Path(cls.params[cf.OUT_DIR], 'conf_file.conf')
        cf.write_config_file(params=cls.params, output_conf_file=conf_file)
        cls.params = Configuration(conf_file).__dict__
        # Variants of the base params exercised by individual tests.
        cls.params_alt_ref_frac = copy.copy(cls.params)
        cls.params_alt_ref_frac[cf.REF_MIN_FRAC] = 0.5
        cls.params_all_2s = copy.copy(cls.params)
        cls.params_all_2s[cf.REFNX] = 2
        cls.params_all_2s[cf.REFNY] = 2
        cls.params_chipsize_15 = copy.copy(cls.params_all_2s)
        cls.params_chipsize_15[cf.REF_CHIP_SIZE] = 15
        cls.params_all_1s = copy.copy(cls.params)
        cls.params_all_1s[cf.REFNX] = 1
        cls.params_all_1s[cf.REFNY] = 1
        cls.params_all_1s[cf.REF_MIN_FRAC] = 0.7
        # Point the interferogram file entries at the copied test data.
        for p, q in zip(cls.params[cf.INTERFEROGRAM_FILES], cls.ifg_paths):
            p.sampled_path = q
            p.tmp_sampled_path = q
    @classmethod
    def teardown_method(cls):
        # Remove the temporary output directory and its copied ifgs.
        shutil.rmtree(cls.params[cf.OUT_DIR])
    def test_small_test_data_ref_pixel_lat_lon_provided(self):
        """Explicit lat/lon is converted to the expected pixel (38, 58)."""
        self.params[cf.REFX], self.params[cf.REFY] = 150.941666654, -34.218333314
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
        assert refx == 38
        assert refy == 58
        assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
    def test_small_test_data_ref_pixel(self):
        """Default search finds pixel (38, 58)."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
        assert refx == 38
        assert refy == 58
        assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
    def test_small_test_data_ref_chipsize_15(self):
        """Chip size 15 on a 2x2 grid finds pixel (7, 7)."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
        assert refx == 7
        assert refy == 7
        # NOTE(review): asserts params_alt_ref_frac rather than
        # params_chipsize_15 -- possibly a copy-paste; confirm intent.
        assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
    def test_metadata(self):
        """Ref-pixel metadata keys are written into each ifg's header."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
        for i in self.ifg_paths:
            ifg = shared.Ifg(i)
            ifg.open(readonly=True)
            md = ifg.meta_data
            # Only key presence is checked here; v is unused.
            for k, v in zip([ifc.PYRATE_REFPIX_X, ifc.PYRATE_REFPIX_Y, ifc.PYRATE_REFPIX_LAT,
                             ifc.PYRATE_REFPIX_LON, ifc.PYRATE_MEAN_REF_AREA, ifc.PYRATE_STDDEV_REF_AREA],
                            [str(refx), str(refy), 0, 0, 0, 0]):
                assert k in md
            ifg.close()
    def test_small_test_data_ref_all_1(self):
        """1x1 grid with REF_MIN_FRAC 0.7 finds pixel (2, 2)."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_1s)
        assert 0.7 == pytest.approx(self.params_all_1s[cf.REF_MIN_FRAC])
        assert 1 == self.params_all_1s[cf.REFNX]
        assert 1 == self.params_all_1s[cf.REFNY]
        assert refx == 2
        assert refy == 2
class TestLegacyEqualityTestMultiprocessParallel:
    """Same ref-pixel regression checks as TestLegacyEqualityTest, but run
    with PARALLEL = 1 (multiprocess execution path)."""
    @classmethod
    def setup_method(cls):
        # Identical fixture to the serial class except for PARALLEL = 1.
        cls.params = cf.get_config_params(TEST_CONF_ROIPAC)
        cls.params[cf.PARALLEL] = 1
        cls.params[cf.OUT_DIR], cls.ifg_paths = copy_small_ifg_file_list()
        conf_file = Path(cls.params[cf.OUT_DIR], 'conf_file.conf')
        cf.write_config_file(params=cls.params, output_conf_file=conf_file)
        cls.params = Configuration(conf_file).__dict__
        cls.params_alt_ref_frac = copy.copy(cls.params)
        cls.params_alt_ref_frac[cf.REF_MIN_FRAC] = 0.5
        cls.params_all_2s = copy.copy(cls.params)
        cls.params_all_2s[cf.REFNX] = 2
        cls.params_all_2s[cf.REFNY] = 2
        cls.params_chipsize_15 = copy.copy(cls.params_all_2s)
        cls.params_chipsize_15[cf.REF_CHIP_SIZE] = 15
        cls.params_all_1s = copy.copy(cls.params)
        cls.params_all_1s[cf.REFNX] = 1
        cls.params_all_1s[cf.REFNY] = 1
        cls.params_all_1s[cf.REF_MIN_FRAC] = 0.7
        # Point the interferogram file entries at the copied test data.
        for p, q in zip(cls.params[cf.INTERFEROGRAM_FILES], cls.ifg_paths):
            p.sampled_path = q
            p.tmp_sampled_path = q
    @classmethod
    def teardown_method(cls):
        # Remove the temporary output directory and its copied ifgs.
        shutil.rmtree(cls.params[cf.OUT_DIR])
    def test_small_test_data_ref_pixel(self):
        """Default search finds pixel (38, 58)."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params)
        assert refx == 38
        assert refy == 58
        assert 0.8 == pytest.approx(self.params[cf.REF_MIN_FRAC])
    def test_more_small_test_data_ref_pixel(self):
        """Lower REF_MIN_FRAC (0.5): same pixel (38, 58) found."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_alt_ref_frac)
        assert refx == 38
        assert refy == 58
        assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
    def test_small_test_data_ref_pixel_all_2(self):
        """REFNX = REFNY = 2 finds pixel (25, 2)."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_2s)
        assert refx == 25
        assert refy == 2
        # NOTE(review): checks params_alt_ref_frac, not params_all_2s --
        # looks like a copy-paste of the previous assertion; confirm intent.
        assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
    def test_small_test_data_ref_chipsize_15(self):
        """Chip size 15 on a 2x2 grid finds pixel (7, 7)."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_chipsize_15)
        assert refx == 7
        assert refy == 7
        assert 0.5 == pytest.approx(self.params_alt_ref_frac[cf.REF_MIN_FRAC])
    def test_small_test_data_ref_all_1(self):
        """1x1 grid with REF_MIN_FRAC 0.7 finds pixel (2, 2)."""
        refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(self.params_all_1s)
        assert 0.7 == pytest.approx(self.params_all_1s[cf.REF_MIN_FRAC])
        assert 1 == self.params_all_1s[cf.REFNX]
        assert 1 == self.params_all_1s[cf.REFNY]
        assert refx == 2
        assert refy == 2
@pytest.mark.slow
# BUG FIX: pytest.mark.skip() is unconditional (its only parameter is a
# reason); skipif() is the conditional mark intended for the PYTHON3P6 flag.
@pytest.mark.skipif(PYTHON3P6, reason='Skipped in python3p6')
def test_error_msg_refpixel_out_out_bounds(tempdir, gamma_conf):
    """Check the correct lat/long out-of-bounds refpixel error is reported."""
    for x, (refx, refy) in zip(['longitude', 'latitude', 'longitude and latitude'],
                               [(150., -34.218333314), (150.941666654, -34.), (150, -34)]):
        _, out = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=refx, refy=refy)
        msg = "Supplied {} value is outside the bounds of the interferogram data"
        assert msg.format(x) in out.stderr
@pytest.mark.slow
# BUG FIX: skip() -> skipif() so PYTHON3P6 acts as the skip *condition*.
@pytest.mark.skipif(PYTHON3P6, reason='Skipped in python3p6')
def test_gamma_ref_pixel_search_vs_lat_lon(tempdir, gamma_conf):
    """A full search (-1/-1) and an explicit lat/lon must produce identical outputs."""
    params_1, _ = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=-1, refy=-1)
    params_2, _ = _get_mlooked_files(gamma_conf, Path(tempdir()), refx=150.941666654, refy=-34.218333314)
    assert_two_dirs_equal(params_1[cf.OUT_DIR], params_2[cf.OUT_DIR], f"*{params_1[cf.IFG_CROP_OPT]}cr.tif", 18)
def _get_mlooked_files(gamma_conf, tdir, refx, refy):
    """Run the pyrate conv2tif/prepifg/process stages with the given reference
    pixel written into the config; return (params, captured process result)."""
    params = manipulate_test_conf(gamma_conf, tdir)
    params[cf.REFX] = refx
    params[cf.REFY] = refy
    conf_path = tdir.joinpath('config.conf')
    cf.write_config_file(params=params, output_conf_file=conf_path)
    # First two stages must succeed; the last stage's output is captured so
    # callers can inspect stderr.
    for stage in ('conv2tif', 'prepifg'):
        check_call(f"pyrate {stage} -f {conf_path}", shell=True)
    proc_result = run(f"pyrate process -f {conf_path}", shell=True, capture_output=True, text=True)
    print("============================================", proc_result)
    return params, proc_result
| true | true |
1c2e39c02248ac535e7f4e4f4f871e156c50c176 | 2,070 | py | Python | ops-tests/feature/test_switch.py | nshinde5486/ansible_2switchtopo | e49a883d385c36bea7b12ff9f38b2f2ac22431f6 | [
"Apache-2.0"
] | null | null | null | ops-tests/feature/test_switch.py | nshinde5486/ansible_2switchtopo | e49a883d385c36bea7b12ff9f38b2f2ac22431f6 | [
"Apache-2.0"
] | null | null | null | ops-tests/feature/test_switch.py | nshinde5486/ansible_2switchtopo | e49a883d385c36bea7b12ff9f38b2f2ac22431f6 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
TOPOLOGY = """
#
# +-----------------+ +------------+
# | Ansible | eth0 eth0 | |
# | control machine |-------------| OpenSwitch |
# | (server) | | (switch) |
# +-----------------+ +------------+
#
# Nodes
[type=oobmhost name="server"] server
[type=openswitch name="switch"] switch
#
# Links
[force_name=oobm] switch:eth0
server:eth0 -- switch:eth0
"""
def _setup(topo):
    """Prepare the Ansible control server and the OpenSwitch node.

    Waits for the switch to boot, assigns static management addresses on the
    shared 192.168.1.0/24 OOBM link, then pushes the server's SSH public key
    to the switch so later playbooks can connect.  Returns the server node.
    """
    server = topo.get('server')
    switch = topo.get('switch')
    # Wait switch to come up
    time.sleep(10)
    # Server IP address
    server.libs.ip.interface('eth0', addr='192.168.1.254/24', up=True)
    # Switch IP address
    with switch.libs.vtysh.ConfigInterfaceMgmt() as ctx:
        ctx.ip_static('192.168.1.1/24')
    # Copy SSH public key through playbook
    _test_playbook(server, 'utils/copy_public_key.yaml', ops='-u root')
    return server
def _cmd(playbook, ops=''):
return "ansible-playbook %s /etc/ansible/%s" % (ops, playbook)
def _test_playbook(server, playbook, ops=''):
    """Run *playbook* on *server* and assert the shell reported success."""
    server(_cmd(playbook, ops))
    exit_code = server('echo $?')
    assert exit_code == '0', "fail in %s" % playbook
def test_hostname(topology, step):
    """Exercise the switch role's hostname playbook end to end."""
    playbook = 'roles/switch/tests/test_hostname.yml'
    ansible_server = _setup(topology)
    step("Test %s playbook" % playbook)
    _test_playbook(ansible_server, playbook, ops='-v')
| 28.75 | 71 | 0.630435 |
import time
TOPOLOGY = """
#
# +-----------------+ +------------+
# | Ansible | eth0 eth0 | |
# | control machine |-------------| OpenSwitch |
# | (server) | | (switch) |
# +-----------------+ +------------+
#
# Nodes
[type=oobmhost name="server"] server
[type=openswitch name="switch"] switch
#
# Links
[force_name=oobm] switch:eth0
server:eth0 -- switch:eth0
"""
def _setup(topo):
    """Prepare the Ansible control server and the OpenSwitch node; return the
    server node once both ends of the OOBM link are configured."""
    server = topo.get('server')
    switch = topo.get('switch')
    # Give the switch time to finish booting before configuring it.
    time.sleep(10)
    # Static management address for the Ansible control machine.
    server.libs.ip.interface('eth0', addr='192.168.1.254/24', up=True)
    # Static management address for the switch, via vtysh.
    with switch.libs.vtysh.ConfigInterfaceMgmt() as ctx:
        ctx.ip_static('192.168.1.1/24')
    # Push the server's SSH public key so later playbooks can connect.
    _test_playbook(server, 'utils/copy_public_key.yaml', ops='-u root')
    return server
def _cmd(playbook, ops=''):
return "ansible-playbook %s /etc/ansible/%s" % (ops, playbook)
def _test_playbook(server, playbook, ops=''):
    """Run *playbook* on *server* and assert the shell reported success."""
    server(_cmd(playbook, ops))
    exit_code = server('echo $?')
    assert exit_code == '0', "fail in %s" % playbook
def test_hostname(topology, step):
    """Exercise the switch role's hostname playbook end to end."""
    playbook = 'roles/switch/tests/test_hostname.yml'
    ansible_server = _setup(topology)
    step("Test %s playbook" % playbook)
    _test_playbook(ansible_server, playbook, ops='-v')
| true | true |
1c2e3a596f63c8d59e25f4a3ad1f356d75d198af | 280 | py | Python | other/application/windowApp/test/showTestList.py | Ethan7102/FYP | c6560a0b95ad78d5e1a341ab2d93c063e10c6631 | [
"MIT"
] | null | null | null | other/application/windowApp/test/showTestList.py | Ethan7102/FYP | c6560a0b95ad78d5e1a341ab2d93c063e10c6631 | [
"MIT"
] | null | null | null | other/application/windowApp/test/showTestList.py | Ethan7102/FYP | c6560a0b95ad78d5e1a341ab2d93c063e10c6631 | [
"MIT"
] | 1 | 2021-01-23T07:59:57.000Z | 2021-01-23T07:59:57.000Z | import sys
import testList
from PyQt5.QtWidgets import QApplication, QMainWindow
if __name__ == '__main__':
    # Standard PyQt bootstrap: one QApplication per process, the generated
    # Ui_MainWindow laid onto a bare QMainWindow, then the Qt event loop;
    # exec_()'s exit status is propagated to the shell via sys.exit.
    app = QApplication(sys.argv)
    MainWindow = QMainWindow()
    ui = testList.Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
sys.exit(app.exec_()) | 28 | 53 | 0.721429 | import sys
import testList
from PyQt5.QtWidgets import QApplication, QMainWindow
if __name__ == '__main__':
    # Standard PyQt bootstrap: one QApplication per process, the generated
    # Ui_MainWindow laid onto a bare QMainWindow, then the Qt event loop;
    # exec_()'s exit status is propagated to the shell via sys.exit.
    app = QApplication(sys.argv)
    MainWindow = QMainWindow()
    ui = testList.Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
sys.exit(app.exec_()) | true | true |
1c2e3bb30191572e984a79039352c93ea0460f76 | 2,861 | py | Python | LogAnalysis.py | ZeinaKittaneh/LogAnalysis_Udacity | 860651042c0dd5376f23aa478bb37d04c3538641 | [
"Unlicense",
"MIT"
] | null | null | null | LogAnalysis.py | ZeinaKittaneh/LogAnalysis_Udacity | 860651042c0dd5376f23aa478bb37d04c3538641 | [
"Unlicense",
"MIT"
] | null | null | null | LogAnalysis.py | ZeinaKittaneh/LogAnalysis_Udacity | 860651042c0dd5376f23aa478bb37d04c3538641 | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
import psycopg2
DBNAME = "news"
question1 = "\nWhat are the most popular three articles of all time?\n"
query1 = '''SELECT
title,
COUNT(substr(path, 10)) AS views
FROM
articles
JOIN
log
ON slug = substr(path, 10)
GROUP BY
title
ORDER BY
views DESC LIMIT 3;'''
question2 = "\nWho are the most popular article authors of all time?\n"
query2 = '''SELECT
auth.name,
SUM(views_qry.views)
FROM
authors auth,
articles artic,
(
SELECT
title,
COUNT(substr(path, 10)) AS views
FROM
articles
JOIN
log
ON slug = substr(path, 10)
GROUP BY
title
)
views_qry
WHERE
auth.id = artic.author
AND views_qry.title = artic.title
GROUP BY
auth.name
ORDER BY
SUM(views_qry.views) DESC;'''
question3 = "\nOn which days did more than 1% of requests lead to errors?\n"
query3 = '''SELECT
*
FROM
(
SELECT
total.DAY,
round(CAST((error.errorCount*100) AS NUMERIC) /
CAST(total.totalCount AS NUMERIC), 2) AS percentage
FROM
(
SELECT
DATE(TIME) AS DAY,
COUNT(*) AS errorCount
FROM
log
WHERE
status NOT LIKE '%200 OK%'
GROUP BY
DAY
)
AS error
INNER JOIN
(
SELECT
DATE(TIME) AS DAY,
COUNT(*) AS totalCount
FROM
log
GROUP BY
DAY
)
AS total
ON total.DAY = error.DAY
)
AS subqry
WHERE
percentage > 1.0;'''
def get_top_articles(cursor):
    """Print the three most-viewed articles of all time (query1)."""
    print(question1)
    cursor.execute(query1)
    # query1 selects exactly (title, views) per row.
    for title, views in cursor.fetchall():
        print('\t' + str(title) + ' - ' + str(views) + ' views')
def get_popular_author(cursor):
    """Print article authors ranked by their total article views (query2)."""
    print(question2)
    cursor.execute(query2)
    # query2 selects exactly (author name, total views) per row.
    for name, views in cursor.fetchall():
        print('\t' + str(name) + ' - ' + str(views) + ' views')
def get_day_max_error(cursor):
    """Print the days on which more than 1% of requests were errors (query3)."""
    print(question3)
    cursor.execute(query3)
    # query3 selects exactly (day, error percentage) per row.
    for day, pct in cursor.fetchall():
        print('\t' + str(day) + ' - ' + str(pct) + '%')
if __name__ == '__main__':
    # Connect once and share the cursor across the three reports.
    conn = psycopg2.connect(dbname=DBNAME)
    try:
        cursor = conn.cursor()
        get_top_articles(cursor)
        get_popular_author(cursor)
        get_day_max_error(cursor)
    finally:
        # ROBUSTNESS FIX: the original leaked the connection if any query
        # raised; always close it.
        conn.close()
| 24.042017 | 77 | 0.487941 |
import psycopg2
DBNAME = "news"
question1 = "\nWhat are the most popular three articles of all time?\n"
query1 = '''SELECT
title,
COUNT(substr(path, 10)) AS views
FROM
articles
JOIN
log
ON slug = substr(path, 10)
GROUP BY
title
ORDER BY
views DESC LIMIT 3;'''
question2 = "\nWho are the most popular article authors of all time?\n"
query2 = '''SELECT
auth.name,
SUM(views_qry.views)
FROM
authors auth,
articles artic,
(
SELECT
title,
COUNT(substr(path, 10)) AS views
FROM
articles
JOIN
log
ON slug = substr(path, 10)
GROUP BY
title
)
views_qry
WHERE
auth.id = artic.author
AND views_qry.title = artic.title
GROUP BY
auth.name
ORDER BY
SUM(views_qry.views) DESC;'''
question3 = "\nOn which days did more than 1% of requests lead to errors?\n"
query3 = '''SELECT
*
FROM
(
SELECT
total.DAY,
round(CAST((error.errorCount*100) AS NUMERIC) /
CAST(total.totalCount AS NUMERIC), 2) AS percentage
FROM
(
SELECT
DATE(TIME) AS DAY,
COUNT(*) AS errorCount
FROM
log
WHERE
status NOT LIKE '%200 OK%'
GROUP BY
DAY
)
AS error
INNER JOIN
(
SELECT
DATE(TIME) AS DAY,
COUNT(*) AS totalCount
FROM
log
GROUP BY
DAY
)
AS total
ON total.DAY = error.DAY
)
AS subqry
WHERE
percentage > 1.0;'''
def get_top_articles(cursor):
    """Print the three most-viewed articles of all time (query1)."""
    print(question1)
    cursor.execute(query1)
    # query1 selects exactly (title, views) per row.
    for title, views in cursor.fetchall():
        print('\t' + str(title) + ' - ' + str(views) + ' views')
def get_popular_author(cursor):
    """Print article authors ranked by their total article views (query2)."""
    print(question2)
    cursor.execute(query2)
    # query2 selects exactly (author name, total views) per row.
    for name, views in cursor.fetchall():
        print('\t' + str(name) + ' - ' + str(views) + ' views')
def get_day_max_error(cursor):
    """Print the days on which more than 1% of requests were errors (query3)."""
    print(question3)
    cursor.execute(query3)
    # query3 selects exactly (day, error percentage) per row.
    for day, pct in cursor.fetchall():
        print('\t' + str(day) + ' - ' + str(pct) + '%')
if __name__ == '__main__':
    # Connect once and share the cursor across the three reports.
    conn = psycopg2.connect(dbname=DBNAME)
    try:
        cursor = conn.cursor()
        get_top_articles(cursor)
        get_popular_author(cursor)
        get_day_max_error(cursor)
    finally:
        # ROBUSTNESS FIX: the original leaked the connection if any query
        # raised; always close it.
        conn.close()
| true | true |
1c2e3c444ff71978eb57327f35fbb39ec72a91ea | 9,081 | py | Python | lib/fast_rcnn/config.py | svebk/py-faster-rcnn | 1d0c40c42930f8e89634c057a0ed902aace395bd | [
"BSD-2-Clause"
] | null | null | null | lib/fast_rcnn/config.py | svebk/py-faster-rcnn | 1d0c40c42930f8e89634c057a0ed902aace395bd | [
"BSD-2-Clause"
] | null | null | null | lib/fast_rcnn/config.py | svebk/py-faster-rcnn | 1d0c40c42930f8e89634c057a0ed902aace395bd | [
"BSD-2-Clause"
] | null | null | null | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'selective_search'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
__C.TRAIN.ASPECT_GROUPING = True
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = False
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor statisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
#
# Testing options
#
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'selective_search'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Model directory
__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net):
    """Return the directory where experimental artifacts are placed.

    The canonical path is <ROOT>/output/<EXP_DIR>/<imdb name>[/<net name>];
    the final component is omitted when *net* is None.
    """
    base = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
    return base if net is None else osp.join(base, net.name)
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.
    """
    # Non-edict inputs are silently ignored (leaf values are handled by the
    # caller's assignment below).
    if type(a) is not edict:
        return
    # NOTE: iteritems()/has_key() are Python 2 only -- this module targets
    # py2; porting to py3 would need items()/`in`.
    for k, v in a.iteritems():
        # a must specify keys that are in b
        if not b.has_key(k):
            raise KeyError('{} is not a valid config key'.format(k))
        # the types must match, too
        old_type = type(b[k])
        if old_type is not type(v):
            # ndarray defaults accept list overrides by coercing the dtype.
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                               type(v), k))
        # recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except:
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        # SECURITY NOTE(review): yaml.load without an explicit Loader can
        # execute arbitrary Python via YAML tags; prefer yaml.safe_load if
        # config files may ever come from untrusted sources.
        yaml_cfg = edict(yaml.load(f))
    _merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line).

    cfg_list alternates dotted key paths and values:
    ['TRAIN.SCALES', '[400]', 'GPU_ID', '1'].
    """
    from ast import literal_eval
    # Keys and values must come in pairs.
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        # Walk down to the sub-dict holding the final key.
        for subkey in key_list[:-1]:
            assert d.has_key(subkey)
            d = d[subkey]
        subkey = key_list[-1]
        assert d.has_key(subkey)
        try:
            value = literal_eval(v)
        except:
            # handle the case when v is a string literal
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(
                type(value), type(d[subkey]))
        d[subkey] = value
| 31.975352 | 91 | 0.689462 |
import os
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
__C.TRAIN.MAX_SIZE = 1000
__C.TRAIN.IMS_PER_BATCH = 2
__C.TRAIN.BATCH_SIZE = 128
__C.TRAIN.FG_FRACTION = 0.25
__C.TRAIN.FG_THRESH = 0.5
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
__C.TRAIN.USE_FLIPPED = True
__C.TRAIN.BBOX_REG = True
__C.TRAIN.BBOX_THRESH = 0.5
__C.TRAIN.SNAPSHOT_ITERS = 10000
__C.TRAIN.SNAPSHOT_INFIX = ''
__C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'selective_search'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
__C.TRAIN.ASPECT_GROUPING = True
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = False
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor statisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
#
# Testing options
#
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
__C.TEST.MAX_SIZE = 1000
__C.TEST.NMS = 0.3
__C.TEST.SVM = False
__C.TEST.BBOX_REG = True
__C.TEST.HAS_RPN = False
__C.TEST.PROPOSAL_METHOD = 'selective_search'
# (truncated comment fragment from the original: "...they were trained with")
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models'))
__C.MATLAB = 'matlab'
__C.EXP_DIR = 'default'
__C.USE_GPU_NMS = True
__C.GPU_ID = 0
def get_output_dir(imdb, net):
    """Return the directory where experimental artifacts are stored.

    The base path is <ROOT_DIR>/output/<EXP_DIR>/<imdb.name>; when a
    network object is supplied, its name is appended as a final
    path component.
    """
    base = osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name)
    outdir = osp.abspath(base)
    if net is not None:
        outdir = osp.join(outdir, net.name)
    return outdir
def _merge_a_into_b(a, b):
    """Merge config dict ``a`` into config dict ``b``, clobbering options
    in ``b`` whenever they are also specified in ``a``.

    Raises:
        KeyError: if ``a`` contains a key that does not exist in ``b``.
        ValueError: if the types of a shared key's values do not match
            (lists/tuples are coerced when the target is an ndarray).
    """
    if type(a) is not edict:
        return
    # .items() / `in` work on both Python 2 and 3; the original
    # iteritems()/has_key() calls raise AttributeError on Python 3.
    for k, v in a.items():
        # a must specify keys that already exist in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))
        # the types must match, too
        old_type = type(b[k])
        if old_type is not type(v):
            if isinstance(b[k], np.ndarray):
                # YAML gives us lists/tuples; coerce to the target dtype
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                               type(v), k))
        # recursively merge nested config dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except Exception:
                # annotate which key the failure happened under, then re-raise
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
def cfg_from_file(filename):
    """Load a config file and merge it into the default options ``__C``.

    Args:
        filename: path to a YAML file whose key tree mirrors ``__C``.
    """
    import yaml
    with open(filename, 'r') as f:
        # safe_load: yaml.load without an explicit Loader can construct
        # arbitrary Python objects from untrusted input, and raises a
        # TypeError on PyYAML >= 6. Config files only need plain
        # scalars/lists/dicts, which safe_load handles.
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
    """Set config keys from a flat list of dotted-key/value pairs.

    Example: ``cfg_from_list(['TRAIN.RPN_MIN_SIZE', '8', 'GPU_ID', '1'])``.
    Values are parsed with ``ast.literal_eval`` when possible, otherwise
    kept as raw strings; the parsed type must match the existing value's.
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0, 'cfg_list must hold key/value pairs'
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        # walk down to the parent dict of the target key
        # (`in` replaces Python-2-only dict.has_key, which breaks on Py3)
        for subkey in key_list[:-1]:
            assert subkey in d, '{} is not a valid config key'.format(k)
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d, '{} is not a valid config key'.format(k)
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError, TypeError):
            # not a Python literal: keep the raw string
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(
                type(value), type(d[subkey]))
        d[subkey] = value
| true | true |
1c2e3c9b914af797b1b040a129e2c66f74970371 | 1,833 | py | Python | payment_gateway/api/external/sqs_base.py | MayaraMachado/sns_and_sqs_project | 4fcc5bbb5f6841543ea8dda353dd85a43024f683 | [
"MIT"
] | 5 | 2020-06-22T21:29:54.000Z | 2021-11-01T20:12:04.000Z | payment_gateway/api/external/sqs_base.py | MayaraMachado/sns_and_sqs_project | 4fcc5bbb5f6841543ea8dda353dd85a43024f683 | [
"MIT"
] | 5 | 2021-03-30T13:38:15.000Z | 2021-09-22T19:10:27.000Z | payment_gateway/api/external/sqs_base.py | MayaraMachado/sns_and_sqs_project | 4fcc5bbb5f6841543ea8dda353dd85a43024f683 | [
"MIT"
] | null | null | null | import boto3
import logging
from django.conf import settings
class SQSConnection:
    """Thin convenience wrapper around the boto3 SQS client.

    AWS credentials are read from the Django settings module; the region
    is hard-coded to us-east-1.
    """

    def __init__(self):
        """Instantiate the underlying boto3 SQS client."""
        self.sqs_client = boto3.client(
            'sqs',
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
            region_name='us-east-1')

    def create_sqs_queue(self, queue_name):
        """Create the queue named ``queue_name`` and return the AWS response."""
        return self.sqs_client.create_queue(
            QueueName=queue_name
        )

    def find_queue(self, prefix):
        """List queues whose name starts with ``prefix``."""
        return self.sqs_client.list_queues(
            QueueNamePrefix=prefix
        )

    def list_all_queues(self):
        """List every queue visible to these credentials."""
        return self.sqs_client.list_queues()

    def poll_queue_for_messages(self, queue_url, max_messages_number=10):
        """Receive up to ``max_messages_number`` messages from ``queue_url``."""
        return self.sqs_client.receive_message(
            QueueUrl=queue_url,
            MaxNumberOfMessages=max_messages_number
        )

    def process_message_from_queue(self, queue_url, max_messages_number=10,
                                   visibility_timeout=30):
        """Poll ``queue_url``, log each received message, and extend its
        visibility timeout.

        Bug fix: the original body called the module-level names
        ``poll_queue_for_messages`` and ``change_message_visibility_timeout``,
        neither of which exists, so the method always raised NameError. It
        now calls the instance method and the boto3
        ``change_message_visibility`` API. ``queue_url`` is a new required
        argument; the original zero-argument signature could never complete
        a call, so no working caller is broken.
        """
        queue_messages = self.poll_queue_for_messages(
            queue_url, max_messages_number)
        for message in queue_messages.get('Messages', []):
            logging.warning(f"Processing message: {message['MessageId']} with text: {message['Body']}.")
            # Keep the message invisible to other consumers while it is
            # being processed.
            self.sqs_client.change_message_visibility(
                QueueUrl=queue_url,
                ReceiptHandle=message['ReceiptHandle'],
                VisibilityTimeout=visibility_timeout
            )

    def delete_message_from_queue(self, queue_url, receipt_handle):
        """Delete one message, identified by its receipt handle."""
        return self.sqs_client.delete_message(
            QueueUrl=queue_url,
            ReceiptHandle=receipt_handle
        )

    def purge_queue(self, queue_url):
        """Delete all messages currently in ``queue_url``."""
        return self.sqs_client.purge_queue(
            QueueUrl=queue_url
        )
| 30.04918 | 108 | 0.623568 | import boto3
import logging
from django.conf import settings
class SQSConnection:
    """Thin wrapper around the boto3 SQS client; credentials come from
    Django settings, region is hard-coded to us-east-1."""
    def __init__(self):
        # Build the boto3 SQS client once per instance.
        self.sqs_client = boto3.client(
            'sqs',
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
            region_name='us-east-1')
    def create_sqs_queue(self, queue_name):
        """Create the queue named ``queue_name``; returns the AWS response."""
        return self.sqs_client.create_queue(
            QueueName=queue_name
        )
    def find_queue(self, prefix):
        """List queues whose name starts with ``prefix``."""
        return self.sqs_client.list_queues(
            QueueNamePrefix=prefix
        )
    def list_all_queues(self):
        """List every queue visible to these credentials."""
        return self.sqs_client.list_queues()
    def poll_queue_for_messages(self, queue_url, max_messages_number=10):
        """Receive up to ``max_messages_number`` messages from ``queue_url``."""
        return self.sqs_client.receive_message(
            QueueUrl=queue_url,
            MaxNumberOfMessages=max_messages_number
        )
    def process_message_from_queue(self):
        # NOTE(review): `poll_queue_for_messages` and
        # `change_message_visibility_timeout` are referenced as module-level
        # functions but are not defined anywhere visible (and the former
        # requires a queue_url) -- as written this method raises NameError.
        # Probably intended: self.poll_queue_for_messages(queue_url) and a
        # boto3 change_message_visibility call. Confirm before relying on it.
        queue_messages = poll_queue_for_messages()
        if 'Messages' in queue_messages and len(queue_messages['Messages']) >= 1:
            for message in queue_messages['Messages']:
                logging.warning(f"Processing message: {message['MessageId']} with text: {message['Body']}.")
                change_message_visibility_timeout(message['ReceiptHandle'])
    def delete_message_from_queue(self, queue_url, receipt_handle):
        """Delete one message, identified by its receipt handle."""
        return self.sqs_client.delete_message(
            QueueUrl=queue_url,
            ReceiptHandle=receipt_handle
        )
    def purge_queue(self, queue_url):
        """Delete all messages currently in ``queue_url``."""
        return self.sqs_client.purge_queue(
            QueueUrl=queue_url
        )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.